Merge tag 'upstream-go1.22.1'

Bug: 330574836
Test: builds
Change-Id: Icaf805d49ad96dd3f2960c5f92b4eeb7c131291c
diff --git a/.github/ISSUE_TEMPLATE/00-bug.md b/.github/ISSUE_TEMPLATE/00-bug.md
deleted file mode 100644
index f056dab..0000000
--- a/.github/ISSUE_TEMPLATE/00-bug.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-name: Bugs
-about: The go command, standard library, or anything else
-title: "affected/package: "
----
-
-<!--
-Please answer these questions before submitting your issue. Thanks!
--->
-
-### What version of Go are you using (`go version`)?
-
-<pre>
-$ go version
-
-</pre>
-
-### Does this issue reproduce with the latest release?
-
-
-
-### What operating system and processor architecture are you using (`go env`)?
-
-<details><summary><code>go env</code> Output</summary><br><pre>
-$ go env
-
-</pre></details>
-
-### What did you do?
-
-<!--
-If possible, provide a recipe for reproducing the error.
-A complete runnable program is good.
-A link on go.dev/play is best.
--->
-
-
-
-### What did you expect to see?
-
-
-
-### What did you see instead?
-
-
diff --git a/.github/ISSUE_TEMPLATE/00-bug.yml b/.github/ISSUE_TEMPLATE/00-bug.yml
new file mode 100644
index 0000000..5b0fda4
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/00-bug.yml
@@ -0,0 +1,94 @@
+# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#creating-issue-forms
+# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/syntax-for-githubs-form-schema
+name: Bugs
+description: The go command, standard library, or anything else
+title: "import/path: issue title"
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for helping us improve! 🙏 Please answer these questions and provide as much information as possible about your problem.
+
+  - type: input
+    id: go-version
+    attributes:
+      label: Go version
+      description: |
+        What version of Go are you using (`go version`)?
+
+        Note: we only [support](https://go.dev/doc/devel/release#policy) the two most recent major releases.
+      placeholder: ex. go version go1.20.7 darwin/arm64
+    validations:
+      required: true
+
+  - type: textarea
+    id: go-env
+    attributes:
+      label: "Output of `go env` in your module/workspace:"
+      placeholder: |
+        GO111MODULE=""
+        GOARCH="arm64"
+        GOBIN="/Users/gopher/go/bin"
+        GOCACHE="/Users/gopher/go/cache"
+        GOENV="/Users/gopher/Library/Application Support/go/env"
+        GOEXE=""
+        GOEXPERIMENT=""
+        GOFLAGS=""
+        GOHOSTARCH="arm64"
+        GOHOSTOS="darwin"
+        GOINSECURE=""
+        GOMODCACHE="/Users/gopher/go/pkg/mod"
+        GONOPROXY=""
+        GONOSUMDB=""
+        GOOS="darwin"
+        GOPATH="/Users/gopher/go"
+        GOPRIVATE=""
+        GOPROXY="https://proxy.golang.org,direct"
+        GOROOT="/usr/local/go"
+        GOSUMDB="sum.golang.org"
+        GOTMPDIR=""
+        GOTOOLDIR="/usr/local/go/pkg/tool/darwin_arm64"
+        GOVCS=""
+        GOVERSION="go1.20.7"
+        GCCGO="gccgo"
+        AR="ar"
+        CC="clang"
+        CXX="clang++"
+        CGO_ENABLED="1"
+        GOMOD="/dev/null"
+        GOWORK=""
+        CGO_CFLAGS="-O2 -g"
+        CGO_CPPFLAGS=""
+        CGO_CXXFLAGS="-O2 -g"
+        CGO_FFLAGS="-O2 -g"
+        CGO_LDFLAGS="-O2 -g"
+        PKG_CONFIG="pkg-config"
+        GOGCCFLAGS="-fPIC -arch arm64 -pthread -fno-caret-diagnostics -Qunused-arguments -fmessage-length=0 -fdebug-prefix-map=/var/folders/44/nbbyll_10jd0z8rj_qxm43740000gn/T/go-build2331607515=/tmp/go-build -gno-record-gcc-switches -fno-common"
+      render: shell
+    validations:
+      required: true
+
+  - type: textarea
+    id: what-did-you-do
+    attributes:
+      label: "What did you do?"
+      description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is best."
+    validations:
+      required: true
+
+  - type: textarea
+    id: actual-behavior
+    attributes:
+      label: "What did you see happen?"
+      description: Command invocations and their associated output, functions with their arguments and return results, full stacktraces for panics (upload a file if it is very long), etc. Prefer copying text output over using screenshots.
+    validations:
+      required: true
+
+  - type: textarea
+    id: expected-behavior
+    attributes:
+      label: "What did you expect to see?"
+      description: Why is the current output incorrect, and any additional context we may need to understand the issue.
+    validations:
+      required: true
diff --git a/.github/ISSUE_TEMPLATE/01-pkgsite.md b/.github/ISSUE_TEMPLATE/01-pkgsite.md
deleted file mode 100644
index 31f0fd1..0000000
--- a/.github/ISSUE_TEMPLATE/01-pkgsite.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-name: Pkg.go.dev bugs or feature requests
-about: Issues or feature requests for the documentation site
-title: "x/pkgsite: "
-labels: pkgsite
----
-
-<!--
-Please answer these questions before submitting your issue. Thanks!
--->
-
-### What is the URL of the page with the issue?
-
-
-
-### What is your user agent?
-
-<!--
-You can find your user agent here:
-https://www.google.com/search?q=what+is+my+user+agent
--->
-
-
-
-### Screenshot
-
-<!--
-Please paste a screenshot of the page.
--->
-
-
-
-### What did you do?
-
-<!--
-If possible, provide a recipe for reproducing the error.
-
-Starting with a Private/Incognito tab/window may help rule out problematic browser extensions.
--->
-
-
-
-### What did you expect to see?
-
-
-
-### What did you see instead?
-
-
diff --git a/.github/ISSUE_TEMPLATE/01-pkgsite.yml b/.github/ISSUE_TEMPLATE/01-pkgsite.yml
new file mode 100644
index 0000000..aaf39b2
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/01-pkgsite.yml
@@ -0,0 +1,47 @@
+name: Pkg.go.dev bugs or feature requests
+description: Issues or feature requests for the documentation site
+title: "x/pkgsite: issue title"
+labels: ["pkgsite"]
+body:
+  - type: markdown
+    attributes:
+      value: "Please answer these questions before submitting your issue. Thanks!"
+  - type: input
+    id: url
+    attributes:
+      label: "What is the URL of the page with the issue?"
+    validations:
+      required: true
+  - type: input
+    id: user-agent
+    attributes:
+      label: "What is your user agent?"
+      description: "You can find your user agent here: https://www.google.com/search?q=what+is+my+user+agent"
+    validations:
+      required: true
+  - type: textarea
+    id: screenshot
+    attributes:
+      label: "Screenshot"
+      description: "Please paste a screenshot of the page."
+    validations:
+      required: false
+  - type: textarea
+    id: what-did-you-do
+    attributes:
+      label: "What did you do?"
+      description: "If possible, provide a recipe for reproducing the error. Starting with a Private/Incognito tab/window may help rule out problematic browser extensions."
+    validations:
+      required: true
+  - type: textarea
+    id: actual-behavior
+    attributes:
+      label: "What did you see happen?"
+    validations:
+      required: true
+  - type: textarea
+    id: expected-behavior
+    attributes:
+      label: "What did you expect to see?"
+    validations:
+      required: true
diff --git a/.github/ISSUE_TEMPLATE/02-pkgsite-removal.md b/.github/ISSUE_TEMPLATE/02-pkgsite-removal.md
deleted file mode 100644
index 97fe317..0000000
--- a/.github/ISSUE_TEMPLATE/02-pkgsite-removal.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-name: Pkg.go.dev package removal request
-about: Request a package be removed from the documentation site (pkg.go.dev)
-title: "x/pkgsite: package removal request for [type path here]"
-labels: pkgsite/package-removal
----
-
-<!--
-Please answer these questions before submitting your issue. Thanks!
--->
-
-### What is the path of the package that you would like to have removed?
-
-<!---
-We can remove packages with a shared path prefix.
-For example, a request for "github.com/author" would remove all pkg.go.dev pages with that package path prefix.
---->
-
-
-
-### Are you the owner of this package?
-
-<!---
-Only the package owners can request to have their packages removed from pkg.go.dev.
---->
-
-
-
-### What is the reason that you could not retract this package instead?
-
-<!---
-If you would like to have your module removed from pkg.go.dev, we recommend that you retract them, so that they can be removed from the go command and proxy.golang.org as well.
-
-Retracting a module version involves adding a retract directive to your go.mod file and publishing a new version. For example: https://github.com/jba/retract-demo/blob/main/go.mod#L5-L8
-
-See https://pkg.go.dev/about#removing-a-package for additional tips on retractions.
---->
-
-
diff --git a/.github/ISSUE_TEMPLATE/02-pkgsite-removal.yml b/.github/ISSUE_TEMPLATE/02-pkgsite-removal.yml
new file mode 100644
index 0000000..693f499
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/02-pkgsite-removal.yml
@@ -0,0 +1,42 @@
+name: Pkg.go.dev package removal request
+description: Request a package be removed from the documentation site (pkg.go.dev)
+title: "x/pkgsite: package removal request for [type path here]"
+labels: ["pkgsite/package-removal"]
+body:
+  - type: markdown
+    attributes:
+      value: "Please answer these questions before submitting your issue. Thanks!"
+  - type: input
+    id: package-path
+    attributes:
+      label: "What is the path of the package that you would like to have removed?"
+      description: |
+        We can remove packages with a shared path prefix.
+        For example, a request for 'github.com/author' would remove all pkg.go.dev pages with that package path prefix.
+    validations:
+      required: true
+  - type: textarea
+    id: package-owner
+    attributes:
+      label: "Are you the owner of this package?"
+      description: |
+        Only the package owners can request to have their packages removed from pkg.go.dev.
+        If the package path doesn't include your github username, please provide some other form of proof of ownership.
+    validations:
+      required: true
+  - type: textarea
+    id: retraction-reason
+    attributes:
+      label: "What is the reason that you could not retract this package instead?"
+      description: |
+        Requesting we remove a module here only hides the generated documentation on pkg.go.dev.
+        It does not affect the behaviour of proxy.golang.org or the go command.
+        Instead we recommend using the retract directive which will be processed by all 3 of the above.
+
+        If you have deleted your repo, please recreate it and publish a retraction.
+
+        Retracting a module version involves adding a retract directive to your go.mod file and publishing a new version.
+        For example: https://github.com/jba/retract-demo/blob/main/go.mod#L5-L8.
+        See https://pkg.go.dev/about#removing-a-package for additional tips on retractions.
+    validations:
+      required: true
diff --git a/.github/ISSUE_TEMPLATE/03-gopls.md b/.github/ISSUE_TEMPLATE/03-gopls.md
deleted file mode 100644
index c4934c3..0000000
--- a/.github/ISSUE_TEMPLATE/03-gopls.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-name: Gopls bugs or feature requests
-about: Issues or feature requests for the Go language server (gopls)
-title: "x/tools/gopls: "
-labels: gopls Tools
----
-
-<!--
-Please answer these questions before submitting your issue. Thanks!
--->
-
-### gopls version
-
-<!--
-Output of `gopls -v version` on the command line
--->
-
-
-
-### go env
-
-<!--
-Output of `go env` on the command line in your workspace directory
--->
-
-
-### What did you do?
-
-<!--
-If possible, provide a recipe for reproducing the error.
-A complete runnable program is good.
-A link on go.dev/play is better.
-A failing unit test is the best.
--->
-
-
-
-### What did you expect to see?
-
-
-
-### What did you see instead?
-
-
-
-### Editor and settings
-
-<!--
-Your editor and any settings you have configured (for example, your VSCode settings.json file)
--->
-
-
-
-### Logs
-
-<!--
-If possible please include gopls logs. Instructions for capturing them can be found here:
-https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md#capture-logs
--->
-
-
diff --git a/.github/ISSUE_TEMPLATE/03-gopls.yml b/.github/ISSUE_TEMPLATE/03-gopls.yml
new file mode 100644
index 0000000..5db1315
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/03-gopls.yml
@@ -0,0 +1,56 @@
+name: Gopls bugs or feature requests
+description: Issues or feature requests for the Go language server (gopls)
+title: "x/tools/gopls: issue title"
+labels: ["gopls", "Tools"]
+body:
+  - type: markdown
+    attributes:
+      value: "Please answer these questions before submitting your issue. Thanks!"
+  - type: input
+    id: gopls-version
+    attributes:
+      label: "gopls version"
+      description: "Output of `gopls -v version` on the command line"
+    validations:
+      required: true
+  - type: textarea
+    id: go-env
+    attributes:
+      label: "go env"
+      description: "Output of `go env` on the command line in your workspace directory"
+      render: shell
+    validations:
+      required: true
+  - type: textarea
+    id: what-did-you-do
+    attributes:
+      label: "What did you do?"
+      description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is better. A failing unit test is the best."
+    validations:
+      required: true
+  - type: textarea
+    id: actual-behavior
+    attributes:
+      label: "What did you see happen?"
+    validations:
+      required: true
+  - type: textarea
+    id: expected-behavior
+    attributes:
+      label: "What did you expect to see?"
+    validations:
+      required: true
+  - type: textarea
+    id: editor-and-settings
+    attributes:
+      label: "Editor and settings"
+      description: "Your editor and any settings you have configured (for example, your VSCode settings.json file)"
+    validations:
+      required: false
+  - type: textarea
+    id: logs
+    attributes:
+      label: "Logs"
+      description: "If possible please include gopls logs. Instructions for capturing them can be found here: https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md#capture-logs"
+    validations:
+      required: false
diff --git a/.github/ISSUE_TEMPLATE/04-vuln.md b/.github/ISSUE_TEMPLATE/04-vuln.md
deleted file mode 100644
index 7e129d7..0000000
--- a/.github/ISSUE_TEMPLATE/04-vuln.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-name: Go vulnerability management - bugs and feature requests
-about: Issues or feature requests about Go vulnerability management
-title: "x/vuln: "
-labels: "vulncheck or vulndb"
----
-
-<!--
-Please answer these questions before submitting your issue. Thanks!
-
-To add a new vulnerability to the Go vulnerability database
-(https://vuln.go.dev), see https://go.dev/s/vulndb-report-new.
-
-To report an issue about a report, see https://go.dev/s/vulndb-report-feedback.
--->
-
-### What version of Go are you using (`go version`)?
-
-<pre>
-$ go version
-
-</pre>
-
-### Does this issue reproduce at the latest version of golang.org/x/vuln?
-
-
-
-### What operating system and processor architecture are you using (`go env`)?
-
-<details><summary><code>go env</code> Output</summary><br><pre>
-$ go env
-
-</pre></details>
-
-### What did you do?
-
-<!--
-If possible, provide a recipe for reproducing the error.
-A complete runnable program is good.
-A link on go.dev/play is best.
--->
-
-
-
-### What did you expect to see?
-
-
-
-### What did you see instead?
-
-
diff --git a/.github/ISSUE_TEMPLATE/04-vuln.yml b/.github/ISSUE_TEMPLATE/04-vuln.yml
new file mode 100644
index 0000000..dd40af9
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/04-vuln.yml
@@ -0,0 +1,52 @@
+name: Go vulnerability management - bugs and feature requests
+description: Issues or feature requests about Go vulnerability management
+title: "x/vuln: issue title"
+labels: ["vulncheck or vulndb"]
+body:
+  - type: markdown
+    attributes:
+      value: "Please answer these questions before submitting your issue. Thanks! To add a new vulnerability to the Go vulnerability database (https://vuln.go.dev), see https://go.dev/s/vulndb-report-new. To report an issue about a report, see https://go.dev/s/vulndb-report-feedback."
+  - type: textarea
+    id: govulncheck-version
+    attributes:
+      label: govulncheck version
+      description: What version of govulncheck are you using (`govulncheck -version`)?
+      placeholder: |
+        Go: devel go1.22-0262ea1ff9 Thu Oct 26 18:46:50 2023 +0000
+        Scanner: govulncheck@v1.0.2-0.20231108200754-fcf7dff7b242
+        DB: https://vuln.go.dev
+        DB updated: 2023-11-21 15:39:17 +0000 UTC
+    validations:
+      required: true
+  - type: textarea
+    id: reproduce-latest-version
+    attributes:
+      label: "Does this issue reproduce at the latest version of golang.org/x/vuln?"
+    validations:
+      required: true
+  - type: textarea
+    id: go-env
+    attributes:
+      label: "Output of `go env` in your module/workspace:"
+      render: shell
+    validations:
+      required: true
+  - type: textarea
+    id: what-did-you-do
+    attributes:
+      label: "What did you do?"
+      description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is best."
+    validations:
+      required: true
+  - type: textarea
+    id: actual-behavior
+    attributes:
+      label: "What did you see happen?"
+    validations:
+      required: true
+  - type: textarea
+    id: expected-behavior
+    attributes:
+      label: "What did you expect to see?"
+    validations:
+      required: true
diff --git a/.github/ISSUE_TEMPLATE/10-proposal.md b/.github/ISSUE_TEMPLATE/10-proposal.md
deleted file mode 100644
index ab30ddf..0000000
--- a/.github/ISSUE_TEMPLATE/10-proposal.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-name: Proposals
-about: New external API or other notable changes
-title: "proposal: affected/package: "
-labels: Proposal
----
-
-<!--
-Our proposal process is documented here:
-https://go.dev/s/proposal-process
--->
-
-
diff --git a/.github/ISSUE_TEMPLATE/10-proposal.yml b/.github/ISSUE_TEMPLATE/10-proposal.yml
new file mode 100644
index 0000000..d2a256c
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/10-proposal.yml
@@ -0,0 +1,15 @@
+name: Proposals
+description: New external API or other notable changes
+title: "proposal: import/path: proposal title"
+labels: ["Proposal"]
+body:
+  - type: markdown
+    attributes:
+      value: "Our proposal process is documented here: https://go.dev/s/proposal-process"
+  - type: textarea
+    id: proposal-details
+    attributes:
+      label: "Proposal Details"
+      description: "Please provide the details of your proposal here."
+    validations:
+      required: true
diff --git a/.github/ISSUE_TEMPLATE/11-language-change.md b/.github/ISSUE_TEMPLATE/11-language-change.md
deleted file mode 100644
index 2032301..0000000
--- a/.github/ISSUE_TEMPLATE/11-language-change.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-name: Language Change Proposals
-about: Changes to the language
-title: "proposal: Go 2: "
-labels: Proposal Go2 LanguageChange
----
-
-<!--
-Our process for evaluating language changes can be found here:
-https://go.googlesource.com/proposal/+/refs/heads/master#language-changes
--->
-
-### Author background
-
-- **Would you consider yourself a novice, intermediate, or experienced Go programmer?**
-- **What other languages do you have experience with?**
-
-### Related proposals
-
-- **Has this idea, or one like it, been proposed before?**
-  - **If so, how does this proposal differ?**
-- **Does this affect error handling?**
-  - **If so, how does this differ from previous error handling proposals?**
-- **Is this about generics?**
-  - **If so, how does this relate to the accepted design and other generics proposals?**
-
-### Proposal
-
-- **What is the proposed change?**
-- **Who does this proposal help, and why?**
-- **Please describe as precisely as possible the change to the language.**
-- **What would change in the language spec?**
-- **Please also describe the change informally, as in a class teaching Go.**
-- **Is this change backward compatible?**
-  - Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit.
-    Show example code before and after the change.
-  - **Before**
-  - **After**
-- **Orthogonality: how does this change interact or overlap with existing features?**
-- **Is the goal of this change a performance improvement?**
-  - **If so, what quantifiable improvement should we expect?**
-  - **How would we measure it?**
-
-### Costs
-
-- **Would this change make Go easier or harder to learn, and why?**
-- **What is the cost of this proposal? (Every language change has a cost).**
-- **How many tools (such as vet, gopls, gofmt, goimports, etc.) would be affected?**
-- **What is the compile time cost?**
-- **What is the run time cost?**
-- **Can you describe a possible implementation?**
-- **Do you have a prototype? (This is not required.)**
diff --git a/.github/ISSUE_TEMPLATE/11-language-change.yml b/.github/ISSUE_TEMPLATE/11-language-change.yml
new file mode 100644
index 0000000..37ba2d7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/11-language-change.yml
@@ -0,0 +1,165 @@
+name: Language Change Proposals
+description: Changes to the language
+labels: ["Proposal", "v2", "LanguageChange"]
+title: "proposal: Go 2: proposal title"
+
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+       ## Our process for evaluating language changes can be found [here](https://go.googlesource.com/proposal/+/refs/heads/master#language-changes)
+
+  - type: dropdown
+    id: author-go-experience
+    attributes:
+      label: "Go Programming Experience"
+      description: "Would you consider yourself a novice, intermediate, or experienced Go programmer?"
+      options:
+        - "Novice"
+        - "Intermediate"
+        - "Experienced"
+      default: 1
+
+  - type: input
+    id: author-other-languages-experience
+    attributes:
+      label: "Other Languages Experience"
+      description: "What other languages do you have experience with?"
+      placeholder: "Go, Python, JS, Rust"
+    validations:
+      required: false
+
+  - type: checkboxes
+    id: related-idea
+    attributes:
+      label: "Related Idea"
+      options:
+        - label: "Has this idea, or one like it, been proposed before?"
+        - label: "Does this affect error handling?"
+        - label: "Is this about generics?"
+        - label: "Is this change backward compatible? Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit"
+
+  - type: textarea
+    id: related-proposals
+    attributes:
+      label: Has this idea, or one like it, been proposed before?
+      description: If so, how does this proposal differ?
+      placeholder: |
+       Yes or No
+
+       If yes,
+        1. mention the related proposals
+        2. then describe how this proposal differs
+    validations:
+      required: true
+
+  - type: textarea
+    id: error-handling-proposal
+    attributes:
+      label: Does this affect error handling?
+      description: If so, how does this differ from previous error handling proposals?
+      placeholder: |
+       Yes or No
+
+       If yes,
+        1. how does this differ from previous error handling proposals?
+
+    validations:
+      required: true
+
+  - type: textarea
+    id: generics-proposal
+    attributes:
+      label: Is this about generics?
+      description: If so, how does this relate to the accepted design and other generics proposals?
+      placeholder: |
+       Yes or No
+
+       If yes,
+        1. how does this relate to the accepted design and other generics proposals?
+
+    validations:
+      required: true
+
+  - type: textarea
+    id: proposal
+    attributes:
+      label: "Proposal"
+      description: "What is the proposed change? Who does this proposal help, and why? Please describe as precisely as possible the change to the language."
+    validations:
+      required: true
+
+  - type: textarea
+    id: language-spec-changes
+    attributes:
+      label: "Language Spec Changes"
+      description: "What would change in the language spec?"
+    validations:
+      required: false
+
+  - type: textarea
+    id: informal-change
+    attributes:
+      label: "Informal Change"
+      description: "Please also describe the change informally, as in a class teaching Go."
+    validations:
+      required: false
+
+  - type: textarea
+    id: go-backwards-compatiblity
+    attributes:
+      label: Is this change backward compatible?
+      description: Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit.
+      placeholder: |
+       Yes or No
+
+       If yes, 
+        1. Show example code before and after the change.
+
+    validations:
+      required: true
+
+  - type: textarea
+    id: orthogonality
+    attributes:
+      label: "Orthogonality: How does this change interact or overlap with existing features?"
+      description: "Is the goal of this change a performance improvement? If so, what quantifiable improvement should we expect? How would we measure it?"
+    validations:
+      required: false
+
+  - type: textarea
+    id: learning-curve
+    attributes:
+      label: "Would this change make Go easier or harder to learn, and why?"
+
+  - type: textarea
+    id: cost-description
+    attributes:
+      label: "Cost Description"
+      description: "What is the cost of this proposal? (Every language change has a cost)"
+
+  - type: input
+    id: go-toolchain
+    attributes:
+      label: Changes to Go Toolchain
+      description: "How many tools (such as vet, gopls, gofmt, goimports, etc.) would be affected?"
+    validations:
+      required: false
+
+  - type: input
+    id: perf-costs
+    attributes:
+      label: Performance Costs
+      description: "What is the compile time cost? What is the run time cost? "
+    validations:
+      required: false
+
+  - type: textarea
+    id: prototype
+    attributes:
+      label: "Prototype"
+      description: "Can you describe a possible implementation?"
+    validations:
+      required: false
+
diff --git a/.github/ISSUE_TEMPLATE/12-telemetry.yml b/.github/ISSUE_TEMPLATE/12-telemetry.yml
new file mode 100644
index 0000000..4215abf
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/12-telemetry.yml
@@ -0,0 +1,68 @@
+name: Go Telemetry Proposals
+description: New telemetry counter or update on an existing one
+title: "x/telemetry/config: proposal title"
+labels: ["Telemetry-Proposal"]
+projects: ["golang/29"]
+body:
+- type: textarea
+  attributes:
+    label: Counter names
+    description: Names of counters to add or update.
+  validations:
+    required: true
+- type: textarea
+  attributes:
+    label: Description
+    description: What do these counters measure?
+  validations:
+    required: true
+- type: textarea
+  attributes:
+    label: Rationale
+    description: |
+      Why is the counter important?
+      For example, what new insights will it provide, and how will that information be used?
+      If this is about updating existing counters, why is the change necessary?
+  validations:
+    required: true
+- type: textarea
+  attributes:
+    label: Do the counters carry sensitive user information?
+  validations:
+    required: true
+- type: textarea
+  attributes:
+    label: How?
+    description: |
+      How do we plan to compute the info?
+      If available, include the code location or cl that uses the golang.org/x/telemetry/counter API.
+  validations:
+    required: true
+- type: textarea
+  attributes:
+    label: Proposed Graph Config
+    description: |
+      Approved telemetry counters are maintained as [Go Telemetry Graph Config](https://golang.org/x/telemetry/internal/graphconfig) records.
+      Please draft the record entry for your proposal here.
+      If multiple records need to be included, separate them with `---` lines.
+      You can check the list of the approved counters and their current configuration in [config.txt](https://go.googlesource.com/telemetry/+/master/internal/configgen/config.txt).
+    render: Text
+    value: |
+      counter: gopls/bug
+      title: Gopls bug reports
+      description: Stacks of bugs encountered on the gopls server.
+      type: partition, histogram, stack # choose only one.
+      program: golang.org/x/tools/gopls
+      counter: gopls/bug
+      depth: 16  # only if type is stack.
+      version: v0.13.0  # the first binary version containing this counter.
+  validations:
+    required: true
+- type: dropdown
+  attributes:
+    label: New or Update
+    description: Is this a new counter? See [config.txt](https://go.googlesource.com/telemetry/+/master/internal/configgen/config.txt) for the list of approved counters.
+    options:
+      - New
+      - Update
+    default: 0
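
As background for the counter fields above, a minimal, hedged Go sketch of bumping such a counter through the golang.org/x/telemetry/counter API mentioned in the form; the counter name comes from the sample record, while the package layout and call site are illustrative assumptions:

package main

import "golang.org/x/telemetry/counter"

// bugReports reuses the counter name from the sample graph-config record above.
var bugReports = counter.New("gopls/bug")

func main() {
	// Increment once per observed event; persistence and upload are handled
	// by the telemetry subsystem, not by this sketch.
	bugReports.Inc()
}
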
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index c07f1e4..d6257da 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -1,4 +1,4 @@
-blank_issues_enabled: false
+blank_issues_enabled: true
 contact_links:
   - name: Questions
     about: Please use one of the forums for questions or general discussions
diff --git a/METADATA b/METADATA
index f2874e4..35b5cbb 100644
--- a/METADATA
+++ b/METADATA
@@ -10,7 +10,7 @@
     type: GIT
     value: "https://github.com/golang/go"
   }
-  version: "go1.21.4"
-  last_upgrade_date { year: 2023 month: 11 day: 8 }
+  version: "go1.22.1"
+  last_upgrade_date { year: 2024 month: 3 day: 20 }
   license_type: NOTICE
 }
diff --git a/SECURITY.md b/SECURITY.md
index ab608f3..e1718f0 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -10,4 +10,4 @@
 
 ## Reporting a Vulnerability
 
-See https://go.dev/security for how to report a vulnerability.
+See https://go.dev/security/policy for how to report a vulnerability.
diff --git a/VERSION b/VERSION
index 74f07e5..37f601f 100644
--- a/VERSION
+++ b/VERSION
@@ -1,2 +1,2 @@
-go1.21.4
-time 2023-11-01T20:46:39Z
+go1.22.1
+time 2024-02-29T18:18:48Z
diff --git a/api/go1.21.txt b/api/go1.21.txt
index 50b6a5c..1dc5fda 100644
--- a/api/go1.21.txt
+++ b/api/go1.21.txt
@@ -167,7 +167,12 @@
 pkg flag, func BoolFunc(string, string, func(string) error) #53747
 pkg flag, method (*FlagSet) BoolFunc(string, string, func(string) error) #53747
 pkg go/ast, func IsGenerated(*File) bool #28089
+pkg go/ast, func NewPackage //deprecated #52463
 pkg go/ast, type File struct, GoVersion string #59033
+pkg go/ast, type Importer //deprecated #52463
+pkg go/ast, type Object //deprecated #52463
+pkg go/ast, type Package //deprecated #52463
+pkg go/ast, type Scope //deprecated #52463
 pkg go/build/constraint, func GoVersion(Expr) string #59033
 pkg go/build, type Directive struct #56986
 pkg go/build, type Directive struct, Pos token.Position #56986
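
The go/ast entries above record Importer, Object, Package, Scope, and NewPackage as deprecated (#52463). A hedged sketch of the commonly suggested replacement, resolving names with go/types rather than ast.NewPackage; the file name and source text are invented for illustration:

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "x.go", "package p\nvar V = 42\n", 0)
	if err != nil {
		panic(err)
	}
	// Type-check the parsed file; go/types supersedes the deprecated ast name resolution.
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg.Scope().Lookup("V").Type()) // int
}
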
diff --git a/api/go1.22.txt b/api/go1.22.txt
new file mode 100644
index 0000000..55f2185
--- /dev/null
+++ b/api/go1.22.txt
@@ -0,0 +1,135 @@
+pkg archive/tar, method (*Writer) AddFS(fs.FS) error #58000
+pkg archive/zip, method (*Writer) AddFS(fs.FS) error #54898
+pkg cmp, func Or[$0 comparable](...$0) $0 #60204
+pkg crypto/x509, func OIDFromInts([]uint64) (OID, error) #60665
+pkg crypto/x509, method (*CertPool) AddCertWithConstraint(*Certificate, func([]*Certificate) error) #57178
+pkg crypto/x509, method (OID) Equal(OID) bool #60665
+pkg crypto/x509, method (OID) EqualASN1OID(asn1.ObjectIdentifier) bool #60665
+pkg crypto/x509, method (OID) String() string #60665
+pkg crypto/x509, type Certificate struct, Policies []OID #60665
+pkg crypto/x509, type OID struct #60665
+pkg database/sql, method (*Null[$0]) Scan(interface{}) error #60370
+pkg database/sql, method (Null[$0]) Value() (driver.Value, error) #60370
+pkg database/sql, type Null[$0 interface{}] struct #60370
+pkg database/sql, type Null[$0 interface{}] struct, V $0 #60370
+pkg database/sql, type Null[$0 interface{}] struct, Valid bool #60370
+pkg debug/elf, const R_LARCH_64_PCREL = 109 #63725
+pkg debug/elf, const R_LARCH_64_PCREL R_LARCH #63725
+pkg debug/elf, const R_LARCH_ADD6 = 105 #63725
+pkg debug/elf, const R_LARCH_ADD6 R_LARCH #63725
+pkg debug/elf, const R_LARCH_ADD_ULEB128 = 107 #63725
+pkg debug/elf, const R_LARCH_ADD_ULEB128 R_LARCH #63725
+pkg debug/elf, const R_LARCH_ALIGN = 102 #63725
+pkg debug/elf, const R_LARCH_ALIGN R_LARCH #63725
+pkg debug/elf, const R_LARCH_CFA = 104 #63725
+pkg debug/elf, const R_LARCH_CFA R_LARCH #63725
+pkg debug/elf, const R_LARCH_DELETE = 101 #63725
+pkg debug/elf, const R_LARCH_DELETE R_LARCH #63725
+pkg debug/elf, const R_LARCH_PCREL20_S2 = 103 #63725
+pkg debug/elf, const R_LARCH_PCREL20_S2 R_LARCH #63725
+pkg debug/elf, const R_LARCH_SUB6 = 106 #63725
+pkg debug/elf, const R_LARCH_SUB6 R_LARCH #63725
+pkg debug/elf, const R_LARCH_SUB_ULEB128 = 108 #63725
+pkg debug/elf, const R_LARCH_SUB_ULEB128 R_LARCH #63725
+pkg debug/elf, const R_MIPS_PC32 = 248 #61974
+pkg debug/elf, const R_MIPS_PC32 R_MIPS #61974
+pkg encoding/base32, method (*Encoding) AppendDecode([]uint8, []uint8) ([]uint8, error) #53693
+pkg encoding/base32, method (*Encoding) AppendEncode([]uint8, []uint8) []uint8 #53693
+pkg encoding/base64, method (*Encoding) AppendDecode([]uint8, []uint8) ([]uint8, error) #53693
+pkg encoding/base64, method (*Encoding) AppendEncode([]uint8, []uint8) []uint8 #53693
+pkg encoding/hex, func AppendDecode([]uint8, []uint8) ([]uint8, error) #53693
+pkg encoding/hex, func AppendEncode([]uint8, []uint8) []uint8 #53693
+pkg go/ast, func NewPackage //deprecated #52463
+pkg go/ast, func Unparen(Expr) Expr #60061
+pkg go/ast, type Importer //deprecated #52463
+pkg go/ast, type Object //deprecated #52463
+pkg go/ast, type Package //deprecated #52463
+pkg go/ast, type Scope //deprecated #52463
+pkg go/types, func NewAlias(*TypeName, Type) *Alias #63223
+pkg go/types, func Unalias(Type) Type #63223
+pkg go/types, method (*Alias) Obj() *TypeName #63223
+pkg go/types, method (*Alias) String() string #63223
+pkg go/types, method (*Alias) Underlying() Type #63223
+pkg go/types, method (*Info) PkgNameOf(*ast.ImportSpec) *PkgName #62037
+pkg go/types, method (Checker) PkgNameOf(*ast.ImportSpec) *PkgName #62037
+pkg go/types, type Alias struct #63223
+pkg go/types, type Info struct, FileVersions map[*ast.File]string #62605
+pkg go/version, func Compare(string, string) int #62039
+pkg go/version, func IsValid(string) bool #62039
+pkg go/version, func Lang(string) string #62039
+pkg html/template, const ErrJSTemplate //deprecated #61619
+pkg io, method (*SectionReader) Outer() (ReaderAt, int64, int64) #61870
+pkg log/slog, func SetLogLoggerLevel(Level) Level #62418
+pkg math/big, method (*Rat) FloatPrec() (int, bool) #50489
+pkg math/rand/v2, func ExpFloat64() float64 #61716
+pkg math/rand/v2, func Float32() float32 #61716
+pkg math/rand/v2, func Float64() float64 #61716
+pkg math/rand/v2, func Int() int #61716
+pkg math/rand/v2, func Int32() int32 #61716
+pkg math/rand/v2, func Int32N(int32) int32 #61716
+pkg math/rand/v2, func Int64() int64 #61716
+pkg math/rand/v2, func Int64N(int64) int64 #61716
+pkg math/rand/v2, func IntN(int) int #61716
+pkg math/rand/v2, func N[$0 intType]($0) $0 #61716
+pkg math/rand/v2, func New(Source) *Rand #61716
+pkg math/rand/v2, func NewChaCha8([32]uint8) *ChaCha8 #61716
+pkg math/rand/v2, func NewPCG(uint64, uint64) *PCG #61716
+pkg math/rand/v2, func NewZipf(*Rand, float64, float64, uint64) *Zipf #61716
+pkg math/rand/v2, func NormFloat64() float64 #61716
+pkg math/rand/v2, func Perm(int) []int #61716
+pkg math/rand/v2, func Shuffle(int, func(int, int)) #61716
+pkg math/rand/v2, func Uint32() uint32 #61716
+pkg math/rand/v2, func Uint32N(uint32) uint32 #61716
+pkg math/rand/v2, func Uint64() uint64 #61716
+pkg math/rand/v2, func Uint64N(uint64) uint64 #61716
+pkg math/rand/v2, func UintN(uint) uint #61716
+pkg math/rand/v2, method (*ChaCha8) MarshalBinary() ([]uint8, error) #61716
+pkg math/rand/v2, method (*ChaCha8) Seed([32]uint8) #61716
+pkg math/rand/v2, method (*ChaCha8) Uint64() uint64 #61716
+pkg math/rand/v2, method (*ChaCha8) UnmarshalBinary([]uint8) error #61716
+pkg math/rand/v2, method (*PCG) MarshalBinary() ([]uint8, error) #61716
+pkg math/rand/v2, method (*PCG) Seed(uint64, uint64) #61716
+pkg math/rand/v2, method (*PCG) Uint64() uint64 #61716
+pkg math/rand/v2, method (*PCG) UnmarshalBinary([]uint8) error #61716
+pkg math/rand/v2, method (*Rand) ExpFloat64() float64 #61716
+pkg math/rand/v2, method (*Rand) Float32() float32 #61716
+pkg math/rand/v2, method (*Rand) Float64() float64 #61716
+pkg math/rand/v2, method (*Rand) Int() int #61716
+pkg math/rand/v2, method (*Rand) Int32() int32 #61716
+pkg math/rand/v2, method (*Rand) Int32N(int32) int32 #61716
+pkg math/rand/v2, method (*Rand) Int64() int64 #61716
+pkg math/rand/v2, method (*Rand) Int64N(int64) int64 #61716
+pkg math/rand/v2, method (*Rand) IntN(int) int #61716
+pkg math/rand/v2, method (*Rand) NormFloat64() float64 #61716
+pkg math/rand/v2, method (*Rand) Perm(int) []int #61716
+pkg math/rand/v2, method (*Rand) Shuffle(int, func(int, int)) #61716
+pkg math/rand/v2, method (*Rand) Uint32() uint32 #61716
+pkg math/rand/v2, method (*Rand) Uint32N(uint32) uint32 #61716
+pkg math/rand/v2, method (*Rand) Uint64() uint64 #61716
+pkg math/rand/v2, method (*Rand) Uint64N(uint64) uint64 #61716
+pkg math/rand/v2, method (*Rand) UintN(uint) uint #61716
+pkg math/rand/v2, method (*Zipf) Uint64() uint64 #61716
+pkg math/rand/v2, type ChaCha8 struct #61716
+pkg math/rand/v2, type PCG struct #61716
+pkg math/rand/v2, type Rand struct #61716
+pkg math/rand/v2, type Source interface { Uint64 } #61716
+pkg math/rand/v2, type Source interface, Uint64() uint64 #61716
+pkg math/rand/v2, type Zipf struct #61716
+pkg net, method (*TCPConn) WriteTo(io.Writer) (int64, error) #58808
+pkg net/http, func FileServerFS(fs.FS) Handler #51971
+pkg net/http, func NewFileTransportFS(fs.FS) RoundTripper #51971
+pkg net/http, func ServeFileFS(ResponseWriter, *Request, fs.FS, string) #51971
+pkg net/http, method (*Request) PathValue(string) string #61410
+pkg net/http, method (*Request) SetPathValue(string, string) #61410
+pkg net/netip, method (AddrPort) Compare(AddrPort) int #61642
+pkg os, method (*File) WriteTo(io.Writer) (int64, error) #58808
+pkg reflect, func PtrTo //deprecated #59599
+pkg reflect, func TypeFor[$0 interface{}]() Type #60088
+pkg slices, func Concat[$0 interface{ ~[]$1 }, $1 interface{}](...$0) $0 #56353
+pkg syscall (linux-386), type SysProcAttr struct, PidFD *int #51246
+pkg syscall (linux-386-cgo), type SysProcAttr struct, PidFD *int #51246
+pkg syscall (linux-amd64), type SysProcAttr struct, PidFD *int #51246
+pkg syscall (linux-amd64-cgo), type SysProcAttr struct, PidFD *int #51246
+pkg syscall (linux-arm), type SysProcAttr struct, PidFD *int #51246
+pkg syscall (linux-arm-cgo), type SysProcAttr struct, PidFD *int #51246
+pkg testing/slogtest, func Run(*testing.T, func(*testing.T) slog.Handler, func(*testing.T) map[string]interface{}) #61758
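
A short, hedged Go sketch touching a few of the additions listed above (cmp.Or, slices.Concat, math/rand/v2, and the net/http pattern routing); the HTTP pattern, port, and values are illustrative assumptions rather than code from this release:

package main

import (
	"cmp"
	"fmt"
	"math/rand/v2"
	"net/http"
	"slices"
)

func main() {
	// cmp.Or returns the first non-zero argument (#60204).
	fmt.Println(cmp.Or("", "fallback")) // fallback

	// slices.Concat joins any number of slices (#56353).
	fmt.Println(slices.Concat([]int{1, 2}, []int{3})) // [1 2 3]

	// math/rand/v2 needs no explicit seeding and renames Intn to IntN (#61716).
	fmt.Println(rand.IntN(10))

	// net/http patterns may carry a method and wildcards; PathValue reads them (#61410).
	http.HandleFunc("GET /items/{id}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "item", r.PathValue("id"))
	})
	// http.ListenAndServe(":8080", nil) would start serving; omitted in this sketch.
}
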
diff --git a/codereview.cfg b/codereview.cfg
index f5b11bd..718a5fe 100644
--- a/codereview.cfg
+++ b/codereview.cfg
@@ -1,2 +1,2 @@
-branch: release-branch.go1.21
+branch: release-branch.go1.22
 parent-branch: master
diff --git a/doc/asm.html b/doc/asm.html
index f7787a4..dd395ec 100644
--- a/doc/asm.html
+++ b/doc/asm.html
@@ -464,6 +464,23 @@
 </li>
 </ul>
 
+<h3 id="special-instructions">Special instructions</h3>
+
+<p>
+The <code>PCALIGN</code> pseudo-instruction is used to indicate that the next instruction should be aligned
+to a specified boundary by padding with no-op instructions.
+</p>
+
+<p>
+It is currently supported on arm64, amd64, ppc64, loong64 and riscv64.
+
+For example, the start of the <code>MOVD</code> instruction below is aligned to 32 bytes:
+<pre>
+PCALIGN $32
+MOVD $2, R0
+</pre>
+</p>
+
 <h3 id="data-offsets">Interacting with Go types and constants</h3>
 
 <p>
diff --git a/doc/go1.17_spec.html b/doc/go1.17_spec.html
index 0b374e7..c87d9af 100644
--- a/doc/go1.17_spec.html
+++ b/doc/go1.17_spec.html
@@ -7,8 +7,11 @@
 <h2 id="Introduction">Introduction</h2>
 
 <p>
-This is a reference manual for the Go programming language. For
-more information and other documents, see <a href="/">golang.org</a>.
+This is the reference manual for the Go programming language as it was for
+language version 1.17, in October 2021, before the introduction of generics.
+It is provided for historical interest.
+The current reference manual can be found <a href="/doc/go_spec.html">here</a>.
+For more information and other documents, see <a href="/">go.dev</a>.
 </p>
 
 <p>
@@ -914,7 +917,7 @@
 or assignment. For instance, <code>int32</code> and <code>int</code>
 are not the same type even though they may have the same size on a
 particular architecture.
-
+</p>
 
 <h3 id="String_types">String types</h3>
 
@@ -1451,6 +1454,7 @@
 stored in them, with the exception of <code>nil</code> maps.
 A <code>nil</code> map is equivalent to an empty map except that no elements
 may be added.
+</p>
 
 <h3 id="Channel_types">Channel types</h3>
 
@@ -3641,6 +3645,8 @@
 statements, not expressions, they fall
 outside the operator hierarchy.
 As a consequence, statement <code>*p++</code> is the same as <code>(*p)++</code>.
+</p>
+
 <p>
 There are five precedence levels for binary operators.
 Multiplication operators bind strongest, followed by addition
diff --git a/doc/go_mem.html b/doc/go_mem.html
index 661e1e7..026c117 100644
--- a/doc/go_mem.html
+++ b/doc/go_mem.html
@@ -159,6 +159,7 @@
 For an ordinary (non-synchronizing) data read <i>r</i> on a memory location <i>x</i>,
 <i>W</i>(<i>r</i>) must be a write <i>w</i> that is <i>visible</i> to <i>r</i>,
 where visible means that both of the following hold:
+</p>
 
 <ol>
 <li><i>w</i> happens before <i>r</i>.
@@ -221,7 +222,7 @@
 </p>
 
 <p>
-First, any implementation can, upon detecting a data race,
+Any implementation can, upon detecting a data race,
 report the race and halt execution of the program.
 Implementations using ThreadSanitizer
 (accessed with “<code>go</code> <code>build</code> <code>-race</code>”)
@@ -229,7 +230,18 @@
 </p>
 
 <p>
-Otherwise, a read <i>r</i> of a memory location <i>x</i>
+A read of an array, struct, or complex number
+may be implemented as a read of each individual sub-value
+(array element, struct field, or real/imaginary component),
+in any order.
+Similarly, a write of an array, struct, or complex number
+may be implemented as a write of each individual sub-value,
+in any order.
+</p>
+
+<p>
+A read <i>r</i> of a memory location <i>x</i>
+holding a value
 that is not larger than a machine word must observe
 some write <i>w</i> such that <i>r</i> does not happen before <i>w</i>
 and there is no write <i>w'</i> such that <i>w</i> happens before <i>w'</i>
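
A small Go sketch of the consequence spelled out above: a value larger than a machine word (here a two-field struct) is not read or written atomically, so concurrent access must be synchronized; the types and names are illustrative only:

package main

import (
	"fmt"
	"sync"
)

// pair is larger than one machine word, so its reads and writes may be split
// into per-field operations as described above.
type pair struct{ x, y int }

func main() {
	var (
		mu sync.Mutex
		p  pair
		wg sync.WaitGroup
	)
	wg.Add(1)
	go func() {
		defer wg.Done()
		mu.Lock()
		p = pair{1, 1} // without mu, a concurrent reader could observe a torn value
		mu.Unlock()
	}()
	mu.Lock()
	local := p // synchronized read: sees either {0 0} or {1 1}, never a mix
	mu.Unlock()
	wg.Wait()
	fmt.Println(local)
}
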
diff --git a/doc/go_spec.html b/doc/go_spec.html
index c39a442..7b9dd38 100644
--- a/doc/go_spec.html
+++ b/doc/go_spec.html
@@ -1,6 +1,6 @@
 <!--{
 	"Title": "The Go Programming Language Specification",
-	"Subtitle": "Version of Aug 2, 2023",
+	"Subtitle": "Language version go1.22 (Feb 6, 2024)",
 	"Path": "/ref/spec"
 }-->
 
@@ -10,7 +10,7 @@
 This is the reference manual for the Go programming language.
 The pre-Go1.18 version, without generics, can be found
 <a href="/doc/go1.17_spec.html">here</a>.
-For more information and other documents, see <a href="/">golang.org</a>.
+For more information and other documents, see <a href="/">go.dev</a>.
 </p>
 
 <p>
@@ -70,6 +70,14 @@
 language.
 </p>
 
+<p>
+A link of the form [<a href="#Language_versions">Go 1.xx</a>] indicates that a described
+language feature (or some aspect of it) was changed or added with language version 1.xx and
+thus requires at minimum that language version to build.
+For details, see the <a href="#Language_versions">linked section</a>
+in the <a href="#Appendix">appendix</a>.
+</p>
+
 <h2 id="Source_code_representation">Source code representation</h2>
 
 <p>
@@ -263,7 +271,8 @@
 
 <p>
 The following character sequences represent <a href="#Operators">operators</a>
-(including <a href="#Assignment_statements">assignment operators</a>) and punctuation:
+(including <a href="#Assignment_statements">assignment operators</a>) and punctuation
+[<a href="#Go_1.18">Go 1.18</a>]:
 </p>
 <pre class="grammar">
 +    &amp;     +=    &amp;=     &amp;&amp;    ==    !=    (    )
@@ -281,7 +290,8 @@
 <a href="#Constants">integer constant</a>.
 An optional prefix sets a non-decimal base: <code>0b</code> or <code>0B</code>
 for binary, <code>0</code>, <code>0o</code>, or <code>0O</code> for octal,
-and <code>0x</code> or <code>0X</code> for hexadecimal.
+and <code>0x</code> or <code>0X</code> for hexadecimal
+[<a href="#Go_1.13">Go 1.13</a>].
 A single <code>0</code> is considered a decimal zero.
 In hexadecimal literals, letters <code>a</code> through <code>f</code>
 and <code>A</code> through <code>F</code> represent values 10 through 15.
@@ -347,7 +357,8 @@
 and an exponent part (<code>p</code> or <code>P</code> followed by an optional sign and decimal digits).
 One of the integer part or the fractional part may be elided; the radix point may be elided as well,
 but the exponent part is required. (This syntax matches the one given in IEEE 754-2008 §5.12.3.)
-An exponent value exp scales the mantissa (integer and fractional part) by 2<sup>exp</sup>.
+An exponent value exp scales the mantissa (integer and fractional part) by 2<sup>exp</sup>
+[<a href="#Go_1.13">Go 1.13</a>].
 </p>
 
 <p>
@@ -411,7 +422,8 @@
 <a href="#Floating-point_literals">floating-point</a> literal
 followed by the lowercase letter <code>i</code>.
 The value of an imaginary literal is the value of the respective
-integer or floating-point literal multiplied by the imaginary unit <i>i</i>.
+integer or floating-point literal multiplied by the imaginary unit <i>i</i>
+[<a href="#Go_1.13">Go 1.13</a>]
 </p>
 
 <pre class="ebnf">
@@ -884,7 +896,7 @@
 or assignment. For instance, <code>int32</code> and <code>int</code>
 are not the same type even though they may have the same size on a
 particular architecture.
-
+</p>
 
 <h3 id="String_types">String types</h3>
 
@@ -1340,6 +1352,7 @@
 
 <p>
 For convenience, the predeclared type <code>any</code> is an alias for the empty interface.
+[<a href="#Go_1.18">Go 1.18</a>]
 </p>
 
 <p>
@@ -1375,13 +1388,15 @@
 In a slightly more general form
 an interface <code>T</code> may use a (possibly qualified) interface type
 name <code>E</code> as an interface element. This is called
-<i>embedding</i> interface <code>E</code> in <code>T</code>.
+<i>embedding</i> interface <code>E</code> in <code>T</code>
+[<a href="#Go_1.14">Go 1.14</a>].
 The type set of <code>T</code> is the <i>intersection</i> of the type sets
 defined by <code>T</code>'s explicitly declared methods and the type sets
 of <code>T</code>’s embedded interfaces.
 In other words, the type set of <code>T</code> is the set of all types that implement all the
 explicitly declared methods of <code>T</code> and also all the methods of
-<code>E</code>.
+<code>E</code>
+[<a href="#Go_1.18">Go 1.18</a>].
 </p>
 
 <pre>
@@ -1420,7 +1435,8 @@
 <p>
 In their most general form, an interface element may also be an arbitrary type term
 <code>T</code>, or a term of the form <code>~T</code> specifying the underlying type <code>T</code>,
-or a union of terms <code>t<sub>1</sub>|t<sub>2</sub>|…|t<sub>n</sub></code>.
+or a union of terms <code>t<sub>1</sub>|t<sub>2</sub>|…|t<sub>n</sub></code>
+[<a href="#Go_1.18">Go 1.18</a>].
 Together with method specifications, these elements enable the precise
 definition of an interface's type set as follows:
 </p>
@@ -2303,7 +2319,9 @@
 
 <p>
 The following identifiers are implicitly declared in the
-<a href="#Blocks">universe block</a>:
+<a href="#Blocks">universe block</a>
+[<a href="#Go_1.18">Go 1.18</a>]
+[<a href="#Go_1.21">Go 1.21</a>]:
 </p>
 <pre class="grammar">
 Types:
@@ -2487,7 +2505,8 @@
 <h4 id="Alias_declarations">Alias declarations</h4>
 
 <p>
-An alias declaration binds an identifier to the given type.
+An alias declaration binds an identifier to the given type
+[<a href="#Go_1.9">Go 1.9</a>].
 </p>
 
 <pre class="ebnf">
@@ -2636,7 +2655,8 @@
 A type parameter list declares the <i>type parameters</i> of a generic function or type declaration.
 The type parameter list looks like an ordinary <a href="#Function_types">function parameter list</a>
 except that the type parameter names must all be present and the list is enclosed
-in square brackets rather than parentheses.
+in square brackets rather than parentheses
+[<a href="#Go_1.18">Go 1.18</a>].
 </p>
 
 <pre class="ebnf">
@@ -2719,7 +2739,8 @@
 <p>
 A <i>type constraint</i> is an <a href="#Interface_types">interface</a> that defines the
 set of permissible type arguments for the respective type parameter and controls the
-operations supported by values of that type parameter.
+operations supported by values of that type parameter
+[<a href="#Go_1.18">Go 1.18</a>].
 </p>
 
 <pre class="ebnf">
@@ -2749,7 +2770,8 @@
 The <a href="#Predeclared_identifiers">predeclared</a>
 <a href="#Interface_types">interface type</a> <code>comparable</code>
 denotes the set of all non-interface types that are
-<a href="#Comparison_operators">strictly comparable</a>.
+<a href="#Comparison_operators">strictly comparable</a>
+[<a href="#Go_1.18">Go 1.18</a>].
 </p>
 
 <p>
@@ -2782,7 +2804,8 @@
 if <code>T</code> <a href="#Implementing_an_interface">implements</a> <code>C</code>.
 As an exception, a <a href="#Comparison_operators">strictly comparable</a>
 type constraint may also be satisfied by a <a href="#Comparison_operators">comparable</a>
-(not necessarily strictly comparable) type argument.
+(not necessarily strictly comparable) type argument
+[<a href="#Go_1.20">Go 1.20</a>].
 More precisely:
 </p>
 
@@ -3910,7 +3933,7 @@
 	    that <code>P</code> is instantiated with, and the type of <code>a[x]</code> is
 	    the type of the (identical) element types.</li>
 	<li><code>a[x]</code> may not be assigned to if <code>P</code>'s type set
-	    includes string types.
+	    includes string types.</li>
 </ul>
 
 <p>
@@ -4306,7 +4329,7 @@
 
 <p>
 A generic function or type is <i>instantiated</i> by substituting <i>type arguments</i>
-for the type parameters.
+for the type parameters [<a href="#Go_1.18">Go 1.18</a>].
 Instantiation proceeds in two steps:
 </p>
 
@@ -4491,7 +4514,7 @@
 and for which no explicit type arguments is provided.
 These type parameters are called <i>bound</i> type parameters.
 For instance, in the <code>dedup</code> example above, the type parameters
-<code>P</code> and <code>E</code> are bound to <code>dedup</code>.
+<code>S</code> and <code>E</code> are bound to <code>dedup</code>.
 An argument to a generic function call may be a generic function itself.
 The type parameters of that function are included in the set of bound
 type parameters.
@@ -4639,7 +4662,7 @@
 an entry for each type parameter.
 </p>
 
-</pre>
+<p>
 For example, given the type equation with the bound type parameter
 <code>P</code>
 </p>
@@ -4759,6 +4782,7 @@
 
 <p>
 The right operand in a shift expression must have <a href="#Numeric_types">integer type</a>
+[<a href="#Go_1.13">Go 1.13</a>]
 or be an untyped constant <a href="#Representability">representable</a> by a
 value of type <code>uint</code>.
 If the left operand of a non-constant shift expression is an untyped constant,
@@ -4803,6 +4827,7 @@
 statements, not expressions, they fall
 outside the operator hierarchy.
 As a consequence, statement <code>*p++</code> is the same as <code>(*p)++</code>.
+</p>
 <p>
 There are five precedence levels for binary operators.
 Multiplication operators bind strongest, followed by addition
@@ -4825,12 +4850,13 @@
 </p>
 
 <pre>
-+x
-23 + 3*x[i]
-x &lt;= f()
-^a &gt;&gt; b
-f() || g()
-x == y+1 &amp;&amp; &lt;-chanInt &gt; 0
++x                         // x
+42 + a - b                 // (42 + a) - b
+23 + 3*x[i]                // 23 + (3 * x[i])
+x &lt;= f()                   // x &lt;= f()
+^a &gt;&gt; b                    // (^a) >> b
+f() || g()                 // f() || g()
+x == y+1 &amp;&amp; &lt;-chanInt &gt; 0  // (x == (y+1)) && ((<-chanInt) > 0)
 </pre>
 
 
@@ -5207,7 +5233,7 @@
 <p>
 Logical operators apply to <a href="#Boolean_types">boolean</a> values
 and yield a result of the same type as the operands.
-The right operand is evaluated conditionally.
+The left operand is evaluated, and then the right if the condition requires it.
 </p>
 
 <pre class="grammar">
@@ -5424,7 +5450,8 @@
 	<code>x</code> is a string and <code>T</code> is a slice of bytes or runes.
 	</li>
 	<li>
-	<code>x</code> is a slice, <code>T</code> is an array or a pointer to an array,
+	<code>x</code> is a slice, <code>T</code> is an array [<a href="#Go_1.20">Go 1.20</a>]
+	or a pointer to an array [<a href="#Go_1.17">Go 1.17</a>],
 	and the slice and array types have <a href="#Type_identity">identical</a> element types.
 	</li>
 </ul>
@@ -5569,7 +5596,7 @@
 
 <li>
 Converting a value of a string type to a slice of bytes type
-yields a slice whose successive elements are the bytes of the string.
+yields a non-nil slice whose successive elements are the bytes of the string.
 
 <pre>
 []byte("hellø")             // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
@@ -5785,24 +5812,28 @@
 Otherwise, when evaluating the <a href="#Operands">operands</a> of an
 expression, assignment, or
 <a href="#Return_statements">return statement</a>,
-all function calls, method calls, and
-communication operations are evaluated in lexical left-to-right
-order.
+all function calls, method calls,
+<a href="#Receive operator">receive operations</a>,
+and <a href="#Logical_operators">binary logical operations</a>
+are evaluated in lexical left-to-right order.
 </p>
 
 <p>
 For example, in the (function-local) assignment
 </p>
 <pre>
-y[f()], ok = g(h(), i()+x[j()], &lt;-c), k()
+y[f()], ok = g(z || h(), i()+x[j()], &lt;-c), k()
 </pre>
 <p>
 the function calls and communication happen in the order
-<code>f()</code>, <code>h()</code>, <code>i()</code>, <code>j()</code>,
+<code>f()</code>, <code>h()</code> (if <code>z</code>
+evaluates to false), <code>i()</code>, <code>j()</code>,
 <code>&lt;-c</code>, <code>g()</code>, and <code>k()</code>.
 However, the order of those events compared to the evaluation
 and indexing of <code>x</code> and the evaluation
-of <code>y</code> is not specified.
+of <code>y</code> and <code>z</code> is not specified,
+except as required lexically. For instance, <code>g</code>
+cannot be called before its arguments are evaluated.
 </p>
 
 <pre>
@@ -6510,7 +6541,6 @@
 and a <i>post</i> statement, such as an assignment,
 an increment or decrement statement. The init statement may be a
 <a href="#Short_variable_declarations">short variable declaration</a>, but the post statement must not.
-Variables declared by the init statement are re-used in each iteration.
 </p>
 
 <pre class="ebnf">
@@ -6542,12 +6572,55 @@
 for      { S() }    is the same as    for true     { S() }
 </pre>
 
+<p>
+Each iteration has its own separate declared variable (or variables)
+[<a href="#Go_1.22">Go 1.22</a>].
+The variable used by the first iteration is declared by the init statement.
+The variable used by each subsequent iteration is declared implicitly before
+executing the post statement and initialized to the value of the previous
+iteration's variable at that moment.
+</p>
+
+<pre>
+var prints []func()
+for i := 0; i < 5; i++ {
+	prints = append(prints, func() { println(i) })
+	i++
+}
+for _, p := range prints {
+	p()
+}
+</pre>
+
+<p>
+prints
+</p>
+
+<pre>
+1
+3
+5
+</pre>
+
+<p>
+Prior to [<a href="#Go_1.22">Go 1.22</a>], iterations share one set of variables
+instead of having their own separate variables.
+In that case, the example above prints
+</p>
+
+<pre>
+6
+6
+6
+</pre>
+
 <h4 id="For_range">For statements with <code>range</code> clause</h4>
 
 <p>
 A "for" statement with a "range" clause
-iterates through all entries of an array, slice, string or map,
-or values received on a channel. For each entry it assigns <i>iteration values</i>
+iterates through all entries of an array, slice, string or map, values received on
+a channel, or integer values from zero to an upper limit [<a href="#Go_1.22">Go 1.22</a>].
+For each entry it assigns <i>iteration values</i>
 to corresponding <i>iteration variables</i> if present and then executes the block.
 </p>
 
@@ -6558,12 +6631,12 @@
 <p>
 The expression on the right in the "range" clause is called the <i>range expression</i>,
 its <a href="#Core_types">core type</a> must be
-an array, pointer to an array, slice, string, map, or channel permitting
-<a href="#Receive_operator">receive operations</a>.
+an array, pointer to an array, slice, string, map, channel permitting
+<a href="#Receive_operator">receive operations</a>, or an integer.
 As with an assignment, if present the operands on the left must be
 <a href="#Address_operators">addressable</a> or map index expressions; they
-denote the iteration variables. If the range expression is a channel, at most
-one iteration variable is permitted, otherwise there may be up to two.
+denote the iteration variables. If the range expression is a channel or integer,
+at most one iteration variable is permitted, otherwise there may be up to two.
 If the last iteration variable is the <a href="#Blank_identifier">blank identifier</a>,
 the range clause is equivalent to the same clause without that identifier.
 </p>
@@ -6588,6 +6661,7 @@
 string          s  string type            index    i  int    see below  rune
 map             m  map[K]V                key      k  K      m[k]       V
 channel         c  chan E, &lt;-chan E       element  e  E
+integer         n  integer type           value    i  see below
 </pre>
 
 <ol>
@@ -6626,22 +6700,36 @@
 the channel until the channel is <a href="#Close">closed</a>. If the channel
 is <code>nil</code>, the range expression blocks forever.
 </li>
-</ol>
 
-<p>
-The iteration values are assigned to the respective
-iteration variables as in an <a href="#Assignment_statements">assignment statement</a>.
-</p>
+<li>
+For an integer value <code>n</code>, the iteration values 0 through <code>n-1</code>
+are produced in increasing order.
+If <code>n</code> &lt;= 0, the loop does not run any iterations.
+</li>
+</ol>
 
 <p>
 The iteration variables may be declared by the "range" clause using a form of
 <a href="#Short_variable_declarations">short variable declaration</a>
 (<code>:=</code>).
-In this case their types are set to the types of the respective iteration values
-and their <a href="#Declarations_and_scope">scope</a> is the block of the "for"
-statement; they are re-used in each iteration.
-If the iteration variables are declared outside the "for" statement,
-after execution their values will be those of the last iteration.
+In this case their <a href="#Declarations_and_scope">scope</a> is the block of the "for" statement
+and each iteration has its own new variables [<a href="#Go_1.22">Go 1.22</a>]
+(see also <a href="#For_clause">"for" statements with a ForClause</a>).
+If the range expression is a (possibly untyped) integer expression <code>n</code>,
+the variable has the same type as if it was
+<a href="#Variable_declarations">declared</a> with initialization
+expression <code>n</code>.
+Otherwise, the variables have the types of their respective iteration values.
+</p>
+
+<p>
+If the iteration variables are not explicitly declared by the "range" clause,
+they must be preexisting.
+In this case, the iteration values are assigned to the respective variables
+as in an <a href="#Assignment_statements">assignment statement</a>.
+If the range expression is a (possibly untyped) integer expression <code>n</code>,
+<code>n</code> too must be <a href="#Assignability">assignable</a> to the iteration variable;
+if there is no iteration variable, <code>n</code> must be assignable to <code>int</code>.
 </p>
 
 <pre>
@@ -6678,6 +6766,17 @@
 
 // empty a channel
 for range ch {}
+
+// call f(0), f(1), ... f(9)
+for i := range 10 {
+	// type of i is int (default type for untyped constant 10)
+	f(i)
+}
+
+// invalid: 256 cannot be assigned to uint8
+var u uint8
+for u = range 256 {
+}
 </pre>
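A few additional, illustrative uses of ranging over an integer (variable names are arbitrary):

```go
package main

import "fmt"

func main() {
	// Equivalent to: for i := 0; i < n; i++
	sum, n := 0, 5
	for i := range n {
		sum += i
	}
	fmt.Println(sum) // 10

	// With no iteration variable, the body simply runs three times.
	count := 0
	for range 3 {
		count++
	}
	fmt.Println(count) // 3

	// The iteration variable takes the type of the range expression.
	var m int64 = 2
	for v := range m {
		fmt.Printf("%T %d\n", v, v) // int64 0, int64 1
	}
}
```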
 
 
@@ -7229,7 +7328,8 @@
 <p>
 The built-in function <code>clear</code> takes an argument of <a href="#Map_types">map</a>,
 <a href="#Slice_types">slice</a>, or <a href="#Type_parameter_declarations">type parameter</a> type,
-and deletes or zeroes out all elements.
+and deletes or zeroes out all elements
+[<a href="#Go_1.21">Go 1.21</a>].
 </p>
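For illustration, a minimal program using <code>clear</code> on a map and on a slice (values chosen arbitrarily): it deletes all map entries and zeroes all slice elements, leaving the slice length unchanged.

```go
package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}
	clear(m) // deletes every entry
	fmt.Println(len(m)) // 0

	s := []int{1, 2, 3}
	clear(s) // zeroes every element; the length is unchanged
	fmt.Println(s, len(s)) // [0 0 0] 3
}
```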
 
 <pre class="grammar">
@@ -7496,7 +7596,8 @@
 The built-in functions <code>min</code> and <code>max</code> compute the
 smallest&mdash;or largest, respectively&mdash;value of a fixed number of
 arguments of <a href="#Comparison_operators">ordered types</a>.
-There must be at least one argument.
+There must be at least one argument
+[<a href="#Go_1.21">Go 1.21</a>].
 </p>
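A short, illustrative use of <code>min</code> and <code>max</code> with ordered operands:

```go
package main

import "fmt"

func main() {
	fmt.Println(min(3, 1, 2))        // 1
	fmt.Println(max(3, 1, 2))        // 3
	fmt.Println(min(1.5, 2))         // 1.5 (untyped constants may mix)
	fmt.Println(max("ant", "zebra")) // "zebra" (strings are ordered)
}
```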
 
 <p>
@@ -8212,8 +8313,8 @@
 <p>
 A <code>Pointer</code> is a <a href="#Pointer_types">pointer type</a> but a <code>Pointer</code>
 value may not be <a href="#Address_operators">dereferenced</a>.
-Any pointer or value of <a href="#Underlying_types">underlying type</a> <code>uintptr</code> can be
-<a href="#Conversions">converted</a> to a type of underlying type <code>Pointer</code> and vice versa.
+Any pointer or value of <a href="#Core_types">core type</a> <code>uintptr</code> can be
+<a href="#Conversions">converted</a> to a type of core type <code>Pointer</code> and vice versa.
 The effect of converting between <code>Pointer</code> and <code>uintptr</code> is implementation-defined.
 </p>
 
@@ -8224,6 +8325,10 @@
 type ptr unsafe.Pointer
 bits = *(*uint64)(ptr(&amp;f))
 
+func f[P ~*B, B any](p P) uintptr {
+	return uintptr(unsafe.Pointer(p))
+}
+
 var p ptr = nil
 </pre>
 
@@ -8272,7 +8377,8 @@
 
 <p>
 The function <code>Add</code> adds <code>len</code> to <code>ptr</code>
-and returns the updated pointer <code>unsafe.Pointer(uintptr(ptr) + uintptr(len))</code>.
+and returns the updated pointer <code>unsafe.Pointer(uintptr(ptr) + uintptr(len))</code>
+[<a href="#Go_1.17">Go 1.17</a>].
 The <code>len</code> argument must be of <a href="#Numeric_types">integer type</a> or an untyped <a href="#Constants">constant</a>.
 A constant <code>len</code> argument must be <a href="#Representability">representable</a> by a value of type <code>int</code>;
 if it is an untyped constant it is given type <code>int</code>.
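A minimal sketch of <code>unsafe.Add</code> used for element-wise pointer arithmetic over an array, assuming the usual <code>unsafe.Pointer</code> validity rules are respected; the data is arbitrary.

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	a := [4]int32{10, 20, 30, 40}
	p := unsafe.Pointer(&a[0])
	for i := 0; i < len(a); i++ {
		// Advance by i elements and load the value stored there.
		v := *(*int32)(unsafe.Add(p, uintptr(i)*unsafe.Sizeof(a[0])))
		fmt.Println(v) // 10, 20, 30, 40
	}
}
```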
@@ -8292,7 +8398,8 @@
 <p>
 except that, as a special case, if <code>ptr</code>
 is <code>nil</code> and <code>len</code> is zero,
-<code>Slice</code> returns <code>nil</code>.
+<code>Slice</code> returns <code>nil</code>
+[<a href="#Go_1.17">Go 1.17</a>].
 </p>
 
 <p>
@@ -8301,14 +8408,16 @@
 if it is an untyped constant it is given type <code>int</code>.
 At run time, if <code>len</code> is negative,
 or if <code>ptr</code> is <code>nil</code> and <code>len</code> is not zero,
-a <a href="#Run_time_panics">run-time panic</a> occurs.
+a <a href="#Run_time_panics">run-time panic</a> occurs
+[<a href="#Go_1.17">Go 1.17</a>].
 </p>
 
 <p>
 The function <code>SliceData</code> returns a pointer to the underlying array of the <code>slice</code> argument.
 If the slice's capacity <code>cap(slice)</code> is not zero, that pointer is <code>&amp;slice[:1][0]</code>.
 If <code>slice</code> is <code>nil</code>, the result is <code>nil</code>.
-Otherwise it  is a non-<code>nil</code> pointer to an unspecified memory address.
+Otherwise it  is a non-<code>nil</code> pointer to an unspecified memory address
+[<a href="#Go_1.20">Go 1.20</a>].
 </p>
 
 <p>
@@ -8317,12 +8426,14 @@
 The same requirements apply to the <code>ptr</code> and <code>len</code> argument as in the function
 <code>Slice</code>. If <code>len</code> is zero, the result is the empty string <code>""</code>.
 Since Go strings are immutable, the bytes passed to <code>String</code> must not be modified afterwards.
+[<a href="#Go_1.20">Go 1.20</a>]
 </p>
 
 <p>
 The function <code>StringData</code> returns a pointer to the underlying bytes of the <code>str</code> argument.
 For an empty string the return value is unspecified, and may be <code>nil</code>.
-Since Go strings are immutable, the bytes returned by <code>StringData</code> must not be modified.
+Since Go strings are immutable, the bytes returned by <code>StringData</code> must not be modified
+[<a href="#Go_1.20">Go 1.20</a>].
 </p>
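An illustrative round trip through <code>Slice</code>, <code>SliceData</code>, <code>String</code>, and <code>StringData</code>; the data is arbitrary, and the bytes behind the constructed string must not be written to.

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	a := [3]byte{'a', 'b', 'c'}

	s := unsafe.Slice(&a[0], len(a)) // []byte view of the array
	fmt.Println(string(s))           // "abc"

	str := unsafe.String(unsafe.SliceData(s), len(s)) // "abc" without copying
	fmt.Println(str)

	b := unsafe.Slice(unsafe.StringData(str), len(str)) // bytes of str; treat as read-only
	fmt.Println(b)                                      // [97 98 99]
}
```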
 
 <h3 id="Size_and_alignment_guarantees">Size and alignment guarantees</h3>
@@ -8363,6 +8474,145 @@
 
 <h2 id="Appendix">Appendix</h2>
 
+<h3 id="Language_versions">Language versions</h3>
+
+<p>
+The <a href="/doc/go1compat">Go 1 compatibility guarantee</a> ensures that
+programs written to the Go 1 specification will continue to compile and run
+correctly, unchanged, over the lifetime of that specification.
+More generally, as adjustments are made and features added to the language,
+the compatibility guarantee ensures that a Go program that works with a
+specific Go language version will continue to work with any subsequent version.
+</p>
+
+<p>
+For instance, the ability to use the prefix <code>0b</code> for binary
+integer literals was introduced with Go 1.13, indicated
+by [<a href="#Go_1.13">Go 1.13</a>] in the section on
+<a href="#Integer_literals">integer literals</a>.
+Source code containing an integer literal such as <code>0b1011</code>
+will be rejected if the implied or required language version used by
+the compiler is older than Go 1.13.
+</p>
+
+<p>
+The following table describes the minimum language version required for
+features introduced after Go 1.
+</p>
+
+<h4 id="Go_1.9">Go 1.9</h4>
+<ul>
+<li>
+An <a href="#Alias_declarations">alias declaration</a> may be used to declare an alias name for a type.
+</li>
+</ul>
+
+<h4 id="Go_1.13">Go 1.13</h4>
+<ul>
+<li>
+<a href="#Integer_literals">Integer literals</a> may use the prefixes <code>0b</code>, <code>0B</code>, <code>0o</code>,
+and <code>0O</code> for binary and octal literals, respectively.
+</li>
+<li>
+Hexadecimal <a href="#Floating-point_literals">floating-point literals</a> may be written using the prefixes
+<code>0x</code> and <code>0X</code>.
+</li>
+<li>
+The <a href="#Imaginary_literals">imaginary suffix</a> <code>i</code> may be used with any (binary, decimal, hexadecimal)
+integer or floating-point literal, not just decimal literals.
+</li>
+<li>
+The digits of any number literal may be <a href="#Integer_literals">separated</a> (grouped)
+using underscores <code>_</code>.
+</li>
+<li>
+The shift count in a <a href="#Operators">shift operation</a> may be a signed integer type.
+</li>
+</ul>
+
+<h4 id="Go_1.14">Go 1.14</h4>
+<ul>
+<li>
+Embedding a method more than once through different <a href="#Embedded_interfaces">embedded interfaces</a>
+is not an error.
+</li>
+</ul>
+
+<h4 id="Go_1.17">Go 1.17</h4>
+<ul>
+<li>
+A slice may be <a href="#Conversions">converted</a> to an array pointer if the slice and array element
+types match, and the array is not longer than the slice.
+</li>
+<li>
+The built-in <a href="#Package_unsafe">package <code>unsafe</code></a> includes the new functions
+<code>Add</code> and <code>Slice</code>.
+</li>
+</ul>
+
+<h4 id="Go_1.18">Go 1.18</h4>
+<p>
+The 1.18 release adds polymorphic functions and types ("generics") to the language.
+Specifically:
+</p>
+<ul>
+<li>
+The set of <a href="#Operators_and_punctuation">operators and punctuation</a> includes the new token <code>~</code>.
+</li>
+<li>
+Function and type declarations may declare <a href="#Type_parameter_declarations">type parameters</a>.
+</li>
+<li>
+Interface types may <a href="#General_interfaces">embed arbitrary types</a> (not just type names of interfaces)
+as well as union and <code>~T</code> type elements.
+</li>
+<li>
+The set of <a href="#Predeclared_identifiers">predeclared</a> types includes the new types
+<code>any</code> and <code>comparable</code>.
+</li>
+</ul>
+
+<h4 id="Go_1.20">Go 1.20</h4>
+<ul>
+<li>
+A slice may be <a href="#Conversions">converted</a> to an array if the slice and array element
+types match and the array is not longer than the slice.
+</li>
+<li>
+The built-in <a href="#Package_unsafe">package <code>unsafe</code></a> includes the new functions
+<code>SliceData</code>, <code>String</code>, and <code>StringData</code>.
+</li>
+<li>
+<a href="#Comparison_operators">Comparable types</a> (such as ordinary interfaces) may satisfy
+<code>comparable</code> constraints, even if the type arguments are not strictly comparable.
+</li>
+</ul>
+
+<h4 id="Go_1.21">Go 1.21</h4>
+<ul>
+<li>
+The set of <a href="#Predeclared_identifiers">predeclared</a> functions includes the new functions
+<code>min</code>, <code>max</code>, and <code>clear</code>.
+</li>
+<li>
+<a href="#Type_inference">Type inference</a> uses the types of interface methods for inference.
+It also infers type arguments for generic functions assigned to variables or
+passed as arguments to other (possibly generic) functions.
+</li>
+</ul>
+
+<h4 id="Go_1.22">Go 1.22</h4>
+<ul>
+<li>
+In a <a href="#For_statements">"for" statement</a>, each iteration has its own set of iteration
+variables rather than sharing the same variables in each iteration.
+</li>
+<li>
+A "for" statement with <a href="#For_range">"range" clause</a> may iterate over
+integer values from zero to an upper limit.
+</li>
+</ul>
+
 <h3 id="Type_unification_rules">Type unification rules</h3>
 
 <p>
@@ -8487,7 +8737,7 @@
 	identical <a href="#Interface_types">type terms</a>,
 	both or neither embed the predeclared type
 	<a href="#Predeclared_identifiers">comparable</a>,
-	corresponding method types unify per the element matching mode,
+	corresponding method types unify exactly,
 	and the method set of one of the interfaces is a subset of
 	the method set of the other interface.
 </li>
diff --git a/doc/godebug.md b/doc/godebug.md
index d265555..a7619c9 100644
--- a/doc/godebug.md
+++ b/doc/godebug.md
@@ -129,11 +129,71 @@
 ### Go 1.22
 
 Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size
-that can be used in TLS handshakes, controlled by the [`tlsmaxrsasize`setting](/pkg/crypto/tls#Conn.Handshake).
+that can be used in TLS handshakes, controlled by the [`tlsmaxrsasize` setting](/pkg/crypto/tls#Conn.Handshake).
 The default is tlsmaxrsasize=8192, limiting RSA to 8192-bit keys. To avoid
 denial of service attacks, this setting and default was backported to Go
 1.19.13, Go 1.20.8, and Go 1.21.1.
 
+Go 1.22 made it an error for a request or response read by a net/http
+client or server to have an empty Content-Length header.
+This behavior is controlled by the `httplaxcontentlength` setting.
+
+Go 1.22 changed the behavior of ServeMux to accept extended
+patterns and unescape both patterns and request paths by segment.
+This behavior can be controlled by the
+[`httpmuxgo121` setting](/pkg/net/http/#ServeMux).
+
+Go 1.22 added the [Alias type](/pkg/go/types#Alias) to [go/types](/pkg/go/types)
+for the explicit representation of [type aliases](/ref/spec#Type_declarations).
+Whether the type checker produces `Alias` types or not is controlled by the
+[`gotypesalias` setting](/pkg/go/types#Alias).
+For Go 1.22 it defaults to `gotypesalias=0`.
+For Go 1.23, `gotypesalias=1` will become the default.
+This setting will be removed in a future release, Go 1.24 at the earliest.
+
+Go 1.22 changed the default minimum TLS version supported by both servers
+and clients to TLS 1.2. The default can be reverted to TLS 1.0 using the
+[`tls10server` setting](/pkg/crypto/tls/#Config).
+
+Go 1.22 changed the default TLS cipher suites used by clients and servers when
+not explicitly configured, removing the cipher suites which used RSA based key
+exchange. The default can be reverted using the [`tlsrsakex` setting](/pkg/crypto/tls/#Config).
+
+Go 1.22 disabled
+[`ConnectionState.ExportKeyingMaterial`](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial)
+when the connection supports neither TLS 1.3 nor Extended Master Secret
+(implemented in Go 1.21). It can be reenabled with the [`tlsunsafeekm`
+setting](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial).
+
+Go 1.22 changed how the runtime interacts with transparent huge pages on Linux.
+In particular, a common default Linux kernel configuration can result in
+significant memory overheads, and Go 1.22 no longer works around this default.
+To work around this issue without adjusting kernel settings, transparent huge
+pages can be disabled for Go memory with the
+[`disablethp` setting](/pkg/runtime#hdr-Environment_Variable).
+This behavior was backported to Go 1.21.1, but the setting is only available
+starting with Go 1.21.6.
+This setting may be removed in a future release, and users impacted by this issue
+should adjust their Linux configuration according to the recommendations in the
+[GC guide](/doc/gc-guide#Linux_transparent_huge_pages), or switch to a Linux
+distribution that disables transparent huge pages altogether.
+
+Go 1.22 added contention on runtime-internal locks to the [`mutex`
+profile](/pkg/runtime/pprof#Profile). Contention on these locks is always
+reported at `runtime._LostContendedRuntimeLock`. Complete stack traces of
+runtime locks can be enabled with the [`runtimecontentionstacks`
+setting](/pkg/runtime#hdr-Environment_Variable). These stack traces have
+non-standard semantics; see the setting documentation for details.
+
+Go 1.22 added a new [`crypto/x509.Certificate`](/pkg/crypto/x509/#Certificate)
+field, [`Policies`](/pkg/crypto/x509/#Certificate.Policies), which supports
+certificate policy OIDs with components larger than 31 bits. By default this
+field is only used during parsing, when it is populated with policy OIDs, but
+not used during marshaling. It can be used to marshal these larger OIDs, instead
+of the existing PolicyIdentifiers field, by using the
+[`x509usepolicies` setting](/pkg/crypto/x509/#CreateCertificate).
+
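As an illustrative example using setting names from the list above, a Go 1.22
default can be reverted for a single invocation with the `GODEBUG` environment
variable (for example `GODEBUG=tlsrsakex=1,httpmuxgo121=1 go test ./...`), or
pinned for a program with `//go:debug` directives in its main package:

```go
// Hypothetical main-package file: keep the pre-1.22 behavior of these
// two settings for this program only.
//go:debug tlsrsakex=1
//go:debug httpmuxgo121=1
package main

func main() {}
```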
+
 ### Go 1.21
 
 Go 1.21 made it a run-time error to call `panic` with a nil interface value,
@@ -191,7 +251,7 @@
 ### Go 1.18
 
 Go 1.18 removed support for SHA1 in most X.509 certificates,
-controlled by the [`x509sha1` setting](/crypto/x509#InsecureAlgorithmError).
+controlled by the [`x509sha1` setting](/pkg/crypto/x509#InsecureAlgorithmError).
 This setting will be removed in a future release, Go 1.22 at the earliest.
 
 ### Go 1.10
diff --git a/lib/time/update.bash b/lib/time/update.bash
index 605afa7..e728500 100755
--- a/lib/time/update.bash
+++ b/lib/time/update.bash
@@ -24,8 +24,8 @@
 # in the CL match the update.bash in the CL.
 
 # Versions to use.
-CODE=2023c
-DATA=2023c
+CODE=2023d
+DATA=2023d
 
 set -e
 
diff --git a/lib/time/zoneinfo.zip b/lib/time/zoneinfo.zip
index 417ee2b..7cf689f 100644
--- a/lib/time/zoneinfo.zip
+++ b/lib/time/zoneinfo.zip
Binary files differ
diff --git a/misc/go.mod b/misc/go.mod
index d5494b1..ef9d188 100644
--- a/misc/go.mod
+++ b/misc/go.mod
@@ -3,4 +3,4 @@
 // tests and tools.
 module misc
 
-go 1.21
+go 1.22
diff --git a/misc/go_android_exec/main.go b/misc/go_android_exec/main.go
index 554810c..33b6693 100644
--- a/misc/go_android_exec/main.go
+++ b/misc/go_android_exec/main.go
@@ -204,6 +204,7 @@
 		`; export GOPROXY=` + os.Getenv("GOPROXY") +
 		`; export GOCACHE="` + deviceRoot + `/gocache"` +
 		`; export PATH="` + deviceGoroot + `/bin":$PATH` +
+		`; export HOME="` + deviceRoot + `/home"` +
 		`; cd "` + deviceCwd + `"` +
 		"; '" + deviceBin + "' " + strings.Join(os.Args[2:], " ")
 	code, err := adbRun(cmd)
diff --git a/misc/ios/detect.go b/misc/ios/detect.go
index 1cb8ae5..1a72eaf 100644
--- a/misc/ios/detect.go
+++ b/misc/ios/detect.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ignore
-// +build ignore
 
 // detect attempts to autodetect the correct
 // values of the environment variables
diff --git a/misc/wasm/go_wasip1_wasm_exec b/misc/wasm/go_wasip1_wasm_exec
index 0351994..cd16b96 100755
--- a/misc/wasm/go_wasip1_wasm_exec
+++ b/misc/wasm/go_wasip1_wasm_exec
@@ -14,7 +14,15 @@
 		exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
 		;;
 	"wasmtime" | "")
-		exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
+		# Match the major version in "wasmtime-cli 14.0.0". For versions before 14
+		# we need to use the old CLI. This requires Bash v3.0 and above.
+		# TODO(johanbrandhorst): Remove this condition once 1.22 is released.
+		# From 1.23 onwards we'll only support the new wasmtime CLI.
+		if [[ "$(wasmtime --version)" =~ wasmtime-cli[[:space:]]([0-9]+)\.[0-9]+\.[0-9]+ && "${BASH_REMATCH[1]}" -lt 14 ]]; then
+			exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
+		else
+			exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=1048576 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
+		fi
 		;;
 	*)
 		echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go
index dc9d350..4910908 100644
--- a/src/archive/tar/common.go
+++ b/src/archive/tar/common.go
@@ -632,7 +632,7 @@
 	c_ISSOCK = 0140000 // Socket
 )
 
-// FileInfoHeader creates a partially-populated Header from fi.
+// FileInfoHeader creates a partially-populated [Header] from fi.
 // If fi describes a symlink, FileInfoHeader records link as the link target.
 // If fi describes a directory, a slash is appended to the name.
 //
@@ -727,10 +727,3 @@
 		return false
 	}
 }
-
-func min(a, b int64) int64 {
-	if a < b {
-		return a
-	}
-	return b
-}
diff --git a/src/archive/tar/format.go b/src/archive/tar/format.go
index e50124d..9954b4d 100644
--- a/src/archive/tar/format.go
+++ b/src/archive/tar/format.go
@@ -33,7 +33,7 @@
 //	sub-second times  |     no |       yes |        no
 //	sparse files      |     no |       yes |       yes
 //
-// The table's upper portion shows the Header fields, where each format reports
+// The table's upper portion shows the [Header] fields, where each format reports
 // the maximum number of bytes allowed for each string field and
 // the integer type used to store each numeric field
 // (where timestamps are stored as the number of seconds since the Unix epoch).
diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go
index cfa5044..0811779 100644
--- a/src/archive/tar/reader.go
+++ b/src/archive/tar/reader.go
@@ -35,7 +35,7 @@
 	WriteTo(io.Writer) (int64, error)
 }
 
-// NewReader creates a new Reader reading from r.
+// NewReader creates a new [Reader] reading from r.
 func NewReader(r io.Reader) *Reader {
 	return &Reader{r: r, curr: &regFileReader{r, 0}}
 }
@@ -47,10 +47,10 @@
 //
 // If Next encounters a non-local name (as defined by [filepath.IsLocal])
 // and the GODEBUG environment variable contains `tarinsecurepath=0`,
-// Next returns the header with an ErrInsecurePath error.
+// Next returns the header with an [ErrInsecurePath] error.
 // A future version of Go may introduce this behavior by default.
 // Programs that want to accept non-local names can ignore
-// the ErrInsecurePath error and use the returned header.
+// the [ErrInsecurePath] error and use the returned header.
 func (tr *Reader) Next() (*Header, error) {
 	if tr.err != nil {
 		return nil, tr.err
@@ -623,14 +623,14 @@
 
 // Read reads from the current file in the tar archive.
 // It returns (0, io.EOF) when it reaches the end of that file,
-// until Next is called to advance to the next file.
+// until [Next] is called to advance to the next file.
 //
 // If the current file is sparse, then the regions marked as a hole
 // are read back as NUL-bytes.
 //
-// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
-// the Header.Size claims.
+// Calling Read on special types like [TypeLink], [TypeSymlink], [TypeChar],
+// [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [io.EOF]) regardless of what
+// the [Header.Size] claims.
 func (tr *Reader) Read(b []byte) (int, error) {
 	if tr.err != nil {
 		return 0, tr.err
diff --git a/src/archive/tar/strconv.go b/src/archive/tar/strconv.go
index ac31963..ac8105e 100644
--- a/src/archive/tar/strconv.go
+++ b/src/archive/tar/strconv.go
@@ -73,7 +73,7 @@
 	// in the V7 path field as a directory even though the full path
 	// recorded elsewhere (e.g., via PAX record) contains no trailing slash.
 	if len(s) > len(b) && b[len(b)-1] == '/' {
-		n := len(strings.TrimRight(s[:len(b)], "/"))
+		n := len(strings.TrimRight(s[:len(b)-1], "/"))
 		b[n] = 0 // Replace trailing slash with NUL terminator
 	}
 }
diff --git a/src/archive/tar/writer.go b/src/archive/tar/writer.go
index 1c95f07..73bad32 100644
--- a/src/archive/tar/writer.go
+++ b/src/archive/tar/writer.go
@@ -5,8 +5,10 @@
 package tar
 
 import (
+	"errors"
 	"fmt"
 	"io"
+	"io/fs"
 	"path"
 	"sort"
 	"strings"
@@ -14,7 +16,7 @@
 )
 
 // Writer provides sequential writing of a tar archive.
-// Write.WriteHeader begins a new file with the provided Header,
+// [Writer.WriteHeader] begins a new file with the provided [Header],
 // and then Writer can be treated as an io.Writer to supply that file's data.
 type Writer struct {
 	w    io.Writer
@@ -44,7 +46,7 @@
 // Flush finishes writing the current file's block padding.
 // The current file must be fully written before Flush can be called.
 //
-// This is unnecessary as the next call to WriteHeader or Close
+// This is unnecessary as the next call to [Writer.WriteHeader] or [Writer.Close]
 // will implicitly flush out the file's padding.
 func (tw *Writer) Flush() error {
 	if tw.err != nil {
@@ -403,6 +405,43 @@
 	return nil
 }
 
+// AddFS adds the files from fs.FS to the archive.
+// It walks the directory tree starting at the root of the filesystem
+// adding each file to the tar archive while maintaining the directory structure.
+func (tw *Writer) AddFS(fsys fs.FS) error {
+	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+		if d.IsDir() {
+			return nil
+		}
+		info, err := d.Info()
+		if err != nil {
+			return err
+		}
+		// TODO(#49580): Handle symlinks when fs.ReadLinkFS is available.
+		if !info.Mode().IsRegular() {
+			return errors.New("tar: cannot add non-regular file")
+		}
+		h, err := FileInfoHeader(info, "")
+		if err != nil {
+			return err
+		}
+		h.Name = name
+		if err := tw.WriteHeader(h); err != nil {
+			return err
+		}
+		f, err := fsys.Open(name)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		_, err = io.Copy(tw, f)
+		return err
+	})
+}
+
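A hypothetical caller of the new method (the paths below are placeholders): it archives every regular file under a directory while preserving relative names.

```go
package main

import (
	"archive/tar"
	"log"
	"os"
)

func main() {
	out, err := os.Create("site.tar") // placeholder output name
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	tw := tar.NewWriter(out)
	if err := tw.AddFS(os.DirFS("./site")); err != nil { // placeholder root
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
}
```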
 // splitUSTARPath splits a path according to USTAR prefix and suffix rules.
 // If the path is not splittable, then it will return ("", "", false).
 func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
@@ -425,12 +464,12 @@
 }
 
 // Write writes to the current file in the tar archive.
-// Write returns the error ErrWriteTooLong if more than
-// Header.Size bytes are written after WriteHeader.
+// Write returns the error [ErrWriteTooLong] if more than
+// Header.Size bytes are written after [Writer.WriteHeader].
 //
-// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
-// of what the Header.Size claims.
+// Calling Write on special types like [TypeLink], [TypeSymlink], [TypeChar],
+// [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [ErrWriteTooLong]) regardless
+// of what the [Header.Size] claims.
 func (tw *Writer) Write(b []byte) (int, error) {
 	if tw.err != nil {
 		return 0, tw.err
@@ -464,7 +503,7 @@
 }
 
 // Close closes the tar archive by flushing the padding, and writing the footer.
-// If the current file (from a prior call to WriteHeader) is not fully written,
+// If the current file (from a prior call to [Writer.WriteHeader]) is not fully written,
 // then this returns an error.
 func (tw *Writer) Close() error {
 	if tw.err == ErrWriteAfterClose {
diff --git a/src/archive/tar/writer_test.go b/src/archive/tar/writer_test.go
index f6d75c5..a9936d6 100644
--- a/src/archive/tar/writer_test.go
+++ b/src/archive/tar/writer_test.go
@@ -9,12 +9,14 @@
 	"encoding/hex"
 	"errors"
 	"io"
+	"io/fs"
 	"os"
 	"path"
 	"reflect"
 	"sort"
 	"strings"
 	"testing"
+	"testing/fstest"
 	"testing/iotest"
 	"time"
 )
@@ -1333,3 +1335,67 @@
 		}
 	}
 }
+
+func TestWriterAddFS(t *testing.T) {
+	fsys := fstest.MapFS{
+		"file.go":              {Data: []byte("hello")},
+		"subfolder/another.go": {Data: []byte("world")},
+	}
+	var buf bytes.Buffer
+	tw := NewWriter(&buf)
+	if err := tw.AddFS(fsys); err != nil {
+		t.Fatal(err)
+	}
+
+	// Test that we can get the files back from the archive
+	tr := NewReader(&buf)
+
+	entries, err := fsys.ReadDir(".")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var curfname string
+	for _, entry := range entries {
+		curfname = entry.Name()
+		if entry.IsDir() {
+			curfname += "/"
+			continue
+		}
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break // End of archive
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		data, err := io.ReadAll(tr)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if hdr.Name != curfname {
+			t.Fatalf("got filename %v, want %v",
+				hdr.Name, curfname)
+		}
+
+		origdata := fsys[curfname].Data
+		if string(data) != string(origdata) {
+			t.Fatalf("got file content %v, want %v",
+				data, origdata)
+		}
+	}
+}
+
+func TestWriterAddFSNonRegularFiles(t *testing.T) {
+	fsys := fstest.MapFS{
+		"device":  {Data: []byte("hello"), Mode: 0755 | fs.ModeDevice},
+		"symlink": {Data: []byte("world"), Mode: 0755 | fs.ModeSymlink},
+	}
+	var buf bytes.Buffer
+	tw := NewWriter(&buf)
+	if err := tw.AddFS(fsys); err == nil {
+		t.Fatal("expected error, got nil")
+	}
+}
diff --git a/src/archive/zip/reader.go b/src/archive/zip/reader.go
index 1fde1de..ff6fedf 100644
--- a/src/archive/zip/reader.go
+++ b/src/archive/zip/reader.go
@@ -48,15 +48,15 @@
 	fileList     []fileListEntry
 }
 
-// A ReadCloser is a Reader that must be closed when no longer needed.
+// A ReadCloser is a [Reader] that must be closed when no longer needed.
 type ReadCloser struct {
 	f *os.File
 	Reader
 }
 
 // A File is a single file in a ZIP archive.
-// The file information is in the embedded FileHeader.
-// The file content can be accessed by calling Open.
+// The file information is in the embedded [FileHeader].
+// The file content can be accessed by calling [File.Open].
 type File struct {
 	FileHeader
 	zip          *Reader
@@ -93,16 +93,16 @@
 	return r, err
 }
 
-// NewReader returns a new Reader reading from r, which is assumed to
+// NewReader returns a new [Reader] reading from r, which is assumed to
 // have the given size in bytes.
 //
 // If any file inside the archive uses a non-local name
 // (as defined by [filepath.IsLocal]) or a name containing backslashes
 // and the GODEBUG environment variable contains `zipinsecurepath=0`,
-// NewReader returns the reader with an ErrInsecurePath error.
+// NewReader returns the reader with an [ErrInsecurePath] error.
 // A future version of Go may introduce this behavior by default.
 // Programs that want to accept non-local names can ignore
-// the ErrInsecurePath error and use the returned reader.
+// the [ErrInsecurePath] error and use the returned reader.
 func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
 	if size < 0 {
 		return nil, errors.New("zip: size cannot be negative")
@@ -178,7 +178,7 @@
 
 // RegisterDecompressor registers or overrides a custom decompressor for a
 // specific method ID. If a decompressor for a given method is not found,
-// Reader will default to looking up the decompressor at the package level.
+// [Reader] will default to looking up the decompressor at the package level.
 func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
 	if r.decompressors == nil {
 		r.decompressors = make(map[uint16]Decompressor)
@@ -202,7 +202,7 @@
 // DataOffset returns the offset of the file's possibly-compressed
 // data, relative to the beginning of the zip file.
 //
-// Most callers should instead use Open, which transparently
+// Most callers should instead use [File.Open], which transparently
 // decompresses data and verifies checksums.
 func (f *File) DataOffset() (offset int64, err error) {
 	bodyOffset, err := f.findBodyOffset()
@@ -212,7 +212,7 @@
 	return f.headerOffset + bodyOffset, nil
 }
 
-// Open returns a ReadCloser that provides access to the File's contents.
+// Open returns a [ReadCloser] that provides access to the [File]'s contents.
 // Multiple files may be read concurrently.
 func (f *File) Open() (io.ReadCloser, error) {
 	bodyOffset, err := f.findBodyOffset()
@@ -255,7 +255,7 @@
 	return rc, nil
 }
 
-// OpenRaw returns a Reader that provides access to the File's contents without
+// OpenRaw returns a [Reader] that provides access to the [File]'s contents without
 // decompression.
 func (f *File) OpenRaw() (io.Reader, error) {
 	bodyOffset, err := f.findBodyOffset()
@@ -469,8 +469,8 @@
 
 				const ticksPerSecond = 1e7    // Windows timestamp resolution
 				ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
-				secs := int64(ts / ticksPerSecond)
-				nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
+				secs := ts / ticksPerSecond
+				nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)
 				epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
 				modified = time.Unix(epoch.Unix()+secs, nsecs)
 			}
diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go
index a67c335..631515c 100644
--- a/src/archive/zip/reader_test.go
+++ b/src/archive/zip/reader_test.go
@@ -1186,7 +1186,7 @@
 		0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00,
 	}
 	// Read in the archive.
-	_, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
+	_, err := NewReader(bytes.NewReader(data), int64(len(data)))
 	if err != nil {
 		t.Errorf("Error reading the archive: %v", err)
 	}
@@ -1333,7 +1333,7 @@
 		0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
 		0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
 	}
-	r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
+	r, err := NewReader(bytes.NewReader(data), int64(len(data)))
 	if err != ErrInsecurePath {
 		t.Fatalf("Error reading the archive: %v", err)
 	}
@@ -1559,7 +1559,7 @@
 		0x00, 0x04, 0x00, 0x04, 0x00, 0x31, 0x01, 0x00,
 		0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
 	}
-	r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
+	r, err := NewReader(bytes.NewReader(data), int64(len(data)))
 	if err != ErrInsecurePath {
 		t.Fatalf("Error reading the archive: %v", err)
 	}
@@ -1822,7 +1822,7 @@
 		}
 	}()
 	// Previously, this would trigger a panic as we attempt to read from
-	// a io.SectionReader which would access a slice at a negative offset
+	// an io.SectionReader which would access a slice at a negative offset
 	// as the section reader offset & size were < 0.
 	NewReader(bytes.NewReader(data), int64(len(data))+1875)
 }
diff --git a/src/archive/zip/register.go b/src/archive/zip/register.go
index 4389246..18e620d 100644
--- a/src/archive/zip/register.go
+++ b/src/archive/zip/register.go
@@ -19,7 +19,7 @@
 type Compressor func(w io.Writer) (io.WriteCloser, error)
 
 // A Decompressor returns a new decompressing reader, reading from r.
-// The ReadCloser's Close method must be used to release associated resources.
+// The [io.ReadCloser]'s Close method must be used to release associated resources.
 // The Decompressor itself must be safe to invoke from multiple goroutines
 // simultaneously, but each returned reader will be used only by
 // one goroutine at a time.
@@ -115,7 +115,7 @@
 }
 
 // RegisterDecompressor allows custom decompressors for a specified method ID.
-// The common methods Store and Deflate are built in.
+// The common methods [Store] and [Deflate] are built in.
 func RegisterDecompressor(method uint16, dcomp Decompressor) {
 	if _, dup := decompressors.LoadOrStore(method, dcomp); dup {
 		panic("decompressor already registered")
@@ -123,7 +123,7 @@
 }
 
 // RegisterCompressor registers custom compressors for a specified method ID.
-// The common methods Store and Deflate are built in.
+// The common methods [Store] and [Deflate] are built in.
 func RegisterCompressor(method uint16, comp Compressor) {
 	if _, dup := compressors.LoadOrStore(method, comp); dup {
 		panic("compressor already registered")
diff --git a/src/archive/zip/struct.go b/src/archive/zip/struct.go
index 9a8e67c..867dd5c 100644
--- a/src/archive/zip/struct.go
+++ b/src/archive/zip/struct.go
@@ -17,7 +17,7 @@
 the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit
 fields must be used instead.
 
-[ZIP specification]: https://www.pkware.com/appnote
+[ZIP specification]: https://support.pkware.com/pkzip/appnote
 */
 package zip
 
@@ -82,7 +82,7 @@
 // FileHeader describes a file within a ZIP file.
 // See the [ZIP specification] for details.
 //
-// [ZIP specification]: https://www.pkware.com/appnote
+// [ZIP specification]: https://support.pkware.com/pkzip/appnote
 type FileHeader struct {
 	// Name is the name of the file.
 	//
@@ -160,12 +160,12 @@
 	ExternalAttrs uint32 // Meaning depends on CreatorVersion
 }
 
-// FileInfo returns an fs.FileInfo for the FileHeader.
+// FileInfo returns an fs.FileInfo for the [FileHeader].
 func (h *FileHeader) FileInfo() fs.FileInfo {
 	return headerFileInfo{h}
 }
 
-// headerFileInfo implements fs.FileInfo.
+// headerFileInfo implements [fs.FileInfo].
 type headerFileInfo struct {
 	fh *FileHeader
 }
@@ -194,7 +194,7 @@
 	return fs.FormatFileInfo(fi)
 }
 
-// FileInfoHeader creates a partially-populated FileHeader from an
+// FileInfoHeader creates a partially-populated [FileHeader] from an
 // fs.FileInfo.
 // Because fs.FileInfo's Name method returns only the base name of
 // the file it describes, it may be necessary to modify the Name field
@@ -245,7 +245,7 @@
 
 // msDosTimeToTime converts an MS-DOS date and time into a time.Time.
 // The resolution is 2s.
-// See: https://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx
+// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime
 func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
 	return time.Date(
 		// date bits 0-4: day of month; 5-8: month; 9-15: years since 1980
@@ -265,7 +265,7 @@
 
 // timeToMsDosTime converts a time.Time to an MS-DOS date and time.
 // The resolution is 2s.
-// See: https://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx
+// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
 func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
 	fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
 	fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
@@ -273,17 +273,17 @@
 }
 
 // ModTime returns the modification time in UTC using the legacy
-// ModifiedDate and ModifiedTime fields.
+// [ModifiedDate] and [ModifiedTime] fields.
 //
-// Deprecated: Use Modified instead.
+// Deprecated: Use [Modified] instead.
 func (h *FileHeader) ModTime() time.Time {
 	return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
 }
 
-// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields
+// SetModTime sets the [Modified], [ModifiedTime], and [ModifiedDate] fields
 // to the given time in UTC.
 //
-// Deprecated: Use Modified instead.
+// Deprecated: Use [Modified] instead.
 func (h *FileHeader) SetModTime(t time.Time) {
 	t = t.UTC() // Convert to UTC for compatibility
 	h.Modified = t
@@ -309,7 +309,7 @@
 	msdosReadOnly = 0x01
 )
 
-// Mode returns the permission and mode bits for the FileHeader.
+// Mode returns the permission and mode bits for the [FileHeader].
 func (h *FileHeader) Mode() (mode fs.FileMode) {
 	switch h.CreatorVersion >> 8 {
 	case creatorUnix, creatorMacOSX:
@@ -323,7 +323,7 @@
 	return mode
 }
 
-// SetMode changes the permission and mode bits for the FileHeader.
+// SetMode changes the permission and mode bits for the [FileHeader].
 func (h *FileHeader) SetMode(mode fs.FileMode) {
 	h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
 	h.ExternalAttrs = fileModeToUnixMode(mode) << 16
diff --git a/src/archive/zip/writer.go b/src/archive/zip/writer.go
index 3b23cc3..e33df24 100644
--- a/src/archive/zip/writer.go
+++ b/src/archive/zip/writer.go
@@ -11,6 +11,7 @@
 	"hash"
 	"hash/crc32"
 	"io"
+	"io/fs"
 	"strings"
 	"unicode/utf8"
 )
@@ -40,7 +41,7 @@
 	raw    bool
 }
 
-// NewWriter returns a new Writer writing a zip file to w.
+// NewWriter returns a new [Writer] writing a zip file to w.
 func NewWriter(w io.Writer) *Writer {
 	return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
 }
@@ -63,7 +64,7 @@
 }
 
 // SetComment sets the end-of-central-directory comment field.
-// It can only be called before Close.
+// It can only be called before [Writer.Close].
 func (w *Writer) SetComment(comment string) error {
 	if len(comment) > uint16max {
 		return errors.New("zip: Writer.Comment too long")
@@ -207,14 +208,14 @@
 }
 
 // Create adds a file to the zip file using the provided name.
-// It returns a Writer to which the file contents should be written.
-// The file contents will be compressed using the Deflate method.
+// It returns a [Writer] to which the file contents should be written.
+// The file contents will be compressed using the [Deflate] method.
 // The name must be a relative path: it must not start with a drive
 // letter (e.g. C:) or leading slash, and only forward slashes are
 // allowed. To create a directory instead of a file, add a trailing
 // slash to the name.
-// The file's contents must be written to the io.Writer before the next
-// call to Create, CreateHeader, or Close.
+// The file's contents must be written to the [io.Writer] before the next
+// call to [Writer.Create], [Writer.CreateHeader], or [Writer.Close].
 func (w *Writer) Create(name string) (io.Writer, error) {
 	header := &FileHeader{
 		Name:   name,
@@ -261,13 +262,13 @@
 	return nil
 }
 
-// CreateHeader adds a file to the zip archive using the provided FileHeader
-// for the file metadata. Writer takes ownership of fh and may mutate
-// its fields. The caller must not modify fh after calling CreateHeader.
+// CreateHeader adds a file to the zip archive using the provided [FileHeader]
+// for the file metadata. [Writer] takes ownership of fh and may mutate
+// its fields. The caller must not modify fh after calling [Writer.CreateHeader].
 //
-// This returns a Writer to which the file contents should be written.
+// This returns a [Writer] to which the file contents should be written.
 // The file's contents must be written to the io.Writer before the next
-// call to Create, CreateHeader, CreateRaw, or Close.
+// call to [Writer.Create], [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
 func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
 	if err := w.prepare(fh); err != nil {
 		return nil, err
@@ -405,8 +406,8 @@
 	// flags.
 	if h.raw && !h.hasDataDescriptor() {
 		b.uint32(h.CRC32)
-		b.uint32(uint32(min64(h.CompressedSize64, uint32max)))
-		b.uint32(uint32(min64(h.UncompressedSize64, uint32max)))
+		b.uint32(uint32(min(h.CompressedSize64, uint32max)))
+		b.uint32(uint32(min(h.UncompressedSize64, uint32max)))
 	} else {
 		// When this package handle the compression, these values are
 		// always written to the trailing data descriptor.
@@ -426,26 +427,19 @@
 	return err
 }
 
-func min64(x, y uint64) uint64 {
-	if x < y {
-		return x
-	}
-	return y
-}
-
-// CreateRaw adds a file to the zip archive using the provided FileHeader and
-// returns a Writer to which the file contents should be written. The file's
-// contents must be written to the io.Writer before the next call to Create,
-// CreateHeader, CreateRaw, or Close.
+// CreateRaw adds a file to the zip archive using the provided [FileHeader] and
+// returns a [Writer] to which the file contents should be written. The file's
+// contents must be written to the io.Writer before the next call to [Writer.Create],
+// [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
 //
-// In contrast to CreateHeader, the bytes passed to Writer are not compressed.
+// In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed.
 func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
 	if err := w.prepare(fh); err != nil {
 		return nil, err
 	}
 
-	fh.CompressedSize = uint32(min64(fh.CompressedSize64, uint32max))
-	fh.UncompressedSize = uint32(min64(fh.UncompressedSize64, uint32max))
+	fh.CompressedSize = uint32(min(fh.CompressedSize64, uint32max))
+	fh.UncompressedSize = uint32(min(fh.UncompressedSize64, uint32max))
 
 	h := &header{
 		FileHeader: fh,
@@ -470,7 +464,7 @@
 	return fw, nil
 }
 
-// Copy copies the file f (obtained from a Reader) into w. It copies the raw
+// Copy copies the file f (obtained from a [Reader]) into w. It copies the raw
 // form directly bypassing decompression, compression, and validation.
 func (w *Writer) Copy(f *File) error {
 	r, err := f.OpenRaw()
@@ -486,7 +480,7 @@
 }
 
 // RegisterCompressor registers or overrides a custom compressor for a specific
-// method ID. If a compressor for a given method is not found, Writer will
+// method ID. If a compressor for a given method is not found, [Writer] will
 // default to looking up the compressor at the package level.
 func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
 	if w.compressors == nil {
@@ -495,6 +489,44 @@
 	w.compressors[method] = comp
 }
 
+// AddFS adds the files from fs.FS to the archive.
+// It walks the directory tree starting at the root of the filesystem
+// adding each file to the zip using deflate while maintaining the directory structure.
+func (w *Writer) AddFS(fsys fs.FS) error {
+	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+		if d.IsDir() {
+			return nil
+		}
+		info, err := d.Info()
+		if err != nil {
+			return err
+		}
+		if !info.Mode().IsRegular() {
+			return errors.New("zip: cannot add non-regular file")
+		}
+		h, err := FileInfoHeader(info)
+		if err != nil {
+			return err
+		}
+		h.Name = name
+		h.Method = Deflate
+		fw, err := w.CreateHeader(h)
+		if err != nil {
+			return err
+		}
+		f, err := fsys.Open(name)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		_, err = io.Copy(fw, f)
+		return err
+	})
+}
+
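As with the tar writer, a hypothetical use of the zip counterpart (paths are placeholders):

```go
package main

import (
	"archive/zip"
	"log"
	"os"
)

func main() {
	out, err := os.Create("assets.zip") // placeholder output name
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	zw := zip.NewWriter(out)
	if err := zw.AddFS(os.DirFS("./assets")); err != nil { // placeholder root
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}
```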
 func (w *Writer) compressor(method uint16) Compressor {
 	comp := w.compressors[method]
 	if comp == nil {
diff --git a/src/archive/zip/writer_test.go b/src/archive/zip/writer_test.go
index 2b73eca..bd33a07 100644
--- a/src/archive/zip/writer_test.go
+++ b/src/archive/zip/writer_test.go
@@ -16,6 +16,7 @@
 	"os"
 	"strings"
 	"testing"
+	"testing/fstest"
 	"time"
 )
 
@@ -602,3 +603,71 @@
 		}
 	})
 }
+
+func writeTestsToFS(tests []WriteTest) fs.FS {
+	fsys := fstest.MapFS{}
+	for _, wt := range tests {
+		fsys[wt.Name] = &fstest.MapFile{
+			Data: wt.Data,
+			Mode: wt.Mode,
+		}
+	}
+	return fsys
+}
+
+func TestWriterAddFS(t *testing.T) {
+	buf := new(bytes.Buffer)
+	w := NewWriter(buf)
+	tests := []WriteTest{
+		{
+			Name: "file.go",
+			Data: []byte("hello"),
+			Mode: 0644,
+		},
+		{
+			Name: "subfolder/another.go",
+			Data: []byte("world"),
+			Mode: 0644,
+		},
+	}
+	err := w.AddFS(writeTestsToFS(tests))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, wt := range tests {
+		testReadFile(t, r.File[i], &wt)
+	}
+}
+
+func TestIssue61875(t *testing.T) {
+	buf := new(bytes.Buffer)
+	w := NewWriter(buf)
+	tests := []WriteTest{
+		{
+			Name:   "symlink",
+			Data:   []byte("../link/target"),
+			Method: Deflate,
+			Mode:   0755 | fs.ModeSymlink,
+		},
+		{
+			Name:   "device",
+			Data:   []byte(""),
+			Method: Deflate,
+			Mode:   0755 | fs.ModeDevice,
+		},
+	}
+	err := w.AddFS(writeTestsToFS(tests))
+	if err == nil {
+		t.Errorf("expected error, got nil")
+	}
+}
diff --git a/src/archive/zip/zip_test.go b/src/archive/zip/zip_test.go
index 7d1de07..e766128 100644
--- a/src/archive/zip/zip_test.go
+++ b/src/archive/zip/zip_test.go
@@ -198,13 +198,6 @@
 	return len(p), nil
 }
 
-func min(x, y int64) int64 {
-	if x < y {
-		return x
-	}
-	return y
-}
-
 func memset(a []byte, b byte) {
 	if len(a) == 0 {
 		return
@@ -597,7 +590,7 @@
 	}
 
 	// read back zip file and check that we get to the end of it
-	r, err := NewReader(buf, int64(buf.Size()))
+	r, err := NewReader(buf, buf.Size())
 	if err != nil {
 		t.Fatal("reader:", err)
 	}
diff --git a/src/bufio/bufio.go b/src/bufio/bufio.go
index 8469b9e..880e527 100644
--- a/src/bufio/bufio.go
+++ b/src/bufio/bufio.go
@@ -41,24 +41,21 @@
 const minReadBufferSize = 16
 const maxConsecutiveEmptyReads = 100
 
-// NewReaderSize returns a new Reader whose buffer has at least the specified
-// size. If the argument io.Reader is already a Reader with large enough
-// size, it returns the underlying Reader.
+// NewReaderSize returns a new [Reader] whose buffer has at least the specified
+// size. If the argument io.Reader is already a [Reader] with large enough
+// size, it returns the underlying [Reader].
 func NewReaderSize(rd io.Reader, size int) *Reader {
 	// Is it already a Reader?
 	b, ok := rd.(*Reader)
 	if ok && len(b.buf) >= size {
 		return b
 	}
-	if size < minReadBufferSize {
-		size = minReadBufferSize
-	}
 	r := new(Reader)
-	r.reset(make([]byte, size), rd)
+	r.reset(make([]byte, max(size, minReadBufferSize)), rd)
 	return r
 }
 
-// NewReader returns a new Reader whose buffer has the default size.
+// NewReader returns a new [Reader] whose buffer has the default size.
 func NewReader(rd io.Reader) *Reader {
 	return NewReaderSize(rd, defaultBufSize)
 }
@@ -68,9 +65,9 @@
 
 // Reset discards any buffered data, resets all state, and switches
 // the buffered reader to read from r.
-// Calling Reset on the zero value of Reader initializes the internal buffer
+// Calling Reset on the zero value of [Reader] initializes the internal buffer
 // to the default size.
-// Calling b.Reset(b) (that is, resetting a Reader to itself) does nothing.
+// Calling b.Reset(b) (that is, resetting a [Reader] to itself) does nothing.
 func (b *Reader) Reset(r io.Reader) {
 	// If a Reader r is passed to NewReader, NewReader will return r.
 	// Different layers of code may do that, and then later pass r
@@ -135,9 +132,9 @@
 // Peek returns the next n bytes without advancing the reader. The bytes stop
 // being valid at the next read call. If Peek returns fewer than n bytes, it
 // also returns an error explaining why the read is short. The error is
-// ErrBufferFull if n is larger than b's buffer size.
+// [ErrBufferFull] if n is larger than b's buffer size.
 //
-// Calling Peek prevents a UnreadByte or UnreadRune call from succeeding
+// Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
 // until the next read operation.
 func (b *Reader) Peek(n int) ([]byte, error) {
 	if n < 0 {
@@ -207,10 +204,10 @@
 
 // Read reads data into p.
 // It returns the number of bytes read into p.
-// The bytes are taken from at most one Read on the underlying Reader,
+// The bytes are taken from at most one Read on the underlying [Reader],
 // hence n may be less than len(p).
 // To read exactly len(p) bytes, use io.ReadFull(b, p).
-// If the underlying Reader can return a non-zero count with io.EOF,
+// If the underlying [Reader] can return a non-zero count with io.EOF,
 // then this Read method can do so as well; see the [io.Reader] docs.
 func (b *Reader) Read(p []byte) (n int, err error) {
 	n = len(p)
@@ -280,7 +277,7 @@
 // UnreadByte unreads the last byte. Only the most recently read byte can be unread.
 //
 // UnreadByte returns an error if the most recent method called on the
-// Reader was not a read operation. Notably, Peek, Discard, and WriteTo are not
+// [Reader] was not a read operation. Notably, [Reader.Peek], [Reader.Discard], and [Reader.WriteTo] are not
 // considered read operations.
 func (b *Reader) UnreadByte() error {
 	if b.lastByte < 0 || b.r == 0 && b.w > 0 {
@@ -321,8 +318,8 @@
 }
 
 // UnreadRune unreads the last rune. If the most recent method called on
-// the Reader was not a ReadRune, UnreadRune returns an error. (In this
-// regard it is stricter than UnreadByte, which will unread the last byte
+// the [Reader] was not a [Reader.ReadRune], [Reader.UnreadRune] returns an error. (In this
+// regard it is stricter than [Reader.UnreadByte], which will unread the last byte
 // from any read operation.)
 func (b *Reader) UnreadRune() error {
 	if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
@@ -342,10 +339,10 @@
 // The bytes stop being valid at the next read.
 // If ReadSlice encounters an error before finding a delimiter,
 // it returns all the data in the buffer and the error itself (often io.EOF).
-// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
+// ReadSlice fails with error [ErrBufferFull] if the buffer fills without a delim.
 // Because the data returned from ReadSlice will be overwritten
 // by the next I/O operation, most clients should use
-// ReadBytes or ReadString instead.
+// [Reader.ReadBytes] or ReadString instead.
 // ReadSlice returns err != nil if and only if line does not end in delim.
 func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
 	s := 0 // search start index
@@ -389,7 +386,7 @@
 }
 
 // ReadLine is a low-level line-reading primitive. Most callers should use
-// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
+// [Reader.ReadBytes]('\n') or [Reader.ReadString]('\n') instead or use a [Scanner].
 //
 // ReadLine tries to return a single line, not including the end-of-line bytes.
 // If the line was too long for the buffer then isPrefix is set and the
@@ -401,7 +398,7 @@
 //
 // The text returned from ReadLine does not include the line end ("\r\n" or "\n").
 // No indication or error is given if the input ends without a final line end.
-// Calling UnreadByte after ReadLine will always unread the last byte read
+// Calling [Reader.UnreadByte] after ReadLine will always unread the last byte read
 // (possibly a character belonging to the line end) even if that byte is not
 // part of the line returned by ReadLine.
 func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
@@ -511,9 +508,9 @@
 }
 
 // WriteTo implements io.WriterTo.
-// This may make multiple calls to the Read method of the underlying Reader.
-// If the underlying reader supports the WriteTo method,
-// this calls the underlying WriteTo without buffering.
+// This may make multiple calls to the [Reader.Read] method of the underlying [Reader].
+// If the underlying reader supports the [Reader.WriteTo] method,
+// this calls the underlying [Reader.WriteTo] without buffering.
 func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
 	b.lastByte = -1
 	b.lastRuneSize = -1
@@ -558,7 +555,7 @@
 
 var errNegativeWrite = errors.New("bufio: writer returned negative count from Write")
 
-// writeBuf writes the Reader's buffer to the writer.
+// writeBuf writes the [Reader]'s buffer to the writer.
 func (b *Reader) writeBuf(w io.Writer) (int64, error) {
 	n, err := w.Write(b.buf[b.r:b.w])
 	if n < 0 {
@@ -570,12 +567,12 @@
 
 // buffered output
 
-// Writer implements buffering for an io.Writer object.
-// If an error occurs writing to a Writer, no more data will be
-// accepted and all subsequent writes, and Flush, will return the error.
+// Writer implements buffering for an [io.Writer] object.
+// If an error occurs writing to a [Writer], no more data will be
+// accepted and all subsequent writes, and [Writer.Flush], will return the error.
 // After all data has been written, the client should call the
-// Flush method to guarantee all data has been forwarded to
-// the underlying io.Writer.
+// [Writer.Flush] method to guarantee all data has been forwarded to
+// the underlying [io.Writer].
 type Writer struct {
 	err error
 	buf []byte
@@ -583,9 +580,9 @@
 	wr  io.Writer
 }
 
-// NewWriterSize returns a new Writer whose buffer has at least the specified
-// size. If the argument io.Writer is already a Writer with large enough
-// size, it returns the underlying Writer.
+// NewWriterSize returns a new [Writer] whose buffer has at least the specified
+// size. If the argument io.Writer is already a [Writer] with large enough
+// size, it returns the underlying [Writer].
 func NewWriterSize(w io.Writer, size int) *Writer {
 	// Is it already a Writer?
 	b, ok := w.(*Writer)
@@ -601,9 +598,9 @@
 	}
 }
 
-// NewWriter returns a new Writer whose buffer has the default size.
-// If the argument io.Writer is already a Writer with large enough buffer size,
-// it returns the underlying Writer.
+// NewWriter returns a new [Writer] whose buffer has the default size.
+// If the argument io.Writer is already a [Writer] with large enough buffer size,
+// it returns the underlying [Writer].
 func NewWriter(w io.Writer) *Writer {
 	return NewWriterSize(w, defaultBufSize)
 }
@@ -613,9 +610,9 @@
 
 // Reset discards any unflushed buffered data, clears any error, and
 // resets b to write its output to w.
-// Calling Reset on the zero value of Writer initializes the internal buffer
+// Calling Reset on the zero value of [Writer] initializes the internal buffer
 // to the default size.
-// Calling w.Reset(w) (that is, resetting a Writer to itself) does nothing.
+// Calling w.Reset(w) (that is, resetting a [Writer] to itself) does nothing.
 func (b *Writer) Reset(w io.Writer) {
 	// If a Writer w is passed to NewWriter, NewWriter will return w.
 	// Different layers of code may do that, and then later pass w
@@ -631,7 +628,7 @@
 	b.wr = w
 }
 
-// Flush writes any buffered data to the underlying io.Writer.
+// Flush writes any buffered data to the underlying [io.Writer].
 func (b *Writer) Flush() error {
 	if b.err != nil {
 		return b.err
@@ -660,7 +657,7 @@
 
 // AvailableBuffer returns an empty buffer with b.Available() capacity.
 // This buffer is intended to be appended to and
-// passed to an immediately succeeding Write call.
+// passed to an immediately succeeding [Writer.Write] call.
 // The buffer is only valid until the next write operation on b.
 func (b *Writer) AvailableBuffer() []byte {
 	return b.buf[b.n:][:0]
@@ -777,7 +774,7 @@
 	return nn, nil
 }
 
-// ReadFrom implements io.ReaderFrom. If the underlying writer
+// ReadFrom implements [io.ReaderFrom]. If the underlying writer
 // supports the ReadFrom method, this calls the underlying ReadFrom.
 // If there is buffered data and an underlying ReadFrom, this fills
 // the buffer and writes it before calling ReadFrom.
@@ -829,14 +826,14 @@
 
 // buffered input and output
 
-// ReadWriter stores pointers to a Reader and a Writer.
-// It implements io.ReadWriter.
+// ReadWriter stores pointers to a [Reader] and a [Writer].
+// It implements [io.ReadWriter].
 type ReadWriter struct {
 	*Reader
 	*Writer
 }
 
-// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
+// NewReadWriter allocates a new [ReadWriter] that dispatches to r and w.
 func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
 	return &ReadWriter{r, w}
 }
diff --git a/src/bufio/example_test.go b/src/bufio/example_test.go
index a864d11..6d219ae 100644
--- a/src/bufio/example_test.go
+++ b/src/bufio/example_test.go
@@ -6,6 +6,7 @@
 
 import (
 	"bufio"
+	"bytes"
 	"fmt"
 	"os"
 	"strconv"
@@ -137,3 +138,36 @@
 	}
 	// Output: "1" "2" "3" "4" ""
 }
+
+// Use a Scanner with a custom split function to parse a comma-separated
+// list with an empty final value, stopping at the token "STOP".
+func ExampleScanner_earlyStop() {
+	onComma := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
+		i := bytes.IndexByte(data, ',')
+		if i == -1 {
+			if !atEOF {
+				return 0, nil, nil
+			}
+			// If we have reached the end, return the last token.
+			return 0, data, bufio.ErrFinalToken
+		}
+		// If the token is "STOP", stop the scanning and ignore the rest.
+		if string(data[:i]) == "STOP" {
+			return i + 1, nil, bufio.ErrFinalToken
+		}
+		// Otherwise, return the token before the comma.
+		return i + 1, data[:i], nil
+	}
+	const input = "1,2,STOP,4,"
+	scanner := bufio.NewScanner(strings.NewReader(input))
+	scanner.Split(onComma)
+	for scanner.Scan() {
+		fmt.Printf("Got a token %q\n", scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		fmt.Fprintln(os.Stderr, "reading input:", err)
+	}
+	// Output:
+	// Got a token "1"
+	// Got a token "2"
+}
diff --git a/src/bufio/scan.go b/src/bufio/scan.go
index e247cbc..a26b2ff 100644
--- a/src/bufio/scan.go
+++ b/src/bufio/scan.go
@@ -13,19 +13,19 @@
 
 // Scanner provides a convenient interface for reading data such as
 // a file of newline-delimited lines of text. Successive calls to
-// the Scan method will step through the 'tokens' of a file, skipping
+// the [Scanner.Scan] method will step through the 'tokens' of a file, skipping
 // the bytes between the tokens. The specification of a token is
-// defined by a split function of type SplitFunc; the default split
-// function breaks the input into lines with line termination stripped. Split
+// defined by a split function of type [SplitFunc]; the default split
+// function breaks the input into lines with line termination stripped. [Scanner.Split]
 // functions are defined in this package for scanning a file into
 // lines, bytes, UTF-8-encoded runes, and space-delimited words. The
 // client may instead provide a custom split function.
 //
 // Scanning stops unrecoverably at EOF, the first I/O error, or a token too
-// large to fit in the buffer. When a scan stops, the reader may have
+// large to fit in the [Scanner.Buffer]. When a scan stops, the reader may have
 // advanced arbitrarily far past the last token. Programs that need more
 // control over error handling or large tokens, or must run sequential scans
-// on a reader, should use bufio.Reader instead.
+// on a reader, should use [bufio.Reader] instead.
 type Scanner struct {
 	r            io.Reader // The reader provided by the client.
 	split        SplitFunc // The function to split the tokens.
@@ -42,21 +42,23 @@
 
 // SplitFunc is the signature of the split function used to tokenize the
 // input. The arguments are an initial substring of the remaining unprocessed
-// data and a flag, atEOF, that reports whether the Reader has no more data
+// data and a flag, atEOF, that reports whether the [Reader] has no more data
 // to give. The return values are the number of bytes to advance the input
 // and the next token to return to the user, if any, plus an error, if any.
 //
 // Scanning stops if the function returns an error, in which case some of
-// the input may be discarded. If that error is ErrFinalToken, scanning
-// stops with no error.
+// the input may be discarded. If that error is [ErrFinalToken], scanning
+// stops with no error. A non-nil token delivered with [ErrFinalToken]
+// will be the last token, and a nil token with [ErrFinalToken]
+// immediately stops the scanning.
 //
-// Otherwise, the Scanner advances the input. If the token is not nil,
-// the Scanner returns it to the user. If the token is nil, the
+// Otherwise, the [Scanner] advances the input. If the token is not nil,
+// the [Scanner] returns it to the user. If the token is nil, the
 // Scanner reads more data and continues scanning; if there is no more
-// data--if atEOF was true--the Scanner returns. If the data does not
+// data--if atEOF was true--the [Scanner] returns. If the data does not
 // yet hold a complete token, for instance if it has no newline while
-// scanning lines, a SplitFunc can return (0, nil, nil) to signal the
-// Scanner to read more data into the slice and try again with a
+// scanning lines, a [SplitFunc] can return (0, nil, nil) to signal the
+// [Scanner] to read more data into the slice and try again with a
 // longer slice starting at the same point in the input.
 //
 // The function is never called with an empty data slice unless atEOF
@@ -74,7 +76,7 @@
 
 const (
 	// MaxScanTokenSize is the maximum size used to buffer a token
-	// unless the user provides an explicit buffer with Scanner.Buffer.
+	// unless the user provides an explicit buffer with [Scanner.Buffer].
 	// The actual maximum token size may be smaller as the buffer
 	// may need to include, for instance, a newline.
 	MaxScanTokenSize = 64 * 1024
@@ -82,8 +84,8 @@
 	startBufSize = 4096 // Size of initial allocation for buffer.
 )
 
-// NewScanner returns a new Scanner to read from r.
-// The split function defaults to ScanLines.
+// NewScanner returns a new [Scanner] to read from r.
+// The split function defaults to [ScanLines].
 func NewScanner(r io.Reader) *Scanner {
 	return &Scanner{
 		r:            r,
@@ -92,7 +94,7 @@
 	}
 }
 
-// Err returns the first non-EOF error that was encountered by the Scanner.
+// Err returns the first non-EOF error that was encountered by the [Scanner].
 func (s *Scanner) Err() error {
 	if s.err == io.EOF {
 		return nil
@@ -100,34 +102,36 @@
 	return s.err
 }
 
-// Bytes returns the most recent token generated by a call to Scan.
+// Bytes returns the most recent token generated by a call to [Scanner.Scan].
 // The underlying array may point to data that will be overwritten
 // by a subsequent call to Scan. It does no allocation.
 func (s *Scanner) Bytes() []byte {
 	return s.token
 }
 
-// Text returns the most recent token generated by a call to Scan
+// Text returns the most recent token generated by a call to [Scanner.Scan]
 // as a newly allocated string holding its bytes.
 func (s *Scanner) Text() string {
 	return string(s.token)
 }
 
 // ErrFinalToken is a special sentinel error value. It is intended to be
-// returned by a Split function to indicate that the token being delivered
-// with the error is the last token and scanning should stop after this one.
-// After ErrFinalToken is received by Scan, scanning stops with no error.
+// returned by a Split function to indicate that the scanning should stop
+// with no error. If the token being delivered with this error is not nil,
+// the token is the last token.
+//
 // The value is useful to stop processing early or when it is necessary to
-// deliver a final empty token. One could achieve the same behavior
-// with a custom error value but providing one here is tidier.
+// deliver a final empty token (which is different from a nil token).
+// One could achieve the same behavior with a custom error value but
+// providing one here is tidier.
 // See the emptyFinalToken example for a use of this value.
 var ErrFinalToken = errors.New("final token")
 
-// Scan advances the Scanner to the next token, which will then be
-// available through the Bytes or Text method. It returns false when the
-// scan stops, either by reaching the end of the input or an error.
-// After Scan returns false, the Err method will return any error that
-// occurred during scanning, except that if it was io.EOF, Err
+// Scan advances the [Scanner] to the next token, which will then be
+// available through the [Scanner.Bytes] or [Scanner.Text] method. It returns false when
+// there are no more tokens, either by reaching the end of the input or an error.
+// After Scan returns false, the [Scanner.Err] method will return any error that
+// occurred during scanning, except that if it was [io.EOF], [Scanner.Err]
 // will return nil.
 // Scan panics if the split function returns too many empty
 // tokens without advancing the input. This is a common error mode for
@@ -148,7 +152,10 @@
 				if err == ErrFinalToken {
 					s.token = token
 					s.done = true
-					return true
+					// When token is not nil, it means the scanning stops
+					// with a trailing token, and thus the return value
+					// should be true to indicate the existence of the token.
+					return token != nil
 				}
 				s.setErr(err)
 				return false
@@ -198,9 +205,7 @@
 			if newSize == 0 {
 				newSize = startBufSize
 			}
-			if newSize > s.maxTokenSize {
-				newSize = s.maxTokenSize
-			}
+			newSize = min(newSize, s.maxTokenSize)
 			newBuf := make([]byte, newSize)
 			copy(newBuf, s.buf[s.start:s.end])
 			s.buf = newBuf
@@ -255,13 +260,13 @@
 	}
 }
 
-// Buffer sets the initial buffer to use when scanning and the maximum
-// size of buffer that may be allocated during scanning. The maximum
-// token size is the larger of max and cap(buf). If max <= cap(buf),
-// Scan will use this buffer only and do no allocation.
+// Buffer sets the initial buffer to use when scanning
+// and the maximum size of buffer that may be allocated during scanning.
+// The maximum token size must be less than the larger of max and cap(buf).
+// If max <= cap(buf), [Scanner.Scan] will use this buffer only and do no allocation.
 //
-// By default, Scan uses an internal buffer and sets the
-// maximum token size to MaxScanTokenSize.
+// By default, [Scanner.Scan] uses an internal buffer and sets the
+// maximum token size to [MaxScanTokenSize].
 //
 // Buffer panics if it is called after scanning has started.
 func (s *Scanner) Buffer(buf []byte, max int) {
@@ -272,8 +277,8 @@
 	s.maxTokenSize = max
 }
 
-// Split sets the split function for the Scanner.
-// The default split function is ScanLines.
+// Split sets the split function for the [Scanner].
+// The default split function is [ScanLines].
 //
 // Split panics if it is called after scanning has started.
 func (s *Scanner) Split(split SplitFunc) {
@@ -285,7 +290,7 @@
 
 // Split functions
 
-// ScanBytes is a split function for a Scanner that returns each byte as a token.
+// ScanBytes is a split function for a [Scanner] that returns each byte as a token.
 func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
 	if atEOF && len(data) == 0 {
 		return 0, nil, nil
@@ -295,7 +300,7 @@
 
 var errorRune = []byte(string(utf8.RuneError))
 
-// ScanRunes is a split function for a Scanner that returns each
+// ScanRunes is a split function for a [Scanner] that returns each
 // UTF-8-encoded rune as a token. The sequence of runes returned is
 // equivalent to that from a range loop over the input as a string, which
 // means that erroneous UTF-8 encodings translate to U+FFFD = "\xef\xbf\xbd".
@@ -341,7 +346,7 @@
 	return data
 }
 
-// ScanLines is a split function for a Scanner that returns each line of
+// ScanLines is a split function for a [Scanner] that returns each line of
 // text, stripped of any trailing end-of-line marker. The returned line may
 // be empty. The end-of-line marker is one optional carriage return followed
 // by one mandatory newline. In regular expression notation, it is `\r?\n`.
@@ -388,7 +393,7 @@
 	return false
 }
 
-// ScanWords is a split function for a Scanner that returns each
+// ScanWords is a split function for a [Scanner] that returns each
 // space-separated word of text, with surrounding spaces deleted. It will
 // never return an empty string. The definition of space is set by
 // unicode.IsSpace.
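The Buffer and MaxScanTokenSize comments above describe when Scan stops on an oversized token. A hedged sketch of raising the limit before scanning starts; the 100 KiB input and the 1 MiB cap are illustrative values:

	package main

	import (
		"bufio"
		"fmt"
		"strings"
	)

	func main() {
		// A single line longer than the 64 KiB default would make Scan fail
		// with bufio.ErrTooLong; Buffer raises the maximum token size first.
		long := strings.Repeat("x", 100<<10)
		sc := bufio.NewScanner(strings.NewReader(long))
		sc.Buffer(make([]byte, 0, 64<<10), 1<<20)
		for sc.Scan() {
			fmt.Println(len(sc.Text()))
		}
		if err := sc.Err(); err != nil {
			fmt.Println("scan error:", err)
		}
	}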
diff --git a/src/bufio/scan_test.go b/src/bufio/scan_test.go
index e99b09f..6b64f7b 100644
--- a/src/bufio/scan_test.go
+++ b/src/bufio/scan_test.go
@@ -68,7 +68,7 @@
 		var i, runeCount int
 		var expect rune
 		// Use a string range loop to validate the sequence of runes.
-		for i, expect = range string(test) {
+		for i, expect = range test {
 			if !s.Scan() {
 				break
 			}
diff --git a/src/buildall.bash b/src/buildall.bash
index 4e9b15b..3b8f6ee 100755
--- a/src/buildall.bash
+++ b/src/buildall.bash
@@ -6,8 +6,12 @@
 # Usage: buildall.bash [-e] [pattern]
 #
 # buildall.bash builds the standard library for all Go-supported
-# architectures. It is used by the "misc-compile" trybot builders,
-# as a smoke test to quickly flag portability issues.
+# architectures.
+#
+# Originally the Go build system used it as a smoke test to quickly
+# flag portability issues in builders named "misc-compile" or "all-compile".
+# As of CL 464955, the build system uses make.bash -compile-only instead,
+# so this script no longer runs in any automated fashion.
 #
 # Options:
 #   -e: stop at first failure
diff --git a/src/builtin/builtin.go b/src/builtin/builtin.go
index da0ace1..668c799 100644
--- a/src/builtin/builtin.go
+++ b/src/builtin/builtin.go
@@ -284,9 +284,10 @@
 // by restoring normal execution and retrieves the error value passed to the
 // call of panic. If recover is called outside the deferred function it will
 // not stop a panicking sequence. In this case, or when the goroutine is not
-// panicking, or if the argument supplied to panic was nil, recover returns
-// nil. Thus the return value from recover reports whether the goroutine is
-// panicking.
+// panicking, recover returns nil.
+//
+// Prior to Go 1.21, recover would also return nil if panic is called with
+// a nil argument. See [panic] for details.
 func recover() any
 
 // The print built-in function formats its arguments in an
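The reworded recover note above points at the Go 1.21 change to panic(nil). A sketch of what a deferred function observes when the main module declares go1.21 or later (and GODEBUG=panicnil is unset); the printed format is illustrative:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		defer func() {
			r := recover()
			// Under the go1.21 language version, panic(nil) is converted to a
			// *runtime.PanicNilError, so r is non-nil and the goroutine is
			// reported as panicking.
			_, nilPanic := r.(*runtime.PanicNilError)
			fmt.Printf("recover() = %v, PanicNilError = %v\n", r, nilPanic)
		}()
		panic(nil)
	}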
diff --git a/src/bytes/boundary_test.go b/src/bytes/boundary_test.go
index f9855fc..67f377e 100644
--- a/src/bytes/boundary_test.go
+++ b/src/bytes/boundary_test.go
@@ -98,3 +98,18 @@
 	}
 	q[len(q)-1] = 0
 }
+
+func TestCountNearPageBoundary(t *testing.T) {
+	t.Parallel()
+	b := dangerousSlice(t)
+	for i := range b {
+		c := Count(b[i:], []byte{1})
+		if c != 0 {
+			t.Fatalf("Count(b[%d:], {1})=%d, want 0\n", i, c)
+		}
+		c = Count(b[:i], []byte{0})
+		if c != i {
+			t.Fatalf("Count(b[:%d], {0})=%d, want %d\n", i, c, i)
+		}
+	}
+}
diff --git a/src/bytes/buffer.go b/src/bytes/buffer.go
index 5a68188..ba844ba 100644
--- a/src/bytes/buffer.go
+++ b/src/bytes/buffer.go
@@ -15,7 +15,7 @@
 // smallBufferSize is an initial allocation minimal capacity.
 const smallBufferSize = 64
 
-// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// A Buffer is a variable-sized buffer of bytes with [Buffer.Read] and [Buffer.Write] methods.
 // The zero value for Buffer is an empty buffer ready to use.
 type Buffer struct {
 	buf      []byte // contents are the bytes buf[off : len(buf)]
@@ -48,19 +48,19 @@
 
 // Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
 // The slice is valid for use only until the next buffer modification (that is,
-// only until the next call to a method like Read, Write, Reset, or Truncate).
+// only until the next call to a method like [Buffer.Read], [Buffer.Write], [Buffer.Reset], or [Buffer.Truncate]).
 // The slice aliases the buffer content at least until the next buffer modification,
 // so immediate changes to the slice will affect the result of future reads.
 func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
 
 // AvailableBuffer returns an empty buffer with b.Available() capacity.
 // This buffer is intended to be appended to and
-// passed to an immediately succeeding Write call.
+// passed to an immediately succeeding [Buffer.Write] call.
 // The buffer is only valid until the next write operation on b.
 func (b *Buffer) AvailableBuffer() []byte { return b.buf[len(b.buf):] }
 
 // String returns the contents of the unread portion of the buffer
-// as a string. If the Buffer is a nil pointer, it returns "<nil>".
+// as a string. If the [Buffer] is a nil pointer, it returns "<nil>".
 //
 // To build strings more efficiently, see the strings.Builder type.
 func (b *Buffer) String() string {
@@ -102,7 +102,7 @@
 
 // Reset resets the buffer to be empty,
 // but it retains the underlying storage for use by future writes.
-// Reset is the same as Truncate(0).
+// Reset is the same as [Buffer.Truncate](0).
 func (b *Buffer) Reset() {
 	b.buf = b.buf[:0]
 	b.off = 0
@@ -160,7 +160,7 @@
 // another n bytes. After Grow(n), at least n bytes can be written to the
 // buffer without another allocation.
 // If n is negative, Grow will panic.
-// If the buffer can't grow it will panic with ErrTooLarge.
+// If the buffer can't grow it will panic with [ErrTooLarge].
 func (b *Buffer) Grow(n int) {
 	if n < 0 {
 		panic("bytes.Buffer.Grow: negative count")
@@ -171,7 +171,7 @@
 
 // Write appends the contents of p to the buffer, growing the buffer as
 // needed. The return value n is the length of p; err is always nil. If the
-// buffer becomes too large, Write will panic with ErrTooLarge.
+// buffer becomes too large, Write will panic with [ErrTooLarge].
 func (b *Buffer) Write(p []byte) (n int, err error) {
 	b.lastRead = opInvalid
 	m, ok := b.tryGrowByReslice(len(p))
@@ -183,7 +183,7 @@
 
 // WriteString appends the contents of s to the buffer, growing the buffer as
 // needed. The return value n is the length of s; err is always nil. If the
-// buffer becomes too large, WriteString will panic with ErrTooLarge.
+// buffer becomes too large, WriteString will panic with [ErrTooLarge].
 func (b *Buffer) WriteString(s string) (n int, err error) {
 	b.lastRead = opInvalid
 	m, ok := b.tryGrowByReslice(len(s))
@@ -194,7 +194,7 @@
 }
 
 // MinRead is the minimum slice size passed to a Read call by
-// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// [Buffer.ReadFrom]. As long as the [Buffer] has at least MinRead bytes beyond
 // what is required to hold the contents of r, ReadFrom will not grow the
 // underlying buffer.
 const MinRead = 512
@@ -202,7 +202,7 @@
 // ReadFrom reads data from r until EOF and appends it to the buffer, growing
 // the buffer as needed. The return value n is the number of bytes read. Any
 // error except io.EOF encountered during the read is also returned. If the
-// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
+// buffer becomes too large, ReadFrom will panic with [ErrTooLarge].
 func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
 	b.lastRead = opInvalid
 	for {
@@ -279,9 +279,9 @@
 }
 
 // WriteByte appends the byte c to the buffer, growing the buffer as needed.
-// The returned error is always nil, but is included to match bufio.Writer's
+// The returned error is always nil, but is included to match [bufio.Writer]'s
 // WriteByte. If the buffer becomes too large, WriteByte will panic with
-// ErrTooLarge.
+// [ErrTooLarge].
 func (b *Buffer) WriteByte(c byte) error {
 	b.lastRead = opInvalid
 	m, ok := b.tryGrowByReslice(1)
@@ -294,8 +294,8 @@
 
 // WriteRune appends the UTF-8 encoding of Unicode code point r to the
 // buffer, returning its length and an error, which is always nil but is
-// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
-// if it becomes too large, WriteRune will panic with ErrTooLarge.
+// included to match [bufio.Writer]'s WriteRune. The buffer is grown as needed;
+// if it becomes too large, WriteRune will panic with [ErrTooLarge].
 func (b *Buffer) WriteRune(r rune) (n int, err error) {
 	// Compare as uint32 to correctly handle negative runes.
 	if uint32(r) < utf8.RuneSelf {
@@ -334,7 +334,7 @@
 }
 
 // Next returns a slice containing the next n bytes from the buffer,
-// advancing the buffer as if the bytes had been returned by Read.
+// advancing the buffer as if the bytes had been returned by [Buffer.Read].
 // If there are fewer than n bytes in the buffer, Next returns the entire buffer.
 // The slice is only valid until the next call to a read or write method.
 func (b *Buffer) Next(n int) []byte {
@@ -388,10 +388,10 @@
 	return r, n, nil
 }
 
-// UnreadRune unreads the last rune returned by ReadRune.
+// UnreadRune unreads the last rune returned by [Buffer.ReadRune].
 // If the most recent read or write operation on the buffer was
-// not a successful ReadRune, UnreadRune returns an error.  (In this regard
-// it is stricter than UnreadByte, which will unread the last byte
+// not a successful [Buffer.ReadRune], UnreadRune returns an error.  (In this regard
+// it is stricter than [Buffer.UnreadByte], which will unread the last byte
 // from any read operation.)
 func (b *Buffer) UnreadRune() error {
 	if b.lastRead <= opInvalid {
@@ -460,23 +460,23 @@
 	return string(slice), err
 }
 
-// NewBuffer creates and initializes a new Buffer using buf as its
-// initial contents. The new Buffer takes ownership of buf, and the
+// NewBuffer creates and initializes a new [Buffer] using buf as its
+// initial contents. The new [Buffer] takes ownership of buf, and the
 // caller should not use buf after this call. NewBuffer is intended to
-// prepare a Buffer to read existing data. It can also be used to set
+// prepare a [Buffer] to read existing data. It can also be used to set
 // the initial size of the internal buffer for writing. To do that,
 // buf should have the desired capacity but a length of zero.
 //
-// In most cases, new(Buffer) (or just declaring a Buffer variable) is
-// sufficient to initialize a Buffer.
+// In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is
+// sufficient to initialize a [Buffer].
 func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
 
-// NewBufferString creates and initializes a new Buffer using string s as its
+// NewBufferString creates and initializes a new [Buffer] using string s as its
 // initial contents. It is intended to prepare a buffer to read an existing
 // string.
 //
-// In most cases, new(Buffer) (or just declaring a Buffer variable) is
-// sufficient to initialize a Buffer.
+// In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is
+// sufficient to initialize a [Buffer].
 func NewBufferString(s string) *Buffer {
 	return &Buffer{buf: []byte(s)}
 }
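The NewBuffer and NewBufferString comments above distinguish a buffer prepared for writing (length zero, capacity set) from one prepared for reading. A short sketch of both uses with illustrative data:

	package main

	import (
		"bytes"
		"fmt"
	)

	func main() {
		// Prepared for writing: zero length, preallocated capacity.
		w := bytes.NewBuffer(make([]byte, 0, 64))
		w.WriteString("hello, world")

		// Next consumes bytes exactly as Read would.
		fmt.Printf("%s|%s\n", w.Next(5), w.Bytes()) // hello|, world

		// Prepared for reading: wrap existing data.
		r := bytes.NewBufferString("abc")
		b, _ := r.ReadByte()
		fmt.Println(string(b), r.String()) // a bc
	}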
diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
index c54e52e..0679b43 100644
--- a/src/bytes/bytes.go
+++ b/src/bytes/bytes.go
@@ -112,7 +112,7 @@
 	case n == 0:
 		return len(s)
 	case n == 1:
-		return LastIndexByte(s, sep[0])
+		return bytealg.LastIndexByte(s, sep[0])
 	case n == len(s):
 		if Equal(s, sep) {
 			return 0
@@ -121,35 +121,12 @@
 	case n > len(s):
 		return -1
 	}
-	// Rabin-Karp search from the end of the string
-	hashss, pow := bytealg.HashStrRevBytes(sep)
-	last := len(s) - n
-	var h uint32
-	for i := len(s) - 1; i >= last; i-- {
-		h = h*bytealg.PrimeRK + uint32(s[i])
-	}
-	if h == hashss && Equal(s[last:], sep) {
-		return last
-	}
-	for i := last - 1; i >= 0; i-- {
-		h *= bytealg.PrimeRK
-		h += uint32(s[i])
-		h -= pow * uint32(s[i+n])
-		if h == hashss && Equal(s[i:i+n], sep) {
-			return i
-		}
-	}
-	return -1
+	return bytealg.LastIndexRabinKarp(s, sep)
 }
 
 // LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
 func LastIndexByte(s []byte, c byte) int {
-	for i := len(s) - 1; i >= 0; i-- {
-		if s[i] == c {
-			return i
-		}
-	}
-	return -1
+	return bytealg.LastIndexByte(s, c)
 }
 
 // IndexRune interprets s as a sequence of UTF-8-encoded code points.
@@ -557,12 +534,12 @@
 	return b
 }
 
-// HasPrefix tests whether the byte slice s begins with prefix.
+// HasPrefix reports whether the byte slice s begins with prefix.
 func HasPrefix(s, prefix []byte) bool {
 	return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
 }
 
-// HasSuffix tests whether the byte slice s ends with suffix.
+// HasSuffix reports whether the byte slice s ends with suffix.
 func HasSuffix(s, suffix []byte) bool {
 	return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
 }
@@ -1336,7 +1313,7 @@
 			// we should cutover at even larger average skips,
 			// because Equal becomes that much more expensive.
 			// This code does not take that effect into account.
-			j := bytealg.IndexRabinKarpBytes(s[i:], sep)
+			j := bytealg.IndexRabinKarp(s[i:], sep)
 			if j < 0 {
 				return -1
 			}
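LastIndex's inline reverse Rabin-Karp loop above moves into bytealg.LastIndexRabinKarp. For reference, a self-contained sketch of the same rolling hash scanned from the end of s; primeRK mirrors bytealg.PrimeRK and the helper name is invented for this sketch:

	package main

	import (
		"bytes"
		"fmt"
	)

	const primeRK = 16777619 // mirrors bytealg.PrimeRK

	// lastIndexRK hashes sep and a same-length window of s from right to left,
	// sliding the window toward the start and confirming hash hits with Equal.
	func lastIndexRK(s, sep []byte) int {
		n := len(sep)
		switch {
		case n == 0:
			return len(s)
		case n > len(s):
			return -1
		}
		var hashSep, pow uint32 = 0, 1
		for i := n - 1; i >= 0; i-- {
			hashSep = hashSep*primeRK + uint32(sep[i])
			pow *= primeRK // pow ends up as primeRK**n
		}
		last := len(s) - n
		var h uint32
		for i := len(s) - 1; i >= last; i-- {
			h = h*primeRK + uint32(s[i])
		}
		if h == hashSep && bytes.Equal(s[last:], sep) {
			return last
		}
		for i := last - 1; i >= 0; i-- {
			h = h*primeRK + uint32(s[i]) - pow*uint32(s[i+n])
			if h == hashSep && bytes.Equal(s[i:i+n], sep) {
				return i
			}
		}
		return -1
	}

	func main() {
		fmt.Println(lastIndexRK([]byte("go gopher go"), []byte("go"))) // 10
	}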
diff --git a/src/bytes/example_test.go b/src/bytes/example_test.go
index 41a5e2e..5a66b1e 100644
--- a/src/bytes/example_test.go
+++ b/src/bytes/example_test.go
@@ -81,9 +81,9 @@
 	var b bytes.Buffer
 	b.Grow(64)
 	b.Write([]byte("abcde"))
-	fmt.Printf("%s\n", string(b.Next(2)))
-	fmt.Printf("%s\n", string(b.Next(2)))
-	fmt.Printf("%s", string(b.Next(2)))
+	fmt.Printf("%s\n", b.Next(2))
+	fmt.Printf("%s\n", b.Next(2))
+	fmt.Printf("%s", b.Next(2))
 	// Output:
 	// ab
 	// cd
@@ -212,6 +212,17 @@
 	// false
 }
 
+func ExampleContainsFunc() {
+	f := func(r rune) bool {
+		return r >= 'a' && r <= 'z'
+	}
+	fmt.Println(bytes.ContainsFunc([]byte("HELLO"), f))
+	fmt.Println(bytes.ContainsFunc([]byte("World"), f))
+	// Output:
+	// false
+	// true
+}
+
 func ExampleCount() {
 	fmt.Println(bytes.Count([]byte("cheese"), []byte("e")))
 	fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune
diff --git a/src/bytes/reader.go b/src/bytes/reader.go
index 81c22aa..9ef4901 100644
--- a/src/bytes/reader.go
+++ b/src/bytes/reader.go
@@ -13,7 +13,7 @@
 // A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker,
 // io.ByteScanner, and io.RuneScanner interfaces by reading from
 // a byte slice.
-// Unlike a Buffer, a Reader is read-only and supports seeking.
+// Unlike a [Buffer], a Reader is read-only and supports seeking.
 // The zero value for Reader operates like a Reader of an empty slice.
 type Reader struct {
 	s        []byte
@@ -31,11 +31,11 @@
 }
 
 // Size returns the original length of the underlying byte slice.
-// Size is the number of bytes available for reading via ReadAt.
-// The result is unaffected by any method calls except Reset.
+// Size is the number of bytes available for reading via [Reader.ReadAt].
+// The result is unaffected by any method calls except [Reader.Reset].
 func (r *Reader) Size() int64 { return int64(len(r.s)) }
 
-// Read implements the io.Reader interface.
+// Read implements the [io.Reader] interface.
 func (r *Reader) Read(b []byte) (n int, err error) {
 	if r.i >= int64(len(r.s)) {
 		return 0, io.EOF
@@ -46,7 +46,7 @@
 	return
 }
 
-// ReadAt implements the io.ReaderAt interface.
+// ReadAt implements the [io.ReaderAt] interface.
 func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
 	// cannot modify state - see io.ReaderAt
 	if off < 0 {
@@ -62,7 +62,7 @@
 	return
 }
 
-// ReadByte implements the io.ByteReader interface.
+// ReadByte implements the [io.ByteReader] interface.
 func (r *Reader) ReadByte() (byte, error) {
 	r.prevRune = -1
 	if r.i >= int64(len(r.s)) {
@@ -73,7 +73,7 @@
 	return b, nil
 }
 
-// UnreadByte complements ReadByte in implementing the io.ByteScanner interface.
+// UnreadByte complements [Reader.ReadByte] in implementing the [io.ByteScanner] interface.
 func (r *Reader) UnreadByte() error {
 	if r.i <= 0 {
 		return errors.New("bytes.Reader.UnreadByte: at beginning of slice")
@@ -83,7 +83,7 @@
 	return nil
 }
 
-// ReadRune implements the io.RuneReader interface.
+// ReadRune implements the [io.RuneReader] interface.
 func (r *Reader) ReadRune() (ch rune, size int, err error) {
 	if r.i >= int64(len(r.s)) {
 		r.prevRune = -1
@@ -99,7 +99,7 @@
 	return
 }
 
-// UnreadRune complements ReadRune in implementing the io.RuneScanner interface.
+// UnreadRune complements [Reader.ReadRune] in implementing the [io.RuneScanner] interface.
 func (r *Reader) UnreadRune() error {
 	if r.i <= 0 {
 		return errors.New("bytes.Reader.UnreadRune: at beginning of slice")
@@ -112,7 +112,7 @@
 	return nil
 }
 
-// Seek implements the io.Seeker interface.
+// Seek implements the [io.Seeker] interface.
 func (r *Reader) Seek(offset int64, whence int) (int64, error) {
 	r.prevRune = -1
 	var abs int64
@@ -133,7 +133,7 @@
 	return abs, nil
 }
 
-// WriteTo implements the io.WriterTo interface.
+// WriteTo implements the [io.WriterTo] interface.
 func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
 	r.prevRune = -1
 	if r.i >= int64(len(r.s)) {
@@ -152,8 +152,8 @@
 	return
 }
 
-// Reset resets the Reader to be reading from b.
+// Reset resets the [Reader] to be reading from b.
 func (r *Reader) Reset(b []byte) { *r = Reader{b, 0, -1} }
 
-// NewReader returns a new Reader reading from b.
+// NewReader returns a new [Reader] reading from b.
 func NewReader(b []byte) *Reader { return &Reader{b, 0, -1} }
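The Reader doc updates above stress that, unlike a Buffer, a Reader is read-only and seekable. A small sketch contrasting ReadAt with Seek plus Read; the sample data is illustrative:

	package main

	import (
		"bytes"
		"fmt"
		"io"
	)

	func main() {
		r := bytes.NewReader([]byte("0123456789"))

		// ReadAt reads at an absolute offset and never moves the read position.
		buf := make([]byte, 4)
		if _, err := r.ReadAt(buf, 3); err != nil {
			panic(err)
		}
		fmt.Println(string(buf), r.Len()) // 3456 10

		// Seek and Read do move it.
		r.Seek(7, io.SeekStart)
		rest, _ := io.ReadAll(r)
		fmt.Println(string(rest), r.Len(), r.Size()) // 789 0 10
	}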
diff --git a/src/cmd/api/api_test.go b/src/cmd/api/api_test.go
index 910e046..ba358d3 100644
--- a/src/cmd/api/api_test.go
+++ b/src/cmd/api/api_test.go
@@ -285,6 +285,25 @@
 	}
 }
 
+func TestIssue64958(t *testing.T) {
+	defer func() {
+		if x := recover(); x != nil {
+			t.Errorf("expected no panic; recovered %v", x)
+		}
+	}()
+
+	testenv.MustHaveGoBuild(t)
+
+	for _, context := range contexts {
+		w := NewWalker(context, "testdata/src/issue64958")
+		pkg, err := w.importFrom("p", "", 0)
+		if err != nil {
+			t.Errorf("expected no error importing; got %T", err)
+		}
+		w.export(pkg)
+	}
+}
+
 func TestCheck(t *testing.T) {
 	if !*flagCheck {
 		t.Skip("-check not specified")
diff --git a/src/cmd/api/main_test.go b/src/cmd/api/main_test.go
index f3201e9..7985055 100644
--- a/src/cmd/api/main_test.go
+++ b/src/cmd/api/main_test.go
@@ -106,7 +106,7 @@
 	}
 
 	var nextFiles []string
-	if strings.Contains(runtime.Version(), "devel") {
+	if v := runtime.Version(); strings.Contains(v, "devel") || strings.Contains(v, "beta") {
 		next, err := filepath.Glob(filepath.Join(testenv.GOROOT(t), "api/next/*.txt"))
 		if err != nil {
 			t.Fatal(err)
@@ -490,7 +490,8 @@
 		if w.context.Dir != "" {
 			cmd.Dir = w.context.Dir
 		}
-		out, err := cmd.CombinedOutput()
+		cmd.Stderr = os.Stderr
+		out, err := cmd.Output()
 		if err != nil {
 			log.Fatalf("loading imports: %v\n%s", err, out)
 		}
@@ -956,17 +957,17 @@
 	if w.isDeprecated(obj) {
 		w.emitf("type %s //deprecated", name)
 	}
+	typ := obj.Type()
+	if obj.IsAlias() {
+		w.emitf("type %s = %s", name, w.typeString(typ))
+		return
+	}
 	if tparams := obj.Type().(*types.Named).TypeParams(); tparams != nil {
 		var buf bytes.Buffer
 		buf.WriteString(name)
 		w.writeTypeParams(&buf, tparams, true)
 		name = buf.String()
 	}
-	typ := obj.Type()
-	if obj.IsAlias() {
-		w.emitf("type %s = %s", name, w.typeString(typ))
-		return
-	}
 	switch typ := typ.Underlying().(type) {
 	case *types.Struct:
 		w.emitStructType(name, typ)
diff --git a/src/cmd/api/testdata/src/issue64958/p/p.go b/src/cmd/api/testdata/src/issue64958/p/p.go
new file mode 100644
index 0000000..feba867
--- /dev/null
+++ b/src/cmd/api/testdata/src/issue64958/p/p.go
@@ -0,0 +1,3 @@
+package p
+
+type BasicAlias = uint8
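The main_test.go reorder above, together with this testdata, exists because asserting *types.Named before checking IsAlias panics for a basic alias such as BasicAlias. A standalone go/types sketch of the safe order; the source string and output format are illustrative:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"
	)

	const src = `package p
	type BasicAlias = uint8
	type Named[T any] struct{ v T }
	`

	func main() {
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
		if err != nil {
			panic(err)
		}
		for _, name := range pkg.Scope().Names() {
			obj := pkg.Scope().Lookup(name).(*types.TypeName)
			// Handle aliases first: for BasicAlias, obj.Type() is *types.Basic,
			// so an unconditional *types.Named assertion would panic.
			if obj.IsAlias() {
				fmt.Printf("type %s = %s\n", name, obj.Type())
				continue
			}
			named := obj.Type().(*types.Named)
			fmt.Printf("type %s has %d type parameter(s)\n", name, named.TypeParams().Len())
		}
	}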
diff --git a/src/cmd/asm/doc.go b/src/cmd/asm/doc.go
index bb9166b..179ac14 100644
--- a/src/cmd/asm/doc.go
+++ b/src/cmd/asm/doc.go
@@ -33,6 +33,8 @@
 		Dump instructions as they are parsed.
 	-dynlink
 		Support references to Go symbols defined in other shared libraries.
+	-e
+		No limit on number of errors reported.
 	-gensymabis
 		Write symbol ABI information to output file. Don't assemble.
 	-o file
@@ -45,6 +47,8 @@
 		Enable spectre mitigations in list (all, ret).
 	-trimpath prefix
 		Remove prefix from recorded source file paths.
+	-v
+		Print debug output.
 
 Input language:
 
diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go
index 563e794..375ef80 100644
--- a/src/cmd/asm/internal/asm/asm.go
+++ b/src/cmd/asm/internal/asm/asm.go
@@ -445,7 +445,7 @@
 			//   BC x,CR0EQ,...
 			//   BC x,CR1LT,...
 			//   BC x,CR1GT,...
-			// The first and second case demonstrate a symbol name which is
+			// The first and second cases demonstrate a symbol name which is
 			// effectively discarded. In these cases, the offset determines
 			// the CR bit.
 			prog.Reg = a[1].Reg
@@ -909,7 +909,7 @@
 	p.append(prog, cond, true)
 }
 
-// symbolName returns the symbol name, or an error string if none if available.
+// symbolName returns the symbol name, or an error string if none is available.
 func symbolName(addr *obj.Addr) string {
 	if addr.Sym != nil {
 		return addr.Sym.Name
diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go
index ef41667..6e1aa1c 100644
--- a/src/cmd/asm/internal/asm/endtoend_test.go
+++ b/src/cmd/asm/internal/asm/endtoend_test.go
@@ -30,7 +30,7 @@
 	architecture, ctxt := setArch(goarch)
 	architecture.Init(ctxt)
 	lexer := lex.NewLexer(input)
-	parser := NewParser(ctxt, architecture, lexer, false)
+	parser := NewParser(ctxt, architecture, lexer)
 	pList := new(obj.Plist)
 	var ok bool
 	testOut = new(strings.Builder) // The assembler writes test output to this buffer.
@@ -68,6 +68,11 @@
 			continue
 		}
 
+		// Ignore GLOBL.
+		if strings.HasPrefix(line, "GLOBL ") {
+			continue
+		}
+
 		// The general form of a test input line is:
 		//	// comment
 		//	INST args [// printed form] [// hex encoding]
@@ -136,11 +141,17 @@
 		// Turn relative (PC) into absolute (PC) automatically,
 		// so that most branch instructions don't need comments
 		// giving the absolute form.
-		if len(f) > 0 && strings.HasSuffix(printed, "(PC)") {
-			last := f[len(f)-1]
-			n, err := strconv.Atoi(last[:len(last)-len("(PC)")])
+		if len(f) > 0 && strings.Contains(printed, "(PC)") {
+			index := len(f) - 1
+			suf := "(PC)"
+			for !strings.HasSuffix(f[index], suf) {
+				index--
+				suf = "(PC),"
+			}
+			str := f[index]
+			n, err := strconv.Atoi(str[:len(str)-len(suf)])
 			if err == nil {
-				f[len(f)-1] = fmt.Sprintf("%d(PC)", seq+n)
+				f[index] = fmt.Sprintf("%d%s", seq+n, suf)
 			}
 		}
 
@@ -186,7 +197,7 @@
 		t.Errorf(format, args...)
 		ok = false
 	}
-	obj.Flushplist(ctxt, pList, nil, "")
+	obj.Flushplist(ctxt, pList, nil)
 
 	for p := top; p != nil; p = p.Link {
 		if p.As == obj.ATEXT {
@@ -272,8 +283,9 @@
 func testErrors(t *testing.T, goarch, file string, flags ...string) {
 	input := filepath.Join("testdata", file+".s")
 	architecture, ctxt := setArch(goarch)
+	architecture.Init(ctxt)
 	lexer := lex.NewLexer(input)
-	parser := NewParser(ctxt, architecture, lexer, false)
+	parser := NewParser(ctxt, architecture, lexer)
 	pList := new(obj.Plist)
 	var ok bool
 	ctxt.Bso = bufio.NewWriter(os.Stdout)
@@ -299,7 +311,7 @@
 		}
 	}
 	pList.Firstpc, ok = parser.Parse()
-	obj.Flushplist(ctxt, pList, nil, "")
+	obj.Flushplist(ctxt, pList, nil)
 	if ok && !failed {
 		t.Errorf("asm: %s had no errors", file)
 	}
@@ -366,10 +378,10 @@
 }
 
 func TestARMEndToEnd(t *testing.T) {
-	defer func(old int) { buildcfg.GOARM = old }(buildcfg.GOARM)
+	defer func(old int) { buildcfg.GOARM.Version = old }(buildcfg.GOARM.Version)
 	for _, goarm := range []int{5, 6, 7} {
 		t.Logf("GOARM=%d", goarm)
-		buildcfg.GOARM = goarm
+		buildcfg.GOARM.Version = goarm
 		testEndToEnd(t, "arm", "arm")
 		if goarm == 6 {
 			testEndToEnd(t, "arm", "armv6")
diff --git a/src/cmd/asm/internal/asm/expr_test.go b/src/cmd/asm/internal/asm/expr_test.go
index e9c92df..1251594 100644
--- a/src/cmd/asm/internal/asm/expr_test.go
+++ b/src/cmd/asm/internal/asm/expr_test.go
@@ -57,7 +57,7 @@
 }
 
 func TestExpr(t *testing.T) {
-	p := NewParser(nil, nil, nil, false) // Expression evaluation uses none of these fields of the parser.
+	p := NewParser(nil, nil, nil) // Expression evaluation uses none of these fields of the parser.
 	for i, test := range exprTests {
 		p.start(lex.Tokenize(test.input))
 		result := int64(p.expr())
@@ -113,7 +113,7 @@
 }
 
 func runBadTest(i int, test badExprTest, t *testing.T) (err error) {
-	p := NewParser(nil, nil, nil, false) // Expression evaluation uses none of these fields of the parser.
+	p := NewParser(nil, nil, nil) // Expression evaluation uses none of these fields of the parser.
 	p.start(lex.Tokenize(test.input))
 	return tryParse(t, func() {
 		p.expr()
diff --git a/src/cmd/asm/internal/asm/line_test.go b/src/cmd/asm/internal/asm/line_test.go
index da857ce..01b058b 100644
--- a/src/cmd/asm/internal/asm/line_test.go
+++ b/src/cmd/asm/internal/asm/line_test.go
@@ -39,7 +39,7 @@
 	for i, test := range tests {
 		arch, ctxt := setArch(goarch)
 		tokenizer := lex.NewTokenizer("", strings.NewReader(test.input+"\n"), nil)
-		parser := NewParser(ctxt, arch, tokenizer, false)
+		parser := NewParser(ctxt, arch, tokenizer)
 
 		err := tryParse(t, func() {
 			parser.Parse()
diff --git a/src/cmd/asm/internal/asm/operand_test.go b/src/cmd/asm/internal/asm/operand_test.go
index c7e251f..579f533 100644
--- a/src/cmd/asm/internal/asm/operand_test.go
+++ b/src/cmd/asm/internal/asm/operand_test.go
@@ -23,12 +23,14 @@
 	if architecture == nil {
 		panic("asm: unrecognized architecture " + goarch)
 	}
-	return architecture, obj.Linknew(architecture.LinkArch)
+	ctxt := obj.Linknew(architecture.LinkArch)
+	ctxt.Pkgpath = "pkg"
+	return architecture, ctxt
 }
 
 func newParser(goarch string) *Parser {
 	architecture, ctxt := setArch(goarch)
-	return NewParser(ctxt, architecture, nil, false)
+	return NewParser(ctxt, architecture, nil)
 }
 
 // tryParse executes parse func in panicOnError=true context.
@@ -76,7 +78,7 @@
 		addr := obj.Addr{}
 		parser.operand(&addr)
 		var result string
-		if parser.compilingRuntime {
+		if parser.allowABI {
 			result = obj.DconvWithABIDetail(&emptyProg, &addr)
 		} else {
 			result = obj.Dconv(&emptyProg, &addr)
@@ -91,7 +93,7 @@
 	parser := newParser("amd64")
 	testOperandParser(t, parser, amd64OperandTests)
 	testBadOperandParser(t, parser, amd64BadOperandTests)
-	parser.compilingRuntime = true
+	parser.allowABI = true
 	testOperandParser(t, parser, amd64RuntimeOperandTests)
 	testBadOperandParser(t, parser, amd64BadOperandRuntimeTests)
 }
@@ -304,8 +306,8 @@
 	{"x·y+8(SB)", "x.y+8(SB)"},
 	{"x·y+8(SP)", "x.y+8(SP)"},
 	{"y+56(FP)", "y+56(FP)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·callReflect(SB)", "\"\".callReflect(SB)"},
+	{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
+	{"·callReflect(SB)", "pkg.callReflect(SB)"},
 	{"[X0-X0]", "[X0-X0]"},
 	{"[ Z9 - Z12 ]", "[Z9-Z12]"},
 	{"[X0-AX]", "[X0-AX]"},
@@ -391,8 +393,8 @@
 	{"sec+4(FP)", "sec+4(FP)"},
 	{"shifts<>(SB)(CX*8)", "shifts<>(SB)(CX*8)"},
 	{"x+4(FP)", "x+4(FP)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·reflectcall(SB)", "\"\".reflectcall(SB)"},
+	{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
+	{"·reflectcall(SB)", "pkg.reflectcall(SB)"},
 	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
 }
 
@@ -440,7 +442,7 @@
 	{"gosave<>(SB)", "gosave<>(SB)"},
 	{"retlo+12(FP)", "retlo+12(FP)"},
 	{"runtime·gogo(SB)", "runtime.gogo(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
+	{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
 	{"(R1, R3)", "(R1, R3)"},
 	{"[R0,R1,g,R15", ""}, // Issue 11764 - asm hung parsing ']' missing register lists.
 	{"[):[o-FP", ""},     // Issue 12469 - there was no infinite loop for ARM; these are just sanity checks.
@@ -629,8 +631,8 @@
 	{"g", "g"},
 	{"ret+8(FP)", "ret+8(FP)"},
 	{"runtime·abort(SB)", "runtime.abort(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·trunc(SB)", "\"\".trunc(SB)"},
+	{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
+	{"·trunc(SB)", "pkg.trunc(SB)"},
 	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
 }
 
@@ -662,7 +664,7 @@
 	{"$(8-1)", "$7"},
 	{"a+0(FP)", "a(FP)"},
 	{"a1+8(FP)", "a1+8(FP)"},
-	{"·AddInt32(SB)", `"".AddInt32(SB)`},
+	{"·AddInt32(SB)", `pkg.AddInt32(SB)`},
 	{"runtime·divWVW(SB)", "runtime.divWVW(SB)"},
 	{"$argframe+0(FP)", "$argframe(FP)"},
 	{"$asmcgocall<>(SB)", "$asmcgocall<>(SB)"},
@@ -763,8 +765,8 @@
 	{"RSB", "R28"},
 	{"ret+8(FP)", "ret+8(FP)"},
 	{"runtime·abort(SB)", "runtime.abort(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·trunc(SB)", "\"\".trunc(SB)"},
+	{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
+	{"·trunc(SB)", "pkg.trunc(SB)"},
 	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
 }
 
@@ -846,8 +848,8 @@
 	{"g", "g"},
 	{"ret+8(FP)", "ret+8(FP)"},
 	{"runtime·abort(SB)", "runtime.abort(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·trunc(SB)", "\"\".trunc(SB)"},
+	{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
+	{"·trunc(SB)", "pkg.trunc(SB)"},
 	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
 }
 
@@ -928,8 +930,8 @@
 	{"g", "g"},
 	{"ret+8(FP)", "ret+8(FP)"},
 	{"runtime·abort(SB)", "runtime.abort(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·trunc(SB)", "\"\".trunc(SB)"},
+	{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
+	{"·trunc(SB)", "pkg.trunc(SB)"},
 	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
 }
 
@@ -1026,7 +1028,7 @@
 	{"g", "g"},
 	{"ret+8(FP)", "ret+8(FP)"},
 	{"runtime·abort(SB)", "runtime.abort(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·trunc(SB)", "\"\".trunc(SB)"},
+	{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
+	{"·trunc(SB)", "pkg.trunc(SB)"},
 	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
 }
diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go
index 37f8e6c..ef6c840 100644
--- a/src/cmd/asm/internal/asm/parse.go
+++ b/src/cmd/asm/internal/asm/parse.go
@@ -12,6 +12,7 @@
 	"log"
 	"os"
 	"strconv"
+	"strings"
 	"text/scanner"
 	"unicode/utf8"
 
@@ -21,31 +22,33 @@
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm64"
 	"cmd/internal/obj/x86"
+	"cmd/internal/objabi"
 	"cmd/internal/src"
 	"cmd/internal/sys"
 )
 
 type Parser struct {
-	lex              lex.TokenReader
-	lineNum          int   // Line number in source file.
-	errorLine        int   // Line number of last error.
-	errorCount       int   // Number of errors.
-	sawCode          bool  // saw code in this file (as opposed to comments and blank lines)
-	pc               int64 // virtual PC; count of Progs; doesn't advance for GLOBL or DATA.
-	input            []lex.Token
-	inputPos         int
-	pendingLabels    []string // Labels to attach to next instruction.
-	labels           map[string]*obj.Prog
-	toPatch          []Patch
-	addr             []obj.Addr
-	arch             *arch.Arch
-	ctxt             *obj.Link
-	firstProg        *obj.Prog
-	lastProg         *obj.Prog
-	dataAddr         map[string]int64 // Most recent address for DATA for this symbol.
-	isJump           bool             // Instruction being assembled is a jump.
-	compilingRuntime bool
-	errorWriter      io.Writer
+	lex           lex.TokenReader
+	lineNum       int   // Line number in source file.
+	errorLine     int   // Line number of last error.
+	errorCount    int   // Number of errors.
+	sawCode       bool  // saw code in this file (as opposed to comments and blank lines)
+	pc            int64 // virtual PC; count of Progs; doesn't advance for GLOBL or DATA.
+	input         []lex.Token
+	inputPos      int
+	pendingLabels []string // Labels to attach to next instruction.
+	labels        map[string]*obj.Prog
+	toPatch       []Patch
+	addr          []obj.Addr
+	arch          *arch.Arch
+	ctxt          *obj.Link
+	firstProg     *obj.Prog
+	lastProg      *obj.Prog
+	dataAddr      map[string]int64 // Most recent address for DATA for this symbol.
+	isJump        bool             // Instruction being assembled is a jump.
+	allowABI      bool             // Whether ABI selectors are allowed.
+	pkgPrefix     string           // Prefix to add to local symbols.
+	errorWriter   io.Writer
 }
 
 type Patch struct {
@@ -53,15 +56,20 @@
 	label string
 }
 
-func NewParser(ctxt *obj.Link, ar *arch.Arch, lexer lex.TokenReader, compilingRuntime bool) *Parser {
+func NewParser(ctxt *obj.Link, ar *arch.Arch, lexer lex.TokenReader) *Parser {
+	pkgPrefix := obj.UnlinkablePkg
+	if ctxt != nil {
+		pkgPrefix = objabi.PathToPrefix(ctxt.Pkgpath)
+	}
 	return &Parser{
-		ctxt:             ctxt,
-		arch:             ar,
-		lex:              lexer,
-		labels:           make(map[string]*obj.Prog),
-		dataAddr:         make(map[string]int64),
-		errorWriter:      os.Stderr,
-		compilingRuntime: compilingRuntime,
+		ctxt:        ctxt,
+		arch:        ar,
+		lex:         lexer,
+		labels:      make(map[string]*obj.Prog),
+		dataAddr:    make(map[string]int64),
+		errorWriter: os.Stderr,
+		allowABI:    ctxt != nil && objabi.LookupPkgSpecial(ctxt.Pkgpath).AllowAsmABI,
+		pkgPrefix:   pkgPrefix,
 	}
 }
 
@@ -401,7 +409,7 @@
 			fallthrough
 		default:
 			// We have a symbol. Parse $sym±offset(symkind)
-			p.symbolReference(a, name, prefix)
+			p.symbolReference(a, p.qualifySymbol(name), prefix)
 		}
 		// fmt.Printf("SYM %s\n", obj.Dconv(&emptyProg, 0, a))
 		if p.peek() == scanner.EOF {
@@ -769,6 +777,16 @@
 	}
 }
 
+// qualifySymbol returns name as a package-qualified symbol name. If
+// name starts with a period, qualifySymbol prepends the package
+// prefix. Otherwise it returns name unchanged.
+func (p *Parser) qualifySymbol(name string) string {
+	if strings.HasPrefix(name, ".") {
+		name = p.pkgPrefix + name
+	}
+	return name
+}
+
 // symbolReference parses a symbol that is known not to be a register.
 func (p *Parser) symbolReference(a *obj.Addr, name string, prefix rune) {
 	// Identifier is a name.
@@ -864,7 +882,7 @@
 		isStatic = true
 	} else if tok == scanner.Ident {
 		abistr := p.get(scanner.Ident).String()
-		if !p.compilingRuntime {
+		if !p.allowABI {
 			if issueError {
 				p.errorf("ABI selector only permitted when compiling runtime, reference was to %q", name)
 			}
@@ -901,6 +919,7 @@
 	if tok.ScanToken != scanner.Ident || p.atStartOfRegister(name) {
 		return "", obj.ABI0, false
 	}
+	name = p.qualifySymbol(name)
 	// Parse optional <> (indicates a static symbol) or
 	// <ABIxxx> (selecting text symbol with specific ABI).
 	noErrMsg := false
@@ -928,7 +947,7 @@
 }
 
 // registerIndirect parses the general form of a register indirection.
-// It is can be (R1), (R2*scale), (R1)(R2*scale), (R1)(R2.SXTX<<3) or (R1)(R2<<3)
+// It can be (R1), (R2*scale), (R1)(R2*scale), (R1)(R2.SXTX<<3) or (R1)(R2<<3)
 // where R1 may be a simple register or register pair R:R or (R, R) or (R+R).
 // Or it might be a pseudo-indirection like (FP).
 // We are sitting on the opening parenthesis.
@@ -1186,7 +1205,7 @@
 	a.Offset = x86.EncodeRegisterRange(lo, hi)
 }
 
-// register number is ARM-specific. It returns the number of the specified register.
+// registerNumber is ARM-specific. It returns the number of the specified register.
 func (p *Parser) registerNumber(name string) uint16 {
 	if p.arch.Family == sys.ARM && name == "g" {
 		return 10
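qualifySymbol above is what now turns a leading-dot local symbol into a package-qualified one, which is why the operand tests expect pkg.AddUint32(SB) instead of "".AddUint32(SB). A tiny standalone sketch of the rule; the function name and prefix value are illustrative, not the parser's API:

	package main

	import (
		"fmt"
		"strings"
	)

	// qualify mirrors qualifySymbol: a symbol written as ·name in assembly
	// reaches the parser as ".name" and gets the package prefix prepended;
	// already-qualified names pass through unchanged.
	func qualify(pkgPrefix, name string) string {
		if strings.HasPrefix(name, ".") {
			return pkgPrefix + name
		}
		return name
	}

	func main() {
		fmt.Println(qualify("pkg", ".AddUint32"))   // pkg.AddUint32
		fmt.Println(qualify("pkg", "runtime.gogo")) // runtime.gogo, unchanged
	}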
diff --git a/src/cmd/asm/internal/asm/pseudo_test.go b/src/cmd/asm/internal/asm/pseudo_test.go
index 5e6fcf8..b9be6a7 100644
--- a/src/cmd/asm/internal/asm/pseudo_test.go
+++ b/src/cmd/asm/internal/asm/pseudo_test.go
@@ -64,16 +64,16 @@
 	}
 
 	testcats := []struct {
-		compilingRuntime bool
-		tests            []errtest
+		allowABI bool
+		tests    []errtest
 	}{
 		{
-			compilingRuntime: false,
-			tests:            nonRuntimeTests,
+			allowABI: false,
+			tests:    nonRuntimeTests,
 		},
 		{
-			compilingRuntime: true,
-			tests:            runtimeTests,
+			allowABI: true,
+			tests:    runtimeTests,
 		},
 	}
 
@@ -85,7 +85,7 @@
 
 	for _, cat := range testcats {
 		for _, test := range cat.tests {
-			parser.compilingRuntime = cat.compilingRuntime
+			parser.allowABI = cat.allowABI
 			parser.errorCount = 0
 			parser.lineNum++
 			if !parser.pseudo(test.pseudo, tokenize(test.operands)) {
diff --git a/src/cmd/asm/internal/asm/testdata/arm.s b/src/cmd/asm/internal/asm/testdata/arm.s
index 2ba22c7..93edc88 100644
--- a/src/cmd/asm/internal/asm/testdata/arm.s
+++ b/src/cmd/asm/internal/asm/testdata/arm.s
@@ -870,10 +870,13 @@
 	BIC.S	R0@>R1, R2           // 7021d2e1
 
 // SRL
+	SRL	$0, R5, R6           // 0560a0e1
+	SRL	$1, R5, R6           // a560a0e1
 	SRL	$14, R5, R6          // 2567a0e1
 	SRL	$15, R5, R6          // a567a0e1
 	SRL	$30, R5, R6          // 256fa0e1
 	SRL	$31, R5, R6          // a56fa0e1
+	SRL	$32, R5, R6          // 2560a0e1
 	SRL.S	$14, R5, R6          // 2567b0e1
 	SRL.S	$15, R5, R6          // a567b0e1
 	SRL.S	$30, R5, R6          // 256fb0e1
@@ -892,10 +895,13 @@
 	SRL.S	R5, R7               // 3775b0e1
 
 // SRA
+	SRA	$0, R5, R6           // 0560a0e1
+	SRA	$1, R5, R6           // c560a0e1
 	SRA	$14, R5, R6          // 4567a0e1
 	SRA	$15, R5, R6          // c567a0e1
 	SRA	$30, R5, R6          // 456fa0e1
 	SRA	$31, R5, R6          // c56fa0e1
+	SRA	$32, R5, R6          // 4560a0e1
 	SRA.S	$14, R5, R6          // 4567b0e1
 	SRA.S	$15, R5, R6          // c567b0e1
 	SRA.S	$30, R5, R6          // 456fb0e1
@@ -914,6 +920,8 @@
 	SRA.S	R5, R7               // 5775b0e1
 
 // SLL
+	SLL	$0, R5, R6           // 0560a0e1
+	SLL	$1, R5, R6           // 8560a0e1
 	SLL	$14, R5, R6          // 0567a0e1
 	SLL	$15, R5, R6          // 8567a0e1
 	SLL	$30, R5, R6          // 056fa0e1
@@ -935,6 +943,20 @@
 	SLL	R5, R7               // 1775a0e1
 	SLL.S	R5, R7               // 1775b0e1
 
+// Ops with zero shifts should encode as left shifts
+	ADD	R0<<0, R1, R2	     // 002081e0
+	ADD	R0>>0, R1, R2	     // 002081e0
+	ADD	R0->0, R1, R2	     // 002081e0
+	ADD	R0@>0, R1, R2	     // 002081e0
+	MOVW	R0<<0(R1), R2        // 002091e7
+	MOVW	R0>>0(R1), R2        // 002091e7
+	MOVW	R0->0(R1), R2        // 002091e7
+	MOVW	R0@>0(R1), R2        // 002091e7
+	MOVW	R0, R1<<0(R2)        // 010082e7
+	MOVW	R0, R1>>0(R2)        // 010082e7
+	MOVW	R0, R1->0(R2)        // 010082e7
+	MOVW	R0, R1@>0(R2)        // 010082e7
+
 // MULA / MULS
 	MULAWT		R1, R2, R3, R4       // c23124e1
 	MULAWB		R1, R2, R3, R4       // 823124e1
diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s
index 11bd678..ecad08b 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64.s
@@ -557,37 +557,92 @@
 	FMOVQ	65520(R10), F10 // 4afdff3d
 	FMOVQ	64(RSP), F11    // eb13c03d
 
-// large aligned offset, use two instructions(add+ldr/store).
-	MOVB	R1, 0x1001(R2) // MOVB	R1, 4097(R2)  // 5b04409161070039
-	MOVH	R1, 0x2002(R2) // MOVH	R1, 8194(R2)  // 5b08409161070079
-	MOVW	R1, 0x4004(R2) // MOVW	R1, 16388(R2) // 5b104091610700b9
-	MOVD	R1, 0x8008(R2) // MOVD	R1, 32776(R2) // 5b204091610700f9
-	FMOVS	F1, 0x4004(R2) // FMOVS	F1, 16388(R2) // 5b104091610700bd
-	FMOVD	F1, 0x8008(R2) // FMOVD	F1, 32776(R2) // 5b204091610700fd
+// medium offsets that either fit a single instruction or can use add+ldr/str
+	MOVD -4095(R17), R3                        // 3bfe3fd1630340f9
+	MOVD -391(R17), R3                         // 3b1e06d1630340f9
+	MOVD -257(R17), R3                         // 3b0604d1630340f9
+	MOVD -256(R17), R3                         // 230250f8
+	MOVD 255(R17), R3                          // 23f24ff8
+	MOVD 256(R17), R3                          // 238240f9
+	MOVD 257(R17), R3                          // 3b060491630340f9
+	MOVD 391(R17), R3                          // 3b1e0691630340f9
+	MOVD 4095(R17), R3                         // 3bfe3f91630340f9
 
-	MOVB	0x1001(R1), R2 // MOVB	4097(R1), R2  // 3b04409162078039
-	MOVH	0x2002(R1), R2 // MOVH	8194(R1), R2  // 3b08409162078079
-	MOVW	0x4004(R1), R2 // MOVW	16388(R1), R2 // 3b104091620780b9
-	MOVD	0x8008(R1), R2 // MOVD	32776(R1), R2 // 3b204091620740f9
-	FMOVS	0x4004(R1), F2 // FMOVS	16388(R1), F2 // 3b104091620740bd
-	FMOVD	0x8008(R1), F2 // FMOVD	32776(R1), F2 // 3b204091620740fd
+	MOVD R0, -4095(R17)                        // 3bfe3fd1600300f9
+	MOVD R0, -391(R17)                         // 3b1e06d1600300f9
+	MOVD R0, -257(R17)                         // 3b0604d1600300f9
+	MOVD R0, -256(R17)                         // 200210f8
+	MOVD R0, 255(R17)                          // 20f20ff8
+	MOVD R0, 256(R17)                          // 208200f9
+	MOVD R0, 257(R17)                          // 3b060491600300f9
+	MOVD R0, 391(R17)                          // 3b1e0691600300f9
+	MOVD R0, 4095(R17)                         // 3bfe3f91600300f9
+	MOVD R0, 4096(R17)                         // 200208f9
+	MOVD R3, -4095(R17)                        // 3bfe3fd1630300f9
+	MOVD R3, -391(R17)                         // 3b1e06d1630300f9
+	MOVD R3, -257(R17)                         // 3b0604d1630300f9
+	MOVD R3, -256(R17)                         // 230210f8
+	MOVD R3, 255(R17)                          // 23f20ff8
+	MOVD R3, 256(R17)                          // 238200f9
+	MOVD R3, 257(R17)                          // 3b060491630300f9
+	MOVD R3, 391(R17)                          // 3b1e0691630300f9
+	MOVD R3, 4095(R17)                         // 3bfe3f91630300f9
+
+// large aligned offset, use two instructions(add+ldr/str).
+	MOVB	R1, 0x1001(R2) 		// MOVB		R1, 4097(R2)		// 5b04409161070039
+	MOVB	R1, 0xffffff(R2)	// MOVB		R1, 16777215(R2)	// 5bfc7f9161ff3f39
+	MOVH	R1, 0x2002(R2)		// MOVH		R1, 8194(R2)		// 5b08409161070079
+	MOVH	R1, 0x1000ffe(R2)	// MOVH		R1, 16781310(R2)	// 5bfc7f9161ff3f79
+	MOVW	R1, 0x4004(R2)		// MOVW		R1, 16388(R2)		// 5b104091610700b9
+	MOVW	R1, 0x1002ffc(R2)	// MOVW		R1, 16789500(R2)	// 5bfc7f9161ff3fb9
+	MOVD	R1, 0x8008(R2)		// MOVD		R1, 32776(R2)		// 5b204091610700f9
+	MOVD	R1, 0x1006ff8(R2)	// MOVD		R1, 16805880(R2)	// 5bfc7f9161ff3ff9
+	FMOVS	F1, 0x4004(R2)		// FMOVS	F1, 16388(R2)		// 5b104091610700bd
+	FMOVS	F1, 0x1002ffc(R2)	// FMOVS	F1, 16789500(R2)	// 5bfc7f9161ff3fbd
+	FMOVD	F1, 0x8008(R2)		// FMOVD	F1, 32776(R2)		// 5b204091610700fd
+	FMOVD	F1, 0x1006ff8(R2)	// FMOVD	F1, 16805880(R2)	// 5bfc7f9161ff3ffd
+
+	MOVB	0x1001(R1), R2 		// MOVB		4097(R1), R2		// 3b04409162078039
+	MOVB	0xffffff(R1), R2	// MOVB		16777215(R1), R2	// 3bfc7f9162ffbf39
+	MOVH	0x2002(R1), R2		// MOVH		8194(R1), R2		// 3b08409162078079
+	MOVH	0x1000ffe(R1), R2	// MOVH		16781310(R1), R2	// 3bfc7f9162ffbf79
+	MOVW	0x4004(R1), R2		// MOVW		16388(R1), R2		// 3b104091620780b9
+	MOVW	0x1002ffc(R1), R2	// MOVW		16789500(R1), R2	// 3bfc7f9162ffbfb9
+	MOVD	0x8008(R1), R2		// MOVD		32776(R1), R2		// 3b204091620740f9
+	MOVD	0x1006ff8(R1), R2	// MOVD		16805880(R1), R2	// 3bfc7f9162ff7ff9
+	FMOVS	0x4004(R1), F2		// FMOVS	16388(R1), F2		// 3b104091620740bd
+	FMOVS	0x1002ffc(R1), F2	// FMOVS	16789500(R1), F2	// 3bfc7f9162ff7fbd
+	FMOVD	0x8008(R1), F2		// FMOVD	32776(R1), F2		// 3b204091620740fd
+	FMOVD	0x1006ff8(R1), F2	// FMOVD	16805880(R1), F2	// 3bfc7f9162ff7ffd
 
 // very large or unaligned offset uses constant pool.
 // the encoding cannot be checked as the address of the constant pool is unknown.
 // here we only test that they can be assembled.
-	MOVB	R1, 0x44332211(R2) // MOVB	R1, 1144201745(R2)
-	MOVH	R1, 0x44332211(R2) // MOVH	R1, 1144201745(R2)
-	MOVW	R1, 0x44332211(R2) // MOVW	R1, 1144201745(R2)
-	MOVD	R1, 0x44332211(R2) // MOVD	R1, 1144201745(R2)
-	FMOVS	F1, 0x44332211(R2) // FMOVS	F1, 1144201745(R2)
-	FMOVD	F1, 0x44332211(R2) // FMOVD	F1, 1144201745(R2)
+	MOVB	R1, 0x1000000(R2)	// MOVB		R1, 16777216(R2)
+	MOVB	R1, 0x44332211(R2)	// MOVB		R1, 1144201745(R2)
+	MOVH	R1, 0x1001000(R2)	// MOVH		R1, 16781312(R2)
+	MOVH	R1, 0x44332211(R2)	// MOVH		R1, 1144201745(R2)
+	MOVW	R1, 0x1003000(R2)	// MOVW		R1, 16789504(R2)
+	MOVW	R1, 0x44332211(R2)	// MOVW		R1, 1144201745(R2)
+	MOVD	R1, 0x1007000(R2)	// MOVD		R1, 16805888(R2)
+	MOVD	R1, 0x44332211(R2)	// MOVD		R1, 1144201745(R2)
+	FMOVS	F1, 0x1003000(R2)	// FMOVS	F1, 16789504(R2)
+	FMOVS	F1, 0x44332211(R2)	// FMOVS	F1, 1144201745(R2)
+	FMOVD	F1, 0x1007000(R2)	// FMOVD	F1, 16805888(R2)
+	FMOVD	F1, 0x44332211(R2)	// FMOVD	F1, 1144201745(R2)
 
-	MOVB	0x44332211(R1), R2 // MOVB	1144201745(R1), R2
-	MOVH	0x44332211(R1), R2 // MOVH	1144201745(R1), R2
-	MOVW	0x44332211(R1), R2 // MOVW	1144201745(R1), R2
-	MOVD	0x44332211(R1), R2 // MOVD	1144201745(R1), R2
-	FMOVS	0x44332211(R1), F2 // FMOVS	1144201745(R1), F2
-	FMOVD	0x44332211(R1), F2 // FMOVD	1144201745(R1), F2
+	MOVB	0x1000000(R1), R2	// MOVB		16777216(R1), R2
+	MOVB	0x44332211(R1), R2	// MOVB		1144201745(R1), R2
+	MOVH	0x1000000(R1), R2	// MOVH		16777216(R1), R2
+	MOVH	0x44332211(R1), R2	// MOVH		1144201745(R1), R2
+	MOVW	0x1000000(R1), R2	// MOVW		16777216(R1), R2
+	MOVW	0x44332211(R1), R2	// MOVW		1144201745(R1), R2
+	MOVD	0x1000000(R1), R2	// MOVD		16777216(R1), R2
+	MOVD	0x44332211(R1), R2	// MOVD		1144201745(R1), R2
+	FMOVS	0x1000000(R1), F2	// FMOVS	16777216(R1), F2
+	FMOVS	0x44332211(R1), F2	// FMOVS	1144201745(R1), F2
+	FMOVD	0x1000000(R1), F2	// FMOVD	16777216(R1), F2
+	FMOVD	0x44332211(R1), F2	// FMOVD	1144201745(R1), F2
 
 // shifted or extended register offset.
 	MOVD	(R2)(R6.SXTW), R4               // 44c866f8
@@ -595,6 +650,7 @@
 	MOVD	(R3)(R6*1), R5                  // 656866f8
 	MOVD	(R2)(R6), R4                    // 446866f8
 	MOVWU	(R19)(R20<<2), R20              // 747a74b8
+	MOVD	(R2)(R3<<0), R1                 // 416863f8
 	MOVD	(R2)(R6<<3), R4                 // 447866f8
 	MOVD	(R3)(R7.SXTX<<3), R8            // 68f867f8
 	MOVWU	(R5)(R4.UXTW), R10              // aa4864b8
@@ -604,7 +660,7 @@
 	MOVHU	(R1)(R2<<1), R5                 // 25786278
 	MOVB	(R9)(R3.UXTW), R6               // 2649a338
 	MOVB	(R10)(R6), R15                  // 4f69a638
-	MOVB	(R29)(R30<<0), R14              // ae7bbe38
+	MOVB	(R29)(R30<<0), R14              // ae6bbe38
 	MOVB	(R29)(R30), R14                 // ae6bbe38
 	MOVH	(R5)(R7.SXTX<<1), R19           // b3f8a778
 	MOVH	(R8)(R4<<1), R10                // 0a79a478
@@ -925,6 +981,14 @@
 	ADR	next, R11     // ADR R11 // 2b000010
 next:
 	NOP
+	ADR -2(PC), R10    // 0a000010
+	ADR 2(PC), R16     // 10000010
+	ADR -26(PC), R1    // 01000010
+	ADR 12(PC), R2     // 02000010
+	ADRP -2(PC), R10   // 0a000090
+	ADRP 2(PC), R16    // 10000090
+	ADRP -26(PC), R1   // 01000090
+	ADRP 12(PC), R2    // 02000090
 
 // LDP/STP
 	LDP	(R0), (R0, R1)      // 000440a9
@@ -947,6 +1011,7 @@
 	LDP	-8(R0), (R1, R2)    // 01887fa9
 	LDP	x(SB), (R1, R2)
 	LDP	x+8(SB), (R1, R2)
+	LDP	8(R1), (ZR, R2)     // 3f8840a9
 	LDPW	-5(R0), (R1, R2)    // 1b1400d1610b4029
 	LDPW	(R0), (R1, R2)      // 01084029
 	LDPW	4(R0), (R1, R2)     // 01884029
@@ -964,6 +1029,7 @@
 	LDPW	1024(RSP), (R1, R2) // fb031091610b4029
 	LDPW	x(SB), (R1, R2)
 	LDPW	x+8(SB), (R1, R2)
+	LDPW	8(R1), (ZR, R2)     // 3f084129
 	LDPSW	(R0), (R1, R2)      // 01084069
 	LDPSW	4(R0), (R1, R2)     // 01884069
 	LDPSW	-4(R0), (R1, R2)    // 01887f69
@@ -980,6 +1046,7 @@
 	LDPSW	1024(RSP), (R1, R2) // fb031091610b4069
 	LDPSW	x(SB), (R1, R2)
 	LDPSW	x+8(SB), (R1, R2)
+	LDPSW	8(R1), (ZR, R2)     // 3f084169
 	STP	(R3, R4), (R5)      // a31000a9
 	STP	(R3, R4), 8(R5)     // a39000a9
 	STP.W	(R3, R4), 8(R5)     // a39080a9
diff --git a/src/cmd/asm/internal/asm/testdata/arm64enc.s b/src/cmd/asm/internal/asm/testdata/arm64enc.s
index 7ef3a7f..cc002a1 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64enc.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64enc.s
@@ -186,7 +186,7 @@
 	MOVBU.P 42(R2), R12                        // 4ca44238
 	MOVBU.W -27(R2), R14                       // 4e5c5e38
 	MOVBU 2916(R24), R3                        // 03936d39
-	MOVBU (R19)(R14<<0), R23                   // 777a6e38
+	MOVBU (R19)(R14<<0), R23                   // 776a6e38
 	MOVBU (R2)(R8.SXTX), R19                   // 53e86838
 	MOVBU (R27)(R23), R14                      // 6e6b7738
 	MOVHU.P 107(R14), R13                      // cdb54678
diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s
index 354b64d..3ac8788 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64error.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64error.s
@@ -66,7 +66,6 @@
 	LDP.W	8(R3), (R2, R3)                                  // ERROR "constrained unpredictable behavior"
 	LDP	(R1), (R2, R2)                                   // ERROR "constrained unpredictable behavior"
 	LDP	(R0), (F0, F1)                                   // ERROR "invalid register pair"
-	LDP	(R0), (R3, ZR)                                   // ERROR "invalid register pair"
 	LDXPW	(RSP), (R2, R2)                                  // ERROR "constrained unpredictable behavior"
 	LDAXPW	(R5), (R2, R2)                                   // ERROR "constrained unpredictable behavior"
 	MOVD.P	300(R2), R3                                      // ERROR "offset out of range [-256,255]"
@@ -166,94 +165,94 @@
 	FSTPD	(R1, R2), (R0)                                   // ERROR "invalid register pair"
 	FMOVS	(F2), F0                                         // ERROR "illegal combination"
 	FMOVD	F0, (F1)                                         // ERROR "illegal combination"
-	LDADDAD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDAW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDAH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDAB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDALD	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDADDALW	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDADDALH	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDADDALB	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDADDD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDLD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDLW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDLH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDADDLB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRAD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRAW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRAH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRAB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRALD	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDCLRALW	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDCLRALH	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDCLRALB	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDCLRD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRLD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRLW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRLH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDCLRLB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORAD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORAW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORAH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORAB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORALD	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDEORALW	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDEORALH	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDEORALB	R5, (R6), RSP                            // ERROR "illegal destination register"
-	LDEORD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORLD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORLW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORLH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDEORLB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORAD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORAW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORAH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORAB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORALD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORALW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORALH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORALB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORLD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORLW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORLH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	LDORLB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPAD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPAW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPAH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPAB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPALD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPALW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPALH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPALB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPLD	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPLW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPLH	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	SWPLB	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	STXR	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	STXRW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	STLXR	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	STLXRW	R5, (R6), RSP                                    // ERROR "illegal destination register"
-	STXP	(R5, R7), (R6), RSP                              // ERROR "illegal destination register"
-	STXPW	(R5, R7), (R6), RSP                              // ERROR "illegal destination register"
-	STLXP	(R5, R7), (R6), RSP                              // ERROR "illegal destination register"
-	STLXP	(R5, R7), (R6), RSP                              // ERROR "illegal destination register"
+	LDADDAD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDAW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDAH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDAB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDALD	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDADDALW	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDADDALH	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDADDALB	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDADDD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDLD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDLW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDLH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDADDLB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRAD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRAW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRAH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRAB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRALD	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDCLRALW	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDCLRALH	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDCLRALB	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDCLRD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRLD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRLW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRLH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDCLRLB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORAD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORAW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORAH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORAB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORALD	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDEORALW	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDEORALH	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDEORALB	R5, (R6), RSP                            // ERROR "illegal combination"
+	LDEORD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORLD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORLW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORLH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDEORLB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORAD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORAW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORAH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORAB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORALD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORALW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORALH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORALB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORLD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORLW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORLH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	LDORLB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPAD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPAW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPAH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPAB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPALD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPALW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPALH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPALB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPLD	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPLW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPLH	R5, (R6), RSP                                    // ERROR "illegal combination"
+	SWPLB	R5, (R6), RSP                                    // ERROR "illegal combination"
+	STXR	R5, (R6), RSP                                    // ERROR "illegal combination"
+	STXRW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	STLXR	R5, (R6), RSP                                    // ERROR "illegal combination"
+	STLXRW	R5, (R6), RSP                                    // ERROR "illegal combination"
+	STXP	(R5, R7), (R6), RSP                              // ERROR "illegal combination"
+	STXPW	(R5, R7), (R6), RSP                              // ERROR "illegal combination"
+	STLXP	(R5, R7), (R6), RSP                              // ERROR "illegal combination"
+	STLXP	(R5, R7), (R6), RSP                              // ERROR "illegal combination"
 	MSR	OSLAR_EL1, R5                                    // ERROR "illegal combination"
 	MRS	R11, AIDR_EL1                                    // ERROR "illegal combination"
 	MSR	R6, AIDR_EL1                                     // ERROR "system register is not writable"
diff --git a/src/cmd/asm/internal/asm/testdata/mips.s b/src/cmd/asm/internal/asm/testdata/mips.s
index 7136d68..f65eba0 100644
--- a/src/cmd/asm/internal/asm/testdata/mips.s
+++ b/src/cmd/asm/internal/asm/testdata/mips.s
@@ -429,6 +429,11 @@
 	CLZ	R1, R2 // 70221020
 	CLO	R1, R2 // 70221021
 
+	WSBH	R1, R2 // 7c0110a0
+
+	SEB	R1, R2 // 7c011420
+	SEH	R1, R2 // 7c011620
+
 	// to (Hi, Lo)
 	MADD	R2, R1 // 70220000
 	MSUB	R2, R1 // 70220004
diff --git a/src/cmd/asm/internal/asm/testdata/mips64.s b/src/cmd/asm/internal/asm/testdata/mips64.s
index 8f628e2..ea4bb80 100644
--- a/src/cmd/asm/internal/asm/testdata/mips64.s
+++ b/src/cmd/asm/internal/asm/testdata/mips64.s
@@ -587,8 +587,17 @@
 	CALL	foo(SB)
 	RET	foo(SB)
 
+	// unary operation
 	NEGW	R1, R2 // 00011023
 	NEGV	R1, R2 // 0001102f
+
+	WSBH	R1, R2 // 7c0110a0
+	DSBH	R1, R2 // 7c0110a4
+	DSHD	R1, R2 // 7c011164
+
+	SEB	R1, R2 // 7c011420
+	SEH	R1, R2 // 7c011620
+
 	RET
 
 // MSA VMOVI
diff --git a/src/cmd/asm/internal/asm/testdata/ppc64.s b/src/cmd/asm/internal/asm/testdata/ppc64.s
index a0f1276..01052b4 100644
--- a/src/cmd/asm/internal/asm/testdata/ppc64.s
+++ b/src/cmd/asm/internal/asm/testdata/ppc64.s
@@ -17,14 +17,14 @@
 	MOVD $1, R3                     // 38600001
 	MOVD $-1, R4                    // 3880ffff
 	MOVD $65535, R5                 // 6005ffff
-	MOVD $65536, R6                 // 64060001
+	MOVD $65536, R6                 // 3cc00001
 	MOVD $-32767, R5                // 38a08001
 	MOVD $-32768, R6                // 38c08000
 	MOVD $1234567, R5               // 6405001260a5d687 or 0600001238a0d687
 	MOVW $1, R3                     // 38600001
 	MOVW $-1, R4                    // 3880ffff
 	MOVW $65535, R5                 // 6005ffff
-	MOVW $65536, R6                 // 64060001
+	MOVW $65536, R6                 // 3cc00001
 	MOVW $-32767, R5                // 38a08001
 	MOVW $-32768, R6                // 38c08000
 	MOVW $1234567, R5               // 6405001260a5d687 or 0600001238a0d687
@@ -32,7 +32,26 @@
 	MOVW $2147483649, R5            // 6405800060a50001 or 0600800038a00001
 	MOVD $2147483649, R5            // 6405800060a50001 or 0600800038a00001
 	// Hex constant 0xFFFFFFFF80000001
-	MOVD $-2147483647, R5    	// 3ca0800060a50001 or 0603800038a00001
+	MOVD $-2147483647, R5           // 3ca0800060a50001 or 0603800038a00001
+	// Hex constant 0xFFFFFFFE00000002 (load of constant on < power10, pli on >= power10)
+	MOVD $-8589934590, R5           // 3ca00000e8a50000 or 0602000038a00002
+
+	// For backwards compatibility, MOVW $const,Rx and MOVWZ $const,Rx assemble identically
+	// and accept the same constants.
+	MOVW $2147483648, R5            // 64058000
+	MOVWZ $-2147483648, R5          // 3ca08000
+
+	// TODO: These are preprocessed by the assembler into MOVD $const>>shift, R5; SLD $shift, R5.
+	//       This only captures the MOVD. Should the SLD be appended to the encoding by the test?
+	// Hex constant 0x20004000000
+	MOVD $2199090364416, R5         // 60058001
+	// Hex constant 0xFFFFFE0004000000
+	MOVD $-2198956146688, R5        // 38a08001
+	// TODO: On GOPPC64={power8,power9}, this is preprocessed into MOVD $-1, R5; RLDC R5, $33, $63, R5.
+	//       This only captures the MOVD. Should the RLDC be appended to the encoding by the test?
+	// Hex constant 0xFFFFFFFE00000001
+	MOVD $-8589934591, R5           // 38a0ffff or 0602000038a00001
+
 	MOVD 8(R3), R4                  // e8830008
 	MOVD (R3)(R4), R5               // 7ca4182a
 	MOVD (R3)(R0), R5               // 7ca0182a
@@ -164,6 +183,10 @@
 	ADD $-32767, R5, R4             // 38858001
 	ADD $-32768, R6                 // 38c68000
 	ADD $-32768, R6, R5             // 38a68000
+	// Hex constant 0xFFFFFFFE00000000
+	ADD $-8589934592, R5            // 3fe0fffe600000007bff83e4600000007cbf2a14 or 0602000038a50000
+	// Hex constant 0xFFFFFFFE00010001
+	ADD $-8589869055, R5            // 3fe0fffe63ff00017bff83e463ff00017cbf2a14 or 0602000138a50001
 
 	//TODO: this compiles to add r5,r6,r0. It should be addi r5,r6,0.
 	//      this is OK since r0 == $0, but the latter is preferred.
@@ -174,6 +197,7 @@
 	ADDEX R3, R5, $3, R6            // 7cc32f54
 	ADDEX R3, $3, R5, R6            // 7cc32f54
 	ADDIS $8, R3                    // 3c630008
+	ADD   $524288, R3               // 3c630008
 	ADDIS $1000, R3, R4             // 3c8303e8
 
 	ANDCC $1, R3                    // 70630001
@@ -192,6 +216,7 @@
 	ANDCC $1234567, R5, R6          // 641f001263ffd6877fe62839
 	ANDISCC $1, R3                  // 74630001
 	ANDISCC $1000, R3, R4           // 746403e8
+	ANDCC $65536000, R3, R4         // 746403e8
 
 	OR $1, R3                       // 60630001
 	OR $1, R3, R4                   // 60640001
@@ -207,7 +232,10 @@
 	OR $-32768, R6, R7              // 3be080007fe73378
 	OR $1234567, R5                 // 641f001263ffd6877fe52b78
 	OR $1234567, R5, R3             // 641f001263ffd6877fe32b78
-	ORIS $255, R3, R4
+	OR $2147483648, R5, R3          // 64a38000
+	OR $2147483649, R5, R3          // 641f800063ff00017fe32b78
+	ORIS $255, R3, R4               // 646400ff
+	OR $16711680, R3, R4            // 646400ff
 
 	XOR $1, R3                      // 68630001
 	XOR $1, R3, R4                  // 68640001
@@ -223,7 +251,8 @@
 	XOR $-32768, R6, R7             // 3be080007fe73278
 	XOR $1234567, R5                // 641f001263ffd6877fe52a78
 	XOR $1234567, R5, R3            // 641f001263ffd6877fe32a78
-	XORIS $15, R3, R4
+	XORIS $15, R3, R4               // 6c64000f
+	XOR   $983040, R3, R4           // 6c64000f
 
 	// TODO: the order of CR operands don't match
 	CMP R3, R4                      // 7c232000
@@ -233,7 +262,6 @@
 	CMPB R3,R4,R4                   // 7c6423f8
 	CMPEQB R3,R4,CR6                // 7f0321c0
 
-	// TODO: constants for ADDC?
 	ADD R3, R4                      // 7c841a14
 	ADD R3, R4, R5                  // 7ca41a14
 	ADDC R3, R4                     // 7c841814
@@ -246,6 +274,8 @@
 	ADDV R3, R4                     // 7c841e14
 	ADDVCC R3, R4                   // 7c841e15
 	ADDCCC R3, R4, R5               // 7ca41815
+	ADDCCC $65536, R4, R5           // 641f0001600000007cbf2015
+	ADDCCC $65537, R4, R5           // 641f000163ff00017cbf2015
 	ADDME R3, R4                    // 7c8301d4
 	ADDMECC R3, R4                  // 7c8301d5
 	ADDMEV R3, R4                   // 7c8305d4
@@ -299,6 +329,8 @@
 	SUBECC R3, R4, R5               // 7ca32111
 	SUBEV R3, R4, R5                // 7ca32510
 	SUBEVCC R3, R4, R5              // 7ca32511
+	SUBC R3, $65536, R4             // 3fe00001600000007c83f810
+	SUBC R3, $65537, R4             // 3fe0000163ff00017c83f810
 
 	MULLW R3, R4                    // 7c8419d6
 	MULLW R3, R4, R5                // 7ca419d6
@@ -393,12 +425,19 @@
 	EXTSHCC R3, R4                  // 7c640735
 	EXTSW R3, R4                    // 7c6407b4
 	EXTSWCC R3, R4                  // 7c6407b5
+	RLWMI $7, R3, $4026531855, R6   // 50663f06
+	RLWMI $7, R3, $1, R6            // 50663ffe
+	RLWMI $7, R3, $2147483648, R6   // 50663800
 	RLWMI $7, R3, $65535, R6        // 50663c3e
 	RLWMI $7, R3, $16, $31, R6      // 50663c3e
 	RLWMICC $7, R3, $65535, R6      // 50663c3f
 	RLWMICC $7, R3, $16, $31, R6    // 50663c3f
 	RLWNM $3, R4, $7, R6            // 54861f7e
+	RLWNM $0, R4, $7, R6            // 5486077e
+	RLWNM R0, R4, $7, R6            // 5c86077e
 	RLWNM $3, R4, $29, $31, R6      // 54861f7e
+	RLWNM $0, R4, $29, $31, R6      // 5486077e
+	RLWNM R0, R4, $29, $31, R6      // 5c86077e
 	RLWNM R3, R4, $7, R6            // 5c861f7e
 	RLWNM R3, R4, $29, $31, R6      // 5c861f7e
 	RLWNMCC $3, R4, $7, R6          // 54861f7f
@@ -410,6 +449,10 @@
 	RLDIMI $0, R4, $7, R6           // 788601cc
 	RLDIMICC $0, R4, $7, R6         // 788601cd
 	RLDC $0, R4, $15, R6            // 78860728
+	RLDC R3, $32, $12, R4           // 7864030a
+	RLDC R3, $8, $32, R4            // 78644028
+	RLDCCC R3, $32, $12, R4         // 7864030b
+	RLDCCC R3, $8, $32, R4          // 78644029
 	RLDCCC $0, R4, $15, R6          // 78860729
 	RLDCL $0, R4, $7, R6            // 78860770
 	RLDCLCC $0, R4, $15, R6         // 78860721
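
To make the TODO comments about constant preprocessing in the MOVD tests above concrete, here is a minimal standalone Go sketch of the splitting idea (assumed helper name and a simplified 16-bit range check, not the assembler's actual code): a 64-bit constant whose low bits are all zero can be loaded as MOVD of the shifted-down value followed by an SLD.

package main

import (
	"fmt"
	"math/bits"
)

// splitShiftedConst reports whether c can be written as a small constant
// shifted left, returning that constant and the shift amount. The real
// assembler accepts a wider set of constants; this only shows the shape
// of the MOVD $const>>shift, Rx; SLD $shift, Rx rewrite mentioned above.
func splitShiftedConst(c uint64) (val uint64, shift int, ok bool) {
	if c == 0 {
		return 0, 0, false
	}
	shift = bits.TrailingZeros64(c)
	val = c >> uint(shift)
	return val, shift, val <= 0xffff
}

func main() {
	// 0x20004000000 is the "Hex constant 0x20004000000" case in the test above.
	if v, s, ok := splitShiftedConst(0x20004000000); ok {
		fmt.Printf("MOVD $%#x, R5; SLD $%d, R5\n", v, s) // MOVD $0x8001, R5; SLD $26, R5
	}
}
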
diff --git a/src/cmd/asm/internal/asm/testdata/riscv64.s b/src/cmd/asm/internal/asm/testdata/riscv64.s
index 5353832..072302b 100644
--- a/src/cmd/asm/internal/asm/testdata/riscv64.s
+++ b/src/cmd/asm/internal/asm/testdata/riscv64.s
@@ -94,6 +94,10 @@
 
 	SUB	X6, X5, X7				// b3836240
 	SUB	X5, X6					// 33035340
+	SUB	$-2047, X5, X6				// 1383f27f
+	SUB	$2048, X5, X6				// 13830280
+	SUB	$-2047, X5				// 9382f27f
+	SUB	$2048, X5				// 93820280
 
 	SRA	X6, X5, X7				// b3d36240
 	SRA	X5, X6					// 33535340
@@ -157,6 +161,7 @@
 	ADDW	$1, X6					// 1b031300
 	SLLW	$1, X6					// 1b131300
 	SRLW	$1, X6					// 1b531300
+	SUBW	$1, X6					// 1b03f3ff
 	SRAW	$1, X6					// 1b531340
 
 	// 5.3: Load and Store Instructions (RV64I)
@@ -354,6 +359,14 @@
 	MOVD	F0, 4(X5)				// 27b20200
 	MOVD	F0, F1					// d3000022
 
+	// TLS load with local-exec (LUI + ADDIW + ADD of TP + load)
+	MOV	tls(SB), X5				// b70f00009b8f0f00b38f4f0083b20f00
+	MOVB	tls(SB), X5				// b70f00009b8f0f00b38f4f0083820f00
+
+	// TLS store with local-exec (LUI + ADDIW + ADD of TP + store)
+	MOV	X5, tls(SB)				// b70f00009b8f0f00b38f4f0023b05f00
+	MOVB	X5, tls(SB)				// b70f00009b8f0f00b38f4f0023805f00
+
 	// NOT pseudo-instruction
 	NOT	X5					// 93c2f2ff
 	NOT	X5, X6					// 13c3f2ff
@@ -373,7 +386,7 @@
 	JMP	4(X5)					// 67804200
 
 	// CALL and JMP to symbol are encoded as JAL (using LR or ZERO
-	// respectively), with a R_RISCV_CALL relocation. The linker resolves
+	// respectively), with an R_RISCV_JAL relocation. The linker resolves
 	// the real address and updates the immediate, using a trampoline in
 	// the case where the address is not directly reachable.
 	CALL	asmtest(SB)				// ef000000
@@ -407,3 +420,5 @@
 	FLTD	F0, F1, X5				// d39200a2
 	FLED	F0, F1, X5				// d38200a2
 	FEQD	F0, F1, X5				// d3a200a2
+
+GLOBL tls(SB), TLSBSS, $8
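
The SUB-with-immediate encodings added above suggest how these forms are handled: RV64I has no subtract-immediate instruction, so SUB $c decodes as ADDI with the negated constant (1383f27f above decodes to addi x6, x5, 2047). A small illustrative Go sketch of that rewrite follows, with an assumed helper name and the signed 12-bit ADDI range as the only constraint; it mirrors the encodings in the test, not the assembler's real code path.

package main

import "fmt"

// subImmAsAddi returns the ADDI immediate used for "SUB $c, rs1, rd" and
// whether it fits the signed 12-bit immediate field.
func subImmAsAddi(c int64) (imm int64, ok bool) {
	imm = -c
	return imm, imm >= -2048 && imm <= 2047
}

func main() {
	for _, c := range []int64{-2047, 2048, -2048} {
		if imm, ok := subImmAsAddi(c); ok {
			fmt.Printf("SUB $%d -> ADDI $%d\n", c, imm) // e.g. SUB $2048 -> ADDI $-2048
		} else {
			fmt.Printf("SUB $%d: immediate does not fit, needs a constant load\n", c)
		}
	}
}
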
diff --git a/src/cmd/asm/internal/asm/testdata/riscv64error.s b/src/cmd/asm/internal/asm/testdata/riscv64error.s
index cdb8a02..2dc9db3 100644
--- a/src/cmd/asm/internal/asm/testdata/riscv64error.s
+++ b/src/cmd/asm/internal/asm/testdata/riscv64error.s
@@ -38,5 +38,8 @@
 	SLLIW	$-1, X5, X6			// ERROR "shift amount out of range 0 to 31"
 	SRLIW	$-1, X5, X6			// ERROR "shift amount out of range 0 to 31"
 	SRAIW	$-1, X5, X6			// ERROR "shift amount out of range 0 to 31"
-
+	SD	X5, 4294967296(X6)		// ERROR "constant 4294967296 too large"
+	SRLI	$1, X5, F1			// ERROR "expected integer register in rd position but got non-integer register F1"
+	SRLI	$1, F1, X5			// ERROR "expected integer register in rs1 position but got non-integer register F1"
+	FNES	F1, (X5)			// ERROR "needs an integer register output"
 	RET
diff --git a/src/cmd/asm/internal/asm/testdata/s390x.s b/src/cmd/asm/internal/asm/testdata/s390x.s
index 78ccb96..9771906 100644
--- a/src/cmd/asm/internal/asm/testdata/s390x.s
+++ b/src/cmd/asm/internal/asm/testdata/s390x.s
@@ -415,6 +415,14 @@
 
 	SYNC                           // 07e0
 
+	KM	R2, R4                 // b92e0024
+	KMC	R2, R6                 // b92f0026
+	KLMD	R2, R8                 // b93f0028
+	KIMD	R0, R4                 // b93e0004
+	KDSA	R0, R8                 // b93a0008
+	KMA	R2, R6, R4              // b9296024
+	KMCTR   R2, R6, R4              // b92d6024
+
 	// vector add and sub instructions
 	VAB	V3, V4, V4              // e743400000f3
 	VAH	V3, V4, V4              // e743400010f3
diff --git a/src/cmd/asm/internal/flags/flags.go b/src/cmd/asm/internal/flags/flags.go
index b9a94a0..e15a062 100644
--- a/src/cmd/asm/internal/flags/flags.go
+++ b/src/cmd/asm/internal/flags/flags.go
@@ -16,17 +16,16 @@
 )
 
 var (
-	Debug            = flag.Bool("debug", false, "dump instructions as they are parsed")
-	OutputFile       = flag.String("o", "", "output file; default foo.o for /a/b/c/foo.s as first argument")
-	TrimPath         = flag.String("trimpath", "", "remove prefix from recorded source file paths")
-	Shared           = flag.Bool("shared", false, "generate code that can be linked into a shared library")
-	Dynlink          = flag.Bool("dynlink", false, "support references to Go symbols defined in other shared libraries")
-	Linkshared       = flag.Bool("linkshared", false, "generate code that will be linked against Go shared libraries")
-	AllErrors        = flag.Bool("e", false, "no limit on number of errors reported")
-	SymABIs          = flag.Bool("gensymabis", false, "write symbol ABI information to output file, don't assemble")
-	Importpath       = flag.String("p", obj.UnlinkablePkg, "set expected package import to path")
-	Spectre          = flag.String("spectre", "", "enable spectre mitigations in `list` (all, ret)")
-	CompilingRuntime = flag.Bool("compiling-runtime", false, "source to be compiled is part of the Go runtime")
+	Debug      = flag.Bool("debug", false, "dump instructions as they are parsed")
+	OutputFile = flag.String("o", "", "output file; default foo.o for /a/b/c/foo.s as first argument")
+	TrimPath   = flag.String("trimpath", "", "remove prefix from recorded source file paths")
+	Shared     = flag.Bool("shared", false, "generate code that can be linked into a shared library")
+	Dynlink    = flag.Bool("dynlink", false, "support references to Go symbols defined in other shared libraries")
+	Linkshared = flag.Bool("linkshared", false, "generate code that will be linked against Go shared libraries")
+	AllErrors  = flag.Bool("e", false, "no limit on number of errors reported")
+	SymABIs    = flag.Bool("gensymabis", false, "write symbol ABI information to output file, don't assemble")
+	Importpath = flag.String("p", obj.UnlinkablePkg, "set expected package import to path")
+	Spectre    = flag.String("spectre", "", "enable spectre mitigations in `list` (all, ret)")
 )
 
 var DebugFlags struct {
diff --git a/src/cmd/asm/internal/lex/input.go b/src/cmd/asm/internal/lex/input.go
index 276b4b0..da4ebe6 100644
--- a/src/cmd/asm/internal/lex/input.go
+++ b/src/cmd/asm/internal/lex/input.go
@@ -6,7 +6,6 @@
 
 import (
 	"fmt"
-	"internal/buildcfg"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -46,21 +45,6 @@
 // predefine installs the macros set by the -D flag on the command line.
 func predefine(defines flags.MultiFlag) map[string]*Macro {
 	macros := make(map[string]*Macro)
-
-	// Set macros for GOEXPERIMENTs so we can easily switch
-	// runtime assembly code based on them.
-	if *flags.CompilingRuntime {
-		for _, exp := range buildcfg.Experiment.Enabled() {
-			// Define macro.
-			name := "GOEXPERIMENT_" + exp
-			macros[name] = &Macro{
-				name:   name,
-				args:   nil,
-				tokens: Tokenize("1"),
-			}
-		}
-	}
-
 	for _, name := range defines {
 		value := "1"
 		i := strings.IndexRune(name, '=')
diff --git a/src/cmd/asm/internal/lex/lex.go b/src/cmd/asm/internal/lex/lex.go
index 7cd41a5..f1923be 100644
--- a/src/cmd/asm/internal/lex/lex.go
+++ b/src/cmd/asm/internal/lex/lex.go
@@ -105,13 +105,9 @@
 
 // Make returns a Token with the given rune (ScanToken) and text representation.
 func Make(token ScanToken, text string) Token {
-	// If the symbol starts with center dot, as in ·x, rewrite it as ""·x
-	if token == scanner.Ident && strings.HasPrefix(text, "\u00B7") {
-		text = `""` + text
-	}
 	// Substitute the substitutes for . and /.
-	text = strings.Replace(text, "\u00B7", ".", -1)
-	text = strings.Replace(text, "\u2215", "/", -1)
+	text = strings.ReplaceAll(text, "\u00B7", ".")
+	text = strings.ReplaceAll(text, "\u2215", "/")
 	return Token{ScanToken: token, text: text}
 }
 
diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go
index 6a25fd4..ba69195 100644
--- a/src/cmd/asm/main.go
+++ b/src/cmd/asm/main.go
@@ -35,7 +35,6 @@
 	if architecture == nil {
 		log.Fatalf("unrecognized architecture %s", GOARCH)
 	}
-
 	ctxt := obj.Linknew(architecture.LinkArch)
 	ctxt.Debugasm = flags.PrintOut
 	ctxt.Debugvlog = flags.DebugV
@@ -76,12 +75,19 @@
 		fmt.Fprintf(buf, "!\n")
 	}
 
+	// Set macros for GOEXPERIMENTs so we can easily switch
+	// runtime assembly code based on them.
+	if objabi.LookupPkgSpecial(ctxt.Pkgpath).AllowAsmABI {
+		for _, exp := range buildcfg.Experiment.Enabled() {
+			flags.D = append(flags.D, "GOEXPERIMENT_"+exp)
+		}
+	}
+
 	var ok, diag bool
 	var failedFile string
 	for _, f := range flag.Args() {
 		lexer := lex.NewLexer(f)
-		parser := asm.NewParser(ctxt, architecture, lexer,
-			*flags.CompilingRuntime)
+		parser := asm.NewParser(ctxt, architecture, lexer)
 		ctxt.DiagFunc = func(format string, args ...interface{}) {
 			diag = true
 			log.Printf(format, args...)
@@ -93,7 +99,7 @@
 			pList.Firstpc, ok = parser.Parse()
 			// reports errors to parser.Errorf
 			if ok {
-				obj.Flushplist(ctxt, pList, nil, *flags.Importpath)
+				obj.Flushplist(ctxt, pList, nil)
 			}
 		}
 		if !ok {
diff --git a/src/cmd/cgo/doc.go b/src/cmd/cgo/doc.go
index b1a288f..c2e3751 100644
--- a/src/cmd/cgo/doc.go
+++ b/src/cmd/cgo/doc.go
@@ -686,7 +686,7 @@
 	_cgo_export.c   # for gcc
 	_cgo_export.h   # for gcc
 	_cgo_main.c     # for gcc
-	_cgo_flags      # for alternative build tools
+	_cgo_flags      # for build tool (if -gccgo)
 
 The file x.cgo1.go is a copy of x.go with the import "C" removed and
 references to C.xxx replaced with names like _Cfunc_xxx or _Ctype_xxx.
diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go
index 7ae6fbc..6e7556d 100644
--- a/src/cmd/cgo/gcc.go
+++ b/src/cmd/cgo/gcc.go
@@ -73,27 +73,41 @@
 	return s
 }
 
-// DiscardCgoDirectives processes the import C preamble, and discards
-// all #cgo CFLAGS and LDFLAGS directives, so they don't make their
-// way into _cgo_export.h.
-func (f *File) DiscardCgoDirectives() {
+// ProcessCgoDirectives processes the import C preamble:
+//  1. discards all #cgo CFLAGS, LDFLAGS, nocallback and noescape directives,
+//     so they don't make their way into _cgo_export.h.
+//  2. parses the nocallback and noescape directives.
+func (f *File) ProcessCgoDirectives() {
 	linesIn := strings.Split(f.Preamble, "\n")
 	linesOut := make([]string, 0, len(linesIn))
+	f.NoCallbacks = make(map[string]bool)
+	f.NoEscapes = make(map[string]bool)
 	for _, line := range linesIn {
 		l := strings.TrimSpace(line)
 		if len(l) < 5 || l[:4] != "#cgo" || !unicode.IsSpace(rune(l[4])) {
 			linesOut = append(linesOut, line)
 		} else {
 			linesOut = append(linesOut, "")
+
+			// #cgo (nocallback|noescape) <function name>
+			if fields := strings.Fields(l); len(fields) == 3 {
+				directive := fields[1]
+				funcName := fields[2]
+				if directive == "nocallback" {
+					fatalf("#cgo nocallback disabled until Go 1.23")
+					f.NoCallbacks[funcName] = true
+				} else if directive == "noescape" {
+					fatalf("#cgo noescape disabled until Go 1.23")
+					f.NoEscapes[funcName] = true
+				}
+			}
 		}
 	}
 	f.Preamble = strings.Join(linesOut, "\n")
 }
 
-// addToFlag appends args to flag. All flags are later written out onto the
-// _cgo_flags file for the build system to use.
+// addToFlag appends args to flag.
 func (p *Package) addToFlag(flag string, args []string) {
-	p.CgoFlags[flag] = append(p.CgoFlags[flag], args...)
 	if flag == "CFLAGS" {
 		// We'll also need these when preprocessing for dwarf information.
 		// However, discard any -g options: we need to be able
@@ -104,6 +118,9 @@
 			}
 		}
 	}
+	if flag == "LDFLAGS" {
+		p.LdFlags = append(p.LdFlags, args...)
+	}
 }
 
 // splitQuoted splits the string s around each instance of one or more consecutive
@@ -581,7 +598,7 @@
 			// As of https://reviews.llvm.org/D123534, clang
 			// now emits DW_TAG_variable DIEs that have
 			// no name (so as to be able to describe the
-			// type and source locations of constant strings
+			// type and source locations of constant strings)
 			// like the second arg in the call below:
 			//
 			//     myfunction(42, "foo")
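
As a reading aid for the ProcessCgoDirectives change above, here is a self-contained Go sketch (assumed function name, simplified behavior) of how "#cgo nocallback <func>" and "#cgo noescape <func>" lines are recognized and stripped from the preamble; note that the patch itself still rejects these directives until Go 1.23.

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// parseCgoAnnotations blanks out "#cgo ..." lines and records the function
// names named by nocallback/noescape directives, following the same
// field-splitting approach as the change above.
func parseCgoAnnotations(preamble string) (clean string, noCallback, noEscape map[string]bool) {
	noCallback = make(map[string]bool)
	noEscape = make(map[string]bool)
	var out []string
	for _, line := range strings.Split(preamble, "\n") {
		l := strings.TrimSpace(line)
		if len(l) < 5 || l[:4] != "#cgo" || !unicode.IsSpace(rune(l[4])) {
			out = append(out, line)
			continue
		}
		out = append(out, "") // keep directives out of the generated headers
		if fields := strings.Fields(l); len(fields) == 3 {
			switch fields[1] {
			case "nocallback":
				noCallback[fields[2]] = true
			case "noescape":
				noEscape[fields[2]] = true
			}
		}
	}
	return strings.Join(out, "\n"), noCallback, noEscape
}

func main() {
	pre := "#cgo noescape c_fill\nvoid c_fill(char*);\n"
	_, _, noEsc := parseCgoAnnotations(pre)
	fmt.Println(noEsc["c_fill"]) // true
}
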
diff --git a/src/cmd/cgo/internal/test/callback_windows.go b/src/cmd/cgo/internal/test/callback_windows.go
new file mode 100644
index 0000000..77bdfa4
--- /dev/null
+++ b/src/cmd/cgo/internal/test/callback_windows.go
@@ -0,0 +1,109 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+/*
+#include <windows.h>
+USHORT backtrace(ULONG FramesToCapture, PVOID *BackTrace) {
+#ifdef _AMD64_
+	CONTEXT context;
+	RtlCaptureContext(&context);
+	ULONG64 ControlPc;
+	ControlPc = context.Rip;
+	int i;
+	for (i = 0; i < FramesToCapture; i++) {
+		PRUNTIME_FUNCTION FunctionEntry;
+		ULONG64 ImageBase;
+		VOID *HandlerData;
+		ULONG64 EstablisherFrame;
+
+		FunctionEntry = RtlLookupFunctionEntry(ControlPc, &ImageBase, NULL);
+
+		if (!FunctionEntry) {
+			// For simplicity, don't unwind leaf entries, which are not used in this test.
+			break;
+		} else {
+			RtlVirtualUnwind(0, ImageBase, ControlPc, FunctionEntry, &context, &HandlerData, &EstablisherFrame, NULL);
+		}
+
+		ControlPc = context.Rip;
+		// Check if we left the user range.
+		if (ControlPc < 0x10000) {
+			break;
+		}
+
+		BackTrace[i] = (PVOID)(ControlPc);
+	}
+	return i;
+#else
+	return 0;
+#endif
+}
+*/
+import "C"
+
+import (
+	"internal/testenv"
+	"reflect"
+	"runtime"
+	"strings"
+	"testing"
+	"unsafe"
+)
+
+// Test that the stack can be unwound through a call out and call back
+// into Go.
+func testCallbackCallersSEH(t *testing.T) {
+	testenv.SkipIfOptimizationOff(t) // This test requires inlining.
+	if runtime.Compiler != "gc" {
+		// The exact function names are not going to be the same.
+		t.Skip("skipping for non-gc toolchain")
+	}
+	if runtime.GOARCH != "amd64" {
+		// TODO: support SEH on other architectures.
+		t.Skip("skipping on non-amd64")
+	}
+	// Only frames in the test package are checked.
+	want := []string{
+		"test._Cfunc_backtrace",
+		"test.testCallbackCallersSEH.func1.1",
+		"test.testCallbackCallersSEH.func1",
+		"test.goCallback",
+		"test._Cfunc_callback",
+		"test.nestedCall.func1",
+		"test.nestedCall",
+		"test.testCallbackCallersSEH",
+		"test.TestCallbackCallersSEH",
+	}
+	pc := make([]uintptr, 100)
+	n := 0
+	nestedCall(func() {
+		n = int(C.backtrace(C.DWORD(len(pc)), (*C.PVOID)(unsafe.Pointer(&pc[0]))))
+	})
+	got := make([]string, 0, n)
+	for i := 0; i < n; i++ {
+		f := runtime.FuncForPC(pc[i] - 1)
+		if f == nil {
+			continue
+		}
+		fname := f.Name()
+		switch fname {
+		case "goCallback":
+			// TODO(qmuntal): investigate why this function doesn't appear
+			// when using the external linker.
+			continue
+		}
+		// In module mode, this package has a fully-qualified import path.
+		// Remove it if present.
+		fname = strings.TrimPrefix(fname, "cmd/cgo/internal/")
+		if !strings.HasPrefix(fname, "test.") {
+			continue
+		}
+		got = append(got, fname)
+	}
+	if !reflect.DeepEqual(want, got) {
+		t.Errorf("incorrect backtrace:\nwant:\t%v\ngot:\t%v", want, got)
+	}
+}
diff --git a/src/cmd/cgo/internal/test/callstub_linux_ppc64le.go b/src/cmd/cgo/internal/test/callstub_linux_ppc64le.go
new file mode 100644
index 0000000..93c29e1
--- /dev/null
+++ b/src/cmd/cgo/internal/test/callstub_linux_ppc64le.go
@@ -0,0 +1,20 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+// extern int notoc_func(void);
+// int TestPPC64Stubs(void) {
+//	return notoc_func();
+// }
+import "C"
+import "testing"
+
+func testPPC64CallStubs(t *testing.T) {
+	// Verify the trampolines run on the testing machine. If they
+	// do not, or are missing, a crash is expected.
+	if C.TestPPC64Stubs() != 0 {
+		t.Skipf("This test requires binutils 2.35 or newer.")
+	}
+}
diff --git a/src/cmd/cgo/internal/test/issue1435.go b/src/cmd/cgo/internal/test/issue1435.go
index a672e26..1588d39 100644
--- a/src/cmd/cgo/internal/test/issue1435.go
+++ b/src/cmd/cgo/internal/test/issue1435.go
@@ -8,6 +8,7 @@
 
 import (
 	"fmt"
+	"internal/testenv"
 	"os"
 	"runtime"
 	"sort"
@@ -145,6 +146,13 @@
 	if syscall.Getuid() != 0 {
 		t.Skip("skipping root only test")
 	}
+	if testing.Short() && testenv.Builder() != "" && os.Getenv("USER") == "swarming" {
+		// The Go build system's swarming user is known not to be root.
+		// Unfortunately, it sometimes appears as root due the current
+		// implementation of a no-network check using 'unshare -n -r'.
+		// Since this test does need root to work, we need to skip it.
+		t.Skip("skipping root only test on a non-root builder")
+	}
 	if runtime.GOOS == "linux" {
 		if _, err := os.Stat("/etc/alpine-release"); err == nil {
 			t.Skip("skipping failing test on alpine - go.dev/issue/19938")
diff --git a/src/cmd/cgo/internal/test/issue18146.go b/src/cmd/cgo/internal/test/issue18146.go
index d302bd0..112b7ee 100644
--- a/src/cmd/cgo/internal/test/issue18146.go
+++ b/src/cmd/cgo/internal/test/issue18146.go
@@ -72,7 +72,7 @@
 			}()
 		}
 		runtime.GOMAXPROCS(threads)
-		argv := append(os.Args, "-test.run=NoSuchTestExists")
+		argv := append(os.Args, "-test.run=^$")
 		if err := syscall.Exec(os.Args[0], argv, os.Environ()); err != nil {
 			t.Fatal(err)
 		}
@@ -85,7 +85,7 @@
 		}
 	}()
 
-	args := append(append([]string(nil), os.Args[1:]...), "-test.run=Test18146")
+	args := append(append([]string(nil), os.Args[1:]...), "-test.run=^Test18146$")
 	for n := attempts; n > 0; n-- {
 		cmd := exec.Command(os.Args[0], args...)
 		cmd.Env = append(os.Environ(), "test18146=exec")
diff --git a/src/cmd/cgo/internal/test/issue4029.c b/src/cmd/cgo/internal/test/issue4029.c
index 212d692..7a8fdc1 100644
--- a/src/cmd/cgo/internal/test/issue4029.c
+++ b/src/cmd/cgo/internal/test/issue4029.c
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !windows && !static && (!darwin || (!internal_pie && !arm64))
+//go:build !windows && !static && !(darwin && internal)
 
 #include <stdint.h>
 #include <dlfcn.h>
diff --git a/src/cmd/cgo/internal/test/issue4029.go b/src/cmd/cgo/internal/test/issue4029.go
index 686b767..506c999 100644
--- a/src/cmd/cgo/internal/test/issue4029.go
+++ b/src/cmd/cgo/internal/test/issue4029.go
@@ -2,11 +2,10 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !windows && !static && (!darwin || (!internal_pie && !arm64))
+//go:build !windows && !static && !(darwin && internal)
 
-// Excluded in darwin internal linking PIE mode, as dynamic export is not
-// supported.
-// Excluded in internal linking mode on darwin/arm64, as it is always PIE.
+// Excluded in darwin internal linking PIE (which is the default) mode,
+// as dynamic export is not supported.
 
 package cgotest
 
diff --git a/src/cmd/cgo/internal/test/issue4029w.go b/src/cmd/cgo/internal/test/issue4029w.go
index 91dad6a..aa4c2f5 100644
--- a/src/cmd/cgo/internal/test/issue4029w.go
+++ b/src/cmd/cgo/internal/test/issue4029w.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build windows || static || (darwin && internal_pie) || (darwin && arm64)
+//go:build windows || static || (darwin && internal)
 
 package cgotest
 
diff --git a/src/cmd/cgo/internal/test/issue4339.c b/src/cmd/cgo/internal/test/issue4339.c
index 15d0004..d0e6487 100644
--- a/src/cmd/cgo/internal/test/issue4339.c
+++ b/src/cmd/cgo/internal/test/issue4339.c
@@ -1,3 +1,7 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 #include <stdio.h>
 #include "issue4339.h"
 
diff --git a/src/cmd/cgo/internal/test/issue4339.h b/src/cmd/cgo/internal/test/issue4339.h
index 20f6ceb..99a0996 100644
--- a/src/cmd/cgo/internal/test/issue4339.h
+++ b/src/cmd/cgo/internal/test/issue4339.h
@@ -1,3 +1,7 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 typedef struct Issue4339 Issue4339;
 
 struct Issue4339 {
diff --git a/src/cmd/cgo/internal/test/issue8756.go b/src/cmd/cgo/internal/test/issue8756.go
index 817f449..d8eadfd 100644
--- a/src/cmd/cgo/internal/test/issue8756.go
+++ b/src/cmd/cgo/internal/test/issue8756.go
@@ -1,3 +1,7 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package cgotest
 
 /*
diff --git a/src/cmd/cgo/internal/test/issue8756/issue8756.go b/src/cmd/cgo/internal/test/issue8756/issue8756.go
index 223397f..02a1424 100644
--- a/src/cmd/cgo/internal/test/issue8756/issue8756.go
+++ b/src/cmd/cgo/internal/test/issue8756/issue8756.go
@@ -1,3 +1,7 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package issue8756
 
 /*
diff --git a/src/cmd/cgo/internal/test/issue8828/trivial.go b/src/cmd/cgo/internal/test/issue8828/trivial.go
index e7b9a4e..9f26196 100644
--- a/src/cmd/cgo/internal/test/issue8828/trivial.go
+++ b/src/cmd/cgo/internal/test/issue8828/trivial.go
@@ -1,3 +1,7 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package issue8828
 
 //void foo();
diff --git a/src/cmd/cgo/internal/test/issue9026/issue9026.go b/src/cmd/cgo/internal/test/issue9026/issue9026.go
index ff269ca..13bc180 100644
--- a/src/cmd/cgo/internal/test/issue9026/issue9026.go
+++ b/src/cmd/cgo/internal/test/issue9026/issue9026.go
@@ -1,3 +1,7 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package issue9026
 
 // This file appears in its own package since the assertion tests the
diff --git a/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s b/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s
index 1f492ea..3edba3d 100644
--- a/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s
+++ b/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s
@@ -1,4 +1,4 @@
-// Copyright 2016 The Go Authors.  All rights reserved.
+// Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s b/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s
index fa34f6b..0f10e3a 100644
--- a/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s
+++ b/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s
@@ -1,4 +1,4 @@
-// Copyright 2020 The Go Authors.  All rights reserved.
+// Copyright 2020 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/src/cmd/cgo/internal/test/issue9400_linux.go b/src/cmd/cgo/internal/test/issue9400_linux.go
index 1511e25..41b9ab9 100644
--- a/src/cmd/cgo/internal/test/issue9400_linux.go
+++ b/src/cmd/cgo/internal/test/issue9400_linux.go
@@ -48,7 +48,7 @@
 	}
 
 	// Disable GC for the duration of the test.
-	// This avoids a potential GC deadlock when spinning in uninterruptable ASM below #49695.
+	// This avoids a potential GC deadlock when spinning in uninterruptible ASM below #49695.
 	defer debug.SetGCPercent(debug.SetGCPercent(-1))
 	// SetGCPercent waits until the mark phase is over, but the runtime
 	// also preempts at the start of the sweep phase, so make sure that's
diff --git a/src/cmd/cgo/internal/test/issue9510a/a.go b/src/cmd/cgo/internal/test/issue9510a/a.go
index 1a5224b..f0a0128 100644
--- a/src/cmd/cgo/internal/test/issue9510a/a.go
+++ b/src/cmd/cgo/internal/test/issue9510a/a.go
@@ -1,3 +1,7 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package issue9510a
 
 /*
diff --git a/src/cmd/cgo/internal/test/issue9510b/b.go b/src/cmd/cgo/internal/test/issue9510b/b.go
index 5016b39..6e22508 100644
--- a/src/cmd/cgo/internal/test/issue9510b/b.go
+++ b/src/cmd/cgo/internal/test/issue9510b/b.go
@@ -1,3 +1,7 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package issue9510b
 
 /*
diff --git a/src/cmd/cgo/internal/test/linux_ppc64le_test.go b/src/cmd/cgo/internal/test/linux_ppc64le_test.go
new file mode 100644
index 0000000..67b6b16
--- /dev/null
+++ b/src/cmd/cgo/internal/test/linux_ppc64le_test.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64le && linux && cgo
+
+package cgotest
+
+import "testing"
+
+func TestPPC64CallStubs(t *testing.T) {
+	testPPC64CallStubs(t)
+}
diff --git a/src/cmd/cgo/internal/test/seh_internal_windows_test.go b/src/cmd/cgo/internal/test/seh_internal_windows_test.go
new file mode 100644
index 0000000..708ffdc
--- /dev/null
+++ b/src/cmd/cgo/internal/test/seh_internal_windows_test.go
@@ -0,0 +1,16 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo && windows && internal
+
+package cgotest
+
+import (
+	"internal/testenv"
+	"testing"
+)
+
+func TestCallbackCallersSEH(t *testing.T) {
+	testenv.SkipFlaky(t, 65116)
+}
diff --git a/src/cmd/cgo/internal/test/seh_windows_test.go b/src/cmd/cgo/internal/test/seh_windows_test.go
new file mode 100644
index 0000000..4a8d5bb
--- /dev/null
+++ b/src/cmd/cgo/internal/test/seh_windows_test.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo && windows && !internal
+
+package cgotest
+
+import "testing"
+
+func TestCallbackCallersSEH(t *testing.T) { testCallbackCallersSEH(t) }
diff --git a/src/cmd/cgo/internal/test/stubtest_linux_ppc64le.S b/src/cmd/cgo/internal/test/stubtest_linux_ppc64le.S
new file mode 100644
index 0000000..0c51970
--- /dev/null
+++ b/src/cmd/cgo/internal/test/stubtest_linux_ppc64le.S
@@ -0,0 +1,122 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// When linking C ELFv2 objects, the Go linker may need to insert calling stubs.
+// A call stub is usually needed when the ELFv2 st_other attribute is different
+// between caller and callee.
+//
+// The type of call stub inserted will vary depending on GOPPC64 and the
+// buildmode (e.g. pie builds shared code, default builds fixed-position code).
+// CI is set up to run for P8 and P10 machines, and this test is run in both
+// pie and default modes.
+//
+// Several functions are written with interesting st_other attributes, and
+// call each other to test various calling combinations which require stubs.
+//
+// The call tree is as follows, starting from TestPPC64Stubs (A C function):
+// TestPPC64Stubs (compiled PIC by default by Go)
+//   notoc_func          [called TOC -> NOTOC (but R2 is preserved)]
+//     toc_func          [called NOTOC -> TOC]
+//       notoc_nor2_func [called TOC -> NOTOC]
+//       random          [dynamic TOC call]
+//     random            [dynamic NOTOC call]
+//
+// Depending on the GOPPC64/buildmode used, and the type of call, one of 7 stubs may need to be inserted:
+//
+// TOC   -> NOTOC:     Save R2, call global entry. (valid for any GOPPC64)
+//                     TOC save slot is rewritten to restore TOC.
+// NOTOC -> TOC [P10]: A PIC call stub using P10 instructions to call the global entry
+// NOTOC -> TOC [P8]:  A PIC call stub using P8 instructions to call the global entry
+//
+// TOC   -> dynamic:              A PLT call stub is generated which saves R2.
+//                                 TOC save slot is rewritten to restore TOC.
+// NOTOC -> dynamic [P10]:        A stub using pcrel instructions is generated.
+// NOTOC -> dynamic [P8/default]: A P8 compatible, non-PIC stub is generated
+// NOTOC -> dynamic [P8/pie]:     A P8 compatible, PIC stub is generated
+//
+//
+// Some notes about other cases:
+//   TOC -> TOC, NOTOC -> NOTOC, NOTOC -> TOC  local calls do not require call stubs.
+//   TOC -> NOTOC (R2 is preserved, st_other==0): A special case where a call stub is not needed.
+
+// This test requires a binutils with power10 and ELFv2 1.5 support. This is the earliest verified version.
+.if .gasversion. >= 23500
+
+// A function which does not guarantee R2 is preserved.
+// R2 is clobbered here to ensure the stubs preserve it.
+	.globl	notoc_nor2_func
+	.type	notoc_nor2_func, @function
+notoc_nor2_func:
+	.localentry notoc_nor2_func,1
+	li	2,0
+	blr
+
+// A function which expects R2 to hold TOC, and has a distinct local entry.
+	.globl	toc_func
+	.type	toc_func, @function
+toc_func:
+	addis	2,12,.TOC.-toc_func@ha
+	addi	2,2,.TOC.-toc_func@l
+	.localentry toc_func, .-toc_func
+	mflr	0
+	std	0,16(1)
+	stdu	1,-32(1)
+
+	// Call a NOTOC function which clobbers R2.
+	bl	notoc_nor2_func
+	nop
+
+	// Call libc random. This should generate a TOC relative plt stub.
+	bl	random
+	nop
+
+	addi	1,1,32
+	ld 	0,16(1)
+	mtlr	0
+	blr
+
+// An ELFv2 st_other==0 function. It preserves R2 (TOC), but does not use it.
+	.globl	notoc_func
+	.type	notoc_func, @function
+notoc_func:
+	// Save R2 and LR and stack a frame.
+	mflr	0
+	std	0,16(1)
+	stdu	1,-32(1)
+
+	// Save R2 in TOC save slot.
+	std	2,24(1)
+
+	// clobber R2
+	li	2,0
+
+	// Call toc_func. A call stub from notoc to toc should be inserted.
+	bl	toc_func@notoc
+
+	// Call libc random. A notoc plt stub should be inserted.
+	bl	random@notoc
+
+	// Return 0 to indicate the test ran.
+	li	3,0
+
+	// Restore R2
+	ld	2,24(1)
+
+	// Restore LR and pop stack
+	addi	1,1,32
+	ld 	0,16(1)
+	mtlr	0
+	blr
+
+.else
+
+// A stub for older binutils
+	.globl	notoc_func
+	.type	notoc_func, @function
+notoc_func:
+	// Return 1 to indicate the test was skipped.
+	li	3,1
+	blr
+
+.endif
diff --git a/src/cmd/cgo/internal/test/test.go b/src/cmd/cgo/internal/test/test.go
index 7da5a85..9b3790e 100644
--- a/src/cmd/cgo/internal/test/test.go
+++ b/src/cmd/cgo/internal/test/test.go
@@ -115,6 +115,14 @@
 	return x+y;
 };
 
+// escape vs noescape
+
+// TODO(#56378): enable in Go 1.23:
+// #cgo noescape handleGoStringPointerNoescape
+void handleGoStringPointerNoescape(void *s) {}
+
+void handleGoStringPointerEscape(void *s) {}
+
 // Following mimics vulkan complex definitions for benchmarking cgocheck overhead.
 
 typedef uint32_t VkFlags;
@@ -1106,6 +1114,18 @@
 			C.handleComplexPointer(&a0)
 		}
 	})
+	b.Run("string-pointer-escape", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			var s string
+			C.handleGoStringPointerEscape(unsafe.Pointer(&s))
+		}
+	})
+	b.Run("string-pointer-noescape", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			var s string
+			C.handleGoStringPointerNoescape(unsafe.Pointer(&s))
+		}
+	})
 	b.Run("eight-pointers", func(b *testing.B) {
 		var a0, a1, a2, a3, a4, a5, a6, a7 C.VkDeviceCreateInfo
 		for i := 0; i < b.N; i++ {
diff --git a/src/cmd/cgo/internal/testcshared/cshared_test.go b/src/cmd/cgo/internal/testcshared/cshared_test.go
index 7fe6782..7e9a274 100644
--- a/src/cmd/cgo/internal/testcshared/cshared_test.go
+++ b/src/cmd/cgo/internal/testcshared/cshared_test.go
@@ -182,6 +182,8 @@
 	if len(extraEnv) > 0 {
 		cmd.Env = append(os.Environ(), extraEnv...)
 	}
+	stderr := new(strings.Builder)
+	cmd.Stderr = stderr
 
 	if GOOS != "windows" {
 		// TestUnexportedSymbols relies on file descriptor 30
@@ -192,11 +194,13 @@
 		cmd.ExtraFiles = make([]*os.File, 28)
 	}
 
-	out, err := cmd.CombinedOutput()
+	t.Logf("run: %v", args)
+	out, err := cmd.Output()
+	if stderr.Len() > 0 {
+		t.Logf("stderr:\n%s", stderr)
+	}
 	if err != nil {
 		t.Fatalf("command failed: %v\n%v\n%s\n", args, err, out)
-	} else {
-		t.Logf("run: %v", args)
 	}
 	return string(out)
 }
@@ -602,9 +606,13 @@
 	defer os.Remove(bin)
 	defer os.Remove(pkgname + ".h")
 
-	out := runExe(t, nil, bin, "./"+libname)
+	args := []string{bin, "./" + libname}
+	if testing.Verbose() {
+		args = append(args, "verbose")
+	}
+	out := runExe(t, nil, args...)
 	if strings.TrimSpace(out) != "PASS" {
-		t.Error(run(t, nil, bin, libname, "verbose"))
+		t.Errorf("%v%s", args, out)
 	}
 }
 
diff --git a/src/cmd/cgo/internal/testcshared/testdata/libgo5/libgo5.go b/src/cmd/cgo/internal/testcshared/testdata/libgo5/libgo5.go
index 4ca44e5..c70dd68 100644
--- a/src/cmd/cgo/internal/testcshared/testdata/libgo5/libgo5.go
+++ b/src/cmd/cgo/internal/testcshared/testdata/libgo5/libgo5.go
@@ -31,15 +31,24 @@
 	signal.Reset(syscall.SIGIO)
 }
 
-// SawSIGIO returns whether we saw a SIGIO within a brief pause.
+// AwaitSIGIO blocks indefinitely until a SIGIO is reported.
+//
+//export AwaitSIGIO
+func AwaitSIGIO() {
+	<-sigioChan
+}
+
+// SawSIGIO reports whether we saw a SIGIO within a brief pause.
 //
 //export SawSIGIO
-func SawSIGIO() C.int {
+func SawSIGIO() bool {
+	timer := time.NewTimer(100 * time.Millisecond)
 	select {
 	case <-sigioChan:
-		return 1
-	case <-time.After(100 * time.Millisecond):
-		return 0
+		timer.Stop()
+		return true
+	case <-timer.C:
+		return false
 	}
 }
 
diff --git a/src/cmd/cgo/internal/testcshared/testdata/main4.c b/src/cmd/cgo/internal/testcshared/testdata/main4.c
index 6c16364..467a611 100644
--- a/src/cmd/cgo/internal/testcshared/testdata/main4.c
+++ b/src/cmd/cgo/internal/testcshared/testdata/main4.c
@@ -88,7 +88,7 @@
 	setsid();
 
 	if (verbose) {
-		printf("calling sigaction\n");
+		fprintf(stderr, "calling sigaction\n");
 	}
 
 	memset(&sa, 0, sizeof sa);
@@ -107,7 +107,7 @@
 	}
 
 	if (verbose) {
-		printf("calling dlopen\n");
+		fprintf(stderr, "calling dlopen\n");
 	}
 
 	handle = dlopen(argv[1], RTLD_NOW | RTLD_GLOBAL);
@@ -117,7 +117,7 @@
 	}
 
 	if (verbose) {
-		printf("calling dlsym\n");
+		fprintf(stderr, "calling dlsym\n");
 	}
 
 	// Start some goroutines.
@@ -128,7 +128,7 @@
 	}
 
 	if (verbose) {
-		printf("calling RunGoroutines\n");
+		fprintf(stderr, "calling RunGoroutines\n");
 	}
 
 	fn();
@@ -137,7 +137,7 @@
 	// will be delivered to a goroutine.
 
 	if (verbose) {
-		printf("calling pthread_sigmask\n");
+		fprintf(stderr, "calling pthread_sigmask\n");
 	}
 
 	if (sigemptyset(&mask) < 0) {
@@ -153,7 +153,7 @@
 	}
 
 	if (verbose) {
-		printf("calling kill\n");
+		fprintf(stderr, "calling kill\n");
 	}
 
 	if (kill(0, SIGIO) < 0) {
@@ -161,7 +161,7 @@
 	}
 
 	if (verbose) {
-		printf("waiting for sigioSeen\n");
+		fprintf(stderr, "waiting for sigioSeen\n");
 	}
 
 	// Wait until the signal has been delivered.
@@ -178,13 +178,13 @@
 	}
 
 	if (verbose) {
-		printf("calling setjmp\n");
+		fprintf(stderr, "calling setjmp\n");
 	}
 
 	// Test that a SIGSEGV on this thread is delivered to us.
 	if (setjmp(jmp) == 0) {
 		if (verbose) {
-			printf("triggering SIGSEGV\n");
+			fprintf(stderr, "triggering SIGSEGV\n");
 		}
 
 		*nullPointer = '\0';
@@ -194,7 +194,7 @@
 	}
 
 	if (verbose) {
-		printf("calling dlsym\n");
+		fprintf(stderr, "calling dlsym\n");
 	}
 
 	// Make sure that a SIGSEGV in Go causes a run-time panic.
@@ -205,7 +205,7 @@
 	}
 
 	if (verbose) {
-		printf("calling TestSEGV\n");
+		fprintf(stderr, "calling TestSEGV\n");
 	}
 
 	fn();
diff --git a/src/cmd/cgo/internal/testcshared/testdata/main5.c b/src/cmd/cgo/internal/testcshared/testdata/main5.c
index e7bebab..563329e 100644
--- a/src/cmd/cgo/internal/testcshared/testdata/main5.c
+++ b/src/cmd/cgo/internal/testcshared/testdata/main5.c
@@ -7,6 +7,7 @@
 // This is a lot like ../testcarchive/main3.c.
 
 #include <signal.h>
+#include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -29,8 +30,10 @@
 	int verbose;
 	struct sigaction sa;
 	void* handle;
-	void (*fn1)(void);
-	int (*sawSIGIO)(void);
+	void (*catchSIGIO)(void);
+	void (*resetSIGIO)(void);
+	void (*awaitSIGIO)();
+	bool (*sawSIGIO)();
 	int i;
 	struct timespec ts;
 
@@ -38,7 +41,7 @@
 	setvbuf(stdout, NULL, _IONBF, 0);
 
 	if (verbose) {
-		printf("calling sigaction\n");
+		fprintf(stderr, "calling sigaction\n");
 	}
 
 	memset(&sa, 0, sizeof sa);
@@ -52,7 +55,7 @@
 	}
 
 	if (verbose) {
-		printf("calling dlopen\n");
+		fprintf(stderr, "calling dlopen\n");
 	}
 
 	handle = dlopen(argv[1], RTLD_NOW | RTLD_GLOBAL);
@@ -65,7 +68,7 @@
 	// installed for SIGIO.
 
 	if (verbose) {
-		printf("raising SIGIO\n");
+		fprintf(stderr, "raising SIGIO\n");
 	}
 
 	if (raise(SIGIO) < 0) {
@@ -73,7 +76,7 @@
 	}
 
 	if (verbose) {
-		printf("waiting for sigioSeen\n");
+		fprintf(stderr, "waiting for sigioSeen\n");
 	}
 
 	// Wait until the signal has been delivered.
@@ -94,23 +97,23 @@
 	// Tell the Go code to catch SIGIO.
 
 	if (verbose) {
-		printf("calling dlsym\n");
+		fprintf(stderr, "calling dlsym\n");
 	}
 
-	fn1 = (void(*)(void))dlsym(handle, "CatchSIGIO");
-	if (fn1 == NULL) {
+	catchSIGIO = (void(*)(void))dlsym(handle, "CatchSIGIO");
+	if (catchSIGIO == NULL) {
 		fprintf(stderr, "%s\n", dlerror());
 		exit(EXIT_FAILURE);
 	}
 
 	if (verbose) {
-		printf("calling CatchSIGIO\n");
+		fprintf(stderr, "calling CatchSIGIO\n");
 	}
 
-	fn1();
+	catchSIGIO();
 
 	if (verbose) {
-		printf("raising SIGIO\n");
+		fprintf(stderr, "raising SIGIO\n");
 	}
 
 	if (raise(SIGIO) < 0) {
@@ -118,24 +121,21 @@
 	}
 
 	if (verbose) {
-		printf("calling dlsym\n");
+		fprintf(stderr, "calling dlsym\n");
 	}
 
 	// Check that the Go code saw SIGIO.
-	sawSIGIO = (int (*)(void))dlsym(handle, "SawSIGIO");
-	if (sawSIGIO == NULL) {
+	awaitSIGIO = (void (*)(void))dlsym(handle, "AwaitSIGIO");
+	if (awaitSIGIO == NULL) {
 		fprintf(stderr, "%s\n", dlerror());
 		exit(EXIT_FAILURE);
 	}
 
 	if (verbose) {
-		printf("calling SawSIGIO\n");
+		fprintf(stderr, "calling AwaitSIGIO\n");
 	}
 
-	if (!sawSIGIO()) {
-		fprintf(stderr, "Go handler did not see SIGIO\n");
-		exit(EXIT_FAILURE);
-	}
+	awaitSIGIO();
 
 	if (sigioSeen != 0) {
 		fprintf(stderr, "C handler saw SIGIO when only Go handler should have\n");
@@ -145,23 +145,29 @@
 	// Tell the Go code to stop catching SIGIO.
 
 	if (verbose) {
-		printf("calling dlsym\n");
+		fprintf(stderr, "calling dlsym\n");
 	}
 
-	fn1 = (void(*)(void))dlsym(handle, "ResetSIGIO");
-	if (fn1 == NULL) {
+	resetSIGIO = (void (*)(void))dlsym(handle, "ResetSIGIO");
+	if (resetSIGIO == NULL) {
 		fprintf(stderr, "%s\n", dlerror());
 		exit(EXIT_FAILURE);
 	}
 
 	if (verbose) {
-		printf("calling ResetSIGIO\n");
+		fprintf(stderr, "calling ResetSIGIO\n");
 	}
 
-	fn1();
+	resetSIGIO();
+
+	sawSIGIO = (bool (*)(void))dlsym(handle, "SawSIGIO");
+	if (sawSIGIO == NULL) {
+		fprintf(stderr, "%s\n", dlerror());
+		exit(EXIT_FAILURE);
+	}
 
 	if (verbose) {
-		printf("raising SIGIO\n");
+		fprintf(stderr, "raising SIGIO\n");
 	}
 
 	if (raise(SIGIO) < 0) {
@@ -169,7 +175,7 @@
 	}
 
 	if (verbose) {
-		printf("calling SawSIGIO\n");
+		fprintf(stderr, "calling SawSIGIO\n");
 	}
 
 	if (sawSIGIO()) {
@@ -178,7 +184,7 @@
 	}
 
 	if (verbose) {
-		printf("waiting for sigioSeen\n");
+		fprintf(stderr, "waiting for sigioSeen\n");
 	}
 
 	// Wait until the signal has been delivered.
diff --git a/src/cmd/cgo/internal/testerrors/errors_test.go b/src/cmd/cgo/internal/testerrors/errors_test.go
index 870d05b..8623624 100644
--- a/src/cmd/cgo/internal/testerrors/errors_test.go
+++ b/src/cmd/cgo/internal/testerrors/errors_test.go
@@ -39,16 +39,23 @@
 				continue
 			}
 
-			_, frag, ok := bytes.Cut(line, []byte("ERROR HERE: "))
-			if !ok {
-				continue
+			if _, frag, ok := bytes.Cut(line, []byte("ERROR HERE: ")); ok {
+				re, err := regexp.Compile(fmt.Sprintf(":%d:.*%s", i+1, frag))
+				if err != nil {
+					t.Errorf("Invalid regexp after `ERROR HERE: `: %#q", frag)
+					continue
+				}
+				errors = append(errors, re)
 			}
-			re, err := regexp.Compile(fmt.Sprintf(":%d:.*%s", i+1, frag))
-			if err != nil {
-				t.Errorf("Invalid regexp after `ERROR HERE: `: %#q", frag)
-				continue
+
+			if _, frag, ok := bytes.Cut(line, []byte("ERROR MESSAGE: ")); ok {
+				re, err := regexp.Compile(string(frag))
+				if err != nil {
+					t.Errorf("Invalid regexp after `ERROR MESSAGE: `: %#q", frag)
+					continue
+				}
+				errors = append(errors, re)
 			}
-			errors = append(errors, re)
 		}
 		if len(errors) == 0 {
 			t.Fatalf("cannot find ERROR HERE")
@@ -166,3 +173,8 @@
 		t.Fatalf("succeeded unexpectedly")
 	}
 }
+
+func TestNotMatchedCFunction(t *testing.T) {
+	file := "notmatchedcfunction.go"
+	check(t, file)
+}
diff --git a/src/cmd/cgo/internal/testerrors/ptr_test.go b/src/cmd/cgo/internal/testerrors/ptr_test.go
index 7f56501..8fff761 100644
--- a/src/cmd/cgo/internal/testerrors/ptr_test.go
+++ b/src/cmd/cgo/internal/testerrors/ptr_test.go
@@ -14,7 +14,6 @@
 	"os"
 	"os/exec"
 	"path/filepath"
-	"runtime"
 	"slices"
 	"strings"
 	"sync/atomic"
@@ -253,7 +252,10 @@
 	{
 		// Exported functions may not return Go pointers.
 		name: "export1",
-		c:    `extern unsigned char *GoFn21();`,
+		c: `#ifdef _WIN32
+		    __declspec(dllexport)
+			#endif
+		    extern unsigned char *GoFn21();`,
 		support: `//export GoFn21
 		          func GoFn21() *byte { return new(byte) }`,
 		body: `C.GoFn21()`,
@@ -263,6 +265,9 @@
 		// Returning a C pointer is fine.
 		name: "exportok",
 		c: `#include <stdlib.h>
+		    #ifdef _WIN32
+		    __declspec(dllexport)
+			#endif
 		    extern unsigned char *GoFn22();`,
 		support: `//export GoFn22
 		          func GoFn22() *byte { return (*byte)(C.malloc(1)) }`,
@@ -472,10 +477,6 @@
 func TestPointerChecks(t *testing.T) {
 	testenv.MustHaveGoBuild(t)
 	testenv.MustHaveCGO(t)
-	if runtime.GOOS == "windows" {
-		// TODO: Skip just the cases that fail?
-		t.Skipf("some tests fail to build on %s", runtime.GOOS)
-	}
 
 	var gopath string
 	var dir string
diff --git a/src/cmd/cgo/internal/testerrors/testdata/err5.go b/src/cmd/cgo/internal/testerrors/testdata/err5.go
index 779d745..c12a290 100644
--- a/src/cmd/cgo/internal/testerrors/testdata/err5.go
+++ b/src/cmd/cgo/internal/testerrors/testdata/err5.go
@@ -5,6 +5,7 @@
 package main
 
 //line /tmp/_cgo_.go:1
-//go:cgo_dynamic_linker "/elf/interp" // ERROR HERE: only allowed in cgo-generated code
+//go:cgo_dynamic_linker "/elf/interp"
+// ERROR MESSAGE: only allowed in cgo-generated code
 
 func main() {}
diff --git a/src/cmd/cgo/internal/testerrors/testdata/notmatchedcfunction.go b/src/cmd/cgo/internal/testerrors/testdata/notmatchedcfunction.go
new file mode 100644
index 0000000..5ec9ec5
--- /dev/null
+++ b/src/cmd/cgo/internal/testerrors/testdata/notmatchedcfunction.go
@@ -0,0 +1,15 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+/*
+// TODO(#56378): change back to "#cgo noescape noMatchedCFunction: no matched C function" in Go 1.23
+// ERROR MESSAGE: #cgo noescape disabled until Go 1.23
+#cgo noescape noMatchedCFunction
+*/
+import "C"
+
+func main() {
+}
diff --git a/src/cmd/cgo/internal/testfortran/fortran_test.go b/src/cmd/cgo/internal/testfortran/fortran_test.go
index eaa36ac..0eae7c5 100644
--- a/src/cmd/cgo/internal/testfortran/fortran_test.go
+++ b/src/cmd/cgo/internal/testfortran/fortran_test.go
@@ -5,7 +5,6 @@
 package fortran
 
 import (
-	"fmt"
 	"internal/testenv"
 	"os"
 	"os/exec"
@@ -75,11 +74,18 @@
 
 	// Finally, run the actual test.
 	t.Log("go", "run", "./testdata/testprog")
-	out, err := exec.Command("go", "run", "./testdata/testprog").CombinedOutput()
-	if err == nil && string(out) != "ok\n" {
-		err = fmt.Errorf("expected ok")
+	var stdout, stderr strings.Builder
+	cmd := exec.Command("go", "run", "./testdata/testprog")
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+	err := cmd.Run()
+	t.Logf("%v", cmd)
+	if stderr.Len() != 0 {
+		t.Logf("stderr:\n%s", stderr.String())
 	}
 	if err != nil {
-		t.Errorf("%s\nOutput:\n%s", err, string(out))
+		t.Errorf("%v\n%s", err, stdout.String())
+	} else if stdout.String() != "ok\n" {
+		t.Errorf("stdout:\n%s\nwant \"ok\"", stdout.String())
 	}
 }
diff --git a/src/cmd/cgo/internal/testfortran/testdata/testprog/fortran.go b/src/cmd/cgo/internal/testfortran/testdata/testprog/fortran.go
index d8004ce..e98d76c 100644
--- a/src/cmd/cgo/internal/testfortran/testdata/testprog/fortran.go
+++ b/src/cmd/cgo/internal/testfortran/testdata/testprog/fortran.go
@@ -6,7 +6,10 @@
 
 // int the_answer();
 import "C"
-import "os"
+import (
+	"fmt"
+	"os"
+)
 
 func TheAnswer() int {
 	return int(C.the_answer())
@@ -14,8 +17,8 @@
 
 func main() {
 	if a := TheAnswer(); a != 42 {
-		println("Unexpected result for The Answer. Got:", a, " Want: 42")
+		fmt.Fprintln(os.Stderr, "Unexpected result for The Answer. Got:", a, " Want: 42")
 		os.Exit(1)
 	}
-	println("ok")
+	fmt.Fprintln(os.Stdout, "ok")
 }
diff --git a/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go b/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go
index b0c5074..d3ab190 100644
--- a/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go
+++ b/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Go Authors. All rights reserve d.
+// Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/src/cmd/cgo/internal/testplugin/plugin_test.go b/src/cmd/cgo/internal/testplugin/plugin_test.go
index 7f5b1bf..1e32ff8 100644
--- a/src/cmd/cgo/internal/testplugin/plugin_test.go
+++ b/src/cmd/cgo/internal/testplugin/plugin_test.go
@@ -367,25 +367,16 @@
 	t.Parallel()
 	goCmd(t, "build", "-o", "forkexec.exe", "./forkexec/main.go")
 
-	var cmd *exec.Cmd
-	done := make(chan int, 1)
-
-	go func() {
-		for i := 0; i < 100; i++ {
-			cmd = exec.Command("./forkexec.exe", "1")
-			err := cmd.Run()
-			if err != nil {
-				t.Errorf("running command failed: %v", err)
-				break
+	for i := 0; i < 100; i++ {
+		cmd := testenv.Command(t, "./forkexec.exe", "1")
+		err := cmd.Run()
+		if err != nil {
+			if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+				t.Logf("stderr:\n%s", ee.Stderr)
 			}
+			t.Errorf("running command failed: %v", err)
+			break
 		}
-		done <- 1
-	}()
-	select {
-	case <-done:
-	case <-time.After(5 * time.Minute):
-		cmd.Process.Kill()
-		t.Fatalf("subprocess hang")
 	}
 }
 
diff --git a/src/cmd/cgo/internal/testsanitizers/cc_test.go b/src/cmd/cgo/internal/testsanitizers/cc_test.go
index 6eb5a64..e650de8 100644
--- a/src/cmd/cgo/internal/testsanitizers/cc_test.go
+++ b/src/cmd/cgo/internal/testsanitizers/cc_test.go
@@ -16,8 +16,10 @@
 	"encoding/json"
 	"errors"
 	"fmt"
+	"internal/testenv"
 	"os"
 	"os/exec"
+	"os/user"
 	"path/filepath"
 	"regexp"
 	"strconv"
@@ -266,12 +268,28 @@
 	case "gcc":
 		return compiler.major >= 10
 	case "clang":
+		// TODO(65606): The clang toolchain on the LUCI builders is not built against
+		// zlib, so the ASAN runtime can't actually symbolize its own stack trace. Once
+		// this is resolved, one way or another, switch this back to 'true'. We still
+		// have coverage from the 'gcc' case above.
+		if inLUCIBuild() {
+			return false
+		}
 		return true
 	default:
 		return false
 	}
 }
 
+// inLUCIBuild returns true if we're currently executing in a LUCI build.
+func inLUCIBuild() bool {
+	u, err := user.Current()
+	if err != nil {
+		return false
+	}
+	return testenv.Builder() != "" && u.Username == "swarming"
+}
+
 // compilerRequiredTsanVersion reports whether the compiler is the version required by Tsan.
 // Only restrictions for ppc64le are known; otherwise return true.
 func compilerRequiredTsanVersion(goos, goarch string) bool {
@@ -293,11 +311,17 @@
 	}
 	switch compiler.name {
 	case "gcc":
+		if goarch == "loong64" {
+			return compiler.major >= 14
+		}
 		if goarch == "ppc64le" {
 			return compiler.major >= 9
 		}
 		return compiler.major >= 7
 	case "clang":
+		if goarch == "loong64" {
+			return compiler.major >= 16
+		}
 		return compiler.major >= 9
 	default:
 		return false
diff --git a/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go b/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go
index f84c9f3..3f5b1d9 100644
--- a/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go
+++ b/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go
@@ -7,11 +7,14 @@
 package sanitizers_test
 
 import (
+	"internal/testenv"
 	"strings"
 	"testing"
 )
 
 func TestLibFuzzer(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+	testenv.MustHaveCGO(t)
 	goos, err := goEnv("GOOS")
 	if err != nil {
 		t.Fatal(err)
diff --git a/src/cmd/cgo/internal/testsanitizers/msan_test.go b/src/cmd/cgo/internal/testsanitizers/msan_test.go
index 1a22b52..83d66f6 100644
--- a/src/cmd/cgo/internal/testsanitizers/msan_test.go
+++ b/src/cmd/cgo/internal/testsanitizers/msan_test.go
@@ -8,11 +8,14 @@
 
 import (
 	"internal/platform"
+	"internal/testenv"
 	"strings"
 	"testing"
 )
 
 func TestMSAN(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+	testenv.MustHaveCGO(t)
 	goos, err := goEnv("GOOS")
 	if err != nil {
 		t.Fatal(err)
diff --git a/src/cmd/cgo/internal/testshared/shared_test.go b/src/cmd/cgo/internal/testshared/shared_test.go
index 796c46b..814b999 100644
--- a/src/cmd/cgo/internal/testshared/shared_test.go
+++ b/src/cmd/cgo/internal/testshared/shared_test.go
@@ -96,6 +96,10 @@
 
 // TestMain calls testMain so that the latter can use defer (TestMain exits with os.Exit).
 func testMain(m *testing.M) (int, error) {
+	if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
+		globalSkip = func(t testing.TB) { t.Skip("short mode and $GO_BUILDER_NAME not set") }
+		return m.Run(), nil
+	}
 	if !platform.BuildModeSupported(runtime.Compiler, "shared", runtime.GOOS, runtime.GOARCH) {
 		globalSkip = func(t testing.TB) { t.Skip("shared build mode not supported") }
 		return m.Run(), nil
@@ -1155,6 +1159,12 @@
 	goCmd(t, "run", "-linkshared", "./issue47837/main")
 }
 
+func TestIssue62277(t *testing.T) {
+	globalSkip(t)
+	goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue62277/p")
+	goCmd(t, "test", "-linkshared", "./issue62277")
+}
+
 // Test that we can build std in shared mode.
 func TestStd(t *testing.T) {
 	if testing.Short() {
diff --git a/src/cmd/cgo/internal/testshared/testdata/dep2/dep2.go b/src/cmd/cgo/internal/testshared/testdata/dep2/dep2.go
index 94f38cf5..18d774b 100644
--- a/src/cmd/cgo/internal/testshared/testdata/dep2/dep2.go
+++ b/src/cmd/cgo/internal/testshared/testdata/dep2/dep2.go
@@ -2,6 +2,12 @@
 
 import "testshared/depBase"
 
+func init() {
+	if !depBase.Initialized {
+		panic("depBase not initialized")
+	}
+}
+
 var W int = 1
 
 var hasProg depBase.HasProg
diff --git a/src/cmd/cgo/internal/testshared/testdata/depBase/dep.go b/src/cmd/cgo/internal/testshared/testdata/depBase/dep.go
index e7cc7c8..a143fe2 100644
--- a/src/cmd/cgo/internal/testshared/testdata/depBase/dep.go
+++ b/src/cmd/cgo/internal/testshared/testdata/depBase/dep.go
@@ -7,8 +7,24 @@
 import (
 	"os"
 	"reflect"
+
+	"testshared/depBaseInternal"
 )
 
+// Issue 61973: indirect dependencies are not initialized.
+func init() {
+	if !depBaseInternal.Initialized {
+		panic("depBaseInternal not initialized")
+	}
+	if os.Stdout == nil {
+		panic("os.Stdout is nil")
+	}
+
+	Initialized = true
+}
+
+var Initialized bool
+
 var SlicePtr interface{} = &[]int{}
 
 var V int = 1
diff --git a/src/cmd/cgo/internal/testshared/testdata/depBaseInternal/dep.go b/src/cmd/cgo/internal/testshared/testdata/depBaseInternal/dep.go
new file mode 100644
index 0000000..906bff0
--- /dev/null
+++ b/src/cmd/cgo/internal/testshared/testdata/depBaseInternal/dep.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// depBaseInternal is only imported by depBase.
+
+package depBaseInternal
+
+var Initialized bool
+
+func init() {
+	Initialized = true
+}
diff --git a/src/cmd/cgo/internal/testshared/testdata/issue62277/issue62277_test.go b/src/cmd/cgo/internal/testshared/testdata/issue62277/issue62277_test.go
new file mode 100644
index 0000000..89a0601
--- /dev/null
+++ b/src/cmd/cgo/internal/testshared/testdata/issue62277/issue62277_test.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue62277_test
+
+import (
+	"testing"
+
+	"testshared/issue62277/p"
+)
+
+func TestIssue62277(t *testing.T) {
+	t.Log(p.S)
+	t.Log(p.T)
+}
diff --git a/src/cmd/cgo/internal/testshared/testdata/issue62277/p/p.go b/src/cmd/cgo/internal/testshared/testdata/issue62277/p/p.go
new file mode 100644
index 0000000..97bde0c
--- /dev/null
+++ b/src/cmd/cgo/internal/testshared/testdata/issue62277/p/p.go
@@ -0,0 +1,17 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var S = func() []string {
+	return []string{"LD_LIBRARY_PATH"}
+}()
+
+var T []string
+
+func init() {
+	T = func() []string {
+		return []string{"LD_LIBRARY_PATH"}
+	}()
+}
diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go
index 78020ae..fce2671 100644
--- a/src/cmd/cgo/main.go
+++ b/src/cmd/cgo/main.go
@@ -38,7 +38,7 @@
 	IntSize     int64
 	GccOptions  []string
 	GccIsClang  bool
-	CgoFlags    map[string][]string // #cgo flags (CFLAGS, LDFLAGS)
+	LdFlags     []string // #cgo LDFLAGS
 	Written     map[string]bool
 	Name        map[string]*Name // accumulated Name from Files
 	ExpFunc     []*ExpFunc       // accumulated ExpFunc from Files
@@ -48,6 +48,8 @@
 	Preamble    string          // collected preamble for _cgo_export.h
 	typedefs    map[string]bool // type names that appear in the types of the objects we're interested in
 	typedefList []typedefInfo
+	noCallbacks map[string]bool // C function names with #cgo nocallback directive
+	noEscapes   map[string]bool // C function names with #cgo noescape directive
 }
 
 // A typedefInfo is an element on Package.typedefList: a typedef name
@@ -59,16 +61,18 @@
 
 // A File collects information about a single Go input file.
 type File struct {
-	AST      *ast.File           // parsed AST
-	Comments []*ast.CommentGroup // comments from file
-	Package  string              // Package name
-	Preamble string              // C preamble (doc comment on import "C")
-	Ref      []*Ref              // all references to C.xxx in AST
-	Calls    []*Call             // all calls to C.xxx in AST
-	ExpFunc  []*ExpFunc          // exported functions for this file
-	Name     map[string]*Name    // map from Go name to Name
-	NamePos  map[*Name]token.Pos // map from Name to position of the first reference
-	Edit     *edit.Buffer
+	AST         *ast.File           // parsed AST
+	Comments    []*ast.CommentGroup // comments from file
+	Package     string              // Package name
+	Preamble    string              // C preamble (doc comment on import "C")
+	Ref         []*Ref              // all references to C.xxx in AST
+	Calls       []*Call             // all calls to C.xxx in AST
+	ExpFunc     []*ExpFunc          // exported functions for this file
+	Name        map[string]*Name    // map from Go name to Name
+	NamePos     map[*Name]token.Pos // map from Name to position of the first reference
+	NoCallbacks map[string]bool     // C function names with #cgo nocallback directive
+	NoEscapes   map[string]bool     // C function names with #cgo noescape directive
+	Edit        *edit.Buffer
 }
 
 func (f *File) offset(p token.Pos) int {
@@ -374,7 +378,7 @@
 		f := new(File)
 		f.Edit = edit.NewBuffer(b)
 		f.ParseGo(input, b)
-		f.DiscardCgoDirectives()
+		f.ProcessCgoDirectives()
 		fs[i] = f
 	}
 
@@ -413,6 +417,25 @@
 			p.writeOutput(f, input)
 		}
 	}
+	cFunctions := make(map[string]bool)
+	for _, key := range nameKeys(p.Name) {
+		n := p.Name[key]
+		if n.FuncType != nil {
+			cFunctions[n.C] = true
+		}
+	}
+
+	for funcName := range p.noEscapes {
+		if _, found := cFunctions[funcName]; !found {
+			error_(token.NoPos, "#cgo noescape %s: no matched C function", funcName)
+		}
+	}
+
+	for funcName := range p.noCallbacks {
+		if _, found := cFunctions[funcName]; !found {
+			error_(token.NoPos, "#cgo nocallback %s: no matched C function", funcName)
+		}
+	}
 
 	if !*godefs {
 		p.writeDefs()
@@ -450,10 +473,11 @@
 	os.Setenv("LC_ALL", "C")
 
 	p := &Package{
-		PtrSize:  ptrSize,
-		IntSize:  intSize,
-		CgoFlags: make(map[string][]string),
-		Written:  make(map[string]bool),
+		PtrSize:     ptrSize,
+		IntSize:     intSize,
+		Written:     make(map[string]bool),
+		noCallbacks: make(map[string]bool),
+		noEscapes:   make(map[string]bool),
 	}
 	p.addToFlag("CFLAGS", args)
 	return p
@@ -487,6 +511,14 @@
 		}
 	}
 
+	// merge nocallback & noescape
+	for k, v := range f.NoCallbacks {
+		p.noCallbacks[k] = v
+	}
+	for k, v := range f.NoEscapes {
+		p.noEscapes[k] = v
+	}
+
 	if f.ExpFunc != nil {
 		p.ExpFunc = append(p.ExpFunc, f.ExpFunc...)
 		p.Preamble += "\n" + f.Preamble
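
For context on the directive bookkeeping added above, a rough usage sketch follows. The package and function names are illustrative only; per the testdata in this change, `#cgo noescape` is rejected as "disabled until Go 1.23" in this release, and a directive that names no declared C function is reported as "no matched C function".

```go
package p

/*
// The directives name a C function declared in this preamble. They are kept
// commented out here, mirroring the TODO(#56378) above (#cgo noescape is
// reported as disabled until Go 1.23 in this release):
// #cgo noescape   compute_sum
// #cgo nocallback compute_sum

int compute_sum(int a, int b) { return a + b; }
*/
import "C"

// Sum calls the C helper. With noescape, cgo omits the _Cgo_use calls for the
// arguments; with nocallback, the generated wrapper brackets the call with a
// runtime check that the C side never calls back into Go.
func Sum(a, b int) int {
	return int(C.compute_sum(C.int(a), C.int(b)))
}
```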
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
index b2933e2..2189ad5 100644
--- a/src/cmd/cgo/out.go
+++ b/src/cmd/cgo/out.go
@@ -45,18 +45,17 @@
 
 	var gccgoInit strings.Builder
 
-	fflg := creat(*objDir + "_cgo_flags")
-	for k, v := range p.CgoFlags {
-		for _, arg := range v {
-			fmt.Fprintf(fflg, "_CGO_%s=%s\n", k, arg)
+	if !*gccgo {
+		for _, arg := range p.LdFlags {
+			fmt.Fprintf(fgo2, "//go:cgo_ldflag %q\n", arg)
 		}
-		if k == "LDFLAGS" && !*gccgo {
-			for _, arg := range v {
-				fmt.Fprintf(fgo2, "//go:cgo_ldflag %q\n", arg)
-			}
+	} else {
+		fflg := creat(*objDir + "_cgo_flags")
+		for _, arg := range p.LdFlags {
+			fmt.Fprintf(fflg, "_CGO_LDFLAGS=%s\n", arg)
 		}
+		fflg.Close()
 	}
-	fflg.Close()
 
 	// Write C main file for using gcc to resolve imports.
 	fmt.Fprintf(fm, "#include <stddef.h>\n") // For size_t below.
@@ -106,6 +105,8 @@
 		fmt.Fprintf(fgo2, "//go:linkname _Cgo_use runtime.cgoUse\n")
 		fmt.Fprintf(fgo2, "func _Cgo_use(interface{})\n")
 	}
+	fmt.Fprintf(fgo2, "//go:linkname _Cgo_no_callback runtime.cgoNoCallback\n")
+	fmt.Fprintf(fgo2, "func _Cgo_no_callback(bool)\n")
 
 	typedefNames := make([]string, 0, len(typedef))
 	for name := range typedef {
@@ -612,6 +613,12 @@
 		arg = "uintptr(unsafe.Pointer(&r1))"
 	}
 
+	noCallback := p.noCallbacks[n.C]
+	if noCallback {
+		// Disable callbacks into Go for the duration of the call; the runtime checks this.
+		fmt.Fprintf(fgo2, "\t_Cgo_no_callback(true)\n")
+	}
+
 	prefix := ""
 	if n.AddError {
 		prefix = "errno := "
@@ -620,13 +627,21 @@
 	if n.AddError {
 		fmt.Fprintf(fgo2, "\tif errno != 0 { r2 = syscall.Errno(errno) }\n")
 	}
-	fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n")
-	if d.Type.Params != nil {
-		for i := range d.Type.Params.List {
-			fmt.Fprintf(fgo2, "\t\t_Cgo_use(p%d)\n", i)
-		}
+	if noCallback {
+		fmt.Fprintf(fgo2, "\t_Cgo_no_callback(false)\n")
 	}
-	fmt.Fprintf(fgo2, "\t}\n")
+
+	// Skip _Cgo_use when noescape is set,
+	// so that the compiler won't force the arguments to escape to the heap.
+	if !p.noEscapes[n.C] {
+		fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n")
+		if d.Type.Params != nil {
+			for i := range d.Type.Params.List {
+				fmt.Fprintf(fgo2, "\t\t_Cgo_use(p%d)\n", i)
+			}
+		}
+		fmt.Fprintf(fgo2, "\t}\n")
+	}
 	fmt.Fprintf(fgo2, "\treturn\n")
 	fmt.Fprintf(fgo2, "}\n")
 }
@@ -895,6 +910,8 @@
 	fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wunknown-pragmas\"\n")
 	fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wpragmas\"\n")
 	fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Waddress-of-packed-member\"\n")
+	fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wunknown-warning-option\"\n")
+	fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wunaligned-access\"\n")
 
 	fmt.Fprintf(fgcc, "extern void crosscall2(void (*fn)(void *), void *, int, size_t);\n")
 	fmt.Fprintf(fgcc, "extern size_t _cgo_wait_runtime_init_done(void);\n")
@@ -1507,6 +1524,8 @@
 #pragma GCC diagnostic ignored "-Wunknown-pragmas"
 #pragma GCC diagnostic ignored "-Wpragmas"
 #pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+#pragma GCC diagnostic ignored "-Wunknown-warning-option"
+#pragma GCC diagnostic ignored "-Wunaligned-access"
 
 #include <errno.h>
 #include <string.h>
@@ -1612,9 +1631,11 @@
 func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32
 
 //go:linkname _cgoCheckPointer runtime.cgoCheckPointer
+//go:noescape
 func _cgoCheckPointer(interface{}, interface{})
 
 //go:linkname _cgoCheckResult runtime.cgoCheckResult
+//go:noescape
 func _cgoCheckResult(interface{})
 `
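
To make the generated-code changes above concrete, here is a self-contained sketch of roughly what the emitted `_cgo_gotypes.go` wrapper looks like for a C function carrying both directives. The helpers are local stand-ins so the sketch compiles on its own; the real output uses hashed symbol names, linknames `_Cgo_no_callback` to `runtime.cgoNoCallback`, and also handles errno results and pointer checks.

```go
package sketch

import "unsafe"

// Local stand-ins for the hooks that generated code normally linknames
// into the runtime (runtime.cgoNoCallback, runtime.cgoUse, cgocall).
func _Cgo_no_callback(bool) {}
func _Cgo_use(interface{}) {}
func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32 { return 0 }

var _Cgo_always_false bool

// Roughly the wrapper emitted for a C function fn declared with
// `#cgo nocallback fn` and `#cgo noescape fn`.
func _Cfunc_fn(p0 unsafe.Pointer) (r1 int32) {
	_Cgo_no_callback(true) // emitted only for nocallback functions
	_cgo_runtime_cgocall(nil, uintptr(unsafe.Pointer(&p0)))
	_Cgo_no_callback(false)
	// For noescape functions the usual
	//	if _Cgo_always_false { _Cgo_use(p0) }
	// block is skipped, so the compiler does not force p0 to escape.
	return
}
```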
 
diff --git a/src/cmd/compile/README.md b/src/cmd/compile/README.md
index 9c4eeeb..9b99a1b 100644
--- a/src/cmd/compile/README.md
+++ b/src/cmd/compile/README.md
@@ -49,7 +49,6 @@
 
 * `cmd/compile/internal/types` (compiler types)
 * `cmd/compile/internal/ir` (compiler AST)
-* `cmd/compile/internal/typecheck` (AST transformations)
 * `cmd/compile/internal/noder` (create compiler AST)
 
 The compiler middle end uses its own AST definition and representation of Go
@@ -58,19 +57,9 @@
 and types2 representations to ir and types. This process is referred to as
 "noding."
 
-There are currently two noding implementations:
-
-1. irgen (aka "-G=3" or sometimes "noder2") is the implementation used starting
-   with Go 1.18, and
-
-2. Unified IR is another, in-development implementation (enabled with
-   `GOEXPERIMENT=unified`), which also implements import/export and inlining.
-
-Up through Go 1.18, there was a third noding implementation (just
-"noder" or "-G=0"), which directly converted the pre-type-checked
-syntax representation into IR and then invoked package typecheck's
-type checker. This implementation was removed after Go 1.18, so now
-package typecheck is only used for IR transformations.
+Noding is implemented using a process called Unified IR, which builds a node
+representation using a serialized version of the typechecked code from step 2.
+Unified IR is also involved in import/export of packages and inlining.
 
 ### 4. Middle end
 
@@ -151,7 +140,177 @@
 final object file. The object file will also contain reflect data, export data,
 and debugging information.
 
+### 8. Tips
+
+#### Getting Started
+
+* If you have never contributed to the compiler before, a simple way to begin
+  can be adding a log statement or `panic("here")` to get some
+  initial insight into whatever you are investigating.
+
+* The compiler itself provides logging, debugging and visualization capabilities,
+  such as:
+   ```
+   $ go build -gcflags=-m=2                   # print optimization info, including inlining, escape analysis
+   $ go build -gcflags=-d=ssa/check_bce/debug # print bounds check info
+   $ go build -gcflags=-W                     # print internal parse tree after type checking
+   $ GOSSAFUNC=Foo go build                   # generate ssa.html file for func Foo
+   $ go build -gcflags=-S                     # print assembly
+   $ go tool compile -bench=out.txt x.go      # print timing of compiler phases
+   ```
+
+  Some flags alter the compiler behavior, such as:
+   ```
+   $ go tool compile -h file.go               # panic on first compile error encountered
+   $ go build -gcflags=-d=checkptr=2          # enable additional unsafe pointer checking
+   ```
+
+  There are many additional flags. Some descriptions are available via:
+   ```
+   $ go tool compile -h              # compiler flags, e.g., go build -gcflags='-m=1 -l'
+   $ go tool compile -d help         # debug flags, e.g., go build -gcflags=-d=checkptr=2
+   $ go tool compile -d ssa/help     # ssa flags, e.g., go build -gcflags=-d=ssa/prove/debug=2
+   ```
+
+  There are some additional details about `-gcflags` and the differences between `go build`
+  vs. `go tool compile` in a [section below](#-gcflags-and-go-build-vs-go-tool-compile).
+
+* In general, when investigating a problem in the compiler you usually want to
+  start with the simplest possible reproduction and understand exactly what is
+  happening with it.
+
+#### Testing your changes
+
+* Be sure to read the [Quickly testing your changes](https://go.dev/doc/contribute#quick_test)
+  section of the Go Contribution Guide.
+
+* Some tests live within the cmd/compile packages and can be run by `go test ./...` or similar,
+  but many cmd/compile tests are in the top-level
+  [test](https://github.com/golang/go/tree/master/test) directory:
+
+  ```
+  $ go test cmd/internal/testdir                           # all tests in 'test' dir
+  $ go test cmd/internal/testdir -run='Test/escape.*.go'   # test specific files in 'test' dir
+  ```
+  For details, see the [testdir README](https://github.com/golang/go/tree/master/test#readme).
+  The `errorCheck` method in [testdir_test.go](https://github.com/golang/go/blob/master/src/cmd/internal/testdir/testdir_test.go)
+  is helpful for a description of the `ERROR` comments used in many of those tests.
+
+  In addition, the `go/types` package from the standard library and `cmd/compile/internal/types2`
+  have shared tests in `src/internal/types/testdata`, and both type checkers
+  should be checked if anything changes there.
+
+* The new [application-based coverage profiling](https://go.dev/testing/coverage/) can be used
+  with the compiler, such as:
+
+  ```
+  $ go install -cover -coverpkg=cmd/compile/... cmd/compile  # build compiler with coverage instrumentation
+  $ mkdir /tmp/coverdir                                      # pick location for coverage data
+  $ GOCOVERDIR=/tmp/coverdir go test [...]                   # use compiler, saving coverage data
+  $ go tool covdata textfmt -i=/tmp/coverdir -o coverage.out # convert to traditional coverage format
+  $ go tool cover -html coverage.out                         # view coverage via traditional tools
+  ```
+
+#### Juggling compiler versions
+
+* Many of the compiler tests use the version of the `go` command found in your PATH and
+  its corresponding `compile` binary.
+
+* If you are in a branch and your PATH includes `<go-repo>/bin`,
+  doing `go install cmd/compile` will build the compiler using the code from your
+  branch and install it to the proper location so that subsequent `go` commands
+  like `go build` or `go test ./...` will exercise your freshly built compiler.
+
+* [toolstash](https://pkg.go.dev/golang.org/x/tools/cmd/toolstash) provides a way
+  to save, run, and restore a known good copy of the Go toolchain. For example, it can be
+  a good practice to initially build your branch, save that version of
+  the toolchain, then restore the known good version of the tools to compile
+  your work-in-progress version of the compiler.
+
+  Sample set up steps:
+  ```
+  $ go install golang.org/x/tools/cmd/toolstash@latest
+  $ git clone https://go.googlesource.com/go
+  $ cd go
+  $ git checkout -b mybranch
+  $ ./src/all.bash               # build and confirm good starting point
+  $ export PATH=$PWD/bin:$PATH
+  $ toolstash save               # save current tools
+  ```
+  After that, your edit/compile/test cycle can be similar to:
+  ```
+  <... make edits to cmd/compile source ...>
+  $ toolstash restore && go install cmd/compile   # restore known good tools to build compiler
+  <... 'go build', 'go test', etc. ...>           # use freshly built compiler
+  ```
+
+* toolstash also allows comparing the installed vs. stashed copy of
+  the compiler, such as if you expect equivalent behavior after a refactor.
+  For example, to check that your changed compiler produces identical object files to
+  the stashed compiler while building the standard library:
+  ```
+  $ toolstash restore && go install cmd/compile   # build latest compiler
+  $ go build -toolexec "toolstash -cmp" -a -v std # compare latest vs. saved compiler
+  ```
+
+* If versions appear to get out of sync (for example, with errors like
+  `linked object header mismatch` with version strings like
+  `devel go1.21-db3f952b1f`), you might need to do
+  `toolstash restore && go install cmd/...` to update all the tools under cmd.
+
+#### Additional helpful tools
+
+* [compilebench](https://pkg.go.dev/golang.org/x/tools/cmd/compilebench) benchmarks
+  the speed of the compiler.
+
+* [benchstat](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) is the standard tool
+  for reporting performance changes resulting from compiler modifications,
+  including whether any improvements are statistically significant:
+  ```
+  $ go test -bench=SomeBenchmarks -count=20 > new.txt   # use new compiler
+  $ toolstash restore                                   # restore old compiler
+  $ go test -bench=SomeBenchmarks -count=20 > old.txt   # use old compiler
+  $ benchstat old.txt new.txt                           # compare old vs. new
+  ```
+
+* [bent](https://pkg.go.dev/golang.org/x/benchmarks/cmd/bent) facilitates running a
+  large set of benchmarks from various community Go projects inside a Docker container.
+
+* [perflock](https://github.com/aclements/perflock) helps obtain more consistent
+  benchmark results, including by manipulating CPU frequency scaling settings on Linux.
+
+* [view-annotated-file](https://github.com/loov/view-annotated-file) (from the community)
+   overlays inlining, bounds check, and escape info back onto the source code.
+
+* [godbolt.org](https://go.godbolt.org) is widely used to examine
+  and share assembly output from many compilers, including the Go compiler. It can also
+  [compare](https://go.godbolt.org/z/5Gs1G4bKG) assembly for different versions of
+  a function or across Go compiler versions, which can be helpful for investigations and
+  bug reports.
+
+#### -gcflags and 'go build' vs. 'go tool compile'
+
+* `-gcflags` is a go command [build flag](https://pkg.go.dev/cmd/go#hdr-Compile_packages_and_dependencies).
+  `go build -gcflags=<args>` passes the supplied `<args>` to the underlying
+  `compile` invocation(s) while still doing everything that the `go build` command
+  normally does (e.g., handling the build cache, modules, and so on). In contrast,
+  `go tool compile <args>` asks the `go` command to invoke `compile <args>` a single time
+  without involving the standard `go build` machinery. In some cases, it can be helpful to have
+  fewer moving parts by doing `go tool compile <args>`, such as if you have a
+  small standalone source file that can be compiled without any assistance from `go build`.
+  In other cases, it is more convenient to pass `-gcflags` to a build command like
+  `go build`, `go test`, or `go install`.
+
+* `-gcflags` by default applies to the packages named on the command line, but can
+  use package patterns such as `-gcflags='all=-m=1 -l'`, or multiple package patterns such as
+  `-gcflags='all=-m=1' -gcflags='fmt=-m=2'`. For details, see the
+  [cmd/go documentation](https://pkg.go.dev/cmd/go#hdr-Compile_packages_and_dependencies).
+
 ### Further reading
 
 To dig deeper into how the SSA package works, including its passes and rules,
 head to [cmd/compile/internal/ssa/README.md](internal/ssa/README.md).
+
+Finally, if something in this README or the SSA README is unclear
+or if you have an idea for an improvement, feel free to leave a comment in
+[issue 30074](https://go.dev/issue/30074).
diff --git a/src/cmd/compile/abi-internal.md b/src/cmd/compile/abi-internal.md
index 14464ed..eae230d 100644
--- a/src/cmd/compile/abi-internal.md
+++ b/src/cmd/compile/abi-internal.md
@@ -136,7 +136,7 @@
 1. Let NI and NFP be the length of integer and floating-point register
    sequences defined by the architecture.
    Let I and FP be 0; these are the indexes of the next integer and
-   floating-pointer register.
+   floating-point register.
    Let S, the type sequence defining the stack frame, be empty.
 1. If F is a method, assign F’s receiver.
 1. For each argument A of F, assign A.
@@ -633,6 +633,56 @@
 Functions are allowed to modify it between calls (as long as they
 restore it), but as of this writing Go code never does.
 
+### loong64 architecture
+
+The loong64 architecture uses R4 – R19 for integer arguments and integer results.
+
+It uses F0 – F15 for floating-point arguments and results.
+
+Registers R20 – R21, R23 – R28, R30 – R31, F16 – F31 are permanent scratch registers.
+
+Register R2 is reserved and never used.
+
+Registers R20 and R21 are used by runtime.duffcopy and runtime.duffzero.
+
+Special-purpose registers used within Go generated code and Go assembly code
+are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| R0 | Zero value | Same | Same |
+| R1 | Link register | Link register | Scratch |
+| R3 | Stack pointer | Same | Same |
+| R20,R21 | Scratch | Scratch | Used by duffcopy, duffzero |
+| R22 | Current goroutine | Same | Same |
+| R29 | Closure context pointer | Same | Same |
+| R30, R31 | Used by the assembler | Same | Same |
+
+*Rationale*: These register meanings are compatible with Go’s stack-based
+calling convention.
+
+#### Stack layout
+
+The stack pointer, R3, grows down and is aligned to 8 bytes.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+    +------------------------------+
+    | ... locals ...               |
+    | ... outgoing arguments ...   |
+    | return PC                    | ← R3 points to
+    +------------------------------+ ↓ lower addresses
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+The "return PC" is loaded to the link register, R1, as part of the
+loong64 `JAL` operation.
+
+#### Flags
+
+All bits in CSR are system flags and are not modified by Go.
+
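
Returning to the argument registers listed at the top of this section, a small worked example may help. It is illustrative only and assumes the general register-assignment algorithm described earlier in this document, applied to the loong64 register sequences above.

```go
package p

// Integer/pointer values take registers from R4 – R19 in order,
// floating-point values from F0 – F15.
func f(a int64, b float64, c *byte) (int64, float64) {
	// a → R4, c → R5 (next free integer registers, in declaration order)
	// b → F0         (first floating-point register)
	// Results restart the sequences: the int64 returns in R4, the float64 in F0.
	return a + int64(*c), b
}
```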
 ### ppc64 architecture
 
 The ppc64 architecture uses R3 – R10 and R14 – R17 for integer arguments
diff --git a/src/cmd/compile/default.pgo b/src/cmd/compile/default.pgo
index 2ba7968..0f925ec 100644
--- a/src/cmd/compile/default.pgo
+++ b/src/cmd/compile/default.pgo
Binary files differ
diff --git a/src/cmd/compile/doc.go b/src/cmd/compile/doc.go
index 0a60368..507899e 100644
--- a/src/cmd/compile/doc.go
+++ b/src/cmd/compile/doc.go
@@ -295,5 +295,27 @@
 single argument, f. This is optional, but helps alert the reader that
 the function is accessed from outside the package.
 
+	//go:wasmimport importmodule importname
+
+The //go:wasmimport directive is wasm-only and must be followed by a
+function declaration.
+It specifies that the function is provided by a wasm module identified
+by ``importmodule`` and ``importname``.
+
+	//go:wasmimport a_module f
+	func g()
+
+The types of parameters and return values to the Go function are translated to
+Wasm according to the following table:
+
+    Go types        Wasm types
+    int32, uint32   i32
+    int64, uint64   i64
+    float32         f32
+    float64         f64
+    unsafe.Pointer  i32
+
+Any other parameter types are disallowed by the compiler.
+
 */
 package main
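
Below is a sketch of the declaration form documented above. The module and import names are placeholders rather than a real host API, and the file is assumed to be built for a wasm target.

```go
package host

import "unsafe"

// The parameter types follow the translation table above:
// unsafe.Pointer and int32 map to i32, int64 maps to i64.
//
//go:wasmimport a_module log_bytes
func logBytes(buf unsafe.Pointer, n int32, flags int64)
```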
diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
index 71fbb88..607d462 100644
--- a/src/cmd/compile/internal/abi/abiutils.go
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -8,8 +8,10 @@
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
+	"cmd/internal/obj"
 	"cmd/internal/src"
 	"fmt"
+	"math"
 	"sync"
 )
 
@@ -96,7 +98,7 @@
 // (as described above), not architected registers.
 type ABIParamAssignment struct {
 	Type      *types.Type
-	Name      types.Object // should always be *ir.Name, used to match with a particular ssa.OpArg.
+	Name      *ir.Name
 	Registers []RegIndex
 	offset    int32
 }
@@ -173,7 +175,7 @@
 				rts = appendParamTypes(rts, t.Elem())
 			}
 		case types.TSTRUCT:
-			for _, f := range t.FieldSlice() {
+			for _, f := range t.Fields() {
 				if f.Type.Size() > 0 { // embedded zero-width types receive no registers
 					rts = appendParamTypes(rts, f.Type)
 				}
@@ -212,7 +214,7 @@
 				offsets, at = appendParamOffsets(offsets, at, t.Elem())
 			}
 		case types.TSTRUCT:
-			for i, f := range t.FieldSlice() {
+			for i, f := range t.Fields() {
 				offsets, at = appendParamOffsets(offsets, at, f.Type)
 				if f.Type.Size() == 0 && i == t.NumFields()-1 {
 					at++ // last field has zero width
@@ -258,166 +260,131 @@
 // by the ABI rules for parameter passing and result returning.
 type ABIConfig struct {
 	// Do we need anything more than this?
-	offsetForLocals  int64 // e.g., obj.(*Link).Arch.FixedFrameSize -- extra linkage information on some architectures.
-	regAmounts       RegAmounts
-	regsForTypeCache map[*types.Type]int
+	offsetForLocals int64 // e.g., obj.(*Link).Arch.FixedFrameSize -- extra linkage information on some architectures.
+	regAmounts      RegAmounts
+	which           obj.ABI
 }
 
 // NewABIConfig returns a new ABI configuration for an architecture with
 // iRegsCount integer/pointer registers and fRegsCount floating point registers.
-func NewABIConfig(iRegsCount, fRegsCount int, offsetForLocals int64) *ABIConfig {
-	return &ABIConfig{offsetForLocals: offsetForLocals, regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)}
+func NewABIConfig(iRegsCount, fRegsCount int, offsetForLocals int64, which uint8) *ABIConfig {
+	return &ABIConfig{offsetForLocals: offsetForLocals, regAmounts: RegAmounts{iRegsCount, fRegsCount}, which: obj.ABI(which)}
 }
 
-// Copy returns a copy of an ABIConfig for use in a function's compilation so that access to the cache does not need to be protected with a mutex.
-func (a *ABIConfig) Copy() *ABIConfig {
-	b := *a
-	b.regsForTypeCache = make(map[*types.Type]int)
-	return &b
+// Copy returns config.
+//
+// TODO(mdempsky): Remove.
+func (config *ABIConfig) Copy() *ABIConfig {
+	return config
+}
+
+// Which returns the ABI number.
+func (config *ABIConfig) Which() obj.ABI {
+	return config.which
 }
 
 // LocalsOffset returns the architecture-dependent offset from SP for args and results.
 // In theory this is only used for debugging; it ought to already be incorporated into
 // results from the ABI-related methods
-func (a *ABIConfig) LocalsOffset() int64 {
-	return a.offsetForLocals
+func (config *ABIConfig) LocalsOffset() int64 {
+	return config.offsetForLocals
 }
 
 // FloatIndexFor translates r into an index in the floating point parameter
 // registers.  If the result is negative, the input index was actually for the
 // integer parameter registers.
-func (a *ABIConfig) FloatIndexFor(r RegIndex) int64 {
-	return int64(r) - int64(a.regAmounts.intRegs)
+func (config *ABIConfig) FloatIndexFor(r RegIndex) int64 {
+	return int64(r) - int64(config.regAmounts.intRegs)
 }
 
-// NumParamRegs returns the number of parameter registers used for a given type,
-// without regard for the number available.
-func (a *ABIConfig) NumParamRegs(t *types.Type) int {
-	var n int
-	if n, ok := a.regsForTypeCache[t]; ok {
-		return n
+// NumParamRegs returns the total number of registers used to
+// represent a parameter of the given type, which must be register
+// assignable.
+func (config *ABIConfig) NumParamRegs(typ *types.Type) int {
+	intRegs, floatRegs := typ.Registers()
+	if intRegs == math.MaxUint8 && floatRegs == math.MaxUint8 {
+		base.Fatalf("cannot represent parameters of type %v in registers", typ)
 	}
-
-	if t.IsScalar() || t.IsPtrShaped() {
-		if t.IsComplex() {
-			n = 2
-		} else {
-			n = (int(t.Size()) + types.RegSize - 1) / types.RegSize
-		}
-	} else {
-		typ := t.Kind()
-		switch typ {
-		case types.TARRAY:
-			n = a.NumParamRegs(t.Elem()) * int(t.NumElem())
-		case types.TSTRUCT:
-			for _, f := range t.FieldSlice() {
-				n += a.NumParamRegs(f.Type)
-			}
-		case types.TSLICE:
-			n = a.NumParamRegs(synthSlice)
-		case types.TSTRING:
-			n = a.NumParamRegs(synthString)
-		case types.TINTER:
-			n = a.NumParamRegs(synthIface)
-		}
-	}
-	a.regsForTypeCache[t] = n
-
-	return n
+	return int(intRegs) + int(floatRegs)
 }
 
-// preAllocateParams gets the slice sizes right for inputs and outputs.
-func (a *ABIParamResultInfo) preAllocateParams(hasRcvr bool, nIns, nOuts int) {
-	if hasRcvr {
-		nIns++
-	}
-	a.inparams = make([]ABIParamAssignment, 0, nIns)
-	a.outparams = make([]ABIParamAssignment, 0, nOuts)
-}
-
-// ABIAnalyzeTypes takes an optional receiver type, arrays of ins and outs, and returns an ABIParamResultInfo,
+// ABIAnalyzeTypes takes slices of parameter and result types, and returns an ABIParamResultInfo,
 // based on the given configuration.  This is the same result computed by config.ABIAnalyze applied to the
 // corresponding method/function type, except that all the embedded parameter names are nil.
 // This is intended for use by ssagen/ssa.go:(*state).rtcall, for runtime functions that lack a parsed function type.
-func (config *ABIConfig) ABIAnalyzeTypes(rcvr *types.Type, ins, outs []*types.Type) *ABIParamResultInfo {
+func (config *ABIConfig) ABIAnalyzeTypes(params, results []*types.Type) *ABIParamResultInfo {
 	setup()
 	s := assignState{
 		stackOffset: config.offsetForLocals,
 		rTotal:      config.regAmounts,
 	}
-	result := &ABIParamResultInfo{config: config}
-	result.preAllocateParams(rcvr != nil, len(ins), len(outs))
 
-	// Receiver
-	if rcvr != nil {
-		result.inparams = append(result.inparams,
-			s.assignParamOrReturn(rcvr, nil, false))
+	assignParams := func(params []*types.Type, isResult bool) []ABIParamAssignment {
+		res := make([]ABIParamAssignment, len(params))
+		for i, param := range params {
+			res[i] = s.assignParam(param, nil, isResult)
+		}
+		return res
 	}
 
+	info := &ABIParamResultInfo{config: config}
+
 	// Inputs
-	for _, t := range ins {
-		result.inparams = append(result.inparams,
-			s.assignParamOrReturn(t, nil, false))
-	}
+	info.inparams = assignParams(params, false)
 	s.stackOffset = types.RoundUp(s.stackOffset, int64(types.RegSize))
-	result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+	info.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
 
 	// Outputs
 	s.rUsed = RegAmounts{}
-	for _, t := range outs {
-		result.outparams = append(result.outparams, s.assignParamOrReturn(t, nil, true))
-	}
+	info.outparams = assignParams(results, true)
 	// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
 	// TODO in theory could align offset only to minimum required by spilled data types.
-	result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
-	result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
-	result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+	info.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+	info.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+	info.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
 
-	return result
+	return info
 }
 
 // ABIAnalyzeFuncType takes a function type 'ft' and an ABI rules description
 // 'config' and analyzes the function to determine how its parameters
 // and results will be passed (in registers or on the stack), returning
 // an ABIParamResultInfo object that holds the results of the analysis.
-func (config *ABIConfig) ABIAnalyzeFuncType(ft *types.Func) *ABIParamResultInfo {
+func (config *ABIConfig) ABIAnalyzeFuncType(ft *types.Type) *ABIParamResultInfo {
 	setup()
 	s := assignState{
 		stackOffset: config.offsetForLocals,
 		rTotal:      config.regAmounts,
 	}
-	result := &ABIParamResultInfo{config: config}
-	result.preAllocateParams(ft.Receiver != nil, ft.Params.NumFields(), ft.Results.NumFields())
 
-	// Receiver
-	// TODO(register args) ? seems like "struct" and "fields" is not right anymore for describing function parameters
-	if ft.Receiver != nil && ft.Receiver.NumFields() != 0 {
-		r := ft.Receiver.FieldSlice()[0]
-		result.inparams = append(result.inparams,
-			s.assignParamOrReturn(r.Type, r.Nname, false))
+	assignParams := func(params []*types.Field, isResult bool) []ABIParamAssignment {
+		res := make([]ABIParamAssignment, len(params))
+		for i, param := range params {
+			var name *ir.Name
+			if param.Nname != nil {
+				name = param.Nname.(*ir.Name)
+			}
+			res[i] = s.assignParam(param.Type, name, isResult)
+		}
+		return res
 	}
 
+	info := &ABIParamResultInfo{config: config}
+
 	// Inputs
-	ifsl := ft.Params.FieldSlice()
-	for _, f := range ifsl {
-		result.inparams = append(result.inparams,
-			s.assignParamOrReturn(f.Type, f.Nname, false))
-	}
+	info.inparams = assignParams(ft.RecvParams(), false)
 	s.stackOffset = types.RoundUp(s.stackOffset, int64(types.RegSize))
-	result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+	info.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
 
 	// Outputs
 	s.rUsed = RegAmounts{}
-	ofsl := ft.Results.FieldSlice()
-	for _, f := range ofsl {
-		result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, f.Nname, true))
-	}
+	info.outparams = assignParams(ft.Results(), true)
 	// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
 	// TODO in theory could align offset only to minimum required by spilled data types.
-	result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
-	result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
-	result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
-	return result
+	info.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+	info.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+	info.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+	return info
 }
 
 // ABIAnalyze returns the same result as ABIAnalyzeFuncType, but also
@@ -428,38 +395,31 @@
 // outputs because their frame location transitions from BOGUS_FUNARG_OFFSET
 // to zero to an as-if-AUTO offset that has no use for callers.
 func (config *ABIConfig) ABIAnalyze(t *types.Type, setNname bool) *ABIParamResultInfo {
-	ft := t.FuncType()
-	result := config.ABIAnalyzeFuncType(ft)
+	result := config.ABIAnalyzeFuncType(t)
 
 	// Fill in the frame offsets for receiver, inputs, results
-	k := 0
-	if t.NumRecvs() != 0 {
-		config.updateOffset(result, ft.Receiver.FieldSlice()[0], result.inparams[0], false, setNname)
-		k++
+	for i, f := range t.RecvParams() {
+		config.updateOffset(result, f, result.inparams[i], false, setNname)
 	}
-	for i, f := range ft.Params.FieldSlice() {
-		config.updateOffset(result, f, result.inparams[k+i], false, setNname)
-	}
-	for i, f := range ft.Results.FieldSlice() {
+	for i, f := range t.Results() {
 		config.updateOffset(result, f, result.outparams[i], true, setNname)
 	}
 	return result
 }
 
-func (config *ABIConfig) updateOffset(result *ABIParamResultInfo, f *types.Field, a ABIParamAssignment, isReturn, setNname bool) {
+func (config *ABIConfig) updateOffset(result *ABIParamResultInfo, f *types.Field, a ABIParamAssignment, isResult, setNname bool) {
+	if f.Offset != types.BADWIDTH {
+		base.Fatalf("field offset for %s at %s has been set to %d", f.Sym, base.FmtPos(f.Pos), f.Offset)
+	}
+
 	// Everything except return values in registers has either a frame home (if not in a register) or a frame spill location.
-	if !isReturn || len(a.Registers) == 0 {
+	if !isResult || len(a.Registers) == 0 {
 		// The type frame offset DOES NOT show effects of minimum frame size.
 		// Getting this wrong breaks stackmaps, see liveness/plive.go:WriteFuncMap and typebits/typebits.go:Set
 		off := a.FrameOffset(result)
-		fOffset := f.Offset
-		if fOffset == types.BOGUS_FUNARG_OFFSET {
-			if setNname && f.Nname != nil {
-				f.Nname.(*ir.Name).SetFrameOffset(off)
-				f.Nname.(*ir.Name).SetIsOutputParamInRegisters(false)
-			}
-		} else {
-			base.Fatalf("field offset for %s at %s has been set to %d", f.Sym.Name, base.FmtPos(f.Pos), fOffset)
+		if setNname && f.Nname != nil {
+			f.Nname.(*ir.Name).SetFrameOffset(off)
+			f.Nname.(*ir.Name).SetIsOutputParamInRegisters(false)
 		}
 	} else {
 		if setNname && f.Nname != nil {
@@ -524,7 +484,6 @@
 type assignState struct {
 	rTotal      RegAmounts // total reg amounts from ABI rules
 	rUsed       RegAmounts // regs used by params completely assigned so far
-	pUsed       RegAmounts // regs used by the current param (or pieces therein)
 	stackOffset int64      // current stack offset
 	spillOffset int64      // current spill offset
 }
@@ -542,12 +501,11 @@
 	return types.RoundUp(a, int64(t))
 }
 
-// stackSlot returns a stack offset for a param or result of the
-// specified type.
-func (state *assignState) stackSlot(t *types.Type) int64 {
-	rv := align(state.stackOffset, t)
-	state.stackOffset = rv + t.Size()
-	return rv
+// nextSlot allocates the next available slot for typ.
+func nextSlot(offsetp *int64, typ *types.Type) int64 {
+	offset := align(*offsetp, typ)
+	*offsetp = offset + typ.Size()
+	return offset
 }
 
 // allocateRegs returns an ordered list of register indices for a parameter or result
@@ -585,7 +543,7 @@
 			}
 			return regs
 		case types.TSTRUCT:
-			for _, f := range t.FieldSlice() {
+			for _, f := range t.Fields() {
 				regs = state.allocateRegs(regs, f.Type)
 			}
 			return regs
@@ -601,105 +559,6 @@
 	panic("unreachable")
 }
 
-// regAllocate creates a register ABIParamAssignment object for a param
-// or result with the specified type, as a final step (this assumes
-// that all of the safety/suitability analysis is complete).
-func (state *assignState) regAllocate(t *types.Type, name types.Object, isReturn bool) ABIParamAssignment {
-	spillLoc := int64(-1)
-	if !isReturn {
-		// Spill for register-resident t must be aligned for storage of a t.
-		spillLoc = align(state.spillOffset, t)
-		state.spillOffset = spillLoc + t.Size()
-	}
-	return ABIParamAssignment{
-		Type:      t,
-		Name:      name,
-		Registers: state.allocateRegs([]RegIndex{}, t),
-		offset:    int32(spillLoc),
-	}
-}
-
-// stackAllocate creates a stack memory ABIParamAssignment object for
-// a param or result with the specified type, as a final step (this
-// assumes that all of the safety/suitability analysis is complete).
-func (state *assignState) stackAllocate(t *types.Type, name types.Object) ABIParamAssignment {
-	return ABIParamAssignment{
-		Type:   t,
-		Name:   name,
-		offset: int32(state.stackSlot(t)),
-	}
-}
-
-// intUsed returns the number of integer registers consumed
-// at a given point within an assignment stage.
-func (state *assignState) intUsed() int {
-	return state.rUsed.intRegs + state.pUsed.intRegs
-}
-
-// floatUsed returns the number of floating point registers consumed at
-// a given point within an assignment stage.
-func (state *assignState) floatUsed() int {
-	return state.rUsed.floatRegs + state.pUsed.floatRegs
-}
-
-// regassignIntegral examines a param/result of integral type 't' to
-// determines whether it can be register-assigned. Returns TRUE if we
-// can register allocate, FALSE otherwise (and updates state
-// accordingly).
-func (state *assignState) regassignIntegral(t *types.Type) bool {
-	regsNeeded := int(types.RoundUp(t.Size(), int64(types.PtrSize)) / int64(types.PtrSize))
-	if t.IsComplex() {
-		regsNeeded = 2
-	}
-
-	// Floating point and complex.
-	if t.IsFloat() || t.IsComplex() {
-		if regsNeeded+state.floatUsed() > state.rTotal.floatRegs {
-			// not enough regs
-			return false
-		}
-		state.pUsed.floatRegs += regsNeeded
-		return true
-	}
-
-	// Non-floating point
-	if regsNeeded+state.intUsed() > state.rTotal.intRegs {
-		// not enough regs
-		return false
-	}
-	state.pUsed.intRegs += regsNeeded
-	return true
-}
-
-// regassignArray processes an array type (or array component within some
-// other enclosing type) to determine if it can be register assigned.
-// Returns TRUE if we can register allocate, FALSE otherwise.
-func (state *assignState) regassignArray(t *types.Type) bool {
-
-	nel := t.NumElem()
-	if nel == 0 {
-		return true
-	}
-	if nel > 1 {
-		// Not an array of length 1: stack assign
-		return false
-	}
-	// Visit element
-	return state.regassign(t.Elem())
-}
-
-// regassignStruct processes a struct type (or struct component within
-// some other enclosing type) to determine if it can be register
-// assigned. Returns TRUE if we can register allocate, FALSE otherwise.
-func (state *assignState) regassignStruct(t *types.Type) bool {
-	for _, field := range t.FieldSlice() {
-		if !state.regassign(field.Type) {
-			return false
-		}
-	}
-	return true
-}
-
 // synthOnce ensures that we only create the synth* fake types once.
 var synthOnce sync.Once
 
@@ -737,47 +596,42 @@
 	})
 }
 
-// regassign examines a given param type (or component within some
-// composite) to determine if it can be register assigned.  Returns
-// TRUE if we can register allocate, FALSE otherwise.
-func (state *assignState) regassign(pt *types.Type) bool {
-	typ := pt.Kind()
-	if pt.IsScalar() || pt.IsPtrShaped() {
-		return state.regassignIntegral(pt)
-	}
-	switch typ {
-	case types.TARRAY:
-		return state.regassignArray(pt)
-	case types.TSTRUCT:
-		return state.regassignStruct(pt)
-	case types.TSLICE:
-		return state.regassignStruct(synthSlice)
-	case types.TSTRING:
-		return state.regassignStruct(synthString)
-	case types.TINTER:
-		return state.regassignStruct(synthIface)
-	default:
-		base.Fatalf("not expected")
-		panic("unreachable")
-	}
-}
-
-// assignParamOrReturn processes a given receiver, param, or result
-// of field f to determine whether it can be register assigned.
-// The result of the analysis is recorded in the result
-// ABIParamResultInfo held in 'state'.
+// assignParam processes a given receiver, param, or result of the
+// given type to determine whether it can be register assigned,
+// returning an ABIParamAssignment that records the decision and
+// updating the stack/spill offsets held in 'state'.
-func (state *assignState) assignParamOrReturn(pt *types.Type, n types.Object, isReturn bool) ABIParamAssignment {
-	state.pUsed = RegAmounts{}
-	if pt.Size() == types.BADWIDTH {
-		base.Fatalf("should never happen")
-		panic("unreachable")
-	} else if pt.Size() == 0 {
-		return state.stackAllocate(pt, n)
-	} else if state.regassign(pt) {
-		return state.regAllocate(pt, n, isReturn)
-	} else {
-		return state.stackAllocate(pt, n)
+func (state *assignState) assignParam(typ *types.Type, name *ir.Name, isResult bool) ABIParamAssignment {
+	registers := state.tryAllocRegs(typ)
+
+	var offset int64 = -1
+	if registers == nil { // stack allocated; needs stack slot
+		offset = nextSlot(&state.stackOffset, typ)
+	} else if !isResult { // register-allocated param; needs spill slot
+		offset = nextSlot(&state.spillOffset, typ)
 	}
+
+	return ABIParamAssignment{
+		Type:      typ,
+		Name:      name,
+		Registers: registers,
+		offset:    int32(offset),
+	}
+}
+
+// tryAllocRegs attempts to allocate registers to represent a
+// parameter of the given type. If unsuccessful, it returns nil.
+func (state *assignState) tryAllocRegs(typ *types.Type) []RegIndex {
+	if typ.Size() == 0 {
+		return nil // zero-size parameters are defined as being stack allocated
+	}
+
+	intRegs, floatRegs := typ.Registers()
+	if int(intRegs) > state.rTotal.intRegs-state.rUsed.intRegs || int(floatRegs) > state.rTotal.floatRegs-state.rUsed.floatRegs {
+		return nil // too few available registers
+	}
+
+	regs := make([]RegIndex, 0, int(intRegs)+int(floatRegs))
+	return state.allocateRegs(regs, typ)
 }
 
 // ComputePadding returns a list of "post element" padding values in
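
Taken together, the new assignParam/tryAllocRegs/nextSlot flow is a greedy assignment: a parameter goes into registers when its register footprint (as reported by typ.Registers()) fits in what remains, otherwise it gets an aligned stack slot, and register-assigned inputs still reserve an aligned spill slot. The standalone sketch below illustrates that strategy only; the types, sizes, alignments, and register counts are made up for illustration and are not the compiler's real ones.

```go
package main

import "fmt"

// Simplified stand-ins for the compiler's types (illustrative only).
type param struct {
	size, align        int64 // bytes
	intRegs, floatRegs int   // register footprint, as typ.Registers() would report
}

type assignState struct {
	intLeft, floatLeft       int
	stackOffset, spillOffset int64
}

// nextSlot mirrors the patch's helper: align the offset, then advance it.
func nextSlot(offsetp *int64, size, align int64) int64 {
	off := (*offsetp + align - 1) &^ (align - 1)
	*offsetp = off + size
	return off
}

func (s *assignState) assign(p param) string {
	// Zero-size params are stack allocated; otherwise use registers if the
	// remaining budget covers the footprint, reserving a spill slot.
	if p.size > 0 && p.intRegs <= s.intLeft && p.floatRegs <= s.floatLeft {
		s.intLeft -= p.intRegs
		s.floatLeft -= p.floatRegs
		spill := nextSlot(&s.spillOffset, p.size, p.align)
		return fmt.Sprintf("registers (%d int, %d float), spill@%d", p.intRegs, p.floatRegs, spill)
	}
	return fmt.Sprintf("stack@%d", nextSlot(&s.stackOffset, p.size, p.align))
}

func main() {
	s := assignState{intLeft: 2, floatLeft: 2} // pretend ABI with 2 int + 2 float registers
	for _, p := range []param{
		{8, 8, 1, 0}, // int64: one int register
		{8, 8, 1, 0}, // another int64: uses the last int register
		{8, 8, 1, 0}, // no int registers left: falls back to the stack
	} {
		fmt.Println(s.assign(p))
	}
}
```
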
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 1138758..ab762c2 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -252,7 +252,8 @@
 		ssa.OpAMD64RORQ, ssa.OpAMD64RORL, ssa.OpAMD64RORW, ssa.OpAMD64RORB,
 		ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD,
 		ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD,
-		ssa.OpAMD64PXOR,
+		ssa.OpAMD64MINSS, ssa.OpAMD64MINSD,
+		ssa.OpAMD64POR, ssa.OpAMD64PXOR,
 		ssa.OpAMD64BTSL, ssa.OpAMD64BTSQ,
 		ssa.OpAMD64BTCL, ssa.OpAMD64BTCQ,
 		ssa.OpAMD64BTRL, ssa.OpAMD64BTRQ:
@@ -713,9 +714,9 @@
 		p.To.Offset = v.AuxInt
 	case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst,
 		ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
-		ssa.OpAMD64BTSLconst, ssa.OpAMD64BTSQconst,
-		ssa.OpAMD64BTCLconst, ssa.OpAMD64BTCQconst,
-		ssa.OpAMD64BTRLconst, ssa.OpAMD64BTRQconst:
+		ssa.OpAMD64BTSQconst,
+		ssa.OpAMD64BTCQconst,
+		ssa.OpAMD64BTRQconst:
 		op := v.Op
 		if op == ssa.OpAMD64BTQconst && v.AuxInt < 32 {
 			// Emit 32-bit version because it's shorter
@@ -850,7 +851,8 @@
 		}
 		fallthrough
 	case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify,
-		ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify:
+		ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify,
+		ssa.OpAMD64BTSQconstmodify, ssa.OpAMD64BTRQconstmodify, ssa.OpAMD64BTCQconstmodify:
 		sc := v.AuxValAndOff()
 		off := sc.Off64()
 		val := sc.Val64()
@@ -1187,6 +1189,15 @@
 		p.To.Reg = v.Args[0].Reg()
 		ssagen.AddAux(&p.To, v)
 
+	case ssa.OpAMD64SETEQstoreidx1, ssa.OpAMD64SETNEstoreidx1,
+		ssa.OpAMD64SETLstoreidx1, ssa.OpAMD64SETLEstoreidx1,
+		ssa.OpAMD64SETGstoreidx1, ssa.OpAMD64SETGEstoreidx1,
+		ssa.OpAMD64SETBstoreidx1, ssa.OpAMD64SETBEstoreidx1,
+		ssa.OpAMD64SETAstoreidx1, ssa.OpAMD64SETAEstoreidx1:
+		p := s.Prog(v.Op.Asm())
+		memIdx(&p.To, v)
+		ssagen.AddAux(&p.To, v)
+
 	case ssa.OpAMD64SETNEF:
 		t := v.RegTmp()
 		p := s.Prog(v.Op.Asm())
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
index 23e52ba..43d8118 100644
--- a/src/cmd/compile/internal/arm/galign.go
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -15,7 +15,7 @@
 	arch.LinkArch = &arm.Linkarm
 	arch.REGSP = arm.REGSP
 	arch.MAXWIDTH = (1 << 32) - 1
-	arch.SoftFloat = buildcfg.GOARM == 5
+	arch.SoftFloat = buildcfg.GOARM.SoftFloat
 	arch.ZeroRange = zerorange
 	arch.Ginsnop = ginsnop
 
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 7fcbb4d..638ed3e 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -289,7 +289,7 @@
 	case ssa.OpARMANDconst, ssa.OpARMBICconst:
 		// try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
 		// BFC is only available on ARMv7, and its result and source are in the same register
-		if buildcfg.GOARM == 7 && v.Reg() == v.Args[0].Reg() {
+		if buildcfg.GOARM.Version == 7 && v.Reg() == v.Args[0].Reg() {
 			var val uint32
 			if v.Op == ssa.OpARMANDconst {
 				val = ^uint32(v.AuxInt)
@@ -646,7 +646,7 @@
 			default:
 			}
 		}
-		if buildcfg.GOARM >= 6 {
+		if buildcfg.GOARM.Version >= 6 {
 			// generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
 			genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
 			return
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index a0b432b..27b4e88 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -215,6 +215,10 @@
 		ssa.OpARM64FNMULD,
 		ssa.OpARM64FDIVS,
 		ssa.OpARM64FDIVD,
+		ssa.OpARM64FMINS,
+		ssa.OpARM64FMIND,
+		ssa.OpARM64FMAXS,
+		ssa.OpARM64FMAXD,
 		ssa.OpARM64ROR,
 		ssa.OpARM64RORW:
 		r := v.Reg()
diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go
index 8caeb41..ee3772c 100644
--- a/src/cmd/compile/internal/base/base.go
+++ b/src/cmd/compile/internal/base/base.go
@@ -107,7 +107,7 @@
 	//    interface. Instead, live is estimated by knowing the adjusted value of
 	//    GOGC and the new heap goal following a GC (this requires knowing that
 	//    at least one GC has occurred):
-	//		  estLive = 100 * newGoal / (100 + currentGogc)]
+	//		  estLive = 100 * newGoal / (100 + currentGogc)
 	//    this new value of GOGC
 	//		  newGogc = 100*requestedHeapGoal/estLive - 100
 	//    will result in the desired goal. The logging code checks that the
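
A quick worked example of the two formulas in the comment above, using made-up numbers purely for illustration:

```go
package main

import "fmt"

func main() {
	// Made-up inputs, chosen only to exercise the comment's formulas.
	currentGogc := 100.0       // GOGC currently in effect
	newGoal := 200.0           // heap goal reported after a GC (MB)
	requestedHeapGoal := 300.0 // desired heap goal (MB)

	estLive := 100 * newGoal / (100 + currentGogc) // = 100 MB estimated live heap
	newGogc := 100*requestedHeapGoal/estLive - 100 // = 200

	fmt.Printf("estLive=%.0f newGogc=%.0f\n", estLive, newGogc)
	// With GOGC=200 and ~100 MB live, the next goal is 100*(1+200/100) = 300 MB,
	// which matches the requested heap goal.
}
```
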
@@ -219,54 +219,3 @@
 
 	forEachGC(adjustFunc)
 }
-
-func Compiling(pkgs []string) bool {
-	if Ctxt.Pkgpath != "" {
-		for _, p := range pkgs {
-			if Ctxt.Pkgpath == p {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-// The racewalk pass is currently handled in three parts.
-//
-// First, for flag_race, it inserts calls to racefuncenter and
-// racefuncexit at the start and end (respectively) of each
-// function. This is handled below.
-//
-// Second, during buildssa, it inserts appropriate instrumentation
-// calls immediately before each memory load or store. This is handled
-// by the (*state).instrument method in ssa.go, so here we just set
-// the Func.InstrumentBody flag as needed. For background on why this
-// is done during SSA construction rather than a separate SSA pass,
-// see issue #19054.
-//
-// Third we remove calls to racefuncenter and racefuncexit, for leaf
-// functions without instrumented operations. This is done as part of
-// ssa opt pass via special rule.
-
-// TODO(dvyukov): do not instrument initialization as writes:
-// a := make([]int, 10)
-
-// Do not instrument the following packages at all,
-// at best instrumentation would cause infinite recursion.
-var NoInstrumentPkgs = []string{
-	"runtime/internal/atomic",
-	"runtime/internal/math",
-	"runtime/internal/sys",
-	"runtime/internal/syscall",
-	"runtime",
-	"runtime/race",
-	"runtime/msan",
-	"runtime/asan",
-	"internal/cpu",
-	"internal/abi",
-}
-
-// Don't insert racefuncenter/racefuncexit into the following packages.
-// Memory accesses in the packages are either uninteresting or will cause false positives.
-var NoRacePkgs = []string{"sync", "sync/atomic"}
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
index 1f05ed9..420ad13 100644
--- a/src/cmd/compile/internal/base/debug.go
+++ b/src/cmd/compile/internal/base/debug.go
@@ -21,8 +21,13 @@
 	Closure               int    `help:"print information about closure compilation"`
 	Defer                 int    `help:"print information about defer compilation"`
 	DisableNil            int    `help:"disable nil checks" concurrent:"ok"`
+	DumpInlFuncProps      string `help:"dump function properties from inl heuristics to specified file"`
+	DumpInlCallSiteScores int    `help:"dump scored callsites during inlining"`
+	InlScoreAdj           string `help:"set inliner score adjustments (ex: -d=inlscoreadj=panicPathAdj:10/passConstToNestedIfAdj:-90)"`
+	InlBudgetSlack        int    `help:"amount to expand the initial inline budget when the new inliner is enabled (defaults to 80 if not set)" concurrent:"ok"`
 	DumpPtrs              int    `help:"show Node pointers values in dump output"`
 	DwarfInl              int    `help:"print information about DWARF inlined function creation"`
+	EscapeMutationsCalls  int    `help:"print extra escape analysis diagnostics about mutations and calls" concurrent:"ok"`
 	Export                int    `help:"print export data"`
 	Fmahash               string `help:"hash value for use in debugging platform-dependent multiply-add use" concurrent:"ok"`
 	GCAdjust              int    `help:"log adjustments to GOGC" concurrent:"ok"`
@@ -31,11 +36,11 @@
 	Gossahash             string `help:"hash value for use in debugging the compiler"`
 	InlFuncsWithClosures  int    `help:"allow functions with closures to be inlined" concurrent:"ok"`
 	InlStaticInit         int    `help:"allow static initialization of inlined calls" concurrent:"ok"`
-	InterfaceCycles       int    `help:"allow anonymous interface cycles"`
 	Libfuzzer             int    `help:"enable coverage instrumentation for libfuzzer"`
 	LoopVar               int    `help:"shared (0, default), 1 (private loop variables), 2, private + log"`
 	LoopVarHash           string `help:"for debugging changes in loop behavior. Overrides experiment and loopvar flag."`
 	LocationLists         int    `help:"print information about DWARF location list creation"`
+	MaxShapeLen           int    `help:"hash shape names longer than this threshold (default 500)" concurrent:"ok"`
 	Nil                   int    `help:"print information about nil checks"`
 	NoOpenDefer           int    `help:"disable open-coded defers" concurrent:"ok"`
 	NoRefName             int    `help:"do not include referenced symbol names in object file" concurrent:"ok"`
@@ -45,18 +50,22 @@
 	Shapify               int    `help:"print information about shaping recursive types"`
 	Slice                 int    `help:"print information about slice compilation"`
 	SoftFloat             int    `help:"force compiler to emit soft-float code" concurrent:"ok"`
+	StaticCopy            int    `help:"print information about missed static copies" concurrent:"ok"`
 	SyncFrames            int    `help:"how many writer stack frames to include at sync points in unified export data"`
 	TypeAssert            int    `help:"print information about type assertion inlining"`
 	WB                    int    `help:"print information about write barriers"`
 	ABIWrap               int    `help:"print information about ABI wrapper generation"`
 	MayMoreStack          string `help:"call named function before all stack growth checks" concurrent:"ok"`
 	PGODebug              int    `help:"debug profile-guided optimizations"`
+	PGOHash               string `help:"hash value for debugging profile-guided optimizations" concurrent:"ok"`
 	PGOInline             int    `help:"enable profile-guided inlining" concurrent:"ok"`
 	PGOInlineCDFThreshold string `help:"cumulative threshold percentage for determining call sites as hot candidates for inlining" concurrent:"ok"`
 	PGOInlineBudget       int    `help:"inline budget for hot functions" concurrent:"ok"`
-	PGODevirtualize       int    `help:"enable profile-guided devirtualization" concurrent:"ok"`
+	PGODevirtualize       int    `help:"enable profile-guided devirtualization; 0 to disable, 1 to enable interface devirtualization, 2 to enable function devirtualization" concurrent:"ok"`
+	RangeFuncCheck        int    `help:"insert code to check behavior of range iterator functions" concurrent:"ok"`
 	WrapGlobalMapDbg      int    `help:"debug trace output for global map init wrapping"`
 	WrapGlobalMapCtl      int    `help:"global map init wrap control (0 => default, 1 => off, 2 => stress mode, no size cutoff)"`
+	ZeroCopy              int    `help:"enable zero-copy string->[]byte conversions" concurrent:"ok"`
 
 	ConcurrentOk bool // true if only concurrentOk flags seen
 }
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
index 753a60a..a3144f8 100644
--- a/src/cmd/compile/internal/base/flag.go
+++ b/src/cmd/compile/internal/base/flag.go
@@ -5,11 +5,11 @@
 package base
 
 import (
+	"cmd/internal/cov/covcmd"
 	"encoding/json"
 	"flag"
 	"fmt"
 	"internal/buildcfg"
-	"internal/coverage"
 	"internal/platform"
 	"log"
 	"os"
@@ -98,6 +98,7 @@
 	DwarfLocationLists *bool        "help:\"add location lists to DWARF in optimized mode\""                      // &Ctxt.Flag_locationlists, set below
 	Dynlink            *bool        "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
 	EmbedCfg           func(string) "help:\"read go:embed configuration from `file`\""
+	Env                func(string) "help:\"add `definition` of the form key=value to environment\""
 	GenDwarfInl        int          "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
 	GoVersion          string       "help:\"required version of the runtime\""
 	ImportCfg          func(string) "help:\"read import configuration from `file`\""
@@ -132,17 +133,25 @@
 			Patterns map[string][]string
 			Files    map[string]string
 		}
-		ImportDirs   []string                   // appended to by -I
-		ImportMap    map[string]string          // set by -importcfg
-		PackageFile  map[string]string          // set by -importcfg; nil means not in use
-		CoverageInfo *coverage.CoverFixupConfig // set by -coveragecfg
-		SpectreIndex bool                       // set by -spectre=index or -spectre=all
+		ImportDirs   []string                 // appended to by -I
+		ImportMap    map[string]string        // set by -importcfg
+		PackageFile  map[string]string        // set by -importcfg; nil means not in use
+		CoverageInfo *covcmd.CoverFixupConfig // set by -coveragecfg
+		SpectreIndex bool                     // set by -spectre=index or -spectre=all
 		// Whether we are adding any sort of code instrumentation, such as
 		// when the race detector is enabled.
 		Instrumenting bool
 	}
 }
 
+func addEnv(s string) {
+	i := strings.Index(s, "=")
+	if i < 0 {
+		log.Fatal("-env argument must be of the form key=value")
+	}
+	os.Setenv(s[:i], s[i+1:])
+}
+
 // ParseFlags parses the command-line flags into Flag.
 func ParseFlags() {
 	Flag.I = addImportDir
@@ -158,6 +167,7 @@
 	*Flag.DwarfLocationLists = true
 	Flag.Dynlink = &Ctxt.Flag_dynlink
 	Flag.EmbedCfg = readEmbedCfg
+	Flag.Env = addEnv
 	Flag.GenDwarfInl = 2
 	Flag.ImportCfg = readImportCfg
 	Flag.CoverageCfg = readCoverageCfg
@@ -166,11 +176,14 @@
 	Flag.WB = true
 
 	Debug.ConcurrentOk = true
+	Debug.MaxShapeLen = 500
 	Debug.InlFuncsWithClosures = 1
 	Debug.InlStaticInit = 1
 	Debug.PGOInline = 1
-	Debug.PGODevirtualize = 1
+	Debug.PGODevirtualize = 2
 	Debug.SyncFrames = -1 // disable sync markers by default
+	Debug.ZeroCopy = 1
+	Debug.RangeFuncCheck = 1
 
 	Debug.Checkptr = -1 // so we can tell whether it is set explicitly
 
@@ -190,6 +203,12 @@
 		hashDebug = NewHashDebug("gossahash", Debug.Gossahash, nil)
 	}
 
+	// Compute whether we're compiling the runtime from the package path. Test
+	// code can also use the flag to set this explicitly.
+	if Flag.Std && objabi.LookupPkgSpecial(Ctxt.Pkgpath).Runtime {
+		Flag.CompilingRuntime = true
+	}
+
 	// Three inputs govern loop iteration variable rewriting, hash, experiment, flag.
 	// The loop variable rewriting is:
 	// IF non-empty hash, then hash determines behavior (function+line match) (*)
@@ -238,6 +257,9 @@
 	if Debug.Fmahash != "" {
 		FmaHash = NewHashDebug("fmahash", Debug.Fmahash, nil)
 	}
+	if Debug.PGOHash != "" {
+		PGOHash = NewHashDebug("pgohash", Debug.PGOHash, nil)
+	}
 
 	if Flag.MSan && !platform.MSanSupported(buildcfg.GOOS, buildcfg.GOARCH) {
 		log.Fatalf("%s/%s does not support -msan", buildcfg.GOOS, buildcfg.GOARCH)
@@ -306,9 +328,6 @@
 		}
 	}
 
-	if Flag.CompilingRuntime && Flag.N != 0 {
-		log.Fatal("cannot disable optimizations while compiling runtime")
-	}
 	if Flag.LowerC < 1 {
 		log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
 	}
@@ -317,6 +336,11 @@
 	}
 
 	if Flag.CompilingRuntime {
+		// It is not possible to build the runtime with no optimizations,
+		// because the compiler cannot eliminate enough write barriers.
+		Flag.N = 0
+		Ctxt.Flag_optimize = true
+
 		// Runtime can't use -d=checkptr, at least not yet.
 		Debug.Checkptr = 0
 
@@ -470,26 +494,22 @@
 			continue
 		}
 
-		var verb, args string
-		if i := strings.Index(line, " "); i < 0 {
-			verb = line
-		} else {
-			verb, args = line[:i], strings.TrimSpace(line[i+1:])
+		verb, args, found := strings.Cut(line, " ")
+		if found {
+			args = strings.TrimSpace(args)
 		}
-		var before, after string
-		if i := strings.Index(args, "="); i >= 0 {
-			before, after = args[:i], args[i+1:]
-		}
+		before, after, hasEq := strings.Cut(args, "=")
+
 		switch verb {
 		default:
 			log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
 		case "importmap":
-			if before == "" || after == "" {
+			if !hasEq || before == "" || after == "" {
 				log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
 			}
 			Flag.Cfg.ImportMap[before] = after
 		case "packagefile":
-			if before == "" || after == "" {
+			if !hasEq || before == "" || after == "" {
 				log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
 			}
 			Flag.Cfg.PackageFile[before] = after
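
The readImportCfg rewrite above replaces manual strings.Index bookkeeping with strings.Cut. A minimal standalone illustration of the same parsing pattern:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "importmap old/pkg=new/pkg"

	verb, args, found := strings.Cut(line, " ")
	if found {
		args = strings.TrimSpace(args)
	}
	before, after, hasEq := strings.Cut(args, "=")

	fmt.Println(verb, before, after, hasEq)
	// Output: importmap old/pkg new/pkg true

	// The explicit hasEq result makes a missing "=" detectable directly,
	// rather than relying on before/after both being empty strings.
}
```
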
@@ -498,7 +518,7 @@
 }
 
 func readCoverageCfg(file string) {
-	var cfg coverage.CoverFixupConfig
+	var cfg covcmd.CoverFixupConfig
 	data, err := os.ReadFile(file)
 	if err != nil {
 		log.Fatalf("-coveragecfg: %v", err)
diff --git a/src/cmd/compile/internal/base/hashdebug.go b/src/cmd/compile/internal/base/hashdebug.go
index 167b0df..8342a5b 100644
--- a/src/cmd/compile/internal/base/hashdebug.go
+++ b/src/cmd/compile/internal/base/hashdebug.go
@@ -55,6 +55,7 @@
 
 var FmaHash *HashDebug     // for debugging fused-multiply-add floating point changes
 var LoopVarHash *HashDebug // for debugging shared/private loop variable changes
+var PGOHash *HashDebug     // for debugging PGO optimization decisions
 
 // DebugHashMatchPkgFunc reports whether debug variable Gossahash
 //
@@ -203,7 +204,6 @@
 		i++
 	}
 	return hd
-
 }
 
 // TODO: Delete when we switch to bisect-only.
@@ -274,8 +274,36 @@
 }
 
 func (d *HashDebug) matchPos(ctxt *obj.Link, pos src.XPos, note func() string) bool {
+	return d.matchPosWithInfo(ctxt, pos, nil, note)
+}
+
+func (d *HashDebug) matchPosWithInfo(ctxt *obj.Link, pos src.XPos, info any, note func() string) bool {
 	hash := d.hashPos(ctxt, pos)
-	return d.matchAndLog(hash, func() string { return d.fmtPos(ctxt, pos) }, note)
+	if info != nil {
+		hash = bisect.Hash(hash, info)
+	}
+	return d.matchAndLog(hash,
+		func() string {
+			r := d.fmtPos(ctxt, pos)
+			if info != nil {
+				r += fmt.Sprintf(" (%v)", info)
+			}
+			return r
+		},
+		note)
+}
+
+// MatchPosWithInfo is similar to MatchPos, but with additional information
+// that is included for hash computation, so it can distinguish multiple
+// matches on the same source location.
+// Note that when no environment variable is set (d == nil), the
+// default answer is "yes": the optimization is performed.
+func (d *HashDebug) MatchPosWithInfo(pos src.XPos, info any, desc func() string) bool {
+	if d == nil {
+		return true
+	}
+	// Written this way to make inlining likely.
+	return d.matchPosWithInfo(Ctxt, pos, info, desc)
 }
 
 // matchAndLog is the core matcher. It reports whether the hash matches the pattern.
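
MatchPosWithInfo folds an extra discriminator into the position hash so that two PGO decisions at the same source line can be bisected independently. Conceptually (this is a sketch, not the actual bisect.Hash implementation used above) it behaves like:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// hashWithInfo sketches the idea: start from the position hash and mix in
// a printable discriminator so distinct decisions at the same position get
// distinct hashes. The real code delegates to bisect.Hash.
func hashWithInfo(posHash uint64, info any) uint64 {
	h := fnv.New64a()
	fmt.Fprintf(h, "%016x/%v", posHash, info)
	return h.Sum64()
}

func main() {
	pos := uint64(0xdeadbeef) // pretend hash of "file.go:42"
	fmt.Println(hashWithInfo(pos, "devirt-interface"))
	fmt.Println(hashWithInfo(pos, "devirt-function")) // differs from the line above
}
```
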
diff --git a/src/cmd/compile/internal/base/mapfile_mmap.go b/src/cmd/compile/internal/base/mapfile_mmap.go
index bbcfda2..b66c9eb 100644
--- a/src/cmd/compile/internal/base/mapfile_mmap.go
+++ b/src/cmd/compile/internal/base/mapfile_mmap.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || (solaris && go1.20)
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
 
 package base
 
diff --git a/src/cmd/compile/internal/base/mapfile_read.go b/src/cmd/compile/internal/base/mapfile_read.go
index c1b84db..783f8c4 100644
--- a/src/cmd/compile/internal/base/mapfile_read.go
+++ b/src/cmd/compile/internal/base/mapfile_read.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !(solaris && go1.20)
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
 
 package base
 
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
index efd70f7..cc36ace 100644
--- a/src/cmd/compile/internal/base/print.go
+++ b/src/cmd/compile/internal/base/print.go
@@ -146,11 +146,6 @@
 	}
 }
 
-// ErrorfVers reports that a language feature (format, args) requires a later version of Go.
-func ErrorfVers(lang string, format string, args ...interface{}) {
-	Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
-}
-
 // UpdateErrorDot is a clumsy hack that rewrites the last error,
 // if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
 // It is used to give better error messages for dot (selector) expressions.
diff --git a/src/cmd/compile/internal/compare/compare.go b/src/cmd/compile/internal/compare/compare.go
index 1674065..e165cd6 100644
--- a/src/cmd/compile/internal/compare/compare.go
+++ b/src/cmd/compile/internal/compare/compare.go
@@ -70,7 +70,7 @@
 	case types.TARRAY:
 		return EqCanPanic(t.Elem())
 	case types.TSTRUCT:
-		for _, f := range t.FieldSlice() {
+		for _, f := range t.Fields() {
 			if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
 				return true
 			}
@@ -87,7 +87,7 @@
 func EqStructCost(t *types.Type) int64 {
 	cost := int64(0)
 
-	for i, fields := 0, t.FieldSlice(); i < len(fields); {
+	for i, fields := 0, t.Fields(); i < len(fields); {
 		f := fields[i]
 
 		// Skip blank-named fields.
@@ -181,7 +181,7 @@
 
 	// Walk the struct using memequal for runs of AMEM
 	// and calling specific equality tests for the others.
-	for i, fields := 0, t.FieldSlice(); i < len(fields); {
+	for i, fields := 0, t.Fields(); i < len(fields); {
 		f := fields[i]
 
 		// Skip blank-named fields.
@@ -198,15 +198,15 @@
 				// Enforce ordering by starting a new set of reorderable conditions.
 				conds = append(conds, []ir.Node{})
 			}
-			p := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym)
-			q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
 			switch {
 			case f.Type.IsString():
+				p := typecheck.DotField(base.Pos, typecheck.Expr(np), i)
+				q := typecheck.DotField(base.Pos, typecheck.Expr(nq), i)
 				eqlen, eqmem := EqString(p, q)
 				and(eqlen)
 				and(eqmem)
 			default:
-				and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
+				and(eqfield(np, nq, i))
 			}
 			if typeCanPanic {
 				// Also enforce ordering after something that can panic.
@@ -219,13 +219,12 @@
 		cost, size, next := eqStructFieldCost(t, i)
 		if cost <= 4 {
 			// Cost of 4 or less: use plain field equality.
-			s := fields[i:next]
-			for _, f := range s {
-				and(eqfield(np, nq, ir.OEQ, f.Sym))
+			for j := i; j < next; j++ {
+				and(eqfield(np, nq, j))
 			}
 		} else {
 			// Higher cost: use memequal.
-			cc := eqmem(np, nq, f.Sym, size)
+			cc := eqmem(np, nq, i, size)
 			and(cc)
 		}
 		i = next
@@ -295,8 +294,7 @@
 		cmplen = tlen
 	}
 
-	fn := typecheck.LookupRuntime("memequal")
-	fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
+	fn := typecheck.LookupRuntime("memequal", types.Types[types.TUINT8], types.Types[types.TUINT8])
 	call := typecheck.Call(base.Pos, fn, []ir.Node{sptr, tptr, ir.Copy(cmplen)}, false).(*ir.CallExpr)
 
 	cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
@@ -348,19 +346,18 @@
 // eqfield returns the node
 //
 //	p.field == q.field
-func eqfield(p ir.Node, q ir.Node, op ir.Op, field *types.Sym) ir.Node {
-	nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
-	ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
-	ne := ir.NewBinaryExpr(base.Pos, op, nx, ny)
-	return ne
+func eqfield(p, q ir.Node, field int) ir.Node {
+	nx := typecheck.DotField(base.Pos, typecheck.Expr(p), field)
+	ny := typecheck.DotField(base.Pos, typecheck.Expr(q), field)
+	return typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.OEQ, nx, ny))
 }
 
 // eqmem returns the node
 //
 //	memequal(&p.field, &q.field, size)
-func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
-	nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
-	ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))
+func eqmem(p, q ir.Node, field int, size int64) ir.Node {
+	nx := typecheck.Expr(typecheck.NodAddr(typecheck.DotField(base.Pos, p, field)))
+	ny := typecheck.Expr(typecheck.NodAddr(typecheck.DotField(base.Pos, q, field)))
 
 	fn, needsize := eqmemfunc(size, nx.Type().Elem())
 	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
@@ -375,14 +372,10 @@
 
 func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
 	switch size {
-	default:
-		fn = typecheck.LookupRuntime("memequal")
-		needsize = true
 	case 1, 2, 4, 8, 16:
 		buf := fmt.Sprintf("memequal%d", int(size)*8)
-		fn = typecheck.LookupRuntime(buf)
+		return typecheck.LookupRuntime(buf, t, t), false
 	}
 
-	fn = typecheck.SubstArgTypes(fn, t, t)
-	return fn, needsize
+	return typecheck.LookupRuntime("memequal", t, t), true
 }
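
The eqfield/eqmem split above decides, per run of fields, between plain field comparisons and a single memory comparison, with strings handled length-first. At the source level, the generated equality for a small struct behaves roughly like the hand-written analogue below (an illustration, not compiler output; the built-in string compare stands in for the memequal call on the string's backing bytes):

```go
package main

import "fmt"

type T struct {
	A, B int64
	S    string
}

// eqT mirrors the shape of the compiler-generated equality: cheap fields
// are compared directly; the string is compared length-first, then by
// contents.
func eqT(p, q T) bool {
	return p.A == q.A && p.B == q.B &&
		len(p.S) == len(q.S) && p.S == q.S
}

func main() {
	fmt.Println(eqT(T{1, 2, "go"}, T{1, 2, "go"})) // true
	fmt.Println(eqT(T{1, 2, "go"}, T{1, 3, "go"})) // false
}
```
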
diff --git a/src/cmd/compile/internal/compare/compare_test.go b/src/cmd/compile/internal/compare/compare_test.go
index c65537f..2f76165 100644
--- a/src/cmd/compile/internal/compare/compare_test.go
+++ b/src/cmd/compile/internal/compare/compare_test.go
@@ -28,154 +28,73 @@
 }
 
 func TestEqStructCost(t *testing.T) {
-	newByteField := func(parent *types.Type, offset int64) *types.Field {
-		f := types.NewField(src.XPos{}, parent.Sym(), types.ByteType)
-		f.Offset = offset
-		return f
+	repeat := func(n int, typ *types.Type) []*types.Type {
+		typs := make([]*types.Type, n)
+		for i := range typs {
+			typs[i] = typ
+		}
+		return typs
 	}
-	newArrayField := func(parent *types.Type, offset int64, len int64, kind types.Kind) *types.Field {
-		f := types.NewField(src.XPos{}, parent.Sym(), types.NewArray(types.Types[kind], len))
-		// Call Type.Size here to force the size calculation to be done. If not done here the size returned later is incorrect.
-		f.Type.Size()
-		f.Offset = offset
-		return f
-	}
-	newField := func(parent *types.Type, offset int64, kind types.Kind) *types.Field {
-		f := types.NewField(src.XPos{}, parent.Sym(), types.Types[kind])
-		f.Offset = offset
-		return f
-	}
+
 	tt := []struct {
 		name             string
 		cost             int64
 		nonMergeLoadCost int64
-		tfn              typefn
+		fieldTypes       []*types.Type
 	}{
-		{"struct without fields", 0, 0,
-			func() *types.Type {
-				return types.NewStruct([]*types.Field{})
-			}},
-		{"struct with 1 byte field", 1, 1,
-			func() *types.Type {
-				parent := types.NewStruct([]*types.Field{})
-				fields := []*types.Field{
-					newByteField(parent, 0),
-				}
-				parent.SetFields(fields)
-				return parent
-			},
-		},
-		{"struct with 8 byte fields", 1, 8,
-			func() *types.Type {
-				parent := types.NewStruct([]*types.Field{})
-				fields := make([]*types.Field, 8)
-				for i := range fields {
-					fields[i] = newByteField(parent, int64(i))
-				}
-				parent.SetFields(fields)
-				return parent
-			},
-		},
-		{"struct with 16 byte fields", 2, 16,
-			func() *types.Type {
-				parent := types.NewStruct([]*types.Field{})
-				fields := make([]*types.Field, 16)
-				for i := range fields {
-					fields[i] = newByteField(parent, int64(i))
-				}
-				parent.SetFields(fields)
-				return parent
-			},
-		},
-		{"struct with 32 byte fields", 4, 32,
-			func() *types.Type {
-				parent := types.NewStruct([]*types.Field{})
-				fields := make([]*types.Field, 32)
-				for i := range fields {
-					fields[i] = newByteField(parent, int64(i))
-				}
-				parent.SetFields(fields)
-				return parent
-			},
-		},
-		{"struct with 2 int32 fields", 1, 2,
-			func() *types.Type {
-				parent := types.NewStruct([]*types.Field{})
-				fields := make([]*types.Field, 2)
-				for i := range fields {
-					fields[i] = newField(parent, int64(i*4), types.TINT32)
-				}
-				parent.SetFields(fields)
-				return parent
-			},
-		},
+		{"struct without fields", 0, 0, nil},
+		{"struct with 1 byte field", 1, 1, repeat(1, types.ByteType)},
+		{"struct with 8 byte fields", 1, 8, repeat(8, types.ByteType)},
+		{"struct with 16 byte fields", 2, 16, repeat(16, types.ByteType)},
+		{"struct with 32 byte fields", 4, 32, repeat(32, types.ByteType)},
+		{"struct with 2 int32 fields", 1, 2, repeat(2, types.Types[types.TINT32])},
 		{"struct with 2 int32 fields and 1 int64", 2, 3,
-			func() *types.Type {
-				parent := types.NewStruct([]*types.Field{})
-				fields := make([]*types.Field, 3)
-				fields[0] = newField(parent, int64(0), types.TINT32)
-				fields[1] = newField(parent, int64(4), types.TINT32)
-				fields[2] = newField(parent, int64(8), types.TINT64)
-				parent.SetFields(fields)
-				return parent
+			[]*types.Type{
+				types.Types[types.TINT32],
+				types.Types[types.TINT32],
+				types.Types[types.TINT64],
 			},
 		},
 		{"struct with 1 int field and 1 string", 3, 3,
-			func() *types.Type {
-				parent := types.NewStruct([]*types.Field{})
-				fields := make([]*types.Field, 2)
-				fields[0] = newField(parent, int64(0), types.TINT64)
-				fields[1] = newField(parent, int64(8), types.TSTRING)
-				parent.SetFields(fields)
-				return parent
+			[]*types.Type{
+				types.Types[types.TINT64],
+				types.Types[types.TSTRING],
 			},
 		},
-		{"struct with 2 strings", 4, 4,
-			func() *types.Type {
-				parent := types.NewStruct([]*types.Field{})
-				fields := make([]*types.Field, 2)
-				fields[0] = newField(parent, int64(0), types.TSTRING)
-				fields[1] = newField(parent, int64(8), types.TSTRING)
-				parent.SetFields(fields)
-				return parent
-			},
-		},
+		{"struct with 2 strings", 4, 4, repeat(2, types.Types[types.TSTRING])},
 		{"struct with 1 large byte array field", 26, 101,
-			func() *types.Type {
-				parent := types.NewStruct([]*types.Field{})
-				fields := []*types.Field{
-					newArrayField(parent, 0, 101, types.TUINT16),
-				}
-				parent.SetFields(fields)
-				return parent
+			[]*types.Type{
+				types.NewArray(types.Types[types.TUINT16], 101),
 			},
 		},
 		{"struct with string array field", 4, 4,
-			func() *types.Type {
-				parent := types.NewStruct([]*types.Field{})
-				fields := []*types.Field{
-					newArrayField(parent, 0, 2, types.TSTRING),
-				}
-				parent.SetFields(fields)
-				return parent
+			[]*types.Type{
+				types.NewArray(types.Types[types.TSTRING], 2),
 			},
 		},
 	}
 
 	for _, tc := range tt {
 		t.Run(tc.name, func(t *testing.T) {
+			fields := make([]*types.Field, len(tc.fieldTypes))
+			for i, ftyp := range tc.fieldTypes {
+				fields[i] = types.NewField(src.NoXPos, typecheck.LookupNum("f", i), ftyp)
+			}
+			typ := types.NewStruct(fields)
+			types.CalcSize(typ)
+
 			want := tc.cost
 			base.Ctxt.Arch.CanMergeLoads = true
-			actual := EqStructCost(tc.tfn())
+			actual := EqStructCost(typ)
 			if actual != want {
-				t.Errorf("CanMergeLoads=true EqStructCost(%v) = %d, want %d", tc.tfn, actual, want)
+				t.Errorf("CanMergeLoads=true EqStructCost(%v) = %d, want %d", typ, actual, want)
 			}
 
 			base.Ctxt.Arch.CanMergeLoads = false
 			want = tc.nonMergeLoadCost
-			actual = EqStructCost(tc.tfn())
+			actual = EqStructCost(typ)
 			if actual != want {
-				t.Errorf("CanMergeLoads=false EqStructCost(%v) = %d, want %d", tc.tfn, actual, want)
+				t.Errorf("CanMergeLoads=false EqStructCost(%v) = %d, want %d", typ, actual, want)
 			}
 		})
 	}
diff --git a/src/cmd/compile/internal/coverage/cover.go b/src/cmd/compile/internal/coverage/cover.go
index 3e0350b..5320f00 100644
--- a/src/cmd/compile/internal/coverage/cover.go
+++ b/src/cmd/compile/internal/coverage/cover.go
@@ -22,9 +22,9 @@
 	"strings"
 )
 
-// Names records state information collected in the first fixup
+// names records state information collected in the first fixup
 // phase so that it can be passed to the second fixup phase.
-type Names struct {
+type names struct {
 	MetaVar     *ir.Name
 	PkgIdVar    *ir.Name
 	InitFn      *ir.Func
@@ -32,13 +32,17 @@
 	CounterGran coverage.CounterGranularity
 }
 
-// FixupVars is the first of two entry points for coverage compiler
-// fixup. It collects and returns the package ID and meta-data
-// variables being used for this "-cover" build, along with the
-// coverage counter mode and granularity. It also reclassifies selected
-// variables (for example, tagging coverage counter variables with
-// flags so that they can be handled properly downstream).
-func FixupVars() Names {
+// Fixup adds calls to the pkg init function as appropriate to
+// register coverage-related variables with the runtime.
+//
+// It also reclassifies selected variables (for example, tagging
+// coverage counter variables with flags so that they can be handled
+// properly downstream).
+func Fixup() {
+	if base.Flag.Cfg.CoverageInfo == nil {
+		return // not using coverage
+	}
+
 	metaVarName := base.Flag.Cfg.CoverageInfo.MetaVar
 	pkgIdVarName := base.Flag.Cfg.CoverageInfo.PkgIdVar
 	counterMode := base.Flag.Cfg.CoverageInfo.CounterMode
@@ -53,15 +57,7 @@
 		}
 	}
 
-	for _, n := range typecheck.Target.Decls {
-		as, ok := n.(*ir.AssignStmt)
-		if !ok {
-			continue
-		}
-		nm, ok := as.X.(*ir.Name)
-		if !ok {
-			continue
-		}
+	for _, nm := range typecheck.Target.Externs {
 		s := nm.Sym()
 		switch s.Name {
 		case metaVarName:
@@ -100,20 +96,15 @@
 			counterGran)
 	}
 
-	return Names{
+	cnames := names{
 		MetaVar:     metavar,
 		PkgIdVar:    pkgidvar,
 		CounterMode: cm,
 		CounterGran: cg,
 	}
-}
 
-// FixupInit is the second main entry point for coverage compiler
-// fixup. It adds calls to the pkg init function as appropriate to
-// register coverage-related variables with the runtime.
-func FixupInit(cnames Names) {
-	for _, n := range typecheck.Target.Decls {
-		if fn, ok := n.(*ir.Func); ok && ir.FuncName(fn) == "init" {
+	for _, fn := range typecheck.Target.Funcs {
+		if ir.FuncName(fn) == "init" {
 			cnames.InitFn = fn
 			break
 		}
@@ -152,7 +143,7 @@
 	return hv, base.Flag.Cfg.CoverageInfo.MetaLen
 }
 
-func registerMeta(cnames Names, hashv [16]byte, mdlen int) {
+func registerMeta(cnames names, hashv [16]byte, mdlen int) {
 	// Materialize expression for hash (an array literal)
 	pos := cnames.InitFn.Pos()
 	elist := make([]ir.Node, 0, 16)
diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go
deleted file mode 100644
index 46a2239..0000000
--- a/src/cmd/compile/internal/deadcode/deadcode.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package deadcode
-
-import (
-	"go/constant"
-	"go/token"
-
-	"cmd/compile/internal/base"
-	"cmd/compile/internal/ir"
-)
-
-func Func(fn *ir.Func) {
-	stmts(&fn.Body)
-
-	if len(fn.Body) == 0 {
-		return
-	}
-
-	for _, n := range fn.Body {
-		if len(n.Init()) > 0 {
-			return
-		}
-		switch n.Op() {
-		case ir.OIF:
-			n := n.(*ir.IfStmt)
-			if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 {
-				return
-			}
-		case ir.OFOR:
-			n := n.(*ir.ForStmt)
-			if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) {
-				return
-			}
-		default:
-			return
-		}
-	}
-
-	ir.VisitList(fn.Body, markHiddenClosureDead)
-	fn.Body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
-}
-
-func stmts(nn *ir.Nodes) {
-	var lastLabel = -1
-	for i, n := range *nn {
-		if n != nil && n.Op() == ir.OLABEL {
-			lastLabel = i
-		}
-	}
-	for i, n := range *nn {
-		// Cut is set to true when all nodes after i'th position
-		// should be removed.
-		// In other words, it marks whole slice "tail" as dead.
-		cut := false
-		if n == nil {
-			continue
-		}
-		if n.Op() == ir.OIF {
-			n := n.(*ir.IfStmt)
-			n.Cond = expr(n.Cond)
-			if ir.IsConst(n.Cond, constant.Bool) {
-				var body ir.Nodes
-				if ir.BoolVal(n.Cond) {
-					ir.VisitList(n.Else, markHiddenClosureDead)
-					n.Else = ir.Nodes{}
-					body = n.Body
-				} else {
-					ir.VisitList(n.Body, markHiddenClosureDead)
-					n.Body = ir.Nodes{}
-					body = n.Else
-				}
-				// If "then" or "else" branch ends with panic or return statement,
-				// it is safe to remove all statements after this node.
-				// isterminating is not used to avoid goto-related complications.
-				// We must be careful not to deadcode-remove labels, as they
-				// might be the target of a goto. See issue 28616.
-				if body := body; len(body) != 0 {
-					switch body[(len(body) - 1)].Op() {
-					case ir.ORETURN, ir.OTAILCALL, ir.OPANIC:
-						if i > lastLabel {
-							cut = true
-						}
-					}
-				}
-			}
-		}
-		if n.Op() == ir.OSWITCH {
-			n := n.(*ir.SwitchStmt)
-			// Use a closure wrapper here so we can use "return" to abort the analysis.
-			func() {
-				if n.Tag != nil && n.Tag.Op() == ir.OTYPESW {
-					return // no special type-switch case yet.
-				}
-				var x constant.Value // value we're switching on
-				if n.Tag != nil {
-					if ir.ConstType(n.Tag) == constant.Unknown {
-						return
-					}
-					x = n.Tag.Val()
-				} else {
-					x = constant.MakeBool(true) // switch { ... }  =>  switch true { ... }
-				}
-				var def *ir.CaseClause
-				for _, cas := range n.Cases {
-					if len(cas.List) == 0 { // default case
-						def = cas
-						continue
-					}
-					for _, c := range cas.List {
-						if ir.ConstType(c) == constant.Unknown {
-							return // can't statically tell if it matches or not - give up.
-						}
-						if constant.Compare(x, token.EQL, c.Val()) {
-							for _, n := range cas.Body {
-								if n.Op() == ir.OFALL {
-									return // fallthrough makes it complicated - abort.
-								}
-							}
-							// This switch entry is the one that always triggers.
-							for _, cas2 := range n.Cases {
-								for _, c2 := range cas2.List {
-									ir.Visit(c2, markHiddenClosureDead)
-								}
-								if cas2 != cas {
-									ir.VisitList(cas2.Body, markHiddenClosureDead)
-								}
-							}
-
-							// Rewrite to switch { case true: ... }
-							n.Tag = nil
-							cas.List[0] = ir.NewBool(c.Pos(), true)
-							cas.List = cas.List[:1]
-							n.Cases[0] = cas
-							n.Cases = n.Cases[:1]
-							return
-						}
-					}
-				}
-				if def != nil {
-					for _, n := range def.Body {
-						if n.Op() == ir.OFALL {
-							return // fallthrough makes it complicated - abort.
-						}
-					}
-					for _, cas := range n.Cases {
-						if cas != def {
-							ir.VisitList(cas.List, markHiddenClosureDead)
-							ir.VisitList(cas.Body, markHiddenClosureDead)
-						}
-					}
-					n.Cases[0] = def
-					n.Cases = n.Cases[:1]
-					return
-				}
-
-				// TODO: handle case bodies ending with panic/return as we do in the IF case above.
-
-				// entire switch is a nop - no case ever triggers
-				for _, cas := range n.Cases {
-					ir.VisitList(cas.List, markHiddenClosureDead)
-					ir.VisitList(cas.Body, markHiddenClosureDead)
-				}
-				n.Cases = n.Cases[:0]
-			}()
-		}
-
-		if len(n.Init()) != 0 {
-			stmts(n.(ir.InitNode).PtrInit())
-		}
-		switch n.Op() {
-		case ir.OBLOCK:
-			n := n.(*ir.BlockStmt)
-			stmts(&n.List)
-		case ir.OFOR:
-			n := n.(*ir.ForStmt)
-			stmts(&n.Body)
-		case ir.OIF:
-			n := n.(*ir.IfStmt)
-			stmts(&n.Body)
-			stmts(&n.Else)
-		case ir.ORANGE:
-			n := n.(*ir.RangeStmt)
-			stmts(&n.Body)
-		case ir.OSELECT:
-			n := n.(*ir.SelectStmt)
-			for _, cas := range n.Cases {
-				stmts(&cas.Body)
-			}
-		case ir.OSWITCH:
-			n := n.(*ir.SwitchStmt)
-			for _, cas := range n.Cases {
-				stmts(&cas.Body)
-			}
-		}
-
-		if cut {
-			ir.VisitList((*nn)[i+1:len(*nn)], markHiddenClosureDead)
-			*nn = (*nn)[:i+1]
-			break
-		}
-	}
-}
-
-func expr(n ir.Node) ir.Node {
-	// Perform dead-code elimination on short-circuited boolean
-	// expressions involving constants with the intent of
-	// producing a constant 'if' condition.
-	switch n.Op() {
-	case ir.OANDAND:
-		n := n.(*ir.LogicalExpr)
-		n.X = expr(n.X)
-		n.Y = expr(n.Y)
-		if ir.IsConst(n.X, constant.Bool) {
-			if ir.BoolVal(n.X) {
-				return n.Y // true && x => x
-			} else {
-				return n.X // false && x => false
-			}
-		}
-	case ir.OOROR:
-		n := n.(*ir.LogicalExpr)
-		n.X = expr(n.X)
-		n.Y = expr(n.Y)
-		if ir.IsConst(n.X, constant.Bool) {
-			if ir.BoolVal(n.X) {
-				return n.X // true || x => true
-			} else {
-				return n.Y // false || x => x
-			}
-		}
-	}
-	return n
-}
-
-func markHiddenClosureDead(n ir.Node) {
-	if n.Op() != ir.OCLOSURE {
-		return
-	}
-	clo := n.(*ir.ClosureExpr)
-	if clo.Func.IsHiddenClosure() {
-		clo.Func.SetIsDeadcodeClosure(true)
-	}
-	ir.VisitList(clo.Func.Body, markHiddenClosureDead)
-}
diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
index b156b66..5d1b952 100644
--- a/src/cmd/compile/internal/devirtualize/devirtualize.go
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -18,40 +18,28 @@
 	"cmd/compile/internal/types"
 )
 
-// Static devirtualizes calls within fn where possible when the concrete callee
+// StaticCall devirtualizes the given call if possible when the concrete callee
 // is available statically.
-func Static(fn *ir.Func) {
-	ir.CurFunc = fn
+func StaticCall(call *ir.CallExpr) {
+	// For promoted methods (including value-receiver methods promoted
+	// to pointer-receivers), the interface method wrapper may contain
+	// expressions that can panic (e.g., ODEREF, ODOTPTR,
+	// ODOTINTER). Devirtualization involves inlining these expressions
+	// (and possible panics) to the call site. This normally isn't a
+	// problem, but for go/defer statements it can move the panic from
+	// when/where the call executes to the go/defer statement itself,
+	// which is a visible change in semantics (e.g., #52072). To prevent
+	// this, we skip devirtualizing calls within go/defer statements
+	// altogether.
+	if call.GoDefer {
+		return
+	}
 
-	// For promoted methods (including value-receiver methods promoted to pointer-receivers),
-	// the interface method wrapper may contain expressions that can panic (e.g., ODEREF, ODOTPTR, ODOTINTER).
-	// Devirtualization involves inlining these expressions (and possible panics) to the call site.
-	// This normally isn't a problem, but for go/defer statements it can move the panic from when/where
-	// the call executes to the go/defer statement itself, which is a visible change in semantics (e.g., #52072).
-	// To prevent this, we skip devirtualizing calls within go/defer statements altogether.
-	goDeferCall := make(map[*ir.CallExpr]bool)
-	ir.VisitList(fn.Body, func(n ir.Node) {
-		switch n := n.(type) {
-		case *ir.GoDeferStmt:
-			if call, ok := n.Call.(*ir.CallExpr); ok {
-				goDeferCall[call] = true
-			}
-			return
-		case *ir.CallExpr:
-			if !goDeferCall[n] {
-				staticCall(n)
-			}
-		}
-	})
-}
-
-// staticCall devirtualizes the given call if possible when the concrete callee
-// is available statically.
-func staticCall(call *ir.CallExpr) {
 	if call.Op() != ir.OCALLINTER {
 		return
 	}
-	sel := call.X.(*ir.SelectorExpr)
+
+	sel := call.Fun.(*ir.SelectorExpr)
 	r := ir.StaticValue(sel.X)
 	if r.Op() != ir.OCONVIFACE {
 		return
@@ -70,7 +58,7 @@
 		return
 	}
 
-	// If typ *has* a shape type, then it's an shaped, instantiated
+	// If typ *has* a shape type, then it's a shaped, instantiated
 	// type like T[go.shape.int], and its methods (may) have an extra
 	// dictionary parameter. We could devirtualize this call if we
 	// could derive an appropriate dictionary argument.
@@ -113,29 +101,23 @@
 
 	dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
 	dt.SetType(typ)
-	x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel))
+	x := typecheck.XDotMethod(sel.Pos(), dt, sel.Sel, true)
 	switch x.Op() {
 	case ir.ODOTMETH:
-		x := x.(*ir.SelectorExpr)
 		if base.Flag.LowerM != 0 {
 			base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
 		}
 		call.SetOp(ir.OCALLMETH)
-		call.X = x
+		call.Fun = x
 	case ir.ODOTINTER:
 		// Promoted method from embedded interface-typed field (#42279).
-		x := x.(*ir.SelectorExpr)
 		if base.Flag.LowerM != 0 {
 			base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
 		}
 		call.SetOp(ir.OCALLINTER)
-		call.X = x
+		call.Fun = x
 	default:
-		// TODO(mdempsky): Turn back into Fatalf after more testing.
-		if base.Flag.LowerM != 0 {
-			base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
-		}
-		return
+		base.FatalfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
 	}
 
 	// Duplicated logic from typecheck for function call return
@@ -148,9 +130,9 @@
 	switch ft := x.Type(); ft.NumResults() {
 	case 0:
 	case 1:
-		call.SetType(ft.Results().Field(0).Type)
+		call.SetType(ft.Result(0).Type)
 	default:
-		call.SetType(ft.Results())
+		call.SetType(ft.ResultsTuple())
 	}
 
 	// Desugar OCALLMETH, if we created one (#57309).
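(Illustrative aside, not part of the patch.) A minimal program showing the go/defer semantics concern that the GoDefer check above guards against: if the promoted-method wrapper's receiver dereference were inlined at the defer site, the nil-pointer panic would move from the deferred call to the defer statement itself.

package main

type T struct{}

func (T) M() {}

type I interface{ M() }

func main() {
	var p *T
	var i I = p // the itab wrapper (*T).M dereferences its receiver
	defer i.M() // the nil dereference must panic when the deferred call runs,
	// not at the defer statement itself (see #52072)
	println("reached: the panic happens only after this line, at return")
}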
diff --git a/src/cmd/compile/internal/devirtualize/pgo.go b/src/cmd/compile/internal/devirtualize/pgo.go
index 068e0ef..170bf74 100644
--- a/src/cmd/compile/internal/devirtualize/pgo.go
+++ b/src/cmd/compile/internal/devirtualize/pgo.go
@@ -12,6 +12,8 @@
 	"cmd/compile/internal/pgo"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/src"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -53,8 +55,10 @@
 // ProfileGuided performs call devirtualization of indirect calls based on
 // profile information.
 //
-// Specifically, it performs conditional devirtualization of interface calls
-// for the hottest callee. That is, it performs a transformation like:
+// Specifically, it performs conditional devirtualization of interface calls or
+// function value calls for the hottest callee.
+//
+// That is, for interface calls it performs a transformation like:
 //
 //	type Iface interface {
 //		Foo()
@@ -78,6 +82,24 @@
 //		}
 //	}
 //
+// For function value calls it performs a transformation like:
+//
+//	func Concrete() {}
+//
+//	func foo(fn func()) {
+//		fn()
+//	}
+//
+// to:
+//
+//	func foo(fn func()) {
+//		if internal/abi.FuncPCABIInternal(fn) == internal/abi.FuncPCABIInternal(Concrete) {
+//			Concrete()
+//		} else {
+//			fn()
+//		}
+//	}
+//
 // The primary benefit of this transformation is enabling inlining of the
 // direct call.
 func ProfileGuided(fn *ir.Func, p *pgo.Profile) {
@@ -85,9 +107,6 @@
 
 	name := ir.LinkFuncName(fn)
 
-	// Can't devirtualize go/defer calls. See comment in Static.
-	goDeferCall := make(map[*ir.CallExpr]bool)
-
 	var jsonW *json.Encoder
 	if base.Debug.PGODebug >= 3 {
 		jsonW = json.NewEncoder(os.Stdout)
@@ -99,12 +118,6 @@
 			return n
 		}
 
-		if gds, ok := n.(*ir.GoDeferStmt); ok {
-			if call, ok := gds.Call.(*ir.CallExpr); ok {
-				goDeferCall[call] = true
-			}
-		}
-
 		ir.EditChildren(n, edit)
 
 		call, ok := n.(*ir.CallExpr)
@@ -125,7 +138,8 @@
 			}
 		}
 
-		if call.Op() != ir.OCALLINTER {
+		op := call.Op()
+		if op != ir.OCALLFUNC && op != ir.OCALLINTER {
 			return n
 		}
 
@@ -133,25 +147,26 @@
 			fmt.Printf("%v: PGO devirtualize considering call %v\n", ir.Line(call), call)
 		}
 
-		if goDeferCall[call] {
+		if call.GoDefer {
 			if base.Debug.PGODebug >= 2 {
 				fmt.Printf("%v: can't PGO devirtualize go/defer call %v\n", ir.Line(call), call)
 			}
 			return n
 		}
 
-		// Bail if we do not have a hot callee.
-		callee, weight := findHotConcreteCallee(p, fn, call)
-		if callee == nil {
-			return n
+		var newNode ir.Node
+		var callee *ir.Func
+		var weight int64
+		switch op {
+		case ir.OCALLFUNC:
+			newNode, callee, weight = maybeDevirtualizeFunctionCall(p, fn, call)
+		case ir.OCALLINTER:
+			newNode, callee, weight = maybeDevirtualizeInterfaceCall(p, fn, call)
+		default:
+			panic("unreachable")
 		}
-		// Bail if we do not have a Type node for the hot callee.
-		ctyp := methodRecvType(callee)
-		if ctyp == nil {
-			return n
-		}
-		// Bail if we know for sure it won't inline.
-		if !shouldPGODevirt(callee) {
+
+		if newNode == nil {
 			return n
 		}
 
@@ -160,12 +175,126 @@
 			stat.DevirtualizedWeight = weight
 		}
 
-		return rewriteCondCall(call, fn, callee, ctyp)
+		return newNode
 	}
 
 	ir.EditChildren(fn, edit)
 }
 
+// Devirtualize interface call if possible and eligible. Returns the new
+// ir.Node if call was devirtualized, and if so also the callee and weight of
+// the devirtualized edge.
+func maybeDevirtualizeInterfaceCall(p *pgo.Profile, fn *ir.Func, call *ir.CallExpr) (ir.Node, *ir.Func, int64) {
+	if base.Debug.PGODevirtualize < 1 {
+		return nil, nil, 0
+	}
+
+	// Bail if we do not have a hot callee.
+	callee, weight := findHotConcreteInterfaceCallee(p, fn, call)
+	if callee == nil {
+		return nil, nil, 0
+	}
+	// Bail if we do not have a Type node for the hot callee.
+	ctyp := methodRecvType(callee)
+	if ctyp == nil {
+		return nil, nil, 0
+	}
+	// Bail if we know for sure it won't inline.
+	if !shouldPGODevirt(callee) {
+		return nil, nil, 0
+	}
+	// Bail if de-selected by PGO Hash.
+	if !base.PGOHash.MatchPosWithInfo(call.Pos(), "devirt", nil) {
+		return nil, nil, 0
+	}
+
+	return rewriteInterfaceCall(call, fn, callee, ctyp), callee, weight
+}
+
+// Devirtualize an indirect function call if possible and eligible. Returns the new
+// ir.Node if call was devirtualized, and if so also the callee and weight of
+// the devirtualized edge.
+func maybeDevirtualizeFunctionCall(p *pgo.Profile, fn *ir.Func, call *ir.CallExpr) (ir.Node, *ir.Func, int64) {
+	if base.Debug.PGODevirtualize < 2 {
+		return nil, nil, 0
+	}
+
+	// Bail if this is a direct call; no devirtualization necessary.
+	callee := pgo.DirectCallee(call.Fun)
+	if callee != nil {
+		return nil, nil, 0
+	}
+
+	// Bail if we do not have a hot callee.
+	callee, weight := findHotConcreteFunctionCallee(p, fn, call)
+	if callee == nil {
+		return nil, nil, 0
+	}
+
+	// TODO(go.dev/issue/61577): Closures need the closure context passed
+	// via the context register. That requires extra plumbing that we
+	// haven't done yet.
+	if callee.OClosure != nil {
+		if base.Debug.PGODebug >= 3 {
+			fmt.Printf("callee %s is a closure, skipping\n", ir.FuncName(callee))
+		}
+		return nil, nil, 0
+	}
+	// runtime.memhash_varlen does not look like a closure, but it uses
+	// runtime.getclosureptr to access data encoded by callers, which
+	// are generated by cmd/compile/internal/reflectdata.genhash.
+	if callee.Sym().Pkg.Path == "runtime" && callee.Sym().Name == "memhash_varlen" {
+		if base.Debug.PGODebug >= 3 {
+			fmt.Printf("callee %s is a closure (runtime.memhash_varlen), skipping\n", ir.FuncName(callee))
+		}
+		return nil, nil, 0
+	}
+	// TODO(prattmic): We don't properly handle methods as callees in two
+	// different dimensions:
+	//
+	// 1. Method expressions. e.g.,
+	//
+	//      var fn func(*os.File, []byte) (int, error) = (*os.File).Read
+	//
+	// In this case, typ will report *os.File as the receiver while
+	// ctyp reports it as the first argument. types.Identical ignores
+	// receiver parameters, so it treats these as different, even though
+	// they are still call compatible.
+	//
+	// 2. Method values. e.g.,
+	//
+	//      var f *os.File
+	//      var fn func([]byte) (int, error) = f.Read
+	//
+	// types.Identical will treat these as compatible (since receiver
+	// parameters are ignored). However, in this case, we do not call
+	// (*os.File).Read directly. Instead, f is stored in closure context
+	// and we call the wrapper (*os.File).Read-fm. However, runtime/pprof
+	// hides wrappers from profiles, making it appear that there is a call
+	// directly to the method. We could recognize this pattern and
+	// return the wrapper rather than the method.
+	//
+	// N.B. perf profiles will report wrapper symbols directly, so
+	// ideally we should support direct wrapper references as well.
+	if callee.Type().Recv() != nil {
+		if base.Debug.PGODebug >= 3 {
+			fmt.Printf("callee %s is a method, skipping\n", ir.FuncName(callee))
+		}
+		return nil, nil, 0
+	}
+
+	// Bail if we know for sure it won't inline.
+	if !shouldPGODevirt(callee) {
+		return nil, nil, 0
+	}
+	// Bail if de-selected by PGO Hash.
+	if !base.PGOHash.MatchPosWithInfo(call.Pos(), "devirt", nil) {
+		return nil, nil, 0
+	}
+
+	return rewriteFunctionCall(call, fn, callee), callee, weight
+}
+
 // shouldPGODevirt checks if we should perform PGO devirtualization to the
 // target function.
 //
@@ -223,6 +352,18 @@
 
 	offset := pgo.NodeLineOffset(call, fn)
 
+	hotter := func(e *pgo.IREdge) bool {
+		if stat.Hottest == "" {
+			return true
+		}
+		if e.Weight != stat.HottestWeight {
+			return e.Weight > stat.HottestWeight
+		}
+		// If weight is the same, arbitrarily sort lexicographically, as
+		// findHotConcreteCallee does.
+		return e.Dst.Name() < stat.Hottest
+	}
+
 	// Sum of all edges from this callsite, regardless of callee.
 	// For direct calls, this should be the same as the single edge
 	// weight (except for multiple calls on one line, which we
@@ -233,7 +374,7 @@
 			continue
 		}
 		stat.Weight += edge.Weight
-		if edge.Weight > stat.HottestWeight {
+		if hotter(edge) {
 			stat.HottestWeight = edge.Weight
 			stat.Hottest = edge.Dst.Name()
 		}
@@ -243,7 +384,7 @@
 	case ir.OCALLFUNC:
 		stat.Interface = false
 
-		callee := pgo.DirectCallee(call.X)
+		callee := pgo.DirectCallee(call.Fun)
 		if callee != nil {
 			stat.Direct = true
 			if stat.Hottest == "" {
@@ -262,11 +403,90 @@
 	return &stat
 }
 
-// rewriteCondCall devirtualizes the given call using a direct method call to
-// concretetyp.
-func rewriteCondCall(call *ir.CallExpr, curfn, callee *ir.Func, concretetyp *types.Type) ir.Node {
+// copyInputs copies the inputs to a call: the receiver (for interface calls)
+// or function value (for function value calls) and the arguments. These
+// expressions are evaluated once and assigned to temporaries.
+//
+// The assignment statement is added to init and the copied receiver/fn
+// expression and copied argument expressions are returned.
+func copyInputs(curfn *ir.Func, pos src.XPos, recvOrFn ir.Node, args []ir.Node, init *ir.Nodes) (ir.Node, []ir.Node) {
+	// Evaluate receiver/fn and argument expressions. The receiver/fn is
+	// used twice but we don't want to cause side effects twice. The
+	// arguments are used in two different calls and we can't trivially
+	// copy them.
+	//
+	// recvOrFn must be first in the assignment list as its side effects
+	// must be ordered before argument side effects.
+	var lhs, rhs []ir.Node
+	newRecvOrFn := typecheck.TempAt(pos, curfn, recvOrFn.Type())
+	lhs = append(lhs, newRecvOrFn)
+	rhs = append(rhs, recvOrFn)
+
+	for _, arg := range args {
+		argvar := typecheck.TempAt(pos, curfn, arg.Type())
+
+		lhs = append(lhs, argvar)
+		rhs = append(rhs, arg)
+	}
+
+	asList := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs)
+	init.Append(typecheck.Stmt(asList))
+
+	return newRecvOrFn, lhs[1:]
+}
+
+// retTemps returns a slice of temporaries to be used for storing result values from call.
+func retTemps(curfn *ir.Func, pos src.XPos, call *ir.CallExpr) []ir.Node {
+	sig := call.Fun.Type()
+	var retvars []ir.Node
+	for _, ret := range sig.Results() {
+		retvars = append(retvars, typecheck.TempAt(pos, curfn, ret.Type))
+	}
+	return retvars
+}
+
+// condCall returns an ir.InlinedCallExpr that performs a call to thenCall if
+// cond is true and elseCall if cond is false. The return variables of the
+// InlinedCallExpr evaluate to the return values from the call.
+func condCall(curfn *ir.Func, pos src.XPos, cond ir.Node, thenCall, elseCall *ir.CallExpr, init ir.Nodes) *ir.InlinedCallExpr {
+	// Doesn't matter whether we use thenCall or elseCall, they must have
+	// the same return types.
+	retvars := retTemps(curfn, pos, thenCall)
+
+	var thenBlock, elseBlock ir.Nodes
+	if len(retvars) == 0 {
+		thenBlock.Append(thenCall)
+		elseBlock.Append(elseCall)
+	} else {
+		// Copy slice so edits in one location don't affect another.
+		thenRet := append([]ir.Node(nil), retvars...)
+		thenAsList := ir.NewAssignListStmt(pos, ir.OAS2, thenRet, []ir.Node{thenCall})
+		thenBlock.Append(typecheck.Stmt(thenAsList))
+
+		elseRet := append([]ir.Node(nil), retvars...)
+		elseAsList := ir.NewAssignListStmt(pos, ir.OAS2, elseRet, []ir.Node{elseCall})
+		elseBlock.Append(typecheck.Stmt(elseAsList))
+	}
+
+	nif := ir.NewIfStmt(pos, cond, thenBlock, elseBlock)
+	nif.SetInit(init)
+	nif.Likely = true
+
+	body := []ir.Node{typecheck.Stmt(nif)}
+
+	// This isn't really an inlined call of course, but InlinedCallExpr
+	// makes handling reassignment of return values easier.
+	res := ir.NewInlinedCallExpr(pos, body, retvars)
+	res.SetType(thenCall.Type())
+	res.SetTypecheck(1)
+	return res
+}
+
+// rewriteInterfaceCall devirtualizes the given interface call using a direct
+// method call to concretetyp.
+func rewriteInterfaceCall(call *ir.CallExpr, curfn, callee *ir.Func, concretetyp *types.Type) ir.Node {
 	if base.Flag.LowerM != 0 {
-		fmt.Printf("%v: PGO devirtualizing %v to %v\n", ir.Line(call), call.X, callee)
+		fmt.Printf("%v: PGO devirtualizing interface call %v to %v\n", ir.Line(call), call.Fun, callee)
 	}
 
 	// We generate an OINCALL of:
@@ -297,91 +517,106 @@
 	// making it less likely to inline. We may want to compensate for this
 	// somehow.
 
-	var retvars []ir.Node
-
-	sig := call.X.Type()
-
-	for _, ret := range sig.Results().FieldSlice() {
-		retvars = append(retvars, typecheck.Temp(ret.Type))
-	}
-
-	sel := call.X.(*ir.SelectorExpr)
+	sel := call.Fun.(*ir.SelectorExpr)
 	method := sel.Sel
 	pos := call.Pos()
 	init := ir.TakeInit(call)
 
-	// Evaluate receiver and argument expressions. The receiver is used
-	// twice but we don't want to cause side effects twice. The arguments
-	// are used in two different calls and we can't trivially copy them.
-	//
-	// recv must be first in the assignment list as its side effects must
-	// be ordered before argument side effects.
-	var lhs, rhs []ir.Node
-	recv := typecheck.Temp(sel.X.Type())
-	lhs = append(lhs, recv)
-	rhs = append(rhs, sel.X)
-
-	// Move arguments to assignments prior to the if statement. We cannot
-	// simply copy the args' IR, as some IR constructs cannot be copied,
-	// such as labels (possible in InlinedCall nodes).
-	args := call.Args.Take()
-	for _, arg := range args {
-		argvar := typecheck.Temp(arg.Type())
-
-		lhs = append(lhs, argvar)
-		rhs = append(rhs, arg)
-	}
-
-	asList := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs)
-	init.Append(typecheck.Stmt(asList))
+	recv, args := copyInputs(curfn, pos, sel.X, call.Args.Take(), &init)
 
 	// Copy slice so edits in one location don't affect another.
-	argvars := append([]ir.Node(nil), lhs[1:]...)
+	argvars := append([]ir.Node(nil), args...)
 	call.Args = argvars
 
-	tmpnode := typecheck.Temp(concretetyp)
-	tmpok := typecheck.Temp(types.Types[types.TBOOL])
+	tmpnode := typecheck.TempAt(base.Pos, curfn, concretetyp)
+	tmpok := typecheck.TempAt(base.Pos, curfn, types.Types[types.TBOOL])
 
 	assert := ir.NewTypeAssertExpr(pos, recv, concretetyp)
 
 	assertAsList := ir.NewAssignListStmt(pos, ir.OAS2, []ir.Node{tmpnode, tmpok}, []ir.Node{typecheck.Expr(assert)})
 	init.Append(typecheck.Stmt(assertAsList))
 
-	concreteCallee := typecheck.Callee(ir.NewSelectorExpr(pos, ir.OXDOT, tmpnode, method))
+	concreteCallee := typecheck.XDotMethod(pos, tmpnode, method, true)
 	// Copy slice so edits in one location don't affect another.
 	argvars = append([]ir.Node(nil), argvars...)
-	concreteCall := typecheck.Call(pos, concreteCallee, argvars, call.IsDDD)
+	concreteCall := typecheck.Call(pos, concreteCallee, argvars, call.IsDDD).(*ir.CallExpr)
 
-	var thenBlock, elseBlock ir.Nodes
-	if len(retvars) == 0 {
-		thenBlock.Append(concreteCall)
-		elseBlock.Append(call)
-	} else {
-		// Copy slice so edits in one location don't affect another.
-		thenRet := append([]ir.Node(nil), retvars...)
-		thenAsList := ir.NewAssignListStmt(pos, ir.OAS2, thenRet, []ir.Node{concreteCall})
-		thenBlock.Append(typecheck.Stmt(thenAsList))
-
-		elseRet := append([]ir.Node(nil), retvars...)
-		elseAsList := ir.NewAssignListStmt(pos, ir.OAS2, elseRet, []ir.Node{call})
-		elseBlock.Append(typecheck.Stmt(elseAsList))
-	}
-
-	cond := ir.NewIfStmt(pos, nil, nil, nil)
-	cond.SetInit(init)
-	cond.Cond = tmpok
-	cond.Body = thenBlock
-	cond.Else = elseBlock
-	cond.Likely = true
-
-	body := []ir.Node{typecheck.Stmt(cond)}
-
-	res := ir.NewInlinedCallExpr(pos, body, retvars)
-	res.SetType(call.Type())
-	res.SetTypecheck(1)
+	res := condCall(curfn, pos, tmpok, concreteCall, call, init)
 
 	if base.Debug.PGODebug >= 3 {
-		fmt.Printf("PGO devirtualizing call to %+v. After: %+v\n", concretetyp, res)
+		fmt.Printf("PGO devirtualizing interface call to %+v. After: %+v\n", concretetyp, res)
+	}
+
+	return res
+}
+
+// rewriteFunctionCall devirtualizes the given OCALLFUNC using a direct
+// function call to callee.
+func rewriteFunctionCall(call *ir.CallExpr, curfn, callee *ir.Func) ir.Node {
+	if base.Flag.LowerM != 0 {
+		fmt.Printf("%v: PGO devirtualizing function call %v to %v\n", ir.Line(call), call.Fun, callee)
+	}
+
+	// We generate an OINCALL of:
+	//
+	// var fn FuncType
+	//
+	// var arg1 A1
+	// var argN AN
+	//
+	// var ret1 R1
+	// var retN RN
+	//
+	// fn, arg1, argN = fn expr, arg1 expr, argN expr
+	//
+	// fnPC := internal/abi.FuncPCABIInternal(fn)
+	// concretePC := internal/abi.FuncPCABIInternal(concrete)
+	//
+	// if fnPC == concretePC {
+	//   ret1, retN = concrete(arg1, ... argN) // Same closure context passed (TODO)
+	// } else {
+	//   ret1, retN = fn(arg1, ... argN)
+	// }
+	//
+	// OINCALL retvars: ret1, ... retN
+	//
+	// This isn't really an inlined call of course, but InlinedCallExpr
+	// makes handling reassignment of return values easier.
+
+	pos := call.Pos()
+	init := ir.TakeInit(call)
+
+	fn, args := copyInputs(curfn, pos, call.Fun, call.Args.Take(), &init)
+
+	// Copy slice so edits in one location don't affect another.
+	argvars := append([]ir.Node(nil), args...)
+	call.Args = argvars
+
+	// FuncPCABIInternal takes an interface{}, emulate that. This is needed
+	// to ensure we get the MAKEFACE we need for SSA.
+	fnIface := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, types.Types[types.TINTER], fn))
+	calleeIface := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, types.Types[types.TINTER], callee.Nname))
+
+	fnPC := ir.FuncPC(pos, fnIface, obj.ABIInternal)
+	concretePC := ir.FuncPC(pos, calleeIface, obj.ABIInternal)
+
+	pcEq := typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.OEQ, fnPC, concretePC))
+
+	// TODO(go.dev/issue/61577): Handle callees that are closures and need a
+	// copy of the closure context from call. For now, we skip callees that
+	// are closures in maybeDevirtualizeFunctionCall.
+	if callee.OClosure != nil {
+		base.Fatalf("Callee is a closure: %+v", callee)
+	}
+
+	// Copy slice so edits in one location don't affect another.
+	argvars = append([]ir.Node(nil), argvars...)
+	concreteCall := typecheck.Call(pos, callee.Nname, argvars, call.IsDDD).(*ir.CallExpr)
+
+	res := condCall(curfn, pos, pcEq, concreteCall, call, init)
+
+	if base.Debug.PGODebug >= 3 {
+		fmt.Printf("PGO devirtualizing function call to %+v. After: %+v\n", ir.FuncName(callee), res)
 	}
 
 	return res
@@ -404,7 +639,7 @@
 		base.Fatalf("Call isn't OCALLINTER: %+v", call)
 	}
 
-	sel, ok := call.X.(*ir.SelectorExpr)
+	sel, ok := call.Fun.(*ir.SelectorExpr)
 	if !ok {
 		base.Fatalf("OCALLINTER doesn't contain SelectorExpr: %+v", call)
 	}
@@ -412,15 +647,15 @@
 	return sel.X.Type(), sel.Sel
 }
 
-// findHotConcreteCallee returns the *ir.Func of the hottest callee of an
-// indirect call, if available, and its edge weight.
-func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (*ir.Func, int64) {
+// findHotConcreteCallee returns the *ir.Func of the hottest callee of a call,
+// if available, and its edge weight. extraFn can perform additional
+// applicability checks on each candidate edge. If extraFn returns false,
+// the candidate will not be considered a valid callee.
+func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr, extraFn func(callerName string, callOffset int, candidate *pgo.IREdge) bool) (*ir.Func, int64) {
 	callerName := ir.LinkFuncName(caller)
 	callerNode := p.WeightedCG.IRNodes[callerName]
 	callOffset := pgo.NodeLineOffset(call, caller)
 
-	inter, method := interfaceCallRecvTypeAndMethod(call)
-
 	var hottest *pgo.IREdge
 
 	// Returns true if e is hotter than hottest.
@@ -440,9 +675,12 @@
 		// Now e.Weight == hottest.Weight, we must select on other
 		// criteria.
 
-		if hottest.Dst.AST == nil && e.Dst.AST != nil {
-			// Prefer the edge with IR available.
-			return true
+		// If only one edge has IR, prefer that one.
+		if (hottest.Dst.AST == nil) != (e.Dst.AST == nil) {
+			if e.Dst.AST != nil {
+				return true
+			}
+			return false
 		}
 
 		// Arbitrary, but the callee names will always differ. Select
@@ -484,41 +722,7 @@
 			continue
 		}
 
-		ctyp := methodRecvType(e.Dst.AST)
-		if ctyp == nil {
-			// Not a method.
-			// TODO(prattmic): Support non-interface indirect calls.
-			if base.Debug.PGODebug >= 2 {
-				fmt.Printf("%v: edge %s:%d -> %s (weight %d): callee not a method\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
-			}
-			continue
-		}
-
-		// If ctyp doesn't implement inter it is most likely from a
-		// different call on the same line
-		if !typecheck.Implements(ctyp, inter) {
-			// TODO(prattmic): this is overly strict. Consider if
-			// ctyp is a partial implementation of an interface
-			// that gets embedded in types that complete the
-			// interface. It would still be OK to devirtualize a
-			// call to this method.
-			//
-			// What we'd need to do is check that the function
-			// pointer in the itab matches the method we want,
-			// rather than doing a full type assertion.
-			if base.Debug.PGODebug >= 2 {
-				why := typecheck.ImplementsExplain(ctyp, inter)
-				fmt.Printf("%v: edge %s:%d -> %s (weight %d): %v doesn't implement %v (%s)\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, ctyp, inter, why)
-			}
-			continue
-		}
-
-		// If the method name is different it is most likely from a
-		// different call on the same line
-		if !strings.HasSuffix(e.Dst.Name(), "."+method.Name) {
-			if base.Debug.PGODebug >= 2 {
-				fmt.Printf("%v: edge %s:%d -> %s (weight %d): callee is a different method\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
-			}
+		if extraFn != nil && !extraFn(callerName, callOffset, e) {
 			continue
 		}
 
@@ -540,3 +744,77 @@
 	}
 	return hottest.Dst.AST, hottest.Weight
 }
+
+// findHotConcreteInterfaceCallee returns the *ir.Func of the hottest callee of an
+// interface call, if available, and its edge weight.
+func findHotConcreteInterfaceCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (*ir.Func, int64) {
+	inter, method := interfaceCallRecvTypeAndMethod(call)
+
+	return findHotConcreteCallee(p, caller, call, func(callerName string, callOffset int, e *pgo.IREdge) bool {
+		ctyp := methodRecvType(e.Dst.AST)
+		if ctyp == nil {
+			// Not a method.
+			// TODO(prattmic): Support non-interface indirect calls.
+			if base.Debug.PGODebug >= 2 {
+				fmt.Printf("%v: edge %s:%d -> %s (weight %d): callee not a method\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
+			}
+			return false
+		}
+
+		// If ctyp doesn't implement inter it is most likely from a
+		// different call on the same line
+		if !typecheck.Implements(ctyp, inter) {
+			// TODO(prattmic): this is overly strict. Consider if
+			// ctyp is a partial implementation of an interface
+			// that gets embedded in types that complete the
+			// interface. It would still be OK to devirtualize a
+			// call to this method.
+			//
+			// What we'd need to do is check that the function
+			// pointer in the itab matches the method we want,
+			// rather than doing a full type assertion.
+			if base.Debug.PGODebug >= 2 {
+				why := typecheck.ImplementsExplain(ctyp, inter)
+				fmt.Printf("%v: edge %s:%d -> %s (weight %d): %v doesn't implement %v (%s)\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, ctyp, inter, why)
+			}
+			return false
+		}
+
+		// If the method name is different it is most likely from a
+		// different call on the same line
+		if !strings.HasSuffix(e.Dst.Name(), "."+method.Name) {
+			if base.Debug.PGODebug >= 2 {
+				fmt.Printf("%v: edge %s:%d -> %s (weight %d): callee is a different method\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
+			}
+			return false
+		}
+
+		return true
+	})
+}
+
+// findHotConcreteFunctionCallee returns the *ir.Func of the hottest callee of an
+// indirect function call, if available, and its edge weight.
+func findHotConcreteFunctionCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (*ir.Func, int64) {
+	typ := call.Fun.Type().Underlying()
+
+	return findHotConcreteCallee(p, caller, call, func(callerName string, callOffset int, e *pgo.IREdge) bool {
+		ctyp := e.Dst.AST.Type().Underlying()
+
+		// If ctyp doesn't match typ it is most likely from a different
+		// call on the same line.
+		//
+		// Note that we are comparing underlying types, as different
+		// defined types are OK. e.g., a call to a value of type
+		// net/http.HandlerFunc can be devirtualized to a function with
+		// the same underlying type.
+		if !types.Identical(typ, ctyp) {
+			if base.Debug.PGODebug >= 2 {
+				fmt.Printf("%v: edge %s:%d -> %s (weight %d): %v doesn't match %v\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, ctyp, typ)
+			}
+			return false
+		}
+
+		return true
+	})
+}
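(Illustrative aside, not part of the patch; handleIndex and dispatch are assumed names.) The underlying-type check in findHotConcreteFunctionCallee means a call through a defined function type such as net/http.HandlerFunc can still be matched against, and conditionally rewritten to, a plain function with the same underlying signature when function-value devirtualization is enabled (base.Debug.PGODevirtualize >= 2):

package main

import (
	"net/http"
	"net/http/httptest"
)

// handleIndex stands in for a hot callee seen in the profile.
func handleIndex(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("index"))
}

// dispatch calls through a value of defined type http.HandlerFunc. Its
// underlying type is func(http.ResponseWriter, *http.Request), which is
// what the types.Identical check above compares against the profiled
// callee's signature.
func dispatch(h http.HandlerFunc, w http.ResponseWriter, r *http.Request) {
	h(w, r) // indirect call; with a hot profile edge to handleIndex,
	// it may be rewritten into a guarded direct call
}

func main() {
	w := httptest.NewRecorder()
	r := httptest.NewRequest("GET", "/", nil)
	dispatch(handleIndex, w, r)
}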
diff --git a/src/cmd/compile/internal/devirtualize/pgo_test.go b/src/cmd/compile/internal/devirtualize/pgo_test.go
new file mode 100644
index 0000000..84c96df
--- /dev/null
+++ b/src/cmd/compile/internal/devirtualize/pgo_test.go
@@ -0,0 +1,217 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package devirtualize
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/pgo"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/src"
+	"testing"
+)
+
+func init() {
+	// These are the few constants that need to be initialized in order to use
+	// the types package without using the typecheck package by calling
+	// typecheck.InitUniverse() (the normal way to initialize the types package).
+	types.PtrSize = 8
+	types.RegSize = 8
+	types.MaxWidth = 1 << 50
+	typecheck.InitUniverse()
+	base.Ctxt = &obj.Link{}
+	base.Debug.PGODebug = 3
+}
+
+func makePos(b *src.PosBase, line, col uint) src.XPos {
+	return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
+}
+
+type profileBuilder struct {
+	p *pgo.Profile
+}
+
+func newProfileBuilder() *profileBuilder {
+	// findHotConcreteCallee only uses pgo.Profile.WeightedCG, so we're
+	// going to take a shortcut and only construct that.
+	return &profileBuilder{
+		p: &pgo.Profile{
+			WeightedCG: &pgo.IRGraph{
+				IRNodes: make(map[string]*pgo.IRNode),
+			},
+		},
+	}
+}
+
+// Profile returns the constructed profile.
+func (p *profileBuilder) Profile() *pgo.Profile {
+	return p.p
+}
+
+// NewNode creates a new IRNode and adds it to the profile.
+//
+// fn may be nil, in which case the node's LinkerSymbolName is set to name instead.
+func (p *profileBuilder) NewNode(name string, fn *ir.Func) *pgo.IRNode {
+	n := &pgo.IRNode{
+		OutEdges: make(map[pgo.NamedCallEdge]*pgo.IREdge),
+	}
+	if fn != nil {
+		n.AST = fn
+	} else {
+		n.LinkerSymbolName = name
+	}
+	p.p.WeightedCG.IRNodes[name] = n
+	return n
+}
+
+// Add a new call edge from caller to callee.
+func addEdge(caller, callee *pgo.IRNode, offset int, weight int64) {
+	namedEdge := pgo.NamedCallEdge{
+		CallerName:     caller.Name(),
+		CalleeName:     callee.Name(),
+		CallSiteOffset: offset,
+	}
+	irEdge := &pgo.IREdge{
+		Src:            caller,
+		Dst:            callee,
+		CallSiteOffset: offset,
+		Weight:         weight,
+	}
+	caller.OutEdges[namedEdge] = irEdge
+}
+
+// Create a new struct type named structName with a method named methName and
+// return the method.
+func makeStructWithMethod(pkg *types.Pkg, structName, methName string) *ir.Func {
+	// type structName struct{}
+	structType := types.NewStruct(nil)
+
+	// func (structName) methodName()
+	recv := types.NewField(src.NoXPos, typecheck.Lookup(structName), structType)
+	sig := types.NewSignature(recv, nil, nil)
+	fn := ir.NewFunc(src.NoXPos, src.NoXPos, pkg.Lookup(structName+"."+methName), sig)
+
+	// Add the method to the struct.
+	structType.SetMethods([]*types.Field{types.NewField(src.NoXPos, typecheck.Lookup(methName), sig)})
+
+	return fn
+}
+
+func TestFindHotConcreteInterfaceCallee(t *testing.T) {
+	p := newProfileBuilder()
+
+	pkgFoo := types.NewPkg("example.com/foo", "foo")
+	basePos := src.NewFileBase("foo.go", "/foo.go")
+
+	const (
+		// Caller start line.
+		callerStart = 42
+
+		// The line offset of the call we care about.
+		callOffset = 1
+
+		// The line offset of some other call we don't care about.
+		wrongCallOffset = 2
+	)
+
+	// type IFace interface {
+	//	Foo()
+	// }
+	fooSig := types.NewSignature(types.FakeRecv(), nil, nil)
+	method := types.NewField(src.NoXPos, typecheck.Lookup("Foo"), fooSig)
+	iface := types.NewInterface([]*types.Field{method})
+
+	callerFn := ir.NewFunc(makePos(basePos, callerStart, 1), src.NoXPos, pkgFoo.Lookup("Caller"), types.NewSignature(nil, nil, nil))
+
+	hotCalleeFn := makeStructWithMethod(pkgFoo, "HotCallee", "Foo")
+	coldCalleeFn := makeStructWithMethod(pkgFoo, "ColdCallee", "Foo")
+	wrongLineCalleeFn := makeStructWithMethod(pkgFoo, "WrongLineCallee", "Foo")
+	wrongMethodCalleeFn := makeStructWithMethod(pkgFoo, "WrongMethodCallee", "Bar")
+
+	callerNode := p.NewNode("example.com/foo.Caller", callerFn)
+	hotCalleeNode := p.NewNode("example.com/foo.HotCallee.Foo", hotCalleeFn)
+	coldCalleeNode := p.NewNode("example.com/foo.ColdCallee.Foo", coldCalleeFn)
+	wrongLineCalleeNode := p.NewNode("example.com/foo.WrongCalleeLine.Foo", wrongLineCalleeFn)
+	wrongMethodCalleeNode := p.NewNode("example.com/foo.WrongCalleeMethod.Foo", wrongMethodCalleeFn)
+
+	hotMissingCalleeNode := p.NewNode("example.com/bar.HotMissingCallee.Foo", nil)
+
+	addEdge(callerNode, wrongLineCalleeNode, wrongCallOffset, 100) // Really hot, but wrong line.
+	addEdge(callerNode, wrongMethodCalleeNode, callOffset, 100)    // Really hot, but wrong method type.
+	addEdge(callerNode, hotCalleeNode, callOffset, 10)
+	addEdge(callerNode, coldCalleeNode, callOffset, 1)
+
+	// Equal weight, but IR missing.
+	//
+	// N.B. example.com/bar sorts lexicographically before example.com/foo,
+	// so if the IR availability of hotCalleeNode doesn't get precedence,
+	// this would be mistakenly selected.
+	addEdge(callerNode, hotMissingCalleeNode, callOffset, 10)
+
+	// IFace.Foo()
+	sel := typecheck.NewMethodExpr(src.NoXPos, iface, typecheck.Lookup("Foo"))
+	call := ir.NewCallExpr(makePos(basePos, callerStart+callOffset, 1), ir.OCALLINTER, sel, nil)
+
+	gotFn, gotWeight := findHotConcreteInterfaceCallee(p.Profile(), callerFn, call)
+	if gotFn != hotCalleeFn {
+		t.Errorf("findHotConcreteInterfaceCallee func got %v want %v", gotFn, hotCalleeFn)
+	}
+	if gotWeight != 10 {
+		t.Errorf("findHotConcreteInterfaceCallee weight got %v want 10", gotWeight)
+	}
+}
+
+func TestFindHotConcreteFunctionCallee(t *testing.T) {
+	// TestFindHotConcreteInterfaceCallee already covered basic weight
+	// comparisons, which is shared logic. Here we just test type signature
+	// disambiguation.
+
+	p := newProfileBuilder()
+
+	pkgFoo := types.NewPkg("example.com/foo", "foo")
+	basePos := src.NewFileBase("foo.go", "/foo.go")
+
+	const (
+		// Caller start line.
+		callerStart = 42
+
+		// The line offset of the call we care about.
+		callOffset = 1
+	)
+
+	callerFn := ir.NewFunc(makePos(basePos, callerStart, 1), src.NoXPos, pkgFoo.Lookup("Caller"), types.NewSignature(nil, nil, nil))
+
+	// func HotCallee()
+	hotCalleeFn := ir.NewFunc(src.NoXPos, src.NoXPos, pkgFoo.Lookup("HotCallee"), types.NewSignature(nil, nil, nil))
+
+	// func WrongCallee() bool
+	wrongCalleeFn := ir.NewFunc(src.NoXPos, src.NoXPos, pkgFoo.Lookup("WrongCallee"), types.NewSignature(nil, nil,
+		[]*types.Field{
+			types.NewField(src.NoXPos, nil, types.Types[types.TBOOL]),
+		},
+	))
+
+	callerNode := p.NewNode("example.com/foo.Caller", callerFn)
+	hotCalleeNode := p.NewNode("example.com/foo.HotCallee", hotCalleeFn)
+	wrongCalleeNode := p.NewNode("example.com/foo.WrongCallee", wrongCalleeFn)
+
+	addEdge(callerNode, wrongCalleeNode, callOffset, 100) // Really hot, but wrong function type.
+	addEdge(callerNode, hotCalleeNode, callOffset, 10)
+
+	// var fn func()
+	name := ir.NewNameAt(src.NoXPos, typecheck.Lookup("fn"), types.NewSignature(nil, nil, nil))
+	// fn()
+	call := ir.NewCallExpr(makePos(basePos, callerStart+callOffset, 1), ir.OCALL, name, nil)
+
+	gotFn, gotWeight := findHotConcreteFunctionCallee(p.Profile(), callerFn, call)
+	if gotFn != hotCalleeFn {
+		t.Errorf("findHotConcreteFunctionCallee func got %v want %v", gotFn, hotCalleeFn)
+	}
+	if gotWeight != 10 {
+		t.Errorf("findHotConcreteFunctionCallee weight got %v want 10", gotWeight)
+	}
+}
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
index 886250a..e9553d1 100644
--- a/src/cmd/compile/internal/dwarfgen/dwarf.go
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -23,7 +23,7 @@
 	"cmd/internal/src"
 )
 
-func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) (scopes []dwarf.Scope, inlcalls dwarf.InlCalls, startPos src.XPos) {
+func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn obj.Func) (scopes []dwarf.Scope, inlcalls dwarf.InlCalls) {
 	fn := curfn.(*ir.Func)
 
 	if fn.Nname != nil {
@@ -128,7 +128,7 @@
 	if base.Flag.GenDwarfInl > 0 {
 		inlcalls = assembleInlines(fnsym, dwarfVars)
 	}
-	return scopes, inlcalls, fn.Pos()
+	return scopes, inlcalls
 }
 
 func declPos(decl *ir.Name) src.XPos {
@@ -204,7 +204,7 @@
 		if c == '.' || n.Type().IsUntyped() {
 			continue
 		}
-		if n.Class == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
+		if n.Class == ir.PPARAM && !ssa.CanSSA(n.Type()) {
 			// SSA-able args get location lists, and may move in and
 			// out of registers, so those are handled elsewhere.
 			// Autos and named output params seem to get handled
@@ -270,13 +270,10 @@
 func sortDeclsAndVars(fn *ir.Func, decls []*ir.Name, vars []*dwarf.Var) {
 	paramOrder := make(map[*ir.Name]int)
 	idx := 1
-	for _, selfn := range types.RecvsParamsResults {
-		fsl := selfn(fn.Type()).FieldSlice()
-		for _, f := range fsl {
-			if n, ok := f.Nname.(*ir.Name); ok {
-				paramOrder[n] = idx
-				idx++
-			}
+	for _, f := range fn.Type().RecvParamsResults() {
+		if n, ok := f.Nname.(*ir.Name); ok {
+			paramOrder[n] = idx
+			idx++
 		}
 	}
 	sort.Stable(varsAndDecls{decls, vars, paramOrder})
@@ -326,7 +323,7 @@
 		c := n.Sym().Name[0]
 		// Avoid reporting "_" parameters, since if there are more than
 		// one, it can result in a collision later on, as in #23179.
-		if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
+		if n.Sym().Name == "_" || c == '.' || n.Type().IsUntyped() {
 			continue
 		}
 		rdcl = append(rdcl, n)
@@ -527,9 +524,7 @@
 // in the DWARF info.
 func RecordFlags(flags ...string) {
 	if base.Ctxt.Pkgpath == "" {
-		// We can't record the flags if we don't know what the
-		// package name is.
-		return
+		panic("missing pkgpath")
 	}
 
 	type BoolFlag interface {
diff --git a/src/cmd/compile/internal/dwarfgen/dwinl.go b/src/cmd/compile/internal/dwarfgen/dwinl.go
index 99e1ce9..655e7c6 100644
--- a/src/cmd/compile/internal/dwarfgen/dwinl.go
+++ b/src/cmd/compile/internal/dwarfgen/dwinl.go
@@ -124,18 +124,16 @@
 		// caller.
 		synthCount := len(m)
 		for _, v := range sl {
-			canonName := unversion(v.Name)
 			vp := varPos{
-				DeclName: canonName,
+				DeclName: v.Name,
 				DeclFile: v.DeclFile,
 				DeclLine: v.DeclLine,
 				DeclCol:  v.DeclCol,
 			}
-			synthesized := strings.HasPrefix(v.Name, "~r") || canonName == "_" || strings.HasPrefix(v.Name, "~b")
+			synthesized := strings.HasPrefix(v.Name, "~") || v.Name == "_"
 			if idx, found := m[vp]; found {
 				v.ChildIndex = int32(idx)
 				v.IsInAbstract = !synthesized
-				v.Name = canonName
 			} else {
 				// Variable can't be found in the pre-inline dcl list.
 				// In the top-level case (ii=0) this can happen
@@ -217,16 +215,7 @@
 	if base.Debug.DwarfInl != 0 {
 		base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
 	}
-	base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
-}
-
-// Undo any versioning performed when a name was written
-// out as part of export data.
-func unversion(name string) string {
-	if i := strings.Index(name, "·"); i > 0 {
-		name = name[:i]
-	}
-	return name
+	base.Ctxt.DwarfAbstractFunc(ifn, fn)
 }
 
 // Given a function that was inlined as part of the compilation, dig
@@ -241,7 +230,7 @@
 	for i, n := range dcl {
 		pos := base.Ctxt.InnermostPos(n.Pos())
 		vp := varPos{
-			DeclName: unversion(n.Sym().Name),
+			DeclName: n.Sym().Name,
 			DeclFile: pos.RelFilename(),
 			DeclLine: pos.RelLine(),
 			DeclCol:  pos.RelCol(),
@@ -273,13 +262,11 @@
 	// Create new entry for this inline
 	inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
 	callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
-	callPos := base.Ctxt.PosTable.Pos(callXPos)
-	callFileSym := base.Ctxt.Lookup(callPos.Base().SymFilename())
+	callPos := base.Ctxt.InnermostPos(callXPos)
 	absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
 	ic := dwarf.InlCall{
 		InlIndex:  inlIdx,
-		CallFile:  callFileSym,
-		CallLine:  uint32(callPos.RelLine()),
+		CallPos:   callPos,
 		AbsFunSym: absFnSym,
 		Root:      parCallIdx == -1,
 	}
diff --git a/src/cmd/compile/internal/dwarfgen/scope_test.go b/src/cmd/compile/internal/dwarfgen/scope_test.go
index ae4a87c..ee4170e 100644
--- a/src/cmd/compile/internal/dwarfgen/scope_test.go
+++ b/src/cmd/compile/internal/dwarfgen/scope_test.go
@@ -50,13 +50,14 @@
 
 var testfile = []testline{
 	{line: "package main"},
+	{line: "var sink any"},
 	{line: "func f1(x int) { }"},
 	{line: "func f2(x int) { }"},
 	{line: "func f3(x int) { }"},
 	{line: "func f4(x int) { }"},
 	{line: "func f5(x int) { }"},
 	{line: "func f6(x int) { }"},
-	{line: "func fi(x interface{}) { if a, ok := x.(error); ok { a.Error() } }"},
+	{line: "func leak(x interface{}) { sink = x }"},
 	{line: "func gret1() int { return 2 }"},
 	{line: "func gretbool() bool { return true }"},
 	{line: "func gret3() (int, int, int) { return 0, 1, 2 }"},
@@ -177,7 +178,7 @@
 	{line: "		b := 2", scopes: []int{1}, vars: []string{"var &b *int", "var p *int"}},
 	{line: "		p := &b", scopes: []int{1}},
 	{line: "		f1(a)", scopes: []int{1}},
-	{line: "		fi(p)", scopes: []int{1}},
+	{line: "		leak(p)", scopes: []int{1}},
 	{line: "	}"},
 	{line: "}"},
 	{line: "var fglob func() int"},
diff --git a/src/cmd/compile/internal/escape/assign.go b/src/cmd/compile/internal/escape/assign.go
index 80697bf..6af5388 100644
--- a/src/cmd/compile/internal/escape/assign.go
+++ b/src/cmd/compile/internal/escape/assign.go
@@ -39,10 +39,14 @@
 		if n.X.Type().IsArray() {
 			k = e.addr(n.X)
 		} else {
-			e.discard(n.X)
+			e.mutate(n.X)
 		}
-	case ir.ODEREF, ir.ODOTPTR:
-		e.discard(n)
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		e.mutate(n.X)
+	case ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		e.mutate(n.X)
 	case ir.OINDEXMAP:
 		n := n.(*ir.IndexExpr)
 		e.discard(n.X)
@@ -52,6 +56,10 @@
 	return k
 }
 
+func (e *escape) mutate(n ir.Node) {
+	e.expr(e.mutatorHole(), n)
+}
+
 func (e *escape) addrs(l ir.Nodes) []hole {
 	var ks []hole
 	for _, n := range l {
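(Illustrative aside, not part of the patch.) The new e.mutate calls above apply to assignment targets like the following, where the analysis now records a write to memory reachable from the pointer operand instead of discarding it:

package sketch

type box struct{ v *int }

// store assigns through its pointer operands: "*p = 1" (ODEREF on the left)
// and "b.v = x" (ODOTPTR on the left) are the assignment targets that the
// change above now analyzes with e.mutate (the mutator hole) rather than
// e.discard.
func store(p *int, b *box, x *int) {
	*p = 1
	b.v = x
}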
diff --git a/src/cmd/compile/internal/escape/call.go b/src/cmd/compile/internal/escape/call.go
index c69eca1..4a3753a 100644
--- a/src/cmd/compile/internal/escape/call.go
+++ b/src/cmd/compile/internal/escape/call.go
@@ -16,38 +16,9 @@
 // should contain the holes representing where the function callee's
 // results flows.
 func (e *escape) call(ks []hole, call ir.Node) {
-	var init ir.Nodes
-	e.callCommon(ks, call, &init, nil)
-	if len(init) != 0 {
-		call.(ir.InitNode).PtrInit().Append(init...)
-	}
-}
-
-func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir.Func) {
-
-	// argumentPragma handles escape analysis of argument *argp to the
-	// given hole. If the function callee is known, pragma is the
-	// function's pragma flags; otherwise 0.
-	argumentFunc := func(fn *ir.Name, k hole, argp *ir.Node) {
-		e.rewriteArgument(argp, init, call, fn, wrapper)
-
-		e.expr(k.note(call, "call parameter"), *argp)
-	}
-
-	argument := func(k hole, argp *ir.Node) {
-		argumentFunc(nil, k, argp)
-	}
-
-	argumentRType := func(rtypep *ir.Node) {
-		rtype := *rtypep
-		if rtype == nil {
-			return
-		}
-		// common case: static rtype/itab argument, which can be evaluated within the wrapper instead.
-		if addr, ok := rtype.(*ir.AddrExpr); ok && addr.Op() == ir.OADDR && addr.X.Op() == ir.OLINKSYMOFFSET {
-			return
-		}
-		e.wrapExpr(rtype.Pos(), rtypep, init, call, wrapper)
+	argument := func(k hole, arg ir.Node) {
+		// TODO(mdempsky): Should be "call argument".
+		e.expr(k.note(call, "call parameter"), arg)
 	}
 
 	switch call.Op() {
@@ -55,7 +26,7 @@
 		ir.Dump("esc", call)
 		base.Fatalf("unexpected call op: %v", call.Op())
 
-	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+	case ir.OCALLFUNC, ir.OCALLINTER:
 		call := call.(*ir.CallExpr)
 		typecheck.AssertFixedCall(call)
 
@@ -68,64 +39,62 @@
 		var fn *ir.Name
 		switch call.Op() {
 		case ir.OCALLFUNC:
-			// If we have a direct call to a closure (not just one we were
-			// able to statically resolve with ir.StaticValue), mark it as
-			// such so batch.outlives can optimize the flow results.
-			if call.X.Op() == ir.OCLOSURE {
-				call.X.(*ir.ClosureExpr).Func.SetClosureCalled(true)
-			}
-
-			switch v := ir.StaticValue(call.X); v.Op() {
-			case ir.ONAME:
-				if v := v.(*ir.Name); v.Class == ir.PFUNC {
-					fn = v
-				}
-			case ir.OCLOSURE:
-				fn = v.(*ir.ClosureExpr).Func.Nname
-			case ir.OMETHEXPR:
-				fn = ir.MethodExprName(v)
-			}
-		case ir.OCALLMETH:
-			base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+			v := ir.StaticValue(call.Fun)
+			fn = ir.StaticCalleeName(v)
 		}
 
-		fntype := call.X.Type()
+		fntype := call.Fun.Type()
 		if fn != nil {
 			fntype = fn.Type()
 		}
 
 		if ks != nil && fn != nil && e.inMutualBatch(fn) {
-			for i, result := range fn.Type().Results().FieldSlice() {
-				e.expr(ks[i], ir.AsNode(result.Nname))
+			for i, result := range fn.Type().Results() {
+				e.expr(ks[i], result.Nname.(*ir.Name))
 			}
 		}
 
-		var recvp *ir.Node
+		var recvArg ir.Node
 		if call.Op() == ir.OCALLFUNC {
 			// Evaluate callee function expression.
-			//
-			// Note: We use argument and not argumentFunc, because while
-			// call.X here may be an argument to runtime.{new,defer}proc,
-			// it's not an argument to fn itself.
-			argument(e.discardHole(), &call.X)
+			calleeK := e.discardHole()
+			if fn == nil { // unknown callee
+				for _, k := range ks {
+					if k.dst != &e.blankLoc {
+						// The results flow somewhere, but we don't statically
+						// know the callee function. If a closure flows here, we
+						// need to conservatively assume its results might flow to
+						// the heap.
+						calleeK = e.calleeHole().note(call, "callee operand")
+						break
+					}
+				}
+			}
+			e.expr(calleeK, call.Fun)
 		} else {
-			recvp = &call.X.(*ir.SelectorExpr).X
+			recvArg = call.Fun.(*ir.SelectorExpr).X
+		}
+
+		// argumentParam handles escape analysis of assigning a call
+		// argument to its corresponding parameter.
+		argumentParam := func(param *types.Field, arg ir.Node) {
+			e.rewriteArgument(arg, call, fn)
+			argument(e.tagHole(ks, fn, param), arg)
 		}
 
 		args := call.Args
-		if recv := fntype.Recv(); recv != nil {
-			if recvp == nil {
+		if recvParam := fntype.Recv(); recvParam != nil {
+			if recvArg == nil {
 				// Function call using method expression. Receiver argument is
 				// at the front of the regular arguments list.
-				recvp = &args[0]
-				args = args[1:]
+				recvArg, args = args[0], args[1:]
 			}
 
-			argumentFunc(fn, e.tagHole(ks, fn, recv), recvp)
+			argumentParam(recvParam, recvArg)
 		}
 
-		for i, param := range fntype.Params().FieldSlice() {
-			argumentFunc(fn, e.tagHole(ks, fn, param), &args[i])
+		for i, param := range fntype.Params() {
+			argumentParam(param, args[i])
 		}
 
 	case ir.OINLCALL:
@@ -147,84 +116,83 @@
 		// it has enough capacity. Alternatively, a new heap
 		// slice might be allocated, and all slice elements
 		// might flow to heap.
-		appendeeK := ks[0]
+		appendeeK := e.teeHole(ks[0], e.mutatorHole())
 		if args[0].Type().Elem().HasPointers() {
 			appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
 		}
-		argument(appendeeK, &args[0])
+		argument(appendeeK, args[0])
 
 		if call.IsDDD {
 			appendedK := e.discardHole()
 			if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
 				appendedK = e.heapHole().deref(call, "appended slice...")
 			}
-			argument(appendedK, &args[1])
+			argument(appendedK, args[1])
 		} else {
 			for i := 1; i < len(args); i++ {
-				argument(e.heapHole(), &args[i])
+				argument(e.heapHole(), args[i])
 			}
 		}
-		argumentRType(&call.RType)
+		e.discard(call.RType)
 
 	case ir.OCOPY:
 		call := call.(*ir.BinaryExpr)
-		argument(e.discardHole(), &call.X)
+		argument(e.mutatorHole(), call.X)
 
 		copiedK := e.discardHole()
 		if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
 			copiedK = e.heapHole().deref(call, "copied slice")
 		}
-		argument(copiedK, &call.Y)
-		argumentRType(&call.RType)
+		argument(copiedK, call.Y)
+		e.discard(call.RType)
 
 	case ir.OPANIC:
 		call := call.(*ir.UnaryExpr)
-		argument(e.heapHole(), &call.X)
+		argument(e.heapHole(), call.X)
 
 	case ir.OCOMPLEX:
 		call := call.(*ir.BinaryExpr)
-		argument(e.discardHole(), &call.X)
-		argument(e.discardHole(), &call.Y)
+		e.discard(call.X)
+		e.discard(call.Y)
 
-	case ir.ODELETE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+	case ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
 		call := call.(*ir.CallExpr)
-		fixRecoverCall(call)
-		for i := range call.Args {
-			argument(e.discardHole(), &call.Args[i])
+		for _, arg := range call.Args {
+			e.discard(arg)
 		}
-		argumentRType(&call.RType)
+		e.discard(call.RType)
 
-	case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE, ir.OCLEAR:
+	case ir.OMIN, ir.OMAX:
+		call := call.(*ir.CallExpr)
+		for _, arg := range call.Args {
+			argument(ks[0], arg)
+		}
+		e.discard(call.RType)
+
+	case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
 		call := call.(*ir.UnaryExpr)
-		argument(e.discardHole(), &call.X)
+		e.discard(call.X)
+
+	case ir.OCLEAR:
+		call := call.(*ir.UnaryExpr)
+		argument(e.mutatorHole(), call.X)
 
 	case ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
 		call := call.(*ir.UnaryExpr)
-		argument(ks[0], &call.X)
+		argument(ks[0], call.X)
 
 	case ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING:
 		call := call.(*ir.BinaryExpr)
-		argument(ks[0], &call.X)
-		argument(e.discardHole(), &call.Y)
-		argumentRType(&call.RType)
+		argument(ks[0], call.X)
+		e.discard(call.Y)
+		e.discard(call.RType)
 	}
 }
 
 // goDeferStmt analyzes a "go" or "defer" statement.
-//
-// In the process, it also normalizes the statement to always use a
-// simple function call with no arguments and no results. For example,
-// it rewrites:
-//
-//	defer f(x, y)
-//
-// into:
-//
-//	x1, y1 := x, y
-//	defer func() { f(x1, y1) }()
 func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
 	k := e.heapHole()
-	if n.Op() == ir.ODEFER && e.loopDepth == 1 {
+	if n.Op() == ir.ODEFER && e.loopDepth == 1 && n.DeferAt == nil {
 		// Top-level defer arguments don't escape to the heap,
 		// but they do need to last until they're invoked.
 		k = e.later(e.discardHole())
@@ -234,145 +202,75 @@
 		n.SetEsc(ir.EscNever)
 	}
 
-	call := n.Call
-
-	init := n.PtrInit()
-	init.Append(ir.TakeInit(call)...)
-	e.stmts(*init)
-
 	// If the function is already a zero argument/result function call,
 	// just escape analyze it normally.
 	//
 	// Note that the runtime is aware of this optimization for
 	// "go" statements that start in reflect.makeFuncStub or
 	// reflect.methodValueCall.
-	if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
-		if sig := call.X.Type(); sig.NumParams()+sig.NumResults() == 0 {
-			if clo, ok := call.X.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
-				clo.IsGoWrap = true
-			}
-			e.expr(k, call.X)
-			return
-		}
+
+	call, ok := n.Call.(*ir.CallExpr)
+	if !ok || call.Op() != ir.OCALLFUNC {
+		base.FatalfAt(n.Pos(), "expected function call: %v", n.Call)
+	}
+	if sig := call.Fun.Type(); sig.NumParams()+sig.NumResults() != 0 {
+		base.FatalfAt(n.Pos(), "expected signature without parameters or results: %v", sig)
 	}
 
-	// Create a new no-argument function that we'll hand off to defer.
-	fn := ir.NewClosureFunc(n.Pos(), true)
-	fn.SetWrapper(true)
-	fn.Nname.SetType(types.NewSignature(nil, nil, nil))
-	fn.Body = []ir.Node{call}
-	if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
-		// If the callee is a named function, link to the original callee.
-		x := call.X
-		if x.Op() == ir.ONAME && x.(*ir.Name).Class == ir.PFUNC {
-			fn.WrappedFunc = call.X.(*ir.Name).Func
-		} else if x.Op() == ir.OMETHEXPR && ir.MethodExprFunc(x).Nname != nil {
-			fn.WrappedFunc = ir.MethodExprName(x).Func
-		}
-	}
-
-	clo := fn.OClosure
-	if n.Op() == ir.OGO {
+	if clo, ok := call.Fun.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
 		clo.IsGoWrap = true
 	}
 
-	e.callCommon(nil, call, init, fn)
-	e.closures = append(e.closures, closure{e.spill(k, clo), clo})
-
-	// Create new top level call to closure.
-	n.Call = ir.NewCallExpr(call.Pos(), ir.OCALL, clo, nil)
-	ir.WithFunc(e.curfn, func() {
-		typecheck.Stmt(n.Call)
-	})
+	e.expr(k, call.Fun)
 }
 
-// rewriteArgument rewrites the argument *argp of the given call expression.
+// rewriteArgument rewrites the argument arg of the given call expression.
 // fn is the static callee function, if known.
-// wrapper is the go/defer wrapper function for call, if any.
-func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn *ir.Name, wrapper *ir.Func) {
-	var pragma ir.PragmaFlag
-	if fn != nil && fn.Func != nil {
-		pragma = fn.Func.Pragma
+func (e *escape) rewriteArgument(arg ir.Node, call *ir.CallExpr, fn *ir.Name) {
+	if fn == nil || fn.Func == nil {
+		return
+	}
+	pragma := fn.Func.Pragma
+	if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
+		return
 	}
 
 	// unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
 	// functions, so that ptr is kept alive and/or escaped as
 	// appropriate.
-	unsafeUintptr := func(arg0 ir.Node) bool {
-		if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
-			return false
-		}
-
+	unsafeUintptr := func(arg ir.Node) {
 		// If the argument is really a pointer being converted to uintptr,
-		// arrange for the pointer to be kept alive until the call returns,
-		// by copying it into a temp and marking that temp
-		// still alive when we pop the temp stack.
-		if arg0.Op() != ir.OCONVNOP || !arg0.Type().IsUintptr() {
-			return false
+		// arrange for the pointer to be kept alive until the call
+		// returns, by copying it into a temp and marking that temp still
+		// alive when we pop the temp stack.
+		conv, ok := arg.(*ir.ConvExpr)
+		if !ok || conv.Op() != ir.OCONVNOP {
+			return // not a conversion
 		}
-		arg := arg0.(*ir.ConvExpr)
-
-		if !arg.X.Type().IsUnsafePtr() {
-			return false
+		if !conv.X.Type().IsUnsafePtr() || !conv.Type().IsUintptr() {
+			return // not an unsafe.Pointer->uintptr conversion
 		}
 
 		// Create and declare a new pointer-typed temp variable.
-		tmp := e.wrapExpr(arg.Pos(), &arg.X, init, call, wrapper)
+		//
+		// TODO(mdempsky): This potentially violates the Go spec's order
+		// of evaluations, by evaluating arg.X before any other
+		// operands.
+		tmp := e.copyExpr(conv.Pos(), conv.X, call.PtrInit())
+		conv.X = tmp
 
+		k := e.mutatorHole()
 		if pragma&ir.UintptrEscapes != 0 {
-			e.flow(e.heapHole().note(arg, "//go:uintptrescapes"), e.oldLoc(tmp))
+			k = e.heapHole().note(conv, "//go:uintptrescapes")
 		}
+		e.flow(k, e.oldLoc(tmp))
 
 		if pragma&ir.UintptrKeepAlive != 0 {
-			call := call.(*ir.CallExpr)
-
-			// SSA implements CallExpr.KeepAlive using OpVarLive, which
-			// doesn't support PAUTOHEAP variables. I tried changing it to
-			// use OpKeepAlive, but that ran into issues of its own.
-			// For now, the easy solution is to explicitly copy to (yet
-			// another) new temporary variable.
-			keep := tmp
-			if keep.Class == ir.PAUTOHEAP {
-				keep = e.copyExpr(arg.Pos(), tmp, call.PtrInit(), wrapper, false)
-			}
-
-			keep.SetAddrtaken(true) // ensure SSA keeps the tmp variable
-			call.KeepAlive = append(call.KeepAlive, keep)
-		}
-
-		return true
-	}
-
-	visit := func(pos src.XPos, argp *ir.Node) {
-		// Optimize a few common constant expressions. By leaving these
-		// untouched in the call expression, we let the wrapper handle
-		// evaluating them, rather than taking up closure context space.
-		switch arg := *argp; arg.Op() {
-		case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR:
-			return
-		case ir.ONAME:
-			if arg.(*ir.Name).Class == ir.PFUNC {
-				return
-			}
-		}
-
-		if unsafeUintptr(*argp) {
-			return
-		}
-
-		if wrapper != nil {
-			e.wrapExpr(pos, argp, init, call, wrapper)
+			tmp.SetAddrtaken(true) // ensure SSA keeps the tmp variable
+			call.KeepAlive = append(call.KeepAlive, tmp)
 		}
 	}
 
-	// Peel away any slice literals for better escape analyze
-	// them. For example:
-	//
-	//     go F([]int{a, b})
-	//
-	// If F doesn't escape its arguments, then the slice can
-	// be allocated on the new goroutine's stack.
-	//
 	// For variadic functions, the compiler has already rewritten:
 	//
 	//     f(a, b, c)
@@ -382,54 +280,29 @@
 	//     f([]T{a, b, c}...)
 	//
 	// So we need to look into slice elements to handle uintptr(ptr)
-	// arguments to syscall-like functions correctly.
-	if arg := *argp; arg.Op() == ir.OSLICELIT {
+	// arguments to variadic syscall-like functions correctly.
+	if arg.Op() == ir.OSLICELIT {
 		list := arg.(*ir.CompLitExpr).List
-		for i := range list {
-			el := &list[i]
-			if list[i].Op() == ir.OKEY {
-				el = &list[i].(*ir.KeyExpr).Value
+		for _, el := range list {
+			if el.Op() == ir.OKEY {
+				el = el.(*ir.KeyExpr).Value
 			}
-			visit(arg.Pos(), el)
+			unsafeUintptr(el)
 		}
 	} else {
-		visit(call.Pos(), argp)
+		unsafeUintptr(arg)
 	}
 }
 
-// wrapExpr replaces *exprp with a temporary variable copy. If wrapper
-// is non-nil, the variable will be captured for use within that
-// function.
-func (e *escape) wrapExpr(pos src.XPos, exprp *ir.Node, init *ir.Nodes, call ir.Node, wrapper *ir.Func) *ir.Name {
-	tmp := e.copyExpr(pos, *exprp, init, e.curfn, true)
-
-	if wrapper != nil {
-		// Currently for "defer i.M()" if i is nil it panics at the point
-		// of defer statement, not when deferred function is called.  We
-		// need to do the nil check outside of the wrapper.
-		if call.Op() == ir.OCALLINTER && exprp == &call.(*ir.CallExpr).X.(*ir.SelectorExpr).X {
-			check := ir.NewUnaryExpr(pos, ir.OCHECKNIL, ir.NewUnaryExpr(pos, ir.OITAB, tmp))
-			init.Append(typecheck.Stmt(check))
-		}
-
-		e.oldLoc(tmp).captured = true
-
-		tmp = ir.NewClosureVar(pos, wrapper, tmp)
-	}
-
-	*exprp = tmp
-	return tmp
-}
-
 // copyExpr creates and returns a new temporary variable within fn;
 // appends statements to init to declare and initialize it to expr;
-// and escape analyzes the data flow if analyze is true.
-func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes, fn *ir.Func, analyze bool) *ir.Name {
+// and escape analyzes the data flow.
+func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes) *ir.Name {
 	if ir.HasUniquePos(expr) {
 		pos = expr.Pos()
 	}
 
-	tmp := typecheck.TempAt(pos, fn, expr.Type())
+	tmp := typecheck.TempAt(pos, e.curfn, expr.Type())
 
 	stmts := []ir.Node{
 		ir.NewDecl(pos, ir.ODCL, tmp),
@@ -438,10 +311,8 @@
 	typecheck.Stmts(stmts)
 	init.Append(stmts...)
 
-	if analyze {
-		e.newLoc(tmp, false)
-		e.stmts(stmts)
-	}
+	e.newLoc(tmp, true)
+	e.stmts(stmts)
 
 	return tmp
 }
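(Illustrative aside, not part of the patch; sysOp is a made-up stand-in, and the sketch assumes //go:uintptrescapes is usable as documented for cmd/compile.) The uintptr(unsafe.Pointer(...)) shape that unsafeUintptr rewrites above looks roughly like this at the source level:

package sketch

import "unsafe"

// sysOp is a stand-in for a syscall-like function; the directive below is
// what sets ir.UintptrEscapes in fn.Func.Pragma.
//
//go:uintptrescapes
func sysOp(arg uintptr) {
	_ = arg // a real implementation would hand arg to the kernel
}

func use(buf *byte) {
	// uintptr(unsafe.Pointer(buf)) is exactly the conversion shape that
	// unsafeUintptr looks for: the pointer operand is copied into a
	// temporary whose lifetime covers the call and, because of
	// //go:uintptrescapes, is forced to escape to the heap.
	sysOp(uintptr(unsafe.Pointer(buf)))
}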
@@ -457,17 +328,26 @@
 	}
 
 	if e.inMutualBatch(fn) {
-		return e.addr(ir.AsNode(param.Nname))
+		if param.Nname == nil {
+			return e.discardHole()
+		}
+		return e.addr(param.Nname.(*ir.Name))
 	}
 
 	// Call to previously tagged function.
 
 	var tagKs []hole
-
 	esc := parseLeaks(param.Note)
+
 	if x := esc.Heap(); x >= 0 {
 		tagKs = append(tagKs, e.heapHole().shift(x))
 	}
+	if x := esc.Mutator(); x >= 0 {
+		tagKs = append(tagKs, e.mutatorHole().shift(x))
+	}
+	if x := esc.Callee(); x >= 0 {
+		tagKs = append(tagKs, e.calleeHole().shift(x))
+	}
 
 	if ks != nil {
 		for i := 0; i < numEscResults; i++ {
diff --git a/src/cmd/compile/internal/escape/desugar.go b/src/cmd/compile/internal/escape/desugar.go
deleted file mode 100644
index b2c4294..0000000
--- a/src/cmd/compile/internal/escape/desugar.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package escape
-
-import (
-	"cmd/compile/internal/base"
-	"cmd/compile/internal/ir"
-	"cmd/compile/internal/typecheck"
-	"cmd/compile/internal/types"
-)
-
-// TODO(mdempsky): Desugaring doesn't belong during escape analysis,
-// but for now it's the most convenient place for some rewrites.
-
-// fixRecoverCall rewrites an ORECOVER call into ORECOVERFP,
-// adding an explicit frame pointer argument.
-// If call is not an ORECOVER call, it's left unmodified.
-func fixRecoverCall(call *ir.CallExpr) {
-	if call.Op() != ir.ORECOVER {
-		return
-	}
-
-	pos := call.Pos()
-
-	// FP is equal to caller's SP plus FixedFrameSize.
-	var fp ir.Node = ir.NewCallExpr(pos, ir.OGETCALLERSP, nil, nil)
-	if off := base.Ctxt.Arch.FixedFrameSize; off != 0 {
-		fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(base.Pos, off))
-	}
-	// TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
-	fp = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
-
-	call.SetOp(ir.ORECOVERFP)
-	call.Args = []ir.Node{typecheck.Expr(fp)}
-}
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index f17ac13..7df367c 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -12,6 +12,7 @@
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
+	"cmd/internal/src"
 )
 
 // Escape analysis.
@@ -88,8 +89,10 @@
 	allLocs  []*location
 	closures []closure
 
-	heapLoc  location
-	blankLoc location
+	heapLoc    location
+	mutatorLoc location
+	calleeLoc  location
+	blankLoc   location
 }
 
 // A closure holds a closure expression and its spill hole (i.e.,
@@ -115,21 +118,17 @@
 	loopDepth int
 }
 
-func Funcs(all []ir.Node) {
+func Funcs(all []*ir.Func) {
 	ir.VisitFuncsBottomUp(all, Batch)
 }
 
 // Batch performs escape analysis on a minimal batch of
 // functions.
 func Batch(fns []*ir.Func, recursive bool) {
-	for _, fn := range fns {
-		if fn.Op() != ir.ODCLFUNC {
-			base.Fatalf("unexpected node: %v", fn)
-		}
-	}
-
 	var b batch
-	b.heapLoc.escapes = true
+	b.heapLoc.attrs = attrEscapes | attrPersists | attrMutates | attrCalls
+	b.mutatorLoc.attrs = attrMutates
+	b.calleeLoc.attrs = attrCalls
 
 	// Construct data-flow graph from syntax trees.
 	for _, fn := range fns {
@@ -184,19 +183,19 @@
 
 	// Allocate locations for local variables.
 	for _, n := range fn.Dcl {
-		e.newLoc(n, false)
+		e.newLoc(n, true)
 	}
 
 	// Also for hidden parameters (e.g., the ".this" parameter to a
 	// method value wrapper).
 	if fn.OClosure == nil {
 		for _, n := range fn.ClosureVars {
-			e.newLoc(n.Canonical(), false)
+			e.newLoc(n.Canonical(), true)
 		}
 	}
 
 	// Initialize resultIndex for result parameters.
-	for i, f := range fn.Type().Results().FieldSlice() {
+	for i, f := range fn.Type().Results() {
 		e.oldLoc(f.Nname.(*ir.Name)).resultIndex = 1 + i
 	}
 }
@@ -274,12 +273,8 @@
 	for _, fn := range fns {
 		fn.SetEsc(escFuncTagged)
 
-		narg := 0
-		for _, fs := range &types.RecvsParams {
-			for _, f := range fs(fn.Type()).Fields().Slice() {
-				narg++
-				f.Note = b.paramTag(fn, narg, f)
-			}
+		for i, param := range fn.Type().RecvParams() {
+			param.Note = b.paramTag(fn, 1+i, param)
 		}
 	}
 
@@ -288,6 +283,7 @@
 		if n == nil {
 			continue
 		}
+
 		if n.Op() == ir.ONAME {
 			n := n.(*ir.Name)
 			n.Opt = nil
@@ -300,7 +296,7 @@
 		// TODO(mdempsky): Update tests to expect this.
 		goDeferWrapper := n.Op() == ir.OCLOSURE && n.(*ir.ClosureExpr).Func.Wrapper()
 
-		if loc.escapes {
+		if loc.hasAttr(attrEscapes) {
 			if n.Op() == ir.ONAME {
 				if base.Flag.CompilingRuntime {
 					base.ErrorfAt(n.Pos(), 0, "%v escapes to heap, not allowed in runtime", n)
@@ -323,7 +319,7 @@
 				base.WarnfAt(n.Pos(), "%v does not escape", n)
 			}
 			n.SetEsc(ir.EscNone)
-			if loc.transient {
+			if !loc.hasAttr(attrPersists) {
 				switch n.Op() {
 				case ir.OCLOSURE:
 					n := n.(*ir.ClosureExpr)
@@ -337,6 +333,17 @@
 				}
 			}
 		}
+
+		// If the result of a string->[]byte conversion is never mutated,
+		// then it can simply reuse the string's memory directly.
+		if base.Debug.ZeroCopy != 0 {
+			if n, ok := n.(*ir.ConvExpr); ok && n.Op() == ir.OSTR2BYTES && !loc.hasAttr(attrMutates) {
+				if base.Flag.LowerM >= 1 {
+					base.WarnfAt(n.Pos(), "zero-copy string->[]byte conversion")
+				}
+				n.SetOp(ir.OSTR2BYTESTMP)
+			}
+		}
 	}
 }
 
@@ -345,10 +352,10 @@
 // fn has not yet been analyzed, so its parameters and results
 // should be incorporated directly into the flow graph instead of
 // relying on its escape analysis tagging.
-func (e *escape) inMutualBatch(fn *ir.Name) bool {
+func (b *batch) inMutualBatch(fn *ir.Name) bool {
 	if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
 		if fn.Defn.Esc() == escFuncUnknown {
-			base.Fatalf("graph inconsistency: %v", fn)
+			base.FatalfAt(fn.Pos(), "graph inconsistency: %v", fn)
 		}
 		return true
 	}
@@ -372,8 +379,8 @@
 
 func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
 	name := func() string {
-		if f.Sym != nil {
-			return f.Sym.Name
+		if f.Nname != nil {
+			return f.Nname.Sym().Name
 		}
 		return fmt.Sprintf("arg#%d", narg)
 	}
@@ -411,6 +418,8 @@
 			if diagnose && f.Sym != nil {
 				base.WarnfAt(f.Pos, "%v does not escape", name())
 			}
+			esc.AddMutator(0)
+			esc.AddCallee(0)
 		} else {
 			if diagnose && f.Sym != nil {
 				base.WarnfAt(f.Pos, "leaking param: %v", name())
@@ -452,25 +461,49 @@
 	esc := loc.paramEsc
 	esc.Optimize()
 
-	if diagnose && !loc.escapes {
-		if esc.Empty() {
-			base.WarnfAt(f.Pos, "%v does not escape", name())
-		}
-		if x := esc.Heap(); x >= 0 {
-			if x == 0 {
-				base.WarnfAt(f.Pos, "leaking param: %v", name())
-			} else {
-				// TODO(mdempsky): Mention level=x like below?
-				base.WarnfAt(f.Pos, "leaking param content: %v", name())
-			}
-		}
-		for i := 0; i < numEscResults; i++ {
-			if x := esc.Result(i); x >= 0 {
-				res := fn.Type().Results().Field(i).Sym
-				base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
-			}
-		}
+	if diagnose && !loc.hasAttr(attrEscapes) {
+		b.reportLeaks(f.Pos, name(), esc, fn.Type())
 	}
 
 	return esc.Encode()
 }
+
+func (b *batch) reportLeaks(pos src.XPos, name string, esc leaks, sig *types.Type) {
+	warned := false
+	if x := esc.Heap(); x >= 0 {
+		if x == 0 {
+			base.WarnfAt(pos, "leaking param: %v", name)
+		} else {
+			// TODO(mdempsky): Mention level=x like below?
+			base.WarnfAt(pos, "leaking param content: %v", name)
+		}
+		warned = true
+	}
+	for i := 0; i < numEscResults; i++ {
+		if x := esc.Result(i); x >= 0 {
+			res := sig.Result(i).Nname.Sym().Name
+			base.WarnfAt(pos, "leaking param: %v to result %v level=%d", name, res, x)
+			warned = true
+		}
+	}
+
+	if base.Debug.EscapeMutationsCalls <= 0 {
+		if !warned {
+			base.WarnfAt(pos, "%v does not escape", name)
+		}
+		return
+	}
+
+	if x := esc.Mutator(); x >= 0 {
+		base.WarnfAt(pos, "mutates param: %v derefs=%v", name, x)
+		warned = true
+	}
+	if x := esc.Callee(); x >= 0 {
+		base.WarnfAt(pos, "calls param: %v derefs=%v", name, x)
+		warned = true
+	}
+
+	if !warned {
+		base.WarnfAt(pos, "%v does not escape, mutate, or call", name)
+	}
+}
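
Note on the new diagnostics and the zero-copy rewrite above: escape analysis now also tracks whether a parameter's pointee may be mutated (attrMutates) or called (attrCalls), and a string-to-[]byte conversion whose result is provably never mutated can be rewritten to alias the string's bytes. The small program below is an illustrative sketch of that intent, not verified compiler output, and the -d debug-flag spellings implied by base.Debug.ZeroCopy and base.Debug.EscapeMutationsCalls are assumptions.

	package main

	import "fmt"

	// sum only reads p; under the analysis above its parameter can be
	// tagged as not escaping, not mutated, and not called.
	func sum(p []byte) int {
		n := 0
		for _, b := range p {
			n += int(b)
		}
		return n
	}

	func main() {
		s := "hello"
		// Because sum never writes to its argument, the []byte(s)
		// conversion is a candidate for the zero-copy rewrite
		// (OSTR2BYTES -> OSTR2BYTESTMP) when the debug flag is enabled.
		fmt.Println(sum([]byte(s)))
	}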
diff --git a/src/cmd/compile/internal/escape/expr.go b/src/cmd/compile/internal/escape/expr.go
index e5f590d..6aa5ad7 100644
--- a/src/cmd/compile/internal/escape/expr.go
+++ b/src/cmd/compile/internal/escape/expr.go
@@ -113,13 +113,13 @@
 		} else {
 			e.expr(k, n.X)
 		}
-	case ir.OCONVIFACE, ir.OCONVIDATA:
+	case ir.OCONVIFACE:
 		n := n.(*ir.ConvExpr)
 		if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
 			k = e.spill(k, n)
 		}
 		e.expr(k.note(n, "interface-converted"), n.X)
-	case ir.OEFACE:
+	case ir.OMAKEFACE:
 		n := n.(*ir.BinaryExpr)
 		// Note: n.X is not needed because it can never point to memory that might escape.
 		e.expr(k, n.Y)
@@ -139,7 +139,7 @@
 		e.discard(n.X)
 
 	case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OINLCALL,
-		ir.OLEN, ir.OCAP, ir.OMIN, ir.OMAX, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVER,
+		ir.OLEN, ir.OCAP, ir.OMIN, ir.OMAX, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVERFP,
 		ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING, ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
 		e.call([]hole{k}, n)
 
@@ -250,7 +250,7 @@
 				// analysis (happens for escape analysis called
 				// from reflectdata.methodWrapper)
 				if n.Op() == ir.ONAME && n.Opt == nil {
-					e.with(fn).newLoc(n, false)
+					e.with(fn).newLoc(n, true)
 				}
 			}
 			e.walkFunc(fn)
@@ -335,7 +335,7 @@
 // its address to k, and returns a hole that flows values to it. It's
 // intended for use with most expressions that allocate storage.
 func (e *escape) spill(k hole, n ir.Node) hole {
-	loc := e.newLoc(n, true)
+	loc := e.newLoc(n, false)
 	e.flow(k.addr(n, "spill"), loc)
 	return loc.asHole()
 }
diff --git a/src/cmd/compile/internal/escape/graph.go b/src/cmd/compile/internal/escape/graph.go
index fc18f77..75e2546 100644
--- a/src/cmd/compile/internal/escape/graph.go
+++ b/src/cmd/compile/internal/escape/graph.go
@@ -38,7 +38,7 @@
 //        e.value(k, n.Left)
 //    }
 
-// An location represents an abstract location that stores a Go
+// A location represents an abstract location that stores a Go
 // variable.
 type location struct {
 	n         ir.Node  // represented variable or expression, if any
@@ -66,15 +66,8 @@
 	// in the walk queue.
 	queued bool
 
-	// escapes reports whether the represented variable's address
-	// escapes; that is, whether the variable must be heap
-	// allocated.
-	escapes bool
-
-	// transient reports whether the represented expression's
-	// address does not outlive the statement; that is, whether
-	// its storage can be immediately reused.
-	transient bool
+	// attrs is a bitset of location attributes.
+	attrs locAttr
 
 	// paramEsc records the represented parameter's leak set.
 	paramEsc leaks
@@ -84,6 +77,32 @@
 	addrtaken  bool // has this variable's address been taken?
 }
 
+type locAttr uint8
+
+const (
+	// attrEscapes indicates whether the represented variable's address
+	// escapes; that is, whether the variable must be heap allocated.
+	attrEscapes locAttr = 1 << iota
+
+	// attrPersists indicates whether the represented expression's
+	// address outlives the statement; that is, whether its storage
+	// cannot be immediately reused.
+	attrPersists
+
+	// attrMutates indicates whether pointers that are reachable from
+	// this location may have their addressed memory mutated. This is
+	// used to detect string->[]byte conversions that can be safely
+	// optimized away.
+	attrMutates
+
+	// attrCalls indicates whether closures that are reachable from this
+	// location may be called without tracking their results. This is
+	// used to better optimize indirect closure calls.
+	attrCalls
+)
+
+func (l *location) hasAttr(attr locAttr) bool { return l.attrs&attr != 0 }
+
 // An edge represents an assignment edge between two Go variables.
 type edge struct {
 	src    *location
@@ -100,7 +119,7 @@
 	// If sink is a result parameter that doesn't escape (#44614)
 	// and we can fit return bits into the escape analysis tag,
 	// then record as a result leak.
-	if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+	if !sink.hasAttr(attrEscapes) && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
 		ri := sink.resultIndex - 1
 		if ri < numEscResults {
 			// Leak to result parameter.
@@ -113,6 +132,35 @@
 	l.paramEsc.AddHeap(derefs)
 }
 
+// leakTo records that parameter l leaks to sink.
+func (b *batch) leakTo(l, sink *location, derefs int) {
+	if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.hasAttr(attrEscapes) {
+		if base.Flag.LowerM >= 2 {
+			fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(sink), derefs)
+		}
+		explanation := b.explainPath(sink, l)
+		if logopt.Enabled() {
+			var e_curfn *ir.Func // TODO(mdempsky): Fix.
+			logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
+				fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(sink), derefs), explanation)
+		}
+	}
+
+	// If sink is a result parameter that doesn't escape (#44614)
+	// and we can fit return bits into the escape analysis tag,
+	// then record as a result leak.
+	if !sink.hasAttr(attrEscapes) && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+		if ri := sink.resultIndex - 1; ri < numEscResults {
+			// Leak to result parameter.
+			l.paramEsc.AddResult(ri, derefs)
+			return
+		}
+	}
+
+	// Otherwise, record as heap leak.
+	l.paramEsc.AddHeap(derefs)
+}
+
 func (l *location) isName(c ir.Class) bool {
 	return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
 }
@@ -182,7 +230,7 @@
 	if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
 		return
 	}
-	if dst.escapes && k.derefs < 0 { // dst = &src
+	if dst.hasAttr(attrEscapes) && k.derefs < 0 { // dst = &src
 		if base.Flag.LowerM >= 2 || logopt.Enabled() {
 			pos := base.FmtPos(src.n.Pos())
 			if base.Flag.LowerM >= 2 {
@@ -195,7 +243,7 @@
 			}
 
 		}
-		src.escapes = true
+		src.attrs |= attrEscapes | attrPersists | attrMutates | attrCalls
 		return
 	}
 
@@ -204,16 +252,18 @@
 }
 
 func (b *batch) heapHole() hole    { return b.heapLoc.asHole() }
+func (b *batch) mutatorHole() hole { return b.mutatorLoc.asHole() }
+func (b *batch) calleeHole() hole  { return b.calleeLoc.asHole() }
 func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
 
 func (b *batch) oldLoc(n *ir.Name) *location {
 	if n.Canonical().Opt == nil {
-		base.Fatalf("%v has no location", n)
+		base.FatalfAt(n.Pos(), "%v has no location", n)
 	}
 	return n.Canonical().Opt.(*location)
 }
 
-func (e *escape) newLoc(n ir.Node, transient bool) *location {
+func (e *escape) newLoc(n ir.Node, persists bool) *location {
 	if e.curfn == nil {
 		base.Fatalf("e.curfn isn't set")
 	}
@@ -223,14 +273,16 @@
 
 	if n != nil && n.Op() == ir.ONAME {
 		if canon := n.(*ir.Name).Canonical(); n != canon {
-			base.Fatalf("newLoc on non-canonical %v (canonical is %v)", n, canon)
+			base.FatalfAt(n.Pos(), "newLoc on non-canonical %v (canonical is %v)", n, canon)
 		}
 	}
 	loc := &location{
 		n:         n,
 		curfn:     e.curfn,
 		loopDepth: e.loopDepth,
-		transient: transient,
+	}
+	if persists {
+		loc.attrs |= attrPersists
 	}
 	e.allLocs = append(e.allLocs, loc)
 	if n != nil {
@@ -239,11 +291,11 @@
 			if n.Class == ir.PPARAM && n.Curfn == nil {
 				// ok; hidden parameter
 			} else if n.Curfn != e.curfn {
-				base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
+				base.FatalfAt(n.Pos(), "curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
 			}
 
 			if n.Opt != nil {
-				base.Fatalf("%v already has a location", n)
+				base.FatalfAt(n.Pos(), "%v already has a location", n)
 			}
 			n.Opt = loc
 		}
@@ -265,7 +317,7 @@
 	// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
 	// new temporary location ltmp, wire it into place, and return
 	// a hole for "ltmp = _".
-	loc := e.newLoc(nil, true)
+	loc := e.newLoc(nil, false)
 	for _, k := range ks {
 		// N.B., "p = &q" and "p = &tmp; tmp = q" are not
 		// semantically equivalent. To combine holes like "l1
@@ -285,7 +337,7 @@
 // Its main effect is to prevent immediate reuse of temporary
 // variables introduced during Order.
 func (e *escape) later(k hole) hole {
-	loc := e.newLoc(nil, false)
+	loc := e.newLoc(nil, true)
 	e.flow(k, loc)
 	return loc.asHole()
 }
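
For readers unfamiliar with the locAttr change above: the two booleans escapes/transient are replaced by a small bitset, so additional per-location facts (mutates, calls) can be added without widening the struct. A standalone sketch of the same pattern, mirroring the declarations in this file:

	package main

	import "fmt"

	type locAttr uint8

	const (
		attrEscapes locAttr = 1 << iota // must be heap allocated
		attrPersists                    // storage outlives the statement
		attrMutates                     // reachable memory may be written
		attrCalls                       // reachable closures may be called
	)

	type location struct{ attrs locAttr }

	func (l *location) hasAttr(a locAttr) bool { return l.attrs&a != 0 }

	func main() {
		var l location
		l.attrs |= attrEscapes | attrPersists
		fmt.Println(l.hasAttr(attrEscapes), l.hasAttr(attrMutates)) // true false
	}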
diff --git a/src/cmd/compile/internal/escape/leaks.go b/src/cmd/compile/internal/escape/leaks.go
index 1432607..942f87d 100644
--- a/src/cmd/compile/internal/escape/leaks.go
+++ b/src/cmd/compile/internal/escape/leaks.go
@@ -10,33 +10,53 @@
 	"strings"
 )
 
-const numEscResults = 7
+// A leaks represents a set of assignment flows from a parameter to
+// the heap, mutator, callee, or to any of its function's (first
+// numEscResults) result parameters.
+type leaks [8]uint8
 
-// An leaks represents a set of assignment flows from a parameter
-// to the heap or to any of its function's (first numEscResults)
-// result parameters.
-type leaks [1 + numEscResults]uint8
+const (
+	leakHeap = iota
+	leakMutator
+	leakCallee
+	leakResult0
+)
 
-// Empty reports whether l is an empty set (i.e., no assignment flows).
-func (l leaks) Empty() bool { return l == leaks{} }
+const numEscResults = len(leaks{}) - leakResult0
 
 // Heap returns the minimum deref count of any assignment flow from l
 // to the heap. If no such flows exist, Heap returns -1.
-func (l leaks) Heap() int { return l.get(0) }
+func (l leaks) Heap() int { return l.get(leakHeap) }
+
+// Mutator returns the minimum deref count of any assignment flow from
+// l to the pointer operand of an indirect assignment statement. If no
+// such flows exist, Mutator returns -1.
+func (l leaks) Mutator() int { return l.get(leakMutator) }
+
+// Callee returns the minimum deref count of any assignment flow from
+// l to the callee operand of a call expression. If no such flows exist,
+// Callee returns -1.
+func (l leaks) Callee() int { return l.get(leakCallee) }
 
 // Result returns the minimum deref count of any assignment flow from
 // l to its function's i'th result parameter. If no such flows exist,
 // Result returns -1.
-func (l leaks) Result(i int) int { return l.get(1 + i) }
+func (l leaks) Result(i int) int { return l.get(leakResult0 + i) }
 
 // AddHeap adds an assignment flow from l to the heap.
-func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
+func (l *leaks) AddHeap(derefs int) { l.add(leakHeap, derefs) }
+
+// AddMutator adds a flow from l to the mutator (i.e., a pointer
+// operand of an indirect assignment statement).
+func (l *leaks) AddMutator(derefs int) { l.add(leakMutator, derefs) }
+
+// AddCallee adds an assignment flow from l to the callee operand of a
+// call expression.
+func (l *leaks) AddCallee(derefs int) { l.add(leakCallee, derefs) }
 
 // AddResult adds an assignment flow from l to its function's i'th
 // result parameter.
-func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
-
-func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
+func (l *leaks) AddResult(i, derefs int) { l.add(leakResult0+i, derefs) }
 
 func (l leaks) get(i int) int { return int(l[i]) - 1 }
 
@@ -64,9 +84,9 @@
 	// If we have a path to the heap, then there's no use in
 	// keeping equal or longer paths elsewhere.
 	if x := l.Heap(); x >= 0 {
-		for i := 0; i < numEscResults; i++ {
-			if l.Result(i) >= x {
-				l.setResult(i, -1)
+		for i := 1; i < len(*l); i++ {
+			if l.get(i) >= x {
+				l.set(i, -1)
 			}
 		}
 	}
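
The widened leaks array above packs the heap, mutator, callee, and result flows into one byte each, storing derefs+1 so that the zero value means "no flow". A minimal sketch of that encoding follows; the overflow clamp value is an assumption of the sketch, not taken from this diff.

	package main

	import "fmt"

	const (
		leakHeap = iota
		leakMutator
		leakCallee
		leakResult0
	)

	type leaks [8]uint8

	// get returns -1 when no flow has been recorded for slot i.
	func (l leaks) get(i int) int { return int(l[i]) - 1 }

	// add records a flow, keeping the minimum deref count seen so far.
	func (l *leaks) add(i, derefs int) {
		if old := l.get(i); old < 0 || derefs < old {
			l.set(i, derefs)
		}
	}

	func (l *leaks) set(i, derefs int) {
		if derefs < 0 {
			l[i] = 0 // no flow
			return
		}
		if derefs > 254 { // clamp so derefs+1 still fits in a uint8
			derefs = 254
		}
		l[i] = uint8(1 + derefs)
	}

	func main() {
		var l leaks
		l.add(leakHeap, 2)
		l.add(leakHeap, 1) // lower deref count wins
		fmt.Println(l.get(leakHeap), l.get(leakMutator)) // 1 -1
	}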
diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go
index a2d3b6d..2675a16 100644
--- a/src/cmd/compile/internal/escape/solve.go
+++ b/src/cmd/compile/internal/escape/solve.go
@@ -21,7 +21,7 @@
 	//
 	// We walk once from each location (including the heap), and
 	// then re-enqueue each location on its transition from
-	// transient->!transient and !escapes->escapes, which can each
+	// !persists->persists and !escapes->escapes, which can each
 	// happen at most once. So we take Θ(len(e.allLocs)) walks.
 
 	// LIFO queue, has enough room for e.allLocs and e.heapLoc.
@@ -36,6 +36,8 @@
 	for _, loc := range b.allLocs {
 		enqueue(loc)
 	}
+	enqueue(&b.mutatorLoc)
+	enqueue(&b.calleeLoc)
 	enqueue(&b.heapLoc)
 
 	var walkgen uint32
@@ -61,12 +63,27 @@
 	root.derefs = 0
 	root.dst = nil
 
+	if root.hasAttr(attrCalls) {
+		if clo, ok := root.n.(*ir.ClosureExpr); ok {
+			if fn := clo.Func; b.inMutualBatch(fn.Nname) && !fn.ClosureResultsLost() {
+				fn.SetClosureResultsLost(true)
+
+				// Re-flow from the closure's results, now that we're aware
+				// we lost track of them.
+				for _, result := range fn.Type().Results() {
+					enqueue(b.oldLoc(result.Nname.(*ir.Name)))
+				}
+			}
+		}
+	}
+
 	todo := []*location{root} // LIFO queue
 	for len(todo) > 0 {
 		l := todo[len(todo)-1]
 		todo = todo[:len(todo)-1]
 
 		derefs := l.derefs
+		var newAttrs locAttr
 
 		// If l.derefs < 0, then l's address flows to root.
 		addressOf := derefs < 0
@@ -77,23 +94,41 @@
 			// derefs at 0.
 			derefs = 0
 
-			// If l's address flows to a non-transient
-			// location, then l can't be transiently
+			// If l's address flows somewhere that
+			// outlives it, then l needs to be heap
 			// allocated.
-			if !root.transient && l.transient {
-				l.transient = false
-				enqueue(l)
+			if b.outlives(root, l) {
+				if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
+					if base.Flag.LowerM >= 2 {
+						fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
+					}
+					explanation := b.explainPath(root, l)
+					if logopt.Enabled() {
+						var e_curfn *ir.Func // TODO(mdempsky): Fix.
+						logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+					}
+				}
+				newAttrs |= attrEscapes | attrPersists | attrMutates | attrCalls
+			} else
+			// If l's address flows to a persistent location, then l needs
+			// to persist too.
+			if root.hasAttr(attrPersists) {
+				newAttrs |= attrPersists
 			}
 		}
 
-		if b.outlives(root, l) {
-			// l's value flows to root. If l is a function
-			// parameter and root is the heap or a
-			// corresponding result parameter, then record
-			// that value flow for tagging the function
-			// later.
-			if l.isName(ir.PPARAM) {
-				if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
+		if derefs == 0 {
+			newAttrs |= root.attrs & (attrMutates | attrCalls)
+		}
+
+		// l's value flows to root. If l is a function
+		// parameter and root is the heap or a
+		// corresponding result parameter, then record
+		// that value flow for tagging the function
+		// later.
+		if l.isName(ir.PPARAM) {
+			if b.outlives(root, l) {
+				if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
 					if base.Flag.LowerM >= 2 {
 						fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
 					}
@@ -106,29 +141,24 @@
 				}
 				l.leakTo(root, derefs)
 			}
+			if root.hasAttr(attrMutates) {
+				l.paramEsc.AddMutator(derefs)
+			}
+			if root.hasAttr(attrCalls) {
+				l.paramEsc.AddCallee(derefs)
+			}
+		}
 
-			// If l's address flows somewhere that
-			// outlives it, then l needs to be heap
-			// allocated.
-			if addressOf && !l.escapes {
-				if logopt.Enabled() || base.Flag.LowerM >= 2 {
-					if base.Flag.LowerM >= 2 {
-						fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
-					}
-					explanation := b.explainPath(root, l)
-					if logopt.Enabled() {
-						var e_curfn *ir.Func // TODO(mdempsky): Fix.
-						logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
-					}
-				}
-				l.escapes = true
-				enqueue(l)
+		if newAttrs&^l.attrs != 0 {
+			l.attrs |= newAttrs
+			enqueue(l)
+			if l.attrs&attrEscapes != 0 {
 				continue
 			}
 		}
 
 		for i, edge := range l.edges {
-			if edge.src.escapes {
+			if edge.src.hasAttr(attrEscapes) {
 				continue
 			}
 			d := derefs + edge.derefs
@@ -228,21 +258,27 @@
 // other's lifetime if stack allocated.
 func (b *batch) outlives(l, other *location) bool {
 	// The heap outlives everything.
-	if l.escapes {
+	if l.hasAttr(attrEscapes) {
 		return true
 	}
 
+	// Pseudo-locations that don't really exist.
+	if l == &b.mutatorLoc || l == &b.calleeLoc {
+		return false
+	}
+
 	// We don't know what callers do with returned values, so
 	// pessimistically we need to assume they flow to the heap and
 	// outlive everything too.
 	if l.isName(ir.PPARAMOUT) {
-		// Exception: Directly called closures can return
-		// locations allocated outside of them without forcing
-		// them to the heap. For example:
+		// Exception: Closures can return locations allocated outside of
+		// them without forcing them to the heap, if we can statically
+		// identify all call sites. For example:
 		//
-		//    var u int  // okay to stack allocate
-		//    *(func() *int { return &u }()) = 42
-		if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
+		//	var u int  // okay to stack allocate
+		//	fn := func() *int { return &u }
+		//	*fn() = 42
+		if containsClosure(other.curfn, l.curfn) && !l.curfn.ClosureResultsLost() {
 			return false
 		}
 
@@ -253,10 +289,10 @@
 	// outlives other if it was declared outside other's loop
 	// scope. For example:
 	//
-	//    var l *int
-	//    for {
-	//        l = new(int)
-	//    }
+	//	var l *int
+	//	for {
+	//		l = new(int) // must heap allocate: outlives for loop
+	//	}
 	if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
 		return true
 	}
@@ -264,10 +300,10 @@
 	// If other is declared within a child closure of where l is
 	// declared, then l outlives it. For example:
 	//
-	//    var l *int
-	//    func() {
-	//        l = new(int)
-	//    }
+	//	var l *int
+	//	func() {
+	//		l = new(int) // must heap allocate: outlives call frame (if not inlined)
+	//	}()
 	if containsClosure(l.curfn, other.curfn) {
 		return true
 	}
@@ -277,8 +313,8 @@
 
 // containsClosure reports whether c is a closure contained within f.
 func containsClosure(f, c *ir.Func) bool {
-	// Common case.
-	if f == c {
+	// Common cases.
+	if f == c || c.OClosure == nil {
 		return false
 	}
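
The two comment examples in outlives above correspond to programs like the following; the escape outcomes noted in the comments are the intended behavior being described, restated here as a sketch rather than verified compiler output.

	package main

	// All call sites of f are statically known, so its result does not
	// force u to the heap: u can stay on the stack.
	func knownCallSites() int {
		var u int
		f := func() *int { return &u }
		*f() = 42
		return u
	}

	// l is declared outside the loop, so each new(int) allocated inside
	// the loop outlives its iteration and must be heap allocated.
	func loopOutlives() int {
		var l *int
		for i := 0; i < 3; i++ {
			l = new(int)
			*l = i
		}
		return *l
	}

	func main() {
		println(knownCallSites(), loopOutlives())
	}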
 
diff --git a/src/cmd/compile/internal/escape/stmt.go b/src/cmd/compile/internal/escape/stmt.go
index 5ae78e3..b766864 100644
--- a/src/cmd/compile/internal/escape/stmt.go
+++ b/src/cmd/compile/internal/escape/stmt.go
@@ -31,7 +31,7 @@
 	default:
 		base.Fatalf("unexpected stmt: %v", n)
 
-	case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
+	case ir.OFALL, ir.OINLMARK:
 		// nop
 
 	case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
@@ -92,8 +92,9 @@
 		n := n.(*ir.RangeStmt)
 		base.Assert(!n.DistinctVars) // Should all be rewritten before escape analysis
 
-		// X is evaluated outside the loop.
-		tmp := e.newLoc(nil, false)
+		// X is evaluated outside the loop and persists until the loop
+		// terminates.
+		tmp := e.newLoc(nil, true)
 		e.expr(tmp.asHole(), n.X)
 
 		e.loopDepth++
@@ -176,13 +177,13 @@
 		e.reassigned(ks, n)
 	case ir.ORETURN:
 		n := n.(*ir.ReturnStmt)
-		results := e.curfn.Type().Results().FieldSlice()
+		results := e.curfn.Type().Results()
 		dsts := make([]ir.Node, len(results))
 		for i, res := range results {
 			dsts[i] = res.Nname.(*ir.Name)
 		}
 		e.assignList(dsts, n.Results, "return", n)
-	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLEAR, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLEAR, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
 		e.call(nil, n)
 	case ir.OGO, ir.ODEFER:
 		n := n.(*ir.GoDeferStmt)
diff --git a/src/cmd/compile/internal/escape/utils.go b/src/cmd/compile/internal/escape/utils.go
index b481d8e..bd1d2c2 100644
--- a/src/cmd/compile/internal/escape/utils.go
+++ b/src/cmd/compile/internal/escape/utils.go
@@ -151,7 +151,7 @@
 		n := n.(*ir.ConvExpr)
 		return mayAffectMemory(n.X)
 
-	case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+	case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG:
 		n := n.(*ir.UnaryExpr)
 		return mayAffectMemory(n.X)
 
diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go
index 4795297..0f57f8c 100644
--- a/src/cmd/compile/internal/gc/compile.go
+++ b/src/cmd/compile/internal/gc/compile.go
@@ -16,7 +16,6 @@
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/staticinit"
-	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/compile/internal/walk"
 	"cmd/internal/obj"
@@ -39,6 +38,11 @@
 		return
 	}
 
+	// Don't try compiling dead hidden closure.
+	if fn.IsDeadcodeClosure() {
+		return
+	}
+
 	if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) {
 		return // we'll get this as part of its enclosing function
 	}
@@ -52,7 +56,7 @@
 		ir.InitLSym(fn, false)
 		types.CalcSize(fn.Type())
 		a := ssagen.AbiForBodylessFuncStackMap(fn)
-		abiInfo := a.ABIAnalyzeFuncType(fn.Type().FuncType()) // abiInfo has spill/home locations for wrapper
+		abiInfo := a.ABIAnalyzeFuncType(fn.Type()) // abiInfo has spill/home locations for wrapper
 		liveness.WriteFuncMap(fn, abiInfo)
 		if fn.ABI == obj.ABI0 {
 			x := ssagen.EmitArgInfo(fn, abiInfo)
@@ -100,21 +104,15 @@
 	// Calculate parameter offsets.
 	types.CalcSize(fn.Type())
 
-	typecheck.DeclContext = ir.PAUTO
 	ir.CurFunc = fn
 	walk.Walk(fn)
 	ir.CurFunc = nil // enforce no further uses of CurFunc
-	typecheck.DeclContext = ir.PEXTERN
 }
 
 // compileFunctions compiles all functions in compilequeue.
 // It fans out nBackendWorkers to do the work
 // and waits for them to complete.
 func compileFunctions() {
-	if len(compilequeue) == 0 {
-		return
-	}
-
 	if race.Enabled {
 		// Randomize compilation order to try to shake out races.
 		tmp := make([]*ir.Func, len(compilequeue))
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index c9acfc1..c93f008 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -21,7 +21,7 @@
 		base.Fatalf("%v", err)
 	}
 	fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
-	for _, n := range typecheck.Target.Asms {
+	for _, n := range typecheck.Target.AsmHdrDecls {
 		if n.Sym().IsBlank() {
 			continue
 		}
@@ -39,7 +39,7 @@
 				break
 			}
 			fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Size()))
-			for _, f := range t.Fields().Slice() {
+			for _, f := range t.Fields() {
 				if !f.Sym.IsBlank() {
 					fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
 				}
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 937d1c4..7e5069f 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -9,11 +9,10 @@
 	"bytes"
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/coverage"
-	"cmd/compile/internal/deadcode"
-	"cmd/compile/internal/devirtualize"
 	"cmd/compile/internal/dwarfgen"
 	"cmd/compile/internal/escape"
 	"cmd/compile/internal/inline"
+	"cmd/compile/internal/inline/interleaved"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/loopvar"
@@ -21,6 +20,7 @@
 	"cmd/compile/internal/pgo"
 	"cmd/compile/internal/pkginit"
 	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/rttype"
 	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/staticinit"
@@ -153,7 +153,7 @@
 		symABIs.ReadSymABIs(base.Flag.SymABIs)
 	}
 
-	if base.Compiling(base.NoInstrumentPkgs) {
+	if objabi.LookupPkgSpecial(base.Ctxt.Pkgpath).NoInstrument {
 		base.Flag.Race = false
 		base.Flag.MSan = false
 		base.Flag.ASan = false
@@ -187,12 +187,11 @@
 
 	typecheck.Target = new(ir.Package)
 
-	typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
-
 	base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
 
 	typecheck.InitUniverse()
 	typecheck.InitRuntime()
+	rttype.Init()
 
 	// Parse and typecheck input.
 	noder.LoadPackage(flag.Args())
@@ -208,49 +207,11 @@
 
 	dwarfgen.RecordPackageName()
 
-	// Prepare for backend processing. This must happen before pkginit,
-	// because it generates itabs for initializing global variables.
+	// Prepare for backend processing.
 	ssagen.InitConfig()
 
-	// First part of coverage fixup (if applicable).
-	var cnames coverage.Names
-	if base.Flag.Cfg.CoverageInfo != nil {
-		cnames = coverage.FixupVars()
-	}
-
-	// Create "init" function for package-scope variable initialization
-	// statements, if any.
-	//
-	// Note: This needs to happen early, before any optimizations. The
-	// Go spec defines a precise order than initialization should be
-	// carried out in, and even mundane optimizations like dead code
-	// removal can skew the results (e.g., #43444).
-	pkginit.MakeInit()
-
-	// Second part of code coverage fixup (init func modification),
-	// if applicable.
-	if base.Flag.Cfg.CoverageInfo != nil {
-		coverage.FixupInit(cnames)
-	}
-
-	// Eliminate some obviously dead code.
-	// Must happen after typechecking.
-	for _, n := range typecheck.Target.Decls {
-		if n.Op() == ir.ODCLFUNC {
-			deadcode.Func(n.(*ir.Func))
-		}
-	}
-
-	// Compute Addrtaken for names.
-	// We need to wait until typechecking is done so that when we see &x[i]
-	// we know that x has its address taken if x is an array, but not if x is a slice.
-	// We compute Addrtaken in bulk here.
-	// After this phase, we maintain Addrtaken incrementally.
-	if typecheck.DirtyAddrtaken {
-		typecheck.ComputeAddrtaken(typecheck.Target.Decls)
-		typecheck.DirtyAddrtaken = false
-	}
-	typecheck.IncrementalAddrtaken = true
+	// Apply coverage fixups, if applicable.
+	coverage.Fixup()
 
 	// Read profile file and build profile-graph and weighted-call-graph.
 	base.Timer.Start("fe", "pgo-load-profile")
@@ -263,40 +224,21 @@
 		}
 	}
 
-	base.Timer.Start("fe", "pgo-devirtualization")
-	if profile != nil && base.Debug.PGODevirtualize > 0 {
-		// TODO(prattmic): No need to use bottom-up visit order. This
-		// is mirroring the PGO IRGraph visit order, which also need
-		// not be bottom-up.
-		ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
-			for _, fn := range list {
-				devirtualize.ProfileGuided(fn, profile)
-			}
-		})
-		ir.CurFunc = nil
-	}
+	// Interleaved devirtualization and inlining.
+	base.Timer.Start("fe", "devirtualize-and-inline")
+	interleaved.DevirtualizeAndInlinePackage(typecheck.Target, profile)
 
-	// Inlining
-	base.Timer.Start("fe", "inlining")
-	if base.Flag.LowerL != 0 {
-		inline.InlinePackage(profile)
-	}
 	noder.MakeWrappers(typecheck.Target) // must happen after inlining
 
-	// Devirtualize and get variable capture right in for loops
+	// Get variable capture right in for loops.
 	var transformed []loopvar.VarAndLoop
-	for _, n := range typecheck.Target.Decls {
-		if n.Op() == ir.ODCLFUNC {
-			devirtualize.Static(n.(*ir.Func))
-			transformed = append(transformed, loopvar.ForCapture(n.(*ir.Func))...)
-		}
+	for _, fn := range typecheck.Target.Funcs {
+		transformed = append(transformed, loopvar.ForCapture(fn)...)
 	}
 	ir.CurFunc = nil
 
 	// Build init task, if needed.
-	if initTask := pkginit.Task(); initTask != nil {
-		typecheck.Export(initTask)
-	}
+	pkginit.MakeTask()
 
 	// Generate ABI wrappers. Must happen before escape analysis
 	// and doesn't benefit from dead-coding or inlining.
@@ -311,7 +253,7 @@
 	// Large values are also moved off stack in escape analysis;
 	// because large values may contain pointers, it must happen early.
 	base.Timer.Start("fe", "escapes")
-	escape.Funcs(typecheck.Target.Decls)
+	escape.Funcs(typecheck.Target.Funcs)
 
 	loopvar.LogTransformations(transformed)
 
@@ -325,23 +267,62 @@
 
 	ir.CurFunc = nil
 
-	// Compile top level functions.
-	// Don't use range--walk can add functions to Target.Decls.
-	base.Timer.Start("be", "compilefuncs")
-	fcount := int64(0)
-	for i := 0; i < len(typecheck.Target.Decls); i++ {
-		if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
-			// Don't try compiling dead hidden closure.
-			if fn.IsDeadcodeClosure() {
-				continue
-			}
-			enqueueFunc(fn)
-			fcount++
-		}
-	}
-	base.Timer.AddEvent(fcount, "funcs")
+	reflectdata.WriteBasicTypes()
 
-	compileFunctions()
+	// Compile top-level declarations.
+	//
+	// There are cyclic dependencies between all of these phases, so we
+	// need to iterate all of them until we reach a fixed point.
+	base.Timer.Start("be", "compilefuncs")
+	for nextFunc, nextExtern := 0, 0; ; {
+		reflectdata.WriteRuntimeTypes()
+
+		if nextExtern < len(typecheck.Target.Externs) {
+			switch n := typecheck.Target.Externs[nextExtern]; n.Op() {
+			case ir.ONAME:
+				dumpGlobal(n)
+			case ir.OLITERAL:
+				dumpGlobalConst(n)
+			case ir.OTYPE:
+				reflectdata.NeedRuntimeType(n.Type())
+			}
+			nextExtern++
+			continue
+		}
+
+		if nextFunc < len(typecheck.Target.Funcs) {
+			enqueueFunc(typecheck.Target.Funcs[nextFunc])
+			nextFunc++
+			continue
+		}
+
+		// The SSA backend supports using multiple goroutines, so keep it
+		// as late as possible to maximize how much work we can batch and
+		// process concurrently.
+		if len(compilequeue) != 0 {
+			compileFunctions()
+			continue
+		}
+
+		// Finalize DWARF inline routine DIEs, then explicitly turn off
+		// further DWARF inlining generation to avoid problems with
+		// generated method wrappers.
+		//
+		// Note: The DWARF fixup code for inlined calls currently doesn't
+		// allow multiple invocations, so we intentionally run it just
+		// once after everything else. Worst case, some generated
+		// functions have slightly larger DWARF DIEs.
+		if base.Ctxt.DwFixups != nil {
+			base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0)
+			base.Ctxt.DwFixups = nil
+			base.Flag.GenDwarfInl = 0
+			continue // may have called reflectdata.TypeLinksym (#62156)
+		}
+
+		break
+	}
+
+	base.Timer.AddEvent(int64(len(typecheck.Target.Funcs)), "funcs")
 
 	if base.Flag.CompilingRuntime {
 		// Write barriers are now known. Check the call graph.
@@ -353,15 +334,6 @@
 		staticinit.AddKeepRelocations()
 	}
 
-	// Finalize DWARF inline routine DIEs, then explicitly turn off
-	// DWARF inlining gen so as to avoid problems with generated
-	// method wrappers.
-	if base.Ctxt.DwFixups != nil {
-		base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0)
-		base.Ctxt.DwFixups = nil
-		base.Flag.GenDwarfInl = 0
-	}
-
 	// Write object data to disk.
 	base.Timer.Start("be", "dumpobj")
 	dumpdata()
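
The rewritten loop above drives externs, function compilation, runtime-type emission, and DWARF fixups to a fixed point, because each phase can generate work for the others. A rough, simplified sketch of that control flow, not the compiler's actual code:

	package main

	import "fmt"

	// fixedPoint keeps running whichever phase still has pending work,
	// restarting from the top each time, and stops once every phase
	// reports that it is done.
	func fixedPoint(phases ...func() bool) {
		for {
			progressed := false
			for _, phase := range phases {
				if phase() {
					progressed = true
					break // re-check earlier phases; they may have new work now
				}
			}
			if !progressed {
				return
			}
		}
	}

	func main() {
		work := []string{"extern1", "func1", "func2"}
		fixedPoint(func() bool {
			if len(work) == 0 {
				return false
			}
			fmt.Println("processing", work[0])
			work = work[1:]
			return true
		})
	}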
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index e895c45..e090caf 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -110,43 +110,10 @@
 }
 
 func dumpdata() {
-	numExterns := len(typecheck.Target.Externs)
-	numDecls := len(typecheck.Target.Decls)
-	dumpglobls(typecheck.Target.Externs)
-	reflectdata.CollectPTabs()
-	numExports := len(typecheck.Target.Exports)
-	addsignats(typecheck.Target.Externs)
-	reflectdata.WriteRuntimeTypes()
-	reflectdata.WriteTabs()
-	numPTabs := reflectdata.CountPTabs()
-	reflectdata.WriteImportStrings()
-	reflectdata.WriteBasicTypes()
+	reflectdata.WriteGCSymbols()
+	reflectdata.WritePluginTable()
 	dumpembeds()
 
-	// Calls to WriteRuntimeTypes can generate functions,
-	// like method wrappers and hash and equality routines.
-	// Compile any generated functions, process any new resulting types, repeat.
-	// This can't loop forever, because there is no way to generate an infinite
-	// number of types in a finite amount of code.
-	// In the typical case, we loop 0 or 1 times.
-	// It was not until issue 24761 that we found any code that required a loop at all.
-	for {
-		for i := numDecls; i < len(typecheck.Target.Decls); i++ {
-			if n, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
-				enqueueFunc(n)
-			}
-		}
-		numDecls = len(typecheck.Target.Decls)
-		compileFunctions()
-		reflectdata.WriteRuntimeTypes()
-		if numDecls == len(typecheck.Target.Decls) {
-			break
-		}
-	}
-
-	// Dump extra globals.
-	dumpglobls(typecheck.Target.Externs[numExterns:])
-
 	if reflectdata.ZeroSize > 0 {
 		zero := base.PkgLinksym("go:map", "zero", obj.ABI0)
 		objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
@@ -155,14 +122,6 @@
 
 	staticdata.WriteFuncSyms()
 	addGCLocals()
-
-	if numExports != len(typecheck.Target.Exports) {
-		base.Fatalf("Target.Exports changed after compile functions loop")
-	}
-	newNumPTabs := reflectdata.CountPTabs()
-	if newNumPTabs != numPTabs {
-		base.Fatalf("ptabs changed after compile functions loop")
-	}
 }
 
 func dumpLinkerObj(bout *bio.Writer) {
@@ -198,10 +157,10 @@
 	if n.CoverageCounter() || n.CoverageAuxVar() || n.Linksym().Static() {
 		return
 	}
-	base.Ctxt.DwarfGlobal(base.Ctxt.Pkgpath, types.TypeSymName(n.Type()), n.Linksym())
+	base.Ctxt.DwarfGlobal(types.TypeSymName(n.Type()), n.Linksym())
 }
 
-func dumpGlobalConst(n ir.Node) {
+func dumpGlobalConst(n *ir.Name) {
 	// only export typed constants
 	t := n.Type()
 	if t == nil {
@@ -226,19 +185,7 @@
 		// that type so the linker knows about it. See issue 51245.
 		_ = reflectdata.TypeLinksym(t)
 	}
-	base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
-}
-
-func dumpglobls(externs []ir.Node) {
-	// add globals
-	for _, n := range externs {
-		switch n.Op() {
-		case ir.ONAME:
-			dumpGlobal(n.(*ir.Name))
-		case ir.OLITERAL:
-			dumpGlobalConst(n)
-		}
-	}
+	base.Ctxt.DwarfIntConst(n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
 }
 
 // addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
@@ -335,12 +282,3 @@
 		staticdata.WriteEmbed(v)
 	}
 }
-
-func addsignats(dcls []ir.Node) {
-	// copy types from dcl list to signatset
-	for _, n := range dcls {
-		if n.Op() == ir.OTYPE {
-			reflectdata.NeedRuntimeType(n.Type())
-		}
-	}
-}
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
index dcac0ce..b82a983 100644
--- a/src/cmd/compile/internal/gc/util.go
+++ b/src/cmd/compile/internal/gc/util.go
@@ -5,17 +5,34 @@
 package gc
 
 import (
+	"net/url"
 	"os"
+	"path/filepath"
 	"runtime"
 	"runtime/pprof"
 	tracepkg "runtime/trace"
+	"strings"
 
 	"cmd/compile/internal/base"
 )
 
+func profileName(fn, suffix string) string {
+	if strings.HasSuffix(fn, string(os.PathSeparator)) {
+		err := os.MkdirAll(fn, 0755)
+		if err != nil {
+			base.Fatalf("%v", err)
+		}
+	}
+	if fi, statErr := os.Stat(fn); statErr == nil && fi.IsDir() {
+		fn = filepath.Join(fn, url.PathEscape(base.Ctxt.Pkgpath)+suffix)
+	}
+	return fn
+}
+
 func startProfile() {
 	if base.Flag.CPUProfile != "" {
-		f, err := os.Create(base.Flag.CPUProfile)
+		fn := profileName(base.Flag.CPUProfile, ".cpuprof")
+		f, err := os.Create(fn)
 		if err != nil {
 			base.Fatalf("%v", err)
 		}
@@ -28,18 +45,36 @@
 		if base.Flag.MemProfileRate != 0 {
 			runtime.MemProfileRate = base.Flag.MemProfileRate
 		}
-		f, err := os.Create(base.Flag.MemProfile)
+		const (
+			gzipFormat = 0
+			textFormat = 1
+		)
+		// compilebench parses the memory profile to extract memstats,
+		// which are only written in the legacy (text) pprof format.
+		// See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
+		// gzipFormat is what most people want; otherwise default to textFormat.
+		var format = textFormat
+		fn := base.Flag.MemProfile
+		if strings.HasSuffix(fn, string(os.PathSeparator)) {
+			err := os.MkdirAll(fn, 0755)
+			if err != nil {
+				base.Fatalf("%v", err)
+			}
+		}
+		if fi, statErr := os.Stat(fn); statErr == nil && fi.IsDir() {
+			fn = filepath.Join(fn, url.PathEscape(base.Ctxt.Pkgpath)+".memprof")
+			format = gzipFormat
+		}
+
+		f, err := os.Create(fn)
+
 		if err != nil {
 			base.Fatalf("%v", err)
 		}
 		base.AtExit(func() {
 			// Profile all outstanding allocations.
 			runtime.GC()
-			// compilebench parses the memory profile to extract memstats,
-			// which are only written in the legacy pprof format.
-			// See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
-			const writeLegacyFormat = 1
-			if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
+			if err := pprof.Lookup("heap").WriteTo(f, format); err != nil {
 				base.Fatalf("%v", err)
 			}
 		})
@@ -48,7 +83,7 @@
 		runtime.MemProfileRate = 0
 	}
 	if base.Flag.BlockProfile != "" {
-		f, err := os.Create(base.Flag.BlockProfile)
+		f, err := os.Create(profileName(base.Flag.BlockProfile, ".blockprof"))
 		if err != nil {
 			base.Fatalf("%v", err)
 		}
@@ -59,7 +94,7 @@
 		})
 	}
 	if base.Flag.MutexProfile != "" {
-		f, err := os.Create(base.Flag.MutexProfile)
+		f, err := os.Create(profileName(base.Flag.MutexProfile, ".mutexprof"))
 		if err != nil {
 			base.Fatalf("%v", err)
 		}
@@ -70,7 +105,7 @@
 		})
 	}
 	if base.Flag.TraceProfile != "" {
-		f, err := os.Create(base.Flag.TraceProfile)
+		f, err := os.Create(profileName(base.Flag.TraceProfile, ".trace"))
 		if err != nil {
 			base.Fatalf("%v", err)
 		}
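
The new profileName helper lets -cpuprofile and the other profile flags point at a directory and get one file per package, named by the URL-escaped package path. A standalone sketch of the same logic; the extra pkgpath parameter replaces base.Ctxt.Pkgpath so the example is self-contained:

	package main

	import (
		"fmt"
		"net/url"
		"os"
		"path/filepath"
		"strings"
	)

	// profileName mirrors the helper above: if fn names a directory (or
	// ends in a path separator), the profile is written to a per-package
	// file inside it.
	func profileName(fn, pkgpath, suffix string) string {
		if strings.HasSuffix(fn, string(os.PathSeparator)) {
			_ = os.MkdirAll(fn, 0755)
		}
		if fi, err := os.Stat(fn); err == nil && fi.IsDir() {
			fn = filepath.Join(fn, url.PathEscape(pkgpath)+suffix)
		}
		return fn
	}

	func main() {
		dir := os.TempDir() + string(os.PathSeparator)
		fmt.Println(profileName(dir, "cmd/compile/internal/gc", ".cpuprof"))
	}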
diff --git a/src/cmd/compile/internal/importer/gcimporter.go b/src/cmd/compile/internal/importer/gcimporter.go
index 490cdf9..1f7b49c 100644
--- a/src/cmd/compile/internal/importer/gcimporter.go
+++ b/src/cmd/compile/internal/importer/gcimporter.go
@@ -8,6 +8,7 @@
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"fmt"
 	"go/build"
 	"internal/pkgbits"
@@ -21,7 +22,7 @@
 	"cmd/compile/internal/types2"
 )
 
-var exportMap sync.Map // package dir → func() (string, bool)
+var exportMap sync.Map // package dir → func() (string, error)
 
 // lookupGorootExport returns the location of the export data
 // (normally found in the build cache, but located in GOROOT/pkg
@@ -30,37 +31,42 @@
 // (We use the package's directory instead of its import path
 // mainly to simplify handling of the packages in src/vendor
 // and cmd/vendor.)
-func lookupGorootExport(pkgDir string) (string, bool) {
+func lookupGorootExport(pkgDir string) (string, error) {
 	f, ok := exportMap.Load(pkgDir)
 	if !ok {
 		var (
 			listOnce   sync.Once
 			exportPath string
+			err        error
 		)
-		f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
+		f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
 			listOnce.Do(func() {
 				cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
 				cmd.Dir = build.Default.GOROOT
 				cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
 				var output []byte
-				output, err := cmd.Output()
+				output, err = cmd.Output()
 				if err != nil {
+					if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+						err = errors.New(string(ee.Stderr))
+					}
 					return
 				}
 
 				exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
 				if len(exports) != 1 {
+					err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
 					return
 				}
 
 				exportPath = exports[0]
 			})
 
-			return exportPath, exportPath != ""
+			return exportPath, err
 		})
 	}
 
-	return f.(func() (string, bool))()
+	return f.(func() (string, error))()
 }
 
 var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
@@ -69,10 +75,9 @@
 // path based on package information provided by build.Import (using
 // the build.Default build.Context). A relative srcDir is interpreted
 // relative to the current working directory.
-// If no file was found, an empty filename is returned.
-func FindPkg(path, srcDir string) (filename, id string) {
+func FindPkg(path, srcDir string) (filename, id string, err error) {
 	if path == "" {
-		return
+		return "", "", errors.New("path is empty")
 	}
 
 	var noext string
@@ -83,16 +88,19 @@
 		if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
 			srcDir = abs
 		}
-		bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+		var bp *build.Package
+		bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
 		if bp.PkgObj == "" {
-			var ok bool
 			if bp.Goroot && bp.Dir != "" {
-				filename, ok = lookupGorootExport(bp.Dir)
+				filename, err = lookupGorootExport(bp.Dir)
+				if err == nil {
+					_, err = os.Stat(filename)
+				}
+				if err == nil {
+					return filename, bp.ImportPath, nil
+				}
 			}
-			if !ok {
-				id = path // make sure we have an id to print in error message
-				return
-			}
+			goto notfound
 		} else {
 			noext = strings.TrimSuffix(bp.PkgObj, ".a")
 		}
@@ -117,21 +125,23 @@
 		}
 	}
 
-	if filename != "" {
-		if f, err := os.Stat(filename); err == nil && !f.IsDir() {
-			return
-		}
-	}
 	// try extensions
 	for _, ext := range pkgExts {
 		filename = noext + ext
-		if f, err := os.Stat(filename); err == nil && !f.IsDir() {
-			return
+		f, statErr := os.Stat(filename)
+		if statErr == nil && !f.IsDir() {
+			return filename, id, nil
+		}
+		if err == nil {
+			err = statErr
 		}
 	}
 
-	filename = "" // not found
-	return
+notfound:
+	if err == nil {
+		return "", path, fmt.Errorf("can't find import: %q", path)
+	}
+	return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
 }
 
 // Import imports a gc-generated package given its import path and srcDir, adds
@@ -159,12 +169,12 @@
 		rc = f
 	} else {
 		var filename string
-		filename, id = FindPkg(path, srcDir)
+		filename, id, err = FindPkg(path, srcDir)
 		if filename == "" {
 			if path == "unsafe" {
 				return types2.Unsafe, nil
 			}
-			return nil, fmt.Errorf("can't find import: %q", id)
+			return nil, err
 		}
 
 		// no need to re-import if the package was imported completely before
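
lookupGorootExport above memoizes the result of running go list by storing a function value in a sync.Map, so concurrent importers share a single invocation and, with this change, also share its error. A self-contained sketch of that memoization pattern; the failing lookup body is a stand-in, not the real go list call:

	package main

	import (
		"errors"
		"fmt"
		"sync"
	)

	var exportMap sync.Map // key -> func() (string, error)

	func lookup(key string) (string, error) {
		f, ok := exportMap.Load(key)
		if !ok {
			var (
				once sync.Once
				path string
				err  error
			)
			f, _ = exportMap.LoadOrStore(key, func() (string, error) {
				once.Do(func() {
					// Stand-in for running "go list -export"; always fails here.
					err = errors.New("export data not found for " + key)
				})
				return path, err
			})
		}
		return f.(func() (string, error))()
	}

	func main() {
		_, err := lookup("example/pkg")
		fmt.Println(err)
	}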
diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go
index 96c5f69..7fe4445 100644
--- a/src/cmd/compile/internal/importer/gcimporter_test.go
+++ b/src/cmd/compile/internal/importer/gcimporter_test.go
@@ -105,9 +105,9 @@
 
 		importMap := map[string]string{}
 		for _, pkg := range wantImports {
-			export, _ := FindPkg(pkg, "testdata")
+			export, _, err := FindPkg(pkg, "testdata")
 			if export == "" {
-				t.Fatalf("no export data found for %s", pkg)
+				t.Fatalf("no export data found for %s: %v", pkg, err)
 			}
 			importMap[pkg] = export
 		}
@@ -268,7 +268,7 @@
 	{"math.Pi", "const Pi untyped float"},
 	{"math.Sin", "func Sin(x float64) float64"},
 	{"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"},
-	{"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string)"},
+	{"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string, err error)"},
 
 	// interfaces
 	{"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"},
@@ -437,9 +437,9 @@
 		t.Fatal(err)
 	}
 
-	jsonExport, _ := FindPkg("encoding/json", "testdata")
+	jsonExport, _, err := FindPkg("encoding/json", "testdata")
 	if jsonExport == "" {
-		t.Fatalf("no export data found for encoding/json")
+		t.Fatalf("no export data found for encoding/json: %v", err)
 	}
 
 	compile(t, "testdata", "a.go", testoutdir, map[string]string{"encoding/json": jsonExport})
diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go
index 24d3d4b..4981347 100644
--- a/src/cmd/compile/internal/importer/iimport.go
+++ b/src/cmd/compile/internal/importer/iimport.go
@@ -77,8 +77,6 @@
 	unionType
 )
 
-const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
-
 // ImportData imports a package from the serialized package data
 // and returns the number of bytes consumed and a reference to the package.
 // If the export data version is not recognized or the format is otherwise
@@ -108,10 +106,10 @@
 	sLen := int64(r.uint64())
 	dLen := int64(r.uint64())
 
-	whence, _ := r.Seek(0, io_SeekCurrent)
+	whence, _ := r.Seek(0, io.SeekCurrent)
 	stringData := data[whence : whence+sLen]
 	declData := data[whence+sLen : whence+sLen+dLen]
-	r.Seek(sLen+dLen, io_SeekCurrent)
+	r.Seek(sLen+dLen, io.SeekCurrent)
 
 	p := iimporter{
 		exportVersion: version,
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index 4ae7fa9..b365008 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -29,10 +29,11 @@
 import (
 	"fmt"
 	"go/constant"
-	"sort"
+	"internal/buildcfg"
 	"strconv"
 
 	"cmd/compile/internal/base"
+	"cmd/compile/internal/inline/inlheur"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/pgo"
@@ -76,8 +77,8 @@
 	inlineHotMaxBudget int32 = 2000
 )
 
-// pgoInlinePrologue records the hot callsites from ir-graph.
-func pgoInlinePrologue(p *pgo.Profile, decls []ir.Node) {
+// PGOInlinePrologue records the hot callsites from ir-graph.
+func PGOInlinePrologue(p *pgo.Profile, funcs []*ir.Func) {
 	if base.Debug.PGOInlineCDFThreshold != "" {
 		if s, err := strconv.ParseFloat(base.Debug.PGOInlineCDFThreshold, 64); err == nil && s >= 0 && s <= 100 {
 			inlineCDFHotCallSiteThresholdPercent = s
@@ -85,7 +86,7 @@
 			base.Fatalf("invalid PGOInlineCDFThreshold, must be between 0 and 100")
 		}
 	}
-	var hotCallsites []pgo.NodeMapKey
+	var hotCallsites []pgo.NamedCallEdge
 	inlineHotCallSiteThresholdPercent, hotCallsites = hotNodesFromCDF(p)
 	if base.Debug.PGODebug > 0 {
 		fmt.Printf("hot-callsite-thres-from-CDF=%v\n", inlineHotCallSiteThresholdPercent)
@@ -119,101 +120,67 @@
 // (currently only used in debug prints) (in case of equal weights,
 // comparing with the threshold may not accurately reflect which nodes are
 // considered hot).
-func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NodeMapKey) {
-	nodes := make([]pgo.NodeMapKey, len(p.NodeMap))
-	i := 0
-	for n := range p.NodeMap {
-		nodes[i] = n
-		i++
-	}
-	sort.Slice(nodes, func(i, j int) bool {
-		ni, nj := nodes[i], nodes[j]
-		if wi, wj := p.NodeMap[ni].EWeight, p.NodeMap[nj].EWeight; wi != wj {
-			return wi > wj // want larger weight first
-		}
-		// same weight, order by name/line number
-		if ni.CallerName != nj.CallerName {
-			return ni.CallerName < nj.CallerName
-		}
-		if ni.CalleeName != nj.CalleeName {
-			return ni.CalleeName < nj.CalleeName
-		}
-		return ni.CallSiteOffset < nj.CallSiteOffset
-	})
+func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NamedCallEdge) {
 	cum := int64(0)
-	for i, n := range nodes {
-		w := p.NodeMap[n].EWeight
+	for i, n := range p.NamedEdgeMap.ByWeight {
+		w := p.NamedEdgeMap.Weight[n]
 		cum += w
-		if pgo.WeightInPercentage(cum, p.TotalEdgeWeight) > inlineCDFHotCallSiteThresholdPercent {
+		if pgo.WeightInPercentage(cum, p.TotalWeight) > inlineCDFHotCallSiteThresholdPercent {
 			// nodes[:i+1] to include the very last node that makes it to go over the threshold.
 			// (Say, if the CDF threshold is 50% and one hot node takes 60% of weight, we want to
 			// include that node instead of excluding it.)
-			return pgo.WeightInPercentage(w, p.TotalEdgeWeight), nodes[:i+1]
+			return pgo.WeightInPercentage(w, p.TotalWeight), p.NamedEdgeMap.ByWeight[:i+1]
 		}
 	}
-	return 0, nodes
+	return 0, p.NamedEdgeMap.ByWeight
 }
 
-// InlinePackage finds functions that can be inlined and clones them before walk expands them.
-func InlinePackage(p *pgo.Profile) {
-	if base.Debug.PGOInline == 0 {
-		p = nil
+// CanInlineFuncs computes whether a batch of functions are inlinable.
+func CanInlineFuncs(funcs []*ir.Func, profile *pgo.Profile) {
+	if profile != nil {
+		PGOInlinePrologue(profile, funcs)
 	}
 
-	InlineDecls(p, typecheck.Target.Decls, true)
-
-	// Perform a garbage collection of hidden closures functions that
-	// are no longer reachable from top-level functions following
-	// inlining. See #59404 and #59638 for more context.
-	garbageCollectUnreferencedHiddenClosures()
+	ir.VisitFuncsBottomUp(funcs, func(list []*ir.Func, recursive bool) {
+		CanInlineSCC(list, recursive, profile)
+	})
 }
 
-// InlineDecls applies inlining to the given batch of declarations.
-func InlineDecls(p *pgo.Profile, decls []ir.Node, doInline bool) {
-	if p != nil {
-		pgoInlinePrologue(p, decls)
+// CanInlineSCC computes the inlinability of functions within an SCC
+// (strongly connected component).
+//
+// CanInlineSCC is designed to be used by ir.VisitFuncsBottomUp
+// callbacks.
+func CanInlineSCC(funcs []*ir.Func, recursive bool, profile *pgo.Profile) {
+	if base.Flag.LowerL == 0 {
+		return
 	}
 
-	doCanInline := func(n *ir.Func, recursive bool, numfns int) {
+	numfns := numNonClosures(funcs)
+
+	for _, fn := range funcs {
 		if !recursive || numfns > 1 {
 			// We allow inlining if there is no
 			// recursion, or the recursion cycle is
 			// across more than one function.
-			CanInline(n, p)
+			CanInline(fn, profile)
 		} else {
-			if base.Flag.LowerM > 1 && n.OClosure == nil {
-				fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname)
+			if base.Flag.LowerM > 1 && fn.OClosure == nil {
+				fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(fn), fn.Nname)
 			}
 		}
+		if inlheur.Enabled() {
+			analyzeFuncProps(fn, profile)
+		}
 	}
-
-	ir.VisitFuncsBottomUp(decls, func(list []*ir.Func, recursive bool) {
-		numfns := numNonClosures(list)
-		// We visit functions within an SCC in fairly arbitrary order,
-		// so by computing inlinability for all functions in the SCC
-		// before performing any inlining, the results are less
-		// sensitive to the order within the SCC (see #58905 for an
-		// example).
-
-		// First compute inlinability for all functions in the SCC ...
-		for _, n := range list {
-			doCanInline(n, recursive, numfns)
-		}
-		// ... then make a second pass to do inlining of calls.
-		if doInline {
-			for _, n := range list {
-				InlineCalls(n, p)
-			}
-		}
-	})
 }
 
-// garbageCollectUnreferencedHiddenClosures makes a pass over all the
+// GarbageCollectUnreferencedHiddenClosures makes a pass over all the
 // top-level (non-hidden-closure) functions looking for nested closure
 // functions that are reachable, then sweeps through the Target.Decls
 // list and marks any non-reachable hidden closure function as dead.
 // See issues #59404 and #59638 for more context.
-func garbageCollectUnreferencedHiddenClosures() {
+func GarbageCollectUnreferencedHiddenClosures() {
 
 	liveFuncs := make(map[*ir.Func]bool)
 
@@ -230,35 +197,59 @@
 		})
 	}
 
-	for i := 0; i < len(typecheck.Target.Decls); i++ {
-		if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
-			if fn.IsHiddenClosure() {
-				continue
-			}
-			markLiveFuncs(fn)
+	for i := 0; i < len(typecheck.Target.Funcs); i++ {
+		fn := typecheck.Target.Funcs[i]
+		if fn.IsHiddenClosure() {
+			continue
 		}
+		markLiveFuncs(fn)
 	}
 
-	for i := 0; i < len(typecheck.Target.Decls); i++ {
-		if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
-			if !fn.IsHiddenClosure() {
-				continue
-			}
-			if fn.IsDeadcodeClosure() {
-				continue
-			}
-			if liveFuncs[fn] {
-				continue
-			}
-			fn.SetIsDeadcodeClosure(true)
-			if base.Flag.LowerM > 2 {
-				fmt.Printf("%v: unreferenced closure %v marked as dead\n", ir.Line(fn), fn)
-			}
-			if fn.Inl != nil && fn.LSym == nil {
-				ir.InitLSym(fn, true)
+	for i := 0; i < len(typecheck.Target.Funcs); i++ {
+		fn := typecheck.Target.Funcs[i]
+		if !fn.IsHiddenClosure() {
+			continue
+		}
+		if fn.IsDeadcodeClosure() {
+			continue
+		}
+		if liveFuncs[fn] {
+			continue
+		}
+		fn.SetIsDeadcodeClosure(true)
+		if base.Flag.LowerM > 2 {
+			fmt.Printf("%v: unreferenced closure %v marked as dead\n", ir.Line(fn), fn)
+		}
+		if fn.Inl != nil && fn.LSym == nil {
+			ir.InitLSym(fn, true)
+		}
+	}
+}
+
+// inlineBudget determines the max budget for function 'fn' prior to
+// analyzing the hairyness of the body of 'fn'. We pass in the pgo
+// profile if available (which can change the budget), as well as a
+// 'relaxed' flag, which expands the budget slightly to allow for the
+// possibility that a call to the function might have its score
+// adjusted downwards. If 'verbose' is set, then print a remark where
+// we boost the budget due to PGO.
+func inlineBudget(fn *ir.Func, profile *pgo.Profile, relaxed bool, verbose bool) int32 {
+	// Update the budget for profile-guided inlining.
+	budget := int32(inlineMaxBudget)
+	if profile != nil {
+		if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok {
+			if _, ok := candHotCalleeMap[n]; ok {
+				budget = int32(inlineHotMaxBudget)
+				if verbose {
+					fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn))
+				}
 			}
 		}
 	}
+	if relaxed {
+		budget += inlheur.BudgetExpansion(inlineMaxBudget)
+	}
+	return budget
 }
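+
+// For illustration, assuming the usual defaults of inlineMaxBudget=80
+// and inlineHotMaxBudget=2000: a callee that PGO marks as hot is
+// budgeted at 2000 rather than 80, and when 'relaxed' is set (new
+// inliner enabled) BudgetExpansion adds a further allowance derived
+// from the base budget on top of whichever value was chosen.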
 
 // CanInline determines whether fn is inlineable.
@@ -302,18 +293,11 @@
 		cc = 1 // this appears to yield better performance than 0.
 	}
 
-	// Update the budget for profile-guided inlining.
-	budget := int32(inlineMaxBudget)
-	if profile != nil {
-		if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok {
-			if _, ok := candHotCalleeMap[n]; ok {
-				budget = int32(inlineHotMaxBudget)
-				if base.Debug.PGODebug > 0 {
-					fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn))
-				}
-			}
-		}
-	}
+	// Use a "relaxed" inline budget if the new inliner is enabled.
+	relaxed := inlheur.Enabled()
+
+	// Compute the inline budget for this func.
+	budget := inlineBudget(fn, profile, relaxed, base.Debug.PGODebug > 0)
 
 	// At this point in the game the function we're looking at may
 	// have "stale" autos, vars that still appear in the Dcl list, but
@@ -322,10 +306,11 @@
 	// when creating the "Inline.Dcl" field below; to accomplish this,
 	// the hairyVisitor below builds up a map of used/referenced
 	// locals, and we use this map to produce a pruned Inline.Dcl
-	// list. See issue 25249 for more context.
+	// list. See issue 25459 for more context.
 
 	visitor := hairyVisitor{
 		curFunc:       fn,
+		isBigFunc:     IsBigFunc(fn),
 		budget:        budget,
 		maxBudget:     budget,
 		extraCallCost: cc,
@@ -337,20 +322,28 @@
 	}
 
 	n.Func.Inl = &ir.Inline{
-		Cost: budget - visitor.budget,
-		Dcl:  pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
-		Body: inlcopylist(fn.Body),
+		Cost:    budget - visitor.budget,
+		Dcl:     pruneUnusedAutos(n.Func.Dcl, &visitor),
+		HaveDcl: true,
 
 		CanDelayResults: canDelayResults(fn),
 	}
+	if base.Flag.LowerM != 0 || logopt.Enabled() {
+		noteInlinableFunc(n, fn, budget-visitor.budget)
+	}
+}
 
+// noteInlinableFunc issues a message to the user that the specified
+// function is inlinable.
+func noteInlinableFunc(n *ir.Name, fn *ir.Func, cost int32) {
 	if base.Flag.LowerM > 1 {
-		fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, budget-visitor.budget, fn.Type(), ir.Nodes(n.Func.Inl.Body))
+		fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), ir.Nodes(fn.Body))
 	} else if base.Flag.LowerM != 0 {
 		fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
 	}
+	// JSON optimization log output.
 	if logopt.Enabled() {
-		logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", budget-visitor.budget))
+		logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", cost))
 	}
 }
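+
+// With -gcflags=-m this produces output of the form
+// "foo.go:10:6: can inline add" (file/line and names are illustrative),
+// and at -m=2 and above it also reports the cost and a rendering of
+// the inline body, following the Printf formats above.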
 
@@ -421,13 +414,6 @@
 		return reason
 	}
 
-	// If fn is synthetic hash or eq function, cannot inline it.
-	// The function is not generated in Unified IR frontend at this moment.
-	if ir.IsEqOrHashFunc(fn) {
-		reason = "type eq/hash function"
-		return reason
-	}
-
 	return ""
 }
 
@@ -454,8 +440,8 @@
 	}
 
 	// temporaries for return values.
-	for _, param := range fn.Type().Results().FieldSlice() {
-		if sym := types.OrigSym(param.Sym); sym != nil && !sym.IsBlank() {
+	for _, param := range fn.Type().Results() {
+		if sym := param.Sym; sym != nil && !sym.IsBlank() {
 			return false // found a named result parameter (case 3)
 		}
 	}
@@ -468,6 +454,7 @@
 type hairyVisitor struct {
 	// This is needed to access the current caller in the doNode function.
 	curFunc       *ir.Func
+	isBigFunc     bool
 	budget        int32
 	maxBudget     int32
 	reason        string
@@ -495,6 +482,7 @@
 	if n == nil {
 		return false
 	}
+opSwitch:
 	switch n.Op() {
 	// Call is okay if inlinable and we have the budget for the body.
 	case ir.OCALLFUNC:
@@ -504,24 +492,23 @@
 		//
 		// runtime.throw is a "cheap call" like panic in normal code.
 		var cheap bool
-		if n.X.Op() == ir.ONAME {
-			name := n.X.(*ir.Name)
-			if name.Class == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) {
-				fn := name.Sym().Name
-				if fn == "getcallerpc" || fn == "getcallersp" {
+		if n.Fun.Op() == ir.ONAME {
+			name := n.Fun.(*ir.Name)
+			if name.Class == ir.PFUNC {
+				switch fn := types.RuntimeSymName(name.Sym()); fn {
+				case "getcallerpc", "getcallersp":
 					v.reason = "call to " + fn
 					return true
-				}
-				if fn == "throw" {
+				case "throw":
 					v.budget -= inlineExtraThrowCost
-					break
+					break opSwitch
+				case "panicrangeexit":
+					cheap = true
 				}
-			}
-			// Special case for reflect.noescpae. It does just type
-			// conversions to appease the escape analysis, and doesn't
-			// generate code.
-			if name.Class == ir.PFUNC && types.IsReflectPkg(name.Sym().Pkg) {
-				if name.Sym().Name == "noescape" {
+				// Special case for reflect.noescape. It does just type
+				// conversions to appease the escape analysis, and doesn't
+				// generate code.
+				if types.ReflectSymName(name.Sym()) == "noescape" {
 					cheap = true
 				}
 			}
@@ -539,11 +526,11 @@
 				return false
 			}
 		}
-		if n.X.Op() == ir.OMETHEXPR {
-			if meth := ir.MethodExprName(n.X); meth != nil {
+		if n.Fun.Op() == ir.OMETHEXPR {
+			if meth := ir.MethodExprName(n.Fun); meth != nil {
 				if fn := meth.Func; fn != nil {
 					s := fn.Sym()
-					if types.IsRuntimePkg(s.Pkg) && s.Name == "heapBits.nextArena" {
+					if types.RuntimeSymName(s) == "heapBits.nextArena" {
 						// Special case: explicitly allow mid-stack inlining of
 						// runtime.heapBits.next even though it calls slow-path
 						// runtime.heapBits.nextArena.
@@ -571,27 +558,30 @@
 			break // treat like any other node, that is, cost of 1
 		}
 
-		// Determine if the callee edge is for an inlinable hot callee or not.
-		if v.profile != nil && v.curFunc != nil {
-			if fn := inlCallee(n.X, v.profile); fn != nil && typecheck.HaveInlineBody(fn) {
-				lineOffset := pgo.NodeLineOffset(n, fn)
-				csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: v.curFunc}
-				if _, o := candHotEdgeMap[csi]; o {
-					if base.Debug.PGODebug > 0 {
-						fmt.Printf("hot-callsite identified at line=%v for func=%v\n", ir.Line(n), ir.PkgFuncName(v.curFunc))
-					}
-				}
-			}
-		}
-
 		if ir.IsIntrinsicCall(n) {
 			// Treat like any other node.
 			break
 		}
 
-		if fn := inlCallee(n.X, v.profile); fn != nil && typecheck.HaveInlineBody(fn) {
-			v.budget -= fn.Inl.Cost
-			break
+		if callee := inlCallee(v.curFunc, n.Fun, v.profile); callee != nil && typecheck.HaveInlineBody(callee) {
+			// Check whether we'd actually inline this call. Set
+			// log == false since we aren't actually doing inlining
+			// yet.
+			if ok, _ := canInlineCallExpr(v.curFunc, n, callee, v.isBigFunc, false); ok {
+				// mkinlcall would inline this call [1], so use
+				// the cost of the inline body as the cost of
+				// the call, as that is what will actually
+				// appear in the code.
+				//
+				// [1] This is almost a perfect match to the
+				// mkinlcall logic, except that
+				// canInlineCallExpr considers inlining cycles
+				// by looking at what has already been inlined.
+				// Since we haven't done any inlining yet we
+				// will miss those.
+				v.budget -= callee.Inl.Cost
+				break
+			}
 		}
 
 		// Call cost for non-leaf inlining.
@@ -616,6 +606,8 @@
 		v.budget -= inlineExtraPanicCost
 
 	case ir.ORECOVER:
+		base.FatalfAt(n.Pos(), "ORECOVER missed typecheck")
+	case ir.ORECOVERFP:
 		// recover matches the argument frame pointer to find
 		// the right panic value, so it needs an argument frame.
 		v.reason = "call to recover"
@@ -635,10 +627,7 @@
 		// should try to account for that if we're going to account for captures.
 		v.budget -= 15
 
-	case ir.OGO,
-		ir.ODEFER,
-		ir.ODCLTYPE, // can't print yet
-		ir.OTAILCALL:
+	case ir.OGO, ir.ODEFER, ir.OTAILCALL:
 		v.reason = "unhandled op " + n.Op().String()
 		return true
 
@@ -670,7 +659,7 @@
 		// This doesn't produce code, but the children might.
 		v.budget++ // undo default cost
 
-	case ir.ODCLCONST, ir.OFALL, ir.OTYPE:
+	case ir.OFALL, ir.OTYPE:
 		// These nodes don't produce code; omit from inlining budget.
 		return false
 
@@ -725,14 +714,16 @@
 		// particular, to avoid breaking the existing inlinability regress
 		// tests), we need to compensate for this here.
 		//
-		// See also identical logic in isBigFunc.
-		if init := n.Rhs[0].Init(); len(init) == 1 {
-			if _, ok := init[0].(*ir.AssignListStmt); ok {
-				// 4 for each value, because each temporary variable now
-				// appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
-				//
-				// 1 for the extra "tmp1, tmp2 = f()" assignment statement.
-				v.budget += 4*int32(len(n.Lhs)) + 1
+		// See also identical logic in IsBigFunc.
+		if len(n.Rhs) > 0 {
+			if init := n.Rhs[0].Init(); len(init) == 1 {
+				if _, ok := init[0].(*ir.AssignListStmt); ok {
+					// 4 for each value, because each temporary variable now
+					// appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
+					//
+					// 1 for the extra "tmp1, tmp2 = f()" assignment statement.
+					v.budget += 4*int32(len(n.Lhs)) + 1
+				}
 			}
 		}
 
@@ -764,12 +755,15 @@
 	return ir.DoChildren(n, v.do)
 }
 
-func isBigFunc(fn *ir.Func) bool {
+// IsBigFunc reports whether fn is a "big" function.
+//
+// Note: The criteria for "big" is heuristic and subject to change.
+func IsBigFunc(fn *ir.Func) bool {
 	budget := inlineBigFunctionNodes
 	return ir.Any(fn, func(n ir.Node) bool {
 		// See logic in hairyVisitor.doNode, explaining unified IR's
 		// handling of "a, b = f()" assignments.
-		if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 {
+		if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 && len(n.Rhs) > 0 {
 			if init := n.Rhs[0].Init(); len(init) == 1 {
 				if _, ok := init[0].(*ir.AssignListStmt); ok {
 					budget += 4*len(n.Lhs) + 1
@@ -782,165 +776,45 @@
 	})
 }
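+
+// As a concrete illustration of the compensation above (hypothetical
+// code): for "a, b = f()", unified IR introduces two temporaries, so
+// the statement carries 4*2+1 = 9 extra nodes relative to its original
+// shape; both hairyVisitor.doNode and IsBigFunc compensate by that
+// amount so existing size thresholds keep their pre-unified-IR meaning.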
 
-// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
-// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
-// the body and dcls of an inlineable function.
-func inlcopylist(ll []ir.Node) []ir.Node {
-	s := make([]ir.Node, len(ll))
-	for i, n := range ll {
-		s[i] = inlcopy(n)
+// TryInlineCall returns an inlined call expression for call, or nil
+// if inlining is not possible.
+func TryInlineCall(callerfn *ir.Func, call *ir.CallExpr, bigCaller bool, profile *pgo.Profile) *ir.InlinedCallExpr {
+	if base.Flag.LowerL == 0 {
+		return nil
 	}
-	return s
-}
-
-// inlcopy is like DeepCopy(), but does extra work to copy closures.
-func inlcopy(n ir.Node) ir.Node {
-	var edit func(ir.Node) ir.Node
-	edit = func(x ir.Node) ir.Node {
-		switch x.Op() {
-		case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
-			return x
-		}
-		m := ir.Copy(x)
-		ir.EditChildren(m, edit)
-		if x.Op() == ir.OCLOSURE {
-			x := x.(*ir.ClosureExpr)
-			// Need to save/duplicate x.Func.Nname,
-			// x.Func.Nname.Ntype, x.Func.Dcl, x.Func.ClosureVars, and
-			// x.Func.Body for iexport and local inlining.
-			oldfn := x.Func
-			newfn := ir.NewFunc(oldfn.Pos())
-			m.(*ir.ClosureExpr).Func = newfn
-			newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
-			// XXX OK to share fn.Type() ??
-			newfn.Nname.SetType(oldfn.Nname.Type())
-			newfn.Body = inlcopylist(oldfn.Body)
-			// Make shallow copy of the Dcl and ClosureVar slices
-			newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...)
-			newfn.ClosureVars = append([]*ir.Name(nil), oldfn.ClosureVars...)
-		}
-		return m
+	if call.Op() != ir.OCALLFUNC {
+		return nil
 	}
-	return edit(n)
-}
-
-// InlineCalls/inlnode walks fn's statements and expressions and substitutes any
-// calls made to inlineable functions. This is the external entry point.
-func InlineCalls(fn *ir.Func, profile *pgo.Profile) {
-	savefn := ir.CurFunc
-	ir.CurFunc = fn
-	bigCaller := isBigFunc(fn)
-	if bigCaller && base.Flag.LowerM > 1 {
-		fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn)
-	}
-	var inlCalls []*ir.InlinedCallExpr
-	var edit func(ir.Node) ir.Node
-	edit = func(n ir.Node) ir.Node {
-		return inlnode(n, bigCaller, &inlCalls, edit, profile)
-	}
-	ir.EditChildren(fn, edit)
-
-	// If we inlined any calls, we want to recursively visit their
-	// bodies for further inlining. However, we need to wait until
-	// *after* the original function body has been expanded, or else
-	// inlCallee can have false positives (e.g., #54632).
-	for len(inlCalls) > 0 {
-		call := inlCalls[0]
-		inlCalls = inlCalls[1:]
-		ir.EditChildren(call, edit)
+	if call.GoDefer || call.NoInline {
+		return nil
 	}
 
-	ir.CurFunc = savefn
-}
-
-// inlnode recurses over the tree to find inlineable calls, which will
-// be turned into OINLCALLs by mkinlcall. When the recursion comes
-// back up will examine left, right, list, rlist, ninit, ntest, nincr,
-// nbody and nelse and use one of the 4 inlconv/glue functions above
-// to turn the OINLCALL into an expression, a statement, or patch it
-// in to this nodes list or rlist as appropriate.
-// NOTE it makes no sense to pass the glue functions down the
-// recursion to the level where the OINLCALL gets created because they
-// have to edit /this/ n, so you'd have to push that one down as well,
-// but then you may as well do it here.  so this is cleaner and
-// shorter and less complicated.
-// The result of inlnode MUST be assigned back to n, e.g.
-//
-//	n.Left = inlnode(n.Left)
-func inlnode(n ir.Node, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr, edit func(ir.Node) ir.Node, profile *pgo.Profile) ir.Node {
-	if n == nil {
-		return n
-	}
-
-	switch n.Op() {
-	case ir.ODEFER, ir.OGO:
-		n := n.(*ir.GoDeferStmt)
-		switch call := n.Call; call.Op() {
-		case ir.OCALLMETH:
-			base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
-		case ir.OCALLFUNC:
-			call := call.(*ir.CallExpr)
-			call.NoInline = true
-		}
-	case ir.OTAILCALL:
-		n := n.(*ir.TailCallStmt)
-		n.Call.NoInline = true // Not inline a tail call for now. Maybe we could inline it just like RETURN fn(arg)?
-
-	// TODO do them here (or earlier),
-	// so escape analysis can avoid more heapmoves.
-	case ir.OCLOSURE:
-		return n
-	case ir.OCALLMETH:
-		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
-	case ir.OCALLFUNC:
-		n := n.(*ir.CallExpr)
-		if n.X.Op() == ir.OMETHEXPR {
-			// Prevent inlining some reflect.Value methods when using checkptr,
-			// even when package reflect was compiled without it (#35073).
-			if meth := ir.MethodExprName(n.X); meth != nil {
-				s := meth.Sym()
-				if base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
-					return n
-				}
+	// Prevent inlining some reflect.Value methods when using checkptr,
+	// even when package reflect was compiled without it (#35073).
+	if base.Debug.Checkptr != 0 && call.Fun.Op() == ir.OMETHEXPR {
+		if method := ir.MethodExprName(call.Fun); method != nil {
+			switch types.ReflectSymName(method.Sym()) {
+			case "Value.UnsafeAddr", "Value.Pointer":
+				return nil
 			}
 		}
 	}
 
-	lno := ir.SetPos(n)
-
-	ir.EditChildren(n, edit)
-
-	// with all the branches out of the way, it is now time to
-	// transmogrify this node itself unless inhibited by the
-	// switch at the top of this function.
-	switch n.Op() {
-	case ir.OCALLMETH:
-		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
-
-	case ir.OCALLFUNC:
-		call := n.(*ir.CallExpr)
-		if call.NoInline {
-			break
-		}
-		if base.Flag.LowerM > 3 {
-			fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X)
-		}
-		if ir.IsIntrinsicCall(call) {
-			break
-		}
-		if fn := inlCallee(call.X, profile); fn != nil && typecheck.HaveInlineBody(fn) {
-			n = mkinlcall(call, fn, bigCaller, inlCalls)
-		}
+	if base.Flag.LowerM > 3 {
+		fmt.Printf("%v:call to func %+v\n", ir.Line(call), call.Fun)
 	}
-
-	base.Pos = lno
-
-	return n
+	if ir.IsIntrinsicCall(call) {
+		return nil
+	}
+	if fn := inlCallee(callerfn, call.Fun, profile); fn != nil && typecheck.HaveInlineBody(fn) {
+		return mkinlcall(callerfn, call, fn, bigCaller)
+	}
+	return nil
 }
 
 // inlCallee takes a function-typed expression and returns the underlying function ONAME
 // that it refers to if statically known. Otherwise, it returns nil.
-func inlCallee(fn ir.Node, profile *pgo.Profile) *ir.Func {
+func inlCallee(caller *ir.Func, fn ir.Node, profile *pgo.Profile) (res *ir.Func) {
 	fn = ir.StaticValue(fn)
 	switch fn.Op() {
 	case ir.OMETHEXPR:
@@ -961,6 +835,9 @@
 	case ir.OCLOSURE:
 		fn := fn.(*ir.ClosureExpr)
 		c := fn.Func
+		if len(c.ClosureVars) != 0 && c.ClosureVars[0].Outer.Curfn != caller {
+			return nil // inliner doesn't support inlining across closure frames
+		}
 		CanInline(c, profile)
 		return c
 	}
@@ -975,7 +852,7 @@
 
 // InlineCall allows the inliner implementation to be overridden.
 // If it returns nil, the function will not be inlined.
-var InlineCall = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+var InlineCall = func(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
 	base.Fatalf("inline.InlineCall not overridden")
 	panic("unreachable")
 }
@@ -983,9 +860,10 @@
 // inlineCostOK returns true if call n from caller to callee is cheap enough to
 // inline. bigCaller indicates that caller is a big function.
 //
-// If inlineCostOK returns false, it also returns the max cost that the callee
-// exceeded.
-func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32) {
+// In addition to the "cost OK" boolean, it also returns the "max
+// cost" limit used to make the decision (which may differ depending
+// on func size), and the score assigned to this specific callsite.
+func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32, int32) {
 	maxCost := int32(inlineMaxBudget)
 	if bigCaller {
 		// We use this to restrict inlining into very big functions.
@@ -993,9 +871,17 @@
 		maxCost = inlineBigFunctionMaxCost
 	}
 
-	if callee.Inl.Cost <= maxCost {
+	metric := callee.Inl.Cost
+	if inlheur.Enabled() {
+		score, ok := inlheur.GetCallSiteScore(caller, n)
+		if ok {
+			metric = int32(score)
+		}
+	}
+
+	if metric <= maxCost {
 		// Simple case. Function is already cheap enough.
-		return true, 0
+		return true, 0, metric
 	}
 
 	// We'll also allow inlining of hot functions below inlineHotMaxBudget,
@@ -1005,7 +891,7 @@
 	csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: caller}
 	if _, ok := candHotEdgeMap[csi]; !ok {
 		// Cold
-		return false, maxCost
+		return false, maxCost, metric
 	}
 
 	// Hot
@@ -1014,68 +900,80 @@
 		if base.Debug.PGODebug > 0 {
 			fmt.Printf("hot-big check disallows inlining for call %s (cost %d) at %v in big function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
 		}
-		return false, maxCost
+		return false, maxCost, metric
 	}
 
-	if callee.Inl.Cost > inlineHotMaxBudget {
-		return false, inlineHotMaxBudget
+	if metric > inlineHotMaxBudget {
+		return false, inlineHotMaxBudget, metric
+	}
+
+	if !base.PGOHash.MatchPosWithInfo(n.Pos(), "inline", nil) {
+		// De-selected by PGO Hash.
+		return false, maxCost, metric
 	}
 
 	if base.Debug.PGODebug > 0 {
 		fmt.Printf("hot-budget check allows inlining for call %s (cost %d) at %v in function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
 	}
 
-	return true, 0
+	return true, 0, metric
 }
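+
+// A sketch of the decision with hypothetical numbers: a callee whose
+// cost (or callsite score, when the new inliner is enabled) is 70 fits
+// under the usual default maxCost of 80 and is accepted immediately;
+// one at 150 is rejected unless PGO marks the callsite hot, the caller
+// is not "big", the metric stays at or under inlineHotMaxBudget, and
+// the PGO hash selection does not exclude it.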
 
-// If n is a OCALLFUNC node, and fn is an ONAME node for a
-// function with an inlinable body, return an OINLCALL node that can replace n.
-// The returned node's Ninit has the parameter assignments, the Nbody is the
-// inlined function body, and (List, Rlist) contain the (input, output)
-// parameters.
-// The result of mkinlcall MUST be assigned back to n, e.g.
+// canInlineCallExpr returns true if the call n from caller to callee
+// can be inlined, plus the score computed for the call expr in
+// question. bigCaller indicates that caller is a big function. log
+// indicates that the 'cannot inline' reason should be logged.
 //
-//	n.Left = mkinlcall(n.Left, fn, isddd)
-func mkinlcall(n *ir.CallExpr, fn *ir.Func, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr) ir.Node {
-	if fn.Inl == nil {
-		if logopt.Enabled() {
-			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
-				fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
+// Preconditions: CanInline(callee) has already been called.
+func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCaller bool, log bool) (bool, int32) {
+	if callee.Inl == nil {
+		// callee is never inlinable.
+		if log && logopt.Enabled() {
+			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+				fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(callee)))
 		}
-		return n
+		return false, 0
 	}
 
-	if ok, maxCost := inlineCostOK(n, ir.CurFunc, fn, bigCaller); !ok {
-		if logopt.Enabled() {
-			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
-				fmt.Sprintf("cost %d of %s exceeds max caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost))
+	ok, maxCost, callSiteScore := inlineCostOK(n, callerfn, callee, bigCaller)
+	if !ok {
+		// callee cost too high for this call site.
+		if log && logopt.Enabled() {
+			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+				fmt.Sprintf("cost %d of %s exceeds max caller cost %d", callee.Inl.Cost, ir.PkgFuncName(callee), maxCost))
 		}
-		return n
+		return false, 0
 	}
 
-	if fn == ir.CurFunc {
+	if callee == callerfn {
 		// Can't recursively inline a function into itself.
-		if logopt.Enabled() {
-			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(ir.CurFunc)))
+		if log && logopt.Enabled() {
+			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(callerfn)))
 		}
-		return n
+		return false, 0
 	}
 
-	if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(fn.Sym().Pkg) {
+	if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(callee.Sym().Pkg) {
 		// Runtime package must not be instrumented.
 		// Instrument skips runtime package. However, some runtime code can be
 		// inlined into other packages and instrumented there. To avoid this,
 		// we disable inlining of runtime functions when instrumenting.
 		// The example that we observed is inlining of LockOSThread,
 		// which lead to false race reports on m contents.
-		return n
-	}
-	if base.Flag.Race && types.IsNoRacePkg(fn.Sym().Pkg) {
-		return n
+		if log && logopt.Enabled() {
+			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+				fmt.Sprintf("call to runtime function %s in instrumented build", ir.PkgFuncName(callee)))
+		}
+		return false, 0
 	}
 
-	parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
-	sym := fn.Linksym()
+	if base.Flag.Race && types.IsNoRacePkg(callee.Sym().Pkg) {
+		if log && logopt.Enabled() {
+			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+				fmt.Sprintf(`call to "no-race" package function %s in race build`, ir.PkgFuncName(callee)))
+		}
+		return false, 0
+	}
 
 	// Check if we've already inlined this function at this particular
 	// call site, in order to stop inlining when we reach the beginning
@@ -1084,17 +982,42 @@
 	// many functions. Most likely, the inlining will stop before we
 	// even hit the beginning of the cycle again, but this catches the
 	// unusual case.
+	parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
+	sym := callee.Linksym()
 	for inlIndex := parent; inlIndex >= 0; inlIndex = base.Ctxt.InlTree.Parent(inlIndex) {
 		if base.Ctxt.InlTree.InlinedFunction(inlIndex) == sym {
-			if base.Flag.LowerM > 1 {
-				fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(ir.CurFunc))
+			if log {
+				if base.Flag.LowerM > 1 {
+					fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), callee, ir.FuncName(callerfn))
+				}
+				if logopt.Enabled() {
+					logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+						fmt.Sprintf("repeated recursive cycle to %s", ir.PkgFuncName(callee)))
+				}
 			}
-			return n
+			return false, 0
 		}
 	}
 
+	return true, callSiteScore
+}
+
+// mkinlcall returns an OINLCALL node that can replace OCALLFUNC n, or
+// nil if it cannot be inlined. callerfn is the function that contains
+// n, and fn is the function being called.
+//
+// The result of mkinlcall MUST be assigned back to n, e.g.
+//
+//	n.Left = mkinlcall(callerfn, n.Left, fn, bigCaller)
+func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool) *ir.InlinedCallExpr {
+	ok, score := canInlineCallExpr(callerfn, n, fn, bigCaller, true)
+	if !ok {
+		return nil
+	}
 	typecheck.AssertFixedCall(n)
 
+	parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
+	sym := fn.Linksym()
 	inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym, ir.FuncName(fn))
 
 	closureInitLSym := func(n *ir.CallExpr, fn *ir.Func) {
@@ -1123,12 +1046,12 @@
 			// Not a standard call.
 			return
 		}
-		if n.X.Op() != ir.OCLOSURE {
+		if n.Fun.Op() != ir.OCLOSURE {
 			// Not a direct closure call.
 			return
 		}
 
-		clo := n.X.(*ir.ClosureExpr)
+		clo := n.Fun.(*ir.ClosureExpr)
 		if ir.IsTrivialClosure(clo) {
 			// enqueueFunc will handle trivial closures anyways.
 			return
@@ -1147,13 +1070,18 @@
 	}
 
 	if base.Flag.LowerM != 0 {
-		fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
+		if buildcfg.Experiment.NewInliner {
+			fmt.Printf("%v: inlining call to %v with score %d\n",
+				ir.Line(n), fn, score)
+		} else {
+			fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
+		}
 	}
 	if base.Flag.LowerM > 2 {
 		fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
 	}
 
-	res := InlineCall(n, fn, inlIndex)
+	res := InlineCall(callerfn, n, fn, inlIndex)
 
 	if res == nil {
 		base.FatalfAt(n.Pos(), "inlining call to %v failed", fn)
@@ -1163,7 +1091,9 @@
 		fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
 	}
 
-	*inlCalls = append(*inlCalls, res)
+	if inlheur.Enabled() {
+		inlheur.UpdateCallsiteTable(callerfn, n, res)
+	}
 
 	return res
 }
@@ -1197,6 +1127,9 @@
 	for _, n := range ll {
 		if n.Class == ir.PAUTO {
 			if !vis.usedLocals.Has(n) {
+				// TODO(mdempsky): Simplify code after confident that this
+				// never happens anymore.
+				base.FatalfAt(n.Pos(), "unused auto: %v", n)
 				continue
 			}
 		}
@@ -1245,10 +1178,10 @@
 // determine whether it represents a call to sync/atomic.AddUint32 to
 // increment a coverage counter.
 func isAtomicCoverageCounterUpdate(cn *ir.CallExpr) bool {
-	if cn.X.Op() != ir.ONAME {
+	if cn.Fun.Op() != ir.ONAME {
 		return false
 	}
-	name := cn.X.(*ir.Name)
+	name := cn.Fun.(*ir.Name)
 	if name.Class != ir.PFUNC {
 		return false
 	}
@@ -1264,3 +1197,21 @@
 	v := isIndexingCoverageCounter(adn.X)
 	return v
 }
+
+func PostProcessCallSites(profile *pgo.Profile) {
+	if base.Debug.DumpInlCallSiteScores != 0 {
+		budgetCallback := func(fn *ir.Func, prof *pgo.Profile) (int32, bool) {
+			v := inlineBudget(fn, prof, false, false)
+			return v, v == inlineHotMaxBudget
+		}
+		inlheur.DumpInlCallSiteScores(profile, budgetCallback)
+	}
+}
+
+func analyzeFuncProps(fn *ir.Func, p *pgo.Profile) {
+	canInline := func(fn *ir.Func) { CanInline(fn, p) }
+	budgetForFunc := func(fn *ir.Func) int32 {
+		return inlineBudget(fn, p, true, false)
+	}
+	inlheur.AnalyzeFunc(fn, canInline, budgetForFunc, inlineMaxBudget)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go b/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go
new file mode 100644
index 0000000..2faf76f
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go
@@ -0,0 +1,58 @@
+// Code generated by "stringer -bitset -type ActualExprPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+import "bytes"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[ActualExprConstant-1]
+	_ = x[ActualExprIsConcreteConvIface-2]
+	_ = x[ActualExprIsFunc-4]
+	_ = x[ActualExprIsInlinableFunc-8]
+}
+
+var _ActualExprPropBits_value = [...]uint64{
+	0x1, /* ActualExprConstant */
+	0x2, /* ActualExprIsConcreteConvIface */
+	0x4, /* ActualExprIsFunc */
+	0x8, /* ActualExprIsInlinableFunc */
+}
+
+const _ActualExprPropBits_name = "ActualExprConstantActualExprIsConcreteConvIfaceActualExprIsFuncActualExprIsInlinableFunc"
+
+var _ActualExprPropBits_index = [...]uint8{0, 18, 47, 63, 88}
+
+func (i ActualExprPropBits) String() string {
+	var b bytes.Buffer
+
+	remain := uint64(i)
+	seen := false
+
+	for k, v := range _ActualExprPropBits_value {
+		x := _ActualExprPropBits_name[_ActualExprPropBits_index[k]:_ActualExprPropBits_index[k+1]]
+		if v == 0 {
+			if i == 0 {
+				b.WriteString(x)
+				return b.String()
+			}
+			continue
+		}
+		if (v & remain) == v {
+			remain &^= v
+			x := _ActualExprPropBits_name[_ActualExprPropBits_index[k]:_ActualExprPropBits_index[k+1]]
+			if seen {
+				b.WriteString("|")
+			}
+			seen = true
+			b.WriteString(x)
+		}
+	}
+	if remain == 0 {
+		return b.String()
+	}
+	return "ActualExprPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
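+
+// Example usage (hypothetical caller, not part of this generated file):
+// combined bits print as pipe-separated names, e.g.
+//
+//	p := ActualExprConstant | ActualExprIsFunc
+//	fmt.Println(p) // prints "ActualExprConstant|ActualExprIsFunc"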
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze.go b/src/cmd/compile/internal/inline/inlheur/analyze.go
new file mode 100644
index 0000000..a1b6f35
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze.go
@@ -0,0 +1,370 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"encoding/json"
+	"fmt"
+	"internal/buildcfg"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+const (
+	debugTraceFuncs = 1 << iota
+	debugTraceFuncFlags
+	debugTraceResults
+	debugTraceParams
+	debugTraceExprClassify
+	debugTraceCalls
+	debugTraceScoring
+)
+
+// propAnalyzer interface is used for defining one or more analyzer
+// helper objects, each tasked with computing some specific subset of
+// the properties we're interested in. The assumption is that
+// properties are independent, so each new analyzer that implements
+// this interface can operate entirely on its own. For a given analyzer
+// there will be a sequence of calls to nodeVisitPre and nodeVisitPost
+// as the nodes within a function are visited, then a followup call to
+// setResults so that the analyzer can transfer its results into the
+// final properties object.
+type propAnalyzer interface {
+	nodeVisitPre(n ir.Node)
+	nodeVisitPost(n ir.Node)
+	setResults(funcProps *FuncProps)
+}
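+
+// A minimal sketch of an analyzer satisfying this interface
+// (hypothetical, for illustration only): it counts panic statements
+// during the walk and transfers its result when the walk completes.
+//
+//	type panicCounter struct{ count int }
+//
+//	func (p *panicCounter) nodeVisitPre(n ir.Node) {
+//		if n.Op() == ir.OPANIC {
+//			p.count++
+//		}
+//	}
+//	func (p *panicCounter) nodeVisitPost(n ir.Node) {}
+//	func (p *panicCounter) setResults(funcProps *FuncProps) {
+//		// A real analyzer would set bits in funcProps here; the
+//		// panic/exit flag is actually computed by funcFlagsAnalyzer.
+//	}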
+
+// fnInlHeur contains inline heuristics state information about a
+// specific Go function being analyzed/considered by the inliner. Note
+// that in addition to constructing a fnInlHeur object by analyzing a
+// specific *ir.Func, there is also code in the test harness
+// (funcprops_test.go) that builds up fnInlHeur's by reading in and
+// parsing a dump. This is the reason why we have file/fname/line
+// fields below instead of just an *ir.Func field.
+type fnInlHeur struct {
+	props *FuncProps
+	cstab CallSiteTab
+	fname string
+	file  string
+	line  uint
+}
+
+var fpmap = map[*ir.Func]fnInlHeur{}
+
+// AnalyzeFunc computes function properties for fn and its contained
+// closures, updating the global 'fpmap' table. It is assumed that
+// "CanInline" has been run on fn and on the closures that feed
+// directly into calls; other closures not directly called will also
+// be checked for inlinability here in case they are
+// returned as a result.
+func AnalyzeFunc(fn *ir.Func, canInline func(*ir.Func), budgetForFunc func(*ir.Func) int32, inlineMaxBudget int) {
+	if fpmap == nil {
+		// If fpmap is nil this indicates that the main inliner pass is
+		// complete and we're doing inlining of wrappers (no heuristics
+		// used here).
+		return
+	}
+	if fn.OClosure != nil {
+		// closures will be processed along with their outer enclosing func.
+		return
+	}
+	enableDebugTraceIfEnv()
+	if debugTrace&debugTraceFuncs != 0 {
+		fmt.Fprintf(os.Stderr, "=-= AnalyzeFunc(%v)\n", fn)
+	}
+	// Build up a list containing 'fn' and any closures it contains. Along
+	// the way, test to see whether each closure is inlinable in case
+	// we might be returning it.
+	funcs := []*ir.Func{fn}
+	ir.VisitFuncAndClosures(fn, func(n ir.Node) {
+		if clo, ok := n.(*ir.ClosureExpr); ok {
+			funcs = append(funcs, clo.Func)
+		}
+	})
+
+	// Analyze the list of functions. We want to visit a given func
+	// only after the closures it contains have been processed, so
+	// iterate through the list in reverse order. Once a function has
+	// been analyzed, revisit the question of whether it should be
+	// inlinable; if it is over the default hairyness limit and it
+	// doesn't have any interesting properties, then we don't want
+	// the overhead of writing out its inline body.
+	nameFinder := newNameFinder(fn)
+	for i := len(funcs) - 1; i >= 0; i-- {
+		f := funcs[i]
+		if f.OClosure != nil && !f.InlinabilityChecked() {
+			canInline(f)
+		}
+		funcProps := analyzeFunc(f, inlineMaxBudget, nameFinder)
+		revisitInlinability(f, funcProps, budgetForFunc)
+		if f.Inl != nil {
+			f.Inl.Properties = funcProps.SerializeToString()
+		}
+	}
+	disableDebugTrace()
+}
+
+// TearDown is invoked at the end of the main inlining pass; doing
+// function analysis and call site scoring is unlikely to help a lot
+// after this point, so nil out fpmap and other globals to reclaim
+// storage.
+func TearDown() {
+	fpmap = nil
+	scoreCallsCache.tab = nil
+	scoreCallsCache.csl = nil
+}
+
+func analyzeFunc(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) *FuncProps {
+	if funcInlHeur, ok := fpmap[fn]; ok {
+		return funcInlHeur.props
+	}
+	funcProps, fcstab := computeFuncProps(fn, inlineMaxBudget, nf)
+	file, line := fnFileLine(fn)
+	entry := fnInlHeur{
+		fname: fn.Sym().Name,
+		file:  file,
+		line:  line,
+		props: funcProps,
+		cstab: fcstab,
+	}
+	fn.SetNeverReturns(entry.props.Flags&FuncPropNeverReturns != 0)
+	fpmap[fn] = entry
+	if fn.Inl != nil && fn.Inl.Properties == "" {
+		fn.Inl.Properties = entry.props.SerializeToString()
+	}
+	return funcProps
+}
+
+// revisitInlinability revisits the question of whether to continue to
+// treat function 'fn' as an inline candidate based on the set of
+// properties we've computed for it. If (for example) it has an
+// initial size score of 150 and no interesting properties to speak
+// of, then there isn't really any point to moving ahead with it as an
+// inline candidate.
+func revisitInlinability(fn *ir.Func, funcProps *FuncProps, budgetForFunc func(*ir.Func) int32) {
+	if fn.Inl == nil {
+		return
+	}
+	maxAdj := int32(LargestNegativeScoreAdjustment(fn, funcProps))
+	budget := budgetForFunc(fn)
+	if fn.Inl.Cost+maxAdj > budget {
+		fn.Inl = nil
+	}
+}
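+
+// For example (hypothetical numbers): a function whose Inl.Cost is 150
+// and whose largest possible negative score adjustment is -60 can never
+// score below 90 at any call site; if its budget is only the default 80,
+// keeping an inline body for it is pointless, so Inl is cleared.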
+
+// computeFuncProps examines the Go function 'fn' and computes for it
+// a function "properties" object, to be used to drive inlining
+// heuristics. See comments on the FuncProps type for more info.
+func computeFuncProps(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) (*FuncProps, CallSiteTab) {
+	if debugTrace&debugTraceFuncs != 0 {
+		fmt.Fprintf(os.Stderr, "=-= starting analysis of func %v:\n%+v\n",
+			fn, fn)
+	}
+	funcProps := new(FuncProps)
+	ffa := makeFuncFlagsAnalyzer(fn)
+	analyzers := []propAnalyzer{ffa}
+	analyzers = addResultsAnalyzer(fn, analyzers, funcProps, inlineMaxBudget, nf)
+	analyzers = addParamsAnalyzer(fn, analyzers, funcProps, nf)
+	runAnalyzersOnFunction(fn, analyzers)
+	for _, a := range analyzers {
+		a.setResults(funcProps)
+	}
+	cstab := computeCallSiteTable(fn, fn.Body, nil, ffa.panicPathTable(), 0, nf)
+	return funcProps, cstab
+}
+
+func runAnalyzersOnFunction(fn *ir.Func, analyzers []propAnalyzer) {
+	var doNode func(ir.Node) bool
+	doNode = func(n ir.Node) bool {
+		for _, a := range analyzers {
+			a.nodeVisitPre(n)
+		}
+		ir.DoChildren(n, doNode)
+		for _, a := range analyzers {
+			a.nodeVisitPost(n)
+		}
+		return false
+	}
+	doNode(fn)
+}
+
+func propsForFunc(fn *ir.Func) *FuncProps {
+	if funcInlHeur, ok := fpmap[fn]; ok {
+		return funcInlHeur.props
+	} else if fn.Inl != nil && fn.Inl.Properties != "" {
+		// FIXME: consider adding some sort of cache or table
+		// for deserialized properties of imported functions.
+		return DeserializeFromString(fn.Inl.Properties)
+	}
+	return nil
+}
+
+func fnFileLine(fn *ir.Func) (string, uint) {
+	p := base.Ctxt.InnermostPos(fn.Pos())
+	return filepath.Base(p.Filename()), p.Line()
+}
+
+func Enabled() bool {
+	return buildcfg.Experiment.NewInliner || UnitTesting()
+}
+
+func UnitTesting() bool {
+	return base.Debug.DumpInlFuncProps != "" ||
+		base.Debug.DumpInlCallSiteScores != 0
+}
+
+// DumpFuncProps computes and caches function properties for the func
+// 'fn', writing out a description of the previously computed set of
+// properties to the file given in 'dumpfile'. Used for the
+// "-d=dumpinlfuncprops=..." command line flag, intended for use
+// primarily in unit testing.
+func DumpFuncProps(fn *ir.Func, dumpfile string) {
+	if fn != nil {
+		if fn.OClosure != nil {
+			// closures will be processed along with their outer enclosing func.
+			return
+		}
+		captureFuncDumpEntry(fn)
+		ir.VisitFuncAndClosures(fn, func(n ir.Node) {
+			if clo, ok := n.(*ir.ClosureExpr); ok {
+				captureFuncDumpEntry(clo.Func)
+			}
+		})
+	} else {
+		emitDumpToFile(dumpfile)
+	}
+}
+
+// emitDumpToFile writes out the buffered function property dump entries
+// to a file, for unit testing. Dump entries need to be sorted by
+// definition line, and due to generics we need to account for the
+// possibility that several ir.Func's will have the same def line.
+func emitDumpToFile(dumpfile string) {
+	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
+	if dumpfile[0] == '+' {
+		dumpfile = dumpfile[1:]
+		mode = os.O_WRONLY | os.O_APPEND | os.O_CREATE
+	}
+	if dumpfile[0] == '%' {
+		dumpfile = dumpfile[1:]
+		d, b := filepath.Dir(dumpfile), filepath.Base(dumpfile)
+		ptag := strings.ReplaceAll(types.LocalPkg.Path, "/", ":")
+		dumpfile = d + "/" + ptag + "." + b
+	}
+	outf, err := os.OpenFile(dumpfile, mode, 0644)
+	if err != nil {
+		base.Fatalf("opening function props dump file %q: %v\n", dumpfile, err)
+	}
+	defer outf.Close()
+	dumpFilePreamble(outf)
+
+	atline := map[uint]uint{}
+	sl := make([]fnInlHeur, 0, len(dumpBuffer))
+	for _, e := range dumpBuffer {
+		sl = append(sl, e)
+		atline[e.line] = atline[e.line] + 1
+	}
+	sl = sortFnInlHeurSlice(sl)
+
+	prevline := uint(0)
+	for _, entry := range sl {
+		idx := uint(0)
+		if prevline == entry.line {
+			idx++
+		}
+		prevline = entry.line
+		atl := atline[entry.line]
+		if err := dumpFnPreamble(outf, &entry, nil, idx, atl); err != nil {
+			base.Fatalf("function props dump: %v\n", err)
+		}
+	}
+	dumpBuffer = nil
+}
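+
+// For example (file names are illustrative): passing "+props.txt"
+// appends to props.txt instead of truncating it, while "%props.txt"
+// expands to a per-package name such as "cmd:compile:internal:ssa.props.txt",
+// with slashes in the package path replaced by colons.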
+
+// captureFuncDumpEntry grabs the function properties object for 'fn'
+// and enqueues it for later dumping. Used for the
+// "-d=dumpinlfuncprops=..." command line flag, intended for use
+// primarily in unit testing.
+func captureFuncDumpEntry(fn *ir.Func) {
+	// avoid capturing compiler-generated equality funcs.
+	if strings.HasPrefix(fn.Sym().Name, ".eq.") {
+		return
+	}
+	funcInlHeur, ok := fpmap[fn]
+	if !ok {
+		// Missing entry is expected for functions that are too large
+		// to inline. We still want to write out call site scores in
+		// this case however.
+		funcInlHeur = fnInlHeur{cstab: callSiteTab}
+	}
+	if dumpBuffer == nil {
+		dumpBuffer = make(map[*ir.Func]fnInlHeur)
+	}
+	if _, ok := dumpBuffer[fn]; ok {
+		return
+	}
+	if debugTrace&debugTraceFuncs != 0 {
+		fmt.Fprintf(os.Stderr, "=-= capturing dump for %v:\n", fn)
+	}
+	dumpBuffer[fn] = funcInlHeur
+}
+
+// dumpFilePreamble writes out a file-level preamble for a given
+// Go function as part of a function properties dump.
+func dumpFilePreamble(w io.Writer) {
+	fmt.Fprintf(w, "// DO NOT EDIT (use 'go test -v -update-expected' instead.)\n")
+	fmt.Fprintf(w, "// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt\n")
+	fmt.Fprintf(w, "// for more information on the format of this file.\n")
+	fmt.Fprintf(w, "// %s\n", preambleDelimiter)
+}
+
+// dumpFnPreamble writes out a function-level preamble for a given
+// Go function as part of a function properties dump. See the
+// README.txt file in testdata/props for more on the format of
+// this preamble.
+func dumpFnPreamble(w io.Writer, funcInlHeur *fnInlHeur, ecst encodedCallSiteTab, idx, atl uint) error {
+	fmt.Fprintf(w, "// %s %s %d %d %d\n",
+		funcInlHeur.file, funcInlHeur.fname, funcInlHeur.line, idx, atl)
+	// emit props as comments, followed by delimiter
+	fmt.Fprintf(w, "%s// %s\n", funcInlHeur.props.ToString("// "), comDelimiter)
+	data, err := json.Marshal(funcInlHeur.props)
+	if err != nil {
+		return fmt.Errorf("marshall error %v\n", err)
+	}
+	fmt.Fprintf(w, "// %s\n", string(data))
+	dumpCallSiteComments(w, funcInlHeur.cstab, ecst)
+	fmt.Fprintf(w, "// %s\n", fnDelimiter)
+	return nil
+}
+
+// sortFnInlHeurSlice sorts a slice of fnInlHeur based on
+// the starting line of the function definition, then by name.
+func sortFnInlHeurSlice(sl []fnInlHeur) []fnInlHeur {
+	sort.SliceStable(sl, func(i, j int) bool {
+		if sl[i].line != sl[j].line {
+			return sl[i].line < sl[j].line
+		}
+		return sl[i].fname < sl[j].fname
+	})
+	return sl
+}
+
+// delimiters written to various preambles to make parsing of
+// dumps easier.
+const preambleDelimiter = "<endfilepreamble>"
+const fnDelimiter = "<endfuncpreamble>"
+const comDelimiter = "<endpropsdump>"
+const csDelimiter = "<endcallsites>"
+
+// dumpBuffer stores up function properties dumps when
+// "-d=dumpinlfuncprops=..." is in effect.
+var dumpBuffer map[*ir.Func]fnInlHeur
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go
new file mode 100644
index 0000000..36ebe18
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go
@@ -0,0 +1,413 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/pgo"
+	"cmd/compile/internal/typecheck"
+	"fmt"
+	"os"
+	"strings"
+)
+
+type callSiteAnalyzer struct {
+	fn *ir.Func
+	*nameFinder
+}
+
+type callSiteTableBuilder struct {
+	fn *ir.Func
+	*nameFinder
+	cstab    CallSiteTab
+	ptab     map[ir.Node]pstate
+	nstack   []ir.Node
+	loopNest int
+	isInit   bool
+}
+
+func makeCallSiteAnalyzer(fn *ir.Func) *callSiteAnalyzer {
+	return &callSiteAnalyzer{
+		fn:         fn,
+		nameFinder: newNameFinder(fn),
+	}
+}
+
+func makeCallSiteTableBuilder(fn *ir.Func, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int, nf *nameFinder) *callSiteTableBuilder {
+	isInit := fn.IsPackageInit() || strings.HasPrefix(fn.Sym().Name, "init.")
+	return &callSiteTableBuilder{
+		fn:         fn,
+		cstab:      cstab,
+		ptab:       ptab,
+		isInit:     isInit,
+		loopNest:   loopNestingLevel,
+		nstack:     []ir.Node{fn},
+		nameFinder: nf,
+	}
+}
+
+// computeCallSiteTable builds and returns a table of call sites for
+// the specified region in function fn. A region here corresponds to a
+// specific subtree within the AST for a function. The main intended
+// use cases are for 'region' to be either A) an entire function body,
+// or B) an inlined call expression.
+func computeCallSiteTable(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int, nf *nameFinder) CallSiteTab {
+	cstb := makeCallSiteTableBuilder(fn, cstab, ptab, loopNestingLevel, nf)
+	var doNode func(ir.Node) bool
+	doNode = func(n ir.Node) bool {
+		cstb.nodeVisitPre(n)
+		ir.DoChildren(n, doNode)
+		cstb.nodeVisitPost(n)
+		return false
+	}
+	for _, n := range region {
+		doNode(n)
+	}
+	return cstb.cstab
+}
+
+func (cstb *callSiteTableBuilder) flagsForNode(call *ir.CallExpr) CSPropBits {
+	var r CSPropBits
+
+	if debugTrace&debugTraceCalls != 0 {
+		fmt.Fprintf(os.Stderr, "=-= analyzing call at %s\n",
+			fmtFullPos(call.Pos()))
+	}
+
+	// Set a bit if this call is within a loop.
+	if cstb.loopNest > 0 {
+		r |= CallSiteInLoop
+	}
+
+	// Set a bit if the call is within an init function (either
+	// compiler-generated or user-written).
+	if cstb.isInit {
+		r |= CallSiteInInitFunc
+	}
+
+	// Decide whether to apply the panic path heuristic. Hack: don't
+	// apply this heuristic in the function "main.main" (mostly just
+	// to avoid annoying users).
+	if !isMainMain(cstb.fn) {
+		r = cstb.determinePanicPathBits(call, r)
+	}
+
+	return r
+}
+
+// determinePanicPathBits updates the CallSiteOnPanicPath bit within
+// "r" if we think this call is on an unconditional path to
+// panic/exit. Do this by walking back up the node stack to see if we
+// can find either A) an enclosing panic, or B) a statement node that
+// we've determined leads to a panic/exit.
+func (cstb *callSiteTableBuilder) determinePanicPathBits(call ir.Node, r CSPropBits) CSPropBits {
+	cstb.nstack = append(cstb.nstack, call)
+	defer func() {
+		cstb.nstack = cstb.nstack[:len(cstb.nstack)-1]
+	}()
+
+	for ri := range cstb.nstack[:len(cstb.nstack)-1] {
+		i := len(cstb.nstack) - ri - 1
+		n := cstb.nstack[i]
+		_, isCallExpr := n.(*ir.CallExpr)
+		_, isStmt := n.(ir.Stmt)
+		if isCallExpr {
+			isStmt = false
+		}
+
+		if debugTrace&debugTraceCalls != 0 {
+			ps, inps := cstb.ptab[n]
+			fmt.Fprintf(os.Stderr, "=-= callpar %d op=%s ps=%s inptab=%v stmt=%v\n", i, n.Op().String(), ps.String(), inps, isStmt)
+		}
+
+		if n.Op() == ir.OPANIC {
+			r |= CallSiteOnPanicPath
+			break
+		}
+		if v, ok := cstb.ptab[n]; ok {
+			if v == psCallsPanic {
+				r |= CallSiteOnPanicPath
+				break
+			}
+			if isStmt {
+				break
+			}
+		}
+	}
+	return r
+}
+
+// propsForArg returns property bits for a given call argument expression arg.
+func (cstb *callSiteTableBuilder) propsForArg(arg ir.Node) ActualExprPropBits {
+	if cval := cstb.constValue(arg); cval != nil {
+		return ActualExprConstant
+	}
+	if cstb.isConcreteConvIface(arg) {
+		return ActualExprIsConcreteConvIface
+	}
+	fname := cstb.funcName(arg)
+	if fname != nil {
+		if fn := fname.Func; fn != nil && typecheck.HaveInlineBody(fn) {
+			return ActualExprIsInlinableFunc
+		}
+		return ActualExprIsFunc
+	}
+	return 0
+}
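+
+// For example (hypothetical call): in foo(42, io.Writer(buf), bar), the
+// constant 42 is classified as ActualExprConstant, the conversion of a
+// concrete value to an interface type as ActualExprIsConcreteConvIface,
+// and the function-valued argument bar as ActualExprIsFunc (or
+// ActualExprIsInlinableFunc if bar has an inline body available).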
+
+// argPropsForCall returns a slice of argument properties for the
+// expressions being passed to the callee in the specific call
+// expression; these will be stored in the CallSite object for a given
+// call and then consulted when scoring. If no arg has any interesting
+// properties we try to save some space and return a nil slice.
+func (cstb *callSiteTableBuilder) argPropsForCall(ce *ir.CallExpr) []ActualExprPropBits {
+	rv := make([]ActualExprPropBits, len(ce.Args))
+	somethingInteresting := false
+	for idx := range ce.Args {
+		argProp := cstb.propsForArg(ce.Args[idx])
+		somethingInteresting = somethingInteresting || (argProp != 0)
+		rv[idx] = argProp
+	}
+	if !somethingInteresting {
+		return nil
+	}
+	return rv
+}
+
+func (cstb *callSiteTableBuilder) addCallSite(callee *ir.Func, call *ir.CallExpr) {
+	flags := cstb.flagsForNode(call)
+	argProps := cstb.argPropsForCall(call)
+	if debugTrace&debugTraceCalls != 0 {
+		fmt.Fprintf(os.Stderr, "=-= props %+v for call %v\n", argProps, call)
+	}
+	// FIXME: maybe bulk-allocate these?
+	cs := &CallSite{
+		Call:     call,
+		Callee:   callee,
+		Assign:   cstb.containingAssignment(call),
+		ArgProps: argProps,
+		Flags:    flags,
+		ID:       uint(len(cstb.cstab)),
+	}
+	if _, ok := cstb.cstab[call]; ok {
+		fmt.Fprintf(os.Stderr, "*** cstab duplicate entry at: %s\n",
+			fmtFullPos(call.Pos()))
+		fmt.Fprintf(os.Stderr, "*** call: %+v\n", call)
+		panic("bad")
+	}
+	// Set initial score for callsite to the cost computed
+	// by CanInline; this score will be refined later based
+	// on heuristics.
+	cs.Score = int(callee.Inl.Cost)
+
+	if cstb.cstab == nil {
+		cstb.cstab = make(CallSiteTab)
+	}
+	cstb.cstab[call] = cs
+	if debugTrace&debugTraceCalls != 0 {
+		fmt.Fprintf(os.Stderr, "=-= added callsite: caller=%v callee=%v n=%s\n",
+			cstb.fn, callee, fmtFullPos(call.Pos()))
+	}
+}
+
+func (cstb *callSiteTableBuilder) nodeVisitPre(n ir.Node) {
+	switch n.Op() {
+	case ir.ORANGE, ir.OFOR:
+		if !hasTopLevelLoopBodyReturnOrBreak(loopBody(n)) {
+			cstb.loopNest++
+		}
+	case ir.OCALLFUNC:
+		ce := n.(*ir.CallExpr)
+		callee := pgo.DirectCallee(ce.Fun)
+		if callee != nil && callee.Inl != nil {
+			cstb.addCallSite(callee, ce)
+		}
+	}
+	cstb.nstack = append(cstb.nstack, n)
+}
+
+func (cstb *callSiteTableBuilder) nodeVisitPost(n ir.Node) {
+	cstb.nstack = cstb.nstack[:len(cstb.nstack)-1]
+	switch n.Op() {
+	case ir.ORANGE, ir.OFOR:
+		if !hasTopLevelLoopBodyReturnOrBreak(loopBody(n)) {
+			cstb.loopNest--
+		}
+	}
+}
+
+func loopBody(n ir.Node) ir.Nodes {
+	if forst, ok := n.(*ir.ForStmt); ok {
+		return forst.Body
+	}
+	if rst, ok := n.(*ir.RangeStmt); ok {
+		return rst.Body
+	}
+	return nil
+}
+
+// hasTopLevelLoopBodyReturnOrBreak examines the body of a "for" or
+// "range" loop to try to verify that it is a real loop, as opposed to
+// a construct that is syntactically loopy but doesn't actually iterate
+// multiple times, like:
+//
+//	for {
+//	  blah()
+//	  return 1
+//	}
+//
+// [Remark: the pattern above crops up quite a bit in the source code
+// for the compiler itself, e.g. the auto-generated rewrite code]
+//
+// Note that we don't look for GOTO statements here, so it's possible
+// we'll get the wrong result for a loop with complicated control
+// jumps via gotos.
+func hasTopLevelLoopBodyReturnOrBreak(loopBody ir.Nodes) bool {
+	for _, n := range loopBody {
+		if n.Op() == ir.ORETURN || n.Op() == ir.OBREAK {
+			return true
+		}
+	}
+	return false
+}
+
+// containingAssignment returns the top-level assignment statement
+// for a statement level function call "n". Examples:
+//
+//	x := foo()
+//	x, y := bar(z, baz())
+//	if blah() { ...
+//
+// Here the top-level assignment statement for the foo() call is the
+// statement assigning to "x"; the top-level assignment for "bar()"
+// call is the assignment to x,y. For the baz() and blah() calls,
+// there is no top level assignment statement.
+//
+// The unstated goal here is that we want to use the containing
+// assignment to establish a connection between a given call and the
+// variables to which its results/returns are being assigned.
+//
+// Note that for the "bar" command above, the front end sometimes
+// decomposes this into two assignments, the first one assigning the
+// call to a pair of auto-temps, then the second one assigning the
+// auto-temps to the user-visible vars. This helper will return the
+// second (outer) of these two.
+func (cstb *callSiteTableBuilder) containingAssignment(n ir.Node) ir.Node {
+	parent := cstb.nstack[len(cstb.nstack)-1]
+
+	// assignsOnlyAutoTemps returns TRUE if the specified OAS2FUNC
+	// node assigns only auto-temps.
+	assignsOnlyAutoTemps := func(x ir.Node) bool {
+		alst := x.(*ir.AssignListStmt)
+		oa2init := alst.Init()
+		if len(oa2init) == 0 {
+			return false
+		}
+		for _, v := range oa2init {
+			d := v.(*ir.Decl)
+			if !ir.IsAutoTmp(d.X) {
+				return false
+			}
+		}
+		return true
+	}
+
+	// Simple case: x := foo()
+	if parent.Op() == ir.OAS {
+		return parent
+	}
+
+	// Multi-return case: x, y := bar()
+	if parent.Op() == ir.OAS2FUNC {
+		// Hack city: if the result vars are auto-temps, try looking
+		// for an outer assignment in the tree. The code shape we're
+		// looking for here is:
+		//
+		// OAS1({x,y},OCONVNOP(OAS2FUNC({auto1,auto2},OCALLFUNC(bar))))
+		//
+		if assignsOnlyAutoTemps(parent) {
+			par2 := cstb.nstack[len(cstb.nstack)-2]
+			if par2.Op() == ir.OAS2 {
+				return par2
+			}
+			if par2.Op() == ir.OCONVNOP {
+				par3 := cstb.nstack[len(cstb.nstack)-3]
+				if par3.Op() == ir.OAS2 {
+					return par3
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// UpdateCallsiteTable handles updating of callerfn's call site table
+// after an inlining has been carried out, e.g. the call at 'n' has been
+// turned into the inlined call expression 'ic' within function
+// callerfn. The chief thing of interest here is to make sure that any
+// call nodes within 'ic' are added to the call site table for
+// 'callerfn' and scored appropriately.
+func UpdateCallsiteTable(callerfn *ir.Func, n *ir.CallExpr, ic *ir.InlinedCallExpr) {
+	enableDebugTraceIfEnv()
+	defer disableDebugTrace()
+
+	funcInlHeur, ok := fpmap[callerfn]
+	if !ok {
+		// This can happen for compiler-generated wrappers.
+		if debugTrace&debugTraceCalls != 0 {
+			fmt.Fprintf(os.Stderr, "=-= early exit, no entry for caller fn %v\n", callerfn)
+		}
+		return
+	}
+
+	if debugTrace&debugTraceCalls != 0 {
+		fmt.Fprintf(os.Stderr, "=-= UpdateCallsiteTable(caller=%v, cs=%s)\n",
+			callerfn, fmtFullPos(n.Pos()))
+	}
+
+	// Mark the call in question as inlined.
+	oldcs, ok := funcInlHeur.cstab[n]
+	if !ok {
+		// This can happen for compiler-generated wrappers.
+		return
+	}
+	oldcs.aux |= csAuxInlined
+
+	if debugTrace&debugTraceCalls != 0 {
+		fmt.Fprintf(os.Stderr, "=-= marked as inlined: callee=%v %s\n",
+			oldcs.Callee, EncodeCallSiteKey(oldcs))
+	}
+
+	// Walk the inlined call region to collect new callsites.
+	var icp pstate
+	if oldcs.Flags&CallSiteOnPanicPath != 0 {
+		icp = psCallsPanic
+	}
+	var loopNestLevel int
+	if oldcs.Flags&CallSiteInLoop != 0 {
+		loopNestLevel = 1
+	}
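+	// Seed the walk with the state inherited from the original call
+	// site: if the call we just inlined was itself on a panic path or
+	// inside a loop, the calls exposed by its body presumably should
+	// be treated the same way.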
+	ptab := map[ir.Node]pstate{ic: icp}
+	nf := newNameFinder(nil)
+	icstab := computeCallSiteTable(callerfn, ic.Body, nil, ptab, loopNestLevel, nf)
+
+	// Record parent callsite. This is primarily for debug output.
+	for _, cs := range icstab {
+		cs.parent = oldcs
+	}
+
+	// Score the calls in the inlined body. Note the setting of
+	// "doCallResults" to false here: at the moment there isn't any
+	// easy way to localize or region-ize the work done by
+	// "rescoreBasedOnCallResultUses", which currently does a walk
+	// over the entire function to look for uses of a given set of
+	// results. Similarly we're passing nil to makeCallSiteAnalyzer,
+	// so as to run name finding without the use of static value &
+	// friends.
+	csa := makeCallSiteAnalyzer(nil)
+	const doCallResults = false
+	csa.scoreCallsRegion(callerfn, ic.Body, icstab, doCallResults, ic)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go
new file mode 100644
index 0000000..b7403a4
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go
@@ -0,0 +1,356 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"fmt"
+	"os"
+)
+
+// funcFlagsAnalyzer computes the "Flags" value for the FuncProps
+// object we're computing. The main item of interest here is "nstate",
+// which stores the disposition of a given ir Node with respect to the
+// flags/properties we're trying to compute.
+type funcFlagsAnalyzer struct {
+	fn     *ir.Func
+	nstate map[ir.Node]pstate
+	noInfo bool // set if we see something inscrutable/un-analyzable
+}
+
+// pstate keeps track of the disposition of a given node and its
+// children with respect to panic/exit calls.
+type pstate int
+
+const (
+	psNoInfo     pstate = iota // nothing interesting about this node
+	psCallsPanic               // node causes call to panic or os.Exit
+	psMayReturn                // executing node may trigger a "return" stmt
+	psTop                      // dataflow lattice "top" element
+)
+
+func makeFuncFlagsAnalyzer(fn *ir.Func) *funcFlagsAnalyzer {
+	return &funcFlagsAnalyzer{
+		fn:     fn,
+		nstate: make(map[ir.Node]pstate),
+	}
+}
+
+// setResults transfers func flag results to 'funcProps'.
+func (ffa *funcFlagsAnalyzer) setResults(funcProps *FuncProps) {
+	var rv FuncPropBits
+	if !ffa.noInfo && ffa.stateForList(ffa.fn.Body) == psCallsPanic {
+		rv = FuncPropNeverReturns
+	}
+	// This is slightly hacky and not at all required, but include a
+	// special case for main.main, which often ends in a call to
+	// os.Exit. People who write code like this (very common I
+	// imagine)
+	//
+	//   func main() {
+	//     rc = perform()
+	//     ...
+	//     foo()
+	//     os.Exit(rc)
+	//   }
+	//
+	// will be constantly surprised when foo() is inlined in many
+	// other spots in the program but not in main().
+	if isMainMain(ffa.fn) {
+		rv &^= FuncPropNeverReturns
+	}
+	funcProps.Flags = rv
+}
+
+func (ffa *funcFlagsAnalyzer) getState(n ir.Node) pstate {
+	return ffa.nstate[n]
+}
+
+func (ffa *funcFlagsAnalyzer) setState(n ir.Node, st pstate) {
+	if st != psNoInfo {
+		ffa.nstate[n] = st
+	}
+}
+
+func (ffa *funcFlagsAnalyzer) updateState(n ir.Node, st pstate) {
+	if st == psNoInfo {
+		delete(ffa.nstate, n)
+	} else {
+		ffa.nstate[n] = st
+	}
+}
+
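+// panicPathTable returns the per-node disposition map computed by
+// this analyzer, for use by later phases (presumably to decide which
+// call sites lie on a panic/exit path, e.g. CallSiteOnPanicPath).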
+func (ffa *funcFlagsAnalyzer) panicPathTable() map[ir.Node]pstate {
+	return ffa.nstate
+}
+
+// blockCombine merges together states as part of a linear sequence of
+// statements, where 'pred' and 'succ' are analysis results for a pair
+// of consecutive statements. Examples:
+//
+//	case 1:             case 2:
+//	    panic("foo")      if q { return x }        <-pred
+//	    return x          panic("boo")             <-succ
+//
+// In case 1, since the pred state is "always panic" it doesn't matter
+// what the succ state is, hence the state for the combination of the
+// two blocks is "always panics". In case 2, because there is a path
+// to return that avoids the panic in succ, the state for the
+// combination of the two statements is "may return".
+func blockCombine(pred, succ pstate) pstate {
+	switch succ {
+	case psTop:
+		return pred
+	case psMayReturn:
+		if pred == psCallsPanic {
+			return psCallsPanic
+		}
+		return psMayReturn
+	case psNoInfo:
+		return pred
+	case psCallsPanic:
+		if pred == psMayReturn {
+			return psMayReturn
+		}
+		return psCallsPanic
+	}
+	panic("should never execute")
+}
+
+// branchCombine combines two states at a control flow branch point where
+// either p1 or p2 executes (as in an "if" statement).
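+// For instance (tracing the cases handled below): if both arms
+// always panic, the construct as a whole always panics; if either
+// arm may return, the construct may return; otherwise we know
+// nothing:
+//
+//	branchCombine(psCallsPanic, psCallsPanic) == psCallsPanic
+//	branchCombine(psCallsPanic, psMayReturn)  == psMayReturn
+//	branchCombine(psNoInfo, psCallsPanic)     == psNoInfo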
+func branchCombine(p1, p2 pstate) pstate {
+	if p1 == psCallsPanic && p2 == psCallsPanic {
+		return psCallsPanic
+	}
+	if p1 == psMayReturn || p2 == psMayReturn {
+		return psMayReturn
+	}
+	return psNoInfo
+}
+
+// stateForList walks through a list of statements and computes the
+// state/disposition for the entire list as a whole, as well
+// as updating the disposition of intermediate nodes.
+func (ffa *funcFlagsAnalyzer) stateForList(list ir.Nodes) pstate {
+	st := psTop
+	// Walk the list backwards so that we can update the state for
+	// earlier list elements based on what we find out about their
+	// successors. Example:
+	//
+	//        if ... {
+	//  L10:    foo()
+	//  L11:    <stmt>
+	//  L12:    panic(...)
+	//        }
+	//
+	// After combining the dispositions for line 11 and 12, we want to
+	// update the state for the call at line 10 based on that combined
+	// disposition (if L11 has no path to "return", then the call at
+	// line 10 will be on a panic path).
+	for i := len(list) - 1; i >= 0; i-- {
+		n := list[i]
+		psi := ffa.getState(n)
+		if debugTrace&debugTraceFuncFlags != 0 {
+			fmt.Fprintf(os.Stderr, "=-= %v: stateForList n=%s ps=%s\n",
+				ir.Line(n), n.Op().String(), psi.String())
+		}
+		st = blockCombine(psi, st)
+		ffa.updateState(n, st)
+	}
+	if st == psTop {
+		st = psNoInfo
+	}
+	return st
+}
+
+func isMainMain(fn *ir.Func) bool {
+	s := fn.Sym()
+	return (s.Pkg.Name == "main" && s.Name == "main")
+}
+
+func isWellKnownFunc(s *types.Sym, pkg, name string) bool {
+	return s.Pkg.Path == pkg && s.Name == name
+}
+
+// isExitCall reports TRUE if the node itself is an unconditional
+// call to os.Exit(), a panic, or a function that does likewise.
+func isExitCall(n ir.Node) bool {
+	if n.Op() != ir.OCALLFUNC {
+		return false
+	}
+	cx := n.(*ir.CallExpr)
+	name := ir.StaticCalleeName(cx.Fun)
+	if name == nil {
+		return false
+	}
+	s := name.Sym()
+	if isWellKnownFunc(s, "os", "Exit") ||
+		isWellKnownFunc(s, "runtime", "throw") {
+		return true
+	}
+	if funcProps := propsForFunc(name.Func); funcProps != nil {
+		if funcProps.Flags&FuncPropNeverReturns != 0 {
+			return true
+		}
+	}
+	return name.Func.NeverReturns()
+}
+
+// pessimize is called to record the fact that we saw something in the
+// function that renders it entirely impossible to analyze.
+func (ffa *funcFlagsAnalyzer) pessimize() {
+	ffa.noInfo = true
+}
+
+// shouldVisit reports TRUE if this is an interesting node from the
+// perspective of computing function flags. NB: due to the fact that
+// ir.CallExpr implements the Stmt interface, we wind up visiting
+// a lot of nodes that we don't really need to, but these can
+// simply be screened out as part of the visit.
+func shouldVisit(n ir.Node) bool {
+	_, isStmt := n.(ir.Stmt)
+	return n.Op() != ir.ODCL &&
+		(isStmt || n.Op() == ir.OCALLFUNC || n.Op() == ir.OPANIC)
+}
+
+// nodeVisitPost helps implement the propAnalyzer interface; when
+// called on a given node, it decides the disposition of that node
+// based on the state(s) of the node's children.
+func (ffa *funcFlagsAnalyzer) nodeVisitPost(n ir.Node) {
+	if debugTrace&debugTraceFuncFlags != 0 {
+		fmt.Fprintf(os.Stderr, "=+= nodevis %v %s should=%v\n",
+			ir.Line(n), n.Op().String(), shouldVisit(n))
+	}
+	if !shouldVisit(n) {
+		return
+	}
+	var st pstate
+	switch n.Op() {
+	case ir.OCALLFUNC:
+		if isExitCall(n) {
+			st = psCallsPanic
+		}
+	case ir.OPANIC:
+		st = psCallsPanic
+	case ir.ORETURN:
+		st = psMayReturn
+	case ir.OBREAK, ir.OCONTINUE:
+		// FIXME: this handling of break/continue is sub-optimal; we
+		// have them as "mayReturn" in order to help with this case:
+		//
+		//   for {
+		//     if q() { break }
+		//     panic(...)
+		//   }
+		//
+		// where the effect of the 'break' is to cause the subsequent
+		// panic to be skipped. One possible improvement would be to
+		// track whether the currently enclosing loop is a "for {" or
+		// a for/range with condition, then use mayReturn only for the
+		// former. Note also that "break X" or "continue X" is treated
+		// the same as "goto", since we don't have a good way to track
+		// the target of the branch.
+		st = psMayReturn
+		n := n.(*ir.BranchStmt)
+		if n.Label != nil {
+			ffa.pessimize()
+		}
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		st = ffa.stateForList(n.List)
+	case ir.OCASE:
+		if ccst, ok := n.(*ir.CaseClause); ok {
+			st = ffa.stateForList(ccst.Body)
+		} else if ccst, ok := n.(*ir.CommClause); ok {
+			st = ffa.stateForList(ccst.Body)
+		} else {
+			panic("unexpected")
+		}
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		st = branchCombine(ffa.stateForList(n.Body), ffa.stateForList(n.Else))
+	case ir.OFOR:
+		// Treat for { XXX } like a block.
+		// Treat for <cond> { XXX } like an if statement with no else.
+		n := n.(*ir.ForStmt)
+		bst := ffa.stateForList(n.Body)
+		if n.Cond == nil {
+			st = bst
+		} else {
+			if bst == psMayReturn {
+				st = psMayReturn
+			}
+		}
+	case ir.ORANGE:
+		// Treat for range { XXX } like an if statement with no else.
+		n := n.(*ir.RangeStmt)
+		if ffa.stateForList(n.Body) == psMayReturn {
+			st = psMayReturn
+		}
+	case ir.OGOTO:
+		// punt if we see even one goto. if we built a control
+		// flow graph we could do more, but this is just a tree walk.
+		ffa.pessimize()
+	case ir.OSELECT:
+		// Process selects for "may return" but not "always panics";
+		// the latter case seems very improbable.
+		n := n.(*ir.SelectStmt)
+		if len(n.Cases) != 0 {
+			st = psTop
+			for _, c := range n.Cases {
+				st = branchCombine(ffa.stateForList(c.Body), st)
+			}
+		}
+	case ir.OSWITCH:
+		n := n.(*ir.SwitchStmt)
+		if len(n.Cases) != 0 {
+			// Walk the cases in reverse order so that when a case
+			// ends in a fallthrough, the disposition of the case it
+			// falls into has already been computed and can be folded
+			// in via blockCombine.
+			st = psTop
+			fall := psNoInfo
+			for i := len(n.Cases) - 1; i >= 0; i-- {
+				cas := n.Cases[i]
+				cst := ffa.stateForList(cas.Body)
+				endsInFallthrough := false
+				if len(cas.Body) != 0 {
+					endsInFallthrough = cas.Body[len(cas.Body)-1].Op() == ir.OFALL
+				}
+				if endsInFallthrough {
+					cst = blockCombine(cst, fall)
+				}
+				st = branchCombine(st, cst)
+				fall = cst
+			}
+		}
+	case ir.OFALL:
+		// Not important.
+	case ir.ODCLFUNC, ir.ORECOVER, ir.OAS, ir.OAS2, ir.OAS2FUNC, ir.OASOP,
+		ir.OPRINTLN, ir.OPRINT, ir.OLABEL, ir.OCALLINTER, ir.ODEFER,
+		ir.OSEND, ir.ORECV, ir.OSELRECV2, ir.OGO, ir.OAPPEND, ir.OAS2DOTTYPE,
+		ir.OAS2MAPR, ir.OGETG, ir.ODELETE, ir.OINLMARK, ir.OAS2RECV,
+		ir.OMIN, ir.OMAX, ir.OMAKE, ir.ORECOVERFP, ir.OGETCALLERSP:
+		// these should all be benign/uninteresting
+	case ir.OTAILCALL, ir.OJUMPTABLE, ir.OTYPESW:
+		// don't expect to see these at all.
+		base.Fatalf("unexpected op %s in func %s",
+			n.Op().String(), ir.FuncName(ffa.fn))
+	default:
+		base.Fatalf("%v: unhandled op %s in func %v",
+			ir.Line(n), n.Op().String(), ir.FuncName(ffa.fn))
+	}
+	if debugTrace&debugTraceFuncFlags != 0 {
+		fmt.Fprintf(os.Stderr, "=-= %v: visit n=%s returns %s\n",
+			ir.Line(n), n.Op().String(), st.String())
+	}
+	ffa.setState(n, st)
+}
+
+func (ffa *funcFlagsAnalyzer) nodeVisitPre(n ir.Node) {
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go
new file mode 100644
index 0000000..d85d73b
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go
@@ -0,0 +1,355 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/ir"
+	"fmt"
+	"os"
+)
+
+// paramsAnalyzer holds state information for the phase that computes
+// flags for a Go function's parameters, for use in inline heuristics.
+// Note that the params slice below includes entries for blanks.
+type paramsAnalyzer struct {
+	fname  string          // name of the function being analyzed
+	values []ParamPropBits // computed property flags, one per param
+	params []*ir.Name      // receiver (if any) plus params, including blanks
+	top    []bool          // true => param is analyzable but has no flags recorded yet
+	*condLevelTracker
+	*nameFinder
+}
+
+// getParams returns an *ir.Name slice containing all params for the
+// function (plus rcvr as well if applicable).
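+// This relies on the convention that fn.Dcl lists the receiver (if
+// any) and the input parameters first, ahead of any other
+// declarations.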
+func getParams(fn *ir.Func) []*ir.Name {
+	sig := fn.Type()
+	numParams := sig.NumRecvs() + sig.NumParams()
+	return fn.Dcl[:numParams]
+}
+
+// addParamsAnalyzer creates a new paramsAnalyzer helper object for
+// the function fn, appends it to the analyzers list, and returns the
+// new list. If the function in question doesn't have any interesting
+// parameters then the analyzer list is returned unchanged, and the
+// params flags in "fp" are updated accordingly.
+func addParamsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, nf *nameFinder) []propAnalyzer {
+	pa, props := makeParamsAnalyzer(fn, nf)
+	if pa != nil {
+		analyzers = append(analyzers, pa)
+	} else {
+		fp.ParamFlags = props
+	}
+	return analyzers
+}
+
+// makeParamsAnalyzer creates a new helper object to analyze parameters
+// of function fn. If the function doesn't have any interesting
+// params, a nil helper is returned along with a set of default param
+// flags for the func.
+func makeParamsAnalyzer(fn *ir.Func, nf *nameFinder) (*paramsAnalyzer, []ParamPropBits) {
+	params := getParams(fn) // includes receiver if applicable
+	if len(params) == 0 {
+		return nil, nil
+	}
+	vals := make([]ParamPropBits, len(params))
+	if fn.Inl == nil {
+		return nil, vals
+	}
+	top := make([]bool, len(params))
+	interestingToAnalyze := false
+	for i, pn := range params {
+		if pn == nil {
+			continue
+		}
+		pt := pn.Type()
+		if !pt.IsScalar() && !pt.HasNil() {
+			// existing properties not applicable here (for things
+			// like structs, arrays, slices, etc).
+			continue
+		}
+		// If param is reassigned, skip it.
+		if ir.Reassigned(pn) {
+			continue
+		}
+		top[i] = true
+		interestingToAnalyze = true
+	}
+	if !interestingToAnalyze {
+		return nil, vals
+	}
+
+	if debugTrace&debugTraceParams != 0 {
+		fmt.Fprintf(os.Stderr, "=-= param analysis of func %v:\n",
+			fn.Sym().Name)
+		for i := range vals {
+			n := "_"
+			if params[i] != nil {
+				n = params[i].Sym().String()
+			}
+			fmt.Fprintf(os.Stderr, "=-=  %d: %q %s top=%v\n",
+				i, n, vals[i].String(), top[i])
+		}
+	}
+	pa := &paramsAnalyzer{
+		fname:            fn.Sym().Name,
+		values:           vals,
+		params:           params,
+		top:              top,
+		condLevelTracker: new(condLevelTracker),
+		nameFinder:       nf,
+	}
+	return pa, nil
+}
+
+func (pa *paramsAnalyzer) setResults(funcProps *FuncProps) {
+	funcProps.ParamFlags = pa.values
+}
+
+func (pa *paramsAnalyzer) findParamIdx(n *ir.Name) int {
+	if n == nil {
+		panic("bad")
+	}
+	for i := range pa.params {
+		if pa.params[i] == n {
+			return i
+		}
+	}
+	return -1
+}
+
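+// testfType is the signature of the predicate passed to checkParams
+// below: the first result reports whether the property of interest
+// holds for expression 'x' with respect to parameter 'param', and
+// the second result reports whether it holds only in a weaker "may"
+// sense (in which case the "may" flag variant is recorded instead).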
+type testfType func(x ir.Node, param *ir.Name, idx int) (bool, bool)
+
+// checkParams invokes function 'testf' on the specified expression
+// 'x' for each parameter, and if the result is TRUE, OR's 'flag'
+// (or 'mayflag', if the use appears to be conditional) into the
+// flags for that param.
+func (pa *paramsAnalyzer) checkParams(x ir.Node, flag ParamPropBits, mayflag ParamPropBits, testf testfType) {
+	for idx, p := range pa.params {
+		if !pa.top[idx] && pa.values[idx] == ParamNoInfo {
+			continue
+		}
+		result, may := testf(x, p, idx)
+		if debugTrace&debugTraceParams != 0 {
+			fmt.Fprintf(os.Stderr, "=-= test expr %v param %s result=%v flag=%s\n", x, p.Sym().Name, result, flag.String())
+		}
+		if result {
+			v := flag
+			if pa.condLevel != 0 || may {
+				v = mayflag
+			}
+			pa.values[idx] |= v
+			pa.top[idx] = false
+		}
+	}
+}
+
+// foldCheckParams checks expression 'x' (an 'if' condition or
+// 'switch' stmt expr) to see if the expr would fold away if a
+// specific parameter had a constant value.
+func (pa *paramsAnalyzer) foldCheckParams(x ir.Node) {
+	pa.checkParams(x, ParamFeedsIfOrSwitch, ParamMayFeedIfOrSwitch,
+		func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+			return ShouldFoldIfNameConstant(x, []*ir.Name{p}), false
+		})
+}
+
+// callCheckParams examines the target of call expression 'ce' to see
+// if it is making a call to the value passed in for some parameter.
+func (pa *paramsAnalyzer) callCheckParams(ce *ir.CallExpr) {
+	switch ce.Op() {
+	case ir.OCALLINTER:
+		sel := ce.Fun.(*ir.SelectorExpr)
+		r := pa.staticValue(sel.X)
+		if r.Op() != ir.ONAME {
+			return
+		}
+		name := r.(*ir.Name)
+		if name.Class != ir.PPARAM {
+			return
+		}
+		pa.checkParams(r, ParamFeedsInterfaceMethodCall,
+			ParamMayFeedInterfaceMethodCall,
+			func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+				name := x.(*ir.Name)
+				return name == p, false
+			})
+	case ir.OCALLFUNC:
+		if ce.Fun.Op() != ir.ONAME {
+			return
+		}
+		called := ir.StaticValue(ce.Fun)
+		if called.Op() != ir.ONAME {
+			return
+		}
+		name := called.(*ir.Name)
+		if name.Class == ir.PPARAM {
+			pa.checkParams(called, ParamFeedsIndirectCall,
+				ParamMayFeedIndirectCall,
+				func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+					name := x.(*ir.Name)
+					return name == p, false
+				})
+		} else {
+			cname := pa.funcName(called)
+			if cname != nil {
+				pa.deriveFlagsFromCallee(ce, cname.Func)
+			}
+		}
+	}
+}
+
+// deriveFlagsFromCallee tries to derive flags for the current
+// function based on a call this function makes to some other
+// function. Example:
+//
+//	/* Simple */                /* Derived from callee */
+//	func foo(f func(int)) {     func foo(f func(int)) {
+//	  f(2)                        bar(32, f)
+//	}                           }
+//	                            func bar(x int, f func()) {
+//	                              f(x)
+//	                            }
+//
+// Here we can set the "param feeds indirect call" flag for
+// foo's param 'f' since we know that bar has that flag set for
+// its second param, and we're passing that param a function.
+func (pa *paramsAnalyzer) deriveFlagsFromCallee(ce *ir.CallExpr, callee *ir.Func) {
+	calleeProps := propsForFunc(callee)
+	if calleeProps == nil {
+		return
+	}
+	if debugTrace&debugTraceParams != 0 {
+		fmt.Fprintf(os.Stderr, "=-= callee props for %v:\n%s",
+			callee.Sym().Name, calleeProps.String())
+	}
+
+	must := []ParamPropBits{ParamFeedsInterfaceMethodCall, ParamFeedsIndirectCall, ParamFeedsIfOrSwitch}
+	may := []ParamPropBits{ParamMayFeedInterfaceMethodCall, ParamMayFeedIndirectCall, ParamMayFeedIfOrSwitch}
+
+	for pidx, arg := range ce.Args {
+		// Does the callee param have any interesting properties?
+		// If not we can skip this one.
+		pflag := calleeProps.ParamFlags[pidx]
+		if pflag == 0 {
+			continue
+		}
+		// See if one of the caller's parameters is flowing unmodified
+		// into this actual expression.
+		r := pa.staticValue(arg)
+		if r.Op() != ir.ONAME {
+			return
+		}
+		name := r.(*ir.Name)
+		if name.Class != ir.PPARAM {
+			return
+		}
+		callerParamIdx := pa.findParamIdx(name)
+		// note that callerParamIdx may be -1 in the case where
+		// the param belongs not to the current closure func we're
+		// analyzing but to an outer enclosing func.
+		if callerParamIdx == -1 {
+			return
+		}
+		if pa.params[callerParamIdx] == nil {
+			panic("something went wrong")
+		}
+		if !pa.top[callerParamIdx] &&
+			pa.values[callerParamIdx] == ParamNoInfo {
+			continue
+		}
+		if debugTrace&debugTraceParams != 0 {
+			fmt.Fprintf(os.Stderr, "=-= pflag for arg %d is %s\n",
+				pidx, pflag.String())
+		}
+		for i := range must {
+			mayv := may[i]
+			mustv := must[i]
+			if pflag&mustv != 0 && pa.condLevel == 0 {
+				pa.values[callerParamIdx] |= mustv
+			} else if pflag&(mustv|mayv) != 0 {
+				pa.values[callerParamIdx] |= mayv
+			}
+		}
+		pa.top[callerParamIdx] = false
+	}
+}
+
+func (pa *paramsAnalyzer) nodeVisitPost(n ir.Node) {
+	if len(pa.values) == 0 {
+		return
+	}
+	pa.condLevelTracker.post(n)
+	switch n.Op() {
+	case ir.OCALLFUNC:
+		ce := n.(*ir.CallExpr)
+		pa.callCheckParams(ce)
+	case ir.OCALLINTER:
+		ce := n.(*ir.CallExpr)
+		pa.callCheckParams(ce)
+	case ir.OIF:
+		ifst := n.(*ir.IfStmt)
+		pa.foldCheckParams(ifst.Cond)
+	case ir.OSWITCH:
+		swst := n.(*ir.SwitchStmt)
+		if swst.Tag != nil {
+			pa.foldCheckParams(swst.Tag)
+		}
+	}
+}
+
+func (pa *paramsAnalyzer) nodeVisitPre(n ir.Node) {
+	if len(pa.values) == 0 {
+		return
+	}
+	pa.condLevelTracker.pre(n)
+}
+
+// condLevelTracker helps keep track (very roughly) of the "level of conditional
+// nesting", e.g. how many "if" statements you have to go through to
+// get to the point where a given stmt executes. Example:
+//
+//	                      cond nesting level
+//	func foo() {
+//	 G = 1                   0
+//	 if x < 10 {             0
+//	  if y < 10 {            1
+//	   G = 0                 2
+//	  }
+//	 }
+//	}
+//
+// The intent here is to provide some sort of very abstract relative
+// hotness metric, e.g. "G = 1" above is expected to be executed more
+// often than "G = 0" (in the aggregate, across large numbers of
+// functions).
+type condLevelTracker struct {
+	condLevel int
+}
+
+func (c *condLevelTracker) pre(n ir.Node) {
+	// Increment level of "conditional testing" if we see
+	// an "if" or switch statement, and decrement if in
+	// a loop.
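+	// The loop decrement means statements inside a loop body are
+	// treated as being at a lower conditional-nesting level (i.e.
+	// relatively "hotter"), in keeping with the rough hotness metric
+	// described in the condLevelTracker comment above.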
+	switch n.Op() {
+	case ir.OIF, ir.OSWITCH:
+		c.condLevel++
+	case ir.OFOR, ir.ORANGE:
+		c.condLevel--
+	}
+}
+
+func (c *condLevelTracker) post(n ir.Node) {
+	switch n.Op() {
+	case ir.OFOR, ir.ORANGE:
+		c.condLevel++
+	case ir.OIF:
+		c.condLevel--
+	case ir.OSWITCH:
+		c.condLevel--
+	}
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go
new file mode 100644
index 0000000..2aaa68d
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go
@@ -0,0 +1,277 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/ir"
+	"fmt"
+	"go/constant"
+	"go/token"
+	"os"
+)
+
+// resultsAnalyzer stores state information for the process of
+// computing flags/properties for the return values of a specific Go
+// function, as part of inline heuristics synthesis.
+type resultsAnalyzer struct {
+	fname           string
+	props           []ResultPropBits
+	values          []resultVal
+	inlineMaxBudget int
+	*nameFinder
+}
+
+// resultVal captures information about a specific result returned from
+// the function we're analyzing; we are interested in cases where
+// the func always returns the same constant, or always returns
+// the same function, etc. This container stores info on a the specific
+// scenarios we're looking for.
+type resultVal struct {
+	cval    constant.Value // constant value, if the result is always the same constant
+	fn      *ir.Name       // function, if the result is always the same function
+	fnClo   bool           // true if 'fn' is a closure
+	top     bool           // no info recorded yet (dataflow lattice "top")
+	derived bool           // see deriveReturnFlagsFromCallee below
+}
+
+// addResultsAnalyzer creates a new resultsAnalyzer helper object for
+// the function fn, appends it to the analyzers list, and returns the
+// new list. If the function in question doesn't have any returns (or
+// any interesting returns) then the analyzer list is left as is, and
+// the result flags in "fp" are updated accordingly.
+func addResultsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, inlineMaxBudget int, nf *nameFinder) []propAnalyzer {
+	ra, props := makeResultsAnalyzer(fn, inlineMaxBudget, nf)
+	if ra != nil {
+		analyzers = append(analyzers, ra)
+	} else {
+		fp.ResultFlags = props
+	}
+	return analyzers
+}
+
+// makeResultsAnalyzer creates a new helper object to analyze results
+// in function fn. If the function doesn't have any interesting
+// results, a nil helper is returned along with a set of default
+// result flags for the func.
+func makeResultsAnalyzer(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) (*resultsAnalyzer, []ResultPropBits) {
+	results := fn.Type().Results()
+	if len(results) == 0 {
+		return nil, nil
+	}
+	props := make([]ResultPropBits, len(results))
+	if fn.Inl == nil {
+		return nil, props
+	}
+	vals := make([]resultVal, len(results))
+	interestingToAnalyze := false
+	for i := range results {
+		rt := results[i].Type
+		if !rt.IsScalar() && !rt.HasNil() {
+			// existing properties not applicable here (for things
+			// like structs, arrays, slices, etc).
+			continue
+		}
+		// set the "top" flag (as in "top element of data flow lattice")
+		// meaning "we have no info yet, but we might later on".
+		vals[i].top = true
+		interestingToAnalyze = true
+	}
+	if !interestingToAnalyze {
+		return nil, props
+	}
+	ra := &resultsAnalyzer{
+		props:           props,
+		values:          vals,
+		inlineMaxBudget: inlineMaxBudget,
+		nameFinder:      nf,
+	}
+	return ra, nil
+}
+
+// setResults transfers the calculated result properties for this
+// function to 'funcProps'.
+func (ra *resultsAnalyzer) setResults(funcProps *FuncProps) {
+	// Promote ResultAlwaysSameFunc to ResultAlwaysSameInlinableFunc
+	for i := range ra.values {
+		if ra.props[i] == ResultAlwaysSameFunc && !ra.values[i].derived {
+			f := ra.values[i].fn.Func
+			// HACK: in order to allow for call site score
+			// adjustments, we used a relaxed inline budget in
+			// determining inlinability. For the check below, however,
+			// we want to know is whether the func in question is
+			// what we want to know is whether the func in question is
+			// possibly be inlined if all the right score adjustments
+			// happened, so do a simple check based on the cost.
+			if f.Inl != nil && f.Inl.Cost <= int32(ra.inlineMaxBudget) {
+				ra.props[i] = ResultAlwaysSameInlinableFunc
+			}
+		}
+	}
+	funcProps.ResultFlags = ra.props
+}
+
+func (ra *resultsAnalyzer) pessimize() {
+	for i := range ra.props {
+		ra.props[i] = ResultNoInfo
+	}
+}
+
+func (ra *resultsAnalyzer) nodeVisitPre(n ir.Node) {
+}
+
+func (ra *resultsAnalyzer) nodeVisitPost(n ir.Node) {
+	if len(ra.values) == 0 {
+		return
+	}
+	if n.Op() != ir.ORETURN {
+		return
+	}
+	if debugTrace&debugTraceResults != 0 {
+		fmt.Fprintf(os.Stderr, "=+= returns nodevis %v %s\n",
+			ir.Line(n), n.Op().String())
+	}
+
+	// No support currently for named results, so if we see an empty
+	// "return" stmt, be conservative.
+	rs := n.(*ir.ReturnStmt)
+	if len(rs.Results) != len(ra.values) {
+		ra.pessimize()
+		return
+	}
+	for i, r := range rs.Results {
+		ra.analyzeResult(i, r)
+	}
+}
+
+// analyzeResult examines the expression 'n' being returned as the
+// 'ii'th argument in some return statement to see whether it has
+// interesting characteristics (for example, returns a constant), then
+// applies a dataflow "meet" operation to combine this result with any
+// previous result (for the given return slot) that we've already
+// processed.
+func (ra *resultsAnalyzer) analyzeResult(ii int, n ir.Node) {
+	isAllocMem := ra.isAllocatedMem(n)
+	isConcConvItf := ra.isConcreteConvIface(n)
+	constVal := ra.constValue(n)
+	isConst := (constVal != nil)
+	isNil := ra.isNil(n)
+	rfunc := ra.funcName(n)
+	isFunc := (rfunc != nil)
+	isClo := (rfunc != nil && rfunc.Func.OClosure != nil)
+	curp := ra.props[ii]
+	dprops, isDerivedFromCall := ra.deriveReturnFlagsFromCallee(n)
+	newp := ResultNoInfo
+	var newcval constant.Value
+	var newfunc *ir.Name
+
+	if debugTrace&debugTraceResults != 0 {
+		fmt.Fprintf(os.Stderr, "=-= %v: analyzeResult n=%s ismem=%v isconcconv=%v isconst=%v isnil=%v isfunc=%v isclo=%v\n", ir.Line(n), n.Op().String(), isAllocMem, isConcConvItf, isConst, isNil, isFunc, isClo)
+	}
+
+	if ra.values[ii].top {
+		ra.values[ii].top = false
+		// this is the first return we've seen; record
+		// whatever properties it has.
+		switch {
+		case isAllocMem:
+			newp = ResultIsAllocatedMem
+		case isConcConvItf:
+			newp = ResultIsConcreteTypeConvertedToInterface
+		case isFunc:
+			newp = ResultAlwaysSameFunc
+			newfunc = rfunc
+		case isConst:
+			newp = ResultAlwaysSameConstant
+			newcval = constVal
+		case isNil:
+			newp = ResultAlwaysSameConstant
+			newcval = nil
+		case isDerivedFromCall:
+			newp = dprops
+			ra.values[ii].derived = true
+		}
+	} else {
+		if !ra.values[ii].derived {
+			// this is not the first return we've seen; apply
+			// what amounts to a "meet" operator to combine
+			// the properties we see here with what we saw on
+			// the previous returns.
+			switch curp {
+			case ResultIsAllocatedMem:
+				if isAllocMem {
+					newp = ResultIsAllocatedMem
+				}
+			case ResultIsConcreteTypeConvertedToInterface:
+				if isConcConvItf {
+					newp = ResultIsConcreteTypeConvertedToInterface
+				}
+			case ResultAlwaysSameConstant:
+				if isNil && ra.values[ii].cval == nil {
+					newp = ResultAlwaysSameConstant
+					newcval = nil
+				} else if isConst && constant.Compare(constVal, token.EQL, ra.values[ii].cval) {
+					newp = ResultAlwaysSameConstant
+					newcval = constVal
+				}
+			case ResultAlwaysSameFunc:
+				if isFunc && isSameFuncName(rfunc, ra.values[ii].fn) {
+					newp = ResultAlwaysSameFunc
+					newfunc = rfunc
+				}
+			}
+		}
+	}
+	ra.values[ii].fn = newfunc
+	ra.values[ii].fnClo = isClo
+	ra.values[ii].cval = newcval
+	ra.props[ii] = newp
+
+	if debugTrace&debugTraceResults != 0 {
+		fmt.Fprintf(os.Stderr, "=-= %v: analyzeResult newp=%s\n",
+			ir.Line(n), newp)
+	}
+}
+
+// deriveReturnFlagsFromCallee tries to set properties for a given
+// return result where we're returning a call expression; return value
+// is a return property value and a boolean indicating whether the
+// prop is valid. Examples:
+//
+//	func foo() int { return bar() }
+//	func bar() int { return 42 }
+//	func blix() int { return 43 }
+//	func two(y int) int {
+//	  if y < 0 { return bar() } else { return blix() }
+//	}
+//
+// Since "foo" always returns the result of a call to "bar", we can
+// set foo's return property to that of bar. In the case of "two", however,
+// even though each return path returns a constant, we don't know
+// whether the constants are identical, hence we need to be conservative.
+func (ra *resultsAnalyzer) deriveReturnFlagsFromCallee(n ir.Node) (ResultPropBits, bool) {
+	if n.Op() != ir.OCALLFUNC {
+		return 0, false
+	}
+	ce := n.(*ir.CallExpr)
+	if ce.Fun.Op() != ir.ONAME {
+		return 0, false
+	}
+	called := ir.StaticValue(ce.Fun)
+	if called.Op() != ir.ONAME {
+		return 0, false
+	}
+	cname := ra.funcName(called)
+	if cname == nil {
+		return 0, false
+	}
+	calleeProps := propsForFunc(cname.Func)
+	if calleeProps == nil {
+		return 0, false
+	}
+	if len(calleeProps.ResultFlags) != 1 {
+		return 0, false
+	}
+	return calleeProps.ResultFlags[0], true
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/callsite.go b/src/cmd/compile/internal/inline/inlheur/callsite.go
new file mode 100644
index 0000000..f457dd4
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/callsite.go
@@ -0,0 +1,149 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/internal/src"
+	"fmt"
+	"io"
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+// CallSite records useful information about a potentially inlinable
+// (direct) function call. "Callee" is the target of the call, "Call"
+// is the ir node corresponding to the call itself, "Assign" is
+// the top-level assignment statement containing the call (if the call
+// appears in the form of a top-level statement, e.g. "x := foo()"),
+// "Flags" contains properties of the call that might be useful for
+// making inlining decisions, "Score" is the final score assigned to
+// the site, and "ID" is a numeric ID for the site within its
+// containing function.
+type CallSite struct {
+	Callee *ir.Func
+	Call   *ir.CallExpr
+	parent *CallSite // parent call site, set for sites within inlined bodies (debug aid)
+	Assign ir.Node
+	Flags  CSPropBits
+
+	ArgProps  []ActualExprPropBits
+	Score     int
+	ScoreMask scoreAdjustTyp
+	ID        uint
+	aux       uint8 // auxiliary flags (e.g. csAuxInlined)
+}
+
+// CallSiteTab is a table of call sites, keyed by call expr.
+// Ideally it would be nice to key the table by src.XPos, but
+// this results in collisions for calls on very long lines (the
+// front end saturates column numbers at 255). We also wind up
+// with many calls that share the same auto-generated pos.
+type CallSiteTab map[*ir.CallExpr]*CallSite
+
+// ActualExprPropBits describes a property of an actual expression (value
+// passed to some specific func argument at a call site).
+type ActualExprPropBits uint8
+
+const (
+	ActualExprConstant ActualExprPropBits = 1 << iota
+	ActualExprIsConcreteConvIface
+	ActualExprIsFunc
+	ActualExprIsInlinableFunc
+)
+
+type CSPropBits uint32
+
+const (
+	CallSiteInLoop CSPropBits = 1 << iota
+	CallSiteOnPanicPath
+	CallSiteInInitFunc
+)
+
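+// csAuxBits describes auxiliary bookkeeping bits for a call site
+// (intended for the CallSite "aux" field); currently the only such
+// bit is csAuxInlined, set once the call at the site has been
+// inlined.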
+type csAuxBits uint8
+
+const (
+	csAuxInlined = 1 << iota
+)
+
+// encodedCallSiteTab is a table keyed by "encoded" callsite
+// (stringified src.XPos plus call site ID) mapping to a value of call
+// property bits and score.
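+// A typical key (as produced by EncodeCallSiteKey below) might look
+// like "foo.go:10:13|0", i.e. the expanded file:line:col of the call
+// followed by the call site ID; "foo.go" here is just an
+// illustrative file name.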
+type encodedCallSiteTab map[string]propsAndScore
+
+type propsAndScore struct {
+	props CSPropBits
+	score int
+	mask  scoreAdjustTyp
+}
+
+func (pas propsAndScore) String() string {
+	return fmt.Sprintf("P=%s|S=%d|M=%s", pas.props.String(),
+		pas.score, pas.mask.String())
+}
+
+func (cst CallSiteTab) merge(other CallSiteTab) error {
+	for k, v := range other {
+		if prev, ok := cst[k]; ok {
+			return fmt.Errorf("internal error: collision during call site table merge, fn=%s callsite=%s", prev.Callee.Sym().Name, fmtFullPos(prev.Call.Pos()))
+		}
+		cst[k] = v
+	}
+	return nil
+}
+
+func fmtFullPos(p src.XPos) string {
+	var sb strings.Builder
+	sep := ""
+	base.Ctxt.AllPos(p, func(pos src.Pos) {
+		sb.WriteString(sep)
+		sep = "|"
+		file := filepath.Base(pos.Filename())
+		fmt.Fprintf(&sb, "%s:%d:%d", file, pos.Line(), pos.Col())
+	})
+	return sb.String()
+}
+
+func EncodeCallSiteKey(cs *CallSite) string {
+	var sb strings.Builder
+	// FIXME: maybe rewrite line offsets relative to function start?
+	sb.WriteString(fmtFullPos(cs.Call.Pos()))
+	fmt.Fprintf(&sb, "|%d", cs.ID)
+	return sb.String()
+}
+
+func buildEncodedCallSiteTab(tab CallSiteTab) encodedCallSiteTab {
+	r := make(encodedCallSiteTab)
+	for _, cs := range tab {
+		k := EncodeCallSiteKey(cs)
+		r[k] = propsAndScore{
+			props: cs.Flags,
+			score: cs.Score,
+			mask:  cs.ScoreMask,
+		}
+	}
+	return r
+}
+
+// dumpCallSiteComments emits comments into the dump file for the
+// callsites in the function of interest. If "ecst" is non-nil, we use
+// that, otherwise we generate a fresh encodedCallSiteTab from "tab".
+func dumpCallSiteComments(w io.Writer, tab CallSiteTab, ecst encodedCallSiteTab) {
+	if ecst == nil {
+		ecst = buildEncodedCallSiteTab(tab)
+	}
+	tags := make([]string, 0, len(ecst))
+	for k := range ecst {
+		tags = append(tags, k)
+	}
+	sort.Strings(tags)
+	for _, s := range tags {
+		v := ecst[s]
+		fmt.Fprintf(w, "// callsite: %s flagstr %q flagval %d score %d mask %d maskstr %q\n", s, v.props.String(), v.props, v.score, v.mask, v.mask.String())
+	}
+	fmt.Fprintf(w, "// %s\n", csDelimiter)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go b/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go
new file mode 100644
index 0000000..216f510
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go
@@ -0,0 +1,56 @@
+// Code generated by "stringer -bitset -type CSPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+import "bytes"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[CallSiteInLoop-1]
+	_ = x[CallSiteOnPanicPath-2]
+	_ = x[CallSiteInInitFunc-4]
+}
+
+var _CSPropBits_value = [...]uint64{
+	0x1, /* CallSiteInLoop */
+	0x2, /* CallSiteOnPanicPath */
+	0x4, /* CallSiteInInitFunc */
+}
+
+const _CSPropBits_name = "CallSiteInLoopCallSiteOnPanicPathCallSiteInInitFunc"
+
+var _CSPropBits_index = [...]uint8{0, 14, 33, 51}
+
+func (i CSPropBits) String() string {
+	var b bytes.Buffer
+
+	remain := uint64(i)
+	seen := false
+
+	for k, v := range _CSPropBits_value {
+		x := _CSPropBits_name[_CSPropBits_index[k]:_CSPropBits_index[k+1]]
+		if v == 0 {
+			if i == 0 {
+				b.WriteString(x)
+				return b.String()
+			}
+			continue
+		}
+		if (v & remain) == v {
+			remain &^= v
+			x := _CSPropBits_name[_CSPropBits_index[k]:_CSPropBits_index[k+1]]
+			if seen {
+				b.WriteString("|")
+			}
+			seen = true
+			b.WriteString(x)
+		}
+	}
+	if remain == 0 {
+		return b.String()
+	}
+	return "CSPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/debugflags_test.go b/src/cmd/compile/internal/inline/inlheur/debugflags_test.go
new file mode 100644
index 0000000..abf4910
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/debugflags_test.go
@@ -0,0 +1,65 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"testing"
+)
+
+func TestInlScoreAdjFlagParse(t *testing.T) {
+	scenarios := []struct {
+		value string
+		expok bool
+	}{
+		{
+			value: "returnFeedsConcreteToInterfaceCallAdj:9",
+			expok: true,
+		},
+		{
+			value: "panicPathAdj:-1/initFuncAdj:9",
+			expok: true,
+		},
+		{
+			value: "",
+			expok: false,
+		},
+		{
+			value: "nonsenseAdj:10",
+			expok: false,
+		},
+		{
+			value: "inLoopAdj:",
+			expok: false,
+		},
+		{
+			value: "inLoopAdj:10:10",
+			expok: false,
+		},
+		{
+			value: "inLoopAdj:blah",
+			expok: false,
+		},
+		{
+			value: "/",
+			expok: false,
+		},
+	}
+
+	for _, scenario := range scenarios {
+		err := parseScoreAdj(scenario.value)
+		t.Logf("for value=%q err is %v\n", scenario.value, err)
+		if scenario.expok {
+			if err != nil {
+				t.Errorf("expected parseScoreAdj(%s) ok, got err %v",
+					scenario.value, err)
+			}
+		} else {
+			if err == nil {
+				t.Errorf("expected parseScoreAdj(%s) failure, got success",
+					scenario.value)
+			}
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go b/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go
new file mode 100644
index 0000000..438b700
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go
@@ -0,0 +1,109 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"internal/testenv"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+)
+
+func TestDumpCallSiteScoreDump(t *testing.T) {
+	td := t.TempDir()
+	testenv.MustHaveGoBuild(t)
+
+	scenarios := []struct {
+		name               string
+		promoted           int
+		indirectlyPromoted int
+		demoted            int
+		unchanged          int
+	}{
+		{
+			name:               "dumpscores",
+			promoted:           1,
+			indirectlyPromoted: 1,
+			demoted:            1,
+			unchanged:          5,
+		},
+	}
+
+	for _, scen := range scenarios {
+		dumpfile, err := gatherInlCallSitesScoresForFile(t, scen.name, td)
+		if err != nil {
+			t.Fatalf("dumping callsite scores for %q: error %v", scen.name, err)
+		}
+		var lines []string
+		if content, err := os.ReadFile(dumpfile); err != nil {
+			t.Fatalf("reading dump %q: error %v", dumpfile, err)
+		} else {
+			lines = strings.Split(string(content), "\n")
+		}
+		prom, indprom, dem, unch := 0, 0, 0, 0
+		for _, line := range lines {
+			switch {
+			case strings.TrimSpace(line) == "":
+			case !strings.Contains(line, "|"):
+			case strings.HasPrefix(line, "#"):
+			case strings.Contains(line, "PROMOTED"):
+				prom++
+			case strings.Contains(line, "INDPROM"):
+				indprom++
+			case strings.Contains(line, "DEMOTED"):
+				dem++
+			default:
+				unch++
+			}
+		}
+		showout := false
+		if prom != scen.promoted {
+			t.Errorf("testcase %q, got %d promoted want %d promoted",
+				scen.name, prom, scen.promoted)
+			showout = true
+		}
+		if indprom != scen.indirectlyPromoted {
+			t.Errorf("testcase %q, got %d indirectly promoted want %d",
+				scen.name, indprom, scen.indirectlyPromoted)
+			showout = true
+		}
+		if dem != scen.demoted {
+			t.Errorf("testcase %q, got %d demoted want %d demoted",
+				scen.name, dem, scen.demoted)
+			showout = true
+		}
+		if unch != scen.unchanged {
+			t.Errorf("testcase %q, got %d unchanged want %d unchanged",
+				scen.name, unch, scen.unchanged)
+			showout = true
+		}
+		if showout {
+			t.Logf(">> dump output: %s", strings.Join(lines, "\n"))
+		}
+	}
+}
+
+// gatherInlCallSitesScoresForFile builds the specified testcase 'testcase'
+// from testdata, passing the "-d=dumpinlcallsitescores=1"
+// compiler option, to produce a dump, then returns the path of the
+// newly created file.
+func gatherInlCallSitesScoresForFile(t *testing.T, testcase string, td string) (string, error) {
+	t.Helper()
+	gopath := "testdata/" + testcase + ".go"
+	outpath := filepath.Join(td, testcase+".a")
+	dumpfile := filepath.Join(td, testcase+".callsites.txt")
+	run := []string{testenv.GoToolPath(t), "build",
+		"-gcflags=-d=dumpinlcallsitescores=1", "-o", outpath, gopath}
+	out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput()
+	t.Logf("run: %+v\n", run)
+	if err != nil {
+		return "", err
+	}
+	if err := os.WriteFile(dumpfile, out, 0666); err != nil {
+		return "", err
+	}
+	return dumpfile, err
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/eclassify.go b/src/cmd/compile/internal/inline/inlheur/eclassify.go
new file mode 100644
index 0000000..1e6d1b9
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/eclassify.go
@@ -0,0 +1,247 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/ir"
+	"fmt"
+	"os"
+)
+
+// ShouldFoldIfNameConstant analyzes expression tree 'n' to see
+// whether it contains only combinations of simple references to all
+// of the names in 'names' with selected constants + operators. The
+// intent is to identify expressions that could be folded away to a
+// constant if the values of those names were available. Return value
+// is TRUE if 'n' does look foldable given those values, and given
+// that 'n' actually makes reference to the names. Some examples,
+// where the type of "n" is int64, the type of "s" is string, and the
+// type of "p" is *byte:
+//
+//	Simple?		Expr
+//	yes			n<10
+//	yes			n*n-100
+//	yes			(n < 10 || n > 100) && (n >= 12 || n <= 99 || n != 101)
+//	yes			s == "foo"
+//	yes			p == nil
+//	no			n<foo()
+//	no			n<1 || n>m
+//	no			float32(n)<1.0
+//	no			*p == 1
+//	no			1 + 100
+//	no			1 / n
+//	no			1 + unsafe.Sizeof(n)
+//
+// To avoid complexities (e.g. nan, inf) we stay away from folding
+// floating point or complex operations (integers, bools, and strings
+// only). We also try to be conservative about avoiding any operation
+// that might result in a panic at runtime, e.g. for "n" with type
+// int64:
+//
+//	1<<(n-9) < 100/(n<<9999)
+//
+// we would return FALSE due to the negative shift count and/or
+// potential divide by zero.
+func ShouldFoldIfNameConstant(n ir.Node, names []*ir.Name) bool {
+	cl := makeExprClassifier(names)
+	var doNode func(ir.Node) bool
+	doNode = func(n ir.Node) bool {
+		ir.DoChildren(n, doNode)
+		cl.Visit(n)
+		return false
+	}
+	doNode(n)
+	if cl.getdisp(n) != exprSimple {
+		return false
+	}
+	for _, v := range cl.names {
+		if !v {
+			return false
+		}
+	}
+	return true
+}
+
+// exprClassifier holds intermediate state about nodes within an
+// expression tree being analyzed by ShouldFoldIfNameConstant. Here
+// "name" is the name node passed in, and "disposition" stores the
+// result of classifying a given IR node.
+type exprClassifier struct {
+	names       map[*ir.Name]bool
+	disposition map[ir.Node]disp
+}
+
+type disp int
+
+const (
+	// no info on this expr
+	exprNoInfo disp = iota
+
+	// expr contains only literals
+	exprLiterals
+
+	// expr is legal combination of literals and specified names
+	exprSimple
+)
+
+func (d disp) String() string {
+	switch d {
+	case exprNoInfo:
+		return "noinfo"
+	case exprSimple:
+		return "simple"
+	case exprLiterals:
+		return "literals"
+	default:
+		return fmt.Sprintf("unknown<%d>", d)
+	}
+}
+
+func makeExprClassifier(names []*ir.Name) *exprClassifier {
+	m := make(map[*ir.Name]bool, len(names))
+	for _, n := range names {
+		m[n] = false
+	}
+	return &exprClassifier{
+		names:       m,
+		disposition: make(map[ir.Node]disp),
+	}
+}
+
+// Visit sets the classification for 'n' based on the previously
+// calculated classifications for n's children, as part of a bottom-up
+// walk over an expression tree.
+func (ec *exprClassifier) Visit(n ir.Node) {
+
+	ndisp := exprNoInfo
+
+	binparts := func(n ir.Node) (ir.Node, ir.Node) {
+		if lex, ok := n.(*ir.LogicalExpr); ok {
+			return lex.X, lex.Y
+		} else if bex, ok := n.(*ir.BinaryExpr); ok {
+			return bex.X, bex.Y
+		} else {
+			panic("bad")
+		}
+	}
+
+	t := n.Type()
+	if t == nil {
+		if debugTrace&debugTraceExprClassify != 0 {
+			fmt.Fprintf(os.Stderr, "=-= *** untyped op=%s\n",
+				n.Op().String())
+		}
+	} else if t.IsInteger() || t.IsString() || t.IsBoolean() || t.HasNil() {
+		switch n.Op() {
+		// FIXME: maybe add support for OADDSTR?
+		case ir.ONIL:
+			ndisp = exprLiterals
+
+		case ir.OLITERAL:
+			if _, ok := n.(*ir.BasicLit); !ok {
+				panic("unexpected")
+			}
+			ndisp = exprLiterals
+
+		case ir.ONAME:
+			nn := n.(*ir.Name)
+			if _, ok := ec.names[nn]; ok {
+				ndisp = exprSimple
+				ec.names[nn] = true
+			} else {
+				sv := ir.StaticValue(n)
+				if sv.Op() == ir.ONAME {
+					nn = sv.(*ir.Name)
+				}
+				if _, ok := ec.names[nn]; ok {
+					ndisp = exprSimple
+					ec.names[nn] = true
+				}
+			}
+
+		case ir.ONOT,
+			ir.OPLUS,
+			ir.ONEG:
+			uex := n.(*ir.UnaryExpr)
+			ndisp = ec.getdisp(uex.X)
+
+		case ir.OEQ,
+			ir.ONE,
+			ir.OLT,
+			ir.OGT,
+			ir.OGE,
+			ir.OLE:
+			// compare ops
+			x, y := binparts(n)
+			ndisp = ec.dispmeet(x, y)
+			if debugTrace&debugTraceExprClassify != 0 {
+				fmt.Fprintf(os.Stderr, "=-= meet(%s,%s) = %s for op=%s\n",
+					ec.getdisp(x), ec.getdisp(y), ec.dispmeet(x, y),
+					n.Op().String())
+			}
+		case ir.OLSH,
+			ir.ORSH,
+			ir.ODIV,
+			ir.OMOD:
+			x, y := binparts(n)
+			if ec.getdisp(y) == exprLiterals {
+				ndisp = ec.dispmeet(x, y)
+			}
+
+		case ir.OADD,
+			ir.OSUB,
+			ir.OOR,
+			ir.OXOR,
+			ir.OMUL,
+			ir.OAND,
+			ir.OANDNOT,
+			ir.OANDAND,
+			ir.OOROR:
+			x, y := binparts(n)
+			if debugTrace&debugTraceExprClassify != 0 {
+				fmt.Fprintf(os.Stderr, "=-= meet(%s,%s) = %s for op=%s\n",
+					ec.getdisp(x), ec.getdisp(y), ec.dispmeet(x, y),
+					n.Op().String())
+			}
+			ndisp = ec.dispmeet(x, y)
+		}
+	}
+
+	if debugTrace&debugTraceExprClassify != 0 {
+		fmt.Fprintf(os.Stderr, "=-= op=%s disp=%v\n", n.Op().String(),
+			ndisp.String())
+	}
+
+	ec.disposition[n] = ndisp
+}
+
+func (ec *exprClassifier) getdisp(x ir.Node) disp {
+	if d, ok := ec.disposition[x]; ok {
+		return d
+	} else {
+		panic("missing node from disp table")
+	}
+}
+
+// dispmeet performs a "meet" operation on the data flow states of
+// node x and y (where the term "meet" is being drawn from traditional
+// lattice-theoretical data flow analysis terminology).
+func (ec *exprClassifier) dispmeet(x, y ir.Node) disp {
+	xd := ec.getdisp(x)
+	if xd == exprNoInfo {
+		return exprNoInfo
+	}
+	yd := ec.getdisp(y)
+	if yd == exprNoInfo {
+		return exprNoInfo
+	}
+	if xd == exprSimple || yd == exprSimple {
+		return exprSimple
+	}
+	if xd != exprLiterals || yd != exprLiterals {
+		panic("unexpected")
+	}
+	return exprLiterals
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/funcprop_string.go b/src/cmd/compile/internal/inline/inlheur/funcprop_string.go
new file mode 100644
index 0000000..d16e4d33
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/funcprop_string.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"fmt"
+	"strings"
+)
+
+func (fp *FuncProps) String() string {
+	return fp.ToString("")
+}
+
+func (fp *FuncProps) ToString(prefix string) string {
+	var sb strings.Builder
+	if fp.Flags != 0 {
+		fmt.Fprintf(&sb, "%sFlags %s\n", prefix, fp.Flags)
+	}
+	flagSliceToSB[ParamPropBits](&sb, fp.ParamFlags,
+		prefix, "ParamFlags")
+	flagSliceToSB[ResultPropBits](&sb, fp.ResultFlags,
+		prefix, "ResultFlags")
+	return sb.String()
+}
+
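+// flagSliceToSB writes a per-index listing of the property-bit slice
+// 'sl' to 'sb' under the heading 'tag', with each line prefixed by
+// 'prefix'. The section is emitted only if at least one entry in the
+// slice is nonzero.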
+func flagSliceToSB[T interface {
+	~uint32
+	String() string
+}](sb *strings.Builder, sl []T, prefix string, tag string) {
+	var sb2 strings.Builder
+	foundnz := false
+	fmt.Fprintf(&sb2, "%s%s\n", prefix, tag)
+	for i, e := range sl {
+		if e != 0 {
+			foundnz = true
+		}
+		fmt.Fprintf(&sb2, "%s  %d %s\n", prefix, i, e.String())
+	}
+	if foundnz {
+		sb.WriteString(sb2.String())
+	}
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go b/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go
new file mode 100644
index 0000000..28de4a9
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go
@@ -0,0 +1,58 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -bitset -type FuncPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import (
+	"bytes"
+	"strconv"
+)
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[FuncPropNeverReturns-1]
+}
+
+var _FuncPropBits_value = [...]uint64{
+	0x1, /* FuncPropNeverReturns */
+}
+
+const _FuncPropBits_name = "FuncPropNeverReturns"
+
+var _FuncPropBits_index = [...]uint8{0, 20}
+
+func (i FuncPropBits) String() string {
+	var b bytes.Buffer
+
+	remain := uint64(i)
+	seen := false
+
+	for k, v := range _FuncPropBits_value {
+		x := _FuncPropBits_name[_FuncPropBits_index[k]:_FuncPropBits_index[k+1]]
+		if v == 0 {
+			if i == 0 {
+				b.WriteString(x)
+				return b.String()
+			}
+			continue
+		}
+		if (v & remain) == v {
+			remain &^= v
+			x := _FuncPropBits_name[_FuncPropBits_index[k]:_FuncPropBits_index[k+1]]
+			if seen {
+				b.WriteString("|")
+			}
+			seen = true
+			b.WriteString(x)
+		}
+	}
+	if remain == 0 {
+		return b.String()
+	}
+	return "FuncPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/funcprops_test.go b/src/cmd/compile/internal/inline/inlheur/funcprops_test.go
new file mode 100644
index 0000000..c04e604
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/funcprops_test.go
@@ -0,0 +1,530 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"bufio"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"internal/testenv"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+)
+
+var remasterflag = flag.Bool("update-expected", false, "if true, generate updated golden results in testcases for all props tests")
+
+func TestFuncProperties(t *testing.T) {
+	td := t.TempDir()
+	// td = "/tmp/qqq"
+	// os.RemoveAll(td)
+	// os.Mkdir(td, 0777)
+	testenv.MustHaveGoBuild(t)
+
+	// NOTE: this testpoint has the unfortunate characteristic that it
+	// relies on the installed compiler, meaning that if you make
+	// changes to the inline heuristics code in your working copy and
+	// then run the test, it will test the installed compiler and not
+	// your local modifications. TODO: decide whether to convert this
+	// to building a fresh compiler on the fly, or using some other
+	// scheme.
+
+	testcases := []string{"funcflags", "returns", "params",
+		"acrosscall", "calls", "returns2"}
+	for _, tc := range testcases {
+		dumpfile, err := gatherPropsDumpForFile(t, tc, td)
+		if err != nil {
+			t.Fatalf("dumping func props for %q: error %v", tc, err)
+		}
+		// Read in the newly generated dump.
+		dentries, dcsites, derr := readDump(t, dumpfile)
+		if derr != nil {
+			t.Fatalf("reading func prop dump: %v", derr)
+		}
+		if *remasterflag {
+			updateExpected(t, tc, dentries, dcsites)
+			continue
+		}
+		// Generate expected dump.
+		epath, egerr := genExpected(td, tc)
+		if egerr != nil {
+			t.Fatalf("generating expected func prop dump: %v", egerr)
+		}
+		// Read in the expected result entries.
+		eentries, ecsites, eerr := readDump(t, epath)
+		if eerr != nil {
+			t.Fatalf("reading expected func prop dump: %v", eerr)
+		}
+		// Compare new vs expected.
+		n := len(dentries)
+		eidx := 0
+		for i := 0; i < n; i++ {
+			dentry := dentries[i]
+			dcst := dcsites[i]
+			if !interestingToCompare(dentry.fname) {
+				continue
+			}
+			if eidx >= len(eentries) {
+				t.Errorf("testcase %s missing expected entry for %s, skipping", tc, dentry.fname)
+				continue
+			}
+			eentry := eentries[eidx]
+			ecst := ecsites[eidx]
+			eidx++
+			if dentry.fname != eentry.fname {
+				t.Errorf("got fn %q wanted %q, skipping checks",
+					dentry.fname, eentry.fname)
+				continue
+			}
+			compareEntries(t, tc, &dentry, dcst, &eentry, ecst)
+		}
+	}
+}
+
+func propBitsToString[T interface{ String() string }](sl []T) string {
+	var sb strings.Builder
+	for i, f := range sl {
+		fmt.Fprintf(&sb, "%d: %s\n", i, f.String())
+	}
+	return sb.String()
+}
+
+func compareEntries(t *testing.T, tc string, dentry *fnInlHeur, dcsites encodedCallSiteTab, eentry *fnInlHeur, ecsites encodedCallSiteTab) {
+	dfp := dentry.props
+	efp := eentry.props
+	dfn := dentry.fname
+
+	// Compare function flags.
+	if dfp.Flags != efp.Flags {
+		t.Errorf("testcase %q: Flags mismatch for %q: got %s, wanted %s",
+			tc, dfn, dfp.Flags.String(), efp.Flags.String())
+	}
+	// Compare returns
+	rgot := propBitsToString[ResultPropBits](dfp.ResultFlags)
+	rwant := propBitsToString[ResultPropBits](efp.ResultFlags)
+	if rgot != rwant {
+		t.Errorf("testcase %q: Results mismatch for %q: got:\n%swant:\n%s",
+			tc, dfn, rgot, rwant)
+	}
+	// Compare receiver + params.
+	pgot := propBitsToString[ParamPropBits](dfp.ParamFlags)
+	pwant := propBitsToString[ParamPropBits](efp.ParamFlags)
+	if pgot != pwant {
+		t.Errorf("testcase %q: Params mismatch for %q: got:\n%swant:\n%s",
+			tc, dfn, pgot, pwant)
+	}
+	// Compare call sites.
+	for k, ve := range ecsites {
+		if vd, ok := dcsites[k]; !ok {
+			t.Errorf("testcase %q missing expected callsite %q in func %q", tc, k, dfn)
+			continue
+		} else {
+			if vd != ve {
+				t.Errorf("testcase %q callsite %q in func %q: got %+v want %+v",
+					tc, k, dfn, vd.String(), ve.String())
+			}
+		}
+	}
+	for k := range dcsites {
+		if _, ok := ecsites[k]; !ok {
+			t.Errorf("testcase %q unexpected extra callsite %q in func %q", tc, k, dfn)
+		}
+	}
+}
+
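+// dumpReader carries the state used while parsing a dump file: a
+// scanner over the file contents, the test handle (for reporting
+// failures), the path of the file being read, and the current line
+// number.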
+type dumpReader struct {
+	s  *bufio.Scanner
+	t  *testing.T
+	p  string
+	ln int
+}
+
+// readDump reads in the contents of a dump file produced
+// by the "-d=dumpinlfuncprops=..." command line flag by the Go
+// compiler. It breaks the dump down into separate sections
+// by function, then deserializes each func section into a
+// fnInlHeur object and returns a slice of those objects.
+func readDump(t *testing.T, path string) ([]fnInlHeur, []encodedCallSiteTab, error) {
+	content, err := os.ReadFile(path)
+	if err != nil {
+		return nil, nil, err
+	}
+	dr := &dumpReader{
+		s:  bufio.NewScanner(strings.NewReader(string(content))),
+		t:  t,
+		p:  path,
+		ln: 1,
+	}
+	// consume header comment until preamble delimiter.
+	found := false
+	for dr.scan() {
+		if dr.curLine() == preambleDelimiter {
+			found = true
+			break
+		}
+	}
+	if !found {
+		return nil, nil, fmt.Errorf("malformed testcase file %s, missing preamble delimiter", path)
+	}
+	res := []fnInlHeur{}
+	csres := []encodedCallSiteTab{}
+	for {
+		dentry, dcst, err := dr.readEntry()
+		if err != nil {
+			t.Fatalf("reading func prop dump: %v", err)
+		}
+		if dentry.fname == "" {
+			break
+		}
+		res = append(res, dentry)
+		csres = append(csres, dcst)
+	}
+	return res, csres, nil
+}
+
+func (dr *dumpReader) scan() bool {
+	v := dr.s.Scan()
+	if v {
+		dr.ln++
+	}
+	return v
+}
+
+func (dr *dumpReader) curLine() string {
+	res := strings.TrimSpace(dr.s.Text())
+	if !strings.HasPrefix(res, "// ") {
+		dr.t.Fatalf("malformed line %s:%d, no comment: %s", dr.p, dr.ln, res)
+	}
+	return res[3:]
+}
+
+// readObjBlob reads in a series of commented lines until
+// it hits a delimiter, then returns the contents of the comments.
+func (dr *dumpReader) readObjBlob(delim string) (string, error) {
+	var sb strings.Builder
+	foundDelim := false
+	for dr.scan() {
+		line := dr.curLine()
+		if delim == line {
+			foundDelim = true
+			break
+		}
+		sb.WriteString(line + "\n")
+	}
+	if err := dr.s.Err(); err != nil {
+		return "", err
+	}
+	if !foundDelim {
+		return "", fmt.Errorf("malformed input %s, missing delimiter %q",
+			dr.p, delim)
+	}
+	return sb.String(), nil
+}
+
+// readEntry reads a single function's worth of material from
+// a file produced by the "-d=dumpinlfuncprops=..." command line
+// flag. It deserializes the json for the func properties and
+// returns the resulting properties and function name. EOF is
+// signaled by a nil FuncProps return (with no error).
+func (dr *dumpReader) readEntry() (fnInlHeur, encodedCallSiteTab, error) {
+	var funcInlHeur fnInlHeur
+	var callsites encodedCallSiteTab
+	if !dr.scan() {
+		return funcInlHeur, callsites, nil
+	}
+	// first line contains info about function: file/name/line
+	info := dr.curLine()
+	chunks := strings.Fields(info)
+	funcInlHeur.file = chunks[0]
+	funcInlHeur.fname = chunks[1]
+	if _, err := fmt.Sscanf(chunks[2], "%d", &funcInlHeur.line); err != nil {
+		return funcInlHeur, callsites, fmt.Errorf("scanning line %q: %v", info, err)
+	}
+	// consume comments until and including delimiter
+	for {
+		if !dr.scan() {
+			break
+		}
+		if dr.curLine() == comDelimiter {
+			break
+		}
+	}
+
+	// Consume JSON for encoded props.
+	dr.scan()
+	line := dr.curLine()
+	fp := &FuncProps{}
+	if err := json.Unmarshal([]byte(line), fp); err != nil {
+		return funcInlHeur, callsites, err
+	}
+	funcInlHeur.props = fp
+
+	// Consume callsites.
+	callsites = make(encodedCallSiteTab)
+	for dr.scan() {
+		line := dr.curLine()
+		if line == csDelimiter {
+			break
+		}
+		// expected format: "// callsite: <expanded pos> flagstr <desc> flagval <flags> score <score> mask <scoremask> maskstr <scoremaskstring>"
+		fields := strings.Fields(line)
+		if len(fields) != 12 {
+			return funcInlHeur, nil, fmt.Errorf("malformed callsite (nf=%d) %s line %d: %s", len(fields), dr.p, dr.ln, line)
+		}
+		if fields[2] != "flagstr" || fields[4] != "flagval" || fields[6] != "score" || fields[8] != "mask" || fields[10] != "maskstr" {
+			return funcInlHeur, nil, fmt.Errorf("malformed callsite %s line %d: %s",
+				dr.p, dr.ln, line)
+		}
+		tag := fields[1]
+		flagstr := fields[5]
+		flags, err := strconv.Atoi(flagstr)
+		if err != nil {
+			return funcInlHeur, nil, fmt.Errorf("bad flags val %s line %d: %q err=%v",
+				dr.p, dr.ln, line, err)
+		}
+		scorestr := fields[7]
+		score, err2 := strconv.Atoi(scorestr)
+		if err2 != nil {
+			return funcInlHeur, nil, fmt.Errorf("bad score val %s line %d: %q err=%v",
+				dr.p, dr.ln, line, err2)
+		}
+		maskstr := fields[9]
+		mask, err3 := strconv.Atoi(maskstr)
+		if err3 != nil {
+			return funcInlHeur, nil, fmt.Errorf("bad mask val %s line %d: %q err=%v",
+				dr.p, dr.ln, line, err3)
+		}
+		callsites[tag] = propsAndScore{
+			props: CSPropBits(flags),
+			score: score,
+			mask:  scoreAdjustTyp(mask),
+		}
+	}
+
+	// Consume function delimiter.
+	dr.scan()
+	line = dr.curLine()
+	if line != fnDelimiter {
+		return funcInlHeur, nil, fmt.Errorf("malformed testcase file %q, missing delimiter %q", dr.p, fnDelimiter)
+	}
+
+	return funcInlHeur, callsites, nil
+}
+
+// gatherPropsDumpForFile builds the specified testcase 'testcase' from
+// testdata/props passing the "-d=dumpinlfuncprops=..." compiler option,
+// to produce a properties dump, then returns the path of the newly
+// created file. NB: we can't use "go tool compile" here, since
+// some of the test cases import stdlib packages (such as "os").
+// This means using "go build", which is problematic since the
+// Go command can potentially cache the results of the compile step,
+// causing the test to fail when being run interactively. E.g.
+//
+//	$ rm -f dump.txt
+//	$ go build -o foo.a -gcflags=-d=dumpinlfuncprops=dump.txt foo.go
+//	$ rm -f dump.txt foo.a
+//	$ go build -o foo.a -gcflags=-d=dumpinlfuncprops=dump.txt foo.go
+//	$ ls foo.a dump.txt > /dev/null
+//	ls : cannot access 'dump.txt': No such file or directory
+//	$
+//
+// For this reason, pick a unique filename for the dump, so as to
+// defeat the caching.
+func gatherPropsDumpForFile(t *testing.T, testcase string, td string) (string, error) {
+	t.Helper()
+	gopath := "testdata/props/" + testcase + ".go"
+	outpath := filepath.Join(td, testcase+".a")
+	salt := fmt.Sprintf(".p%dt%d", os.Getpid(), time.Now().UnixNano())
+	dumpfile := filepath.Join(td, testcase+salt+".dump.txt")
+	run := []string{testenv.GoToolPath(t), "build",
+		"-gcflags=-d=dumpinlfuncprops=" + dumpfile, "-o", outpath, gopath}
+	out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput()
+	if err != nil {
+		t.Logf("compile command: %+v", run)
+	}
+	if strings.TrimSpace(string(out)) != "" {
+		t.Logf("%s", out)
+	}
+	return dumpfile, err
+}
+
+// genExpected reads in a given Go testcase file, strips out all the
+// unindented (column 0) comments, writes them out to a new file, and
+// returns the path of that new file. By picking out just the comments
+// from the Go file we wind up with something that resembles the
+// output from a "-d=dumpinlfuncprops=..." compilation.
+func genExpected(td string, testcase string) (string, error) {
+	epath := filepath.Join(td, testcase+".expected")
+	outf, err := os.OpenFile(epath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+	if err != nil {
+		return "", err
+	}
+	gopath := "testdata/props/" + testcase + ".go"
+	content, err := os.ReadFile(gopath)
+	if err != nil {
+		return "", err
+	}
+	lines := strings.Split(string(content), "\n")
+	for _, line := range lines[3:] {
+		if !strings.HasPrefix(line, "// ") {
+			continue
+		}
+		fmt.Fprintf(outf, "%s\n", line)
+	}
+	if err := outf.Close(); err != nil {
+		return "", err
+	}
+	return epath, nil
+}
+
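+// upexState holds state used by updateExpected when rewriting a
+// testcase file: the dump entries being written back, the accumulated
+// lines of the new Go file, and a per-source-line count of dump
+// entries (used to handle generic instances and closures that share a
+// starting line).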
+type upexState struct {
+	dentries   []fnInlHeur
+	newgolines []string
+	atline     map[uint]uint
+}
+
+func mkUpexState(dentries []fnInlHeur) *upexState {
+	atline := make(map[uint]uint)
+	for _, e := range dentries {
+		atline[e.line] = atline[e.line] + 1
+	}
+	return &upexState{
+		dentries: dentries,
+		atline:   atline,
+	}
+}
+
+// updateExpected takes a given Go testcase file X.go and writes out a
+// new/updated version of the file to X.go.new, where the column-0
+// "expected" comments have been updated using fresh data from
+// "dentries".
+//
+// Writing of expected results is complicated by closures and by
+// generics, where you can have multiple functions that all share the
+// same starting line. Currently we combine all the dups and
+// closures into a single pre-func comment.
+func updateExpected(t *testing.T, testcase string, dentries []fnInlHeur, dcsites []encodedCallSiteTab) {
+	nd := len(dentries)
+
+	ues := mkUpexState(dentries)
+
+	gopath := "testdata/props/" + testcase + ".go"
+	newgopath := "testdata/props/" + testcase + ".go.new"
+
+	// Read the existing Go file.
+	content, err := os.ReadFile(gopath)
+	if err != nil {
+		t.Fatalf("opening %s: %v", gopath, err)
+	}
+	golines := strings.Split(string(content), "\n")
+
+	// Preserve copyright.
+	ues.newgolines = append(ues.newgolines, golines[:4]...)
+	if !strings.HasPrefix(golines[0], "// Copyright") {
+		t.Fatalf("missing copyright from existing testcase")
+	}
+	golines = golines[4:]
+
+	clore := regexp.MustCompile(`.+\.func\d+[\.\d]*$`)
+
+	emitFunc := func(e *fnInlHeur, dcsites encodedCallSiteTab,
+		instance, atl uint) {
+		var sb strings.Builder
+		dumpFnPreamble(&sb, e, dcsites, instance, atl)
+		ues.newgolines = append(ues.newgolines,
+			strings.Split(strings.TrimSpace(sb.String()), "\n")...)
+	}
+
+	// Write file preamble with "DO NOT EDIT" message and such.
+	var sb strings.Builder
+	dumpFilePreamble(&sb)
+	ues.newgolines = append(ues.newgolines,
+		strings.Split(strings.TrimSpace(sb.String()), "\n")...)
+
+	// Helper to add a clump of functions to the output file.
+	processClump := func(idx int, emit bool) int {
+		// Process func itself, plus anything else defined
+		// on the same line
+		atl := ues.atline[dentries[idx].line]
+		for k := uint(0); k < atl; k++ {
+			if emit {
+				emitFunc(&dentries[idx], dcsites[idx], k, atl)
+			}
+			idx++
+		}
+		// now process any closures it contains
+		ncl := 0
+		for idx < nd {
+			nfn := dentries[idx].fname
+			if !clore.MatchString(nfn) {
+				break
+			}
+			ncl++
+			if emit {
+				emitFunc(&dentries[idx], dcsites[idx], 0, 1)
+			}
+			idx++
+		}
+		return idx
+	}
+
+	didx := 0
+	for _, line := range golines {
+		if strings.HasPrefix(line, "func ") {
+
+			// We have a function definition.
+			// Pick out the corresponding entry or entries in the dump
+			// and emit if interesting (or skip if not).
+			dentry := dentries[didx]
+			emit := interestingToCompare(dentry.fname)
+			didx = processClump(didx, emit)
+		}
+
+		// Consume all existing comments.
+		if strings.HasPrefix(line, "//") {
+			continue
+		}
+		ues.newgolines = append(ues.newgolines, line)
+	}
+
+	if didx != nd {
+		t.Logf("didx=%d wanted %d", didx, nd)
+	}
+
+	// Open new Go file and write contents.
+	of, err := os.OpenFile(newgopath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+	if err != nil {
+		t.Fatalf("opening %s: %v", newgopath, err)
+	}
+	fmt.Fprintf(of, "%s", strings.Join(ues.newgolines, "\n"))
+	if err := of.Close(); err != nil {
+		t.Fatalf("closing %s: %v", newgopath, err)
+	}
+
+	t.Logf("update-expected: emitted updated file %s", newgopath)
+	t.Logf("please compare the two files, then overwrite %s with %s\n",
+		gopath, newgopath)
+}
+
+// interestingToCompare returns TRUE if we want to compare results
+// for function 'fname'.
+func interestingToCompare(fname string) bool {
+	if strings.HasPrefix(fname, "init.") {
+		return true
+	}
+	if strings.HasPrefix(fname, "T_") {
+		return true
+	}
+	f := strings.Split(fname, ".")
+	if len(f) == 2 && strings.HasPrefix(f[1], "T_") {
+		return true
+	}
+	return false
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/function_properties.go b/src/cmd/compile/internal/inline/inlheur/function_properties.go
new file mode 100644
index 0000000..b90abf9
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/function_properties.go
@@ -0,0 +1,98 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+// This file defines a set of Go function "properties" intended to
+// guide inlining heuristics; these properties may apply to the
+// function as a whole, or to one or more function return values or
+// parameters.
+//
+// IMPORTANT: function properties are produced on a "best effort"
+// basis, meaning that the code that computes them doesn't verify that
+// the properties are guaranteed to be true in 100% of cases. For this
+// reason, properties should only be used to drive always-safe
+// optimization decisions (e.g. "should I inline this call", or
+// "should I unroll this loop") as opposed to potentially unsafe IR
+// alterations that could change program semantics (e.g. "can I delete
+// this variable" or "can I move this statement to a new location").
+//
+//----------------------------------------------------------------
+
+// FuncProps describes a set of function or method properties that may
+// be useful for inlining heuristics. Here 'Flags' are properties that
+// we think apply to the entire function; 'ParamFlags' are
+// properties of specific function params (or the receiver), and
+// 'ResultFlags' are properties we think will apply to values
+// of specific results. Note that 'ParamFlags' includes an entry for
+// the receiver if applicable, and does include entries for blank
+// params; for a function such as "func foo(_ int, b byte, _ float32)"
+// the length of ParamFlags will be 3.
+type FuncProps struct {
+	Flags       FuncPropBits
+	ParamFlags  []ParamPropBits // slot 0 receiver if applicable
+	ResultFlags []ResultPropBits
+}
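+
+// As an illustrative, hand-written sketch (not compiler output): for a
+// hypothetical function
+//
+//	func alwaysFive() int { return 5 }
+//
+// a plausible FuncProps value would have Flags == 0 (the function
+// returns normally), an empty ParamFlags slice (no receiver or
+// parameters), and ResultFlags equal to
+// []ResultPropBits{ResultAlwaysSameConstant}, since the sole result is
+// always the same compile-time constant.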
+
+type FuncPropBits uint32
+
+const (
+	// Function always panics or invokes os.Exit() or a func that does
+	// likewise.
+	FuncPropNeverReturns FuncPropBits = 1 << iota
+)
+
+type ParamPropBits uint32
+
+const (
+	// No info about this param
+	ParamNoInfo ParamPropBits = 0
+
+	// Parameter value feeds unmodified into a top-level interface
+	// call (this assumes the parameter is of interface type).
+	ParamFeedsInterfaceMethodCall ParamPropBits = 1 << iota
+
+	// Parameter value feeds unmodified into an interface call that
+	// may be conditional/nested and not always executed (this assumes
+	// the parameter is of interface type).
+	ParamMayFeedInterfaceMethodCall ParamPropBits = 1 << iota
+
+	// Parameter value feeds unmodified into a top level indirect
+	// function call (assumes parameter is of function type).
+	ParamFeedsIndirectCall
+
+	// Parameter value feeds unmodified into an indirect function call
+	// that is conditional/nested (not guaranteed to execute). Assumes
+	// parameter is of function type.
+	ParamMayFeedIndirectCall
+
+	// Parameter value feeds unmodified into a top level "switch"
+	// statement or "if" statement simple expressions (see more on
+	// "simple" expression classification below).
+	ParamFeedsIfOrSwitch
+
+	// Parameter value feeds unmodified into a "switch" or "if"
+	// statement simple expressions (see more on "simple" expression
+	// classification below), where the if/switch is
+	// conditional/nested.
+	ParamMayFeedIfOrSwitch
+)
+
+type ResultPropBits uint32
+
+const (
+	// No info about this result
+	ResultNoInfo ResultPropBits = 0
+	// This result always contains allocated memory.
+	ResultIsAllocatedMem ResultPropBits = 1 << iota
+	// This result is always a single concrete type that is
+	// implicitly converted to interface.
+	ResultIsConcreteTypeConvertedToInterface
+	// Result is always the same non-composite compile time constant.
+	ResultAlwaysSameConstant
+	// Result is always the same function or closure.
+	ResultAlwaysSameFunc
+	// Result is always the same (potentially) inlinable function or closure.
+	ResultAlwaysSameInlinableFunc
+)
diff --git a/src/cmd/compile/internal/inline/inlheur/names.go b/src/cmd/compile/internal/inline/inlheur/names.go
new file mode 100644
index 0000000..0223850
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/names.go
@@ -0,0 +1,129 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/ir"
+	"go/constant"
+)
+
+// nameFinder provides a set of "isXXX" query methods for clients to
+// ask whether a given AST node corresponds to a function, a constant
+// value, and so on. These methods use an underlying ir.ReassignOracle
+// to return more precise results in cases where an "interesting"
+// value is assigned to a singly-defined local temp. Example:
+//
+//	const q = 101
+//	fq := func() int { return q }
+//	copyOfConstant := q
+//	copyOfFunc := f
+//	interestingCall(copyOfConstant, copyOfFunc)
+//
+// A name finder query method invoked on the arguments being passed to
+// "interestingCall" will be able to detect that 'copyOfConstant' always
+// evaluates to a constant (even though it is in fact a PAUTO local
+// variable). A given nameFinder can also operate without using
+// ir.ReassignOracle (in cases where it is not practical to look
+// at the entire function); in such cases queries will still work
+// for explicit constant values and functions.
+type nameFinder struct {
+	ro *ir.ReassignOracle
+}
+
+// newNameFinder returns a new nameFinder object with a reassignment
+// oracle initialized based on the function fn, or if fn is nil,
+// without an underlying ReassignOracle.
+func newNameFinder(fn *ir.Func) *nameFinder {
+	var ro *ir.ReassignOracle
+	if fn != nil {
+		ro = &ir.ReassignOracle{}
+		ro.Init(fn)
+	}
+	return &nameFinder{ro: ro}
+}
+
+// funcName returns the *ir.Name for the func or method
+// corresponding to node 'n', or nil if n can't be proven
+// to contain a function value.
+func (nf *nameFinder) funcName(n ir.Node) *ir.Name {
+	sv := n
+	if nf.ro != nil {
+		sv = nf.ro.StaticValue(n)
+	}
+	if name := ir.StaticCalleeName(sv); name != nil {
+		return name
+	}
+	return nil
+}
+
+// isAllocatedMem returns true if node n corresponds to a memory
+// allocation expression (make, new, or equivalent).
+func (nf *nameFinder) isAllocatedMem(n ir.Node) bool {
+	sv := n
+	if nf.ro != nil {
+		sv = nf.ro.StaticValue(n)
+	}
+	switch sv.Op() {
+	case ir.OMAKESLICE, ir.ONEW, ir.OPTRLIT, ir.OSLICELIT:
+		return true
+	}
+	return false
+}
+
+// constValue returns the underlying constant.Value for an AST node n
+// if n is itself a constant value/expr, or if n is a singly assigned
+// local containing a constant expr/value (or nil if not constant).
+func (nf *nameFinder) constValue(n ir.Node) constant.Value {
+	sv := n
+	if nf.ro != nil {
+		sv = nf.ro.StaticValue(n)
+	}
+	if sv.Op() == ir.OLITERAL {
+		return sv.Val()
+	}
+	return nil
+}
+
+// isNil returns whether n is nil (or singly
+// assigned local containing nil).
+func (nf *nameFinder) isNil(n ir.Node) bool {
+	sv := n
+	if nf.ro != nil {
+		sv = nf.ro.StaticValue(n)
+	}
+	return sv.Op() == ir.ONIL
+}
+
+func (nf *nameFinder) staticValue(n ir.Node) ir.Node {
+	if nf.ro == nil {
+		return n
+	}
+	return nf.ro.StaticValue(n)
+}
+
+func (nf *nameFinder) reassigned(n *ir.Name) bool {
+	if nf.ro == nil {
+		return true
+	}
+	return nf.ro.Reassigned(n)
+}
+
+func (nf *nameFinder) isConcreteConvIface(n ir.Node) bool {
+	sv := n
+	if nf.ro != nil {
+		sv = nf.ro.StaticValue(n)
+	}
+	if sv.Op() != ir.OCONVIFACE {
+		return false
+	}
+	return !sv.(*ir.ConvExpr).X.Type().IsInterface()
+}
+
+func isSameFuncName(v1, v2 *ir.Name) bool {
+	// NB: there are a few corner cases where pointer equality
+	// doesn't work here, but this should be good enough for
+	// our purposes here.
+	return v1 == v2
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go b/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go
new file mode 100644
index 0000000..bf4d3ca
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go
@@ -0,0 +1,70 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -bitset -type ParamPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import (
+	"bytes"
+	"strconv"
+)
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[ParamNoInfo-0]
+	_ = x[ParamFeedsInterfaceMethodCall-2]
+	_ = x[ParamMayFeedInterfaceMethodCall-4]
+	_ = x[ParamFeedsIndirectCall-8]
+	_ = x[ParamMayFeedIndirectCall-16]
+	_ = x[ParamFeedsIfOrSwitch-32]
+	_ = x[ParamMayFeedIfOrSwitch-64]
+}
+
+var _ParamPropBits_value = [...]uint64{
+	0x0,  /* ParamNoInfo */
+	0x2,  /* ParamFeedsInterfaceMethodCall */
+	0x4,  /* ParamMayFeedInterfaceMethodCall */
+	0x8,  /* ParamFeedsIndirectCall */
+	0x10, /* ParamMayFeedIndirectCall */
+	0x20, /* ParamFeedsIfOrSwitch */
+	0x40, /* ParamMayFeedIfOrSwitch */
+}
+
+const _ParamPropBits_name = "ParamNoInfoParamFeedsInterfaceMethodCallParamMayFeedInterfaceMethodCallParamFeedsIndirectCallParamMayFeedIndirectCallParamFeedsIfOrSwitchParamMayFeedIfOrSwitch"
+
+var _ParamPropBits_index = [...]uint8{0, 11, 40, 71, 93, 117, 137, 159}
+
+func (i ParamPropBits) String() string {
+	var b bytes.Buffer
+
+	remain := uint64(i)
+	seen := false
+
+	for k, v := range _ParamPropBits_value {
+		x := _ParamPropBits_name[_ParamPropBits_index[k]:_ParamPropBits_index[k+1]]
+		if v == 0 {
+			if i == 0 {
+				b.WriteString(x)
+				return b.String()
+			}
+			continue
+		}
+		if (v & remain) == v {
+			remain &^= v
+			x := _ParamPropBits_name[_ParamPropBits_index[k]:_ParamPropBits_index[k+1]]
+			if seen {
+				b.WriteString("|")
+			}
+			seen = true
+			b.WriteString(x)
+		}
+	}
+	if remain == 0 {
+		return b.String()
+	}
+	return "ParamPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/pstate_string.go b/src/cmd/compile/internal/inline/inlheur/pstate_string.go
new file mode 100644
index 0000000..e6108d1
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/pstate_string.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -type pstate"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[psNoInfo-0]
+	_ = x[psCallsPanic-1]
+	_ = x[psMayReturn-2]
+	_ = x[psTop-3]
+}
+
+const _pstate_name = "psNoInfopsCallsPanicpsMayReturnpsTop"
+
+var _pstate_index = [...]uint8{0, 8, 20, 31, 36}
+
+func (i pstate) String() string {
+	if i < 0 || i >= pstate(len(_pstate_index)-1) {
+		return "pstate(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _pstate_name[_pstate_index[i]:_pstate_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go b/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go
new file mode 100644
index 0000000..888af98
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go
@@ -0,0 +1,68 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -bitset -type ResultPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import (
+	"bytes"
+	"strconv"
+)
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[ResultNoInfo-0]
+	_ = x[ResultIsAllocatedMem-2]
+	_ = x[ResultIsConcreteTypeConvertedToInterface-4]
+	_ = x[ResultAlwaysSameConstant-8]
+	_ = x[ResultAlwaysSameFunc-16]
+	_ = x[ResultAlwaysSameInlinableFunc-32]
+}
+
+var _ResultPropBits_value = [...]uint64{
+	0x0,  /* ResultNoInfo */
+	0x2,  /* ResultIsAllocatedMem */
+	0x4,  /* ResultIsConcreteTypeConvertedToInterface */
+	0x8,  /* ResultAlwaysSameConstant */
+	0x10, /* ResultAlwaysSameFunc */
+	0x20, /* ResultAlwaysSameInlinableFunc */
+}
+
+const _ResultPropBits_name = "ResultNoInfoResultIsAllocatedMemResultIsConcreteTypeConvertedToInterfaceResultAlwaysSameConstantResultAlwaysSameFuncResultAlwaysSameInlinableFunc"
+
+var _ResultPropBits_index = [...]uint8{0, 12, 32, 72, 96, 116, 145}
+
+func (i ResultPropBits) String() string {
+	var b bytes.Buffer
+
+	remain := uint64(i)
+	seen := false
+
+	for k, v := range _ResultPropBits_value {
+		x := _ResultPropBits_name[_ResultPropBits_index[k]:_ResultPropBits_index[k+1]]
+		if v == 0 {
+			if i == 0 {
+				b.WriteString(x)
+				return b.String()
+			}
+			continue
+		}
+		if (v & remain) == v {
+			remain &^= v
+			x := _ResultPropBits_name[_ResultPropBits_index[k]:_ResultPropBits_index[k+1]]
+			if seen {
+				b.WriteString("|")
+			}
+			seen = true
+			b.WriteString(x)
+		}
+	}
+	if remain == 0 {
+		return b.String()
+	}
+	return "ResultPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go b/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go
new file mode 100644
index 0000000..b95ea37
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go
@@ -0,0 +1,413 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/ir"
+	"fmt"
+	"os"
+)
+
+// This file contains code to re-score callsites based on how the
+// results of the call were used.  Example:
+//
+//    func foo() {
+//       x, fptr := bar()
+//       switch x {
+//         case 10: fptr = baz()
+//         default: blix()
+//       }
+//       fptr(100)
+//     }
+//
+// The initial scoring pass will assign a score to "bar()" based on
+// various criteria; however, once the first pass of scoring is done,
+// we look at the flags on the result from bar, and check to see
+// how those results are used. If bar() always returns the same constant
+// for its first result, and if the variable receiving that result
+// isn't redefined, and if that variable feeds into an if/switch
+// condition, then we will try to adjust the score for "bar" (on the
+// theory that if we inlined, we can constant fold / deadcode).
+
+type resultPropAndCS struct {
+	defcs *CallSite
+	props ResultPropBits
+}
+
+type resultUseAnalyzer struct {
+	resultNameTab map[*ir.Name]resultPropAndCS
+	fn            *ir.Func
+	cstab         CallSiteTab
+	*condLevelTracker
+}
+
+// rescoreBasedOnCallResultUses examines how call results are used,
+// and tries to update the scores of calls based on how their results
+// are used in the function.
+func (csa *callSiteAnalyzer) rescoreBasedOnCallResultUses(fn *ir.Func, resultNameTab map[*ir.Name]resultPropAndCS, cstab CallSiteTab) {
+	enableDebugTraceIfEnv()
+	rua := &resultUseAnalyzer{
+		resultNameTab:    resultNameTab,
+		fn:               fn,
+		cstab:            cstab,
+		condLevelTracker: new(condLevelTracker),
+	}
+	var doNode func(ir.Node) bool
+	doNode = func(n ir.Node) bool {
+		rua.nodeVisitPre(n)
+		ir.DoChildren(n, doNode)
+		rua.nodeVisitPost(n)
+		return false
+	}
+	doNode(fn)
+	disableDebugTrace()
+}
+
+func (csa *callSiteAnalyzer) examineCallResults(cs *CallSite, resultNameTab map[*ir.Name]resultPropAndCS) map[*ir.Name]resultPropAndCS {
+	if debugTrace&debugTraceScoring != 0 {
+		fmt.Fprintf(os.Stderr, "=-= examining call results for %q\n",
+			EncodeCallSiteKey(cs))
+	}
+
+	// Invoke a helper to pick out the specific ir.Name's the results
+	// from this call are assigned into, e.g. "x, y := fooBar()". If
+	// the call is not part of an assignment statement, or if the
+	// variables in question are not newly defined, then we'll receive
+	// an empty list here.
+	//
+	names, autoTemps, props := namesDefined(cs)
+	if len(names) == 0 {
+		return resultNameTab
+	}
+
+	if debugTrace&debugTraceScoring != 0 {
+		fmt.Fprintf(os.Stderr, "=-= %d names defined\n", len(names))
+	}
+
+	// For each returned value, if the value has interesting
+	// properties (ex: always returns the same constant), and the name
+	// in question is never redefined, then make an entry in the
+	// result table for it.
+	const interesting = (ResultIsConcreteTypeConvertedToInterface |
+		ResultAlwaysSameConstant | ResultAlwaysSameInlinableFunc | ResultAlwaysSameFunc)
+	for idx, n := range names {
+		rprop := props.ResultFlags[idx]
+
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= props for ret %d %q: %s\n",
+				idx, n.Sym().Name, rprop.String())
+		}
+
+		if rprop&interesting == 0 {
+			continue
+		}
+		if csa.nameFinder.reassigned(n) {
+			continue
+		}
+		if resultNameTab == nil {
+			resultNameTab = make(map[*ir.Name]resultPropAndCS)
+		} else if _, ok := resultNameTab[n]; ok {
+			panic("should never happen")
+		}
+		entry := resultPropAndCS{
+			defcs: cs,
+			props: rprop,
+		}
+		resultNameTab[n] = entry
+		if autoTemps[idx] != nil {
+			resultNameTab[autoTemps[idx]] = entry
+		}
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= add resultNameTab table entry n=%v autotemp=%v props=%s\n", n, autoTemps[idx], rprop.String())
+		}
+	}
+	return resultNameTab
+}
+
+// namesDefined returns a list of ir.Name's corresponding to locals
+// that receive the results from the call at site 'cs', plus the
+// properties object for the called function. If a given result
+// isn't cleanly assigned to a newly defined local, the
+// slot for that result in the returned list will be nil. Example:
+//
+//	call                             returned name list
+//
+//	x := foo()                       [ x ]
+//	z, y := bar()                    [ nil, nil ]
+//	_, q := baz()                    [ nil, q ]
+//
+// In the case of a multi-return call, such as "x, y := foo()",
+// the pattern we see from the front end will be a call op
+// assigning to auto-temps, and then an assignment of the auto-temps
+// to the user-level variables. In such cases we return
+// first the user-level variable (in the first func result)
+// and then the auto-temp name in the second result.
+func namesDefined(cs *CallSite) ([]*ir.Name, []*ir.Name, *FuncProps) {
+	// If this call doesn't feed into an assignment (and of course not
+	// all calls do), then we don't have anything to work with here.
+	if cs.Assign == nil {
+		return nil, nil, nil
+	}
+	funcInlHeur, ok := fpmap[cs.Callee]
+	if !ok {
+		// TODO: add an assert/panic here.
+		return nil, nil, nil
+	}
+	if len(funcInlHeur.props.ResultFlags) == 0 {
+		return nil, nil, nil
+	}
+
+	// Single return case.
+	if len(funcInlHeur.props.ResultFlags) == 1 {
+		asgn, ok := cs.Assign.(*ir.AssignStmt)
+		if !ok {
+			return nil, nil, nil
+		}
+		// locate name being assigned
+		aname, ok := asgn.X.(*ir.Name)
+		if !ok {
+			return nil, nil, nil
+		}
+		return []*ir.Name{aname}, []*ir.Name{nil}, funcInlHeur.props
+	}
+
+	// Multi-return case
+	asgn, ok := cs.Assign.(*ir.AssignListStmt)
+	if !ok || !asgn.Def {
+		return nil, nil, nil
+	}
+	userVars := make([]*ir.Name, len(funcInlHeur.props.ResultFlags))
+	autoTemps := make([]*ir.Name, len(funcInlHeur.props.ResultFlags))
+	for idx, x := range asgn.Lhs {
+		if n, ok := x.(*ir.Name); ok {
+			userVars[idx] = n
+			r := asgn.Rhs[idx]
+			if r.Op() == ir.OCONVNOP {
+				r = r.(*ir.ConvExpr).X
+			}
+			if ir.IsAutoTmp(r) {
+				autoTemps[idx] = r.(*ir.Name)
+			}
+			if debugTrace&debugTraceScoring != 0 {
+				fmt.Fprintf(os.Stderr, "=-= multi-ret namedef uv=%v at=%v\n",
+					x, autoTemps[idx])
+			}
+		} else {
+			return nil, nil, nil
+		}
+	}
+	return userVars, autoTemps, funcInlHeur.props
+}
+
+func (rua *resultUseAnalyzer) nodeVisitPost(n ir.Node) {
+	rua.condLevelTracker.post(n)
+}
+
+func (rua *resultUseAnalyzer) nodeVisitPre(n ir.Node) {
+	rua.condLevelTracker.pre(n)
+	switch n.Op() {
+	case ir.OCALLINTER:
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= rescore examine iface call %v:\n", n)
+		}
+		rua.callTargetCheckResults(n)
+	case ir.OCALLFUNC:
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= rescore examine call %v:\n", n)
+		}
+		rua.callTargetCheckResults(n)
+	case ir.OIF:
+		ifst := n.(*ir.IfStmt)
+		rua.foldCheckResults(ifst.Cond)
+	case ir.OSWITCH:
+		swst := n.(*ir.SwitchStmt)
+		if swst.Tag != nil {
+			rua.foldCheckResults(swst.Tag)
+		}
+
+	}
+}
+
+// callTargetCheckResults examines a given call to see whether the
+// callee expression is potentially an inlinable function returned
+// from a potentially inlinable call. Examples:
+//
+//	Scenario 1: named intermediate
+//
+//	   fn1 := foo()         conc := bar()
+//	   fn1("blah")          conc.MyMethod()
+//
+//	Scenario 2: returned func or concrete object feeds directly to call
+//
+//	   foo()("blah")        bar().MyMethod()
+//
+// In the second case although at the source level the result of the
+// direct call feeds right into the method call or indirect call,
+// we're relying on the front end having inserted an auto-temp to
+// capture the value.
+func (rua *resultUseAnalyzer) callTargetCheckResults(call ir.Node) {
+	ce := call.(*ir.CallExpr)
+	rname := rua.getCallResultName(ce)
+	if rname == nil {
+		return
+	}
+	if debugTrace&debugTraceScoring != 0 {
+		fmt.Fprintf(os.Stderr, "=-= staticvalue returns %v:\n",
+			rname)
+	}
+	if rname.Class != ir.PAUTO {
+		return
+	}
+	switch call.Op() {
+	case ir.OCALLINTER:
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= in %s checking %v for cci prop:\n",
+				rua.fn.Sym().Name, rname)
+		}
+		if cs := rua.returnHasProp(rname, ResultIsConcreteTypeConvertedToInterface); cs != nil {
+
+			adj := returnFeedsConcreteToInterfaceCallAdj
+			cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+		}
+	case ir.OCALLFUNC:
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= in %s checking %v for samefunc props:\n",
+				rua.fn.Sym().Name, rname)
+			v, ok := rua.resultNameTab[rname]
+			if !ok {
+				fmt.Fprintf(os.Stderr, "=-= no entry for %v in rt\n", rname)
+			} else {
+				fmt.Fprintf(os.Stderr, "=-= props for %v: %q\n", rname, v.props.String())
+			}
+		}
+		if cs := rua.returnHasProp(rname, ResultAlwaysSameInlinableFunc); cs != nil {
+			adj := returnFeedsInlinableFuncToIndCallAdj
+			cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+		} else if cs := rua.returnHasProp(rname, ResultAlwaysSameFunc); cs != nil {
+			adj := returnFeedsFuncToIndCallAdj
+			cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+
+		}
+	}
+}
+
+// foldCheckResults examines the specified if/switch condition 'cond'
+// to see if it refers to locals defined by a (potentially inlinable)
+// function call at call site C, and if so, whether 'cond' contains
+// only combinations of simple references to all of the names in
+// 'names' with selected constants + operators. If these criteria are
+// met, then we adjust the score for call site C to reflect the
+// fact that inlining will enable deadcode and/or constant propagation.
+// Note: for this heuristic to kick in, the names in question have to
+// be all from the same callsite. Examples:
+//
+//	  q, r := baz()	    x, y := foo()
+//	  switch q+r {		a, b, c := bar()
+//		...			    if x && y && a && b && c {
+//	  }					   ...
+//					    }
+//
+// For the call to "baz" above we apply a score adjustment, but not
+// for the calls to "foo" or "bar".
+func (rua *resultUseAnalyzer) foldCheckResults(cond ir.Node) {
+	namesUsed := collectNamesUsed(cond)
+	if len(namesUsed) == 0 {
+		return
+	}
+	var cs *CallSite
+	for _, n := range namesUsed {
+		rpcs, found := rua.resultNameTab[n]
+		if !found {
+			return
+		}
+		if cs != nil && rpcs.defcs != cs {
+			return
+		}
+		cs = rpcs.defcs
+		if rpcs.props&ResultAlwaysSameConstant == 0 {
+			return
+		}
+	}
+	if debugTrace&debugTraceScoring != 0 {
+		nls := func(nl []*ir.Name) string {
+			r := ""
+			for _, n := range nl {
+				r += " " + n.Sym().Name
+			}
+			return r
+		}
+		fmt.Fprintf(os.Stderr, "=-= calling ShouldFoldIfNameConstant on names={%s} cond=%v\n", nls(namesUsed), cond)
+	}
+
+	if !ShouldFoldIfNameConstant(cond, namesUsed) {
+		return
+	}
+	adj := returnFeedsConstToIfAdj
+	cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+}
+
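+// collectNamesUsed walks the expression 'expr' and returns the
+// function-local (PAUTO class) names referenced within it.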
+func collectNamesUsed(expr ir.Node) []*ir.Name {
+	res := []*ir.Name{}
+	ir.Visit(expr, func(n ir.Node) {
+		if n.Op() != ir.ONAME {
+			return
+		}
+		nn := n.(*ir.Name)
+		if nn.Class != ir.PAUTO {
+			return
+		}
+		res = append(res, nn)
+	})
+	return res
+}
+
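+// returnHasProp returns the call site whose results defined 'name' if
+// that result carries the property 'prop', or nil otherwise.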
+func (rua *resultUseAnalyzer) returnHasProp(name *ir.Name, prop ResultPropBits) *CallSite {
+	v, ok := rua.resultNameTab[name]
+	if !ok {
+		return nil
+	}
+	if v.props&prop == 0 {
+		return nil
+	}
+	return v.defcs
+}
+
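+// getCallResultName returns the ir.Name (if any) holding the call
+// target of 'ce' (for an indirect call) or its receiver (for a method
+// call), using ir.StaticValue to look through trivial assignments; it
+// returns nil if no suitable name can be identified.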
+func (rua *resultUseAnalyzer) getCallResultName(ce *ir.CallExpr) *ir.Name {
+	var callTarg ir.Node
+	if sel, ok := ce.Fun.(*ir.SelectorExpr); ok {
+		// method call
+		callTarg = sel.X
+	} else if ctarg, ok := ce.Fun.(*ir.Name); ok {
+		// regular call
+		callTarg = ctarg
+	} else {
+		return nil
+	}
+	r := ir.StaticValue(callTarg)
+	if debugTrace&debugTraceScoring != 0 {
+		fmt.Fprintf(os.Stderr, "=-= staticname on %v returns %v:\n",
+			callTarg, r)
+	}
+	if r.Op() == ir.OCALLFUNC {
+		// This corresponds to the "x := foo()" case; here
+		// ir.StaticValue has brought us all the way back to
+		// the call expression itself. We need to back off to
+		// the name defined by the call; do this by looking up
+		// the callsite.
+		ce := r.(*ir.CallExpr)
+		cs, ok := rua.cstab[ce]
+		if !ok {
+			return nil
+		}
+		names, _, _ := namesDefined(cs)
+		if len(names) == 0 {
+			return nil
+		}
+		return names[0]
+	} else if r.Op() == ir.ONAME {
+		return r.(*ir.Name)
+	}
+	return nil
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go b/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go
new file mode 100644
index 0000000..f5b8bf6
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go
@@ -0,0 +1,80 @@
+// Code generated by "stringer -bitset -type scoreAdjustTyp"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+import "bytes"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[panicPathAdj-1]
+	_ = x[initFuncAdj-2]
+	_ = x[inLoopAdj-4]
+	_ = x[passConstToIfAdj-8]
+	_ = x[passConstToNestedIfAdj-16]
+	_ = x[passConcreteToItfCallAdj-32]
+	_ = x[passConcreteToNestedItfCallAdj-64]
+	_ = x[passFuncToIndCallAdj-128]
+	_ = x[passFuncToNestedIndCallAdj-256]
+	_ = x[passInlinableFuncToIndCallAdj-512]
+	_ = x[passInlinableFuncToNestedIndCallAdj-1024]
+	_ = x[returnFeedsConstToIfAdj-2048]
+	_ = x[returnFeedsFuncToIndCallAdj-4096]
+	_ = x[returnFeedsInlinableFuncToIndCallAdj-8192]
+	_ = x[returnFeedsConcreteToInterfaceCallAdj-16384]
+}
+
+var _scoreAdjustTyp_value = [...]uint64{
+	0x1,    /* panicPathAdj */
+	0x2,    /* initFuncAdj */
+	0x4,    /* inLoopAdj */
+	0x8,    /* passConstToIfAdj */
+	0x10,   /* passConstToNestedIfAdj */
+	0x20,   /* passConcreteToItfCallAdj */
+	0x40,   /* passConcreteToNestedItfCallAdj */
+	0x80,   /* passFuncToIndCallAdj */
+	0x100,  /* passFuncToNestedIndCallAdj */
+	0x200,  /* passInlinableFuncToIndCallAdj */
+	0x400,  /* passInlinableFuncToNestedIndCallAdj */
+	0x800,  /* returnFeedsConstToIfAdj */
+	0x1000, /* returnFeedsFuncToIndCallAdj */
+	0x2000, /* returnFeedsInlinableFuncToIndCallAdj */
+	0x4000, /* returnFeedsConcreteToInterfaceCallAdj */
+}
+
+const _scoreAdjustTyp_name = "panicPathAdjinitFuncAdjinLoopAdjpassConstToIfAdjpassConstToNestedIfAdjpassConcreteToItfCallAdjpassConcreteToNestedItfCallAdjpassFuncToIndCallAdjpassFuncToNestedIndCallAdjpassInlinableFuncToIndCallAdjpassInlinableFuncToNestedIndCallAdjreturnFeedsConstToIfAdjreturnFeedsFuncToIndCallAdjreturnFeedsInlinableFuncToIndCallAdjreturnFeedsConcreteToInterfaceCallAdj"
+
+var _scoreAdjustTyp_index = [...]uint16{0, 12, 23, 32, 48, 70, 94, 124, 144, 170, 199, 234, 257, 284, 320, 357}
+
+func (i scoreAdjustTyp) String() string {
+	var b bytes.Buffer
+
+	remain := uint64(i)
+	seen := false
+
+	for k, v := range _scoreAdjustTyp_value {
+		x := _scoreAdjustTyp_name[_scoreAdjustTyp_index[k]:_scoreAdjustTyp_index[k+1]]
+		if v == 0 {
+			if i == 0 {
+				b.WriteString(x)
+				return b.String()
+			}
+			continue
+		}
+		if (v & remain) == v {
+			remain &^= v
+			x := _scoreAdjustTyp_name[_scoreAdjustTyp_index[k]:_scoreAdjustTyp_index[k+1]]
+			if seen {
+				b.WriteString("|")
+			}
+			seen = true
+			b.WriteString(x)
+		}
+	}
+	if remain == 0 {
+		return b.String()
+	}
+	return "scoreAdjustTyp(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/scoring.go b/src/cmd/compile/internal/inline/inlheur/scoring.go
new file mode 100644
index 0000000..623ba8a
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/scoring.go
@@ -0,0 +1,751 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/pgo"
+	"cmd/compile/internal/types"
+	"fmt"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// These constants enumerate the set of possible ways/scenarios
+// in which we'll adjust the score of a given callsite.
+type scoreAdjustTyp uint
+
+// These constants capture the various ways in which the inliner's
+// scoring phase can adjust a callsite score based on heuristics. They
+// fall broadly into three categories:
+//
+// 1) adjustments based solely on the callsite context (ex: call
+// appears on panic path)
+//
+// 2) adjustments that take into account specific interesting values
+// passed at a call site (ex: passing a constant that could result in
+// cprop/deadcode in the caller)
+//
+// 3) adjustments that take into account values returned from the call
+// at a callsite (ex: call always returns the same inlinable function,
+// and return value flows unmodified into an indirect call)
+//
+// For categories 2 and 3 above, each adjustment can have either a
+// "must" version or a "may" version (but not both). Here the idea is
+// that in the "must" version the value flow is unconditional: if the
+// callsite executes, then the condition we're interested in (ex:
+// param feeding call) is guaranteed to happen. For the "may" version,
+// there may be control flow that could cause the benefit to be
+// bypassed.
+const (
+	// Category 1 adjustments (see above)
+	panicPathAdj scoreAdjustTyp = (1 << iota)
+	initFuncAdj
+	inLoopAdj
+
+	// Category 2 adjustments (see above).
+	passConstToIfAdj
+	passConstToNestedIfAdj
+	passConcreteToItfCallAdj
+	passConcreteToNestedItfCallAdj
+	passFuncToIndCallAdj
+	passFuncToNestedIndCallAdj
+	passInlinableFuncToIndCallAdj
+	passInlinableFuncToNestedIndCallAdj
+
+	// Category 3 adjustments.
+	returnFeedsConstToIfAdj
+	returnFeedsFuncToIndCallAdj
+	returnFeedsInlinableFuncToIndCallAdj
+	returnFeedsConcreteToInterfaceCallAdj
+
+	sentinelScoreAdj // sentinel; not a real adjustment
+)
+
+// This table records the specific values we use to adjust call
+// site scores in a given scenario.
+// NOTE: these numbers are chosen very arbitrarily; ideally
+// we will go through some sort of tuning process to decide
+// what value for each one produces the best performance.
+
+var adjValues = map[scoreAdjustTyp]int{
+	panicPathAdj:                          40,
+	initFuncAdj:                           20,
+	inLoopAdj:                             -5,
+	passConstToIfAdj:                      -20,
+	passConstToNestedIfAdj:                -15,
+	passConcreteToItfCallAdj:              -30,
+	passConcreteToNestedItfCallAdj:        -25,
+	passFuncToIndCallAdj:                  -25,
+	passFuncToNestedIndCallAdj:            -20,
+	passInlinableFuncToIndCallAdj:         -45,
+	passInlinableFuncToNestedIndCallAdj:   -40,
+	returnFeedsConstToIfAdj:               -15,
+	returnFeedsFuncToIndCallAdj:           -25,
+	returnFeedsInlinableFuncToIndCallAdj:  -40,
+	returnFeedsConcreteToInterfaceCallAdj: -25,
+}
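+
+// As an illustrative arithmetic sketch using the default values above:
+// a callee with an inline cost of 100 whose call appears inside a loop
+// (inLoopAdj, -5) and which passes a constant argument feeding an "if"
+// statement in the callee (passConstToIfAdj, -20) would end up with a
+// score of 100 - 5 - 20 = 75.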
+
+// SetupScoreAdjustments interprets the value of the -d=inlscoreadj
+// debugging option, if set. The value of this flag is expected to be
+// a series of "/"-separated clauses of the form adj1:value1. Example:
+// -d=inlscoreadj=inLoopAdj:0/passConstToIfAdj:-99
+func SetupScoreAdjustments() {
+	if base.Debug.InlScoreAdj == "" {
+		return
+	}
+	if err := parseScoreAdj(base.Debug.InlScoreAdj); err != nil {
+		base.Fatalf("malformed -d=inlscoreadj argument %q: %v",
+			base.Debug.InlScoreAdj, err)
+	}
+}
+
+func adjStringToVal(s string) (scoreAdjustTyp, bool) {
+	for adj := scoreAdjustTyp(1); adj < sentinelScoreAdj; adj <<= 1 {
+		if adj.String() == s {
+			return adj, true
+		}
+	}
+	return 0, false
+}
+
+func parseScoreAdj(val string) error {
+	clauses := strings.Split(val, "/")
+	if len(clauses) == 0 {
+		return fmt.Errorf("no clauses")
+	}
+	for _, clause := range clauses {
+		elems := strings.Split(clause, ":")
+		if len(elems) < 2 {
+			return fmt.Errorf("clause %q: expected colon", clause)
+		}
+		if len(elems) != 2 {
+			return fmt.Errorf("clause %q has %d elements, wanted 2", clause,
+				len(elems))
+		}
+		adj, ok := adjStringToVal(elems[0])
+		if !ok {
+			return fmt.Errorf("clause %q: unknown adjustment", clause)
+		}
+		val, err := strconv.Atoi(elems[1])
+		if err != nil {
+			return fmt.Errorf("clause %q: malformed value: %v", clause, err)
+		}
+		adjValues[adj] = val
+	}
+	return nil
+}
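+
+// For example (illustrative only, not exercised here): calling
+// parseScoreAdj("inLoopAdj:-10/panicPathAdj:60") would set
+// adjValues[inLoopAdj] to -10 and adjValues[panicPathAdj] to 60.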
+
+func adjValue(x scoreAdjustTyp) int {
+	if val, ok := adjValues[x]; ok {
+		return val
+	} else {
+		panic("internal error unregistered adjustment type")
+	}
+}
+
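+// mayMustAdj pairs each "may" (conditional/nested) score adjustment
+// with its corresponding "must" (unconditional) form.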
+var mayMustAdj = [...]struct{ may, must scoreAdjustTyp }{
+	{may: passConstToNestedIfAdj, must: passConstToIfAdj},
+	{may: passConcreteToNestedItfCallAdj, must: passConcreteToItfCallAdj},
+	{may: passFuncToNestedIndCallAdj, must: passFuncToIndCallAdj},
+	{may: passInlinableFuncToNestedIndCallAdj, must: passInlinableFuncToIndCallAdj},
+}
+
+func isMay(x scoreAdjustTyp) bool {
+	return mayToMust(x) != 0
+}
+
+func isMust(x scoreAdjustTyp) bool {
+	return mustToMay(x) != 0
+}
+
+func mayToMust(x scoreAdjustTyp) scoreAdjustTyp {
+	for _, v := range mayMustAdj {
+		if x == v.may {
+			return v.must
+		}
+	}
+	return 0
+}
+
+func mustToMay(x scoreAdjustTyp) scoreAdjustTyp {
+	for _, v := range mayMustAdj {
+		if x == v.must {
+			return v.may
+		}
+	}
+	return 0
+}
+
+// computeCallSiteScore takes a given call site whose ir node is
+// 'call', whose callee function is 'callee', and whose previously
+// computed call site properties are 'csflags'. It computes a score
+// for the callsite that combines the size cost of the callee with
+// heuristics based on previously computed argument and function
+// properties, then stores the score and the adjustment mask in the
+// appropriate fields in 'cs'.
+func (cs *CallSite) computeCallSiteScore(csa *callSiteAnalyzer, calleeProps *FuncProps) {
+	callee := cs.Callee
+	csflags := cs.Flags
+	call := cs.Call
+
+	// Start with the size-based score for the callee.
+	score := int(callee.Inl.Cost)
+	var tmask scoreAdjustTyp
+
+	if debugTrace&debugTraceScoring != 0 {
+		fmt.Fprintf(os.Stderr, "=-= scoring call to %s at %s , initial=%d\n",
+			callee.Sym().Name, fmtFullPos(call.Pos()), score)
+	}
+
+	// First some score adjustments to discourage inlining in selected cases.
+	if csflags&CallSiteOnPanicPath != 0 {
+		score, tmask = adjustScore(panicPathAdj, score, tmask)
+	}
+	if csflags&CallSiteInInitFunc != 0 {
+		score, tmask = adjustScore(initFuncAdj, score, tmask)
+	}
+
+	// Then adjustments to encourage inlining in selected cases.
+	if csflags&CallSiteInLoop != 0 {
+		score, tmask = adjustScore(inLoopAdj, score, tmask)
+	}
+
+	// Stop here if no callee props.
+	if calleeProps == nil {
+		cs.Score, cs.ScoreMask = score, tmask
+		return
+	}
+
+	// Walk through the actual expressions being passed at the call.
+	calleeRecvrParms := callee.Type().RecvParams()
+	for idx := range call.Args {
+		// ignore blanks
+		if calleeRecvrParms[idx].Sym == nil ||
+			calleeRecvrParms[idx].Sym.IsBlank() {
+			continue
+		}
+		arg := call.Args[idx]
+		pflag := calleeProps.ParamFlags[idx]
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= arg %d of %d: val %v flags=%s\n",
+				idx, len(call.Args), arg, pflag.String())
+		}
+
+		if len(cs.ArgProps) == 0 {
+			continue
+		}
+		argProps := cs.ArgProps[idx]
+
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= arg %d props %s value %v\n",
+				idx, argProps.String(), arg)
+		}
+
+		if argProps&ActualExprConstant != 0 {
+			if pflag&ParamMayFeedIfOrSwitch != 0 {
+				score, tmask = adjustScore(passConstToNestedIfAdj, score, tmask)
+			}
+			if pflag&ParamFeedsIfOrSwitch != 0 {
+				score, tmask = adjustScore(passConstToIfAdj, score, tmask)
+			}
+		}
+
+		if argProps&ActualExprIsConcreteConvIface != 0 {
+			// FIXME: ideally here it would be nice to make a
+			// distinction between the inlinable case and the
+			// non-inlinable case, but this is hard to do. Example:
+			//
+			//    type I interface { Tiny() int; Giant() }
+			//    type Conc struct { x int }
+			//    func (c *Conc) Tiny() int { return 42 }
+			//    func (c *Conc) Giant() { <huge amounts of code> }
+			//
+			//    func passConcToItf(c *Conc) {
+			//        makesItfMethodCall(c)
+			//    }
+			//
+			// In the code above, function properties will only tell
+			// us that 'makesItfMethodCall' invokes a method on its
+			// interface parameter, but we don't know whether it calls
+			// "Tiny" or "Giant". If we knew if called "Tiny", then in
+			// theory in addition to converting the interface call to
+			// a direct call, we could also inline (in which case
+			// we'd want to decrease the score even more).
+			//
+			// One thing we could do (not yet implemented) is iterate
+			// through all of the methods of "*Conc" that allow it to
+			// satisfy I, and if all are inlinable, then exploit that.
+			if pflag&ParamMayFeedInterfaceMethodCall != 0 {
+				score, tmask = adjustScore(passConcreteToNestedItfCallAdj, score, tmask)
+			}
+			if pflag&ParamFeedsInterfaceMethodCall != 0 {
+				score, tmask = adjustScore(passConcreteToItfCallAdj, score, tmask)
+			}
+		}
+
+		if argProps&(ActualExprIsFunc|ActualExprIsInlinableFunc) != 0 {
+			mayadj := passFuncToNestedIndCallAdj
+			mustadj := passFuncToIndCallAdj
+			if argProps&ActualExprIsInlinableFunc != 0 {
+				mayadj = passInlinableFuncToNestedIndCallAdj
+				mustadj = passInlinableFuncToIndCallAdj
+			}
+			if pflag&ParamMayFeedIndirectCall != 0 {
+				score, tmask = adjustScore(mayadj, score, tmask)
+			}
+			if pflag&ParamFeedsIndirectCall != 0 {
+				score, tmask = adjustScore(mustadj, score, tmask)
+			}
+		}
+	}
+
+	cs.Score, cs.ScoreMask = score, tmask
+}
+
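+// adjustScore applies adjustment 'typ' to 'score' unless it (or, for a
+// "may" adjustment, its stronger "must" counterpart) has already been
+// applied according to 'mask'; when a "must" adjustment supersedes a
+// previously applied "may" form, the earlier "may" value is first
+// backed out. It returns the updated score and mask.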
+func adjustScore(typ scoreAdjustTyp, score int, mask scoreAdjustTyp) (int, scoreAdjustTyp) {
+
+	if isMust(typ) {
+		if mask&typ != 0 {
+			return score, mask
+		}
+		may := mustToMay(typ)
+		if mask&may != 0 {
+			// promote may to must, so undo may
+			score -= adjValue(may)
+			mask &^= may
+		}
+	} else if isMay(typ) {
+		must := mayToMust(typ)
+		if mask&(must|typ) != 0 {
+			return score, mask
+		}
+	}
+	if mask&typ == 0 {
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= applying adj %d for %s\n",
+				adjValue(typ), typ.String())
+		}
+		score += adjValue(typ)
+		mask |= typ
+	}
+	return score, mask
+}
+
+var resultFlagToPositiveAdj map[ResultPropBits]scoreAdjustTyp
+var paramFlagToPositiveAdj map[ParamPropBits]scoreAdjustTyp
+
+func setupFlagToAdjMaps() {
+	resultFlagToPositiveAdj = map[ResultPropBits]scoreAdjustTyp{
+		ResultIsAllocatedMem:     returnFeedsConcreteToInterfaceCallAdj,
+		ResultAlwaysSameFunc:     returnFeedsFuncToIndCallAdj,
+		ResultAlwaysSameConstant: returnFeedsConstToIfAdj,
+	}
+	paramFlagToPositiveAdj = map[ParamPropBits]scoreAdjustTyp{
+		ParamMayFeedInterfaceMethodCall: passConcreteToNestedItfCallAdj,
+		ParamFeedsInterfaceMethodCall:   passConcreteToItfCallAdj,
+		ParamMayFeedIndirectCall:        passInlinableFuncToNestedIndCallAdj,
+		ParamFeedsIndirectCall:          passInlinableFuncToIndCallAdj,
+	}
+}
+
+// LargestNegativeScoreAdjustment tries to estimate the largest possible
+// negative score adjustment that could be applied to a call of the
+// function with the specified props. Example:
+//
+//	func foo() {                  func bar(x int, p *int) int {
+//	   ...                          if x < 0 { *p = x }
+//	}                               return 99
+//	                              }
+//
+// Function 'foo' above on the left has no interesting properties,
+// so the most we'll adjust any call to it is the value for
+// "call in loop". If the calculated cost of the function is 150, and
+// the in-loop adjustment is 5 (for example), then there is not much
+// point treating it as inlinable. On the other hand "bar" has a param
+// property (parameter "x" feeds unmodified into an "if" statement) and
+// a return property (always returns the same constant), meaning that a
+// given call _could_ be rescored down by as much as 35 points; thus if
+// the size of "bar" is 100 (for example) then there is at least a
+// chance that scoring will enable inlining.
+func LargestNegativeScoreAdjustment(fn *ir.Func, props *FuncProps) int {
+	if resultFlagToPositiveAdj == nil {
+		setupFlagToAdjMaps()
+	}
+	var tmask scoreAdjustTyp
+	score := adjValues[inLoopAdj] // any call can be in a loop
+	for _, pf := range props.ParamFlags {
+		if adj, ok := paramFlagToPositiveAdj[pf]; ok {
+			score, tmask = adjustScore(adj, score, tmask)
+		}
+	}
+	for _, rf := range props.ResultFlags {
+		if adj, ok := resultFlagToPositiveAdj[rf]; ok {
+			score, tmask = adjustScore(adj, score, tmask)
+		}
+	}
+
+	if debugTrace&debugTraceScoring != 0 {
+		fmt.Fprintf(os.Stderr, "=-= largestScore(%v) is %d\n",
+			fn, score)
+	}
+
+	return score
+}
+
+// LargestPositiveScoreAdjustment tries to estimate the largest possible
+// positive score adjustment that could be applied to a given callsite.
+// At the moment we don't have very many positive score adjustments, so
+// this is just hard-coded, not table-driven.
+func LargestPositiveScoreAdjustment(fn *ir.Func) int {
+	return adjValues[panicPathAdj] + adjValues[initFuncAdj]
+}
+
+// callSiteTab contains entries for each call in the function
+// currently being processed by InlineCalls; this variable will either
+// be set to 'scoreCallsCache.tab' below (for non-inlinable routines) or to the
+// local 'cstab' entry in the fnInlHeur object for inlinable routines.
+//
+// NOTE: this assumes that inlining operations are happening in a serial,
+// single-threaded fashion, which is true today but probably won't hold
+// in the future (for example, we might want to score the callsites
+// in multiple functions in parallel); if the inliner evolves in this
+// direction we'll need to come up with a different approach here.
+var callSiteTab CallSiteTab
+
+// scoreCallsCache caches a call site table and call site list between
+// invocations of ScoreCalls so that we can reuse previously allocated
+// storage.
+var scoreCallsCache scoreCallsCacheType
+
+type scoreCallsCacheType struct {
+	tab CallSiteTab
+	csl []*CallSite
+}
+
+// ScoreCalls assigns numeric scores to each of the callsites in
+// function 'fn'; the lower the score, the more helpful we think it
+// will be to inline.
+//
+// Unlike a lot of the other inline heuristics machinery, callsite
+// scoring can't be done as part of the CanInline call for a function,
+// due to the fact that we may be working on a non-trivial SCC. So for
+// example with this SCC:
+//
+//	func foo(x int) {           func bar(x int, f func()) {
+//	  if x != 0 {                  f()
+//	    bar(x, func(){})           foo(x-1)
+//	  }                         }
+//	}
+//
+// We don't want to perform scoring for the 'foo' call in "bar" until
+// after foo has been analyzed, but it's conceivable that CanInline
+// might visit bar before foo for this SCC.
+func ScoreCalls(fn *ir.Func) {
+	if len(fn.Body) == 0 {
+		return
+	}
+	enableDebugTraceIfEnv()
+
+	nameFinder := newNameFinder(fn)
+
+	if debugTrace&debugTraceScoring != 0 {
+		fmt.Fprintf(os.Stderr, "=-= ScoreCalls(%v)\n", ir.FuncName(fn))
+	}
+
+	// If this is an inlinable function, use the precomputed
+	// call site table for it. If the function wasn't an inline
+	// candidate, collect a callsite table for it now.
+	var cstab CallSiteTab
+	if funcInlHeur, ok := fpmap[fn]; ok {
+		cstab = funcInlHeur.cstab
+	} else {
+		if len(scoreCallsCache.tab) != 0 {
+			panic("missing call to ScoreCallsCleanup")
+		}
+		if scoreCallsCache.tab == nil {
+			scoreCallsCache.tab = make(CallSiteTab)
+		}
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= building cstab for non-inl func %s\n",
+				ir.FuncName(fn))
+		}
+		cstab = computeCallSiteTable(fn, fn.Body, scoreCallsCache.tab, nil, 0,
+			nameFinder)
+	}
+
+	csa := makeCallSiteAnalyzer(fn)
+	const doCallResults = true
+	csa.scoreCallsRegion(fn, fn.Body, cstab, doCallResults, nil)
+
+	disableDebugTrace()
+}
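+
+// Usage sketch (illustrative; an assumption about the calling code, not
+// something established by this file): ScoreCalls is expected to be
+// paired with ScoreCallsCleanup for each function the inliner walks,
+// which is what the "missing call to ScoreCallsCleanup" panic above
+// guards against. Roughly:
+//
+//	ScoreCalls(fn)
+//	// ... perform inlining into fn, consulting GetCallSiteScore(fn, call)
+//	// for individual call sites as needed ...
+//	ScoreCallsCleanup()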
+
+// scoreCallsRegion assigns numeric scores to each of the callsites in
+// region 'region' within function 'fn'. This can be called on
+// an entire function, or with 'region' set to a chunk of
+// code corresponding to an inlined call.
+func (csa *callSiteAnalyzer) scoreCallsRegion(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, doCallResults bool, ic *ir.InlinedCallExpr) {
+	if debugTrace&debugTraceScoring != 0 {
+		fmt.Fprintf(os.Stderr, "=-= scoreCallsRegion(%v, %s) len(cstab)=%d\n",
+			ir.FuncName(fn), region[0].Op().String(), len(cstab))
+	}
+
+	// Sort callsites to avoid any surprises with non-deterministic
+	// map iteration order (this is probably not needed, but here just
+	// in case).
+	csl := scoreCallsCache.csl[:0]
+	for _, cs := range cstab {
+		csl = append(csl, cs)
+	}
+	scoreCallsCache.csl = csl[:0]
+	sort.Slice(csl, func(i, j int) bool {
+		return csl[i].ID < csl[j].ID
+	})
+
+	// Score each call site.
+	var resultNameTab map[*ir.Name]resultPropAndCS
+	for _, cs := range csl {
+		var cprops *FuncProps
+		fihcprops := false
+		desercprops := false
+		if funcInlHeur, ok := fpmap[cs.Callee]; ok {
+			cprops = funcInlHeur.props
+			fihcprops = true
+		} else if cs.Callee.Inl != nil {
+			cprops = DeserializeFromString(cs.Callee.Inl.Properties)
+			desercprops = true
+		} else {
+			if base.Debug.DumpInlFuncProps != "" {
+				fmt.Fprintf(os.Stderr, "=-= *** unable to score call to %s from %s\n", cs.Callee.Sym().Name, fmtFullPos(cs.Call.Pos()))
+				panic("should never happen")
+			} else {
+				continue
+			}
+		}
+		cs.computeCallSiteScore(csa, cprops)
+
+		if doCallResults {
+			if debugTrace&debugTraceScoring != 0 {
+				fmt.Fprintf(os.Stderr, "=-= examineCallResults at %s: flags=%d score=%d funcInlHeur=%v deser=%v\n", fmtFullPos(cs.Call.Pos()), cs.Flags, cs.Score, fihcprops, desercprops)
+			}
+			resultNameTab = csa.examineCallResults(cs, resultNameTab)
+		}
+
+		if debugTrace&debugTraceScoring != 0 {
+			fmt.Fprintf(os.Stderr, "=-= scoring call at %s: flags=%d score=%d funcInlHeur=%v deser=%v\n", fmtFullPos(cs.Call.Pos()), cs.Flags, cs.Score, fihcprops, desercprops)
+		}
+	}
+
+	if resultNameTab != nil {
+		csa.rescoreBasedOnCallResultUses(fn, resultNameTab, cstab)
+	}
+
+	disableDebugTrace()
+
+	if ic != nil && callSiteTab != nil {
+		// Integrate the calls from this cstab into the table for the caller.
+		if err := callSiteTab.merge(cstab); err != nil {
+			base.FatalfAt(ic.Pos(), "%v", err)
+		}
+	} else {
+		callSiteTab = cstab
+	}
+}
+
+// ScoreCallsCleanup resets the state of the callsite cache
+// once ScoreCalls is done with a function.
+func ScoreCallsCleanup() {
+	if base.Debug.DumpInlCallSiteScores != 0 {
+		if allCallSites == nil {
+			allCallSites = make(CallSiteTab)
+		}
+		for call, cs := range callSiteTab {
+			allCallSites[call] = cs
+		}
+	}
+	for k := range scoreCallsCache.tab {
+		delete(scoreCallsCache.tab, k)
+	}
+}
+
+// GetCallSiteScore returns the previously calculated score for call
+// within fn.
+func GetCallSiteScore(fn *ir.Func, call *ir.CallExpr) (int, bool) {
+	if funcInlHeur, ok := fpmap[fn]; ok {
+		if cs, ok := funcInlHeur.cstab[call]; ok {
+			return cs.Score, true
+		}
+	}
+	if cs, ok := callSiteTab[call]; ok {
+		return cs.Score, true
+	}
+	return 0, false
+}
+
+// BudgetExpansion returns the amount to relax/expand the base
+// inlining budget when the new inliner is turned on; the inliner
+// will add the returned value to the hairyness budget.
+//
+// Background: with the new inliner, the score for a given callsite
+// can be adjusted down by some amount due to heuristics; however, we
+// won't know whether this is going to happen until much later after
+// the CanInline call. This function returns the amount to relax the
+// budget initially (to allow for a large score adjustment); later on
+// in RevisitInlinability we'll look at each individual function to
+// demote it if needed.
+func BudgetExpansion(maxBudget int32) int32 {
+	if base.Debug.InlBudgetSlack != 0 {
+		return int32(base.Debug.InlBudgetSlack)
+	}
+	// In the default case, return maxBudget, which will effectively
+	// double the budget from 80 to 160; this should be good enough
+	// for most cases.
+	return maxBudget
+}
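+
+// Worked example (illustrative): with the stock maxBudget of 80,
+// BudgetExpansion(80) returns 80 and the initial hairyness limit is
+// relaxed to 160. A function with a raw cost of 130 thus remains an
+// inline candidate, and (consistent with the "PROMOTED" case described
+// in DumpInlCallSiteScores below) a call to it ends up inlined only if
+// heuristics later bring the callsite score back down to the original
+// budget or below. If base.Debug.InlBudgetSlack is set to a nonzero
+// value, that value is used as the expansion instead.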
+
+var allCallSites CallSiteTab
+
+// DumpInlCallSiteScores is invoked by the inliner if the debug flag
+// "-d=dumpinlcallsitescores" is set; it dumps out a human-readable
+// summary of all (potentially) inlinable callsites in the package,
+// along with info on call site scoring and the adjustments made to a
+// given score. Here profile is the PGO profile in use (may be nil)
+// and budgetCallback is a callback that can be invoked to find out
+// the original pre-adjustment hairyness limit for the function.
+// Sample output lines:
+//
+// Score  Adjustment  Status  Callee  CallerPos ScoreFlags
+// 115    40          DEMOTED cmd/compile/internal/abi.(*ABIParamAssignment).Offset     expand_calls.go:1679:14|6       panicPathAdj
+// 76     -5          PROMOTED runtime.persistentalloc   mcheckmark.go:48:45|3   inLoopAdj
+// 201    0           --- PGO  unicode.DecodeRuneInString        utf8.go:312:30|1
+// 7      -5          --- PGO  internal/abi.Name.DataChecked     type.go:625:22|0        inLoopAdj
+//
+// In the dump above, "Score" is the final score calculated for the
+// callsite, "Adjustment" is the amount added to or subtracted from
+// the original hairyness estimate to form the score. "Status" shows
+// whether anything changed with the site -- did the adjustment bump
+// it down just below the threshold ("PROMOTED") or instead bump it
+// above the threshold ("DEMOTED"); this will be blank ("---") if no
+// threshold was crossed as a result of the heuristics. Note that
+// "Status" also shows whether PGO was involved. "Callee" is the name
+// of the function called, "CallerPos" is the position of the
+// callsite, and "ScoreFlags" is a digest of the specific properties
+// we used to make adjustments to callsite score via heuristics.
+func DumpInlCallSiteScores(profile *pgo.Profile, budgetCallback func(fn *ir.Func, profile *pgo.Profile) (int32, bool)) {
+
+	var indirectlyDueToPromotion func(cs *CallSite) bool
+	indirectlyDueToPromotion = func(cs *CallSite) bool {
+		bud, _ := budgetCallback(cs.Callee, profile)
+		hairyval := cs.Callee.Inl.Cost
+		score := int32(cs.Score)
+		if hairyval > bud && score <= bud {
+			return true
+		}
+		if cs.parent != nil {
+			return indirectlyDueToPromotion(cs.parent)
+		}
+		return false
+	}
+
+	genstatus := func(cs *CallSite) string {
+		hairyval := cs.Callee.Inl.Cost
+		bud, isPGO := budgetCallback(cs.Callee, profile)
+		score := int32(cs.Score)
+		st := "---"
+		expinl := false
+		switch {
+		case hairyval <= bud && score <= bud:
+			// "Normal" inlined case: hairy val sufficiently low that
+			// it would have been inlined anyway without heuristics.
+			expinl = true
+		case hairyval > bud && score > bud:
+			// "Normal" not inlined case: hairy val sufficiently high
+			// and scoring didn't lower it.
+		case hairyval > bud && score <= bud:
+			// Promoted: we would not have inlined it before, but
+			// after score adjustment we decided to inline.
+			st = "PROMOTED"
+			expinl = true
+		case hairyval <= bud && score > bud:
+			// Demoted: we would have inlined it before, but after
+			// score adjustment we decided not to inline.
+			st = "DEMOTED"
+		}
+		inlined := cs.aux&csAuxInlined != 0
+		indprom := false
+		if cs.parent != nil {
+			indprom = indirectlyDueToPromotion(cs.parent)
+		}
+		if inlined && indprom {
+			st += "|INDPROM"
+		}
+		if inlined && !expinl {
+			st += "|[NI?]"
+		} else if !inlined && expinl {
+			st += "|[IN?]"
+		}
+		if isPGO {
+			st += "|PGO"
+		}
+		return st
+	}
+
+	if base.Debug.DumpInlCallSiteScores != 0 {
+		var sl []*CallSite
+		for _, cs := range allCallSites {
+			sl = append(sl, cs)
+		}
+		sort.Slice(sl, func(i, j int) bool {
+			if sl[i].Score != sl[j].Score {
+				return sl[i].Score < sl[j].Score
+			}
+			fni := ir.PkgFuncName(sl[i].Callee)
+			fnj := ir.PkgFuncName(sl[j].Callee)
+			if fni != fnj {
+				return fni < fnj
+			}
+			ecsi := EncodeCallSiteKey(sl[i])
+			ecsj := EncodeCallSiteKey(sl[j])
+			return ecsi < ecsj
+		})
+
+		mkname := func(fn *ir.Func) string {
+			var n string
+			if fn == nil || fn.Nname == nil {
+				return "<nil>"
+			}
+			if fn.Sym().Pkg == types.LocalPkg {
+				n = "·" + fn.Sym().Name
+			} else {
+				n = ir.PkgFuncName(fn)
+			}
+			// don't try to print super-long names
+			if len(n) <= 64 {
+				return n
+			}
+			return n[:32] + "..." + n[len(n)-32:]
+		}
+
+		if len(sl) != 0 {
+			fmt.Fprintf(os.Stdout, "# scores for package %s\n", types.LocalPkg.Path)
+			fmt.Fprintf(os.Stdout, "# Score  Adjustment  Status  Callee  CallerPos Flags ScoreFlags\n")
+		}
+		for _, cs := range sl {
+			hairyval := cs.Callee.Inl.Cost
+			adj := int32(cs.Score) - hairyval
+			nm := mkname(cs.Callee)
+			ecc := EncodeCallSiteKey(cs)
+			fmt.Fprintf(os.Stdout, "%d  %d\t%s\t%s\t%s\t%s\n",
+				cs.Score, adj, genstatus(cs),
+				nm, ecc,
+				cs.ScoreMask.String())
+		}
+	}
+}
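+
+// Reading the dump (illustrative note): the "Adjustment" column above is
+// computed as Score minus the callee's hairyness estimate, so the first
+// sample line in the DumpInlCallSiteScores comment (Score 115,
+// Adjustment 40) implies a hairyness of 75. Assuming the stock budget of
+// 80, that callee would have been inlined on cost alone (75 <= 80), but
+// its adjusted score of 115 exceeds the budget -- the
+// "hairyval <= bud && score > bud" case in genstatus -- hence DEMOTED.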
diff --git a/src/cmd/compile/internal/inline/inlheur/serialize.go b/src/cmd/compile/internal/inline/inlheur/serialize.go
new file mode 100644
index 0000000..d650626
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/serialize.go
@@ -0,0 +1,80 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import "strings"
+
+func (funcProps *FuncProps) SerializeToString() string {
+	if funcProps == nil {
+		return ""
+	}
+	var sb strings.Builder
+	writeUleb128(&sb, uint64(funcProps.Flags))
+	writeUleb128(&sb, uint64(len(funcProps.ParamFlags)))
+	for _, pf := range funcProps.ParamFlags {
+		writeUleb128(&sb, uint64(pf))
+	}
+	writeUleb128(&sb, uint64(len(funcProps.ResultFlags)))
+	for _, rf := range funcProps.ResultFlags {
+		writeUleb128(&sb, uint64(rf))
+	}
+	return sb.String()
+}
+
+func DeserializeFromString(s string) *FuncProps {
+	if len(s) == 0 {
+		return nil
+	}
+	var funcProps FuncProps
+	var v uint64
+	sl := []byte(s)
+	v, sl = readULEB128(sl)
+	funcProps.Flags = FuncPropBits(v)
+	v, sl = readULEB128(sl)
+	funcProps.ParamFlags = make([]ParamPropBits, v)
+	for i := range funcProps.ParamFlags {
+		v, sl = readULEB128(sl)
+		funcProps.ParamFlags[i] = ParamPropBits(v)
+	}
+	v, sl = readULEB128(sl)
+	funcProps.ResultFlags = make([]ResultPropBits, v)
+	for i := range funcProps.ResultFlags {
+		v, sl = readULEB128(sl)
+		funcProps.ResultFlags[i] = ResultPropBits(v)
+	}
+	return &funcProps
+}
+
+func readULEB128(sl []byte) (value uint64, rsl []byte) {
+	var shift uint
+
+	for {
+		b := sl[0]
+		sl = sl[1:]
+		value |= (uint64(b&0x7F) << shift)
+		if b&0x80 == 0 {
+			break
+		}
+		shift += 7
+	}
+	return value, sl
+}
+
+func writeUleb128(sb *strings.Builder, v uint64) {
+	if v < 128 {
+		sb.WriteByte(uint8(v))
+		return
+	}
+	more := true
+	for more {
+		c := uint8(v & 0x7f)
+		v >>= 7
+		more = v != 0
+		if more {
+			c |= 0x80
+		}
+		sb.WriteByte(c)
+	}
+}
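+
+// Encoding example: writeUleb128 emits seven bits per byte, least
+// significant group first, setting the high bit on every byte except the
+// last. The value 300 (binary 1_0010_1100) therefore encodes as the two
+// bytes 0xac, 0x02, and readULEB128 reverses the process:
+//
+//	var sb strings.Builder
+//	writeUleb128(&sb, 300)
+//	v, _ := readULEB128([]byte(sb.String())) // v == 300
+//
+// Likewise a FuncProps{Flags: 1, ParamFlags: []ParamPropBits{32},
+// ResultFlags: []ResultPropBits{8}} value serializes to the five bytes
+// 0x01 0x01 0x20 0x01 0x08 (flags, param count, param flag, result
+// count, result flag), since every field here fits in seven bits.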
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go b/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go
new file mode 100644
index 0000000..6f2f760
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go
@@ -0,0 +1,45 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dumpscores
+
+var G int
+
+func inlinable(x int, f func(int) int) int {
+	if x != 0 {
+		return 1
+	}
+	G += noninl(x)
+	return f(x)
+}
+
+func inlinable2(x int) int {
+	return noninl(-x)
+}
+
+//go:noinline
+func noninl(x int) int {
+	return x + 1
+}
+
+func tooLargeToInline(x int) int {
+	if x > 101 {
+		// Drive up the cost of inlining this func over the
+		// regular threshold.
+		return big(big(big(big(big(G + x)))))
+	}
+	if x < 100 {
+		// make sure this callsite is scored properly
+		G += inlinable(101, inlinable2)
+		if G == 101 {
+			return 0
+		}
+		panic(inlinable2(3))
+	}
+	return G
+}
+
+func big(q int) int {
+	return noninl(q) + noninl(-q)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt b/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt
new file mode 100644
index 0000000..af5ebec
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt
@@ -0,0 +1,77 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+Notes on the format of the testcase files in
+cmd/compile/internal/inline/inlheur/testdata/props:
+
+- each (compilable) file contains input Go code and expected results
+  in the form of column-0 comments.
+
+- functions or methods that begin with "T_" are targeted for testing,
+  as well as "init" functions; all other functions are ignored.
+
+- function header comments begin with a line containing
+  the file name, function name, definition line, then an index
+  and a count of the number of funcs that share that same
+  definition line (needed to support generics). Example:
+
+	  // foo.go T_mumble 35 1 4
+
+  Here "T_mumble" is defined at line 35, and it is func 0
+  out of the 4 funcs that share that same line.
+
+- function property expected results appear as comments immediately
+  prior to the function. For example, here we have first the function
+  name ("T_feeds_if_simple"), then a human-readable dump of the function
+  properties, as well as the JSON for the properties object, each
+  section separated by a "<>" delimiter.
+
+	  // params.go T_feeds_if_simple 35 0 1
+	  // RecvrParamFlags:
+	  //   0: ParamFeedsIfOrSwitch
+	  // <endpropsdump>
+	  // {"Flags":0,"RecvrParamFlags":[8],"ReturnFlags":[]}
+	  // callsite: params.go:34:10|0 "CallSiteOnPanicPath" 2
+	  // <endcallsites>
+	  // <endfuncpreamble>
+	  func T_feeds_if_simple(x int) {
+		if x < 100 {
+			os.Exit(1)
+		}
+		println(x)
+	}
+
+- when the test runs, it will compile the Go source file with an
+  option to dump out function properties, then compare the new dump
+  for each function with the JSON appearing in the header comment for
+  the function (in the example above, the JSON appears between
+  "<endpropsdump>" and "<endfuncpreamble>". The material prior to the
+  dump is simply there for human consumption, so that a developer can
+  easily see that "RecvrParamFlags":[8] means that the first parameter
+  has flag ParamFeedsIfOrSwitch.
+
+- when making changes to the compiler (which can alter the expected
+  results) or edits/additions to the go code in the testcase files,
+  you can remaster the results by running
+
+    go test -v -count=1 .
+
+  In the trace output of this run, you'll see messages of the form
+
+      === RUN   TestFuncProperties
+       funcprops_test.go:NNN: update-expected: emitted updated file
+                              testdata/props/XYZ.go.new
+       funcprops_test.go:MMM: please compare the two files, then overwrite
+                              testdata/props/XYZ.go with testdata/props/XYZ.go.new
+
+  at which point you can compare the old and new files by hand, then
+  overwrite the *.go file with the *.go.new file if you are happy with
+  the diffs.
+
+- note that the remastering process will strip out any existing
+  column-0 (unindented) comments; if you write comments that you
+  want to see preserved, use "/* */" or indent them.
+
+
+
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go
new file mode 100644
index 0000000..a8166fd
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go
@@ -0,0 +1,214 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+package params
+
+// acrosscall.go T_feeds_indirect_call_via_call_toplevel 19 0 1
+// ParamFlags
+//   0 ParamFeedsIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[8],"ResultFlags":null}
+// callsite: acrosscall.go:20:12|0 flagstr "" flagval 0 score 60 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indirect_call_via_call_toplevel(f func(int)) {
+	callsparam(f)
+}
+
+// acrosscall.go T_feeds_indirect_call_via_call_conditional 31 0 1
+// ParamFlags
+//   0 ParamMayFeedIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[16],"ResultFlags":null}
+// callsite: acrosscall.go:33:13|0 flagstr "" flagval 0 score 60 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indirect_call_via_call_conditional(f func(int)) {
+	if G != 101 {
+		callsparam(f)
+	}
+}
+
+// acrosscall.go T_feeds_conditional_indirect_call_via_call_toplevel 45 0 1
+// ParamFlags
+//   0 ParamMayFeedIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[16],"ResultFlags":null}
+// callsite: acrosscall.go:46:23|0 flagstr "" flagval 0 score 64 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_conditional_indirect_call_via_call_toplevel(f func(int)) {
+	callsparamconditional(f)
+}
+
+// acrosscall.go T_feeds_if_via_call 57 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// callsite: acrosscall.go:58:9|0 flagstr "" flagval 0 score 8 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_via_call(x int) {
+	feedsif(x)
+}
+
+// acrosscall.go T_feeds_if_via_call_conditional 69 0 1
+// ParamFlags
+//   0 ParamMayFeedIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64],"ResultFlags":null}
+// callsite: acrosscall.go:71:10|0 flagstr "" flagval 0 score 8 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_via_call_conditional(x int) {
+	if G != 101 {
+		feedsif(x)
+	}
+}
+
+// acrosscall.go T_feeds_conditional_if_via_call 83 0 1
+// ParamFlags
+//   0 ParamMayFeedIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64],"ResultFlags":null}
+// callsite: acrosscall.go:84:20|0 flagstr "" flagval 0 score 12 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_conditional_if_via_call(x int) {
+	feedsifconditional(x)
+}
+
+// acrosscall.go T_multifeeds1 97 0 1
+// ParamFlags
+//   0 ParamFeedsIndirectCall|ParamMayFeedIndirectCall
+//   1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[24,0],"ResultFlags":null}
+// callsite: acrosscall.go:98:12|0 flagstr "" flagval 0 score 60 mask 0 maskstr ""
+// callsite: acrosscall.go:99:23|1 flagstr "" flagval 0 score 64 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_multifeeds1(f1, f2 func(int)) {
+	callsparam(f1)
+	callsparamconditional(f1)
+}
+
+// acrosscall.go T_acrosscall_returnsconstant 110 0 1
+// ResultFlags
+//   0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[8]}
+// callsite: acrosscall.go:111:24|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_returnsconstant() int {
+	return returnsconstant()
+}
+
+// acrosscall.go T_acrosscall_returnsmem 122 0 1
+// ResultFlags
+//   0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[2]}
+// callsite: acrosscall.go:123:19|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_returnsmem() *int {
+	return returnsmem()
+}
+
+// acrosscall.go T_acrosscall_returnscci 134 0 1
+// ResultFlags
+//   0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[4]}
+// callsite: acrosscall.go:135:19|0 flagstr "" flagval 0 score 7 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_returnscci() I {
+	return returnscci()
+}
+
+// acrosscall.go T_acrosscall_multiret 144 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: acrosscall.go:146:25|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_multiret(q int) int {
+	if q != G {
+		return returnsconstant()
+	}
+	return 0
+}
+
+// acrosscall.go T_acrosscall_multiret2 158 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: acrosscall.go:160:25|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: acrosscall.go:162:25|1 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_multiret2(q int) int {
+	if q == G {
+		return returnsconstant()
+	} else {
+		return returnsconstant()
+	}
+}
+
+func callsparam(f func(int)) {
+	f(2)
+}
+
+func callsparamconditional(f func(int)) {
+	if G != 101 {
+		f(2)
+	}
+}
+
+func feedsif(x int) int {
+	if x != 101 {
+		return 42
+	}
+	return 43
+}
+
+func feedsifconditional(x int) int {
+	if G != 101 {
+		if x != 101 {
+			return 42
+		}
+	}
+	return 43
+}
+
+func returnsconstant() int {
+	return 42
+}
+
+func returnsmem() *int {
+	return new(int)
+}
+
+func returnscci() I {
+	var q Q
+	return q
+}
+
+type I interface {
+	Foo()
+}
+
+type Q int
+
+func (q Q) Foo() {
+}
+
+var G int
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go
new file mode 100644
index 0000000..5cc217b
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go
@@ -0,0 +1,240 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+package calls
+
+import "os"
+
+// calls.go T_call_in_panic_arg 19 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: calls.go:21:15|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_call_in_panic_arg(x int) {
+	if x < G {
+		panic(callee(x))
+	}
+}
+
+// calls.go T_calls_in_loops 32 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: calls.go:34:9|0 flagstr "CallSiteInLoop" flagval 1 score -3 mask 4 maskstr "inLoopAdj"
+// callsite: calls.go:37:9|1 flagstr "CallSiteInLoop" flagval 1 score -3 mask 4 maskstr "inLoopAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_in_loops(x int, q []string) {
+	for i := 0; i < x; i++ {
+		callee(i)
+	}
+	for _, s := range q {
+		callee(len(s))
+	}
+}
+
+// calls.go T_calls_in_pseudo_loop 48 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: calls.go:50:9|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:54:9|1 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_in_pseudo_loop(x int, q []string) {
+	for i := 0; i < x; i++ {
+		callee(i)
+		return
+	}
+	for _, s := range q {
+		callee(len(s))
+		break
+	}
+}
+
+// calls.go T_calls_on_panic_paths 67 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: calls.go:69:9|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:73:9|1 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:77:12|2 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_on_panic_paths(x int, q []string) {
+	if x+G == 101 {
+		callee(x)
+		panic("ouch")
+	}
+	if x < G-101 {
+		callee(x)
+		if len(q) == 0 {
+			G++
+		}
+		callsexit(x)
+	}
+}
+
+// calls.go T_calls_not_on_panic_paths 93 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch|ParamMayFeedIfOrSwitch
+//   1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[96,0],"ResultFlags":null}
+// callsite: calls.go:103:9|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:112:9|1 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:115:9|2 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:119:12|3 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_not_on_panic_paths(x int, q []string) {
+	if x != G {
+		panic("ouch")
+		/* Notes: */
+		/* - we only look for post-dominating panic/exit, so */
+		/*   this site will in fact not have a panicpath flag */
+		/* - vet will complain about this site as unreachable */
+		callee(x)
+	}
+	if x != G {
+		callee(x)
+		if x < 100 {
+			panic("ouch")
+		}
+	}
+	if x+G == 101 {
+		if x < 100 {
+			panic("ouch")
+		}
+		callee(x)
+	}
+	if x < -101 {
+		callee(x)
+		if len(q) == 0 {
+			return
+		}
+		callsexit(x)
+	}
+}
+
+// calls.go init.0 129 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// callsite: calls.go:130:16|0 flagstr "CallSiteInInitFunc" flagval 4 score 22 mask 2 maskstr "initFuncAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func init() {
+	println(callee(5))
+}
+
+// calls.go T_pass_inlinable_func_to_param_feeding_indirect_call 140 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: calls.go:141:19|0 flagstr "" flagval 0 score 16 mask 512 maskstr "passInlinableFuncToIndCallAdj"
+// callsite: calls.go:141:19|calls.go:232:10|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_inlinable_func_to_param_feeding_indirect_call(x int) int {
+	return callsParam(x, callee)
+}
+
+// calls.go T_pass_noninlinable_func_to_param_feeding_indirect_call 150 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: calls.go:153:19|0 flagstr "" flagval 0 score 36 mask 128 maskstr "passFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_noninlinable_func_to_param_feeding_indirect_call(x int) int {
+	// if we inline callsParam we can convert the indirect call
+	// to a direct call, but we can't inline it.
+	return callsParam(x, calleeNoInline)
+}
+
+// calls.go T_pass_inlinable_func_to_param_feeding_nested_indirect_call 165 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// callsite: calls.go:166:25|0 flagstr "" flagval 0 score 27 mask 1024 maskstr "passInlinableFuncToNestedIndCallAdj"
+// callsite: calls.go:166:25|calls.go:237:11|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_inlinable_func_to_param_feeding_nested_indirect_call(x int) int {
+	return callsParamNested(x, callee)
+}
+
+// calls.go T_pass_noninlinable_func_to_param_feeding_nested_indirect_call 177 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// callsite: calls.go:178:25|0 flagstr "" flagval 0 score 47 mask 256 maskstr "passFuncToNestedIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_noninlinable_func_to_param_feeding_nested_indirect_call(x int) int {
+	return callsParamNested(x, calleeNoInline)
+}
+
+// calls.go T_call_scoring_in_noninlinable_func 195 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]}
+// callsite: calls.go:209:14|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:210:15|1 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:212:19|2 flagstr "" flagval 0 score 16 mask 512 maskstr "passInlinableFuncToIndCallAdj"
+// callsite: calls.go:212:19|calls.go:232:10|0 flagstr "" flagval 0 score 4 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+// calls.go T_call_scoring_in_noninlinable_func.func1 212 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_call_scoring_in_noninlinable_func(x int, sl []int) int {
+	if x == 101 {
+		// Drive up the cost of inlining this func over the
+		// regular threshold.
+		for i := 0; i < 10; i++ {
+			for j := 0; j < i; j++ {
+				sl = append(sl, append(sl, append(sl, append(sl, x)...)...)...)
+				sl = append(sl, sl[0], sl[1], sl[2])
+				x += calleeNoInline(x)
+			}
+		}
+	}
+	if x < 100 {
+		// make sure this callsite is scored properly
+		G += callee(101)
+		panic(callee(x))
+	}
+	return callsParam(x, func(y int) int { return y + x })
+}
+
+var G int
+
+func callee(x int) int {
+	return x
+}
+
+func calleeNoInline(x int) int {
+	defer func() { G++ }()
+	return x
+}
+
+func callsexit(x int) {
+	println(x)
+	os.Exit(x)
+}
+
+func callsParam(x int, f func(int) int) int {
+	return f(x)
+}
+
+func callsParamNested(x int, f func(int) int) int {
+	if x < 0 {
+		return f(x)
+	}
+	return 0
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go
new file mode 100644
index 0000000..f3d7424
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go
@@ -0,0 +1,341 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+
+package funcflags
+
+import "os"
+
+// funcflags.go T_simple 20 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":null,"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_simple() {
+	panic("bad")
+}
+
+// funcflags.go T_nested 32 0 1
+// Flags FuncPropNeverReturns
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_nested(x int) {
+	if x < 10 {
+		panic("bad")
+	} else {
+		panic("good")
+	}
+}
+
+// funcflags.go T_block1 46 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_block1(x int) {
+	panic("bad")
+	if x < 10 {
+		return
+	}
+}
+
+// funcflags.go T_block2 60 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_block2(x int) {
+	if x < 10 {
+		return
+	}
+	panic("bad")
+}
+
+// funcflags.go T_switches1 75 0 1
+// Flags FuncPropNeverReturns
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches1(x int) {
+	switch x {
+	case 1:
+		panic("one")
+	case 2:
+		panic("two")
+	}
+	panic("whatev")
+}
+
+// funcflags.go T_switches1a 92 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches1a(x int) {
+	switch x {
+	case 2:
+		panic("two")
+	}
+}
+
+// funcflags.go T_switches2 106 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches2(x int) {
+	switch x {
+	case 1:
+		panic("one")
+	case 2:
+		panic("two")
+	default:
+		return
+	}
+	panic("whatev")
+}
+
+// funcflags.go T_switches3 123 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches3(x interface{}) {
+	switch x.(type) {
+	case bool:
+		panic("one")
+	case float32:
+		panic("two")
+	}
+}
+
+// funcflags.go T_switches4 138 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches4(x int) {
+	switch x {
+	case 1:
+		x++
+		fallthrough
+	case 2:
+		panic("two")
+		fallthrough
+	default:
+		panic("bad")
+	}
+	panic("whatev")
+}
+
+// funcflags.go T_recov 157 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_recov(x int) {
+	if x := recover(); x != nil {
+		panic(x)
+	}
+}
+
+// funcflags.go T_forloops1 169 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_forloops1(x int) {
+	for {
+		panic("wokketa")
+	}
+}
+
+// funcflags.go T_forloops2 180 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_forloops2(x int) {
+	for {
+		println("blah")
+		if true {
+			break
+		}
+		panic("warg")
+	}
+}
+
+// funcflags.go T_forloops3 195 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_forloops3(x int) {
+	for i := 0; i < 101; i++ {
+		println("blah")
+		if true {
+			continue
+		}
+		panic("plark")
+	}
+	for i := range [10]int{} {
+		println(i)
+		panic("plark")
+	}
+	panic("whatev")
+}
+
+// funcflags.go T_hasgotos 215 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_hasgotos(x int, y int) {
+	{
+		xx := x
+		panic("bad")
+	lab1:
+		goto lab2
+	lab2:
+		if false {
+			goto lab1
+		} else {
+			goto lab4
+		}
+	lab4:
+		if xx < y {
+		lab3:
+			if false {
+				goto lab3
+			}
+		}
+		println(9)
+	}
+}
+
+// funcflags.go T_break_with_label 246 0 1
+// ParamFlags
+//   0 ParamMayFeedIfOrSwitch
+//   1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_break_with_label(x int, y int) {
+	// presence of break with label should pessimize this func
+	// (similar to goto).
+	panic("bad")
+lab1:
+	for {
+		println("blah")
+		if x < 0 {
+			break lab1
+		}
+		panic("hubba")
+	}
+}
+
+// funcflags.go T_callsexit 268 0 1
+// Flags FuncPropNeverReturns
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_callsexit(x int) {
+	if x < 0 {
+		os.Exit(1)
+	}
+	os.Exit(2)
+}
+
+// funcflags.go T_exitinexpr 281 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: funcflags.go:286:18|0 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_exitinexpr(x int) {
+	// This function does indeed unconditionally call exit, since the
+	// first thing it does is invoke exprcallsexit, however from the
+	// perspective of this function, the call is not at the statement
+	// level, so we'll wind up missing it.
+	if exprcallsexit(x) < 0 {
+		println("foo")
+	}
+}
+
+// funcflags.go T_select_noreturn 297 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0,0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_select_noreturn(chi chan int, chf chan float32, p *int) {
+	rv := 0
+	select {
+	case i := <-chi:
+		rv = i
+	case f := <-chf:
+		rv = int(f)
+	}
+	*p = rv
+	panic("bad")
+}
+
+// funcflags.go T_select_mayreturn 314 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0,0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_select_mayreturn(chi chan int, chf chan float32, p *int) int {
+	rv := 0
+	select {
+	case i := <-chi:
+		rv = i
+		return i
+	case f := <-chf:
+		rv = int(f)
+	}
+	*p = rv
+	panic("bad")
+}
+
+// funcflags.go T_calls_callsexit 334 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// callsite: funcflags.go:335:15|0 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_callsexit(x int) {
+	exprcallsexit(x)
+}
+
+func exprcallsexit(x int) int {
+	os.Exit(x)
+	return x
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go
new file mode 100644
index 0000000..1a3073c
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go
@@ -0,0 +1,367 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+package params
+
+import "os"
+
+// params.go T_feeds_if_simple 20 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_simple(x int) {
+	if x < 100 {
+		os.Exit(1)
+	}
+	println(x)
+}
+
+// params.go T_feeds_if_nested 35 0 1
+// ParamFlags
+//   0 ParamMayFeedIfOrSwitch
+//   1 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64,32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_nested(x, y int) {
+	if y != 0 {
+		if x < 100 {
+			os.Exit(1)
+		}
+	}
+	println(x)
+}
+
+// params.go T_feeds_if_pointer 51 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_pointer(xp *int) {
+	if xp != nil {
+		os.Exit(1)
+	}
+	println(xp)
+}
+
+// params.go T.T_feeds_if_simple_method 66 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+//   1 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32,32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func (r T) T_feeds_if_simple_method(x int) {
+	if x < 100 {
+		os.Exit(1)
+	}
+	if r != 99 {
+		os.Exit(2)
+	}
+	println(x)
+}
+
+// params.go T_feeds_if_blanks 86 0 1
+// ParamFlags
+//   0 ParamNoInfo
+//   1 ParamFeedsIfOrSwitch
+//   2 ParamNoInfo
+//   3 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,32,0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_blanks(_ string, x int, _ bool, _ bool) {
+	// blanks ignored; from a props perspective "x" is param 0
+	if x < 100 {
+		os.Exit(1)
+	}
+	println(x)
+}
+
+// params.go T_feeds_if_with_copy 101 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_with_copy(x int) {
+	// simple copy here -- we get this case
+	xx := x
+	if xx < 100 {
+		os.Exit(1)
+	}
+	println(x)
+}
+
+// params.go T_feeds_if_with_copy_expr 115 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_with_copy_expr(x int) {
+	// this case (copy of expression) currently not handled.
+	xx := x < 100
+	if xx {
+		os.Exit(1)
+	}
+	println(x)
+}
+
+// params.go T_feeds_switch 131 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_switch(x int) {
+	switch x {
+	case 101:
+		println(101)
+	case 202:
+		panic("bad")
+	}
+	println(x)
+}
+
+// params.go T_feeds_if_toocomplex 146 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_toocomplex(x int, y int) {
+	// not handled at the moment; we only look for cases where
+	// an "if" or "switch" can be simplified based on a single
+	// constant param, not a combination of constant params.
+	if x < y {
+		panic("bad")
+	}
+	println(x + y)
+}
+
+// params.go T_feeds_if_redefined 161 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined(x int) {
+	if x < G {
+		x++
+	}
+	if x == 101 {
+		panic("bad")
+	}
+}
+
+// params.go T_feeds_if_redefined2 175 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined2(x int) {
+	// this currently classifies "x" as "no info", since the analysis we
+	// use to check for reassignments/redefinitions is not flow-sensitive,
+	// but we could probably catch this case with better analysis or
+	// high-level SSA.
+	if x == 101 {
+		panic("bad")
+	}
+	if x < G {
+		x++
+	}
+}
+
+// params.go T_feeds_multi_if 196 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+//   1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_multi_if(x int, y int) {
+	// Here we have one "if" that is too complex (x < y) but one that is
+	// simple enough. Currently we enable the heuristic for this. It's
+	// possible to imagine this being a bad thing if the function in
+	// question is sufficiently large, but if it's too large we probably
+	// can't inline it anyhow.
+	if x < y {
+		panic("bad")
+	}
+	if x < 10 {
+		panic("whatev")
+	}
+	println(x + y)
+}
+
+// params.go T_feeds_if_redefined_indirectwrite 216 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined_indirectwrite(x int) {
+	ax := &x
+	if G != 2 {
+		*ax = G
+	}
+	if x == 101 {
+		panic("bad")
+	}
+}
+
+// params.go T_feeds_if_redefined_indirectwrite_copy 231 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined_indirectwrite_copy(x int) {
+	// we don't catch this case, "x" is marked as no info,
+	// since we're conservative about redefinitions.
+	ax := &x
+	cx := x
+	if G != 2 {
+		*ax = G
+	}
+	if cx == 101 {
+		panic("bad")
+	}
+}
+
+// params.go T_feeds_if_expr1 251 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_expr1(x int) {
+	if x == 101 || x == 102 || x&0xf == 0 {
+		panic("bad")
+	}
+}
+
+// params.go T_feeds_if_expr2 262 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_expr2(x int) {
+	if (x*x)-(x+x)%x == 101 || x&0xf == 0 {
+		panic("bad")
+	}
+}
+
+// params.go T_feeds_if_expr3 273 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_expr3(x int) {
+	if x-(x&0x1)^378 > (1 - G) {
+		panic("bad")
+	}
+}
+
+// params.go T_feeds_if_shift_may_panic 284 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_shift_may_panic(x int) *int {
+	// here if "x" is a constant like 2, we could simplify the "if",
+	// but if we were to pass in a negative value for "x" we can't
+	// fold the condition due to the need to panic on negative shift.
+	if 1<<x > 1024 {
+		return nil
+	}
+	return &G
+}
+
+// params.go T_feeds_if_maybe_divide_by_zero 299 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_maybe_divide_by_zero(x int) {
+	if 99/x == 3 {
+		return
+	}
+	println("blarg")
+}
+
+// params.go T_feeds_indcall 313 0 1
+// ParamFlags
+//   0 ParamMayFeedIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[16],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indcall(x func()) {
+	if G != 20 {
+		x()
+	}
+}
+
+// params.go T_feeds_indcall_and_if 326 0 1
+// ParamFlags
+//   0 ParamMayFeedIndirectCall|ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[48],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indcall_and_if(x func()) {
+	if x != nil {
+		x()
+	}
+}
+
+// params.go T_feeds_indcall_with_copy 339 0 1
+// ParamFlags
+//   0 ParamFeedsIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[8],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indcall_with_copy(x func()) {
+	xx := x
+	if G < 10 {
+		G--
+	}
+	xx()
+}
+
+// params.go T_feeds_interface_method_call 354 0 1
+// ParamFlags
+//   0 ParamFeedsInterfaceMethodCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[2],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_interface_method_call(i I) {
+	i.Blarg()
+}
+
+var G int
+
+type T int
+
+type I interface {
+	Blarg()
+}
+
+func (r T) Blarg() {
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go
new file mode 100644
index 0000000..51f2bc7
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go
@@ -0,0 +1,370 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+
+package returns1
+
+import "unsafe"
+
+// returns.go T_simple_allocmem 21 0 1
+// ResultFlags
+//   0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_simple_allocmem() *Bar {
+	return &Bar{}
+}
+
+// returns.go T_allocmem_two_returns 34 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// ResultFlags
+//   0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_allocmem_two_returns(x int) *Bar {
+	// multiple returns
+	if x < 0 {
+		return new(Bar)
+	} else {
+		return &Bar{x: 2}
+	}
+}
+
+// returns.go T_allocmem_three_returns 52 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// ResultFlags
+//   0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_allocmem_three_returns(x int) []*Bar {
+	// more multiple returns
+	switch x {
+	case 10, 11, 12:
+		return make([]*Bar, 10)
+	case 13:
+		fallthrough
+	case 15:
+		return []*Bar{&Bar{x: 15}}
+	}
+	return make([]*Bar, 0, 10)
+}
+
+// returns.go T_return_nil 72 0 1
+// ResultFlags
+//   0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_nil() *Bar {
+	// simple case: no alloc
+	return nil
+}
+
+// returns.go T_multi_return_nil 84 0 1
+// ResultFlags
+//   0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_nil(x, y bool) *Bar {
+	if x && y {
+		return nil
+	}
+	return nil
+}
+
+// returns.go T_multi_return_nil_anomoly 98 0 1
+// ResultFlags
+//   0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_nil_anomoly(x, y bool) Itf {
+	if x && y {
+		var qnil *Q
+		return qnil
+	}
+	var barnil *Bar
+	return barnil
+}
+
+// returns.go T_multi_return_some_nil 112 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_some_nil(x, y bool) *Bar {
+	if x && y {
+		return nil
+	} else {
+		return &GB
+	}
+}
+
+// returns.go T_mixed_returns 127 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_mixed_returns(x int) *Bar {
+	// mix of alloc and non-alloc
+	if x < 0 {
+		return new(Bar)
+	} else {
+		return &GB
+	}
+}
+
+// returns.go T_mixed_returns_slice 143 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_mixed_returns_slice(x int) []*Bar {
+	// mix of alloc and non-alloc
+	switch x {
+	case 10, 11, 12:
+		return make([]*Bar, 10)
+	case 13:
+		fallthrough
+	case 15:
+		return []*Bar{&Bar{x: 15}}
+	}
+	ba := [...]*Bar{&GB, &GB}
+	return ba[:]
+}
+
+// returns.go T_maps_and_channels 167 0 1
+// ResultFlags
+//   0 ResultNoInfo
+//   1 ResultNoInfo
+//   2 ResultNoInfo
+//   3 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0,0,0,8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_maps_and_channels(x int, b bool) (bool, map[int]int, chan bool, unsafe.Pointer) {
+	// maps and channels
+	return b, make(map[int]int), make(chan bool), nil
+}
+
+// returns.go T_assignment_to_named_returns 179 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0,0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_assignment_to_named_returns(x int) (r1 *uint64, r2 *uint64) {
+	// assignments to named returns and then "return" not supported
+	r1 = new(uint64)
+	if x < 1 {
+		*r1 = 2
+	}
+	r2 = new(uint64)
+	return
+}
+
+// returns.go T_named_returns_but_return_explicit_values 199 0 1
+// ParamFlags
+//   0 ParamFeedsIfOrSwitch
+// ResultFlags
+//   0 ResultIsAllocatedMem
+//   1 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2,2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_named_returns_but_return_explicit_values(x int) (r1 *uint64, r2 *uint64) {
+	// named returns ok if all returns are non-empty
+	rx1 := new(uint64)
+	if x < 1 {
+		*rx1 = 2
+	}
+	rx2 := new(uint64)
+	return rx1, rx2
+}
+
+// returns.go T_return_concrete_type_to_itf 216 0 1
+// ResultFlags
+//   0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_concrete_type_to_itf(x, y int) Itf {
+	return &Bar{}
+}
+
+// returns.go T_return_concrete_type_to_itfwith_copy 227 0 1
+// ResultFlags
+//   0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_concrete_type_to_itfwith_copy(x, y int) Itf {
+	b := &Bar{}
+	println("whee")
+	return b
+}
+
+// returns.go T_return_concrete_type_to_itf_mixed 238 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_concrete_type_to_itf_mixed(x, y int) Itf {
+	if x < y {
+		b := &Bar{}
+		return b
+	}
+	return nil
+}
+
+// returns.go T_return_same_func 253 0 1
+// ResultFlags
+//   0 ResultAlwaysSameInlinableFunc
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[32]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_same_func() func(int) int {
+	if G < 10 {
+		return foo
+	} else {
+		return foo
+	}
+}
+
+// returns.go T_return_different_funcs 266 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_different_funcs() func(int) int {
+	if G != 10 {
+		return foo
+	} else {
+		return bar
+	}
+}
+
+// returns.go T_return_same_closure 286 0 1
+// ResultFlags
+//   0 ResultAlwaysSameInlinableFunc
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[32]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_same_closure.func1 287 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_same_closure() func(int) int {
+	p := func(q int) int { return q }
+	if G < 10 {
+		return p
+	} else {
+		return p
+	}
+}
+
+// returns.go T_return_different_closures 312 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_different_closures.func1 313 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_different_closures.func2 317 0 1
+// ResultFlags
+//   0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_different_closures() func(int) int {
+	p := func(q int) int { return q }
+	if G < 10 {
+		return p
+	} else {
+		return func(q int) int { return 101 }
+	}
+}
+
+// returns.go T_return_noninlinable 339 0 1
+// ResultFlags
+//   0 ResultAlwaysSameFunc
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[16]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_noninlinable.func1 340 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns.go:343:4|0 flagstr "" flagval 0 score 4 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_noninlinable.func1.1 341 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_noninlinable(x int) func(int) int {
+	noti := func(q int) int {
+		defer func() {
+			println(q + x)
+		}()
+		return q
+	}
+	return noti
+}
+
+type Bar struct {
+	x int
+	y string
+}
+
+func (b *Bar) Plark() {
+}
+
+type Q int
+
+func (q *Q) Plark() {
+}
+
+func foo(x int) int { return x }
+func bar(x int) int { return -x }
+
+var G int
+var GB Bar
+
+type Itf interface {
+	Plark()
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go
new file mode 100644
index 0000000..7200926
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go
@@ -0,0 +1,231 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+
+package returns2
+
+// returns2.go T_return_feeds_iface_call 18 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// callsite: returns2.go:19:13|0 flagstr "" flagval 0 score 1 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_feeds_iface_call() {
+	b := newBar(10)
+	b.Plark()
+}
+
+// returns2.go T_multi_return_feeds_iface_call 29 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// callsite: returns2.go:30:20|0 flagstr "" flagval 0 score 3 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_feeds_iface_call() {
+	_, b, _ := newBar2(10)
+	b.Plark()
+}
+
+// returns2.go T_returned_inlinable_func_feeds_indirect_call 41 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: returns2.go:42:18|0 flagstr "" flagval 0 score -51 mask 8200 maskstr "passConstToIfAdj|returnFeedsInlinableFuncToIndCallAdj"
+// callsite: returns2.go:44:20|1 flagstr "" flagval 0 score -23 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_returned_inlinable_func_feeds_indirect_call(q int) {
+	f := returnsFunc(10)
+	f(q)
+	f2 := returnsFunc2()
+	f2(q)
+}
+
+// returns2.go T_returned_noninlineable_func_feeds_indirect_call 54 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: returns2.go:55:30|0 flagstr "" flagval 0 score -23 mask 4096 maskstr "returnFeedsFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_returned_noninlineable_func_feeds_indirect_call(q int) {
+	f := returnsNonInlinableFunc()
+	f(q)
+}
+
+// returns2.go T_multi_return_feeds_indirect_call 65 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: returns2.go:66:29|0 flagstr "" flagval 0 score -21 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_feeds_indirect_call(q int) {
+	_, f, _ := multiReturnsFunc()
+	f(q)
+}
+
+// returns2.go T_return_feeds_ifswitch 76 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns2.go:77:14|0 flagstr "" flagval 0 score 10 mask 2048 maskstr "returnFeedsConstToIfAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_feeds_ifswitch(q int) int {
+	x := meaning(q)
+	if x < 42 {
+		switch x {
+		case 42:
+			return 1
+		}
+	}
+	return 0
+}
+
+// returns2.go T_multi_return_feeds_ifswitch 93 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns2.go:94:21|0 flagstr "" flagval 0 score 9 mask 2048 maskstr "returnFeedsConstToIfAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_feeds_ifswitch(q int) int {
+	x, y, z := meanings(q)
+	if x < y {
+		switch x {
+		case 42:
+			return z
+		}
+	}
+	return 0
+}
+
+// returns2.go T_two_calls_feed_ifswitch 111 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns2.go:115:14|0 flagstr "" flagval 0 score 25 mask 0 maskstr ""
+// callsite: returns2.go:116:14|1 flagstr "" flagval 0 score 25 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_two_calls_feed_ifswitch(q int) int {
+	// We don't handle this case; for the heuristic to kick in,
+	// all names in a given if/switch condition have to come from
+	// the same callsite.
+	x := meaning(q)
+	y := meaning(-q)
+	if x < y {
+		switch x + y {
+		case 42:
+			return 1
+		}
+	}
+	return 0
+}
+
+// returns2.go T_chained_indirect_call 132 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: returns2.go:135:18|0 flagstr "" flagval 0 score -31 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_chained_indirect_call(x, y int) {
+	// Here 'returnsFunc' returns an inlinable func that feeds
+	// directly into a call (no named intermediate).
+	G += returnsFunc(x - y)(x + y)
+}
+
+// returns2.go T_chained_conc_iface_call 144 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: returns2.go:148:8|0 flagstr "" flagval 0 score 1 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_chained_conc_iface_call(x, y int) {
+	// Similar to the case above: the return value from a call returning a
+	// concrete type feeds directly into an interface call. Note that only
+	// the first iface call is interesting here.
+	newBar(10).Plark().Plark()
+}
+
+func returnsFunc(x int) func(int) int {
+	if x < 0 {
+		G++
+	}
+	return adder
+}
+
+func returnsFunc2() func(int) int {
+	return func(x int) int {
+		return adder(x)
+	}
+}
+
+func returnsNonInlinableFunc() func(int) int {
+	return adderNoInline
+}
+
+func multiReturnsFunc() (int, func(int) int, int) {
+	return 42, func(x int) int { G++; return 1 }, -42
+}
+
+func adder(x int) int {
+	G += 1
+	return G
+}
+
+func adderNoInline(x int) int {
+	defer func() { G += x }()
+	G += 1
+	return G
+}
+
+func meaning(q int) int {
+	r := 0
+	for i := 0; i < 42; i++ {
+		r += q
+	}
+	G += r
+	return 42
+}
+
+func meanings(q int) (int, int, int) {
+	r := 0
+	for i := 0; i < 42; i++ {
+		r += q
+	}
+	return 42, 43, r
+}
+
+type Bar struct {
+	x int
+	y string
+}
+
+func (b *Bar) Plark() Itf {
+	return b
+}
+
+type Itf interface {
+	Plark() Itf
+}
+
+func newBar(x int) Itf {
+	s := 0
+	for i := 0; i < x; i++ {
+		s += i
+	}
+	return &Bar{
+		x: s,
+	}
+}
+
+func newBar2(x int) (int, Itf, bool) {
+	s := 0
+	for i := 0; i < x; i++ {
+		s += i
+	}
+	return 0, &Bar{x: s}, false
+}
+
+var G int
diff --git a/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go b/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go
new file mode 100644
index 0000000..587eab03
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go
@@ -0,0 +1,217 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+	"go/constant"
+	"testing"
+)
+
+var pos src.XPos
+var local *types.Pkg
+var f *ir.Func
+
+func init() {
+	types.PtrSize = 8
+	types.RegSize = 8
+	types.MaxWidth = 1 << 50
+	typecheck.InitUniverse()
+	local = types.NewPkg("", "")
+	fsym := &types.Sym{
+		Pkg:  types.NewPkg("my/import/path", "path"),
+		Name: "function",
+	}
+	f = ir.NewFunc(src.NoXPos, src.NoXPos, fsym, nil)
+}
+
+type state struct {
+	ntab map[string]*ir.Name
+}
+
+func mkstate() *state {
+	return &state{
+		ntab: make(map[string]*ir.Name),
+	}
+}
+
+func bin(x ir.Node, op ir.Op, y ir.Node) ir.Node {
+	return ir.NewBinaryExpr(pos, op, x, y)
+}
+
+func conv(x ir.Node, t *types.Type) ir.Node {
+	return ir.NewConvExpr(pos, ir.OCONV, t, x)
+}
+
+func logical(x ir.Node, op ir.Op, y ir.Node) ir.Node {
+	return ir.NewLogicalExpr(pos, op, x, y)
+}
+
+func un(op ir.Op, x ir.Node) ir.Node {
+	return ir.NewUnaryExpr(pos, op, x)
+}
+
+func liti(i int64) ir.Node {
+	return ir.NewBasicLit(pos, types.Types[types.TINT64], constant.MakeInt64(i))
+}
+
+func lits(s string) ir.Node {
+	return ir.NewBasicLit(pos, types.Types[types.TSTRING], constant.MakeString(s))
+}
+
+func (s *state) nm(name string, t *types.Type) *ir.Name {
+	if n, ok := s.ntab[name]; ok {
+		if n.Type() != t {
+			panic("bad")
+		}
+		return n
+	}
+	sym := local.Lookup(name)
+	nn := ir.NewNameAt(pos, sym, t)
+	s.ntab[name] = nn
+	return nn
+}
+
+func (s *state) nmi64(name string) *ir.Name {
+	return s.nm(name, types.Types[types.TINT64])
+}
+
+func (s *state) nms(name string) *ir.Name {
+	return s.nm(name, types.Types[types.TSTRING])
+}
+
+func TestClassifyIntegerCompare(t *testing.T) {
+
+	// (n < 10 || n > 100) && (n >= 12 || n <= 99 || n != 101)
+	s := mkstate()
+	nn := s.nmi64("n")
+	nlt10 := bin(nn, ir.OLT, liti(10))         // n < 10
+	ngt100 := bin(nn, ir.OGT, liti(100))       // n > 100
+	nge12 := bin(nn, ir.OGE, liti(12))         // n >= 12
+	nle99 := bin(nn, ir.OLE, liti(99))         // n <= 99
+	nne101 := bin(nn, ir.ONE, liti(101))       // n != 101
+	noror1 := logical(nlt10, ir.OOROR, ngt100) // n < 10 || n > 100
+	noror2 := logical(nge12, ir.OOROR, nle99)  // n >= 12 || n <= 99
+	noror3 := logical(noror2, ir.OOROR, nne101)
+	nandand := typecheck.Expr(logical(noror1, ir.OANDAND, noror3))
+
+	wantv := true
+	v := ShouldFoldIfNameConstant(nandand, []*ir.Name{nn})
+	if v != wantv {
+		t.Errorf("wanted shouldfold(%v) %v, got %v", nandand, wantv, v)
+	}
+}
+
+func TestClassifyStringCompare(t *testing.T) {
+
+	// s != "foo" && s < "ooblek" && s > "plarkish"
+	s := mkstate()
+	nn := s.nms("s")
+	snefoo := bin(nn, ir.ONE, lits("foo"))     // s != "foo"
+	sltoob := bin(nn, ir.OLT, lits("ooblek"))  // s < "ooblek"
+	sgtpk := bin(nn, ir.OGT, lits("plarkish")) // s > "plarkish"
+	nandand := logical(snefoo, ir.OANDAND, sltoob)
+	top := typecheck.Expr(logical(nandand, ir.OANDAND, sgtpk))
+
+	wantv := true
+	v := ShouldFoldIfNameConstant(top, []*ir.Name{nn})
+	if v != wantv {
+		t.Errorf("wanted shouldfold(%v) %v, got %v", top, wantv, v)
+	}
+}
+
+func TestClassifyIntegerArith(t *testing.T) {
+	// n+1 ^ n-3 * n/2 + n<<9 + n>>2 - n&^7
+
+	s := mkstate()
+	nn := s.nmi64("n")
+	np1 := bin(nn, ir.OADD, liti(1))     // n+1
+	nm3 := bin(nn, ir.OSUB, liti(3))     // n-3
+	nd2 := bin(nn, ir.ODIV, liti(2))     // n/2
+	nls9 := bin(nn, ir.OLSH, liti(9))    // n<<9
+	nrs2 := bin(nn, ir.ORSH, liti(2))    // n>>2
+	nan7 := bin(nn, ir.OANDNOT, liti(7)) // n&^7
+	c1xor := bin(np1, ir.OXOR, nm3)
+	c2mul := bin(c1xor, ir.OMUL, nd2)
+	c3add := bin(c2mul, ir.OADD, nls9)
+	c4add := bin(c3add, ir.OADD, nrs2)
+	c5sub := bin(c4add, ir.OSUB, nan7)
+	top := typecheck.Expr(c5sub)
+
+	wantv := true
+	v := ShouldFoldIfNameConstant(top, []*ir.Name{nn})
+	if v != wantv {
+		t.Errorf("wanted shouldfold(%v) %v, got %v", top, wantv, v)
+	}
+}
+
+func TestClassifyAssortedShifts(t *testing.T) {
+
+	s := mkstate()
+	nn := s.nmi64("n")
+	badcases := []ir.Node{
+		bin(liti(3), ir.OLSH, nn), // 3<<n
+		bin(liti(7), ir.ORSH, nn), // 7>>n
+	}
+	for _, bc := range badcases {
+		wantv := false
+		v := ShouldFoldIfNameConstant(typecheck.Expr(bc), []*ir.Name{nn})
+		if v != wantv {
+			t.Errorf("wanted shouldfold(%v) %v, got %v", bc, wantv, v)
+		}
+	}
+}
+
+func TestClassifyFloat(t *testing.T) {
+	// float32(n) + float32(10)
+	s := mkstate()
+	nn := s.nm("n", types.Types[types.TUINT32])
+	f1 := conv(nn, types.Types[types.TFLOAT32])
+	f2 := conv(liti(10), types.Types[types.TFLOAT32])
+	add := bin(f1, ir.OADD, f2)
+
+	wantv := false
+	v := ShouldFoldIfNameConstant(typecheck.Expr(add), []*ir.Name{nn})
+	if v != wantv {
+		t.Errorf("wanted shouldfold(%v) %v, got %v", add, wantv, v)
+	}
+}
+
+func TestMultipleNamesAllUsed(t *testing.T) {
+	// n != 101 && m < 2
+	s := mkstate()
+	nn := s.nmi64("n")
+	nm := s.nmi64("m")
+	nne101 := bin(nn, ir.ONE, liti(101)) // n != 101
+	mlt2 := bin(nm, ir.OLT, liti(2))     // m < 2
+	nandand := typecheck.Expr(logical(nne101, ir.OANDAND, mlt2))
+
+	// all names used
+	wantv := true
+	v := ShouldFoldIfNameConstant(nandand, []*ir.Name{nn, nm})
+	if v != wantv {
+		t.Errorf("wanted shouldfold(%v) %v, got %v", nandand, wantv, v)
+	}
+
+	// not all names used
+	wantv = false
+	v = ShouldFoldIfNameConstant(nne101, []*ir.Name{nn, nm})
+	if v != wantv {
+		t.Errorf("wanted shouldfold(%v) %v, got %v", nne101, wantv, v)
+	}
+
+	// other names used.
+	np := s.nmi64("p")
+	pne0 := bin(np, ir.ONE, liti(101)) // p != 101
+	noror := logical(nandand, ir.OOROR, pne0)
+	wantv = false
+	v = ShouldFoldIfNameConstant(noror, []*ir.Name{nn, nm})
+	if v != wantv {
+		t.Errorf("wanted shouldfold(%v) %v, got %v", noror, wantv, v)
+	}
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/trace_off.go b/src/cmd/compile/internal/inline/inlheur/trace_off.go
new file mode 100644
index 0000000..9eea7fa
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/trace_off.go
@@ -0,0 +1,18 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !debugtrace
+
+package inlheur
+
+const debugTrace = 0
+
+func enableDebugTrace(x int) {
+}
+
+func enableDebugTraceIfEnv() {
+}
+
+func disableDebugTrace() {
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/trace_on.go b/src/cmd/compile/internal/inline/inlheur/trace_on.go
new file mode 100644
index 0000000..1608429
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/trace_on.go
@@ -0,0 +1,40 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build debugtrace
+
+package inlheur
+
+import (
+	"os"
+	"strconv"
+)
+
+var debugTrace = 0
+
+func enableDebugTrace(x int) {
+	debugTrace = x
+}
+
+func enableDebugTraceIfEnv() {
+	v := os.Getenv("DEBUG_TRACE_INLHEUR")
+	if v == "" {
+		return
+	}
+	if v[0] == '*' {
+		if !UnitTesting() {
+			return
+		}
+		v = v[1:]
+	}
+	i, err := strconv.Atoi(v)
+	if err != nil {
+		return
+	}
+	debugTrace = i
+}
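+
+// Illustrative behavior only (hypothetical values, inferred from the parsing
+// above rather than a documented contract):
+//
+//	DEBUG_TRACE_INLHEUR=3   sets debugTrace to 3
+//	DEBUG_TRACE_INLHEUR=*3  sets debugTrace to 3, but only when UnitTesting()
+//	DEBUG_TRACE_INLHEUR=abc is ignored (strconv.Atoi fails)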
+
+func disableDebugTrace() {
+	debugTrace = 0
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/tserial_test.go b/src/cmd/compile/internal/inline/inlheur/tserial_test.go
new file mode 100644
index 0000000..def12f5
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/tserial_test.go
@@ -0,0 +1,65 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import "testing"
+
+func fpeq(fp1, fp2 FuncProps) bool {
+	if fp1.Flags != fp2.Flags {
+		return false
+	}
+	if len(fp1.ParamFlags) != len(fp2.ParamFlags) {
+		return false
+	}
+	for i := range fp1.ParamFlags {
+		if fp1.ParamFlags[i] != fp2.ParamFlags[i] {
+			return false
+		}
+	}
+	if len(fp1.ResultFlags) != len(fp2.ResultFlags) {
+		return false
+	}
+	for i := range fp1.ResultFlags {
+		if fp1.ResultFlags[i] != fp2.ResultFlags[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func TestSerDeser(t *testing.T) {
+	testcases := []FuncProps{
+		FuncProps{},
+		FuncProps{
+			Flags: 0xfffff,
+		},
+		FuncProps{
+			Flags:       1,
+			ResultFlags: []ResultPropBits{ResultAlwaysSameConstant},
+		},
+		FuncProps{
+			Flags:       1,
+			ParamFlags:  []ParamPropBits{0x99, 0xaa, 0xfffff},
+			ResultFlags: []ResultPropBits{0xfeedface},
+		},
+	}
+
+	for k, tc := range testcases {
+		s := tc.SerializeToString()
+		fp := DeserializeFromString(s)
+		got := fp.String()
+		want := tc.String()
+		if !fpeq(*fp, tc) {
+			t.Errorf("eq check failed for test %d: got:\n%s\nwant:\n%s\n", k, got, want)
+		}
+	}
+
+	var nilt *FuncProps
+	ns := nilt.SerializeToString()
+	nfp := DeserializeFromString(ns)
+	if len(ns) != 0 || nfp != nil {
+		t.Errorf("nil serialize/deserialize failed")
+	}
+}
diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go
new file mode 100644
index 0000000..a6f19d4
--- /dev/null
+++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go
@@ -0,0 +1,132 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package interleaved implements the interleaved devirtualization and
+// inlining pass.
+package interleaved
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/devirtualize"
+	"cmd/compile/internal/inline"
+	"cmd/compile/internal/inline/inlheur"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/pgo"
+	"cmd/compile/internal/typecheck"
+	"fmt"
+)
+
+// DevirtualizeAndInlinePackage interleaves devirtualization and inlining on
+// all functions within pkg.
+func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgo.Profile) {
+	if profile != nil && base.Debug.PGODevirtualize > 0 {
+		// TODO(mdempsky): Integrate into DevirtualizeAndInlineFunc below.
+		ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
+			for _, fn := range list {
+				devirtualize.ProfileGuided(fn, profile)
+			}
+		})
+		ir.CurFunc = nil
+	}
+
+	if base.Flag.LowerL != 0 {
+		inlheur.SetupScoreAdjustments()
+	}
+
+	var inlProfile *pgo.Profile // copy of profile for inlining
+	if base.Debug.PGOInline != 0 {
+		inlProfile = profile
+	}
+	if inlProfile != nil {
+		inline.PGOInlinePrologue(inlProfile, pkg.Funcs)
+	}
+
+	ir.VisitFuncsBottomUp(pkg.Funcs, func(funcs []*ir.Func, recursive bool) {
+		// We visit functions within an SCC in fairly arbitrary order,
+		// so by computing inlinability for all functions in the SCC
+		// before performing any inlining, the results are less
+		// sensitive to the order within the SCC (see #58905 for an
+		// example).
+
+		// First compute inlinability for all functions in the SCC ...
+		inline.CanInlineSCC(funcs, recursive, inlProfile)
+
+		// ... then make a second pass to do devirtualization and inlining
+		// of calls.
+		for _, fn := range funcs {
+			DevirtualizeAndInlineFunc(fn, inlProfile)
+		}
+	})
+
+	if base.Flag.LowerL != 0 {
+		// Perform a garbage collection of hidden closure functions that
+		// are no longer reachable from top-level functions following
+		// inlining. See #59404 and #59638 for more context.
+		inline.GarbageCollectUnreferencedHiddenClosures()
+
+		if base.Debug.DumpInlFuncProps != "" {
+			inlheur.DumpFuncProps(nil, base.Debug.DumpInlFuncProps)
+		}
+		if inlheur.Enabled() {
+			inline.PostProcessCallSites(inlProfile)
+			inlheur.TearDown()
+		}
+	}
+}
+
+// DevirtualizeAndInlineFunc interleaves devirtualization and inlining
+// on a single function.
+func DevirtualizeAndInlineFunc(fn *ir.Func, profile *pgo.Profile) {
+	ir.WithFunc(fn, func() {
+		if base.Flag.LowerL != 0 {
+			if inlheur.Enabled() && !fn.Wrapper() {
+				inlheur.ScoreCalls(fn)
+				defer inlheur.ScoreCallsCleanup()
+			}
+			if base.Debug.DumpInlFuncProps != "" && !fn.Wrapper() {
+				inlheur.DumpFuncProps(fn, base.Debug.DumpInlFuncProps)
+			}
+		}
+
+		bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn)
+		if bigCaller && base.Flag.LowerM > 1 {
+			fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn)
+		}
+
+		// Walk fn's body and apply devirtualization and inlining.
+		var inlCalls []*ir.InlinedCallExpr
+		var edit func(ir.Node) ir.Node
+		edit = func(n ir.Node) ir.Node {
+			switch n := n.(type) {
+			case *ir.TailCallStmt:
+				n.Call.NoInline = true // can't inline yet
+			}
+
+			ir.EditChildren(n, edit)
+
+			if call, ok := n.(*ir.CallExpr); ok {
+				devirtualize.StaticCall(call)
+
+				if inlCall := inline.TryInlineCall(fn, call, bigCaller, profile); inlCall != nil {
+					inlCalls = append(inlCalls, inlCall)
+					n = inlCall
+				}
+			}
+
+			return n
+		}
+		ir.EditChildren(fn, edit)
+
+		// If we inlined any calls, we want to recursively visit their
+		// bodies for further devirtualization and inlining. However, we
+		// need to wait until *after* the original function body has been
+		// expanded, or else inlCallee can have false positives (e.g.,
+		// #54632).
+		for len(inlCalls) > 0 {
+			call := inlCalls[0]
+			inlCalls = inlCalls[1:]
+			ir.EditChildren(call, edit)
+		}
+	})
+}
diff --git a/src/cmd/compile/internal/ir/abi.go b/src/cmd/compile/internal/ir/abi.go
index 041448f..ebe0fbf 100644
--- a/src/cmd/compile/internal/ir/abi.go
+++ b/src/cmd/compile/internal/ir/abi.go
@@ -50,9 +50,6 @@
 	if f.Pragma&Nosplit != 0 {
 		flag |= obj.NOSPLIT
 	}
-	if f.ReflectMethod() {
-		flag |= obj.REFLECTMETHOD
-	}
 	if f.IsPackageInit() {
 		flag |= obj.PKGINIT
 	}
diff --git a/src/cmd/compile/internal/ir/check_reassign_no.go b/src/cmd/compile/internal/ir/check_reassign_no.go
new file mode 100644
index 0000000..8290a7d
--- /dev/null
+++ b/src/cmd/compile/internal/ir/check_reassign_no.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !checknewoldreassignment
+
+package ir
+
+const consistencyCheckEnabled = false
diff --git a/src/cmd/compile/internal/ir/check_reassign_yes.go b/src/cmd/compile/internal/ir/check_reassign_yes.go
new file mode 100644
index 0000000..30876cc
--- /dev/null
+++ b/src/cmd/compile/internal/ir/check_reassign_yes.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build checknewoldreassignment
+
+package ir
+
+const consistencyCheckEnabled = true
diff --git a/src/cmd/compile/internal/ir/const.go b/src/cmd/compile/internal/ir/const.go
index 751620f..0efd113 100644
--- a/src/cmd/compile/internal/ir/const.go
+++ b/src/cmd/compile/internal/ir/const.go
@@ -14,18 +14,79 @@
 	"cmd/internal/src"
 )
 
+// NewBool returns an OLITERAL representing b as an untyped boolean.
 func NewBool(pos src.XPos, b bool) Node {
-	return NewBasicLit(pos, constant.MakeBool(b))
+	return NewBasicLit(pos, types.UntypedBool, constant.MakeBool(b))
 }
 
+// NewInt returns an OLITERAL representing v as an untyped integer.
 func NewInt(pos src.XPos, v int64) Node {
-	return NewBasicLit(pos, constant.MakeInt64(v))
+	return NewBasicLit(pos, types.UntypedInt, constant.MakeInt64(v))
 }
 
+// NewString returns an OLITERAL representing s as an untyped string.
 func NewString(pos src.XPos, s string) Node {
-	return NewBasicLit(pos, constant.MakeString(s))
+	return NewBasicLit(pos, types.UntypedString, constant.MakeString(s))
 }
 
+// NewUintptr returns an OLITERAL representing v as a uintptr.
+func NewUintptr(pos src.XPos, v int64) Node {
+	return NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(v))
+}
+
+// NewZero returns a zero value of the given type.
+func NewZero(pos src.XPos, typ *types.Type) Node {
+	switch {
+	case typ.HasNil():
+		return NewNilExpr(pos, typ)
+	case typ.IsInteger():
+		return NewBasicLit(pos, typ, intZero)
+	case typ.IsFloat():
+		return NewBasicLit(pos, typ, floatZero)
+	case typ.IsComplex():
+		return NewBasicLit(pos, typ, complexZero)
+	case typ.IsBoolean():
+		return NewBasicLit(pos, typ, constant.MakeBool(false))
+	case typ.IsString():
+		return NewBasicLit(pos, typ, constant.MakeString(""))
+	case typ.IsArray() || typ.IsStruct():
+		// TODO(mdempsky): Return a typechecked expression instead.
+		return NewCompLitExpr(pos, OCOMPLIT, typ, nil)
+	}
+
+	base.FatalfAt(pos, "unexpected type: %v", typ)
+	panic("unreachable")
+}
+
+var (
+	intZero     = constant.MakeInt64(0)
+	floatZero   = constant.ToFloat(intZero)
+	complexZero = constant.ToComplex(intZero)
+)
+
+// NewOne returns an OLITERAL representing 1 with the given type.
+func NewOne(pos src.XPos, typ *types.Type) Node {
+	var val constant.Value
+	switch {
+	case typ.IsInteger():
+		val = intOne
+	case typ.IsFloat():
+		val = floatOne
+	case typ.IsComplex():
+		val = complexOne
+	default:
+		base.FatalfAt(pos, "%v cannot represent 1", typ)
+	}
+
+	return NewBasicLit(pos, typ, val)
+}
+
+var (
+	intOne     = constant.MakeInt64(1)
+	floatOne   = constant.ToFloat(intOne)
+	complexOne = constant.ToComplex(intOne)
+)
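+
+// Illustrative examples (hypothetical calls, covering the cases handled
+// above): NewZero with types.Types[types.TINT] yields a typechecked OLITERAL
+// holding 0, NewZero with a pointer type yields a typed ONIL, and NewOne
+// with a float type yields an OLITERAL holding the float constant 1.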
+
 const (
 	// Maximum size in bits for big.Ints before signaling
 	// overflow and also mantissa precision for big.Floats.
diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go
index be57a8f..d30f7bc 100644
--- a/src/cmd/compile/internal/ir/copy.go
+++ b/src/cmd/compile/internal/ir/copy.go
@@ -5,71 +5,12 @@
 package ir
 
 import (
-	"cmd/compile/internal/base"
 	"cmd/internal/src"
 )
 
-// A Node may implement the Orig and SetOrig method to
-// maintain a pointer to the "unrewritten" form of a Node.
-// If a Node does not implement OrigNode, it is its own Orig.
-//
-// Note that both SepCopy and Copy have definitions compatible
-// with a Node that does not implement OrigNode: such a Node
-// is its own Orig, and in that case, that's what both want to return
-// anyway (SepCopy unconditionally, and Copy only when the input
-// is its own Orig as well, but if the output does not implement
-// OrigNode, then neither does the input, making the condition true).
-type OrigNode interface {
-	Node
-	Orig() Node
-	SetOrig(Node)
-}
-
-// origNode may be embedded into a Node to make it implement OrigNode.
-type origNode struct {
-	orig Node `mknode:"-"`
-}
-
-func (n *origNode) Orig() Node     { return n.orig }
-func (n *origNode) SetOrig(o Node) { n.orig = o }
-
-// Orig returns the “original” node for n.
-// If n implements OrigNode, Orig returns n.Orig().
-// Otherwise Orig returns n itself.
-func Orig(n Node) Node {
-	if n, ok := n.(OrigNode); ok {
-		o := n.Orig()
-		if o == nil {
-			Dump("Orig nil", n)
-			base.Fatalf("Orig returned nil")
-		}
-		return o
-	}
-	return n
-}
-
-// SepCopy returns a separate shallow copy of n,
-// breaking any Orig link to any other nodes.
-func SepCopy(n Node) Node {
-	n = n.copy()
-	if n, ok := n.(OrigNode); ok {
-		n.SetOrig(n)
-	}
-	return n
-}
-
 // Copy returns a shallow copy of n.
-// If Orig(n) == n, then Orig(Copy(n)) == the copy.
-// Otherwise the Orig link is preserved as well.
-//
-// The specific semantics surrounding Orig are subtle but right for most uses.
-// See issues #26855 and #27765 for pitfalls.
 func Copy(n Node) Node {
-	c := n.copy()
-	if n, ok := n.(OrigNode); ok && n.Orig() == n {
-		c.(OrigNode).SetOrig(c)
-	}
-	return c
+	return n.copy()
 }
 
 // DeepCopy returns a “deep” copy of n, with its entire structure copied
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 5355edc..da5b437 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -54,7 +54,7 @@
 func (n *miniExpr) PtrInit() *Nodes       { return &n.init }
 func (n *miniExpr) SetInit(x Nodes)       { n.init = x }
 
-// An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1].
+// An AddStringExpr is a string concatenation List[0] + List[1] + ... + List[len(List)-1].
 type AddStringExpr struct {
 	miniExpr
 	List     Nodes
@@ -78,9 +78,39 @@
 }
 
 func NewAddrExpr(pos src.XPos, x Node) *AddrExpr {
+	if x == nil || x.Typecheck() != 1 {
+		base.FatalfAt(pos, "missed typecheck: %L", x)
+	}
 	n := &AddrExpr{X: x}
-	n.op = OADDR
 	n.pos = pos
+
+	switch x.Op() {
+	case OARRAYLIT, OMAPLIT, OSLICELIT, OSTRUCTLIT:
+		n.op = OPTRLIT
+
+	default:
+		n.op = OADDR
+		if r, ok := OuterValue(x).(*Name); ok && r.Op() == ONAME {
+			r.SetAddrtaken(true)
+
+			// If r is a closure variable, we need to mark its canonical
+			// variable as addrtaken too, so that closure conversion
+			// captures it by reference.
+			//
+			// Exception: if we've already marked the variable as
+			// capture-by-value, then that means this variable isn't
+			// logically modified, and we must be taking its address to pass
+			// to a runtime function that won't mutate it. In that case, we
+			// only need to make sure our own copy is addressable.
+			if r.IsClosureVar() && !r.Byval() {
+				r.Canonical().SetAddrtaken(true)
+			}
+		}
+	}
+
+	n.SetType(types.NewPtr(x.Type()))
+	n.SetTypecheck(1)
+
 	return n
 }
 
@@ -102,19 +132,27 @@
 	val constant.Value
 }
 
-func NewBasicLit(pos src.XPos, val constant.Value) Node {
+// NewBasicLit returns an OLITERAL representing val with the given type.
+func NewBasicLit(pos src.XPos, typ *types.Type, val constant.Value) Node {
+	AssertValidTypeForConst(typ, val)
+
 	n := &BasicLit{val: val}
 	n.op = OLITERAL
 	n.pos = pos
-	if k := val.Kind(); k != constant.Unknown {
-		n.SetType(idealType(k))
-	}
+	n.SetType(typ)
+	n.SetTypecheck(1)
 	return n
 }
 
 func (n *BasicLit) Val() constant.Value       { return n.val }
 func (n *BasicLit) SetVal(val constant.Value) { n.val = val }
 
+// NewConstExpr returns an OLITERAL representing val, copying the
+// position and type from orig.
+func NewConstExpr(val constant.Value, orig Node) Node {
+	return NewBasicLit(orig.Pos(), orig.Type(), val)
+}
+
 // A BinaryExpr is a binary expression X Op Y,
 // or Op(X, Y) for builtin functions that do not become calls.
 type BinaryExpr struct {
@@ -138,27 +176,27 @@
 	case OADD, OADDSTR, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE,
 		OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR,
 		OCOPY, OCOMPLEX, OUNSAFEADD, OUNSAFESLICE, OUNSAFESTRING,
-		OEFACE:
+		OMAKEFACE:
 		n.op = op
 	}
 }
 
-// A CallExpr is a function call X(Args).
+// A CallExpr is a function call Fun(Args).
 type CallExpr struct {
 	miniExpr
-	origNode
-	X         Node
+	Fun       Node
 	Args      Nodes
+	DeferAt   Node
 	RType     Node    `mknode:"-"` // see reflectdata/helpers.go
 	KeepAlive []*Name // vars to be kept alive until call returns
 	IsDDD     bool
-	NoInline  bool
+	GoDefer   bool // whether this call is part of a go or defer statement
+	NoInline  bool // whether this call must not be inlined
 }
 
 func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
-	n := &CallExpr{X: fun}
+	n := &CallExpr{Fun: fun}
 	n.pos = pos
-	n.orig = n
 	n.SetOp(op)
 	n.Args = args
 	return n
@@ -174,7 +212,7 @@
 		OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
 		ODELETE,
 		OGETG, OGETCALLERPC, OGETCALLERSP,
-		OMAKE, OMAX, OMIN, OPRINT, OPRINTN,
+		OMAKE, OMAX, OMIN, OPRINT, OPRINTLN,
 		ORECOVER, ORECOVERFP:
 		n.op = op
 	}
@@ -192,7 +230,6 @@
 // Before type-checking, the type is Ntype.
 type CompLitExpr struct {
 	miniExpr
-	origNode
 	List     Nodes // initialized values
 	RType    Node  `mknode:"-"` // *runtime._type for OMAPLIT map types
 	Prealloc *Name
@@ -209,7 +246,6 @@
 	if typ != nil {
 		n.SetType(typ)
 	}
-	n.orig = n
 	return n
 }
 
@@ -225,25 +261,6 @@
 	}
 }
 
-type ConstExpr struct {
-	miniExpr
-	origNode
-	val constant.Value
-}
-
-func NewConstExpr(val constant.Value, orig Node) Node {
-	n := &ConstExpr{val: val}
-	n.op = OLITERAL
-	n.pos = orig.Pos()
-	n.orig = orig
-	n.SetType(orig.Type())
-	n.SetTypecheck(orig.Typecheck())
-	return n
-}
-
-func (n *ConstExpr) Sym() *types.Sym     { return n.orig.Sym() }
-func (n *ConstExpr) Val() constant.Value { return n.val }
-
 // A ConvExpr is a conversion Type(X).
 // It may end up being a value or a type.
 type ConvExpr struct {
@@ -289,7 +306,7 @@
 	switch op {
 	default:
 		panic(n.no("SetOp " + op.String()))
-	case OCONV, OCONVIFACE, OCONVIDATA, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARR, OSLICE2ARRPTR:
+	case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARR, OSLICE2ARRPTR:
 		n.op = op
 	}
 }
@@ -333,7 +350,7 @@
 	return n
 }
 
-// A StructKeyExpr is an Field: Value composite literal key.
+// A StructKeyExpr is a Field: Value composite literal key.
 type StructKeyExpr struct {
 	miniExpr
 	Field *types.Field
@@ -432,15 +449,19 @@
 }
 
 // A NilExpr represents the predefined untyped constant nil.
-// (It may be copied and assigned a type, though.)
 type NilExpr struct {
 	miniExpr
 }
 
-func NewNilExpr(pos src.XPos) *NilExpr {
+func NewNilExpr(pos src.XPos, typ *types.Type) *NilExpr {
+	if typ == nil {
+		base.FatalfAt(pos, "missing type")
+	}
 	n := &NilExpr{}
 	n.pos = pos
 	n.op = ONIL
+	n.SetType(typ)
+	n.SetTypecheck(1)
 	return n
 }
 
@@ -461,20 +482,6 @@
 func (n *ParenExpr) Implicit() bool     { return n.flags&miniExprImplicit != 0 }
 func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
 
-// A RawOrigExpr represents an arbitrary Go expression as a string value.
-// When printed in diagnostics, the string value is written out exactly as-is.
-type RawOrigExpr struct {
-	miniExpr
-	Raw string
-}
-
-func NewRawOrigExpr(pos src.XPos, op Op, raw string) *RawOrigExpr {
-	n := &RawOrigExpr{Raw: raw}
-	n.pos = pos
-	n.op = op
-	return n
-}
-
 // A ResultExpr represents a direct access to a result.
 type ResultExpr struct {
 	miniExpr
@@ -498,9 +505,13 @@
 }
 
 func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *LinksymOffsetExpr {
+	if typ == nil {
+		base.FatalfAt(pos, "nil type")
+	}
 	n := &LinksymOffsetExpr{Linksym: lsym, Offset_: offset}
 	n.typ = typ
 	n.op = OLINKSYMOFFSET
+	n.SetTypecheck(1)
 	return n
 }
 
@@ -557,9 +568,8 @@
 	if n.Op() != OMETHEXPR {
 		panic(n.no("FuncName"))
 	}
-	fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel))
+	fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel), n.Type())
 	fn.Class = PFUNC
-	fn.SetType(n.Type())
 	if n.Selection.Nname != nil {
 		// TODO(austin): Nname is nil for interface method
 		// expressions (I.M), so we can't attach a Func to
@@ -664,6 +674,9 @@
 	// Runtime type information provided by walkDotType for
 	// assertions from non-empty interface to concrete type.
 	ITab Node `mknode:"-"` // *runtime.itab for Type implementing X's type
+
+	// An internal/abi.TypeAssert descriptor to pass to the runtime.
+	Descriptor *obj.LSym
 }
 
 func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr {
@@ -746,33 +759,13 @@
 	default:
 		panic(n.no("SetOp " + op.String()))
 	case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
-		OALIGNOF, OCAP, OCLEAR, OCLOSE, OIMAG, OLEN, ONEW,
-		OOFFSETOF, OPANIC, OREAL, OSIZEOF,
+		OCAP, OCLEAR, OCLOSE, OIMAG, OLEN, ONEW, OPANIC, OREAL,
 		OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR,
 		OUNSAFESTRINGDATA, OUNSAFESLICEDATA:
 		n.op = op
 	}
 }
 
-// Probably temporary: using Implicit() flag to mark generic function nodes that
-// are called to make getGfInfo analysis easier in one pre-order pass.
-func (n *InstExpr) Implicit() bool     { return n.flags&miniExprImplicit != 0 }
-func (n *InstExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
-
-// An InstExpr is a generic function or type instantiation.
-type InstExpr struct {
-	miniExpr
-	X     Node
-	Targs []Ntype
-}
-
-func NewInstExpr(pos src.XPos, op Op, x Node, targs []Ntype) *InstExpr {
-	n := &InstExpr{X: x, Targs: targs}
-	n.pos = pos
-	n.op = op
-	return n
-}
-
 func IsZero(n Node) bool {
 	switch n.Op() {
 	case ONIL:
@@ -847,6 +840,20 @@
 	return false
 }
 
+// StaticValue analyzes n to find the earliest expression that always
+// evaluates to the same value as n, which might be from an enclosing
+// function.
+//
+// For example, given:
+//
+//	var x int = g()
+//	func() {
+//		y := x
+//		*p = int(y)
+//	}
+//
+// calling StaticValue on the "int(y)" expression returns the outer
+// "g()" expression.
 func StaticValue(n Node) Node {
 	for {
 		if n.Op() == OCONVNOP {
@@ -867,14 +874,11 @@
 	}
 }
 
-// staticValue1 implements a simple SSA-like optimization. If n is a local variable
-// that is initialized and never reassigned, staticValue1 returns the initializer
-// expression. Otherwise, it returns nil.
 func staticValue1(nn Node) Node {
 	if nn.Op() != ONAME {
 		return nil
 	}
-	n := nn.(*Name)
+	n := nn.(*Name).Canonical()
 	if n.Class != PAUTO {
 		return nil
 	}
@@ -906,20 +910,22 @@
 		base.Fatalf("RHS is nil: %v", defn)
 	}
 
-	if reassigned(n) {
+	if Reassigned(n) {
 		return nil
 	}
 
 	return rhs
 }
 
-// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean
-// indicating whether the name has any assignments other than its declaration.
-// The second return value is the first such assignment encountered in the walk, if any. It is mostly
-// useful for -m output documenting the reason for inhibited optimizations.
+// Reassigned takes an ONAME node, walks the function in which it is
+// defined, and returns a boolean indicating whether the name has any
+// assignments other than its declaration.
 // NB: global variables are always considered to be re-assigned.
-// TODO: handle initial declaration not including an assignment and followed by a single assignment?
-func reassigned(name *Name) bool {
+// TODO: handle initial declaration not including an assignment and
+// followed by a single assignment?
+// NOTE: any changes made here should also be made in the corresponding
+// code in the ReassignOracle.Init method.
+func Reassigned(name *Name) bool {
 	if name.Op() != ONAME {
 		base.Fatalf("reassigned %v", name)
 	}
@@ -928,13 +934,20 @@
 		return true
 	}
 
+	if name.Addrtaken() {
+		return true // conservatively assume it's reassigned indirectly
+	}
+
 	// TODO(mdempsky): This is inefficient and becoming increasingly
 	// unwieldy. Figure out a way to generalize escape analysis's
 	// reassignment detection for use by inlining and devirtualization.
 
 	// isName reports whether n is a reference to name.
 	isName := func(x Node) bool {
-		n, ok := x.(*Name)
+		if x == nil {
+			return false
+		}
+		n, ok := OuterValue(x).(*Name)
 		return ok && n.Canonical() == name
 	}
 
@@ -953,10 +966,15 @@
 					return true
 				}
 			}
+		case OASOP:
+			n := n.(*AssignOpStmt)
+			if isName(n.X) {
+				return true
+			}
 		case OADDR:
 			n := n.(*AddrExpr)
-			if isName(OuterValue(n.X)) {
-				return true
+			if isName(n.X) {
+				base.FatalfAt(n.Pos(), "%v not marked addrtaken", name)
 			}
 		case ORANGE:
 			n := n.(*RangeStmt)
@@ -974,6 +992,23 @@
 	return Any(name.Curfn, do)
 }
 
+// StaticCalleeName returns the ONAME/PFUNC for n, if known.
+func StaticCalleeName(n Node) *Name {
+	switch n.Op() {
+	case OMETHEXPR:
+		n := n.(*SelectorExpr)
+		return MethodExprName(n)
+	case ONAME:
+		n := n.(*Name)
+		if n.Class == PFUNC {
+			return n
+		}
+	case OCLOSURE:
+		return n.(*ClosureExpr).Func.Nname
+	}
+	return nil
+}
+
 // IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation.
 var IsIntrinsicCall = func(*CallExpr) bool { return false }
 
@@ -1089,8 +1124,8 @@
 
 func ParamNames(ft *types.Type) []Node {
 	args := make([]Node, ft.NumParams())
-	for i, f := range ft.Params().FieldSlice() {
-		args[i] = AsNode(f.Nname)
+	for i, f := range ft.Params() {
+		args[i] = f.Nname.(*Name)
 	}
 	return args
 }
@@ -1109,7 +1144,7 @@
 	return sym
 }
 
-// MethodSymSuffix is like methodsym, but allows attaching a
+// MethodSymSuffix is like MethodSym, but allows attaching a
 // distinguisher suffix. To avoid collisions, the suffix must not
 // start with a letter, number, or period.
 func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
@@ -1157,6 +1192,51 @@
 	return rpkg.LookupBytes(b.Bytes())
 }
 
+// LookupMethodSelector returns the types.Sym of the selector for a method
+// named in local symbol name, as well as the types.Sym of the receiver.
+//
+// TODO(prattmic): this does not attempt to handle method suffixes (wrappers).
+func LookupMethodSelector(pkg *types.Pkg, name string) (typ, meth *types.Sym, err error) {
+	typeName, methName := splitType(name)
+	if typeName == "" {
+		return nil, nil, fmt.Errorf("%s doesn't contain type split", name)
+	}
+
+	if len(typeName) > 3 && typeName[:2] == "(*" && typeName[len(typeName)-1] == ')' {
+		// Symbol name is for a pointer receiver method. We just want
+		// the base type name.
+		typeName = typeName[2 : len(typeName)-1]
+	}
+
+	typ = pkg.Lookup(typeName)
+	meth = pkg.Selector(methName)
+	return typ, meth, nil
+}
+
+// splitType splits a local symbol name into type and method (fn). If this is a
+// free function, typ == "".
+//
+// N.B. closures and methods can be ambiguous (e.g., bar.func1). These cases
+// are returned as methods.
+func splitType(name string) (typ, fn string) {
+	// Types are split on the first dot, ignoring everything inside
+	// brackets (instantiation of type parameter, usually including
+	// "go.shape").
+	bracket := 0
+	for i, r := range name {
+		if r == '.' && bracket == 0 {
+			return name[:i], name[i+1:]
+		}
+		if r == '[' {
+			bracket++
+		}
+		if r == ']' {
+			bracket--
+		}
+	}
+	return "", name
+}
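+
+// Illustrative splits (hypothetical symbol names, following the rule above):
+//
+//	splitType("Bar.Plark")           -> ("Bar", "Plark")
+//	splitType("(*Bar).Plark")        -> ("(*Bar)", "Plark")
+//	splitType("Foo[go.shape.int].M") -> ("Foo[go.shape.int]", "M")
+//	splitType("NewBar")              -> ("", "NewBar")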
+
 // MethodExprName returns the ONAME representing the method
 // referenced by expression n, which must be a method selector,
 // method expression, or method value.
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index 0c553a9..31c6103 100644
--- a/src/cmd/compile/internal/ir/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -27,7 +27,6 @@
 	OADDR:             "&",
 	OADD:              "+",
 	OADDSTR:           "+",
-	OALIGNOF:          "unsafe.Alignof",
 	OANDAND:           "&&",
 	OANDNOT:           "&^",
 	OAND:              "&",
@@ -70,12 +69,11 @@
 	ONEW:              "new",
 	ONE:               "!=",
 	ONOT:              "!",
-	OOFFSETOF:         "unsafe.Offsetof",
 	OOROR:             "||",
 	OOR:               "|",
 	OPANIC:            "panic",
 	OPLUS:             "+",
-	OPRINTN:           "println",
+	OPRINTLN:          "println",
 	OPRINT:            "print",
 	ORANGE:            "range",
 	OREAL:             "real",
@@ -85,7 +83,6 @@
 	ORSH:              ">>",
 	OSELECT:           "select",
 	OSEND:             "<-",
-	OSIZEOF:           "unsafe.Sizeof",
 	OSUB:              "-",
 	OSWITCH:           "switch",
 	OUNSAFEADD:        "unsafe.Add",
@@ -173,7 +170,6 @@
 }
 
 var OpPrec = []int{
-	OALIGNOF:          8,
 	OAPPEND:           8,
 	OBYTES2STR:        8,
 	OARRAYLIT:         8,
@@ -188,7 +184,6 @@
 	OCLOSE:            8,
 	OCOMPLIT:          8,
 	OCONVIFACE:        8,
-	OCONVIDATA:        8,
 	OCONVNOP:          8,
 	OCONV:             8,
 	OCOPY:             8,
@@ -206,13 +201,11 @@
 	ONEW:              8,
 	ONIL:              8,
 	ONONAME:           8,
-	OOFFSETOF:         8,
 	OPANIC:            8,
 	OPAREN:            8,
-	OPRINTN:           8,
+	OPRINTLN:          8,
 	OPRINT:            8,
 	ORUNESTR:          8,
-	OSIZEOF:           8,
 	OSLICE2ARR:        8,
 	OSLICE2ARRPTR:     8,
 	OSTR2BYTES:        8,
@@ -526,12 +519,6 @@
 			return
 		}
 
-		// We always want the original, if any.
-		if o := Orig(n); o != n {
-			n = o
-			continue
-		}
-
 		// Skip implicit operations introduced during typechecking.
 		switch nn := n; nn.Op() {
 		case OADDR:
@@ -546,7 +533,7 @@
 				n = nn.X
 				continue
 			}
-		case OCONV, OCONVNOP, OCONVIFACE, OCONVIDATA:
+		case OCONV, OCONVNOP, OCONVIFACE:
 			nn := nn.(*ConvExpr)
 			if nn.Implicit() {
 				n = nn.X
@@ -567,11 +554,6 @@
 		return
 	}
 
-	if n, ok := n.(*RawOrigExpr); ok {
-		fmt.Fprint(s, n.Raw)
-		return
-	}
-
 	switch n.Op() {
 	case OPAREN:
 		n := n.(*ParenExpr)
@@ -580,46 +562,29 @@
 	case ONIL:
 		fmt.Fprint(s, "nil")
 
-	case OLITERAL: // this is a bit of a mess
-		if !exportFormat && n.Sym() != nil {
+	case OLITERAL:
+		if n.Sym() != nil {
 			fmt.Fprint(s, n.Sym())
 			return
 		}
 
-		needUnparen := false
-		if n.Type() != nil && !n.Type().IsUntyped() {
-			// Need parens when type begins with what might
-			// be misinterpreted as a unary operator: * or <-.
-			if n.Type().IsPtr() || (n.Type().IsChan() && n.Type().ChanDir() == types.Crecv) {
-				fmt.Fprintf(s, "(%v)(", n.Type())
-			} else {
-				fmt.Fprintf(s, "%v(", n.Type())
-			}
-			needUnparen = true
-		}
+		typ := n.Type()
+		val := n.Val()
 
-		if n.Type() == types.UntypedRune {
-			switch x, ok := constant.Uint64Val(n.Val()); {
-			case !ok:
-				fallthrough
-			default:
-				fmt.Fprintf(s, "('\\x00' + %v)", n.Val())
-
-			case x < utf8.RuneSelf:
+		// Special case for rune constants.
+		if typ == types.RuneType || typ == types.UntypedRune {
+			if x, ok := constant.Uint64Val(val); ok && x <= utf8.MaxRune {
 				fmt.Fprintf(s, "%q", x)
-
-			case x < 1<<16:
-				fmt.Fprintf(s, "'\\u%04x'", x)
-
-			case x <= utf8.MaxRune:
-				fmt.Fprintf(s, "'\\U%08x'", x)
+				return
 			}
-		} else {
-			fmt.Fprint(s, types.FmtConst(n.Val(), s.Flag('#')))
 		}
 
-		if needUnparen {
-			fmt.Fprintf(s, ")")
+		// Only include typ if it's neither the default nor untyped type
+		// for the constant value.
+		if k := val.Kind(); typ == types.Types[types.DefaultKinds[k]] || typ == types.UntypedTypes[k] {
+			fmt.Fprint(s, val)
+		} else {
+			fmt.Fprintf(s, "%v(%v)", typ, val)
 		}
 
 	case ODCLFUNC:
@@ -661,33 +626,17 @@
 		}
 		fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func.Body)
 
-	case OCOMPLIT:
-		n := n.(*CompLitExpr)
-		if !exportFormat {
-			if n.Implicit() {
-				fmt.Fprintf(s, "... argument")
-				return
-			}
-			if typ := n.Type(); typ != nil {
-				fmt.Fprintf(s, "%v{%s}", typ, ellipsisIf(len(n.List) != 0))
-				return
-			}
-			fmt.Fprint(s, "composite literal")
-			return
-		}
-		fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List)
-
 	case OPTRLIT:
 		n := n.(*AddrExpr)
 		fmt.Fprintf(s, "&%v", n.X)
 
-	case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
+	case OCOMPLIT, OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
 		n := n.(*CompLitExpr)
-		if !exportFormat {
-			fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0))
+		if n.Implicit() {
+			fmt.Fprintf(s, "... argument")
 			return
 		}
-		fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List)
+		fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0))
 
 	case OKEY:
 		n := n.(*KeyExpr)
@@ -758,7 +707,6 @@
 
 	case OCONV,
 		OCONVIFACE,
-		OCONVIDATA,
 		OCONVNOP,
 		OBYTES2STR,
 		ORUNES2STR,
@@ -782,10 +730,7 @@
 		OCLOSE,
 		OLEN,
 		ONEW,
-		OPANIC,
-		OALIGNOF,
-		OOFFSETOF,
-		OSIZEOF:
+		OPANIC:
 		n := n.(*UnaryExpr)
 		fmt.Fprintf(s, "%v(%v)", n.Op(), n.X)
 
@@ -796,7 +741,7 @@
 		OMIN,
 		ORECOVER,
 		OPRINT,
-		OPRINTN:
+		OPRINTLN:
 		n := n.(*CallExpr)
 		if n.IsDDD {
 			fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.Args)
@@ -806,7 +751,7 @@
 
 	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
 		n := n.(*CallExpr)
-		exprFmt(n.X, s, nprec)
+		exprFmt(n.Fun, s, nprec)
 		if n.IsDDD {
 			fmt.Fprintf(s, "(%.v...)", n.Args)
 			return
@@ -1184,11 +1129,6 @@
 				dumpNode(w, cv, depth+1)
 			}
 		}
-		if len(fn.Enter) > 0 {
-			indent(w, depth)
-			fmt.Fprintf(w, "%+v-Enter", n.Op())
-			dumpNodes(w, fn.Enter, depth+1)
-		}
 		if len(fn.Body) > 0 {
 			indent(w, depth)
 			fmt.Fprintf(w, "%+v-body", n.Op())
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index 5c41893..303c5e4 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -12,6 +12,7 @@
 	"cmd/internal/src"
 	"fmt"
 	"strings"
+	"unicode/utf8"
 )
 
 // A Func corresponds to a single function in a Go program
@@ -56,18 +57,11 @@
 	Nname    *Name        // ONAME node
 	OClosure *ClosureExpr // OCLOSURE node
 
-	// Extra entry code for the function. For example, allocate and initialize
-	// memory for escaping parameters.
-	Enter Nodes
-	Exit  Nodes
-
 	// ONAME nodes for all params/locals for this func/closure, does NOT
 	// include closurevars until transforming closures during walk.
 	// Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs,
 	// with PPARAMs and PPARAMOUTs in order corresponding to the function signature.
-	// However, as anonymous or blank PPARAMs are not actually declared,
-	// they are omitted from Dcl.
-	// Anonymous and blank PPARAMOUTs are declared as ~rNN and ~bNN Names, respectively.
+	// Anonymous and blank params are declared as ~pNN (for PPARAMs) and ~rNN (for PPARAMOUTs).
 	Dcl []*Name
 
 	// ClosureVars lists the free variables that are used within a
@@ -96,10 +90,15 @@
 
 	Inl *Inline
 
-	// Closgen tracks how many closures have been generated within
-	// this function. Used by closurename for creating unique
+	// funcLitGen and goDeferGen track how many closures have been
+	// created in this function for function literals and go/defer
+	// wrappers, respectively. Used by closureName for creating unique
 	// function names.
-	Closgen int32
+	//
+	// Tracking goDeferGen separately avoids wrappers throwing off
+	// function literal numbering (e.g., runtime/trace_test.TestTraceSymbolize.func11).
+	funcLitGen int32
+	goDeferGen int32
 
 	Label int32 // largest auto-generated label in this function
 
@@ -127,7 +126,7 @@
 	NumDefers  int32 // number of defer calls in the function
 	NumReturns int32 // number of explicit returns in the function
 
-	// nwbrCalls records the LSyms of functions called by this
+	// NWBRCalls records the LSyms of functions called by this
 	// function for go:nowritebarrierrec analysis. Only filled in
 	// if nowritebarrierrecCheck != nil.
 	NWBRCalls *[]SymAndPos
@@ -147,14 +146,29 @@
 	Name   string
 }
 
-func NewFunc(pos src.XPos) *Func {
-	f := new(Func)
-	f.pos = pos
-	f.op = ODCLFUNC
+// NewFunc returns a new Func with the given name and type.
+//
+// fpos is the position of the "func" token, and npos is the position
+// of the name identifier.
+//
+// TODO(mdempsky): I suspect there's no need for separate fpos and
+// npos.
+func NewFunc(fpos, npos src.XPos, sym *types.Sym, typ *types.Type) *Func {
+	name := NewNameAt(npos, sym, typ)
+	name.Class = PFUNC
+	sym.SetFunc(true)
+
+	fn := &Func{Nname: name}
+	fn.pos = fpos
+	fn.op = ODCLFUNC
 	// Most functions are ABIInternal. The importer or symabis
 	// pass may override this.
-	f.ABI = obj.ABIInternal
-	return f
+	fn.ABI = obj.ABIInternal
+	fn.SetTypecheck(1)
+
+	name.Func = fn
+
+	return fn
 }
 
 func (f *Func) isStmt() {}
@@ -173,12 +187,16 @@
 type Inline struct {
 	Cost int32 // heuristic cost of inlining this function
 
-	// Copies of Func.Dcl and Func.Body for use during inlining. Copies are
-	// needed because the function's dcl/body may be changed by later compiler
-	// transformations. These fields are also populated when a function from
-	// another package is imported.
-	Dcl  []*Name
-	Body []Node
+	// Copy of Func.Dcl for use during inlining. This copy is needed
+	// because the function's Dcl may be changed by later compiler
+	// transformations. This field is also populated when a function
+	// from another package is imported and inlined.
+	Dcl     []*Name
+	HaveDcl bool // whether we've loaded Dcl
+
+	// Function properties, encoded as a string (these are used for
+	// making inlining decisions). See cmd/compile/internal/inline/inlheur.
+	Properties string
 
 	// CanDelayResults reports whether it's safe for the inliner to delay
 	// initializing the result parameters until immediately before the
@@ -200,11 +218,10 @@
 type ScopeID int32
 
 const (
-	funcDupok         = 1 << iota // duplicate definitions ok
-	funcWrapper                   // hide frame from users (elide in tracebacks, don't count as a frame for recover())
-	funcABIWrapper                // is an ABI wrapper (also set flagWrapper)
-	funcNeedctxt                  // function uses context register (has closure variables)
-	funcReflectMethod             // function calls reflect.Type.Method or MethodByName
+	funcDupok      = 1 << iota // duplicate definitions ok
+	funcWrapper                // hide frame from users (elide in tracebacks, don't count as a frame for recover())
+	funcABIWrapper             // is an ABI wrapper (also set flagWrapper)
+	funcNeedctxt               // function uses context register (has closure variables)
 	// true if closure inside a function; false if a simple function or a
 	// closure in a global variable initialization
 	funcIsHiddenClosure
@@ -212,10 +229,9 @@
 	funcHasDefer                 // contains a defer statement
 	funcNilCheckDisabled         // disable nil checks when compiling this function
 	funcInlinabilityChecked      // inliner has already determined whether the function is inlinable
-	funcExportInline             // include inline body in export data
-	funcInstrumentBody           // add race/msan/asan instrumentation during SSA construction
+	funcNeverReturns             // function never returns (in most cases calls panic(), os.Exit(), or equivalent)
 	funcOpenCodedDeferDisallowed // can't do open-coded defers
-	funcClosureCalled            // closure is only immediately called; used by escape analysis
+	funcClosureResultsLost       // closure is called indirectly and we lost track of its results; used by escape analysis
 	funcPackageInit              // compiler emitted .init func for package
 )
 
@@ -228,32 +244,28 @@
 func (f *Func) Wrapper() bool                  { return f.flags&funcWrapper != 0 }
 func (f *Func) ABIWrapper() bool               { return f.flags&funcABIWrapper != 0 }
 func (f *Func) Needctxt() bool                 { return f.flags&funcNeedctxt != 0 }
-func (f *Func) ReflectMethod() bool            { return f.flags&funcReflectMethod != 0 }
 func (f *Func) IsHiddenClosure() bool          { return f.flags&funcIsHiddenClosure != 0 }
 func (f *Func) IsDeadcodeClosure() bool        { return f.flags&funcIsDeadcodeClosure != 0 }
 func (f *Func) HasDefer() bool                 { return f.flags&funcHasDefer != 0 }
 func (f *Func) NilCheckDisabled() bool         { return f.flags&funcNilCheckDisabled != 0 }
 func (f *Func) InlinabilityChecked() bool      { return f.flags&funcInlinabilityChecked != 0 }
-func (f *Func) ExportInline() bool             { return f.flags&funcExportInline != 0 }
-func (f *Func) InstrumentBody() bool           { return f.flags&funcInstrumentBody != 0 }
+func (f *Func) NeverReturns() bool             { return f.flags&funcNeverReturns != 0 }
 func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
-func (f *Func) ClosureCalled() bool            { return f.flags&funcClosureCalled != 0 }
+func (f *Func) ClosureResultsLost() bool       { return f.flags&funcClosureResultsLost != 0 }
 func (f *Func) IsPackageInit() bool            { return f.flags&funcPackageInit != 0 }
 
 func (f *Func) SetDupok(b bool)                    { f.flags.set(funcDupok, b) }
 func (f *Func) SetWrapper(b bool)                  { f.flags.set(funcWrapper, b) }
 func (f *Func) SetABIWrapper(b bool)               { f.flags.set(funcABIWrapper, b) }
 func (f *Func) SetNeedctxt(b bool)                 { f.flags.set(funcNeedctxt, b) }
-func (f *Func) SetReflectMethod(b bool)            { f.flags.set(funcReflectMethod, b) }
 func (f *Func) SetIsHiddenClosure(b bool)          { f.flags.set(funcIsHiddenClosure, b) }
 func (f *Func) SetIsDeadcodeClosure(b bool)        { f.flags.set(funcIsDeadcodeClosure, b) }
 func (f *Func) SetHasDefer(b bool)                 { f.flags.set(funcHasDefer, b) }
 func (f *Func) SetNilCheckDisabled(b bool)         { f.flags.set(funcNilCheckDisabled, b) }
 func (f *Func) SetInlinabilityChecked(b bool)      { f.flags.set(funcInlinabilityChecked, b) }
-func (f *Func) SetExportInline(b bool)             { f.flags.set(funcExportInline, b) }
-func (f *Func) SetInstrumentBody(b bool)           { f.flags.set(funcInstrumentBody, b) }
+func (f *Func) SetNeverReturns(b bool)             { f.flags.set(funcNeverReturns, b) }
 func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
-func (f *Func) SetClosureCalled(b bool)            { f.flags.set(funcClosureCalled, b) }
+func (f *Func) SetClosureResultsLost(b bool)       { f.flags.set(funcClosureResultsLost, b) }
 func (f *Func) SetIsPackageInit(b bool)            { f.flags.set(funcPackageInit, b) }
 
 func (f *Func) SetWBPos(pos src.XPos) {
@@ -301,12 +313,65 @@
 	return objabi.PathToPrefix(pkg.Path) + "." + s.Name
 }
 
-// IsEqOrHashFunc reports whether f is type eq/hash function.
-func IsEqOrHashFunc(f *Func) bool {
-	if f == nil || f.Nname == nil {
-		return false
+// ParseLinkFuncName parses a symbol name (as returned from LinkFuncName) back
+// to the package path and local symbol name.
+func ParseLinkFuncName(name string) (pkg, sym string, err error) {
+	pkg, sym = splitPkg(name)
+	if pkg == "" {
+		return "", "", fmt.Errorf("no package path in name")
 	}
-	return types.IsTypePkg(f.Sym().Pkg)
+
+	pkg, err = objabi.PrefixToPath(pkg) // unescape
+	if err != nil {
+		return "", "", fmt.Errorf("malformed package path: %v", err)
+	}
+
+	return pkg, sym, nil
+}
+
+// Borrowed from x/mod.
+func modPathOK(r rune) bool {
+	if r < utf8.RuneSelf {
+		return r == '-' || r == '.' || r == '_' || r == '~' ||
+			'0' <= r && r <= '9' ||
+			'A' <= r && r <= 'Z' ||
+			'a' <= r && r <= 'z'
+	}
+	return false
+}
+
+func escapedImportPathOK(r rune) bool {
+	return modPathOK(r) || r == '+' || r == '/' || r == '%'
+}
+
+// splitPkg splits the full linker symbol name into package and local symbol
+// name.
+func splitPkg(name string) (pkgpath, sym string) {
+	// The package-sym split is at the first dot after the last / that comes before
+	// any characters illegal in a package path.
+
+	lastSlashIdx := 0
+	for i, r := range name {
+		// Catches cases like:
+		// * example.foo[sync/atomic.Uint64].
+		// * example%2ecom.foo[sync/atomic.Uint64].
+		//
+		// Note that name is still escaped; unescape occurs after splitPkg.
+		if !escapedImportPathOK(r) {
+			break
+		}
+		if r == '/' {
+			lastSlashIdx = i
+		}
+	}
+	for i := lastSlashIdx; i < len(name); i++ {
+		r := name[i]
+		if r == '.' {
+			return name[:i], name[i+1:]
+		}
+	}
+
+	return "", name
 }
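+
+// Illustrative splits (hypothetical symbol names): splitPkg("example.com/foo.Bar")
+// yields ("example.com/foo", "Bar"), and splitPkg("example.com/foo.Bar[sync/atomic.Uint64].M")
+// yields ("example.com/foo", "Bar[sync/atomic.Uint64].M"), since scanning stops
+// at the '[' and so the '/' inside the brackets never moves the split point.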
 
 var CurFunc *Func
@@ -326,16 +391,6 @@
 	return s.Name + "·f"
 }
 
-// MarkFunc marks a node as a function.
-func MarkFunc(n *Name) {
-	if n.Op() != ONAME || n.Class != Pxxx {
-		base.FatalfAt(n.Pos(), "expected ONAME/Pxxx node, got %v (%v/%v)", n, n.Op(), n.Class)
-	}
-
-	n.Class = PFUNC
-	n.Sym().SetFunc(true)
-}
-
 // ClosureDebugRuntimeCheck applies boilerplate checks for debug flags
 // and compiling runtime.
 func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
@@ -361,25 +416,35 @@
 var globClosgen int32
 
 // closureName generates a new unique name for a closure within outerfn at pos.
-func closureName(outerfn *Func, pos src.XPos) *types.Sym {
+func closureName(outerfn *Func, pos src.XPos, why Op) *types.Sym {
 	pkg := types.LocalPkg
 	outer := "glob."
-	prefix := "func"
+	var prefix string
+	switch why {
+	default:
+		base.FatalfAt(pos, "closureName: bad Op: %v", why)
+	case OCLOSURE:
+		if outerfn == nil || outerfn.OClosure == nil {
+			prefix = "func"
+		}
+	case OGO:
+		prefix = "gowrap"
+	case ODEFER:
+		prefix = "deferwrap"
+	}
 	gen := &globClosgen
 
-	if outerfn != nil {
-		if outerfn.OClosure != nil {
-			prefix = ""
-		}
-
+	// There may be multiple functions named "_". In those
+	// cases, we can't use their individual Closgens as it
+	// would lead to name clashes.
+	if outerfn != nil && !IsBlank(outerfn.Nname) {
 		pkg = outerfn.Sym().Pkg
 		outer = FuncName(outerfn)
 
-		// There may be multiple functions named "_". In those
-		// cases, we can't use their individual Closgens as it
-		// would lead to name clashes.
-		if !IsBlank(outerfn.Nname) {
-			gen = &outerfn.Closgen
+		if why == OCLOSURE {
+			gen = &outerfn.funcLitGen
+		} else {
+			gen = &outerfn.goDeferGen
 		}
 	}
 
@@ -398,80 +463,136 @@
 	return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
 }
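
For orientation, a small source-level sketch (illustrative only, not part of this change) of where each prefix comes from; the numeric suffixes are drawn from the funcLitGen/goDeferGen counters above (or from globClosgen at package scope), so the exact numbers depend on compilation order.

package main

func work(i int) {}

func main() {
	f := func() {} // function literal: "func" prefix, e.g. a symbol ending in .func1
	go work(1)     // go statement with arguments: "gowrap" prefix, e.g. .gowrap1
	defer work(2)  // defer with arguments: "deferwrap" prefix, e.g. .deferwrap1
	f()
}
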
 
-// NewClosureFunc creates a new Func to represent a function literal.
-// If hidden is true, then the closure is marked hidden (i.e., as a
-// function literal contained within another function, rather than a
-// package-scope variable initialization expression).
-func NewClosureFunc(pos src.XPos, hidden bool) *Func {
-	fn := NewFunc(pos)
-	fn.SetIsHiddenClosure(hidden)
+// NewClosureFunc creates a new Func to represent a function literal
+// with the given type.
+//
+// fpos is the position used for the underlying ODCLFUNC and ONAME,
+// whereas cpos is the position used for the OCLOSURE. They're
+// separate because in the presence of inlining, the OCLOSURE node
+// should have an inline-adjusted position, whereas the ODCLFUNC and
+// ONAME must not.
+//
+// outerfn is the enclosing function, if any. The returned function is
+// appended to pkg.Funcs.
+//
+// why is the reason we're generating this Func. It can be OCLOSURE
+// (for a normal function literal) or OGO or ODEFER (for wrapping a
+// call expression that has parameters or results).
+func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func, pkg *Package) *Func {
+	fn := NewFunc(fpos, fpos, closureName(outerfn, cpos, why), typ)
+	fn.SetIsHiddenClosure(outerfn != nil)
 
-	fn.Nname = NewNameAt(pos, BlankNode.Sym())
-	fn.Nname.Func = fn
+	clo := &ClosureExpr{Func: fn}
+	clo.op = OCLOSURE
+	clo.pos = cpos
+	clo.SetType(typ)
+	clo.SetTypecheck(1)
+	fn.OClosure = clo
+
 	fn.Nname.Defn = fn
-
-	fn.OClosure = &ClosureExpr{Func: fn}
-	fn.OClosure.op = OCLOSURE
-	fn.OClosure.pos = pos
+	pkg.Funcs = append(pkg.Funcs, fn)
 
 	return fn
 }
 
-// NameClosure generates a unique for the given function literal,
-// which must have appeared within outerfn.
-func NameClosure(clo *ClosureExpr, outerfn *Func) {
-	fn := clo.Func
-	if fn.IsHiddenClosure() != (outerfn != nil) {
-		base.FatalfAt(clo.Pos(), "closure naming inconsistency: hidden %v, but outer %v", fn.IsHiddenClosure(), outerfn)
+// IsFuncPCIntrinsic reports whether n is a direct call of an internal/abi.FuncPCABIxxx function.
+func IsFuncPCIntrinsic(n *CallExpr) bool {
+	if n.Op() != OCALLFUNC || n.Fun.Op() != ONAME {
+		return false
 	}
-
-	name := fn.Nname
-	if !IsBlank(name) {
-		base.FatalfAt(clo.Pos(), "closure already named: %v", name)
-	}
-
-	name.SetSym(closureName(outerfn, clo.Pos()))
-	MarkFunc(name)
+	fn := n.Fun.(*Name).Sym()
+	return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") &&
+		fn.Pkg.Path == "internal/abi"
 }
 
-// UseClosure checks that the given function literal has been setup
-// correctly, and then returns it as an expression.
-// It must be called after clo.Func.ClosureVars has been set.
-func UseClosure(clo *ClosureExpr, pkg *Package) Node {
-	fn := clo.Func
-	name := fn.Nname
+// IsIfaceOfFunc inspects whether n is an interface conversion from a direct
+// reference of a func. If so, it returns the referenced Func; otherwise nil.
+//
+// This is only usable before walk.walkConvertInterface, which rewrites the
+// conversion to an OMAKEFACE.
+func IsIfaceOfFunc(n Node) *Func {
+	if n, ok := n.(*ConvExpr); ok && n.Op() == OCONVIFACE {
+		if name, ok := n.X.(*Name); ok && name.Op() == ONAME && name.Class == PFUNC {
+			return name.Func
+		}
+	}
+	return nil
+}
 
-	if IsBlank(name) {
-		base.FatalfAt(fn.Pos(), "unnamed closure func: %v", fn)
-	}
-	// Caution: clo.Typecheck() is still 0 when UseClosure is called by
-	// tcClosure.
-	if fn.Typecheck() != 1 || name.Typecheck() != 1 {
-		base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
-	}
-	if clo.Type() == nil || name.Type() == nil {
-		base.FatalfAt(fn.Pos(), "missing types: %v", fn)
-	}
-	if !types.Identical(clo.Type(), name.Type()) {
-		base.FatalfAt(fn.Pos(), "mismatched types: %v", fn)
+// FuncPC returns a uintptr-typed expression that evaluates to the PC of a
+// function, as returned by internal/abi.FuncPC{ABI0,ABIInternal}.
+//
+// n should be a Node of an interface type, as is passed to
+// internal/abi.FuncPC{ABI0,ABIInternal}.
+//
+// TODO(prattmic): Since n is simply an interface{} there is no assertion that
+// it is actually a function at all. Perhaps we should emit a runtime type
+// assertion?
+func FuncPC(pos src.XPos, n Node, wantABI obj.ABI) Node {
+	if !n.Type().IsInterface() {
+		base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s expects an interface value, got %v", wantABI, n.Type())
 	}
 
-	if base.Flag.W > 1 {
-		s := fmt.Sprintf("new closure func: %v", fn)
-		Dump(s, fn)
+	if fn := IsIfaceOfFunc(n); fn != nil {
+		name := fn.Nname
+		abi := fn.ABI
+		if abi != wantABI {
+			base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s expects an %v function, %s is defined as %v", wantABI, wantABI, name.Sym().Name, abi)
+		}
+		var e Node = NewLinksymExpr(pos, name.Sym().LinksymABI(abi), types.Types[types.TUINTPTR])
+		e = NewAddrExpr(pos, e)
+		e.SetType(types.Types[types.TUINTPTR].PtrTo())
+		e = NewConvExpr(pos, OCONVNOP, types.Types[types.TUINTPTR], e)
+		e.SetTypecheck(1)
+		return e
+	}
+	// fn is not a defined function. It must be ABIInternal.
+	// Read the address from func value, i.e. *(*uintptr)(idata(fn)).
+	if wantABI != obj.ABIInternal {
+		base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s does not accept func expression, which is ABIInternal", wantABI)
+	}
+	var e Node = NewUnaryExpr(pos, OIDATA, n)
+	e.SetType(types.Types[types.TUINTPTR].PtrTo())
+	e.SetTypecheck(1)
+	e = NewStarExpr(pos, e)
+	e.SetType(types.Types[types.TUINTPTR])
+	e.SetTypecheck(1)
+	return e
+}
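
For context, a hedged sketch of the caller side that this lowers (illustrative only; internal/abi is importable only from within the standard library, so the package name here is hypothetical):

package sketch

import "internal/abi"

func callback() {}

// A direct reference to a defined Go (ABIInternal) function takes the
// IsIfaceOfFunc branch above and compiles down to the address of the
// function's linker symbol rather than a load through a func value.
var callbackPC = abi.FuncPCABIInternal(callback)
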
+
+// DeclareParams creates Names for all of the parameters in fn's
+// signature and adds them to fn.Dcl.
+//
+// If setNname is true, then it also sets types.Field.Nname for each
+// parameter.
+func (fn *Func) DeclareParams(setNname bool) {
+	if fn.Dcl != nil {
+		base.FatalfAt(fn.Pos(), "%v already has Dcl", fn)
 	}
 
-	if pkg != nil {
-		pkg.Decls = append(pkg.Decls, fn)
+	declareParams := func(params []*types.Field, ctxt Class, prefix string, offset int) {
+		for i, param := range params {
+			sym := param.Sym
+			if sym == nil || sym.IsBlank() {
+				sym = fn.Sym().Pkg.LookupNum(prefix, i)
+			}
+
+			name := NewNameAt(param.Pos, sym, param.Type)
+			name.Class = ctxt
+			name.Curfn = fn
+			fn.Dcl[offset+i] = name
+
+			if setNname {
+				param.Nname = name
+			}
+		}
 	}
 
-	if false && IsTrivialClosure(clo) {
-		// TODO(mdempsky): Investigate if we can/should optimize this
-		// case. walkClosure already handles it later, but it could be
-		// useful to recognize earlier (e.g., it might allow multiple
-		// inlined calls to a function to share a common trivial closure
-		// func, rather than cloning it for each inlined call).
-	}
+	sig := fn.Type()
+	params := sig.RecvParams()
+	results := sig.Results()
 
-	return clo
+	fn.Dcl = make([]*Name, len(params)+len(results))
+	declareParams(params, PPARAM, "~p", 0)
+	declareParams(results, PPARAMOUT, "~r", len(params))
 }
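
A minimal usage sketch for the helpers above (the wrapper below is hypothetical and only meaningful inside cmd/compile, where these internal packages are importable; NewFunc, DeclareParams, and Package.Funcs are the pieces introduced in this file):

package sketch // hypothetical

import (
	"cmd/compile/internal/ir"
	"cmd/compile/internal/types"
	"cmd/internal/src"
)

// newSyntheticFunc shows the expected call order: build the Func with its
// signature, materialize parameter/result Names, then queue it for compilation.
func newSyntheticFunc(pos src.XPos, sym *types.Sym, sig *types.Type, pkg *ir.Package) *ir.Func {
	fn := ir.NewFunc(pos, pos, sym, sig)
	fn.DeclareParams(true) // fills fn.Dcl, using ~p0/~r0 style names for blank or unnamed params
	pkg.Funcs = append(pkg.Funcs, fn)
	return fn
}
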
diff --git a/src/cmd/compile/internal/ir/func_test.go b/src/cmd/compile/internal/ir/func_test.go
new file mode 100644
index 0000000..5b40c02
--- /dev/null
+++ b/src/cmd/compile/internal/ir/func_test.go
@@ -0,0 +1,82 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"testing"
+)
+
+func TestSplitPkg(t *testing.T) {
+	tests := []struct {
+		in  string
+		pkg string
+		sym string
+	}{
+		{
+			in:  "foo.Bar",
+			pkg: "foo",
+			sym: "Bar",
+		},
+		{
+			in:  "foo/bar.Baz",
+			pkg: "foo/bar",
+			sym: "Baz",
+		},
+		{
+			in:  "memeqbody",
+			pkg: "",
+			sym: "memeqbody",
+		},
+		{
+			in:  `example%2ecom.Bar`,
+			pkg: `example%2ecom`,
+			sym: "Bar",
+		},
+		{
+			// Not a real generated symbol name, but easier to catch the general parameter form.
+			in:  `foo.Bar[sync/atomic.Uint64]`,
+			pkg: `foo`,
+			sym: "Bar[sync/atomic.Uint64]",
+		},
+		{
+			in:  `example%2ecom.Bar[sync/atomic.Uint64]`,
+			pkg: `example%2ecom`,
+			sym: "Bar[sync/atomic.Uint64]",
+		},
+		{
+			in:  `gopkg.in/yaml%2ev3.Bar[sync/atomic.Uint64]`,
+			pkg: `gopkg.in/yaml%2ev3`,
+			sym: "Bar[sync/atomic.Uint64]",
+		},
+		{
+			// This one is a real symbol name.
+			in:  `foo.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`,
+			pkg: `foo`,
+			sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]",
+		},
+		{
+			in:  `example%2ecom.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`,
+			pkg: `example%2ecom`,
+			sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]",
+		},
+		{
+			in:  `gopkg.in/yaml%2ev3.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`,
+			pkg: `gopkg.in/yaml%2ev3`,
+			sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]",
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.in, func(t *testing.T) {
+			pkg, sym := splitPkg(tc.in)
+			if pkg != tc.pkg {
+				t.Errorf("splitPkg(%q) got pkg %q want %q", tc.in, pkg, tc.pkg)
+			}
+			if sym != tc.sym {
+				t.Errorf("splitPkg(%q) got sym %q want %q", tc.in, sym, tc.sym)
+			}
+		})
+	}
+}
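
ParseLinkFuncName composes splitPkg with the objabi unescape step; a hedged companion sketch in the same package (illustrative, not part of the change — the expected values follow from the %2e escaping convention exercised above):

func TestParseLinkFuncNameSketch(t *testing.T) {
	pkg, sym, err := ParseLinkFuncName("gopkg.in/yaml%2ev3.Unmarshal")
	if err != nil || pkg != "gopkg.in/yaml.v3" || sym != "Unmarshal" {
		t.Fatalf("ParseLinkFuncName: got (%q, %q, %v)", pkg, sym, err)
	}
}
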
diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go
index 716e843..ca78a03 100644
--- a/src/cmd/compile/internal/ir/mknode.go
+++ b/src/cmd/compile/internal/ir/mknode.go
@@ -335,9 +335,9 @@
 }
 
 func generateHelpers() {
-	for _, typ := range []string{"CaseClause", "CommClause", "Name", "Node", "Ntype"} {
+	for _, typ := range []string{"CaseClause", "CommClause", "Name", "Node"} {
 		ptr := "*"
-		if typ == "Node" || typ == "Ntype" {
+		if typ == "Node" {
 			ptr = "" // interfaces don't need *
 		}
 		fmt.Fprintf(&buf, "\n")
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
index c6eff27..2844c0b 100644
--- a/src/cmd/compile/internal/ir/name.go
+++ b/src/cmd/compile/internal/ir/name.go
@@ -47,7 +47,7 @@
 	Embed     *[]Embed    // list of embedded files, for ONAME var
 
 	// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
-	// For a closure var, the ONAME node of the outer captured variable.
+	// For a closure var, the ONAME node of the original (outermost) captured variable.
 	// For the case-local variables of a type switch, the type switch guard (OTYPESW).
 	// For a range variable, the range statement (ORANGE)
 	// For a recv variable in a case of a select statement, the receive assignment (OSELRECV2)
@@ -59,77 +59,9 @@
 
 	Heapaddr *Name // temp holding heap address of param
 
-	// ONAME closure linkage
-	// Consider:
-	//
-	//	func f() {
-	//		x := 1 // x1
-	//		func() {
-	//			use(x) // x2
-	//			func() {
-	//				use(x) // x3
-	//				--- parser is here ---
-	//			}()
-	//		}()
-	//	}
-	//
-	// There is an original declaration of x and then a chain of mentions of x
-	// leading into the current function. Each time x is mentioned in a new closure,
-	// we create a variable representing x for use in that specific closure,
-	// since the way you get to x is different in each closure.
-	//
-	// Let's number the specific variables as shown in the code:
-	// x1 is the original x, x2 is when mentioned in the closure,
-	// and x3 is when mentioned in the closure in the closure.
-	//
-	// We keep these linked (assume N > 1):
-	//
-	//   - x1.Defn = original declaration statement for x (like most variables)
-	//   - x1.Innermost = current innermost closure x (in this case x3), or nil for none
-	//   - x1.IsClosureVar() = false
-	//
-	//   - xN.Defn = x1, N > 1
-	//   - xN.IsClosureVar() = true, N > 1
-	//   - x2.Outer = nil
-	//   - xN.Outer = x(N-1), N > 2
-	//
-	//
-	// When we look up x in the symbol table, we always get x1.
-	// Then we can use x1.Innermost (if not nil) to get the x
-	// for the innermost known closure function,
-	// but the first reference in a closure will find either no x1.Innermost
-	// or an x1.Innermost with .Funcdepth < Funcdepth.
-	// In that case, a new xN must be created, linked in with:
-	//
-	//     xN.Defn = x1
-	//     xN.Outer = x1.Innermost
-	//     x1.Innermost = xN
-	//
-	// When we finish the function, we'll process its closure variables
-	// and find xN and pop it off the list using:
-	//
-	//     x1 := xN.Defn
-	//     x1.Innermost = xN.Outer
-	//
-	// We leave x1.Innermost set so that we can still get to the original
-	// variable quickly. Not shown here, but once we're
-	// done parsing a function and no longer need xN.Outer for the
-	// lexical x reference links as described above, funcLit
-	// recomputes xN.Outer as the semantic x reference link tree,
-	// even filling in x in intermediate closures that might not
-	// have mentioned it along the way to inner closures that did.
-	// See funcLit for details.
-	//
-	// During the eventual compilation, then, for closure variables we have:
-	//
-	//     xN.Defn = original variable
-	//     xN.Outer = variable captured in next outward scope
-	//                to make closure where xN appears
-	//
-	// Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
-	// and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
-	Innermost *Name
-	Outer     *Name
+	// Outer points to the immediately enclosing function's copy of this
+	// closure variable. If not a closure variable, then Outer is nil.
+	Outer *Name
 }
 
 func (n *Name) isExpr() {}
@@ -147,11 +79,39 @@
 
 // NewNameAt returns a new ONAME Node associated with symbol s at position pos.
 // The caller is responsible for setting Curfn.
-func NewNameAt(pos src.XPos, sym *types.Sym) *Name {
+func NewNameAt(pos src.XPos, sym *types.Sym, typ *types.Type) *Name {
 	if sym == nil {
 		base.Fatalf("NewNameAt nil")
 	}
-	return newNameAt(pos, ONAME, sym)
+	n := newNameAt(pos, ONAME, sym)
+	if typ != nil {
+		n.SetType(typ)
+		n.SetTypecheck(1)
+	}
+	return n
+}
+
+// NewBuiltin returns a new Name representing a builtin function,
+// either predeclared or from package unsafe.
+func NewBuiltin(sym *types.Sym, op Op) *Name {
+	n := newNameAt(src.NoXPos, ONAME, sym)
+	n.BuiltinOp = op
+	n.SetTypecheck(1)
+	sym.Def = n
+	return n
+}
+
+// NewLocal returns a new function-local variable with the given name and type.
+func (fn *Func) NewLocal(pos src.XPos, sym *types.Sym, typ *types.Type) *Name {
+	if fn.Dcl == nil {
+		base.FatalfAt(pos, "must call DeclParams on %v first", fn)
+	}
+
+	n := NewNameAt(pos, sym, typ)
+	n.Class = PAUTO
+	n.Curfn = fn
+	fn.Dcl = append(fn.Dcl, n)
+	return n
 }
 
 // NewDeclNameAt returns a new Name associated with symbol s at position pos.
@@ -176,6 +136,7 @@
 	}
 	n := newNameAt(pos, OLITERAL, sym)
 	n.SetType(typ)
+	n.SetTypecheck(1)
 	n.SetVal(val)
 	return n
 }
@@ -189,18 +150,12 @@
 	return n
 }
 
-func (n *Name) Name() *Name         { return n }
-func (n *Name) Sym() *types.Sym     { return n.sym }
-func (n *Name) SetSym(x *types.Sym) { n.sym = x }
-func (n *Name) SubOp() Op           { return n.BuiltinOp }
-func (n *Name) SetSubOp(x Op)       { n.BuiltinOp = x }
-func (n *Name) SetFunc(x *Func)     { n.Func = x }
-func (n *Name) Offset() int64       { panic("Name.Offset") }
-func (n *Name) SetOffset(x int64) {
-	if x != 0 {
-		panic("Name.SetOffset")
-	}
-}
+func (n *Name) Name() *Name            { return n }
+func (n *Name) Sym() *types.Sym        { return n.sym }
+func (n *Name) SetSym(x *types.Sym)    { n.sym = x }
+func (n *Name) SubOp() Op              { return n.BuiltinOp }
+func (n *Name) SetSubOp(x Op)          { n.BuiltinOp = x }
+func (n *Name) SetFunc(x *Func)        { n.Func = x }
 func (n *Name) FrameOffset() int64     { return n.Offset_ }
 func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x }
 
@@ -351,16 +306,13 @@
 		base.Fatalf("NewClosureVar: %+v", n)
 	}
 
-	c := NewNameAt(pos, n.Sym())
+	c := NewNameAt(pos, n.Sym(), n.Type())
 	c.Curfn = fn
 	c.Class = PAUTOHEAP
 	c.SetIsClosureVar(true)
 	c.Defn = n.Canonical()
 	c.Outer = n
 
-	c.SetType(n.Type())
-	c.SetTypecheck(n.Typecheck())
-
 	fn.ClosureVars = append(fn.ClosureVars, c)
 
 	return c
@@ -377,88 +329,13 @@
 
 	// Create a fake parameter, disassociated from any real function, to
 	// pretend to capture.
-	fake := NewNameAt(pos, sym)
+	fake := NewNameAt(pos, sym, typ)
 	fake.Class = PPARAM
-	fake.SetType(typ)
 	fake.SetByval(true)
 
 	return NewClosureVar(pos, fn, fake)
 }
 
-// CaptureName returns a Name suitable for referring to n from within function
-// fn or from the package block if fn is nil. If n is a free variable declared
-// within a function that encloses fn, then CaptureName returns the closure
-// variable that refers to n within fn, creating it if necessary.
-// Otherwise, it simply returns n.
-func CaptureName(pos src.XPos, fn *Func, n *Name) *Name {
-	if n.Op() != ONAME || n.Curfn == nil {
-		return n // okay to use directly
-	}
-	if n.IsClosureVar() {
-		base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
-	}
-
-	c := n.Innermost
-	if c == nil {
-		c = n
-	}
-	if c.Curfn == fn {
-		return c
-	}
-
-	if fn == nil {
-		base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
-	}
-
-	// Do not have a closure var for the active closure yet; make one.
-	c = NewClosureVar(pos, fn, c)
-
-	// Link into list of active closure variables.
-	// Popped from list in FinishCaptureNames.
-	n.Innermost = c
-
-	return c
-}
-
-// FinishCaptureNames handles any work leftover from calling CaptureName
-// earlier. outerfn should be the function that immediately encloses fn.
-func FinishCaptureNames(pos src.XPos, outerfn, fn *Func) {
-	// closure-specific variables are hanging off the
-	// ordinary ones; see CaptureName above.
-	// unhook them.
-	// make the list of pointers for the closure call.
-	for _, cv := range fn.ClosureVars {
-		// Unlink from n; see comment above on type Name for these fields.
-		n := cv.Defn.(*Name)
-		n.Innermost = cv.Outer
-
-		// If the closure usage of n is not dense, we need to make it
-		// dense by recapturing n within the enclosing function.
-		//
-		// That is, suppose we just finished parsing the innermost
-		// closure f4 in this code:
-		//
-		//	func f() {
-		//		n := 1
-		//		func() { // f2
-		//			use(n)
-		//			func() { // f3
-		//				func() { // f4
-		//					use(n)
-		//				}()
-		//			}()
-		//		}()
-		//	}
-		//
-		// At this point cv.Outer is f2's n; there is no n for f3. To
-		// construct the closure f4 from within f3, we need to use f3's
-		// n and in this case we need to create f3's n with CaptureName.
-		//
-		// We'll decide later in walk whether to use v directly or &v.
-		cv.Outer = CaptureName(pos, outerfn, n)
-	}
-}
-
 // SameSource reports whether two nodes refer to the same source
 // element.
 //
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index 769340e..6513386 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -169,15 +169,12 @@
 	OPTRLIT    // &X (X is composite literal)
 	OCONV      // Type(X) (type conversion)
 	OCONVIFACE // Type(X) (type conversion, to interface)
-	OCONVIDATA // Builds a data word to store X in an interface. Equivalent to IDATA(CONVIFACE(X)). Is an ir.ConvExpr.
 	OCONVNOP   // Type(X) (type conversion, no effect)
 	OCOPY      // copy(X, Y)
 	ODCL       // var X (declares X of type X.Type)
 
 	// Used during parsing but don't last.
-	ODCLFUNC  // func f() or func (r) f()
-	ODCLCONST // const pi = 3.14
-	ODCLTYPE  // type Int int or type Int = int
+	ODCLFUNC // func f() or func (r) f()
 
 	ODELETE        // delete(Args)
 	ODOT           // X.Sel (X is of struct type)
@@ -226,7 +223,7 @@
 	OOROR             // X || Y
 	OPANIC            // panic(X)
 	OPRINT            // print(List)
-	OPRINTN           // println(List)
+	OPRINTLN          // println(List)
 	OPAREN            // (X)
 	OSEND             // Chan <- Value
 	OSLICE            // X[Low : High] (X is untypechecked or slice)
@@ -246,9 +243,6 @@
 	OREAL             // real(X)
 	OIMAG             // imag(X)
 	OCOMPLEX          // complex(X, Y)
-	OALIGNOF          // unsafe.Alignof(X)
-	OOFFSETOF         // unsafe.Offsetof(X)
-	OSIZEOF           // unsafe.Sizeof(X)
 	OUNSAFEADD        // unsafe.Add(X, Y)
 	OUNSAFESLICE      // unsafe.Slice(X, Y)
 	OUNSAFESLICEDATA  // unsafe.SliceData(X)
@@ -282,24 +276,24 @@
 	// OTYPESW:  X := Y.(type) (appears as .Tag of OSWITCH)
 	//   X is nil if there is no type-switch variable
 	OTYPESW
-	OFUNCINST // instantiation of a generic function
 
 	// misc
 	// intermediate representation of an inlined call.  Uses Init (assignments
 	// for the captured variables, parameters, retvars, & INLMARK op),
 	// Body (body of the inlined function), and ReturnVars (list of
 	// return values)
-	OINLCALL       // intermediary representation of an inlined call.
-	OEFACE         // itable and data words of an empty-interface value.
-	OITAB          // itable word of an interface value.
-	OIDATA         // data word of an interface value in X
-	OSPTR          // base pointer of a slice or string. Bounded==1 means known non-nil.
-	OCFUNC         // reference to c function pointer (not go func value)
-	OCHECKNIL      // emit code to ensure pointer/interface not nil
-	ORESULT        // result of a function call; Xoffset is stack offset
-	OINLMARK       // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
-	OLINKSYMOFFSET // offset within a name
-	OJUMPTABLE     // A jump table structure for implementing dense expression switches
+	OINLCALL         // intermediary representation of an inlined call.
+	OMAKEFACE        // construct an interface value from rtype/itab and data pointers
+	OITAB            // rtype/itab pointer of an interface value
+	OIDATA           // data pointer of an interface value
+	OSPTR            // base pointer of a slice or string. Bounded==1 means known non-nil.
+	OCFUNC           // reference to c function pointer (not go func value)
+	OCHECKNIL        // emit code to ensure pointer/interface not nil
+	ORESULT          // result of a function call; Xoffset is stack offset
+	OINLMARK         // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
+	OLINKSYMOFFSET   // offset within a name
+	OJUMPTABLE       // A jump table structure for implementing dense expression switches
+	OINTERFACESWITCH // A type switch with interface cases
 
 	// opcodes for generics
 	ODYNAMICDOTTYPE  // x = i.(T) where T is a type parameter (or derived from a type parameter)
@@ -325,11 +319,18 @@
 	return false
 }
 
-// Nodes is a pointer to a slice of *Node.
-// For fields that are not used in most nodes, this is used instead of
-// a slice to save space.
+// Nodes is a slice of Node.
 type Nodes []Node
 
+// ToNodes returns s as a slice of Nodes.
+func ToNodes[T Node](s []T) Nodes {
+	res := make(Nodes, len(s))
+	for i, n := range s {
+		res[i] = n
+	}
+	return res
+}
+
 // Append appends entries to Nodes.
 func (n *Nodes) Append(a ...Node) {
 	if len(a) == 0 {
@@ -465,14 +466,7 @@
 
 )
 
-func AsNode(n types.Object) Node {
-	if n == nil {
-		return nil
-	}
-	return n.(Node)
-}
-
-var BlankNode Node
+var BlankNode *Name
 
 func IsConst(n Node, ct constant.Kind) bool {
 	return ConstType(n) == ct
@@ -480,9 +474,7 @@
 
 // IsNil reports whether n represents the universal untyped zero value "nil".
 func IsNil(n Node) bool {
-	// Check n.Orig because constant propagation may produce typed nil constants,
-	// which don't exist in the Go spec.
-	return n != nil && Orig(n).Op() == ONIL
+	return n != nil && n.Op() == ONIL
 }
 
 func IsBlank(n Node) bool {
@@ -498,11 +490,6 @@
 	return n.Type().Recv() != nil
 }
 
-func HasNamedResults(fn *Func) bool {
-	typ := fn.Type()
-	return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil
-}
-
 // HasUniquePos reports whether n has a unique position that can be
 // used for reporting error messages.
 //
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
index 2dda76b..fc28067 100644
--- a/src/cmd/compile/internal/ir/node_gen.go
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -295,7 +295,7 @@
 	if doNodes(n.init, do) {
 		return true
 	}
-	if n.X != nil && do(n.X) {
+	if n.Fun != nil && do(n.Fun) {
 		return true
 	}
 	if doNodes(n.Args, do) {
@@ -308,16 +308,16 @@
 }
 func (n *CallExpr) editChildren(edit func(Node) Node) {
 	editNodes(n.init, edit)
-	if n.X != nil {
-		n.X = edit(n.X).(Node)
+	if n.Fun != nil {
+		n.Fun = edit(n.Fun).(Node)
 	}
 	editNodes(n.Args, edit)
 	editNames(n.KeepAlive, edit)
 }
 func (n *CallExpr) editChildrenWithHidden(edit func(Node) Node) {
 	editNodes(n.init, edit)
-	if n.X != nil {
-		n.X = edit(n.X).(Node)
+	if n.Fun != nil {
+		n.Fun = edit(n.Fun).(Node)
 	}
 	editNodes(n.Args, edit)
 	if n.RType != nil {
@@ -471,25 +471,6 @@
 	}
 }
 
-func (n *ConstExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *ConstExpr) copy() Node {
-	c := *n
-	c.init = copyNodes(c.init)
-	return &c
-}
-func (n *ConstExpr) doChildren(do func(Node) bool) bool {
-	if doNodes(n.init, do) {
-		return true
-	}
-	return false
-}
-func (n *ConstExpr) editChildren(edit func(Node) Node) {
-	editNodes(n.init, edit)
-}
-func (n *ConstExpr) editChildrenWithHidden(edit func(Node) Node) {
-	editNodes(n.init, edit)
-}
-
 func (n *ConvExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
 func (n *ConvExpr) copy() Node {
 	c := *n
@@ -866,38 +847,50 @@
 	editNodes(n.ReturnVars, edit)
 }
 
-func (n *InstExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *InstExpr) copy() Node {
+func (n *InterfaceSwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InterfaceSwitchStmt) copy() Node {
 	c := *n
 	c.init = copyNodes(c.init)
-	c.Targs = copyNtypes(c.Targs)
 	return &c
 }
-func (n *InstExpr) doChildren(do func(Node) bool) bool {
+func (n *InterfaceSwitchStmt) doChildren(do func(Node) bool) bool {
 	if doNodes(n.init, do) {
 		return true
 	}
-	if n.X != nil && do(n.X) {
+	if n.Case != nil && do(n.Case) {
 		return true
 	}
-	if doNtypes(n.Targs, do) {
+	if n.Itab != nil && do(n.Itab) {
+		return true
+	}
+	if n.RuntimeType != nil && do(n.RuntimeType) {
 		return true
 	}
 	return false
 }
-func (n *InstExpr) editChildren(edit func(Node) Node) {
+func (n *InterfaceSwitchStmt) editChildren(edit func(Node) Node) {
 	editNodes(n.init, edit)
-	if n.X != nil {
-		n.X = edit(n.X).(Node)
+	if n.Case != nil {
+		n.Case = edit(n.Case).(Node)
 	}
-	editNtypes(n.Targs, edit)
+	if n.Itab != nil {
+		n.Itab = edit(n.Itab).(Node)
+	}
+	if n.RuntimeType != nil {
+		n.RuntimeType = edit(n.RuntimeType).(Node)
+	}
 }
-func (n *InstExpr) editChildrenWithHidden(edit func(Node) Node) {
+func (n *InterfaceSwitchStmt) editChildrenWithHidden(edit func(Node) Node) {
 	editNodes(n.init, edit)
-	if n.X != nil {
-		n.X = edit(n.X).(Node)
+	if n.Case != nil {
+		n.Case = edit(n.Case).(Node)
 	}
-	editNtypes(n.Targs, edit)
+	if n.Itab != nil {
+		n.Itab = edit(n.Itab).(Node)
+	}
+	if n.RuntimeType != nil {
+		n.RuntimeType = edit(n.RuntimeType).(Node)
+	}
 }
 
 func (n *JumpTableStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
@@ -1205,25 +1198,6 @@
 	}
 }
 
-func (n *RawOrigExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *RawOrigExpr) copy() Node {
-	c := *n
-	c.init = copyNodes(c.init)
-	return &c
-}
-func (n *RawOrigExpr) doChildren(do func(Node) bool) bool {
-	if doNodes(n.init, do) {
-		return true
-	}
-	return false
-}
-func (n *RawOrigExpr) editChildren(edit func(Node) Node) {
-	editNodes(n.init, edit)
-}
-func (n *RawOrigExpr) editChildrenWithHidden(edit func(Node) Node) {
-	editNodes(n.init, edit)
-}
-
 func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
 func (n *ResultExpr) copy() Node {
 	c := *n
@@ -1833,27 +1807,3 @@
 		}
 	}
 }
-
-func copyNtypes(list []Ntype) []Ntype {
-	if list == nil {
-		return nil
-	}
-	c := make([]Ntype, len(list))
-	copy(c, list)
-	return c
-}
-func doNtypes(list []Ntype, do func(Node) bool) bool {
-	for _, x := range list {
-		if x != nil && do(x) {
-			return true
-		}
-	}
-	return false
-}
-func editNtypes(list []Ntype, edit func(Node) Node) {
-	for i, x := range list {
-		if x != nil {
-			list[i] = edit(x).(Ntype)
-		}
-	}
-}
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
index 571ac6c..fb97ac6 100644
--- a/src/cmd/compile/internal/ir/op_string.go
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -53,124 +53,118 @@
 	_ = x[OPTRLIT-42]
 	_ = x[OCONV-43]
 	_ = x[OCONVIFACE-44]
-	_ = x[OCONVIDATA-45]
-	_ = x[OCONVNOP-46]
-	_ = x[OCOPY-47]
-	_ = x[ODCL-48]
-	_ = x[ODCLFUNC-49]
-	_ = x[ODCLCONST-50]
-	_ = x[ODCLTYPE-51]
-	_ = x[ODELETE-52]
-	_ = x[ODOT-53]
-	_ = x[ODOTPTR-54]
-	_ = x[ODOTMETH-55]
-	_ = x[ODOTINTER-56]
-	_ = x[OXDOT-57]
-	_ = x[ODOTTYPE-58]
-	_ = x[ODOTTYPE2-59]
-	_ = x[OEQ-60]
-	_ = x[ONE-61]
-	_ = x[OLT-62]
-	_ = x[OLE-63]
-	_ = x[OGE-64]
-	_ = x[OGT-65]
-	_ = x[ODEREF-66]
-	_ = x[OINDEX-67]
-	_ = x[OINDEXMAP-68]
-	_ = x[OKEY-69]
-	_ = x[OSTRUCTKEY-70]
-	_ = x[OLEN-71]
-	_ = x[OMAKE-72]
-	_ = x[OMAKECHAN-73]
-	_ = x[OMAKEMAP-74]
-	_ = x[OMAKESLICE-75]
-	_ = x[OMAKESLICECOPY-76]
-	_ = x[OMUL-77]
-	_ = x[ODIV-78]
-	_ = x[OMOD-79]
-	_ = x[OLSH-80]
-	_ = x[ORSH-81]
-	_ = x[OAND-82]
-	_ = x[OANDNOT-83]
-	_ = x[ONEW-84]
-	_ = x[ONOT-85]
-	_ = x[OBITNOT-86]
-	_ = x[OPLUS-87]
-	_ = x[ONEG-88]
-	_ = x[OOROR-89]
-	_ = x[OPANIC-90]
-	_ = x[OPRINT-91]
-	_ = x[OPRINTN-92]
-	_ = x[OPAREN-93]
-	_ = x[OSEND-94]
-	_ = x[OSLICE-95]
-	_ = x[OSLICEARR-96]
-	_ = x[OSLICESTR-97]
-	_ = x[OSLICE3-98]
-	_ = x[OSLICE3ARR-99]
-	_ = x[OSLICEHEADER-100]
-	_ = x[OSTRINGHEADER-101]
-	_ = x[ORECOVER-102]
-	_ = x[ORECOVERFP-103]
-	_ = x[ORECV-104]
-	_ = x[ORUNESTR-105]
-	_ = x[OSELRECV2-106]
-	_ = x[OMIN-107]
-	_ = x[OMAX-108]
-	_ = x[OREAL-109]
-	_ = x[OIMAG-110]
-	_ = x[OCOMPLEX-111]
-	_ = x[OALIGNOF-112]
-	_ = x[OOFFSETOF-113]
-	_ = x[OSIZEOF-114]
-	_ = x[OUNSAFEADD-115]
-	_ = x[OUNSAFESLICE-116]
-	_ = x[OUNSAFESLICEDATA-117]
-	_ = x[OUNSAFESTRING-118]
-	_ = x[OUNSAFESTRINGDATA-119]
-	_ = x[OMETHEXPR-120]
-	_ = x[OMETHVALUE-121]
-	_ = x[OBLOCK-122]
-	_ = x[OBREAK-123]
-	_ = x[OCASE-124]
-	_ = x[OCONTINUE-125]
-	_ = x[ODEFER-126]
-	_ = x[OFALL-127]
-	_ = x[OFOR-128]
-	_ = x[OGOTO-129]
-	_ = x[OIF-130]
-	_ = x[OLABEL-131]
-	_ = x[OGO-132]
-	_ = x[ORANGE-133]
-	_ = x[ORETURN-134]
-	_ = x[OSELECT-135]
-	_ = x[OSWITCH-136]
-	_ = x[OTYPESW-137]
-	_ = x[OFUNCINST-138]
-	_ = x[OINLCALL-139]
-	_ = x[OEFACE-140]
-	_ = x[OITAB-141]
-	_ = x[OIDATA-142]
-	_ = x[OSPTR-143]
-	_ = x[OCFUNC-144]
-	_ = x[OCHECKNIL-145]
-	_ = x[ORESULT-146]
-	_ = x[OINLMARK-147]
-	_ = x[OLINKSYMOFFSET-148]
-	_ = x[OJUMPTABLE-149]
-	_ = x[ODYNAMICDOTTYPE-150]
-	_ = x[ODYNAMICDOTTYPE2-151]
-	_ = x[ODYNAMICTYPE-152]
-	_ = x[OTAILCALL-153]
-	_ = x[OGETG-154]
-	_ = x[OGETCALLERPC-155]
-	_ = x[OGETCALLERSP-156]
-	_ = x[OEND-157]
+	_ = x[OCONVNOP-45]
+	_ = x[OCOPY-46]
+	_ = x[ODCL-47]
+	_ = x[ODCLFUNC-48]
+	_ = x[ODELETE-49]
+	_ = x[ODOT-50]
+	_ = x[ODOTPTR-51]
+	_ = x[ODOTMETH-52]
+	_ = x[ODOTINTER-53]
+	_ = x[OXDOT-54]
+	_ = x[ODOTTYPE-55]
+	_ = x[ODOTTYPE2-56]
+	_ = x[OEQ-57]
+	_ = x[ONE-58]
+	_ = x[OLT-59]
+	_ = x[OLE-60]
+	_ = x[OGE-61]
+	_ = x[OGT-62]
+	_ = x[ODEREF-63]
+	_ = x[OINDEX-64]
+	_ = x[OINDEXMAP-65]
+	_ = x[OKEY-66]
+	_ = x[OSTRUCTKEY-67]
+	_ = x[OLEN-68]
+	_ = x[OMAKE-69]
+	_ = x[OMAKECHAN-70]
+	_ = x[OMAKEMAP-71]
+	_ = x[OMAKESLICE-72]
+	_ = x[OMAKESLICECOPY-73]
+	_ = x[OMUL-74]
+	_ = x[ODIV-75]
+	_ = x[OMOD-76]
+	_ = x[OLSH-77]
+	_ = x[ORSH-78]
+	_ = x[OAND-79]
+	_ = x[OANDNOT-80]
+	_ = x[ONEW-81]
+	_ = x[ONOT-82]
+	_ = x[OBITNOT-83]
+	_ = x[OPLUS-84]
+	_ = x[ONEG-85]
+	_ = x[OOROR-86]
+	_ = x[OPANIC-87]
+	_ = x[OPRINT-88]
+	_ = x[OPRINTLN-89]
+	_ = x[OPAREN-90]
+	_ = x[OSEND-91]
+	_ = x[OSLICE-92]
+	_ = x[OSLICEARR-93]
+	_ = x[OSLICESTR-94]
+	_ = x[OSLICE3-95]
+	_ = x[OSLICE3ARR-96]
+	_ = x[OSLICEHEADER-97]
+	_ = x[OSTRINGHEADER-98]
+	_ = x[ORECOVER-99]
+	_ = x[ORECOVERFP-100]
+	_ = x[ORECV-101]
+	_ = x[ORUNESTR-102]
+	_ = x[OSELRECV2-103]
+	_ = x[OMIN-104]
+	_ = x[OMAX-105]
+	_ = x[OREAL-106]
+	_ = x[OIMAG-107]
+	_ = x[OCOMPLEX-108]
+	_ = x[OUNSAFEADD-109]
+	_ = x[OUNSAFESLICE-110]
+	_ = x[OUNSAFESLICEDATA-111]
+	_ = x[OUNSAFESTRING-112]
+	_ = x[OUNSAFESTRINGDATA-113]
+	_ = x[OMETHEXPR-114]
+	_ = x[OMETHVALUE-115]
+	_ = x[OBLOCK-116]
+	_ = x[OBREAK-117]
+	_ = x[OCASE-118]
+	_ = x[OCONTINUE-119]
+	_ = x[ODEFER-120]
+	_ = x[OFALL-121]
+	_ = x[OFOR-122]
+	_ = x[OGOTO-123]
+	_ = x[OIF-124]
+	_ = x[OLABEL-125]
+	_ = x[OGO-126]
+	_ = x[ORANGE-127]
+	_ = x[ORETURN-128]
+	_ = x[OSELECT-129]
+	_ = x[OSWITCH-130]
+	_ = x[OTYPESW-131]
+	_ = x[OINLCALL-132]
+	_ = x[OMAKEFACE-133]
+	_ = x[OITAB-134]
+	_ = x[OIDATA-135]
+	_ = x[OSPTR-136]
+	_ = x[OCFUNC-137]
+	_ = x[OCHECKNIL-138]
+	_ = x[ORESULT-139]
+	_ = x[OINLMARK-140]
+	_ = x[OLINKSYMOFFSET-141]
+	_ = x[OJUMPTABLE-142]
+	_ = x[OINTERFACESWITCH-143]
+	_ = x[ODYNAMICDOTTYPE-144]
+	_ = x[ODYNAMICDOTTYPE2-145]
+	_ = x[ODYNAMICTYPE-146]
+	_ = x[OTAILCALL-147]
+	_ = x[OGETG-148]
+	_ = x[OGETCALLERPC-149]
+	_ = x[OGETCALLERSP-150]
+	_ = x[OEND-151]
 }
 
-const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLEARCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVIDATACONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERSTRINGHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2MINMAXREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEUNSAFESLICEDATAUNSAFESTRINGUNSAFESTRINGDATAMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTINLCALLEFACEITABIDATASPTRCFUNCCHECKNILRESULTINLMARKLINKSYMOFFSETJUMPTABLEDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
+const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLEARCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERSTRINGHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2MINMAXREALIMAGCOMPLEXUNSAFEADDUNSAFESLICEUNSAFESLICEDATAUNSAFESTRINGUNSAFESTRINGDATAMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWINLCALLMAKEFACEITABIDATASPTRCFUNCCHECKNILRESULTINLMARKLINKSYMOFFSETJUMPTABLEINTERFACESWITCHDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
 
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 129, 141, 143, 146, 156, 163, 170, 177, 181, 185, 193, 201, 210, 213, 218, 223, 230, 237, 243, 252, 260, 268, 274, 278, 287, 296, 303, 307, 310, 317, 325, 332, 338, 341, 347, 354, 362, 366, 373, 381, 383, 385, 387, 389, 391, 393, 398, 403, 411, 414, 423, 426, 430, 438, 445, 454, 467, 470, 473, 476, 479, 482, 485, 491, 494, 497, 503, 507, 510, 514, 519, 524, 530, 535, 539, 544, 552, 560, 566, 575, 586, 598, 605, 614, 618, 625, 633, 636, 639, 643, 647, 654, 661, 669, 675, 684, 695, 710, 722, 738, 746, 755, 760, 765, 769, 777, 782, 786, 789, 793, 795, 800, 802, 807, 813, 819, 825, 831, 839, 846, 851, 855, 860, 864, 869, 877, 883, 890, 903, 912, 926, 941, 952, 960, 964, 975, 986, 989}
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 129, 141, 143, 146, 156, 163, 170, 177, 181, 185, 193, 201, 210, 213, 218, 223, 230, 237, 243, 252, 260, 268, 274, 278, 287, 294, 298, 301, 308, 314, 317, 323, 330, 338, 342, 349, 357, 359, 361, 363, 365, 367, 369, 374, 379, 387, 390, 399, 402, 406, 414, 421, 430, 443, 446, 449, 452, 455, 458, 461, 467, 470, 473, 479, 483, 486, 490, 495, 500, 506, 511, 515, 520, 528, 536, 542, 551, 562, 574, 581, 590, 594, 601, 609, 612, 615, 619, 623, 630, 639, 650, 665, 677, 693, 701, 710, 715, 720, 724, 732, 737, 741, 744, 748, 750, 755, 757, 762, 768, 774, 780, 786, 793, 801, 805, 810, 814, 819, 827, 833, 840, 853, 862, 877, 891, 906, 917, 925, 929, 940, 951, 954}
 
 func (i Op) String() string {
 	if i >= Op(len(_Op_index)-1) {
diff --git a/src/cmd/compile/internal/ir/package.go b/src/cmd/compile/internal/ir/package.go
index 3896e2b..3b70a92 100644
--- a/src/cmd/compile/internal/ir/package.go
+++ b/src/cmd/compile/internal/ir/package.go
@@ -15,14 +15,18 @@
 	// Init functions, listed in source order.
 	Inits []*Func
 
-	// Top-level declarations.
-	Decls []Node
+	// Funcs contains all (instantiated) functions, methods, and
+	// function literals to be compiled.
+	Funcs []*Func
 
-	// Extern (package global) declarations.
-	Externs []Node
+	// Externs holds constants, (non-generic) types, and variables
+	// declared at package scope.
+	Externs []*Name
 
-	// Assembly function declarations.
-	Asms []*Name
+	// AsmHdrDecls holds declared constants and struct types that should
+	// be included in -asmhdr output. It's only populated when -asmhdr
+	// is set.
+	AsmHdrDecls []*Name
 
 	// Cgo directives.
 	CgoPragmas [][]string
@@ -30,6 +34,9 @@
 	// Variables with //go:embed lines.
 	Embeds []*Name
 
-	// Exported (or re-exported) symbols.
-	Exports []*Name
+	// PluginExports holds exported functions and variables that are
+	// accessible through the package plugin API. It's only populated
+	// for -buildmode=plugin (i.e., compiling package main and -dynlink
+	// is set).
+	PluginExports []*Name
 }
diff --git a/src/cmd/compile/internal/ir/reassign_consistency_check.go b/src/cmd/compile/internal/ir/reassign_consistency_check.go
new file mode 100644
index 0000000..e4d928d
--- /dev/null
+++ b/src/cmd/compile/internal/ir/reassign_consistency_check.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/internal/src"
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// checkStaticValueResult compares the result from ReassignOracle.StaticValue
+// with the corresponding result from ir.StaticValue to make sure they agree.
+// This function is called only when enabled via a build tag.
+func checkStaticValueResult(n Node, newres Node) {
+	oldres := StaticValue(n)
+	if oldres != newres {
+		base.Fatalf("%s: new/old static value disagreement on %v:\nnew=%v\nold=%v", fmtFullPos(n.Pos()), n, newres, oldres)
+	}
+}
+
+// checkReassignedResult compares the result from ReassignOracle.Reassigned
+// with the corresponding result from ir.Reassigned to make sure they agree.
+// This function is called only when enabled via a build tag.
+func checkReassignedResult(n *Name, newres bool) {
+	origres := Reassigned(n)
+	if newres != origres {
+		base.Fatalf("%s: new/old reassigned disagreement on %v (class %s) newres=%v oldres=%v", fmtFullPos(n.Pos()), n, n.Class.String(), newres, origres)
+	}
+}
+
+// fmtFullPos returns a verbose dump for pos p, including inlines.
+func fmtFullPos(p src.XPos) string {
+	var sb strings.Builder
+	sep := ""
+	base.Ctxt.AllPos(p, func(pos src.Pos) {
+		fmt.Fprintf(&sb, sep)
+		sep = "|"
+		file := filepath.Base(pos.Filename())
+		fmt.Fprintf(&sb, "%s:%d:%d", file, pos.Line(), pos.Col())
+	})
+	return sb.String()
+}
diff --git a/src/cmd/compile/internal/ir/reassignment.go b/src/cmd/compile/internal/ir/reassignment.go
new file mode 100644
index 0000000..9974292
--- /dev/null
+++ b/src/cmd/compile/internal/ir/reassignment.go
@@ -0,0 +1,205 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"cmd/compile/internal/base"
+)
+
+// A ReassignOracle efficiently answers queries about whether local
+// variables are reassigned. This helper works by looking for function
+// params and short variable declarations (e.g.
+// https://go.dev/ref/spec#Short_variable_declarations) that are
+// neither address taken nor subsequently re-assigned. It is intended
+// to operate much like "ir.StaticValue" and "ir.Reassigned", but in a
+// way that does just a single walk of the containing function (as
+// opposed to a new walk on every call).
+type ReassignOracle struct {
+	fn *Func
+	// maps a candidate name to its defining assignment (or, for
+	// params, its defining func).
+	singleDef map[*Name]Node
+}
+
+// Init initializes the oracle based on the IR in function fn, laying
+// the groundwork for future calls to the StaticValue and Reassigned
+// methods. If fn's IR is subsequently modified, Init must be
+// called again.
+func (ro *ReassignOracle) Init(fn *Func) {
+	ro.fn = fn
+
+	// Collect candidate map. Start by adding function parameters
+	// explicitly.
+	ro.singleDef = make(map[*Name]Node)
+	sig := fn.Type()
+	numParams := sig.NumRecvs() + sig.NumParams()
+	for _, param := range fn.Dcl[:numParams] {
+		if IsBlank(param) {
+			continue
+		}
+		// For params, use func itself as defining node.
+		ro.singleDef[param] = fn
+	}
+
+	// Walk the function body to discover any locals assigned
+	// via ":=" syntax (e.g. "a := <expr>").
+	var findLocals func(n Node) bool
+	findLocals = func(n Node) bool {
+		if nn, ok := n.(*Name); ok {
+			if nn.Defn != nil && !nn.Addrtaken() && nn.Class == PAUTO {
+				ro.singleDef[nn] = nn.Defn
+			}
+		} else if nn, ok := n.(*ClosureExpr); ok {
+			Any(nn.Func, findLocals)
+		}
+		return false
+	}
+	Any(fn, findLocals)
+
+	outerName := func(x Node) *Name {
+		if x == nil {
+			return nil
+		}
+		n, ok := OuterValue(x).(*Name)
+		if ok {
+			return n.Canonical()
+		}
+		return nil
+	}
+
+	// pruneIfNeeded examines node nn appearing on the left hand side
+	// of assignment statement asn to see if it contains a reassignment
+	// to any nodes in our candidate map ro.singleDef; if a reassignment
+	// is found, the corresponding name is deleted from singleDef.
+	pruneIfNeeded := func(nn Node, asn Node) {
+		oname := outerName(nn)
+		if oname == nil {
+			return
+		}
+		defn, ok := ro.singleDef[oname]
+		if !ok {
+			return
+		}
+		// any assignment to a param invalidates the entry.
+		paramAssigned := oname.Class == PPARAM
+		// assignment to local ok iff assignment is its orig def.
+		localAssigned := (oname.Class == PAUTO && asn != defn)
+		if paramAssigned || localAssigned {
+			// We found an assignment to name N that doesn't
+			// correspond to its original definition; remove
+			// from candidates.
+			delete(ro.singleDef, oname)
+		}
+	}
+
+	// Prune away anything that looks assigned. This code is modeled after
+	// similar code in ir.Reassigned; any changes there should be made
+	// here as well.
+	var do func(n Node) bool
+	do = func(n Node) bool {
+		switch n.Op() {
+		case OAS:
+			asn := n.(*AssignStmt)
+			pruneIfNeeded(asn.X, n)
+		case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2:
+			asn := n.(*AssignListStmt)
+			for _, p := range asn.Lhs {
+				pruneIfNeeded(p, n)
+			}
+		case OASOP:
+			asn := n.(*AssignOpStmt)
+			pruneIfNeeded(asn.X, n)
+		case ORANGE:
+			rs := n.(*RangeStmt)
+			pruneIfNeeded(rs.Key, n)
+			pruneIfNeeded(rs.Value, n)
+		case OCLOSURE:
+			n := n.(*ClosureExpr)
+			Any(n.Func, do)
+		}
+		return false
+	}
+	Any(fn, do)
+}
+
+// StaticValue method has the same semantics as the ir package function
+// of the same name; see comments on [StaticValue].
+func (ro *ReassignOracle) StaticValue(n Node) Node {
+	arg := n
+	for {
+		if n.Op() == OCONVNOP {
+			n = n.(*ConvExpr).X
+			continue
+		}
+
+		if n.Op() == OINLCALL {
+			n = n.(*InlinedCallExpr).SingleResult()
+			continue
+		}
+
+		n1 := ro.staticValue1(n)
+		if n1 == nil {
+			if consistencyCheckEnabled {
+				checkStaticValueResult(arg, n)
+			}
+			return n
+		}
+		n = n1
+	}
+}
+
+func (ro *ReassignOracle) staticValue1(nn Node) Node {
+	if nn.Op() != ONAME {
+		return nil
+	}
+	n := nn.(*Name).Canonical()
+	if n.Class != PAUTO {
+		return nil
+	}
+
+	defn := n.Defn
+	if defn == nil {
+		return nil
+	}
+
+	var rhs Node
+FindRHS:
+	switch defn.Op() {
+	case OAS:
+		defn := defn.(*AssignStmt)
+		rhs = defn.Y
+	case OAS2:
+		defn := defn.(*AssignListStmt)
+		for i, lhs := range defn.Lhs {
+			if lhs == n {
+				rhs = defn.Rhs[i]
+				break FindRHS
+			}
+		}
+		base.Fatalf("%v missing from LHS of %v", n, defn)
+	default:
+		return nil
+	}
+	if rhs == nil {
+		base.Fatalf("RHS is nil: %v", defn)
+	}
+
+	if _, ok := ro.singleDef[n]; !ok {
+		return nil
+	}
+
+	return rhs
+}
+
+// Reassigned method has the same semantics as the ir package function
+// of the same name; see comments on [Reassigned] for more info.
+func (ro *ReassignOracle) Reassigned(n *Name) bool {
+	_, ok := ro.singleDef[n]
+	result := !ok
+	if consistencyCheckEnabled {
+		checkReassignedResult(n, result)
+	}
+	return result
+}
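
A minimal usage sketch (the function below is hypothetical; a real pass would hold on to the oracle rather than rebuild it per query):

// analyzeSketch illustrates the intended call pattern: one Init walk,
// then cheap map-lookup queries with ir.StaticValue/ir.Reassigned semantics.
func analyzeSketch(fn *Func, param *Name, arg Node) Node {
	var ro ReassignOracle
	ro.Init(fn)

	if ro.Reassigned(param) {
		return arg // param may change; no static value is safe to use
	}
	return ro.StaticValue(arg)
}
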
diff --git a/src/cmd/compile/internal/ir/scc.go b/src/cmd/compile/internal/ir/scc.go
index b222939..a640f4f 100644
--- a/src/cmd/compile/internal/ir/scc.go
+++ b/src/cmd/compile/internal/ir/scc.go
@@ -49,16 +49,13 @@
 // If recursive is false, the list consists of only a single function and its closures.
 // If recursive is true, the list may still contain only a single function,
 // if that function is itself recursive.
-func VisitFuncsBottomUp(list []Node, analyze func(list []*Func, recursive bool)) {
+func VisitFuncsBottomUp(list []*Func, analyze func(list []*Func, recursive bool)) {
 	var v bottomUpVisitor
 	v.analyze = analyze
 	v.nodeID = make(map[*Func]uint32)
 	for _, n := range list {
-		if n.Op() == ODCLFUNC {
-			n := n.(*Func)
-			if !n.IsHiddenClosure() {
-				v.visit(n)
-			}
+		if !n.IsHiddenClosure() {
+			v.visit(n)
 		}
 	}
 }
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
index 307f40d..3b68238 100644
--- a/src/cmd/compile/internal/ir/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -20,8 +20,8 @@
 		_32bit uintptr     // size on 32bit platforms
 		_64bit uintptr     // size on 64bit platforms
 	}{
-		{Func{}, 188, 328},
-		{Name{}, 100, 176},
+		{Func{}, 168, 288},
+		{Name{}, 96, 168},
 	}
 
 	for _, tt := range tests {
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
index b6653ab..0801ecdd 100644
--- a/src/cmd/compile/internal/ir/stmt.go
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -7,6 +7,7 @@
 import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/types"
+	"cmd/internal/obj"
 	"cmd/internal/src"
 	"go/constant"
 )
@@ -23,7 +24,7 @@
 	switch op {
 	default:
 		panic("invalid Decl op " + op.String())
-	case ODCL, ODCLCONST, ODCLTYPE:
+	case ODCL:
 		n.op = op
 	}
 	return n
@@ -242,7 +243,8 @@
 // in a different context (a separate goroutine or a later time).
 type GoDeferStmt struct {
 	miniStmt
-	Call Node
+	Call    Node
+	DeferAt Expr
 }
 
 func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt {
@@ -285,7 +287,7 @@
 //
 // Note that a JumpTableStmt is more like a multiway-goto than
 // a multiway-if. In particular, the case bodies are just
-// labels to jump to, not not full Nodes lists.
+// labels to jump to, not full Nodes lists.
 type JumpTableStmt struct {
 	miniStmt
 
@@ -308,6 +310,46 @@
 	return n
 }
 
+// An InterfaceSwitchStmt is used to implement type switches.
+// Its semantics are:
+//
+//	if RuntimeType implements Descriptor.Cases[0] {
+//	    Case, Itab = 0, itab<RuntimeType, Descriptor.Cases[0]>
+//	} else if RuntimeType implements Descriptor.Cases[1] {
+//	    Case, Itab = 1, itab<RuntimeType, Descriptor.Cases[1]>
+//	...
+//	} else if RuntimeType implements Descriptor.Cases[N-1] {
+//	    Case, Itab = N-1, itab<RuntimeType, Descriptor.Cases[N-1]>
+//	} else {
+//	    Case, Itab = len(cases), nil
+//	}
+//
+// RuntimeType must be a non-nil *runtime._type.
+// Hash must be the hash field of RuntimeType (or its copy loaded from an itab).
+// Descriptor must represent an abi.InterfaceSwitch global variable.
+type InterfaceSwitchStmt struct {
+	miniStmt
+
+	Case        Node
+	Itab        Node
+	RuntimeType Node
+	Hash        Node
+	Descriptor  *obj.LSym
+}
+
+func NewInterfaceSwitchStmt(pos src.XPos, case_, itab, runtimeType, hash Node, descriptor *obj.LSym) *InterfaceSwitchStmt {
+	n := &InterfaceSwitchStmt{
+		Case:        case_,
+		Itab:        itab,
+		RuntimeType: runtimeType,
+		Hash:        hash,
+		Descriptor:  descriptor,
+	}
+	n.pos = pos
+	n.op = OINTERFACESWITCH
+	return n
+}
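
For orientation, the kind of source-level switch this statement implements (a sketch; whether the compiler uses OINTERFACESWITCH or individual assertions depends on the cases involved):

package sketch

import "fmt"

func describe(v any) string {
	switch v.(type) {
	case error: // interface case 0: Case, Itab = 0, itab<dynamic type, error>
		return "error"
	case fmt.Stringer: // interface case 1
		return "stringer"
	default: // nothing matched: Case = len(cases), Itab = nil
		return "other"
	}
}
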
+
 // An InlineMarkStmt is a marker placed just before an inlined body.
 type InlineMarkStmt struct {
 	miniStmt
@@ -373,15 +415,13 @@
 // A ReturnStmt is a return statement.
 type ReturnStmt struct {
 	miniStmt
-	origNode       // for typecheckargs rewrite
-	Results  Nodes // return list
+	Results Nodes // return list
 }
 
 func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt {
 	n := &ReturnStmt{}
 	n.pos = pos
 	n.op = ORETURN
-	n.orig = n
 	n.Results = results
 	return n
 }
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
index 6ee832e..202c494 100644
--- a/src/cmd/compile/internal/ir/symtab.go
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -10,7 +10,9 @@
 )
 
 // Syms holds known symbols.
-var Syms struct {
+var Syms symsStruct
+
+type symsStruct struct {
 	AssertE2I         *obj.LSym
 	AssertE2I2        *obj.LSym
 	AssertI2I         *obj.LSym
@@ -21,6 +23,7 @@
 	CgoCheckPtrWrite  *obj.LSym
 	CheckPtrAlignment *obj.LSym
 	Deferproc         *obj.LSym
+	Deferprocat       *obj.LSym
 	DeferprocStack    *obj.LSym
 	Deferreturn       *obj.LSym
 	Duffcopy          *obj.LSym
@@ -28,6 +31,7 @@
 	GCWriteBarrier    [8]*obj.LSym
 	Goschedguarded    *obj.LSym
 	Growslice         *obj.LSym
+	InterfaceSwitch   *obj.LSym
 	Memmove           *obj.LSym
 	Msanread          *obj.LSym
 	Msanwrite         *obj.LSym
@@ -40,10 +44,13 @@
 	PanicdottypeI     *obj.LSym
 	Panicnildottype   *obj.LSym
 	Panicoverflow     *obj.LSym
+	Racefuncenter     *obj.LSym
+	Racefuncexit      *obj.LSym
 	Raceread          *obj.LSym
 	Racereadrange     *obj.LSym
 	Racewrite         *obj.LSym
 	Racewriterange    *obj.LSym
+	TypeAssert        *obj.LSym
 	WBZero            *obj.LSym
 	WBMove            *obj.LSym
 	// Wasm
diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go
index 033d1ee..7db76c1 100644
--- a/src/cmd/compile/internal/ir/type.go
+++ b/src/cmd/compile/internal/ir/type.go
@@ -8,44 +8,10 @@
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
-	"fmt"
 )
 
-// Nodes that represent the syntax of a type before type-checking.
-// After type-checking, they serve only as shells around a *types.Type.
 // Calling TypeNode converts a *types.Type to a Node shell.
 
-// An Ntype is a Node that syntactically looks like a type.
-// It can be the raw syntax for a type before typechecking,
-// or it can be an OTYPE with Type() set to a *types.Type.
-// Note that syntax doesn't guarantee it's a type: an expression
-// like *fmt is an Ntype (we don't know whether names are types yet),
-// but at least 1+1 is not an Ntype.
-type Ntype interface {
-	Node
-	CanBeNtype()
-}
-
-// A Field is a declared function parameter.
-// It is not a Node.
-type Field struct {
-	Pos   src.XPos
-	Sym   *types.Sym
-	Type  *types.Type
-	IsDDD bool
-}
-
-func NewField(pos src.XPos, sym *types.Sym, typ *types.Type) *Field {
-	return &Field{Pos: pos, Sym: sym, Type: typ}
-}
-
-func (f *Field) String() string {
-	if f.Sym != nil {
-		return fmt.Sprintf("%v %v", f.Sym, f.Type)
-	}
-	return fmt.Sprint(f.Type)
-}
-
 // A typeNode is a Node wrapper for type t.
 type typeNode struct {
 	miniNode
@@ -56,20 +22,20 @@
 	n := &typeNode{typ: typ}
 	n.pos = src.NoXPos
 	n.op = OTYPE
+	n.SetTypecheck(1)
 	return n
 }
 
 func (n *typeNode) Type() *types.Type { return n.typ }
 func (n *typeNode) Sym() *types.Sym   { return n.typ.Sym() }
-func (n *typeNode) CanBeNtype()       {}
 
 // TypeNode returns the Node representing the type t.
-func TypeNode(t *types.Type) Ntype {
+func TypeNode(t *types.Type) Node {
 	if n := t.Obj(); n != nil {
 		if n.Type() != t {
 			base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
 		}
-		return n.(Ntype)
+		return n.(*Name)
 	}
 	return newTypeNode(t)
 }
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
index 75da5a1..16c8a08 100644
--- a/src/cmd/compile/internal/ir/val.go
+++ b/src/cmd/compile/internal/ir/val.go
@@ -60,23 +60,6 @@
 	panic("unreachable")
 }
 
-func idealType(ct constant.Kind) *types.Type {
-	switch ct {
-	case constant.String:
-		return types.UntypedString
-	case constant.Bool:
-		return types.UntypedBool
-	case constant.Int:
-		return types.UntypedInt
-	case constant.Float:
-		return types.UntypedFloat
-	case constant.Complex:
-		return types.UntypedComplex
-	}
-	base.Fatalf("unexpected Ctype: %v", ct)
-	return nil
-}
-
 var OKForConst [types.NTYPE]bool
 
 // Int64Val returns n as an int64.
diff --git a/src/cmd/compile/internal/liveness/arg.go b/src/cmd/compile/internal/liveness/arg.go
index 6375e43..e1269a1 100644
--- a/src/cmd/compile/internal/liveness/arg.go
+++ b/src/cmd/compile/internal/liveness/arg.go
@@ -97,8 +97,8 @@
 	}
 	// Gather all register arg spill slots.
 	for _, a := range f.OwnAux.ABIInfo().InParams() {
-		n, ok := a.Name.(*ir.Name)
-		if !ok || len(a.Registers) == 0 {
+		n := a.Name
+		if n == nil || len(a.Registers) == 0 {
 			continue
 		}
 		_, offs := a.RegisterTypesAndOffsets()
@@ -116,7 +116,7 @@
 	}
 
 	// We spill address-taken or non-SSA-able value upfront, so they are always live.
-	alwaysLive := func(n *ir.Name) bool { return n.Addrtaken() || !f.Frontend().CanSSA(n.Type()) }
+	alwaysLive := func(n *ir.Name) bool { return n.Addrtaken() || !ssa.CanSSA(n.Type()) }
 
 	// We'll emit the smallest offset for the slots that need liveness info.
 	// No need to include a slot with a lower offset if it is always live.
diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
index 169467e..e4dbfa9 100644
--- a/src/cmd/compile/internal/liveness/plive.go
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -116,6 +116,10 @@
 	// unsafePoints bit i is set if Value ID i is an unsafe-point
 	// (preemption is not allowed). Only valid if !allUnsafe.
 	unsafePoints bitvec.BitVec
+	// unsafeBlocks bit i is set if Block ID i is an unsafe-point
+	// (preemption is not allowed on any end-of-block
+	// instructions). Only valid if !allUnsafe.
+	unsafeBlocks bitvec.BitVec
 
 	// An array with a bit vector for each safe point in the
 	// current Block during liveness.epilogue. Indexed in Value
@@ -141,36 +145,61 @@
 	noClobberArgs bool // Do not clobber function arguments
 }
 
-// Map maps from *ssa.Value to LivenessIndex.
+// Map maps from *ssa.Value to StackMapIndex.
+// Also keeps track of unsafe ssa.Values and ssa.Blocks.
+// (unsafe = can't be interrupted during GC.)
 type Map struct {
-	Vals map[ssa.ID]objw.LivenessIndex
+	Vals         map[ssa.ID]objw.StackMapIndex
+	UnsafeVals   map[ssa.ID]bool
+	UnsafeBlocks map[ssa.ID]bool
 	// The set of live, pointer-containing variables at the DeferReturn
 	// call (only set when open-coded defers are used).
-	DeferReturn objw.LivenessIndex
+	DeferReturn objw.StackMapIndex
 }
 
 func (m *Map) reset() {
 	if m.Vals == nil {
-		m.Vals = make(map[ssa.ID]objw.LivenessIndex)
+		m.Vals = make(map[ssa.ID]objw.StackMapIndex)
+		m.UnsafeVals = make(map[ssa.ID]bool)
+		m.UnsafeBlocks = make(map[ssa.ID]bool)
 	} else {
 		for k := range m.Vals {
 			delete(m.Vals, k)
 		}
+		for k := range m.UnsafeVals {
+			delete(m.UnsafeVals, k)
+		}
+		for k := range m.UnsafeBlocks {
+			delete(m.UnsafeBlocks, k)
+		}
 	}
-	m.DeferReturn = objw.LivenessDontCare
+	m.DeferReturn = objw.StackMapDontCare
 }
 
-func (m *Map) set(v *ssa.Value, i objw.LivenessIndex) {
+func (m *Map) set(v *ssa.Value, i objw.StackMapIndex) {
 	m.Vals[v.ID] = i
 }
+func (m *Map) setUnsafeVal(v *ssa.Value) {
+	m.UnsafeVals[v.ID] = true
+}
+func (m *Map) setUnsafeBlock(b *ssa.Block) {
+	m.UnsafeBlocks[b.ID] = true
+}
 
-func (m Map) Get(v *ssa.Value) objw.LivenessIndex {
-	// If v isn't in the map, then it's a "don't care" and not an
-	// unsafe-point.
+func (m Map) Get(v *ssa.Value) objw.StackMapIndex {
+	// If v isn't in the map, then it's a "don't care".
 	if idx, ok := m.Vals[v.ID]; ok {
 		return idx
 	}
-	return objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: false}
+	return objw.StackMapDontCare
+}
+func (m Map) GetUnsafe(v *ssa.Value) bool {
+	// default is safe
+	return m.UnsafeVals[v.ID]
+}
+func (m Map) GetUnsafeBlock(b *ssa.Block) bool {
+	// default is safe
+	return m.UnsafeBlocks[b.ID]
 }
 
 type progeffectscache struct {
@@ -377,8 +406,15 @@
 		if cap(lc.be) >= f.NumBlocks() {
 			lv.be = lc.be[:f.NumBlocks()]
 		}
-		lv.livenessMap = Map{Vals: lc.livenessMap.Vals, DeferReturn: objw.LivenessDontCare}
+		lv.livenessMap = Map{
+			Vals:         lc.livenessMap.Vals,
+			UnsafeVals:   lc.livenessMap.UnsafeVals,
+			UnsafeBlocks: lc.livenessMap.UnsafeBlocks,
+			DeferReturn:  objw.StackMapDontCare,
+		}
 		lc.livenessMap.Vals = nil
+		lc.livenessMap.UnsafeVals = nil
+		lc.livenessMap.UnsafeBlocks = nil
 	}
 	if lv.be == nil {
 		lv.be = make([]blockEffects, f.NumBlocks())
@@ -460,6 +496,7 @@
 	}
 
 	lv.unsafePoints = bitvec.New(int32(lv.f.NumValues()))
+	lv.unsafeBlocks = bitvec.New(int32(lv.f.NumBlocks()))
 
 	// Mark architecture-specific unsafe points.
 	for _, b := range lv.f.Blocks {
@@ -489,8 +526,6 @@
 			//    m2 = store operation ... m1
 			//    m3 = store operation ... m2
 			//    m4 = WBend m3
-			//
-			// (For now m2 and m3 won't be present.)
 
 			// Find first memory op in the block, which should be a Phi.
 			m := v
@@ -535,40 +570,38 @@
 			var load *ssa.Value
 			v := decisionBlock.Controls[0]
 			for {
-				if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
-					load = v
-					break
-				}
-				switch v.Op {
-				case ssa.Op386TESTL:
-					// 386 lowers Neq32 to (TESTL cond cond),
-					if v.Args[0] == v.Args[1] {
-						v = v.Args[0]
-						continue
+				if v.MemoryArg() != nil {
+					// Single instruction to load (and maybe compare) the write barrier flag.
+					if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
+						load = v
+						break
 					}
-				case ssa.Op386MOVLload, ssa.OpARM64MOVWUload, ssa.OpMIPS64MOVWUload, ssa.OpPPC64MOVWZload, ssa.OpWasmI64Load32U:
-					// Args[0] is the address of the write
-					// barrier control. Ignore Args[1],
-					// which is the mem operand.
-					// TODO: Just ignore mem operands?
+					// Some architectures have to materialize the address separately
+					// from the load.
+					if sym, ok := v.Args[0].Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
+						load = v
+						break
+					}
+					v.Fatalf("load of write barrier flag not from correct global: %s", v.LongString())
+				}
+				// Common case: just flow backwards.
+				if len(v.Args) == 1 || len(v.Args) == 2 && v.Args[0] == v.Args[1] {
+					// Note: 386 lowers Neq32 to (TESTL cond cond), so both args may be the same value.
 					v = v.Args[0]
 					continue
 				}
-				// Common case: just flow backwards.
-				if len(v.Args) != 1 {
-					v.Fatalf("write barrier control value has more than one argument: %s", v.LongString())
-				}
-				v = v.Args[0]
+				v.Fatalf("write barrier control value has more than one argument: %s", v.LongString())
 			}
 
 			// Mark everything after the load unsafe.
 			found := false
 			for _, v := range decisionBlock.Values {
-				found = found || v == load
 				if found {
 					lv.unsafePoints.Set(int32(v.ID))
 				}
+				found = found || v == load
 			}
+			lv.unsafeBlocks.Set(int32(decisionBlock.ID))
 
 			// Mark the write barrier on/off blocks as unsafe.
 			for _, e := range decisionBlock.Succs {
@@ -579,14 +612,15 @@
 				for _, v := range x.Values {
 					lv.unsafePoints.Set(int32(v.ID))
 				}
+				lv.unsafeBlocks.Set(int32(x.ID))
 			}
 
 			// Mark from the join point up to the WBend as unsafe.
 			for _, v := range b.Values {
-				lv.unsafePoints.Set(int32(v.ID))
 				if v.Op == ssa.OpWBend {
 					break
 				}
+				lv.unsafePoints.Set(int32(v.ID))
 			}
 		}
 	}
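
A minimal standalone sketch (not the compiler's own code) of the ordering fix in the hunk above: only values strictly after the write-barrier flag load are marked unsafe, because "found" is now updated after the current value has been handled, and the block itself is recorded separately.

package main

import "fmt"

// markAfter returns the IDs of values that come strictly after the marker
// value in a block's value list, mirroring the rewritten loop above.
func markAfter(values []int, load int) map[int]bool {
	unsafe := make(map[int]bool)
	found := false
	for _, v := range values {
		if found {
			unsafe[v] = true // everything after the load is an unsafe point
		}
		found = found || v == load
	}
	return unsafe
}

func main() {
	// With values 1..4 and the flag load at 2, only 3 and 4 are unsafe.
	fmt.Println(markAfter([]int{1, 2, 3, 4}, 2))
}
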
@@ -828,13 +862,10 @@
 
 	// If we have an open-coded deferreturn call, make a liveness map for it.
 	if lv.fn.OpenCodedDeferDisallowed() {
-		lv.livenessMap.DeferReturn = objw.LivenessDontCare
+		lv.livenessMap.DeferReturn = objw.StackMapDontCare
 	} else {
 		idx, _ := lv.stackMapSet.add(livedefer)
-		lv.livenessMap.DeferReturn = objw.LivenessIndex{
-			StackMapIndex: idx,
-			IsUnsafePoint: false,
-		}
+		lv.livenessMap.DeferReturn = objw.StackMapIndex(idx)
 	}
 
 	// Done compacting. Throw out the stack map set.
@@ -875,17 +906,18 @@
 		pos++
 	}
 	for _, v := range b.Values {
-		hasStackMap := lv.hasStackMap(v)
-		isUnsafePoint := lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID))
-		idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
-		if hasStackMap {
-			idx.StackMapIndex, _ = lv.stackMapSet.add(lv.livevars[pos])
+		if lv.hasStackMap(v) {
+			idx, _ := lv.stackMapSet.add(lv.livevars[pos])
 			pos++
+			lv.livenessMap.set(v, objw.StackMapIndex(idx))
 		}
-		if hasStackMap || isUnsafePoint {
-			lv.livenessMap.set(v, idx)
+		if lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID)) {
+			lv.livenessMap.setUnsafeVal(v)
 		}
 	}
+	if lv.allUnsafe || lv.unsafeBlocks.Get(int32(b.ID)) {
+		lv.livenessMap.setUnsafeBlock(b)
+	}
 
 	// Reset livevars.
 	lv.livevars = lv.livevars[:0]
@@ -1039,7 +1071,7 @@
 		}
 
 	case types.TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
+		for _, t1 := range t.Fields() {
 			clobberWalk(b, v, offset+t1.Offset, t1.Type)
 		}
 
@@ -1221,7 +1253,7 @@
 				fmt.Printf("\tlive=")
 				printed = false
 				if pcdata.StackMapValid() {
-					live := lv.stackMaps[pcdata.StackMapIndex]
+					live := lv.stackMaps[pcdata]
 					for j, n := range lv.vars {
 						if !live.Get(int32(j)) {
 							continue
@@ -1236,10 +1268,13 @@
 				fmt.Printf("\n")
 			}
 
-			if pcdata.IsUnsafePoint {
+			if lv.livenessMap.GetUnsafe(v) {
 				fmt.Printf("\tunsafe-point\n")
 			}
 		}
+		if lv.livenessMap.GetUnsafeBlock(b) {
+			fmt.Printf("\tunsafe-block\n")
+		}
 
 		// bb bitsets
 		fmt.Printf("end\n")
@@ -1333,7 +1368,7 @@
 		for _, b := range f.Blocks {
 			for _, val := range b.Values {
 				if idx := lv.livenessMap.Get(val); idx.StackMapValid() {
-					lv.showlive(val, lv.stackMaps[idx.StackMapIndex])
+					lv.showlive(val, lv.stackMaps[idx])
 				}
 			}
 		}
@@ -1485,7 +1520,7 @@
 		return
 	}
 	nptr := int(abiInfo.ArgWidth() / int64(types.PtrSize))
-	bv := bitvec.New(int32(nptr) * 2)
+	bv := bitvec.New(int32(nptr))
 
 	for _, p := range abiInfo.InParams() {
 		typebits.SetNoCheck(p.Type, p.FrameOffset(abiInfo), bv)
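
For reference, a simplified standalone version of the split bookkeeping introduced in the plive.go changes above (illustrative types only; the real code keys on ssa.ID and uses objw.StackMapIndex and ssa.Block): stack-map indices and unsafe-point flags now live in separate maps, so a value can be an unsafe point without carrying a stack map, and vice versa.

package main

import "fmt"

// StackMapIndex stands in for objw.StackMapIndex in this sketch.
type StackMapIndex int

const StackMapDontCare StackMapIndex = -1

// livenessMap mirrors the shape of the new Map type: values map to a
// stack-map index, while unsafe values and blocks are tracked separately.
type livenessMap struct {
	vals         map[int]StackMapIndex
	unsafeVals   map[int]bool
	unsafeBlocks map[int]bool
}

func (m *livenessMap) get(valID int) StackMapIndex {
	if idx, ok := m.vals[valID]; ok {
		return idx
	}
	return StackMapDontCare // absent means "don't care"
}

// getUnsafe defaults to safe, just like Map.GetUnsafe above.
func (m *livenessMap) getUnsafe(valID int) bool { return m.unsafeVals[valID] }

func main() {
	m := livenessMap{
		vals:         map[int]StackMapIndex{7: 3},
		unsafeVals:   map[int]bool{9: true},
		unsafeBlocks: map[int]bool{2: true},
	}
	fmt.Println(m.get(7), m.get(9), m.getUnsafe(9), m.unsafeBlocks[2])
}
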
diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go
index 1c48351..c7debd9 100644
--- a/src/cmd/compile/internal/logopt/logopt_test.go
+++ b/src/cmd/compile/internal/logopt/logopt_test.go
@@ -205,15 +205,15 @@
 			`"relatedInformation":[`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:    flow: y = z:"},`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:      from y := z (assign-pair)"},`+
-			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:    flow: ~R0 = y:"},`+
+			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:    flow: ~r0 = y:"},`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:      from y.b (dot of pointer)"},`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:      from \u0026y.b (address-of)"},`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
-			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:      from ~R0 = \u0026y.b (assign-pair)"},`+
-			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow:    flow: ~r0 = ~R0:"},`+
-			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow:      from return ~R0 (return)"}]}`)
+			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:      from ~r0 = \u0026y.b (assign-pair)"},`+
+			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow:    flow: ~r0 = ~r0:"},`+
+			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow:      from return ~r0 (return)"}]}`)
 	})
 }
 
diff --git a/src/cmd/compile/internal/loong64/galign.go b/src/cmd/compile/internal/loong64/galign.go
index 99ab7bd..a613165 100644
--- a/src/cmd/compile/internal/loong64/galign.go
+++ b/src/cmd/compile/internal/loong64/galign.go
@@ -20,4 +20,6 @@
 	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
 	arch.SSAGenValue = ssaGenValue
 	arch.SSAGenBlock = ssaGenBlock
+	arch.LoadRegResult = loadRegResult
+	arch.SpillArgReg = spillArgReg
 }
diff --git a/src/cmd/compile/internal/loong64/ggen.go b/src/cmd/compile/internal/loong64/ggen.go
index 8a24d2f..27d318a 100644
--- a/src/cmd/compile/internal/loong64/ggen.go
+++ b/src/cmd/compile/internal/loong64/ggen.go
@@ -5,6 +5,7 @@
 package loong64
 
 import (
+	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
@@ -16,34 +17,38 @@
 	if cnt == 0 {
 		return p
 	}
+
+	// Adjust the frame to account for LR.
+	off += base.Ctxt.Arch.FixedFrameSize
+
 	if cnt < int64(4*types.PtrSize) {
 		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGSP, 8+off+i)
+			p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGSP, off+i)
 		}
 	} else if cnt <= int64(128*types.PtrSize) {
-		p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, loong64.REGRT1, 0)
+		p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, off, obj.TYPE_REG, loong64.REGRT1, 0)
 		p.Reg = loong64.REGSP
 		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
 	} else {
-		//	ADDV	$(8+frame+lo-8), SP, r1
+		//	ADDV	$(off), SP, r1
 		//	ADDV	$cnt, r1, r2
 		// loop:
-		//	MOVV	R0, (Widthptr)r1
+		//	MOVV	R0, (r1)
 		//	ADDV	$Widthptr, r1
-		//	BNE		r1, r2, loop
-		p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, loong64.REGRT1, 0)
+		//	BNE	r1, r2, loop
+		p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, off, obj.TYPE_REG, loong64.REGRT1, 0)
 		p.Reg = loong64.REGSP
 		p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, loong64.REGRT2, 0)
 		p.Reg = loong64.REGRT1
-		p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGRT1, int64(types.PtrSize))
-		p1 := p
+		p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGRT1, 0)
+		loop := p
 		p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, loong64.REGRT1, 0)
 		p = pp.Append(p, loong64.ABNE, obj.TYPE_REG, loong64.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
 		p.Reg = loong64.REGRT2
-		p.To.SetTarget(p1)
+		p.To.SetTarget(loop)
 	}
 
 	return p
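
As a rough sketch of how the rewritten zerorange chooses its strategy (thresholds taken from the code above; the real routine emits loong64 instructions rather than returning a description):

package main

import "fmt"

const ptrSize = 8 // pointer size on loong64

// zeroStrategy mirrors the three cases in zerorange: small ranges use
// unrolled MOVV stores, medium ranges call runtime.duffzero, and large
// ranges fall back to an explicit MOVV/ADDV/BNE loop.
func zeroStrategy(cnt int64) string {
	switch {
	case cnt == 0:
		return "nothing to do"
	case cnt < 4*ptrSize:
		return "unrolled MOVV stores"
	case cnt <= 128*ptrSize:
		return "DUFFZERO call"
	default:
		return "store loop"
	}
}

func main() {
	for _, cnt := range []int64{0, 16, 512, 4096} {
		fmt.Printf("cnt=%d: %s\n", cnt, zeroStrategy(cnt))
	}
}
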
diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go
index 8193b4e..e7298bd 100644
--- a/src/cmd/compile/internal/loong64/ssa.go
+++ b/src/cmd/compile/internal/loong64/ssa.go
@@ -10,6 +10,7 @@
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
@@ -80,6 +81,28 @@
 	panic("bad store type")
 }
 
+// largestMove returns the largest move instruction possible and its size,
+// given the alignment of the total size of the move.
+//
+// e.g., a 16-byte move may use MOVV, but an 11-byte move must use MOVB.
+//
+// Note that the moves may not be on naturally aligned addresses depending on
+// the source and destination.
+//
+// This matches the calculation in ssa.moveSize.
+func largestMove(alignment int64) (obj.As, int64) {
+	switch {
+	case alignment%8 == 0:
+		return loong64.AMOVV, 8
+	case alignment%4 == 0:
+		return loong64.AMOVW, 4
+	case alignment%2 == 0:
+		return loong64.AMOVH, 2
+	default:
+		return loong64.AMOVB, 1
+	}
+}
+
 func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpCopy, ssa.OpLOONG64MOVVreg:
@@ -122,6 +145,18 @@
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = r
 		ssagen.AddrAuto(&p.To, v)
+	case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+		// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill.
+		// The loop only runs once.
+		for _, a := range v.Block.Func.RegArgs {
+			// Pass the spill/unspill information along to the assembler, offset by size of
+			// the saved LR slot.
+			addr := ssagen.SpillSlotAddr(a, loong64.REGSP, base.Ctxt.Arch.FixedFrameSize)
+			s.FuncInfo().AddSpill(
+				obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type, a.Reg), Spill: storeByType(a.Type, a.Reg)})
+		}
+		v.Block.Func.RegArgs = nil
+		ssagen.CheckArgReg(v)
 	case ssa.OpLOONG64ADDV,
 		ssa.OpLOONG64SUBV,
 		ssa.OpLOONG64AND,
@@ -340,62 +375,36 @@
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpLOONG64DUFFZERO:
-		// runtime.duffzero expects start address - 8 in R19
-		p := s.Prog(loong64.ASUBVU)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 8
-		p.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = loong64.REG_R19
-		p = s.Prog(obj.ADUFFZERO)
+		// runtime.duffzero expects start address in R20
+		p := s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = v.AuxInt
 	case ssa.OpLOONG64LoweredZero:
-		// SUBV	$8, R19
-		// MOVV	R0, 8(R19)
-		// ADDV	$8, R19
-		// BNE	Rarg1, R19, -2(PC)
-		// arg1 is the address of the last element to zero
-		var sz int64
-		var mov obj.As
-		switch {
-		case v.AuxInt%8 == 0:
-			sz = 8
-			mov = loong64.AMOVV
-		case v.AuxInt%4 == 0:
-			sz = 4
-			mov = loong64.AMOVW
-		case v.AuxInt%2 == 0:
-			sz = 2
-			mov = loong64.AMOVH
-		default:
-			sz = 1
-			mov = loong64.AMOVB
-		}
-		p := s.Prog(loong64.ASUBVU)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = sz
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = loong64.REG_R19
-		p2 := s.Prog(mov)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = loong64.REGZERO
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = loong64.REG_R19
-		p2.To.Offset = sz
-		p3 := s.Prog(loong64.AADDVU)
-		p3.From.Type = obj.TYPE_CONST
-		p3.From.Offset = sz
-		p3.To.Type = obj.TYPE_REG
-		p3.To.Reg = loong64.REG_R19
-		p4 := s.Prog(loong64.ABNE)
-		p4.From.Type = obj.TYPE_REG
-		p4.From.Reg = v.Args[1].Reg()
-		p4.Reg = loong64.REG_R19
-		p4.To.Type = obj.TYPE_BRANCH
-		p4.To.SetTarget(p2)
+		// MOVx	R0, (Rarg0)
+		// ADDV	$sz, Rarg0
+		// BGEU	Rarg1, Rarg0, -2(PC)
+		mov, sz := largestMove(v.AuxInt)
+		p := s.Prog(mov)
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = loong64.REGZERO
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = v.Args[0].Reg()
+
+		p2 := s.Prog(loong64.AADDVU)
+		p2.From.Type = obj.TYPE_CONST
+		p2.From.Offset = sz
+		p2.To.Type = obj.TYPE_REG
+		p2.To.Reg = v.Args[0].Reg()
+
+		p3 := s.Prog(loong64.ABGEU)
+		p3.From.Type = obj.TYPE_REG
+		p3.From.Reg = v.Args[1].Reg()
+		p3.Reg = v.Args[0].Reg()
+		p3.To.Type = obj.TYPE_BRANCH
+		p3.To.SetTarget(p)
+
 	case ssa.OpLOONG64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_MEM
@@ -403,61 +412,43 @@
 		p.To.Sym = ir.Syms.Duffcopy
 		p.To.Offset = v.AuxInt
 	case ssa.OpLOONG64LoweredMove:
-		// SUBV	$8, R19
-		// MOVV	8(R19), Rtmp
-		// MOVV	Rtmp, (R4)
-		// ADDV	$8, R19
-		// ADDV	$8, R4
-		// BNE	Rarg2, R19, -4(PC)
-		// arg2 is the address of the last element of src
-		var sz int64
-		var mov obj.As
-		switch {
-		case v.AuxInt%8 == 0:
-			sz = 8
-			mov = loong64.AMOVV
-		case v.AuxInt%4 == 0:
-			sz = 4
-			mov = loong64.AMOVW
-		case v.AuxInt%2 == 0:
-			sz = 2
-			mov = loong64.AMOVH
-		default:
-			sz = 1
-			mov = loong64.AMOVB
-		}
-		p := s.Prog(loong64.ASUBVU)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = sz
+		// MOVx	(Rarg1), Rtmp
+		// MOVx	Rtmp, (Rarg0)
+		// ADDV	$sz, Rarg1
+		// ADDV	$sz, Rarg0
+		// BGEU	Rarg2, Rarg0, -4(PC)
+		mov, sz := largestMove(v.AuxInt)
+		p := s.Prog(mov)
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_REG
-		p.To.Reg = loong64.REG_R19
+		p.To.Reg = loong64.REGTMP
+
 		p2 := s.Prog(mov)
-		p2.From.Type = obj.TYPE_MEM
-		p2.From.Reg = loong64.REG_R19
-		p2.From.Offset = sz
-		p2.To.Type = obj.TYPE_REG
-		p2.To.Reg = loong64.REGTMP
-		p3 := s.Prog(mov)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = loong64.REGTMP
-		p3.To.Type = obj.TYPE_MEM
-		p3.To.Reg = loong64.REG_R4
+		p2.From.Type = obj.TYPE_REG
+		p2.From.Reg = loong64.REGTMP
+		p2.To.Type = obj.TYPE_MEM
+		p2.To.Reg = v.Args[0].Reg()
+
+		p3 := s.Prog(loong64.AADDVU)
+		p3.From.Type = obj.TYPE_CONST
+		p3.From.Offset = sz
+		p3.To.Type = obj.TYPE_REG
+		p3.To.Reg = v.Args[1].Reg()
+
 		p4 := s.Prog(loong64.AADDVU)
 		p4.From.Type = obj.TYPE_CONST
 		p4.From.Offset = sz
 		p4.To.Type = obj.TYPE_REG
-		p4.To.Reg = loong64.REG_R19
-		p5 := s.Prog(loong64.AADDVU)
-		p5.From.Type = obj.TYPE_CONST
-		p5.From.Offset = sz
-		p5.To.Type = obj.TYPE_REG
-		p5.To.Reg = loong64.REG_R4
-		p6 := s.Prog(loong64.ABNE)
-		p6.From.Type = obj.TYPE_REG
-		p6.From.Reg = v.Args[2].Reg()
-		p6.Reg = loong64.REG_R19
-		p6.To.Type = obj.TYPE_BRANCH
-		p6.To.SetTarget(p2)
+		p4.To.Reg = v.Args[0].Reg()
+
+		p5 := s.Prog(loong64.ABGEU)
+		p5.From.Type = obj.TYPE_REG
+		p5.From.Reg = v.Args[2].Reg()
+		p5.Reg = v.Args[1].Reg()
+		p5.To.Type = obj.TYPE_BRANCH
+		p5.To.SetTarget(p)
+
 	case ssa.OpLOONG64CALLstatic, ssa.OpLOONG64CALLclosure, ssa.OpLOONG64CALLinter:
 		s.Call(v)
 	case ssa.OpLOONG64CALLtail:
@@ -818,3 +809,22 @@
 		b.Fatalf("branch not implemented: %s", b.LongString())
 	}
 }
+
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+	p := s.Prog(loadByType(t, reg))
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_AUTO
+	p.From.Sym = n.Linksym()
+	p.From.Offset = n.FrameOffset() + off
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = reg
+	return p
+}
+
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+	p = pp.Append(p, storeByType(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+	p.To.Name = obj.NAME_PARAM
+	p.To.Sym = n.Linksym()
+	p.Pos = p.Pos.WithNotStmt()
+	return p
+}
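
The new largestMove helper depends only on the alignment, so its selection logic can be reproduced as a standalone sketch (with strings standing in for the loong64 opcodes):

package main

import "fmt"

// largestMove picks the widest move usable for a block of the given
// alignment, matching the switch in the helper added above.
func largestMove(alignment int64) (op string, size int64) {
	switch {
	case alignment%8 == 0:
		return "MOVV", 8
	case alignment%4 == 0:
		return "MOVW", 4
	case alignment%2 == 0:
		return "MOVH", 2
	default:
		return "MOVB", 1
	}
}

func main() {
	for _, a := range []int64{16, 11, 6, 4} {
		op, sz := largestMove(a)
		fmt.Printf("alignment %2d -> %s (%d bytes)\n", a, op, sz)
	}
}
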
diff --git a/src/cmd/compile/internal/loopvar/loopvar.go b/src/cmd/compile/internal/loopvar/loopvar.go
index 43f081c..030fc04 100644
--- a/src/cmd/compile/internal/loopvar/loopvar.go
+++ b/src/cmd/compile/internal/loopvar/loopvar.go
@@ -107,7 +107,7 @@
 				if base.LoopVarHash.MatchPos(n.Pos(), desc) {
 					// Rename the loop key, prefix body with assignment from loop key
 					transformed = append(transformed, VarAndLoop{n, x, lastPos})
-					tk := typecheck.Temp(n.Type())
+					tk := typecheck.TempAt(base.Pos, fn, n.Type())
 					tk.SetTypecheck(1)
 					as := ir.NewAssignStmt(x.Pos(), n, tk)
 					as.Def = true
@@ -298,7 +298,7 @@
 					for _, z := range leaked {
 						transformed = append(transformed, VarAndLoop{z, x, lastPos})
 
-						tz := typecheck.Temp(z.Type())
+						tz := typecheck.TempAt(base.Pos, fn, z.Type())
 						tz.SetTypecheck(1)
 						zPrimeForZ[z] = tz
 
@@ -355,26 +355,17 @@
 					})
 
 					postNotNil := x.Post != nil
-					var tmpFirstDcl *ir.AssignStmt
+					var tmpFirstDcl ir.Node
 					if postNotNil {
 						// body' = prebody +
 						// (6)     if tmp_first {tmp_first = false} else {Post} +
 						//         if !cond {break} + ...
-						tmpFirst := typecheck.Temp(types.Types[types.TBOOL])
-
-						// tmpFirstAssign assigns val to tmpFirst
-						tmpFirstAssign := func(val bool) *ir.AssignStmt {
-							s := ir.NewAssignStmt(x.Pos(), tmpFirst, typecheck.OrigBool(tmpFirst, val))
-							s.SetTypecheck(1)
-							return s
-						}
-
-						tmpFirstDcl = tmpFirstAssign(true)
-						tmpFirstDcl.Def = true // also declares tmpFirst
-						tmpFirstSetFalse := tmpFirstAssign(false)
+						tmpFirst := typecheck.TempAt(base.Pos, fn, types.Types[types.TBOOL])
+						tmpFirstDcl = typecheck.Stmt(ir.NewAssignStmt(x.Pos(), tmpFirst, ir.NewBool(base.Pos, true)))
+						tmpFirstSetFalse := typecheck.Stmt(ir.NewAssignStmt(x.Pos(), tmpFirst, ir.NewBool(base.Pos, false)))
 						ifTmpFirst := ir.NewIfStmt(x.Pos(), tmpFirst, ir.Nodes{tmpFirstSetFalse}, ir.Nodes{x.Post})
-						ifTmpFirst.SetTypecheck(1)
-						preBody.Append(ifTmpFirst)
+						ifTmpFirst.PtrInit().Append(typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, tmpFirst))) // declares tmpFirst
+						preBody.Append(typecheck.Stmt(ifTmpFirst))
 					}
 
 					// body' = prebody +
@@ -496,8 +487,6 @@
 		switch x := n.(type) {
 		case *ir.Func:
 			x.Body = editNodes(x.Body)
-			x.Enter = editNodes(x.Enter)
-			x.Exit = editNodes(x.Exit)
 		case *ir.InlinedCallExpr:
 			x.Body = editNodes(x.Body)
 
@@ -605,7 +594,7 @@
 				// Intended to help with performance debugging, we record whole loop ranges
 				logopt.LogOptRange(pos, last, "loop-modified-"+loopKind, "loopvar", ir.FuncName(l.curfn))
 			}
-			if print && 3 <= base.Debug.LoopVar {
+			if print && 4 <= base.Debug.LoopVar {
 				// TODO decide if we want to keep this, or not.  It was helpful for validating logopt, otherwise, eh.
 				inner := base.Ctxt.InnermostPos(pos)
 				outer := base.Ctxt.OutermostPos(pos)
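
At the source level, the rewrite sketched in the comments above roughly corresponds to the hand-written illustration below (this is not what the compiler literally emits; it only shows the per-iteration copy plus the tmpFirst-guarded post statement):

package main

import "fmt"

// original collects &i from a plain 3-clause loop.
func original() []*int {
	var ps []*int
	for i := 0; i < 3; i++ {
		ps = append(ps, &i)
	}
	return ps
}

// rewritten approximates the transformation: each iteration gets a fresh
// copy of the loop variable, and the post statement runs only after the
// first pass, guarded by tmpFirst.
func rewritten() []*int {
	var ps []*int
	i := 0
	tmpFirst := true
	for {
		iPrime := i // per-iteration copy of the loop variable
		if tmpFirst {
			tmpFirst = false
		} else {
			iPrime++ // the original post statement
		}
		if !(iPrime < 3) {
			break
		}
		ps = append(ps, &iPrime)
		i = iPrime // copy back for the next iteration
	}
	return ps
}

func deref(ps []*int) []int {
	out := make([]int, 0, len(ps))
	for _, p := range ps {
		out = append(out, *p)
	}
	return out
}

func main() {
	// Under pre-1.22 (shared) semantics the first line would be [3 3 3];
	// with the per-iteration rewrite both print [0 1 2].
	fmt.Println(deref(original()))
	fmt.Println(deref(rewritten()))
}
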
diff --git a/src/cmd/compile/internal/loopvar/loopvar_test.go b/src/cmd/compile/internal/loopvar/loopvar_test.go
index 03e6eec..64cfdb7 100644
--- a/src/cmd/compile/internal/loopvar/loopvar_test.go
+++ b/src/cmd/compile/internal/loopvar/loopvar_test.go
@@ -51,7 +51,7 @@
 }
 
 // TestLoopVar checks that the GOEXPERIMENT and debug flags behave as expected.
-func TestLoopVar(t *testing.T) {
+func TestLoopVarGo1_21(t *testing.T) {
 	switch runtime.GOOS {
 	case "linux", "darwin":
 	default:
@@ -71,7 +71,7 @@
 	for i, tc := range cases {
 		for _, f := range tc.files {
 			source := f
-			cmd := testenv.Command(t, gocmd, "build", "-o", output, "-gcflags=-d=loopvar="+tc.lvFlag, source)
+			cmd := testenv.Command(t, gocmd, "build", "-o", output, "-gcflags=-lang=go1.21 -d=loopvar="+tc.lvFlag, source)
 			cmd.Env = append(cmd.Env, "GOEXPERIMENT=loopvar", "HOME="+tmpdir)
 			cmd.Dir = "testdata"
 			t.Logf("File %s loopvar=%s expect '%s' exit code %d", f, tc.lvFlag, tc.buildExpect, tc.expectRC)
@@ -103,7 +103,7 @@
 	}
 }
 
-func TestLoopVarInlines(t *testing.T) {
+func TestLoopVarInlinesGo1_21(t *testing.T) {
 	switch runtime.GOOS {
 	case "linux", "darwin":
 	default:
@@ -125,7 +125,7 @@
 		// This disables the loopvar change, except for the specified package.
 		// The effect should follow the package, even though everything (except "c")
 		// is inlined.
-		cmd := testenv.Command(t, gocmd, "run", "-gcflags="+pkg+"=-d=loopvar=1", root)
+		cmd := testenv.Command(t, gocmd, "run", "-gcflags="+root+"/...=-lang=go1.21", "-gcflags="+pkg+"=-d=loopvar=1", root)
 		cmd.Env = append(cmd.Env, "GOEXPERIMENT=noloopvar", "HOME="+tmpdir)
 		cmd.Dir = filepath.Join("testdata", "inlines")
 
@@ -166,6 +166,7 @@
 }
 
 func TestLoopVarHashes(t *testing.T) {
+	// This behavior does not depend on Go version (1.21 or greater)
 	switch runtime.GOOS {
 	case "linux", "darwin":
 	default:
@@ -187,7 +188,7 @@
 		// This disables the loopvar change, except for the specified hash pattern.
 		// -trimpath is necessary so we get the same answer no matter where the
 		// Go repository is checked out. This is not normally a concern since people
-		// do not rely on the meaning of specific hashes.
+		// do not normally rely on the meaning of specific hashes.
 		cmd := testenv.Command(t, gocmd, "run", "-trimpath", root)
 		cmd.Env = append(cmd.Env, "GOCOMPILEDEBUG=loopvarhash="+hash, "HOME="+tmpdir)
 		cmd.Dir = filepath.Join("testdata", "inlines")
@@ -225,7 +226,8 @@
 	}
 }
 
-func TestLoopVarOpt(t *testing.T) {
+// TestLoopVarVersionEnableFlag checks for loopvar transformation enabled by command line flag (1.22).
+func TestLoopVarVersionEnableFlag(t *testing.T) {
 	switch runtime.GOOS {
 	case "linux", "darwin":
 	default:
@@ -240,7 +242,8 @@
 	testenv.MustHaveGoBuild(t)
 	gocmd := testenv.GoToolPath(t)
 
-	cmd := testenv.Command(t, gocmd, "run", "-gcflags=-d=loopvar=2", "opt.go")
+	// loopvar=3 logs info but does not change loopvarness
+	cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.22 -d=loopvar=3", "opt.go")
 	cmd.Dir = filepath.Join("testdata")
 
 	b, err := cmd.CombinedOutput()
@@ -248,7 +251,7 @@
 
 	t.Logf(m)
 
-	yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:30)")
+	yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:29)")
 	nCount := strings.Count(m, "shared")
 
 	if yCount != 1 {
@@ -260,5 +263,121 @@
 	if err != nil {
 		t.Errorf("err=%v != nil", err)
 	}
+}
 
+// TestLoopVarVersionEnableGoBuild checks for loopvar transformation enabled by go:build version (1.22).
+func TestLoopVarVersionEnableGoBuild(t *testing.T) {
+	switch runtime.GOOS {
+	case "linux", "darwin":
+	default:
+		t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+	}
+	switch runtime.GOARCH {
+	case "amd64", "arm64":
+	default:
+		t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+	}
+
+	testenv.MustHaveGoBuild(t)
+	gocmd := testenv.GoToolPath(t)
+
+	// loopvar=3 logs info but does not change loopvarness
+	cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.21 -d=loopvar=3", "opt-122.go")
+	cmd.Dir = filepath.Join("testdata")
+
+	b, err := cmd.CombinedOutput()
+	m := string(b)
+
+	t.Logf(m)
+
+	yCount := strings.Count(m, "opt-122.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-122.go:31)")
+	nCount := strings.Count(m, "shared")
+
+	if yCount != 1 {
+		t.Errorf("yCount=%d != 1", yCount)
+	}
+	if nCount > 0 {
+		t.Errorf("nCount=%d > 0", nCount)
+	}
+	if err != nil {
+		t.Errorf("err=%v != nil", err)
+	}
+}
+
+// TestLoopVarVersionDisableFlag checks for loopvar transformation DISABLED by command line version (1.21).
+func TestLoopVarVersionDisableFlag(t *testing.T) {
+	switch runtime.GOOS {
+	case "linux", "darwin":
+	default:
+		t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+	}
+	switch runtime.GOARCH {
+	case "amd64", "arm64":
+	default:
+		t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+	}
+
+	testenv.MustHaveGoBuild(t)
+	gocmd := testenv.GoToolPath(t)
+
+	// loopvar=3 logs info but does not change loopvarness
+	cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.21 -d=loopvar=3", "opt.go")
+	cmd.Dir = filepath.Join("testdata")
+
+	b, err := cmd.CombinedOutput()
+	m := string(b)
+
+	t.Logf(m) // expect error
+
+	yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:29)")
+	nCount := strings.Count(m, "shared")
+
+	if yCount != 0 {
+		t.Errorf("yCount=%d != 0", yCount)
+	}
+	if nCount > 0 {
+		t.Errorf("nCount=%d > 0", nCount)
+	}
+	if err == nil { // expect error
+		t.Errorf("err=%v == nil", err)
+	}
+}
+
+// TestLoopVarVersionDisableGoBuild checks for loopvar transformation DISABLED by go:build version (1.21).
+func TestLoopVarVersionDisableGoBuild(t *testing.T) {
+	switch runtime.GOOS {
+	case "linux", "darwin":
+	default:
+		t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+	}
+	switch runtime.GOARCH {
+	case "amd64", "arm64":
+	default:
+		t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+	}
+
+	testenv.MustHaveGoBuild(t)
+	gocmd := testenv.GoToolPath(t)
+
+	// loopvar=3 logs info but does not change loopvarness
+	cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.22 -d=loopvar=3", "opt-121.go")
+	cmd.Dir = filepath.Join("testdata")
+
+	b, err := cmd.CombinedOutput()
+	m := string(b)
+
+	t.Logf(m) // expect error
+
+	yCount := strings.Count(m, "opt-121.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-121.go:31)")
+	nCount := strings.Count(m, "shared")
+
+	if yCount != 0 {
+		t.Errorf("yCount=%d != 0", yCount)
+	}
+	if nCount > 0 {
+		t.Errorf("nCount=%d > 0", nCount)
+	}
+	if err == nil { // expect error
+		t.Errorf("err=%v == nil", err)
+	}
 }

diff --git a/src/cmd/compile/internal/loopvar/testdata/opt-121.go b/src/cmd/compile/internal/loopvar/testdata/opt-121.go
new file mode 100644
index 0000000..4afb658
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/opt-121.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+var is []func() int
+
+func inline(j, k int) []*int {
+	var a []*int
+	for private := j; private < k; private++ {
+		a = append(a, &private)
+	}
+	return a
+}
+
+//go:noinline
+func notinline(j, k int) ([]*int, *int) {
+	for shared := j; shared < k; shared++ {
+		if shared == k/2 {
+			// want the call inlined, want "private" in that inline to be transformed,
+			// (believe it ends up on init node of the return).
+			// but do not want "shared" transformed,
+			return inline(j, k), &shared
+		}
+	}
+	return nil, &j
+}
+
+func main() {
+	a, p := notinline(2, 9)
+	fmt.Printf("a[0]=%d,*p=%d\n", *a[0], *p)
+	if *a[0] != 2 {
+		os.Exit(1)
+	}
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/opt-122.go b/src/cmd/compile/internal/loopvar/testdata/opt-122.go
new file mode 100644
index 0000000..9dceab9
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/opt-122.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+var is []func() int
+
+func inline(j, k int) []*int {
+	var a []*int
+	for private := j; private < k; private++ {
+		a = append(a, &private)
+	}
+	return a
+}
+
+//go:noinline
+func notinline(j, k int) ([]*int, *int) {
+	for shared := j; shared < k; shared++ {
+		if shared == k/2 {
+			// want the call inlined, want "private" in that inline to be transformed,
+			// (believe it ends up on init node of the return).
+			// but do not want "shared" transformed,
+			return inline(j, k), &shared
+		}
+	}
+	return nil, &j
+}
+
+func main() {
+	a, p := notinline(2, 9)
+	fmt.Printf("a[0]=%d,*p=%d\n", *a[0], *p)
+	if *a[0] != 2 {
+		os.Exit(1)
+	}
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/opt.go b/src/cmd/compile/internal/loopvar/testdata/opt.go
index 1bcd736..82c8616 100644
--- a/src/cmd/compile/internal/loopvar/testdata/opt.go
+++ b/src/cmd/compile/internal/loopvar/testdata/opt.go
@@ -17,7 +17,6 @@
 		a = append(a, &private)
 	}
 	return a
-
 }
 
 //go:noinline
diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go
index a18440e..e235ef9 100644
--- a/src/cmd/compile/internal/mips/ggen.go
+++ b/src/cmd/compile/internal/mips/ggen.go
@@ -46,10 +46,6 @@
 }
 
 func ginsnop(pp *objw.Progs) *obj.Prog {
-	p := pp.Prog(mips.ANOR)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = mips.REG_R0
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = mips.REG_R0
+	p := pp.Prog(mips.ANOOP)
 	return p
 }
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
index 37bb871..5f3f3e6 100644
--- a/src/cmd/compile/internal/mips64/ggen.go
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -50,10 +50,6 @@
 }
 
 func ginsnop(pp *objw.Progs) *obj.Prog {
-	p := pp.Prog(mips.ANOR)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = mips.REG_R0
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = mips.REG_R0
+	p := pp.Prog(mips.ANOOP)
 	return p
 }
diff --git a/src/cmd/compile/internal/noder/codes.go b/src/cmd/compile/internal/noder/codes.go
index c1ee8d1..8bdbfc9 100644
--- a/src/cmd/compile/internal/noder/codes.go
+++ b/src/cmd/compile/internal/noder/codes.go
@@ -55,10 +55,14 @@
 	exprConvert
 	exprNew
 	exprMake
-	exprNil
+	exprSizeof
+	exprAlignof
+	exprOffsetof
+	exprZero
 	exprFuncInst
 	exprRecv
 	exprReshape
+	exprRuntimeBuiltin // a reference to a runtime function from transformed syntax. Followed by string name, e.g., "panicrangeexit"
 )
 
 type codeAssign int
diff --git a/src/cmd/compile/internal/noder/decl.go b/src/cmd/compile/internal/noder/decl.go
deleted file mode 100644
index 8e23fce..0000000
--- a/src/cmd/compile/internal/noder/decl.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noder
-
-import (
-	"cmd/compile/internal/syntax"
-	"cmd/compile/internal/types2"
-)
-
-// pkgNameOf returns the PkgName associated with the given ImportDecl.
-func pkgNameOf(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName {
-	if name := decl.LocalPkgName; name != nil {
-		return info.Defs[name].(*types2.PkgName)
-	}
-	return info.Implicits[decl].(*types2.PkgName)
-}
diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go
deleted file mode 100644
index 51b0656..0000000
--- a/src/cmd/compile/internal/noder/expr.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noder
-
-import (
-	"fmt"
-
-	"cmd/compile/internal/ir"
-	"cmd/compile/internal/syntax"
-)
-
-func unpackListExpr(expr syntax.Expr) []syntax.Expr {
-	switch expr := expr.(type) {
-	case nil:
-		return nil
-	case *syntax.ListExpr:
-		return expr.ElemList
-	default:
-		return []syntax.Expr{expr}
-	}
-}
-
-// constExprOp returns an ir.Op that represents the outermost
-// operation of the given constant expression. It's intended for use
-// with ir.RawOrigExpr.
-func constExprOp(expr syntax.Expr) ir.Op {
-	switch expr := expr.(type) {
-	default:
-		panic(fmt.Sprintf("%s: unexpected expression: %T", expr.Pos(), expr))
-
-	case *syntax.BasicLit:
-		return ir.OLITERAL
-	case *syntax.Name, *syntax.SelectorExpr:
-		return ir.ONAME
-	case *syntax.CallExpr:
-		return ir.OCALL
-	case *syntax.Operation:
-		if expr.Y == nil {
-			return unOps[expr.Op]
-		}
-		return binOps[expr.Op]
-	}
-}
-
-func unparen(expr syntax.Expr) syntax.Expr {
-	for {
-		paren, ok := expr.(*syntax.ParenExpr)
-		if !ok {
-			return expr
-		}
-		expr = paren.X
-	}
-}
diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go
index ff2d50f..0bff71e 100644
--- a/src/cmd/compile/internal/noder/helpers.go
+++ b/src/cmd/compile/internal/noder/helpers.go
@@ -7,7 +7,6 @@
 import (
 	"go/constant"
 
-	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/syntax"
 	"cmd/compile/internal/typecheck"
@@ -41,11 +40,6 @@
 
 // Values
 
-func OrigConst(pos src.XPos, typ *types.Type, val constant.Value, op ir.Op, raw string) ir.Node {
-	orig := ir.NewRawOrigExpr(pos, op, raw)
-	return ir.NewConstExpr(val, typed(typ, orig))
-}
-
 // FixValue returns val after converting and truncating it as
 // appropriate for typ.
 func FixValue(typ *types.Type, val constant.Value) constant.Value {
@@ -59,16 +53,12 @@
 		val = constant.ToComplex(val)
 	}
 	if !typ.IsUntyped() {
-		val = typecheck.DefaultLit(ir.NewBasicLit(src.NoXPos, val), typ).Val()
+		val = typecheck.ConvertVal(val, typ, false)
 	}
 	ir.AssertValidTypeForConst(typ, val)
 	return val
 }
 
-func Nil(pos src.XPos, typ *types.Type) ir.Node {
-	return typed(typ, ir.NewNilExpr(pos))
-}
-
 // Expressions
 
 func Addr(pos src.XPos, x ir.Node) *ir.AddrExpr {
@@ -77,165 +67,40 @@
 	return n
 }
 
-func Assert(pos src.XPos, x ir.Node, typ *types.Type) ir.Node {
-	return typed(typ, ir.NewTypeAssertExpr(pos, x, nil))
-}
-
-func Binary(pos src.XPos, op ir.Op, typ *types.Type, x, y ir.Node) *ir.BinaryExpr {
-	switch op {
-	case ir.OADD:
-		n := ir.NewBinaryExpr(pos, op, x, y)
-		typed(typ, n)
-		return n
-	default:
-		n := ir.NewBinaryExpr(pos, op, x, y)
-		typed(x.Type(), n)
-		return n
-	}
-}
-
-func Compare(pos src.XPos, typ *types.Type, op ir.Op, x, y ir.Node) *ir.BinaryExpr {
-	n := ir.NewBinaryExpr(pos, op, x, y)
-	typed(typ, n)
-	return n
-}
-
 func Deref(pos src.XPos, typ *types.Type, x ir.Node) *ir.StarExpr {
 	n := ir.NewStarExpr(pos, x)
 	typed(typ, n)
 	return n
 }
 
-func DotField(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
-	op, typ := ir.ODOT, x.Type()
-	if typ.IsPtr() {
-		op, typ = ir.ODOTPTR, typ.Elem()
-	}
-	if !typ.IsStruct() {
-		base.FatalfAt(pos, "DotField of non-struct: %L", x)
-	}
-
-	// TODO(mdempsky): This is the backend's responsibility.
-	types.CalcSize(typ)
-
-	field := typ.Field(index)
-	return dot(pos, field.Type, op, x, field)
-}
-
-func DotMethod(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
-	method := method(x.Type(), index)
-
-	// Method value.
-	typ := typecheck.NewMethodType(method.Type, nil)
-	return dot(pos, typ, ir.OMETHVALUE, x, method)
-}
-
-// MethodExpr returns a OMETHEXPR node with the indicated index into the methods
-// of typ. The receiver type is set from recv, which is different from typ if the
-// method was accessed via embedded fields. Similarly, the X value of the
-// ir.SelectorExpr is recv, the original OTYPE node before passing through the
-// embedded fields.
-func MethodExpr(pos src.XPos, recv ir.Node, embed *types.Type, index int) *ir.SelectorExpr {
-	method := method(embed, index)
-	typ := typecheck.NewMethodType(method.Type, recv.Type())
-	// The method expression T.m requires a wrapper when T
-	// is different from m's declared receiver type. We
-	// normally generate these wrappers while writing out
-	// runtime type descriptors, which is always done for
-	// types declared at package scope. However, we need
-	// to make sure to generate wrappers for anonymous
-	// receiver types too.
-	if recv.Sym() == nil {
-		typecheck.NeedRuntimeType(recv.Type())
-	}
-	return dot(pos, typ, ir.OMETHEXPR, recv, method)
-}
-
-func dot(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node, selection *types.Field) *ir.SelectorExpr {
-	n := ir.NewSelectorExpr(pos, op, x, selection.Sym)
-	n.Selection = selection
-	typed(typ, n)
-	return n
-}
-
-// TODO(mdempsky): Move to package types.
-func method(typ *types.Type, index int) *types.Field {
-	if typ.IsInterface() {
-		return typ.AllMethods().Index(index)
-	}
-	return types.ReceiverBaseType(typ).Methods().Index(index)
-}
-
-func Index(pos src.XPos, typ *types.Type, x, index ir.Node) *ir.IndexExpr {
-	n := ir.NewIndexExpr(pos, x, index)
-	typed(typ, n)
-	return n
-}
-
-func Slice(pos src.XPos, typ *types.Type, x, low, high, max ir.Node) *ir.SliceExpr {
-	op := ir.OSLICE
-	if max != nil {
-		op = ir.OSLICE3
-	}
-	n := ir.NewSliceExpr(pos, op, x, low, high, max)
-	typed(typ, n)
-	return n
-}
-
-func Unary(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node) ir.Node {
-	switch op {
-	case ir.OADDR:
-		return Addr(pos, x)
-	case ir.ODEREF:
-		return Deref(pos, typ, x)
-	}
-
-	if op == ir.ORECV {
-		if typ.IsFuncArgStruct() && typ.NumFields() == 2 {
-			// Remove the second boolean type (if provided by type2),
-			// since that works better with the rest of the compiler
-			// (which will add it back in later).
-			assert(typ.Field(1).Type.Kind() == types.TBOOL)
-			typ = typ.Field(0).Type
-		}
-	}
-	return typed(typ, ir.NewUnaryExpr(pos, op, x))
-}
-
 // Statements
 
-var one = constant.MakeInt64(1)
-
-func IncDec(pos src.XPos, op ir.Op, x ir.Node) *ir.AssignOpStmt {
-	assert(x.Type() != nil)
-	bl := ir.NewBasicLit(pos, one)
-	bl = typecheck.DefaultLit(bl, x.Type())
-	return ir.NewAssignOpStmt(pos, op, x, bl)
-}
-
 func idealType(tv syntax.TypeAndValue) types2.Type {
 	// The gc backend expects all expressions to have a concrete type, and
 	// types2 mostly satisfies this expectation already. But there are a few
 	// cases where the Go spec doesn't require converting to concrete type,
 	// and so types2 leaves them untyped. So we need to fix those up here.
-	typ := tv.Type
+	typ := types2.Unalias(tv.Type)
 	if basic, ok := typ.(*types2.Basic); ok && basic.Info()&types2.IsUntyped != 0 {
 		switch basic.Kind() {
 		case types2.UntypedNil:
 			// ok; can appear in type switch case clauses
 			// TODO(mdempsky): Handle as part of type switches instead?
 		case types2.UntypedInt, types2.UntypedFloat, types2.UntypedComplex:
-			// Untyped rhs of non-constant shift, e.g. x << 1.0.
-			// If we have a constant value, it must be an int >= 0.
+			typ = types2.Typ[types2.Uint]
 			if tv.Value != nil {
 				s := constant.ToInt(tv.Value)
-				assert(s.Kind() == constant.Int && constant.Sign(s) >= 0)
+				assert(s.Kind() == constant.Int)
+				if constant.Sign(s) < 0 {
+					typ = types2.Typ[types2.Int]
+				}
 			}
-			typ = types2.Typ[types2.Uint]
 		case types2.UntypedBool:
 			typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition
 		case types2.UntypedString:
 			typ = types2.Typ[types2.String] // argument to "append" or "copy" calls
+		case types2.UntypedRune:
+			typ = types2.Typ[types2.Int32] // range over rune
 		default:
 			return nil
 		}
@@ -244,13 +109,14 @@
 }
 
 func isTypeParam(t types2.Type) bool {
-	_, ok := t.(*types2.TypeParam)
+	_, ok := types2.Unalias(t).(*types2.TypeParam)
 	return ok
 }
 
 // isNotInHeap reports whether typ is or contains an element of type
 // runtime/internal/sys.NotInHeap.
 func isNotInHeap(typ types2.Type) bool {
+	typ = types2.Unalias(typ)
 	if named, ok := typ.(*types2.Named); ok {
 		if obj := named.Obj(); obj.Name() == "nih" && obj.Pkg().Path() == "runtime/internal/sys" {
 			return true
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
index b7008ac..e9bb1e3 100644
--- a/src/cmd/compile/internal/noder/import.go
+++ b/src/cmd/compile/internal/noder/import.go
@@ -133,7 +133,10 @@
 		return "", errors.New("cannot import \"main\"")
 	}
 
-	if base.Ctxt.Pkgpath != "" && path == base.Ctxt.Pkgpath {
+	if base.Ctxt.Pkgpath == "" {
+		panic("missing pkgpath")
+	}
+	if path == base.Ctxt.Pkgpath {
 		return "", fmt.Errorf("import %q while compiling that package (import cycle)", path)
 	}
 
diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go
index df5de63..e0b7bb9 100644
--- a/src/cmd/compile/internal/noder/irgen.go
+++ b/src/cmd/compile/internal/noder/irgen.go
@@ -6,11 +6,13 @@
 
 import (
 	"fmt"
+	"internal/buildcfg"
 	"internal/types/errors"
 	"regexp"
 	"sort"
 
 	"cmd/compile/internal/base"
+	"cmd/compile/internal/rangefunc"
 	"cmd/compile/internal/syntax"
 	"cmd/compile/internal/types2"
 	"cmd/internal/src"
@@ -27,8 +29,12 @@
 
 	// setup and syntax error reporting
 	files := make([]*syntax.File, len(noders))
+	// posBaseMap maps all file pos bases back to *syntax.File
+	// for checking Go version mismatches.
+	posBaseMap := make(map[*syntax.PosBase]*syntax.File)
 	for i, p := range noders {
 		files[i] = p.file
+		posBaseMap[p.file.Pos().Base()] = p.file
 	}
 
 	// typechecking
@@ -41,17 +47,8 @@
 		Context:            ctxt,
 		GoVersion:          base.Flag.Lang,
 		IgnoreBranchErrors: true, // parser already checked via syntax.CheckBranches mode
-		Error: func(err error) {
-			terr := err.(types2.Error)
-			msg := terr.Msg
-			// if we have a version error, hint at the -lang setting
-			if versionErrorRx.MatchString(msg) {
-				msg = fmt.Sprintf("%s (-lang was set to %s; check go.mod)", msg, base.Flag.Lang)
-			}
-			base.ErrorfAt(m.makeXPos(terr.Pos), terr.Code, "%s", msg)
-		},
-		Importer: &importer,
-		Sizes:    &gcSizes{},
+		Importer:           &importer,
+		Sizes:              types2.SizesFor("gc", buildcfg.GOARCH),
 	}
 	if base.Flag.ErrorURL {
 		conf.ErrorURL = " [go.dev/e/%s]"
@@ -64,30 +61,55 @@
 		Implicits:          make(map[syntax.Node]types2.Object),
 		Scopes:             make(map[syntax.Node]*types2.Scope),
 		Instances:          make(map[*syntax.Name]types2.Instance),
+		FileVersions:       make(map[*syntax.PosBase]string),
 		// expand as needed
 	}
+	conf.Error = func(err error) {
+		terr := err.(types2.Error)
+		msg := terr.Msg
+		if versionErrorRx.MatchString(msg) {
+			posBase := terr.Pos.Base()
+			for !posBase.IsFileBase() { // line directive base
+				posBase = posBase.Pos().Base()
+			}
+			fileVersion := info.FileVersions[posBase]
+			file := posBaseMap[posBase]
+			if file.GoVersion == fileVersion {
+				// If we have a version error caused by //go:build, report it.
+				msg = fmt.Sprintf("%s (file declares //go:build %s)", msg, fileVersion)
+			} else {
+				// Otherwise, hint at the -lang setting.
+				msg = fmt.Sprintf("%s (-lang was set to %s; check go.mod)", msg, base.Flag.Lang)
+			}
+		}
+		base.ErrorfAt(m.makeXPos(terr.Pos), terr.Code, "%s", msg)
+	}
 
 	pkg, err := conf.Check(base.Ctxt.Pkgpath, files, info)
+	base.ExitIfErrors()
+	if err != nil {
+		base.FatalfAt(src.NoXPos, "conf.Check error: %v", err)
+	}
 
 	// Check for anonymous interface cycles (#56103).
-	if base.Debug.InterfaceCycles == 0 {
-		var f cycleFinder
-		for _, file := range files {
-			syntax.Inspect(file, func(n syntax.Node) bool {
-				if n, ok := n.(*syntax.InterfaceType); ok {
-					if f.hasCycle(n.GetTypeInfo().Type.(*types2.Interface)) {
-						base.ErrorfAt(m.makeXPos(n.Pos()), errors.InvalidTypeCycle, "invalid recursive type: anonymous interface refers to itself (see https://go.dev/issue/56103)")
+	// TODO(gri) move this code into the type checkers (types2 and go/types)
+	var f cycleFinder
+	for _, file := range files {
+		syntax.Inspect(file, func(n syntax.Node) bool {
+			if n, ok := n.(*syntax.InterfaceType); ok {
+				if f.hasCycle(types2.Unalias(n.GetTypeInfo().Type).(*types2.Interface)) {
+					base.ErrorfAt(m.makeXPos(n.Pos()), errors.InvalidTypeCycle, "invalid recursive type: anonymous interface refers to itself (see https://go.dev/issue/56103)")
 
-						for typ := range f.cyclic {
-							f.cyclic[typ] = false // suppress duplicate errors
-						}
+					for typ := range f.cyclic {
+						f.cyclic[typ] = false // suppress duplicate errors
 					}
-					return false
 				}
-				return true
-			})
-		}
+				return false
+			}
+			return true
+		})
 	}
+	base.ExitIfErrors()
 
 	// Implementation restriction: we don't allow not-in-heap types to
 	// be used as type arguments (#54765).
@@ -113,11 +135,16 @@
 			base.ErrorfAt(targ.pos, 0, "cannot use incomplete (or unallocatable) type as a type argument: %v", targ.typ)
 		}
 	}
-
 	base.ExitIfErrors()
-	if err != nil {
-		base.FatalfAt(src.NoXPos, "conf.Check error: %v", err)
-	}
+
+	// Rewrite range over function to explicit function calls
+	// with the loop bodies converted into new implicit closures.
+	// We do this now, before serialization to unified IR, so that if the
+	// implicit closures are inlined, we will have the unified IR form.
+	// If we do the rewrite in the back end, like between typecheck and walk,
+	// then the new implicit closure will not have a unified IR inline body,
+	// and bodyReaderFor will fail.
+	rangefunc.Rewrite(pkg, info, files)
 
 	return pkg, info
 }
@@ -144,7 +171,7 @@
 // visit recursively walks typ0 to check any referenced interface types.
 func (f *cycleFinder) visit(typ0 types2.Type) bool {
 	for { // loop for tail recursion
-		switch typ := typ0.(type) {
+		switch typ := types2.Unalias(typ0).(type) {
 		default:
 			base.Fatalf("unexpected type: %T", typ)
 
diff --git a/src/cmd/compile/internal/noder/linker.go b/src/cmd/compile/internal/noder/linker.go
index 0efe6b6..f5667f5 100644
--- a/src/cmd/compile/internal/noder/linker.go
+++ b/src/cmd/compile/internal/noder/linker.go
@@ -201,7 +201,7 @@
 
 		if obj.Op() == ir.OTYPE && !obj.Alias() {
 			if typ := obj.Type(); !typ.IsInterface() {
-				for _, method := range typ.Methods().Slice() {
+				for _, method := range typ.Methods() {
 					l.exportBody(method.Nname.(*ir.Name), local)
 				}
 			}
@@ -233,7 +233,7 @@
 	//
 	// TODO(mdempsky): Reimplement the reachable method crawling logic
 	// from typecheck/crawler.go.
-	exportBody := local || fn.Inl.Body != nil
+	exportBody := local || fn.Inl.HaveDcl
 	if !exportBody {
 		return
 	}
@@ -289,15 +289,16 @@
 	w.Uint64(uint64(name.Func.ABI))
 
 	// Escape analysis.
-	for _, fs := range &types.RecvsParams {
-		for _, f := range fs(name.Type()).FieldSlice() {
-			w.String(f.Note)
-		}
+	for _, f := range name.Type().RecvParams() {
+		w.String(f.Note)
 	}
 
 	if inl := name.Func.Inl; w.Bool(inl != nil) {
 		w.Len(int(inl.Cost))
 		w.Bool(inl.CanDelayResults)
+		if buildcfg.Experiment.NewInliner {
+			w.String(inl.Properties)
+		}
 	}
 
 	w.Sync(pkgbits.SyncEOF)
@@ -315,7 +316,7 @@
 	l.lsymIdx(w, "", reflectdata.TypeLinksym(typ.PtrTo()))
 
 	if typ.Kind() != types.TINTER {
-		for _, method := range typ.Methods().Slice() {
+		for _, method := range typ.Methods() {
 			l.relocFuncExt(w, method.Nname.(*ir.Name))
 		}
 	}
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
index 9407158..1652dc6 100644
--- a/src/cmd/compile/internal/noder/noder.go
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -265,8 +265,7 @@
 			// user didn't provide one.
 			target = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." + f[1]
 		} else {
-			p.error(syntax.Error{Pos: pos, Msg: "//go:linkname requires linkname argument or -p compiler flag"})
-			break
+			panic("missing pkgpath")
 		}
 		p.linknames = append(p.linknames, linkname{pos, f[1], target})
 
diff --git a/src/cmd/compile/internal/noder/quirks.go b/src/cmd/compile/internal/noder/quirks.go
index a22577f..dd9cec9 100644
--- a/src/cmd/compile/internal/noder/quirks.go
+++ b/src/cmd/compile/internal/noder/quirks.go
@@ -62,7 +62,7 @@
 			}
 
 		case *syntax.IndexExpr: // explicit type instantiation
-			targs := unpackListExpr(expr.Index)
+			targs := syntax.UnpackListExpr(expr.Index)
 			expr0 = targs[len(targs)-1]
 
 		default:
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index 610d02c..2dddd20 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -5,6 +5,7 @@
 package noder
 
 import (
+	"encoding/hex"
 	"fmt"
 	"go/constant"
 	"internal/buildcfg"
@@ -13,15 +14,16 @@
 	"strings"
 
 	"cmd/compile/internal/base"
-	"cmd/compile/internal/deadcode"
 	"cmd/compile/internal/dwarfgen"
 	"cmd/compile/internal/inline"
+	"cmd/compile/internal/inline/interleaved"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/staticinit"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
+	"cmd/internal/notsha256"
 	"cmd/internal/obj"
 	"cmd/internal/objabi"
 	"cmd/internal/src"
@@ -107,6 +109,11 @@
 	locals      []*ir.Name
 	closureVars []*ir.Name
 
+	// funarghack is used during inlining to suppress setting
+	// Field.Nname to the inlined copies of the parameters. This is
+	// necessary because we reuse the same types.Type as the original
+	// function, and most of the compiler still relies on field.Nname to
+	// find parameters/results.
 	funarghack bool
 
 	// methodSym is the name of method's name, if reading a method.
@@ -145,14 +152,6 @@
 
 	// Label to return to.
 	retlabel *types.Sym
-
-	// inlvars is the list of variables that the inlinee's arguments are
-	// assigned to, one for each receiver and normal parameter, in order.
-	inlvars ir.Nodes
-
-	// retvars is the list of variables that the inlinee's results are
-	// assigned to, one for each result parameter, in order.
-	retvars ir.Nodes
 }
 
 // A readerDict represents an instantiated "compile-time dictionary,"
@@ -572,10 +571,7 @@
 	methods, embeddeds := fields[:nmethods], fields[nmethods:]
 
 	for i := range methods {
-		pos := r.pos()
-		_, sym := r.selector()
-		mtyp := r.signature(types.FakeRecv())
-		methods[i] = types.NewField(pos, sym, mtyp)
+		methods[i] = types.NewField(r.pos(), r.selector(), r.signature(types.FakeRecv()))
 	}
 	for i := range embeddeds {
 		embeddeds[i] = types.NewField(src.NoXPos, nil, r.typ())
@@ -590,18 +586,12 @@
 func (r *reader) structType() *types.Type {
 	fields := make([]*types.Field, r.Len())
 	for i := range fields {
-		pos := r.pos()
-		_, sym := r.selector()
-		ftyp := r.typ()
-		tag := r.String()
-		embedded := r.Bool()
-
-		f := types.NewField(pos, sym, ftyp)
-		f.Note = tag
-		if embedded {
-			f.Embedded = 1
+		field := types.NewField(r.pos(), r.selector(), r.typ())
+		field.Note = r.String()
+		if r.Bool() {
+			field.Embedded = 1
 		}
-		fields[i] = f
+		fields[i] = field
 	}
 	return types.NewStruct(fields)
 }
@@ -620,21 +610,16 @@
 
 func (r *reader) params() []*types.Field {
 	r.Sync(pkgbits.SyncParams)
-	fields := make([]*types.Field, r.Len())
-	for i := range fields {
-		_, fields[i] = r.param()
+	params := make([]*types.Field, r.Len())
+	for i := range params {
+		params[i] = r.param()
 	}
-	return fields
+	return params
 }
 
-func (r *reader) param() (*types.Pkg, *types.Field) {
+func (r *reader) param() *types.Field {
 	r.Sync(pkgbits.SyncParam)
-
-	pos := r.pos()
-	pkg, sym := r.localIdent()
-	typ := r.typ()
-
-	return pkg, types.NewField(pos, sym, typ)
+	return types.NewField(r.pos(), r.localIdent(), r.typ())
 }
 
 // @@@ Objects
@@ -678,9 +663,24 @@
 }
 
 // objIdx returns the specified object, instantiated with the given
-// type arguments, if any. If shaped is true, then the shaped variant
-// of the object is returned instead.
+// type arguments, if any.
+// If shaped is true, then the shaped variant of the object is returned
+// instead.
 func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) ir.Node {
+	n, err := pr.objIdxMayFail(idx, implicits, explicits, shaped)
+	if err != nil {
+		base.Fatalf("%v", err)
+	}
+	return n
+}
+
+// objIdxMayFail is equivalent to objIdx, but returns an error rather than
+// failing the build if this object requires type arguments and the incorrect
+// number of type arguments were passed.
+//
+// Other sources of internal failure (such as duplicate definitions) still fail
+// the build.
+func (pr *pkgReader) objIdxMayFail(idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) (ir.Node, error) {
 	rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
 	_, sym := rname.qualifiedIdent()
 	tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
@@ -689,19 +689,25 @@
 		assert(!sym.IsBlank())
 		switch sym.Pkg {
 		case types.BuiltinPkg, types.UnsafePkg:
-			return sym.Def.(ir.Node)
+			return sym.Def.(ir.Node), nil
 		}
 		if pri, ok := objReader[sym]; ok {
-			return pri.pr.objIdx(pri.idx, nil, explicits, shaped)
+			return pri.pr.objIdxMayFail(pri.idx, nil, explicits, shaped)
+		}
+		if sym.Pkg.Path == "runtime" {
+			return typecheck.LookupRuntime(sym.Name), nil
 		}
 		base.Fatalf("unresolved stub: %v", sym)
 	}
 
-	dict := pr.objDictIdx(sym, idx, implicits, explicits, shaped)
+	dict, err := pr.objDictIdx(sym, idx, implicits, explicits, shaped)
+	if err != nil {
+		return nil, err
+	}
 
 	sym = dict.baseSym
 	if !sym.IsBlank() && sym.Def != nil {
-		return sym.Def.(*ir.Name)
+		return sym.Def.(*ir.Name), nil
 	}
 
 	r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
@@ -737,7 +743,7 @@
 		name := do(ir.OTYPE, false)
 		setType(name, r.typ())
 		name.SetAlias(true)
-		return name
+		return name, nil
 
 	case pkgbits.ObjConst:
 		name := do(ir.OLITERAL, false)
@@ -745,17 +751,28 @@
 		val := FixValue(typ, r.Value())
 		setType(name, typ)
 		setValue(name, val)
-		return name
+		return name, nil
 
 	case pkgbits.ObjFunc:
 		if sym.Name == "init" {
 			sym = Renameinit()
 		}
-		name := do(ir.ONAME, true)
-		setType(name, r.signature(nil))
 
-		name.Func = ir.NewFunc(r.pos())
-		name.Func.Nname = name
+		npos := r.pos()
+		setBasePos(npos)
+		r.typeParamNames()
+		typ := r.signature(nil)
+		fpos := r.pos()
+
+		fn := ir.NewFunc(fpos, npos, sym, typ)
+		name := fn.Nname
+		if !sym.IsBlank() {
+			if sym.Def != nil {
+				base.FatalfAt(name.Pos(), "already have a definition for %v", name)
+			}
+			assert(sym.Def == nil)
+			sym.Def = name
+		}
 
 		if r.hasTypeParams() {
 			name.Func.SetDupok(true)
@@ -769,7 +786,7 @@
 		}
 
 		rext.funcExt(name, nil)
-		return name
+		return name, nil
 
 	case pkgbits.ObjType:
 		name := do(ir.OTYPE, true)
@@ -799,20 +816,20 @@
 			methods[i] = r.method(rext)
 		}
 		if len(methods) != 0 {
-			typ.Methods().Set(methods)
+			typ.SetMethods(methods)
 		}
 
 		if !r.dict.shaped {
 			r.needWrapper(typ)
 		}
 
-		return name
+		return name, nil
 
 	case pkgbits.ObjVar:
 		name := do(ir.ONAME, false)
 		setType(name, r.typ())
 		rext.varExt(name)
-		return name
+		return name, nil
 	}
 }
 
@@ -886,7 +903,16 @@
 		under = types.NewPtr(types.Types[types.TUINT8])
 	}
 
-	sym := types.ShapePkg.Lookup(under.LinkString())
+	// Hash long type names to bound the symbol name length seen by users,
+	// particularly for large protobuf structs (#65030).
+	uls := under.LinkString()
+	if base.Debug.MaxShapeLen != 0 &&
+		len(uls) > base.Debug.MaxShapeLen {
+		h := notsha256.Sum256([]byte(uls))
+		uls = hex.EncodeToString(h[:])
+	}
+
+	sym := types.ShapePkg.Lookup(uls)
 	if sym.Def == nil {
 		name := ir.NewDeclNameAt(under.Pos(), ir.OTYPE, sym)
 		typ := types.NewNamed(name)
@@ -900,7 +926,7 @@
 }
 
 // objDictIdx reads and returns the specified object dictionary.
-func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) *readerDict {
+func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) (*readerDict, error) {
 	r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
 
 	dict := readerDict{
@@ -911,7 +937,7 @@
 	nexplicits := r.Len()
 
 	if nimplicits > len(implicits) || nexplicits != len(explicits) {
-		base.Fatalf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits))
+		return nil, fmt.Errorf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits))
 	}
 
 	dict.targs = append(implicits[:nimplicits:nimplicits], explicits...)
@@ -956,7 +982,7 @@
 	dict.typeParamMethodExprs = make([]readerMethodExprInfo, r.Len())
 	for i := range dict.typeParamMethodExprs {
 		typeParamIdx := r.Len()
-		_, method := r.selector()
+		method := r.selector()
 
 		dict.typeParamMethodExprs[i] = readerMethodExprInfo{typeParamIdx, method}
 	}
@@ -976,7 +1002,7 @@
 		dict.itabs[i] = itabInfo{typ: r.typInfo(), iface: r.typInfo()}
 	}
 
-	return &dict
+	return &dict, nil
 }
 
 func (r *reader) typeParamNames() {
@@ -990,17 +1016,15 @@
 
 func (r *reader) method(rext *reader) *types.Field {
 	r.Sync(pkgbits.SyncMethod)
-	pos := r.pos()
-	_, sym := r.selector()
+	npos := r.pos()
+	sym := r.selector()
 	r.typeParamNames()
-	_, recv := r.param()
+	recv := r.param()
 	typ := r.signature(recv)
 
-	name := ir.NewNameAt(pos, ir.MethodSym(recv.Type, sym))
-	setType(name, typ)
-
-	name.Func = ir.NewFunc(r.pos())
-	name.Func.Nname = name
+	fpos := r.pos()
+	fn := ir.NewFunc(fpos, npos, ir.MethodSym(recv.Type, sym), typ)
+	name := fn.Nname
 
 	if r.hasTypeParams() {
 		name.Func.SetDupok(true)
@@ -1028,25 +1052,23 @@
 	return
 }
 
-func (r *reader) localIdent() (pkg *types.Pkg, sym *types.Sym) {
+func (r *reader) localIdent() *types.Sym {
 	r.Sync(pkgbits.SyncLocalIdent)
-	pkg = r.pkg()
+	pkg := r.pkg()
 	if name := r.String(); name != "" {
-		sym = pkg.Lookup(name)
+		return pkg.Lookup(name)
 	}
-	return
+	return nil
 }
 
-func (r *reader) selector() (origPkg *types.Pkg, sym *types.Sym) {
+func (r *reader) selector() *types.Sym {
 	r.Sync(pkgbits.SyncSelector)
-	origPkg = r.pkg()
+	pkg := r.pkg()
 	name := r.String()
-	pkg := origPkg
 	if types.IsExported(name) {
 		pkg = types.LocalPkg
 	}
-	sym = pkg.Lookup(name)
-	return
+	return pkg.Lookup(name)
 }
 
 func (r *reader) hasTypeParams() bool {
@@ -1062,9 +1084,6 @@
 func (r *reader) funcExt(name *ir.Name, method *types.Sym) {
 	r.Sync(pkgbits.SyncFuncExt)
 
-	name.Class = 0 // so MarkFunc doesn't complain
-	ir.MarkFunc(name)
-
 	fn := name.Func
 
 	// XXX: Workaround because linker doesn't know how to copy Pos.
@@ -1098,18 +1117,14 @@
 		}
 	}
 
-	typecheck.Func(fn)
-
 	if r.Bool() {
 		assert(name.Defn == nil)
 
 		fn.ABI = obj.ABI(r.Uint64())
 
 		// Escape analysis.
-		for _, fs := range &types.RecvsParams {
-			for _, f := range fs(name.Type()).FieldSlice() {
-				f.Note = r.String()
-			}
+		for _, f := range name.Type().RecvParams() {
+			f.Note = r.String()
 		}
 
 		if r.Bool() {
@@ -1117,6 +1132,9 @@
 				Cost:            int32(r.Len()),
 				CanDelayResults: r.Bool(),
 			}
+			if buildcfg.Experiment.NewInliner {
+				fn.Inl.Properties = r.String()
+			}
 		}
 	} else {
 		r.addBody(name.Func, method)
@@ -1232,7 +1250,7 @@
 	}
 
 	ir.WithFunc(fn, func() {
-		r.funcargs(fn)
+		r.declareParams()
 
 		if r.syntheticBody(fn.Pos()) {
 			return
@@ -1289,7 +1307,7 @@
 		shapedFn = shapedMethodExpr(pos, shapedObj, r.methodSym)
 	}
 
-	recvs, params := r.syntheticArgs(pos)
+	params := r.syntheticArgs()
 
 	// Construct the arguments list: receiver (if any), then runtime
 	// dictionary, and finally normal parameters.
@@ -1301,7 +1319,10 @@
 	// putting the dictionary parameter after that is the least invasive
 	// solution at the moment.
 	var args ir.Nodes
-	args.Append(recvs...)
+	if r.methodSym != nil {
+		args.Append(params[0])
+		params = params[1:]
+	}
 	args.Append(typecheck.Expr(ir.NewAddrExpr(pos, r.p.dictNameOf(r.dict))))
 	args.Append(params...)
 
@@ -1310,51 +1331,9 @@
 
 // syntheticArgs returns the recvs and params arguments passed to the
 // current function.
-func (r *reader) syntheticArgs(pos src.XPos) (recvs, params ir.Nodes) {
+func (r *reader) syntheticArgs() ir.Nodes {
 	sig := r.curfn.Nname.Type()
-
-	inlVarIdx := 0
-	addParams := func(out *ir.Nodes, params []*types.Field) {
-		for _, param := range params {
-			var arg ir.Node
-			if param.Nname != nil {
-				name := param.Nname.(*ir.Name)
-				if !ir.IsBlank(name) {
-					if r.inlCall != nil {
-						// During inlining, we want the respective inlvar where we
-						// assigned the callee's arguments.
-						arg = r.inlvars[inlVarIdx]
-					} else {
-						// Otherwise, we can use the parameter itself directly.
-						base.AssertfAt(name.Curfn == r.curfn, name.Pos(), "%v has curfn %v, but want %v", name, name.Curfn, r.curfn)
-						arg = name
-					}
-				}
-			}
-
-			// For anonymous and blank parameters, we don't have an *ir.Name
-			// to use as the argument. However, since we know the shaped
-			// function won't use the value either, we can just pass the
-			// zero value. (Also unfortunately, we don't have an easy
-			// zero-value IR node; so we use a default-initialized temporary
-			// variable.)
-			if arg == nil {
-				tmp := typecheck.TempAt(pos, r.curfn, param.Type)
-				r.curfn.Body.Append(
-					typecheck.Stmt(ir.NewDecl(pos, ir.ODCL, tmp)),
-					typecheck.Stmt(ir.NewAssignStmt(pos, tmp, nil)),
-				)
-				arg = tmp
-			}
-
-			out.Append(arg)
-			inlVarIdx++
-		}
-	}
-
-	addParams(&recvs, sig.Recvs().FieldSlice())
-	addParams(&params, sig.Params().FieldSlice())
-	return
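+	// The receiver (if any) and the parameters are always the first entries in curfn.Dcl.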
+	return ir.ToNodes(r.curfn.Dcl[:sig.NumRecvs()+sig.NumParams()])
 }
 
 // syntheticTailCall emits a tail call to fn, passing the given
@@ -1387,7 +1366,7 @@
 		return sym.Def.(*ir.Name)
 	}
 
-	name := ir.NewNameAt(pos, sym)
+	name := ir.NewNameAt(pos, sym, dict.varType())
 	name.Class = ir.PEXTERN
 	sym.Def = name // break cycles with mutual subdictionaries
 
@@ -1401,8 +1380,7 @@
 	assertOffset("type param method exprs", dict.typeParamMethodExprsOffset())
 	for _, info := range dict.typeParamMethodExprs {
 		typeParam := dict.targs[info.typeParamIdx]
-		method := typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, ir.TypeNode(typeParam), info.method)).(*ir.SelectorExpr)
-		assert(method.Op() == ir.OMETHEXPR)
+		method := typecheck.NewMethodExpr(pos, typeParam, info.method)
 
 		rsym := method.FuncName().Linksym()
 		assert(rsym.ABI() == obj.ABIInternal) // must be ABIInternal; see ir.OCFUNC in ssagen/ssa.go
@@ -1454,9 +1432,6 @@
 
 	objw.Global(lsym, int32(ot), obj.DUPOK|obj.RODATA)
 
-	name.SetType(dict.varType())
-	name.SetTypecheck(1)
-
 	return name
 }
 
@@ -1495,104 +1470,32 @@
 	return types.NewArray(types.Types[types.TUINTPTR], dict.numWords())
 }
 
-func (r *reader) funcargs(fn *ir.Func) {
-	sig := fn.Nname.Type()
+func (r *reader) declareParams() {
+	r.curfn.DeclareParams(!r.funarghack)
 
-	if recv := sig.Recv(); recv != nil {
-		r.funcarg(recv, recv.Sym, ir.PPARAM)
-	}
-	for _, param := range sig.Params().FieldSlice() {
-		r.funcarg(param, param.Sym, ir.PPARAM)
-	}
-
-	for i, param := range sig.Results().FieldSlice() {
-		sym := types.OrigSym(param.Sym)
-
-		if sym == nil || sym.IsBlank() {
-			prefix := "~r"
-			if r.inlCall != nil {
-				prefix = "~R"
-			} else if sym != nil {
-				prefix = "~b"
-			}
-			sym = typecheck.LookupNum(prefix, i)
+	for _, name := range r.curfn.Dcl {
+		if name.Sym().Name == dictParamName {
+			r.dictParam = name
+			continue
 		}
 
-		r.funcarg(param, sym, ir.PPARAMOUT)
+		r.addLocal(name)
 	}
 }
 
-func (r *reader) funcarg(param *types.Field, sym *types.Sym, ctxt ir.Class) {
-	if sym == nil {
-		assert(ctxt == ir.PPARAM)
-		if r.inlCall != nil {
-			r.inlvars.Append(ir.BlankNode)
-		}
-		return
-	}
-
-	name := ir.NewNameAt(r.inlPos(param.Pos), sym)
-	setType(name, param.Type)
-	r.addLocal(name, ctxt)
-
-	if r.inlCall == nil {
-		if !r.funarghack {
-			param.Sym = sym
-			param.Nname = name
-		}
-	} else {
-		if ctxt == ir.PPARAMOUT {
-			r.retvars.Append(name)
-		} else {
-			r.inlvars.Append(name)
-		}
-	}
-}
-
-func (r *reader) addLocal(name *ir.Name, ctxt ir.Class) {
-	assert(ctxt == ir.PAUTO || ctxt == ir.PPARAM || ctxt == ir.PPARAMOUT)
-
-	if name.Sym().Name == dictParamName {
-		r.dictParam = name
-	} else {
-		if r.synthetic == nil {
-			r.Sync(pkgbits.SyncAddLocal)
-			if r.p.SyncMarkers() {
-				want := r.Int()
-				if have := len(r.locals); have != want {
-					base.FatalfAt(name.Pos(), "locals table has desynced")
-				}
+func (r *reader) addLocal(name *ir.Name) {
+	if r.synthetic == nil {
+		r.Sync(pkgbits.SyncAddLocal)
+		if r.p.SyncMarkers() {
+			want := r.Int()
+			if have := len(r.locals); have != want {
+				base.FatalfAt(name.Pos(), "locals table has desynced")
 			}
-			r.varDictIndex(name)
 		}
-
-		r.locals = append(r.locals, name)
+		r.varDictIndex(name)
 	}
 
-	name.SetUsed(true)
-
-	// TODO(mdempsky): Move earlier.
-	if ir.IsBlank(name) {
-		return
-	}
-
-	if r.inlCall != nil {
-		if ctxt == ir.PAUTO {
-			name.SetInlLocal(true)
-		} else {
-			name.SetInlFormal(true)
-			ctxt = ir.PAUTO
-		}
-	}
-
-	name.Class = ctxt
-	name.Curfn = r.curfn
-
-	r.curfn.Dcl = append(r.curfn.Dcl, name)
-
-	if ctxt == ir.PAUTO {
-		name.SetFrameOffset(0)
-	}
+	r.locals = append(r.locals, name)
 }
 
 func (r *reader) useLocal() *ir.Name {
@@ -1678,7 +1581,11 @@
 // @@@ Statements
 
 func (r *reader) stmt() ir.Node {
-	switch stmts := r.stmts(); len(stmts) {
+	return block(r.stmts())
+}
+
+func block(stmts []ir.Node) ir.Node {
+	switch len(stmts) {
 	case 0:
 		return nil
 	case 1:
@@ -1688,7 +1595,7 @@
 	}
 }
 
-func (r *reader) stmts() []ir.Node {
+func (r *reader) stmts() ir.Nodes {
 	assert(ir.CurFunc == r.curfn)
 	var res ir.Nodes
 
@@ -1753,7 +1660,7 @@
 		op := r.op()
 		lhs := r.expr()
 		pos := r.pos()
-		n := ir.NewAssignOpStmt(pos, op, lhs, ir.NewBasicLit(pos, one))
+		n := ir.NewAssignOpStmt(pos, op, lhs, ir.NewOne(pos, lhs.Type()))
 		n.IncDec = true
 		return n
 
@@ -1771,7 +1678,14 @@
 		pos := r.pos()
 		op := r.op()
 		call := r.expr()
-		return ir.NewGoDeferStmt(pos, op, call)
+		stmt := ir.NewGoDeferStmt(pos, op, call)
+		if op == ir.ODEFER {
+			x := r.optExpr()
+			if x != nil {
+				stmt.DeferAt = x.(ir.Expr)
+			}
+		}
+		return stmt
 
 	case stmtExpr:
 		return r.expr()
@@ -1833,13 +1747,9 @@
 
 	case assignDef:
 		pos := r.pos()
-		setBasePos(pos)
-		_, sym := r.localIdent()
-		typ := r.typ()
-
-		name := ir.NewNameAt(pos, sym)
-		setType(name, typ)
-		r.addLocal(name, ir.PAUTO)
+		setBasePos(pos) // test/fixedbugs/issue49767.go depends on base.Pos being set for the r.typ() call here, ugh
+		name := r.curfn.NewLocal(pos, r.localIdent(), r.typ())
+		r.addLocal(name)
 		return name, true
 
 	case assignExpr:
@@ -1897,10 +1807,14 @@
 	cond := r.optExpr()
 	post := r.stmt()
 	body := r.blockStmt()
-	dv := r.Bool()
+	perLoopVars := r.Bool()
 	r.closeAnotherScope()
 
-	stmt := ir.NewForStmt(pos, init, cond, post, body, dv)
+	if ir.IsConst(cond, constant.Bool) && !ir.BoolVal(cond) {
+		return init // simplify "for init; false; post { ... }" into "init"
+	}
+
+	stmt := ir.NewForStmt(pos, init, cond, post, body, perLoopVars)
 	stmt.Label = label
 	return stmt
 }
@@ -1911,11 +1825,33 @@
 	pos := r.pos()
 	init := r.stmts()
 	cond := r.expr()
-	then := r.blockStmt()
-	els := r.stmts()
+	staticCond := r.Int()
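+	// A positive staticCond means the condition is statically true, negative
+	// means statically false, and zero means it is not statically known.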
+	var then, els []ir.Node
+	if staticCond >= 0 {
+		then = r.blockStmt()
+	} else {
+		r.lastCloseScopePos = r.pos()
+	}
+	if staticCond <= 0 {
+		els = r.stmts()
+	}
+	r.closeAnotherScope()
+
+	if staticCond != 0 {
+		// We may have removed a dead return statement, which can trip up
+		// later passes (#62211). To avoid confusion, we instead flatten
+		// the if statement into a block.
+
+		if cond.Op() != ir.OLITERAL {
+			init.Append(typecheck.Stmt(ir.NewAssignStmt(pos, ir.BlankNode, cond))) // for side effects
+		}
+		init.Append(then...)
+		init.Append(els...)
+		return block(init)
+	}
+
 	n := ir.NewIfStmt(pos, cond, then, els)
 	n.SetInit(init)
-	r.closeAnotherScope()
 	return n
 }
 
@@ -1995,9 +1931,7 @@
 	if r.Bool() {
 		pos := r.pos()
 		if r.Bool() {
-			pos := r.pos()
-			_, sym := r.localIdent()
-			ident = ir.NewIdent(pos, sym)
+			ident = ir.NewIdent(r.pos(), r.localIdent())
 		}
 		x := r.expr()
 		iface = x.Type()
@@ -2053,12 +1987,8 @@
 		clause.RTypes = rtypes
 
 		if ident != nil {
-			pos := r.pos()
-			typ := r.typ()
-
-			name := ir.NewNameAt(pos, ident.Sym())
-			setType(name, typ)
-			r.addLocal(name, ir.PAUTO)
+			name := r.curfn.NewLocal(r.pos(), ident.Sym(), r.typ())
+			r.addLocal(name)
 			clause.Var = name
 			name.Defn = tag
 		}
@@ -2147,14 +2077,12 @@
 		pos := r.pos()
 		typ := r.typ()
 		val := FixValue(typ, r.Value())
-		op := r.op()
-		orig := r.String()
-		return typecheck.Expr(OrigConst(pos, typ, val, op, orig))
+		return ir.NewBasicLit(pos, typ, val)
 
-	case exprNil:
+	case exprZero:
 		pos := r.pos()
 		typ := r.typ()
-		return Nil(pos, typ)
+		return ir.NewZero(pos, typ)
 
 	case exprCompLit:
 		return r.compLit()
@@ -2165,9 +2093,9 @@
 	case exprFieldVal:
 		x := r.expr()
 		pos := r.pos()
-		_, sym := r.selector()
+		sym := r.selector()
 
-		return typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)).(*ir.SelectorExpr)
+		return typecheck.XDotField(pos, x, sym)
 
 	case exprMethodVal:
 		recv := r.expr()
@@ -2195,14 +2123,14 @@
 			// interface method values).
 			//
 			if recv.Type().HasShape() {
-				typ := wrapperFn.Type().Params().Field(0).Type
+				typ := wrapperFn.Type().Param(0).Type
 				if !types.Identical(typ, recv.Type()) {
 					base.FatalfAt(wrapperFn.Pos(), "receiver %L does not match %L", recv, wrapperFn)
 				}
 				recv = typecheck.Expr(ir.NewConvExpr(recv.Pos(), ir.OCONVNOP, typ, recv))
 			}
 
-			n := typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, recv, wrapperFn.Sel)).(*ir.SelectorExpr)
+			n := typecheck.XDotMethod(pos, recv, wrapperFn.Sel, false)
 
 			// As a consistency check here, we make sure "n" selected the
 			// same method (represented by a types.Field) that wrapperFn
@@ -2257,7 +2185,7 @@
 		// rather than types.Identical, because the latter can be confused
 		// by tricky promoted methods (e.g., typeparam/mdempsky/21.go).
 		if wrapperFn != nil && len(implicits) == 0 && !deref && !addr {
-			if !types.Identical(recv, wrapperFn.Type().Params().Field(0).Type) {
+			if !types.Identical(recv, wrapperFn.Type().Param(0).Type) {
 				base.FatalfAt(pos, "want receiver type %v, but have method %L", recv, wrapperFn)
 			}
 			return wrapperFn
@@ -2267,7 +2195,7 @@
 		// expression (OMETHEXPR) and the receiver type is unshaped, then
 		// we can rely on a statically generated wrapper being available.
 		if method, ok := wrapperFn.(*ir.SelectorExpr); ok && method.Op() == ir.OMETHEXPR && !recv.HasShape() {
-			return typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, ir.TypeNode(recv), method.Sel)).(*ir.SelectorExpr)
+			return typecheck.NewMethodExpr(pos, recv, method.Sel)
 		}
 
 		return r.methodExprWrap(origPos, recv, implicits, deref, addr, baseFn, dictPtr)
@@ -2334,6 +2262,13 @@
 		switch op {
 		case ir.OANDAND, ir.OOROR:
 			return typecheck.Expr(ir.NewLogicalExpr(pos, op, x, y))
+		case ir.OLSH, ir.ORSH:
+			// Untyped rhs of non-constant shift, e.g. x << 1.0.
+			// If we have a constant value, it must be an int >= 0.
+			if ir.IsConstNode(y) {
+				val := constant.ToInt(y.Val())
+				assert(val.Kind() == constant.Int && constant.Sign(val) >= 0)
+			}
 		}
 		return typecheck.Expr(ir.NewBinaryExpr(pos, op, x, y))
 
@@ -2341,7 +2276,7 @@
 		x := r.expr()
 		pos := r.pos()
 		for i, n := 0, r.Len(); i < n; i++ {
-			x = Implicit(DotField(pos, x, r.Len()))
+			x = Implicit(typecheck.DotField(pos, x, r.Len()))
 		}
 		if r.Bool() { // needs deref
 			x = Implicit(Deref(pos, x.Type().Elem(), x))
@@ -2368,7 +2303,7 @@
 				// There are also corner cases where semantically it's perhaps
 				// significant; e.g., fixedbugs/issue15975.go, #38634, #52025.
 
-				fun = typecheck.Callee(ir.NewSelectorExpr(method.Pos(), ir.OXDOT, recv, method.Sel))
+				fun = typecheck.XDotMethod(method.Pos(), recv, method.Sel, true)
 			} else {
 				if recv.Type().IsInterface() {
 					// N.B., this happens currently for typeparam/issue51521.go
@@ -2432,6 +2367,26 @@
 		typ := r.exprType()
 		return typecheck.Expr(ir.NewUnaryExpr(pos, ir.ONEW, typ))
 
+	case exprSizeof:
+		return ir.NewUintptr(r.pos(), r.typ().Size())
+
+	case exprAlignof:
+		return ir.NewUintptr(r.pos(), r.typ().Alignment())
+
+	case exprOffsetof:
+		pos := r.pos()
+		typ := r.typ()
+		types.CalcSize(typ)
+
+		var offset int64
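+		// Accumulate offsets along the selected field path, following embedded fields.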
+		for i := r.Len(); i >= 0; i-- {
+			field := typ.Field(r.Len())
+			offset += field.Offset
+			typ = field.Type
+		}
+
+		return ir.NewUintptr(pos, offset)
+
 	case exprReshape:
 		typ := r.typ()
 		x := r.expr()
@@ -2516,6 +2471,10 @@
 			n.SetTypecheck(1)
 		}
 		return n
+
+	case exprRuntimeBuiltin:
+		builtin := typecheck.LookupRuntime(r.String())
+		return builtin
 	}
 }
 
@@ -2557,7 +2516,7 @@
 
 		// TODO(mdempsky): Is there a more robust way to get the
 		// dictionary pointer type here?
-		dictPtrType := baseFn.Type().Params().Field(0).Type
+		dictPtrType := baseFn.Type().Param(0).Type
 		dictPtr = typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, dictPtrType, r.dictWord(pos, r.dict.subdictsOffset()+idx)))
 
 		return
@@ -2588,7 +2547,10 @@
 		base.Fatalf("unresolved stub: %v", sym)
 	}
 
-	dict := pr.objDictIdx(sym, idx, implicits, explicits, false)
+	dict, err := pr.objDictIdx(sym, idx, implicits, explicits, false)
+	if err != nil {
+		base.Fatalf("%v", err)
+	}
 
 	return pr.dictNameOf(dict)
 }
@@ -2612,14 +2574,11 @@
 	typ := types.NewSignature(nil, params, results)
 
 	addBody := func(pos src.XPos, r *reader, captured []ir.Node) {
-		recvs, params := r.syntheticArgs(pos)
-		assert(len(recvs) == 0)
-
 		fun := captured[0]
 
 		var args ir.Nodes
 		args.Append(captured[1:]...)
-		args.Append(params...)
+		args.Append(r.syntheticArgs()...)
 
 		r.syntheticTailCall(pos, fun, args)
 	}
@@ -2650,16 +2609,14 @@
 	typ := types.NewSignature(nil, params, results)
 
 	addBody := func(pos src.XPos, r *reader, captured []ir.Node) {
-		recvs, args := r.syntheticArgs(pos)
-		assert(len(recvs) == 0)
-
 		fn := captured[0]
+		args := r.syntheticArgs()
 
 		// Rewrite first argument based on implicits/deref/addr.
 		{
 			arg := args[0]
 			for _, ix := range implicits {
-				arg = Implicit(DotField(pos, arg, ix))
+				arg = Implicit(typecheck.DotField(pos, arg, ix))
 			}
 			if deref {
 				arg = Implicit(Deref(pos, arg.Type().Elem(), arg))
@@ -2715,20 +2672,11 @@
 		return false
 	}
 
-	// The ODCLFUNC and its body need to use the original position, but
-	// the OCLOSURE node and any Init statements should use the inlined
-	// position instead. See also the explanation in reader.funcLit.
-	inlPos := r.inlPos(origPos)
-
-	fn := ir.NewClosureFunc(origPos, r.curfn != nil)
+	fn := r.inlClosureFunc(origPos, typ)
 	fn.SetWrapper(true)
-	clo := fn.OClosure
-	clo.SetPos(inlPos)
-	ir.NameClosure(clo, r.curfn)
 
-	setType(fn.Nname, typ)
-	typecheck.Func(fn)
-	setType(clo, fn.Type())
+	clo := fn.OClosure
+	inlPos := clo.Pos()
 
 	var init ir.Nodes
 	for i, n := range captures {
@@ -2765,8 +2713,7 @@
 	bodyReader[fn] = pri
 	pri.funcBody(fn)
 
-	// TODO(mdempsky): Remove hard-coding of typecheck.Target.
-	return ir.InitExpr(init, ir.UseClosure(clo, typecheck.Target))
+	return ir.InitExpr(init, clo)
 }
 
 // syntheticSig duplicates and returns the params and results lists
@@ -2776,23 +2723,19 @@
 	clone := func(params []*types.Field) []*types.Field {
 		res := make([]*types.Field, len(params))
 		for i, param := range params {
-			sym := param.Sym
-			if sym == nil || sym.Name == "_" {
-				sym = typecheck.LookupNum(".anon", i)
-			}
 			// TODO(mdempsky): It would be nice to preserve the original
 			// parameter positions here instead, but at least
 			// typecheck.NewMethodType replaces them with base.Pos, making
 			// them useless. Worse, the positions copied from base.Pos may
 			// have inlining contexts, which we definitely don't want here
 			// (e.g., #54625).
-			res[i] = types.NewField(base.AutogeneratedPos, sym, param.Type)
+			res[i] = types.NewField(base.AutogeneratedPos, param.Sym, param.Type)
 			res[i].SetIsDDD(param.IsDDD())
 		}
 		return res
 	}
 
-	return clone(sig.Params().FieldSlice()), clone(sig.Results().FieldSlice())
+	return clone(sig.Params()), clone(sig.Results())
 }
 
 func (r *reader) optExpr() ir.Node {
@@ -2829,7 +2772,7 @@
 	recv := r.typ()
 	sig0 := r.typ()
 	pos := r.pos()
-	_, sym := r.selector()
+	sym := r.selector()
 
 	// Signature type to return (i.e., recv prepended to the method's
 	// normal parameters list).
@@ -2870,7 +2813,7 @@
 
 		// TODO(mdempsky): Is there a more robust way to get the
 		// dictionary pointer type here?
-		dictPtrType := shapedFn.Type().Params().Field(1).Type
+		dictPtrType := shapedFn.Type().Param(1).Type
 		dictPtr := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, dictPtrType, r.dictWord(pos, r.dict.subdictsOffset()+idx)))
 
 		return nil, shapedFn, dictPtr
@@ -2887,14 +2830,14 @@
 		dictPtr := typecheck.Expr(ir.NewAddrExpr(pos, dict))
 
 		// Check that dictPtr matches shapedFn's dictionary parameter.
-		if !types.Identical(dictPtr.Type(), shapedFn.Type().Params().Field(1).Type) {
+		if !types.Identical(dictPtr.Type(), shapedFn.Type().Param(1).Type) {
 			base.FatalfAt(pos, "dict %L, but shaped method %L", dict, shapedFn)
 		}
 
 		// For statically known instantiations, we can take advantage of
 		// the stenciled wrapper.
 		base.AssertfAt(!recv.HasShape(), pos, "shaped receiver %v", recv)
-		wrapperFn := typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, ir.TypeNode(recv), sym)).(*ir.SelectorExpr)
+		wrapperFn := typecheck.NewMethodExpr(pos, recv, sym)
 		base.AssertfAt(types.Identical(sig, wrapperFn.Type()), pos, "wrapper %L does not have type %v", wrapperFn, sig)
 
 		return wrapperFn, shapedFn, dictPtr
@@ -2902,7 +2845,7 @@
 
 	// Simple method expression; no dictionary needed.
 	base.AssertfAt(!recv.HasShape() || recv.IsInterface(), pos, "shaped receiver %v", recv)
-	fn := typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, ir.TypeNode(recv), sym)).(*ir.SelectorExpr)
+	fn := typecheck.NewMethodExpr(pos, recv, sym)
 	return fn, fn, nil
 }
 
@@ -2915,7 +2858,7 @@
 	assert(typ.HasShape())
 
 	method := func() *types.Field {
-		for _, method := range typ.Methods().Slice() {
+		for _, method := range typ.Methods() {
 			if method.Sym == sym {
 				return method
 			}
@@ -2927,7 +2870,7 @@
 
 	// Construct an OMETHEXPR node.
 	recv := method.Type.Recv().Type
-	return typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, ir.TypeNode(recv), sym)).(*ir.SelectorExpr)
+	return typecheck.NewMethodExpr(pos, recv, sym)
 }
 
 func (r *reader) multiExpr() []ir.Node {
@@ -2973,45 +2916,12 @@
 
 // temp returns a new autotemp of the specified type.
 func (r *reader) temp(pos src.XPos, typ *types.Type) *ir.Name {
-	// See typecheck.typecheckargs.
-	curfn := r.curfn
-	if curfn == nil {
-		curfn = typecheck.InitTodoFunc
-	}
-
-	return typecheck.TempAt(pos, curfn, typ)
+	return typecheck.TempAt(pos, r.curfn, typ)
 }
 
 // tempCopy declares and returns a new autotemp initialized to the
 // value of expr.
 func (r *reader) tempCopy(pos src.XPos, expr ir.Node, init *ir.Nodes) *ir.Name {
-	if r.curfn == nil {
-		// Escape analysis doesn't know how to handle package-scope
-		// function literals with free variables (i.e., that capture
-		// temporary variables added to typecheck.InitTodoFunc).
-		//
-		// stencil.go works around this limitation by spilling values to
-		// global variables instead, but that causes the value to stay
-		// alive indefinitely; see go.dev/issue/54343.
-		//
-		// This code path (which implements the same workaround) isn't
-		// actually needed by unified IR, because it creates uses normal
-		// OMETHEXPR/OMETHVALUE nodes when statically-known instantiated
-		// types are used. But it's kept around for now because it's handy
-		// for testing that the generic fallback paths work correctly.
-		base.Fatalf("tempCopy called at package scope")
-
-		tmp := staticinit.StaticName(expr.Type())
-
-		assign := ir.NewAssignStmt(pos, tmp, expr)
-		assign.Def = true
-		tmp.Defn = assign
-
-		typecheck.Target.Decls = append(typecheck.Target.Decls, typecheck.Stmt(assign))
-
-		return tmp
-	}
-
 	tmp := r.temp(pos, expr.Type())
 
 	init.Append(typecheck.Stmt(ir.NewDecl(pos, ir.ODCL, tmp)))
@@ -3073,7 +2983,7 @@
 func wrapName(pos src.XPos, x ir.Node) ir.Node {
 	// These nodes do not carry line numbers.
 	// Introduce a wrapper node to give them the correct line.
-	switch ir.Orig(x).Op() {
+	switch x.Op() {
 	case ir.OTYPE, ir.OLITERAL:
 		if x.Sym() == nil {
 			break
@@ -3107,21 +3017,16 @@
 	// OCLOSURE node, because that position represents where any heap
 	// allocation of the closure is credited (#49171).
 	r.suppressInlPos++
-	pos := r.pos()
-	xtype2 := r.signature(nil)
+	origPos := r.pos()
+	sig := r.signature(nil)
 	r.suppressInlPos--
 
-	fn := ir.NewClosureFunc(pos, r.curfn != nil)
-	clo := fn.OClosure
-	clo.SetPos(r.inlPos(pos)) // see comment above
-	ir.NameClosure(clo, r.curfn)
-
-	setType(fn.Nname, xtype2)
-	typecheck.Func(fn)
-	setType(clo, fn.Type())
+	fn := r.inlClosureFunc(origPos, sig)
 
 	fn.ClosureVars = make([]*ir.Name, 0, r.Len())
 	for len(fn.ClosureVars) < cap(fn.ClosureVars) {
+		// TODO(mdempsky): I think these should be original positions too
+		// (i.e., not inline-adjusted).
 		ir.NewClosureVar(r.pos(), fn, r.useLocal())
 	}
 	if param := r.dictParam; param != nil {
@@ -3132,8 +3037,24 @@
 
 	r.addBody(fn, nil)
 
+	// Un-hide closures that belong to init functions.
+	if (r.curfn.IsPackageInit() || strings.HasPrefix(r.curfn.Sym().Name, "init.")) && ir.IsTrivialClosure(fn.OClosure) {
+		fn.SetIsHiddenClosure(false)
+	}
+
+	return fn.OClosure
+}
+
+// inlClosureFunc constructs a new closure function. During inlining, the
+// closure is parented under the inlining caller rather than the function
+// being inlined.
+func (r *reader) inlClosureFunc(origPos src.XPos, sig *types.Type) *ir.Func {
+	curfn := r.inlCaller
+	if curfn == nil {
+		curfn = r.curfn
+	}
+
 	// TODO(mdempsky): Remove hard-coding of typecheck.Target.
-	return ir.UseClosure(clo, typecheck.Target)
+	return ir.NewClosureFunc(origPos, r.inlPos(origPos), ir.OCLOSURE, sig, curfn, typecheck.Target)
 }
 
 func (r *reader) exprList() []ir.Node {
@@ -3157,7 +3078,7 @@
 // uintptr-typed word from the dictionary parameter.
 func (r *reader) dictWord(pos src.XPos, idx int) ir.Node {
 	base.AssertfAt(r.dictParam != nil, pos, "expected dictParam in %v", r.curfn)
-	return typecheck.Expr(ir.NewIndexExpr(pos, r.dictParam, ir.NewBasicLit(pos, constant.MakeInt64(int64(idx)))))
+	return typecheck.Expr(ir.NewIndexExpr(pos, r.dictParam, ir.NewInt(pos, int64(idx))))
 }
 
 // rttiWord is like dictWord, but converts it to *byte (the type used
@@ -3277,10 +3198,7 @@
 		typ, rtype = r.rtype0(pos)
 
 		if !r.Bool() { // not derived
-			// TODO(mdempsky): ir.TypeNode should probably return a typecheck'd node.
-			n := ir.TypeNode(typ)
-			n.SetTypecheck(1)
-			return n
+			return ir.TypeNode(typ)
 		}
 	}
 
@@ -3303,11 +3221,66 @@
 	}
 	target.CgoPragmas = cgoPragmas
 
+	r.pkgInitOrder(target)
+
 	r.pkgDecls(target)
 
 	r.Sync(pkgbits.SyncEOF)
 }
 
+// pkgInitOrder creates a synthetic init function to handle any
+// package-scope initialization statements.
+func (r *reader) pkgInitOrder(target *ir.Package) {
+	initOrder := make([]ir.Node, r.Len())
+	if len(initOrder) == 0 {
+		return
+	}
+
+	// Make a function that contains all the initialization statements.
+	pos := base.AutogeneratedPos
+	base.Pos = pos
+
+	fn := ir.NewFunc(pos, pos, typecheck.Lookup("init"), types.NewSignature(nil, nil, nil))
+	fn.SetIsPackageInit(true)
+	fn.SetInlinabilityChecked(true) // suppress useless "can inline" diagnostics
+
+	typecheck.DeclFunc(fn)
+	r.curfn = fn
+
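+	// Each entry pairs one or more package-level names (the LHS) with a
+	// single initializer expression, in initialization order.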
+	for i := range initOrder {
+		lhs := make([]ir.Node, r.Len())
+		for j := range lhs {
+			lhs[j] = r.obj()
+		}
+		rhs := r.expr()
+		pos := lhs[0].Pos()
+
+		var as ir.Node
+		if len(lhs) == 1 {
+			as = typecheck.Stmt(ir.NewAssignStmt(pos, lhs[0], rhs))
+		} else {
+			as = typecheck.Stmt(ir.NewAssignListStmt(pos, ir.OAS2, lhs, []ir.Node{rhs}))
+		}
+
+		for _, v := range lhs {
+			v.(*ir.Name).Defn = as
+		}
+
+		initOrder[i] = as
+	}
+
+	fn.Body = initOrder
+
+	typecheck.FinishFuncBody()
+	r.curfn = nil
+	r.locals = nil
+
+	// Outline (if legal/profitable) global map inits.
+	staticinit.OutlineMapInits(fn)
+
+	target.Inits = append(target.Inits, fn)
+}
+
 func (r *reader) pkgDecls(target *ir.Package) {
 	r.Sync(pkgbits.SyncDecls)
 	for {
@@ -3321,37 +3294,17 @@
 		case declFunc:
 			names := r.pkgObjs(target)
 			assert(len(names) == 1)
-			target.Decls = append(target.Decls, names[0].Func)
+			target.Funcs = append(target.Funcs, names[0].Func)
 
 		case declMethod:
 			typ := r.typ()
-			_, sym := r.selector()
+			sym := r.selector()
 
 			method := typecheck.Lookdot1(nil, sym, typ, typ.Methods(), 0)
-			target.Decls = append(target.Decls, method.Nname.(*ir.Name).Func)
+			target.Funcs = append(target.Funcs, method.Nname.(*ir.Name).Func)
 
 		case declVar:
-			pos := r.pos()
 			names := r.pkgObjs(target)
-			values := r.exprList()
-
-			if len(names) > 1 && len(values) == 1 {
-				as := ir.NewAssignListStmt(pos, ir.OAS2, nil, values)
-				for _, name := range names {
-					as.Lhs.Append(name)
-					name.Defn = as
-				}
-				target.Decls = append(target.Decls, as)
-			} else {
-				for i, name := range names {
-					as := ir.NewAssignStmt(pos, name, nil)
-					if i < len(values) {
-						as.Y = values[i]
-					}
-					name.Defn = as
-					target.Decls = append(target.Decls, as)
-				}
-			}
 
 			if n := r.Len(); n > 0 {
 				assert(len(names) == 1)
@@ -3399,15 +3352,15 @@
 			}
 		}
 
-		if types.IsExported(sym.Name) {
+		if base.Ctxt.Flag_dynlink && types.LocalPkg.Name == "main" && types.IsExported(sym.Name) && name.Op() == ir.ONAME {
 			assert(!sym.OnExportList())
-			target.Exports = append(target.Exports, name)
+			target.PluginExports = append(target.PluginExports, name)
 			sym.SetOnExportList(true)
 		}
 
-		if base.Flag.AsmHdr != "" {
+		if base.Flag.AsmHdr != "" && (name.Op() == ir.OLITERAL || name.Op() == ir.OTYPE) {
 			assert(!sym.Asm())
-			target.Asms = append(target.Asms, name)
+			target.AsmHdrDecls = append(target.AsmHdrDecls, name)
 			sym.SetAsm(true)
 		}
 	}
@@ -3432,28 +3385,20 @@
 
 // unifiedInlineCall implements inline.NewInline by re-reading the function
 // body from its Unified IR export data.
-func unifiedInlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
-	// TODO(mdempsky): Turn callerfn into an explicit parameter.
-	callerfn := ir.CurFunc
-
+func unifiedInlineCall(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
 	pri, ok := bodyReaderFor(fn)
 	if !ok {
 		base.FatalfAt(call.Pos(), "cannot inline call to %v: missing inline body", fn)
 	}
 
-	if fn.Inl.Body == nil {
+	if !fn.Inl.HaveDcl {
 		expandInline(fn, pri)
 	}
 
 	r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody)
 
-	// TODO(mdempsky): This still feels clumsy. Can we do better?
-	tmpfn := ir.NewFunc(fn.Pos())
-	tmpfn.Nname = ir.NewNameAt(fn.Nname.Pos(), callerfn.Sym())
-	tmpfn.Closgen = callerfn.Closgen
-	defer func() { callerfn.Closgen = tmpfn.Closgen }()
+	tmpfn := ir.NewFunc(fn.Pos(), fn.Nname.Pos(), callerfn.Sym(), fn.Type())
 
-	setType(tmpfn.Nname, fn.Type())
 	r.curfn = tmpfn
 
 	r.inlCaller = callerfn
@@ -3461,16 +3406,32 @@
 	r.inlFunc = fn
 	r.inlTreeIndex = inlIndex
 	r.inlPosBases = make(map[*src.PosBase]*src.PosBase)
+	r.funarghack = true
 
 	r.closureVars = make([]*ir.Name, len(r.inlFunc.ClosureVars))
 	for i, cv := range r.inlFunc.ClosureVars {
+		// TODO(mdempsky): It should be possible to support this case, but
+		// for now we rely on the inliner avoiding it.
+		if cv.Outer.Curfn != callerfn {
+			base.FatalfAt(call.Pos(), "inlining closure call across frames")
+		}
 		r.closureVars[i] = cv.Outer
 	}
 	if len(r.closureVars) != 0 && r.hasTypeParams() {
 		r.dictParam = r.closureVars[len(r.closureVars)-1] // dictParam is last; see reader.funcLit
 	}
 
-	r.funcargs(fn)
+	r.declareParams()
+
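+	// declareParams appends the receiver/parameters and then the results to
+	// curfn.Dcl, so recover them by position.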
+	var inlvars, retvars []*ir.Name
+	{
+		sig := r.curfn.Type()
+		endParams := sig.NumRecvs() + sig.NumParams()
+		endResults := endParams + sig.NumResults()
+
+		inlvars = r.curfn.Dcl[:endParams]
+		retvars = r.curfn.Dcl[endParams:endResults]
+	}
 
 	r.delayResults = fn.Inl.CanDelayResults
 
@@ -3483,7 +3444,7 @@
 	// may contain side effects. Make sure to preserve these,
 	// if necessary (#42703).
 	if call.Op() == ir.OCALLFUNC {
-		inline.CalleeEffects(&init, call.X)
+		inline.CalleeEffects(&init, call.Fun)
 	}
 
 	var args ir.Nodes
@@ -3493,15 +3454,14 @@
 	args.Append(call.Args...)
 
 	// Create assignment to declare and initialize inlvars.
-	as2 := ir.NewAssignListStmt(call.Pos(), ir.OAS2, r.inlvars, args)
+	as2 := ir.NewAssignListStmt(call.Pos(), ir.OAS2, ir.ToNodes(inlvars), args)
 	as2.Def = true
 	var as2init ir.Nodes
-	for _, name := range r.inlvars {
+	for _, name := range inlvars {
 		if ir.IsBlank(name) {
 			continue
 		}
 		// TODO(mdempsky): Use inlined position of name.Pos() instead?
-		name := name.(*ir.Name)
 		as2init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
 		name.Defn = as2
 	}
@@ -3511,9 +3471,8 @@
 	if !r.delayResults {
 		// If not delaying retvars, declare and zero initialize the
 		// result variables now.
-		for _, name := range r.retvars {
+		for _, name := range retvars {
 			// TODO(mdempsky): Use inlined position of name.Pos() instead?
-			name := name.(*ir.Name)
 			init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
 			ras := ir.NewAssignStmt(call.Pos(), name, nil)
 			init.Append(typecheck.Stmt(ras))
@@ -3527,8 +3486,6 @@
 	// Note issue 28603.
 	init.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(r.inlTreeIndex)))
 
-	nparams := len(r.curfn.Dcl)
-
 	ir.WithFunc(r.curfn, func() {
 		if !r.syntheticBody(call.Pos()) {
 			assert(r.Bool()) // have body
@@ -3544,13 +3501,11 @@
 		// themselves. But currently it's an easy fix to #50552.
 		readBodies(typecheck.Target, true)
 
-		deadcode.Func(r.curfn)
-
 		// Replace any "return" statements within the function body.
 		var edit func(ir.Node) ir.Node
 		edit = func(n ir.Node) ir.Node {
 			if ret, ok := n.(*ir.ReturnStmt); ok {
-				n = typecheck.Stmt(r.inlReturn(ret))
+				n = typecheck.Stmt(r.inlReturn(ret, retvars))
 			}
 			ir.EditChildren(n, edit)
 			return n
@@ -3560,28 +3515,23 @@
 
 	body := ir.Nodes(r.curfn.Body)
 
-	// Quirkish: We need to eagerly prune variables added during
-	// inlining, but removed by deadcode.FuncBody above. Unused
-	// variables will get removed during stack frame layout anyway, but
-	// len(fn.Dcl) ends up influencing things like autotmp naming.
+	// Reparent any declarations into the caller function.
+	for _, name := range r.curfn.Dcl {
+		name.Curfn = callerfn
 
-	used := usedLocals(body)
-
-	for i, name := range r.curfn.Dcl {
-		if i < nparams || used.Has(name) {
-			name.Curfn = callerfn
-			callerfn.Dcl = append(callerfn.Dcl, name)
-
-			if name.AutoTemp() {
-				name.SetEsc(ir.EscUnknown)
-				name.SetInlLocal(true)
-			}
+		if name.Class != ir.PAUTO {
+			name.SetPos(r.inlPos(name.Pos()))
+			name.SetInlFormal(true)
+			name.Class = ir.PAUTO
+		} else {
+			name.SetInlLocal(true)
 		}
 	}
+	callerfn.Dcl = append(callerfn.Dcl, r.curfn.Dcl...)
 
 	body.Append(ir.NewLabelStmt(call.Pos(), r.retlabel))
 
-	res := ir.NewInlinedCallExpr(call.Pos(), body, append([]ir.Node(nil), r.retvars...))
+	res := ir.NewInlinedCallExpr(call.Pos(), body, ir.ToNodes(retvars))
 	res.SetInit(init)
 	res.SetType(call.Type())
 	res.SetTypecheck(1)
@@ -3594,20 +3544,19 @@
 
 // inlReturn returns a statement that can substitute for the given
 // return statement when inlining.
-func (r *reader) inlReturn(ret *ir.ReturnStmt) *ir.BlockStmt {
+func (r *reader) inlReturn(ret *ir.ReturnStmt, retvars []*ir.Name) *ir.BlockStmt {
 	pos := r.inlCall.Pos()
 
 	block := ir.TakeInit(ret)
 
 	if results := ret.Results; len(results) != 0 {
-		assert(len(r.retvars) == len(results))
+		assert(len(retvars) == len(results))
 
-		as2 := ir.NewAssignListStmt(pos, ir.OAS2, append([]ir.Node(nil), r.retvars...), ret.Results)
+		as2 := ir.NewAssignListStmt(pos, ir.OAS2, ir.ToNodes(retvars), ret.Results)
 
 		if r.delayResults {
-			for _, name := range r.retvars {
+			for _, name := range retvars {
 				// TODO(mdempsky): Use inlined position of name.Pos() instead?
-				name := name.(*ir.Name)
 				block.Append(ir.NewDecl(pos, ir.ODCL, name))
 				name.Defn = as2
 			}
@@ -3621,7 +3570,7 @@
 }
 
 // expandInline reads in an extra copy of IR to populate
-// fn.Inl.{Dcl,Body}.
+// fn.Inl.Dcl.
 func expandInline(fn *ir.Func, pri pkgReaderIndex) {
 	// TODO(mdempsky): Remove this function. It's currently needed by
 	// dwarfgen/dwarf.go:preInliningDcls, which requires fn.Inl.Dcl to
@@ -3629,35 +3578,26 @@
 	// with the same information some other way.
 
 	fndcls := len(fn.Dcl)
-	topdcls := len(typecheck.Target.Decls)
+	topdcls := len(typecheck.Target.Funcs)
 
-	tmpfn := ir.NewFunc(fn.Pos())
-	tmpfn.Nname = ir.NewNameAt(fn.Nname.Pos(), fn.Sym())
+	tmpfn := ir.NewFunc(fn.Pos(), fn.Nname.Pos(), fn.Sym(), fn.Type())
 	tmpfn.ClosureVars = fn.ClosureVars
 
 	{
 		r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody)
-		setType(tmpfn.Nname, fn.Type())
 
 		// Don't change parameter's Sym/Nname fields.
 		r.funarghack = true
 
 		r.funcBody(tmpfn)
-
-		ir.WithFunc(tmpfn, func() {
-			deadcode.Func(tmpfn)
-		})
 	}
 
-	used := usedLocals(tmpfn.Body)
-
+	// Move tmpfn's params to fn.Inl.Dcl, and reparent under fn.
 	for _, name := range tmpfn.Dcl {
-		if name.Class != ir.PAUTO || used.Has(name) {
-			name.Curfn = fn
-			fn.Inl.Dcl = append(fn.Inl.Dcl, name)
-		}
+		name.Curfn = fn
 	}
-	fn.Inl.Body = tmpfn.Body
+	fn.Inl.Dcl = tmpfn.Dcl
+	fn.Inl.HaveDcl = true
 
 	// Double check that we didn't change fn.Dcl by accident.
 	assert(fndcls == len(fn.Dcl))
@@ -3665,7 +3605,7 @@
 	// typecheck.Stmts may have added function literals to
 	// typecheck.Target.Decls. Remove them again so we don't risk trying
 	// to compile them multiple times.
-	typecheck.Target.Decls = typecheck.Target.Decls[:topdcls]
+	typecheck.Target.Funcs = typecheck.Target.Funcs[:topdcls]
 }
 
 // usedLocals returns a set of local variables that are used within body.
@@ -3781,7 +3721,7 @@
 	if !typ.IsInterface() {
 		typecheck.CalcMethods(typ)
 	}
-	for _, meth := range typ.AllMethods().Slice() {
+	for _, meth := range typ.AllMethods() {
 		if meth.Sym.IsBlank() || !meth.IsMethod() {
 			base.FatalfAt(meth.Pos, "invalid method: %v", meth)
 		}
@@ -3861,7 +3801,6 @@
 	recv := ir.NewHiddenParam(pos, fn, typecheck.Lookup(".this"), recvType)
 
 	if !needed {
-		typecheck.Func(fn)
 		return
 	}
 
@@ -3871,42 +3810,16 @@
 }
 
 func newWrapperFunc(pos src.XPos, sym *types.Sym, wrapper *types.Type, method *types.Field) *ir.Func {
-	fn := ir.NewFunc(pos)
-	fn.SetDupok(true) // TODO(mdempsky): Leave unset for local, non-generic wrappers?
-
-	name := ir.NewNameAt(pos, sym)
-	ir.MarkFunc(name)
-	name.Func = fn
-	name.Defn = fn
-	fn.Nname = name
-
 	sig := newWrapperType(wrapper, method)
-	setType(name, sig)
 
-	// TODO(mdempsky): De-duplicate with similar logic in funcargs.
-	defParams := func(class ir.Class, params *types.Type) {
-		for _, param := range params.FieldSlice() {
-			name := ir.NewNameAt(param.Pos, param.Sym)
-			name.Class = class
-			setType(name, param.Type)
-
-			name.Curfn = fn
-			fn.Dcl = append(fn.Dcl, name)
-
-			param.Nname = name
-		}
-	}
-
-	defParams(ir.PPARAM, sig.Recvs())
-	defParams(ir.PPARAM, sig.Params())
-	defParams(ir.PPARAMOUT, sig.Results())
+	fn := ir.NewFunc(pos, pos, sym, sig)
+	fn.DeclareParams(true)
+	fn.SetDupok(true) // TODO(mdempsky): Leave unset for local, non-generic wrappers?
 
 	return fn
 }
 
 func finishWrapperFunc(fn *ir.Func, target *ir.Package) {
-	typecheck.Func(fn)
-
 	ir.WithFunc(fn, func() {
 		typecheck.Stmts(fn.Body)
 	})
@@ -3914,7 +3827,7 @@
 	// We generate wrappers after the global inlining pass,
 	// so we're responsible for applying inlining ourselves here.
 	// TODO(prattmic): plumb PGO.
-	inline.InlineCalls(fn, nil)
+	interleaved.DevirtualizeAndInlineFunc(fn, nil)
 
 	// The body of wrapper function after inlining may reveal new ir.OMETHVALUE node,
 	// we don't know whether wrapper function has been generated for it or not, so
@@ -3929,7 +3842,8 @@
 		}
 	})
 
-	target.Decls = append(target.Decls, fn)
+	fn.Nname.Defn = fn
+	target.Funcs = append(target.Funcs, fn)
 }
 
 // newWrapperType returns a copy of the given signature type, but with
@@ -3940,11 +3854,7 @@
 	clone := func(params []*types.Field) []*types.Field {
 		res := make([]*types.Field, len(params))
 		for i, param := range params {
-			sym := param.Sym
-			if sym == nil || sym.Name == "_" {
-				sym = typecheck.LookupNum(".anon", i)
-			}
-			res[i] = types.NewField(param.Pos, sym, param.Type)
+			res[i] = types.NewField(param.Pos, param.Sym, param.Type)
 			res[i].SetIsDDD(param.IsDDD())
 		}
 		return res
@@ -3954,10 +3864,10 @@
 
 	var recv *types.Field
 	if recvType != nil {
-		recv = types.NewField(sig.Recv().Pos, typecheck.Lookup(".this"), recvType)
+		recv = types.NewField(sig.Recv().Pos, sig.Recv().Sym, recvType)
 	}
-	params := clone(sig.Params().FieldSlice())
-	results := clone(sig.Results().FieldSlice())
+	params := clone(sig.Params())
+	results := clone(sig.Results())
 
 	return types.NewSignature(recv, params, results)
 }
@@ -3965,7 +3875,7 @@
 func addTailCall(pos src.XPos, fn *ir.Func, recv ir.Node, method *types.Field) {
 	sig := fn.Nname.Type()
 	args := make([]ir.Node, sig.NumParams())
-	for i, param := range sig.Params().FieldSlice() {
+	for i, param := range sig.Params() {
 		args[i] = param.Nname.(*ir.Name)
 	}
 
@@ -3974,7 +3884,7 @@
 
 	fn.SetWrapper(true) // TODO(mdempsky): Leave unset for tail calls?
 
-	dot := ir.NewSelectorExpr(pos, ir.OXDOT, recv, method.Sym)
+	dot := typecheck.XDotMethod(pos, recv, method.Sym, true)
 	call := typecheck.Call(pos, dot, args, method.Type.IsVariadic()).(*ir.CallExpr)
 
 	if method.Type.NumResults() == 0 {
@@ -4014,16 +3924,16 @@
 		recv = types.NewField(oldRecv.Pos, oldRecv.Sym, oldRecv.Type)
 	}
 
-	params := make([]*types.Field, 1+sig.Params().Fields().Len())
+	params := make([]*types.Field, 1+sig.NumParams())
 	params[0] = types.NewField(fn.Pos(), fn.Sym().Pkg.Lookup(dictParamName), types.NewPtr(dict.varType()))
-	for i, param := range sig.Params().Fields().Slice() {
+	for i, param := range sig.Params() {
 		d := types.NewField(param.Pos, param.Sym, param.Type)
 		d.SetIsDDD(param.IsDDD())
 		params[1+i] = d
 	}
 
-	results := make([]*types.Field, sig.Results().Fields().Len())
-	for i, result := range sig.Results().Fields().Slice() {
+	results := make([]*types.Field, sig.NumResults())
+	for i, result := range sig.Results() {
 		results[i] = types.NewField(result.Pos, result.Sym, result.Type)
 	}
 
diff --git a/src/cmd/compile/internal/noder/sizes.go b/src/cmd/compile/internal/noder/sizes.go
deleted file mode 100644
index dff8d7b..0000000
--- a/src/cmd/compile/internal/noder/sizes.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noder
-
-import (
-	"fmt"
-
-	"cmd/compile/internal/types"
-	"cmd/compile/internal/types2"
-)
-
-// Code below based on go/types.StdSizes.
-// Intentional differences are marked with "gc:".
-
-type gcSizes struct{}
-
-func (s *gcSizes) Alignof(T types2.Type) int64 {
-	// For arrays and structs, alignment is defined in terms
-	// of alignment of the elements and fields, respectively.
-	switch t := T.Underlying().(type) {
-	case *types2.Array:
-		// spec: "For a variable x of array type: unsafe.Alignof(x)
-		// is the same as unsafe.Alignof(x[0]), but at least 1."
-		return s.Alignof(t.Elem())
-	case *types2.Struct:
-		if t.NumFields() == 0 && types2.IsSyncAtomicAlign64(T) {
-			// Special case: sync/atomic.align64 is an
-			// empty struct we recognize as a signal that
-			// the struct it contains must be
-			// 64-bit-aligned.
-			//
-			// This logic is equivalent to the logic in
-			// cmd/compile/internal/types/size.go:calcStructOffset
-			return 8
-		}
-
-		// spec: "For a variable x of struct type: unsafe.Alignof(x)
-		// is the largest of the values unsafe.Alignof(x.f) for each
-		// field f of x, but at least 1."
-		max := int64(1)
-		for i, nf := 0, t.NumFields(); i < nf; i++ {
-			if a := s.Alignof(t.Field(i).Type()); a > max {
-				max = a
-			}
-		}
-		return max
-	case *types2.Slice, *types2.Interface:
-		// Multiword data structures are effectively structs
-		// in which each element has size PtrSize.
-		return int64(types.PtrSize)
-	case *types2.Basic:
-		// Strings are like slices and interfaces.
-		if t.Info()&types2.IsString != 0 {
-			return int64(types.PtrSize)
-		}
-	}
-	a := s.Sizeof(T) // may be 0
-	// spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
-	if a < 1 {
-		return 1
-	}
-	// complex{64,128} are aligned like [2]float{32,64}.
-	if isComplex(T) {
-		a /= 2
-	}
-	if a > int64(types.RegSize) {
-		return int64(types.RegSize)
-	}
-	return a
-}
-
-func isComplex(T types2.Type) bool {
-	basic, ok := T.Underlying().(*types2.Basic)
-	return ok && basic.Info()&types2.IsComplex != 0
-}
-
-func (s *gcSizes) Offsetsof(fields []*types2.Var) []int64 {
-	offsets := make([]int64, len(fields))
-	var offs int64
-	for i, f := range fields {
-		if offs < 0 {
-			// all remaining offsets are too large
-			offsets[i] = -1
-			continue
-		}
-		// offs >= 0
-		typ := f.Type()
-		a := s.Alignof(typ)
-		offs = types.RoundUp(offs, a) // possibly < 0 if align overflows
-		offsets[i] = offs
-		if d := s.Sizeof(typ); d >= 0 && offs >= 0 {
-			offs += d // ok to overflow to < 0
-		} else {
-			offs = -1
-		}
-	}
-	return offsets
-}
-
-func (s *gcSizes) Sizeof(T types2.Type) int64 {
-	switch t := T.Underlying().(type) {
-	case *types2.Basic:
-		k := t.Kind()
-		if int(k) < len(basicSizes) {
-			if s := basicSizes[k]; s > 0 {
-				return int64(s)
-			}
-		}
-		switch k {
-		case types2.String:
-			return int64(types.PtrSize) * 2
-		case types2.Int, types2.Uint, types2.Uintptr, types2.UnsafePointer:
-			return int64(types.PtrSize)
-		}
-		panic(fmt.Sprintf("unimplemented basic: %v (kind %v)", T, k))
-	case *types2.Array:
-		n := t.Len()
-		if n <= 0 {
-			return 0
-		}
-		// n > 0
-		// gc: Size includes alignment padding.
-		esize := s.Sizeof(t.Elem())
-		if esize < 0 {
-			return -1 // array element too large
-		}
-		if esize == 0 {
-			return 0 // 0-size element
-		}
-		// esize > 0
-		// Final size is esize * n; and size must be <= maxInt64.
-		const maxInt64 = 1<<63 - 1
-		if esize > maxInt64/n {
-			return -1 // esize * n overflows
-		}
-		return esize * n
-	case *types2.Slice:
-		return int64(types.PtrSize) * 3
-	case *types2.Struct:
-		n := t.NumFields()
-		if n == 0 {
-			return 0
-		}
-		fields := make([]*types2.Var, n)
-		for i := range fields {
-			fields[i] = t.Field(i)
-		}
-		offsets := s.Offsetsof(fields)
-
-		// gc: The last field of a non-zero-sized struct is not allowed to
-		// have size 0.
-		last := s.Sizeof(fields[n-1].Type())
-		if last == 0 && offsets[n-1] > 0 {
-			last = 1
-		}
-
-		// gc: Size includes alignment padding.
-		return types.RoundUp(offsets[n-1]+last, s.Alignof(t)) // may overflow to < 0 which is ok
-	case *types2.Interface:
-		return int64(types.PtrSize) * 2
-	case *types2.Chan, *types2.Map, *types2.Pointer, *types2.Signature:
-		return int64(types.PtrSize)
-	default:
-		panic(fmt.Sprintf("unimplemented type: %T", t))
-	}
-}
-
-var basicSizes = [...]byte{
-	types2.Invalid:    1,
-	types2.Bool:       1,
-	types2.Int8:       1,
-	types2.Int16:      2,
-	types2.Int32:      4,
-	types2.Int64:      8,
-	types2.Uint8:      1,
-	types2.Uint16:     2,
-	types2.Uint32:     4,
-	types2.Uint64:     8,
-	types2.Float32:    4,
-	types2.Float64:    8,
-	types2.Complex64:  8,
-	types2.Complex128: 16,
-}
diff --git a/src/cmd/compile/internal/noder/stmt.go b/src/cmd/compile/internal/noder/stmt.go
index aa82274..04f92d2 100644
--- a/src/cmd/compile/internal/noder/stmt.go
+++ b/src/cmd/compile/internal/noder/stmt.go
@@ -22,33 +22,3 @@
 	syntax.Defer: ir.ODEFER,
 	syntax.Go:    ir.OGO,
 }
-
-// initDefn marks the given names as declared by defn and populates
-// its Init field with ODCL nodes. It then reports whether any names
-// were so declared, which can be used to initialize defn.Def.
-func initDefn(defn ir.InitNode, names []*ir.Name) bool {
-	if len(names) == 0 {
-		return false
-	}
-
-	init := make([]ir.Node, len(names))
-	for i, name := range names {
-		name.Defn = defn
-		init[i] = ir.NewDecl(name.Pos(), ir.ODCL, name)
-	}
-	defn.SetInit(init)
-	return true
-}
-
-// unpackTwo returns the first two nodes in list. If list has fewer
-// than 2 nodes, then the missing nodes are replaced with nils.
-func unpackTwo(list []ir.Node) (fst, snd ir.Node) {
-	switch len(list) {
-	case 0:
-		return nil, nil
-	case 1:
-		return list[0], nil
-	default:
-		return list[0], list[1]
-	}
-}
diff --git a/src/cmd/compile/internal/noder/types.go b/src/cmd/compile/internal/noder/types.go
index 6caf158..76c6d15 100644
--- a/src/cmd/compile/internal/noder/types.go
+++ b/src/cmd/compile/internal/noder/types.go
@@ -9,8 +9,6 @@
 	"cmd/compile/internal/types2"
 )
 
-var universeAny = types2.Universe.Lookup("any").Type()
-
 var basics = [...]**types.Type{
 	types2.Invalid:        new(*types.Type),
 	types2.Bool:           &types.Types[types.TBOOL],
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
index 6c4ac66..492b00d 100644
--- a/src/cmd/compile/internal/noder/unified.go
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -6,7 +6,6 @@
 
 import (
 	"fmt"
-	"internal/goversion"
 	"internal/pkgbits"
 	"io"
 	"runtime"
@@ -16,6 +15,7 @@
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/inline"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/pgo"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/compile/internal/types2"
@@ -27,6 +27,110 @@
 // later.
 var localPkgReader *pkgReader
 
+// LookupFunc returns the ir.Func for an arbitrary full symbol name if
+// that function exists in the set of available export data.
+//
+// This allows lookup of arbitrary functions and methods that aren't otherwise
+// referenced by the local package and thus haven't been read yet.
+//
+// TODO(prattmic): Does not handle instantiation of generic types. Currently
+// profiles don't contain the original type arguments, so we won't be able to
+// create the runtime dictionaries.
+//
+// TODO(prattmic): Hit rate of this function is usually fairly low, and errors
+// are only used when debug logging is enabled. Consider constructing cheaper
+// errors by default.
+func LookupFunc(fullName string) (*ir.Func, error) {
+	pkgPath, symName, err := ir.ParseLinkFuncName(fullName)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing symbol name %q: %v", fullName, err)
+	}
+
+	pkg, ok := types.PkgMap()[pkgPath]
+	if !ok {
+		return nil, fmt.Errorf("pkg %s doesn't exist in %v", pkgPath, types.PkgMap())
+	}
+
+	// Symbol naming is ambiguous. We can't necessarily distinguish between
+	// a method and a closure. e.g., is foo.Bar.func1 a closure defined in
+	// function Bar, or a method on type Bar? Thus we must simply attempt
+	// to look up both.
+
+	fn, err := lookupFunction(pkg, symName)
+	if err == nil {
+		return fn, nil
+	}
+
+	fn, mErr := lookupMethod(pkg, symName)
+	if mErr == nil {
+		return fn, nil
+	}
+
+	return nil, fmt.Errorf("%s is not a function (%v) or method (%v)", fullName, err, mErr)
+}
+
+func lookupFunction(pkg *types.Pkg, symName string) (*ir.Func, error) {
+	sym := pkg.Lookup(symName)
+
+	// TODO(prattmic): Enclosed functions (e.g., foo.Bar.func1) are not
+	// present in objReader, only as OCLOSURE nodes in the enclosing
+	// function.
+	pri, ok := objReader[sym]
+	if !ok {
+		return nil, fmt.Errorf("func sym %v missing objReader", sym)
+	}
+
+	node, err := pri.pr.objIdxMayFail(pri.idx, nil, nil, false)
+	if err != nil {
+		return nil, fmt.Errorf("func sym %v lookup error: %w", sym, err)
+	}
+	name := node.(*ir.Name)
+	if name.Op() != ir.ONAME || name.Class != ir.PFUNC {
+		return nil, fmt.Errorf("func sym %v refers to non-function name: %v", sym, name)
+	}
+	return name.Func, nil
+}
+
+func lookupMethod(pkg *types.Pkg, symName string) (*ir.Func, error) {
+	// N.B. readPackage creates a Sym for every object in the package to
+	// initialize objReader and importBodyReader, even if the object isn't
+	// read.
+	//
+	// However, objReader is only initialized for top-level objects, so we
+	// must first look up the type and use that to find the method rather
+	// than looking for the method directly.
+	typ, meth, err := ir.LookupMethodSelector(pkg, symName)
+	if err != nil {
+		return nil, fmt.Errorf("error looking up method symbol %q: %v", symName, err)
+	}
+
+	pri, ok := objReader[typ]
+	if !ok {
+		return nil, fmt.Errorf("type sym %v missing objReader", typ)
+	}
+
+	node, err := pri.pr.objIdxMayFail(pri.idx, nil, nil, false)
+	if err != nil {
+		return nil, fmt.Errorf("func sym %v lookup error: %w", typ, err)
+	}
+	name := node.(*ir.Name)
+	if name.Op() != ir.OTYPE {
+		return nil, fmt.Errorf("type sym %v refers to non-type name: %v", typ, name)
+	}
+	if name.Alias() {
+		return nil, fmt.Errorf("type sym %v refers to alias", typ)
+	}
+
+	for _, m := range name.Type().Methods() {
+		if m.Sym == meth {
+			fn := m.Nname.(*ir.Name).Func
+			return fn, nil
+		}
+	}
+
+	return nil, fmt.Errorf("method %s missing from method set of %v", symName, typ)
+}
+
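
For intuition, a standalone sketch of the linker-symbol splitting that the parsing step above relies on (splitLinkName is a hypothetical helper, not the compiler's ir.ParseLinkFuncName): the package path ends at the first dot after the last slash, and whatever follows is the in-package symbol, which is exactly where the method-versus-closure ambiguity noted above comes from.

package main

import (
	"fmt"
	"strings"
)

// splitLinkName splits a linker symbol such as "example.com/foo.(*T).Method"
// at the first dot after the last slash, yielding the package path and the
// in-package symbol name.
func splitLinkName(full string) (pkgPath, sym string, ok bool) {
	slash := strings.LastIndexByte(full, '/')
	dot := strings.IndexByte(full[slash+1:], '.')
	if dot < 0 {
		return "", "", false
	}
	dot += slash + 1
	return full[:dot], full[dot+1:], true
}

func main() {
	fmt.Println(splitLinkName("example.com/foo.(*T).Method"))
	// example.com/foo (*T).Method true
	fmt.Println(splitLinkName("example.com/foo.Bar.func1"))
	// example.com/foo Bar.func1 true (method on Bar, or closure in Bar?)
}
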
 // unified constructs the local package's Internal Representation (IR)
 // from its syntax tree (AST).
 //
@@ -71,53 +175,43 @@
 func unified(m posMap, noders []*noder) {
 	inline.InlineCall = unifiedInlineCall
 	typecheck.HaveInlineBody = unifiedHaveInlineBody
+	pgo.LookupFunc = LookupFunc
 
 	data := writePkgStub(m, noders)
 
-	// We already passed base.Flag.Lang to types2 to handle validating
-	// the user's source code. Bump it up now to the current version and
-	// re-parse, so typecheck doesn't complain if we construct IR that
-	// utilizes newer Go features.
-	base.Flag.Lang = fmt.Sprintf("go1.%d", goversion.Version)
-	types.ParseLangFlag()
-
 	target := typecheck.Target
 
-	typecheck.TypecheckAllowed = true
-
 	localPkgReader = newPkgReader(pkgbits.NewPkgDecoder(types.LocalPkg.Path, data))
 	readPackage(localPkgReader, types.LocalPkg, true)
 
 	r := localPkgReader.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate)
 	r.pkgInit(types.LocalPkg, target)
 
-	// Type-check any top-level assignments. We ignore non-assignments
-	// here because other declarations are typechecked as they're
-	// constructed.
-	for i, ndecls := 0, len(target.Decls); i < ndecls; i++ {
-		switch n := target.Decls[i]; n.Op() {
-		case ir.OAS, ir.OAS2:
-			target.Decls[i] = typecheck.Stmt(n)
-		}
-	}
-
 	readBodies(target, false)
 
 	// Check that nothing snuck past typechecking.
-	for _, n := range target.Decls {
-		if n.Typecheck() == 0 {
-			base.FatalfAt(n.Pos(), "missed typecheck: %v", n)
+	for _, fn := range target.Funcs {
+		if fn.Typecheck() == 0 {
+			base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
 		}
 
 		// For functions, check that at least their first statement (if
 		// any) was typechecked too.
-		if fn, ok := n.(*ir.Func); ok && len(fn.Body) != 0 {
+		if len(fn.Body) != 0 {
 			if stmt := fn.Body[0]; stmt.Typecheck() == 0 {
 				base.FatalfAt(stmt.Pos(), "missed typecheck: %v", stmt)
 			}
 		}
 	}
 
+	// For functions that originally came from package runtime,
+	// mark them norace to prevent instrumentation; see issue #60439.
+	for _, fn := range target.Funcs {
+		if !base.Flag.CompilingRuntime && types.RuntimeSymName(fn.Sym()) != "" {
+			fn.Pragma |= ir.Norace
+		}
+	}
+
 	base.ExitIfErrors() // just in case
 }
 
@@ -128,7 +222,7 @@
 // necessary on instantiations of imported generic functions, so their
 // inlining costs can be computed.
 func readBodies(target *ir.Package, duringInlining bool) {
-	var inlDecls []ir.Node
+	var inlDecls []*ir.Func
 
 	// Don't use range--bodyIdx can add closures to todoBodies.
 	for {
@@ -165,7 +259,7 @@
 				if duringInlining && canSkipNonGenericMethod {
 					inlDecls = append(inlDecls, fn)
 				} else {
-					target.Decls = append(target.Decls, fn)
+					target.Funcs = append(target.Funcs, fn)
 				}
 			}
 
@@ -194,11 +288,11 @@
 
 		oldLowerM := base.Flag.LowerM
 		base.Flag.LowerM = 0
-		inline.InlineDecls(nil, inlDecls, false)
+		inline.CanInlineFuncs(inlDecls, nil)
 		base.Flag.LowerM = oldLowerM
 
 		for _, fn := range inlDecls {
-			fn.(*ir.Func).Body = nil // free memory
+			fn.Body = nil // free memory
 		}
 	}
 }
@@ -321,7 +415,7 @@
 
 		if r.Bool() {
 			sym := importpkg.Lookup(".inittask")
-			task := ir.NewNameAt(src.NoXPos, sym)
+			task := ir.NewNameAt(src.NoXPos, sym, nil)
 			task.Class = ir.PEXTERN
 			sym.Def = task
 		}
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
index 178c3eb..e5894c9 100644
--- a/src/cmd/compile/internal/noder/writer.go
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -6,8 +6,12 @@
 
 import (
 	"fmt"
+	"go/constant"
+	"go/token"
+	"go/version"
 	"internal/buildcfg"
 	"internal/pkgbits"
+	"os"
 
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
@@ -213,7 +217,7 @@
 // generic function or method.
 func (dict *writerDict) typeParamIndex(typ *types2.TypeParam) int {
 	for idx, implicit := range dict.implicits {
-		if implicit.Type().(*types2.TypeParam) == typ {
+		if types2.Unalias(implicit.Type()).(*types2.TypeParam) == typ {
 			return idx
 		}
 	}
@@ -494,7 +498,7 @@
 	w := pw.newWriter(pkgbits.RelocType, pkgbits.SyncTypeIdx)
 	w.dict = dict
 
-	switch typ := typ.(type) {
+	switch typ := types2.Unalias(typ).(type) {
 	default:
 		base.Fatalf("unexpected type: %v (%T)", typ, typ)
 
@@ -885,7 +889,7 @@
 	// parameter is constrained to `int | uint` but then never used in
 	// arithmetic/conversions/etc, we could shape those together.
 	for _, implicit := range dict.implicits {
-		tparam := implicit.Type().(*types2.TypeParam)
+		tparam := types2.Unalias(implicit.Type()).(*types2.TypeParam)
 		w.Bool(tparam.Underlying().(*types2.Interface).IsMethodSet())
 	}
 	for i := 0; i < ntparams; i++ {
@@ -1053,6 +1057,9 @@
 
 	sig, block := obj.Type().(*types2.Signature), decl.Body
 	body, closureVars := w.p.bodyIdx(sig, block, w.dict)
+	if len(closureVars) > 0 {
+		fmt.Fprintln(os.Stderr, "CLOSURE", closureVars)
+	}
 	assert(len(closureVars) == 0)
 
 	w.Sync(pkgbits.SyncFuncExt)
@@ -1112,7 +1119,7 @@
 	w.sig = sig
 	w.dict = dict
 
-	w.funcargs(sig)
+	w.declareParams(sig)
 	if w.Bool(block != nil) {
 		w.stmts(block.List)
 		w.pos(block.Rbrace)
@@ -1121,24 +1128,18 @@
 	return w.Flush(), w.closureVars
 }
 
-func (w *writer) funcargs(sig *types2.Signature) {
-	do := func(params *types2.Tuple, result bool) {
+func (w *writer) declareParams(sig *types2.Signature) {
+	addLocals := func(params *types2.Tuple) {
 		for i := 0; i < params.Len(); i++ {
-			w.funcarg(params.At(i), result)
+			w.addLocal(params.At(i))
 		}
 	}
 
 	if recv := sig.Recv(); recv != nil {
-		w.funcarg(recv, false)
+		w.addLocal(recv)
 	}
-	do(sig.Params(), false)
-	do(sig.Results(), true)
-}
-
-func (w *writer) funcarg(param *types2.Var, result bool) {
-	if param.Name() != "" || result {
-		w.addLocal(param)
-	}
+	addLocals(sig.Params())
+	addLocals(sig.Results())
 }
 
 // addLocal records the declaration of a new local variable.
@@ -1206,9 +1207,18 @@
 }
 
 func (w *writer) stmts(stmts []syntax.Stmt) {
+	dead := false
 	w.Sync(pkgbits.SyncStmts)
 	for _, stmt := range stmts {
+		if dead {
+			// Any statements after a terminating statement are safe to
+			// omit, at least until the next labeled statement.
+			if _, ok := stmt.(*syntax.LabeledStmt); !ok {
+				continue
+			}
+		}
 		w.stmt1(stmt)
+		dead = w.p.terminates(stmt)
 	}
 	w.Code(stmtEnd)
 	w.Sync(pkgbits.SyncStmtsEnd)
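
A minimal example (assumed, not taken from the CL) of what this skip buys: the call after the second return below is never written to the IR stream. A labeled statement in the same position would still be emitted, since a goto elsewhere in the function could target it.

package main

func report(x int) {
	if x == 0 {
		println("zero")
		return
	}
	println("nonzero")
	return
	println("never emitted") // dead: follows a terminating return
}

func main() { report(0) }
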
@@ -1261,6 +1271,9 @@
 		w.pos(stmt)
 		w.op(callOps[stmt.Tok])
 		w.expr(stmt.Call)
+		if stmt.Tok == syntax.Defer {
+			w.optExpr(stmt.DeferAt)
+		}
 
 	case *syntax.DeclStmt:
 		for _, decl := range stmt.DeclList {
@@ -1293,7 +1306,7 @@
 		dstType := func(i int) types2.Type {
 			return resultTypes.At(i).Type()
 		}
-		w.multiExpr(stmt, dstType, unpackListExpr(stmt.Results))
+		w.multiExpr(stmt, dstType, syntax.UnpackListExpr(stmt.Results))
 
 	case *syntax.SelectStmt:
 		w.Code(stmtSelect)
@@ -1314,7 +1327,7 @@
 }
 
 func (w *writer) assignList(expr syntax.Expr) {
-	exprs := unpackListExpr(expr)
+	exprs := syntax.UnpackListExpr(expr)
 	w.Len(len(exprs))
 
 	for _, expr := range exprs {
@@ -1323,7 +1336,7 @@
 }
 
 func (w *writer) assign(expr syntax.Expr) {
-	expr = unparen(expr)
+	expr = syntax.Unparen(expr)
 
 	if name, ok := expr.(*syntax.Name); ok {
 		if name.Value == "_" {
@@ -1364,8 +1377,8 @@
 
 // assignStmt writes out an assignment for "lhs = rhs".
 func (w *writer) assignStmt(pos poser, lhs0, rhs0 syntax.Expr) {
-	lhs := unpackListExpr(lhs0)
-	rhs := unpackListExpr(rhs0)
+	lhs := syntax.UnpackListExpr(lhs0)
+	rhs := syntax.UnpackListExpr(rhs0)
 
 	w.Code(stmtAssign)
 	w.pos(pos)
@@ -1382,7 +1395,7 @@
 		// Finding dstType is somewhat involved, because for VarDecl
 		// statements, the Names are only added to the info.{Defs,Uses}
 		// maps, not to info.Types.
-		if name, ok := unparen(dst).(*syntax.Name); ok {
+		if name, ok := syntax.Unparen(dst).(*syntax.Name); ok {
 			if name.Value == "_" {
 				return nil // ok: no implicit conversion
 			} else if def, ok := w.p.info.Defs[name].(*types2.Var); ok {
@@ -1421,12 +1434,12 @@
 			w.rtype(xtyp)
 		}
 		{
-			lhs := unpackListExpr(rang.Lhs)
+			lhs := syntax.UnpackListExpr(rang.Lhs)
 			assign := func(i int, src types2.Type) {
 				if i >= len(lhs) {
 					return
 				}
-				dst := unparen(lhs[i])
+				dst := syntax.Unparen(lhs[i])
 				if name, ok := dst.(*syntax.Name); ok && name.Value == "_" {
 					return
 				}
@@ -1443,12 +1456,17 @@
 				w.convRTTI(src, dstType)
 			}
 
-			keyType, valueType := w.p.rangeTypes(rang.X)
+			keyType, valueType := types2.RangeKeyVal(w.p.typeOf(rang.X))
 			assign(0, keyType)
 			assign(1, valueType)
 		}
 
 	} else {
+		if stmt.Cond != nil && w.p.staticBool(&stmt.Cond) < 0 { // always false
+			stmt.Post = nil
+			stmt.Body.List = nil
+		}
+
 		w.pos(stmt)
 		w.stmt(stmt.Init)
 		w.optExpr(stmt.Cond)
@@ -1456,42 +1474,46 @@
 	}
 
 	w.blockStmt(stmt.Body)
-	w.Bool(base.Debug.LoopVar > 0)
+	w.Bool(w.distinctVars(stmt))
 	w.closeAnotherScope()
 }
 
-// rangeTypes returns the types of values produced by ranging over
-// expr.
-func (pw *pkgWriter) rangeTypes(expr syntax.Expr) (key, value types2.Type) {
-	typ := pw.typeOf(expr)
-	switch typ := types2.CoreType(typ).(type) {
-	case *types2.Pointer: // must be pointer to array
-		return types2.Typ[types2.Int], types2.CoreType(typ.Elem()).(*types2.Array).Elem()
-	case *types2.Array:
-		return types2.Typ[types2.Int], typ.Elem()
-	case *types2.Slice:
-		return types2.Typ[types2.Int], typ.Elem()
-	case *types2.Basic:
-		if typ.Info()&types2.IsString != 0 {
-			return types2.Typ[types2.Int], runeTypeName.Type()
-		}
-	case *types2.Map:
-		return typ.Key(), typ.Elem()
-	case *types2.Chan:
-		return typ.Elem(), nil
-	}
-	pw.fatalf(expr, "unexpected range type: %v", typ)
-	panic("unreachable")
+func (w *writer) distinctVars(stmt *syntax.ForStmt) bool {
+	lv := base.Debug.LoopVar
+	fileVersion := w.p.info.FileVersions[stmt.Pos().Base()]
+	is122 := fileVersion == "" || version.Compare(fileVersion, "go1.22") >= 0
+
+	// Turning off loopvar for 1.22 is only possible with loopvarhash=qn
+	//
+	// Debug.LoopVar values to be preserved for 1.21 compatibility are 1 and 2,
+	// which are also set (=1) by GOEXPERIMENT=loopvar.  The knobs for turning on
+	// the new, unshared, loopvar behavior apply to versions less than 1.21 because
+	// (1) 1.21 also did that and (2) this is believed to be the likely use case;
+	// anyone checking to see if it affects their code will just run the GOEXPERIMENT
+	// but will not also update all their go.mod files to 1.21.
+	//
+	// -gcflags=-d=loopvar=3 enables logging for 1.22 but does not turn loopvar on for <= 1.21.
+
+	return is122 || lv > 0 && lv != 3
 }
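
A standalone illustration of the per-iteration loop variable semantics this helper gates (not part of the change itself): with the file's language version at go1.22 or later, each iteration gets its own i, so the goroutines print 0, 1 and 2 in some order rather than all observing the final value.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(i) // go1.22: captures this iteration's own i
		}()
	}
	wg.Wait()
}
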
 
 func (w *writer) ifStmt(stmt *syntax.IfStmt) {
+	cond := w.p.staticBool(&stmt.Cond)
+
 	w.Sync(pkgbits.SyncIfStmt)
 	w.openScope(stmt.Pos())
 	w.pos(stmt)
 	w.stmt(stmt.Init)
 	w.expr(stmt.Cond)
-	w.blockStmt(stmt.Then)
-	w.stmt(stmt.Else)
+	w.Int(cond)
+	if cond >= 0 {
+		w.blockStmt(stmt.Then)
+	} else {
+		w.pos(stmt.Then.Rbrace)
+	}
+	if cond <= 0 {
+		w.stmt(stmt.Else)
+	}
 	w.closeAnotherScope()
 }
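
For intuition, an assumed input for this folding: the condition below is a compile-time constant, so only the branch that can actually run is written out.

package main

import "fmt"

const debug = false

func main() {
	if debug { // statically false (-1): only the (empty) else path is emitted
		fmt.Println("trace: starting")
	}
	fmt.Println("work")
}
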
 
@@ -1539,10 +1561,56 @@
 	} else {
 		tag := stmt.Tag
 
+		var tagValue constant.Value
 		if tag != nil {
-			tagType = w.p.typeOf(tag)
+			tv := w.p.typeAndValue(tag)
+			tagType = tv.Type
+			tagValue = tv.Value
 		} else {
 			tagType = types2.Typ[types2.Bool]
+			tagValue = constant.MakeBool(true)
+		}
+
+		if tagValue != nil {
+			// If the switch tag has a constant value, look for a case
+			// clause that we always branch to.
+			func() {
+				var target *syntax.CaseClause
+			Outer:
+				for _, clause := range stmt.Body {
+					if clause.Cases == nil {
+						target = clause
+					}
+					for _, cas := range syntax.UnpackListExpr(clause.Cases) {
+						tv := w.p.typeAndValue(cas)
+						if tv.Value == nil {
+							return // non-constant case; give up
+						}
+						if constant.Compare(tagValue, token.EQL, tv.Value) {
+							target = clause
+							break Outer
+						}
+					}
+				}
+				// We've found the target clause, if any.
+
+				if target != nil {
+					if hasFallthrough(target.Body) {
+						return // fallthrough is tricky; give up
+					}
+
+					// Rewrite as single "default" case.
+					target.Cases = nil
+					stmt.Body = []*syntax.CaseClause{target}
+				} else {
+					stmt.Body = nil
+				}
+
+				// Clear switch tag (i.e., replace with implicit "true").
+				tag = nil
+				stmt.Tag = nil
+				tagType = types2.Typ[types2.Bool]
+			}()
 		}
 
 		// Walk is going to emit comparisons between the tag value and
@@ -1552,7 +1620,7 @@
 		// `any` instead.
 	Outer:
 		for _, clause := range stmt.Body {
-			for _, cas := range unpackListExpr(clause.Cases) {
+			for _, cas := range syntax.UnpackListExpr(clause.Cases) {
 				if casType := w.p.typeOf(cas); !types2.AssignableTo(casType, tagType) {
 					tagType = types2.NewInterfaceType(nil, nil)
 					break Outer
@@ -1574,7 +1642,7 @@
 
 		w.pos(clause)
 
-		cases := unpackListExpr(clause.Cases)
+		cases := syntax.UnpackListExpr(clause.Cases)
 		if iface != nil {
 			w.Len(len(cases))
 			for _, cas := range cases {
@@ -1602,7 +1670,7 @@
 			// instead just set the variable's DWARF scoping info earlier so
 			// we can give it the correct position information.
 			pos := clause.Pos()
-			if typs := unpackListExpr(clause.Cases); len(typs) != 0 {
+			if typs := syntax.UnpackListExpr(clause.Cases); len(typs) != 0 {
 				pos = typeExprEndPos(typs[len(typs)-1])
 			}
 			w.pos(pos)
@@ -1641,12 +1709,21 @@
 func (w *writer) expr(expr syntax.Expr) {
 	base.Assertf(expr != nil, "missing expression")
 
-	expr = unparen(expr) // skip parens; unneeded after typecheck
+	expr = syntax.Unparen(expr) // skip parens; unneeded after typecheck
 
 	obj, inst := lookupObj(w.p, expr)
 	targs := inst.TypeArgs
 
 	if tv, ok := w.p.maybeTypeAndValue(expr); ok {
+		if tv.IsRuntimeHelper() {
+			if pkg := obj.Pkg(); pkg != nil && pkg.Name() == "runtime" {
+				objName := obj.Name()
+				w.Code(exprRuntimeBuiltin)
+				w.String(objName)
+				return
+			}
+		}
+
 		if tv.IsType() {
 			w.p.fatalf(expr, "unexpected type expression %v", syntax.String(expr))
 		}
@@ -1658,16 +1735,11 @@
 			assert(typ != nil)
 			w.typ(typ)
 			w.Value(tv.Value)
-
-			// TODO(mdempsky): These details are only important for backend
-			// diagnostics. Explore writing them out separately.
-			w.op(constExprOp(expr))
-			w.String(syntax.String(expr))
 			return
 		}
 
 		if _, isNil := obj.(*types2.Nil); isNil {
-			w.Code(exprNil)
+			w.Code(exprZero)
 			w.pos(expr)
 			w.typ(tv.Type)
 			return
@@ -1847,7 +1919,7 @@
 
 		var rtype types2.Type
 		if tv.IsBuiltin() {
-			switch obj, _ := lookupObj(w.p, expr.Fun); obj.Name() {
+			switch obj, _ := lookupObj(w.p, syntax.Unparen(expr.Fun)); obj.Name() {
 			case "make":
 				assert(len(expr.ArgList) >= 1)
 				assert(!expr.HasDots)
@@ -1880,6 +1952,39 @@
 				w.exprType(nil, expr.ArgList[0])
 				return
 
+			case "Sizeof":
+				assert(len(expr.ArgList) == 1)
+				assert(!expr.HasDots)
+
+				w.Code(exprSizeof)
+				w.pos(expr)
+				w.typ(w.p.typeOf(expr.ArgList[0]))
+				return
+
+			case "Alignof":
+				assert(len(expr.ArgList) == 1)
+				assert(!expr.HasDots)
+
+				w.Code(exprAlignof)
+				w.pos(expr)
+				w.typ(w.p.typeOf(expr.ArgList[0]))
+				return
+
+			case "Offsetof":
+				assert(len(expr.ArgList) == 1)
+				assert(!expr.HasDots)
+				selector := syntax.Unparen(expr.ArgList[0]).(*syntax.SelectorExpr)
+				index := w.p.info.Selections[selector].Index()
+
+				w.Code(exprOffsetof)
+				w.pos(expr)
+				w.typ(deref2(w.p.typeOf(selector.X)))
+				w.Len(len(index) - 1)
+				for _, idx := range index {
+					w.Len(idx)
+				}
+				return
+
 			case "append":
 				rtype = sliceElem(w.p.typeOf(expr))
 			case "copy":
@@ -1900,7 +2005,7 @@
 		}
 
 		writeFunExpr := func() {
-			fun := unparen(expr.Fun)
+			fun := syntax.Unparen(expr.Fun)
 
 			if selector, ok := fun.(*syntax.SelectorExpr); ok {
 				if sel, ok := w.p.info.Selections[selector]; ok && sel.Kind() == types2.MethodVal {
@@ -2019,7 +2124,7 @@
 
 	// Method on a type parameter. These require an indirect call
 	// through the current function's runtime dictionary.
-	if typeParam, ok := recv.(*types2.TypeParam); w.Bool(ok) {
+	if typeParam, ok := types2.Unalias(recv).(*types2.TypeParam); w.Bool(ok) {
 		typeParamIdx := w.dict.typeParamIndex(typeParam)
 		methodInfo := w.p.selectorIdx(fun)
 
@@ -2032,7 +2137,7 @@
 	}
 
 	if !isInterface(recv) {
-		if named, ok := deref2(recv).(*types2.Named); ok {
+		if named, ok := types2.Unalias(deref2(recv)).(*types2.Named); ok {
 			obj, targs := splitNamed(named)
 			info := w.p.objInstIdx(obj, targs, w.dict)
 
@@ -2212,9 +2317,13 @@
 	var_ *types2.Var
 }
 
+func (p posVar) String() string {
+	return p.pos.String() + ":" + p.var_.String()
+}
+
 func (w *writer) exprList(expr syntax.Expr) {
 	w.Sync(pkgbits.SyncExprList)
-	w.exprs(unpackListExpr(expr))
+	w.exprs(syntax.UnpackListExpr(expr))
 }
 
 func (w *writer) exprs(exprs []syntax.Expr) {
@@ -2254,7 +2363,7 @@
 }
 
 func isUntyped(typ types2.Type) bool {
-	basic, ok := typ.(*types2.Basic)
+	basic, ok := types2.Unalias(typ).(*types2.Basic)
 	return ok && basic.Info()&types2.IsUntyped != 0
 }
 
@@ -2307,7 +2416,7 @@
 // If typ is a type parameter, then isInterface reports an internal
 // compiler error instead.
 func isInterface(typ types2.Type) bool {
-	if _, ok := typ.(*types2.TypeParam); ok {
+	if _, ok := types2.Unalias(typ).(*types2.TypeParam); ok {
 		// typ is a type parameter and may be instantiated as either a
 		// concrete or interface type, so the writer can't depend on
 		// knowing this.
@@ -2384,7 +2493,7 @@
 	case *syntax.ImportDecl:
 		pw.checkPragmas(n.Pragma, 0, false)
 
-		switch pkgNameOf(pw.info, n).Imported().Path() {
+		switch pw.info.PkgNameOf(n).Imported().Path() {
 		case "embed":
 			c.file.importedEmbed = true
 		case "unsafe":
@@ -2507,6 +2616,8 @@
 		w.Strings(cgoPragma)
 	}
 
+	w.pkgInitOrder()
+
 	w.Sync(pkgbits.SyncDecls)
 	for _, p := range noders {
 		for _, decl := range p.file.DeclList {
@@ -2518,6 +2629,18 @@
 	w.Sync(pkgbits.SyncEOF)
 }
 
+func (w *writer) pkgInitOrder() {
+	// TODO(mdempsky): Write as a function body instead?
+	w.Len(len(w.p.info.InitOrder))
+	for _, init := range w.p.info.InitOrder {
+		w.Len(len(init.Lhs))
+		for _, v := range init.Lhs {
+			w.obj(v, nil)
+		}
+		w.expr(init.Rhs)
+	}
+}
+
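
What types2's InitOrder records, in a standalone illustration: package-level variables are initialized in dependency order, not declaration order, so b below is assigned before a.

package main

import "fmt"

var a = b + 1 // depends on b, so b is initialized first
var b = f()

func f() int { return 41 }

func main() { fmt.Println(a, b) } // 42 41
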
 func (w *writer) pkgDecl(decl syntax.Decl) {
 	switch decl := decl.(type) {
 	default:
@@ -2572,16 +2695,8 @@
 
 	case *syntax.VarDecl:
 		w.Code(declVar)
-		w.pos(decl)
 		w.pkgObjs(decl.NameList...)
 
-		// TODO(mdempsky): It would make sense to use multiExpr here, but
-		// that results in IR that confuses pkginit/initorder.go. So we
-		// continue using exprList, and let typecheck handle inserting any
-		// implicit conversions. That's okay though, because package-scope
-		// assignments never require dictionaries.
-		w.exprList(decl.Values)
-
 		var embeds []pragmaEmbed
 		if p, ok := decl.Pragma.(*pragmas); ok {
 			embeds = p.Embeds
@@ -2609,12 +2724,67 @@
 
 // @@@ Helpers
 
+// staticBool analyzes a boolean expression and reports whether it's
+// always true (positive result), always false (negative result), or
+// unknown (zero).
+//
+// It also simplifies the expression while preserving semantics, if
+// possible.
+func (pw *pkgWriter) staticBool(ep *syntax.Expr) int {
+	if val := pw.typeAndValue(*ep).Value; val != nil {
+		if constant.BoolVal(val) {
+			return +1
+		} else {
+			return -1
+		}
+	}
+
+	if e, ok := (*ep).(*syntax.Operation); ok {
+		switch e.Op {
+		case syntax.Not:
+			return -pw.staticBool(&e.X)
+
+		case syntax.AndAnd:
+			x := pw.staticBool(&e.X)
+			if x < 0 {
+				*ep = e.X
+				return x
+			}
+
+			y := pw.staticBool(&e.Y)
+			if x > 0 || y < 0 {
+				if pw.typeAndValue(e.X).Value != nil {
+					*ep = e.Y
+				}
+				return y
+			}
+
+		case syntax.OrOr:
+			x := pw.staticBool(&e.X)
+			if x > 0 {
+				*ep = e.X
+				return x
+			}
+
+			y := pw.staticBool(&e.Y)
+			if x < 0 || y > 0 {
+				if pw.typeAndValue(e.X).Value != nil {
+					*ep = e.Y
+				}
+				return y
+			}
+		}
+	}
+
+	return 0
+}
+
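
The tri-valued convention in miniature, as a hypothetical standalone helper mirroring the AndAnd case above: -1 means always false, 0 unknown, +1 always true.

package main

import "fmt"

// andAnd folds x && y over tri-valued booleans.
func andAnd(x, y int) int {
	if x < 0 {
		return -1 // false && y is always false
	}
	if x > 0 || y < 0 {
		return y // true && y is just y; x && false is always false
	}
	return 0 // otherwise unknown
}

func main() {
	fmt.Println(andAnd(0, -1)) // -1: unknown && false is still always false
	fmt.Println(andAnd(+1, 0)) // 0: true && unknown stays unknown
}
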
 // hasImplicitTypeParams reports whether obj is a defined type with
 // implicit type parameters (e.g., declared within a generic function
 // or method).
-func (p *pkgWriter) hasImplicitTypeParams(obj *types2.TypeName) bool {
-	if obj.Pkg() == p.curpkg {
-		decl, ok := p.typDecls[obj]
+func (pw *pkgWriter) hasImplicitTypeParams(obj *types2.TypeName) bool {
+	if obj.Pkg() == pw.curpkg {
+		decl, ok := pw.typDecls[obj]
 		assert(ok)
 		if len(decl.implicits) != 0 {
 			return true
@@ -2643,7 +2813,7 @@
 // object is returned as well.
 func lookupObj(p *pkgWriter, expr syntax.Expr) (obj types2.Object, inst types2.Instance) {
 	if index, ok := expr.(*syntax.IndexExpr); ok {
-		args := unpackListExpr(index.Index)
+		args := syntax.UnpackListExpr(index.Index)
 		if len(args) == 1 {
 			tv := p.typeAndValue(args[0])
 			if tv.IsValue() {
@@ -2686,9 +2856,18 @@
 	return tv.IsNil()
 }
 
+// isBuiltin reports whether expr is a (possibly parenthesized)
+// reference to the specified built-in function.
+func (pw *pkgWriter) isBuiltin(expr syntax.Expr, builtin string) bool {
+	if name, ok := syntax.Unparen(expr).(*syntax.Name); ok && name.Value == builtin {
+		return pw.typeAndValue(name).IsBuiltin()
+	}
+	return false
+}
+
 // recvBase returns the base type for the given receiver parameter.
 func recvBase(recv *types2.Var) *types2.Named {
-	typ := recv.Type()
+	typ := types2.Unalias(recv.Type())
 	if ptr, ok := typ.(*types2.Pointer); ok {
 		typ = ptr.Elem()
 	}
@@ -2766,6 +2945,59 @@
 
 // isPtrTo reports whether from is the type *to.
 func isPtrTo(from, to types2.Type) bool {
-	ptr, ok := from.(*types2.Pointer)
+	ptr, ok := types2.Unalias(from).(*types2.Pointer)
 	return ok && types2.Identical(ptr.Elem(), to)
 }
+
+// hasFallthrough reports whether stmts ends in a fallthrough
+// statement.
+func hasFallthrough(stmts []syntax.Stmt) bool {
+	last, ok := lastNonEmptyStmt(stmts).(*syntax.BranchStmt)
+	return ok && last.Tok == syntax.Fallthrough
+}
+
+// lastNonEmptyStmt returns the last non-empty statement in list, if
+// any.
+func lastNonEmptyStmt(stmts []syntax.Stmt) syntax.Stmt {
+	for i := len(stmts) - 1; i >= 0; i-- {
+		stmt := stmts[i]
+		if _, ok := stmt.(*syntax.EmptyStmt); !ok {
+			return stmt
+		}
+	}
+	return nil
+}
+
+// terminates reports whether stmt terminates normal control flow
+// (i.e., does not merely advance to the following statement).
+func (pw *pkgWriter) terminates(stmt syntax.Stmt) bool {
+	switch stmt := stmt.(type) {
+	case *syntax.BranchStmt:
+		if stmt.Tok == syntax.Goto {
+			return true
+		}
+	case *syntax.ReturnStmt:
+		return true
+	case *syntax.ExprStmt:
+		if call, ok := syntax.Unparen(stmt.X).(*syntax.CallExpr); ok {
+			if pw.isBuiltin(call.Fun, "panic") {
+				return true
+			}
+		}
+
+		// The handling of BlockStmt here is approximate, but it serves to
+		// allow dead-code elimination for:
+		//
+		//	if true {
+		//		return x
+		//	}
+		//	unreachable
+	case *syntax.IfStmt:
+		cond := pw.staticBool(&stmt.Cond)
+		return (cond < 0 || pw.terminates(stmt.Then)) && (cond > 0 || pw.terminates(stmt.Else))
+	case *syntax.BlockStmt:
+		return pw.terminates(lastNonEmptyStmt(stmt.List))
+	}
+
+	return false
+}
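
One shape this analysis recognizes, shown as an assumed example: both branches of the if/else below terminate, so the trailing call is dead and the stmts loop above stops emitting at that point.

package main

func check(x int) {
	if x > 0 {
		return
	} else {
		panic("not positive")
	}
	println("never emitted") // dead: both branches above terminate
}

func main() { check(1) }
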
diff --git a/src/cmd/compile/internal/objw/objw.go b/src/cmd/compile/internal/objw/objw.go
index 4189337..7774467 100644
--- a/src/cmd/compile/internal/objw/objw.go
+++ b/src/cmd/compile/internal/objw/objw.go
@@ -9,6 +9,7 @@
 	"cmd/compile/internal/bitvec"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
+	"encoding/binary"
 )
 
 // Uint8 writes an unsigned byte v into s at offset off,
@@ -29,6 +30,22 @@
 	return UintN(s, off, v, types.PtrSize)
 }
 
+// Uvarint writes a varint v into s at offset off,
+// and returns the next unused offset.
+func Uvarint(s *obj.LSym, off int, v uint64) int {
+	var buf [binary.MaxVarintLen64]byte
+	n := binary.PutUvarint(buf[:], v)
+	return int(s.WriteBytes(base.Ctxt, int64(off), buf[:n]))
+}
+
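+// Bool writes v into s at offset off as a single byte (1 for true, 0 for
+// false), and returns the next unused offset.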
+func Bool(s *obj.LSym, off int, v bool) int {
+	w := 0
+	if v {
+		w = 1
+	}
+	return UintN(s, off, uint64(w), 1)
+}
+
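
A round trip of the encoding Uvarint emits, assuming the reader uses encoding/binary's matching decoder (illustrative only, not the compiler's or runtime's own code path):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], 300) // what Uvarint writes into the LSym
	v, read := binary.Uvarint(buf[:n])  // what a reader recovers
	fmt.Println(n, v, read)             // 2 300 2
}
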
 // UintN writes an unsigned integer v of size wid bytes into s at offset off,
 // and returns the next unused offset.
 func UintN(s *obj.LSym, off int, v uint64, wid int) int {
diff --git a/src/cmd/compile/internal/objw/prog.go b/src/cmd/compile/internal/objw/prog.go
index 3175123..84fb996 100644
--- a/src/cmd/compile/internal/objw/prog.go
+++ b/src/cmd/compile/internal/objw/prog.go
@@ -57,8 +57,9 @@
 	pp.Pos = fn.Pos()
 	pp.SetText(fn)
 	// PCDATA tables implicitly start with index -1.
-	pp.PrevLive = LivenessIndex{-1, false}
+	pp.PrevLive = -1
 	pp.NextLive = pp.PrevLive
+	pp.NextUnsafe = pp.PrevUnsafe
 	return pp
 }
 
@@ -72,22 +73,14 @@
 	Cache      []obj.Prog // local progcache
 	CacheIndex int        // first free element of progcache
 
-	NextLive LivenessIndex // liveness index for the next Prog
-	PrevLive LivenessIndex // last emitted liveness index
+	NextLive StackMapIndex // liveness index for the next Prog
+	PrevLive StackMapIndex // last emitted liveness index
+
+	NextUnsafe bool // unsafe mark for the next Prog
+	PrevUnsafe bool // last emitted unsafe mark
 }
 
-// LivenessIndex stores the liveness map information for a Value.
-type LivenessIndex struct {
-	StackMapIndex int
-
-	// IsUnsafePoint indicates that this is an unsafe-point.
-	//
-	// Note that it's possible for a call Value to have a stack
-	// map while also being an unsafe-point. This means it cannot
-	// be preempted at this instruction, but that a preemption or
-	// stack growth may happen in the called function.
-	IsUnsafePoint bool
-}
+type StackMapIndex int
 
 // StackMapDontCare indicates that the stack map index at a Value
 // doesn't matter.
@@ -95,15 +88,10 @@
 // This is a sentinel value that should never be emitted to the PCDATA
 // stream. We use -1000 because that's obviously never a valid stack
 // index (but -1 is).
-const StackMapDontCare = -1000
+const StackMapDontCare StackMapIndex = -1000
 
-// LivenessDontCare indicates that the liveness information doesn't
-// matter. Currently it is used in deferreturn liveness when we don't
-// actually need it. It should never be emitted to the PCDATA stream.
-var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
-
-func (idx LivenessIndex) StackMapValid() bool {
-	return idx.StackMapIndex != StackMapDontCare
+func (s StackMapIndex) StackMapValid() bool {
+	return s != StackMapDontCare
 }
 
 func (pp *Progs) NewProg() *obj.Prog {
@@ -121,7 +109,7 @@
 // Flush converts from pp to machine code.
 func (pp *Progs) Flush() {
 	plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc}
-	obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
+	obj.Flushplist(base.Ctxt, plist, pp.NewProg)
 }
 
 // Free clears pp and any associated resources.
@@ -139,20 +127,20 @@
 
 // Prog adds a Prog with instruction As to pp.
 func (pp *Progs) Prog(as obj.As) *obj.Prog {
-	if pp.NextLive.StackMapValid() && pp.NextLive.StackMapIndex != pp.PrevLive.StackMapIndex {
+	if pp.NextLive != StackMapDontCare && pp.NextLive != pp.PrevLive {
 		// Emit stack map index change.
-		idx := pp.NextLive.StackMapIndex
-		pp.PrevLive.StackMapIndex = idx
+		idx := pp.NextLive
+		pp.PrevLive = idx
 		p := pp.Prog(obj.APCDATA)
 		p.From.SetConst(abi.PCDATA_StackMapIndex)
 		p.To.SetConst(int64(idx))
 	}
-	if pp.NextLive.IsUnsafePoint != pp.PrevLive.IsUnsafePoint {
+	if pp.NextUnsafe != pp.PrevUnsafe {
 		// Emit unsafe-point marker.
-		pp.PrevLive.IsUnsafePoint = pp.NextLive.IsUnsafePoint
+		pp.PrevUnsafe = pp.NextUnsafe
 		p := pp.Prog(obj.APCDATA)
 		p.From.SetConst(abi.PCDATA_UnsafePoint)
-		if pp.NextLive.IsUnsafePoint {
+		if pp.NextUnsafe {
 			p.To.SetConst(abi.UnsafePointUnsafe)
 		} else {
 			p.To.SetConst(abi.UnsafePointSafe)
diff --git a/src/cmd/compile/internal/pgo/internal/graph/graph.go b/src/cmd/compile/internal/pgo/internal/graph/graph.go
index 1275298..4d89b1b 100644
--- a/src/cmd/compile/internal/pgo/internal/graph/graph.go
+++ b/src/cmd/compile/internal/pgo/internal/graph/graph.go
@@ -466,9 +466,9 @@
 // Sort returns a slice of the edges in the map, in a consistent
 // order. The sort order is first based on the edge weight
 // (higher-to-lower) and then by the node names to avoid flakiness.
-func (e EdgeMap) Sort() []*Edge {
-	el := make(edgeList, 0, len(e))
-	for _, w := range e {
+func (em EdgeMap) Sort() []*Edge {
+	el := make(edgeList, 0, len(em))
+	for _, w := range em {
 		el = append(el, w)
 	}
 
@@ -477,9 +477,9 @@
 }
 
 // Sum returns the total weight for a set of nodes.
-func (e EdgeMap) Sum() int64 {
+func (em EdgeMap) Sum() int64 {
 	var ret int64
-	for _, edge := range e {
+	for _, edge := range em {
 		ret += edge.Weight
 	}
 	return ret
diff --git a/src/cmd/compile/internal/pgo/irgraph.go b/src/cmd/compile/internal/pgo/irgraph.go
index 074f4a5..96485e3 100644
--- a/src/cmd/compile/internal/pgo/irgraph.go
+++ b/src/cmd/compile/internal/pgo/irgraph.go
@@ -46,9 +46,11 @@
 	"cmd/compile/internal/pgo/internal/graph"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
+	"errors"
 	"fmt"
 	"internal/profile"
 	"os"
+	"sort"
 )
 
 // IRGraph is a call graph with nodes pointing to IRs of functions and edges
@@ -62,7 +64,8 @@
 // TODO(prattmic): Consider merging this data structure with Graph. This is
 // effectively a copy of Graph aggregated to line number and pointing to IR.
 type IRGraph struct {
-	// Nodes of the graph
+	// Nodes of the graph. Each node represents a function, keyed by linker
+	// symbol name.
 	IRNodes map[string]*IRNode
 }
 
@@ -76,7 +79,7 @@
 
 	// Set of out-edges in the callgraph. The map uniquely identifies each
 	// edge based on the callsite and callee, for fast lookup.
-	OutEdges map[NodeMapKey]*IREdge
+	OutEdges map[NamedCallEdge]*IREdge
 }
 
 // Name returns the symbol name of this function.
@@ -96,21 +99,21 @@
 	CallSiteOffset int // Line offset from function start line.
 }
 
-// NodeMapKey represents a hash key to identify unique call-edges in profile
-// and in IR. Used for deduplication of call edges found in profile.
-//
-// TODO(prattmic): rename to something more descriptive.
-type NodeMapKey struct {
+// NamedCallEdge identifies a call edge by linker symbol names and call site
+// offset.
+type NamedCallEdge struct {
 	CallerName     string
 	CalleeName     string
 	CallSiteOffset int // Line offset from function start line.
 }
 
-// Weights capture both node weight and edge weight.
-type Weights struct {
-	NFlat   int64
-	NCum    int64
-	EWeight int64
+// NamedEdgeMap contains all unique call edges in the profile and their
+// edge weight.
+type NamedEdgeMap struct {
+	Weight map[NamedCallEdge]int64
+
+	// ByWeight lists all keys in Weight, sorted by edge weight.
+	ByWeight []NamedCallEdge
 }
 
 // CallSiteInfo captures call-site information and its caller/callee.
@@ -123,15 +126,13 @@
 // Profile contains the processed PGO profile and weighted call graph used for
 // PGO optimizations.
 type Profile struct {
-	// Aggregated NodeWeights and EdgeWeights across the profile. This
-	// helps us determine the percentage threshold for hot/cold
-	// partitioning.
-	TotalNodeWeight int64
-	TotalEdgeWeight int64
+	// Aggregated edge weights across the profile. This helps us determine
+	// the percentage threshold for hot/cold partitioning.
+	TotalWeight int64
 
-	// NodeMap contains all unique call-edges in the profile and their
-	// aggregated weight.
-	NodeMap map[NodeMapKey]*Weights
+	// NamedEdgeMap contains all unique call edges in the profile and their
+	// edge weight.
+	NamedEdgeMap NamedEdgeMap
 
 	// WeightedCG represents the IRGraph built from profile, which we will
 	// update as part of inlining.
@@ -145,18 +146,22 @@
 		return nil, fmt.Errorf("error opening profile: %w", err)
 	}
 	defer f.Close()
-	profile, err := profile.Parse(f)
-	if err != nil {
+	p, err := profile.Parse(f)
+	if errors.Is(err, profile.ErrNoData) {
+		// Treat a completely empty file the same as a profile with no
+		// samples: nothing to do.
+		return nil, nil
+	} else if err != nil {
 		return nil, fmt.Errorf("error parsing profile: %w", err)
 	}
 
-	if len(profile.Sample) == 0 {
+	if len(p.Sample) == 0 {
 		// We accept empty profiles, but there is nothing to do.
 		return nil, nil
 	}
 
 	valueIndex := -1
-	for i, s := range profile.SampleType {
+	for i, s := range p.SampleType {
 		// Samples count is the raw data collected, and CPU nanoseconds is just
 		// a scaled version of it, so either one we can find is fine.
 		if (s.Type == "samples" && s.Unit == "count") ||
@@ -170,100 +175,106 @@
 		return nil, fmt.Errorf(`profile does not contain a sample index with value/type "samples/count" or cpu/nanoseconds"`)
 	}
 
-	g := graph.NewGraph(profile, &graph.Options{
+	g := graph.NewGraph(p, &graph.Options{
 		SampleValue: func(v []int64) int64 { return v[valueIndex] },
 	})
 
-	p := &Profile{
-		NodeMap: make(map[NodeMapKey]*Weights),
-		WeightedCG: &IRGraph{
-			IRNodes: make(map[string]*IRNode),
-		},
-	}
-
-	// Build the node map and totals from the profile graph.
-	if err := p.processprofileGraph(g); err != nil {
+	namedEdgeMap, totalWeight, err := createNamedEdgeMap(g)
+	if err != nil {
 		return nil, err
 	}
 
-	if p.TotalNodeWeight == 0 || p.TotalEdgeWeight == 0 {
+	if totalWeight == 0 {
 		return nil, nil // accept but ignore profile with no samples.
 	}
 
 	// Create package-level call graph with weights from profile and IR.
-	p.initializeIRGraph()
+	wg := createIRGraph(namedEdgeMap)
 
-	return p, nil
+	return &Profile{
+		TotalWeight:  totalWeight,
+		NamedEdgeMap: namedEdgeMap,
+		WeightedCG:   wg,
+	}, nil
 }
 
-// processprofileGraph builds various maps from the profile-graph.
+// createNamedEdgeMap builds a map of callsite-callee edge weights from the
+// profile-graph.
 //
-// It initializes NodeMap and Total{Node,Edge}Weight based on the name and
-// callsite to compute node and edge weights which will be used later on to
-// create edges for WeightedCG.
-//
-// Caller should ignore the profile if p.TotalNodeWeight == 0 || p.TotalEdgeWeight == 0.
-func (p *Profile) processprofileGraph(g *graph.Graph) error {
-	nFlat := make(map[string]int64)
-	nCum := make(map[string]int64)
+// Caller should ignore the profile if totalWeight == 0.
+func createNamedEdgeMap(g *graph.Graph) (edgeMap NamedEdgeMap, totalWeight int64, err error) {
 	seenStartLine := false
 
-	// Accummulate weights for the same node.
-	for _, n := range g.Nodes {
-		canonicalName := n.Info.Name
-		nFlat[canonicalName] += n.FlatValue()
-		nCum[canonicalName] += n.CumValue()
-	}
-
 	// Process graph and build various node and edge maps which will
 	// be consumed by AST walk.
+	weight := make(map[NamedCallEdge]int64)
 	for _, n := range g.Nodes {
 		seenStartLine = seenStartLine || n.Info.StartLine != 0
 
-		p.TotalNodeWeight += n.FlatValue()
 		canonicalName := n.Info.Name
 		// Create the key to the nodeMapKey.
-		nodeinfo := NodeMapKey{
+		namedEdge := NamedCallEdge{
 			CallerName:     canonicalName,
 			CallSiteOffset: n.Info.Lineno - n.Info.StartLine,
 		}
 
 		for _, e := range n.Out {
-			p.TotalEdgeWeight += e.WeightValue()
-			nodeinfo.CalleeName = e.Dest.Info.Name
-			if w, ok := p.NodeMap[nodeinfo]; ok {
-				w.EWeight += e.WeightValue()
-			} else {
-				weights := new(Weights)
-				weights.NFlat = nFlat[canonicalName]
-				weights.NCum = nCum[canonicalName]
-				weights.EWeight = e.WeightValue()
-				p.NodeMap[nodeinfo] = weights
-			}
+			totalWeight += e.WeightValue()
+			namedEdge.CalleeName = e.Dest.Info.Name
+			// Create new entry or increment existing entry.
+			weight[namedEdge] += e.WeightValue()
 		}
 	}
 
-	if p.TotalNodeWeight == 0 || p.TotalEdgeWeight == 0 {
-		return nil // accept but ignore profile with no samples.
+	if totalWeight == 0 {
+		return NamedEdgeMap{}, 0, nil // accept but ignore profile with no samples.
 	}
 
 	if !seenStartLine {
 		// TODO(prattmic): If Function.start_line is missing we could
 		// fall back to using absolute line numbers, which is better
 		// than nothing.
-		return fmt.Errorf("profile missing Function.start_line data (Go version of profiled application too old? Go 1.20+ automatically adds this to profiles)")
+		return NamedEdgeMap{}, 0, fmt.Errorf("profile missing Function.start_line data (Go version of profiled application too old? Go 1.20+ automatically adds this to profiles)")
 	}
 
-	return nil
+	byWeight := make([]NamedCallEdge, 0, len(weight))
+	for namedEdge := range weight {
+		byWeight = append(byWeight, namedEdge)
+	}
+	sort.Slice(byWeight, func(i, j int) bool {
+		ei, ej := byWeight[i], byWeight[j]
+		if wi, wj := weight[ei], weight[ej]; wi != wj {
+			return wi > wj // want larger weight first
+		}
+		// same weight, order by name/line number
+		if ei.CallerName != ej.CallerName {
+			return ei.CallerName < ej.CallerName
+		}
+		if ei.CalleeName != ej.CalleeName {
+			return ei.CalleeName < ej.CalleeName
+		}
+		return ei.CallSiteOffset < ej.CallSiteOffset
+	})
+
+	edgeMap = NamedEdgeMap{
+		Weight:   weight,
+		ByWeight: byWeight,
+	}
+
+	return edgeMap, totalWeight, nil
 }
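
The same determinism idea in miniature (illustrative, with made-up edge names): Go map iteration order is randomized, so materialize the keys and sort them, heavier weights first with a name tie-break, before ranging.

package main

import (
	"fmt"
	"sort"
)

func main() {
	weight := map[string]int64{"a->b": 10, "a->c": 30, "b->c": 20}
	keys := make([]string, 0, len(weight))
	for k := range weight {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool {
		if weight[keys[i]] != weight[keys[j]] {
			return weight[keys[i]] > weight[keys[j]] // heavier edges first
		}
		return keys[i] < keys[j] // deterministic tie-break by name
	})
	for _, k := range keys {
		fmt.Println(k, weight[k]) // a->c 30, b->c 20, a->b 10
	}
}
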
 
 // initializeIRGraph builds the IRGraph by visiting all the ir.Func in decl list
 // of a package.
-func (p *Profile) initializeIRGraph() {
+func createIRGraph(namedEdgeMap NamedEdgeMap) *IRGraph {
+	g := &IRGraph{
+		IRNodes: make(map[string]*IRNode),
+	}
+
 	// Bottomup walk over the function to create IRGraph.
-	ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
+	ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
 		for _, fn := range list {
-			p.VisitIR(fn)
+			visitIR(fn, namedEdgeMap, g)
 		}
 	})
 
@@ -271,24 +282,20 @@
 	// that IRNodes is fully populated (see the dummy node TODO in
 	// addIndirectEdges).
 	//
-	// TODO(prattmic): VisitIR above populates the graph via direct calls
+	// TODO(prattmic): visitIR above populates the graph via direct calls
 	// discovered via the IR. addIndirectEdges populates the graph via
 	// calls discovered via the profile. This combination of opposite
 	// approaches is a bit awkward, particularly because direct calls are
 	// discoverable via the profile as well. Unify these into a single
 	// approach.
-	p.addIndirectEdges()
+	addIndirectEdges(g, namedEdgeMap)
+
+	return g
 }
 
-// VisitIR traverses the body of each ir.Func and use NodeMap to determine if
-// we need to add an edge from ir.Func and any node in the ir.Func body.
-func (p *Profile) VisitIR(fn *ir.Func) {
-	g := p.WeightedCG
-
-	if g.IRNodes == nil {
-		g.IRNodes = make(map[string]*IRNode)
-	}
-
+// visitIR traverses the body of each ir.Func and adds edges to g from the
+// ir.Func to any called function in the body.
+func visitIR(fn *ir.Func, namedEdgeMap NamedEdgeMap, g *IRGraph) {
 	name := ir.LinkFuncName(fn)
 	node, ok := g.IRNodes[name]
 	if !ok {
@@ -299,7 +306,29 @@
 	}
 
 	// Recursively walk over the body of the function to create IRGraph edges.
-	p.createIRGraphEdge(fn, node, name)
+	createIRGraphEdge(fn, node, name, namedEdgeMap, g)
+}
+
+// createIRGraphEdge traverses the nodes in the body of ir.Func and adds edges
+// between the callernode which points to the ir.Func and the nodes in the
+// body.
+func createIRGraphEdge(fn *ir.Func, callernode *IRNode, name string, namedEdgeMap NamedEdgeMap, g *IRGraph) {
+	ir.VisitList(fn.Body, func(n ir.Node) {
+		switch n.Op() {
+		case ir.OCALLFUNC:
+			call := n.(*ir.CallExpr)
+			// Find the callee function from the call site and add the edge.
+			callee := DirectCallee(call.Fun)
+			if callee != nil {
+				addIREdge(callernode, name, n, callee, namedEdgeMap, g)
+			}
+		case ir.OCALLMETH:
+			call := n.(*ir.CallExpr)
+			// Find the callee method from the call site and add the edge.
+			callee := ir.MethodExprName(call.Fun).Func
+			addIREdge(callernode, name, n, callee, namedEdgeMap, g)
+		}
+	})
 }
 
 // NodeLineOffset returns the line offset of n in fn.
@@ -312,9 +341,7 @@
 
 // addIREdge adds an edge between caller and new node that points to `callee`
 // based on the profile-graph and NodeMap.
-func (p *Profile) addIREdge(callerNode *IRNode, callerName string, call ir.Node, callee *ir.Func) {
-	g := p.WeightedCG
-
+func addIREdge(callerNode *IRNode, callerName string, call ir.Node, callee *ir.Func, namedEdgeMap NamedEdgeMap, g *IRGraph) {
 	calleeName := ir.LinkFuncName(callee)
 	calleeNode, ok := g.IRNodes[calleeName]
 	if !ok {
@@ -324,40 +351,36 @@
 		g.IRNodes[calleeName] = calleeNode
 	}
 
-	nodeinfo := NodeMapKey{
+	namedEdge := NamedCallEdge{
 		CallerName:     callerName,
 		CalleeName:     calleeName,
 		CallSiteOffset: NodeLineOffset(call, callerNode.AST),
 	}
 
-	var weight int64
-	if weights, ok := p.NodeMap[nodeinfo]; ok {
-		weight = weights.EWeight
-	}
-
 	// Add edge in the IRGraph from caller to callee.
 	edge := &IREdge{
 		Src:            callerNode,
 		Dst:            calleeNode,
-		Weight:         weight,
-		CallSiteOffset: nodeinfo.CallSiteOffset,
+		Weight:         namedEdgeMap.Weight[namedEdge],
+		CallSiteOffset: namedEdge.CallSiteOffset,
 	}
 
 	if callerNode.OutEdges == nil {
-		callerNode.OutEdges = make(map[NodeMapKey]*IREdge)
+		callerNode.OutEdges = make(map[NamedCallEdge]*IREdge)
 	}
-	callerNode.OutEdges[nodeinfo] = edge
+	callerNode.OutEdges[namedEdge] = edge
+}
+
+// LookupFunc looks up a function or method in export data. It is expected to
+// be overridden by package noder, to break a dependency cycle.
+var LookupFunc = func(fullName string) (*ir.Func, error) {
+	base.Fatalf("pgo.LookupFunc not overridden")
+	panic("unreachable")
 }
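
The hook pattern used here to break the import cycle, shown as a generic standalone sketch (package and names are illustrative, not the compiler's):

package hook

import "errors"

// Lookup is a package-level hook. A higher-level package that would otherwise
// create an import cycle installs the real implementation during its setup:
//
//	hook.Lookup = realLookup
//
// Until then the default fails, mirroring the Fatalf default above.
var Lookup = func(name string) (int, error) {
	return 0, errors.New("hook.Lookup not overridden")
}
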
 
 // addIndirectEdges adds indirect call edges found in the profile to the graph,
 // to be used for devirtualization.
 //
-// targetDeclFuncs is the set of functions in typecheck.Target.Decls. Only
-// edges from these functions will be added.
-//
-// Devirtualization is only applied to typecheck.Target.Decls functions, so there
-// is no need to add edges from other functions.
-//
 // N.B. despite the name, addIndirectEdges will add any edges discovered via
 // the profile. We don't know for sure that they are indirect, but assume they
 // are since direct calls would already be added. (e.g., direct calls that have
@@ -366,9 +389,7 @@
 // TODO(prattmic): Devirtualization runs before inlining, so we can't devirtualize
 // calls inside inlined call bodies. If we did add that, we'd need edges from
 // inlined bodies as well.
-func (p *Profile) addIndirectEdges() {
-	g := p.WeightedCG
-
+func addIndirectEdges(g *IRGraph, namedEdgeMap NamedEdgeMap) {
 	// g.IRNodes is populated with the set of functions in the local
 	// package build by VisitIR. We want to filter for local functions
 	// below, but we also add unknown callees to IRNodes as we go. So make
@@ -378,7 +399,15 @@
 		localNodes[k] = v
 	}
 
-	for key, weights := range p.NodeMap {
+	// N.B. We must consider edges in a stable order because export data
+	// lookup order (LookupFunc, below) can impact the export data of
+	// this package, which must be stable across different invocations for
+	// reproducibility.
+	//
+	// The weight ordering of ByWeight is irrelevant, it just happens to be
+	// an ordered list of edges that is already available.
+	for _, key := range namedEdgeMap.ByWeight {
+		weight := namedEdgeMap.Weight[key]
 		// All callers in the local package build were added to IRNodes
 		// in VisitIR. If a caller isn't in the local package build we
 		// can skip adding edges, since we won't be devirtualizing in
@@ -395,25 +424,55 @@
 
 		calleeNode, ok := g.IRNodes[key.CalleeName]
 		if !ok {
-			// IR is missing for this callee. Most likely this is
-			// because the callee isn't in the transitive deps of
-			// this package.
+			// IR is missing for this callee. visitIR populates
+			// IRNodes with all functions discovered via local
+			// package function declarations and calls. This
+			// function may still be available from export data of
+			// a transitive dependency.
 			//
-			// Record this call anyway. If this is the hottest,
-			// then we want to skip devirtualization rather than
-			// devirtualizing to the second most common callee.
+			// TODO(prattmic): Parameterized types/functions are
+			// not supported.
 			//
-			// TODO(prattmic): VisitIR populates IRNodes with all
-			// of the functions discovered via local package
-			// function declarations and calls. Thus we could miss
-			// functions that are available in export data of
-			// transitive deps, but aren't directly reachable. We
-			// need to do a lookup directly from package export
-			// data to get complete coverage.
-			calleeNode = &IRNode{
-				LinkerSymbolName: key.CalleeName,
-				// TODO: weights? We don't need them.
+			// TODO(prattmic): This eager lookup during graph load
+			// is simple, but wasteful. We are likely to load many
+			// functions that we never need. We could delay load
+			// until we actually need the method in
+			// devirtualization. Instantiation of generic functions
+			// will likely need to be done at the devirtualization
+			// site, if at all.
+			fn, err := LookupFunc(key.CalleeName)
+			if err == nil {
+				if base.Debug.PGODebug >= 3 {
+					fmt.Printf("addIndirectEdges: %s found in export data\n", key.CalleeName)
+				}
+				calleeNode = &IRNode{AST: fn}
+
+				// N.B. we could call createIRGraphEdge to add
+				// direct calls in this newly-imported
+				// function's body to the graph. Similarly, we
+				// could add to this function's queue to add
+				// indirect calls. However, those would be
+				// useless given the visit order of inlining,
+				// and the ordering of PGO devirtualization and
+				// inlining. This function can only be used as
+				// an inlined body. We will never do PGO
+				// devirtualization inside an inlined call. Nor
+				// will we perform inlining inside an inlined
+				// call.
+			} else {
+				// Still not found. Most likely this is because
+				// the callee isn't in the transitive deps of
+				// this package.
+				//
+				// Record this call anyway. If this is the hottest,
+				// then we want to skip devirtualization rather than
+				// devirtualizing to the second most common callee.
+				if base.Debug.PGODebug >= 3 {
+					fmt.Printf("addIndirectEdges: %s not found in export data: %v\n", key.CalleeName, err)
+				}
+				calleeNode = &IRNode{LinkerSymbolName: key.CalleeName}
 			}
+
 			// Add dummy node back to IRNodes. We don't need this
 			// directly, but PrintWeightedCallGraphDOT uses these
 			// to print nodes.
@@ -422,39 +481,17 @@
 		edge := &IREdge{
 			Src:            callerNode,
 			Dst:            calleeNode,
-			Weight:         weights.EWeight,
+			Weight:         weight,
 			CallSiteOffset: key.CallSiteOffset,
 		}
 
 		if callerNode.OutEdges == nil {
-			callerNode.OutEdges = make(map[NodeMapKey]*IREdge)
+			callerNode.OutEdges = make(map[NamedCallEdge]*IREdge)
 		}
 		callerNode.OutEdges[key] = edge
 	}
 }
 
-// createIRGraphEdge traverses the nodes in the body of ir.Func and adds edges
-// between the callernode which points to the ir.Func and the nodes in the
-// body.
-func (p *Profile) createIRGraphEdge(fn *ir.Func, callernode *IRNode, name string) {
-	ir.VisitList(fn.Body, func(n ir.Node) {
-		switch n.Op() {
-		case ir.OCALLFUNC:
-			call := n.(*ir.CallExpr)
-			// Find the callee function from the call site and add the edge.
-			callee := DirectCallee(call.X)
-			if callee != nil {
-				p.addIREdge(callernode, name, n, callee)
-			}
-		case ir.OCALLMETH:
-			call := n.(*ir.CallExpr)
-			// Find the callee method from the call site and add the edge.
-			callee := ir.MethodExprName(call.X).Func
-			p.addIREdge(callernode, name, n, callee)
-		}
-	})
-}
-
 // WeightInPercentage converts profile weights to a percentage.
 func WeightInPercentage(value int64, total int64) float64 {
 	return (float64(value) / float64(total)) * 100
@@ -467,7 +504,7 @@
 
 	// List of functions in this package.
 	funcs := make(map[string]struct{})
-	ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
+	ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
 		for _, f := range list {
 			name := ir.LinkFuncName(f)
 			funcs[name] = struct{}{}
@@ -511,7 +548,7 @@
 		}
 	}
 	// Print edges.
-	ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
+	ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
 		for _, f := range list {
 			name := ir.LinkFuncName(f)
 			if n, ok := p.WeightedCG.IRNodes[name]; ok {
@@ -521,7 +558,7 @@
 						style = "dashed"
 					}
 					color := "black"
-					edgepercent := WeightInPercentage(e.Weight, p.TotalEdgeWeight)
+					edgepercent := WeightInPercentage(e.Weight, p.TotalWeight)
 					if edgepercent > edgeThreshold {
 						color = "red"
 					}
diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go
index edb0d6a..9278890 100644
--- a/src/cmd/compile/internal/pkginit/init.go
+++ b/src/cmd/compile/internal/pkginit/init.go
@@ -15,82 +15,15 @@
 	"cmd/internal/obj"
 	"cmd/internal/objabi"
 	"cmd/internal/src"
-	"fmt"
-	"os"
 )
 
-// MakeInit creates a synthetic init function to handle any
-// package-scope initialization statements.
-//
-// TODO(mdempsky): Move into noder, so that the types2-based frontends
-// can use Info.InitOrder instead.
-func MakeInit() {
-	nf := initOrder(typecheck.Target.Decls)
-	if len(nf) == 0 {
-		return
-	}
-
-	// Make a function that contains all the initialization statements.
-	base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
-	initializers := typecheck.Lookup("init")
-	fn := typecheck.DeclFunc(initializers, nil, nil, nil)
-	for _, dcl := range typecheck.InitTodoFunc.Dcl {
-		dcl.Curfn = fn
-	}
-	fn.Dcl = append(fn.Dcl, typecheck.InitTodoFunc.Dcl...)
-	typecheck.InitTodoFunc.Dcl = nil
-	fn.SetIsPackageInit(true)
-
-	// Outline (if legal/profitable) global map inits.
-	newfuncs := []*ir.Func{}
-	nf, newfuncs = staticinit.OutlineMapInits(nf)
-
-	// Suppress useless "can inline" diagnostics.
-	// Init functions are only called dynamically.
-	fn.SetInlinabilityChecked(true)
-	for _, nfn := range newfuncs {
-		nfn.SetInlinabilityChecked(true)
-	}
-
-	fn.Body = nf
-	typecheck.FinishFuncBody()
-
-	typecheck.Func(fn)
-	ir.WithFunc(fn, func() {
-		typecheck.Stmts(nf)
-	})
-	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
-	if base.Debug.WrapGlobalMapDbg > 1 {
-		fmt.Fprintf(os.Stderr, "=-= len(newfuncs) is %d for %v\n",
-			len(newfuncs), fn)
-	}
-	for _, nfn := range newfuncs {
-		if base.Debug.WrapGlobalMapDbg > 1 {
-			fmt.Fprintf(os.Stderr, "=-= add to target.decls %v\n", nfn)
-		}
-		typecheck.Target.Decls = append(typecheck.Target.Decls, ir.Node(nfn))
-	}
-
-	// Prepend to Inits, so it runs first, before any user-declared init
-	// functions.
-	typecheck.Target.Inits = append([]*ir.Func{fn}, typecheck.Target.Inits...)
-
-	if typecheck.InitTodoFunc.Dcl != nil {
-		// We only generate temps using InitTodoFunc if there
-		// are package-scope initialization statements, so
-		// something's weird if we get here.
-		base.Fatalf("InitTodoFunc still has declarations")
-	}
-	typecheck.InitTodoFunc = nil
-}
-
-// Task makes and returns an initialization record for the package.
+// MakeTask makes an initialization record for the package, if necessary.
 // See runtime/proc.go:initTask for its layout.
 // The 3 tasks for initialization are:
 //  1. Initialize all of the packages the current package depends on.
 //  2. Initialize all the variables that have initializers.
 //  3. Run any init functions.
-func Task() *ir.Name {
+func MakeTask() {
 	var deps []*obj.LSym // initTask records for packages the current package depends on
 	var fns []*obj.LSym  // functions to call for package initialization
 
@@ -125,35 +58,29 @@
 		ni := len(InstrumentGlobalsMap)
 		if ni != 0 {
 			// Make an init._ function.
-			base.Pos = base.AutogeneratedPos
-			typecheck.DeclContext = ir.PEXTERN
-			name := noder.Renameinit()
-			fnInit := typecheck.DeclFunc(name, nil, nil, nil)
+			pos := base.AutogeneratedPos
+			base.Pos = pos
+
+			sym := noder.Renameinit()
+			fnInit := ir.NewFunc(pos, pos, sym, types.NewSignature(nil, nil, nil))
+			typecheck.DeclFunc(fnInit)
 
 			// Get an array of instrumented global variables.
 			globals := instrumentGlobals(fnInit)
 
 			// Call runtime.asanregisterglobals function to poison redzones.
 			// runtime.asanregisterglobals(unsafe.Pointer(&globals[0]), ni)
-			asanf := typecheck.NewName(ir.Pkgs.Runtime.Lookup("asanregisterglobals"))
-			ir.MarkFunc(asanf)
-			asanf.SetType(types.NewSignature(nil, []*types.Field{
-				types.NewField(base.Pos, nil, types.Types[types.TUNSAFEPTR]),
-				types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
-			}, nil))
-			asancall := ir.NewCallExpr(base.Pos, ir.OCALL, asanf, nil)
+			asancall := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("asanregisterglobals"), nil)
 			asancall.Args.Append(typecheck.ConvNop(typecheck.NodAddr(
 				ir.NewIndexExpr(base.Pos, globals, ir.NewInt(base.Pos, 0))), types.Types[types.TUNSAFEPTR]))
 			asancall.Args.Append(typecheck.DefaultLit(ir.NewInt(base.Pos, int64(ni)), types.Types[types.TUINTPTR]))
 
 			fnInit.Body.Append(asancall)
 			typecheck.FinishFuncBody()
-			typecheck.Func(fnInit)
 			ir.CurFunc = fnInit
 			typecheck.Stmts(fnInit.Body)
 			ir.CurFunc = nil
 
-			typecheck.Target.Decls = append(typecheck.Target.Decls, fnInit)
 			typecheck.Target.Inits = append(typecheck.Target.Inits, fnInit)
 		}
 	}
@@ -191,13 +118,12 @@
 	}
 
 	if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Path != "main" && types.LocalPkg.Path != "runtime" {
-		return nil // nothing to initialize
+		return // nothing to initialize
 	}
 
 	// Make an .inittask structure.
 	sym := typecheck.Lookup(".inittask")
-	task := typecheck.NewName(sym)
-	task.SetType(types.Types[types.TUINT8]) // fake type
+	task := ir.NewNameAt(base.Pos, sym, types.Types[types.TUINT8]) // fake type
 	task.Class = ir.PEXTERN
 	sym.Def = task
 	lsym := task.Linksym()
@@ -219,20 +145,4 @@
 	// An initTask has pointers, but none into the Go heap.
 	// It's not quite read only, the state field must be modifiable.
 	objw.Global(lsym, int32(ot), obj.NOPTR)
-	return task
-}
-
-// initRequiredForCoverage returns TRUE if we need to force creation
-// of an init function for the package so as to insert a coverage
-// runtime registration call.
-func initRequiredForCoverage(l []ir.Node) bool {
-	if base.Flag.Cfg.CoverageInfo == nil {
-		return false
-	}
-	for _, n := range l {
-		if n.Op() == ir.ODCLFUNC {
-			return true
-		}
-	}
-	return false
 }
diff --git a/src/cmd/compile/internal/pkginit/initAsanGlobals.go b/src/cmd/compile/internal/pkginit/initAsanGlobals.go
index ce26cbf..42db0ea 100644
--- a/src/cmd/compile/internal/pkginit/initAsanGlobals.go
+++ b/src/cmd/compile/internal/pkginit/initAsanGlobals.go
@@ -23,8 +23,7 @@
 	// var asanglobals []asanGlobalStruct
 	arraytype := types.NewArray(asanGlobalStruct, int64(len(InstrumentGlobalsMap)))
 	symG := lname(".asanglobals")
-	globals := typecheck.NewName(symG)
-	globals.SetType(arraytype)
+	globals := ir.NewNameAt(base.Pos, symG, arraytype)
 	globals.Class = ir.PEXTERN
 	symG.Def = globals
 	typecheck.Target.Externs = append(typecheck.Target.Externs, globals)
@@ -32,8 +31,7 @@
 	// var asanL []asanLocationStruct
 	arraytype = types.NewArray(asanLocationStruct, int64(len(InstrumentGlobalsMap)))
 	symL := lname(".asanL")
-	asanlocation := typecheck.NewName(symL)
-	asanlocation.SetType(arraytype)
+	asanlocation := ir.NewNameAt(base.Pos, symL, arraytype)
 	asanlocation.Class = ir.PEXTERN
 	symL.Def = asanlocation
 	typecheck.Target.Externs = append(typecheck.Target.Externs, asanlocation)
@@ -43,22 +41,19 @@
 	// var asanModulename string
 	// var asanFilename string
 	symL = lname(".asanName")
-	asanName := typecheck.NewName(symL)
-	asanName.SetType(types.Types[types.TSTRING])
+	asanName := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING])
 	asanName.Class = ir.PEXTERN
 	symL.Def = asanName
 	typecheck.Target.Externs = append(typecheck.Target.Externs, asanName)
 
 	symL = lname(".asanModulename")
-	asanModulename := typecheck.NewName(symL)
-	asanModulename.SetType(types.Types[types.TSTRING])
+	asanModulename := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING])
 	asanModulename.Class = ir.PEXTERN
 	symL.Def = asanModulename
 	typecheck.Target.Externs = append(typecheck.Target.Externs, asanModulename)
 
 	symL = lname(".asanFilename")
-	asanFilename := typecheck.NewName(symL)
-	asanFilename.SetType(types.Types[types.TSTRING])
+	asanFilename := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING])
 	asanFilename.Class = ir.PEXTERN
 	symL.Def = asanFilename
 	typecheck.Target.Externs = append(typecheck.Target.Externs, asanFilename)
diff --git a/src/cmd/compile/internal/pkginit/initorder.go b/src/cmd/compile/internal/pkginit/initorder.go
deleted file mode 100644
index 9416470..0000000
--- a/src/cmd/compile/internal/pkginit/initorder.go
+++ /dev/null
@@ -1,369 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pkginit
-
-import (
-	"container/heap"
-	"fmt"
-	"internal/types/errors"
-	"strings"
-
-	"cmd/compile/internal/base"
-	"cmd/compile/internal/ir"
-)
-
-// Package initialization
-//
-// Here we implement the algorithm for ordering package-level variable
-// initialization. The spec is written in terms of variable
-// initialization, but multiple variables initialized by a single
-// assignment are handled together, so here we instead focus on
-// ordering initialization assignments. Conveniently, this maps well
-// to how we represent package-level initializations using the Node
-// AST.
-//
-// Assignments are in one of three phases: NotStarted, Pending, or
-// Done. For assignments in the Pending phase, we use Xoffset to
-// record the number of unique variable dependencies whose
-// initialization assignment is not yet Done. We also maintain a
-// "blocking" map that maps assignments back to all of the assignments
-// that depend on it.
-//
-// For example, for an initialization like:
-//
-//     var x = f(a, b, b)
-//     var a, b = g()
-//
-// the "x = f(a, b, b)" assignment depends on two variables (a and b),
-// so its Xoffset will be 2. Correspondingly, the "a, b = g()"
-// assignment's "blocking" entry will have two entries back to x's
-// assignment.
-//
-// Logically, initialization works by (1) taking all NotStarted
-// assignments, calculating their dependencies, and marking them
-// Pending; (2) adding all Pending assignments with Xoffset==0 to a
-// "ready" priority queue (ordered by variable declaration position);
-// and (3) iteratively processing the next Pending assignment from the
-// queue, decreasing the Xoffset of assignments it's blocking, and
-// adding them to the queue if decremented to 0.
-//
-// As an optimization, we actually apply each of these three steps for
-// each assignment. This yields the same order, but keeps queue size
-// down and thus also heap operation costs.
-
-// Static initialization phase.
-// These values are stored in two bits in Node.flags.
-const (
-	InitNotStarted = iota
-	InitDone
-	InitPending
-)
-
-type InitOrder struct {
-	// blocking maps initialization assignments to the assignments
-	// that depend on it.
-	blocking map[ir.Node][]ir.Node
-
-	// ready is the queue of Pending initialization assignments
-	// that are ready for initialization.
-	ready declOrder
-
-	order map[ir.Node]int
-}
-
-// initOrder computes initialization order for a list l of
-// package-level declarations (in declaration order) and outputs the
-// corresponding list of statements to include in the init() function
-// body.
-func initOrder(l []ir.Node) []ir.Node {
-	var res ir.Nodes
-	o := InitOrder{
-		blocking: make(map[ir.Node][]ir.Node),
-		order:    make(map[ir.Node]int),
-	}
-
-	// Process all package-level assignment in declaration order.
-	for _, n := range l {
-		switch n.Op() {
-		case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
-			o.processAssign(n)
-			o.flushReady(func(n ir.Node) { res.Append(n) })
-		case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
-			// nop
-		default:
-			base.Fatalf("unexpected package-level statement: %v", n)
-		}
-	}
-
-	// Check that all assignments are now Done; if not, there must
-	// have been a dependency cycle.
-	for _, n := range l {
-		switch n.Op() {
-		case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
-			if o.order[n] != orderDone {
-				// If there have already been errors
-				// printed, those errors may have
-				// confused us and there might not be
-				// a loop. Let the user fix those
-				// first.
-				base.ExitIfErrors()
-
-				o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name), new(ir.NameSet))
-				base.Fatalf("initialization unfinished, but failed to identify loop")
-			}
-		}
-	}
-
-	// Invariant consistency check. If this is non-zero, then we
-	// should have found a cycle above.
-	if len(o.blocking) != 0 {
-		base.Fatalf("expected empty map: %v", o.blocking)
-	}
-
-	return res
-}
-
-func (o *InitOrder) processAssign(n ir.Node) {
-	if _, ok := o.order[n]; ok {
-		base.Fatalf("unexpected state: %v, %v", n, o.order[n])
-	}
-	o.order[n] = 0
-
-	// Compute number of variable dependencies and build the
-	// inverse dependency ("blocking") graph.
-	for dep := range collectDeps(n, true) {
-		defn := dep.Defn
-		// Skip dependencies on functions (PFUNC) and
-		// variables already initialized (InitDone).
-		if dep.Class != ir.PEXTERN || o.order[defn] == orderDone {
-			continue
-		}
-		o.order[n]++
-		o.blocking[defn] = append(o.blocking[defn], n)
-	}
-
-	if o.order[n] == 0 {
-		heap.Push(&o.ready, n)
-	}
-}
-
-const orderDone = -1000
-
-// flushReady repeatedly applies initialize to the earliest (in
-// declaration order) assignment ready for initialization and updates
-// the inverse dependency ("blocking") graph.
-func (o *InitOrder) flushReady(initialize func(ir.Node)) {
-	for o.ready.Len() != 0 {
-		n := heap.Pop(&o.ready).(ir.Node)
-		if order, ok := o.order[n]; !ok || order != 0 {
-			base.Fatalf("unexpected state: %v, %v, %v", n, ok, order)
-		}
-
-		initialize(n)
-		o.order[n] = orderDone
-
-		blocked := o.blocking[n]
-		delete(o.blocking, n)
-
-		for _, m := range blocked {
-			if o.order[m]--; o.order[m] == 0 {
-				heap.Push(&o.ready, m)
-			}
-		}
-	}
-}
-
-// findInitLoopAndExit searches for an initialization loop involving variable
-// or function n. If one is found, it reports the loop as an error and exits.
-//
-// path points to a slice used for tracking the sequence of
-// variables/functions visited. Using a pointer to a slice allows the
-// slice capacity to grow and limit reallocations.
-func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name, ok *ir.NameSet) {
-	for i, x := range *path {
-		if x == n {
-			reportInitLoopAndExit((*path)[i:])
-			return
-		}
-	}
-
-	// There might be multiple loops involving n; by sorting
-	// references, we deterministically pick the one reported.
-	refers := collectDeps(n.Defn, false).Sorted(func(ni, nj *ir.Name) bool {
-		return ni.Pos().Before(nj.Pos())
-	})
-
-	*path = append(*path, n)
-	for _, ref := range refers {
-		// Short-circuit variables that were initialized.
-		if ref.Class == ir.PEXTERN && o.order[ref.Defn] == orderDone || ok.Has(ref) {
-			continue
-		}
-
-		o.findInitLoopAndExit(ref, path, ok)
-	}
-
-	// n is not involved in a cycle.
-	// Record that fact to avoid checking it again when reached another way,
-	// or else this traversal will take exponential time traversing all paths
-	// through the part of the package's call graph implicated in the cycle.
-	ok.Add(n)
-
-	*path = (*path)[:len(*path)-1]
-}
-
-// reportInitLoopAndExit reports an initialization loop as an error
-// and exits. However, if l is not actually an initialization loop, it
-// simply returns instead.
-func reportInitLoopAndExit(l []*ir.Name) {
-	// Rotate loop so that the earliest variable declaration is at
-	// the start.
-	i := -1
-	for j, n := range l {
-		if n.Class == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
-			i = j
-		}
-	}
-	if i == -1 {
-		// False positive: loop only involves recursive
-		// functions. Return so that findInitLoop can continue
-		// searching.
-		return
-	}
-	l = append(l[i:], l[:i]...)
-
-	// TODO(mdempsky): Method values are printed as "T.m-fm"
-	// rather than "T.m". Figure out how to avoid that.
-
-	var msg strings.Builder
-	fmt.Fprintf(&msg, "initialization loop:\n")
-	for _, n := range l {
-		fmt.Fprintf(&msg, "\t%v: %v refers to\n", ir.Line(n), n)
-	}
-	fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0])
-
-	base.ErrorfAt(l[0].Pos(), errors.InvalidInitCycle, msg.String())
-	base.ErrorExit()
-}
-
-// collectDeps returns all of the package-level functions and
-// variables that declaration n depends on. If transitive is true,
-// then it also includes the transitive dependencies of any depended
-// upon functions (but not variables).
-func collectDeps(n ir.Node, transitive bool) ir.NameSet {
-	d := initDeps{transitive: transitive}
-	switch n.Op() {
-	case ir.OAS:
-		n := n.(*ir.AssignStmt)
-		d.inspect(n.Y)
-	case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
-		n := n.(*ir.AssignListStmt)
-		d.inspect(n.Rhs[0])
-	case ir.ODCLFUNC:
-		n := n.(*ir.Func)
-		d.inspectList(n.Body)
-	default:
-		base.Fatalf("unexpected Op: %v", n.Op())
-	}
-	return d.seen
-}
-
-type initDeps struct {
-	transitive bool
-	seen       ir.NameSet
-	cvisit     func(ir.Node)
-}
-
-func (d *initDeps) cachedVisit() func(ir.Node) {
-	if d.cvisit == nil {
-		d.cvisit = d.visit // cache closure
-	}
-	return d.cvisit
-}
-
-func (d *initDeps) inspect(n ir.Node)      { ir.Visit(n, d.cachedVisit()) }
-func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) }
-
-// visit calls foundDep on any package-level functions or variables
-// referenced by n, if any.
-func (d *initDeps) visit(n ir.Node) {
-	switch n.Op() {
-	case ir.ONAME:
-		n := n.(*ir.Name)
-		switch n.Class {
-		case ir.PEXTERN, ir.PFUNC:
-			d.foundDep(n)
-		}
-
-	case ir.OCLOSURE:
-		n := n.(*ir.ClosureExpr)
-		d.inspectList(n.Func.Body)
-
-	case ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
-		d.foundDep(ir.MethodExprName(n))
-	}
-}
-
-// foundDep records that we've found a dependency on n by adding it to
-// seen.
-func (d *initDeps) foundDep(n *ir.Name) {
-	// Can happen with method expressions involving interface
-	// types; e.g., fixedbugs/issue4495.go.
-	if n == nil {
-		return
-	}
-
-	// Names without definitions aren't interesting as far as
-	// initialization ordering goes.
-	if n.Defn == nil {
-		return
-	}
-
-	if d.seen.Has(n) {
-		return
-	}
-	d.seen.Add(n)
-	if d.transitive && n.Class == ir.PFUNC {
-		d.inspectList(n.Defn.(*ir.Func).Body)
-	}
-}
-
-// declOrder implements heap.Interface, ordering assignment statements
-// by the position of their first LHS expression.
-//
-// N.B., the Pos of the first LHS expression is used because
-// an OAS node's Pos may not be unique. For example, given the
-// declaration "var a, b = f(), g()", "a" must be ordered before "b",
-// but both OAS nodes use the "=" token's position as their Pos.
-type declOrder []ir.Node
-
-func (s declOrder) Len() int { return len(s) }
-func (s declOrder) Less(i, j int) bool {
-	return firstLHS(s[i]).Pos().Before(firstLHS(s[j]).Pos())
-}
-func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) }
-func (s *declOrder) Pop() interface{} {
-	n := (*s)[len(*s)-1]
-	*s = (*s)[:len(*s)-1]
-	return n
-}
-
-// firstLHS returns the first expression on the left-hand side of
-// assignment n.
-func firstLHS(n ir.Node) *ir.Name {
-	switch n.Op() {
-	case ir.OAS:
-		n := n.(*ir.AssignStmt)
-		return n.X.Name()
-	case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
-		n := n.(*ir.AssignListStmt)
-		return n.Lhs[0].Name()
-	}
-
-	base.Fatalf("unexpected Op: %v", n.Op())
-	return nil
-}
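
The package comment of the deleted initorder.go above describes the ordering scheme: count each assignment's not-yet-initialized dependencies, record the inverse edges in a "blocking" map, and drain a heap of ready assignments ordered by declaration position. Below is a stand-alone sketch of that scheme over a toy two-assignment graph; the types and names are invented for illustration, and it is not the compiler's implementation (which also detects initialization cycles, omitted here).

	package main

	import (
		"container/heap"
		"fmt"
	)

	// assign stands in for one package-level initialization assignment.
	type assign struct {
		pos  int      // declaration order; used as the heap priority
		name string
		deps []string // variables whose values this assignment reads
	}

	// readyQueue is a min-heap of assignments whose dependencies are all done.
	type readyQueue []*assign

	func (q readyQueue) Len() int           { return len(q) }
	func (q readyQueue) Less(i, j int) bool { return q[i].pos < q[j].pos }
	func (q readyQueue) Swap(i, j int)      { q[i], q[j] = q[j], q[i] }
	func (q *readyQueue) Push(x any)        { *q = append(*q, x.(*assign)) }
	func (q *readyQueue) Pop() any {
		old := *q
		x := old[len(old)-1]
		*q = old[:len(old)-1]
		return x
	}

	func main() {
		// var x = f(a, b, b); var a, b = g() -- the example from the deleted comment.
		xAssign := &assign{pos: 0, name: "x", deps: []string{"a", "b", "b"}}
		abAssign := &assign{pos: 1, name: "a, b"}
		defn := map[string]*assign{"a": abAssign, "b": abAssign}

		pending := map[*assign]int{}        // per-assignment count of unfinished deps
		blocking := map[*assign][]*assign{} // defining assignment -> assignments it blocks
		var ready readyQueue
		for _, a := range []*assign{xAssign, abAssign} {
			seen := map[string]bool{}
			for _, v := range a.deps {
				if d := defn[v]; d != nil && !seen[v] {
					seen[v] = true
					pending[a]++
					blocking[d] = append(blocking[d], a)
				}
			}
			if pending[a] == 0 {
				heap.Push(&ready, a)
			}
		}
		for ready.Len() > 0 {
			a := heap.Pop(&ready).(*assign)
			fmt.Println("emit init for", a.name)
			for _, b := range blocking[a] {
				if pending[b]--; pending[b] == 0 {
					heap.Push(&ready, b)
				}
			}
		}
	}

Running it emits "a, b" before "x", matching the deleted comment's example in which x's dependency count starts at 2.
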
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 23df7ee..d20a31e 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -347,9 +347,9 @@
 		// BNE         end
 		// STDCCC      Rarg2, (Rarg0)
 		// BNE         loop
-		// LWSYNC      // Only for sequential consistency; not required in CasRel.
 		// MOVD        $1, Rout
 		// end:
+		// LWSYNC      // Only for sequential consistency; not required in CasRel.
 		ld := ppc64.ALDAR
 		st := ppc64.ASTDCCC
 		cmp := ppc64.ACMP
@@ -402,22 +402,24 @@
 		p4 := s.Prog(ppc64.ABNE)
 		p4.To.Type = obj.TYPE_BRANCH
 		p4.To.SetTarget(p0)
-		// LWSYNC - Assuming shared data not write-through-required nor
-		// caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
-		// If the operation is a CAS-Release, then synchronization is not necessary.
-		if v.AuxInt != 0 {
-			plwsync2 := s.Prog(ppc64.ALWSYNC)
-			plwsync2.To.Type = obj.TYPE_NONE
-		}
 		// return value true
 		p5 := s.Prog(ppc64.AMOVD)
 		p5.From.Type = obj.TYPE_CONST
 		p5.From.Offset = 1
 		p5.To.Type = obj.TYPE_REG
 		p5.To.Reg = out
-		// done (label)
-		p6 := s.Prog(obj.ANOP)
-		p2.To.SetTarget(p6)
+		// LWSYNC - Assuming shared data not write-through-required nor
+		// caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
+		// If the operation is a CAS-Release, then synchronization is not necessary.
+		if v.AuxInt != 0 {
+			plwsync2 := s.Prog(ppc64.ALWSYNC)
+			plwsync2.To.Type = obj.TYPE_NONE
+			p2.To.SetTarget(plwsync2)
+		} else {
+			// done (label)
+			p6 := s.Prog(obj.ANOP)
+			p2.To.SetTarget(p6)
+		}
 
 	case ssa.OpPPC64LoweredPubBarrier:
 		// LWSYNC
@@ -573,18 +575,6 @@
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = r
 
-		// Mask has been set as sh
-	case ssa.OpPPC64RLDICL:
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		shifts := v.AuxInt
-		p := s.Prog(v.Op.Asm())
-		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}
-		p.AddRestSourceConst(ssa.GetPPC64Shiftmb(shifts))
-		p.Reg = r1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-
 	case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
 		ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU,
 		ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW,
@@ -603,7 +593,8 @@
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = r
 
-	case ssa.OpPPC64ANDCC, ssa.OpPPC64ORCC, ssa.OpPPC64XORCC:
+	case ssa.OpPPC64ADDCC, ssa.OpPPC64ANDCC, ssa.OpPPC64SUBCC, ssa.OpPPC64ORCC, ssa.OpPPC64XORCC, ssa.OpPPC64NORCC,
+		ssa.OpPPC64ANDNCC:
 		r1 := v.Args[0].Reg()
 		r2 := v.Args[1].Reg()
 		p := s.Prog(v.Op.Asm())
@@ -613,6 +604,13 @@
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg0()
 
+	case ssa.OpPPC64NEGCC, ssa.OpPPC64CNTLZDCC:
+		p := s.Prog(v.Op.Asm())
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = v.Reg0()
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = v.Args[0].Reg()
+
 	case ssa.OpPPC64ROTLconst, ssa.OpPPC64ROTLWconst:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_CONST
@@ -623,13 +621,27 @@
 
 		// Auxint holds encoded rotate + mask
 	case ssa.OpPPC64RLWINM, ssa.OpPPC64RLWMI:
-		rot, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
+		sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
 		p := s.Prog(v.Op.Asm())
 		p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
 		p.Reg = v.Args[0].Reg()
-		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(rot)}
+		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(sh)}
 		p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})
 		// Auxint holds mask
+
+	case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICR:
+		sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
+		p := s.Prog(v.Op.Asm())
+		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh}
+		switch v.Op {
+		case ssa.OpPPC64RLDICL:
+			p.AddRestSourceConst(mb)
+		case ssa.OpPPC64RLDICR:
+			p.AddRestSourceConst(me)
+		}
+		p.Reg = v.Args[0].Reg()
+		p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+
 	case ssa.OpPPC64RLWNM:
 		_, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
 		p := s.Prog(v.Op.Asm())
@@ -730,13 +742,12 @@
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 
-	case ssa.OpPPC64ANDCCconst:
+	case ssa.OpPPC64ADDCCconst, ssa.OpPPC64ANDCCconst:
 		p := s.Prog(v.Op.Asm())
 		p.Reg = v.Args[0].Reg()
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = v.AuxInt
 		p.To.Type = obj.TYPE_REG
-		//		p.To.Reg = ppc64.REGTMP // discard result
 		p.To.Reg = v.Reg0()
 
 	case ssa.OpPPC64MOVDaddr:
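
For orientation on the LoweredAtomicCas hunk above: the trailing LWSYNC is moved so that, when sequential consistency is requested (AuxInt != 0), the failure branch (the BNE taken when the compare fails) also lands on the barrier instead of skipping past it. These lowered ops sit behind sync/atomic's compare-and-swap; the sketch below is a hypothetical Go-level usage of that operation, not code from the commit.

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// spinLock is a toy lock; the name and type are invented for illustration.
	type spinLock struct{ state int32 }

	// TryLock is the Go-level form of the operation lowered above: on ppc64 the
	// CompareAndSwap becomes a load-reserve/store-conditional loop followed by
	// an LWSYNC, which after this change is reached on both the success and the
	// failure path of the sequentially consistent variant.
	func (l *spinLock) TryLock() bool {
		return atomic.CompareAndSwapInt32(&l.state, 0, 1)
	}

	func (l *spinLock) Unlock() { atomic.StoreInt32(&l.state, 0) }

	func main() {
		var (
			l  spinLock
			n  int
			wg sync.WaitGroup
		)
		for i := 0; i < 8; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for !l.TryLock() { // spin until the CAS succeeds
				}
				n++ // guarded by the lock
				l.Unlock()
			}()
		}
		wg.Wait()
		fmt.Println(n) // 8
	}
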
diff --git a/src/cmd/compile/internal/rangefunc/rangefunc_test.go b/src/cmd/compile/internal/rangefunc/rangefunc_test.go
new file mode 100644
index 0000000..16856c6
--- /dev/null
+++ b/src/cmd/compile/internal/rangefunc/rangefunc_test.go
@@ -0,0 +1,1297 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.rangefunc
+
+package rangefunc_test
+
+import (
+	"slices"
+	"testing"
+)
+
+type Seq2[T1, T2 any] func(yield func(T1, T2) bool)
+
+// OfSliceIndex returns a Seq over the elements of s. It is equivalent
+// to range s.
+func OfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+	return func(yield func(int, T) bool) {
+		for i, v := range s {
+			if !yield(i, v) {
+				return
+			}
+		}
+		return
+	}
+}
+
+// BadOfSliceIndex is "bad" because it ignores the return value from yield
+// and just keeps on iterating.
+func BadOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+	return func(yield func(int, T) bool) {
+		for i, v := range s {
+			yield(i, v)
+		}
+		return
+	}
+}
+
+// VeryBadOfSliceIndex is "very bad" because it ignores the return value from yield
+// and just keeps on iterating, and also wraps that call in a defer-recover so it can
+// keep on trying after the first panic.
+func VeryBadOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+	return func(yield func(int, T) bool) {
+		for i, v := range s {
+			func() {
+				defer func() {
+					recover()
+				}()
+				yield(i, v)
+			}()
+		}
+		return
+	}
+}
+
+// CooperativeBadOfSliceIndex calls the loop body from a goroutine after
+// a ping on a channel, and sends the result of recover() back on that same channel.
+func CooperativeBadOfSliceIndex[T any, S ~[]T](s S, proceed chan any) Seq2[int, T] {
+	return func(yield func(int, T) bool) {
+		for i, v := range s {
+			if !yield(i, v) {
+				// if the body breaks, call yield just once in a goroutine
+				go func() {
+					<-proceed
+					defer func() {
+						proceed <- recover()
+					}()
+					yield(0, s[0])
+				}()
+				return
+			}
+		}
+		return
+	}
+}
+
+// TrickyIterator is a type intended to test whether an iterator that
+// calls a yield function after loop exit must inevitably escape the
+// closure; this might be relevant to future checking/optimization.
+type TrickyIterator struct {
+	yield func(int, int) bool
+}
+
+func (ti *TrickyIterator) iterAll(s []int) Seq2[int, int] {
+	return func(yield func(int, int) bool) {
+		ti.yield = yield // Save yield for future abuse
+		for i, v := range s {
+			if !yield(i, v) {
+				return
+			}
+		}
+		return
+	}
+}
+
+func (ti *TrickyIterator) iterOne(s []int) Seq2[int, int] {
+	return func(yield func(int, int) bool) {
+		ti.yield = yield // Save yield for future abuse
+		if len(s) > 0 {  // Not in a loop; might escape differently
+			yield(0, s[0])
+		}
+		return
+	}
+}
+
+func (ti *TrickyIterator) iterZero(s []int) Seq2[int, int] {
+	return func(yield func(int, int) bool) {
+		ti.yield = yield // Save yield for future abuse
+		// Don't call it at all, maybe it won't escape
+		return
+	}
+}
+
+func (ti *TrickyIterator) fail() {
+	if ti.yield != nil {
+		ti.yield(1, 1)
+	}
+}
+
+// Check wraps the function body passed to the iterator forall
+// in code that ensures that it cannot (successfully) be called
+// either after the body returns false (control flow out of the loop) or
+// after forall itself returns (the iteration is now done).
+//
+// Note that this can catch errors before the inserted checks.
+func Check[U, V any](forall Seq2[U, V]) Seq2[U, V] {
+	return func(body func(U, V) bool) {
+		ret := true
+		forall(func(u U, v V) bool {
+			if !ret {
+				panic("Checked iterator access after exit")
+			}
+			ret = body(u, v)
+			return ret
+		})
+		ret = false
+	}
+}
+
+func TestCheck(t *testing.T) {
+	i := 0
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+		} else {
+			t.Error("Wanted to see a failure")
+		}
+	}()
+	for _, x := range Check(BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
+		i += x
+		if i > 4*9 {
+			break
+		}
+	}
+}
+
+func TestCooperativeBadOfSliceIndex(t *testing.T) {
+	i := 0
+	proceed := make(chan any)
+	for _, x := range CooperativeBadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, proceed) {
+		i += x
+		if i >= 36 {
+			break
+		}
+	}
+	proceed <- true
+	if r := <-proceed; r != nil {
+		t.Logf("Saw expected panic '%v'", r)
+	} else {
+		t.Error("Wanted to see a failure")
+	}
+	if i != 36 {
+		t.Errorf("Expected i == 36, saw %d instead", i)
+	} else {
+		t.Logf("i = %d", i)
+	}
+}
+
+func TestCheckCooperativeBadOfSliceIndex(t *testing.T) {
+	i := 0
+	proceed := make(chan any)
+	for _, x := range Check(CooperativeBadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, proceed)) {
+		i += x
+		if i >= 36 {
+			break
+		}
+	}
+	proceed <- true
+	if r := <-proceed; r != nil {
+		t.Logf("Saw expected panic '%v'", r)
+	} else {
+		t.Error("Wanted to see a failure")
+	}
+	if i != 36 {
+		t.Errorf("Expected i == 36, saw %d instead", i)
+	} else {
+		t.Logf("i = %d", i)
+	}
+}
+
+func TestTrickyIterAll(t *testing.T) {
+	trickItAll := TrickyIterator{}
+	i := 0
+	for _, x := range trickItAll.iterAll([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+		i += x
+		if i >= 36 {
+			break
+		}
+	}
+
+	if i != 36 {
+		t.Errorf("Expected i == 36, saw %d instead", i)
+	} else {
+		t.Logf("i = %d", i)
+	}
+
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+		} else {
+			t.Error("Wanted to see a failure")
+		}
+	}()
+
+	trickItAll.fail()
+}
+
+func TestTrickyIterOne(t *testing.T) {
+	trickItOne := TrickyIterator{}
+	i := 0
+	for _, x := range trickItOne.iterOne([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+		i += x
+		if i >= 36 {
+			break
+		}
+	}
+
+	// Don't care about value, ought to be 36 anyhow.
+	t.Logf("i = %d", i)
+
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+		} else {
+			t.Error("Wanted to see a failure")
+		}
+	}()
+
+	trickItOne.fail()
+}
+
+func TestTrickyIterZero(t *testing.T) {
+	trickItZero := TrickyIterator{}
+	i := 0
+	for _, x := range trickItZero.iterZero([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+		i += x
+		if i >= 36 {
+			break
+		}
+	}
+
+	// Don't care about value, ought to be 0 anyhow.
+	t.Logf("i = %d", i)
+
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+		} else {
+			t.Error("Wanted to see a failure")
+		}
+	}()
+
+	trickItZero.fail()
+}
+
+func TestCheckTrickyIterZero(t *testing.T) {
+	trickItZero := TrickyIterator{}
+	i := 0
+	for _, x := range Check(trickItZero.iterZero([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
+		i += x
+		if i >= 36 {
+			break
+		}
+	}
+
+	// Don't care about value, ought to be 0 anyhow.
+	t.Logf("i = %d", i)
+
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+		} else {
+			t.Error("Wanted to see a failure")
+		}
+	}()
+
+	trickItZero.fail()
+}
+
+// TestBreak1 should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestBreak1(t *testing.T) {
+	var result []int
+	var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3}
+	for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) {
+		if x == -4 {
+			break
+		}
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				break
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestBreak2 should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestBreak2(t *testing.T) {
+	var result []int
+	var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3}
+outer:
+	for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) {
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				break
+			}
+			if x == -4 {
+				break outer
+			}
+
+			result = append(result, y)
+		}
+		result = append(result, x)
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestContinue should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestContinue(t *testing.T) {
+	var result []int
+	var expect = []int{-1, 1, 2, -2, 1, 2, -3, 1, 2, -4}
+outer:
+	for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) {
+		result = append(result, x)
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				continue outer
+			}
+			if x == -4 {
+				break outer
+			}
+
+			result = append(result, y)
+		}
+		result = append(result, x-10)
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestBreak3 should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestBreak3(t *testing.T) {
+	var result []int
+	var expect = []int{100, 10, 2, 4, 200, 10, 2, 4, 20, 2, 4, 300, 10, 2, 4, 20, 2, 4, 30}
+X:
+	for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+	Y:
+		for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+			if 10*y >= x {
+				break
+			}
+			result = append(result, y)
+			if y == 30 {
+				continue X
+			}
+		Z:
+			for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+				if z&1 == 1 {
+					continue Z
+				}
+				result = append(result, z)
+				if z >= 4 {
+					continue Y
+				}
+			}
+			result = append(result, -y) // should never be executed
+		}
+		result = append(result, x)
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestBreak1BadA should end in a panic when the outer-loop's
+// single-level break is ignored by BadOfSliceIndex
+func TestBreak1BadA(t *testing.T) {
+	var result []int
+	var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3}
+
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+			if !slices.Equal(expect, result) {
+				t.Errorf("Expected %v, got %v", expect, result)
+			}
+		} else {
+			t.Error("Wanted to see a failure")
+		}
+	}()
+
+	for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		if x == -4 {
+			break
+		}
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				break
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	}
+}
+
+// TestBreak1BadB should end in a panic, sooner, when the inner-loop's
+// (nested) single-level break is ignored by BadOfSliceIndex
+func TestBreak1BadB(t *testing.T) {
+	var result []int
+	var expect = []int{1, 2} // inner loop breaks, then panics, before the outer append runs
+
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+			if !slices.Equal(expect, result) {
+				t.Errorf("Expected %v, got %v", expect, result)
+			}
+		} else {
+			t.Error("Wanted to see a failure")
+		}
+	}()
+
+	for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		if x == -4 {
+			break
+		}
+		for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				break
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	}
+}
+
+// TestMultiCont0 tests multilevel continue with no bad iterators
+// (it should just work)
+func TestMultiCont0(t *testing.T) {
+	var result []int
+	var expect = []int{1000, 10, 2, 4, 2000}
+
+W:
+	for _, w := range OfSliceIndex([]int{1000, 2000}) {
+		result = append(result, w)
+		if w == 2000 {
+			break
+		}
+		for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+			for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+				result = append(result, y)
+				for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+					if z&1 == 1 {
+						continue
+					}
+					result = append(result, z)
+					if z >= 4 {
+						continue W // modified to be multilevel
+					}
+				}
+				result = append(result, -y) // should never be executed
+			}
+			result = append(result, x)
+		}
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestMultiCont1 tests multilevel continue with a bad iterator
+// in the outermost loop exited by the continue.
+func TestMultiCont1(t *testing.T) {
+	var result []int
+	var expect = []int{1000, 10, 2, 4}
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+			if !slices.Equal(expect, result) {
+				t.Errorf("Expected %v, got %v", expect, result)
+			}
+		} else {
+			t.Errorf("Wanted to see a failure, result was %v", result)
+		}
+	}()
+
+W:
+	for _, w := range OfSliceIndex([]int{1000, 2000}) {
+		result = append(result, w)
+		if w == 2000 {
+			break
+		}
+		for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) {
+			for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+				result = append(result, y)
+				for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+					if z&1 == 1 {
+						continue
+					}
+					result = append(result, z)
+					if z >= 4 {
+						continue W
+					}
+				}
+				result = append(result, -y) // should never be executed
+			}
+			result = append(result, x)
+		}
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestMultiCont2 tests multilevel continue with a bad iterator
+// in a middle loop exited by the continue.
+func TestMultiCont2(t *testing.T) {
+	var result []int
+	var expect = []int{1000, 10, 2, 4}
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+			if !slices.Equal(expect, result) {
+				t.Errorf("Expected %v, got %v", expect, result)
+			}
+		} else {
+			t.Errorf("Wanted to see a failure, result was %v", result)
+		}
+	}()
+
+W:
+	for _, w := range OfSliceIndex([]int{1000, 2000}) {
+		result = append(result, w)
+		if w == 2000 {
+			break
+		}
+		for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+			for _, y := range BadOfSliceIndex([]int{10, 20, 30, 40}) {
+				result = append(result, y)
+				for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+					if z&1 == 1 {
+						continue
+					}
+					result = append(result, z)
+					if z >= 4 {
+						continue W
+					}
+				}
+				result = append(result, -y) // should never be executed
+			}
+			result = append(result, x)
+		}
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestMultiCont3 tests multilevel continue with a bad iterator
+// in the innermost loop exited by the continue.
+func TestMultiCont3(t *testing.T) {
+	var result []int
+	var expect = []int{1000, 10, 2, 4}
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+			if !slices.Equal(expect, result) {
+				t.Errorf("Expected %v, got %v", expect, result)
+			}
+		} else {
+			t.Errorf("Wanted to see a failure, result was %v", result)
+		}
+	}()
+
+W:
+	for _, w := range OfSliceIndex([]int{1000, 2000}) {
+		result = append(result, w)
+		if w == 2000 {
+			break
+		}
+		for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+			for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+				result = append(result, y)
+				for _, z := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+					if z&1 == 1 {
+						continue
+					}
+					result = append(result, z)
+					if z >= 4 {
+						continue W
+					}
+				}
+				result = append(result, -y) // should never be executed
+			}
+			result = append(result, x)
+		}
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestMultiBreak0 tests multilevel break with a bad iterator
+// in the outermost loop exited by the break.
+func TestMultiBreak0(t *testing.T) {
+	var result []int
+	var expect = []int{1000, 10, 2, 4}
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+			if !slices.Equal(expect, result) {
+				t.Errorf("Expected %v, got %v", expect, result)
+			}
+		} else {
+			t.Errorf("Wanted to see a failure, result was %v", result)
+		}
+	}()
+
+W:
+	for _, w := range BadOfSliceIndex([]int{1000, 2000}) {
+		result = append(result, w)
+		if w == 2000 {
+			break
+		}
+		for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+			for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+				result = append(result, y)
+				for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+					if z&1 == 1 {
+						continue
+					}
+					result = append(result, z)
+					if z >= 4 {
+						break W
+					}
+				}
+				result = append(result, -y) // should never be executed
+			}
+			result = append(result, x)
+		}
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestMultiBreak1 tests multilevel break with a bad iterator
+// in an intermediate loop exited by the break.
+func TestMultiBreak1(t *testing.T) {
+	var result []int
+	var expect = []int{1000, 10, 2, 4}
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+			if !slices.Equal(expect, result) {
+				t.Errorf("Expected %v, got %v", expect, result)
+			}
+		} else {
+			t.Errorf("Wanted to see a failure, result was %v", result)
+		}
+	}()
+
+W:
+	for _, w := range OfSliceIndex([]int{1000, 2000}) {
+		result = append(result, w)
+		if w == 2000 {
+			break
+		}
+		for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) {
+			for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+				result = append(result, y)
+				for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+					if z&1 == 1 {
+						continue
+					}
+					result = append(result, z)
+					if z >= 4 {
+						break W
+					}
+				}
+				result = append(result, -y) // should never be executed
+			}
+			result = append(result, x)
+		}
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestMultiBreak2 tests multilevel break with two bad iterators
+// in intermediate loops exited by the break.
+func TestMultiBreak2(t *testing.T) {
+	var result []int
+	var expect = []int{1000, 10, 2, 4}
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+			if !slices.Equal(expect, result) {
+				t.Errorf("Expected %v, got %v", expect, result)
+			}
+		} else {
+			t.Errorf("Wanted to see a failure, result was %v", result)
+		}
+	}()
+
+W:
+	for _, w := range OfSliceIndex([]int{1000, 2000}) {
+		result = append(result, w)
+		if w == 2000 {
+			break
+		}
+		for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) {
+			for _, y := range BadOfSliceIndex([]int{10, 20, 30, 40}) {
+				result = append(result, y)
+				for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+					if z&1 == 1 {
+						continue
+					}
+					result = append(result, z)
+					if z >= 4 {
+						break W
+					}
+				}
+				result = append(result, -y) // should never be executed
+			}
+			result = append(result, x)
+		}
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestMultiBreak3 tests multilevel break with the bad iterator
+// in the innermost loop exited by the break.
+func TestMultiBreak3(t *testing.T) {
+	var result []int
+	var expect = []int{1000, 10, 2, 4}
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+			if !slices.Equal(expect, result) {
+				t.Errorf("Expected %v, got %v", expect, result)
+			}
+		} else {
+			t.Errorf("Wanted to see a failure, result was %v", result)
+		}
+	}()
+
+W:
+	for _, w := range OfSliceIndex([]int{1000, 2000}) {
+		result = append(result, w)
+		if w == 2000 {
+			break
+		}
+		for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+			for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+				result = append(result, y)
+				for _, z := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+					if z&1 == 1 {
+						continue
+					}
+					result = append(result, z)
+					if z >= 4 {
+						break W
+					}
+				}
+				result = append(result, -y) // should never be executed
+			}
+			result = append(result, x)
+		}
+	}
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// veryBad tests that a loop nest behaves sensibly in the face of a
+// "very bad" iterator.  In this case, "sensibly" means that the
+// break out of X still occurs after the very bad iterator finally
+// quits running (the control flow breadcrumbs remain).
+func veryBad(s []int) []int {
+	var result []int
+X:
+	for _, x := range OfSliceIndex([]int{1, 2, 3}) {
+
+		result = append(result, x)
+
+		for _, y := range VeryBadOfSliceIndex(s) {
+			result = append(result, y)
+			break X
+		}
+		for _, z := range OfSliceIndex([]int{100, 200, 300}) {
+			result = append(result, z)
+			if z == 100 {
+				break
+			}
+		}
+	}
+	return result
+}
+
+// checkVeryBad wraps a "very bad" iterator with Check,
+// demonstrating that the very bad iterator also hides panics
+// thrown by Check.
+func checkVeryBad(s []int) []int {
+	var result []int
+X:
+	for _, x := range OfSliceIndex([]int{1, 2, 3}) {
+
+		result = append(result, x)
+
+		for _, y := range Check(VeryBadOfSliceIndex(s)) {
+			result = append(result, y)
+			break X
+		}
+		for _, z := range OfSliceIndex([]int{100, 200, 300}) {
+			result = append(result, z)
+			if z == 100 {
+				break
+			}
+		}
+	}
+	return result
+}
+
+// okay is the not-bad version of veryBad.
+// They should behave the same.
+func okay(s []int) []int {
+	var result []int
+X:
+	for _, x := range OfSliceIndex([]int{1, 2, 3}) {
+
+		result = append(result, x)
+
+		for _, y := range OfSliceIndex(s) {
+			result = append(result, y)
+			break X
+		}
+		for _, z := range OfSliceIndex([]int{100, 200, 300}) {
+			result = append(result, z)
+			if z == 100 {
+				break
+			}
+		}
+	}
+	return result
+}
+
+// TestVeryBad1 checks the behavior of an extremely poorly behaved iterator.
+func TestVeryBad1(t *testing.T) {
+	result := veryBad([]int{10, 20, 30, 40, 50}) // odd length
+	expect := []int{1, 10}
+
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestVeryBad2 checks the behavior of an extremely poorly behaved iterator.
+func TestVeryBad2(t *testing.T) {
+	result := veryBad([]int{10, 20, 30, 40}) // even length
+	expect := []int{1, 10}
+
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestCheckVeryBad checks the behavior of an extremely poorly behaved iterator,
+// which also suppresses the panics thrown by "Check".
+func TestCheckVeryBad(t *testing.T) {
+	result := checkVeryBad([]int{10, 20, 30, 40}) // even length
+	expect := []int{1, 10}
+
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// TestOk is the nice version of the very bad iterator.
+func TestOk(t *testing.T) {
+	result := okay([]int{10, 20, 30, 40, 50}) // odd length
+	expect := []int{1, 10}
+
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+}
+
+// testBreak1BadDefer checks that defer behaves properly even in
+// the presence of loop bodies panicking out of bad iterators.
+// (i.e., the instrumentation did not break defer in these loops)
+func testBreak1BadDefer(t *testing.T) (result []int) {
+	var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3, -30, -20, -10}
+
+	defer func() {
+		if r := recover(); r != nil {
+			t.Logf("Saw expected panic '%v'", r)
+			if !slices.Equal(expect, result) {
+				t.Errorf("(Inner) Expected %v, got %v", expect, result)
+			}
+		} else {
+			t.Error("Wanted to see a failure")
+		}
+	}()
+
+	for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		if x == -4 {
+			break
+		}
+		defer func() {
+			result = append(result, x*10)
+		}()
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				break
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	}
+	return
+}
+
+func TestBreak1BadDefer(t *testing.T) {
+	var result []int
+	var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3, -30, -20, -10}
+	result = testBreak1BadDefer(t)
+	if !slices.Equal(expect, result) {
+		t.Errorf("(Outer) Expected %v, got %v", expect, result)
+	}
+}
+
+// testReturn1 has no bad iterators.
+func testReturn1(t *testing.T) (result []int, err any) {
+	defer func() {
+		err = recover()
+	}()
+	for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		result = append(result, x)
+		if x == -4 {
+			break
+		}
+		defer func() {
+			result = append(result, x*10)
+		}()
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				return
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	}
+	return
+}
+
+// testReturn2 has an outermost bad iterator
+func testReturn2(t *testing.T) (result []int, err any) {
+	defer func() {
+		err = recover()
+	}()
+	for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		result = append(result, x)
+		if x == -4 {
+			break
+		}
+		defer func() {
+			result = append(result, x*10)
+		}()
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				return
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	}
+	return
+}
+
+// testReturn3 has an innermost bad iterator
+func testReturn3(t *testing.T) (result []int, err any) {
+	defer func() {
+		err = recover()
+	}()
+	for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		result = append(result, x)
+		if x == -4 {
+			break
+		}
+		defer func() {
+			result = append(result, x*10)
+		}()
+		for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				return
+			}
+			result = append(result, y)
+		}
+	}
+	return
+}
+
+// TestReturns checks that returns through bad iterators behave properly,
+// for inner and outer bad iterators.
+func TestReturns(t *testing.T) {
+	var result []int
+	var expect = []int{-1, 1, 2, -10}
+	var err any
+
+	result, err = testReturn1(t)
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+	if err != nil {
+		t.Errorf("Unexpected error %v", err)
+	}
+
+	result, err = testReturn2(t)
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+	if err == nil {
+		t.Errorf("Missing expected error")
+	} else {
+		t.Logf("Saw expected panic '%v'", err)
+	}
+
+	result, err = testReturn3(t)
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+	if err == nil {
+		t.Errorf("Missing expected error")
+	} else {
+		t.Logf("Saw expected panic '%v'", err)
+	}
+
+}
+
+// testGotoA1 tests loop-nest-internal goto, no bad iterators.
+func testGotoA1(t *testing.T) (result []int, err any) {
+	defer func() {
+		err = recover()
+	}()
+	for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		result = append(result, x)
+		if x == -4 {
+			break
+		}
+		defer func() {
+			result = append(result, x*10)
+		}()
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				goto A
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	A:
+	}
+	return
+}
+
+// testGotoA2 tests loop-nest-internal goto, outer bad iterator.
+func testGotoA2(t *testing.T) (result []int, err any) {
+	defer func() {
+		err = recover()
+	}()
+	for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		result = append(result, x)
+		if x == -4 {
+			break
+		}
+		defer func() {
+			result = append(result, x*10)
+		}()
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				goto A
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	A:
+	}
+	return
+}
+
+// testGotoA3 tests loop-nest-internal goto, inner bad iterator.
+func testGotoA3(t *testing.T) (result []int, err any) {
+	defer func() {
+		err = recover()
+	}()
+	for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		result = append(result, x)
+		if x == -4 {
+			break
+		}
+		defer func() {
+			result = append(result, x*10)
+		}()
+		for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				goto A
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	A:
+	}
+	return
+}
+
+func TestGotoA(t *testing.T) {
+	var result []int
+	var expect = []int{-1, 1, 2, -2, 1, 2, -3, 1, 2, -4, -30, -20, -10}
+	var expect3 = []int{-1, 1, 2, -10} // first goto becomes a panic
+	var err any
+
+	result, err = testGotoA1(t)
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+	if err != nil {
+		t.Errorf("Unexpected error %v", err)
+	}
+
+	result, err = testGotoA2(t)
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+	if err == nil {
+		t.Errorf("Missing expected error")
+	} else {
+		t.Logf("Saw expected panic '%v'", err)
+	}
+
+	result, err = testGotoA3(t)
+	if !slices.Equal(expect3, result) {
+		t.Errorf("Expected %v, got %v", expect3, result)
+	}
+	if err == nil {
+		t.Errorf("Missing expected error")
+	} else {
+		t.Logf("Saw expected panic '%v'", err)
+	}
+}
+
+// testGotoB1 tests loop-nest-exiting goto, no bad iterators.
+func testGotoB1(t *testing.T) (result []int, err any) {
+	defer func() {
+		err = recover()
+	}()
+	for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		result = append(result, x)
+		if x == -4 {
+			break
+		}
+		defer func() {
+			result = append(result, x*10)
+		}()
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				goto B
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	}
+B:
+	result = append(result, 999)
+	return
+}
+
+// testGotoB2 tests loop-nest-exiting goto, outer bad iterator.
+func testGotoB2(t *testing.T) (result []int, err any) {
+	defer func() {
+		err = recover()
+	}()
+	for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		result = append(result, x)
+		if x == -4 {
+			break
+		}
+		defer func() {
+			result = append(result, x*10)
+		}()
+		for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				goto B
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	}
+B:
+	result = append(result, 999)
+	return
+}
+
+// testGotoB3 tests loop-nest-exiting goto, inner bad iterator.
+func testGotoB3(t *testing.T) (result []int, err any) {
+	defer func() {
+		err = recover()
+	}()
+	for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+		result = append(result, x)
+		if x == -4 {
+			break
+		}
+		defer func() {
+			result = append(result, x*10)
+		}()
+		for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+			if y == 3 {
+				goto B
+			}
+			result = append(result, y)
+		}
+		result = append(result, x)
+	}
+B:
+	result = append(result, 999)
+	return
+}
+
+func TestGotoB(t *testing.T) {
+	var result []int
+	var expect = []int{-1, 1, 2, 999, -10}
+	var expectX = []int{-1, 1, 2, -10}
+	var err any
+
+	result, err = testGotoB1(t)
+	if !slices.Equal(expect, result) {
+		t.Errorf("Expected %v, got %v", expect, result)
+	}
+	if err != nil {
+		t.Errorf("Unexpected error %v", err)
+	}
+
+	result, err = testGotoB2(t)
+	if !slices.Equal(expectX, result) {
+		t.Errorf("Expected %v, got %v", expectX, result)
+	}
+	if err == nil {
+		t.Errorf("Missing expected error")
+	} else {
+		t.Logf("Saw expected panic '%v'", err)
+	}
+
+	result, err = testGotoB3(t)
+	if !slices.Equal(expectX, result) {
+		t.Errorf("Expected %v, got %v", expectX, result)
+	}
+	if err == nil {
+		t.Errorf("Missing expected error")
+	} else {
+		t.Logf("Saw expected panic '%v'", err)
+	}
+}
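
The tests above need GOEXPERIMENT=rangefunc because they use the range-over-func syntax. The same Seq2 iterators can also be driven by handing them an explicit yield closure, which is in spirit what the rewrite in rewrite.go (the next file in this diff) generates. A minimal, self-contained sketch, not taken from the commit:

	package main

	import "fmt"

	type Seq2[T1, T2 any] func(yield func(T1, T2) bool)

	// OfSliceIndex yields index/value pairs of s, as in the test file above.
	func OfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
		return func(yield func(int, T) bool) {
			for i, v := range s {
				if !yield(i, v) {
					return
				}
			}
		}
	}

	func main() {
		seq := OfSliceIndex([]int{10, 20, 30, 40})
		// Driving the iterator directly with an explicit yield closure.
		seq(func(i, v int) bool {
			fmt.Println(i, v)
			return v < 30 // returning false plays the role of "break"
		})
		// Prints 0 10, 1 20, 2 30 and then stops.
	}

Returning false from the closure is the "break": a well-behaved iterator such as OfSliceIndex stops immediately, while the Bad* variants above keep calling the closure, which is exactly what the generated #exit checks described in rewrite.go are there to catch.
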
diff --git a/src/cmd/compile/internal/rangefunc/rewrite.go b/src/cmd/compile/internal/rangefunc/rewrite.go
new file mode 100644
index 0000000..d439412
--- /dev/null
+++ b/src/cmd/compile/internal/rangefunc/rewrite.go
@@ -0,0 +1,1334 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package rangefunc rewrites range-over-func to code that doesn't use range-over-funcs.
+Rewriting the construct in the front end, before noder, means the functions generated during
+the rewrite are available in a noder-generated representation for inlining by the back end.
+
+# Theory of Operation
+
+The basic idea is to rewrite
+
+	for x := range f {
+		...
+	}
+
+into
+
+	f(func(x T) bool {
+		...
+	})
+
+But it's not usually that easy.
+
+# Range variables
+
+For a range not using :=, the assigned variables cannot be function parameters
+in the generated body function. Instead, we allocate fake parameters and
+start the body with an assignment. For example:
+
+	for expr1, expr2 = range f {
+		...
+	}
+
+becomes
+
+	f(func(#p1 T1, #p2 T2) bool {
+		expr1, expr2 = #p1, #p2
+		...
+	})
+
+(All the generated variables have a # at the start to signal that they
+are internal variables when looking at the generated code in a
+debugger. Because variables have all been resolved to the specific
+objects they represent, there is no danger of using plain "p1" and
+colliding with a Go variable named "p1"; the # is just nice to have,
+not for correctness.)
+
+It can also happen that there are fewer range variables than function
+arguments, in which case we end up with something like
+
+	f(func(x T1, _ T2) bool {
+		...
+	})
+
+or
+
+	f(func(#p1 T1, #p2 T2, _ T3) bool {
+		expr1, expr2 = #p1, #p2
+		...
+	})
+
+# Return
+
+If the body contains a "break", that break turns into "return false",
+to tell f to stop. And if the body contains a "continue", that turns
+into "return true", to tell f to proceed with the next value.
+Those are the easy cases.
+
+If the body contains a return or a break/continue/goto L, then we need
+to rewrite that into code that breaks out of the loop and then
+triggers that control flow. In general we rewrite
+
+	for x := range f {
+		...
+	}
+
+into
+
+	{
+		var #next int
+		f(func(x T1) bool {
+			...
+			return true
+		})
+		... check #next ...
+	}
+
+The variable #next is an integer code that says what to do when f
+returns. Each difficult statement sets #next and then returns false to
+stop f.
+
+A plain "return" rewrites to {#next = -1; return false}.
+The return false breaks the loop. Then when f returns, the "check
+#next" section includes
+
+	if #next == -1 { return }
+
+which causes the return we want.
+
+Return with arguments is more involved. We need somewhere to store the
+arguments while we break out of f, so we add them to the var
+declaration, like:
+
+	{
+		var (
+			#next int
+			#r1 type1
+			#r2 type2
+		)
+		f(func(x T1) bool {
+			...
+			{
+				// return a, b
+				#r1, #r2 = a, b
+				#next = -2
+				return false
+			}
+			...
+			return true
+		})
+		if #next == -2 { return #r1, #r2 }
+	}
+
+TODO: What about:
+
+	func f() (x bool) {
+		for range g(&x) {
+			return true
+		}
+	}
+
+	func g(p *bool) func(func() bool) {
+		return func(yield func() bool) {
+			yield()
+			// Is *p true or false here?
+		}
+	}
+
+With this rewrite the "return true" is not visible after yield returns,
+but maybe it should be?
+
+# Checking
+
+To permit checking that an iterator is well-behaved -- that is, that
+it does not call the loop body again after it has returned false or
+after the entire loop has exited (it might retain a copy of the body
+function, or pass it to another goroutine) -- each generated loop has
+its own #exitK flag that is checked before each iteration, and set both
+at any early exit and after the iteration completes.
+
+For example:
+
+	for x := range f {
+		...
+		if ... { break }
+		...
+	}
+
+becomes
+
+	{
+		var #exit1 bool
+		f(func(x T1) bool {
+			if #exit1 { runtime.panicrangeexit() }
+			...
+			if ... { #exit1 = true ; return false }
+			...
+			return true
+		})
+		#exit1 = true
+	}
+
+# Nested Loops
+
+So far we've only considered a single loop. If a function contains a
+sequence of loops, each can be translated individually. But loops can
+be nested. It would work to translate the innermost loop and then
+translate the loop around it, and so on, except that there'd be a lot
+of rewriting of rewritten code and the overall traversals could end up
+taking time quadratic in the depth of the nesting. To avoid all that,
+we use a single rewriting pass that handles a top-most range-over-func
+loop and all the range-over-func loops it contains at the same time.
+
+If we need to return from inside a doubly-nested loop, the rewrites
+above stay the same, but the check after the inner loop only says
+
+	if #next < 0 { return false }
+
+to stop the outer loop so it can do the actual return. That is,
+
+	for range f {
+		for range g {
+			...
+			return a, b
+			...
+		}
+	}
+
+becomes
+
+	{
+		var (
+			#next int
+			#r1 type1
+			#r2 type2
+		)
+		var #exit1 bool
+		f(func() {
+			if #exit1 { runtime.panicrangeexit() }
+			var #exit2 bool
+			g(func() {
+				if #exit2 { runtime.panicrangeexit() }
+				...
+				{
+					// return a, b
+					#r1, #r2 = a, b
+					#next = -2
+					#exit1, #exit2 = true, true
+					return false
+				}
+				...
+				return true
+			})
+			#exit2 = true
+			if #next < 0 {
+				return false
+			}
+			return true
+		})
+		#exit1 = true
+		if #next == -2 {
+			return #r1, #r2
+		}
+	}
+
+Note that the #next < 0 after the inner loop handles both kinds of
+return with a single check.
+
+# Labeled break/continue of range-over-func loops
+
+For a labeled break or continue of an outer range-over-func, we
+use positive #next values. Any such labeled break or continue
+really means "do N breaks" or "do N breaks and 1 continue".
+We encode that as perLoopStep*N or perLoopStep*N+1 respectively.
+
+Loops that might need to propagate a labeled break or continue
+add one or both of these to the #next checks:
+
+	if #next >= 2 {
+		#next -= 2
+		return false
+	}
+
+	if #next == 1 {
+		#next = 0
+		return true
+	}
+
+For example
+
+	F: for range f {
+		for range g {
+			for range h {
+				...
+				break F
+				...
+				...
+				continue F
+				...
+			}
+		}
+		...
+	}
+
+becomes
+
+	{
+		var #next int
+		var #exit1 bool
+		f(func() {
+			if #exit1 { runtime.panicrangeexit() }
+			var #exit2 bool
+			g(func() {
+				if #exit2 { runtime.panicrangeexit() }
+				var #exit3 bool
+				h(func() {
+					if #exit3 { runtime.panicrangeexit() }
+					...
+					{
+						// break F
+						#next = 4
+						#exit1, #exit2, #exit3 = true, true, true
+						return false
+					}
+					...
+					{
+						// continue F
+						#next = 3
+						#exit2, #exit3 = true, true
+						return false
+					}
+					...
+					return true
+				})
+				#exit3 = true
+				if #next >= 2 {
+					#next -= 2
+					return false
+				}
+				return true
+			})
+			#exit2 = true
+			if #next >= 2 {
+				#next -= 2
+				return false
+			}
+			if #next == 1 {
+				#next = 0
+				return true
+			}
+			...
+			return true
+		})
+		#exit1 = true
+	}
+
+Note that the post-h checks only consider a break,
+since no generated code tries to continue g.
+
+# Gotos and other labeled break/continue
+
+The final control flow translations are goto and break/continue of a
+non-range-over-func statement. In both cases, we may need to break out
+of one or more range-over-func loops before we can do the actual
+control flow statement. Each such break/continue/goto L statement is
+assigned a unique negative #next value (below -2, since -1 and -2 are
+for the two kinds of return). Then the post-checks for a given loop
+test for the specific codes that refer to labels directly targetable
+from that block. Otherwise, the generic
+
+	if #next < 0 { return false }
+
+check handles stopping the next loop to get one step closer to the label.
+
+For example
+
+	Top: print("start\n")
+	for range f {
+		for range g {
+			...
+			for range h {
+				...
+				goto Top
+				...
+			}
+		}
+	}
+
+becomes
+
+	Top: print("start\n")
+	{
+		var #next int
+		var #exit1 bool
+		f(func() {
+			if #exit1 { runtime.panicrangeexit() }
+			var #exit2 bool
+			g(func() {
+				if #exit2 { runtime.panicrangeexit() }
+				...
+				var #exit3 bool
+				h(func() {
+					if #exit3 { runtime.panicrangeexit() }
+					...
+					{
+						// goto Top
+						#next = -3
+						#exit1, #exit2, #exit3 = true, true, true
+						return false
+					}
+					...
+					return true
+				})
+				#exit3 = true
+				if #next < 0 {
+					return false
+				}
+				return true
+			})
+			#exit2 = true
+			if #next < 0 {
+				return false
+			}
+			return true
+		})
+		#exit1 = true
+		if #next == -3 {
+			#next = 0
+			goto Top
+		}
+	}
+
+Labeled break/continue to non-range-over-funcs are handled the same
+way as goto.
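+
+For example, a "break L" of a labeled non-range-over-func loop L that
+encloses the whole nest is assigned its own negative #next code (say -5),
+and the check emitted after the outermost range-over-func loop is
+
+	if #next == -5 {
+		#next = 0
+		break L
+	}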
+
+# Defers
+
+The last wrinkle is handling defer statements. If we have
+
+	for range f {
+		defer print("A")
+	}
+
+we cannot rewrite that into
+
+	f(func() {
+		defer print("A")
+	})
+
+because the deferred code will run at the end of the iteration, not
+the end of the containing function. To fix that, the runtime provides
+a special hook that lets us obtain a defer "token" representing the
+outer function and then use it in a later defer to attach the deferred
+code to that outer function.
+
+Normally,
+
+	defer print("A")
+
+compiles to
+
+	runtime.deferproc(func() { print("A") })
+
+This changes in a range-over-func. For example:
+
+	for range f {
+		defer print("A")
+	}
+
+compiles to
+
+	var #defers = runtime.deferrangefunc()
+	f(func() {
+		runtime.deferprocat(func() { print("A") }, #defers)
+	})
+
+For this rewriting phase, we insert the explicit initialization of
+#defers and then attach the #defers variable to the CallStmt
+representing the defer. That variable will be propagated to the
+backend and will cause the backend to compile the defer using
+deferprocat instead of an ordinary deferproc.
+
+TODO: Could call runtime.deferrangefuncend after f.
+*/
+package rangefunc
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/syntax"
+	"cmd/compile/internal/types2"
+	"fmt"
+	"go/constant"
+	"os"
+)
+
+// nopos is the zero syntax.Pos.
+var nopos syntax.Pos
+
+// A rewriter implements rewriting the range-over-funcs in a given function.
+type rewriter struct {
+	pkg   *types2.Package
+	info  *types2.Info
+	outer *syntax.FuncType
+	body  *syntax.BlockStmt
+
+	// References to important types and values.
+	any   types2.Object
+	bool  types2.Object
+	int   types2.Object
+	true  types2.Object
+	false types2.Object
+
+	// Branch numbering, computed as needed.
+	branchNext map[branch]int             // branch -> #next value
+	labelLoop  map[string]*syntax.ForStmt // label -> innermost rangefunc loop it is declared inside (nil for no loop)
+
+	// Stack of nodes being visited.
+	stack    []syntax.Node // all nodes
+	forStack []*forLoop    // range-over-func loops
+
+	rewritten map[*syntax.ForStmt]syntax.Stmt
+
+	// Declared variables in generated code for outermost loop.
+	declStmt     *syntax.DeclStmt
+	nextVar      types2.Object
+	retVars      []types2.Object
+	defers       types2.Object
+	exitVarCount int // exitvars are referenced from their respective loops
+}
+
+// A branch is a single labeled branch.
+type branch struct {
+	tok   syntax.Token
+	label string
+}
+
+// A forLoop describes a single range-over-func loop being processed.
+type forLoop struct {
+	nfor         *syntax.ForStmt // actual syntax
+	exitFlag     *types2.Var     // #exit variable for this loop
+	exitFlagDecl *syntax.VarDecl
+
+	checkRet      bool     // add check for "return" after loop
+	checkRetArgs  bool     // add check for "return args" after loop
+	checkBreak    bool     // add check for "break" after loop
+	checkContinue bool     // add check for "continue" after loop
+	checkBranch   []branch // add check for labeled branch after loop
+}
+
+// Rewrite rewrites all the range-over-funcs in the files.
+func Rewrite(pkg *types2.Package, info *types2.Info, files []*syntax.File) {
+	for _, file := range files {
+		syntax.Inspect(file, func(n syntax.Node) bool {
+			switch n := n.(type) {
+			case *syntax.FuncDecl:
+				rewriteFunc(pkg, info, n.Type, n.Body)
+				return false
+			case *syntax.FuncLit:
+				rewriteFunc(pkg, info, n.Type, n.Body)
+				return false
+			}
+			return true
+		})
+	}
+}
+
+// rewriteFunc rewrites all the range-over-funcs in a single function (a top-level func or a func literal).
+// The typ and body are the function's type and body.
+func rewriteFunc(pkg *types2.Package, info *types2.Info, typ *syntax.FuncType, body *syntax.BlockStmt) {
+	if body == nil {
+		return
+	}
+	r := &rewriter{
+		pkg:   pkg,
+		info:  info,
+		outer: typ,
+		body:  body,
+	}
+	syntax.Inspect(body, r.inspect)
+	if (base.Flag.W != 0) && r.forStack != nil {
+		syntax.Fdump(os.Stderr, body)
+	}
+}
+
+// checkFuncMisuse reports whether to check for misuse of iterator callback functions.
+func (r *rewriter) checkFuncMisuse() bool {
+	return base.Debug.RangeFuncCheck != 0
+}
+
+// inspect is a callback for syntax.Inspect that drives the actual rewriting.
+// If it sees a func literal, it kicks off a separate rewrite for that literal.
+// Otherwise, it maintains a stack of range-over-func loops and
+// converts each in turn.
+func (r *rewriter) inspect(n syntax.Node) bool {
+	switch n := n.(type) {
+	case *syntax.FuncLit:
+		rewriteFunc(r.pkg, r.info, n.Type, n.Body)
+		return false
+
+	default:
+		// Push n onto stack.
+		r.stack = append(r.stack, n)
+		if nfor, ok := forRangeFunc(n); ok {
+			loop := &forLoop{nfor: nfor}
+			r.forStack = append(r.forStack, loop)
+			r.startLoop(loop)
+		}
+
+	case nil:
+		// n == nil signals that we are done visiting
+		// the top-of-stack node's children. Find it.
+		n = r.stack[len(r.stack)-1]
+
+		// If we are inside a range-over-func,
+		// take this moment to replace any break/continue/goto/return
+		// statements directly contained in this node.
+		// Also replace any converted for statements
+		// with the rewritten block.
+		switch n := n.(type) {
+		case *syntax.BlockStmt:
+			for i, s := range n.List {
+				n.List[i] = r.editStmt(s)
+			}
+		case *syntax.CaseClause:
+			for i, s := range n.Body {
+				n.Body[i] = r.editStmt(s)
+			}
+		case *syntax.CommClause:
+			for i, s := range n.Body {
+				n.Body[i] = r.editStmt(s)
+			}
+		case *syntax.LabeledStmt:
+			n.Stmt = r.editStmt(n.Stmt)
+		}
+
+		// Pop n.
+		if len(r.forStack) > 0 && r.stack[len(r.stack)-1] == r.forStack[len(r.forStack)-1].nfor {
+			r.endLoop(r.forStack[len(r.forStack)-1])
+			r.forStack = r.forStack[:len(r.forStack)-1]
+		}
+		r.stack = r.stack[:len(r.stack)-1]
+	}
+	return true
+}
+
+// startLoop sets up for converting a range-over-func loop.
+func (r *rewriter) startLoop(loop *forLoop) {
+	// For first loop in function, allocate syntax for any, bool, int, true, and false.
+	if r.any == nil {
+		r.any = types2.Universe.Lookup("any")
+		r.bool = types2.Universe.Lookup("bool")
+		r.int = types2.Universe.Lookup("int")
+		r.true = types2.Universe.Lookup("true")
+		r.false = types2.Universe.Lookup("false")
+		r.rewritten = make(map[*syntax.ForStmt]syntax.Stmt)
+	}
+	if r.checkFuncMisuse() {
+		// declare the exit flag for this loop's body
+		loop.exitFlag, loop.exitFlagDecl = r.exitVar(loop.nfor.Pos())
+	}
+}
+
+// editStmt returns the replacement for the statement x,
+// or x itself if it should be left alone.
+// This includes the for loops we are converting,
+// as left in r.rewritten by r.endLoop.
+func (r *rewriter) editStmt(x syntax.Stmt) syntax.Stmt {
+	if x, ok := x.(*syntax.ForStmt); ok {
+		if s := r.rewritten[x]; s != nil {
+			return s
+		}
+	}
+
+	if len(r.forStack) > 0 {
+		switch x := x.(type) {
+		case *syntax.BranchStmt:
+			return r.editBranch(x)
+		case *syntax.CallStmt:
+			if x.Tok == syntax.Defer {
+				return r.editDefer(x)
+			}
+		case *syntax.ReturnStmt:
+			return r.editReturn(x)
+		}
+	}
+
+	return x
+}
+
+// editDefer returns the replacement for the defer statement x.
+// See the "Defers" section in the package doc comment above for more context.
+func (r *rewriter) editDefer(x *syntax.CallStmt) syntax.Stmt {
+	if r.defers == nil {
+		// Declare and initialize the #defers token.
+		init := &syntax.CallExpr{
+			Fun: runtimeSym(r.info, "deferrangefunc"),
+		}
+		tv := syntax.TypeAndValue{Type: r.any.Type()}
+		tv.SetIsValue()
+		init.SetTypeInfo(tv)
+		r.defers = r.declVar("#defers", r.any.Type(), init)
+	}
+
+	// Attach the token as an "extra" argument to the defer.
+	x.DeferAt = r.useVar(r.defers)
+	setPos(x.DeferAt, x.Pos())
+	return x
+}
+
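+// exitVar declares a fresh #exitN flag for a loop.
+// The flag starts false, is set to true once its loop has exited
+// (see setExited and setExitedAt), and is checked on entry to the loop's
+// body function (see assertNotExited), so that an iterator that calls the
+// body function after the loop has finished panics via runtime.panicrangeexit.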
+func (r *rewriter) exitVar(pos syntax.Pos) (*types2.Var, *syntax.VarDecl) {
+	r.exitVarCount++
+
+	name := fmt.Sprintf("#exit%d", r.exitVarCount)
+	typ := r.bool.Type()
+	obj := types2.NewVar(pos, r.pkg, name, typ)
+	n := syntax.NewName(pos, name)
+	setValueType(n, typ)
+	r.info.Defs[n] = obj
+
+	return obj, &syntax.VarDecl{NameList: []*syntax.Name{n}}
+}
+
+// editReturn returns the replacement for the return statement x.
+// See the "Return" section in the package doc comment above for more context.
+func (r *rewriter) editReturn(x *syntax.ReturnStmt) syntax.Stmt {
+	// #next = -1 is return with no arguments; -2 is return with arguments.
+	var next int
+	if x.Results == nil {
+		next = -1
+		r.forStack[0].checkRet = true
+	} else {
+		next = -2
+		r.forStack[0].checkRetArgs = true
+	}
+
+	// Tell the loops along the way to check for a return.
+	for _, loop := range r.forStack[1:] {
+		loop.checkRet = true
+	}
+
+	// Assign results, set #next, and return false.
+	bl := &syntax.BlockStmt{}
+	if x.Results != nil {
+		if r.retVars == nil {
+			for i, a := range r.outer.ResultList {
+				obj := r.declVar(fmt.Sprintf("#r%d", i+1), a.Type.GetTypeInfo().Type, nil)
+				r.retVars = append(r.retVars, obj)
+			}
+		}
+		bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.useList(r.retVars), Rhs: x.Results})
+	}
+	bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.next(), Rhs: r.intConst(next)})
+	if r.checkFuncMisuse() {
+		// mark all enclosing loop bodies as exited
+		for i := 0; i < len(r.forStack); i++ {
+			bl.List = append(bl.List, r.setExitedAt(i))
+		}
+	}
+	bl.List = append(bl.List, &syntax.ReturnStmt{Results: r.useVar(r.false)})
+	setPos(bl, x.Pos())
+	return bl
+}
+
+// perLoopStep is part of the encoding of loop-spanning control flow
+// for function range iterators.  Each multiple of two encodes a "return false"
+// passing control to an enclosing iterator; a terminal value of 1 encodes
+// "return true" (i.e., local continue) from the body function, and a terminal
+// value of 0 encodes executing the remainder of the body function.
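+//
+// For example, in the labeled break/continue example in the package comment,
+// "break F" from the innermost loop is encoded as perLoopStep*2 = 4
+// (decoded as two "return false" steps), while "continue F" is encoded as
+// perLoopStep*2 - 1 = 3 (one "return false" step, then a final "return true").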
+const perLoopStep = 2
+
+// editBranch returns the replacement for the branch statement x,
+// or x itself if it should be left alone.
+// See the package doc comment above for more context.
+func (r *rewriter) editBranch(x *syntax.BranchStmt) syntax.Stmt {
+	if x.Tok == syntax.Fallthrough {
+		// Fallthrough is unaffected by the rewrite.
+		return x
+	}
+
+	// Find target of break/continue/goto in r.forStack.
+	// (The target may not be in r.forStack at all.)
+	targ := x.Target
+	i := len(r.forStack) - 1
+	if x.Label == nil && r.forStack[i].nfor != targ {
+		// An unlabeled break or continue whose target is not nfor must be inside nfor. Leave it alone.
+		return x
+	}
+	for i >= 0 && r.forStack[i].nfor != targ {
+		i--
+	}
+	// exitFrom is the index of the loop interior to the target of the control flow,
+	// if such a loop exists (it does not if i == len(r.forStack) - 1)
+	exitFrom := i + 1
+
+	// Compute the value to assign to #next and the specific return to use.
+	var next int
+	var ret *syntax.ReturnStmt
+	if x.Tok == syntax.Goto || i < 0 {
+		// goto Label
+		// or break/continue of labeled non-range-over-func loop.
+		// We may be able to leave it alone, or we may have to break
+		// out of one or more nested loops and then use #next to signal
+		// to complete the break/continue/goto.
+		// Figure out which range-over-func loop contains the label.
+		r.computeBranchNext()
+		nfor := r.forStack[len(r.forStack)-1].nfor
+		label := x.Label.Value
+		targ := r.labelLoop[label]
+		if nfor == targ {
+			// Label is in the innermost range-over-func loop; use it directly.
+			return x
+		}
+
+		// Set #next to the code meaning break/continue/goto label.
+		next = r.branchNext[branch{x.Tok, label}]
+
+		// Break out of nested loops up to targ.
+		i := len(r.forStack) - 1
+		for i >= 0 && r.forStack[i].nfor != targ {
+			i--
+		}
+		exitFrom = i + 1
+
+		// Mark the loop we exit to get to targ, so that it checks for this branch.
+		// When i == -1, that is the outermost func body.
+		top := r.forStack[i+1]
+		top.checkBranch = append(top.checkBranch, branch{x.Tok, label})
+
+		// Mark loops along the way to check for a plain return, so they break.
+		for j := i + 2; j < len(r.forStack); j++ {
+			r.forStack[j].checkRet = true
+		}
+
+		// In the innermost loop, use a plain "return false".
+		ret = &syntax.ReturnStmt{Results: r.useVar(r.false)}
+	} else {
+		// break/continue of labeled range-over-func loop.
+		depth := len(r.forStack) - 1 - i
+
+		// For continue of innermost loop, use "return true".
+		// Otherwise we are breaking the innermost loop, so "return false".
+
+		if depth == 0 && x.Tok == syntax.Continue {
+			ret = &syntax.ReturnStmt{Results: r.useVar(r.true)}
+			setPos(ret, x.Pos())
+			return ret
+		}
+		ret = &syntax.ReturnStmt{Results: r.useVar(r.false)}
+
+		// If this is a simple break, mark this loop as exited and return false.
+		// No adjustments to #next.
+		if depth == 0 {
+			var stmts []syntax.Stmt
+			if r.checkFuncMisuse() {
+				stmts = []syntax.Stmt{r.setExited(), ret}
+			} else {
+				stmts = []syntax.Stmt{ret}
+			}
+			bl := &syntax.BlockStmt{
+				List: stmts,
+			}
+			setPos(bl, x.Pos())
+			return bl
+		}
+
+		// The loop inside the one we are break/continue-ing
+		// needs to make that happen when we break out of it.
+		if x.Tok == syntax.Continue {
+			r.forStack[exitFrom].checkContinue = true
+		} else {
+			exitFrom = i
+			r.forStack[exitFrom].checkBreak = true
+		}
+
+		// The loops along the way just need to break.
+		for j := exitFrom + 1; j < len(r.forStack); j++ {
+			r.forStack[j].checkBreak = true
+		}
+
+		// Set next to break the appropriate number of times;
+		// the final time may be a continue, not a break.
+		next = perLoopStep * depth
+		if x.Tok == syntax.Continue {
+			next--
+		}
+	}
+
+	// Assign #next = next and do the return.
+	as := &syntax.AssignStmt{Lhs: r.next(), Rhs: r.intConst(next)}
+	bl := &syntax.BlockStmt{
+		List: []syntax.Stmt{as},
+	}
+
+	if r.checkFuncMisuse() {
+		// Set #exitK for this loop and those exited by the control flow.
+		for i := exitFrom; i < len(r.forStack); i++ {
+			bl.List = append(bl.List, r.setExitedAt(i))
+		}
+	}
+
+	bl.List = append(bl.List, ret)
+	setPos(bl, x.Pos())
+	return bl
+}
+
+// computeBranchNext computes the branchNext numbering
+// and determines which labels end up inside which range-over-func loop bodies.
+func (r *rewriter) computeBranchNext() {
+	if r.labelLoop != nil {
+		return
+	}
+
+	r.labelLoop = make(map[string]*syntax.ForStmt)
+	r.branchNext = make(map[branch]int)
+
+	var labels []string
+	var stack []syntax.Node
+	var forStack []*syntax.ForStmt
+	forStack = append(forStack, nil)
+	syntax.Inspect(r.body, func(n syntax.Node) bool {
+		if n != nil {
+			stack = append(stack, n)
+			if nfor, ok := forRangeFunc(n); ok {
+				forStack = append(forStack, nfor)
+			}
+			if n, ok := n.(*syntax.LabeledStmt); ok {
+				l := n.Label.Value
+				labels = append(labels, l)
+				f := forStack[len(forStack)-1]
+				r.labelLoop[l] = f
+			}
+		} else {
+			n := stack[len(stack)-1]
+			stack = stack[:len(stack)-1]
+			if n == forStack[len(forStack)-1] {
+				forStack = forStack[:len(forStack)-1]
+			}
+		}
+		return true
+	})
+
+	// Assign numbers to all the labels we observed.
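+	// For example, the first label observed gets break/continue/goto codes
+	// -5/-4/-3 and the second gets -8/-7/-6, keeping every code below the
+	// -1 and -2 values reserved for returns.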
+	used := -2
+	for _, l := range labels {
+		used -= 3
+		r.branchNext[branch{syntax.Break, l}] = used
+		r.branchNext[branch{syntax.Continue, l}] = used + 1
+		r.branchNext[branch{syntax.Goto, l}] = used + 2
+	}
+}
+
+// endLoop finishes the conversion of a range-over-func loop.
+// We have inspected and rewritten the body of the loop and can now
+// construct the body function and rewrite the for loop into a call
+// bracketed by any declarations and checks it requires.
+func (r *rewriter) endLoop(loop *forLoop) {
+	// Pick apart for range X { ... }
+	nfor := loop.nfor
+	start, end := nfor.Pos(), nfor.Body.Rbrace // start, end position of for loop
+	rclause := nfor.Init.(*syntax.RangeClause)
+	rfunc := types2.CoreType(rclause.X.GetTypeInfo().Type).(*types2.Signature) // type of X - func(func(...)bool)
+	if rfunc.Params().Len() != 1 {
+		base.Fatalf("invalid typecheck of range func")
+	}
+	ftyp := types2.CoreType(rfunc.Params().At(0).Type()).(*types2.Signature) // func(...) bool
+	if ftyp.Results().Len() != 1 {
+		base.Fatalf("invalid typecheck of range func")
+	}
+
+	// Build X(bodyFunc)
+	call := &syntax.ExprStmt{
+		X: &syntax.CallExpr{
+			Fun: rclause.X,
+			ArgList: []syntax.Expr{
+				r.bodyFunc(nfor.Body.List, syntax.UnpackListExpr(rclause.Lhs), rclause.Def, ftyp, start, end),
+			},
+		},
+	}
+	setPos(call, start)
+
+	// Build checks based on #next after X(bodyFunc)
+	checks := r.checks(loop, end)
+
+	// Rewrite for vars := range X { ... } to
+	//
+	//	{
+	//		r.declStmt
+	//		call
+	//		checks
+	//	}
+	//
+	// The r.declStmt can be added to by this loop or any inner loop
+	// during the creation of r.bodyFunc; it is only emitted in the outermost
+	// converted range loop.
+	block := &syntax.BlockStmt{Rbrace: end}
+	setPos(block, start)
+	if len(r.forStack) == 1 && r.declStmt != nil {
+		setPos(r.declStmt, start)
+		block.List = append(block.List, r.declStmt)
+	}
+
+	// declare the exitFlag here so it has proper scope and zeroing
+	if r.checkFuncMisuse() {
+		exitFlagDecl := &syntax.DeclStmt{DeclList: []syntax.Decl{loop.exitFlagDecl}}
+		block.List = append(block.List, exitFlagDecl)
+	}
+
+	// iteratorFunc(bodyFunc)
+	block.List = append(block.List, call)
+
+	if r.checkFuncMisuse() {
+		// iteratorFunc has exited; mark the exit flag for the body.
+		block.List = append(block.List, r.setExited())
+	}
+	block.List = append(block.List, checks...)
+
+	if len(r.forStack) == 1 { // ending an outermost loop
+		r.declStmt = nil
+		r.nextVar = nil
+		r.retVars = nil
+		r.defers = nil
+	}
+
+	r.rewritten[nfor] = block
+}
+
+func (r *rewriter) setExited() *syntax.AssignStmt {
+	return r.setExitedAt(len(r.forStack) - 1)
+}
+
+func (r *rewriter) setExitedAt(index int) *syntax.AssignStmt {
+	loop := r.forStack[index]
+	return &syntax.AssignStmt{
+		Lhs: r.useVar(loop.exitFlag),
+		Rhs: r.useVar(r.true),
+	}
+}
+
+// bodyFunc converts the loop body (control flow has already been updated)
+// to a func literal that can be passed to the range function.
+//
+// lhs is the list of range variables from the range statement.
+// def indicates whether this is a := range statement.
+// ftyp is the type of the body function we are creating.
+// start and end are the syntax positions to use for new nodes
+// that should be at the start or end of the loop.
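+//
+// For example, "for x := range f { body }" produces, roughly,
+//
+//	func(x T) bool {
+//		if #exitN { runtime.panicrangeexit() } // only with misuse checking
+//		body
+//		return true
+//	}
+//
+// while the assignment form "for x = range f" instead declares a fresh
+// parameter #p1 and begins the body with "x = #p1".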
+func (r *rewriter) bodyFunc(body []syntax.Stmt, lhs []syntax.Expr, def bool, ftyp *types2.Signature, start, end syntax.Pos) *syntax.FuncLit {
+	// Starting X(bodyFunc); build up bodyFunc first.
+	var params, results []*types2.Var
+	results = append(results, types2.NewVar(start, nil, "", r.bool.Type()))
+	bodyFunc := &syntax.FuncLit{
+		// Note: Type is ignored but needs to be non-nil to avoid panic in syntax.Inspect.
+		Type: &syntax.FuncType{},
+		Body: &syntax.BlockStmt{
+			List:   []syntax.Stmt{},
+			Rbrace: end,
+		},
+	}
+	setPos(bodyFunc, start)
+
+	for i := 0; i < ftyp.Params().Len(); i++ {
+		typ := ftyp.Params().At(i).Type()
+		var paramVar *types2.Var
+		if i < len(lhs) && def {
+			// Reuse range variable as parameter.
+			x := lhs[i]
+			paramVar = r.info.Defs[x.(*syntax.Name)].(*types2.Var)
+		} else {
+			// Declare new parameter and assign it to range expression.
+			paramVar = types2.NewVar(start, r.pkg, fmt.Sprintf("#p%d", 1+i), typ)
+			if i < len(lhs) {
+				x := lhs[i]
+				as := &syntax.AssignStmt{Lhs: x, Rhs: r.useVar(paramVar)}
+				as.SetPos(x.Pos())
+				setPos(as.Rhs, x.Pos())
+				bodyFunc.Body.List = append(bodyFunc.Body.List, as)
+			}
+		}
+		params = append(params, paramVar)
+	}
+
+	tv := syntax.TypeAndValue{
+		Type: types2.NewSignatureType(nil, nil, nil,
+			types2.NewTuple(params...),
+			types2.NewTuple(results...),
+			false),
+	}
+	tv.SetIsValue()
+	bodyFunc.SetTypeInfo(tv)
+
+	loop := r.forStack[len(r.forStack)-1]
+
+	if r.checkFuncMisuse() {
+		bodyFunc.Body.List = append(bodyFunc.Body.List, r.assertNotExited(start, loop))
+	}
+
+	// Original loop body (already rewritten by editStmt during inspect).
+	bodyFunc.Body.List = append(bodyFunc.Body.List, body...)
+
+	// return true to continue at end of loop body
+	ret := &syntax.ReturnStmt{Results: r.useVar(r.true)}
+	ret.SetPos(end)
+	bodyFunc.Body.List = append(bodyFunc.Body.List, ret)
+
+	return bodyFunc
+}
+
+// checks returns the post-call checks that need to be done for the given loop.
+func (r *rewriter) checks(loop *forLoop, pos syntax.Pos) []syntax.Stmt {
+	var list []syntax.Stmt
+	if len(loop.checkBranch) > 0 {
+		did := make(map[branch]bool)
+		for _, br := range loop.checkBranch {
+			if did[br] {
+				continue
+			}
+			did[br] = true
+			doBranch := &syntax.BranchStmt{Tok: br.tok, Label: &syntax.Name{Value: br.label}}
+			list = append(list, r.ifNext(syntax.Eql, r.branchNext[br], doBranch))
+		}
+	}
+	if len(r.forStack) == 1 {
+		if loop.checkRetArgs {
+			list = append(list, r.ifNext(syntax.Eql, -2, retStmt(r.useList(r.retVars))))
+		}
+		if loop.checkRet {
+			list = append(list, r.ifNext(syntax.Eql, -1, retStmt(nil)))
+		}
+	} else {
+		if loop.checkRetArgs || loop.checkRet {
+			// Note: next < 0 also handles gotos handled by outer loops.
+			// We set checkRet in that case to trigger this check.
+			list = append(list, r.ifNext(syntax.Lss, 0, retStmt(r.useVar(r.false))))
+		}
+		if loop.checkBreak {
+			list = append(list, r.ifNext(syntax.Geq, perLoopStep, retStmt(r.useVar(r.false))))
+		}
+		if loop.checkContinue {
+			list = append(list, r.ifNext(syntax.Eql, perLoopStep-1, retStmt(r.useVar(r.true))))
+		}
+	}
+
+	for _, j := range list {
+		setPos(j, pos)
+	}
+	return list
+}
+
+// retStmt returns a return statement returning the given return values.
+func retStmt(results syntax.Expr) *syntax.ReturnStmt {
+	return &syntax.ReturnStmt{Results: results}
+}
+
+// ifNext returns the statement:
+//
+//	if #next op c { adjust; then }
+//
+// When op is >=, adjust is #next -= c.
+// When op is == and c is not -1 or -2, adjust is #next = 0.
+// Otherwise adjust is omitted.
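+//
+// For example, the call r.ifNext(syntax.Geq, perLoopStep, retStmt(r.useVar(r.false)))
+// made in checks produces
+//
+//	if #next >= 2 {
+//		#next -= 2
+//		return false
+//	}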
+func (r *rewriter) ifNext(op syntax.Operator, c int, then syntax.Stmt) syntax.Stmt {
+	nif := &syntax.IfStmt{
+		Cond: &syntax.Operation{Op: op, X: r.next(), Y: r.intConst(c)},
+		Then: &syntax.BlockStmt{
+			List: []syntax.Stmt{then},
+		},
+	}
+	tv := syntax.TypeAndValue{Type: r.bool.Type()}
+	tv.SetIsValue()
+	nif.Cond.SetTypeInfo(tv)
+
+	if op == syntax.Geq {
+		sub := &syntax.AssignStmt{
+			Op:  syntax.Sub,
+			Lhs: r.next(),
+			Rhs: r.intConst(c),
+		}
+		nif.Then.List = []syntax.Stmt{sub, then}
+	}
+	if op == syntax.Eql && c != -1 && c != -2 {
+		clr := &syntax.AssignStmt{
+			Lhs: r.next(),
+			Rhs: r.intConst(0),
+		}
+		nif.Then.List = []syntax.Stmt{clr, then}
+	}
+
+	return nif
+}
+
+// setValueType marks x as a value with type typ.
+func setValueType(x syntax.Expr, typ syntax.Type) {
+	tv := syntax.TypeAndValue{Type: typ}
+	tv.SetIsValue()
+	x.SetTypeInfo(tv)
+}
+
+// assertNotExited returns the statement:
+//
+//	if #exitK { runtime.panicrangeexit() }
+//
+// where #exitK is the exit guard for loop.
+func (r *rewriter) assertNotExited(start syntax.Pos, loop *forLoop) syntax.Stmt {
+	callPanicExpr := &syntax.CallExpr{
+		Fun: runtimeSym(r.info, "panicrangeexit"),
+	}
+	setValueType(callPanicExpr, nil) // no result type
+
+	callPanic := &syntax.ExprStmt{X: callPanicExpr}
+
+	nif := &syntax.IfStmt{
+		Cond: r.useVar(loop.exitFlag),
+		Then: &syntax.BlockStmt{
+			List: []syntax.Stmt{callPanic},
+		},
+	}
+	setPos(nif, start)
+	return nif
+}
+
+// next returns a reference to the #next variable.
+func (r *rewriter) next() *syntax.Name {
+	if r.nextVar == nil {
+		r.nextVar = r.declVar("#next", r.int.Type(), nil)
+	}
+	return r.useVar(r.nextVar)
+}
+
+// forRangeFunc checks whether n is a range-over-func.
+// If so, it returns n.(*syntax.ForStmt), true.
+// Otherwise it returns nil, false.
+func forRangeFunc(n syntax.Node) (*syntax.ForStmt, bool) {
+	nfor, ok := n.(*syntax.ForStmt)
+	if !ok {
+		return nil, false
+	}
+	nrange, ok := nfor.Init.(*syntax.RangeClause)
+	if !ok {
+		return nil, false
+	}
+	_, ok = types2.CoreType(nrange.X.GetTypeInfo().Type).(*types2.Signature)
+	if !ok {
+		return nil, false
+	}
+	return nfor, true
+}
+
+// intConst returns syntax for an integer literal with the given value.
+func (r *rewriter) intConst(c int) *syntax.BasicLit {
+	lit := &syntax.BasicLit{
+		Value: fmt.Sprint(c),
+		Kind:  syntax.IntLit,
+	}
+	tv := syntax.TypeAndValue{Type: r.int.Type(), Value: constant.MakeInt64(int64(c))}
+	tv.SetIsValue()
+	lit.SetTypeInfo(tv)
+	return lit
+}
+
+// useVar returns syntax for a reference to the declared object obj.
+func (r *rewriter) useVar(obj types2.Object) *syntax.Name {
+	n := syntax.NewName(nopos, obj.Name())
+	tv := syntax.TypeAndValue{Type: obj.Type()}
+	tv.SetIsValue()
+	n.SetTypeInfo(tv)
+	r.info.Uses[n] = obj
+	return n
+}
+
+// useList is useVar for a list of decls.
+func (r *rewriter) useList(vars []types2.Object) syntax.Expr {
+	var new []syntax.Expr
+	for _, obj := range vars {
+		new = append(new, r.useVar(obj))
+	}
+	if len(new) == 1 {
+		return new[0]
+	}
+	return &syntax.ListExpr{ElemList: new}
+}
+
+// declVar declares a variable with the given name, type, and initializer value.
+func (r *rewriter) declVar(name string, typ types2.Type, init syntax.Expr) *types2.Var {
+	if r.declStmt == nil {
+		r.declStmt = &syntax.DeclStmt{}
+	}
+	stmt := r.declStmt
+	obj := types2.NewVar(stmt.Pos(), r.pkg, name, typ)
+	n := syntax.NewName(stmt.Pos(), name)
+	tv := syntax.TypeAndValue{Type: typ}
+	tv.SetIsValue()
+	n.SetTypeInfo(tv)
+	r.info.Defs[n] = obj
+	stmt.DeclList = append(stmt.DeclList, &syntax.VarDecl{
+		NameList: []*syntax.Name{n},
+		// Note: Type is ignored
+		Values: init,
+	})
+	return obj
+}
+
+// declType declares a type with the given name and type.
+// This is more like "type name = typ" than "type name typ".
+func declType(pos syntax.Pos, name string, typ types2.Type) *syntax.Name {
+	n := syntax.NewName(pos, name)
+	n.SetTypeInfo(syntax.TypeAndValue{Type: typ})
+	return n
+}
+
+// runtimePkg is a fake runtime package that contains what we need to refer to in package runtime.
+var runtimePkg = func() *types2.Package {
+	var nopos syntax.Pos
+	pkg := types2.NewPackage("runtime", "runtime")
+	anyType := types2.Universe.Lookup("any").Type()
+
+	// func deferrangefunc() unsafe.Pointer
+	obj := types2.NewFunc(nopos, pkg, "deferrangefunc", types2.NewSignatureType(nil, nil, nil, nil, types2.NewTuple(types2.NewParam(nopos, pkg, "extra", anyType)), false))
+	pkg.Scope().Insert(obj)
+
+	// func panicrangeexit()
+	obj = types2.NewFunc(nopos, pkg, "panicrangeexit", types2.NewSignatureType(nil, nil, nil, nil, nil, false))
+	pkg.Scope().Insert(obj)
+
+	return pkg
+}()
+
+// runtimeSym returns a reference to a symbol in the fake runtime package.
+func runtimeSym(info *types2.Info, name string) *syntax.Name {
+	obj := runtimePkg.Scope().Lookup(name)
+	n := syntax.NewName(nopos, "runtime."+name)
+	tv := syntax.TypeAndValue{Type: obj.Type()}
+	tv.SetIsValue()
+	tv.SetIsRuntimeHelper()
+	n.SetTypeInfo(tv)
+	info.Uses[n] = obj
+	return n
+}
+
+// setPos walks the top of the structure of x, the nodes that have no
+// position assigned yet, and assigns them all position pos.
+// When setPos encounters a syntax node with a position assigned,
+// setPos does not look inside that node.
+// setPos only needs to handle syntax we create in this package;
+// all other syntax should have positions assigned already.
+func setPos(x syntax.Node, pos syntax.Pos) {
+	if x == nil {
+		return
+	}
+	syntax.Inspect(x, func(n syntax.Node) bool {
+		if n == nil || n.Pos() != nopos {
+			return false
+		}
+		n.SetPos(pos)
+		switch n := n.(type) {
+		case *syntax.BlockStmt:
+			if n.Rbrace == nopos {
+				n.Rbrace = pos
+			}
+		}
+		return true
+	})
+}
diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
index 69de685..a0f5522 100644
--- a/src/cmd/compile/internal/reflectdata/alg.go
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -115,7 +115,7 @@
 	case types.TARRAY:
 		genhash(t.Elem())
 	case types.TSTRUCT:
-		for _, f := range t.FieldSlice() {
+		for _, f := range t.Fields() {
 			genhash(f.Type)
 		}
 	}
@@ -140,20 +140,25 @@
 		return sym.Def.(*ir.Name).Func
 	}
 
-	base.Pos = base.AutogeneratedPos // less confusing than end of input
-	typecheck.DeclContext = ir.PEXTERN
+	pos := base.AutogeneratedPos // less confusing than end of input
+	base.Pos = pos
 
 	// func sym(p *T, h uintptr) uintptr
-	args := []*ir.Field{
-		ir.NewField(base.Pos, typecheck.Lookup("p"), types.NewPtr(t)),
-		ir.NewField(base.Pos, typecheck.Lookup("h"), types.Types[types.TUINTPTR]),
-	}
-	results := []*ir.Field{ir.NewField(base.Pos, nil, types.Types[types.TUINTPTR])}
-
-	fn := typecheck.DeclFunc(sym, nil, args, results)
+	fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
+		[]*types.Field{
+			types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
+			types.NewField(pos, typecheck.Lookup("h"), types.Types[types.TUINTPTR]),
+		},
+		[]*types.Field{
+			types.NewField(pos, nil, types.Types[types.TUINTPTR]),
+		},
+	))
 	sym.Def = fn.Nname
-	np := ir.AsNode(fn.Type().Params().Field(0).Nname)
-	nh := ir.AsNode(fn.Type().Params().Field(1).Nname)
+	fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.
+
+	typecheck.DeclFunc(fn)
+	np := fn.Dcl[0]
+	nh := fn.Dcl[1]
 
 	switch t.Kind() {
 	case types.TARRAY:
@@ -163,7 +168,7 @@
 		hashel := hashfor(t.Elem())
 
 		// for i := 0; i < nelem; i++
-		ni := typecheck.Temp(types.Types[types.TINT])
+		ni := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
 		init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(base.Pos, 0))
 		cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(base.Pos, t.NumElem()))
 		post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(base.Pos, 1)))
@@ -185,7 +190,7 @@
 	case types.TSTRUCT:
 		// Walk the struct using memhash for runs of AMEM
 		// and calling specific hash functions for the others.
-		for i, fields := 0, t.FieldSlice(); i < len(fields); {
+		for i, fields := 0, t.Fields(); i < len(fields); {
 			f := fields[i]
 
 			// Skip blank fields.
@@ -198,8 +203,7 @@
 			if !compare.IsRegularMemory(f.Type) {
 				hashel := hashfor(f.Type)
 				call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
-				nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
-				na := typecheck.NodAddr(nx)
+				na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i))
 				call.Args.Append(na)
 				call.Args.Append(nh)
 				fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
@@ -213,8 +217,7 @@
 			// h = hashel(&p.first, size, h)
 			hashel := hashmem(f.Type)
 			call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
-			nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
-			na := typecheck.NodAddr(nx)
+			na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i))
 			call.Args.Append(na)
 			call.Args.Append(nh)
 			call.Args.Append(ir.NewInt(base.Pos, size))
@@ -235,22 +238,18 @@
 	typecheck.FinishFuncBody()
 
 	fn.SetDupok(true)
-	typecheck.Func(fn)
 
 	ir.WithFunc(fn, func() {
 		typecheck.Stmts(fn.Body)
 	})
 
 	fn.SetNilCheckDisabled(true)
-	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
 
 	return fn
 }
 
 func runtimeHashFor(name string, t *types.Type) *ir.Name {
-	n := typecheck.LookupRuntime(name)
-	n = typecheck.SubstArgTypes(n, t)
-	return n
+	return typecheck.LookupRuntime(name, t)
 }
 
 // hashfor returns the function to compute the hash of a value of type t.
@@ -366,18 +365,27 @@
 	if sym.Def != nil {
 		return sym.Def.(*ir.Name).Func
 	}
-	base.Pos = base.AutogeneratedPos // less confusing than end of input
-	typecheck.DeclContext = ir.PEXTERN
+
+	pos := base.AutogeneratedPos // less confusing than end of input
+	base.Pos = pos
 
 	// func sym(p, q *T) bool
-	fn := typecheck.DeclFunc(sym, nil,
-		[]*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("p"), types.NewPtr(t)), ir.NewField(base.Pos, typecheck.Lookup("q"), types.NewPtr(t))},
-		[]*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("r"), types.Types[types.TBOOL])},
-	)
+	fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
+		[]*types.Field{
+			types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
+			types.NewField(pos, typecheck.Lookup("q"), types.NewPtr(t)),
+		},
+		[]*types.Field{
+			types.NewField(pos, typecheck.Lookup("r"), types.Types[types.TBOOL]),
+		},
+	))
 	sym.Def = fn.Nname
-	np := ir.AsNode(fn.Type().Params().Field(0).Nname)
-	nq := ir.AsNode(fn.Type().Params().Field(1).Nname)
-	nr := ir.AsNode(fn.Type().Results().Field(0).Nname)
+	fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.
+
+	typecheck.DeclFunc(fn)
+	np := fn.Dcl[0]
+	nq := fn.Dcl[1]
+	nr := fn.Dcl[2]
 
 	// Label to jump to if an equality test fails.
 	neq := typecheck.AutoLabel(".neq")
@@ -440,7 +448,7 @@
 			if iterateTo > 0 {
 				// Generate an unrolled for loop.
 				// for i := 0; i < nelem/unroll*unroll; i += unroll
-				i := typecheck.Temp(types.Types[types.TINT])
+				i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
 				init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(base.Pos, 0))
 				cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(base.Pos, iterateTo))
 				loop := ir.NewForStmt(base.Pos, nil, cond, nil, nil, false)
@@ -619,7 +627,6 @@
 	typecheck.FinishFuncBody()
 
 	fn.SetDupok(true)
-	typecheck.Func(fn)
 
 	ir.WithFunc(fn, func() {
 		typecheck.Stmts(fn.Body)
@@ -630,7 +637,6 @@
 	// neither of which can be nil, and our comparisons
 	// are shallow.
 	fn.SetNilCheckDisabled(true)
-	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
 	return fn
 }
 
@@ -639,9 +645,7 @@
 func EqFor(t *types.Type) (ir.Node, bool) {
 	switch a, _ := types.AlgType(t); a {
 	case types.AMEM:
-		n := typecheck.LookupRuntime("memequal")
-		n = typecheck.SubstArgTypes(n, t, t)
-		return n, true
+		return typecheck.LookupRuntime("memequal", t, t), true
 	case types.ASPECIAL:
 		fn := eqFunc(t)
 		return fn.Nname, false
@@ -659,7 +663,5 @@
 }
 
 func hashmem(t *types.Type) ir.Node {
-	n := typecheck.LookupRuntime("memhash")
-	n = typecheck.SubstArgTypes(n, t)
-	return n
+	return typecheck.LookupRuntime("memhash", t)
 }
diff --git a/src/cmd/compile/internal/reflectdata/helpers.go b/src/cmd/compile/internal/reflectdata/helpers.go
index f2d69cd..9ba62d6 100644
--- a/src/cmd/compile/internal/reflectdata/helpers.go
+++ b/src/cmd/compile/internal/reflectdata/helpers.go
@@ -16,16 +16,6 @@
 		return true
 	}
 
-	// We make an exception for `init`, because we still depend on
-	// pkginit for sorting package initialization statements, and it
-	// gets confused by implicit conversions. Also, because
-	// package-scope statements can never be generic, so they'll never
-	// require dictionary lookups.
-	if ir.CurFunc.Nname.Sym().Name != "init" {
-		ir.Dump("CurFunc", ir.CurFunc)
-		base.FatalfAt(n.Pos(), "missing %s in %v: %+v", fieldName, ir.CurFunc, n)
-	}
-
 	return false
 }
 
@@ -126,11 +116,11 @@
 }
 
 // ConvIfaceSrcRType asserts that n is a conversion from
-// non-interface type to interface type (or OCONVIDATA operation), and
+// non-interface type to interface type, and
 // returns an expression that yields the *runtime._type for copying
 // the convertee value to the heap.
 func ConvIfaceSrcRType(pos src.XPos, n *ir.ConvExpr) ir.Node {
-	assertOp2(n, ir.OCONVIFACE, ir.OCONVIDATA)
+	assertOp(n, ir.OCONVIFACE)
 	if hasRType(n, n.SrcRType, "SrcRType") {
 		return n.SrcRType
 	}
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 6ef40cb..c2407af 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -18,6 +18,8 @@
 	"cmd/compile/internal/compare"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
+	"cmd/compile/internal/rttype"
+	"cmd/compile/internal/staticdata"
 	"cmd/compile/internal/typebits"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
@@ -32,10 +34,6 @@
 	t *types.Type
 }
 
-func CountPTabs() int {
-	return len(ptabs)
-}
-
 // runtime interface and reflection data structures
 var (
 	// protects signatset and signatslice
@@ -47,8 +45,6 @@
 
 	gcsymmu  sync.Mutex // protects gcsymset and gcsymslice
 	gcsymset = make(map[*types.Type]struct{})
-
-	ptabs []*ir.Name
 )
 
 type typeSig struct {
@@ -77,15 +73,13 @@
 	MAXELEMSIZE = abi.MapMaxElemBytes
 )
 
-func structfieldSize() int { return abi.StructFieldSize(types.PtrSize) } // Sizeof(runtime.structfield{})
-func imethodSize() int     { return abi.IMethodSize(types.PtrSize) }     // Sizeof(runtime.imethod{})
-func commonSize() int      { return abi.CommonSize(types.PtrSize) }      // Sizeof(runtime._type{})
+func commonSize() int { return int(rttype.Type.Size()) } // Sizeof(runtime._type{})
 
 func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
 	if t.Sym() == nil && len(methods(t)) == 0 {
 		return 0
 	}
-	return int(abi.UncommonSize())
+	return int(rttype.UncommonType.Size())
 }
 
 func makefield(name string, t *types.Type) *types.Field {
@@ -201,14 +195,14 @@
 	return bucket
 }
 
-// MapType builds a type representing a Hmap structure for the given map type.
-// Make sure this stays in sync with runtime/map.go.
-func MapType(t *types.Type) *types.Type {
-	if t.MapType().Hmap != nil {
-		return t.MapType().Hmap
-	}
+var hmapType *types.Type
 
-	bmap := MapBucketType(t)
+// MapType returns a type interchangeable with runtime.hmap.
+// Make sure this stays in sync with runtime/map.go.
+func MapType() *types.Type {
+	if hmapType != nil {
+		return hmapType
+	}
 
 	// build a struct:
 	// type hmap struct {
@@ -217,8 +211,8 @@
 	//    B          uint8
 	//    noverflow  uint16
 	//    hash0      uint32
-	//    buckets    *bmap
-	//    oldbuckets *bmap
+	//    buckets    unsafe.Pointer
+	//    oldbuckets unsafe.Pointer
 	//    nevacuate  uintptr
 	//    extra      unsafe.Pointer // *mapextra
 	// }
@@ -228,15 +222,19 @@
 		makefield("flags", types.Types[types.TUINT8]),
 		makefield("B", types.Types[types.TUINT8]),
 		makefield("noverflow", types.Types[types.TUINT16]),
-		makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
-		makefield("buckets", types.NewPtr(bmap)),       // Used in walk.go for OMAKEMAP.
-		makefield("oldbuckets", types.NewPtr(bmap)),
+		makefield("hash0", types.Types[types.TUINT32]),      // Used in walk.go for OMAKEMAP.
+		makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
+		makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
 		makefield("nevacuate", types.Types[types.TUINTPTR]),
 		makefield("extra", types.Types[types.TUNSAFEPTR]),
 	}
 
-	hmap := types.NewStruct(fields)
-	hmap.SetNoalg(true)
+	n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
+	hmap := types.NewNamed(n)
+	n.SetType(hmap)
+	n.SetTypecheck(1)
+
+	hmap.SetUnderlying(types.NewStruct(fields))
 	types.CalcSize(hmap)
 
 	// The size of hmap should be 48 bytes on 64 bit
@@ -245,29 +243,29 @@
 		base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
 	}
 
-	t.MapType().Hmap = hmap
-	hmap.StructType().Map = t
+	hmapType = hmap
 	return hmap
 }
 
-// MapIterType builds a type representing an Hiter structure for the given map type.
+var hiterType *types.Type
+
+// MapIterType returns a type interchangeable with runtime.hiter.
 // Make sure this stays in sync with runtime/map.go.
-func MapIterType(t *types.Type) *types.Type {
-	if t.MapType().Hiter != nil {
-		return t.MapType().Hiter
+func MapIterType() *types.Type {
+	if hiterType != nil {
+		return hiterType
 	}
 
-	hmap := MapType(t)
-	bmap := MapBucketType(t)
+	hmap := MapType()
 
 	// build a struct:
 	// type hiter struct {
-	//    key         *Key
-	//    elem        *Elem
+	//    key         unsafe.Pointer // *Key
+	//    elem        unsafe.Pointer // *Elem
 	//    t           unsafe.Pointer // *MapType
 	//    h           *hmap
-	//    buckets     *bmap
-	//    bptr        *bmap
+	//    buckets     unsafe.Pointer
+	//    bptr        unsafe.Pointer // *bmap
 	//    overflow    unsafe.Pointer // *[]*bmap
 	//    oldoverflow unsafe.Pointer // *[]*bmap
 	//    startBucket uintptr
@@ -280,12 +278,12 @@
 	// }
 	// must match runtime/map.go:hiter.
 	fields := []*types.Field{
-		makefield("key", types.NewPtr(t.Key())),   // Used in range.go for TMAP.
-		makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
+		makefield("key", types.Types[types.TUNSAFEPTR]),  // Used in range.go for TMAP.
+		makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
 		makefield("t", types.Types[types.TUNSAFEPTR]),
 		makefield("h", types.NewPtr(hmap)),
-		makefield("buckets", types.NewPtr(bmap)),
-		makefield("bptr", types.NewPtr(bmap)),
+		makefield("buckets", types.Types[types.TUNSAFEPTR]),
+		makefield("bptr", types.Types[types.TUNSAFEPTR]),
 		makefield("overflow", types.Types[types.TUNSAFEPTR]),
 		makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
 		makefield("startBucket", types.Types[types.TUINTPTR]),
@@ -298,14 +296,18 @@
 	}
 
 	// build iterator struct holding the above fields
-	hiter := types.NewStruct(fields)
-	hiter.SetNoalg(true)
+	n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
+	hiter := types.NewNamed(n)
+	n.SetType(hiter)
+	n.SetTypecheck(1)
+
+	hiter.SetUnderlying(types.NewStruct(fields))
 	types.CalcSize(hiter)
 	if hiter.Size() != int64(12*types.PtrSize) {
 		base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
 	}
-	t.MapType().Hiter = hiter
-	hiter.StructType().Map = t
+
+	hiterType = hiter
 	return hiter
 }
 
@@ -327,7 +329,7 @@
 	// make list of methods for t,
 	// generating code if necessary.
 	var ms []*typeSig
-	for _, f := range mt.AllMethods().Slice() {
+	for _, f := range mt.AllMethods() {
 		if f.Sym == nil {
 			base.Fatalf("method with no sym on %v", mt)
 		}
@@ -374,7 +376,7 @@
 // imethods returns the methods of the interface type t, sorted by name.
 func imethods(t *types.Type) []*typeSig {
 	var methods []*typeSig
-	for _, f := range t.AllMethods().Slice() {
+	for _, f := range t.AllMethods() {
 		if f.Type.Kind() != types.TFUNC || f.Sym == nil {
 			continue
 		}
@@ -410,6 +412,10 @@
 		return
 	}
 
+	if p == types.LocalPkg && base.Ctxt.Pkgpath == "" {
+		panic("missing pkgpath")
+	}
+
 	// If we are compiling the runtime package, there are two runtime packages around
 	// -- localpkg and Pkgs.Runtime. We don't want to produce import path symbols for
 	// both of them, so just produce one for localpkg.
@@ -424,51 +430,35 @@
 	p.Pathsym = s
 }
 
-func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
+func dgopkgpath(c rttype.Cursor, pkg *types.Pkg) {
+	c = c.Field("Bytes")
 	if pkg == nil {
-		return objw.Uintptr(s, ot, 0)
-	}
-
-	if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
-		// If we don't know the full import path of the package being compiled
-		// (i.e. -p was not passed on the compiler command line), emit a reference to
-		// type:.importpath.""., which the linker will rewrite using the correct import path.
-		// Every package that imports this one directly defines the symbol.
-		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
-		ns := base.Ctxt.Lookup(`type:.importpath."".`)
-		return objw.SymPtr(s, ot, ns, 0)
+		c.WritePtr(nil)
+		return
 	}
 
 	dimportpath(pkg)
-	return objw.SymPtr(s, ot, pkg.Pathsym, 0)
+	c.WritePtr(pkg.Pathsym)
 }
 
-// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
-func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
+// dgopkgpathOff writes an offset relocation to the pkg path symbol to c.
+func dgopkgpathOff(c rttype.Cursor, pkg *types.Pkg) {
 	if pkg == nil {
-		return objw.Uint32(s, ot, 0)
-	}
-	if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
-		// If we don't know the full import path of the package being compiled
-		// (i.e. -p was not passed on the compiler command line), emit a reference to
-		// type:.importpath.""., which the linker will rewrite using the correct import path.
-		// Every package that imports this one directly defines the symbol.
-		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
-		ns := base.Ctxt.Lookup(`type:.importpath."".`)
-		return objw.SymPtrOff(s, ot, ns)
+		c.WriteInt32(0)
+		return
 	}
 
 	dimportpath(pkg)
-	return objw.SymPtrOff(s, ot, pkg.Pathsym)
+	c.WriteSymPtrOff(pkg.Pathsym, false)
 }
 
 // dnameField dumps a reflect.name for a struct field.
-func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
+func dnameField(c rttype.Cursor, spkg *types.Pkg, ft *types.Field) {
 	if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
 		base.Fatalf("package mismatch for %v", ft.Sym)
 	}
 	nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name), ft.Embedded != 0)
-	return objw.SymPtr(lsym, ot, nsym, 0)
+	c.Field("Bytes").WritePtr(nsym)
 }
 
 // dnameData writes the contents of a reflect.name into s at offset ot.
@@ -513,7 +503,9 @@
 	ot = int(s.WriteBytes(base.Ctxt, int64(ot), b))
 
 	if pkg != nil {
-		ot = dgopkgpathOff(s, ot, pkg)
+		c := rttype.NewCursor(s, int64(ot), types.Types[types.TUINT32])
+		dgopkgpathOff(c, pkg)
+		ot += 4
 	}
 
 	return ot
@@ -544,7 +536,9 @@
 			}
 		}
 	} else {
-		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
+		// TODO(mdempsky): We should be able to share these too (except
+		// maybe when dynamic linking).
+		sname = fmt.Sprintf("%s%s.%d", sname, types.LocalPkg.Prefix, dnameCount)
 		dnameCount++
 	}
 	if embedded {
@@ -562,14 +556,14 @@
 
 // dextratype dumps the fields of a runtime.uncommontype.
 // dataAdd is the offset in bytes after the header where the
-// backing array of the []method field is written (by dextratypeData).
-func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
+// backing array of the []method field should be written.
+func dextratype(lsym *obj.LSym, off int64, t *types.Type, dataAdd int) {
 	m := methods(t)
 	if t.Sym() == nil && len(m) == 0 {
-		return ot
+		base.Fatalf("extra requested of type with no extra info %v", t)
 	}
-	noff := int(types.RoundUp(int64(ot), int64(types.PtrSize)))
-	if noff != ot {
+	noff := types.RoundUp(off, int64(types.PtrSize))
+	if noff != off {
 		base.Fatalf("unexpected alignment in dextratype for %v", t)
 	}
 
@@ -577,7 +571,8 @@
 		writeType(a.type_)
 	}
 
-	ot = dgopkgpathOff(lsym, ot, typePkg(t))
+	c := rttype.NewCursor(lsym, off, rttype.UncommonType)
+	dgopkgpathOff(c.Field("PkgPath"), typePkg(t))
 
 	dataAdd += uncommonSize(t)
 	mcount := len(m)
@@ -589,11 +584,27 @@
 		base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
 	}
 
-	ot = objw.Uint16(lsym, ot, uint16(mcount))
-	ot = objw.Uint16(lsym, ot, uint16(xcount))
-	ot = objw.Uint32(lsym, ot, uint32(dataAdd))
-	ot = objw.Uint32(lsym, ot, 0)
-	return ot
+	c.Field("Mcount").WriteUint16(uint16(mcount))
+	c.Field("Xcount").WriteUint16(uint16(xcount))
+	c.Field("Moff").WriteUint32(uint32(dataAdd))
+	// Note: there is an unused uint32 field here.
+
+	// Write the backing array for the []method field.
+	array := rttype.NewArrayCursor(lsym, off+int64(dataAdd), rttype.Method, mcount)
+	for i, a := range m {
+		exported := types.IsExported(a.name.Name)
+		var pkg *types.Pkg
+		if !exported && a.name.Pkg != typePkg(t) {
+			pkg = a.name.Pkg
+		}
+		nsym := dname(a.name.Name, "", pkg, exported, false)
+
+		e := array.Elem(i)
+		e.Field("Name").WriteSymPtrOff(nsym, false)
+		dmethodptrOff(e.Field("Mtyp"), writeType(a.mtype))
+		dmethodptrOff(e.Field("Ifn"), a.isym)
+		dmethodptrOff(e.Field("Tfn"), a.tsym)
+	}
 }
 
 func typePkg(t *types.Type) *types.Pkg {
@@ -612,34 +623,11 @@
 	return nil
 }
 
-// dextratypeData dumps the backing array for the []method field of
-// runtime.uncommontype.
-func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
-	for _, a := range methods(t) {
-		// ../../../../runtime/type.go:/method
-		exported := types.IsExported(a.name.Name)
-		var pkg *types.Pkg
-		if !exported && a.name.Pkg != typePkg(t) {
-			pkg = a.name.Pkg
-		}
-		nsym := dname(a.name.Name, "", pkg, exported, false)
-
-		ot = objw.SymPtrOff(lsym, ot, nsym)
-		ot = dmethodptrOff(lsym, ot, writeType(a.mtype))
-		ot = dmethodptrOff(lsym, ot, a.isym)
-		ot = dmethodptrOff(lsym, ot, a.tsym)
-	}
-	return ot
-}
-
-func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
-	objw.Uint32(s, ot, 0)
-	r := obj.Addrel(s)
-	r.Off = int32(ot)
-	r.Siz = 4
+func dmethodptrOff(c rttype.Cursor, x *obj.LSym) {
+	c.WriteInt32(0)
+	r := c.Reloc()
 	r.Sym = x
 	r.Type = objabi.R_METHODOFF
-	return ot + 4
 }
 
 var kinds = []int{
@@ -676,8 +664,8 @@
 	memequalvarlen *obj.LSym
 )
 
-// dcommontype dumps the contents of a reflect.rtype (runtime._type).
-func dcommontype(lsym *obj.LSym, t *types.Type) int {
+// dcommontype dumps the contents of a reflect.rtype (runtime._type) to c.
+func dcommontype(c rttype.Cursor, t *types.Type) {
 	types.CalcSize(t)
 	eqfunc := geneq(t)
 
@@ -709,10 +697,9 @@
 	//		str           nameOff
 	//		ptrToThis     typeOff
 	//	}
-	ot := 0
-	ot = objw.Uintptr(lsym, ot, uint64(t.Size()))
-	ot = objw.Uintptr(lsym, ot, uint64(ptrdata))
-	ot = objw.Uint32(lsym, ot, types.TypeHash(t))
+	c.Field("Size_").WriteUintptr(uint64(t.Size()))
+	c.Field("PtrBytes").WriteUintptr(uint64(ptrdata))
+	c.Field("Hash").WriteUint32(types.TypeHash(t))
 
 	var tflag abi.TFlag
 	if uncommonSize(t) != 0 {
@@ -748,7 +735,7 @@
 		// this should optimize away completely
 		panic("Unexpected change in size of abi.TFlag")
 	}
-	ot = objw.Uint8(lsym, ot, uint8(tflag))
+	c.Field("TFlag").WriteUint8(uint8(tflag))
 
 	// runtime (and common sense) expects alignment to be a power of two.
 	i := int(uint8(t.Alignment()))
@@ -759,8 +746,8 @@
 	if i&(i-1) != 0 {
 		base.Fatalf("invalid alignment %d for %v", uint8(t.Alignment()), t)
 	}
-	ot = objw.Uint8(lsym, ot, uint8(t.Alignment())) // align
-	ot = objw.Uint8(lsym, ot, uint8(t.Alignment())) // fieldAlign
+	c.Field("Align_").WriteUint8(uint8(t.Alignment()))
+	c.Field("FieldAlign_").WriteUint8(uint8(t.Alignment()))
 
 	i = kinds[t.Kind()]
 	if types.IsDirectIface(t) {
@@ -769,26 +756,14 @@
 	if useGCProg {
 		i |= objabi.KindGCProg
 	}
-	ot = objw.Uint8(lsym, ot, uint8(i)) // kind
-	if eqfunc != nil {
-		ot = objw.SymPtr(lsym, ot, eqfunc, 0) // equality function
-	} else {
-		ot = objw.Uintptr(lsym, ot, 0) // type we can't do == with
-	}
-	ot = objw.SymPtr(lsym, ot, gcsym, 0) // gcdata
+	c.Field("Kind_").WriteUint8(uint8(i))
+
+	c.Field("Equal").WritePtr(eqfunc)
+	c.Field("GCData").WritePtr(gcsym)
 
 	nsym := dname(p, "", nil, exported, false)
-	ot = objw.SymPtrOff(lsym, ot, nsym) // str
-	// ptrToThis
-	if sptr == nil {
-		ot = objw.Uint32(lsym, ot, 0)
-	} else if sptrWeak {
-		ot = objw.SymPtrWeakOff(lsym, ot, sptr)
-	} else {
-		ot = objw.SymPtrOff(lsym, ot, sptr)
-	}
-
-	return ot
+	c.Field("Str").WriteSymPtrOff(nsym, false)
+	c.Field("PtrToThis").WriteSymPtrOff(sptr, sptrWeak)
 }
 
 // TrackSym returns the symbol for tracking use of field/method f, assumed
@@ -845,11 +820,6 @@
 	return lsym
 }
 
-// Deprecated: Use TypePtrAt instead.
-func TypePtr(t *types.Type) *ir.AddrExpr {
-	return TypePtrAt(base.Pos, t)
-}
-
 // TypePtrAt returns an expression that evaluates to the
 // *runtime._type value for t.
 func TypePtrAt(pos src.XPos, t *types.Type) *ir.AddrExpr {
@@ -873,11 +843,6 @@
 	return lsym
 }
 
-// Deprecated: Use ITabAddrAt instead.
-func ITabAddr(typ, iface *types.Type) *ir.AddrExpr {
-	return ITabAddrAt(base.Pos, typ, iface)
-}
-
 // ITabAddrAt returns an expression that evaluates to the
 // *runtime.itab value for concrete type typ implementing interface
 // iface.
@@ -909,7 +874,7 @@
 		return needkeyupdate(t.Elem())
 
 	case types.TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
+		for _, t1 := range t.Fields() {
 			if needkeyupdate(t1.Type) {
 				return true
 			}
@@ -932,7 +897,7 @@
 		return hashMightPanic(t.Elem())
 
 	case types.TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
+		for _, t1 := range t.Fields() {
 			if hashMightPanic(t1.Type) {
 				return true
 			}
@@ -963,16 +928,11 @@
 
 	s := types.TypeSym(t)
 	lsym := s.Linksym()
-	if s.Siggen() {
-		return lsym
-	}
-	s.SetSiggen(true)
 
 	// special case (look for runtime below):
 	// when compiling package runtime,
 	// emit the type structures for int, float, etc.
 	tbase := t
-
 	if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
 		tbase = t.Elem()
 	}
@@ -980,6 +940,19 @@
 		base.Fatalf("unresolved defined type: %v", tbase)
 	}
 
+	// This is a fake type we generated for our builtin pseudo-runtime
+	// package. We'll emit a description for the real type while
+	// compiling package runtime, so we don't need or want to emit one
+	// from this fake type.
+	if sym := tbase.Sym(); sym != nil && sym.Pkg == ir.Pkgs.Runtime {
+		return lsym
+	}
+
+	if s.Siggen() {
+		return lsym
+	}
+	s.SetSiggen(true)
+
 	if !NeedEmit(tbase) {
 		if i := typecheck.BaseTypeIndex(t); i >= 0 {
 			lsym.Pkg = tbase.Sym().Pkg.Prefix
@@ -994,101 +967,137 @@
 		return lsym
 	}
 
-	ot := 0
+	// Type layout                          Written by               Marker
+	// +--------------------------------+                            - 0
+	// | internal/abi.Type              |   dcommontype
+	// +--------------------------------+                            - A
+	// | additional type-dependent      |   code in the switch below
+	// | fields, e.g.                   |
+	// | internal/abi.ArrayType.Len     |
+	// +--------------------------------+                            - B
+	// | internal/abi.UncommonType      |   dextratype
+	// | This section is optional,      |
+	// | if type has a name or methods  |
+	// +--------------------------------+                            - C
+	// | variable-length data           |   code in the switch below
+	// | referenced by                  |
+	// | type-dependent fields, e.g.    |
+	// | internal/abi.StructType.Fields |
+	// | dataAdd = size of this section |
+	// +--------------------------------+                            - D
+	// | method list, if any            |   dextratype
+	// +--------------------------------+                            - E
+
+	// UncommonType section is included if we have a name or a method.
+	extra := t.Sym() != nil || len(methods(t)) != 0
+
+	// Decide the underlying type of the descriptor, and remember
+	// the size we need for variable-length data.
+	var rt *types.Type
+	dataAdd := 0
 	switch t.Kind() {
 	default:
-		ot = dcommontype(lsym, t)
-		ot = dextratype(lsym, ot, t, 0)
-
+		rt = rttype.Type
 	case types.TARRAY:
-		// ../../../../runtime/type.go:/arrayType
+		rt = rttype.ArrayType
+	case types.TSLICE:
+		rt = rttype.SliceType
+	case types.TCHAN:
+		rt = rttype.ChanType
+	case types.TFUNC:
+		rt = rttype.FuncType
+		dataAdd = (t.NumRecvs() + t.NumParams() + t.NumResults()) * types.PtrSize
+	case types.TINTER:
+		rt = rttype.InterfaceType
+		dataAdd = len(imethods(t)) * int(rttype.IMethod.Size())
+	case types.TMAP:
+		rt = rttype.MapType
+	case types.TPTR:
+		rt = rttype.PtrType
+		// TODO: use rttype.Type when Elem() is ANY?
+	case types.TSTRUCT:
+		rt = rttype.StructType
+		dataAdd = t.NumFields() * int(rttype.StructField.Size())
+	}
+
+	// Compute offsets of each section.
+	B := rt.Size()
+	C := B
+	if extra {
+		C = B + rttype.UncommonType.Size()
+	}
+	D := C + int64(dataAdd)
+	E := D + int64(len(methods(t)))*rttype.Method.Size()
+
+	// Write the runtime._type
+	c := rttype.NewCursor(lsym, 0, rt)
+	if rt == rttype.Type {
+		dcommontype(c, t)
+	} else {
+		dcommontype(c.Field("Type"), t)
+	}
+
+	// Write additional type-specific data
+	// (Both the fixed size and variable-sized sections.)
+	switch t.Kind() {
+	case types.TARRAY:
+		// internal/abi.ArrayType
 		s1 := writeType(t.Elem())
 		t2 := types.NewSlice(t.Elem())
 		s2 := writeType(t2)
-		ot = dcommontype(lsym, t)
-		ot = objw.SymPtr(lsym, ot, s1, 0)
-		ot = objw.SymPtr(lsym, ot, s2, 0)
-		ot = objw.Uintptr(lsym, ot, uint64(t.NumElem()))
-		ot = dextratype(lsym, ot, t, 0)
+		c.Field("Elem").WritePtr(s1)
+		c.Field("Slice").WritePtr(s2)
+		c.Field("Len").WriteUintptr(uint64(t.NumElem()))
 
 	case types.TSLICE:
-		// ../../../../runtime/type.go:/sliceType
+		// internal/abi.SliceType
 		s1 := writeType(t.Elem())
-		ot = dcommontype(lsym, t)
-		ot = objw.SymPtr(lsym, ot, s1, 0)
-		ot = dextratype(lsym, ot, t, 0)
+		c.Field("Elem").WritePtr(s1)
 
 	case types.TCHAN:
-		// ../../../../runtime/type.go:/chanType
+		// internal/abi.ChanType
 		s1 := writeType(t.Elem())
-		ot = dcommontype(lsym, t)
-		ot = objw.SymPtr(lsym, ot, s1, 0)
-		ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
-		ot = dextratype(lsym, ot, t, 0)
+		c.Field("Elem").WritePtr(s1)
+		c.Field("Dir").WriteInt(int64(t.ChanDir()))
 
 	case types.TFUNC:
-		for _, t1 := range t.Recvs().Fields().Slice() {
+		// internal/abi.FuncType
+		for _, t1 := range t.RecvParamsResults() {
 			writeType(t1.Type)
 		}
-		isddd := false
-		for _, t1 := range t.Params().Fields().Slice() {
-			isddd = t1.IsDDD()
-			writeType(t1.Type)
-		}
-		for _, t1 := range t.Results().Fields().Slice() {
-			writeType(t1.Type)
-		}
-
-		ot = dcommontype(lsym, t)
 		inCount := t.NumRecvs() + t.NumParams()
 		outCount := t.NumResults()
-		if isddd {
+		if t.IsVariadic() {
 			outCount |= 1 << 15
 		}
-		ot = objw.Uint16(lsym, ot, uint16(inCount))
-		ot = objw.Uint16(lsym, ot, uint16(outCount))
-		if types.PtrSize == 8 {
-			ot += 4 // align for *rtype
-		}
 
-		dataAdd := (inCount + t.NumResults()) * types.PtrSize
-		ot = dextratype(lsym, ot, t, dataAdd)
+		c.Field("InCount").WriteUint16(uint16(inCount))
+		c.Field("OutCount").WriteUint16(uint16(outCount))
 
 		// Array of rtype pointers follows funcType.
-		for _, t1 := range t.Recvs().Fields().Slice() {
-			ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
-		}
-		for _, t1 := range t.Params().Fields().Slice() {
-			ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
-		}
-		for _, t1 := range t.Results().Fields().Slice() {
-			ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
+		typs := t.RecvParamsResults()
+		array := rttype.NewArrayCursor(lsym, C, types.Types[types.TUNSAFEPTR], len(typs))
+		for i, t1 := range typs {
+			array.Elem(i).WritePtr(writeType(t1.Type))
 		}
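As a concrete reading of the InCount/OutCount encoding written a few lines above: the low 15 bits of OutCount carry the result count and bit 15 flags a variadic final parameter. A minimal sketch follows; the example signature, the constant name and the helper are illustrative assumptions, not patch code.

// For a hypothetical func(x int, rest ...string) bool:
const variadicFlag = 1 << 15 // mirrors the bit OR'ed into outCount above

func encodeCounts() (in, out uint16) {
	in = 0 + 2             // NumRecvs() + NumParams()
	out = 1 | variadicFlag // NumResults(), with the variadic bit set
	return in, out
}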
 
 	case types.TINTER:
+		// internal/abi.InterfaceType
 		m := imethods(t)
 		n := len(m)
 		for _, a := range m {
 			writeType(a.type_)
 		}
 
-		// ../../../../runtime/type.go:/interfaceType
-		ot = dcommontype(lsym, t)
-
 		var tpkg *types.Pkg
 		if t.Sym() != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
 			tpkg = t.Sym().Pkg
 		}
-		ot = dgopkgpath(lsym, ot, tpkg)
+		dgopkgpath(c.Field("PkgPath"), tpkg)
+		c.Field("Methods").WriteSlice(lsym, C, int64(n), int64(n))
 
-		ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
-		ot = objw.Uintptr(lsym, ot, uint64(n))
-		ot = objw.Uintptr(lsym, ot, uint64(n))
-		dataAdd := imethodSize() * n
-		ot = dextratype(lsym, ot, t, dataAdd)
-
-		for _, a := range m {
-			// ../../../../runtime/type.go:/imethod
+		array := rttype.NewArrayCursor(lsym, C, rttype.IMethod, n)
+		for i, a := range m {
 			exported := types.IsExported(a.name.Name)
 			var pkg *types.Pkg
 			if !exported && a.name.Pkg != tpkg {
@@ -1096,39 +1105,39 @@
 			}
 			nsym := dname(a.name.Name, "", pkg, exported, false)
 
-			ot = objw.SymPtrOff(lsym, ot, nsym)
-			ot = objw.SymPtrOff(lsym, ot, writeType(a.type_))
+			e := array.Elem(i)
+			e.Field("Name").WriteSymPtrOff(nsym, false)
+			e.Field("Typ").WriteSymPtrOff(writeType(a.type_), false)
 		}
 
-	// ../../../../runtime/type.go:/mapType
 	case types.TMAP:
+		// internal/abi.MapType
 		s1 := writeType(t.Key())
 		s2 := writeType(t.Elem())
 		s3 := writeType(MapBucketType(t))
 		hasher := genhash(t.Key())
 
-		ot = dcommontype(lsym, t)
-		ot = objw.SymPtr(lsym, ot, s1, 0)
-		ot = objw.SymPtr(lsym, ot, s2, 0)
-		ot = objw.SymPtr(lsym, ot, s3, 0)
-		ot = objw.SymPtr(lsym, ot, hasher, 0)
+		c.Field("Key").WritePtr(s1)
+		c.Field("Elem").WritePtr(s2)
+		c.Field("Bucket").WritePtr(s3)
+		c.Field("Hasher").WritePtr(hasher)
 		var flags uint32
 		// Note: flags must match maptype accessors in ../../../../runtime/type.go
 		// and maptype builder in ../../../../reflect/type.go:MapOf.
 		if t.Key().Size() > MAXKEYSIZE {
-			ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
+			c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
 			flags |= 1 // indirect key
 		} else {
-			ot = objw.Uint8(lsym, ot, uint8(t.Key().Size()))
+			c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
 		}
 
 		if t.Elem().Size() > MAXELEMSIZE {
-			ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
+			c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
 			flags |= 2 // indirect value
 		} else {
-			ot = objw.Uint8(lsym, ot, uint8(t.Elem().Size()))
+			c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
 		}
-		ot = objw.Uint16(lsym, ot, uint16(MapBucketType(t).Size()))
+		c.Field("BucketSize").WriteUint16(uint16(MapBucketType(t).Size()))
 		if types.IsReflexive(t.Key()) {
 			flags |= 4 // reflexive key
 		}
@@ -1138,8 +1147,8 @@
 		if hashMightPanic(t.Key()) {
 			flags |= 16 // hash might panic
 		}
-		ot = objw.Uint32(lsym, ot, flags)
-		ot = dextratype(lsym, ot, t, 0)
+		c.Field("Flags").WriteUint32(flags)
+
 		if u := t.Underlying(); u != t {
 			// If t is a named map type, also keep the underlying map
 			// type live in the binary. This is important to make sure that
@@ -1151,25 +1160,17 @@
 		}
 
 	case types.TPTR:
+		// internal/abi.PtrType
 		if t.Elem().Kind() == types.TANY {
-			// ../../../../runtime/type.go:/UnsafePointerType
-			ot = dcommontype(lsym, t)
-			ot = dextratype(lsym, ot, t, 0)
-
-			break
+			base.Fatalf("bad pointer base type")
 		}
 
-		// ../../../../runtime/type.go:/ptrType
 		s1 := writeType(t.Elem())
+		c.Field("Elem").WritePtr(s1)
 
-		ot = dcommontype(lsym, t)
-		ot = objw.SymPtr(lsym, ot, s1, 0)
-		ot = dextratype(lsym, ot, t, 0)
-
-	// ../../../../runtime/type.go:/structType
-	// for security, only the exported fields.
 	case types.TSTRUCT:
-		fields := t.Fields().Slice()
+		// internal/abi.StructType
+		fields := t.Fields()
 		for _, t1 := range fields {
 			writeType(t1.Type)
 		}
@@ -1187,23 +1188,23 @@
 			}
 		}
 
-		ot = dcommontype(lsym, t)
-		ot = dgopkgpath(lsym, ot, spkg)
-		ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
-		ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
-		ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
+		dgopkgpath(c.Field("PkgPath"), spkg)
+		c.Field("Fields").WriteSlice(lsym, C, int64(len(fields)), int64(len(fields)))
 
-		dataAdd := len(fields) * structfieldSize()
-		ot = dextratype(lsym, ot, t, dataAdd)
-
-		for _, f := range fields {
-			// ../../../../runtime/type.go:/structField
-			ot = dnameField(lsym, ot, spkg, f)
-			ot = objw.SymPtr(lsym, ot, writeType(f.Type), 0)
-			ot = objw.Uintptr(lsym, ot, uint64(f.Offset))
+		array := rttype.NewArrayCursor(lsym, C, rttype.StructField, len(fields))
+		for i, f := range fields {
+			e := array.Elem(i)
+			dnameField(e.Field("Name"), spkg, f)
+			e.Field("Typ").WritePtr(writeType(f.Type))
+			e.Field("Offset").WriteUintptr(uint64(f.Offset))
 		}
 	}
 
+	// Write the extra info, if any.
+	if extra {
+		dextratype(lsym, B, t, dataAdd)
+	}
+
 	// Note: DUPOK is required to ensure that we don't end up with more
 	// than one type descriptor for a given type, if the type descriptor
 	// can be defined in multiple packages, that is, unnamed types,
@@ -1213,8 +1214,7 @@
 		dupok = obj.DUPOK
 	}
 
-	ot = dextratypeData(lsym, ot, t)
-	objw.Global(lsym, int32(ot), int16(dupok|obj.RODATA))
+	objw.Global(lsym, int32(E), int16(dupok|obj.RODATA))
 
 	// The linker will leave a table of all the typelinks for
 	// types in the binary, so the runtime can find them.
@@ -1224,7 +1224,7 @@
 	keep := base.Ctxt.Flag_dynlink
 	if !keep && t.Sym() == nil {
 		// For an unnamed type, we only need the link if the type can
-		// be created at run time by reflect.PtrTo and similar
+		// be created at run time by reflect.PointerTo and similar
 		// functions. If the type exists in the program, those
 		// functions must return the existing type structure rather
 		// than creating a new one.
@@ -1280,7 +1280,9 @@
 		}
 		signatslice = signatslice[len(signats):]
 	}
+}
 
+func WriteGCSymbols() {
 	// Emit GC data symbols.
 	gcsyms := make([]typeAndStr, 0, len(gcsymset))
 	for t := range gcsymset {
@@ -1305,7 +1307,7 @@
 		base.Fatalf("writeITab(%v, %v)", typ, iface)
 	}
 
-	sigs := iface.AllMethods().Slice()
+	sigs := iface.AllMethods()
 	entries := make([]*obj.LSym, 0, len(sigs))
 
 	// both sigs and methods are sorted by name,
@@ -1352,58 +1354,52 @@
 	lsym.Set(obj.AttrContentAddressable, true)
 }
 
-func WriteTabs() {
-	// process ptabs
-	if types.LocalPkg.Name == "main" && len(ptabs) > 0 {
-		ot := 0
-		s := base.Ctxt.Lookup("go:plugin.tabs")
-		for _, p := range ptabs {
-			// Dump ptab symbol into go.pluginsym package.
-			//
-			// type ptab struct {
-			//	name nameOff
-			//	typ  typeOff // pointer to symbol
-			// }
-			nsym := dname(p.Sym().Name, "", nil, true, false)
-			t := p.Type()
-			if p.Class != ir.PFUNC {
-				t = types.NewPtr(t)
-			}
-			tsym := writeType(t)
-			ot = objw.SymPtrOff(s, ot, nsym)
-			ot = objw.SymPtrOff(s, ot, tsym)
-			// Plugin exports symbols as interfaces. Mark their types
-			// as UsedInIface.
-			tsym.Set(obj.AttrUsedInIface, true)
-		}
-		objw.Global(s, int32(ot), int16(obj.RODATA))
-
-		ot = 0
-		s = base.Ctxt.Lookup("go:plugin.exports")
-		for _, p := range ptabs {
-			ot = objw.SymPtr(s, ot, p.Linksym(), 0)
-		}
-		objw.Global(s, int32(ot), int16(obj.RODATA))
+func WritePluginTable() {
+	ptabs := typecheck.Target.PluginExports
+	if len(ptabs) == 0 {
+		return
 	}
-}
 
-func WriteImportStrings() {
-	// generate import strings for imported packages
-	for _, p := range types.ImportedPkgList() {
-		dimportpath(p)
+	lsym := base.Ctxt.Lookup("go:plugin.tabs")
+	ot := 0
+	for _, p := range ptabs {
+		// Dump ptab symbol into go.pluginsym package.
+		//
+		// type ptab struct {
+		//	name nameOff
+		//	typ  typeOff // pointer to symbol
+		// }
+		nsym := dname(p.Sym().Name, "", nil, true, false)
+		t := p.Type()
+		if p.Class != ir.PFUNC {
+			t = types.NewPtr(t)
+		}
+		tsym := writeType(t)
+		ot = objw.SymPtrOff(lsym, ot, nsym)
+		ot = objw.SymPtrOff(lsym, ot, tsym)
+		// Plugin exports symbols as interfaces. Mark their types
+		// as UsedInIface.
+		tsym.Set(obj.AttrUsedInIface, true)
 	}
+	objw.Global(lsym, int32(ot), int16(obj.RODATA))
+
+	lsym = base.Ctxt.Lookup("go:plugin.exports")
+	ot = 0
+	for _, p := range ptabs {
+		ot = objw.SymPtr(lsym, ot, p.Linksym(), 0)
+	}
+	objw.Global(lsym, int32(ot), int16(obj.RODATA))
 }
 
 // writtenByWriteBasicTypes reports whether typ is written by WriteBasicTypes.
 // WriteBasicTypes always writes pointer types; any pointer has been stripped off typ already.
 func writtenByWriteBasicTypes(typ *types.Type) bool {
 	if typ.Sym() == nil && typ.Kind() == types.TFUNC {
-		f := typ.FuncType()
 		// func(error) string
-		if f.Receiver.NumFields() == 0 &&
-			f.Params.NumFields() == 1 && f.Results.NumFields() == 1 &&
-			f.Params.FieldType(0) == types.ErrorType &&
-			f.Results.FieldType(0) == types.Types[types.TSTRING] {
+		if typ.NumRecvs() == 0 &&
+			typ.NumParams() == 1 && typ.NumResults() == 1 &&
+			typ.Param(0).Type == types.ErrorType &&
+			typ.Result(0).Type == types.Types[types.TSTRING] {
 			return true
 		}
 	}
@@ -1431,45 +1427,32 @@
 	// another possible choice would be package main,
 	// but using runtime means fewer copies in object files.
 	// The code here needs to be in sync with writtenByWriteBasicTypes above.
-	if base.Ctxt.Pkgpath == "runtime" {
-		// Note: always write NewPtr(t) because NeedEmit's caller strips the pointer.
-		var list []*types.Type
-		for i := types.Kind(1); i <= types.TBOOL; i++ {
-			list = append(list, types.Types[i])
-		}
-		list = append(list,
-			types.Types[types.TSTRING],
-			types.Types[types.TUNSAFEPTR],
-			types.AnyType,
-			types.ErrorType)
-		for _, t := range list {
-			writeType(types.NewPtr(t))
-			writeType(types.NewPtr(types.NewSlice(t)))
-		}
-
-		// emit type for func(error) string,
-		// which is the type of an auto-generated wrapper.
-		writeType(types.NewPtr(types.NewSignature(nil, []*types.Field{
-			types.NewField(base.Pos, nil, types.ErrorType),
-		}, []*types.Field{
-			types.NewField(base.Pos, nil, types.Types[types.TSTRING]),
-		})))
-
-		// add paths for runtime and main, which 6l imports implicitly.
-		dimportpath(ir.Pkgs.Runtime)
-
-		if base.Flag.Race {
-			dimportpath(types.NewPkg("runtime/race", ""))
-		}
-		if base.Flag.MSan {
-			dimportpath(types.NewPkg("runtime/msan", ""))
-		}
-		if base.Flag.ASan {
-			dimportpath(types.NewPkg("runtime/asan", ""))
-		}
-
-		dimportpath(types.NewPkg("main", ""))
+	if base.Ctxt.Pkgpath != "runtime" {
+		return
 	}
+
+	// Note: always write NewPtr(t) because NeedEmit's caller strips the pointer.
+	var list []*types.Type
+	for i := types.Kind(1); i <= types.TBOOL; i++ {
+		list = append(list, types.Types[i])
+	}
+	list = append(list,
+		types.Types[types.TSTRING],
+		types.Types[types.TUNSAFEPTR],
+		types.AnyType,
+		types.ErrorType)
+	for _, t := range list {
+		writeType(types.NewPtr(t))
+		writeType(types.NewPtr(types.NewSlice(t)))
+	}
+
+	// emit type for func(error) string,
+	// which is the type of an auto-generated wrapper.
+	writeType(types.NewPtr(types.NewSignature(nil, []*types.Field{
+		types.NewField(base.Pos, nil, types.ErrorType),
+	}, []*types.Field{
+		types.NewField(base.Pos, nil, types.Types[types.TSTRING]),
+	})))
 }
 
 type typeAndStr struct {
@@ -1509,8 +1492,8 @@
 	// will be equal for the above checks, but different in DWARF output.
 	// Sort by source position to ensure deterministic order.
 	// See issues 27013 and 30202.
-	if a[i].t.Kind() == types.TINTER && a[i].t.AllMethods().Len() > 0 {
-		return a[i].t.AllMethods().Index(0).Pos.Before(a[j].t.AllMethods().Index(0).Pos)
+	if a[i].t.Kind() == types.TINTER && len(a[i].t.AllMethods()) > 0 {
+		return a[i].t.AllMethods()[0].Pos.Before(a[j].t.AllMethods()[0].Pos)
 	}
 	return false
 }
@@ -1734,7 +1717,7 @@
 		p.w.Repeat(elem.Size()/int64(types.PtrSize), count-1)
 
 	case types.TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
+		for _, t1 := range t.Fields() {
 			p.emit(t1.Type, offset+t1.Offset)
 		}
 	}
@@ -1754,30 +1737,6 @@
 	return typecheck.Expr(typecheck.NodAddr(x))
 }
 
-func CollectPTabs() {
-	if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
-		return
-	}
-	for _, exportn := range typecheck.Target.Exports {
-		s := exportn.Sym()
-		nn := ir.AsNode(s.Def)
-		if nn == nil {
-			continue
-		}
-		if nn.Op() != ir.ONAME {
-			continue
-		}
-		n := nn.(*ir.Name)
-		if !types.IsExported(s.Name) {
-			continue
-		}
-		if s.Pkg.Name != "main" {
-			continue
-		}
-		ptabs = append(ptabs, n)
-	}
-}
-
 // NeedEmit reports whether typ is a type that we need to emit code
 // for (e.g., runtime type descriptors, method wrappers).
 func NeedEmit(typ *types.Type) bool {
@@ -1893,7 +1852,7 @@
 	if ir.CurFunc.LSym == nil {
 		return
 	}
-	dot := n.X.(*ir.SelectorExpr)
+	dot := n.Fun.(*ir.SelectorExpr)
 	ityp := dot.X.Type()
 	if ityp.HasShape() {
 		// Here we're calling a method on a generic interface. Something like:
@@ -1916,17 +1875,8 @@
 		// some sort of fuzzy shape matching. For now, only use the name
 		// of the method for matching.
 		r := obj.Addrel(ir.CurFunc.LSym)
-		// We use a separate symbol just to tell the linker the method name.
-		// (The symbol itself is not needed in the final binary. Do not use
-		// staticdata.StringSym, which creates a content addessable symbol,
-		// which may have trailing zero bytes. This symbol doesn't need to
-		// be deduplicated anyway.)
-		name := dot.Sel.Name
-		var nameSym obj.LSym
-		nameSym.WriteString(base.Ctxt, 0, len(name), name)
-		objw.Global(&nameSym, int32(len(name)), obj.RODATA)
-		r.Sym = &nameSym
-		r.Type = objabi.R_USEGENERICIFACEMETHOD
+		r.Sym = staticdata.StringSymNoCommon(dot.Sel.Name)
+		r.Type = objabi.R_USENAMEDMETHOD
 		return
 	}
 
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 2eb1e7f..2233818 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -193,7 +193,7 @@
 		// input args need no code
 	case ssa.OpPhi:
 		ssagen.CheckLoweredPhi(v)
-	case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
+	case ssa.OpCopy, ssa.OpRISCV64MOVDreg:
 		if v.Type.IsMemory() {
 			return
 		}
@@ -278,7 +278,7 @@
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = rd
 	case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
-		ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
+		ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRAW, ssa.OpRISCV64SRL, ssa.OpRISCV64SRLW,
 		ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
 		ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
 		ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
@@ -332,7 +332,8 @@
 		p2.From.Reg = v.Reg1()
 		p2.To.Type = obj.TYPE_REG
 		p2.To.Reg = v.Reg1()
-	case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD:
+	case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD,
+		ssa.OpRISCV64FMADDS, ssa.OpRISCV64FMSUBS, ssa.OpRISCV64FNMADDS, ssa.OpRISCV64FNMSUBS:
 		r := v.Reg()
 		r1 := v.Args[0].Reg()
 		r2 := v.Args[1].Reg()
@@ -355,7 +356,7 @@
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
-		ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRLI, ssa.OpRISCV64SLTI,
+		ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRAIW, ssa.OpRISCV64SRLI, ssa.OpRISCV64SRLIW, ssa.OpRISCV64SLTI,
 		ssa.OpRISCV64SLTIU:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_CONST
@@ -694,6 +695,13 @@
 		p.To.Sym = ir.Syms.Duffcopy
 		p.To.Offset = v.AuxInt
 
+	case ssa.OpRISCV64LoweredPubBarrier:
+		// FENCE
+		s.Prog(v.Op.Asm())
+
+	case ssa.OpRISCV64LoweredRound32F, ssa.OpRISCV64LoweredRound64F:
+		// input is already rounded
+
 	case ssa.OpClobber, ssa.OpClobberReg:
 		// TODO: implement for clobberdead experiment. Nop is ok for now.
 
diff --git a/src/cmd/compile/internal/rttype/rttype.go b/src/cmd/compile/internal/rttype/rttype.go
new file mode 100644
index 0000000..cdc399d
--- /dev/null
+++ b/src/cmd/compile/internal/rttype/rttype.go
@@ -0,0 +1,283 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rttype allows the compiler to share type information with
+// the runtime. The shared type information is stored in
+// internal/abi. This package translates those types from the host
+// machine on which the compiler runs to the target machine on which
+// the compiled program will run. In particular, this package handles
+// layout differences between, e.g., a 64-bit compiler and a 32-bit
+// target.
+package rttype
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"internal/abi"
+	"reflect"
+)
+
+// The type structures shared with the runtime.
+var Type *types.Type
+
+var ArrayType *types.Type
+var ChanType *types.Type
+var FuncType *types.Type
+var InterfaceType *types.Type
+var MapType *types.Type
+var PtrType *types.Type
+var SliceType *types.Type
+var StructType *types.Type
+
+// Types that are parts of the types above.
+var IMethod *types.Type
+var Method *types.Type
+var StructField *types.Type
+var UncommonType *types.Type
+
+// Type switches and asserts
+var InterfaceSwitch *types.Type
+var TypeAssert *types.Type
+
+func Init() {
+	// Note: this has to be called explicitly instead of being
+	// an init function so it runs after the types package has
+	// been properly initialized.
+	Type = fromReflect(reflect.TypeOf(abi.Type{}))
+	ArrayType = fromReflect(reflect.TypeOf(abi.ArrayType{}))
+	ChanType = fromReflect(reflect.TypeOf(abi.ChanType{}))
+	FuncType = fromReflect(reflect.TypeOf(abi.FuncType{}))
+	InterfaceType = fromReflect(reflect.TypeOf(abi.InterfaceType{}))
+	MapType = fromReflect(reflect.TypeOf(abi.MapType{}))
+	PtrType = fromReflect(reflect.TypeOf(abi.PtrType{}))
+	SliceType = fromReflect(reflect.TypeOf(abi.SliceType{}))
+	StructType = fromReflect(reflect.TypeOf(abi.StructType{}))
+
+	IMethod = fromReflect(reflect.TypeOf(abi.Imethod{}))
+	Method = fromReflect(reflect.TypeOf(abi.Method{}))
+	StructField = fromReflect(reflect.TypeOf(abi.StructField{}))
+	UncommonType = fromReflect(reflect.TypeOf(abi.UncommonType{}))
+
+	InterfaceSwitch = fromReflect(reflect.TypeOf(abi.InterfaceSwitch{}))
+	TypeAssert = fromReflect(reflect.TypeOf(abi.TypeAssert{}))
+
+	// Make sure abi functions are correct. These functions are used
+	// by the linker which doesn't have the ability to do type layout,
+	// so we check the functions it uses here.
+	ptrSize := types.PtrSize
+	if got, want := int64(abi.CommonSize(ptrSize)), Type.Size(); got != want {
+		base.Fatalf("abi.CommonSize() == %d, want %d", got, want)
+	}
+	if got, want := int64(abi.StructFieldSize(ptrSize)), StructField.Size(); got != want {
+		base.Fatalf("abi.StructFieldSize() == %d, want %d", got, want)
+	}
+	if got, want := int64(abi.UncommonSize()), UncommonType.Size(); got != want {
+		base.Fatalf("abi.UncommonSize() == %d, want %d", got, want)
+	}
+	if got, want := int64(abi.TFlagOff(ptrSize)), Type.OffsetOf("TFlag"); got != want {
+		base.Fatalf("abi.TFlagOff() == %d, want %d", got, want)
+	}
+}
+
+// fromReflect translates from a host type to the equivalent target type.
+func fromReflect(rt reflect.Type) *types.Type {
+	t := reflectToType(rt)
+	types.CalcSize(t)
+	return t
+}
+
+// reflectToType converts from a reflect.Type (which is a compiler
+// host type) to a *types.Type, which is a target type. The result
+// must be CalcSize'd before use.
+func reflectToType(rt reflect.Type) *types.Type {
+	switch rt.Kind() {
+	case reflect.Bool:
+		return types.Types[types.TBOOL]
+	case reflect.Int:
+		return types.Types[types.TINT]
+	case reflect.Int32:
+		return types.Types[types.TINT32]
+	case reflect.Uint8:
+		return types.Types[types.TUINT8]
+	case reflect.Uint16:
+		return types.Types[types.TUINT16]
+	case reflect.Uint32:
+		return types.Types[types.TUINT32]
+	case reflect.Uintptr:
+		return types.Types[types.TUINTPTR]
+	case reflect.Ptr, reflect.Func, reflect.UnsafePointer:
+		// TODO: there's no mechanism to distinguish different pointer types,
+		// so we treat them all as unsafe.Pointer.
+		return types.Types[types.TUNSAFEPTR]
+	case reflect.Slice:
+		return types.NewSlice(reflectToType(rt.Elem()))
+	case reflect.Array:
+		return types.NewArray(reflectToType(rt.Elem()), int64(rt.Len()))
+	case reflect.Struct:
+		fields := make([]*types.Field, rt.NumField())
+		for i := 0; i < rt.NumField(); i++ {
+			f := rt.Field(i)
+			ft := reflectToType(f.Type)
+			fields[i] = &types.Field{Sym: &types.Sym{Name: f.Name}, Type: ft}
+		}
+		return types.NewStruct(fields)
+	default:
+		base.Fatalf("unhandled kind %s", rt.Kind())
+		return nil
+	}
+}
+
+// A Cursor represents a typed location inside a static variable where we
+// are going to write.
+type Cursor struct {
+	lsym   *obj.LSym
+	offset int64
+	typ    *types.Type
+}
+
+// NewCursor returns a cursor starting at lsym+off and having type t.
+func NewCursor(lsym *obj.LSym, off int64, t *types.Type) Cursor {
+	return Cursor{lsym: lsym, offset: off, typ: t}
+}
+
+// WritePtr writes a pointer "target" to the component at the location specified by c.
+func (c Cursor) WritePtr(target *obj.LSym) {
+	if c.typ.Kind() != types.TUNSAFEPTR {
+		base.Fatalf("can't write ptr, it has kind %s", c.typ.Kind())
+	}
+	if target == nil {
+		objw.Uintptr(c.lsym, int(c.offset), 0)
+	} else {
+		objw.SymPtr(c.lsym, int(c.offset), target, 0)
+	}
+}
+func (c Cursor) WriteUintptr(val uint64) {
+	if c.typ.Kind() != types.TUINTPTR {
+		base.Fatalf("can't write uintptr, it has kind %s", c.typ.Kind())
+	}
+	objw.Uintptr(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteUint32(val uint32) {
+	if c.typ.Kind() != types.TUINT32 {
+		base.Fatalf("can't write uint32, it has kind %s", c.typ.Kind())
+	}
+	objw.Uint32(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteUint16(val uint16) {
+	if c.typ.Kind() != types.TUINT16 {
+		base.Fatalf("can't write uint16, it has kind %s", c.typ.Kind())
+	}
+	objw.Uint16(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteUint8(val uint8) {
+	if c.typ.Kind() != types.TUINT8 {
+		base.Fatalf("can't write uint8, it has kind %s", c.typ.Kind())
+	}
+	objw.Uint8(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteInt(val int64) {
+	if c.typ.Kind() != types.TINT {
+		base.Fatalf("can't write int, it has kind %s", c.typ.Kind())
+	}
+	objw.Uintptr(c.lsym, int(c.offset), uint64(val))
+}
+func (c Cursor) WriteInt32(val int32) {
+	if c.typ.Kind() != types.TINT32 {
+		base.Fatalf("can't write int32, it has kind %s", c.typ.Kind())
+	}
+	objw.Uint32(c.lsym, int(c.offset), uint32(val))
+}
+func (c Cursor) WriteBool(val bool) {
+	if c.typ.Kind() != types.TBOOL {
+		base.Fatalf("can't write bool, it has kind %s", c.typ.Kind())
+	}
+	objw.Bool(c.lsym, int(c.offset), val)
+}
+
+// WriteSymPtrOff writes a "pointer" to the given symbol. The symbol
+// is encoded as a uint32 offset from the start of the section.
+func (c Cursor) WriteSymPtrOff(target *obj.LSym, weak bool) {
+	if c.typ.Kind() != types.TINT32 && c.typ.Kind() != types.TUINT32 {
+		base.Fatalf("can't write SymPtr, it has kind %s", c.typ.Kind())
+	}
+	if target == nil {
+		objw.Uint32(c.lsym, int(c.offset), 0)
+	} else if weak {
+		objw.SymPtrWeakOff(c.lsym, int(c.offset), target)
+	} else {
+		objw.SymPtrOff(c.lsym, int(c.offset), target)
+	}
+}
+
+// WriteSlice writes a slice header to c. The pointer is target+off, the len and cap fields are given.
+func (c Cursor) WriteSlice(target *obj.LSym, off, len, cap int64) {
+	if c.typ.Kind() != types.TSLICE {
+		base.Fatalf("can't write slice, it has kind %s", c.typ.Kind())
+	}
+	objw.SymPtr(c.lsym, int(c.offset), target, int(off))
+	objw.Uintptr(c.lsym, int(c.offset)+types.PtrSize, uint64(len))
+	objw.Uintptr(c.lsym, int(c.offset)+2*types.PtrSize, uint64(cap))
+	// TODO: ability to switch len&cap. Maybe not needed here, as every caller
+	// passes the same thing for both?
+	if len != cap {
+		base.Fatalf("len != cap (%d != %d)", len, cap)
+	}
+}
+
+// Reloc adds a relocation from the current cursor position.
+// Reloc fills in Off and Siz fields. Caller should fill in the rest (Type, others).
+func (c Cursor) Reloc() *obj.Reloc {
+	r := obj.Addrel(c.lsym)
+	r.Off = int32(c.offset)
+	r.Siz = uint8(c.typ.Size())
+	return r
+}
+
+// Field selects the field with the given name from the struct pointed to by c.
+func (c Cursor) Field(name string) Cursor {
+	if c.typ.Kind() != types.TSTRUCT {
+		base.Fatalf("can't call Field on non-struct %v", c.typ)
+	}
+	for _, f := range c.typ.Fields() {
+		if f.Sym.Name == name {
+			return Cursor{lsym: c.lsym, offset: c.offset + f.Offset, typ: f.Type}
+		}
+	}
+	base.Fatalf("couldn't find field %s in %v", name, c.typ)
+	return Cursor{}
+}
+
+// An ArrayCursor represents a typed location inside a static variable
+// where we are going to write a sequence of identically-typed elements.
+type ArrayCursor struct {
+	c Cursor // cursor pointing at first element
+	n int    // number of elements
+}
+
+// NewArrayCursor returns a cursor starting at lsym+off and having n copies of type t.
+func NewArrayCursor(lsym *obj.LSym, off int64, t *types.Type, n int) ArrayCursor {
+	return ArrayCursor{
+		c: NewCursor(lsym, off, t),
+		n: n,
+	}
+}
+
+// Elem selects element i of the array pointed to by c.
+func (a ArrayCursor) Elem(i int) Cursor {
+	if i < 0 || i >= a.n {
+		base.Fatalf("element index %d out of range [0:%d]", i, a.n)
+	}
+	return Cursor{lsym: a.c.lsym, offset: a.c.offset + int64(i)*a.c.typ.Size(), typ: a.c.typ}
+}
+
+// ModifyArray converts a cursor pointing at a type [k]T to a cursor pointing
+// at a type [n]T.
+// Also returns the size delta, aka (n-k)*sizeof(T).
+func (c Cursor) ModifyArray(n int) (ArrayCursor, int64) {
+	if c.typ.Kind() != types.TARRAY {
+		base.Fatalf("can't call ModifyArray on non-array %v", c.typ)
+	}
+	k := c.typ.NumElem()
+	return ArrayCursor{c: Cursor{lsym: c.lsym, offset: c.offset, typ: c.typ.Elem()}, n: n}, (int64(n) - k) * c.typ.Elem().Size()
+}
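A usage note on the cursor API defined in this new file (editorial, not part of the patch): callers address descriptor fields by name, and each Write helper checks the field's kind, so a layout mismatch between the compiler and internal/abi fails loudly via base.Fatalf rather than silently corrupting the descriptor. A fragment-level sketch, mirroring the TCHAN case of writeType above; lsym, t and elemSym stand in for a descriptor symbol, a channel type and its already-written element descriptor.

// Fill in an abi.ChanType descriptor starting at offset 0 of lsym.
c := rttype.NewCursor(lsym, 0, rttype.ChanType)
dcommontype(c.Field("Type"), t)             // shared abi.Type header
c.Field("Elem").WritePtr(elemSym)           // pointer to the element's descriptor
c.Field("Dir").WriteInt(int64(t.ChanDir())) // channel direction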
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
index 5f9b85f..aac6873 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
@@ -82,8 +82,8 @@
 (Ctz32 x)     && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
 (Ctz64 <t> x) && buildcfg.GOAMD64 <  3 => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
 (Ctz32 x)     && buildcfg.GOAMD64 <  3 => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
-(Ctz16 x) => (BSFL (BTSLconst <typ.UInt32> [16] x))
-(Ctz8  x) => (BSFL (BTSLconst <typ.UInt32> [ 8] x))
+(Ctz16 x) => (BSFL (ORLconst <typ.UInt32> [1<<16] x))
+(Ctz8  x) => (BSFL (ORLconst <typ.UInt32> [1<<8 ] x))
 
 (Ctz64NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
 (Ctz32NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
@@ -172,6 +172,20 @@
 
 (Round(32|64)F ...) => (Copy ...)
 
+// Floating-point min is tricky, as the hardware op isn't right for various special
+// cases (-0 and NaN). We use two hardware ops organized just right to make the
+// result come out how we want it. See https://github.com/golang/go/issues/59488#issuecomment-1553493207
+// (although that comment isn't exactly right, as the value overwritten is not simulated correctly).
+//    t1 = MINSD x, y   => incorrect if x==NaN or x==-0,y==+0
+//    t2 = MINSD t1, x  => fixes x==NaN case
+//   res = POR t1, t2   => fixes x==-0,y==+0 case
+// Note that this trick depends on the special property that (NaN OR x) produces a NaN (although
+// it might not produce the same NaN as the input).
+(Min(64|32)F <t> x y) => (POR (MINS(D|S) <t> (MINS(D|S) <t> x y) x) (MINS(D|S) <t> x y))
+// Floating-point max is even trickier. Punt to using min instead.
+// max(x,y) == -min(-x,-y)
+(Max(64|32)F <t> x y) => (Neg(64|32)F <t> (Min(64|32)F <t> (Neg(64|32)F <t> x) (Neg(64|32)F <t> y)))
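For reference, the special cases the two-instruction sequence above must preserve can be written out in ordinary Go using only the standard library and the builtin min (Go 1.21+). This snippet is illustrative and not part of the patch; a lone hardware MINSD/MINSS gets both cases wrong because it prefers one fixed operand when an input is NaN or when comparing -0 and +0.

package main

import (
	"fmt"
	"math"
)

func main() {
	negZero := math.Copysign(0, -1)
	// Go's builtin min must treat -0 as smaller than +0 ...
	fmt.Println(math.Signbit(min(negZero, 0.0))) // true
	// ... and must propagate NaN from either operand.
	fmt.Println(math.IsNaN(min(math.NaN(), 1.0)), math.IsNaN(min(1.0, math.NaN()))) // true true
}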
+
 (CvtBoolToUint8 ...) => (Copy ...)
 
 // Lowering shifts
@@ -289,10 +303,13 @@
 (Move [10] dst src mem) =>
 	(MOVWstore [8] dst (MOVWload [8] src mem)
 		(MOVQstore dst (MOVQload src mem) mem))
+(Move [11] dst src mem) =>
+	(MOVLstore [7] dst (MOVLload [7] src mem)
+		(MOVQstore dst (MOVQload src mem) mem))
 (Move [12] dst src mem) =>
 	(MOVLstore [8] dst (MOVLload [8] src mem)
 		(MOVQstore dst (MOVQload src mem) mem))
-(Move [s] dst src mem) && s == 11 || s >= 13 && s <= 15 =>
+(Move [s] dst src mem) && s >= 13 && s <= 15 =>
 	(MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
 		(MOVQstore dst (MOVQload src mem) mem))
 
@@ -366,7 +383,23 @@
 			(MOVQstoreconst [makeValAndOff(0,8)] destptr
 				(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
 
-(Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE =>
+(Zero [9] destptr mem) && config.useSSE =>
+	(MOVBstoreconst [makeValAndOff(0,8)] destptr
+		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [10] destptr mem) && config.useSSE =>
+	(MOVWstoreconst [makeValAndOff(0,8)] destptr
+		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [11] destptr mem) && config.useSSE =>
+	(MOVLstoreconst [makeValAndOff(0,7)] destptr
+		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [12] destptr mem) && config.useSSE =>
+	(MOVLstoreconst [makeValAndOff(0,8)] destptr
+		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [s] destptr mem) && s > 12 && s < 16 && config.useSSE =>
 	(MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr
 		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
 
@@ -645,29 +678,16 @@
 // Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
 (OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
 (XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
-
-// Convert ORconst into BTS, if the code gets smaller, with boundary being
-// (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
-((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
-    => (BT(S|C)Qconst [int8(log32(c))] x)
-((ORL|XORL)const [c] x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
-    => (BT(S|C)Lconst [int8(log32(c))] x)
-((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
-    => (BT(S|C)Qconst [int8(log64(c))] x)
-((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
-    => (BT(S|C)Lconst [int8(log32(c))] x)
+// Note: only convert OR/XOR to BTS/BTC if the constant wouldn't fit in
+// the constant field of the OR/XOR instruction. See issue 61694.
+((OR|XOR)Q (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64(c))] x)
 
 // Recognize bit clearing: a &^= 1<<b
 (AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
 (ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
-(ANDQconst [c] x) && isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
-    => (BTRQconst [int8(log32(^c))] x)
-(ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
-    => (BTRLconst [int8(log32(^c))] x)
-(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
-    => (BTRQconst [int8(log64(^c))] x)
-(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
-    => (BTRLconst [int8(log32(^c))] x)
+// Note: only convert AND to BTR if the constant wouldn't fit in
+// the constant field of the AND instruction. See issue 61694.
+(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64(^c))] x)
 
 // Special-case bit patterns on first/last bit.
 // generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
@@ -681,9 +701,9 @@
 
 // Special case resetting first/last bit
 (SHL(L|Q)const [1] (SHR(L|Q)const [1] x))
-	=> (BTR(L|Q)const [0] x)
+	=> (AND(L|Q)const [-2] x)
 (SHRLconst [1] (SHLLconst [1] x))
-	=> (BTRLconst [31] x)
+	=> (ANDLconst [0x7fffffff] x)
 (SHRQconst [1] (SHLQconst [1] x))
 	=> (BTRQconst [63] x)
 
@@ -717,10 +737,10 @@
     => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
 
 // Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
-(BTS(Q|L)const [c] (BTR(Q|L)const [c] x)) => (BTS(Q|L)const [c] x)
-(BTS(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTS(Q|L)const [c] x)
-(BTR(Q|L)const [c] (BTS(Q|L)const [c] x)) => (BTR(Q|L)const [c] x)
-(BTR(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTR(Q|L)const [c] x)
+(BTSQconst [c] (BTRQconst [c] x)) => (BTSQconst [c] x)
+(BTSQconst [c] (BTCQconst [c] x)) => (BTSQconst [c] x)
+(BTRQconst [c] (BTSQconst [c] x)) => (BTRQconst [c] x)
+(BTRQconst [c] (BTCQconst [c] x)) => (BTRQconst [c] x)
 
 // Fold boolean negation into SETcc.
 (XORLconst [1] (SETNE x)) => (SETEQ x)
@@ -764,31 +784,6 @@
 (XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x)
 (OR(L|Q)const  [c] (OR(L|Q)const  [d] x)) => (OR(L|Q)const  [c | d] x)
 
-(BTRLconst [c] (ANDLconst [d] x)) => (ANDLconst [d &^ (1<<uint32(c))] x)
-(ANDLconst [c] (BTRLconst [d] x)) => (ANDLconst [c &^ (1<<uint32(d))] x)
-(BTRLconst [c] (BTRLconst [d] x)) => (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
-
-(BTCLconst [c] (XORLconst [d] x)) => (XORLconst [d ^ 1<<uint32(c)] x)
-(XORLconst [c] (BTCLconst [d] x)) => (XORLconst [c ^ 1<<uint32(d)] x)
-(BTCLconst [c] (BTCLconst [d] x)) => (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
-
-(BTSLconst [c] (ORLconst  [d] x)) => (ORLconst [d | 1<<uint32(c)] x)
-(ORLconst  [c] (BTSLconst [d] x)) => (ORLconst [c | 1<<uint32(d)] x)
-(BTSLconst [c] (BTSLconst [d] x)) => (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
-
-(BTRQconst [c] (ANDQconst [d] x)) && is32Bit(int64(d) &^ (1<<uint32(c)))     => (ANDQconst [d &^ (1<<uint32(c))] x)
-(ANDQconst [c] (BTRQconst [d] x)) && is32Bit(int64(c) &^ (1<<uint32(d)))     => (ANDQconst [c &^ (1<<uint32(d))] x)
-(BTRQconst [c] (BTRQconst [d] x)) && is32Bit(^(1<<uint32(c) | 1<<uint32(d))) => (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
-
-(BTCQconst [c] (XORQconst [d] x)) && is32Bit(int64(d) ^ 1<<uint32(c))     => (XORQconst [d ^ 1<<uint32(c)] x)
-(XORQconst [c] (BTCQconst [d] x)) && is32Bit(int64(c) ^ 1<<uint32(d))     => (XORQconst [c ^ 1<<uint32(d)] x)
-(BTCQconst [c] (BTCQconst [d] x)) && is32Bit(1<<uint32(c) ^ 1<<uint32(d)) => (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
-
-(BTSQconst [c] (ORQconst  [d] x)) && is32Bit(int64(d) | 1<<uint32(c))     => (ORQconst [d | 1<<uint32(c)] x)
-(ORQconst  [c] (BTSQconst [d] x)) && is32Bit(int64(c) | 1<<uint32(d))     => (ORQconst [c | 1<<uint32(d)] x)
-(BTSQconst [c] (BTSQconst [d] x)) && is32Bit(1<<uint32(c) | 1<<uint32(d)) => (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
-
-
 (MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
 (MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x)
 
@@ -1408,11 +1403,8 @@
 (NOTQ (MOVQconst [c])) => (MOVQconst [^c])
 (NOTL (MOVLconst [c])) => (MOVLconst [^c])
 (BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
-(BTSLconst [c] (MOVLconst [d])) => (MOVLconst [d|(1<<uint32(c))])
 (BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
-(BTRLconst [c] (MOVLconst [d])) => (MOVLconst [d&^(1<<uint32(c))])
 (BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])
-(BTCLconst [c] (MOVLconst [d])) => (MOVLconst [d^(1<<uint32(c))])
 
 // If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
 // but we can still constant-fold.
@@ -1476,6 +1468,7 @@
   && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
   && a.Val() == 0
   && c.Val() == 0
+  && setPos(v, x.Pos)
   && clobber(x)
   => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
 (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
@@ -1484,39 +1477,10 @@
   && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
   && a.Val() == 0
   && c.Val() == 0
+  && setPos(v, x.Pos)
   && clobber(x)
   => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
 
-(MOVBstore [i] {s} p
-  x1:(MOVBload [j] {s2} p2 mem)
-    mem2:(MOVBstore [i-1] {s} p
-      x2:(MOVBload [j-1] {s2} p2 mem) mem))
-  && x1.Uses == 1
-  && x2.Uses == 1
-  && mem2.Uses == 1
-  && clobber(x1, x2, mem2)
-  => (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
-
-(MOVWstore [i] {s} p
-  x1:(MOVWload [j] {s2} p2 mem)
-    mem2:(MOVWstore [i-2] {s} p
-      x2:(MOVWload [j-2] {s2} p2 mem) mem))
-  && x1.Uses == 1
-  && x2.Uses == 1
-  && mem2.Uses == 1
-  && clobber(x1, x2, mem2)
-  => (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
-
-(MOVLstore [i] {s} p
-  x1:(MOVLload [j] {s2} p2 mem)
-    mem2:(MOVLstore [i-4] {s} p
-      x2:(MOVLload [j-4] {s2} p2 mem) mem))
-  && x1.Uses == 1
-  && x2.Uses == 1
-  && mem2.Uses == 1
-  && clobber(x1, x2, mem2)
-  => (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
-
 // Merge load and op
 // TODO: add indexed variants?
 ((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
@@ -1529,6 +1493,8 @@
 (MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
 (MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
 	((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr x:(BT(S|R|C)Qconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) && x.Uses == 1 && l.Uses == 1 && clobber(x, l) =>
+	(BT(S|R|C)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
 
 // Merge ADDQconst and LEAQ into atomic loads.
 (MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
index d8d0225..6061719 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
@@ -399,12 +399,27 @@
 		{name: "BTSQ", argLength: 2, reg: gp21, asm: "BTSQ", resultInArg0: true, clobberFlags: true},                   // set bit arg1%64 in arg0
 		{name: "BTLconst", argLength: 1, reg: gp1flags, asm: "BTL", typ: "Flags", aux: "Int8"},                         // test whether bit auxint in arg0 is set, 0 <= auxint < 32
 		{name: "BTQconst", argLength: 1, reg: gp1flags, asm: "BTQ", typ: "Flags", aux: "Int8"},                         // test whether bit auxint in arg0 is set, 0 <= auxint < 64
-		{name: "BTCLconst", argLength: 1, reg: gp11, asm: "BTCL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 0 <= auxint < 32
-		{name: "BTCQconst", argLength: 1, reg: gp11, asm: "BTCQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 0 <= auxint < 64
-		{name: "BTRLconst", argLength: 1, reg: gp11, asm: "BTRL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 0 <= auxint < 32
-		{name: "BTRQconst", argLength: 1, reg: gp11, asm: "BTRQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 0 <= auxint < 64
-		{name: "BTSLconst", argLength: 1, reg: gp11, asm: "BTSL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 32
-		{name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 64
+		{name: "BTCQconst", argLength: 1, reg: gp11, asm: "BTCQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 31 <= auxint < 64
+		{name: "BTRQconst", argLength: 1, reg: gp11, asm: "BTRQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 31 <= auxint < 64
+		{name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 31 <= auxint < 64
+
+		// BT[SRC]Qconstmodify
+		//
+		//  S: set bit
+		//  R: reset (clear) bit
+		//  C: complement bit
+		//
+		// Apply operation to bit ValAndOff(AuxInt).Val() in the 64 bits at
+		// memory address arg0+ValAndOff(AuxInt).Off()+aux
+		// Bit index must be in range (31-63).
+		// (We use OR/AND/XOR for thinner targets and lower bit indexes.)
+		// arg1=mem, returns mem
+		//
+		// Note that there aren't non-const versions of these instructions.
+		// Well, there are such instructions, but they are slow and weird so we don't use them.
+		{name: "BTSQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+		{name: "BTRQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+		{name: "BTCQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
 
 		// TESTx: compare (arg0 & arg1) to 0
 		{name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"},
@@ -681,6 +696,12 @@
 		// Any use must be preceded by a successful check of runtime.support_fma.
 		{name: "VFMADD231SD", argLength: 3, reg: fp31, resultInArg0: true, asm: "VFMADD231SD"},
 
+		// Note that these operations don't exactly match the semantics of Go's
+		// builtin min. In particular, these aren't commutative, because for various
+		// special cases the 2nd argument is preferred.
+		{name: "MINSD", argLength: 2, reg: fp21, resultInArg0: true, asm: "MINSD"}, // min(arg0,arg1)
+		{name: "MINSS", argLength: 2, reg: fp21, resultInArg0: true, asm: "MINSS"}, // min(arg0,arg1)
+
 		{name: "SBBQcarrymask", argLength: 1, reg: flagsgp, asm: "SBBQ"}, // (int64)(-1) if carry is set, 0 if carry is clear.
 		{name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear.
 		// Note: SBBW and SBBB are subsumed by SBBL
@@ -697,16 +718,27 @@
 		{name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
 		{name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"},  // extract if overflow flag is set from arg0
 		// Variants that store result to memory
-		{name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
-		{name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
-		{name: "SETLstore", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem
-		{name: "SETLEstore", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem
-		{name: "SETGstore", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem
-		{name: "SETGEstore", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem
-		{name: "SETBstore", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem
-		{name: "SETBEstore", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem
-		{name: "SETAstore", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem
-		{name: "SETAEstore", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},               // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},               // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETLstore", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},                // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETLEstore", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},               // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETGstore", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},                // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETGEstore", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},               // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETBstore", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},                // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETBEstore", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},               // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETAstore", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},                // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETAEstore", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},               // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem
+		{name: "SETEQstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETEQ", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract == condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+		{name: "SETNEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETNE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract != condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+		{name: "SETLstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLT", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"},  // extract signed < condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+		{name: "SETLEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed <= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+		{name: "SETGstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETGT", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"},  // extract signed > condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+		{name: "SETGEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETGE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed >= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+		{name: "SETBstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETCS", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"},  // extract unsigned < condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+		{name: "SETBEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLS", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned <= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+		{name: "SETAstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETHI", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"},  // extract unsigned > condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+		{name: "SETAEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETCC", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned >= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+
 		// Need different opcodes for floating point conditions because
 		// any comparison involving a NaN is always FALSE and thus
 		// the patterns for inverting conditions cannot be used.
@@ -746,7 +778,8 @@
 		{name: "MOVLi2f", argLength: 1, reg: gpfp, typ: "Float32"}, // move 32 bits from int to float reg
 		{name: "MOVLf2i", argLength: 1, reg: fpgp, typ: "UInt32"},  // move 32 bits from float to int reg, zero extend
 
-		{name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
+		{name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs (for float negation).
+		{name: "POR", argLength: 2, reg: fp21, asm: "POR", commutative: true, resultInArg0: true},   // inclusive or, applied to X regs (for float min/max).
 
 		{name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
 		{name: "LEAL", argLength: 1, reg: gp11sb, asm: "LEAL", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM.rules b/src/cmd/compile/internal/ssa/_gen/ARM.rules
index a60afb0..ed0ed80 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM.rules
@@ -66,17 +66,17 @@
 
 // count trailing zero for ARMv5 and ARMv6
 // 32 - CLZ(x&-x - 1)
-(Ctz32 <t> x) && buildcfg.GOARM<=6 =>
+(Ctz32 <t> x) && buildcfg.GOARM.Version<=6 =>
 	(RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
-(Ctz16 <t> x) && buildcfg.GOARM<=6 =>
+(Ctz16 <t> x) && buildcfg.GOARM.Version<=6 =>
 	(RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
-(Ctz8 <t> x) && buildcfg.GOARM<=6 =>
+(Ctz8 <t> x) && buildcfg.GOARM.Version<=6 =>
 	(RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
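The identity these ARMv5/ARMv6 rules rely on, ctz(x) = 32 - clz((x & -x) - 1), can be sanity-checked in ordinary Go, with math/bits standing in for the CLZ instruction (illustrative, not part of the patch).

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	for _, x := range []uint32{1, 8, 0xfff0, 0x80000000} {
		viaClz := 32 - bits.LeadingZeros32((x&-x)-1) // x&-x isolates the lowest set bit
		fmt.Println(viaClz == bits.TrailingZeros32(x))
	}
}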
 
 // count trailing zero for ARMv7
-(Ctz32 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <t> x))
-(Ctz16 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
-(Ctz8 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+(Ctz32 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <t> x))
+(Ctz16 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
 
 // bit length
 (BitLen32 <t> x) => (RSBconst [32] (CLZ <t> x))
@@ -90,13 +90,13 @@
 // t5 = x right rotate 8 bits  -- (d,   a,   b,   c  )
 // result = t4 ^ t5            -- (d,   c,   b,   a  )
 // using shifted ops this can be done in 4 instructions.
-(Bswap32 <t> x) && buildcfg.GOARM==5 =>
+(Bswap32 <t> x) && buildcfg.GOARM.Version==5 =>
 	(XOR <t>
 		(SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8])
 		(SRRconst <t> x [8]))
 
 // byte swap for ARMv6 and above
-(Bswap32 x) && buildcfg.GOARM>=6 => (REV x)
+(Bswap32 x) && buildcfg.GOARM.Version>=6 => (REV x)
 
 // boolean ops -- booleans are represented with 0=false, 1=true
 (AndB ...) => (AND ...)
@@ -741,10 +741,10 @@
 (SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x)
 (ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x)
 (BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x)
-(ADDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
-(SUBconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
-(ANDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
-(BICconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
+(ADDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
+(SUBconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
+(ANDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
+(BICconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
 (ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d])
 (ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
 (ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
@@ -1139,7 +1139,7 @@
 // UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by
 // ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL.
 ((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x)
-((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && buildcfg.GOARM>=6 => (REV16 x)
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && buildcfg.GOARM.Version>=6 => (REV16 x)
 
 // use indexed loads and stores
 (MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem)
@@ -1209,25 +1209,25 @@
 (BIC x x) => (MOVWconst [0])
 
 (ADD (MUL x y) a) => (MULA x y a)
-(SUB a (MUL x y)) && buildcfg.GOARM == 7 => (MULS x y a)
-(RSB (MUL x y) a) && buildcfg.GOARM == 7 => (MULS x y a)
+(SUB a (MUL x y)) && buildcfg.GOARM.Version == 7 => (MULS x y a)
+(RSB (MUL x y) a) && buildcfg.GOARM.Version == 7 => (MULS x y a)
 
-(NEGF (MULF x y)) && buildcfg.GOARM >= 6 => (NMULF x y)
-(NEGD (MULD x y)) && buildcfg.GOARM >= 6 => (NMULD x y)
-(MULF (NEGF x) y) && buildcfg.GOARM >= 6 => (NMULF x y)
-(MULD (NEGD x) y) && buildcfg.GOARM >= 6 => (NMULD x y)
+(NEGF (MULF x y)) && buildcfg.GOARM.Version >= 6 => (NMULF x y)
+(NEGD (MULD x y)) && buildcfg.GOARM.Version >= 6 => (NMULD x y)
+(MULF (NEGF x) y) && buildcfg.GOARM.Version >= 6 => (NMULF x y)
+(MULD (NEGD x) y) && buildcfg.GOARM.Version >= 6 => (NMULD x y)
 (NMULF (NEGF x) y) => (MULF x y)
 (NMULD (NEGD x) y) => (MULD x y)
 
 // the result will overwrite the addend, since they are in the same register
-(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y)
-(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y)
-(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y)
-(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y)
-(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y)
-(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y)
-(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y)
-(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y)
+(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y)
+(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y)
+(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y)
+(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y)
+(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y)
+(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y)
+(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y)
+(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y)
 
 (AND x (MVN y)) => (BIC x y)
 
@@ -1259,8 +1259,8 @@
 (CMPD x (MOVDconst [0])) => (CMPD0 x)
 
 // bit extraction
-(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
-(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
+(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
+(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
 
 // comparison simplification
 ((EQ|NE) (CMP x (RSBconst [0] y))) => ((EQ|NE) (CMN x y)) // sense of carry bit not preserved; see also #50854
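
For reference, the ARMv5 Bswap32 rule earlier in this hunk encodes the classic four-operation byte reverse sketched in the comment above it (t4 ^ t5). A small executable model of that exact sequence in ordinary Go, not compiler code:

package main

import (
	"fmt"
	"math/bits"
)

// bswap32LikeARMv5 mirrors the rewrite
//   (XOR (SRLconst (BICconst (XOR x (SRRconst [16] x)) [0xff0000]) [8]) (SRRconst x [8]))
// which reverses the bytes of x without the REV instruction (ARMv6+ only).
func bswap32LikeARMv5(x uint32) uint32 {
	t1 := x ^ bits.RotateLeft32(x, -16) // XOR x (SRRconst [16] x)
	t2 := t1 &^ 0x00ff0000              // BICconst [0xff0000]
	t3 := t2 >> 8                       // SRLconst [8]
	return t3 ^ bits.RotateLeft32(x, -8) // XOR ... (SRRconst [8] x)
}

func main() {
	fmt.Printf("%#x\n", bswap32LikeARMv5(0x11223344)) // 0x44332211
	fmt.Println(bswap32LikeARMv5(0x11223344) == bits.ReverseBytes32(0x11223344))
}
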
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64.rules b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
index 8cf6f67..c5ee028 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
@@ -61,6 +61,9 @@
 
 (Sqrt32 ...) => (FSQRTS ...)
 
+(Min(64|32)F ...) => (FMIN(D|S) ...)
+(Max(64|32)F ...) => (FMAX(D|S) ...)
+
 // lowering rotates
 // we do rotate detection in generic rules, if the following rules need to be changed, check generic rules first.
 (RotateLeft8  <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
@@ -482,8 +485,8 @@
 			(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
 				(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))
 
-(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem)) && x.Uses == 1 && clobber(x) => (MOVQstorezero {s} [i] ptr mem)
-(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem)) && x.Uses == 1 && clobber(x) => (MOVQstorezero {s} [i-8] ptr mem)
+(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem)) && x.Uses == 1 && setPos(v, x.Pos) && clobber(x) => (MOVQstorezero {s} [i] ptr mem)
+(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem)) && x.Uses == 1 && setPos(v, x.Pos) && clobber(x) => (MOVQstorezero {s} [i-8] ptr mem)
 
 // strip off fractional word move
 (Move [s] dst src mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
@@ -1184,7 +1187,7 @@
 
 // mul-neg => mneg
 (NEG  (MUL  x y)) => (MNEG  x y)
-(NEG  (MULW x y)) => (MNEGW x y)
+(NEG  (MULW x y)) && v.Type.Size() <= 4 => (MNEGW x y)
 (MUL  (NEG  x) y) => (MNEG  x y)
 (MULW (NEG  x) y) => (MNEGW x y)
 
@@ -1194,10 +1197,10 @@
 (ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
 (SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
 
-(ADD a l:(MULW  x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y)
-(SUB a l:(MULW  x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y)
-(ADD a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y)
-(SUB a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y)
+(ADD a l:(MULW  x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
+(SUB a l:(MULW  x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(ADD a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(SUB a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
 
 // optimize ADCSflags, SBCSflags and friends
 (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) => (ADCSflags x y c)
@@ -1217,16 +1220,16 @@
 (MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
 (MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
 
-(MULW x (MOVDconst [c])) && int32(c)==-1 => (NEG x)
+(MULW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (NEG <x.Type> x))
 (MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
-(MULW x (MOVDconst [c])) && int32(c)==1 => x
-(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
-(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log64(c-1)])
-(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
-(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
-(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
-(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
-(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+(MULW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg x)
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (SLLconst <x.Type> [log64(c)] x))
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (MOVWUreg (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (MOVWUreg (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (ADDshiftLL <x.Type> x x [1])))
+(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3])))
+(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
 
 // mneg by constant
 (MNEG x (MOVDconst [-1])) => x
@@ -1241,16 +1244,16 @@
 (MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
 
 
-(MNEGW x (MOVDconst [c])) && int32(c)==-1 => x
+(MNEGW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg x)
 (MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
-(MNEGW x (MOVDconst [c])) && int32(c)==1 => (NEG x)
+(MNEGW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (NEG <x.Type> x))
 (MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
-(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
-(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
-(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
-(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
-(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
-(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])))
+(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])))
+(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))))
+(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3])))
+(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))))
 
 
 (MADD a x (MOVDconst [-1])) => (SUB a x)
@@ -1275,27 +1278,27 @@
 (MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
 (MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
 
-(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (SUB a x)
-(MADDW a _ (MOVDconst [c])) && int32(c)==0 => a
-(MADDW a x (MOVDconst [c])) && int32(c)==1 => (ADD a x)
-(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
-(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
-(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
-(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
-(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
-(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
-(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
+(MADDW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
+(MADDW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
 
-(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (SUB a x)
-(MADDW a (MOVDconst [c]) _) && int32(c)==0 => a
-(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (ADD a x)
-(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
-(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
-(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
-(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
-(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
-(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
-(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
+(MADDW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
+(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
 
 (MSUB a x (MOVDconst [-1])) => (ADD a x)
 (MSUB a _ (MOVDconst [0])) => a
@@ -1319,33 +1322,33 @@
 (MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
 (MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
 
-(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (ADD a x)
-(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => a
-(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (SUB a x)
-(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
-(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
-(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
-(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
-(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
-(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
-(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
+(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
+(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
 
-(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (ADD a x)
-(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => a
-(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (SUB a x)
-(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
-(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
-(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
-(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
-(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
-(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
-(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
+(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
+(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
 
 // div by constant
 (UDIV  x (MOVDconst [1])) => x
 (UDIV  x (MOVDconst [c])) && isPowerOfTwo64(c) => (SRLconst [log64(c)] x)
-(UDIVW x (MOVDconst [c])) && uint32(c)==1 => x
-(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] x)
+(UDIVW x (MOVDconst [c])) && uint32(c)==1 => (MOVWUreg x)
+(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
 (UMOD  _ (MOVDconst [1])) => (MOVDconst [0])
 (UMOD  x (MOVDconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x)
 (UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
@@ -1401,24 +1404,24 @@
 (SRLconst [c] (MOVDconst [d]))  => (MOVDconst [int64(uint64(d)>>uint64(c))])
 (SRAconst [c] (MOVDconst [d]))  => (MOVDconst [d>>uint64(c)])
 (MUL   (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
-(MULW  (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(int32(c)*int32(d))])
 (MNEG  (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
-(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-int64(int32(c)*int32(d))])
-(MADD  (MOVDconst [c]) x y) => (ADDconst [c] (MUL   <x.Type> x y))
-(MADDW (MOVDconst [c]) x y) => (ADDconst [c] (MULW  <x.Type> x y))
-(MSUB  (MOVDconst [c]) x y) => (ADDconst [c] (MNEG  <x.Type> x y))
-(MSUBW (MOVDconst [c]) x y) => (ADDconst [c] (MNEGW <x.Type> x y))
+(MULW  (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(c*d))])
+(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(-c*d))])
+(MADD  (MOVDconst [c]) x y) => (ADDconst [c] (MUL  <x.Type> x y))
+(MSUB  (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
 (MADD  a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
-(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [int64(int32(c)*int32(d))] a)
 (MSUB  a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
-(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [int64(int32(c)*int32(d))] a)
+(MADDW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MULW  <x.Type> x y)))
+(MSUBW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
+(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (ADDconst <a.Type> [c*d] a))
+(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (SUBconst <a.Type> [c*d] a))
 (DIV   (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d])
 (UDIV  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))])
-(DIVW  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(int32(c)/int32(d))])
+(DIVW  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)/int32(d)))])
 (UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))])
 (MOD   (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d])
 (UMOD  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))])
-(MODW  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(int32(c)%int32(d))])
+(MODW  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)%int32(d)))])
 (UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))])
 (ANDconst [c] (MOVDconst [d]))  => (MOVDconst [c&d])
 (ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
@@ -1566,13 +1569,26 @@
 (LessEqualF       (InvertFlags x)) => (GreaterEqualF x)
 (GreaterThanF     (InvertFlags x)) => (LessThanF x)
 (GreaterEqualF    (InvertFlags x)) => (LessEqualF x)
-(LessThanNoov     (InvertFlags x)) => (BIC (GreaterEqualNoov <typ.Bool> x) (Equal <typ.Bool> x))
-(GreaterEqualNoov (InvertFlags x)) => (OR (LessThanNoov <typ.Bool> x) (Equal <typ.Bool> x))
+(LessThanNoov     (InvertFlags x)) => (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x)
+(GreaterEqualNoov (InvertFlags x)) => (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x)
 
 // Boolean-generating instructions (NOTE: NOT all boolean Values) always
 // zero upper bit of the register; no need to zero-extend
 (MOVBUreg x:((Equal|NotEqual|LessThan|LessThanU|LessThanF|LessEqual|LessEqualU|LessEqualF|GreaterThan|GreaterThanU|GreaterThanF|GreaterEqual|GreaterEqualU|GreaterEqualF) _)) => (MOVDreg x)
 
+// Don't bother extending if we're not using the higher bits.
+(MOV(B|BU)reg x) && v.Type.Size() <= 1 => x
+(MOV(H|HU)reg x) && v.Type.Size() <= 2 => x
+(MOV(W|WU)reg x) && v.Type.Size() <= 4 => x
+
+// omit unsigned extension
+(MOVWUreg x) && zeroUpper32Bits(x, 3) => x
+
+// omit sign extension
+(MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
+(MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
+(MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])
+
 // absorb flag constants into conditional instructions
 (CSEL  [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
 (CSEL  [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
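
The new MOVWUreg rules in this hunk lean on the helper zeroUpper32Bits and on the invariant (added to ARM64Ops.go below) that 32-bit ops zero the upper half of their destination. Below is a toy, self-contained model of that kind of check; the real zeroUpper32Bits in cmd/compile/internal/ssa/rewrite.go walks actual SSA values and recognizes far more ops.

package main

import "fmt"

// Toy model of the check behind the new rule
//   (MOVWUreg x) && zeroUpper32Bits(x, 3) => x
// Ops here are plain strings and the recognized set is deliberately tiny; the
// point is the shape of the check: a value whose producing op already zeroes
// bits 32..63 does not need another zero-extending move.
type value struct {
	op   string
	args []*value
}

func zeroUpper32Bits(v *value, depth int) bool {
	switch v.op {
	case "MOVWUreg", "MOVWUload", "MULW": // 32-bit ops zero the upper half
		return true
	case "Phi": // every incoming edge must already be zero-extended
		if depth <= 0 {
			return false
		}
		for _, a := range v.args {
			if !zeroUpper32Bits(a, depth-1) {
				return false
			}
		}
		return true
	}
	return false
}

func main() {
	w := &value{op: "MULW"}
	fmt.Println(zeroUpper32Bits(&value{op: "Phi", args: []*value{w, w}}, 3)) // true
	fmt.Println(zeroUpper32Bits(&value{op: "ADD"}, 3))                       // false
}
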
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
index 2853e62..5a98aa0 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
@@ -13,6 +13,7 @@
 //  - *const instructions may use a constant larger than the instruction can encode.
 //    In this case the assembler expands to multiple instructions and uses tmp
 //    register (R27).
+//  - All 32-bit Ops will zero the upper 32 bits of the destination register.
 
 // Suffixes encode the bit width of various instructions.
 // D (double word) = 64 bit
@@ -195,9 +196,9 @@
 		{name: "MULL", argLength: 2, reg: gp21, asm: "SMULL", commutative: true},                                      // arg0 * arg1, signed, 32-bit mult results in 64-bit
 		{name: "UMULL", argLength: 2, reg: gp21, asm: "UMULL", commutative: true},                                     // arg0 * arg1, unsigned, 32-bit mult results in 64-bit
 		{name: "DIV", argLength: 2, reg: gp21, asm: "SDIV"},                                                           // arg0 / arg1, signed
-		{name: "UDIV", argLength: 2, reg: gp21, asm: "UDIV"},                                                          // arg0 / arg1, unsighed
+		{name: "UDIV", argLength: 2, reg: gp21, asm: "UDIV"},                                                          // arg0 / arg1, unsigned
 		{name: "DIVW", argLength: 2, reg: gp21, asm: "SDIVW"},                                                         // arg0 / arg1, signed, 32 bit
-		{name: "UDIVW", argLength: 2, reg: gp21, asm: "UDIVW"},                                                        // arg0 / arg1, unsighed, 32 bit
+		{name: "UDIVW", argLength: 2, reg: gp21, asm: "UDIVW"},                                                        // arg0 / arg1, unsigned, 32 bit
 		{name: "MOD", argLength: 2, reg: gp21, asm: "REM"},                                                            // arg0 % arg1, signed
 		{name: "UMOD", argLength: 2, reg: gp21, asm: "UREM"},                                                          // arg0 % arg1, unsigned
 		{name: "MODW", argLength: 2, reg: gp21, asm: "REMW"},                                                          // arg0 % arg1, signed, 32 bit
@@ -234,6 +235,10 @@
 		{name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"},                                // -arg0, float64
 		{name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"},                              // sqrt(arg0), float64
 		{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"},                              // sqrt(arg0), float32
+		{name: "FMIND", argLength: 2, reg: fp21, asm: "FMIND"},                                // min(arg0, arg1)
+		{name: "FMINS", argLength: 2, reg: fp21, asm: "FMINS"},                                // min(arg0, arg1)
+		{name: "FMAXD", argLength: 2, reg: fp21, asm: "FMAXD"},                                // max(arg0, arg1)
+		{name: "FMAXS", argLength: 2, reg: fp21, asm: "FMAXS"},                                // max(arg0, arg1)
 		{name: "REV", argLength: 1, reg: gp11, asm: "REV"},                                    // byte reverse, 64-bit
 		{name: "REVW", argLength: 1, reg: gp11, asm: "REVW"},                                  // byte reverse, 32-bit
 		{name: "REV16", argLength: 1, reg: gp11, asm: "REV16"},                                // byte reverse in each 16-bit halfword, 64-bit
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
index 4a47c4c..2af9519 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
@@ -416,7 +416,7 @@
 (GetCallerSP ...) => (LoweredGetCallerSP ...)
 (GetCallerPC ...) => (LoweredGetCallerPC ...)
 
-(If cond yes no) => (NE cond yes no)
+(If cond yes no) => (NE (MOVBUreg <typ.UInt64> cond) yes no)
 
 // Write barrier.
 (WB ...) => (LoweredWB ...)
@@ -450,71 +450,37 @@
 (EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
 (NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
 (EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
+(MOVBUreg x:((SGT|SGTU) _ _)) => x
 
 // fold offset into address
 (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
 
 // fold address into load/store
-(MOVBload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload  [off1+int32(off2)] {sym} ptr mem)
-(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem)
-(MOVHload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload  [off1+int32(off2)] {sym} ptr mem)
-(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem)
-(MOVWload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload  [off1+int32(off2)] {sym} ptr mem)
-(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem)
-(MOVVload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload  [off1+int32(off2)] {sym} ptr mem)
-(MOVFload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload  [off1+int32(off2)] {sym} ptr mem)
-(MOVDload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload  [off1+int32(off2)] {sym} ptr mem)
+// Do not fold global variable access in -dynlink mode, where it will be rewritten
+// to use the GOT via REGTMP, which currently cannot handle large offsets.
+(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {sym} ptr mem)
 
-(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOV(B|H|W|V|F|D)store [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {sym} ptr val mem)
 
-(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOV(B|H|W|V)storezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+	(MOV(B|H|W|V)storezero [off1+int32(off2)] {sym} ptr mem)
 
-(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
-	(MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOV(B|H|W|V|F|D)store [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+
+(MOV(B|H|W|V)storezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+	(MOV(B|H|W|V)storezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 
 (LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
 (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
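
A toy model of the guard added to the offset-folding rules above, (ptr.Op != OpSB || !config.ctxt.Flag_dynlink): the offset is folded unless the base is the SB pseudo-register and the build is in -dynlink mode, where SB-relative access goes through the GOT as the comment explains. Type and field names below are illustrative stand-ins, not the compiler's own.

package main

import "fmt"

type ptrVal struct{ op string }

type buildCfg struct{ dynlink bool }

// canFoldOffset mirrors the new rule condition on LOONG64 loads/stores.
func canFoldOffset(ptr ptrVal, cfg buildCfg) bool {
	return ptr.op != "SB" || !cfg.dynlink
}

func main() {
	fmt.Println(canFoldOffset(ptrVal{"SP"}, buildCfg{dynlink: true}))  // true: not SB-based
	fmt.Println(canFoldOffset(ptrVal{"SB"}, buildCfg{dynlink: false})) // true: static linking
	fmt.Println(canFoldOffset(ptrVal{"SB"}, buildCfg{dynlink: true}))  // false: leave for the GOT rewrite
}
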
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
index 23f20fd..3fbf5be 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
@@ -123,17 +123,17 @@
 
 	// Common individual register masks
 	var (
-		gp         = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R21-unused, R22 is g, R30 is REGTMP
+		gp         = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R22 is g, R30 is REGTMP
 		gpg        = gp | buildReg("g")
 		gpsp       = gp | buildReg("SP")
 		gpspg      = gpg | buildReg("SP")
 		gpspsbg    = gpspg | buildReg("SB")
 		fp         = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
 		callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
-		r1         = buildReg("R19")
-		r2         = buildReg("R18")
-		r3         = buildReg("R17")
-		r4         = buildReg("R4")
+		r1         = buildReg("R20")
+		r2         = buildReg("R21")
+		r3         = buildReg("R23")
+		r4         = buildReg("R24")
 	)
 	// Common regInfo
 	var (
@@ -273,31 +273,32 @@
 		{name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"},     // float64 -> float32
 
 		// function calls
-		{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true},                                               // call static function aux.(*obj.LSym).  arg0=mem, auxint=argsize, returns mem
-		{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true},                                 // tail call static function aux.(*obj.LSym).  arg0=mem, auxint=argsize, returns mem
-		{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure.  arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
-		{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true},                         // call fn by pointer.  arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+		{name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true},                                               // call static function aux.(*obj.LSym).  last arg=mem, auxint=argsize, returns mem
+		{name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true},                                 // tail call static function aux.(*obj.LSym).  last arg=mem, auxint=argsize, returns mem
+		{name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure.  arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+		{name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true},                         // call fn by pointer.  arg0=codeptr, last arg=mem, auxint=argsize, returns mem
 
 		// duffzero
 		// arg0 = address of memory to zero
 		// arg1 = mem
 		// auxint = offset into duffzero code to start executing
 		// returns mem
-		// R19 aka loong64.REGRT1 changed as side effect
+		// R20 aka loong64.REGRT1 changed as side effect
 		{
 			name:      "DUFFZERO",
 			aux:       "Int64",
 			argLength: 2,
 			reg: regInfo{
-				inputs:   []regMask{gp},
-				clobbers: buildReg("R19 R1"),
+				inputs:   []regMask{buildReg("R20")},
+				clobbers: buildReg("R20 R1"),
 			},
+			typ:            "Mem",
 			faultOnNilArg0: true,
 		},
 
 		// duffcopy
-		// arg0 = address of dst memory (in R20, changed as side effect) REGRT2
-		// arg1 = address of src memory (in R19, changed as side effect) REGRT1
+		// arg0 = address of dst memory (in R21, changed as side effect)
+		// arg1 = address of src memory (in R20, changed as side effect)
 		// arg2 = mem
 		// auxint = offset into duffcopy code to start executing
 		// returns mem
@@ -306,57 +307,56 @@
 			aux:       "Int64",
 			argLength: 3,
 			reg: regInfo{
-				inputs:   []regMask{buildReg("R20"), buildReg("R19")},
-				clobbers: buildReg("R19 R20 R1"),
+				inputs:   []regMask{buildReg("R21"), buildReg("R20")},
+				clobbers: buildReg("R20 R21 R1"),
 			},
+			typ:            "Mem",
 			faultOnNilArg0: true,
 			faultOnNilArg1: true,
 		},
 
 		// large or unaligned zeroing
-		// arg0 = address of memory to zero (in R19, changed as side effect)
+		// arg0 = address of memory to zero (in R20, changed as side effect)
 		// arg1 = address of the last element to zero
 		// arg2 = mem
 		// auxint = alignment
 		// returns mem
-		//	SUBV	$8, R19
-		//	MOVV	R0, 8(R19)
-		//	ADDV	$8, R19
-		//	BNE	Rarg1, R19, -2(PC)
+		//	MOVx	R0, (R20)
+		//	ADDV	$sz, R20
+		//	BGEU	Rarg1, R20, -2(PC)
 		{
 			name:      "LoweredZero",
 			aux:       "Int64",
 			argLength: 3,
 			reg: regInfo{
-				inputs:   []regMask{buildReg("R19"), gp},
-				clobbers: buildReg("R19"),
+				inputs:   []regMask{buildReg("R20"), gp},
+				clobbers: buildReg("R20"),
 			},
-			clobberFlags:   true,
+			typ:            "Mem",
 			faultOnNilArg0: true,
 		},
 
 		// large or unaligned move
-		// arg0 = address of dst memory (in R4, changed as side effect)
-		// arg1 = address of src memory (in R19, changed as side effect)
+		// arg0 = address of dst memory (in R21, changed as side effect)
+		// arg1 = address of src memory (in R20, changed as side effect)
 		// arg2 = address of the last element of src
 		// arg3 = mem
 		// auxint = alignment
 		// returns mem
-		//	SUBV	$8, R19
-		//	MOVV	8(R19), Rtmp
-		//	MOVV	Rtmp, (R4)
-		//	ADDV	$8, R19
-		//	ADDV	$8, R4
-		//	BNE	Rarg2, R19, -4(PC)
+		//	MOVx	(R20), Rtmp
+		//	MOVx	Rtmp, (R21)
+		//	ADDV	$sz, R20
+		//	ADDV	$sz, R21
+		//	BGEU	Rarg2, R20, -4(PC)
 		{
 			name:      "LoweredMove",
 			aux:       "Int64",
 			argLength: 4,
 			reg: regInfo{
-				inputs:   []regMask{buildReg("R4"), buildReg("R19"), gp},
-				clobbers: buildReg("R19 R4"),
+				inputs:   []regMask{buildReg("R21"), buildReg("R20"), gp},
+				clobbers: buildReg("R20 R21"),
 			},
-			clobberFlags:   true,
+			typ:            "Mem",
 			faultOnNilArg0: true,
 			faultOnNilArg1: true,
 		},
@@ -476,8 +476,8 @@
 		blocks:   blocks,
 		regnames: regNamesLOONG64,
 		// TODO: support register ABI on loong64
-		ParamIntRegNames:   "R4 R5 R6 R7 R8 R9 R10 R11",
-		ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7",
+		ParamIntRegNames:   "R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19",
+		ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15",
 		gpregmask:          gp,
 		fpregmask:          fp,
 		framepointerreg:    -1, // not used
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS64.rules b/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
index 4628e2a..cabc7c6 100644
--- a/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
@@ -38,6 +38,14 @@
 (Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
 (Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
 
+(Select0 <t> (Add64carry x y c)) => (ADDV (ADDV <t> x y) c)
+(Select1 <t> (Add64carry x y c)) =>
+	(OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))
+
+(Select0 <t> (Sub64borrow x y c)) => (SUBV (SUBV <t> x y) c)
+(Select1 <t> (Sub64borrow x y c)) =>
+	(OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))
+
 // math package intrinsics
 (Abs ...) => (ABSD ...)
 
@@ -798,6 +806,10 @@
 (GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
 (GEZ (MOVVconst [c]) yes no) && c <  0 => (First no yes)
 
+// SGT/SGTU with known outcomes.
+(SGT  x x) => (MOVVconst [0])
+(SGTU x x) => (MOVVconst [0])
+
 // fold readonly sym load
 (MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
 (MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
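
An executable model of the Add64carry lowering introduced at the top of this MIPS64.rules hunk, checked against math/bits: without a flags register, the carry out of x+y+c is reconstructed with unsigned comparisons (SGTU), since an unsigned add wrapped exactly when the truncated sum is smaller than an operand.

package main

import (
	"fmt"
	"math/bits"
)

// add64carryLikeMIPS64 mirrors
//   Select0: (ADDV (ADDV x y) c)
//   Select1: (OR (SGTU x s:(ADDV x y)) (SGTU s (ADDV s c)))
func add64carryLikeMIPS64(x, y, c uint64) (sum, carryOut uint64) {
	s := x + y
	sum = s + c
	var c1, c2 uint64
	if x > s { // SGTU x s: x+y wrapped
		c1 = 1
	}
	if s > sum { // SGTU s (ADDV s c): adding the carry-in wrapped
		c2 = 1
	}
	carryOut = c1 | c2
	return
}

func main() {
	s, c := add64carryLikeMIPS64(^uint64(0), 1, 0)
	refS, refC := bits.Add64(^uint64(0), 1, 0)
	fmt.Println(s == refS, c == refC) // true true
}
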
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
index 97e592f..c9cd34b 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
@@ -238,6 +238,8 @@
 (LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
 (LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVDaddr {sym} base)
 (OffPtr [off] ptr) => (ADD (MOVDconst <typ.Int64> [off]) ptr)
+(MOVDaddr {sym} [n] p:(ADD x y)) && sym == nil && n == 0 => p
+(MOVDaddr {sym} [n] ptr) && sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi) => ptr
 
 // TODO: optimize these cases?
 (Ctz32NonZero ...) => (Ctz32 ...)
@@ -321,10 +323,6 @@
 (NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc)))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
 (NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc)))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
 
-// Elide compares of bit tests
-((EQ|NE) (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
-((EQ|NE) (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
-
 // absorb flag constants into branches
 (EQ (FlagEQ) yes no) => (First yes no)
 (EQ (FlagLT) yes no) => (First no yes)
@@ -405,8 +403,8 @@
 
 
 // Elide compares of bit tests
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
 ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
 ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ORCC x y)) yes no)
 ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (XORCC x y)) yes no)
@@ -559,6 +557,7 @@
 (NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)])
 
 // Discover consts
+(AND x (MOVDconst [-1])) => x
 (AND x (MOVDconst [c])) && isU16Bit(c) => (Select0 (ANDCCconst [c] x))
 (XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
 (OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
@@ -801,6 +800,7 @@
 (AtomicOr(8|32)   ...) => (LoweredAtomicOr(8|32)   ...)
 
 (Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
+(Select0 (ANDCCconst [1] z:(SRADconst [63] x))) && z.Uses == 1  => (SRDconst [63] x)
 
 // Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
 // This may interact with other patterns in the future. (Compare with arm64)
@@ -1000,7 +1000,7 @@
 
 // Fold bit reversal into loads.
 (BR(W|H) x:(MOV(W|H)Zload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
-(BR(W|H) x:(MOV(W|H)Zloadidx ptr idx      mem)) && x.Uses == 1 => @x.Block (MOV(W|H)Zreg (MOV(W|H)BRloadidx ptr idx mem))
+(BR(W|H) x:(MOV(W|H)Zloadidx ptr idx      mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRloadidx ptr idx mem)
 (BRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
 (BRD x:(MOVDloadidx ptr idx      mem)) && x.Uses == 1 => @x.Block (MOVDBRloadidx ptr idx mem)
 
@@ -1011,7 +1011,7 @@
 // GOPPC64<10 rules.
 // These Bswap operations should only be introduced by the memcombine pass in places where they can be folded into loads or stores.
 (Bswap(32|16) x:(MOV(W|H)Zload [off] {sym} ptr mem)) => @x.Block (MOV(W|H)BRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
-(Bswap(32|16) x:(MOV(W|H)Zloadidx ptr idx      mem)) => @x.Block (MOV(W|H)Zreg (MOV(W|H)BRloadidx ptr idx mem))
+(Bswap(32|16) x:(MOV(W|H)Zloadidx ptr idx      mem)) => @x.Block (MOV(W|H)BRloadidx ptr idx mem)
 (Bswap64 x:(MOVDload [off] {sym} ptr mem)) => @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
 (Bswap64 x:(MOVDloadidx ptr idx      mem)) => @x.Block (MOVDBRloadidx ptr idx mem)
 (MOV(D|W|H)store [off] {sym} ptr (Bswap(64|32|16) val) mem) => (MOV(D|W|H)BRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
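
Worked arithmetic behind the new rule added after the Slicemask lowering, (Select0 (ANDCCconst [1] z:(SRADconst [63] x))) => (SRDconst [63] x): an arithmetic shift right by 63 yields 0 or all-ones, so masking that result with 1 equals a logical shift right by 63; both extract the sign bit.

package main

import "fmt"

func main() {
	for _, x := range []int64{5, -5, 0, -1} {
		viaAndOfAsr := uint64(x>>63) & 1   // ANDCCconst [1] (SRADconst [63] x)
		viaLogicalShift := uint64(x) >> 63 // SRDconst [63] x
		fmt.Println(x, viaAndOfAsr, viaLogicalShift, viaAndOfAsr == viaLogicalShift)
	}
}
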
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
index 4be3623..7aa2e6c 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
@@ -176,14 +176,17 @@
 		r6          = buildReg("R6")
 	)
 	ops := []opData{
-		{name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true},        // arg0 + arg1
-		{name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"},        // arg0 + auxInt
-		{name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true},      // arg0+arg1
-		{name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true},    // arg0+arg1
-		{name: "SUB", argLength: 2, reg: gp21, asm: "SUB"},                           // arg0-arg1
-		{name: "SUBFCconst", argLength: 1, reg: gp11cxer, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (carry is ignored)
-		{name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"},                         // arg0-arg1
-		{name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"},                       // arg0-arg1
+		{name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true},                              // arg0 + arg1
+		{name: "ADDCC", argLength: 2, reg: gp21, asm: "ADDCC", commutative: true, typ: "(Int,Flags)"},      // arg0 + arg1
+		{name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"},                              // arg0 + auxInt
+		{name: "ADDCCconst", argLength: 1, reg: gp11cxer, asm: "ADDCCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0 + auxInt sets CC, clobbers XER
+		{name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true},                            // arg0+arg1
+		{name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true},                          // arg0+arg1
+		{name: "SUB", argLength: 2, reg: gp21, asm: "SUB"},                                                 // arg0-arg1
+		{name: "SUBCC", argLength: 2, reg: gp21, asm: "SUBCC", typ: "(Int,Flags)"},                         // arg0-arg1 sets CC
+		{name: "SUBFCconst", argLength: 1, reg: gp11cxer, asm: "SUBC", aux: "Int64"},                       // auxInt - arg0 (carry is ignored)
+		{name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"},                                               // arg0-arg1
+		{name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"},                                             // arg0-arg1
 
 		{name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit)
 		{name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit)
@@ -215,7 +218,6 @@
 		{name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32
 		// The following are ops to implement the extended mnemonics for shifts as described in section C.8 of the ISA.
 		// The constant shift values are packed into the aux int32.
-		{name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int32"},     // arg0 extract bits identified by shift params"
 		{name: "CLRLSLWI", argLength: 1, reg: gp11, asm: "CLRLSLWI", aux: "Int32"}, //
 		{name: "CLRLSLDI", argLength: 1, reg: gp11, asm: "CLRLSLDI", aux: "Int32"}, //
 
@@ -243,9 +245,12 @@
 		{name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"},                      // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
 		{name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"},                       // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
 		{name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above
+		{name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"},                     // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63.
+		{name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"},                     // Likewise, but only ME and SH are valid. MB is always 0.
 
-		{name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD", clobberFlags: true}, // count leading zeros
-		{name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW", clobberFlags: true}, // count leading zeros (32 bit)
+		{name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD"},                          // count leading zeros
+		{name: "CNTLZDCC", argLength: 1, reg: gp11, asm: "CNTLZDCC", typ: "(Int, Flags)"}, // count leading zeros, sets CC
+		{name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW"},                          // count leading zeros (32 bit)
 
 		{name: "CNTTZD", argLength: 1, reg: gp11, asm: "CNTTZD"}, // count trailing zeros
 		{name: "CNTTZW", argLength: 1, reg: gp11, asm: "CNTTZW"}, // count trailing zeros (32 bit)
@@ -284,34 +289,37 @@
 		{name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"},   // move 64 bits of F register into G register
 		{name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register
 
-		{name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true},                                               // arg0&arg1
-		{name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"},                                                                // arg0&^arg1
-		{name: "ANDCC", argLength: 2, reg: gp21, asm: "ANDCC", commutative: true, clobberFlags: true, typ: "(Int64,Flags)"}, // arg0&arg1 sets CC
-		{name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true},                                                 // arg0|arg1
-		{name: "ORN", argLength: 2, reg: gp21, asm: "ORN"},                                                                  // arg0|^arg1
-		{name: "ORCC", argLength: 2, reg: gp21, asm: "ORCC", commutative: true, clobberFlags: true, typ: "(Int,Flags)"},     // arg0|arg1 sets CC
-		{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true},                                               // ^(arg0|arg1)
-		{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true},                                 // arg0^arg1
-		{name: "XORCC", argLength: 2, reg: gp21, asm: "XORCC", commutative: true, clobberFlags: true, typ: "(Int,Flags)"},   // arg0^arg1 sets CC
-		{name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true},                                 // arg0^^arg1
-		{name: "NEG", argLength: 1, reg: gp11, asm: "NEG"},                                                                  // -arg0 (integer)
-		{name: "BRD", argLength: 1, reg: gp11, asm: "BRD"},                                                                  // reversebytes64(arg0)
-		{name: "BRW", argLength: 1, reg: gp11, asm: "BRW"},                                                                  // reversebytes32(arg0)
-		{name: "BRH", argLength: 1, reg: gp11, asm: "BRH"},                                                                  // reversebytes16(arg0)
-		{name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"},                                                                // -arg0 (floating point)
-		{name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"},                                                              // sqrt(arg0) (floating point)
-		{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"},                                                            // sqrt(arg0) (floating point, single precision)
-		{name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"},                                                              // floor(arg0), float64
-		{name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"},                                                               // ceil(arg0), float64
-		{name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"},                                                              // trunc(arg0), float64
-		{name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"},                                                              // round(arg0), float64
-		{name: "FABS", argLength: 1, reg: fp11, asm: "FABS"},                                                                // abs(arg0), float64
-		{name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"},                                                              // -abs(arg0), float64
-		{name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"},                                                            // copysign arg0 -> arg1, float64
+		{name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true},                           // arg0&arg1
+		{name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"},                                            // arg0&^arg1
+		{name: "ANDNCC", argLength: 2, reg: gp21, asm: "ANDNCC", typ: "(Int64,Flags)"},                  // arg0&^arg1 sets CC
+		{name: "ANDCC", argLength: 2, reg: gp21, asm: "ANDCC", commutative: true, typ: "(Int64,Flags)"}, // arg0&arg1 sets CC
+		{name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true},                             // arg0|arg1
+		{name: "ORN", argLength: 2, reg: gp21, asm: "ORN"},                                              // arg0|^arg1
+		{name: "ORCC", argLength: 2, reg: gp21, asm: "ORCC", commutative: true, typ: "(Int,Flags)"},     // arg0|arg1 sets CC
+		{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true},                           // ^(arg0|arg1)
+		{name: "NORCC", argLength: 2, reg: gp21, asm: "NORCC", commutative: true, typ: "(Int,Flags)"},   // ^(arg0|arg1) sets CC
+		{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true},             // arg0^arg1
+		{name: "XORCC", argLength: 2, reg: gp21, asm: "XORCC", commutative: true, typ: "(Int,Flags)"},   // arg0^arg1 sets CC
+		{name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true},             // arg0^^arg1
+		{name: "NEG", argLength: 1, reg: gp11, asm: "NEG"},                                              // -arg0 (integer)
+		{name: "NEGCC", argLength: 1, reg: gp11, asm: "NEGCC", typ: "(Int,Flags)"},                      // -arg0 (integer) sets CC
+		{name: "BRD", argLength: 1, reg: gp11, asm: "BRD"},                                              // reversebytes64(arg0)
+		{name: "BRW", argLength: 1, reg: gp11, asm: "BRW"},                                              // reversebytes32(arg0)
+		{name: "BRH", argLength: 1, reg: gp11, asm: "BRH"},                                              // reversebytes16(arg0)
+		{name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"},                                            // -arg0 (floating point)
+		{name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"},                                          // sqrt(arg0) (floating point)
+		{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"},                                        // sqrt(arg0) (floating point, single precision)
+		{name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"},                                          // floor(arg0), float64
+		{name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"},                                           // ceil(arg0), float64
+		{name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"},                                          // trunc(arg0), float64
+		{name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"},                                          // round(arg0), float64
+		{name: "FABS", argLength: 1, reg: fp11, asm: "FABS"},                                            // abs(arg0), float64
+		{name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"},                                          // -abs(arg0), float64
+		{name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"},                                        // copysign arg0 -> arg1, float64
 
-		{name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"},   // arg0|aux
-		{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
-		{name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true, typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
+		{name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"},                                                                                       // arg0|aux
+		{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"},                                                                                     // arg0^aux
+		{name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
 
 		{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"},   // sign extend int8 to int64
 		{name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64
@@ -469,7 +477,7 @@
 		//	MOVD	$16,R31
 		//	loop:
 		//	STXVD2X VS32,(R0)(R3)
-		//	STXVD2X	VS32,(R31),R3)
+		//	STXVD2X	VS32,(R31)(R3)
 		//	ADD	R3,32
 		//	BC	loop
 
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules b/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
index 00d898f..2eecf94 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
@@ -17,3 +17,39 @@
 (SETBCR [0] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [4] (MOVDconst [1]) cmp)
 (SETBC [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [1] (MOVDconst [1]) cmp)
 (SETBCR [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [5] (MOVDconst [1]) cmp)
+
+// Avoid using ANDCCconst if the value for CR0 is not needed, since ANDCCconst
+// always sets it.
+(Select0 z:(ANDCCconst [m] x)) && z.Uses == 1 && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x)
+// The upper bits of values smaller than a register are undefined. Take advantage of that.
+(AND <t> x:(MOVDconst [m]) n) && t.Size() <= 2 => (Select0 (ANDCCconst [int64(int16(m))] n))
+
+// Convert simple bit masks to an equivalent rldic[lr] if possible.
+(AND x:(MOVDconst [m]) n) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] n)
+(AND x:(MOVDconst [m]) n) && m != 0 && isPPC64ValidShiftMask(^m) => (RLDICR [encodePPC64RotateMask(0,m,64)] n)
+
+// If the RLDICL does not rotate its value, a shifted value can be merged.
+(RLDICL [em] x:(SRDconst [s] a)) && (em&0xFF0000) == 0 => (RLDICL [mergePPC64RLDICLandSRDconst(em, s)] a)
+
+// Convert rotated 32 bit masks on 32 bit values into rlwinm. In general, this leaves the upper 32 bits in an undefined state.
+(AND <t> x:(MOVDconst [m]) n) && t.Size() == 4 && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(0,m,32)] n)
+
+// When PCRel is supported, paddi can add a 34b signed constant in one instruction.
+(ADD (MOVDconst [m]) x) && supportsPPC64PCRel() && (m<<30)>>30 == m => (ADDconst [m] x)
+
+
+// Where possible and practical, generate CC opcodes. Due to the structure of the rules, there are limits to how
+// a Value can be rewritten which make it impossible to correctly rewrite sibling Value users. To work around this
+// case, candidates for CC opcodes are converted in two steps:
+//   1. Convert all (x (Op ...) ...) into (x (Select0 (OpCC ...)) ...). See convertPPC64OpToOpCC for more
+//      detail on how and why this is done there.
+//   2. Rewrite (CMPconst [0] (Select0 (OpCC ...))) into (Select1 (OpCC...))
+// Note: to minimize potentially expensive regeneration of CC opcodes during the flagalloc pass, only rewrite if
+//       both ops are in the same block.
+(CMPconst [0] z:((ADD|AND|ANDN|OR|SUB|NOR|XOR) x y)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+(CMPconst [0] z:((NEG|CNTLZD) x)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+// Note: ADDCCconst only assembles to 1 instruction for int16 constants.
+(CMPconst [0] z:(ADDconst [c] x)) && int64(int16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+// And finally, fixup the flag user.
+(CMPconst <t> [0] (Select0 z:((ADD|AND|ANDN|OR|SUB|NOR|XOR)CC x y))) => (Select1 <t> z)
+(CMPconst <t> [0] (Select0 z:((ADDCCconst|NEGCC|CNTLZDCC) y))) => (Select1 <t> z)
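Taken together, the rules above avoid a separate compare when a record-form (CC) instruction already produces the needed flags, and turn simple constant masks into rotate-and-mask instructions. A hedged sketch of Go source these rules can target on ppc64 (hypothetical function names, not part of the patch):

// Sketch only: a contiguous low-bit mask is a valid shift mask, so this AND
// with a constant is a candidate for a single RLDICL instead of materializing
// the constant and issuing an AND.
func maskLow24(x uint64) uint64 {
	return x & 0x00FFFFFF
}

// Sketch only: when a two-register AND is only compared against zero, the
// two-step rewrite above lets the comparison read CR0 set by ANDCC (via
// Select1) instead of issuing a separate CMP.
func andIsZero(x, y uint64) bool {
	return x&y == 0
}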
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
index 9a6fceb..fc206c4 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
@@ -3,21 +3,11 @@
 // license that can be found in the LICENSE file.
 
 // Lowering arithmetic
-(Add64 ...) => (ADD ...)
-(AddPtr ...) => (ADD ...)
-(Add32 ...) => (ADD ...)
-(Add16 ...) => (ADD ...)
-(Add8 ...) => (ADD ...)
-(Add32F ...) => (FADDS ...)
-(Add64F ...) => (FADDD ...)
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add(64|32)F ...) => (FADD(D|S) ...)
 
-(Sub64 ...) => (SUB ...)
-(SubPtr ...) => (SUB ...)
-(Sub32 ...) => (SUB ...)
-(Sub16 ...) => (SUB ...)
-(Sub8 ...) => (SUB ...)
-(Sub32F ...) => (FSUBS ...)
-(Sub64F ...) => (FSUBD ...)
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub(64|32)F ...) => (FSUB(D|S) ...)
 
 (Mul64 ...) => (MUL  ...)
 (Mul64uhilo ...) => (LoweredMuluhilo ...)
@@ -25,11 +15,9 @@
 (Mul32 ...) => (MULW ...)
 (Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
 (Mul8 x y)  => (MULW (SignExt8to32 x)  (SignExt8to32 y))
-(Mul32F ...) => (FMULS ...)
-(Mul64F ...) => (FMULD ...)
+(Mul(64|32)F ...) => (FMUL(D|S) ...)
 
-(Div32F ...) => (FDIVS ...)
-(Div64F ...) => (FDIVD ...)
+(Div(64|32)F ...) => (FDIV(D|S) ...)
 
 (Div64 x y [false])  => (DIV x y)
 (Div64u ...) => (DIVU ...)
@@ -65,32 +53,15 @@
 (Mod8 x y)   => (REMW  (SignExt8to32  x) (SignExt8to32  y))
 (Mod8u x y)  => (REMUW (ZeroExt8to32  x) (ZeroExt8to32  y))
 
-(And64 ...) => (AND ...)
-(And32 ...) => (AND ...)
-(And16 ...) => (AND ...)
-(And8  ...) => (AND ...)
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
 
-(Or64 ...) => (OR ...)
-(Or32 ...) => (OR ...)
-(Or16 ...) => (OR ...)
-(Or8  ...) => (OR ...)
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg(64|32)F ...) => (FNEG(D|S) ...)
 
-(Xor64 ...) => (XOR ...)
-(Xor32 ...) => (XOR ...)
-(Xor16 ...) => (XOR ...)
-(Xor8  ...) => (XOR ...)
+(Com(64|32|16|8) ...) => (NOT ...)
 
-(Neg64  ...) => (NEG ...)
-(Neg32  ...) => (NEG ...)
-(Neg16  ...) => (NEG ...)
-(Neg8   ...) => (NEG ...)
-(Neg32F ...) => (FNEGS ...)
-(Neg64F ...) => (FNEGD ...)
-
-(Com64 ...) => (NOT ...)
-(Com32 ...) => (NOT ...)
-(Com16 ...) => (NOT ...)
-(Com8  ...) => (NOT ...)
 
 (Sqrt ...) => (FSQRTD ...)
 (Sqrt32 ...) => (FSQRTS ...)
@@ -132,8 +103,7 @@
 
 (CvtBoolToUint8 ...) => (Copy ...)
 
-(Round32F ...) => (Copy ...)
-(Round64F ...) => (Copy ...)
+(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
 
 (Slicemask <t> x) => (SRAI [63] (NEG <t> x))
 
@@ -180,61 +150,65 @@
 (Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
 (Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
 
-// SRL only considers the bottom 6 bits of y. If y > 64, the result should
-// always be 0. See Lsh above for a detailed description.
-(Rsh8Ux8   <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
-(Rsh8Ux16  <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh8Ux32  <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh8Ux64  <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] y)))
-(Rsh16Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
-(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
-(Rsh32Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
-(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
-(Rsh64Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x                 y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
-(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x                 y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x                 y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x                 y) (Neg64 <t> (SLTIU <t> [64] y)))
+// SRL only considers the bottom 6 bits of y, similarly SRLW only considers the
+// bottom 5 bits of y. Ensure that the result is always zero if the shift exceeds
+// the maximum value. See Lsh above for a detailed description.
+(Rsh8Ux8   <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
+(Rsh8Ux16  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh8Ux32  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh8Ux64  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] y)))
+(Rsh16Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
+(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Rsh32Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64  y))))
+(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
+(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
+(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] y)))
+(Rsh64Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
+(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] y)))
 
-(Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64  x) y)
-(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
-(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt32to64 x) y)
-(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x                 y)
+(Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRL  (ZeroExt8to64  x) y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL  (ZeroExt16to64 x) y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x                 y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL  x                 y)
 
-// SRA only considers the bottom 6 bits of y. If y > 64, the result should
-// be either 0 or -1 based on the sign bit.
+// SRA only considers the bottom 6 bits of y, similarly SRAW only considers the
+// bottom 5 bits. If y is greater than the maximum value (either 63 or 31
+// depending on the instruction), the result of the shift should be either 0
+// or -1 based on the sign bit of x.
 //
-// We implement this by performing the max shift (-1) if y >= 64.
+// We implement this by performing the max shift (-1) if y > the maximum value.
 //
 // We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
-// us with -1 (0xffff...) if y >= 64.
+// us with -1 (0xffff...) if y >= 64.  Similarly, we OR (uint64(y < 32) - 1) into y
+// before passing it to SRAW.
 //
 // We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
-// more than the 6 bits SRA cares about.
-(Rsh8x8   <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
-(Rsh8x16  <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh8x32  <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh8x64  <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh16x8  <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
-(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh32x8  <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
-(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh64x8  <t> x y) && !shiftIsBounded(v) => (SRA <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
-(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+// more than the 5 or 6 bits SRAW and SRA care about.
+(Rsh8x8   <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
+(Rsh8x16  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh8x32  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh8x64  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh16x8  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
+(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh32x8  <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64  y)))))
+(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
+(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
+(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
+(Rsh64x8  <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
+(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
 
-(Rsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRA (SignExt8to64  x) y)
-(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
-(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt32to64 x) y)
-(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x                 y)
+(Rsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRA  (SignExt8to64  x) y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA  (SignExt16to64 x) y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW  x                y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA   x                y)
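As a reader aid (not part of the patch): the unbounded Rsh32x* rules above must clamp the shift amount themselves, because SRAW only reads the low 5 bits of y while Go requires a shift of 32 or more to yield 0 or -1. A minimal sketch of the semantics being implemented, with a made-up function name:

// Sketch of what the Rsh32x* rules guarantee: SRAW ignores all but the low 5
// bits of the count, so the rules force the count to all-ones (which masks to
// 31, the maximum shift) whenever y >= 32, giving 0 or -1 by the sign of x.
func sra32(x int32, y uint) int32 {
	if y >= 32 {
		y = 31 // maximum arithmetic shift; result is 0 for x >= 0, -1 for x < 0
	}
	return x >> y
}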
 
 // Rotates.
 (RotateLeft8  <t> x (MOVDconst [c])) => (Or8  (Lsh8x64  <t> x (MOVDconst [c&7]))  (Rsh8Ux64  <t> x (MOVDconst [-c&7])))
@@ -250,36 +224,27 @@
 (Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
 (Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
 (Less8U  x y) => (SLTU (ZeroExt8to64  x) (ZeroExt8to64  y))
-(Less64F ...) => (FLTD ...)
-(Less32F ...) => (FLTS ...)
+(Less(64|32)F ...) => (FLT(D|S) ...)
 
 // Convert x <= y to !(y > x).
-(Leq64  x y) => (Not (Less64  y x))
-(Leq32  x y) => (Not (Less32  y x))
-(Leq16  x y) => (Not (Less16  y x))
-(Leq8   x y) => (Not (Less8   y x))
-(Leq64U x y) => (Not (Less64U y x))
-(Leq32U x y) => (Not (Less32U y x))
-(Leq16U x y) => (Not (Less16U y x))
-(Leq8U  x y) => (Not (Less8U  y x))
-(Leq64F ...) => (FLED ...)
-(Leq32F ...) => (FLES ...)
+(Leq(64|32|16|8)  x y) => (Not (Less(64|32|16|8)  y x))
+(Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x))
+(Leq(64|32)F ...) => (FLE(D|S) ...)
 
 (EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
 (Eq64  x y) => (SEQZ (SUB <x.Type> x y))
-(Eq32  x y) => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Eq32  x y) &&  x.Type.IsSigned() => (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
+(Eq32  x y) && !x.Type.IsSigned() => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
 (Eq16  x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
 (Eq8   x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64  x) (ZeroExt8to64  y)))
-(Eq64F ...) => (FEQD ...)
-(Eq32F ...) => (FEQS ...)
+(Eq(64|32)F ...) => (FEQ(D|S) ...)
 
-(NeqPtr x y) => (SNEZ (SUB <typ.Uintptr> x y))
-(Neq64  x y) => (SNEZ (SUB <x.Type> x y))
-(Neq32  x y) => (SNEZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Neq16  x y) => (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Neq8   x y) => (SNEZ (SUB <x.Type> (ZeroExt8to64  x) (ZeroExt8to64  y)))
-(Neq64F ...) => (FNED ...)
-(Neq32F ...) => (FNES ...)
+(NeqPtr x y) => (Not (EqPtr x y))
+(Neq64  x y) => (Not (Eq64  x y))
+(Neq32  x y) => (Not (Eq32  x y))
+(Neq16  x y) => (Not (Eq16  x y))
+(Neq8   x y) => (Not (Eq8   x y))
+(Neq(64|32)F ...) => (FNE(D|S) ...)
 
 // Loads
 (Load <t> ptr mem) &&  t.IsBoolean()                   => (MOVBUload ptr mem)
@@ -435,8 +400,6 @@
 		(ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
 		mem)
 
-(Convert ...) => (MOVconvert ...)
-
 // Checks
 (IsNonNil ...) => (SNEZ ...)
 (IsInBounds ...) => (Less64U ...)
@@ -451,6 +414,9 @@
 // Write barrier.
 (WB ...) => (LoweredWB ...)
 
+// Publication barrier as intrinsic
+(PubBarrier ...) => (LoweredPubBarrier ...)
+
 (PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
 (PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
 (PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
@@ -537,10 +503,7 @@
 (OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
 (OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
 
-(Const8  [val]) => (MOVDconst [int64(val)])
-(Const16 [val]) => (MOVDconst [int64(val)])
-(Const32 [val]) => (MOVDconst [int64(val)])
-(Const64 [val]) => (MOVDconst [int64(val)])
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
 (Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
 (Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
 (ConstNil) => (MOVDconst [0])
@@ -557,18 +520,9 @@
 (TailCall ...) => (CALLtail ...)
 
 // Atomic Intrinsics
-(AtomicLoad8   ...) => (LoweredAtomicLoad8  ...)
-(AtomicLoad32  ...) => (LoweredAtomicLoad32 ...)
-(AtomicLoad64  ...) => (LoweredAtomicLoad64 ...)
-(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
-
-(AtomicStore8       ...) => (LoweredAtomicStore8  ...)
-(AtomicStore32      ...) => (LoweredAtomicStore32 ...)
-(AtomicStore64      ...) => (LoweredAtomicStore64 ...)
-(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
-
-(AtomicAdd32 ...) => (LoweredAtomicAdd32 ...)
-(AtomicAdd64 ...) => (LoweredAtomicAdd64 ...)
+(AtomicLoad(Ptr|64|32|8)  ...) => (LoweredAtomicLoad(64|64|32|8) ...)
+(AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...)
+(AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...)
 
 // AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
 (AtomicAnd8 ptr val mem) =>
@@ -581,8 +535,7 @@
 (AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
 (AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
 
-(AtomicExchange32 ...) => (LoweredAtomicExchange32 ...)
-(AtomicExchange64 ...) => (LoweredAtomicExchange64 ...)
+(AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...)
 
 // AtomicOr8(ptr,val)  => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
 (AtomicOr8 ptr val mem) =>
@@ -756,6 +709,20 @@
 // But for now, this is enough to get rid of lots of them.
 (MOVDnop (MOVDconst [c])) => (MOVDconst [c])
 
+// Avoid unnecessary zero and sign extension when right shifting.
+(SRAI <t> [x] (MOVWreg  y)) && x >= 0 && x <= 31 => (SRAIW <t> [int64(x)] y)
+(SRLI <t> [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW <t> [int64(x)] y)
+
+// Replace right shifts that exceed size of signed type.
+(SRAI <t> [x] (MOVBreg y)) && x >=  8 => (SRAI  [63] (SLLI <t> [56] y))
+(SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI  [63] (SLLI <t> [48] y))
+(SRAI <t> [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y)
+
+// Eliminate right shifts that exceed size of unsigned type.
+(SRLI <t> [x] (MOVBUreg y)) && x >=  8 => (MOVDconst <t> [0])
+(SRLI <t> [x] (MOVHUreg y)) && x >= 16 => (MOVDconst <t> [0])
+(SRLI <t> [x] (MOVWUreg y)) && x >= 32 => (MOVDconst <t> [0])
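A small illustration of the kind of source the narrowing rules above help (hypothetical function, sketch only):

// Sketch: a constant logical shift of a uint32 like this is a candidate for a
// single SRLIW under the rules above, rather than a zero-extension followed by
// a 64-bit SRLI.
func shr3(x uint32) uint32 {
	return x >> 3
}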
+
 // Fold constant into immediate instructions where possible.
 (ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
 (AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
@@ -763,7 +730,9 @@
 (XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
 (SLL  x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
 (SRL  x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
+(SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x)
 (SRA  x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
+(SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x)
 (SLT  x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI  [val] x)
 (SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x)
 
@@ -832,6 +801,10 @@
 (Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
 (Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)
 
+(FADD(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FMADD(S|D) x y a)
+(FSUB(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FNMSUB(S|D) x y a)
+(FSUB(S|D) (FMUL(S|D) x y) a) && a.Block.Func.useFMA(v) => (FMSUB(S|D) x y a)
+
 // Merge negation into fused multiply-add and multiply-subtract.
 //
 // Key:
@@ -842,5 +815,7 @@
 //                D B
 //
 // Note: multiplication commutativity handled by rule generator.
+(F(MADD|NMADD|MSUB|NMSUB)S neg:(FNEGS x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)S x y z)
+(F(MADD|NMADD|MSUB|NMSUB)S x y neg:(FNEGS z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)S x y z)
 (F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
 (F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
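For context (illustration only): the FADD/FSUB-with-FMUL rules above fire only when the function is permitted to contract the expression into a fused multiply-add (the useFMA check). A hedged sketch of a qualifying expression, with a made-up function name:

// Sketch: when contraction is allowed, a + x*y evaluated in a single
// expression is a candidate for FMADDD on riscv64 under the rules above;
// math.FMA(x, y, a) computes the fused result unconditionally.
func axpy(a, x, y float64) float64 {
	return a + x*y
}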
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
index 52e87cb..93f20f8 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
@@ -207,12 +207,16 @@
 		{name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}, resultInArg0: true}, // nop, return arg0 in same register
 
 		// Shift ops
-		{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"},                 // arg0 << (aux1 & 63)
-		{name: "SRA", argLength: 2, reg: gp21, asm: "SRA"},                 // arg0 >> (aux1 & 63), signed
-		{name: "SRL", argLength: 2, reg: gp21, asm: "SRL"},                 // arg0 >> (aux1 & 63), unsigned
-		{name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint, shift amount 0-63
-		{name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-63
-		{name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-63
+		{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"},                   // arg0 << (aux1 & 63)
+		{name: "SRA", argLength: 2, reg: gp21, asm: "SRA"},                   // arg0 >> (aux1 & 63), signed
+		{name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"},                 // arg0 >> (aux1 & 31), signed
+		{name: "SRL", argLength: 2, reg: gp21, asm: "SRL"},                   // arg0 >> (aux1 & 63), unsigned
+		{name: "SRLW", argLength: 2, reg: gp21, asm: "SRLW"},                 // arg0 >> (aux1 & 31), unsigned
+		{name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"},   // arg0 << auxint, shift amount 0-63
+		{name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"},   // arg0 >> auxint, signed, shift amount 0-63
+		{name: "SRAIW", argLength: 1, reg: gp11, asm: "SRAIW", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-31
+		{name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"},   // arg0 >> auxint, unsigned, shift amount 0-63
+		{name: "SRLIW", argLength: 1, reg: gp11, asm: "SRLIW", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-31
 
 		// Bitwise ops
 		{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true}, // arg0 ^ arg1
@@ -231,11 +235,9 @@
 		{name: "SLTU", argLength: 2, reg: gp21, asm: "SLTU"},                 // arg0 < arg1, unsigned, result is 0 or 1
 		{name: "SLTIU", argLength: 1, reg: gp11, asm: "SLTIU", aux: "Int64"}, // arg0 < auxint, unsigned, result is 0 or 1
 
-		// MOVconvert converts between pointers and integers.
-		// We have a special op for this so as to not confuse GC
-		// (particularly stack maps). It takes a memory arg so it
-		// gets correctly ordered with respect to GC safepoints.
-		{name: "MOVconvert", argLength: 2, reg: gp11, asm: "MOV"}, // arg0, but converted to int/ptr as appropriate; arg1=mem
+		// Round ops to block fused-multiply-add extraction.
+		{name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true},
+		{name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true},
 
 		// Calls
 		{name: "CALLstatic", argLength: -1, reg: call, aux: "CallOff", call: true},               // call static function aux.(*gc.Sym). last arg=mem, auxint=argsize, returns mem
@@ -395,6 +397,9 @@
 		// Returns a pointer to a write barrier buffer in X24.
 		{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ (gpMask | regNamed["g"])) | regNamed["X1"], outputs: []regMask{regNamed["X24"]}}, clobberFlags: true, aux: "Int64"},
 
+		// Do data barrier. arg0=memory
+		{name: "LoweredPubBarrier", argLength: 1, asm: "FENCE", hasSideEffects: true},
+
 		// There are three of these functions so that they can have three different register inputs.
 		// When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
 		// default registers to match so we don't need to copy registers around unnecessarily.
@@ -407,6 +412,10 @@
 		{name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS", commutative: false, typ: "Float32"},                                          // arg0 - arg1
 		{name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, typ: "Float32"},                                           // arg0 * arg1
 		{name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", commutative: false, typ: "Float32"},                                          // arg0 / arg1
+		{name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS", commutative: true, typ: "Float32"},                                         // (arg0 * arg1) + arg2
+		{name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS", commutative: true, typ: "Float32"},                                         // (arg0 * arg1) - arg2
+		{name: "FNMADDS", argLength: 3, reg: fp31, asm: "FNMADDS", commutative: true, typ: "Float32"},                                       // -(arg0 * arg1) + arg2
+		{name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS", commutative: true, typ: "Float32"},                                       // -(arg0 * arg1) - arg2
 		{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"},                                                            // sqrt(arg0)
 		{name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"},                                                              // -arg0
 		{name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"},                                                              // reinterpret arg0 as float
diff --git a/src/cmd/compile/internal/ssa/_gen/S390X.rules b/src/cmd/compile/internal/ssa/_gen/S390X.rules
index a9d62c7..2a6d7e7 100644
--- a/src/cmd/compile/internal/ssa/_gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/_gen/S390X.rules
@@ -1300,21 +1300,25 @@
   && p.Op != OpSB
   && x.Uses == 1
   && is20Bit(int64(i)-4)
+  && setPos(v, x.Pos)
   && clobber(x)
   => (STM2 [i-4] {s} p w0 w1 mem)
 (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
   && x.Uses == 1
   && is20Bit(int64(i)-8)
+  && setPos(v, x.Pos)
   && clobber(x)
   => (STM3 [i-8] {s} p w0 w1 w2 mem)
 (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
   && x.Uses == 1
   && is20Bit(int64(i)-12)
+  && setPos(v, x.Pos)
   && clobber(x)
   => (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
 (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
   && x.Uses == 1
   && is20Bit(int64(i)-8)
+  && setPos(v, x.Pos)
   && clobber(x)
   => (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
 // 64-bit
@@ -1322,21 +1326,25 @@
   && p.Op != OpSB
   && x.Uses == 1
   && is20Bit(int64(i)-8)
+  && setPos(v, x.Pos)
   && clobber(x)
   => (STMG2 [i-8] {s} p w0 w1 mem)
 (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
   && x.Uses == 1
   && is20Bit(int64(i)-16)
+  && setPos(v, x.Pos)
   && clobber(x)
   => (STMG3 [i-16] {s} p w0 w1 w2 mem)
 (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
   && x.Uses == 1
   && is20Bit(int64(i)-24)
+  && setPos(v, x.Pos)
   && clobber(x)
   => (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
 (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
   && x.Uses == 1
   && is20Bit(int64(i)-16)
+  && setPos(v, x.Pos)
   && clobber(x)
   => (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
 
diff --git a/src/cmd/compile/internal/ssa/_gen/allocators.go b/src/cmd/compile/internal/ssa/_gen/allocators.go
index 5c72fe8..5869a61 100644
--- a/src/cmd/compile/internal/ssa/_gen/allocators.go
+++ b/src/cmd/compile/internal/ssa/_gen/allocators.go
@@ -1,3 +1,7 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package main
 
 // TODO: should we share backing storage for similarly-shaped types?
diff --git a/src/cmd/compile/internal/ssa/_gen/cover.bash b/src/cmd/compile/internal/ssa/_gen/cover.bash
index 7311cfb..733f9db 100755
--- a/src/cmd/compile/internal/ssa/_gen/cover.bash
+++ b/src/cmd/compile/internal/ssa/_gen/cover.bash
@@ -12,7 +12,7 @@
 # regular 'go run .' usage to run the generator.
 
 cat >main_test.go <<-EOF
-	// +build ignore
+	//go:build ignore
 
 	package main
 
diff --git a/src/cmd/compile/internal/ssa/_gen/dec.rules b/src/cmd/compile/internal/ssa/_gen/dec.rules
index b194898..7944947 100644
--- a/src/cmd/compile/internal/ssa/_gen/dec.rules
+++ b/src/cmd/compile/internal/ssa/_gen/dec.rules
@@ -7,6 +7,8 @@
 // types.  These rules work together with the decomposeBuiltIn
 // pass which handles phis of these types.
 
+(Store {t} _ _ mem) && t.Size() == 0 => mem
+
 // complex ops
 (ComplexReal (ComplexMake real _  )) => real
 (ComplexImag (ComplexMake _ imag )) => imag
@@ -91,3 +93,109 @@
     (OffPtr <typ.BytePtrPtr> [config.PtrSize] dst)
     data
     (Store {typ.Uintptr} dst itab mem))
+
+// Helpers for expand calls
+// Some of these are copied from generic.rules
+
+(IMake _typ (StructMake1 val)) => (IMake _typ val)
+(StructSelect [0] (IData x)) => (IData x)
+
+(StructSelect (StructMake1 x)) => x
+(StructSelect [0] (StructMake2 x _)) => x
+(StructSelect [1] (StructMake2 _ x)) => x
+(StructSelect [0] (StructMake3 x _ _)) => x
+(StructSelect [1] (StructMake3 _ x _)) => x
+(StructSelect [2] (StructMake3 _ _ x)) => x
+(StructSelect [0] (StructMake4 x _ _ _)) => x
+(StructSelect [1] (StructMake4 _ x _ _)) => x
+(StructSelect [2] (StructMake4 _ _ x _)) => x
+(StructSelect [3] (StructMake4 _ _ _ x)) => x
+
+// Special case coming from immediate interface rewriting
+// Typical case: (StructSelect [0] (IData (IMake typ dat))) rewrites to (StructSelect [0] dat)
+// but because the interface is immediate, the type of "IData" is a one-element struct containing
+// a pointer that is not the pointer type of dat (can be a *uint8).
+// More annoying case: (ArraySelect[0] (StructSelect[0] isAPtr))
+// There, the result of the StructSelect is an Array (not a pointer) and
+// the pre-rewrite input to the ArraySelect is a struct, not a pointer.
+(StructSelect [0] x) && x.Type.IsPtrShaped()  => x
+(ArraySelect [0] x) && x.Type.IsPtrShaped()  => x
+
+// These, too.  Bits is bits.
+(ArrayMake1 x) && x.Type.IsPtrShaped() => x
+(StructMake1 x) && x.Type.IsPtrShaped() => x
+
+(Store dst (StructMake1 <t> f0) mem) =>
+  (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+(Store dst (StructMake2 <t> f0 f1) mem) =>
+  (Store {t.FieldType(1)}
+    (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+    f1
+    (Store {t.FieldType(0)}
+      (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+        f0 mem))
+(Store dst (StructMake3 <t> f0 f1 f2) mem) =>
+  (Store {t.FieldType(2)}
+    (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+    f2
+    (Store {t.FieldType(1)}
+      (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+      f1
+      (Store {t.FieldType(0)}
+        (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+          f0 mem)))
+(Store dst (StructMake4 <t> f0 f1 f2 f3) mem) =>
+  (Store {t.FieldType(3)}
+    (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst)
+    f3
+    (Store {t.FieldType(2)}
+      (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+      f2
+      (Store {t.FieldType(1)}
+        (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+        f1
+        (Store {t.FieldType(0)}
+          (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+            f0 mem))))
+
+(ArraySelect (ArrayMake1 x)) => x
+(ArraySelect [0] (IData x)) => (IData x)
+
+(Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem)
+
+// NOTE removed must-not-be-SSA condition.
+(ArraySelect [i] x:(Load <t> ptr mem)) =>
+  @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.Elem().Size()*i] ptr) mem)
+
+(StringPtr x:(Load <t> ptr mem)) && t.IsString() => @x.Block (Load <typ.BytePtr> ptr mem)
+(StringLen x:(Load <t> ptr mem)) && t.IsString() => @x.Block (Load <typ.Int>
+      (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+      mem)
+
+// NOTE removed must-not-be-SSA condition.
+(StructSelect [i] x:(Load <t> ptr mem)) =>
+  @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+
+(ITab x:(Load <t> ptr mem)) && t.IsInterface() => @x.Block (Load <typ.Uintptr> ptr mem)
+
+(IData x:(Load <t> ptr mem)) && t.IsInterface() => @x.Block (Load <typ.BytePtr>
+      (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr)
+      mem)
+
+(SlicePtr x:(Load <t> ptr mem)) && t.IsSlice() => @x.Block (Load <t.Elem().PtrTo()> ptr mem)
+(SliceLen x:(Load <t> ptr mem)) && t.IsSlice() => @x.Block (Load <typ.Int>
+      (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+      mem)
+(SliceCap x:(Load <t> ptr mem)) && t.IsSlice() => @x.Block (Load <typ.Int>
+      (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr)
+      mem)
+
+(ComplexReal x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 8 => @x.Block (Load <typ.Float32> ptr mem)
+(ComplexImag x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 8 => @x.Block (Load <typ.Float32>
+      (OffPtr <typ.Float32Ptr> [4] ptr)
+      mem)
+
+(ComplexReal x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 16 => @x.Block (Load <typ.Float64> ptr mem)
+(ComplexImag x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 16 => @x.Block (Load <typ.Float64>
+      (OffPtr <typ.Float64Ptr> [8] ptr)
+      mem)
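A sketch of what the struct-store decomposition above means at the source level (hypothetical type and function, not part of the patch):

// Sketch: storing a small SSA-able struct is split by the rules above into one
// Store per field through an OffPtr at the field's offset, instead of a single
// aggregate copy.
type pair struct {
	a, b int64
}

func setPair(p *pair, a, b int64) {
	*p = pair{a, b} // decomposes into two 8-byte field stores
}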
diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules
index cdb3463..aeda625 100644
--- a/src/cmd/compile/internal/ssa/_gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/_gen/generic.rules
@@ -704,7 +704,7 @@
 	(Store {t2} p2 _
 		mem:(Zero [n] p3 _)))
 	&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3)
-	&& fe.CanSSA(t1)
+	&& CanSSA(t1)
 	&& disjoint(op, t1.Size(), p2, t2.Size())
 	=> @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
 (Load <t1> op:(OffPtr [o1] p1)
@@ -712,7 +712,7 @@
 		(Store {t3} p3 _
 			mem:(Zero [n] p4 _))))
 	&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4)
-	&& fe.CanSSA(t1)
+	&& CanSSA(t1)
 	&& disjoint(op, t1.Size(), p2, t2.Size())
 	&& disjoint(op, t1.Size(), p3, t3.Size())
 	=> @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
@@ -722,7 +722,7 @@
 			(Store {t4} p4 _
 				mem:(Zero [n] p5 _)))))
 	&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5)
-	&& fe.CanSSA(t1)
+	&& CanSSA(t1)
 	&& disjoint(op, t1.Size(), p2, t2.Size())
 	&& disjoint(op, t1.Size(), p3, t3.Size())
 	&& disjoint(op, t1.Size(), p4, t4.Size())
@@ -734,7 +734,7 @@
 				(Store {t5} p5 _
 					mem:(Zero [n] p6 _))))))
 	&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6)
-	&& fe.CanSSA(t1)
+	&& CanSSA(t1)
 	&& disjoint(op, t1.Size(), p2, t2.Size())
 	&& disjoint(op, t1.Size(), p3, t3.Size())
 	&& disjoint(op, t1.Size(), p4, t4.Size())
@@ -848,28 +848,28 @@
 (StructSelect [2] (StructMake4 _ _ x _)) => x
 (StructSelect [3] (StructMake4 _ _ _ x)) => x
 
-(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) =>
+(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && CanSSA(t) =>
   (StructMake0)
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) =>
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && CanSSA(t) =>
   (StructMake1
     (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) =>
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && CanSSA(t) =>
   (StructMake2
     (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0]             ptr) mem)
     (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) =>
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && CanSSA(t) =>
   (StructMake3
     (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0]             ptr) mem)
     (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
     (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) =>
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && CanSSA(t) =>
   (StructMake4
     (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0]             ptr) mem)
     (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
     (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem)
     (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
 
-(StructSelect [i] x:(Load <t> ptr mem)) && !fe.CanSSA(t) =>
+(StructSelect [i] x:(Load <t> ptr mem)) && !CanSSA(t) =>
   @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
 
 (Store _ (StructMake0) mem) => mem
@@ -911,9 +911,9 @@
 (StructSelect [0] (IData x)) => (IData x)
 
 // un-SSAable values use mem->mem copies
-(Store {t} dst (Load src mem) mem) && !fe.CanSSA(t) =>
+(Store {t} dst (Load src mem) mem) && !CanSSA(t) =>
 	(Move {t} [t.Size()] dst src mem)
-(Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t) =>
+(Store {t} dst (Load src mem) (VarDef {x} mem)) && !CanSSA(t) =>
 	(Move {t} [t.Size()] dst src (VarDef {x} mem))
 
 // array ops
@@ -922,7 +922,7 @@
 (Load <t> _ _) && t.IsArray() && t.NumElem() == 0 =>
   (ArrayMake0)
 
-(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) =>
+(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && CanSSA(t) =>
   (ArrayMake1 (Load <t.Elem()> ptr mem))
 
 (Store _ (ArrayMake0) mem) => mem
@@ -981,7 +981,7 @@
     (ConstNil <typ.Uintptr>)
     (ConstNil <typ.BytePtr>))
 
-(NilCheck (GetG mem) mem) => mem
+(NilCheck ptr:(GetG mem) mem) => ptr
 
 (If (Not cond) yes no) => (If cond no yes)
 (If (ConstBool [c]) yes no) && c => (First yes no)
@@ -2055,19 +2055,19 @@
 	&& isSameCall(call.Aux, "runtime.newobject")
 	=> mem
 
-(NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
+(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
 	&& isSameCall(call.Aux, "runtime.newobject")
 	&& warnRule(fe.Debug_checknil(), v, "removed nil check")
-	=> (Invalid)
+	=> ptr
 
-(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
+(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
 	&& isSameCall(call.Aux, "runtime.newobject")
 	&& warnRule(fe.Debug_checknil(), v, "removed nil check")
-	=> (Invalid)
+	=> ptr
 
 // Addresses of globals are always non-nil.
-(NilCheck          (Addr {_} (SB))    _) => (Invalid)
-(NilCheck (Convert (Addr {_} (SB)) _) _) => (Invalid)
+(NilCheck          ptr:(Addr {_} (SB))    _) => ptr
+(NilCheck ptr:(Convert (Addr {_} (SB)) _) _) => ptr
 
 // for late-expanded calls, recognize memequal applied to a single constant byte
 // Support is limited by 1, 2, 4, 8 byte sizes
@@ -2121,6 +2121,11 @@
   && isSameCall(callAux, "runtime.memequal")
   => (MakeResult (ConstBool <typ.Bool> [true]) mem)
 
+(Static(Call|LECall) {callAux} p q _ mem)
+  && isSameCall(callAux, "runtime.memequal")
+  && isSamePtr(p, q)
+  => (MakeResult (ConstBool <typ.Bool> [true]) mem)
+
 // Turn known-size calls to memclrNoHeapPointers into a Zero.
 // Note that we are using types.Types[types.TUINT8] instead of sptr.Type.Elem() - see issue 55122 and CL 431496 for more details.
 (SelectN [0] call:(StaticCall {sym} sptr (Const(64|32) [c]) mem))
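One more reader aid (sketch only): the new Static(Call|LECall) rule folds a runtime.memequal call whose two pointer arguments are provably the same (isSamePtr), so a self-comparison of a non-SSA-able value can become the constant true. A made-up example:

// Sketch: comparing a large array against itself goes through runtime.memequal;
// when both pointer arguments are the same address, the call can be folded to
// the constant true by the rule above.
func selfEqual(a [64]byte) bool {
	return a == a
}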
diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go
index 53ff57f..69eb48c 100644
--- a/src/cmd/compile/internal/ssa/_gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go
@@ -285,6 +285,12 @@
 	{name: "Abs", argLength: 1},      // absolute value arg0
 	{name: "Copysign", argLength: 2}, // copy sign from arg0 to arg1
 
+	// Float min/max implementation, if hardware is available.
+	{name: "Min64F", argLength: 2}, // min(arg0,arg1)
+	{name: "Min32F", argLength: 2}, // min(arg0,arg1)
+	{name: "Max64F", argLength: 2}, // max(arg0,arg1)
+	{name: "Max32F", argLength: 2}, // max(arg0,arg1)
+
 	// 3-input opcode.
 	// Fused-multiply-add, float64 only.
 	// When a*b+c is exactly zero (before rounding), then the result is +0 or -0.
@@ -471,7 +477,7 @@
 	{name: "IsNonNil", argLength: 1, typ: "Bool"},        // arg0 != nil
 	{name: "IsInBounds", argLength: 2, typ: "Bool"},      // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
 	{name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
-	{name: "NilCheck", argLength: 2, typ: "Void"},        // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns void.
+	{name: "NilCheck", argLength: 2, nilCheck: true},     // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns the ptr unmodified.
 
 	// Pseudo-ops
 	{name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem
@@ -643,6 +649,8 @@
 //    Plain                []            [next]
 //       If   [boolean Value]      [then, else]
 //    First                []   [always, never]
+//    Defer             [mem]  [nopanic, panic]                  (control opcode should be OpStaticCall to runtime.deferproc)
+//    JumpTable  [integer Value]  [succ1, succ2, ..]
 
 var genericBlocks = []blockData{
 	{name: "Plain"},                  // a single successor
diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go
index 15be9a1..072df29 100644
--- a/src/cmd/compile/internal/ssa/_gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go
@@ -1400,7 +1400,7 @@
 	if op.name == "" {
 		// Failed to find the op.
 		// Run through everything again with strict=false
-		// to generate useful diagnosic messages before failing.
+		// to generate useful diagnostic messages before failing.
 		for _, x := range genericOps {
 			match(x, false, "generic")
 		}
diff --git a/src/cmd/compile/internal/ssa/addressingmodes.go b/src/cmd/compile/internal/ssa/addressingmodes.go
index 699f6e4..4e3209e 100644
--- a/src/cmd/compile/internal/ssa/addressingmodes.go
+++ b/src/cmd/compile/internal/ssa/addressingmodes.go
@@ -195,6 +195,17 @@
 	[2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ1}: OpAMD64MOVQstoreconstidx1,
 	[2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ8}: OpAMD64MOVQstoreconstidx8,
 
+	[2]Op{OpAMD64SETEQstore, OpAMD64LEAQ1}: OpAMD64SETEQstoreidx1,
+	[2]Op{OpAMD64SETNEstore, OpAMD64LEAQ1}: OpAMD64SETNEstoreidx1,
+	[2]Op{OpAMD64SETLstore, OpAMD64LEAQ1}:  OpAMD64SETLstoreidx1,
+	[2]Op{OpAMD64SETLEstore, OpAMD64LEAQ1}: OpAMD64SETLEstoreidx1,
+	[2]Op{OpAMD64SETGstore, OpAMD64LEAQ1}:  OpAMD64SETGstoreidx1,
+	[2]Op{OpAMD64SETGEstore, OpAMD64LEAQ1}: OpAMD64SETGEstoreidx1,
+	[2]Op{OpAMD64SETBstore, OpAMD64LEAQ1}:  OpAMD64SETBstoreidx1,
+	[2]Op{OpAMD64SETBEstore, OpAMD64LEAQ1}: OpAMD64SETBEstoreidx1,
+	[2]Op{OpAMD64SETAstore, OpAMD64LEAQ1}:  OpAMD64SETAstoreidx1,
+	[2]Op{OpAMD64SETAEstore, OpAMD64LEAQ1}: OpAMD64SETAEstoreidx1,
+
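A hedged illustration of source that can reach the new SETxx/LEAQ1 combinations above (hypothetical function, sketch only):

// Sketch: storing a comparison result through an indexed address is a
// candidate for the combined SETEQstoreidx1-style addressing forms on amd64.
func markEqual(dst []bool, i int, a, b int64) {
	dst[i] = a == b
}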
 	// These instructions are re-split differently for performance, see needSplit above.
 	// TODO if 386 versions are created, also update needSplit and _gen/386splitload.rules
 	[2]Op{OpAMD64CMPBload, OpAMD64ADDQ}: OpAMD64CMPBloadidx1,
diff --git a/src/cmd/compile/internal/ssa/bench_test.go b/src/cmd/compile/internal/ssa/bench_test.go
index 0971667..1dc733b 100644
--- a/src/cmd/compile/internal/ssa/bench_test.go
+++ b/src/cmd/compile/internal/ssa/bench_test.go
@@ -30,3 +30,21 @@
 		}
 	}
 }
+
+type Point struct {
+	X, Y int
+}
+
+//go:noinline
+func sign(p1, p2, p3 Point) bool {
+	return (p1.X-p3.X)*(p2.Y-p3.Y)-(p2.X-p3.X)*(p1.Y-p3.Y) < 0
+}
+
+func BenchmarkInvertLessThanNoov(b *testing.B) {
+	p1 := Point{1, 2}
+	p2 := Point{2, 3}
+	p3 := Point{3, 4}
+	for i := 0; i < b.N; i++ {
+		sign(p1, p2, p3)
+	}
+}
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
index e7776b2..26af10b 100644
--- a/src/cmd/compile/internal/ssa/block.go
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -112,13 +112,6 @@
 }
 
 // BlockKind is the kind of SSA block.
-//
-//	  kind          controls        successors
-//	------------------------------------------
-//	  Exit      [return mem]                []
-//	 Plain                []            [next]
-//	    If   [boolean Value]      [then, else]
-//	 Defer             [mem]  [nopanic, panic]  (control opcode should be OpStaticCall to runtime.deferproc)
 type BlockKind int16
 
 // short form print
@@ -275,8 +268,7 @@
 	b.Values = b.Values[:i]
 }
 
-// AddEdgeTo adds an edge from block b to block c. Used during building of the
-// SSA graph; do not use on an already-completed SSA graph.
+// AddEdgeTo adds an edge from block b to block c.
 func (b *Block) AddEdgeTo(c *Block) {
 	i := len(b.Succs)
 	j := len(c.Preds)
@@ -305,6 +297,8 @@
 // removeSucc removes the ith output edge from b.
 // It is the responsibility of the caller to remove
 // the corresponding predecessor edge.
+// Note that this potentially reorders successors of b, so it
+// must be used very carefully.
 func (b *Block) removeSucc(i int) {
 	n := len(b.Succs) - 1
 	if i != n {
@@ -331,6 +325,19 @@
 	b.Likely *= -1
 }
 
+// swapSuccessorsByIdx swaps b.Succs[x] and b.Succs[y].
+func (b *Block) swapSuccessorsByIdx(x, y int) {
+	if x == y {
+		return
+	}
+	ex := b.Succs[x]
+	ey := b.Succs[y]
+	b.Succs[x] = ey
+	b.Succs[y] = ex
+	ex.b.Preds[ex.i].i = y
+	ey.b.Preds[ey.i].i = x
+}
+
 // removePhiArg removes the ith arg from phi.
 // It must be called after calling b.removePred(i) to
 // adjust the corresponding phi value of the block:
@@ -347,7 +354,7 @@
 func (b *Block) removePhiArg(phi *Value, i int) {
 	n := len(b.Preds)
 	if numPhiArgs := len(phi.Args); numPhiArgs-1 != n {
-		b.Fatalf("inconsistent state, num predecessors: %d, num phi args: %d", n, numPhiArgs)
+		b.Fatalf("inconsistent state for %v, num predecessors: %d, num phi args: %d", phi, n, numPhiArgs)
 	}
 	phi.Args[i].Uses--
 	phi.Args[i] = phi.Args[n]
@@ -385,10 +392,10 @@
 		return fmt.Sprintf("%v", int8(b.AuxInt))
 	case "uint8":
 		return fmt.Sprintf("%v", uint8(b.AuxInt))
-	default: // type specified but not implemented - print as int64
-		return fmt.Sprintf("%v", b.AuxInt)
 	case "": // no aux int type
 		return ""
+	default: // type specified but not implemented - print as int64
+		return fmt.Sprintf("%v", b.AuxInt)
 	}
 }
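swapSuccessorsByIdx above relies on each successor edge recording the index of its mirror predecessor edge on the other block. A simplified, hypothetical model of that invariant (the edge/block types here are illustrative, not the compiler's):

package main

import "fmt"

type edge struct {
	b *block // block on the other end of the edge
	i int    // index of the reverse edge in that block's slice
}

type block struct {
	name  string
	succs []edge
	preds []edge
}

// swapSuccessorsByIdx swaps two successor edges and patches the back-pointers
// stored on the predecessor side so the cross-indexing stays consistent.
func (b *block) swapSuccessorsByIdx(x, y int) {
	if x == y {
		return
	}
	ex, ey := b.succs[x], b.succs[y]
	b.succs[x], b.succs[y] = ey, ex
	ex.b.preds[ex.i].i = y // the old x-successor now sits at index y
	ey.b.preds[ey.i].i = x
}

func main() {
	a := &block{name: "a"}
	t := &block{name: "then"}
	e := &block{name: "else"}
	a.succs = []edge{{t, 0}, {e, 0}}
	t.preds = []edge{{a, 0}}
	e.preds = []edge{{a, 1}}
	a.swapSuccessorsByIdx(0, 1)
	fmt.Println(a.succs[0].b.name, e.preds[0].i, t.preds[0].i) // else 0 1
}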
 
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index f34b907..bbfdace 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -317,7 +317,28 @@
 				if !v.Aux.(*ir.Name).Type().HasPointers() {
 					f.Fatalf("vardef must have pointer type %s", v.Aux.(*ir.Name).Type().String())
 				}
-
+			case OpNilCheck:
+				// nil checks have pointer type before scheduling, and
+				// void type after scheduling.
+				if f.scheduled {
+					if v.Uses != 0 {
+						f.Fatalf("nilcheck must have 0 uses %d", v.Uses)
+					}
+					if !v.Type.IsVoid() {
+						f.Fatalf("nilcheck must have void type %s", v.Type.String())
+					}
+				} else {
+					if !v.Type.IsPtrShaped() && !v.Type.IsUintptr() {
+						f.Fatalf("nilcheck must have pointer type %s", v.Type.String())
+					}
+				}
+				if !v.Args[0].Type.IsPtrShaped() && !v.Args[0].Type.IsUintptr() {
+					f.Fatalf("nilcheck must have argument of pointer type %s", v.Args[0].Type.String())
+				}
+				if !v.Args[1].Type.IsMemory() {
+					f.Fatalf("bad arg 1 type to %s: want mem, have %s",
+						v.Op, v.Args[1].Type.String())
+				}
 			}
 
 			// TODO: check for cycles in values
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index 8618cf3..d125891 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -472,11 +472,12 @@
 	{name: "nilcheckelim", fn: nilcheckelim},
 	{name: "prove", fn: prove},
 	{name: "early fuse", fn: fuseEarly},
-	{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
 	{name: "expand calls", fn: expandCalls, required: true},
+	{name: "decompose builtin", fn: postExpandCallsDecompose, required: true},
 	{name: "softfloat", fn: softfloat, required: true},
 	{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
 	{name: "dead auto elim", fn: elimDeadAutosGeneric},
+	{name: "sccp", fn: sccp},
 	{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
 	{name: "check bce", fn: checkbce},
 	{name: "branchelim", fn: branchelim},
@@ -508,7 +509,6 @@
 	{name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register
 	{name: "regalloc", fn: regalloc, required: true},   // allocate int & float registers + stack slots
 	{name: "loop rotate", fn: loopRotate},
-	{name: "stackframe", fn: stackframe, required: true},
 	{name: "trim", fn: trim}, // remove empty blocks
 }
 
@@ -547,6 +547,8 @@
 	{"generic cse", "tighten"},
 	// checkbce needs the values removed
 	{"generic deadcode", "check bce"},
+	// decompose builtin now also cleans up after expand calls
+	{"expand calls", "decompose builtin"},
 	// don't run optimization pass until we've decomposed builtin objects
 	{"decompose builtin", "late opt"},
 	// decompose builtin is the last pass that may introduce new float ops, so run softfloat after it
@@ -577,8 +579,6 @@
 	{"flagalloc", "regalloc"},
 	// loopRotate will confuse regalloc.
 	{"regalloc", "loop rotate"},
-	// stackframe needs to know about spilled registers.
-	{"regalloc", "stackframe"},
 	// trim needs regalloc to be done first.
 	{"regalloc", "trim"},
 	// memcombine works better if fuse happens first, to help merge stores.
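The passOrder entries above are pairwise "must run before" constraints. A rough sketch, under invented names, of how such constraints can be validated against a pass list:

package main

import "fmt"

type constraint struct{ before, after string }

// checkOrder verifies that every constraint's "before" pass precedes its
// "after" pass in the given pass list.
func checkOrder(passes []string, cs []constraint) error {
	idx := map[string]int{}
	for i, p := range passes {
		idx[p] = i
	}
	for _, c := range cs {
		if idx[c.before] >= idx[c.after] {
			return fmt.Errorf("%q must run before %q", c.before, c.after)
		}
	}
	return nil
}

func main() {
	passes := []string{"expand calls", "decompose builtin", "softfloat"}
	cs := []constraint{
		{"expand calls", "decompose builtin"},
		{"decompose builtin", "softfloat"},
	}
	fmt.Println(checkOrder(passes, cs)) // <nil>
}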
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 43f9f0a..debcf1a 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -143,23 +143,13 @@
 type Frontend interface {
 	Logger
 
-	// CanSSA reports whether variables of type t are SSA-able.
-	CanSSA(t *types.Type) bool
-
 	// StringData returns a symbol pointing to the given string's contents.
 	StringData(string) *obj.LSym
 
-	// Auto returns a Node for an auto variable of the given type.
-	// The SSA compiler uses this function to allocate space for spills.
-	Auto(src.XPos, *types.Type) *ir.Name
-
 	// Given the name for a compound type, returns the name we should use
 	// for the parts of that compound type.
 	SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
 
-	// AllocFrame assigns frame offsets to all live auto variables.
-	AllocFrame(f *Func)
-
 	// Syslook returns a symbol of the runtime function/variable with the
 	// given name.
 	Syslook(string) *obj.LSym
@@ -167,9 +157,6 @@
 	// UseWriteBarrier reports whether write barrier is enabled
 	UseWriteBarrier() bool
 
-	// MyImportPath provides the import name (roughly, the package) for the function being compiled.
-	MyImportPath() string
-
 	// Func returns the ir.Func of the function being compiled.
 	Func() *ir.Func
 }
@@ -296,6 +283,8 @@
 		c.registers = registersLOONG64[:]
 		c.gpRegMask = gpRegMaskLOONG64
 		c.fpRegMask = fpRegMaskLOONG64
+		c.intParamRegs = paramIntRegLOONG64
+		c.floatParamRegs = paramFloatRegLOONG64
 		c.FPReg = framepointerRegLOONG64
 		c.LinkReg = linkRegLOONG64
 		c.hasGReg = true
@@ -374,8 +363,8 @@
 		c.floatParamRegs = nil // no FP registers in softfloat mode
 	}
 
-	c.ABI0 = abi.NewABIConfig(0, 0, ctxt.Arch.FixedFrameSize)
-	c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.Arch.FixedFrameSize)
+	c.ABI0 = abi.NewABIConfig(0, 0, ctxt.Arch.FixedFrameSize, 0)
+	c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.Arch.FixedFrameSize, 1)
 
 	// On Plan 9, floating point operations are not allowed in note handler.
 	if buildcfg.GOOS == "plan9" {
diff --git a/src/cmd/compile/internal/ssa/cse_test.go b/src/cmd/compile/internal/ssa/cse_test.go
index 813ebe4..7d3e44f 100644
--- a/src/cmd/compile/internal/ssa/cse_test.go
+++ b/src/cmd/compile/internal/ssa/cse_test.go
@@ -6,7 +6,6 @@
 
 import (
 	"cmd/compile/internal/types"
-	"cmd/internal/src"
 	"testing"
 )
 
@@ -22,7 +21,7 @@
 	arg1Aux := &tstAux{"arg1-aux"}
 	arg2Aux := &tstAux{"arg2-aux"}
 	arg3Aux := &tstAux{"arg3-aux"}
-	a := c.Frontend().Auto(src.NoXPos, c.config.Types.Int8.PtrTo())
+	a := c.Temp(c.config.Types.Int8.PtrTo())
 
 	// construct lots of values with args that have aux values and place
 	// them in an order that triggers the bug
@@ -93,7 +92,7 @@
 // TestZCSE tests the zero arg cse.
 func TestZCSE(t *testing.T) {
 	c := testConfig(t)
-	a := c.Frontend().Auto(src.NoXPos, c.config.Types.Int8.PtrTo())
+	a := c.Temp(c.config.Types.Int8.PtrTo())
 
 	fun := c.Fun("entry",
 		Bloc("entry",
diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go
index 52cc7f2..3bd1737 100644
--- a/src/cmd/compile/internal/ssa/deadcode.go
+++ b/src/cmd/compile/internal/ssa/deadcode.go
@@ -110,16 +110,15 @@
 			}
 		}
 		for _, v := range b.Values {
-			if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects) && !live[v.ID] {
+			if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects || opcodeTable[v.Op].nilCheck) && !live[v.ID] {
 				live[v.ID] = true
 				q = append(q, v)
 				if v.Pos.IsStmt() != src.PosNotStmt {
 					liveOrderStmts = append(liveOrderStmts, v)
 				}
 			}
-			if v.Type.IsVoid() && !live[v.ID] {
-				// The only Void ops are nil checks and inline marks.  We must keep these.
-				if v.Op == OpInlMark && !liveInlIdx[int(v.AuxInt)] {
+			if v.Op == OpInlMark {
+				if !liveInlIdx[int(v.AuxInt)] {
 					// We don't need marks for bodies that
 					// have been completely optimized away.
 					// TODO: save marks only for bodies which
@@ -313,6 +312,8 @@
 
 // removeEdge removes the i'th outgoing edge from b (and
 // the corresponding incoming edge from b.Succs[i].b).
+// Note that this potentially reorders successors of b, so it
+// must be used very carefully.
 func (b *Block) removeEdge(i int) {
 	e := b.Succs[i]
 	c := e.b
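The deadcode change above seeds liveness from calls, side-effecting ops, and nil checks, then propagates it backwards to their arguments. A toy worklist version of that rule, with made-up value fields:

package main

import "fmt"

type value struct {
	id          int
	call        bool
	sideEffects bool
	nilCheck    bool
	args        []*value
}

// markLive seeds the live set with values that must be kept, then walks the
// worklist marking every argument they depend on.
func markLive(vals []*value) map[int]bool {
	live := map[int]bool{}
	var q []*value
	for _, v := range vals {
		if v.call || v.sideEffects || v.nilCheck {
			live[v.id] = true
			q = append(q, v)
		}
	}
	for len(q) > 0 {
		v := q[len(q)-1]
		q = q[:len(q)-1]
		for _, a := range v.args {
			if !live[a.id] {
				live[a.id] = true
				q = append(q, a)
			}
		}
	}
	return live
}

func main() {
	ptr := &value{id: 1}
	chk := &value{id: 2, nilCheck: true, args: []*value{ptr}}
	dead := &value{id: 3}
	fmt.Println(markLive([]*value{ptr, chk, dead})) // map[1:true 2:true]
}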
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index 648b68a..cb34271 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -73,9 +73,9 @@
 		}
 
 		// Walk backwards looking for dead stores. Keep track of shadowed addresses.
-		// A "shadowed address" is a pointer and a size describing a memory region that
-		// is known to be written. We keep track of shadowed addresses in the shadowed
-		// map, mapping the ID of the address to the size of the shadowed region.
+		// A "shadowed address" is a pointer, offset, and size describing a memory region that
+		// is known to be written. We keep track of shadowed addresses in the shadowed map,
+		// mapping the ID of the address to a shadowRange where future writes will happen.
 		// Since we're walking backwards, writes to a shadowed region are useless,
 		// as they will be immediately overwritten.
 		shadowed.clear()
@@ -88,13 +88,20 @@
 			shadowed.clear()
 		}
 		if v.Op == OpStore || v.Op == OpZero {
+			ptr := v.Args[0]
+			var off int64
+			for ptr.Op == OpOffPtr { // Walk to base pointer
+				off += ptr.AuxInt
+				ptr = ptr.Args[0]
+			}
 			var sz int64
 			if v.Op == OpStore {
 				sz = v.Aux.(*types.Type).Size()
 			} else { // OpZero
 				sz = v.AuxInt
 			}
-			if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz {
+			sr := shadowRange(shadowed.get(ptr.ID))
+			if sr.contains(off, off+sz) {
 				// Modify the store/zero into a copy of the memory state,
 				// effectively eliding the store operation.
 				if v.Op == OpStore {
@@ -108,10 +115,8 @@
 				v.AuxInt = 0
 				v.Op = OpCopy
 			} else {
-				if sz > 0x7fffffff { // work around sparseMap's int32 value type
-					sz = 0x7fffffff
-				}
-				shadowed.set(v.Args[0].ID, int32(sz))
+				// Extend shadowed region.
+				shadowed.set(ptr.ID, int32(sr.merge(off, off+sz)))
 			}
 		}
 		// walk to previous store
@@ -131,6 +136,49 @@
 	}
 }
 
+// A shadowRange encodes a set of byte offsets [lo():hi()] from
+// a given pointer that will be written to later in the block.
+// A zero shadowRange encodes an empty shadowed range (and so
+// does a -1 shadowRange, which is what sparsemap.get returns
+// on a failed lookup).
+type shadowRange int32
+
+func (sr shadowRange) lo() int64 {
+	return int64(sr & 0xffff)
+}
+func (sr shadowRange) hi() int64 {
+	return int64((sr >> 16) & 0xffff)
+}
+
+// contains reports whether [lo:hi] is completely within sr.
+func (sr shadowRange) contains(lo, hi int64) bool {
+	return lo >= sr.lo() && hi <= sr.hi()
+}
+
+// merge returns the union of sr and [lo:hi].
+// merge is allowed to return something smaller than the union.
+func (sr shadowRange) merge(lo, hi int64) shadowRange {
+	if lo < 0 || hi > 0xffff {
+		// Ignore offsets that are too large or small.
+		return sr
+	}
+	if sr.lo() == sr.hi() {
+		// Old range is empty - use new one.
+		return shadowRange(lo + hi<<16)
+	}
+	if hi < sr.lo() || lo > sr.hi() {
+		// The two regions don't overlap or abut, so we would
+		// have to keep track of multiple disjoint ranges.
+		// Because we can only keep one, keep the larger one.
+		if sr.hi()-sr.lo() >= hi-lo {
+			return sr
+		}
+		return shadowRange(lo + hi<<16)
+	}
+	// Regions overlap or abut - compute the union.
+	return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16)
+}
+
 // elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
 // we track the operations that the address of each auto reaches and if it only
 // reaches stores then we delete all the stores. The other operations will then
@@ -201,7 +249,7 @@
 		}
 
 		if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 {
-			// Nil check has no use, but we need to keep it.
+			// We need to keep nil checks even if they have no use.
 			// Also keep calls and values that have side effects.
 			return
 		}
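The shadowRange type added above packs one shadowed byte window into an int32: the low 16 bits hold lo and the next 16 bits hold hi. A self-contained illustration of that packing, reduced to lo/hi/contains only:

package main

import "fmt"

type shadowRange int32

func (sr shadowRange) lo() int64 { return int64(sr & 0xffff) }
func (sr shadowRange) hi() int64 { return int64((sr >> 16) & 0xffff) }

// contains reports whether [lo:hi] falls entirely within the shadowed window.
func (sr shadowRange) contains(lo, hi int64) bool {
	return lo >= sr.lo() && hi <= sr.hi()
}

func main() {
	sr := shadowRange(8 + 24<<16) // shadow bytes 8 through 24
	fmt.Println(sr.lo(), sr.hi(), sr.contains(8, 16), sr.contains(0, 8)) // 8 24 true false
}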
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index 6393466..05a7278 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -42,7 +42,10 @@
 	OptDcl []*ir.Name
 
 	// Filled in by the user. Translates Block and Value ID to PC.
-	GetPC func(ID, ID) int64
+	//
+	// NOTE: block is only used if value is BlockStart.ID or BlockEnd.ID.
+	// Otherwise, it is ignored.
+	GetPC func(block, value ID) int64
 }
 
 type BlockDebug struct {
@@ -70,8 +73,8 @@
 	return fmt.Sprintf("0x%x.%d.%d", ls.Registers, ls.stackOffsetValue(), int32(ls.StackOffset)&1)
 }
 
-func (loc liveSlot) absent() bool {
-	return loc.Registers == 0 && !loc.onStack()
+func (ls liveSlot) absent() bool {
+	return ls.Registers == 0 && !ls.onStack()
 }
 
 // StackOffset encodes whether a value is on the stack and if so, where.
@@ -433,7 +436,7 @@
 // synthesizes new (dead) values for the non-live params or the
 // non-live pieces of partially live params.
 func PopulateABIInRegArgOps(f *Func) {
-	pri := f.ABISelf.ABIAnalyzeFuncType(f.Type.FuncType())
+	pri := f.ABISelf.ABIAnalyzeFuncType(f.Type)
 
 	// When manufacturing new slots that correspond to splits of
 	// composite parameters, we want to avoid creating a new sub-slot
@@ -519,7 +522,7 @@
 		if !isNamedRegParam(inp) {
 			continue
 		}
-		n := inp.Name.(*ir.Name)
+		n := inp.Name
 
 		// Param is spread across one or more registers. Walk through
 		// each piece to see whether we've seen an arg reg op for it.
@@ -1368,7 +1371,7 @@
 
 	// Flush any leftover entries live at the end of the last block.
 	for varID := range state.lists {
-		state.writePendingEntry(VarID(varID), state.f.Blocks[len(state.f.Blocks)-1].ID, FuncEnd.ID)
+		state.writePendingEntry(VarID(varID), -1, FuncEnd.ID)
 		list := state.lists[varID]
 		if state.loggingLevel > 0 {
 			if len(list) == 0 {
@@ -1734,7 +1737,7 @@
 	if p.Name == nil {
 		return false
 	}
-	n := p.Name.(*ir.Name)
+	n := p.Name
 	if n.Sym() == nil || n.Sym().IsBlank() {
 		return false
 	}
@@ -1754,7 +1757,7 @@
 // each input param reg will be spilled in the prolog).
 func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
 
-	pri := f.ABISelf.ABIAnalyzeFuncType(f.Type.FuncType())
+	pri := f.ABISelf.ABIAnalyzeFuncType(f.Type)
 
 	// Look to see if we have any named register-promoted parameters.
 	// If there are none, bail early and let the caller sort things
@@ -1790,7 +1793,7 @@
 			continue
 		}
 
-		n := inp.Name.(*ir.Name)
+		n := inp.Name
 		sl := LocalSlot{N: n, Type: inp.Type, Off: 0}
 		rval.Vars = append(rval.Vars, n)
 		rval.Slots = append(rval.Slots, sl)
diff --git a/src/cmd/compile/internal/ssa/debug_lines_test.go b/src/cmd/compile/internal/ssa/debug_lines_test.go
index cf11510..af9e2a3 100644
--- a/src/cmd/compile/internal/ssa/debug_lines_test.go
+++ b/src/cmd/compile/internal/ssa/debug_lines_test.go
@@ -44,7 +44,7 @@
 
 func hasRegisterABI() bool {
 	switch testGoArch() {
-	case "amd64", "arm64", "ppc64", "ppc64le", "riscv":
+	case "amd64", "arm64", "loong64", "ppc64", "ppc64le", "riscv":
 		return true
 	}
 	return false
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index 3afd73e..b0788f1 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -11,83 +11,829 @@
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
 	"fmt"
-	"sort"
 )
 
-type selKey struct {
-	from          *Value // what is selected from
-	offsetOrIndex int64  // whatever is appropriate for the selector
-	size          int64
-	typ           *types.Type
+func postExpandCallsDecompose(f *Func) {
+	decomposeUser(f)    // redo user decompose to clean up after expand calls
+	decomposeBuiltIn(f) // handles both regular decomposition and cleanup.
 }
 
-type Abi1RO uint8 // An offset within a parameter's slice of register indices, for abi1.
+func expandCalls(f *Func) {
+	// Convert each aggregate arg to a call into "dismantle aggregate, store/pass parts"
+	// Convert each aggregate result from a call into "assemble aggregate from parts"
+	// Convert each multivalue exit into "dismantle aggregate, store/return parts"
+	// Convert incoming aggregate arg into assembly of parts.
+	// Feed modified AST to decompose.
+
+	sp, _ := f.spSb()
+
+	x := &expandState{
+		f:               f,
+		debug:           f.pass.debug,
+		regSize:         f.Config.RegSize,
+		sp:              sp,
+		typs:            &f.Config.Types,
+		wideSelects:     make(map[*Value]*Value),
+		commonArgs:      make(map[selKey]*Value),
+		commonSelectors: make(map[selKey]*Value),
+		memForCall:      make(map[ID]*Value),
+	}
+
+	// For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
+	if f.Config.BigEndian {
+		x.firstOp = OpInt64Hi
+		x.secondOp = OpInt64Lo
+		x.firstType = x.typs.Int32
+		x.secondType = x.typs.UInt32
+	} else {
+		x.firstOp = OpInt64Lo
+		x.secondOp = OpInt64Hi
+		x.firstType = x.typs.UInt32
+		x.secondType = x.typs.Int32
+	}
+
+	// Defer select processing until after all calls and selects are seen.
+	var selects []*Value
+	var calls []*Value
+	var args []*Value
+	var exitBlocks []*Block
+
+	var m0 *Value
+
+	// Accumulate lists of calls, args, selects, and exit blocks to process,
+	// note "wide" selects consumed by stores,
+	// rewrite mem for each call,
+	// rewrite each OpSelectNAddr.
+	for _, b := range f.Blocks {
+		for _, v := range b.Values {
+			switch v.Op {
+			case OpInitMem:
+				m0 = v
+
+			case OpClosureLECall, OpInterLECall, OpStaticLECall, OpTailLECall:
+				calls = append(calls, v)
+
+			case OpArg:
+				args = append(args, v)
+
+			case OpStore:
+				if a := v.Args[1]; a.Op == OpSelectN && !CanSSA(a.Type) {
+					if a.Uses > 1 {
+						panic(fmt.Errorf("Saw double use of wide SelectN %s operand of Store %s",
+							a.LongString(), v.LongString()))
+					}
+					x.wideSelects[a] = v
+				}
+
+			case OpSelectN:
+				if v.Type == types.TypeMem {
+					// rewrite the mem selector in place
+					call := v.Args[0]
+					aux := call.Aux.(*AuxCall)
+					mem := x.memForCall[call.ID]
+					if mem == nil {
+						v.AuxInt = int64(aux.abiInfo.OutRegistersUsed())
+						x.memForCall[call.ID] = v
+					} else {
+						panic(fmt.Errorf("Saw two memories for call %v, %v and %v", call, mem, v))
+					}
+				} else {
+					selects = append(selects, v)
+				}
+
+			case OpSelectNAddr:
+				call := v.Args[0]
+				which := v.AuxInt
+				aux := call.Aux.(*AuxCall)
+				pt := v.Type
+				off := x.offsetFrom(x.f.Entry, x.sp, aux.OffsetOfResult(which), pt)
+				v.copyOf(off)
+			}
+		}
+
+		// rewrite function results from an exit block
+		// values returned by function need to be split out into registers.
+		if isBlockMultiValueExit(b) {
+			exitBlocks = append(exitBlocks, b)
+		}
+	}
+
+	// Convert each aggregate arg into Make of its parts (and so on, to primitive types)
+	for _, v := range args {
+		var rc registerCursor
+		a := x.prAssignForArg(v)
+		aux := x.f.OwnAux
+		regs := a.Registers
+		var offset int64
+		if len(regs) == 0 {
+			offset = a.FrameOffset(aux.abiInfo)
+		}
+		auxBase := x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type))
+		rc.init(regs, aux.abiInfo, nil, auxBase, 0)
+		x.rewriteSelectOrArg(f.Entry.Pos, f.Entry, v, v, m0, v.Type, rc)
+	}
+
+	// Rewrite selects of results (which may be aggregates) into make-aggregates of register/memory-targeted selects
+	for _, v := range selects {
+		if v.Op == OpInvalid {
+			continue
+		}
+
+		call := v.Args[0]
+		aux := call.Aux.(*AuxCall)
+		mem := x.memForCall[call.ID]
+		if mem == nil {
+			mem = call.Block.NewValue1I(call.Pos, OpSelectN, types.TypeMem, int64(aux.abiInfo.OutRegistersUsed()), call)
+			x.memForCall[call.ID] = mem
+		}
+
+		i := v.AuxInt
+		regs := aux.RegsOfResult(i)
+
+		// If this select cannot fit into SSA and is stored, either disaggregate to register stores, or mem-mem move.
+		if store := x.wideSelects[v]; store != nil {
+			// Use the mem that comes from the store operation.
+			storeAddr := store.Args[0]
+			mem := store.Args[2]
+			if len(regs) > 0 {
+				// Cannot do a rewrite that builds up a result from pieces; instead, copy pieces to the store operation.
+				var rc registerCursor
+				rc.init(regs, aux.abiInfo, nil, storeAddr, 0)
+				mem = x.rewriteWideSelectToStores(call.Pos, call.Block, v, mem, v.Type, rc)
+				store.copyOf(mem)
+			} else {
+				// Move directly from AuxBase to store target; rewrite the store instruction.
+				offset := aux.OffsetOfResult(i)
+				auxBase := x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type))
+				// was Store dst, v, mem
+				// now Move dst, auxBase, mem
+				move := store.Block.NewValue3A(store.Pos, OpMove, types.TypeMem, v.Type, storeAddr, auxBase, mem)
+				move.AuxInt = v.Type.Size()
+				store.copyOf(move)
+			}
+			continue
+		}
+
+		var auxBase *Value
+		if len(regs) == 0 {
+			offset := aux.OffsetOfResult(i)
+			auxBase = x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type))
+		}
+		var rc registerCursor
+		rc.init(regs, aux.abiInfo, nil, auxBase, 0)
+		x.rewriteSelectOrArg(call.Pos, call.Block, v, v, mem, v.Type, rc)
+	}
+
+	rewriteCall := func(v *Value, newOp Op, argStart int) {
+		// Break aggregate args passed to call into smaller pieces.
+		x.rewriteCallArgs(v, argStart)
+		v.Op = newOp
+		rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+		v.Type = types.NewResults(append(rts, types.TypeMem))
+	}
+
+	// Rewrite calls
+	for _, v := range calls {
+		switch v.Op {
+		case OpStaticLECall:
+			rewriteCall(v, OpStaticCall, 0)
+		case OpTailLECall:
+			rewriteCall(v, OpTailCall, 0)
+		case OpClosureLECall:
+			rewriteCall(v, OpClosureCall, 2)
+		case OpInterLECall:
+			rewriteCall(v, OpInterCall, 1)
+		}
+	}
+
+	// Rewrite results from exit blocks
+	for _, b := range exitBlocks {
+		v := b.Controls[0]
+		x.rewriteFuncResults(v, b, f.OwnAux)
+		b.SetControl(v)
+	}
+
+}
+
+func (x *expandState) rewriteFuncResults(v *Value, b *Block, aux *AuxCall) {
+	// This is very similar to rewriteCallArgs.
+	// Differences:
+	//   firstArg + preArgs
+	//   sp vs auxBase
+
+	m0 := v.MemoryArg()
+	mem := m0
+
+	allResults := []*Value{}
+	var oldArgs []*Value
+	argsWithoutMem := v.Args[:len(v.Args)-1]
+
+	for j, a := range argsWithoutMem {
+		oldArgs = append(oldArgs, a)
+		i := int64(j)
+		auxType := aux.TypeOfResult(i)
+		auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.NameOfResult(i), x.sp, mem)
+		auxOffset := int64(0)
+		aRegs := aux.RegsOfResult(int64(j))
+		if a.Op == OpDereference {
+			a.Op = OpLoad
+		}
+		var rc registerCursor
+		var result *[]*Value
+		if len(aRegs) > 0 {
+			result = &allResults
+		} else {
+			if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
+				addr := a.Args[0]
+				if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.NameOfResult(i) {
+					continue // Self move to output parameter
+				}
+			}
+		}
+		rc.init(aRegs, aux.abiInfo, result, auxBase, auxOffset)
+		mem = x.decomposeAsNecessary(v.Pos, b, a, mem, rc)
+	}
+	v.resetArgs()
+	v.AddArgs(allResults...)
+	v.AddArg(mem)
+	for _, a := range oldArgs {
+		if a.Uses == 0 {
+			if x.debug > 1 {
+				x.Printf("...marking %v unused\n", a.LongString())
+			}
+			x.invalidateRecursively(a)
+		}
+	}
+	v.Type = types.NewResults(append(abi.RegisterTypes(aux.abiInfo.OutParams()), types.TypeMem))
+	return
+}
+
+func (x *expandState) rewriteCallArgs(v *Value, firstArg int) {
+	if x.debug > 1 {
+		x.indent(3)
+		defer x.indent(-3)
+		x.Printf("rewriteCallArgs(%s; %d)\n", v.LongString(), firstArg)
+	}
+	// Thread the stores on the memory arg
+	aux := v.Aux.(*AuxCall)
+	m0 := v.MemoryArg()
+	mem := m0
+	allResults := []*Value{}
+	oldArgs := []*Value{}
+	argsWithoutMem := v.Args[firstArg : len(v.Args)-1] // Also strip closure/interface Op-specific args
+
+	sp := x.sp
+	if v.Op == OpTailLECall {
+		// For tail call, we unwind the frame before the call so we'll use the caller's
+		// SP.
+		sp = x.f.Entry.NewValue1(src.NoXPos, OpGetCallerSP, x.typs.Uintptr, mem)
+	}
+
+	for i, a := range argsWithoutMem { // skip leading non-parameter SSA Args and trailing mem SSA Arg.
+		oldArgs = append(oldArgs, a)
+		auxI := int64(i)
+		aRegs := aux.RegsOfArg(auxI)
+		aType := aux.TypeOfArg(auxI)
+
+		if a.Op == OpDereference {
+			a.Op = OpLoad
+		}
+		var rc registerCursor
+		var result *[]*Value
+		var aOffset int64
+		if len(aRegs) > 0 {
+			result = &allResults
+		} else {
+			aOffset = aux.OffsetOfArg(auxI)
+		}
+		if v.Op == OpTailLECall && a.Op == OpArg && a.AuxInt == 0 {
+			// It's common for a tail call to pass the same arguments (e.g. a method wrapper),
+			// in which case this would be a self copy. Detect this and optimize it out.
+			n := a.Aux.(*ir.Name)
+			if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.Arch.FixedFrameSize == aOffset {
+				continue
+			}
+		}
+		if x.debug > 1 {
+			x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
+		}
+
+		rc.init(aRegs, aux.abiInfo, result, sp, aOffset)
+		mem = x.decomposeAsNecessary(v.Pos, v.Block, a, mem, rc)
+	}
+	var preArgStore [2]*Value
+	preArgs := append(preArgStore[:0], v.Args[0:firstArg]...)
+	v.resetArgs()
+	v.AddArgs(preArgs...)
+	v.AddArgs(allResults...)
+	v.AddArg(mem)
+	for _, a := range oldArgs {
+		if a.Uses == 0 {
+			x.invalidateRecursively(a)
+		}
+	}
+
+	return
+}
+
+func (x *expandState) decomposePair(pos src.XPos, b *Block, a, mem *Value, t0, t1 *types.Type, o0, o1 Op, rc *registerCursor) *Value {
+	e := b.NewValue1(pos, o0, t0, a)
+	pos = pos.WithNotStmt()
+	mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t0))
+	e = b.NewValue1(pos, o1, t1, a)
+	mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t1))
+	return mem
+}
+
+func (x *expandState) decomposeOne(pos src.XPos, b *Block, a, mem *Value, t0 *types.Type, o0 Op, rc *registerCursor) *Value {
+	e := b.NewValue1(pos, o0, t0, a)
+	pos = pos.WithNotStmt()
+	mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t0))
+	return mem
+}
+
+// decomposeAsNecessary converts a value (perhaps an aggregate) passed to a call or returned by a function,
+// into the appropriate sequence of stores and register assignments to transmit that value in a given ABI, and
+// returns the current memory after this convert/rewrite (it may be the input memory if no stores were needed).
+// 'pos' is the source position all this is tied to
+// 'b' is the enclosing block
+// 'a' is the value to decompose
+// 'm0' is the input memory arg used for the first store (or returned if there are no stores)
+// 'rc' is a registerCursor which identifies the register/memory destination for the value
+func (x *expandState) decomposeAsNecessary(pos src.XPos, b *Block, a, m0 *Value, rc registerCursor) *Value {
+	if x.debug > 1 {
+		x.indent(3)
+		defer x.indent(-3)
+	}
+	at := a.Type
+	if at.Size() == 0 {
+		return m0
+	}
+	if a.Op == OpDereference {
+		a.Op = OpLoad // For purposes of parameter passing expansion, a Dereference is a Load.
+	}
+
+	if !rc.hasRegs() && !CanSSA(at) {
+		dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at))
+		if x.debug > 1 {
+			x.Printf("...recur store %s at %s\n", a.LongString(), dst.LongString())
+		}
+		if a.Op == OpLoad {
+			m0 = b.NewValue3A(pos, OpMove, types.TypeMem, at, dst, a.Args[0], m0)
+			m0.AuxInt = at.Size()
+			return m0
+		} else {
+			panic(fmt.Errorf("Store of not a load"))
+		}
+	}
+
+	mem := m0
+	switch at.Kind() {
+	case types.TARRAY:
+		et := at.Elem()
+		for i := int64(0); i < at.NumElem(); i++ {
+			e := b.NewValue1I(pos, OpArraySelect, et, i, a)
+			pos = pos.WithNotStmt()
+			mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(et))
+		}
+		return mem
+
+	case types.TSTRUCT:
+		for i := 0; i < at.NumFields(); i++ {
+			et := at.Field(i).Type // might need to read offsets from the fields
+			e := b.NewValue1I(pos, OpStructSelect, et, int64(i), a)
+			pos = pos.WithNotStmt()
+			if x.debug > 1 {
+				x.Printf("...recur decompose %s, %v\n", e.LongString(), et)
+			}
+			mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(et))
+		}
+		return mem
+
+	case types.TSLICE:
+		mem = x.decomposeOne(pos, b, a, mem, at.Elem().PtrTo(), OpSlicePtr, &rc)
+		pos = pos.WithNotStmt()
+		mem = x.decomposeOne(pos, b, a, mem, x.typs.Int, OpSliceLen, &rc)
+		return x.decomposeOne(pos, b, a, mem, x.typs.Int, OpSliceCap, &rc)
+
+	case types.TSTRING:
+		return x.decomposePair(pos, b, a, mem, x.typs.BytePtr, x.typs.Int, OpStringPtr, OpStringLen, &rc)
+
+	case types.TINTER:
+		mem = x.decomposeOne(pos, b, a, mem, x.typs.Uintptr, OpITab, &rc)
+		pos = pos.WithNotStmt()
+		// Immediate interfaces cause so many headaches.
+		if a.Op == OpIMake {
+			data := a.Args[1]
+			for data.Op == OpStructMake1 || data.Op == OpArrayMake1 {
+				data = data.Args[0]
+			}
+			return x.decomposeAsNecessary(pos, b, data, mem, rc.next(data.Type))
+		}
+		return x.decomposeOne(pos, b, a, mem, x.typs.BytePtr, OpIData, &rc)
+
+	case types.TCOMPLEX64:
+		return x.decomposePair(pos, b, a, mem, x.typs.Float32, x.typs.Float32, OpComplexReal, OpComplexImag, &rc)
+
+	case types.TCOMPLEX128:
+		return x.decomposePair(pos, b, a, mem, x.typs.Float64, x.typs.Float64, OpComplexReal, OpComplexImag, &rc)
+
+	case types.TINT64:
+		if at.Size() > x.regSize {
+			return x.decomposePair(pos, b, a, mem, x.firstType, x.secondType, x.firstOp, x.secondOp, &rc)
+		}
+	case types.TUINT64:
+		if at.Size() > x.regSize {
+			return x.decomposePair(pos, b, a, mem, x.typs.UInt32, x.typs.UInt32, x.firstOp, x.secondOp, &rc)
+		}
+	}
+
+	// An atomic type, either record the register or store it and update the memory.
+
+	if rc.hasRegs() {
+		if x.debug > 1 {
+			x.Printf("...recur addArg %s\n", a.LongString())
+		}
+		rc.addArg(a)
+	} else {
+		dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at))
+		if x.debug > 1 {
+			x.Printf("...recur store %s at %s\n", a.LongString(), dst.LongString())
+		}
+		mem = b.NewValue3A(pos, OpStore, types.TypeMem, at, dst, a, mem)
+	}
+
+	return mem
+}
+
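decomposeAsNecessary above recurses over array and struct components until it reaches scalar leaves, handing each leaf to a register or a store. A hypothetical, much-reduced model of that recursion, using an invented typ description instead of *types.Type:

package main

import "fmt"

type kind int

const (
	scalarKind kind = iota
	structKind
	arrayKind
)

type typ struct {
	kind   kind
	name   string // for scalars
	fields []typ  // for structs
	elem   *typ   // for arrays
	n      int    // array length
}

// decompose walks an aggregate type and emits one leaf per scalar component,
// in memory order, mirroring the recursion shape of decomposeAsNecessary.
func decompose(t typ, emit func(string)) {
	switch t.kind {
	case structKind:
		for _, f := range t.fields {
			decompose(f, emit)
		}
	case arrayKind:
		for i := 0; i < t.n; i++ {
			decompose(*t.elem, emit)
		}
	default:
		emit(t.name)
	}
}

func main() {
	str := typ{kind: structKind, fields: []typ{
		{kind: scalarKind, name: "ptr"},
		{kind: scalarKind, name: "len"},
	}}
	pair := typ{kind: arrayKind, elem: &str, n: 2}
	decompose(pair, func(leaf string) { fmt.Print(leaf, " ") }) // ptr len ptr len
	fmt.Println()
}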
+// Convert scalar OpArg into the proper OpWhateverArg instruction
+// Convert scalar OpSelectN into perhaps-differently-indexed OpSelectN
+// Convert aggregate OpArg into Make of its parts (which are eventually scalars)
+// Convert aggregate OpSelectN into Make of its parts (which are eventually scalars)
+// Returns the converted value.
+//
+//   - "pos" the position for any generated instructions
+//   - "b" the block for any generated instructions
+//   - "container" the outermost OpArg/OpSelectN
+//   - "a" the instruction to overwrite, if any (only the outermost caller)
+//   - "m0" the memory arg for any loads that are necessary
+//   - "at" the type of the Arg/part
+//   - "rc" the register/memory cursor locating the various parts of the Arg.
+func (x *expandState) rewriteSelectOrArg(pos src.XPos, b *Block, container, a, m0 *Value, at *types.Type, rc registerCursor) *Value {
+
+	if at == types.TypeMem {
+		a.copyOf(m0)
+		return a
+	}
+
+	makeOf := func(a *Value, op Op, args []*Value) *Value {
+		if a == nil {
+			a = b.NewValue0(pos, op, at)
+			a.AddArgs(args...)
+		} else {
+			a.resetArgs()
+			a.Aux, a.AuxInt = nil, 0
+			a.Pos, a.Op, a.Type = pos, op, at
+			a.AddArgs(args...)
+		}
+		return a
+	}
+
+	if at.Size() == 0 {
+		// For consistency, create these values even though they'll ultimately be unused
+		if at.IsArray() {
+			return makeOf(a, OpArrayMake0, nil)
+		}
+		if at.IsStruct() {
+			return makeOf(a, OpStructMake0, nil)
+		}
+		return a
+	}
+
+	sk := selKey{from: container, size: 0, offsetOrIndex: rc.storeOffset, typ: at}
+	dupe := x.commonSelectors[sk]
+	if dupe != nil {
+		if a == nil {
+			return dupe
+		}
+		a.copyOf(dupe)
+		return a
+	}
+
+	var argStore [10]*Value
+	args := argStore[:0]
+
+	addArg := func(a0 *Value) {
+		if a0 == nil {
+			as := "<nil>"
+			if a != nil {
+				as = a.LongString()
+			}
+			panic(fmt.Errorf("a0 should not be nil, a=%v, container=%v, at=%v", as, container.LongString(), at))
+		}
+		args = append(args, a0)
+	}
+
+	switch at.Kind() {
+	case types.TARRAY:
+		et := at.Elem()
+		for i := int64(0); i < at.NumElem(); i++ {
+			e := x.rewriteSelectOrArg(pos, b, container, nil, m0, et, rc.next(et))
+			addArg(e)
+		}
+		a = makeOf(a, OpArrayMake1, args)
+		x.commonSelectors[sk] = a
+		return a
+
+	case types.TSTRUCT:
+		// Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here.
+		for i := 0; i < at.NumFields(); i++ {
+			et := at.Field(i).Type
+			e := x.rewriteSelectOrArg(pos, b, container, nil, m0, et, rc.next(et))
+			if e == nil {
+				panic(fmt.Errorf("nil e, et=%v, et.Size()=%d, i=%d", et, et.Size(), i))
+			}
+			addArg(e)
+			pos = pos.WithNotStmt()
+		}
+		if at.NumFields() > 4 {
+			panic(fmt.Errorf("Too many fields (%d, %d bytes), container=%s", at.NumFields(), at.Size(), container.LongString()))
+		}
+		a = makeOf(a, StructMakeOp(at.NumFields()), args)
+		x.commonSelectors[sk] = a
+		return a
+
+	case types.TSLICE:
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, at.Elem().PtrTo(), rc.next(x.typs.BytePtr)))
+		pos = pos.WithNotStmt()
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int)))
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int)))
+		a = makeOf(a, OpSliceMake, args)
+		x.commonSelectors[sk] = a
+		return a
+
+	case types.TSTRING:
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr)))
+		pos = pos.WithNotStmt()
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int)))
+		a = makeOf(a, OpStringMake, args)
+		x.commonSelectors[sk] = a
+		return a
+
+	case types.TINTER:
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Uintptr, rc.next(x.typs.Uintptr)))
+		pos = pos.WithNotStmt()
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr)))
+		a = makeOf(a, OpIMake, args)
+		x.commonSelectors[sk] = a
+		return a
+
+	case types.TCOMPLEX64:
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float32, rc.next(x.typs.Float32)))
+		pos = pos.WithNotStmt()
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float32, rc.next(x.typs.Float32)))
+		a = makeOf(a, OpComplexMake, args)
+		x.commonSelectors[sk] = a
+		return a
+
+	case types.TCOMPLEX128:
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float64, rc.next(x.typs.Float64)))
+		pos = pos.WithNotStmt()
+		addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float64, rc.next(x.typs.Float64)))
+		a = makeOf(a, OpComplexMake, args)
+		x.commonSelectors[sk] = a
+		return a
+
+	case types.TINT64:
+		if at.Size() > x.regSize {
+			addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.firstType, rc.next(x.firstType)))
+			pos = pos.WithNotStmt()
+			addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.secondType, rc.next(x.secondType)))
+			if !x.f.Config.BigEndian {
+				// Int64Make args are big, little
+				args[0], args[1] = args[1], args[0]
+			}
+			a = makeOf(a, OpInt64Make, args)
+			x.commonSelectors[sk] = a
+			return a
+		}
+	case types.TUINT64:
+		if at.Size() > x.regSize {
+			addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.UInt32, rc.next(x.typs.UInt32)))
+			pos = pos.WithNotStmt()
+			addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.UInt32, rc.next(x.typs.UInt32)))
+			if !x.f.Config.BigEndian {
+				// Int64Make args are big, little
+				args[0], args[1] = args[1], args[0]
+			}
+			a = makeOf(a, OpInt64Make, args)
+			x.commonSelectors[sk] = a
+			return a
+		}
+	}
+
+	// An atomic type, either record the register or store it and update the memory.
+
+	// Depending on the container Op, the leaves are either OpSelectN or OpArg{Int,Float}Reg
+
+	if container.Op == OpArg {
+		if rc.hasRegs() {
+			op, i := rc.ArgOpAndRegisterFor()
+			name := container.Aux.(*ir.Name)
+			a = makeOf(a, op, nil)
+			a.AuxInt = i
+			a.Aux = &AuxNameOffset{name, rc.storeOffset}
+		} else {
+			key := selKey{container, rc.storeOffset, at.Size(), at}
+			w := x.commonArgs[key]
+			if w != nil && w.Uses != 0 {
+				if a == nil {
+					a = w
+				} else {
+					a.copyOf(w)
+				}
+			} else {
+				if a == nil {
+					aux := container.Aux
+					auxInt := container.AuxInt + rc.storeOffset
+					a = container.Block.NewValue0IA(container.Pos, OpArg, at, auxInt, aux)
+				} else {
+					// do nothing, the original should be okay.
+				}
+				x.commonArgs[key] = a
+			}
+		}
+	} else if container.Op == OpSelectN {
+		call := container.Args[0]
+		aux := call.Aux.(*AuxCall)
+		which := container.AuxInt
+
+		if at == types.TypeMem {
+			if a != m0 || a != x.memForCall[call.ID] {
+				panic(fmt.Errorf("Memories %s, %s, and %s should all be equal after %s", a.LongString(), m0.LongString(), x.memForCall[call.ID], call.LongString()))
+			}
+		} else if rc.hasRegs() {
+			firstReg := uint32(0)
+			for i := 0; i < int(which); i++ {
+				firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers))
+			}
+			reg := int64(rc.nextSlice + Abi1RO(firstReg))
+			a = makeOf(a, OpSelectN, []*Value{call})
+			a.AuxInt = reg
+		} else {
+			off := x.offsetFrom(x.f.Entry, x.sp, rc.storeOffset+aux.OffsetOfResult(which), types.NewPtr(at))
+			a = makeOf(a, OpLoad, []*Value{off, m0})
+		}
+
+	} else {
+		panic(fmt.Errorf("Expected container OpArg or OpSelectN, saw %v instead", container.LongString()))
+	}
+
+	x.commonSelectors[sk] = a
+	return a
+}
+
+// rewriteWideSelectToStores handles the case of a SelectN'd result from a function call that is too large for SSA,
+// but is transferred in registers.  In this case the register cursor tracks both operands: the register sources and
+// the memory destinations.
+// It returns the memory flowing out of the last store.
+func (x *expandState) rewriteWideSelectToStores(pos src.XPos, b *Block, container, m0 *Value, at *types.Type, rc registerCursor) *Value {
+
+	if at.Size() == 0 {
+		return m0
+	}
+
+	switch at.Kind() {
+	case types.TARRAY:
+		et := at.Elem()
+		for i := int64(0); i < at.NumElem(); i++ {
+			m0 = x.rewriteWideSelectToStores(pos, b, container, m0, et, rc.next(et))
+		}
+		return m0
+
+	case types.TSTRUCT:
+		// Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here.
+		for i := 0; i < at.NumFields(); i++ {
+			et := at.Field(i).Type
+			m0 = x.rewriteWideSelectToStores(pos, b, container, m0, et, rc.next(et))
+			pos = pos.WithNotStmt()
+		}
+		return m0
+
+	case types.TSLICE:
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, at.Elem().PtrTo(), rc.next(x.typs.BytePtr))
+		pos = pos.WithNotStmt()
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int))
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int))
+		return m0
+
+	case types.TSTRING:
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr))
+		pos = pos.WithNotStmt()
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int))
+		return m0
+
+	case types.TINTER:
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Uintptr, rc.next(x.typs.Uintptr))
+		pos = pos.WithNotStmt()
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr))
+		return m0
+
+	case types.TCOMPLEX64:
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float32, rc.next(x.typs.Float32))
+		pos = pos.WithNotStmt()
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float32, rc.next(x.typs.Float32))
+		return m0
+
+	case types.TCOMPLEX128:
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float64, rc.next(x.typs.Float64))
+		pos = pos.WithNotStmt()
+		m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float64, rc.next(x.typs.Float64))
+		return m0
+
+	case types.TINT64:
+		if at.Size() > x.regSize {
+			m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.firstType, rc.next(x.firstType))
+			pos = pos.WithNotStmt()
+			m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.secondType, rc.next(x.secondType))
+			return m0
+		}
+	case types.TUINT64:
+		if at.Size() > x.regSize {
+			m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.UInt32, rc.next(x.typs.UInt32))
+			pos = pos.WithNotStmt()
+			m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.UInt32, rc.next(x.typs.UInt32))
+			return m0
+		}
+	}
+
+	// TODO could change treatment of too-large OpArg, would deal with it here.
+	if container.Op == OpSelectN {
+		call := container.Args[0]
+		aux := call.Aux.(*AuxCall)
+		which := container.AuxInt
+
+		if rc.hasRegs() {
+			firstReg := uint32(0)
+			for i := 0; i < int(which); i++ {
+				firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers))
+			}
+			reg := int64(rc.nextSlice + Abi1RO(firstReg))
+			a := b.NewValue1I(pos, OpSelectN, at, reg, call)
+			dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at))
+			m0 = b.NewValue3A(pos, OpStore, types.TypeMem, at, dst, a, m0)
+		} else {
+			panic(fmt.Errorf("Expected rc to have registers"))
+		}
+	} else {
+		panic(fmt.Errorf("Expected container OpSelectN, saw %v instead", container.LongString()))
+	}
+	return m0
+}
 
 func isBlockMultiValueExit(b *Block) bool {
 	return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && b.Controls[0] != nil && b.Controls[0].Op == OpMakeResult
 }
 
-func badVal(s string, v *Value) error {
-	return fmt.Errorf("%s %s", s, v.LongString())
-}
-
-// removeTrivialWrapperTypes unwraps layers of
-// struct { singleField SomeType } and [1]SomeType
-// until a non-wrapper type is reached.  This is useful
-// for working with assignments to/from interface data
-// fields (either second operand to OpIMake or OpIData)
-// where the wrapping or type conversion can be elided
-// because of type conversions/assertions in source code
-// that do not appear in SSA.
-func removeTrivialWrapperTypes(t *types.Type) *types.Type {
-	for {
-		if t.IsStruct() && t.NumFields() == 1 {
-			t = t.Field(0).Type
-			continue
-		}
-		if t.IsArray() && t.NumElem() == 1 {
-			t = t.Elem()
-			continue
-		}
-		break
-	}
-	return t
-}
+type Abi1RO uint8 // An offset within a parameter's slice of register indices, for abi1.
 
 // A registerCursor tracks which register is used for an Arg or regValues, or a piece of such.
 type registerCursor struct {
-	// TODO(register args) convert this to a generalized target cursor.
-	storeDest *Value // if there are no register targets, then this is the base of the store.
-	regsLen   int    // the number of registers available for this Arg/result (which is all in registers or not at all)
-	nextSlice Abi1RO // the next register/register-slice offset
-	config    *abi.ABIConfig
-	regValues *[]*Value // values assigned to registers accumulate here
+	storeDest   *Value // if there are no register targets, then this is the base of the store.
+	storeOffset int64
+	regs        []abi.RegIndex // the registers available for this Arg/result (which is all in registers or not at all)
+	nextSlice   Abi1RO         // the next register/register-slice offset
+	config      *abi.ABIConfig
+	regValues   *[]*Value // values assigned to registers accumulate here
 }
 
-func (rc *registerCursor) String() string {
+func (c *registerCursor) String() string {
 	dest := "<none>"
-	if rc.storeDest != nil {
-		dest = rc.storeDest.String()
+	if c.storeDest != nil {
+		dest = fmt.Sprintf("%s+%d", c.storeDest.String(), c.storeOffset)
 	}
 	regs := "<none>"
-	if rc.regValues != nil {
+	if c.regValues != nil {
 		regs = ""
-		for i, x := range *rc.regValues {
+		for i, x := range *c.regValues {
 			if i > 0 {
 				regs = regs + "; "
 			}
 			regs = regs + x.LongString()
 		}
 	}
+
 	// not printing the config because that has not been useful
-	return fmt.Sprintf("RCSR{storeDest=%v, regsLen=%d, nextSlice=%d, regValues=[%s]}", dest, rc.regsLen, rc.nextSlice, regs)
+	return fmt.Sprintf("RCSR{storeDest=%v, regsLen=%d, nextSlice=%d, regValues=[%s]}", dest, len(c.regs), c.nextSlice, regs)
 }
 
 // next effectively post-increments the register cursor; the receiver is advanced,
-// the old value is returned.
+// the (aligned) old value is returned.
 func (c *registerCursor) next(t *types.Type) registerCursor {
+	c.storeOffset = types.RoundUp(c.storeOffset, t.Alignment())
 	rc := *c
-	if int(c.nextSlice) < c.regsLen {
+	c.storeOffset = types.RoundUp(c.storeOffset+t.Size(), t.Alignment())
+	if int(c.nextSlice) < len(c.regs) {
 		w := c.config.NumParamRegs(t)
 		c.nextSlice += Abi1RO(w)
 	}
@@ -101,43 +847,11 @@
 	return rc
 }
 
-const (
-	// Register offsets for fields of built-in aggregate types; the ones not listed are zero.
-	RO_complex_imag = 1
-	RO_string_len   = 1
-	RO_slice_len    = 1
-	RO_slice_cap    = 2
-	RO_iface_data   = 1
-)
-
-func (x *expandState) regWidth(t *types.Type) Abi1RO {
-	return Abi1RO(x.abi1.NumParamRegs(t))
-}
-
-// regOffset returns the register offset of the i'th element of type t
-func (x *expandState) regOffset(t *types.Type, i int) Abi1RO {
-	// TODO maybe cache this in a map if profiling recommends.
-	if i == 0 {
-		return 0
-	}
-	if t.IsArray() {
-		return Abi1RO(i) * x.regWidth(t.Elem())
-	}
-	if t.IsStruct() {
-		k := Abi1RO(0)
-		for j := 0; j < i; j++ {
-			k += x.regWidth(t.FieldType(j))
-		}
-		return k
-	}
-	panic("Haven't implemented this case yet, do I need to?")
-}
-
 // at returns the register cursor for component i of t, where the first
 // component is numbered 0.
 func (c *registerCursor) at(t *types.Type, i int) registerCursor {
 	rc := *c
-	if i == 0 || c.regsLen == 0 {
+	if i == 0 || len(c.regs) == 0 {
 		return rc
 	}
 	if t.IsArray() {
@@ -154,13 +868,11 @@
 	panic("Haven't implemented this case yet, do I need to?")
 }
 
-func (c *registerCursor) init(regs []abi.RegIndex, info *abi.ABIParamResultInfo, result *[]*Value, storeDest *Value) {
-	c.regsLen = len(regs)
+func (c *registerCursor) init(regs []abi.RegIndex, info *abi.ABIParamResultInfo, result *[]*Value, storeDest *Value, storeOffset int64) {
+	c.regs = regs
 	c.nextSlice = 0
-	if len(regs) == 0 {
-		c.storeDest = storeDest // only save this if there are no registers, will explode if misused.
-		return
-	}
+	c.storeOffset = storeOffset
+	c.storeDest = storeDest
 	c.config = info.Config()
 	c.regValues = result
 }
@@ -170,29 +882,48 @@
 }
 
 func (c *registerCursor) hasRegs() bool {
-	return c.regsLen > 0
+	return len(c.regs) > 0
+}
+
+func (c *registerCursor) ArgOpAndRegisterFor() (Op, int64) {
+	r := c.regs[c.nextSlice]
+	return ArgOpAndRegisterFor(r, c.config)
+}
+
+// ArgOpAndRegisterFor converts an abi register index into an ssa Op and corresponding
+// arg register index.
+func ArgOpAndRegisterFor(r abi.RegIndex, abiConfig *abi.ABIConfig) (Op, int64) {
+	i := abiConfig.FloatIndexFor(r)
+	if i >= 0 { // float PR
+		return OpArgFloatReg, i
+	}
+	return OpArgIntReg, int64(r)
+}
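ArgOpAndRegisterFor above distinguishes integer from float argument registers by index. A small sketch of the same idea with an assumed register split (the numIntRegs constant is illustrative only; the real split comes from the ABIConfig):

package main

import "fmt"

const numIntRegs = 9 // illustrative split between int and float register indices

// argOpFor classifies an ABI register index: indices below the split are
// integer argument registers, the rest are float registers rebased to zero.
func argOpFor(r int) (string, int) {
	if r >= numIntRegs {
		return "ArgFloatReg", r - numIntRegs
	}
	return "ArgIntReg", r
}

func main() {
	fmt.Println(argOpFor(3))  // ArgIntReg 3
	fmt.Println(argOpFor(11)) // ArgFloatReg 2
}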
+
+type selKey struct {
+	from          *Value // what is selected from
+	offsetOrIndex int64  // whatever is appropriate for the selector
+	size          int64
+	typ           *types.Type
 }
 
 type expandState struct {
-	f                  *Func
-	abi1               *abi.ABIConfig
-	debug              int // odd values log lost statement markers, so likely settings are 1 (stmts), 2 (expansion), and 3 (both)
-	canSSAType         func(*types.Type) bool
-	regSize            int64
-	sp                 *Value
-	typs               *Types
-	ptrSize            int64
-	hiOffset           int64
-	lowOffset          int64
-	hiRo               Abi1RO
-	loRo               Abi1RO
-	namedSelects       map[*Value][]namedVal
-	sdom               SparseTree
-	commonSelectors    map[selKey]*Value // used to de-dupe selectors
-	commonArgs         map[selKey]*Value // used to de-dupe OpArg/OpArgIntReg/OpArgFloatReg
-	memForCall         map[ID]*Value     // For a call, need to know the unique selector that gets the mem.
-	transformedSelects map[ID]bool       // OpSelectN after rewriting, either created or renumbered.
-	indentLevel        int               // Indentation for debugging recursion
+	f       *Func
+	debug   int // odd values log lost statement markers, so likely settings are 1 (stmts), 2 (expansion), and 3 (both)
+	regSize int64
+	sp      *Value
+	typs    *Types
+
+	firstOp    Op          // for 64-bit integers on 32-bit machines, first word in memory
+	secondOp   Op          // for 64-bit integers on 32-bit machines, second word in memory
+	firstType  *types.Type // first half type, for Int64
+	secondType *types.Type // second half type, for Int64
+
+	wideSelects     map[*Value]*Value // Selects that are not SSA-able, mapped to consuming stores.
+	commonSelectors map[selKey]*Value // used to de-dupe selectors
+	commonArgs      map[selKey]*Value // used to de-dupe OpArg/OpArgIntReg/OpArgFloatReg
+	memForCall      map[ID]*Value     // For a call, need to know the unique selector that gets the mem.
+	indentLevel     int               // Indentation for debugging recursion
 }
 
 // intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
@@ -206,20 +937,7 @@
 	return
 }
 
-// isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type
-// that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin,
-// so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit
-// integer on 32-bit).
-func (x *expandState) isAlreadyExpandedAggregateType(t *types.Type) bool {
-	if !x.canSSAType(t) {
-		return false
-	}
-	return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
-		(t.Size() > x.regSize && (t.IsInteger() || (x.f.Config.SoftFloat && t.IsFloat())))
-}
-
 // offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
-// TODO should also optimize offsets from SB?
 func (x *expandState) offsetFrom(b *Block, from *Value, offset int64, pt *types.Type) *Value {
 	ft := from.Type
 	if offset == 0 {
@@ -242,19 +960,33 @@
 	return b.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
 }
 
-// splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates.
-func (x *expandState) splitSlots(ls []*LocalSlot, sfx string, offset int64, ty *types.Type) []*LocalSlot {
-	var locs []*LocalSlot
-	for i := range ls {
-		locs = append(locs, x.f.SplitSlot(ls[i], sfx, offset, ty))
+func (x *expandState) regWidth(t *types.Type) Abi1RO {
+	return Abi1RO(x.f.ABI1.NumParamRegs(t))
+}
+
+// regOffset returns the register offset of the i'th element of type t
+func (x *expandState) regOffset(t *types.Type, i int) Abi1RO {
+	// TODO maybe cache this in a map if profiling recommends.
+	if i == 0 {
+		return 0
 	}
-	return locs
+	if t.IsArray() {
+		return Abi1RO(i) * x.regWidth(t.Elem())
+	}
+	if t.IsStruct() {
+		k := Abi1RO(0)
+		for j := 0; j < i; j++ {
+			k += x.regWidth(t.FieldType(j))
+		}
+		return k
+	}
+	panic("Haven't implemented this case yet, do I need to?")
 }
 
 // prAssignForArg returns the ABIParamAssignment for v, assumed to be an OpArg.
 func (x *expandState) prAssignForArg(v *Value) *abi.ABIParamAssignment {
 	if v.Op != OpArg {
-		panic(badVal("Wanted OpArg, instead saw", v))
+		panic(fmt.Errorf("Wanted OpArg, instead saw %s", v.LongString()))
 	}
 	return ParamAssignmentForArgName(x.f, v.Aux.(*ir.Name))
 }
@@ -284,880 +1016,6 @@
 	return fmt.Printf(format, a...)
 }
 
-// Calls that need lowering have some number of inputs, including a memory input,
-// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
-
-// With the current ABI those inputs need to be converted into stores to memory,
-// rethreading the call's memory input to the first, and the new call now receiving the last.
-
-// With the current ABI, the outputs need to be converted to loads, which will all use the call's
-// memory output as their input.
-
-// rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg)
-// through a chain of Struct/Array/builtin Select operations.  If the chain of selectors does not
-// end in an expected root, it does nothing (this can happen depending on compiler phase ordering).
-// The "leaf" provides the type, the root supplies the container, and the leaf-to-root path
-// accumulates the offset.
-// It emits the code necessary to implement the leaf select operation that leads to the root.
-//
-// TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
-func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64, regOffset Abi1RO) []*LocalSlot {
-	if x.debug > 1 {
-		x.indent(3)
-		defer x.indent(-3)
-		x.Printf("rewriteSelect(%s; %s; memOff=%d; regOff=%d)\n", leaf.LongString(), selector.LongString(), offset, regOffset)
-	}
-	var locs []*LocalSlot
-	leafType := leaf.Type
-	if len(selector.Args) > 0 {
-		w := selector.Args[0]
-		if w.Op == OpCopy {
-			for w.Op == OpCopy {
-				w = w.Args[0]
-			}
-			selector.SetArg(0, w)
-		}
-	}
-	switch selector.Op {
-	case OpArgIntReg, OpArgFloatReg:
-		if leafType == selector.Type { // OpIData leads us here, sometimes.
-			leaf.copyOf(selector)
-		} else {
-			x.f.Fatalf("Unexpected %s type, selector=%s, leaf=%s\n", selector.Op.String(), selector.LongString(), leaf.LongString())
-		}
-		if x.debug > 1 {
-			x.Printf("---%s, break\n", selector.Op.String())
-		}
-	case OpArg:
-		if !x.isAlreadyExpandedAggregateType(selector.Type) {
-			if leafType == selector.Type { // OpIData leads us here, sometimes.
-				x.newArgToMemOrRegs(selector, leaf, offset, regOffset, leafType, leaf.Pos)
-			} else {
-				x.f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
-			}
-			if x.debug > 1 {
-				x.Printf("---OpArg, break\n")
-			}
-			break
-		}
-		switch leaf.Op {
-		case OpIData, OpStructSelect, OpArraySelect:
-			leafType = removeTrivialWrapperTypes(leaf.Type)
-		}
-		x.newArgToMemOrRegs(selector, leaf, offset, regOffset, leafType, leaf.Pos)
-
-		for _, s := range x.namedSelects[selector] {
-			locs = append(locs, x.f.Names[s.locIndex])
-		}
-
-	case OpLoad: // We end up here because of IData of immediate structures.
-		// Failure case:
-		// (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as
-		// the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat).
-		//
-		// GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc
-		// cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR
-		// b2: ← b1
-		// v20 (+142) = StaticLECall <interface {},mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1
-		// v21 (142) = SelectN <mem> [1] v20
-		// v22 (142) = SelectN <interface {}> [0] v20
-		// b15: ← b8
-		// v71 (+143) = IData <Nodes> v22 (v[Nodes])
-		// v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21
-		//
-		// translates (w/o the "case OpLoad:" above) to:
-		//
-		// b2: ← b1
-		// v20 (+142) = StaticCall <mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715
-		// v23 (142) = Load <*uintptr> v19 v20
-		// v823 (142) = IsNonNil <bool> v23
-		// v67 (+143) = Load <*[]*Node> v880 v20
-		// b15: ← b8
-		// v827 (146) = StructSelect <*[]*Node> [0] v67
-		// v846 (146) = Store <mem> {*[]*Node} v769 v827 v20
-		// v73 (+146) = StaticCall <mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846
-		// i.e., the struct select is generated and remains in place because it is not applied to an actual structure.
-		// The OpLoad was created to load the single field of the IData.
-		// This case removes that StructSelect.
-		if leafType != selector.Type {
-			if x.f.Config.SoftFloat && selector.Type.IsFloat() {
-				if x.debug > 1 {
-					x.Printf("---OpLoad, break\n")
-				}
-				break // softfloat pass will take care of that
-			}
-			x.f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
-		}
-		leaf.copyOf(selector)
-		for _, s := range x.namedSelects[selector] {
-			locs = append(locs, x.f.Names[s.locIndex])
-		}
-
-	case OpSelectN:
-		// TODO(register args) result case
-		// if applied to Op-mumble-call, the Aux tells us which result, regOffset specifies offset within result.  If a register, should rewrite to OpSelectN for new call.
-		// TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there.
-		call := selector.Args[0]
-		call0 := call
-		aux := call.Aux.(*AuxCall)
-		which := selector.AuxInt
-		if x.transformedSelects[selector.ID] {
-			// This is a minor hack.  Either this select has had its operand adjusted (mem) or
-			// it is some other intermediate node that was rewritten to reference a register (not a generic arg).
-			// This can occur with chains of selection/indexing from single field/element aggregates.
-			leaf.copyOf(selector)
-			break
-		}
-		if which == aux.NResults() { // mem is after the results.
-			// rewrite v as a Copy of call -- the replacement call will produce a mem.
-			if leaf != selector {
-				panic(fmt.Errorf("Unexpected selector of memory, selector=%s, call=%s, leaf=%s", selector.LongString(), call.LongString(), leaf.LongString()))
-			}
-			if aux.abiInfo == nil {
-				panic(badVal("aux.abiInfo nil for call", call))
-			}
-			if existing := x.memForCall[call.ID]; existing == nil {
-				selector.AuxInt = int64(aux.abiInfo.OutRegistersUsed())
-				x.memForCall[call.ID] = selector
-				x.transformedSelects[selector.ID] = true // operand adjusted
-			} else {
-				selector.copyOf(existing)
-			}
-
-		} else {
-			leafType := removeTrivialWrapperTypes(leaf.Type)
-			if x.canSSAType(leafType) {
-				pt := types.NewPtr(leafType)
-				// Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
-				// Create a "mem" for any loads that need to occur.
-				if mem := x.memForCall[call.ID]; mem != nil {
-					if mem.Block != call.Block {
-						panic(fmt.Errorf("selector and call need to be in same block, selector=%s; call=%s", selector.LongString(), call.LongString()))
-					}
-					call = mem
-				} else {
-					mem = call.Block.NewValue1I(call.Pos.WithNotStmt(), OpSelectN, types.TypeMem, int64(aux.abiInfo.OutRegistersUsed()), call)
-					x.transformedSelects[mem.ID] = true // select uses post-expansion indexing
-					x.memForCall[call.ID] = mem
-					call = mem
-				}
-				outParam := aux.abiInfo.OutParam(int(which))
-				if len(outParam.Registers) > 0 {
-					firstReg := uint32(0)
-					for i := 0; i < int(which); i++ {
-						firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers))
-					}
-					reg := int64(regOffset + Abi1RO(firstReg))
-					if leaf.Block == call.Block {
-						leaf.reset(OpSelectN)
-						leaf.SetArgs1(call0)
-						leaf.Type = leafType
-						leaf.AuxInt = reg
-						x.transformedSelects[leaf.ID] = true // leaf, rewritten to use post-expansion indexing.
-					} else {
-						w := call.Block.NewValue1I(leaf.Pos, OpSelectN, leafType, reg, call0)
-						x.transformedSelects[w.ID] = true // select, using post-expansion indexing.
-						leaf.copyOf(w)
-					}
-				} else {
-					off := x.offsetFrom(x.f.Entry, x.sp, offset+aux.OffsetOfResult(which), pt)
-					if leaf.Block == call.Block {
-						leaf.reset(OpLoad)
-						leaf.SetArgs2(off, call)
-						leaf.Type = leafType
-					} else {
-						w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
-						leaf.copyOf(w)
-						if x.debug > 1 {
-							x.Printf("---new %s\n", w.LongString())
-						}
-					}
-				}
-				for _, s := range x.namedSelects[selector] {
-					locs = append(locs, x.f.Names[s.locIndex])
-				}
-			} else {
-				x.f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString())
-			}
-		}
-
-	case OpStructSelect:
-		w := selector.Args[0]
-		var ls []*LocalSlot
-		if w.Type.Kind() != types.TSTRUCT { // IData artifact
-			ls = x.rewriteSelect(leaf, w, offset, regOffset)
-		} else {
-			fldi := int(selector.AuxInt)
-			ls = x.rewriteSelect(leaf, w, offset+w.Type.FieldOff(fldi), regOffset+x.regOffset(w.Type, fldi))
-			if w.Op != OpIData {
-				for _, l := range ls {
-					locs = append(locs, x.f.SplitStruct(l, int(selector.AuxInt)))
-				}
-			}
-		}
-
-	case OpArraySelect:
-		w := selector.Args[0]
-		index := selector.AuxInt
-		x.rewriteSelect(leaf, w, offset+selector.Type.Size()*index, regOffset+x.regOffset(w.Type, int(index)))
-
-	case OpInt64Hi:
-		w := selector.Args[0]
-		ls := x.rewriteSelect(leaf, w, offset+x.hiOffset, regOffset+x.hiRo)
-		locs = x.splitSlots(ls, ".hi", x.hiOffset, leafType)
-
-	case OpInt64Lo:
-		w := selector.Args[0]
-		ls := x.rewriteSelect(leaf, w, offset+x.lowOffset, regOffset+x.loRo)
-		locs = x.splitSlots(ls, ".lo", x.lowOffset, leafType)
-
-	case OpStringPtr:
-		ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
-		locs = x.splitSlots(ls, ".ptr", 0, x.typs.BytePtr)
-
-	case OpSlicePtr, OpSlicePtrUnchecked:
-		w := selector.Args[0]
-		ls := x.rewriteSelect(leaf, w, offset, regOffset)
-		locs = x.splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem()))
-
-	case OpITab:
-		w := selector.Args[0]
-		ls := x.rewriteSelect(leaf, w, offset, regOffset)
-		sfx := ".itab"
-		if w.Type.IsEmptyInterface() {
-			sfx = ".type"
-		}
-		locs = x.splitSlots(ls, sfx, 0, x.typs.Uintptr)
-
-	case OpComplexReal:
-		ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
-		locs = x.splitSlots(ls, ".real", 0, selector.Type)
-
-	case OpComplexImag:
-		ls := x.rewriteSelect(leaf, selector.Args[0], offset+selector.Type.Size(), regOffset+RO_complex_imag) // result is FloatNN, width of result is offset of imaginary part.
-		locs = x.splitSlots(ls, ".imag", selector.Type.Size(), selector.Type)
-
-	case OpStringLen, OpSliceLen:
-		ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_slice_len)
-		locs = x.splitSlots(ls, ".len", x.ptrSize, leafType)
-
-	case OpIData:
-		ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_iface_data)
-		locs = x.splitSlots(ls, ".data", x.ptrSize, leafType)
-
-	case OpSliceCap:
-		ls := x.rewriteSelect(leaf, selector.Args[0], offset+2*x.ptrSize, regOffset+RO_slice_cap)
-		locs = x.splitSlots(ls, ".cap", 2*x.ptrSize, leafType)
-
-	case OpCopy: // If it's an intermediate result, recurse
-		locs = x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
-		for _, s := range x.namedSelects[selector] {
-			// this copy may have had its own name, preserve that, too.
-			locs = append(locs, x.f.Names[s.locIndex])
-		}
-
-	default:
-		// Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed).
-	}
-
-	return locs
-}
-
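The walk described above can be pictured with a small standalone sketch (simplified, invented types, not the compiler's SSA values): each Struct/Array selection on the leaf-to-root path contributes its field or element offset, and the sum locates the leaf inside the root.

package main

import "fmt"

// sel is a toy stand-in for a chain of selection operations.
type sel struct {
	arg      *sel  // value being selected from; nil marks the root (Arg/Load/SelectN)
	fieldOff int64 // contribution of a struct-field selection
	elemSize int64 // element size for an array selection
	index    int64 // element index for an array selection
}

// offsetToRoot accumulates the byte offset from the leaf back to the root,
// mirroring how rewriteSelect threads `offset` through its recursion.
func offsetToRoot(v *sel) int64 {
	if v.arg == nil {
		return 0
	}
	return v.fieldOff + v.index*v.elemSize + offsetToRoot(v.arg)
}

func main() {
	root := &sel{}                                  // e.g. an OpArg of type [2]struct{ a, b int64 }
	elem := &sel{arg: root, elemSize: 16, index: 1} // ArraySelect [1]
	field := &sel{arg: elem, fieldOff: 8}           // StructSelect .b
	fmt.Println(offsetToRoot(field))                // 24
}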
-func (x *expandState) rewriteDereference(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value {
-	source := a.Args[0]
-	dst := x.offsetFrom(b, base, offset, source.Type)
-	if a.Uses == 1 && a.Block == b {
-		a.reset(OpMove)
-		a.Pos = pos
-		a.Type = types.TypeMem
-		a.Aux = typ
-		a.AuxInt = size
-		a.SetArgs3(dst, source, mem)
-		mem = a
-	} else {
-		mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem)
-		mem.AuxInt = size
-	}
-	return mem
-}
-
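A bare-bones model of what rewriteDereference arranges, using invented types rather than the SSA package: the address that was being dereferenced becomes the source of an explicit Move of size bytes to base+offset, threaded onto the memory chain.

package main

import "fmt"

// moveOp is a stand-in for an OpMove value: size bytes from src to dst, after mem.
type moveOp struct {
	dst, src, mem string
	size          int64
}

// dereferenceToMove models the rewrite: the Dereference's address operand (src)
// is copied to base+offset, and the resulting move becomes the new memory state.
func dereferenceToMove(base string, offset, size int64, src, mem string) moveOp {
	return moveOp{
		dst:  fmt.Sprintf("%s+%d", base, offset),
		src:  src,
		mem:  mem,
		size: size,
	}
}

func main() {
	m := dereferenceToMove("SP", 24, 16, "&x", "m0")
	fmt.Printf("Move %d bytes %s -> %s (mem %s)\n", m.size, m.src, m.dst, m.mem) // Move 16 bytes &x -> SP+24 (mem m0)
}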
-var indexNames [1]string = [1]string{"[0]"}
-
-// pathTo returns the selection path to the leaf type at offset within container.
-// e.g. len(thing.field[0]) => ".field[0].len"
-// this is for purposes of generating names ultimately fed to a debugger.
-func (x *expandState) pathTo(container, leaf *types.Type, offset int64) string {
-	if container == leaf || offset == 0 && container.Size() == leaf.Size() {
-		return ""
-	}
-	path := ""
-outer:
-	for {
-		switch container.Kind() {
-		case types.TARRAY:
-			container = container.Elem()
-			if container.Size() == 0 {
-				return path
-			}
-			i := offset / container.Size()
-			offset = offset % container.Size()
-			// If a future compiler/ABI supports larger SSA/Arg-able arrays, expand indexNames.
-			path = path + indexNames[i]
-			continue
-		case types.TSTRUCT:
-			for i := 0; i < container.NumFields(); i++ {
-				fld := container.Field(i)
-				if fld.Offset+fld.Type.Size() > offset {
-					offset -= fld.Offset
-					path += "." + fld.Sym.Name
-					container = fld.Type
-					continue outer
-				}
-			}
-			return path
-		case types.TINT64, types.TUINT64:
-			if container.Size() == x.regSize {
-				return path
-			}
-			if offset == x.hiOffset {
-				return path + ".hi"
-			}
-			return path + ".lo"
-		case types.TINTER:
-			if offset != 0 {
-				return path + ".data"
-			}
-			if container.IsEmptyInterface() {
-				return path + ".type"
-			}
-			return path + ".itab"
-
-		case types.TSLICE:
-			if offset == 2*x.regSize {
-				return path + ".cap"
-			}
-			fallthrough
-		case types.TSTRING:
-			if offset == 0 {
-				return path + ".ptr"
-			}
-			return path + ".len"
-		case types.TCOMPLEX64, types.TCOMPLEX128:
-			if offset == 0 {
-				return path + ".real"
-			}
-			return path + ".imag"
-		}
-		return path
-	}
-}
-
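As a concrete illustration of the paths produced (a hedged sketch assuming a 64-bit layout, independent of the compiler's types): offsets inside a slice-typed struct field resolve to the .ptr/.len/.cap suffixes shown below.

package main

import "fmt"

const regSize = 8 // assumed 64-bit target

// sliceSuffix mimics the TSLICE/TSTRING arm of pathTo for a single slice-typed
// field: offset 0 is the pointer, regSize the length, 2*regSize the capacity.
func sliceSuffix(offset int64) string {
	switch offset {
	case 0:
		return ".ptr"
	case regSize:
		return ".len"
	default:
		return ".cap"
	}
}

func main() {
	// For struct{ s []int } the field starts at offset 0, so:
	for _, off := range []int64{0, 8, 16} {
		fmt.Printf("offset %2d -> .s%s\n", off, sliceSuffix(off)) // .s.ptr, .s.len, .s.cap
	}
}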
-// decomposeArg is a helper for storeArgOrLoad.
-// It decomposes a Load or an Arg into smaller parts and returns the new mem.
-// If the type does not match one of the expected aggregate types, it returns nil instead.
-// Parameters:
-//
-//	pos           -- the location of any generated code.
-//	b             -- the block into which any generated code should normally be placed
-//	source        -- the value, possibly an aggregate, to be stored.
-//	mem           -- the mem flowing into this decomposition (loads depend on it, stores update it)
-//	t             -- the type of the value to be stored
-//	storeOffset   -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset
-//	loadRegOffset -- regarding source as a value in registers, the register offset in ABI1.  Meaningful only if source is OpArg.
-//	storeRc       -- register cursor; if the value is stored in registers, this specifies the registers.
-//	                 storeRc also identifies whether the target is registers or memory, and has the base for the store operation.
-func (x *expandState) decomposeArg(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
-
-	pa := x.prAssignForArg(source)
-	var locs []*LocalSlot
-	for _, s := range x.namedSelects[source] {
-		locs = append(locs, x.f.Names[s.locIndex])
-	}
-
-	if len(pa.Registers) > 0 {
-		// Handle the in-registers case directly
-		rts, offs := pa.RegisterTypesAndOffsets()
-		last := loadRegOffset + x.regWidth(t)
-		if offs[loadRegOffset] != 0 {
-			// Document the problem before panicking.
-			for i := 0; i < len(rts); i++ {
-				rt := rts[i]
-				off := offs[i]
-				fmt.Printf("rt=%s, off=%d, rt.Width=%d, rt.Align=%d\n", rt.String(), off, rt.Size(), uint8(rt.Alignment()))
-			}
-			panic(fmt.Errorf("offset %d of requested register %d should be zero, source=%s", offs[loadRegOffset], loadRegOffset, source.LongString()))
-		}
-
-		if x.debug > 1 {
-			x.Printf("decompose arg %s has %d locs\n", source.LongString(), len(locs))
-		}
-
-		for i := loadRegOffset; i < last; i++ {
-			rt := rts[i]
-			off := offs[i]
-			w := x.commonArgs[selKey{source, off, rt.Size(), rt}]
-			if w == nil {
-				w = x.newArgToMemOrRegs(source, w, off, i, rt, pos)
-				suffix := x.pathTo(source.Type, rt, off)
-				if suffix != "" {
-					x.splitSlotsIntoNames(locs, suffix, off, rt, w)
-				}
-			}
-			if t.IsPtrShaped() {
-				// Preserve the original store type. This ensures pointer type
-				// properties aren't discarded (e.g., notinheap).
-				if rt.Size() != t.Size() || len(pa.Registers) != 1 || i != loadRegOffset {
-					b.Func.Fatalf("incompatible store type %v and %v, i=%d", t, rt, i)
-				}
-				rt = t
-			}
-			mem = x.storeArgOrLoad(pos, b, w, mem, rt, storeOffset+off, i, storeRc.next(rt))
-		}
-		return mem
-	}
-
-	u := source.Type
-	switch u.Kind() {
-	case types.TARRAY:
-		elem := u.Elem()
-		elemRO := x.regWidth(elem)
-		for i := int64(0); i < u.NumElem(); i++ {
-			elemOff := i * elem.Size()
-			mem = storeOneArg(x, pos, b, locs, indexNames[i], source, mem, elem, elemOff, storeOffset+elemOff, loadRegOffset, storeRc.next(elem))
-			loadRegOffset += elemRO
-			pos = pos.WithNotStmt()
-		}
-		return mem
-	case types.TSTRUCT:
-		for i := 0; i < u.NumFields(); i++ {
-			fld := u.Field(i)
-			mem = storeOneArg(x, pos, b, locs, "."+fld.Sym.Name, source, mem, fld.Type, fld.Offset, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
-			loadRegOffset += x.regWidth(fld.Type)
-			pos = pos.WithNotStmt()
-		}
-		return mem
-	case types.TINT64, types.TUINT64:
-		if t.Size() == x.regSize {
-			break
-		}
-		tHi, tLo := x.intPairTypes(t.Kind())
-		mem = storeOneArg(x, pos, b, locs, ".hi", source, mem, tHi, x.hiOffset, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
-		pos = pos.WithNotStmt()
-		return storeOneArg(x, pos, b, locs, ".lo", source, mem, tLo, x.lowOffset, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.loRo))
-	case types.TINTER:
-		sfx := ".itab"
-		if u.IsEmptyInterface() {
-			sfx = ".type"
-		}
-		return storeTwoArg(x, pos, b, locs, sfx, ".idata", source, mem, x.typs.Uintptr, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc)
-	case types.TSTRING:
-		return storeTwoArg(x, pos, b, locs, ".ptr", ".len", source, mem, x.typs.BytePtr, x.typs.Int, 0, storeOffset, loadRegOffset, storeRc)
-	case types.TCOMPLEX64:
-		return storeTwoArg(x, pos, b, locs, ".real", ".imag", source, mem, x.typs.Float32, x.typs.Float32, 0, storeOffset, loadRegOffset, storeRc)
-	case types.TCOMPLEX128:
-		return storeTwoArg(x, pos, b, locs, ".real", ".imag", source, mem, x.typs.Float64, x.typs.Float64, 0, storeOffset, loadRegOffset, storeRc)
-	case types.TSLICE:
-		mem = storeOneArg(x, pos, b, locs, ".ptr", source, mem, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
-		return storeTwoArg(x, pos, b, locs, ".len", ".cap", source, mem, x.typs.Int, x.typs.Int, x.ptrSize, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc)
-	}
-	return nil
-}
-
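The .hi/.lo split in the TINT64/TUINT64 case depends on byte order; a minimal sketch of that layout decision, matching the lowOffset/hiOffset setup later in expandCalls (the offsets are assumed here, not read from a real Config):

package main

import "fmt"

// int64Halves reports where the low and high 32-bit halves of a 64-bit value
// live on a 32-bit target, as decomposeArg's .lo/.hi stores assume.
func int64Halves(bigEndian bool) (loOff, hiOff int64) {
	if bigEndian {
		return 4, 0
	}
	return 0, 4
}

func main() {
	lo, hi := int64Halves(false)
	fmt.Printf("little-endian: .lo at +%d, .hi at +%d\n", lo, hi) // .lo at +0, .hi at +4
	lo, hi = int64Halves(true)
	fmt.Printf("big-endian:    .lo at +%d, .hi at +%d\n", lo, hi) // .lo at +4, .hi at +0
}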
-func (x *expandState) splitSlotsIntoNames(locs []*LocalSlot, suffix string, off int64, rt *types.Type, w *Value) {
-	wlocs := x.splitSlots(locs, suffix, off, rt)
-	for _, l := range wlocs {
-		old, ok := x.f.NamedValues[*l]
-		x.f.NamedValues[*l] = append(old, w)
-		if !ok {
-			x.f.Names = append(x.f.Names, l)
-		}
-	}
-}
-
-// decomposeLoad is a helper for storeArgOrLoad.
-// It decomposes a Load into smaller parts and returns the new mem.
-// If the type does not match one of the expected aggregate types, it returns nil instead.
-// Parameters:
-//
-//	pos           -- the location of any generated code.
-//	b             -- the block into which any generated code should normally be placed
-//	source        -- the value, possibly an aggregate, to be stored.
-//	mem           -- the mem flowing into this decomposition (loads depend on it, stores update it)
-//	t             -- the type of the value to be stored
-//	storeOffset   -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset
-//	loadRegOffset -- regarding source as a value in registers, the register offset in ABI1.  Meaningful only if source is OpArg.
-//	storeRc       -- register cursor; if the value is stored in registers, this specifies the registers.
-//	                 storeRc also identifies whether the target is registers or memory, and has the base for the store operation.
-//
-// TODO -- this needs cleanup; it just works for SSA-able aggregates, and won't fully generalize to register-args aggregates.
-func (x *expandState) decomposeLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
-	u := source.Type
-	switch u.Kind() {
-	case types.TARRAY:
-		elem := u.Elem()
-		elemRO := x.regWidth(elem)
-		for i := int64(0); i < u.NumElem(); i++ {
-			elemOff := i * elem.Size()
-			mem = storeOneLoad(x, pos, b, source, mem, elem, elemOff, storeOffset+elemOff, loadRegOffset, storeRc.next(elem))
-			loadRegOffset += elemRO
-			pos = pos.WithNotStmt()
-		}
-		return mem
-	case types.TSTRUCT:
-		for i := 0; i < u.NumFields(); i++ {
-			fld := u.Field(i)
-			mem = storeOneLoad(x, pos, b, source, mem, fld.Type, fld.Offset, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
-			loadRegOffset += x.regWidth(fld.Type)
-			pos = pos.WithNotStmt()
-		}
-		return mem
-	case types.TINT64, types.TUINT64:
-		if t.Size() == x.regSize {
-			break
-		}
-		tHi, tLo := x.intPairTypes(t.Kind())
-		mem = storeOneLoad(x, pos, b, source, mem, tHi, x.hiOffset, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
-		pos = pos.WithNotStmt()
-		return storeOneLoad(x, pos, b, source, mem, tLo, x.lowOffset, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.loRo))
-	case types.TINTER:
-		return storeTwoLoad(x, pos, b, source, mem, x.typs.Uintptr, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc)
-	case types.TSTRING:
-		return storeTwoLoad(x, pos, b, source, mem, x.typs.BytePtr, x.typs.Int, 0, storeOffset, loadRegOffset, storeRc)
-	case types.TCOMPLEX64:
-		return storeTwoLoad(x, pos, b, source, mem, x.typs.Float32, x.typs.Float32, 0, storeOffset, loadRegOffset, storeRc)
-	case types.TCOMPLEX128:
-		return storeTwoLoad(x, pos, b, source, mem, x.typs.Float64, x.typs.Float64, 0, storeOffset, loadRegOffset, storeRc)
-	case types.TSLICE:
-		mem = storeOneLoad(x, pos, b, source, mem, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
-		return storeTwoLoad(x, pos, b, source, mem, x.typs.Int, x.typs.Int, x.ptrSize, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc)
-	}
-	return nil
-}
-
-// storeOneArg creates a decomposed (one step) arg that is then stored.
-// pos and b locate the store instruction, source is the "base" of the value input,
-// mem is the input mem, t is the type in question, and argOffset and storeOffset are the offsets from the respective bases.
-func storeOneArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suffix string, source, mem *Value, t *types.Type, argOffset, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
-	if x.debug > 1 {
-		x.indent(3)
-		defer x.indent(-3)
-		x.Printf("storeOneArg(%s;  %s;  %s; aO=%d; sO=%d; lrO=%d; %s)\n", source.LongString(), mem.String(), t.String(), argOffset, storeOffset, loadRegOffset, storeRc.String())
-	}
-
-	w := x.commonArgs[selKey{source, argOffset, t.Size(), t}]
-	if w == nil {
-		w = x.newArgToMemOrRegs(source, w, argOffset, loadRegOffset, t, pos)
-		x.splitSlotsIntoNames(locs, suffix, argOffset, t, w)
-	}
-	return x.storeArgOrLoad(pos, b, w, mem, t, storeOffset, loadRegOffset, storeRc)
-}
-
-// storeOneLoad creates a decomposed (one step) load that is then stored.
-func storeOneLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
-	from := x.offsetFrom(source.Block, source.Args[0], offArg, types.NewPtr(t))
-	w := b.NewValue2(source.Pos, OpLoad, t, from, mem)
-	return x.storeArgOrLoad(pos, b, w, mem, t, offStore, loadRegOffset, storeRc)
-}
-
-func storeTwoArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suffix1 string, suffix2 string, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
-	mem = storeOneArg(x, pos, b, locs, suffix1, source, mem, t1, offArg, offStore, loadRegOffset, storeRc.next(t1))
-	pos = pos.WithNotStmt()
-	t1Size := t1.Size()
-	return storeOneArg(x, pos, b, locs, suffix2, source, mem, t2, offArg+t1Size, offStore+t1Size, loadRegOffset+1, storeRc)
-}
-
-// storeTwoLoad creates a pair of decomposed (one step) loads that are then stored.
-// The elements of the pair must not require any additional alignment.
-func storeTwoLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
-	mem = storeOneLoad(x, pos, b, source, mem, t1, offArg, offStore, loadRegOffset, storeRc.next(t1))
-	pos = pos.WithNotStmt()
-	t1Size := t1.Size()
-	return storeOneLoad(x, pos, b, source, mem, t2, offArg+t1Size, offStore+t1Size, loadRegOffset+1, storeRc)
-}
-
-// storeArgOrLoad converts stores of SSA-able potentially aggregatable arguments (passed to a call) into a series of primitive-typed
-// stores of non-aggregate types.  It recursively walks up a chain of selectors until it reaches a Load or an Arg.
-// If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering.
-func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
-	if x.debug > 1 {
-		x.indent(3)
-		defer x.indent(-3)
-		x.Printf("storeArgOrLoad(%s;  %s;  %s; %d; %s)\n", source.LongString(), mem.String(), t.String(), storeOffset, storeRc.String())
-	}
-
-	// Start with Opcodes that can be disassembled
-	switch source.Op {
-	case OpCopy:
-		return x.storeArgOrLoad(pos, b, source.Args[0], mem, t, storeOffset, loadRegOffset, storeRc)
-
-	case OpLoad, OpDereference:
-		ret := x.decomposeLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
-		if ret != nil {
-			return ret
-		}
-
-	case OpArg:
-		ret := x.decomposeArg(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
-		if ret != nil {
-			return ret
-		}
-
-	case OpArrayMake0, OpStructMake0:
-		// TODO(register args) is this correct for registers?
-		return mem
-
-	case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4:
-		for i := 0; i < t.NumFields(); i++ {
-			fld := t.Field(i)
-			mem = x.storeArgOrLoad(pos, b, source.Args[i], mem, fld.Type, storeOffset+fld.Offset, 0, storeRc.next(fld.Type))
-			pos = pos.WithNotStmt()
-		}
-		return mem
-
-	case OpArrayMake1:
-		return x.storeArgOrLoad(pos, b, source.Args[0], mem, t.Elem(), storeOffset, 0, storeRc.at(t, 0))
-
-	case OpInt64Make:
-		tHi, tLo := x.intPairTypes(t.Kind())
-		mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, tHi, storeOffset+x.hiOffset, 0, storeRc.next(tHi))
-		pos = pos.WithNotStmt()
-		return x.storeArgOrLoad(pos, b, source.Args[1], mem, tLo, storeOffset+x.lowOffset, 0, storeRc)
-
-	case OpComplexMake:
-		tPart := x.typs.Float32
-		wPart := t.Size() / 2
-		if wPart == 8 {
-			tPart = x.typs.Float64
-		}
-		mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, tPart, storeOffset, 0, storeRc.next(tPart))
-		pos = pos.WithNotStmt()
-		return x.storeArgOrLoad(pos, b, source.Args[1], mem, tPart, storeOffset+wPart, 0, storeRc)
-
-	case OpIMake:
-		mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.Uintptr, storeOffset, 0, storeRc.next(x.typs.Uintptr))
-		pos = pos.WithNotStmt()
-		return x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.BytePtr, storeOffset+x.ptrSize, 0, storeRc)
-
-	case OpStringMake:
-		mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.BytePtr, storeOffset, 0, storeRc.next(x.typs.BytePtr))
-		pos = pos.WithNotStmt()
-		return x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.Int, storeOffset+x.ptrSize, 0, storeRc)
-
-	case OpSliceMake:
-		mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.BytePtr, storeOffset, 0, storeRc.next(x.typs.BytePtr))
-		pos = pos.WithNotStmt()
-		mem = x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.Int, storeOffset+x.ptrSize, 0, storeRc.next(x.typs.Int))
-		return x.storeArgOrLoad(pos, b, source.Args[2], mem, x.typs.Int, storeOffset+2*x.ptrSize, 0, storeRc)
-	}
-
-	// For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
-	switch t.Kind() {
-	case types.TARRAY:
-		elt := t.Elem()
-		if source.Type != t && t.NumElem() == 1 && elt.Size() == t.Size() && t.Size() == x.regSize {
-			t = removeTrivialWrapperTypes(t)
-			// it could be a leaf type, but the "leaf" could be complex64 (for example)
-			return x.storeArgOrLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
-		}
-		eltRO := x.regWidth(elt)
-		source.Type = t
-		for i := int64(0); i < t.NumElem(); i++ {
-			sel := b.NewValue1I(pos, OpArraySelect, elt, i, source)
-			mem = x.storeArgOrLoad(pos, b, sel, mem, elt, storeOffset+i*elt.Size(), loadRegOffset, storeRc.at(t, 0))
-			loadRegOffset += eltRO
-			pos = pos.WithNotStmt()
-		}
-		return mem
-
-	case types.TSTRUCT:
-		if source.Type != t && t.NumFields() == 1 && t.Field(0).Type.Size() == t.Size() && t.Size() == x.regSize {
-			// This peculiar test deals with accesses to immediate interface data.
-			// It works okay because everything is the same size.
-			// Example code that triggers this can be found in go/constant/value.go, function ToComplex
-			// v119 (+881) = IData <intVal> v6
-			// v121 (+882) = StaticLECall <floatVal,mem> {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1
-			// This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)"
-			// Guard against "struct{struct{*foo}}"
-			// Other rewriting phases create minor glitches when they transform IData, for instance the
-			// interface-typed Arg "x" of ToFloat in go/constant/value.go
-			//   v6 (858) = Arg <Value> {x} (x[Value], x[Value])
-			// is rewritten by decomposeArgs into
-			//   v141 (858) = Arg <uintptr> {x}
-			//   v139 (858) = Arg <*uint8> {x} [8]
-			// because of a type case clause on line 862 of go/constant/value.go
-			//  	case intVal:
-			//		   return itof(x)
-			// v139 is later stored as an intVal == struct{val *big.Int}, which naively requires the fields
-			// of a *uint8, which does not succeed.
-			t = removeTrivialWrapperTypes(t)
-			// it could be a leaf type, but the "leaf" could be complex64 (for example)
-			return x.storeArgOrLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
-		}
-
-		source.Type = t
-		for i := 0; i < t.NumFields(); i++ {
-			fld := t.Field(i)
-			sel := b.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
-			mem = x.storeArgOrLoad(pos, b, sel, mem, fld.Type, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
-			loadRegOffset += x.regWidth(fld.Type)
-			pos = pos.WithNotStmt()
-		}
-		return mem
-
-	case types.TINT64, types.TUINT64:
-		if t.Size() == x.regSize {
-			break
-		}
-		tHi, tLo := x.intPairTypes(t.Kind())
-		sel := b.NewValue1(pos, OpInt64Hi, tHi, source)
-		mem = x.storeArgOrLoad(pos, b, sel, mem, tHi, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
-		pos = pos.WithNotStmt()
-		sel = b.NewValue1(pos, OpInt64Lo, tLo, source)
-		return x.storeArgOrLoad(pos, b, sel, mem, tLo, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.hiRo))
-
-	case types.TINTER:
-		sel := b.NewValue1(pos, OpITab, x.typs.BytePtr, source)
-		mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
-		pos = pos.WithNotStmt()
-		sel = b.NewValue1(pos, OpIData, x.typs.BytePtr, source)
-		return x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset+x.ptrSize, loadRegOffset+RO_iface_data, storeRc)
-
-	case types.TSTRING:
-		sel := b.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
-		mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
-		pos = pos.WithNotStmt()
-		sel = b.NewValue1(pos, OpStringLen, x.typs.Int, source)
-		return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_string_len, storeRc)
-
-	case types.TSLICE:
-		et := types.NewPtr(t.Elem())
-		sel := b.NewValue1(pos, OpSlicePtr, et, source)
-		mem = x.storeArgOrLoad(pos, b, sel, mem, et, storeOffset, loadRegOffset, storeRc.next(et))
-		pos = pos.WithNotStmt()
-		sel = b.NewValue1(pos, OpSliceLen, x.typs.Int, source)
-		mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc.next(x.typs.Int))
-		sel = b.NewValue1(pos, OpSliceCap, x.typs.Int, source)
-		return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+2*x.ptrSize, loadRegOffset+RO_slice_cap, storeRc)
-
-	case types.TCOMPLEX64:
-		sel := b.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
-		mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset, loadRegOffset, storeRc.next(x.typs.Float32))
-		pos = pos.WithNotStmt()
-		sel = b.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
-		return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset+4, loadRegOffset+RO_complex_imag, storeRc)
-
-	case types.TCOMPLEX128:
-		sel := b.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
-		mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset, loadRegOffset, storeRc.next(x.typs.Float64))
-		pos = pos.WithNotStmt()
-		sel = b.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
-		return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset+8, loadRegOffset+RO_complex_imag, storeRc)
-	}
-
-	s := mem
-	if source.Op == OpDereference {
-		source.Op = OpLoad // For purposes of parameter passing expansion, a Dereference is a Load.
-	}
-	if storeRc.hasRegs() {
-		storeRc.addArg(source)
-	} else {
-		dst := x.offsetFrom(b, storeRc.storeDest, storeOffset, types.NewPtr(t))
-		s = b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem)
-	}
-	if x.debug > 1 {
-		x.Printf("-->storeArg returns %s, storeRc=%s\n", s.LongString(), storeRc.String())
-	}
-	return s
-}
-
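When storeArgOrLoad bottoms out on an aggregate it cannot take apart directly, it emits selections and recurses; a simplified standalone model of the TSTRING leaf case (hypothetical pointer size, not the compiler's types) shows the resulting pair of primitive stores:

package main

import "fmt"

const ptrSize = 8 // assumed 64-bit target

type store struct {
	part   string
	offset int64
}

// storeString models the TSTRING arm: a pointer store at the base offset
// (OpStringPtr) followed by a length store ptrSize bytes later (OpStringLen).
func storeString(baseOffset int64) []store {
	return []store{
		{part: "ptr", offset: baseOffset},
		{part: "len", offset: baseOffset + ptrSize},
	}
}

func main() {
	for _, s := range storeString(16) {
		fmt.Printf("store string.%s at offset %d\n", s.part, s.offset)
	}
	// store string.ptr at offset 16
	// store string.len at offset 24
}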
-// rewriteArgs replaces all the call-parameter Args to a call with their register translation (if any).
-// Preceding parameters (code pointers, closure pointer) are preserved, and the memory input is modified
-// to account for any parameter stores required.
-// Any of the old Args that have their use count fall to zero are marked OpInvalid.
-func (x *expandState) rewriteArgs(v *Value, firstArg int) {
-	if x.debug > 1 {
-		x.indent(3)
-		defer x.indent(-3)
-		x.Printf("rewriteArgs(%s; %d)\n", v.LongString(), firstArg)
-	}
-	// Thread the stores on the memory arg
-	aux := v.Aux.(*AuxCall)
-	m0 := v.MemoryArg()
-	mem := m0
-	newArgs := []*Value{}
-	oldArgs := []*Value{}
-	sp := x.sp
-	if v.Op == OpTailLECall {
-		// For tail call, we unwind the frame before the call so we'll use the caller's
-		// SP.
-		sp = x.f.Entry.NewValue1(src.NoXPos, OpGetCallerSP, x.typs.Uintptr, mem)
-	}
-	for i, a := range v.Args[firstArg : len(v.Args)-1] { // skip leading non-parameter SSA Args and trailing mem SSA Arg.
-		oldArgs = append(oldArgs, a)
-		auxI := int64(i)
-		aRegs := aux.RegsOfArg(auxI)
-		aType := aux.TypeOfArg(auxI)
-		if len(aRegs) == 0 && a.Op == OpDereference {
-			aOffset := aux.OffsetOfArg(auxI)
-			if a.MemoryArg() != m0 {
-				x.f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString())
-			}
-			if v.Op == OpTailLECall {
-				// A tail call often passes along the same arguments it received (e.g. a method wrapper),
-				// in which case this would be a self copy. Detect this and optimize it out.
-				a0 := a.Args[0]
-				if a0.Op == OpLocalAddr {
-					n := a0.Aux.(*ir.Name)
-					if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.Arch.FixedFrameSize == aOffset {
-						continue
-					}
-				}
-			}
-			if x.debug > 1 {
-				x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
-			}
-			// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
-			// TODO(register args) this will be more complicated with registers in the picture.
-			mem = x.rewriteDereference(v.Block, sp, a, mem, aOffset, aux.SizeOfArg(auxI), aType, v.Pos)
-		} else {
-			var rc registerCursor
-			var result *[]*Value
-			var aOffset int64
-			if len(aRegs) > 0 {
-				result = &newArgs
-			} else {
-				aOffset = aux.OffsetOfArg(auxI)
-			}
-			if v.Op == OpTailLECall && a.Op == OpArg && a.AuxInt == 0 {
-				// A tail call often passes along the same arguments it received (e.g. a method wrapper),
-				// in which case this would be a self copy. Detect this and optimize it out.
-				n := a.Aux.(*ir.Name)
-				if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.Arch.FixedFrameSize == aOffset {
-					continue
-				}
-			}
-			if x.debug > 1 {
-				x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
-			}
-			rc.init(aRegs, aux.abiInfo, result, sp)
-			mem = x.storeArgOrLoad(v.Pos, v.Block, a, mem, aType, aOffset, 0, rc)
-		}
-	}
-	var preArgStore [2]*Value
-	preArgs := append(preArgStore[:0], v.Args[0:firstArg]...)
-	v.resetArgs()
-	v.AddArgs(preArgs...)
-	v.AddArgs(newArgs...)
-	v.AddArg(mem)
-	for _, a := range oldArgs {
-		if a.Uses == 0 {
-			x.invalidateRecursively(a)
-		}
-	}
-
-	return
-}
-
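The memory threading that rewriteArgs performs can be summarized with a schematic sketch (invented types, not the SSA package): each stack-passed argument becomes a store chained onto the previous memory state, and the rewritten call consumes the final mem.

package main

import "fmt"

// mem is a toy stand-in for an SSA memory value.
type mem struct {
	desc string
	prev *mem
}

func storeAfter(prev *mem, desc string) *mem { return &mem{desc: desc, prev: prev} }

func main() {
	m0 := &mem{desc: "m0: call's original memory input"}
	m1 := storeAfter(m0, "store arg0 at SP+0")
	m2 := storeAfter(m1, "store arg1 at SP+8")
	// The rewritten call takes m2 as its (last) argument.
	for m := m2; m != nil; m = m.prev {
		fmt.Println(m.desc)
	}
}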
 func (x *expandState) invalidateRecursively(a *Value) {
 	var s string
 	if x.debug > 0 {
@@ -1175,626 +1033,3 @@
 		x.Printf("Lost statement marker in %s on former %s\n", base.Ctxt.Pkgpath+"."+x.f.Name, s)
 	}
 }
-
-// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
-// that is more oriented to a platform's ABI.  The SelectN operations that extract results are rewritten into
-// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
-// reached.  On the callee side, OpArg nodes are not decomposed until this phase is run.
-// TODO results should not be lowered until this phase.
-func expandCalls(f *Func) {
-	// Calls that need lowering have some number of inputs, including a memory input,
-	// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
-
-	// With the current ABI those inputs need to be converted into stores to memory,
-	// rethreading the call's memory input to the first, and the new call now receiving the last.
-
-	// With the current ABI, the outputs need to be converted to loads, which will all use the call's
-	// memory output as their input.
-	sp, _ := f.spSb()
-	x := &expandState{
-		f:                  f,
-		abi1:               f.ABI1,
-		debug:              f.pass.debug,
-		canSSAType:         f.fe.CanSSA,
-		regSize:            f.Config.RegSize,
-		sp:                 sp,
-		typs:               &f.Config.Types,
-		ptrSize:            f.Config.PtrSize,
-		namedSelects:       make(map[*Value][]namedVal),
-		sdom:               f.Sdom(),
-		commonArgs:         make(map[selKey]*Value),
-		memForCall:         make(map[ID]*Value),
-		transformedSelects: make(map[ID]bool),
-	}
-
-	// For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
-	if f.Config.BigEndian {
-		x.lowOffset, x.hiOffset = 4, 0
-		x.loRo, x.hiRo = 1, 0
-	} else {
-		x.lowOffset, x.hiOffset = 0, 4
-		x.loRo, x.hiRo = 0, 1
-	}
-
-	if x.debug > 1 {
-		x.Printf("\nexpandCalls(%s)\n", f.Name)
-	}
-
-	for i, name := range f.Names {
-		t := name.Type
-		if x.isAlreadyExpandedAggregateType(t) {
-			for j, v := range f.NamedValues[*name] {
-				if v.Op == OpSelectN || v.Op == OpArg && x.isAlreadyExpandedAggregateType(v.Type) {
-					ns := x.namedSelects[v]
-					x.namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
-				}
-			}
-		}
-	}
-
-	// TODO if too slow, whole program iteration can be replaced w/ slices of appropriate values, accumulated in first loop here.
-
-	// Step 0: rewrite the calls to convert args to calls into stores/register movement.
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			firstArg := 0
-			switch v.Op {
-			case OpStaticLECall, OpTailLECall:
-			case OpInterLECall:
-				firstArg = 1
-			case OpClosureLECall:
-				firstArg = 2
-			default:
-				continue
-			}
-			x.rewriteArgs(v, firstArg)
-		}
-		if isBlockMultiValueExit(b) {
-			x.indent(3)
-			// Very similar to code in rewriteArgs, but results instead of args.
-			v := b.Controls[0]
-			m0 := v.MemoryArg()
-			mem := m0
-			aux := f.OwnAux
-			allResults := []*Value{}
-			if x.debug > 1 {
-				x.Printf("multiValueExit rewriting %s\n", v.LongString())
-			}
-			var oldArgs []*Value
-			for j, a := range v.Args[:len(v.Args)-1] {
-				oldArgs = append(oldArgs, a)
-				i := int64(j)
-				auxType := aux.TypeOfResult(i)
-				auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.NameOfResult(i), x.sp, mem)
-				auxOffset := int64(0)
-				auxSize := aux.SizeOfResult(i)
-				aRegs := aux.RegsOfResult(int64(j))
-				if len(aRegs) == 0 && a.Op == OpDereference {
-					// Avoid a self-move, and if one is detected try to remove the already-inserted VarDef for the assignment that won't happen.
-					if dAddr, dMem := a.Args[0], a.Args[1]; dAddr.Op == OpLocalAddr && dAddr.Args[0].Op == OpSP &&
-						dAddr.Args[1] == dMem && dAddr.Aux == aux.NameOfResult(i) {
-						if dMem.Op == OpVarDef && dMem.Aux == dAddr.Aux {
-							dMem.copyOf(dMem.MemoryArg()) // elide the VarDef
-						}
-						continue
-					}
-					mem = x.rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, a.Pos)
-				} else {
-					if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
-						addr := a.Args[0] // This is a self-move. // TODO(register args) do what here for registers?
-						if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.NameOfResult(i) {
-							continue
-						}
-					}
-					var rc registerCursor
-					var result *[]*Value
-					if len(aRegs) > 0 {
-						result = &allResults
-					}
-					rc.init(aRegs, aux.abiInfo, result, auxBase)
-					mem = x.storeArgOrLoad(v.Pos, b, a, mem, aux.TypeOfResult(i), auxOffset, 0, rc)
-				}
-			}
-			v.resetArgs()
-			v.AddArgs(allResults...)
-			v.AddArg(mem)
-			v.Type = types.NewResults(append(abi.RegisterTypes(aux.abiInfo.OutParams()), types.TypeMem))
-			b.SetControl(v)
-			for _, a := range oldArgs {
-				if a.Uses == 0 {
-					if x.debug > 1 {
-						x.Printf("...marking %v unused\n", a.LongString())
-					}
-					x.invalidateRecursively(a)
-				}
-			}
-			if x.debug > 1 {
-				x.Printf("...multiValueExit new result %s\n", v.LongString())
-			}
-			x.indent(-3)
-		}
-	}
-
-	// Step 1: any stores of aggregates remaining are believed to be sourced from call results or args.
-	// Decompose those stores into a series of smaller stores, adding selection ops as necessary.
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if v.Op == OpStore {
-				t := v.Aux.(*types.Type)
-				source := v.Args[1]
-				tSrc := source.Type
-				iAEATt := x.isAlreadyExpandedAggregateType(t)
-
-				if !iAEATt {
-					// guarding against store immediate struct into interface data field -- store type is *uint8
-					// TODO can this happen recursively?
-					iAEATt = x.isAlreadyExpandedAggregateType(tSrc)
-					if iAEATt {
-						t = tSrc
-					}
-				}
-				dst, mem := v.Args[0], v.Args[2]
-				mem = x.storeArgOrLoad(v.Pos, b, source, mem, t, 0, 0, registerCursor{storeDest: dst})
-				v.copyOf(mem)
-			}
-		}
-	}
-
-	val2Preds := make(map[*Value]int32) // Used to accumulate dependency graph of selection operations for topological ordering.
-
-	// Step 2: transform or accumulate selection operations for rewrite in topological order.
-	//
-	// Aggregate types that have already (in earlier phases) been transformed must be lowered comprehensively to finish
-	// the transformation (user-defined structs and arrays, slices, strings, interfaces, complex, 64-bit on 32-bit architectures).
-	//
-	// Any select-for-addressing applied to call results can be transformed directly.
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			// Accumulate chains of selectors for processing in topological order
-			switch v.Op {
-			case OpStructSelect, OpArraySelect,
-				OpIData, OpITab,
-				OpStringPtr, OpStringLen,
-				OpSlicePtr, OpSliceLen, OpSliceCap, OpSlicePtrUnchecked,
-				OpComplexReal, OpComplexImag,
-				OpInt64Hi, OpInt64Lo:
-				w := v.Args[0]
-				switch w.Op {
-				case OpStructSelect, OpArraySelect, OpSelectN, OpArg:
-					val2Preds[w] += 1
-					if x.debug > 1 {
-						x.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w])
-					}
-				}
-				fallthrough
-
-			case OpSelectN:
-				if _, ok := val2Preds[v]; !ok {
-					val2Preds[v] = 0
-					if x.debug > 1 {
-						x.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
-					}
-				}
-
-			case OpArg:
-				if !x.isAlreadyExpandedAggregateType(v.Type) {
-					continue
-				}
-				if _, ok := val2Preds[v]; !ok {
-					val2Preds[v] = 0
-					if x.debug > 1 {
-						x.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
-					}
-				}
-
-			case OpSelectNAddr:
-				// Do these directly, there are no chains of selectors.
-				call := v.Args[0]
-				which := v.AuxInt
-				aux := call.Aux.(*AuxCall)
-				pt := v.Type
-				off := x.offsetFrom(x.f.Entry, x.sp, aux.OffsetOfResult(which), pt)
-				v.copyOf(off)
-			}
-		}
-	}
-
-	// Step 3: Compute topological order of selectors,
-	// then process it in reverse to eliminate duplicates,
-	// then forwards to rewrite selectors.
-	//
-	// All chains of selectors end up in same block as the call.
-
-	// Compilation must be deterministic, so sort after extracting the initial zero-predecessor values from the map.
-	// Sorting allows dominators-last order within each batch,
-	// so that the backwards scan for duplicates will most often find copies from dominating blocks (it is best-effort).
-	var toProcess []*Value
-	less := func(i, j int) bool {
-		vi, vj := toProcess[i], toProcess[j]
-		bi, bj := vi.Block, vj.Block
-		if bi == bj {
-			return vi.ID < vj.ID
-		}
-		return x.sdom.domorder(bi) > x.sdom.domorder(bj) // reverse the order to put dominators last.
-	}
-
-	// Accumulate order in allOrdered
-	var allOrdered []*Value
-	for v, n := range val2Preds {
-		if n == 0 {
-			allOrdered = append(allOrdered, v)
-		}
-	}
-	last := 0 // allOrdered[0:last] has been top-sorted and processed
-	for len(val2Preds) > 0 {
-		toProcess = allOrdered[last:]
-		last = len(allOrdered)
-		sort.SliceStable(toProcess, less)
-		for _, v := range toProcess {
-			delete(val2Preds, v)
-			if v.Op == OpArg {
-				continue // no Args[0], hence done.
-			}
-			w := v.Args[0]
-			n, ok := val2Preds[w]
-			if !ok {
-				continue
-			}
-			if n == 1 {
-				allOrdered = append(allOrdered, w)
-				delete(val2Preds, w)
-				continue
-			}
-			val2Preds[w] = n - 1
-		}
-	}
-
-	x.commonSelectors = make(map[selKey]*Value)
-	// Rewrite duplicate selectors as copies where possible.
-	for i := len(allOrdered) - 1; i >= 0; i-- {
-		v := allOrdered[i]
-		if v.Op == OpArg {
-			continue
-		}
-		w := v.Args[0]
-		if w.Op == OpCopy {
-			for w.Op == OpCopy {
-				w = w.Args[0]
-			}
-			v.SetArg(0, w)
-		}
-		typ := v.Type
-		if typ.IsMemory() {
-			continue // handled elsewhere, not an indexable result
-		}
-		size := typ.Size()
-		offset := int64(0)
-		switch v.Op {
-		case OpStructSelect:
-			if w.Type.Kind() == types.TSTRUCT {
-				offset = w.Type.FieldOff(int(v.AuxInt))
-			} else { // Immediate interface data artifact, offset is zero.
-				f.Fatalf("Expand calls interface data problem, func %s, v=%s, w=%s\n", f.Name, v.LongString(), w.LongString())
-			}
-		case OpArraySelect:
-			offset = size * v.AuxInt
-		case OpSelectN:
-			offset = v.AuxInt // offset is just a key, really.
-		case OpInt64Hi:
-			offset = x.hiOffset
-		case OpInt64Lo:
-			offset = x.lowOffset
-		case OpStringLen, OpSliceLen, OpIData:
-			offset = x.ptrSize
-		case OpSliceCap:
-			offset = 2 * x.ptrSize
-		case OpComplexImag:
-			offset = size
-		}
-		sk := selKey{from: w, size: size, offsetOrIndex: offset, typ: typ}
-		dupe := x.commonSelectors[sk]
-		if dupe == nil {
-			x.commonSelectors[sk] = v
-		} else if x.sdom.IsAncestorEq(dupe.Block, v.Block) {
-			if x.debug > 1 {
-				x.Printf("Duplicate, make %s copy of %s\n", v, dupe)
-			}
-			v.copyOf(dupe)
-		} else {
-			// Because values are processed in dominator order, the old commonSelectors entry will never dominate after a miss is seen.
-			// Installing the new value might match some future values.
-			x.commonSelectors[sk] = v
-		}
-	}
-
-	// Indices of entries in f.Names that need to be deleted.
-	var toDelete []namedVal
-
-	// Rewrite selectors.
-	for i, v := range allOrdered {
-		if x.debug > 1 {
-			b := v.Block
-			x.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
-		}
-		if v.Uses == 0 {
-			x.invalidateRecursively(v)
-			continue
-		}
-		if v.Op == OpCopy {
-			continue
-		}
-		locs := x.rewriteSelect(v, v, 0, 0)
-		// Install new names.
-		if v.Type.IsMemory() {
-			continue
-		}
-		// Leaf types may have debug locations
-		if !x.isAlreadyExpandedAggregateType(v.Type) {
-			for _, l := range locs {
-				if _, ok := f.NamedValues[*l]; !ok {
-					f.Names = append(f.Names, l)
-				}
-				f.NamedValues[*l] = append(f.NamedValues[*l], v)
-			}
-			continue
-		}
-		if ns, ok := x.namedSelects[v]; ok {
-			// Not-leaf types that had debug locations need to lose them.
-
-			toDelete = append(toDelete, ns...)
-		}
-	}
-
-	deleteNamedVals(f, toDelete)
-
-	// Step 4: rewrite the calls themselves, correcting the type.
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			switch v.Op {
-			case OpArg:
-				x.rewriteArgToMemOrRegs(v)
-			case OpStaticLECall:
-				v.Op = OpStaticCall
-				rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
-				v.Type = types.NewResults(append(rts, types.TypeMem))
-			case OpTailLECall:
-				v.Op = OpTailCall
-				rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
-				v.Type = types.NewResults(append(rts, types.TypeMem))
-			case OpClosureLECall:
-				v.Op = OpClosureCall
-				rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
-				v.Type = types.NewResults(append(rts, types.TypeMem))
-			case OpInterLECall:
-				v.Op = OpInterCall
-				rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
-				v.Type = types.NewResults(append(rts, types.TypeMem))
-			}
-		}
-	}
-
-	// Step 5: dedup OpArgXXXReg values. Mostly it is already dedup'd by commonArgs,
-	// but there are cases where we have the same OpArgXXXReg values with different types.
-	// E.g. string is sometimes decomposed as { *int8, int }, sometimes as { unsafe.Pointer, uintptr }.
-	// (Can we avoid that?)
-	var IArg, FArg [32]*Value
-	for _, v := range f.Entry.Values {
-		switch v.Op {
-		case OpArgIntReg:
-			i := v.AuxInt
-			if w := IArg[i]; w != nil {
-				if w.Type.Size() != v.Type.Size() {
-					f.Fatalf("incompatible OpArgIntReg [%d]: %s and %s", i, v.LongString(), w.LongString())
-				}
-				if w.Type.IsUnsafePtr() && !v.Type.IsUnsafePtr() {
-					// Update unsafe.Pointer type if we know the actual pointer type.
-					w.Type = v.Type
-				}
-				// TODO: don't dedup pointer and scalar? Rewrite to OpConvert? Can it happen?
-				v.copyOf(w)
-			} else {
-				IArg[i] = v
-			}
-		case OpArgFloatReg:
-			i := v.AuxInt
-			if w := FArg[i]; w != nil {
-				if w.Type.Size() != v.Type.Size() {
-					f.Fatalf("incompatible OpArgFloatReg [%d]: %v and %v", i, v, w)
-				}
-				v.copyOf(w)
-			} else {
-				FArg[i] = v
-			}
-		}
-	}
-
-	// Step 6: elide any copies introduced.
-	// Update named values.
-	for _, name := range f.Names {
-		values := f.NamedValues[*name]
-		for i, v := range values {
-			if v.Op == OpCopy {
-				a := v.Args[0]
-				for a.Op == OpCopy {
-					a = a.Args[0]
-				}
-				values[i] = a
-			}
-		}
-	}
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			for i, a := range v.Args {
-				if a.Op != OpCopy {
-					continue
-				}
-				aa := copySource(a)
-				v.SetArg(i, aa)
-				for a.Uses == 0 {
-					b := a.Args[0]
-					x.invalidateRecursively(a)
-					a = b
-				}
-			}
-		}
-	}
-
-	// Rewriting can attach lines to values that are unlikely to survive code generation, so move them to a use.
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			for _, a := range v.Args {
-				if a.Pos.IsStmt() != src.PosIsStmt {
-					continue
-				}
-				if a.Type.IsMemory() {
-					continue
-				}
-				if a.Pos.Line() != v.Pos.Line() {
-					continue
-				}
-				if !a.Pos.SameFile(v.Pos) {
-					continue
-				}
-				switch a.Op {
-				case OpArgIntReg, OpArgFloatReg, OpSelectN:
-					v.Pos = v.Pos.WithIsStmt()
-					a.Pos = a.Pos.WithDefaultStmt()
-				}
-			}
-		}
-	}
-}
-
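Steps 2 and 3 above amount to a topological sort of the selection chains; a compact standalone sketch of that ordering (illustrative names, not the compiler's data structures) is shown below: a selector is emitted once nothing else selects from it, so a StructSelect of a SelectN is processed before the SelectN itself.

package main

import "fmt"

func main() {
	// selectsFrom[v] = the value v selects from (its Args[0]).
	selectsFrom := map[string]string{
		"StructSelect#1": "SelectN#0",
		"IData#2":        "SelectN#0",
	}
	// preds[w] = how many selectors still consume w (the val2Preds analogue).
	preds := map[string]int{"SelectN#0": 2, "StructSelect#1": 0, "IData#2": 0}

	queue := []string{"StructSelect#1", "IData#2"} // zero-predecessor selectors
	var order []string
	for len(queue) > 0 {
		v := queue[0]
		queue = queue[1:]
		order = append(order, v)
		if w, ok := selectsFrom[v]; ok {
			preds[w]--
			if preds[w] == 0 {
				queue = append(queue, w)
			}
		}
	}
	fmt.Println(order) // [StructSelect#1 IData#2 SelectN#0]
}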
-// rewriteArgToMemOrRegs converts OpArg v in-place into the register version of v,
-// if that is appropriate.
-func (x *expandState) rewriteArgToMemOrRegs(v *Value) *Value {
-	if x.debug > 1 {
-		x.indent(3)
-		defer x.indent(-3)
-		x.Printf("rewriteArgToMemOrRegs(%s)\n", v.LongString())
-	}
-	pa := x.prAssignForArg(v)
-	switch len(pa.Registers) {
-	case 0:
-		frameOff := v.Aux.(*ir.Name).FrameOffset()
-		if pa.Offset() != int32(frameOff+x.f.ABISelf.LocalsOffset()) {
-			panic(fmt.Errorf("Parameter assignment %d and OpArg.Aux frameOffset %d disagree, op=%s",
-				pa.Offset(), frameOff, v.LongString()))
-		}
-	case 1:
-		t := v.Type
-		key := selKey{v, 0, t.Size(), t}
-		w := x.commonArgs[key]
-		if w != nil && w.Uses != 0 { // do not reuse dead value
-			v.copyOf(w)
-			break
-		}
-		r := pa.Registers[0]
-		var i int64
-		v.Op, i = ArgOpAndRegisterFor(r, x.f.ABISelf)
-		v.Aux = &AuxNameOffset{v.Aux.(*ir.Name), 0}
-		v.AuxInt = i
-		x.commonArgs[key] = v
-
-	default:
-		panic(badVal("Saw unexpanded OpArg", v))
-	}
-	if x.debug > 1 {
-		x.Printf("-->%s\n", v.LongString())
-	}
-	return v
-}
-
-// newArgToMemOrRegs either rewrites toReplace into an OpArg referencing memory or into an OpArgXXXReg to a register,
-// or rewrites it into a copy of the appropriate OpArgXXX.  The actual OpArgXXX is determined by combining baseArg (an OpArg)
-// with offset, regOffset, and t to determine which portion of it to reference (either all or a part, in memory or in registers).
-func (x *expandState) newArgToMemOrRegs(baseArg, toReplace *Value, offset int64, regOffset Abi1RO, t *types.Type, pos src.XPos) *Value {
-	if x.debug > 1 {
-		x.indent(3)
-		defer x.indent(-3)
-		x.Printf("newArgToMemOrRegs(base=%s; toReplace=%s; t=%s; memOff=%d; regOff=%d)\n", baseArg.String(), toReplace.LongString(), t.String(), offset, regOffset)
-	}
-	key := selKey{baseArg, offset, t.Size(), t}
-	w := x.commonArgs[key]
-	if w != nil && w.Uses != 0 { // do not reuse dead value
-		if toReplace != nil {
-			toReplace.copyOf(w)
-			if x.debug > 1 {
-				x.Printf("...replace %s\n", toReplace.LongString())
-			}
-		}
-		if x.debug > 1 {
-			x.Printf("-->%s\n", w.LongString())
-		}
-		return w
-	}
-
-	pa := x.prAssignForArg(baseArg)
-	if len(pa.Registers) == 0 { // Arg is on stack
-		frameOff := baseArg.Aux.(*ir.Name).FrameOffset()
-		if pa.Offset() != int32(frameOff+x.f.ABISelf.LocalsOffset()) {
-			panic(fmt.Errorf("Parameter assignment %d and OpArg.Aux frameOffset %d disagree, op=%s",
-				pa.Offset(), frameOff, baseArg.LongString()))
-		}
-		aux := baseArg.Aux
-		auxInt := baseArg.AuxInt + offset
-		if toReplace != nil && toReplace.Block == baseArg.Block {
-			toReplace.reset(OpArg)
-			toReplace.Aux = aux
-			toReplace.AuxInt = auxInt
-			toReplace.Type = t
-			w = toReplace
-		} else {
-			w = baseArg.Block.NewValue0IA(baseArg.Pos, OpArg, t, auxInt, aux)
-		}
-		x.commonArgs[key] = w
-		if toReplace != nil {
-			toReplace.copyOf(w)
-		}
-		if x.debug > 1 {
-			x.Printf("-->%s\n", w.LongString())
-		}
-		return w
-	}
-	// Arg is in registers
-	r := pa.Registers[regOffset]
-	op, auxInt := ArgOpAndRegisterFor(r, x.f.ABISelf)
-	if op == OpArgIntReg && t.IsFloat() || op == OpArgFloatReg && t.IsInteger() {
-		fmt.Printf("pa=%v\nx.f.OwnAux.abiInfo=%s\n",
-			pa.ToString(x.f.ABISelf, true),
-			x.f.OwnAux.abiInfo.String())
-		panic(fmt.Errorf("Op/Type mismatch, op=%s, type=%s", op.String(), t.String()))
-	}
-	if baseArg.AuxInt != 0 {
-		base.Fatalf("BaseArg %s bound to registers has non-zero AuxInt", baseArg.LongString())
-	}
-	aux := &AuxNameOffset{baseArg.Aux.(*ir.Name), offset}
-	if toReplace != nil && toReplace.Block == baseArg.Block {
-		toReplace.reset(op)
-		toReplace.Aux = aux
-		toReplace.AuxInt = auxInt
-		toReplace.Type = t
-		w = toReplace
-	} else {
-		w = baseArg.Block.NewValue0IA(baseArg.Pos, op, t, auxInt, aux)
-	}
-	x.commonArgs[key] = w
-	if toReplace != nil {
-		toReplace.copyOf(w)
-	}
-	if x.debug > 1 {
-		x.Printf("-->%s\n", w.LongString())
-	}
-	return w
-
-}
-
-// ArgOpAndRegisterFor converts an abi register index into an ssa Op and corresponding
-// arg register index.
-func ArgOpAndRegisterFor(r abi.RegIndex, abiConfig *abi.ABIConfig) (Op, int64) {
-	i := abiConfig.FloatIndexFor(r)
-	if i >= 0 { // float PR
-		return OpArgFloatReg, i
-	}
-	return OpArgIntReg, int64(r)
-}
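A toy model of the mapping just described, with an invented register count rather than a real abi.ABIConfig: indices below the integer-register count select OpArgIntReg, the rest select OpArgFloatReg with a float-relative index.

package main

import "fmt"

const numIntRegs = 4 // assumption for illustration only

func argOpFor(r int64) (op string, index int64) {
	if r >= numIntRegs { // float registers are numbered after the integer ones
		return "OpArgFloatReg", r - numIntRegs
	}
	return "OpArgIntReg", r
}

func main() {
	fmt.Println(argOpFor(2)) // OpArgIntReg 2
	fmt.Println(argOpFor(5)) // OpArgFloatReg 1
}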
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index 14f2474..b2c4b19 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -55,22 +55,26 @@
 
 func (c *Conf) Frontend() Frontend {
 	if c.fe == nil {
-		f := ir.NewFunc(src.NoXPos)
-		f.Nname = ir.NewNameAt(f.Pos(), &types.Sym{
-			Pkg:  types.NewPkg("my/import/path", "path"),
-			Name: "function",
-		})
-		f.LSym = &obj.LSym{Name: "my/import/path.function"}
+		pkg := types.NewPkg("my/import/path", "path")
+		fn := ir.NewFunc(src.NoXPos, src.NoXPos, pkg.Lookup("function"), types.NewSignature(nil, nil, nil))
+		fn.DeclareParams(true)
+		fn.LSym = &obj.LSym{Name: "my/import/path.function"}
 
 		c.fe = TestFrontend{
 			t:    c.tb,
 			ctxt: c.config.ctxt,
-			f:    f,
+			f:    fn,
 		}
 	}
 	return c.fe
 }
 
+func (c *Conf) Temp(typ *types.Type) *ir.Name {
+	n := ir.NewNameAt(src.NoXPos, &types.Sym{Name: "aFakeAuto"}, typ)
+	n.Class = ir.PAUTO
+	return n
+}
+
 // TestFrontend is a test-only frontend.
 // It assumes 64 bit integers and pointers.
 type TestFrontend struct {
@@ -82,17 +86,9 @@
 func (TestFrontend) StringData(s string) *obj.LSym {
 	return nil
 }
-func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Name {
-	n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"})
-	n.SetType(t)
-	n.Class = ir.PAUTO
-	return n
-}
 func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
 	return LocalSlot{N: parent.N, Type: t, Off: offset}
 }
-func (TestFrontend) AllocFrame(f *Func) {
-}
 func (d TestFrontend) Syslook(s string) *obj.LSym {
 	return d.ctxt.Lookup(s)
 }
@@ -107,9 +103,6 @@
 func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{})  { d.t.Logf(msg, args...) }
 func (d TestFrontend) Debug_checknil() bool                               { return false }
 
-func (d TestFrontend) MyImportPath() string {
-	return d.f.Sym().Pkg.Path
-}
 func (d TestFrontend) Func() *ir.Func {
 	return d.f
 }
@@ -125,10 +118,3 @@
 	typecheck.InitUniverse()
 	testTypes.SetTypPtrs()
 }
-
-func (d TestFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
-
-func (d TestFrontend) CanSSA(t *types.Type) bool {
-	// There are no un-SSAable types in test land.
-	return true
-}
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 2d203e5..031d94f 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -7,7 +7,10 @@
 import (
 	"cmd/compile/internal/abi"
 	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
+	"cmd/internal/obj"
 	"cmd/internal/src"
 	"fmt"
 	"math"
@@ -61,7 +64,7 @@
 
 	// RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry.
 	RegArgs []Spill
-	// AuxCall describing parameters and results for this function.
+	// OwnAux describes parameters and results for this function.
 	OwnAux *AuxCall
 
 	freeValues *Value // free Values linked by argstorage[0].  All other fields except ID are 0/nil.
@@ -84,9 +87,17 @@
 }
 
 // NewFunc returns a new, empty function object.
-// Caller must set f.Config and f.Cache before using f.
-func NewFunc(fe Frontend) *Func {
-	return &Func{fe: fe, NamedValues: make(map[LocalSlot][]*Value), CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot), CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot)}
+// Caller must reset cache before calling NewFunc.
+func (c *Config) NewFunc(fe Frontend, cache *Cache) *Func {
+	return &Func{
+		fe:     fe,
+		Config: c,
+		Cache:  cache,
+
+		NamedValues:          make(map[LocalSlot][]*Value),
+		CanonicalLocalSlots:  make(map[LocalSlot]*LocalSlot),
+		CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot),
+	}
 }
 
 // NumBlocks returns an integer larger than the id of any Block in the Func.
@@ -99,6 +110,21 @@
 	return f.vid.num()
 }
 
+// NameABI returns the function name followed by comma and the ABI number.
+// This is intended for use with GOSSAFUNC and HTML dumps, and differs from
+// the linker's "<1>" convention because "<" and ">" require shell quoting
+// and are not legal file names (for use with GOSSADIR) on Windows.
+func (f *Func) NameABI() string {
+	return FuncNameABI(f.Name, f.ABISelf.Which())
+}
+
+// FuncNameABI returns n followed by a comma and the value of a.
+// This is a separate function to allow a single point encoding
+// of the format, which is used in places where there's not a Func yet.
+func FuncNameABI(n string, a obj.ABI) string {
+	return fmt.Sprintf("%s,%d", n, a)
+}
+
 // newSparseSet returns a sparse set that can store at least up to n integers.
 func (f *Func) newSparseSet(n int) *sparseSet {
 	return f.Cache.allocSparseSet(n)
@@ -695,7 +721,6 @@
 		v.AddArg(sp)
 	}
 	return v
-
 }
 
 func (f *Func) Frontend() Frontend                                  { return f.fe }
@@ -773,7 +798,8 @@
 	if !base.HasDebugHash() {
 		return true
 	}
-	return base.DebugHashMatchPkgFunc(f.fe.MyImportPath(), f.Name)
+	sym := f.fe.Func().Sym()
+	return base.DebugHashMatchPkgFunc(sym.Pkg.Path, sym.Name)
 }
 
 func (f *Func) spSb() (sp, sb *Value) {
@@ -809,3 +835,8 @@
 	}
 	return base.FmaHash.MatchPos(v.Pos, nil)
 }
+
+// NewLocal returns a new anonymous local variable of the given type.
+func (f *Func) NewLocal(pos src.XPos, typ *types.Type) *ir.Name {
+	return typecheck.TempAt(pos, f.fe.Func(), typ) // Note: adds new auto to fn.Dcl list
+}
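
The func.go hunk above switches dump titles and file names to a "name,ABI" pair, since the linker's "<N>" suffix needs shell quoting and is not a legal file name on Windows. A minimal standalone sketch of that format, assuming the usual ABI numbering (ABI0 = 0, ABIInternal = 1); the helper name below is illustrative and not the compiler's API:

// Standalone sketch of the "name,ABI" convention used by (*Func).NameABI
// and FuncNameABI for GOSSAFUNC/GOSSADIR dumps; not part of the patch.
package main

import "fmt"

func funcNameABI(name string, abi int) string {
	return fmt.Sprintf("%s,%d", name, abi)
}

func main() {
	fmt.Println(funcNameABI("main.main", 1))  // main.main,1 (ABIInternal, assumed = 1)
	fmt.Println(funcNameABI("example.Fn", 0)) // example.Fn,0 (ABI0, assumed = 0)
}
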
diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go
index bbb228d..6923aaa 100644
--- a/src/cmd/compile/internal/ssa/func_test.go
+++ b/src/cmd/compile/internal/ssa/func_test.go
@@ -152,12 +152,10 @@
 // supplied to one of the Bloc functions. Each of the bloc names and
 // valu names should be unique across the Fun.
 func (c *Conf) Fun(entry string, blocs ...bloc) fun {
-	f := NewFunc(c.Frontend())
-	f.Config = c.config
 	// TODO: Either mark some SSA tests as t.Parallel,
 	// or set up a shared Cache and Reset it between tests.
 	// But not both.
-	f.Cache = new(Cache)
+	f := c.config.NewFunc(c.Frontend(), new(Cache))
 	f.pass = &emptyPass
 	f.cachedLineStarts = newXposmap(map[int]lineRange{0: {0, 100}, 1: {0, 100}, 2: {0, 100}, 3: {0, 100}, 4: {0, 100}})
 
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
index 6d3fb70..68defde 100644
--- a/src/cmd/compile/internal/ssa/fuse.go
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -169,7 +169,7 @@
 // There may be false positives.
 func isEmpty(b *Block) bool {
 	for _, v := range b.Values {
-		if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() {
+		if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() || opcodeTable[v.Op].nilCheck {
 			return false
 		}
 	}
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
index fa7921a..2f89938 100644
--- a/src/cmd/compile/internal/ssa/fuse_test.go
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -254,7 +254,7 @@
 			Valu("p", OpArg, c.config.Types.IntPtr, 0, nil),
 			If("c1", "z0", "exit")),
 		Bloc("z0",
-			Valu("nilcheck", OpNilCheck, types.TypeVoid, 0, nil, "p", "mem"),
+			Valu("nilcheck", OpNilCheck, c.config.Types.IntPtr, 0, nil, "p", "mem"),
 			Goto("exit")),
 		Bloc("exit",
 			Exit("mem"),
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index 7e5a097..ea170fb 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -741,7 +741,7 @@
 </head>`)
 	w.WriteString("<body>")
 	w.WriteString("<h1>")
-	w.WriteString(html.EscapeString(w.Func.Name))
+	w.WriteString(html.EscapeString(w.Func.NameABI()))
 	w.WriteString("</h1>")
 	w.WriteString(`
 <a href="#" onclick="toggle_visibility('help');return false;" id="helplink">help</a>
@@ -784,7 +784,7 @@
 	io.WriteString(w.w, "</body>")
 	io.WriteString(w.w, "</html>")
 	w.w.Close()
-	fmt.Printf("dumped SSA to %v\n", w.path)
+	fmt.Printf("dumped SSA for %s to %v\n", w.Func.NameABI(), w.path)
 }
 
 // WritePhase writes f in a column headed by title.
diff --git a/src/cmd/compile/internal/ssa/lca.go b/src/cmd/compile/internal/ssa/lca.go
index 90daebe..6e7ad96 100644
--- a/src/cmd/compile/internal/ssa/lca.go
+++ b/src/cmd/compile/internal/ssa/lca.go
@@ -106,7 +106,7 @@
 	if a == b {
 		return a
 	}
-	// Find the positions of a and bin the Euler tour.
+	// Find the positions of a and b in the Euler tour.
 	p1 := lca.blocks[a.ID].pos
 	p2 := lca.blocks[b.ID].pos
 	if p1 > p2 {
diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go
index b7dfaa3..dd1f39d 100644
--- a/src/cmd/compile/internal/ssa/loopbce.go
+++ b/src/cmd/compile/internal/ssa/loopbce.go
@@ -13,12 +13,14 @@
 type indVarFlags uint8
 
 const (
-	indVarMinExc indVarFlags = 1 << iota // minimum value is exclusive (default: inclusive)
-	indVarMaxInc                         // maximum value is inclusive (default: exclusive)
+	indVarMinExc    indVarFlags = 1 << iota // minimum value is exclusive (default: inclusive)
+	indVarMaxInc                            // maximum value is inclusive (default: exclusive)
+	indVarCountDown                         // if set, the iteration starts at max and counts towards min (default: min towards max)
 )
 
 type indVar struct {
 	ind   *Value // induction variable
+	nxt   *Value // the incremented variable
 	min   *Value // minimum value, inclusive/exclusive depends on flags
 	max   *Value // maximum value, inclusive/exclusive depends on flags
 	entry *Block // entry block in the loop.
@@ -127,6 +129,13 @@
 			less = false
 		}
 
+		if ind.Block != b {
+			// TODO: Could be extended to include disjointed loop headers.
+			// I don't think this is causing missed optimizations in real world code often.
+			// See https://go.dev/issue/63955
+			continue
+		}
+
 		// Expect the increment to be a nonzero constant.
 		if !inc.isGenericIntConst() {
 			continue
@@ -277,6 +286,7 @@
 				if !inclusive {
 					flags |= indVarMinExc
 				}
+				flags |= indVarCountDown
 				step = -step
 			}
 			if f.pass.debug >= 1 {
@@ -285,6 +295,7 @@
 
 			iv = append(iv, indVar{
 				ind:   ind,
+				nxt:   nxt,
 				min:   min,
 				max:   max,
 				entry: b.Succs[0].b,
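
The loopbce.go changes record which direction the induction variable moves: the new indVarCountDown flag marks loops whose counter starts at the maximum and steps toward the minimum, and the new nxt field keeps the incremented value alongside ind. A source-level illustration of the two loop shapes (ordinary Go, not taken from the patch):

// Sketch only: sumUp is the default min-towards-max form; sumDown is the
// shape the prover now tags with indVarCountDown.
package main

import "fmt"

func sumUp(s []int) (t int) {
	for i := 0; i < len(s); i++ { // counts up: from min toward max
		t += s[i]
	}
	return
}

func sumDown(s []int) (t int) {
	for i := len(s) - 1; i >= 0; i-- { // counts down: from max toward min
		t += s[i]
	}
	return
}

func main() {
	s := []int{1, 2, 3, 4}
	fmt.Println(sumUp(s), sumDown(s)) // 10 10
}
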
diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go
index 7c56523..0ac473d 100644
--- a/src/cmd/compile/internal/ssa/loopreschedchecks.go
+++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go
@@ -247,7 +247,7 @@
 		//    mem1 := call resched (mem0)
 		//    goto header
 		resched := f.fe.Syslook("goschedguarded")
-		call := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(resched, bb.Func.ABIDefault.ABIAnalyzeTypes(nil, nil, nil)), mem0)
+		call := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(resched, bb.Func.ABIDefault.ABIAnalyzeTypes(nil, nil)), mem0)
 		mem1 := sched.NewValue1I(bb.Pos, OpSelectN, types.TypeMem, 0, call)
 		sched.AddEdgeTo(h)
 		headerMemPhi.AddArg(mem1)
diff --git a/src/cmd/compile/internal/ssa/magic.go b/src/cmd/compile/internal/ssa/magic.go
index df4b568..235b0e5 100644
--- a/src/cmd/compile/internal/ssa/magic.go
+++ b/src/cmd/compile/internal/ssa/magic.go
@@ -170,7 +170,7 @@
 	return c&(c-1) != 0
 }
 
-// smagicOKn reports whether we should strength reduce an signed n-bit divide by c.
+// smagicOKn reports whether we should strength reduce a signed n-bit divide by c.
 func smagicOK8(c int8) bool   { return smagicOK(8, int64(c)) }
 func smagicOK16(c int16) bool { return smagicOK(16, int64(c)) }
 func smagicOK32(c int32) bool { return smagicOK(32, int64(c)) }
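
smagicOK and friends decide when a constant division is worth strength-reducing into a multiply-high and shift. As a hedged, worked illustration of the unsigned flavor of that transformation (the constant below is the standard uint32 divide-by-3 magic number, not a value taken from this file):

// Sketch: replacing x/3 with a multiply and shift, using
// 0xAAAAAAAB = ceil(2^33 / 3) as the magic multiplier.
package main

import "fmt"

func div3(x uint32) uint32 {
	return uint32((uint64(x) * 0xAAAAAAAB) >> 33)
}

func main() {
	for _, x := range []uint32{0, 1, 2, 3, 100, 1<<32 - 1} {
		fmt.Println(x, div3(x), x/3) // the last two columns agree
	}
}
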
diff --git a/src/cmd/compile/internal/ssa/memcombine.go b/src/cmd/compile/internal/ssa/memcombine.go
index c134643..b1a4751 100644
--- a/src/cmd/compile/internal/ssa/memcombine.go
+++ b/src/cmd/compile/internal/ssa/memcombine.go
@@ -313,8 +313,8 @@
 	if isLittleEndian && shift0 != 0 {
 		v = leftShift(loadBlock, pos, v, shift0)
 	}
-	if isBigEndian && shift0-(n-1)*8 != 0 {
-		v = leftShift(loadBlock, pos, v, shift0-(n-1)*8)
+	if isBigEndian && shift0-(n-1)*size*8 != 0 {
+		v = leftShift(loadBlock, pos, v, shift0-(n-1)*size*8)
 	}
 
 	// Install with (Copy v).
@@ -500,6 +500,8 @@
 			return false
 		}
 		if x.Aux.(*types.Type).Size() != size {
+			// TODO: the constant source and consecutive load source cases
+			// do not need all the stores to be the same size.
 			return false
 		}
 		base, off := splitPtr(x.Args[0])
@@ -510,6 +512,8 @@
 	}
 	// Before we sort, grab the memory arg the result should have.
 	mem := a[n-1].store.Args[2]
+	// Also grab position of first store (last in array = first in memory order).
+	pos := a[n-1].store.Pos
 
 	// Sort stores in increasing address order.
 	sort.Slice(a, func(i, j int) bool {
@@ -562,6 +566,7 @@
 			v := a[i].store
 			if v == root {
 				v.Aux = cv.Type // widen store type
+				v.Pos = pos
 				v.SetArg(0, ptr)
 				v.SetArg(1, cv)
 				v.SetArg(2, mem)
@@ -573,6 +578,75 @@
 		return true
 	}
 
+	// Check for consecutive loads as the source of the stores.
+	var loadMem *Value
+	var loadBase BaseAddress
+	var loadIdx int64
+	for i := int64(0); i < n; i++ {
+		load := a[i].store.Args[1]
+		if load.Op != OpLoad {
+			loadMem = nil
+			break
+		}
+		if load.Uses != 1 {
+			loadMem = nil
+			break
+		}
+		if load.Type.IsPtr() {
+			// Don't combine stores containing a pointer, as we need
+			// a write barrier for those. This can't currently happen,
+			// but might in the future if we ever have another
+			// 8-byte-reg/4-byte-ptr architecture like amd64p32.
+			loadMem = nil
+			break
+		}
+		mem := load.Args[1]
+		base, idx := splitPtr(load.Args[0])
+		if loadMem == nil {
+			// First one we found
+			loadMem = mem
+			loadBase = base
+			loadIdx = idx
+			continue
+		}
+		if base != loadBase || mem != loadMem {
+			loadMem = nil
+			break
+		}
+		if idx != loadIdx+(a[i].offset-a[0].offset) {
+			loadMem = nil
+			break
+		}
+	}
+	if loadMem != nil {
+		// Modify the first load to do a larger load instead.
+		load := a[0].store.Args[1]
+		switch size * n {
+		case 2:
+			load.Type = types.Types[types.TUINT16]
+		case 4:
+			load.Type = types.Types[types.TUINT32]
+		case 8:
+			load.Type = types.Types[types.TUINT64]
+		}
+
+		// Modify root to do the store.
+		for i := int64(0); i < n; i++ {
+			v := a[i].store
+			if v == root {
+				v.Aux = load.Type // widen store type
+				v.Pos = pos
+				v.SetArg(0, ptr)
+				v.SetArg(1, load)
+				v.SetArg(2, mem)
+			} else {
+				clobber(v)
+				v.Type = types.Types[types.TBOOL] // erase memory type
+			}
+		}
+		return true
+	}
+
 	// Check that all the shift/trunc are of the same base value.
 	shiftBase := getShiftBase(a)
 	if shiftBase == nil {
@@ -588,14 +662,14 @@
 	isLittleEndian := true
 	shift0 := shift(a[0].store, shiftBase)
 	for i := int64(1); i < n; i++ {
-		if shift(a[i].store, shiftBase) != shift0+i*8 {
+		if shift(a[i].store, shiftBase) != shift0+i*size*8 {
 			isLittleEndian = false
 			break
 		}
 	}
 	isBigEndian := true
 	for i := int64(1); i < n; i++ {
-		if shift(a[i].store, shiftBase) != shift0-i*8 {
+		if shift(a[i].store, shiftBase) != shift0-i*size*8 {
 			isBigEndian = false
 			break
 		}
@@ -618,8 +692,8 @@
 	if isLittleEndian && shift0 != 0 {
 		sv = rightShift(root.Block, root.Pos, sv, shift0)
 	}
-	if isBigEndian && shift0-(n-1)*8 != 0 {
-		sv = rightShift(root.Block, root.Pos, sv, shift0-(n-1)*8)
+	if isBigEndian && shift0-(n-1)*size*8 != 0 {
+		sv = rightShift(root.Block, root.Pos, sv, shift0-(n-1)*size*8)
 	}
 	if sv.Type.Size() > size*n {
 		sv = truncate(root.Block, root.Pos, sv, sv.Type.Size(), size*n)
@@ -633,6 +707,7 @@
 		v := a[i].store
 		if v == root {
 			v.Aux = sv.Type // widen store type
+			v.Pos = pos
 			v.SetArg(0, ptr)
 			v.SetArg(1, sv)
 			v.SetArg(2, mem)
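
The new case added to the store-combining path above recognizes adjacent narrow stores whose sources are adjacent narrow loads from a single base, and rewrites them as one wide load feeding one wide store; the shift-based cases are also fixed to scale offsets by the element size. A hedged source-level sketch of the kind of pattern this targets (whether it actually combines depends on the target's unaligned-access support):

// Sketch only: eight byte-sized load/store pairs that can collapse into a
// single 8-byte load and store.
package main

import "fmt"

func copy8(dst, src *[8]byte) {
	dst[0] = src[0]
	dst[1] = src[1]
	dst[2] = src[2]
	dst[3] = src[3]
	dst[4] = src[4]
	dst[5] = src[5]
	dst[6] = src[6]
	dst[7] = src[7]
}

func main() {
	src := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
	var dst [8]byte
	copy8(&dst, &src)
	fmt.Println(dst)
}
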
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index 4f797a4..c69cd8c 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -38,11 +38,14 @@
 	work := make([]bp, 0, 256)
 	work = append(work, bp{block: f.Entry})
 
-	// map from value ID to bool indicating if value is known to be non-nil
-	// in the current dominator path being walked. This slice is updated by
+	// map from value ID to known non-nil version of that value ID
+	// (in the current dominator path being walked). This slice is updated by
 	// walkStates to maintain the known non-nil values.
-	nonNilValues := f.Cache.allocBoolSlice(f.NumValues())
-	defer f.Cache.freeBoolSlice(nonNilValues)
+	// If there is extrinsic information about non-nil-ness, this map
+	// points a value to itself. If a value is known non-nil because we
+	// already did a nil check on it, it points to the nil check operation.
+	nonNilValues := f.Cache.allocValueSlice(f.NumValues())
+	defer f.Cache.freeValueSlice(nonNilValues)
 
 	// make an initial pass identifying any non-nil values
 	for _, b := range f.Blocks {
@@ -54,7 +57,7 @@
 			// We assume that SlicePtr is non-nil because we do a bounds check
 			// before the slice access (and all cap>0 slices have a non-nil ptr). See #30366.
 			if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 || v.Op == OpSlicePtr {
-				nonNilValues[v.ID] = true
+				nonNilValues[v.ID] = v
 			}
 		}
 	}
@@ -68,16 +71,16 @@
 				if v.Op == OpPhi {
 					argsNonNil := true
 					for _, a := range v.Args {
-						if !nonNilValues[a.ID] {
+						if nonNilValues[a.ID] == nil {
 							argsNonNil = false
 							break
 						}
 					}
 					if argsNonNil {
-						if !nonNilValues[v.ID] {
+						if nonNilValues[v.ID] == nil {
 							changed = true
 						}
-						nonNilValues[v.ID] = true
+						nonNilValues[v.ID] = v
 					}
 				}
 			}
@@ -103,8 +106,8 @@
 			if len(b.Preds) == 1 {
 				p := b.Preds[0].b
 				if p.Kind == BlockIf && p.Controls[0].Op == OpIsNonNil && p.Succs[0].b == b {
-					if ptr := p.Controls[0].Args[0]; !nonNilValues[ptr.ID] {
-						nonNilValues[ptr.ID] = true
+					if ptr := p.Controls[0].Args[0]; nonNilValues[ptr.ID] == nil {
+						nonNilValues[ptr.ID] = ptr
 						work = append(work, bp{op: ClearPtr, ptr: ptr})
 					}
 				}
@@ -117,14 +120,11 @@
 			pendingLines.clear()
 
 			// Next, process values in the block.
-			i := 0
 			for _, v := range b.Values {
-				b.Values[i] = v
-				i++
 				switch v.Op {
 				case OpIsNonNil:
 					ptr := v.Args[0]
-					if nonNilValues[ptr.ID] {
+					if nonNilValues[ptr.ID] != nil {
 						if v.Pos.IsStmt() == src.PosIsStmt { // Boolean true is a terrible statement boundary.
 							pendingLines.add(v.Pos)
 							v.Pos = v.Pos.WithNotStmt()
@@ -135,7 +135,7 @@
 					}
 				case OpNilCheck:
 					ptr := v.Args[0]
-					if nonNilValues[ptr.ID] {
+					if nilCheck := nonNilValues[ptr.ID]; nilCheck != nil {
 						// This is a redundant implicit nil check.
 						// Logging in the style of the former compiler -- and omit line 1,
 						// which is usually in generated code.
@@ -145,14 +145,13 @@
 						if v.Pos.IsStmt() == src.PosIsStmt { // About to lose a statement boundary
 							pendingLines.add(v.Pos)
 						}
-						v.reset(OpUnknown)
-						f.freeValue(v)
-						i--
+						v.Op = OpCopy
+						v.SetArgs1(nilCheck)
 						continue
 					}
 					// Record the fact that we know ptr is non nil, and remember to
 					// undo that information when this dominator subtree is done.
-					nonNilValues[ptr.ID] = true
+					nonNilValues[ptr.ID] = v
 					work = append(work, bp{op: ClearPtr, ptr: ptr})
 					fallthrough // a non-eliminated nil check might be a good place for a statement boundary.
 				default:
@@ -163,7 +162,7 @@
 				}
 			}
 			// This reduces the lost statement count in "go" by 5 (out of 500 total).
-			for j := 0; j < i; j++ { // is this an ordering problem?
+			for j := range b.Values { // is this an ordering problem?
 				v := b.Values[j]
 				if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
 					v.Pos = v.Pos.WithIsStmt()
@@ -174,7 +173,6 @@
 				b.Pos = b.Pos.WithIsStmt()
 				pendingLines.remove(b.Pos)
 			}
-			b.truncateValues(i)
 
 			// Add all dominated blocks to the work list.
 			for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
@@ -182,7 +180,7 @@
 			}
 
 		case ClearPtr:
-			nonNilValues[node.ptr.ID] = false
+			nonNilValues[node.ptr.ID] = nil
 			continue
 		}
 	}
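
With this change nilcheckelim tracks, per value ID, which value established non-nil-ness, and a redundant OpNilCheck is rewritten into an OpCopy of the dominating check instead of being freed outright. At the source level the situation looks roughly like this (illustrative Go, not from the patch):

// Sketch only: the second dereference of p needs no fresh nil check, because
// the first dereference already proved p non-nil on this path.
package main

import "fmt"

func twice(p *int) int {
	a := *p // implicit nil check on p
	b := *p // redundant check: p is already known non-nil here
	return a + b
}

func main() {
	x := 21
	fmt.Println(twice(&x)) // 42
}
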
diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go
index 2e32afe..6c89b1e 100644
--- a/src/cmd/compile/internal/ssa/nilcheck_test.go
+++ b/src/cmd/compile/internal/ssa/nilcheck_test.go
@@ -1,3 +1,7 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package ssa
 
 import (
diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go
index 4cbc491..b4eca32 100644
--- a/src/cmd/compile/internal/ssa/numberlines.go
+++ b/src/cmd/compile/internal/ssa/numberlines.go
@@ -32,7 +32,7 @@
 	// If the value is the last one in the block, too bad, it will have to do
 	// (this assumes that the value ordering vaguely corresponds to the source
 	// program execution order, which tends to be true directly after ssa is
-	// first built.
+	// first built).
 	if i >= len(b.Values)-1 {
 		return i
 	}
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index e2319d7..cb151b2 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -238,13 +238,9 @@
 	return a.abiInfo.InParam(int(which)).Registers
 }
 
-// NameOfResult returns the type of result which (indexed 0, 1, etc).
+// NameOfResult returns the ir.Name of result which (indexed 0, 1, etc).
 func (a *AuxCall) NameOfResult(which int64) *ir.Name {
-	name := a.abiInfo.OutParam(int(which)).Name
-	if name == nil {
-		return nil
-	}
-	return name.(*ir.Name)
+	return a.abiInfo.OutParam(int(which)).Name
 }
 
 // TypeOfResult returns the type of result which (indexed 0, 1, etc).
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 1480fcf..c552832 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -716,12 +716,12 @@
 	OpAMD64BTSQ
 	OpAMD64BTLconst
 	OpAMD64BTQconst
-	OpAMD64BTCLconst
 	OpAMD64BTCQconst
-	OpAMD64BTRLconst
 	OpAMD64BTRQconst
-	OpAMD64BTSLconst
 	OpAMD64BTSQconst
+	OpAMD64BTSQconstmodify
+	OpAMD64BTRQconstmodify
+	OpAMD64BTCQconstmodify
 	OpAMD64TESTQ
 	OpAMD64TESTL
 	OpAMD64TESTW
@@ -912,6 +912,8 @@
 	OpAMD64SQRTSS
 	OpAMD64ROUNDSD
 	OpAMD64VFMADD231SD
+	OpAMD64MINSD
+	OpAMD64MINSS
 	OpAMD64SBBQcarrymask
 	OpAMD64SBBLcarrymask
 	OpAMD64SETEQ
@@ -935,6 +937,16 @@
 	OpAMD64SETBEstore
 	OpAMD64SETAstore
 	OpAMD64SETAEstore
+	OpAMD64SETEQstoreidx1
+	OpAMD64SETNEstoreidx1
+	OpAMD64SETLstoreidx1
+	OpAMD64SETLEstoreidx1
+	OpAMD64SETGstoreidx1
+	OpAMD64SETGEstoreidx1
+	OpAMD64SETBstoreidx1
+	OpAMD64SETBEstoreidx1
+	OpAMD64SETAstoreidx1
+	OpAMD64SETAEstoreidx1
 	OpAMD64SETEQF
 	OpAMD64SETNEF
 	OpAMD64SETORD
@@ -964,6 +976,7 @@
 	OpAMD64MOVLi2f
 	OpAMD64MOVLf2i
 	OpAMD64PXOR
+	OpAMD64POR
 	OpAMD64LEAQ
 	OpAMD64LEAL
 	OpAMD64LEAW
@@ -1441,6 +1454,10 @@
 	OpARM64FNEGD
 	OpARM64FSQRTD
 	OpARM64FSQRTS
+	OpARM64FMIND
+	OpARM64FMINS
+	OpARM64FMAXD
+	OpARM64FMAXS
 	OpARM64REV
 	OpARM64REVW
 	OpARM64REV16
@@ -2089,10 +2106,13 @@
 	OpMIPS64LoweredPanicBoundsC
 
 	OpPPC64ADD
+	OpPPC64ADDCC
 	OpPPC64ADDconst
+	OpPPC64ADDCCconst
 	OpPPC64FADD
 	OpPPC64FADDS
 	OpPPC64SUB
+	OpPPC64SUBCC
 	OpPPC64SUBFCconst
 	OpPPC64FSUB
 	OpPPC64FSUBS
@@ -2119,7 +2139,6 @@
 	OpPPC64SLW
 	OpPPC64ROTL
 	OpPPC64ROTLW
-	OpPPC64RLDICL
 	OpPPC64CLRLSLWI
 	OpPPC64CLRLSLDI
 	OpPPC64ADDC
@@ -2142,7 +2161,10 @@
 	OpPPC64RLWINM
 	OpPPC64RLWNM
 	OpPPC64RLWMI
+	OpPPC64RLDICL
+	OpPPC64RLDICR
 	OpPPC64CNTLZD
+	OpPPC64CNTLZDCC
 	OpPPC64CNTLZW
 	OpPPC64CNTTZD
 	OpPPC64CNTTZW
@@ -2168,15 +2190,18 @@
 	OpPPC64MTVSRD
 	OpPPC64AND
 	OpPPC64ANDN
+	OpPPC64ANDNCC
 	OpPPC64ANDCC
 	OpPPC64OR
 	OpPPC64ORN
 	OpPPC64ORCC
 	OpPPC64NOR
+	OpPPC64NORCC
 	OpPPC64XOR
 	OpPPC64XORCC
 	OpPPC64EQV
 	OpPPC64NEG
+	OpPPC64NEGCC
 	OpPPC64BRD
 	OpPPC64BRW
 	OpPPC64BRH
@@ -2364,10 +2389,14 @@
 	OpRISCV64MOVDnop
 	OpRISCV64SLL
 	OpRISCV64SRA
+	OpRISCV64SRAW
 	OpRISCV64SRL
+	OpRISCV64SRLW
 	OpRISCV64SLLI
 	OpRISCV64SRAI
+	OpRISCV64SRAIW
 	OpRISCV64SRLI
+	OpRISCV64SRLIW
 	OpRISCV64XOR
 	OpRISCV64XORI
 	OpRISCV64OR
@@ -2381,7 +2410,8 @@
 	OpRISCV64SLTI
 	OpRISCV64SLTU
 	OpRISCV64SLTIU
-	OpRISCV64MOVconvert
+	OpRISCV64LoweredRound32F
+	OpRISCV64LoweredRound64F
 	OpRISCV64CALLstatic
 	OpRISCV64CALLtail
 	OpRISCV64CALLclosure
@@ -2409,6 +2439,7 @@
 	OpRISCV64LoweredGetCallerSP
 	OpRISCV64LoweredGetCallerPC
 	OpRISCV64LoweredWB
+	OpRISCV64LoweredPubBarrier
 	OpRISCV64LoweredPanicBoundsA
 	OpRISCV64LoweredPanicBoundsB
 	OpRISCV64LoweredPanicBoundsC
@@ -2416,6 +2447,10 @@
 	OpRISCV64FSUBS
 	OpRISCV64FMULS
 	OpRISCV64FDIVS
+	OpRISCV64FMADDS
+	OpRISCV64FMSUBS
+	OpRISCV64FNMADDS
+	OpRISCV64FNMSUBS
 	OpRISCV64FSQRTS
 	OpRISCV64FNEGS
 	OpRISCV64FMVSX
@@ -3006,6 +3041,10 @@
 	OpRoundToEven
 	OpAbs
 	OpCopysign
+	OpMin64F
+	OpMin32F
+	OpMax64F
+	OpMax32F
 	OpFMA
 	OpPhi
 	OpCopy
@@ -8758,22 +8797,6 @@
 		},
 	},
 	{
-		name:         "BTCLconst",
-		auxType:      auxInt8,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ABTCL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
-			},
-			outputs: []outputInfo{
-				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
-			},
-		},
-	},
-	{
 		name:         "BTCQconst",
 		auxType:      auxInt8,
 		argLen:       1,
@@ -8790,22 +8813,6 @@
 		},
 	},
 	{
-		name:         "BTRLconst",
-		auxType:      auxInt8,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ABTRL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
-			},
-			outputs: []outputInfo{
-				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
-			},
-		},
-	},
-	{
 		name:         "BTRQconst",
 		auxType:      auxInt8,
 		argLen:       1,
@@ -8822,22 +8829,6 @@
 		},
 	},
 	{
-		name:         "BTSLconst",
-		auxType:      auxInt8,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ABTSL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
-			},
-			outputs: []outputInfo{
-				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
-			},
-		},
-	},
-	{
 		name:         "BTSQconst",
 		auxType:      auxInt8,
 		argLen:       1,
@@ -8854,6 +8845,48 @@
 		},
 	},
 	{
+		name:           "BTSQconstmodify",
+		auxType:        auxSymValAndOff,
+		argLen:         2,
+		clobberFlags:   true,
+		faultOnNilArg0: true,
+		symEffect:      SymRead | SymWrite,
+		asm:            x86.ABTSQ,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:           "BTRQconstmodify",
+		auxType:        auxSymValAndOff,
+		argLen:         2,
+		clobberFlags:   true,
+		faultOnNilArg0: true,
+		symEffect:      SymRead | SymWrite,
+		asm:            x86.ABTRQ,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:           "BTCQconstmodify",
+		auxType:        auxSymValAndOff,
+		argLen:         2,
+		clobberFlags:   true,
+		faultOnNilArg0: true,
+		symEffect:      SymRead | SymWrite,
+		asm:            x86.ABTCQ,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
 		name:        "TESTQ",
 		argLen:      2,
 		commutative: true,
@@ -11891,6 +11924,36 @@
 		},
 	},
 	{
+		name:         "MINSD",
+		argLen:       2,
+		resultInArg0: true,
+		asm:          x86.AMINSD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+		},
+	},
+	{
+		name:         "MINSS",
+		argLen:       2,
+		resultInArg0: true,
+		asm:          x86.AMINSS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+		},
+	},
+	{
 		name:   "SBBQcarrymask",
 		argLen: 1,
 		asm:    x86.ASBBQ,
@@ -12151,6 +12214,156 @@
 		},
 	},
 	{
+		name:        "SETEQstoreidx1",
+		auxType:     auxSymOff,
+		argLen:      4,
+		commutative: true,
+		symEffect:   SymWrite,
+		asm:         x86.ASETEQ,
+		scale:       1,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:        "SETNEstoreidx1",
+		auxType:     auxSymOff,
+		argLen:      4,
+		commutative: true,
+		symEffect:   SymWrite,
+		asm:         x86.ASETNE,
+		scale:       1,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:        "SETLstoreidx1",
+		auxType:     auxSymOff,
+		argLen:      4,
+		commutative: true,
+		symEffect:   SymWrite,
+		asm:         x86.ASETLT,
+		scale:       1,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:        "SETLEstoreidx1",
+		auxType:     auxSymOff,
+		argLen:      4,
+		commutative: true,
+		symEffect:   SymWrite,
+		asm:         x86.ASETLE,
+		scale:       1,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:        "SETGstoreidx1",
+		auxType:     auxSymOff,
+		argLen:      4,
+		commutative: true,
+		symEffect:   SymWrite,
+		asm:         x86.ASETGT,
+		scale:       1,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:        "SETGEstoreidx1",
+		auxType:     auxSymOff,
+		argLen:      4,
+		commutative: true,
+		symEffect:   SymWrite,
+		asm:         x86.ASETGE,
+		scale:       1,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:        "SETBstoreidx1",
+		auxType:     auxSymOff,
+		argLen:      4,
+		commutative: true,
+		symEffect:   SymWrite,
+		asm:         x86.ASETCS,
+		scale:       1,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:        "SETBEstoreidx1",
+		auxType:     auxSymOff,
+		argLen:      4,
+		commutative: true,
+		symEffect:   SymWrite,
+		asm:         x86.ASETLS,
+		scale:       1,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:        "SETAstoreidx1",
+		auxType:     auxSymOff,
+		argLen:      4,
+		commutative: true,
+		symEffect:   SymWrite,
+		asm:         x86.ASETHI,
+		scale:       1,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
+		name:        "SETAEstoreidx1",
+		auxType:     auxSymOff,
+		argLen:      4,
+		commutative: true,
+		symEffect:   SymWrite,
+		asm:         x86.ASETCC,
+		scale:       1,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+			},
+		},
+	},
+	{
 		name:         "SETEQF",
 		argLen:       1,
 		clobberFlags: true,
@@ -12511,6 +12724,22 @@
 		},
 	},
 	{
+		name:         "POR",
+		argLen:       2,
+		commutative:  true,
+		resultInArg0: true,
+		asm:          x86.APOR,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+		},
+	},
+	{
 		name:              "LEAQ",
 		auxType:           auxSymOff,
 		argLen:            1,
@@ -19278,6 +19507,62 @@
 		},
 	},
 	{
+		name:   "FMIND",
+		argLen: 2,
+		asm:    arm64.AFMIND,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+			outputs: []outputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+		},
+	},
+	{
+		name:   "FMINS",
+		argLen: 2,
+		asm:    arm64.AFMINS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+			outputs: []outputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+		},
+	},
+	{
+		name:   "FMAXD",
+		argLen: 2,
+		asm:    arm64.AFMAXD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+			outputs: []outputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+		},
+	},
+	{
+		name:   "FMAXS",
+		argLen: 2,
+		asm:    arm64.AFMAXS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+			outputs: []outputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+		},
+	},
+	{
 		name:   "REV",
 		argLen: 1,
 		asm:    arm64.AREV,
@@ -22908,11 +23193,11 @@
 		asm:         loong64.AADDVU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -22923,10 +23208,10 @@
 		asm:     loong64.AADDVU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693244}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -22936,11 +23221,11 @@
 		asm:    loong64.ASUBVU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -22951,10 +23236,10 @@
 		asm:     loong64.ASUBVU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -22965,11 +23250,11 @@
 		asm:         loong64.AMULV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -22980,11 +23265,11 @@
 		asm:         loong64.AMULHV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -22995,11 +23280,11 @@
 		asm:         loong64.AMULHVU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23009,11 +23294,11 @@
 		asm:    loong64.ADIVV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23023,11 +23308,11 @@
 		asm:    loong64.ADIVVU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23037,11 +23322,11 @@
 		asm:    loong64.AREMV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23051,11 +23336,11 @@
 		asm:    loong64.AREMVU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23182,11 +23467,11 @@
 		asm:         loong64.AAND,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23197,10 +23482,10 @@
 		asm:     loong64.AAND,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23211,11 +23496,11 @@
 		asm:         loong64.AOR,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23226,10 +23511,10 @@
 		asm:     loong64.AOR,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23240,11 +23525,11 @@
 		asm:         loong64.AXOR,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23255,10 +23540,10 @@
 		asm:     loong64.AXOR,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23269,11 +23554,11 @@
 		asm:         loong64.ANOR,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23284,10 +23569,10 @@
 		asm:     loong64.ANOR,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23296,10 +23581,10 @@
 		argLen: 1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23361,11 +23646,11 @@
 		asm:    loong64.AMASKEQZ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23375,11 +23660,11 @@
 		asm:    loong64.AMASKNEZ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23389,11 +23674,11 @@
 		asm:    loong64.ASLLV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23404,10 +23689,10 @@
 		asm:     loong64.ASLLV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23417,11 +23702,11 @@
 		asm:    loong64.ASRLV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23432,10 +23717,10 @@
 		asm:     loong64.ASRLV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23445,11 +23730,11 @@
 		asm:    loong64.ASRAV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23460,10 +23745,10 @@
 		asm:     loong64.ASRAV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23473,11 +23758,11 @@
 		asm:    loong64.AROTR,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23487,11 +23772,11 @@
 		asm:    loong64.AROTRV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23502,10 +23787,10 @@
 		asm:     loong64.AROTR,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23516,10 +23801,10 @@
 		asm:     loong64.AROTRV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23529,11 +23814,11 @@
 		asm:    loong64.ASGT,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23544,10 +23829,10 @@
 		asm:     loong64.ASGT,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23557,11 +23842,11 @@
 		asm:    loong64.ASGTU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23572,10 +23857,10 @@
 		asm:     loong64.ASGTU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23653,7 +23938,7 @@
 		asm:               loong64.AMOVV,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23693,7 +23978,7 @@
 				{0, 4611686018427387908}, // SP SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23706,10 +23991,10 @@
 		asm:            loong64.AMOVB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23722,10 +24007,10 @@
 		asm:            loong64.AMOVBU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23738,10 +24023,10 @@
 		asm:            loong64.AMOVH,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23754,10 +24039,10 @@
 		asm:            loong64.AMOVHU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23770,10 +24055,10 @@
 		asm:            loong64.AMOVW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23786,10 +24071,10 @@
 		asm:            loong64.AMOVWU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23802,10 +24087,10 @@
 		asm:            loong64.AMOVV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23818,7 +24103,7 @@
 		asm:            loong64.AMOVF,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
 				{0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -23834,7 +24119,7 @@
 		asm:            loong64.AMOVD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
 				{0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -23850,8 +24135,8 @@
 		asm:            loong64.AMOVB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -23864,8 +24149,8 @@
 		asm:            loong64.AMOVH,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -23878,8 +24163,8 @@
 		asm:            loong64.AMOVW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -23892,8 +24177,8 @@
 		asm:            loong64.AMOVV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -23906,7 +24191,7 @@
 		asm:            loong64.AMOVF,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 				{1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
 			},
 		},
@@ -23920,7 +24205,7 @@
 		asm:            loong64.AMOVD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 				{1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
 			},
 		},
@@ -23934,7 +24219,7 @@
 		asm:            loong64.AMOVB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -23947,7 +24232,7 @@
 		asm:            loong64.AMOVH,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -23960,7 +24245,7 @@
 		asm:            loong64.AMOVW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -23973,7 +24258,7 @@
 		asm:            loong64.AMOVV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -23983,10 +24268,10 @@
 		asm:    loong64.AMOVB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -23996,10 +24281,10 @@
 		asm:    loong64.AMOVBU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24009,10 +24294,10 @@
 		asm:    loong64.AMOVH,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24022,10 +24307,10 @@
 		asm:    loong64.AMOVHU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24035,10 +24320,10 @@
 		asm:    loong64.AMOVW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24048,10 +24333,10 @@
 		asm:    loong64.AMOVWU,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24061,10 +24346,10 @@
 		asm:    loong64.AMOVV,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24074,10 +24359,10 @@
 		resultInArg0: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24214,49 +24499,49 @@
 	{
 		name:         "CALLstatic",
 		auxType:      auxCallOff,
-		argLen:       1,
+		argLen:       -1,
 		clobberFlags: true,
 		call:         true,
 		reg: regInfo{
-			clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
 		},
 	},
 	{
 		name:         "CALLtail",
 		auxType:      auxCallOff,
-		argLen:       1,
+		argLen:       -1,
 		clobberFlags: true,
 		call:         true,
 		tailCall:     true,
 		reg: regInfo{
-			clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
 		},
 	},
 	{
 		name:         "CALLclosure",
 		auxType:      auxCallOff,
-		argLen:       3,
+		argLen:       -1,
 		clobberFlags: true,
 		call:         true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 268435456},  // R29
-				{0, 1070596092}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
-			clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
 		},
 	},
 	{
 		name:         "CALLinter",
 		auxType:      auxCallOff,
-		argLen:       2,
+		argLen:       -1,
 		clobberFlags: true,
 		call:         true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
-			clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
 		},
 	},
 	{
@@ -24266,9 +24551,9 @@
 		faultOnNilArg0: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 524288}, // R20
 			},
-			clobbers: 262146, // R1 R19
+			clobbers: 524290, // R1 R20
 		},
 	},
 	{
@@ -24279,40 +24564,38 @@
 		faultOnNilArg1: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 524288}, // R20
-				{1, 262144}, // R19
+				{0, 1048576}, // R21
+				{1, 524288},  // R20
 			},
-			clobbers: 786434, // R1 R19 R20
+			clobbers: 1572866, // R1 R20 R21
 		},
 	},
 	{
 		name:           "LoweredZero",
 		auxType:        auxInt64,
 		argLen:         3,
-		clobberFlags:   true,
 		faultOnNilArg0: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 262144},     // R19
-				{1, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 524288},     // R20
+				{1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
-			clobbers: 262144, // R19
+			clobbers: 524288, // R20
 		},
 	},
 	{
 		name:           "LoweredMove",
 		auxType:        auxInt64,
 		argLen:         4,
-		clobberFlags:   true,
 		faultOnNilArg0: true,
 		faultOnNilArg1: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 8},          // R4
-				{1, 262144},     // R19
-				{2, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1048576},    // R21
+				{1, 524288},     // R20
+				{2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
-			clobbers: 262152, // R4 R19
+			clobbers: 1572864, // R20 R21
 		},
 	},
 	{
@@ -24321,10 +24604,10 @@
 		faultOnNilArg0: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24334,10 +24617,10 @@
 		faultOnNilArg0: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24347,10 +24630,10 @@
 		faultOnNilArg0: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24361,8 +24644,8 @@
 		hasSideEffects: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -24373,8 +24656,8 @@
 		hasSideEffects: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -24385,8 +24668,8 @@
 		hasSideEffects: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -24397,7 +24680,7 @@
 		hasSideEffects: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -24408,7 +24691,7 @@
 		hasSideEffects: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 		},
 	},
@@ -24421,11 +24704,11 @@
 		unsafePoint:     true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24438,11 +24721,11 @@
 		unsafePoint:     true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24455,11 +24738,11 @@
 		unsafePoint:     true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24472,11 +24755,11 @@
 		unsafePoint:     true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24490,10 +24773,10 @@
 		unsafePoint:     true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24507,10 +24790,10 @@
 		unsafePoint:     true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24523,12 +24806,12 @@
 		unsafePoint:     true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{2, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{2, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24541,12 +24824,12 @@
 		unsafePoint:     true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{2, 1072693240},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
-				{0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+				{1, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{2, 1073741816},          // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
 			},
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24557,7 +24840,7 @@
 		faultOnNilArg0: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24566,7 +24849,7 @@
 		argLen: 1,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24575,7 +24858,7 @@
 		argLen: 1,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24595,7 +24878,7 @@
 		rematerializeable: true,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24605,7 +24888,7 @@
 		rematerializeable: true,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31
+				{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
 			},
 		},
 	},
@@ -24628,8 +24911,8 @@
 		call:    true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65536}, // R17
-				{1, 8},     // R4
+				{0, 4194304}, // R23
+				{1, 8388608}, // R24
 			},
 		},
 	},
@@ -24640,8 +24923,8 @@
 		call:    true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 131072}, // R18
-				{1, 65536},  // R17
+				{0, 1048576}, // R21
+				{1, 4194304}, // R23
 			},
 		},
 	},
@@ -24652,8 +24935,8 @@
 		call:    true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 262144}, // R19
-				{1, 131072}, // R18
+				{0, 524288},  // R20
+				{1, 1048576}, // R21
 			},
 		},
 	},
@@ -27957,6 +28240,21 @@
 		},
 	},
 	{
+		name:        "ADDCC",
+		argLen:      2,
+		commutative: true,
+		asm:         ppc64.AADDCC,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
+	{
 		name:    "ADDconst",
 		auxType: auxInt64,
 		argLen:  1,
@@ -27971,6 +28269,21 @@
 		},
 	},
 	{
+		name:    "ADDCCconst",
+		auxType: auxInt64,
+		argLen:  1,
+		asm:     ppc64.AADDCCC,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			clobbers: 9223372036854775808, // XER
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
+	{
 		name:        "FADD",
 		argLen:      2,
 		commutative: true,
@@ -28015,6 +28328,20 @@
 		},
 	},
 	{
+		name:   "SUBCC",
+		argLen: 2,
+		asm:    ppc64.ASUBCC,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
+	{
 		name:    "SUBFCconst",
 		auxType: auxInt64,
 		argLen:  1,
@@ -28395,20 +28722,6 @@
 		},
 	},
 	{
-		name:    "RLDICL",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     ppc64.ARLDICL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
 		name:    "CLRLSLWI",
 		auxType: auxInt32,
 		argLen:  1,
@@ -28736,10 +29049,10 @@
 		},
 	},
 	{
-		name:         "CNTLZD",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          ppc64.ACNTLZD,
+		name:    "RLDICL",
+		auxType: auxInt64,
+		argLen:  1,
+		asm:     ppc64.ARLDICL,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -28750,10 +29063,49 @@
 		},
 	},
 	{
-		name:         "CNTLZW",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          ppc64.ACNTLZW,
+		name:    "RLDICR",
+		auxType: auxInt64,
+		argLen:  1,
+		asm:     ppc64.ARLDICR,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
+	{
+		name:   "CNTLZD",
+		argLen: 1,
+		asm:    ppc64.ACNTLZD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
+	{
+		name:   "CNTLZDCC",
+		argLen: 1,
+		asm:    ppc64.ACNTLZDCC,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
+	{
+		name:   "CNTLZW",
+		argLen: 1,
+		asm:    ppc64.ACNTLZW,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -29089,11 +29441,24 @@
 		},
 	},
 	{
-		name:         "ANDCC",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          ppc64.AANDCC,
+		name:   "ANDNCC",
+		argLen: 2,
+		asm:    ppc64.AANDNCC,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
+	{
+		name:        "ANDCC",
+		argLen:      2,
+		commutative: true,
+		asm:         ppc64.AANDCC,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -29134,11 +29499,10 @@
 		},
 	},
 	{
-		name:         "ORCC",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          ppc64.AORCC,
+		name:        "ORCC",
+		argLen:      2,
+		commutative: true,
+		asm:         ppc64.AORCC,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -29165,6 +29529,21 @@
 		},
 	},
 	{
+		name:        "NORCC",
+		argLen:      2,
+		commutative: true,
+		asm:         ppc64.ANORCC,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
+	{
 		name:        "XOR",
 		argLen:      2,
 		commutative: true,
@@ -29180,11 +29559,10 @@
 		},
 	},
 	{
-		name:         "XORCC",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          ppc64.AXORCC,
+		name:        "XORCC",
+		argLen:      2,
+		commutative: true,
+		asm:         ppc64.AXORCC,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -29224,6 +29602,19 @@
 		},
 	},
 	{
+		name:   "NEGCC",
+		argLen: 1,
+		asm:    ppc64.ANEGCC,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+			outputs: []outputInfo{
+				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+			},
+		},
+	},
+	{
 		name:   "BRD",
 		argLen: 1,
 		asm:    ppc64.ABRD,
@@ -29422,11 +29813,10 @@
 		},
 	},
 	{
-		name:         "ANDCCconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		asm:          ppc64.AANDCC,
+		name:    "ANDCCconst",
+		auxType: auxInt64,
+		argLen:  1,
+		asm:     ppc64.AANDCC,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
@@ -31666,6 +32056,20 @@
 		},
 	},
 	{
+		name:   "SRAW",
+		argLen: 2,
+		asm:    riscv.ASRAW,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+				{1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+			},
+			outputs: []outputInfo{
+				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+			},
+		},
+	},
+	{
 		name:   "SRL",
 		argLen: 2,
 		asm:    riscv.ASRL,
@@ -31680,6 +32084,20 @@
 		},
 	},
 	{
+		name:   "SRLW",
+		argLen: 2,
+		asm:    riscv.ASRLW,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+				{1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+			},
+			outputs: []outputInfo{
+				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+			},
+		},
+	},
+	{
 		name:    "SLLI",
 		auxType: auxInt64,
 		argLen:  1,
@@ -31708,6 +32126,20 @@
 		},
 	},
 	{
+		name:    "SRAIW",
+		auxType: auxInt64,
+		argLen:  1,
+		asm:     riscv.ASRAIW,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+			},
+			outputs: []outputInfo{
+				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+			},
+		},
+	},
+	{
 		name:    "SRLI",
 		auxType: auxInt64,
 		argLen:  1,
@@ -31722,6 +32154,20 @@
 		},
 	},
 	{
+		name:    "SRLIW",
+		auxType: auxInt64,
+		argLen:  1,
+		asm:     riscv.ASRLIW,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+			},
+			outputs: []outputInfo{
+				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+			},
+		},
+	},
+	{
 		name:        "XOR",
 		argLen:      2,
 		commutative: true,
@@ -31904,15 +32350,28 @@
 		},
 	},
 	{
-		name:   "MOVconvert",
-		argLen: 2,
-		asm:    riscv.AMOV,
+		name:         "LoweredRound32F",
+		argLen:       1,
+		resultInArg0: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
 			},
 			outputs: []outputInfo{
-				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+		},
+	},
+	{
+		name:         "LoweredRound64F",
+		argLen:       1,
+		resultInArg0: true,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+			outputs: []outputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
 			},
 		},
 	},
@@ -32270,6 +32729,13 @@
 		},
 	},
 	{
+		name:           "LoweredPubBarrier",
+		argLen:         1,
+		hasSideEffects: true,
+		asm:            riscv.AFENCE,
+		reg:            regInfo{},
+	},
+	{
 		name:    "LoweredPanicBoundsA",
 		auxType: auxInt64,
 		argLen:  3,
@@ -32364,6 +32830,70 @@
 		},
 	},
 	{
+		name:        "FMADDS",
+		argLen:      3,
+		commutative: true,
+		asm:         riscv.AFMADDS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+			outputs: []outputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+		},
+	},
+	{
+		name:        "FMSUBS",
+		argLen:      3,
+		commutative: true,
+		asm:         riscv.AFMSUBS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+			outputs: []outputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+		},
+	},
+	{
+		name:        "FNMADDS",
+		argLen:      3,
+		commutative: true,
+		asm:         riscv.AFNMADDS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+			outputs: []outputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+		},
+	},
+	{
+		name:        "FNMSUBS",
+		argLen:      3,
+		commutative: true,
+		asm:         riscv.AFNMSUBS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+				{2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+			outputs: []outputInfo{
+				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+			},
+		},
+	},
+	{
 		name:   "FSQRTS",
 		argLen: 1,
 		asm:    riscv.AFSQRTS,
@@ -38923,6 +39453,26 @@
 		generic: true,
 	},
 	{
+		name:    "Min64F",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "Min32F",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "Max64F",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "Max32F",
+		argLen:  2,
+		generic: true,
+	},
+	{
 		name:    "FMA",
 		argLen:  3,
 		generic: true,
@@ -39373,9 +39923,10 @@
 		generic: true,
 	},
 	{
-		name:    "NilCheck",
-		argLen:  2,
-		generic: true,
+		name:     "NilCheck",
+		argLen:   2,
+		nilCheck: true,
+		generic:  true,
 	},
 	{
 		name:      "GetG",
@@ -40156,16 +40707,16 @@
 	{17, loong64.REG_R18, 14, "R18"},
 	{18, loong64.REG_R19, 15, "R19"},
 	{19, loong64.REG_R20, 16, "R20"},
-	{20, loong64.REG_R21, -1, "R21"},
+	{20, loong64.REG_R21, 17, "R21"},
 	{21, loong64.REGG, -1, "g"},
-	{22, loong64.REG_R23, 17, "R23"},
-	{23, loong64.REG_R24, 18, "R24"},
-	{24, loong64.REG_R25, 19, "R25"},
-	{25, loong64.REG_R26, 20, "R26"},
-	{26, loong64.REG_R27, 21, "R27"},
-	{27, loong64.REG_R28, 22, "R28"},
-	{28, loong64.REG_R29, 23, "R29"},
-	{29, loong64.REG_R31, 24, "R31"},
+	{22, loong64.REG_R23, 18, "R23"},
+	{23, loong64.REG_R24, 19, "R24"},
+	{24, loong64.REG_R25, 20, "R25"},
+	{25, loong64.REG_R26, 21, "R26"},
+	{26, loong64.REG_R27, 22, "R27"},
+	{27, loong64.REG_R28, 23, "R28"},
+	{28, loong64.REG_R29, 24, "R29"},
+	{29, loong64.REG_R31, 25, "R31"},
 	{30, loong64.REG_F0, -1, "F0"},
 	{31, loong64.REG_F1, -1, "F1"},
 	{32, loong64.REG_F2, -1, "F2"},
@@ -40200,9 +40751,9 @@
 	{61, loong64.REG_F31, -1, "F31"},
 	{62, 0, -1, "SB"},
 }
-var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10}
-var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37}
-var gpRegMaskLOONG64 = regMask(1070596088)
+var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}
+var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45}
+var gpRegMaskLOONG64 = regMask(1071644664)
 var fpRegMaskLOONG64 = regMask(4611686017353646080)
 var specialRegMaskLOONG64 = regMask(0)
 var framepointerRegLOONG64 = int8(-1)
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
index fd2887d..7b64843 100644
--- a/src/cmd/compile/internal/ssa/poset.go
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -122,7 +122,7 @@
 // poset is implemented as a forest of DAGs; in each DAG, if there is a path (directed)
 // from node A to B, it means that A<B (or A<=B). Equality is represented by mapping
 // two SSA values to the same DAG node; when a new equality relation is recorded
-// between two existing nodes,the nodes are merged, adjusting incoming and outgoing edges.
+// between two existing nodes, the nodes are merged, adjusting incoming and outgoing edges.
 //
 // Constants are specially treated. When a constant is added to the poset, it is
 // immediately linked to other constants already present; so for instance if the
@@ -1065,7 +1065,7 @@
 				return true
 			}
 
-			// Case #1, #3 o #4: nothing to do
+			// Case #1, #3, or #4: nothing to do
 			return true
 		}
 
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
index 38758c3..842719f 100644
--- a/src/cmd/compile/internal/ssa/prove.go
+++ b/src/cmd/compile/internal/ssa/prove.go
@@ -100,10 +100,11 @@
 }
 
 type pair struct {
-	v, w *Value // a pair of values, ordered by ID.
+	// a pair of values, ordered by ID.
 	// v can be nil, to mean the zero value.
 	// for booleans the zero value (v == nil) is false.
-	d domain
+	v, w *Value
+	d    domain
 }
 
 // fact is a pair plus a relation for that pair.
@@ -165,7 +166,7 @@
 	facts map[pair]relation // current known set of relation
 	stack []fact            // previous sets of relations
 
-	// order is a couple of partial order sets that record information
+	// order* is a couple of partial order sets that record information
 	// about relations between SSA values in the signed and unsigned
 	// domain.
 	orderS *poset
@@ -798,6 +799,166 @@
 // its negation. If either leads to a contradiction, it can trim that
 // successor.
 func prove(f *Func) {
+	// Find induction variables. Currently, findIndVars
+	// is limited to one induction variable per block.
+	var indVars map[*Block]indVar
+	for _, v := range findIndVar(f) {
+		ind := v.ind
+		if len(ind.Args) != 2 {
+			// the rewrite code assumes there are only ever two parents to loops
+			panic("unexpected induction with too many parents")
+		}
+
+		nxt := v.nxt
+		if !(ind.Uses == 2 && // 2 used by comparison and next
+			nxt.Uses == 1) { // 1 used by induction
+			// ind or nxt is used inside the loop, add it to the facts table
+			if indVars == nil {
+				indVars = make(map[*Block]indVar)
+			}
+			indVars[v.entry] = v
+			continue
+		} else {
+			// Since this induction variable is not used for anything but counting the iterations,
+			// there is no point in putting it into the facts table.
+		}
+
+		// Try to rewrite to a downward counting loop checking against start if the
+		// loop body does not depend on ind or nxt and end is known before the loop.
+		// This reduces pressure on the register allocator because we no longer need
+		// to use end on each iteration; we compare against the start constant instead.
+		// That means this code:
+		//
+		//	loop:
+		//		ind = (Phi (Const [x]) nxt),
+		//		if ind < end
+		//		then goto enter_loop
+		//		else goto exit_loop
+		//
+		//	enter_loop:
+		//		do something without using ind nor nxt
+		//		nxt = inc + ind
+		//		goto loop
+		//
+		//	exit_loop:
+		//
+		// is rewritten to:
+		//
+		//	loop:
+		//		ind = (Phi end nxt)
+		//		if (Const [x]) < ind
+		//		then goto enter_loop
+		//		else goto exit_loop
+		//
+		//	enter_loop:
+		//		do something without using ind nor nxt
+		//		nxt = ind - inc
+		//		goto loop
+		//
+		//	exit_loop:
+		//
+		// This is better because it only requires keeping ind and then nxt alive while looping,
+		// while the original form keeps ind, then nxt, and also end alive.
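+		//
+		// As an illustrative sketch at the source level, this corresponds to
+		// compiling
+		//
+		//	for i := 0; i < n; i++ { body() }
+		//
+		// as if it had been written
+		//
+		//	for i := n; 0 < i; i-- { body() }
+		//
+		// which is only valid because body() never uses i.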
+		start, end := v.min, v.max
+		if v.flags&indVarCountDown != 0 {
+			start, end = end, start
+		}
+
+		if !(start.Op == OpConst8 || start.Op == OpConst16 || start.Op == OpConst32 || start.Op == OpConst64) {
+			// if start is not a constant we would gain nothing from inverting the loop
+			continue
+		}
+		if end.Op == OpConst8 || end.Op == OpConst16 || end.Op == OpConst32 || end.Op == OpConst64 {
+			// TODO: if both start and end are constants we should rewrite such that the comparison
+			// is against zero and nxt is a ++ or -- operation.
+			// That means:
+			//	for i := 2; i < 11; i += 2 {
+			// should be rewritten to:
+			//	for i := 5; 0 < i; i-- {
+			continue
+		}
+
+		header := ind.Block
+		check := header.Controls[0]
+		if check == nil {
+			// we don't know how to rewrite a loop whose condition is not a simple comparison
+			continue
+		}
+		switch check.Op {
+		case OpLeq64, OpLeq32, OpLeq16, OpLeq8,
+			OpLess64, OpLess32, OpLess16, OpLess8:
+		default:
+			// we don't know how to rewrite a loop whose condition is not a simple comparison
+			continue
+		}
+		if !((check.Args[0] == ind && check.Args[1] == end) ||
+			(check.Args[1] == ind && check.Args[0] == end)) {
+			// we don't know how to rewrite a loop whose condition is not a simple comparison
+			continue
+		}
+		if end.Block == ind.Block {
+			// we can't rewrite loops where the condition depends on the loop body
+			// this simple check works because if that were the case a Phi in ind.Block would have to exist
+			continue
+		}
+
+		// invert the check
+		check.Args[0], check.Args[1] = check.Args[1], check.Args[0]
+
+		// invert start and end in the loop
+		for i, v := range check.Args {
+			if v != end {
+				continue
+			}
+
+			check.SetArg(i, start)
+			goto replacedEnd
+		}
+		panic(fmt.Sprintf("unreachable, ind: %v, start: %v, end: %v", ind, start, end))
+	replacedEnd:
+
+		for i, v := range ind.Args {
+			if v != start {
+				continue
+			}
+
+			ind.SetArg(i, end)
+			goto replacedStart
+		}
+		panic(fmt.Sprintf("unreachable, ind: %v, start: %v, end: %v", ind, start, end))
+	replacedStart:
+
+		if nxt.Args[0] != ind {
+			// unlike addition, subtraction is not commutative, so make sure we get the operand order right
+			nxt.Args[0], nxt.Args[1] = nxt.Args[1], nxt.Args[0]
+		}
+
+		switch nxt.Op {
+		case OpAdd8:
+			nxt.Op = OpSub8
+		case OpAdd16:
+			nxt.Op = OpSub16
+		case OpAdd32:
+			nxt.Op = OpSub32
+		case OpAdd64:
+			nxt.Op = OpSub64
+		case OpSub8:
+			nxt.Op = OpAdd8
+		case OpSub16:
+			nxt.Op = OpAdd16
+		case OpSub32:
+			nxt.Op = OpAdd32
+		case OpSub64:
+			nxt.Op = OpAdd64
+		default:
+			panic("unreachable")
+		}
+
+		if f.pass.debug > 0 {
+			f.Warnl(ind.Pos, "Inverted loop iteration")
+		}
+	}
+
 	ft := newFactsTable(f)
 	ft.checkpoint()
 
@@ -933,15 +1094,6 @@
 			}
 		}
 	}
-	// Find induction variables. Currently, findIndVars
-	// is limited to one induction variable per block.
-	var indVars map[*Block]indVar
-	for _, v := range findIndVar(f) {
-		if indVars == nil {
-			indVars = make(map[*Block]indVar)
-		}
-		indVars[v.entry] = v
-	}
 
 	// current node state
 	type walkState int
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index c4d6e48..2325b9e 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -672,6 +672,8 @@
 			s.allocatable &^= 1 << 9 // R9
 		case "arm64":
 			// nothing to do
+		case "loong64": // R2 (aka TP) already reserved.
+			// nothing to do
 		case "ppc64le": // R2 already reserved.
 			// nothing to do
 		case "riscv64": // X3 (aka GP) and X4 (aka TP) already reserved.
@@ -2544,7 +2546,7 @@
 					// Allocate a temp location to spill a register to.
 					// The type of the slot is immaterial - it will not be live across
 					// any safepoint. Just use a type big enough to hold any register.
-					t := LocalSlot{N: e.s.f.fe.Auto(c.Pos, types.Int64), Type: types.Int64}
+					t := LocalSlot{N: e.s.f.NewLocal(c.Pos, types.Int64), Type: types.Int64}
 					// TODO: reuse these slots. They'll need to be erased first.
 					e.set(t, vid, x, false, c.Pos)
 					if e.s.f.pass.debug > regDebug {
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
index d990cac..7d804a0 100644
--- a/src/cmd/compile/internal/ssa/regalloc_test.go
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -6,7 +6,6 @@
 
 import (
 	"cmd/compile/internal/types"
-	"cmd/internal/src"
 	"testing"
 )
 
@@ -53,7 +52,7 @@
 	f := c.Fun("b1",
 		Bloc("b1",
 			Valu("v1", OpInitMem, types.TypeMem, 0, nil),
-			Valu("v6", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+			Valu("v6", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
 			Valu("v8", OpGetG, c.config.Types.Int64.PtrTo(), 0, nil, "v1"),
 			Valu("v11", OpARM64CMPconst, types.TypeFlags, 0, nil, "v6"),
 			Eq("v11", "b2", "b4"),
@@ -92,8 +91,8 @@
 	f := c.Fun("entry",
 		Bloc("entry",
 			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
-			Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
-			Valu("cond", OpArg, c.config.Types.Bool, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Bool)),
+			Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64)),
+			Valu("cond", OpArg, c.config.Types.Bool, 0, c.Temp(c.config.Types.Bool)),
 			Valu("ld", OpAMD64MOVQload, c.config.Types.Int64, 0, nil, "ptr", "mem"), // this value needs a spill
 			Goto("loop"),
 		),
@@ -125,8 +124,8 @@
 	f := c.Fun("entry",
 		Bloc("entry",
 			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
-			Valu("x", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
-			Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64.PtrTo())),
+			Valu("x", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
+			Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
 			Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
 			Goto("loop1"),
 		),
@@ -174,8 +173,8 @@
 	f := c.Fun("entry",
 		Bloc("entry",
 			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
-			Valu("x", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
-			Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64.PtrTo())),
+			Valu("x", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
+			Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
 			Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
 			Goto("loop1"),
 		),
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 43843bd..bb09c6c 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -859,6 +859,9 @@
 			offset += base.AuxInt
 			base = base.Args[0]
 		}
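+		// If base is the result of a nil check, the checked pointer is its
+		// first argument; look through it to find the real base.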
+		if opcodeTable[base.Op].nilCheck {
+			base = base.Args[0]
+		}
 		return base, offset
 	}
 	p1, off1 := baseAndOffset(p1)
@@ -1183,6 +1186,12 @@
 	}
 	return y
 }
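+// max returns the larger of x and y.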
+func max(x, y int64) int64 {
+	if x > y {
+		return x
+	}
+	return y
+}
 
 func isConstZero(v *Value) bool {
 	switch v.Op {
@@ -1281,6 +1290,10 @@
 		OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
 		OpAMD64SHLL, OpAMD64SHLLconst:
 		return true
+	case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst,
+		OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW,
+		OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst:
+		return true
 	case OpArg:
 		return x.Type.Size() == 4
 	case OpPhi, OpSelect0, OpSelect1:
@@ -1474,7 +1487,7 @@
 
 	// Determine boundaries and then decode them
 	if mask == 0 || ^mask == 0 || rotate >= nbits {
-		panic("Invalid PPC64 rotate mask")
+		panic(fmt.Sprintf("invalid PPC64 rotate mask: %x %d %d", uint64(mask), rotate, nbits))
 	} else if nbits == 32 {
 		mb = bits.LeadingZeros32(uint32(mask))
 		me = 32 - bits.TrailingZeros32(uint32(mask))
@@ -1495,6 +1508,25 @@
 	return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
 }
 
+// Merge (RLDICL [encoded] (SRDconst [s] x)) into (RLDICL [new_encoded] x)
+// SRDconst on PPC64 is an extended mnemonic of RLDICL. If the input to an
+// RLDICL is an SRDconst, and the RLDICL does not rotate its value, the two
+// operations can be combined. This function assumes the two opcodes can
+// be merged, and returns an encoded rotate+mask value of the combined RLDICL.
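+//
+// For example (an illustrative case): an RLDICL with rotate 0 and mb=8 applied
+// to (SRDconst [16] x), which is itself an RLDICL with rotate 48 and mb=16,
+// merges into a single RLDICL with rotate 48 and mb=16, because the larger mb
+// already clears everything the outer mask would clear.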
+func mergePPC64RLDICLandSRDconst(encoded, s int64) int64 {
+	mb := s
+	r := 64 - s
+	// A larger mb is a smaller mask.
+	if (encoded>>8)&0xFF < mb {
+		encoded = (encoded &^ 0xFF00) | mb<<8
+	}
+	// The rotate is expected to be 0.
+	if (encoded & 0xFF0000) != 0 {
+		panic("non-zero rotate")
+	}
+	return encoded | r<<16
+}
+
 // DecodePPC64RotateMask is the inverse operation of encodePPC64RotateMask.  The values returned as
 // mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask.
 func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) {
@@ -1598,6 +1630,52 @@
 	return encodePPC64RotateMask((32-srw+sld)&31, int64(mask), 32)
 }
 
+// Convert a PPC64 opcode from the Op to OpCC form. This converts (op x y)
+// to (Select0 (opCC x y)) without having to explicitly fixup every user
+// of op.
+//
+// E.g. consider the case:
+// a = (ADD x y)
+// b = (CMPconst [0] a)
+// c = (OR a z)
+//
+// A rule like (CMPconst [0] (ADD x y)) => (CMPconst [0] (Select0 (ADDCC x y)))
+// would produce:
+// a  = (ADD x y)
+// a' = (ADDCC x y)
+// a'' = (Select0 a')
+// b  = (CMPconst [0] a'')
+// c  = (OR a z)
+//
+// which makes it impossible to rewrite the second user. Instead the result
+// of this conversion is:
+// a' = (ADDCC x y)
+// a  = (Select0 a')
+// b  = (CMPconst [0] a)
+// c  = (OR a z)
+//
+// This makes it trivial to rewrite b using a lowering rule.
+func convertPPC64OpToOpCC(op *Value) *Value {
+	ccOpMap := map[Op]Op{
+		OpPPC64ADD:      OpPPC64ADDCC,
+		OpPPC64ADDconst: OpPPC64ADDCCconst,
+		OpPPC64AND:      OpPPC64ANDCC,
+		OpPPC64ANDN:     OpPPC64ANDNCC,
+		OpPPC64CNTLZD:   OpPPC64CNTLZDCC,
+		OpPPC64OR:       OpPPC64ORCC,
+		OpPPC64SUB:      OpPPC64SUBCC,
+		OpPPC64NEG:      OpPPC64NEGCC,
+		OpPPC64NOR:      OpPPC64NORCC,
+		OpPPC64XOR:      OpPPC64XORCC,
+	}
+	b := op.Block
+	opCC := b.NewValue0I(op.Pos, ccOpMap[op.Op], types.NewTuple(op.Type, types.TypeFlags), op.AuxInt)
+	opCC.AddArgs(op.Args...)
+	op.reset(OpSelect0)
+	op.AddArgs(opCC)
+	return op
+}
+
 // Convenience function to rotate a 32 bit constant value by another constant.
 func rotateLeft32(v, rotate int64) int64 {
 	return int64(bits.RotateLeft32(uint32(v), int(rotate)))
@@ -2053,8 +2131,8 @@
 
 func makeJumpTableSym(b *Block) *obj.LSym {
 	s := base.Ctxt.Lookup(fmt.Sprintf("%s.jump%d", b.Func.fe.Func().LSym.Name, b.ID))
-	s.Set(obj.AttrDuplicateOK, true)
-	s.Set(obj.AttrLocal, true)
+	// The jump table symbol is accessed only from the function symbol.
+	s.Set(obj.AttrStatic, true)
 	return s
 }
 
@@ -2123,3 +2201,11 @@
 	}
 	return v <= 0xFFF
 }
+
+// setPos sets the position of v to pos, then returns true.
+// Useful for setting the position of a rewrite's result to
+// something other than the default.
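+// For example, a rule condition such as "setPos(v, x.Pos) && clobber(x)" gives
+// the rewritten value the position of the value being clobbered.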
+func setPos(v *Value, pos src.XPos) bool {
+	v.Pos = pos
+	return true
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 88bd48f..5332512 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -73,20 +73,14 @@
 		return rewriteValueAMD64_OpAMD64BSWAPL(v)
 	case OpAMD64BSWAPQ:
 		return rewriteValueAMD64_OpAMD64BSWAPQ(v)
-	case OpAMD64BTCLconst:
-		return rewriteValueAMD64_OpAMD64BTCLconst(v)
 	case OpAMD64BTCQconst:
 		return rewriteValueAMD64_OpAMD64BTCQconst(v)
 	case OpAMD64BTLconst:
 		return rewriteValueAMD64_OpAMD64BTLconst(v)
 	case OpAMD64BTQconst:
 		return rewriteValueAMD64_OpAMD64BTQconst(v)
-	case OpAMD64BTRLconst:
-		return rewriteValueAMD64_OpAMD64BTRLconst(v)
 	case OpAMD64BTRQconst:
 		return rewriteValueAMD64_OpAMD64BTRQconst(v)
-	case OpAMD64BTSLconst:
-		return rewriteValueAMD64_OpAMD64BTSLconst(v)
 	case OpAMD64BTSQconst:
 		return rewriteValueAMD64_OpAMD64BTSQconst(v)
 	case OpAMD64CMOVLCC:
@@ -871,6 +865,14 @@
 		return rewriteValueAMD64_OpLsh8x64(v)
 	case OpLsh8x8:
 		return rewriteValueAMD64_OpLsh8x8(v)
+	case OpMax32F:
+		return rewriteValueAMD64_OpMax32F(v)
+	case OpMax64F:
+		return rewriteValueAMD64_OpMax64F(v)
+	case OpMin32F:
+		return rewriteValueAMD64_OpMin32F(v)
+	case OpMin64F:
+		return rewriteValueAMD64_OpMin64F(v)
 	case OpMod16:
 		return rewriteValueAMD64_OpMod16(v)
 	case OpMod16u:
@@ -2618,26 +2620,6 @@
 		}
 		break
 	}
-	// match: (ANDL (MOVLconst [c]) x)
-	// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
-	// result: (BTRLconst [int8(log32(^c))] x)
-	for {
-		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
-			if v_0.Op != OpAMD64MOVLconst {
-				continue
-			}
-			c := auxIntToInt32(v_0.AuxInt)
-			x := v_1
-			if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
-				continue
-			}
-			v.reset(OpAMD64BTRLconst)
-			v.AuxInt = int8ToAuxInt(int8(log32(^c)))
-			v.AddArg(x)
-			return true
-		}
-		break
-	}
 	// match: (ANDL x (MOVLconst [c]))
 	// result: (ANDLconst [c] x)
 	for {
@@ -2746,20 +2728,6 @@
 }
 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
 	v_0 := v.Args[0]
-	// match: (ANDLconst [c] x)
-	// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
-	// result: (BTRLconst [int8(log32(^c))] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		x := v_0
-		if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
-			break
-		}
-		v.reset(OpAMD64BTRLconst)
-		v.AuxInt = int8ToAuxInt(int8(log32(^c)))
-		v.AddArg(x)
-		return true
-	}
 	// match: (ANDLconst [c] (ANDLconst [d] x))
 	// result: (ANDLconst [c & d] x)
 	for {
@@ -2774,20 +2742,6 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (ANDLconst [c] (BTRLconst [d] x))
-	// result: (ANDLconst [c &^ (1<<uint32(d))] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		if v_0.Op != OpAMD64BTRLconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
-		v.AddArg(x)
-		return true
-	}
 	// match: (ANDLconst [ 0xFF] x)
 	// result: (MOVBQZX x)
 	for {
@@ -3091,7 +3045,7 @@
 		break
 	}
 	// match: (ANDQ (MOVQconst [c]) x)
-	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
+	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
 	// result: (BTRQconst [int8(log64(^c))] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -3100,7 +3054,7 @@
 			}
 			c := auxIntToInt64(v_0.AuxInt)
 			x := v_1
-			if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
+			if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
 				continue
 			}
 			v.reset(OpAMD64BTRQconst)
@@ -3222,20 +3176,6 @@
 }
 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
 	v_0 := v.Args[0]
-	// match: (ANDQconst [c] x)
-	// cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
-	// result: (BTRQconst [int8(log32(^c))] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		x := v_0
-		if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
-			break
-		}
-		v.reset(OpAMD64BTRQconst)
-		v.AuxInt = int8ToAuxInt(int8(log32(^c)))
-		v.AddArg(x)
-		return true
-	}
 	// match: (ANDQconst [c] (ANDQconst [d] x))
 	// result: (ANDQconst [c & d] x)
 	for {
@@ -3250,24 +3190,6 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (ANDQconst [c] (BTRQconst [d] x))
-	// cond: is32Bit(int64(c) &^ (1<<uint32(d)))
-	// result: (ANDQconst [c &^ (1<<uint32(d))] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		if v_0.Op != OpAMD64BTRQconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
-			break
-		}
-		v.reset(OpAMD64ANDQconst)
-		v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
-		v.AddArg(x)
-		return true
-	}
 	// match: (ANDQconst [ 0xFF] x)
 	// result: (MOVBQZX x)
 	for {
@@ -3669,88 +3591,8 @@
 	}
 	return false
 }
-func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
-	v_0 := v.Args[0]
-	// match: (BTCLconst [c] (XORLconst [d] x))
-	// result: (XORLconst [d ^ 1<<uint32(c)] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64XORLconst {
-			break
-		}
-		d := auxIntToInt32(v_0.AuxInt)
-		x := v_0.Args[0]
-		v.reset(OpAMD64XORLconst)
-		v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTCLconst [c] (BTCLconst [d] x))
-	// result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64BTCLconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		v.reset(OpAMD64XORLconst)
-		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTCLconst [c] (MOVLconst [d]))
-	// result: (MOVLconst [d^(1<<uint32(c))])
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		d := auxIntToInt32(v_0.AuxInt)
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
-		return true
-	}
-	return false
-}
 func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
 	v_0 := v.Args[0]
-	// match: (BTCQconst [c] (XORQconst [d] x))
-	// cond: is32Bit(int64(d) ^ 1<<uint32(c))
-	// result: (XORQconst [d ^ 1<<uint32(c)] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64XORQconst {
-			break
-		}
-		d := auxIntToInt32(v_0.AuxInt)
-		x := v_0.Args[0]
-		if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
-			break
-		}
-		v.reset(OpAMD64XORQconst)
-		v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTCQconst [c] (BTCQconst [d] x))
-	// cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
-	// result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64BTCQconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
-			break
-		}
-		v.reset(OpAMD64XORQconst)
-		v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
-		v.AddArg(x)
-		return true
-	}
 	// match: (BTCQconst [c] (MOVQconst [d]))
 	// result: (MOVQconst [d^(1<<uint32(c))])
 	for {
@@ -3945,76 +3787,6 @@
 	}
 	return false
 }
-func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
-	v_0 := v.Args[0]
-	// match: (BTRLconst [c] (BTSLconst [c] x))
-	// result: (BTRLconst [c] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64BTRLconst)
-		v.AuxInt = int8ToAuxInt(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTRLconst [c] (BTCLconst [c] x))
-	// result: (BTRLconst [c] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64BTRLconst)
-		v.AuxInt = int8ToAuxInt(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTRLconst [c] (ANDLconst [d] x))
-	// result: (ANDLconst [d &^ (1<<uint32(c))] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		d := auxIntToInt32(v_0.AuxInt)
-		x := v_0.Args[0]
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTRLconst [c] (BTRLconst [d] x))
-	// result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64BTRLconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTRLconst [c] (MOVLconst [d]))
-	// result: (MOVLconst [d&^(1<<uint32(c))])
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		d := auxIntToInt32(v_0.AuxInt)
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
-		return true
-	}
-	return false
-}
 func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (BTRQconst [c] (BTSQconst [c] x))
@@ -4043,42 +3815,6 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (BTRQconst [c] (ANDQconst [d] x))
-	// cond: is32Bit(int64(d) &^ (1<<uint32(c)))
-	// result: (ANDQconst [d &^ (1<<uint32(c))] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64ANDQconst {
-			break
-		}
-		d := auxIntToInt32(v_0.AuxInt)
-		x := v_0.Args[0]
-		if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
-			break
-		}
-		v.reset(OpAMD64ANDQconst)
-		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTRQconst [c] (BTRQconst [d] x))
-	// cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
-	// result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64BTRQconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
-			break
-		}
-		v.reset(OpAMD64ANDQconst)
-		v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
-		v.AddArg(x)
-		return true
-	}
 	// match: (BTRQconst [c] (MOVQconst [d]))
 	// result: (MOVQconst [d&^(1<<uint32(c))])
 	for {
@@ -4093,76 +3829,6 @@
 	}
 	return false
 }
-func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
-	v_0 := v.Args[0]
-	// match: (BTSLconst [c] (BTRLconst [c] x))
-	// result: (BTSLconst [c] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64BTSLconst)
-		v.AuxInt = int8ToAuxInt(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTSLconst [c] (BTCLconst [c] x))
-	// result: (BTSLconst [c] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64BTSLconst)
-		v.AuxInt = int8ToAuxInt(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTSLconst [c] (ORLconst [d] x))
-	// result: (ORLconst [d | 1<<uint32(c)] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64ORLconst {
-			break
-		}
-		d := auxIntToInt32(v_0.AuxInt)
-		x := v_0.Args[0]
-		v.reset(OpAMD64ORLconst)
-		v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTSLconst [c] (BTSLconst [d] x))
-	// result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64BTSLconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		v.reset(OpAMD64ORLconst)
-		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTSLconst [c] (MOVLconst [d]))
-	// result: (MOVLconst [d|(1<<uint32(c))])
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		d := auxIntToInt32(v_0.AuxInt)
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
-		return true
-	}
-	return false
-}
 func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (BTSQconst [c] (BTRQconst [c] x))
@@ -4191,42 +3857,6 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (BTSQconst [c] (ORQconst [d] x))
-	// cond: is32Bit(int64(d) | 1<<uint32(c))
-	// result: (ORQconst [d | 1<<uint32(c)] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64ORQconst {
-			break
-		}
-		d := auxIntToInt32(v_0.AuxInt)
-		x := v_0.Args[0]
-		if !(is32Bit(int64(d) | 1<<uint32(c))) {
-			break
-		}
-		v.reset(OpAMD64ORQconst)
-		v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BTSQconst [c] (BTSQconst [d] x))
-	// cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
-	// result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
-	for {
-		c := auxIntToInt8(v.AuxInt)
-		if v_0.Op != OpAMD64BTSQconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
-			break
-		}
-		v.reset(OpAMD64ORQconst)
-		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
-		v.AddArg(x)
-		return true
-	}
 	// match: (BTSQconst [c] (MOVQconst [d]))
 	// result: (MOVQconst [d|(1<<uint32(c))])
 	for {
@@ -10181,8 +9811,6 @@
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
 	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
 	// cond: y.Uses == 1
 	// result: (SETLstore [off] {sym} ptr x mem)
@@ -10516,47 +10144,6 @@
 		v.AddArg3(base, val, mem)
 		return true
 	}
-	// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
-	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
-	// result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
-	for {
-		i := auxIntToInt32(v.AuxInt)
-		s := auxToSym(v.Aux)
-		p := v_0
-		x1 := v_1
-		if x1.Op != OpAMD64MOVBload {
-			break
-		}
-		j := auxIntToInt32(x1.AuxInt)
-		s2 := auxToSym(x1.Aux)
-		mem := x1.Args[1]
-		p2 := x1.Args[0]
-		mem2 := v_2
-		if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
-			break
-		}
-		_ = mem2.Args[2]
-		if p != mem2.Args[0] {
-			break
-		}
-		x2 := mem2.Args[1]
-		if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
-			break
-		}
-		_ = x2.Args[1]
-		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstore)
-		v.AuxInt = int32ToAuxInt(i - 1)
-		v.Aux = symToAux(s)
-		v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
-		v0.AuxInt = int32ToAuxInt(j - 1)
-		v0.Aux = symToAux(s2)
-		v0.AddArg2(p2, mem)
-		v.AddArg3(p, v0, mem)
-		return true
-	}
 	return false
 }
 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
@@ -11069,8 +10656,6 @@
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
 	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
 	// result: (MOVLstore [off] {sym} ptr x mem)
 	for {
@@ -11184,47 +10769,6 @@
 		v.AddArg3(base, val, mem)
 		return true
 	}
-	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
-	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
-	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
-	for {
-		i := auxIntToInt32(v.AuxInt)
-		s := auxToSym(v.Aux)
-		p := v_0
-		x1 := v_1
-		if x1.Op != OpAMD64MOVLload {
-			break
-		}
-		j := auxIntToInt32(x1.AuxInt)
-		s2 := auxToSym(x1.Aux)
-		mem := x1.Args[1]
-		p2 := x1.Args[0]
-		mem2 := v_2
-		if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
-			break
-		}
-		_ = mem2.Args[2]
-		if p != mem2.Args[0] {
-			break
-		}
-		x2 := mem2.Args[1]
-		if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
-			break
-		}
-		_ = x2.Args[1]
-		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = int32ToAuxInt(i - 4)
-		v.Aux = symToAux(s)
-		v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
-		v0.AuxInt = int32ToAuxInt(j - 4)
-		v0.Aux = symToAux(s2)
-		v0.AddArg2(p2, mem)
-		v.AddArg3(p, v0, mem)
-		return true
-	}
 	// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
 	// cond: y.Uses==1 && clobber(y)
 	// result: (ADDLmodify [off] {sym} ptr x mem)
@@ -12384,6 +11928,84 @@
 		}
 		break
 	}
+	// match: (MOVQstore {sym} [off] ptr x:(BTSQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
+	// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
+	// result: (BTSQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+	for {
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		x := v_1
+		if x.Op != OpAMD64BTSQconst {
+			break
+		}
+		c := auxIntToInt8(x.AuxInt)
+		l := x.Args[0]
+		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+			break
+		}
+		mem := l.Args[1]
+		if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
+			break
+		}
+		v.reset(OpAMD64BTSQconstmodify)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+		v.Aux = symToAux(sym)
+		v.AddArg2(ptr, mem)
+		return true
+	}
+	// match: (MOVQstore {sym} [off] ptr x:(BTRQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
+	// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
+	// result: (BTRQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+	for {
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		x := v_1
+		if x.Op != OpAMD64BTRQconst {
+			break
+		}
+		c := auxIntToInt8(x.AuxInt)
+		l := x.Args[0]
+		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+			break
+		}
+		mem := l.Args[1]
+		if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
+			break
+		}
+		v.reset(OpAMD64BTRQconstmodify)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+		v.Aux = symToAux(sym)
+		v.AddArg2(ptr, mem)
+		return true
+	}
+	// match: (MOVQstore {sym} [off] ptr x:(BTCQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
+	// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
+	// result: (BTCQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+	for {
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		x := v_1
+		if x.Op != OpAMD64BTCQconst {
+			break
+		}
+		c := auxIntToInt8(x.AuxInt)
+		l := x.Args[0]
+		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+			break
+		}
+		mem := l.Args[1]
+		if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
+			break
+		}
+		v.reset(OpAMD64BTCQconstmodify)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+		v.Aux = symToAux(sym)
+		v.AddArg2(ptr, mem)
+		return true
+	}
 	// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
 	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
 	// result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
@@ -12582,7 +12204,7 @@
 		return true
 	}
 	// match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
-	// cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x)
+	// cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
 	// result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
 	for {
 		c := auxIntToValAndOff(v.AuxInt)
@@ -12598,7 +12220,7 @@
 		}
 		mem := x.Args[1]
 		p0 := x.Args[0]
-		if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x)) {
+		if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpAMD64MOVOstoreconst)
@@ -12608,7 +12230,7 @@
 		return true
 	}
 	// match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
-	// cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x)
+	// cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
 	// result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
 	for {
 		a := auxIntToValAndOff(v.AuxInt)
@@ -12624,7 +12246,7 @@
 		}
 		mem := x.Args[1]
 		p1 := x.Args[0]
-		if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x)) {
+		if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpAMD64MOVOstoreconst)
@@ -13270,8 +12892,6 @@
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
 	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
 	// result: (MOVWstore [off] {sym} ptr x mem)
 	for {
@@ -13385,47 +13005,6 @@
 		v.AddArg3(base, val, mem)
 		return true
 	}
-	// match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
-	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
-	// result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
-	for {
-		i := auxIntToInt32(v.AuxInt)
-		s := auxToSym(v.Aux)
-		p := v_0
-		x1 := v_1
-		if x1.Op != OpAMD64MOVWload {
-			break
-		}
-		j := auxIntToInt32(x1.AuxInt)
-		s2 := auxToSym(x1.Aux)
-		mem := x1.Args[1]
-		p2 := x1.Args[0]
-		mem2 := v_2
-		if mem2.Op != OpAMD64MOVWstore || auxIntToInt32(mem2.AuxInt) != i-2 || auxToSym(mem2.Aux) != s {
-			break
-		}
-		_ = mem2.Args[2]
-		if p != mem2.Args[0] {
-			break
-		}
-		x2 := mem2.Args[1]
-		if x2.Op != OpAMD64MOVWload || auxIntToInt32(x2.AuxInt) != j-2 || auxToSym(x2.Aux) != s2 {
-			break
-		}
-		_ = x2.Args[1]
-		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = int32ToAuxInt(i - 2)
-		v.Aux = symToAux(s)
-		v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
-		v0.AuxInt = int32ToAuxInt(j - 2)
-		v0.Aux = symToAux(s2)
-		v0.AddArg2(p2, mem)
-		v.AddArg3(p, v0, mem)
-		return true
-	}
 	// match: (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem)
 	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
 	// result: (MOVBEWstore [i] {s} p w mem)
@@ -14764,26 +14343,6 @@
 		}
 		break
 	}
-	// match: (ORL (MOVLconst [c]) x)
-	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
-	// result: (BTSLconst [int8(log32(c))] x)
-	for {
-		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
-			if v_0.Op != OpAMD64MOVLconst {
-				continue
-			}
-			c := auxIntToInt32(v_0.AuxInt)
-			x := v_1
-			if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
-				continue
-			}
-			v.reset(OpAMD64BTSLconst)
-			v.AuxInt = int8ToAuxInt(int8(log32(c)))
-			v.AddArg(x)
-			return true
-		}
-		break
-	}
 	// match: (ORL x (MOVLconst [c]))
 	// result: (ORLconst [c] x)
 	for {
@@ -14839,20 +14398,6 @@
 }
 func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
 	v_0 := v.Args[0]
-	// match: (ORLconst [c] x)
-	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
-	// result: (BTSLconst [int8(log32(c))] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		x := v_0
-		if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
-			break
-		}
-		v.reset(OpAMD64BTSLconst)
-		v.AuxInt = int8ToAuxInt(int8(log32(c)))
-		v.AddArg(x)
-		return true
-	}
 	// match: (ORLconst [c] (ORLconst [d] x))
 	// result: (ORLconst [c | d] x)
 	for {
@@ -14867,20 +14412,6 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (ORLconst [c] (BTSLconst [d] x))
-	// result: (ORLconst [c | 1<<uint32(d)] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		if v_0.Op != OpAMD64BTSLconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		v.reset(OpAMD64ORLconst)
-		v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
-		v.AddArg(x)
-		return true
-	}
 	// match: (ORLconst [c] x)
 	// cond: c==0
 	// result: x
@@ -15114,7 +14645,7 @@
 		break
 	}
 	// match: (ORQ (MOVQconst [c]) x)
-	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
+	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
 	// result: (BTSQconst [int8(log64(c))] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -15123,7 +14654,7 @@
 			}
 			c := auxIntToInt64(v_0.AuxInt)
 			x := v_1
-			if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
+			if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
 				continue
 			}
 			v.reset(OpAMD64BTSQconst)
@@ -15322,20 +14853,6 @@
 }
 func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
 	v_0 := v.Args[0]
-	// match: (ORQconst [c] x)
-	// cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
-	// result: (BTSQconst [int8(log32(c))] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		x := v_0
-		if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
-			break
-		}
-		v.reset(OpAMD64BTSQconst)
-		v.AuxInt = int8ToAuxInt(int8(log32(c)))
-		v.AddArg(x)
-		return true
-	}
 	// match: (ORQconst [c] (ORQconst [d] x))
 	// result: (ORQconst [c | d] x)
 	for {
@@ -15350,24 +14867,6 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (ORQconst [c] (BTSQconst [d] x))
-	// cond: is32Bit(int64(c) | 1<<uint32(d))
-	// result: (ORQconst [c | 1<<uint32(d)] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		if v_0.Op != OpAMD64BTSQconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		if !(is32Bit(int64(c) | 1<<uint32(d))) {
-			break
-		}
-		v.reset(OpAMD64ORQconst)
-		v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
-		v.AddArg(x)
-		return true
-	}
 	// match: (ORQconst [0] x)
 	// result: x
 	for {
@@ -21179,14 +20678,14 @@
 func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (SHLLconst [1] (SHRLconst [1] x))
-	// result: (BTRLconst [0] x)
+	// result: (ANDLconst [-2] x)
 	for {
 		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
 			break
 		}
 		x := v_0.Args[0]
-		v.reset(OpAMD64BTRLconst)
-		v.AuxInt = int8ToAuxInt(0)
+		v.reset(OpAMD64ANDLconst)
+		v.AuxInt = int32ToAuxInt(-2)
 		v.AddArg(x)
 		return true
 	}
@@ -21435,14 +20934,14 @@
 func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (SHLQconst [1] (SHRQconst [1] x))
-	// result: (BTRQconst [0] x)
+	// result: (ANDQconst [-2] x)
 	for {
 		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
 			break
 		}
 		x := v_0.Args[0]
-		v.reset(OpAMD64BTRQconst)
-		v.AuxInt = int8ToAuxInt(0)
+		v.reset(OpAMD64ANDQconst)
+		v.AuxInt = int32ToAuxInt(-2)
 		v.AddArg(x)
 		return true
 	}
@@ -21862,14 +21361,14 @@
 func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (SHRLconst [1] (SHLLconst [1] x))
-	// result: (BTRLconst [31] x)
+	// result: (ANDLconst [0x7fffffff] x)
 	for {
 		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
 			break
 		}
 		x := v_0.Args[0]
-		v.reset(OpAMD64BTRLconst)
-		v.AuxInt = int8ToAuxInt(31)
+		v.reset(OpAMD64ANDLconst)
+		v.AuxInt = int32ToAuxInt(0x7fffffff)
 		v.AddArg(x)
 		return true
 	}
@@ -23571,26 +23070,6 @@
 		}
 		break
 	}
-	// match: (XORL (MOVLconst [c]) x)
-	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
-	// result: (BTCLconst [int8(log32(c))] x)
-	for {
-		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
-			if v_0.Op != OpAMD64MOVLconst {
-				continue
-			}
-			c := auxIntToInt32(v_0.AuxInt)
-			x := v_1
-			if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
-				continue
-			}
-			v.reset(OpAMD64BTCLconst)
-			v.AuxInt = int8ToAuxInt(int8(log32(c)))
-			v.AddArg(x)
-			return true
-		}
-		break
-	}
 	// match: (XORL x (MOVLconst [c]))
 	// result: (XORLconst [c] x)
 	for {
@@ -23662,20 +23141,6 @@
 }
 func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
 	v_0 := v.Args[0]
-	// match: (XORLconst [c] x)
-	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
-	// result: (BTCLconst [int8(log32(c))] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		x := v_0
-		if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
-			break
-		}
-		v.reset(OpAMD64BTCLconst)
-		v.AuxInt = int8ToAuxInt(int8(log32(c)))
-		v.AddArg(x)
-		return true
-	}
 	// match: (XORLconst [1] (SETNE x))
 	// result: (SETEQ x)
 	for {
@@ -23800,20 +23265,6 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (XORLconst [c] (BTCLconst [d] x))
-	// result: (XORLconst [c ^ 1<<uint32(d)] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		if v_0.Op != OpAMD64BTCLconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		v.reset(OpAMD64XORLconst)
-		v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
-		v.AddArg(x)
-		return true
-	}
 	// match: (XORLconst [c] x)
 	// cond: c==0
 	// result: x
@@ -24035,7 +23486,7 @@
 		break
 	}
 	// match: (XORQ (MOVQconst [c]) x)
-	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
+	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
 	// result: (BTCQconst [int8(log64(c))] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -24044,7 +23495,7 @@
 			}
 			c := auxIntToInt64(v_0.AuxInt)
 			x := v_1
-			if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
+			if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
 				continue
 			}
 			v.reset(OpAMD64BTCQconst)
@@ -24129,20 +23580,6 @@
 }
 func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
 	v_0 := v.Args[0]
-	// match: (XORQconst [c] x)
-	// cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
-	// result: (BTCQconst [int8(log32(c))] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		x := v_0
-		if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
-			break
-		}
-		v.reset(OpAMD64BTCQconst)
-		v.AuxInt = int8ToAuxInt(int8(log32(c)))
-		v.AddArg(x)
-		return true
-	}
 	// match: (XORQconst [c] (XORQconst [d] x))
 	// result: (XORQconst [c ^ d] x)
 	for {
@@ -24157,24 +23594,6 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (XORQconst [c] (BTCQconst [d] x))
-	// cond: is32Bit(int64(c) ^ 1<<uint32(d))
-	// result: (XORQconst [c ^ 1<<uint32(d)] x)
-	for {
-		c := auxIntToInt32(v.AuxInt)
-		if v_0.Op != OpAMD64BTCQconst {
-			break
-		}
-		d := auxIntToInt8(v_0.AuxInt)
-		x := v_0.Args[0]
-		if !(is32Bit(int64(c) ^ 1<<uint32(d))) {
-			break
-		}
-		v.reset(OpAMD64XORQconst)
-		v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
-		v.AddArg(x)
-		return true
-	}
 	// match: (XORQconst [0] x)
 	// result: x
 	for {
@@ -25791,12 +25210,12 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (Ctz16 x)
-	// result: (BSFL (BTSLconst <typ.UInt32> [16] x))
+	// result: (BSFL (ORLconst <typ.UInt32> [1<<16] x))
 	for {
 		x := v_0
 		v.reset(OpAMD64BSFL)
-		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
-		v0.AuxInt = int8ToAuxInt(16)
+		v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
+		v0.AuxInt = int32ToAuxInt(1 << 16)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -25969,12 +25388,12 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (Ctz8 x)
-	// result: (BSFL (BTSLconst <typ.UInt32> [ 8] x))
+	// result: (BSFL (ORLconst <typ.UInt32> [1<<8 ] x))
 	for {
 		x := v_0
 		v.reset(OpAMD64BSFL)
-		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
-		v0.AuxInt = int8ToAuxInt(8)
+		v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
+		v0.AuxInt = int32ToAuxInt(1 << 8)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -27481,6 +26900,88 @@
 	}
 	return false
 }
+func rewriteValueAMD64_OpMax32F(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (Max32F <t> x y)
+	// result: (Neg32F <t> (Min32F <t> (Neg32F <t> x) (Neg32F <t> y)))
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		v.reset(OpNeg32F)
+		v.Type = t
+		v0 := b.NewValue0(v.Pos, OpMin32F, t)
+		v1 := b.NewValue0(v.Pos, OpNeg32F, t)
+		v1.AddArg(x)
+		v2 := b.NewValue0(v.Pos, OpNeg32F, t)
+		v2.AddArg(y)
+		v0.AddArg2(v1, v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpMax64F(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (Max64F <t> x y)
+	// result: (Neg64F <t> (Min64F <t> (Neg64F <t> x) (Neg64F <t> y)))
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		v.reset(OpNeg64F)
+		v.Type = t
+		v0 := b.NewValue0(v.Pos, OpMin64F, t)
+		v1 := b.NewValue0(v.Pos, OpNeg64F, t)
+		v1.AddArg(x)
+		v2 := b.NewValue0(v.Pos, OpNeg64F, t)
+		v2.AddArg(y)
+		v0.AddArg2(v1, v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpMin32F(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
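+	// Note: MINSS returns its second operand when either input is NaN or both
+	// inputs are zero, so the extra MINSS against x and the bitwise POR
+	// propagate NaNs and prefer -0 over +0, matching Go's min semantics.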
+	// match: (Min32F <t> x y)
+	// result: (POR (MINSS <t> (MINSS <t> x y) x) (MINSS <t> x y))
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		v.reset(OpAMD64POR)
+		v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
+		v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
+		v1.AddArg2(x, y)
+		v0.AddArg2(v1, x)
+		v.AddArg2(v0, v1)
+		return true
+	}
+}
+func rewriteValueAMD64_OpMin64F(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (Min64F <t> x y)
+	// result: (POR (MINSD <t> (MINSD <t> x y) x) (MINSD <t> x y))
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		v.reset(OpAMD64POR)
+		v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
+		v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
+		v1.AddArg2(x, y)
+		v0.AddArg2(v1, x)
+		v.AddArg2(v0, v1)
+		return true
+	}
+}
 func rewriteValueAMD64_OpMod16(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
@@ -27955,6 +27456,27 @@
 		v.AddArg3(dst, v0, v1)
 		return true
 	}
+	// match: (Move [11] dst src mem)
+	// result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem))
+	for {
+		if auxIntToInt64(v.AuxInt) != 11 {
+			break
+		}
+		dst := v_0
+		src := v_1
+		mem := v_2
+		v.reset(OpAMD64MOVLstore)
+		v.AuxInt = int32ToAuxInt(7)
+		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+		v0.AuxInt = int32ToAuxInt(7)
+		v0.AddArg2(src, mem)
+		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+		v2.AddArg2(src, mem)
+		v1.AddArg3(dst, v2, mem)
+		v.AddArg3(dst, v0, v1)
+		return true
+	}
 	// match: (Move [12] dst src mem)
 	// result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
 	for {
@@ -27977,14 +27499,14 @@
 		return true
 	}
 	// match: (Move [s] dst src mem)
-	// cond: s == 11 || s >= 13 && s <= 15
+	// cond: s >= 13 && s <= 15
 	// result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
 	for {
 		s := auxIntToInt64(v.AuxInt)
 		dst := v_0
 		src := v_1
 		mem := v_2
-		if !(s == 11 || s >= 13 && s <= 15) {
+		if !(s >= 13 && s <= 15) {
 			break
 		}
 		v.reset(OpAMD64MOVQstore)
@@ -30443,14 +29965,94 @@
 		v.AddArg2(destptr, v0)
 		return true
 	}
+	// match: (Zero [9] destptr mem)
+	// cond: config.useSSE
+	// result: (MOVBstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+	for {
+		if auxIntToInt64(v.AuxInt) != 9 {
+			break
+		}
+		destptr := v_0
+		mem := v_1
+		if !(config.useSSE) {
+			break
+		}
+		v.reset(OpAMD64MOVBstoreconst)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+		v0.AddArg2(destptr, mem)
+		v.AddArg2(destptr, v0)
+		return true
+	}
+	// match: (Zero [10] destptr mem)
+	// cond: config.useSSE
+	// result: (MOVWstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+	for {
+		if auxIntToInt64(v.AuxInt) != 10 {
+			break
+		}
+		destptr := v_0
+		mem := v_1
+		if !(config.useSSE) {
+			break
+		}
+		v.reset(OpAMD64MOVWstoreconst)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+		v0.AddArg2(destptr, mem)
+		v.AddArg2(destptr, v0)
+		return true
+	}
+	// match: (Zero [11] destptr mem)
+	// cond: config.useSSE
+	// result: (MOVLstoreconst [makeValAndOff(0,7)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+	for {
+		if auxIntToInt64(v.AuxInt) != 11 {
+			break
+		}
+		destptr := v_0
+		mem := v_1
+		if !(config.useSSE) {
+			break
+		}
+		v.reset(OpAMD64MOVLstoreconst)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7))
+		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+		v0.AddArg2(destptr, mem)
+		v.AddArg2(destptr, v0)
+		return true
+	}
+	// match: (Zero [12] destptr mem)
+	// cond: config.useSSE
+	// result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+	for {
+		if auxIntToInt64(v.AuxInt) != 12 {
+			break
+		}
+		destptr := v_0
+		mem := v_1
+		if !(config.useSSE) {
+			break
+		}
+		v.reset(OpAMD64MOVLstoreconst)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+		v0.AddArg2(destptr, mem)
+		v.AddArg2(destptr, v0)
+		return true
+	}
 	// match: (Zero [s] destptr mem)
-	// cond: s > 8 && s < 16 && config.useSSE
+	// cond: s > 12 && s < 16 && config.useSSE
 	// result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
 	for {
 		s := auxIntToInt64(v.AuxInt)
 		destptr := v_0
 		mem := v_1
-		if !(s > 8 && s < 16 && config.useSSE) {
+		if !(s > 12 && s < 16 && config.useSSE) {
 			break
 		}
 		v.reset(OpAMD64MOVQstoreconst)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index 70cacb9..971c9a5 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -1496,7 +1496,7 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (ADDD a (MULD x y))
-	// cond: a.Uses == 1 && buildcfg.GOARM >= 6
+	// cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
 	// result: (MULAD a x y)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1506,7 +1506,7 @@
 			}
 			y := v_1.Args[1]
 			x := v_1.Args[0]
-			if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+			if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
 				continue
 			}
 			v.reset(OpARMMULAD)
@@ -1516,7 +1516,7 @@
 		break
 	}
 	// match: (ADDD a (NMULD x y))
-	// cond: a.Uses == 1 && buildcfg.GOARM >= 6
+	// cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
 	// result: (MULSD a x y)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1526,7 +1526,7 @@
 			}
 			y := v_1.Args[1]
 			x := v_1.Args[0]
-			if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+			if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
 				continue
 			}
 			v.reset(OpARMMULSD)
@@ -1541,7 +1541,7 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (ADDF a (MULF x y))
-	// cond: a.Uses == 1 && buildcfg.GOARM >= 6
+	// cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
 	// result: (MULAF a x y)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1551,7 +1551,7 @@
 			}
 			y := v_1.Args[1]
 			x := v_1.Args[0]
-			if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+			if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
 				continue
 			}
 			v.reset(OpARMMULAF)
@@ -1561,7 +1561,7 @@
 		break
 	}
 	// match: (ADDF a (NMULF x y))
-	// cond: a.Uses == 1 && buildcfg.GOARM >= 6
+	// cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
 	// result: (MULSF a x y)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1571,7 +1571,7 @@
 			}
 			y := v_1.Args[1]
 			x := v_1.Args[0]
-			if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+			if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
 				continue
 			}
 			v.reset(OpARMMULSF)
@@ -1979,12 +1979,12 @@
 		return true
 	}
 	// match: (ADDconst [c] x)
-	// cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+	// cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
 	// result: (SUBconst [-c] x)
 	for {
 		c := auxIntToInt32(v.AuxInt)
 		x := v_0
-		if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+		if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
 			break
 		}
 		v.reset(OpARMSUBconst)
@@ -2099,7 +2099,7 @@
 		return true
 	}
 	// match: (ADDshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
-	// cond: buildcfg.GOARM>=6
+	// cond: buildcfg.GOARM.Version>=6
 	// result: (REV16 x)
 	for {
 		if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
@@ -2110,7 +2110,7 @@
 			break
 		}
 		x := v_0_0.Args[0]
-		if x != v_1 || !(buildcfg.GOARM >= 6) {
+		if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
 			break
 		}
 		v.reset(OpARMREV16)
@@ -2551,12 +2551,12 @@
 		return true
 	}
 	// match: (ANDconst [c] x)
-	// cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+	// cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
 	// result: (BICconst [int32(^uint32(c))] x)
 	for {
 		c := auxIntToInt32(v.AuxInt)
 		x := v_0
-		if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+		if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
 			break
 		}
 		v.reset(OpARMBICconst)
@@ -3052,12 +3052,12 @@
 		return true
 	}
 	// match: (BICconst [c] x)
-	// cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+	// cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
 	// result: (ANDconst [int32(^uint32(c))] x)
 	for {
 		c := auxIntToInt32(v.AuxInt)
 		x := v_0
-		if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+		if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
 			break
 		}
 		v.reset(OpARMANDconst)
@@ -7590,7 +7590,7 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (MULD (NEGD x) y)
-	// cond: buildcfg.GOARM >= 6
+	// cond: buildcfg.GOARM.Version >= 6
 	// result: (NMULD x y)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -7599,7 +7599,7 @@
 			}
 			x := v_0.Args[0]
 			y := v_1
-			if !(buildcfg.GOARM >= 6) {
+			if !(buildcfg.GOARM.Version >= 6) {
 				continue
 			}
 			v.reset(OpARMNMULD)
@@ -7614,7 +7614,7 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (MULF (NEGF x) y)
-	// cond: buildcfg.GOARM >= 6
+	// cond: buildcfg.GOARM.Version >= 6
 	// result: (NMULF x y)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -7623,7 +7623,7 @@
 			}
 			x := v_0.Args[0]
 			y := v_1
-			if !(buildcfg.GOARM >= 6) {
+			if !(buildcfg.GOARM.Version >= 6) {
 				continue
 			}
 			v.reset(OpARMNMULF)
@@ -8247,7 +8247,7 @@
 func rewriteValueARM_OpARMNEGD(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (NEGD (MULD x y))
-	// cond: buildcfg.GOARM >= 6
+	// cond: buildcfg.GOARM.Version >= 6
 	// result: (NMULD x y)
 	for {
 		if v_0.Op != OpARMMULD {
@@ -8255,7 +8255,7 @@
 		}
 		y := v_0.Args[1]
 		x := v_0.Args[0]
-		if !(buildcfg.GOARM >= 6) {
+		if !(buildcfg.GOARM.Version >= 6) {
 			break
 		}
 		v.reset(OpARMNMULD)
@@ -8267,7 +8267,7 @@
 func rewriteValueARM_OpARMNEGF(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (NEGF (MULF x y))
-	// cond: buildcfg.GOARM >= 6
+	// cond: buildcfg.GOARM.Version >= 6
 	// result: (NMULF x y)
 	for {
 		if v_0.Op != OpARMMULF {
@@ -8275,7 +8275,7 @@
 		}
 		y := v_0.Args[1]
 		x := v_0.Args[0]
-		if !(buildcfg.GOARM >= 6) {
+		if !(buildcfg.GOARM.Version >= 6) {
 			break
 		}
 		v.reset(OpARMNMULF)
@@ -8583,7 +8583,7 @@
 		return true
 	}
 	// match: (ORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
-	// cond: buildcfg.GOARM>=6
+	// cond: buildcfg.GOARM.Version>=6
 	// result: (REV16 x)
 	for {
 		if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
@@ -8594,7 +8594,7 @@
 			break
 		}
 		x := v_0_0.Args[0]
-		if x != v_1 || !(buildcfg.GOARM >= 6) {
+		if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
 			break
 		}
 		v.reset(OpARMREV16)
@@ -9048,7 +9048,7 @@
 		return true
 	}
 	// match: (RSB (MUL x y) a)
-	// cond: buildcfg.GOARM == 7
+	// cond: buildcfg.GOARM.Version == 7
 	// result: (MULS x y a)
 	for {
 		if v_0.Op != OpARMMUL {
@@ -9057,7 +9057,7 @@
 		y := v_0.Args[1]
 		x := v_0.Args[0]
 		a := v_1
-		if !(buildcfg.GOARM == 7) {
+		if !(buildcfg.GOARM.Version == 7) {
 			break
 		}
 		v.reset(OpARMMULS)
@@ -10534,7 +10534,7 @@
 		return true
 	}
 	// match: (SRAconst (SLLconst x [c]) [d])
-	// cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+	// cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31
 	// result: (BFX [(d-c)|(32-d)<<8] x)
 	for {
 		d := auxIntToInt32(v.AuxInt)
@@ -10543,7 +10543,7 @@
 		}
 		c := auxIntToInt32(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+		if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
 			break
 		}
 		v.reset(OpARMBFX)
@@ -10590,7 +10590,7 @@
 		return true
 	}
 	// match: (SRLconst (SLLconst x [c]) [d])
-	// cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+	// cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31
 	// result: (BFXU [(d-c)|(32-d)<<8] x)
 	for {
 		d := auxIntToInt32(v.AuxInt)
@@ -10599,7 +10599,7 @@
 		}
 		c := auxIntToInt32(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+		if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
 			break
 		}
 		v.reset(OpARMBFXU)
@@ -10830,7 +10830,7 @@
 		return true
 	}
 	// match: (SUB a (MUL x y))
-	// cond: buildcfg.GOARM == 7
+	// cond: buildcfg.GOARM.Version == 7
 	// result: (MULS x y a)
 	for {
 		a := v_0
@@ -10839,7 +10839,7 @@
 		}
 		y := v_1.Args[1]
 		x := v_1.Args[0]
-		if !(buildcfg.GOARM == 7) {
+		if !(buildcfg.GOARM.Version == 7) {
 			break
 		}
 		v.reset(OpARMMULS)
@@ -10852,7 +10852,7 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (SUBD a (MULD x y))
-	// cond: a.Uses == 1 && buildcfg.GOARM >= 6
+	// cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
 	// result: (MULSD a x y)
 	for {
 		a := v_0
@@ -10861,7 +10861,7 @@
 		}
 		y := v_1.Args[1]
 		x := v_1.Args[0]
-		if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+		if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
 			break
 		}
 		v.reset(OpARMMULSD)
@@ -10869,7 +10869,7 @@
 		return true
 	}
 	// match: (SUBD a (NMULD x y))
-	// cond: a.Uses == 1 && buildcfg.GOARM >= 6
+	// cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
 	// result: (MULAD a x y)
 	for {
 		a := v_0
@@ -10878,7 +10878,7 @@
 		}
 		y := v_1.Args[1]
 		x := v_1.Args[0]
-		if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+		if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
 			break
 		}
 		v.reset(OpARMMULAD)
@@ -10891,7 +10891,7 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (SUBF a (MULF x y))
-	// cond: a.Uses == 1 && buildcfg.GOARM >= 6
+	// cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
 	// result: (MULSF a x y)
 	for {
 		a := v_0
@@ -10900,7 +10900,7 @@
 		}
 		y := v_1.Args[1]
 		x := v_1.Args[0]
-		if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+		if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
 			break
 		}
 		v.reset(OpARMMULSF)
@@ -10908,7 +10908,7 @@
 		return true
 	}
 	// match: (SUBF a (NMULF x y))
-	// cond: a.Uses == 1 && buildcfg.GOARM >= 6
+	// cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
 	// result: (MULAF a x y)
 	for {
 		a := v_0
@@ -10917,7 +10917,7 @@
 		}
 		y := v_1.Args[1]
 		x := v_1.Args[0]
-		if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+		if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
 			break
 		}
 		v.reset(OpARMMULAF)
@@ -11383,12 +11383,12 @@
 		return true
 	}
 	// match: (SUBconst [c] x)
-	// cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+	// cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
 	// result: (ADDconst [-c] x)
 	for {
 		c := auxIntToInt32(v.AuxInt)
 		x := v_0
-		if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+		if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
 			break
 		}
 		v.reset(OpARMADDconst)
@@ -12710,7 +12710,7 @@
 		return true
 	}
 	// match: (XORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
-	// cond: buildcfg.GOARM>=6
+	// cond: buildcfg.GOARM.Version>=6
 	// result: (REV16 x)
 	for {
 		if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
@@ -12721,7 +12721,7 @@
 			break
 		}
 		x := v_0_0.Args[0]
-		if x != v_1 || !(buildcfg.GOARM >= 6) {
+		if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
 			break
 		}
 		v.reset(OpARMREV16)
@@ -13062,12 +13062,12 @@
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Bswap32 <t> x)
-	// cond: buildcfg.GOARM==5
+	// cond: buildcfg.GOARM.Version==5
 	// result: (XOR <t> (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8]) (SRRconst <t> x [8]))
 	for {
 		t := v.Type
 		x := v_0
-		if !(buildcfg.GOARM == 5) {
+		if !(buildcfg.GOARM.Version == 5) {
 			break
 		}
 		v.reset(OpARMXOR)
@@ -13090,11 +13090,11 @@
 		return true
 	}
 	// match: (Bswap32 x)
-	// cond: buildcfg.GOARM>=6
+	// cond: buildcfg.GOARM.Version>=6
 	// result: (REV x)
 	for {
 		x := v_0
-		if !(buildcfg.GOARM >= 6) {
+		if !(buildcfg.GOARM.Version >= 6) {
 			break
 		}
 		v.reset(OpARMREV)
@@ -13177,12 +13177,12 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (Ctz16 <t> x)
-	// cond: buildcfg.GOARM<=6
+	// cond: buildcfg.GOARM.Version<=6
 	// result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
 	for {
 		t := v.Type
 		x := v_0
-		if !(buildcfg.GOARM <= 6) {
+		if !(buildcfg.GOARM.Version <= 6) {
 			break
 		}
 		v.reset(OpARMRSBconst)
@@ -13204,12 +13204,12 @@
 		return true
 	}
 	// match: (Ctz16 <t> x)
-	// cond: buildcfg.GOARM==7
+	// cond: buildcfg.GOARM.Version==7
 	// result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
 	for {
 		t := v.Type
 		x := v_0
-		if !(buildcfg.GOARM == 7) {
+		if !(buildcfg.GOARM.Version == 7) {
 			break
 		}
 		v.reset(OpARMCLZ)
@@ -13228,12 +13228,12 @@
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Ctz32 <t> x)
-	// cond: buildcfg.GOARM<=6
+	// cond: buildcfg.GOARM.Version<=6
 	// result: (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
 	for {
 		t := v.Type
 		x := v_0
-		if !(buildcfg.GOARM <= 6) {
+		if !(buildcfg.GOARM.Version <= 6) {
 			break
 		}
 		v.reset(OpARMRSBconst)
@@ -13252,12 +13252,12 @@
 		return true
 	}
 	// match: (Ctz32 <t> x)
-	// cond: buildcfg.GOARM==7
+	// cond: buildcfg.GOARM.Version==7
 	// result: (CLZ <t> (RBIT <t> x))
 	for {
 		t := v.Type
 		x := v_0
-		if !(buildcfg.GOARM == 7) {
+		if !(buildcfg.GOARM.Version == 7) {
 			break
 		}
 		v.reset(OpARMCLZ)
@@ -13274,12 +13274,12 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (Ctz8 <t> x)
-	// cond: buildcfg.GOARM<=6
+	// cond: buildcfg.GOARM.Version<=6
 	// result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
 	for {
 		t := v.Type
 		x := v_0
-		if !(buildcfg.GOARM <= 6) {
+		if !(buildcfg.GOARM.Version <= 6) {
 			break
 		}
 		v.reset(OpARMRSBconst)
@@ -13301,12 +13301,12 @@
 		return true
 	}
 	// match: (Ctz8 <t> x)
-	// cond: buildcfg.GOARM==7
+	// cond: buildcfg.GOARM.Version==7
 	// result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
 	for {
 		t := v.Type
 		x := v_0
-		if !(buildcfg.GOARM == 7) {
+		if !(buildcfg.GOARM.Version == 7) {
 			break
 		}
 		v.reset(OpARMCLZ)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 3b8fe30..f0a4425 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -820,6 +820,18 @@
 		return rewriteValueARM64_OpLsh8x64(v)
 	case OpLsh8x8:
 		return rewriteValueARM64_OpLsh8x8(v)
+	case OpMax32F:
+		v.Op = OpARM64FMAXS
+		return true
+	case OpMax64F:
+		v.Op = OpARM64FMAXD
+		return true
+	case OpMin32F:
+		v.Op = OpARM64FMINS
+		return true
+	case OpMin64F:
+		v.Op = OpARM64FMIND
+		return true
 	case OpMod16:
 		return rewriteValueARM64_OpMod16(v)
 	case OpMod16u:
@@ -1249,7 +1261,7 @@
 		break
 	}
 	// match: (ADD a l:(MULW x y))
-	// cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+	// cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
 	// result: (MADDW a x y)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1260,7 +1272,7 @@
 			}
 			y := l.Args[1]
 			x := l.Args[0]
-			if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+			if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
 				continue
 			}
 			v.reset(OpARM64MADDW)
@@ -1270,7 +1282,7 @@
 		break
 	}
 	// match: (ADD a l:(MNEGW x y))
-	// cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+	// cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
 	// result: (MSUBW a x y)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1281,7 +1293,7 @@
 			}
 			y := l.Args[1]
 			x := l.Args[0]
-			if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+			if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
 				continue
 			}
 			v.reset(OpARM64MSUBW)
@@ -3736,7 +3748,7 @@
 	v_0 := v.Args[0]
 	// match: (DIVW (MOVDconst [c]) (MOVDconst [d]))
 	// cond: d != 0
-	// result: (MOVDconst [int64(int32(c)/int32(d))])
+	// result: (MOVDconst [int64(uint32(int32(c)/int32(d)))])
 	for {
 		if v_0.Op != OpARM64MOVDconst {
 			break
@@ -3750,7 +3762,7 @@
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64ToAuxInt(int64(int32(c) / int32(d)))
+		v.AuxInt = int64ToAuxInt(int64(uint32(int32(c) / int32(d))))
 		return true
 	}
 	return false
@@ -5962,18 +5974,19 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (GreaterEqualNoov (InvertFlags x))
-	// result: (OR (LessThanNoov <typ.Bool> x) (Equal <typ.Bool> x))
+	// result: (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x)
 	for {
 		if v_0.Op != OpARM64InvertFlags {
 			break
 		}
 		x := v_0.Args[0]
-		v.reset(OpARM64OR)
+		v.reset(OpARM64CSINC)
+		v.AuxInt = opToAuxInt(OpARM64NotEqual)
 		v0 := b.NewValue0(v.Pos, OpARM64LessThanNoov, typ.Bool)
 		v0.AddArg(x)
-		v1 := b.NewValue0(v.Pos, OpARM64Equal, typ.Bool)
-		v1.AddArg(x)
-		v.AddArg2(v0, v1)
+		v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+		v1.AuxInt = int64ToAuxInt(0)
+		v.AddArg3(v0, v1, x)
 		return true
 	}
 	return false
@@ -6697,18 +6710,17 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (LessThanNoov (InvertFlags x))
-	// result: (BIC (GreaterEqualNoov <typ.Bool> x) (Equal <typ.Bool> x))
+	// result: (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x)
 	for {
 		if v_0.Op != OpARM64InvertFlags {
 			break
 		}
 		x := v_0.Args[0]
-		v.reset(OpARM64BIC)
+		v.reset(OpARM64CSEL0)
+		v.AuxInt = opToAuxInt(OpARM64NotEqual)
 		v0 := b.NewValue0(v.Pos, OpARM64GreaterEqualNoov, typ.Bool)
 		v0.AddArg(x)
-		v1 := b.NewValue0(v.Pos, OpARM64Equal, typ.Bool)
-		v1.AddArg(x)
-		v.AddArg2(v0, v1)
+		v.AddArg2(v0, x)
 		return true
 	}
 	return false
@@ -7138,7 +7150,7 @@
 	b := v.Block
 	// match: (MADDW a x (MOVDconst [c]))
 	// cond: int32(c)==-1
-	// result: (SUB a x)
+	// result: (MOVWUreg (SUB <a.Type> a x))
 	for {
 		a := v_0
 		x := v_1
@@ -7149,13 +7161,15 @@
 		if !(int32(c) == -1) {
 			break
 		}
-		v.reset(OpARM64SUB)
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a _ (MOVDconst [c]))
 	// cond: int32(c)==0
-	// result: a
+	// result: (MOVWUreg a)
 	for {
 		a := v_0
 		if v_2.Op != OpARM64MOVDconst {
@@ -7165,12 +7179,13 @@
 		if !(int32(c) == 0) {
 			break
 		}
-		v.copyOf(a)
+		v.reset(OpARM64MOVWUreg)
+		v.AddArg(a)
 		return true
 	}
 	// match: (MADDW a x (MOVDconst [c]))
 	// cond: int32(c)==1
-	// result: (ADD a x)
+	// result: (MOVWUreg (ADD <a.Type> a x))
 	for {
 		a := v_0
 		x := v_1
@@ -7181,13 +7196,15 @@
 		if !(int32(c) == 1) {
 			break
 		}
-		v.reset(OpARM64ADD)
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c)
-	// result: (ADDshiftLL a x [log64(c)])
+	// result: (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
 	for {
 		a := v_0
 		x := v_1
@@ -7198,14 +7215,16 @@
 		if !(isPowerOfTwo64(c)) {
 			break
 		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c))
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c))
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c-1) && int32(c)>=3
-	// result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+	// result: (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
 	for {
 		a := v_0
 		x := v_1
@@ -7216,16 +7235,18 @@
 		if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
 			break
 		}
-		v.reset(OpARM64ADD)
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(log64(c - 1))
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(log64(c - 1))
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c+1) && int32(c)>=7
-	// result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+	// result: (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
 	for {
 		a := v_0
 		x := v_1
@@ -7236,16 +7257,18 @@
 		if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
 			break
 		}
-		v.reset(OpARM64SUB)
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(log64(c + 1))
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(log64(c + 1))
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a x (MOVDconst [c]))
 	// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
-	// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+	// result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
 	for {
 		a := v_0
 		x := v_1
@@ -7256,17 +7279,19 @@
 		if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 3))
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(2)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 3))
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(2)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a x (MOVDconst [c]))
 	// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
-	// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+	// result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
 	for {
 		a := v_0
 		x := v_1
@@ -7277,17 +7302,19 @@
 		if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 5))
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(2)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 5))
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(2)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a x (MOVDconst [c]))
 	// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
-	// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+	// result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
 	for {
 		a := v_0
 		x := v_1
@@ -7298,17 +7325,19 @@
 		if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 7))
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(3)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 7))
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(3)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a x (MOVDconst [c]))
 	// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
-	// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+	// result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
 	for {
 		a := v_0
 		x := v_1
@@ -7319,17 +7348,19 @@
 		if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 9))
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(3)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 9))
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(3)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) x)
 	// cond: int32(c)==-1
-	// result: (SUB a x)
+	// result: (MOVWUreg (SUB <a.Type> a x))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7340,13 +7371,15 @@
 		if !(int32(c) == -1) {
 			break
 		}
-		v.reset(OpARM64SUB)
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) _)
 	// cond: int32(c)==0
-	// result: a
+	// result: (MOVWUreg a)
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7356,12 +7389,13 @@
 		if !(int32(c) == 0) {
 			break
 		}
-		v.copyOf(a)
+		v.reset(OpARM64MOVWUreg)
+		v.AddArg(a)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) x)
 	// cond: int32(c)==1
-	// result: (ADD a x)
+	// result: (MOVWUreg (ADD <a.Type> a x))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7372,13 +7406,15 @@
 		if !(int32(c) == 1) {
 			break
 		}
-		v.reset(OpARM64ADD)
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) x)
 	// cond: isPowerOfTwo64(c)
-	// result: (ADDshiftLL a x [log64(c)])
+	// result: (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7389,14 +7425,16 @@
 		if !(isPowerOfTwo64(c)) {
 			break
 		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c))
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c))
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) x)
 	// cond: isPowerOfTwo64(c-1) && int32(c)>=3
-	// result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+	// result: (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7407,16 +7445,18 @@
 		if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
 			break
 		}
-		v.reset(OpARM64ADD)
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(log64(c - 1))
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(log64(c - 1))
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) x)
 	// cond: isPowerOfTwo64(c+1) && int32(c)>=7
-	// result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+	// result: (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7427,16 +7467,18 @@
 		if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
 			break
 		}
-		v.reset(OpARM64SUB)
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(log64(c + 1))
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(log64(c + 1))
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) x)
 	// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
-	// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+	// result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7447,17 +7489,19 @@
 		if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 3))
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(2)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 3))
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(2)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) x)
 	// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
-	// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+	// result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7468,17 +7512,19 @@
 		if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 5))
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(2)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 5))
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(2)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) x)
 	// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
-	// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+	// result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7489,17 +7535,19 @@
 		if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 7))
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(3)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 7))
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(3)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) x)
 	// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
-	// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+	// result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7510,16 +7558,18 @@
 		if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 9))
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(3)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 9))
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(3)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW (MOVDconst [c]) x y)
-	// result: (ADDconst [c] (MULW <x.Type> x y))
+	// result: (MOVWUreg (ADDconst <x.Type> [c] (MULW <x.Type> x y)))
 	for {
 		if v_0.Op != OpARM64MOVDconst {
 			break
@@ -7527,15 +7577,17 @@
 		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		y := v_2
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = int64ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
-		v0.AddArg2(x, y)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDconst, x.Type)
+		v0.AuxInt = int64ToAuxInt(c)
+		v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+		v1.AddArg2(x, y)
+		v0.AddArg(v1)
 		v.AddArg(v0)
 		return true
 	}
 	// match: (MADDW a (MOVDconst [c]) (MOVDconst [d]))
-	// result: (ADDconst [int64(int32(c)*int32(d))] a)
+	// result: (MOVWUreg (ADDconst <a.Type> [c*d] a))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -7546,9 +7598,11 @@
 			break
 		}
 		d := auxIntToInt64(v_2.AuxInt)
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
-		v.AddArg(a)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDconst, a.Type)
+		v0.AuxInt = int64ToAuxInt(c * d)
+		v0.AddArg(a)
+		v.AddArg(v0)
 		return true
 	}
 	return false
@@ -7789,7 +7843,7 @@
 	b := v.Block
 	// match: (MNEGW x (MOVDconst [c]))
 	// cond: int32(c)==-1
-	// result: x
+	// result: (MOVWUreg x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -7800,7 +7854,8 @@
 			if !(int32(c) == -1) {
 				continue
 			}
-			v.copyOf(x)
+			v.reset(OpARM64MOVWUreg)
+			v.AddArg(x)
 			return true
 		}
 		break
@@ -7825,7 +7880,7 @@
 	}
 	// match: (MNEGW x (MOVDconst [c]))
 	// cond: int32(c)==1
-	// result: (NEG x)
+	// result: (MOVWUreg (NEG <x.Type> x))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -7836,8 +7891,10 @@
 			if !(int32(c) == 1) {
 				continue
 			}
-			v.reset(OpARM64NEG)
-			v.AddArg(x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+			v0.AddArg(x)
+			v.AddArg(v0)
 			return true
 		}
 		break
@@ -7866,7 +7923,7 @@
 	}
 	// match: (MNEGW x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c-1) && int32(c) >= 3
-	// result: (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+	// result: (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)])))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -7877,10 +7934,12 @@
 			if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
 				continue
 			}
-			v.reset(OpARM64NEG)
-			v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-			v0.AuxInt = int64ToAuxInt(log64(c - 1))
-			v0.AddArg2(x, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+			v1.AuxInt = int64ToAuxInt(log64(c - 1))
+			v1.AddArg2(x, x)
+			v0.AddArg(v1)
 			v.AddArg(v0)
 			return true
 		}
@@ -7888,7 +7947,7 @@
 	}
 	// match: (MNEGW x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c+1) && int32(c) >= 7
-	// result: (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+	// result: (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -7899,12 +7958,14 @@
 			if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
 				continue
 			}
-			v.reset(OpARM64NEG)
-			v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-			v0.AuxInt = int64ToAuxInt(log64(c + 1))
-			v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
-			v1.AddArg(x)
-			v0.AddArg2(v1, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+			v1.AuxInt = int64ToAuxInt(log64(c + 1))
+			v2 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+			v2.AddArg(x)
+			v1.AddArg2(v2, x)
+			v0.AddArg(v1)
 			v.AddArg(v0)
 			return true
 		}
@@ -7912,7 +7973,7 @@
 	}
 	// match: (MNEGW x (MOVDconst [c]))
 	// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
-	// result: (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+	// result: (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -7923,12 +7984,13 @@
 			if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
 				continue
 			}
-			v.reset(OpARM64SLLconst)
-			v.Type = x.Type
-			v.AuxInt = int64ToAuxInt(log64(c / 3))
-			v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-			v0.AuxInt = int64ToAuxInt(2)
-			v0.AddArg2(x, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+			v0.AuxInt = int64ToAuxInt(log64(c / 3))
+			v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+			v1.AuxInt = int64ToAuxInt(2)
+			v1.AddArg2(x, x)
+			v0.AddArg(v1)
 			v.AddArg(v0)
 			return true
 		}
@@ -7936,7 +7998,7 @@
 	}
 	// match: (MNEGW x (MOVDconst [c]))
 	// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
-	// result: (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+	// result: (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -7947,12 +8009,14 @@
 			if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
 				continue
 			}
-			v.reset(OpARM64NEG)
-			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
-			v0.AuxInt = int64ToAuxInt(log64(c / 5))
-			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-			v1.AuxInt = int64ToAuxInt(2)
-			v1.AddArg2(x, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+			v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+			v1.AuxInt = int64ToAuxInt(log64(c / 5))
+			v2 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+			v2.AuxInt = int64ToAuxInt(2)
+			v2.AddArg2(x, x)
+			v1.AddArg(v2)
 			v0.AddArg(v1)
 			v.AddArg(v0)
 			return true
@@ -7961,7 +8025,7 @@
 	}
 	// match: (MNEGW x (MOVDconst [c]))
 	// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
-	// result: (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+	// result: (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3])))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -7972,12 +8036,13 @@
 			if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
 				continue
 			}
-			v.reset(OpARM64SLLconst)
-			v.Type = x.Type
-			v.AuxInt = int64ToAuxInt(log64(c / 7))
-			v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-			v0.AuxInt = int64ToAuxInt(3)
-			v0.AddArg2(x, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+			v0.AuxInt = int64ToAuxInt(log64(c / 7))
+			v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+			v1.AuxInt = int64ToAuxInt(3)
+			v1.AddArg2(x, x)
+			v0.AddArg(v1)
 			v.AddArg(v0)
 			return true
 		}
@@ -7985,7 +8050,7 @@
 	}
 	// match: (MNEGW x (MOVDconst [c]))
 	// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
-	// result: (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+	// result: (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -7996,12 +8061,14 @@
 			if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
 				continue
 			}
-			v.reset(OpARM64NEG)
-			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
-			v0.AuxInt = int64ToAuxInt(log64(c / 9))
-			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-			v1.AuxInt = int64ToAuxInt(3)
-			v1.AddArg2(x, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+			v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+			v1.AuxInt = int64ToAuxInt(log64(c / 9))
+			v2 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+			v2.AuxInt = int64ToAuxInt(3)
+			v2.AddArg2(x, x)
+			v1.AddArg(v2)
 			v0.AddArg(v1)
 			v.AddArg(v0)
 			return true
@@ -8009,7 +8076,7 @@
 		break
 	}
 	// match: (MNEGW (MOVDconst [c]) (MOVDconst [d]))
-	// result: (MOVDconst [-int64(int32(c)*int32(d))])
+	// result: (MOVDconst [int64(uint32(-c*d))])
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			if v_0.Op != OpARM64MOVDconst {
@@ -8021,7 +8088,7 @@
 			}
 			d := auxIntToInt64(v_1.AuxInt)
 			v.reset(OpARM64MOVDconst)
-			v.AuxInt = int64ToAuxInt(-int64(int32(c) * int32(d)))
+			v.AuxInt = int64ToAuxInt(int64(uint32(-c * d)))
 			return true
 		}
 		break
@@ -8057,7 +8124,7 @@
 	v_0 := v.Args[0]
 	// match: (MODW (MOVDconst [c]) (MOVDconst [d]))
 	// cond: d != 0
-	// result: (MOVDconst [int64(int32(c)%int32(d))])
+	// result: (MOVDconst [int64(uint32(int32(c)%int32(d)))])
 	for {
 		if v_0.Op != OpARM64MOVDconst {
 			break
@@ -8071,7 +8138,7 @@
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64ToAuxInt(int64(int32(c) % int32(d)))
+		v.AuxInt = int64ToAuxInt(int64(uint32(int32(c) % int32(d))))
 		return true
 	}
 	return false
@@ -8451,6 +8518,17 @@
 		v.AddArg(x)
 		return true
 	}
+	// match: (MOVBUreg x)
+	// cond: v.Type.Size() <= 1
+	// result: x
+	for {
+		x := v_0
+		if !(v.Type.Size() <= 1) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
 	// match: (MOVBUreg (SLLconst [lc] x))
 	// cond: lc >= 8
 	// result: (MOVDconst [0])
@@ -8714,6 +8792,36 @@
 		v.AuxInt = int64ToAuxInt(int64(int8(c)))
 		return true
 	}
+	// match: (MOVBreg x)
+	// cond: v.Type.Size() <= 1
+	// result: x
+	for {
+		x := v_0
+		if !(v.Type.Size() <= 1) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	// match: (MOVBreg <t> (ANDconst x [c]))
+	// cond: uint64(c) & uint64(0xffffffffffffff80) == 0
+	// result: (ANDconst <t> x [c])
+	for {
+		t := v.Type
+		if v_0.Op != OpARM64ANDconst {
+			break
+		}
+		c := auxIntToInt64(v_0.AuxInt)
+		x := v_0.Args[0]
+		if !(uint64(c)&uint64(0xffffffffffffff80) == 0) {
+			break
+		}
+		v.reset(OpARM64ANDconst)
+		v.Type = t
+		v.AuxInt = int64ToAuxInt(c)
+		v.AddArg(x)
+		return true
+	}
 	// match: (MOVBreg (SLLconst [lc] x))
 	// cond: lc < 8
 	// result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
@@ -9759,7 +9867,7 @@
 	b := v.Block
 	config := b.Func.Config
 	// match: (MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
+	// cond: x.Uses == 1 && setPos(v, x.Pos) && clobber(x)
 	// result: (MOVQstorezero {s} [i] ptr mem)
 	for {
 		i := auxIntToInt32(v.AuxInt)
@@ -9770,7 +9878,7 @@
 			break
 		}
 		mem := x.Args[1]
-		if ptr != x.Args[0] || !(x.Uses == 1 && clobber(x)) {
+		if ptr != x.Args[0] || !(x.Uses == 1 && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVQstorezero)
@@ -9780,7 +9888,7 @@
 		return true
 	}
 	// match: (MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
+	// cond: x.Uses == 1 && setPos(v, x.Pos) && clobber(x)
 	// result: (MOVQstorezero {s} [i-8] ptr mem)
 	for {
 		i := auxIntToInt32(v.AuxInt)
@@ -9791,7 +9899,7 @@
 			break
 		}
 		mem := x.Args[1]
-		if ptr != x.Args[0] || !(x.Uses == 1 && clobber(x)) {
+		if ptr != x.Args[0] || !(x.Uses == 1 && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVQstorezero)
@@ -10346,6 +10454,17 @@
 		v.AuxInt = int64ToAuxInt(int64(uint16(c)))
 		return true
 	}
+	// match: (MOVHUreg x)
+	// cond: v.Type.Size() <= 2
+	// result: x
+	for {
+		x := v_0
+		if !(v.Type.Size() <= 2) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
 	// match: (MOVHUreg (SLLconst [lc] x))
 	// cond: lc >= 16
 	// result: (MOVDconst [0])
@@ -10792,6 +10911,36 @@
 		v.AuxInt = int64ToAuxInt(int64(int16(c)))
 		return true
 	}
+	// match: (MOVHreg x)
+	// cond: v.Type.Size() <= 2
+	// result: x
+	for {
+		x := v_0
+		if !(v.Type.Size() <= 2) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	// match: (MOVHreg <t> (ANDconst x [c]))
+	// cond: uint64(c) & uint64(0xffffffffffff8000) == 0
+	// result: (ANDconst <t> x [c])
+	for {
+		t := v.Type
+		if v_0.Op != OpARM64ANDconst {
+			break
+		}
+		c := auxIntToInt64(v_0.AuxInt)
+		x := v_0.Args[0]
+		if !(uint64(c)&uint64(0xffffffffffff8000) == 0) {
+			break
+		}
+		v.reset(OpARM64ANDconst)
+		v.Type = t
+		v.AuxInt = int64ToAuxInt(c)
+		v.AddArg(x)
+		return true
+	}
 	// match: (MOVHreg (SLLconst [lc] x))
 	// cond: lc < 16
 	// result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
@@ -11951,6 +12100,28 @@
 		v.AuxInt = int64ToAuxInt(int64(uint32(c)))
 		return true
 	}
+	// match: (MOVWUreg x)
+	// cond: v.Type.Size() <= 4
+	// result: x
+	for {
+		x := v_0
+		if !(v.Type.Size() <= 4) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	// match: (MOVWUreg x)
+	// cond: zeroUpper32Bits(x, 3)
+	// result: x
+	for {
+		x := v_0
+		if !(zeroUpper32Bits(x, 3)) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
 	// match: (MOVWUreg (SLLconst [lc] x))
 	// cond: lc >= 32
 	// result: (MOVDconst [0])
@@ -12455,6 +12626,36 @@
 		v.AuxInt = int64ToAuxInt(int64(int32(c)))
 		return true
 	}
+	// match: (MOVWreg x)
+	// cond: v.Type.Size() <= 4
+	// result: x
+	for {
+		x := v_0
+		if !(v.Type.Size() <= 4) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	// match: (MOVWreg <t> (ANDconst x [c]))
+	// cond: uint64(c) & uint64(0xffffffff80000000) == 0
+	// result: (ANDconst <t> x [c])
+	for {
+		t := v.Type
+		if v_0.Op != OpARM64ANDconst {
+			break
+		}
+		c := auxIntToInt64(v_0.AuxInt)
+		x := v_0.Args[0]
+		if !(uint64(c)&uint64(0xffffffff80000000) == 0) {
+			break
+		}
+		v.reset(OpARM64ANDconst)
+		v.Type = t
+		v.AuxInt = int64ToAuxInt(c)
+		v.AddArg(x)
+		return true
+	}
 	// match: (MOVWreg (SLLconst [lc] x))
 	// cond: lc < 32
 	// result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
@@ -13411,7 +13612,7 @@
 	b := v.Block
 	// match: (MSUBW a x (MOVDconst [c]))
 	// cond: int32(c)==-1
-	// result: (ADD a x)
+	// result: (MOVWUreg (ADD <a.Type> a x))
 	for {
 		a := v_0
 		x := v_1
@@ -13422,13 +13623,15 @@
 		if !(int32(c) == -1) {
 			break
 		}
-		v.reset(OpARM64ADD)
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a _ (MOVDconst [c]))
 	// cond: int32(c)==0
-	// result: a
+	// result: (MOVWUreg a)
 	for {
 		a := v_0
 		if v_2.Op != OpARM64MOVDconst {
@@ -13438,12 +13641,13 @@
 		if !(int32(c) == 0) {
 			break
 		}
-		v.copyOf(a)
+		v.reset(OpARM64MOVWUreg)
+		v.AddArg(a)
 		return true
 	}
 	// match: (MSUBW a x (MOVDconst [c]))
 	// cond: int32(c)==1
-	// result: (SUB a x)
+	// result: (MOVWUreg (SUB <a.Type> a x))
 	for {
 		a := v_0
 		x := v_1
@@ -13454,13 +13658,15 @@
 		if !(int32(c) == 1) {
 			break
 		}
-		v.reset(OpARM64SUB)
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c)
-	// result: (SUBshiftLL a x [log64(c)])
+	// result: (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
 	for {
 		a := v_0
 		x := v_1
@@ -13471,14 +13677,16 @@
 		if !(isPowerOfTwo64(c)) {
 			break
 		}
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c))
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c))
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c-1) && int32(c)>=3
-	// result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+	// result: (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
 	for {
 		a := v_0
 		x := v_1
@@ -13489,16 +13697,18 @@
 		if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
 			break
 		}
-		v.reset(OpARM64SUB)
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(log64(c - 1))
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(log64(c - 1))
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c+1) && int32(c)>=7
-	// result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+	// result: (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
 	for {
 		a := v_0
 		x := v_1
@@ -13509,16 +13719,18 @@
 		if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
 			break
 		}
-		v.reset(OpARM64ADD)
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(log64(c + 1))
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(log64(c + 1))
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a x (MOVDconst [c]))
 	// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
-	// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+	// result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
 	for {
 		a := v_0
 		x := v_1
@@ -13529,17 +13741,19 @@
 		if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 3))
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(2)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 3))
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(2)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a x (MOVDconst [c]))
 	// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
-	// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+	// result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
 	for {
 		a := v_0
 		x := v_1
@@ -13550,17 +13764,19 @@
 		if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 5))
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(2)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 5))
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(2)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a x (MOVDconst [c]))
 	// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
-	// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+	// result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
 	for {
 		a := v_0
 		x := v_1
@@ -13571,17 +13787,19 @@
 		if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 7))
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(3)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 7))
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(3)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a x (MOVDconst [c]))
 	// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
-	// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+	// result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
 	for {
 		a := v_0
 		x := v_1
@@ -13592,17 +13810,19 @@
 		if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 9))
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(3)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 9))
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(3)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) x)
 	// cond: int32(c)==-1
-	// result: (ADD a x)
+	// result: (MOVWUreg (ADD <a.Type> a x))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13613,13 +13833,15 @@
 		if !(int32(c) == -1) {
 			break
 		}
-		v.reset(OpARM64ADD)
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) _)
 	// cond: int32(c)==0
-	// result: a
+	// result: (MOVWUreg a)
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13629,12 +13851,13 @@
 		if !(int32(c) == 0) {
 			break
 		}
-		v.copyOf(a)
+		v.reset(OpARM64MOVWUreg)
+		v.AddArg(a)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) x)
 	// cond: int32(c)==1
-	// result: (SUB a x)
+	// result: (MOVWUreg (SUB <a.Type> a x))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13645,13 +13868,15 @@
 		if !(int32(c) == 1) {
 			break
 		}
-		v.reset(OpARM64SUB)
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) x)
 	// cond: isPowerOfTwo64(c)
-	// result: (SUBshiftLL a x [log64(c)])
+	// result: (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13662,14 +13887,16 @@
 		if !(isPowerOfTwo64(c)) {
 			break
 		}
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c))
-		v.AddArg2(a, x)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c))
+		v0.AddArg2(a, x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) x)
 	// cond: isPowerOfTwo64(c-1) && int32(c)>=3
-	// result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+	// result: (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13680,16 +13907,18 @@
 		if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
 			break
 		}
-		v.reset(OpARM64SUB)
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(log64(c - 1))
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(log64(c - 1))
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) x)
 	// cond: isPowerOfTwo64(c+1) && int32(c)>=7
-	// result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+	// result: (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13700,16 +13929,18 @@
 		if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
 			break
 		}
-		v.reset(OpARM64ADD)
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(log64(c + 1))
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(log64(c + 1))
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) x)
 	// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
-	// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+	// result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13720,17 +13951,19 @@
 		if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 3))
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(2)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 3))
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(2)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) x)
 	// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
-	// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+	// result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13741,17 +13974,19 @@
 		if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 5))
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(2)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 5))
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(2)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) x)
 	// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
-	// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+	// result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13762,17 +13997,19 @@
 		if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 7))
-		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(3)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 7))
+		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(3)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) x)
 	// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
-	// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+	// result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13783,16 +14020,18 @@
 		if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
 			break
 		}
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = int64ToAuxInt(log64(c / 9))
-		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = int64ToAuxInt(3)
-		v0.AddArg2(x, x)
-		v.AddArg2(a, v0)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+		v0.AuxInt = int64ToAuxInt(log64(c / 9))
+		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+		v1.AuxInt = int64ToAuxInt(3)
+		v1.AddArg2(x, x)
+		v0.AddArg2(a, v1)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW (MOVDconst [c]) x y)
-	// result: (ADDconst [c] (MNEGW <x.Type> x y))
+	// result: (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
 	for {
 		if v_0.Op != OpARM64MOVDconst {
 			break
@@ -13800,15 +14039,17 @@
 		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		y := v_2
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = int64ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type)
-		v0.AddArg2(x, y)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64ADDconst, x.Type)
+		v0.AuxInt = int64ToAuxInt(c)
+		v1 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type)
+		v1.AddArg2(x, y)
+		v0.AddArg(v1)
 		v.AddArg(v0)
 		return true
 	}
 	// match: (MSUBW a (MOVDconst [c]) (MOVDconst [d]))
-	// result: (SUBconst [int64(int32(c)*int32(d))] a)
+	// result: (MOVWUreg (SUBconst <a.Type> [c*d] a))
 	for {
 		a := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -13819,9 +14060,11 @@
 			break
 		}
 		d := auxIntToInt64(v_2.AuxInt)
-		v.reset(OpARM64SUBconst)
-		v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
-		v.AddArg(a)
+		v.reset(OpARM64MOVWUreg)
+		v0 := b.NewValue0(v.Pos, OpARM64SUBconst, a.Type)
+		v0.AuxInt = int64ToAuxInt(c * d)
+		v0.AddArg(a)
+		v.AddArg(v0)
 		return true
 	}
 	return false
@@ -14082,7 +14325,7 @@
 	}
 	// match: (MULW x (MOVDconst [c]))
 	// cond: int32(c)==-1
-	// result: (NEG x)
+	// result: (MOVWUreg (NEG <x.Type> x))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -14093,8 +14336,10 @@
 			if !(int32(c) == -1) {
 				continue
 			}
-			v.reset(OpARM64NEG)
-			v.AddArg(x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+			v0.AddArg(x)
+			v.AddArg(v0)
 			return true
 		}
 		break
@@ -14119,7 +14364,7 @@
 	}
 	// match: (MULW x (MOVDconst [c]))
 	// cond: int32(c)==1
-	// result: x
+	// result: (MOVWUreg x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -14130,14 +14375,15 @@
 			if !(int32(c) == 1) {
 				continue
 			}
-			v.copyOf(x)
+			v.reset(OpARM64MOVWUreg)
+			v.AddArg(x)
 			return true
 		}
 		break
 	}
 	// match: (MULW x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c)
-	// result: (SLLconst [log64(c)] x)
+	// result: (MOVWUreg (SLLconst <x.Type> [log64(c)] x))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -14148,16 +14394,18 @@
 			if !(isPowerOfTwo64(c)) {
 				continue
 			}
-			v.reset(OpARM64SLLconst)
-			v.AuxInt = int64ToAuxInt(log64(c))
-			v.AddArg(x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+			v0.AuxInt = int64ToAuxInt(log64(c))
+			v0.AddArg(x)
+			v.AddArg(v0)
 			return true
 		}
 		break
 	}
 	// match: (MULW x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c-1) && int32(c) >= 3
-	// result: (ADDshiftLL x x [log64(c-1)])
+	// result: (MOVWUreg (ADDshiftLL <x.Type> x x [log64(c-1)]))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -14168,16 +14416,18 @@
 			if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
 				continue
 			}
-			v.reset(OpARM64ADDshiftLL)
-			v.AuxInt = int64ToAuxInt(log64(c - 1))
-			v.AddArg2(x, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+			v0.AuxInt = int64ToAuxInt(log64(c - 1))
+			v0.AddArg2(x, x)
+			v.AddArg(v0)
 			return true
 		}
 		break
 	}
 	// match: (MULW x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c+1) && int32(c) >= 7
-	// result: (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+	// result: (MOVWUreg (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -14188,18 +14438,20 @@
 			if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
 				continue
 			}
-			v.reset(OpARM64ADDshiftLL)
-			v.AuxInt = int64ToAuxInt(log64(c + 1))
-			v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
-			v0.AddArg(x)
-			v.AddArg2(v0, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+			v0.AuxInt = int64ToAuxInt(log64(c + 1))
+			v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+			v1.AddArg(x)
+			v0.AddArg2(v1, x)
+			v.AddArg(v0)
 			return true
 		}
 		break
 	}
 	// match: (MULW x (MOVDconst [c]))
 	// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
-	// result: (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+	// result: (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (ADDshiftLL <x.Type> x x [1])))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -14210,11 +14462,13 @@
 			if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
 				continue
 			}
-			v.reset(OpARM64SLLconst)
-			v.AuxInt = int64ToAuxInt(log64(c / 3))
-			v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-			v0.AuxInt = int64ToAuxInt(1)
-			v0.AddArg2(x, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+			v0.AuxInt = int64ToAuxInt(log64(c / 3))
+			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+			v1.AuxInt = int64ToAuxInt(1)
+			v1.AddArg2(x, x)
+			v0.AddArg(v1)
 			v.AddArg(v0)
 			return true
 		}
@@ -14222,7 +14476,7 @@
 	}
 	// match: (MULW x (MOVDconst [c]))
 	// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
-	// result: (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+	// result: (MOVWUreg (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -14233,11 +14487,13 @@
 			if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
 				continue
 			}
-			v.reset(OpARM64SLLconst)
-			v.AuxInt = int64ToAuxInt(log64(c / 5))
-			v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-			v0.AuxInt = int64ToAuxInt(2)
-			v0.AddArg2(x, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+			v0.AuxInt = int64ToAuxInt(log64(c / 5))
+			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+			v1.AuxInt = int64ToAuxInt(2)
+			v1.AddArg2(x, x)
+			v0.AddArg(v1)
 			v.AddArg(v0)
 			return true
 		}
@@ -14245,7 +14501,7 @@
 	}
 	// match: (MULW x (MOVDconst [c]))
 	// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
-	// result: (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+	// result: (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3])))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -14256,13 +14512,15 @@
 			if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
 				continue
 			}
-			v.reset(OpARM64SLLconst)
-			v.AuxInt = int64ToAuxInt(log64(c / 7))
-			v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-			v0.AuxInt = int64ToAuxInt(3)
-			v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
-			v1.AddArg(x)
-			v0.AddArg2(v1, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+			v0.AuxInt = int64ToAuxInt(log64(c / 7))
+			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+			v1.AuxInt = int64ToAuxInt(3)
+			v2 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+			v2.AddArg(x)
+			v1.AddArg2(v2, x)
+			v0.AddArg(v1)
 			v.AddArg(v0)
 			return true
 		}
@@ -14270,7 +14528,7 @@
 	}
 	// match: (MULW x (MOVDconst [c]))
 	// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
-	// result: (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+	// result: (MOVWUreg (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
@@ -14281,18 +14539,20 @@
 			if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
 				continue
 			}
-			v.reset(OpARM64SLLconst)
-			v.AuxInt = int64ToAuxInt(log64(c / 9))
-			v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
-			v0.AuxInt = int64ToAuxInt(3)
-			v0.AddArg2(x, x)
+			v.reset(OpARM64MOVWUreg)
+			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+			v0.AuxInt = int64ToAuxInt(log64(c / 9))
+			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+			v1.AuxInt = int64ToAuxInt(3)
+			v1.AddArg2(x, x)
+			v0.AddArg(v1)
 			v.AddArg(v0)
 			return true
 		}
 		break
 	}
 	// match: (MULW (MOVDconst [c]) (MOVDconst [d]))
-	// result: (MOVDconst [int64(int32(c)*int32(d))])
+	// result: (MOVDconst [int64(uint32(c*d))])
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			if v_0.Op != OpARM64MOVDconst {
@@ -14304,7 +14564,7 @@
 			}
 			d := auxIntToInt64(v_1.AuxInt)
 			v.reset(OpARM64MOVDconst)
-			v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
+			v.AuxInt = int64ToAuxInt(int64(uint32(c * d)))
 			return true
 		}
 		break
@@ -14489,6 +14749,7 @@
 		return true
 	}
 	// match: (NEG (MULW x y))
+	// cond: v.Type.Size() <= 4
 	// result: (MNEGW x y)
 	for {
 		if v_0.Op != OpARM64MULW {
@@ -14496,6 +14757,9 @@
 		}
 		y := v_0.Args[1]
 		x := v_0.Args[0]
+		if !(v.Type.Size() <= 4) {
+			break
+		}
 		v.reset(OpARM64MNEGW)
 		v.AddArg2(x, y)
 		return true
@@ -16745,7 +17009,7 @@
 		return true
 	}
 	// match: (SUB a l:(MULW x y))
-	// cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+	// cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
 	// result: (MSUBW a x y)
 	for {
 		a := v_0
@@ -16755,7 +17019,7 @@
 		}
 		y := l.Args[1]
 		x := l.Args[0]
-		if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+		if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
 			break
 		}
 		v.reset(OpARM64MSUBW)
@@ -16763,7 +17027,7 @@
 		return true
 	}
 	// match: (SUB a l:(MNEGW x y))
-	// cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+	// cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
 	// result: (MADDW a x y)
 	for {
 		a := v_0
@@ -16773,7 +17037,7 @@
 		}
 		y := l.Args[1]
 		x := l.Args[0]
-		if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+		if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
 			break
 		}
 		v.reset(OpARM64MADDW)
@@ -17516,9 +17780,10 @@
 func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
 	// match: (UDIVW x (MOVDconst [c]))
 	// cond: uint32(c)==1
-	// result: x
+	// result: (MOVWUreg x)
 	for {
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -17528,12 +17793,13 @@
 		if !(uint32(c) == 1) {
 			break
 		}
-		v.copyOf(x)
+		v.reset(OpARM64MOVWUreg)
+		v.AddArg(x)
 		return true
 	}
 	// match: (UDIVW x (MOVDconst [c]))
 	// cond: isPowerOfTwo64(c) && is32Bit(c)
-	// result: (SRLconst [log64(c)] x)
+	// result: (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
 	for {
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
@@ -17545,7 +17811,9 @@
 		}
 		v.reset(OpARM64SRLconst)
 		v.AuxInt = int64ToAuxInt(log64(c))
-		v.AddArg(x)
+		v0 := b.NewValue0(v.Pos, OpARM64MOVWUreg, v.Type)
+		v0.AddArg(x)
+		v.AddArg(v0)
 		return true
 	}
 	// match: (UDIVW (MOVDconst [c]) (MOVDconst [d]))
diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
index e88b74c..edd3ffe 100644
--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go
+++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
@@ -1724,8 +1724,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -1736,7 +1738,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVBUload)
@@ -1746,7 +1748,7 @@
 		return true
 	}
 	// match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -1758,7 +1760,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVBUload)
@@ -1771,6 +1773,26 @@
 }
 func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool {
 	v_0 := v.Args[0]
+	// match: (MOVBUreg x:(SGT _ _))
+	// result: x
+	for {
+		x := v_0
+		if x.Op != OpLOONG64SGT {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	// match: (MOVBUreg x:(SGTU _ _))
+	// result: x
+	for {
+		x := v_0
+		if x.Op != OpLOONG64SGTU {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
 	// match: (MOVBUreg x:(MOVBUload _ _))
 	// result: (MOVVreg x)
 	for {
@@ -1809,8 +1831,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVBload [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -1821,7 +1845,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVBload)
@@ -1831,7 +1855,7 @@
 		return true
 	}
 	// match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -1843,7 +1867,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVBload)
@@ -1895,8 +1919,10 @@
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -1908,7 +1934,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVBstore)
@@ -1918,7 +1944,7 @@
 		return true
 	}
 	// match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -1931,7 +1957,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVBstore)
@@ -2047,8 +2073,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2059,7 +2087,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVBstorezero)
@@ -2069,7 +2097,7 @@
 		return true
 	}
 	// match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2081,7 +2109,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVBstorezero)
@@ -2095,8 +2123,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVDload [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2107,7 +2137,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVDload)
@@ -2117,7 +2147,7 @@
 		return true
 	}
 	// match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2129,7 +2159,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVDload)
@@ -2144,8 +2174,10 @@
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2157,7 +2189,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVDstore)
@@ -2167,7 +2199,7 @@
 		return true
 	}
 	// match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2180,7 +2212,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVDstore)
@@ -2194,8 +2226,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVFload [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2206,7 +2240,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVFload)
@@ -2216,7 +2250,7 @@
 		return true
 	}
 	// match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2228,7 +2262,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVFload)
@@ -2243,8 +2277,10 @@
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2256,7 +2292,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVFstore)
@@ -2266,7 +2302,7 @@
 		return true
 	}
 	// match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2279,7 +2315,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVFstore)
@@ -2293,8 +2329,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2305,7 +2343,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVHUload)
@@ -2315,7 +2353,7 @@
 		return true
 	}
 	// match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2327,7 +2365,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVHUload)
@@ -2400,8 +2438,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVHload [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2412,7 +2452,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVHload)
@@ -2422,7 +2462,7 @@
 		return true
 	}
 	// match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2434,7 +2474,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVHload)
@@ -2530,8 +2570,10 @@
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2543,7 +2585,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVHstore)
@@ -2553,7 +2595,7 @@
 		return true
 	}
 	// match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2566,7 +2608,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVHstore)
@@ -2648,8 +2690,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2660,7 +2704,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVHstorezero)
@@ -2670,7 +2714,7 @@
 		return true
 	}
 	// match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2682,7 +2726,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVHstorezero)
@@ -2696,8 +2740,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVVload [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2708,7 +2754,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVVload)
@@ -2718,7 +2764,7 @@
 		return true
 	}
 	// match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2730,7 +2776,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVVload)
@@ -2772,8 +2818,10 @@
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2785,7 +2833,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVVstore)
@@ -2795,7 +2843,7 @@
 		return true
 	}
 	// match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2808,7 +2856,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVVstore)
@@ -2822,8 +2870,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2834,7 +2884,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVVstorezero)
@@ -2844,7 +2894,7 @@
 		return true
 	}
 	// match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2856,7 +2906,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVVstorezero)
@@ -2870,8 +2920,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2882,7 +2934,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVWUload)
@@ -2892,7 +2944,7 @@
 		return true
 	}
 	// match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -2904,7 +2956,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVWUload)
@@ -2999,8 +3051,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -3011,7 +3065,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVWload)
@@ -3021,7 +3075,7 @@
 		return true
 	}
 	// match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -3033,7 +3087,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVWload)
@@ -3162,8 +3216,10 @@
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -3175,7 +3231,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVWstore)
@@ -3185,7 +3241,7 @@
 		return true
 	}
 	// match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -3198,7 +3254,7 @@
 		ptr := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVWstore)
@@ -3246,8 +3302,10 @@
 func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
 	// match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(int64(off1)+off2)
+	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -3258,7 +3316,7 @@
 		off2 := auxIntToInt64(v_0.AuxInt)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(is32Bit(int64(off1) + off2)) {
+		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVWstorezero)
@@ -3268,7 +3326,7 @@
 		return true
 	}
 	// match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
 	// result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := auxIntToInt32(v.AuxInt)
@@ -3280,7 +3338,7 @@
 		sym2 := auxToSym(v_0.Aux)
 		ptr := v_0.Args[0]
 		mem := v_1
-		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
 			break
 		}
 		v.reset(OpLOONG64MOVWstorezero)
@@ -7570,6 +7628,7 @@
 	return false
 }
 func rewriteBlockLOONG64(b *Block) bool {
+	typ := &b.Func.Config.Types
 	switch b.Kind {
 	case BlockLOONG64EQ:
 		// match: (EQ (FPFlagTrue cmp) yes no)
@@ -7769,10 +7828,12 @@
 		}
 	case BlockIf:
 		// match: (If cond yes no)
-		// result: (NE cond yes no)
+		// result: (NE (MOVBUreg <typ.UInt64> cond) yes no)
 		for {
 			cond := b.Controls[0]
-			b.resetWithControl(BlockLOONG64NE, cond)
+			v0 := b.NewValue0(cond.Pos, OpLOONG64MOVBUreg, typ.UInt64)
+			v0.AddArg(cond)
+			b.resetWithControl(BlockLOONG64NE, v0)
 			return true
 		}
 	case BlockLOONG64LEZ:
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
index de316e9..764465d 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -4797,6 +4797,17 @@
 		v.AddArg(x)
 		return true
 	}
+	// match: (SGT x x)
+	// result: (MOVVconst [0])
+	for {
+		x := v_0
+		if x != v_1 {
+			break
+		}
+		v.reset(OpMIPS64MOVVconst)
+		v.AuxInt = int64ToAuxInt(0)
+		return true
+	}
 	return false
 }
 func rewriteValueMIPS64_OpMIPS64SGTU(v *Value) bool {
@@ -4819,6 +4830,17 @@
 		v.AddArg(x)
 		return true
 	}
+	// match: (SGTU x x)
+	// result: (MOVVconst [0])
+	for {
+		x := v_0
+		if x != v_1 {
+			break
+		}
+		v.reset(OpMIPS64MOVVconst)
+		v.AuxInt = int64ToAuxInt(0)
+		return true
+	}
 	return false
 }
 func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value) bool {
@@ -7315,6 +7337,38 @@
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Select0 <t> (Add64carry x y c))
+	// result: (ADDV (ADDV <t> x y) c)
+	for {
+		t := v.Type
+		if v_0.Op != OpAdd64carry {
+			break
+		}
+		c := v_0.Args[2]
+		x := v_0.Args[0]
+		y := v_0.Args[1]
+		v.reset(OpMIPS64ADDV)
+		v0 := b.NewValue0(v.Pos, OpMIPS64ADDV, t)
+		v0.AddArg2(x, y)
+		v.AddArg2(v0, c)
+		return true
+	}
+	// match: (Select0 <t> (Sub64borrow x y c))
+	// result: (SUBV (SUBV <t> x y) c)
+	for {
+		t := v.Type
+		if v_0.Op != OpSub64borrow {
+			break
+		}
+		c := v_0.Args[2]
+		x := v_0.Args[0]
+		y := v_0.Args[1]
+		v.reset(OpMIPS64SUBV)
+		v0 := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+		v0.AddArg2(x, y)
+		v.AddArg2(v0, c)
+		return true
+	}
 	// match: (Select0 (DIVVU _ (MOVVconst [1])))
 	// result: (MOVVconst [0])
 	for {
@@ -7427,6 +7481,50 @@
 		v.AddArg2(v0, v2)
 		return true
 	}
+	// match: (Select1 <t> (Add64carry x y c))
+	// result: (OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))
+	for {
+		t := v.Type
+		if v_0.Op != OpAdd64carry {
+			break
+		}
+		c := v_0.Args[2]
+		x := v_0.Args[0]
+		y := v_0.Args[1]
+		v.reset(OpMIPS64OR)
+		v0 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+		s := b.NewValue0(v.Pos, OpMIPS64ADDV, t)
+		s.AddArg2(x, y)
+		v0.AddArg2(x, s)
+		v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+		v3 := b.NewValue0(v.Pos, OpMIPS64ADDV, t)
+		v3.AddArg2(s, c)
+		v2.AddArg2(s, v3)
+		v.AddArg2(v0, v2)
+		return true
+	}
+	// match: (Select1 <t> (Sub64borrow x y c))
+	// result: (OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))
+	for {
+		t := v.Type
+		if v_0.Op != OpSub64borrow {
+			break
+		}
+		c := v_0.Args[2]
+		x := v_0.Args[0]
+		y := v_0.Args[1]
+		v.reset(OpMIPS64OR)
+		v0 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+		s := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+		s.AddArg2(x, y)
+		v0.AddArg2(s, x)
+		v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+		v3 := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+		v3.AddArg2(s, c)
+		v2.AddArg2(v3, s)
+		v.AddArg2(v0, v2)
+		return true
+	}
 	// match: (Select1 (MULVU x (MOVVconst [-1])))
 	// result: (NEGV x)
 	for {
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index d1c0c2b..473a8ff 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -533,6 +533,8 @@
 		return rewriteValuePPC64_OpPPC64MOVBstoreidx(v)
 	case OpPPC64MOVBstorezero:
 		return rewriteValuePPC64_OpPPC64MOVBstorezero(v)
+	case OpPPC64MOVDaddr:
+		return rewriteValuePPC64_OpPPC64MOVDaddr(v)
 	case OpPPC64MOVDload:
 		return rewriteValuePPC64_OpPPC64MOVDload(v)
 	case OpPPC64MOVDloadidx:
@@ -1176,7 +1178,7 @@
 		return true
 	}
 	// match: (Bswap16 x:(MOVHZloadidx ptr idx mem))
-	// result: @x.Block (MOVHZreg (MOVHBRloadidx ptr idx mem))
+	// result: @x.Block (MOVHBRloadidx ptr idx mem)
 	for {
 		x := v_0
 		if x.Op != OpPPC64MOVHZloadidx {
@@ -1186,11 +1188,9 @@
 		ptr := x.Args[0]
 		idx := x.Args[1]
 		b = x.Block
-		v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+		v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16)
 		v.copyOf(v0)
-		v1 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16)
-		v1.AddArg3(ptr, idx, mem)
-		v0.AddArg(v1)
+		v0.AddArg3(ptr, idx, mem)
 		return true
 	}
 	return false
@@ -1233,7 +1233,7 @@
 		return true
 	}
 	// match: (Bswap32 x:(MOVWZloadidx ptr idx mem))
-	// result: @x.Block (MOVWZreg (MOVWBRloadidx ptr idx mem))
+	// result: @x.Block (MOVWBRloadidx ptr idx mem)
 	for {
 		x := v_0
 		if x.Op != OpPPC64MOVWZloadidx {
@@ -1243,11 +1243,9 @@
 		ptr := x.Args[0]
 		idx := x.Args[1]
 		b = x.Block
-		v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+		v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32)
 		v.copyOf(v0)
-		v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32)
-		v1.AddArg3(ptr, idx, mem)
-		v0.AddArg(v1)
+		v0.AddArg3(ptr, idx, mem)
 		return true
 	}
 	return false
@@ -4226,6 +4224,19 @@
 		}
 		break
 	}
+	// match: (AND x (MOVDconst [-1]))
+	// result: x
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+				continue
+			}
+			v.copyOf(x)
+			return true
+		}
+		break
+	}
 	// match: (AND x (MOVDconst [c]))
 	// cond: isU16Bit(c)
 	// result: (Select0 (ANDCCconst [c] x))
@@ -4431,7 +4442,7 @@
 	}
 	// match: (BRH x:(MOVHZloadidx ptr idx mem))
 	// cond: x.Uses == 1
-	// result: @x.Block (MOVHZreg (MOVHBRloadidx ptr idx mem))
+	// result: @x.Block (MOVHBRloadidx ptr idx mem)
 	for {
 		x := v_0
 		if x.Op != OpPPC64MOVHZloadidx {
@@ -4444,11 +4455,9 @@
 			break
 		}
 		b = x.Block
-		v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+		v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16)
 		v.copyOf(v0)
-		v1 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16)
-		v1.AddArg3(ptr, idx, mem)
-		v0.AddArg(v1)
+		v0.AddArg3(ptr, idx, mem)
 		return true
 	}
 	return false
@@ -4484,7 +4493,7 @@
 	}
 	// match: (BRW x:(MOVWZloadidx ptr idx mem))
 	// cond: x.Uses == 1
-	// result: @x.Block (MOVWZreg (MOVWBRloadidx ptr idx mem))
+	// result: @x.Block (MOVWBRloadidx ptr idx mem)
 	for {
 		x := v_0
 		if x.Op != OpPPC64MOVWZloadidx {
@@ -4497,11 +4506,9 @@
 			break
 		}
 		b = x.Block
-		v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+		v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32)
 		v.copyOf(v0)
-		v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32)
-		v1.AddArg3(ptr, idx, mem)
-		v0.AddArg(v1)
+		v0.AddArg3(ptr, idx, mem)
 		return true
 	}
 	return false
@@ -7765,6 +7772,39 @@
 	}
 	return false
 }
+func rewriteValuePPC64_OpPPC64MOVDaddr(v *Value) bool {
+	v_0 := v.Args[0]
+	// match: (MOVDaddr {sym} [n] p:(ADD x y))
+	// cond: sym == nil && n == 0
+	// result: p
+	for {
+		n := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		p := v_0
+		if p.Op != OpPPC64ADD {
+			break
+		}
+		if !(sym == nil && n == 0) {
+			break
+		}
+		v.copyOf(p)
+		return true
+	}
+	// match: (MOVDaddr {sym} [n] ptr)
+	// cond: sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi)
+	// result: ptr
+	for {
+		n := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		if !(sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi)) {
+			break
+		}
+		v.copyOf(ptr)
+		return true
+	}
+	return false
+}
 func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
@@ -14512,6 +14552,26 @@
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Select0 (ANDCCconst [1] z:(SRADconst [63] x)))
+	// cond: z.Uses == 1
+	// result: (SRDconst [63] x)
+	for {
+		if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 1 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64SRADconst || auxIntToInt64(z.AuxInt) != 63 {
+			break
+		}
+		x := z.Args[0]
+		if !(z.Uses == 1) {
+			break
+		}
+		v.reset(OpPPC64SRDconst)
+		v.AuxInt = int64ToAuxInt(63)
+		v.AddArg(x)
+		return true
+	}
 	return false
 }
 func rewriteValuePPC64_OpSelect1(v *Value) bool {
@@ -15223,56 +15283,6 @@
 	typ := &b.Func.Config.Types
 	switch b.Kind {
 	case BlockPPC64EQ:
-		// match: (EQ (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (EQ (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
-		for b.Controls[0].Op == OpPPC64CMPconst {
-			v_0 := b.Controls[0]
-			if auxIntToInt64(v_0.AuxInt) != 0 {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpSelect0 {
-				break
-			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
-				break
-			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
-			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
-			b.resetWithControl(BlockPPC64EQ, v0)
-			return true
-		}
-		// match: (EQ (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (EQ (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
-		for b.Controls[0].Op == OpPPC64CMPWconst {
-			v_0 := b.Controls[0]
-			if auxIntToInt32(v_0.AuxInt) != 0 {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpSelect0 {
-				break
-			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
-				break
-			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
-			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
-			b.resetWithControl(BlockPPC64EQ, v0)
-			return true
-		}
 		// match: (EQ (FlagEQ) yes no)
 		// result: (First yes no)
 		for b.Controls[0].Op == OpPPC64FlagEQ {
@@ -15301,8 +15311,8 @@
 			b.resetWithControl(BlockPPC64EQ, cmp)
 			return true
 		}
-		// match: (EQ (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (EQ (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (EQ (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (EQ (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -15312,22 +15322,17 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64EQ, v0)
 			return true
 		}
-		// match: (EQ (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (EQ (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (EQ (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (EQ (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPWconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -15337,17 +15342,12 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64EQ, v0)
 			return true
 		}
@@ -15469,8 +15469,8 @@
 			b.resetWithControl(BlockPPC64LE, cmp)
 			return true
 		}
-		// match: (GE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (GE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (GE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (GE (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -15480,22 +15480,17 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64GE, v0)
 			return true
 		}
-		// match: (GE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (GE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (GE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (GE (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPWconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -15505,17 +15500,12 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64GE, v0)
 			return true
 		}
@@ -15638,8 +15628,8 @@
 			b.resetWithControl(BlockPPC64LT, cmp)
 			return true
 		}
-		// match: (GT (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (GT (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (GT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (GT (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -15649,22 +15639,17 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64GT, v0)
 			return true
 		}
-		// match: (GT (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (GT (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (GT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (GT (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPWconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -15674,17 +15659,12 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64GT, v0)
 			return true
 		}
@@ -15902,8 +15882,8 @@
 			b.resetWithControl(BlockPPC64GE, cmp)
 			return true
 		}
-		// match: (LE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (LE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (LE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (LE (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -15913,22 +15893,17 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64LE, v0)
 			return true
 		}
-		// match: (LE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (LE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (LE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (LE (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPWconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -15938,17 +15913,12 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64LE, v0)
 			return true
 		}
@@ -16071,8 +16041,8 @@
 			b.resetWithControl(BlockPPC64GT, cmp)
 			return true
 		}
-		// match: (LT (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (LT (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (LT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (LT (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -16082,22 +16052,17 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64LT, v0)
 			return true
 		}
-		// match: (LT (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (LT (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (LT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (LT (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPWconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -16107,17 +16072,12 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64LT, v0)
 			return true
 		}
@@ -16442,56 +16402,6 @@
 			b.resetWithControl(BlockPPC64FGE, cc)
 			return true
 		}
-		// match: (NE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (NE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
-		for b.Controls[0].Op == OpPPC64CMPconst {
-			v_0 := b.Controls[0]
-			if auxIntToInt64(v_0.AuxInt) != 0 {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpSelect0 {
-				break
-			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
-				break
-			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
-			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
-			b.resetWithControl(BlockPPC64NE, v0)
-			return true
-		}
-		// match: (NE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (NE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
-		for b.Controls[0].Op == OpPPC64CMPWconst {
-			v_0 := b.Controls[0]
-			if auxIntToInt32(v_0.AuxInt) != 0 {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpSelect0 {
-				break
-			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
-				break
-			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
-			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
-			b.resetWithControl(BlockPPC64NE, v0)
-			return true
-		}
 		// match: (NE (FlagEQ) yes no)
 		// result: (First no yes)
 		for b.Controls[0].Op == OpPPC64FlagEQ {
@@ -16519,8 +16429,8 @@
 			b.resetWithControl(BlockPPC64NE, cmp)
 			return true
 		}
-		// match: (NE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (NE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (NE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (NE (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt64(v_0.AuxInt) != 0 {
@@ -16530,22 +16440,17 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64NE, v0)
 			return true
 		}
-		// match: (NE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no)
-		// result: (NE (Select1 <types.TypeFlags> (ANDCCconst [c] x)) yes no)
+		// match: (NE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+		// result: (NE (Select1 <types.TypeFlags> z) yes no)
 		for b.Controls[0].Op == OpPPC64CMPWconst {
 			v_0 := b.Controls[0]
 			if auxIntToInt32(v_0.AuxInt) != 0 {
@@ -16555,17 +16460,12 @@
 			if v_0_0.Op != OpSelect0 {
 				break
 			}
-			v_0_0_0 := v_0_0.Args[0]
-			if v_0_0_0.Op != OpPPC64ANDCCconst {
+			z := v_0_0.Args[0]
+			if z.Op != OpPPC64ANDCCconst {
 				break
 			}
-			c := auxIntToInt64(v_0_0_0.AuxInt)
-			x := v_0_0_0.Args[0]
 			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
-			v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
-			v1.AuxInt = int64ToAuxInt(c)
-			v1.AddArg(x)
-			v0.AddArg(v1)
+			v0.AddArg(z)
 			b.resetWithControl(BlockPPC64NE, v0)
 			return true
 		}
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64latelower.go b/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
index 56acbe4..771dd6a 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
@@ -3,15 +3,501 @@
 package ssa
 
 import "internal/buildcfg"
+import "cmd/compile/internal/types"
 
 func rewriteValuePPC64latelower(v *Value) bool {
 	switch v.Op {
+	case OpPPC64ADD:
+		return rewriteValuePPC64latelower_OpPPC64ADD(v)
+	case OpPPC64AND:
+		return rewriteValuePPC64latelower_OpPPC64AND(v)
+	case OpPPC64CMPconst:
+		return rewriteValuePPC64latelower_OpPPC64CMPconst(v)
 	case OpPPC64ISEL:
 		return rewriteValuePPC64latelower_OpPPC64ISEL(v)
+	case OpPPC64RLDICL:
+		return rewriteValuePPC64latelower_OpPPC64RLDICL(v)
 	case OpPPC64SETBC:
 		return rewriteValuePPC64latelower_OpPPC64SETBC(v)
 	case OpPPC64SETBCR:
 		return rewriteValuePPC64latelower_OpPPC64SETBCR(v)
+	case OpSelect0:
+		return rewriteValuePPC64latelower_OpSelect0(v)
+	}
+	return false
+}
+func rewriteValuePPC64latelower_OpPPC64ADD(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (ADD (MOVDconst [m]) x)
+	// cond: supportsPPC64PCRel() && (m<<30)>>30 == m
+	// result: (ADDconst [m] x)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpPPC64MOVDconst {
+				continue
+			}
+			m := auxIntToInt64(v_0.AuxInt)
+			x := v_1
+			if !(supportsPPC64PCRel() && (m<<30)>>30 == m) {
+				continue
+			}
+			v.reset(OpPPC64ADDconst)
+			v.AuxInt = int64ToAuxInt(m)
+			v.AddArg(x)
+			return true
+		}
+		break
+	}
+	return false
+}
+func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (AND <t> x:(MOVDconst [m]) n)
+	// cond: t.Size() <= 2
+	// result: (Select0 (ANDCCconst [int64(int16(m))] n))
+	for {
+		t := v.Type
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			if x.Op != OpPPC64MOVDconst {
+				continue
+			}
+			m := auxIntToInt64(x.AuxInt)
+			n := v_1
+			if !(t.Size() <= 2) {
+				continue
+			}
+			v.reset(OpSelect0)
+			v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+			v0.AuxInt = int64ToAuxInt(int64(int16(m)))
+			v0.AddArg(n)
+			v.AddArg(v0)
+			return true
+		}
+		break
+	}
+	// match: (AND x:(MOVDconst [m]) n)
+	// cond: isPPC64ValidShiftMask(m)
+	// result: (RLDICL [encodePPC64RotateMask(0,m,64)] n)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			if x.Op != OpPPC64MOVDconst {
+				continue
+			}
+			m := auxIntToInt64(x.AuxInt)
+			n := v_1
+			if !(isPPC64ValidShiftMask(m)) {
+				continue
+			}
+			v.reset(OpPPC64RLDICL)
+			v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+			v.AddArg(n)
+			return true
+		}
+		break
+	}
+	// match: (AND x:(MOVDconst [m]) n)
+	// cond: m != 0 && isPPC64ValidShiftMask(^m)
+	// result: (RLDICR [encodePPC64RotateMask(0,m,64)] n)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			if x.Op != OpPPC64MOVDconst {
+				continue
+			}
+			m := auxIntToInt64(x.AuxInt)
+			n := v_1
+			if !(m != 0 && isPPC64ValidShiftMask(^m)) {
+				continue
+			}
+			v.reset(OpPPC64RLDICR)
+			v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+			v.AddArg(n)
+			return true
+		}
+		break
+	}
+	// match: (AND <t> x:(MOVDconst [m]) n)
+	// cond: t.Size() == 4 && isPPC64WordRotateMask(m)
+	// result: (RLWINM [encodePPC64RotateMask(0,m,32)] n)
+	for {
+		t := v.Type
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			if x.Op != OpPPC64MOVDconst {
+				continue
+			}
+			m := auxIntToInt64(x.AuxInt)
+			n := v_1
+			if !(t.Size() == 4 && isPPC64WordRotateMask(m)) {
+				continue
+			}
+			v.reset(OpPPC64RLWINM)
+			v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+			v.AddArg(n)
+			return true
+		}
+		break
+	}
+	return false
+}
+func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
+	v_0 := v.Args[0]
+	// match: (CMPconst [0] z:(ADD x y))
+	// cond: v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64ADD {
+			break
+		}
+		if !(v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
+	// match: (CMPconst [0] z:(AND x y))
+	// cond: v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64AND {
+			break
+		}
+		if !(v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
+	// match: (CMPconst [0] z:(ANDN x y))
+	// cond: v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64ANDN {
+			break
+		}
+		if !(v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
+	// match: (CMPconst [0] z:(OR x y))
+	// cond: v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64OR {
+			break
+		}
+		if !(v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
+	// match: (CMPconst [0] z:(SUB x y))
+	// cond: v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64SUB {
+			break
+		}
+		if !(v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
+	// match: (CMPconst [0] z:(NOR x y))
+	// cond: v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64NOR {
+			break
+		}
+		if !(v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
+	// match: (CMPconst [0] z:(XOR x y))
+	// cond: v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64XOR {
+			break
+		}
+		if !(v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
+	// match: (CMPconst [0] z:(NEG x))
+	// cond: v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64NEG {
+			break
+		}
+		if !(v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
+	// match: (CMPconst [0] z:(CNTLZD x))
+	// cond: v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64CNTLZD {
+			break
+		}
+		if !(v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
+	// match: (CMPconst [0] z:(ADDconst [c] x))
+	// cond: int64(int16(c)) == c && v.Block == z.Block
+	// result: (CMPconst [0] convertPPC64OpToOpCC(z))
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		z := v_0
+		if z.Op != OpPPC64ADDconst {
+			break
+		}
+		c := auxIntToInt64(z.AuxInt)
+		if !(int64(int16(c)) == c && v.Block == z.Block) {
+			break
+		}
+		v.reset(OpPPC64CMPconst)
+		v.AuxInt = int64ToAuxInt(0)
+		v.AddArg(convertPPC64OpToOpCC(z))
+		return true
+	}
+	// match: (CMPconst <t> [0] (Select0 z:(ADDCC x y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64ADDCC {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
+	// match: (CMPconst <t> [0] (Select0 z:(ANDCC x y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64ANDCC {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
+	// match: (CMPconst <t> [0] (Select0 z:(ANDNCC x y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64ANDNCC {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
+	// match: (CMPconst <t> [0] (Select0 z:(ORCC x y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64ORCC {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
+	// match: (CMPconst <t> [0] (Select0 z:(SUBCC x y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64SUBCC {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
+	// match: (CMPconst <t> [0] (Select0 z:(NORCC x y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64NORCC {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
+	// match: (CMPconst <t> [0] (Select0 z:(XORCC x y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64XORCC {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
+	// match: (CMPconst <t> [0] (Select0 z:(ADDCCconst y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64ADDCCconst {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
+	// match: (CMPconst <t> [0] (Select0 z:(NEGCC y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64NEGCC {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
+	}
+	// match: (CMPconst <t> [0] (Select0 z:(CNTLZDCC y)))
+	// result: (Select1 <t> z)
+	for {
+		t := v.Type
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+			break
+		}
+		z := v_0.Args[0]
+		if z.Op != OpPPC64CNTLZDCC {
+			break
+		}
+		v.reset(OpSelect1)
+		v.Type = t
+		v.AddArg(z)
+		return true
 	}
 	return false
 }
@@ -49,6 +535,29 @@
 	}
 	return false
 }
+func rewriteValuePPC64latelower_OpPPC64RLDICL(v *Value) bool {
+	v_0 := v.Args[0]
+	// match: (RLDICL [em] x:(SRDconst [s] a))
+	// cond: (em&0xFF0000) == 0
+	// result: (RLDICL [mergePPC64RLDICLandSRDconst(em, s)] a)
+	for {
+		em := auxIntToInt64(v.AuxInt)
+		x := v_0
+		if x.Op != OpPPC64SRDconst {
+			break
+		}
+		s := auxIntToInt64(x.AuxInt)
+		a := x.Args[0]
+		if !((em & 0xFF0000) == 0) {
+			break
+		}
+		v.reset(OpPPC64RLDICL)
+		v.AuxInt = int64ToAuxInt(mergePPC64RLDICLandSRDconst(em, s))
+		v.AddArg(a)
+		return true
+	}
+	return false
+}
 func rewriteValuePPC64latelower_OpPPC64SETBC(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
@@ -169,6 +678,28 @@
 	}
 	return false
 }
+func rewriteValuePPC64latelower_OpSelect0(v *Value) bool {
+	v_0 := v.Args[0]
+	// match: (Select0 z:(ANDCCconst [m] x))
+	// cond: z.Uses == 1 && isPPC64ValidShiftMask(m)
+	// result: (RLDICL [encodePPC64RotateMask(0,m,64)] x)
+	for {
+		z := v_0
+		if z.Op != OpPPC64ANDCCconst {
+			break
+		}
+		m := auxIntToInt64(z.AuxInt)
+		x := z.Args[0]
+		if !(z.Uses == 1 && isPPC64ValidShiftMask(m)) {
+			break
+		}
+		v.reset(OpPPC64RLDICL)
+		v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
 func rewriteBlockPPC64latelower(b *Block) bool {
 	return false
 }
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index ffbeb1d..52ddca1 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -132,9 +132,6 @@
 		return rewriteValueRISCV64_OpConstBool(v)
 	case OpConstNil:
 		return rewriteValueRISCV64_OpConstNil(v)
-	case OpConvert:
-		v.Op = OpRISCV64MOVconvert
-		return true
 	case OpCopysign:
 		v.Op = OpRISCV64FSGNJD
 		return true
@@ -432,6 +429,9 @@
 		return true
 	case OpPanicBounds:
 		return rewriteValueRISCV64_OpPanicBounds(v)
+	case OpPubBarrier:
+		v.Op = OpRISCV64LoweredPubBarrier
+		return true
 	case OpRISCV64ADD:
 		return rewriteValueRISCV64_OpRISCV64ADD(v)
 	case OpRISCV64ADDI:
@@ -440,14 +440,30 @@
 		return rewriteValueRISCV64_OpRISCV64AND(v)
 	case OpRISCV64ANDI:
 		return rewriteValueRISCV64_OpRISCV64ANDI(v)
+	case OpRISCV64FADDD:
+		return rewriteValueRISCV64_OpRISCV64FADDD(v)
+	case OpRISCV64FADDS:
+		return rewriteValueRISCV64_OpRISCV64FADDS(v)
 	case OpRISCV64FMADDD:
 		return rewriteValueRISCV64_OpRISCV64FMADDD(v)
+	case OpRISCV64FMADDS:
+		return rewriteValueRISCV64_OpRISCV64FMADDS(v)
 	case OpRISCV64FMSUBD:
 		return rewriteValueRISCV64_OpRISCV64FMSUBD(v)
+	case OpRISCV64FMSUBS:
+		return rewriteValueRISCV64_OpRISCV64FMSUBS(v)
 	case OpRISCV64FNMADDD:
 		return rewriteValueRISCV64_OpRISCV64FNMADDD(v)
+	case OpRISCV64FNMADDS:
+		return rewriteValueRISCV64_OpRISCV64FNMADDS(v)
 	case OpRISCV64FNMSUBD:
 		return rewriteValueRISCV64_OpRISCV64FNMSUBD(v)
+	case OpRISCV64FNMSUBS:
+		return rewriteValueRISCV64_OpRISCV64FNMSUBS(v)
+	case OpRISCV64FSUBD:
+		return rewriteValueRISCV64_OpRISCV64FSUBD(v)
+	case OpRISCV64FSUBS:
+		return rewriteValueRISCV64_OpRISCV64FSUBS(v)
 	case OpRISCV64MOVBUload:
 		return rewriteValueRISCV64_OpRISCV64MOVBUload(v)
 	case OpRISCV64MOVBUreg:
@@ -522,10 +538,14 @@
 		return rewriteValueRISCV64_OpRISCV64SRA(v)
 	case OpRISCV64SRAI:
 		return rewriteValueRISCV64_OpRISCV64SRAI(v)
+	case OpRISCV64SRAW:
+		return rewriteValueRISCV64_OpRISCV64SRAW(v)
 	case OpRISCV64SRL:
 		return rewriteValueRISCV64_OpRISCV64SRL(v)
 	case OpRISCV64SRLI:
 		return rewriteValueRISCV64_OpRISCV64SRLI(v)
+	case OpRISCV64SRLW:
+		return rewriteValueRISCV64_OpRISCV64SRLW(v)
 	case OpRISCV64SUB:
 		return rewriteValueRISCV64_OpRISCV64SUB(v)
 	case OpRISCV64SUBW:
@@ -541,10 +561,10 @@
 	case OpRotateLeft8:
 		return rewriteValueRISCV64_OpRotateLeft8(v)
 	case OpRound32F:
-		v.Op = OpCopy
+		v.Op = OpRISCV64LoweredRound32F
 		return true
 	case OpRound64F:
-		v.Op = OpCopy
+		v.Op = OpRISCV64LoweredRound64F
 		return true
 	case OpRsh16Ux16:
 		return rewriteValueRISCV64_OpRsh16Ux16(v)
@@ -1081,20 +1101,50 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (Eq32 x y)
+	// cond: x.Type.IsSigned()
+	// result: (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			y := v_1
+			if !(x.Type.IsSigned()) {
+				continue
+			}
+			v.reset(OpRISCV64SEQZ)
+			v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+			v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+			v1.AddArg(x)
+			v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+			v2.AddArg(y)
+			v0.AddArg2(v1, v2)
+			v.AddArg(v0)
+			return true
+		}
+		break
+	}
+	// match: (Eq32 x y)
+	// cond: !x.Type.IsSigned()
 	// result: (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
 	for {
-		x := v_0
-		y := v_1
-		v.reset(OpRISCV64SEQZ)
-		v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
-		v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v2.AddArg(y)
-		v0.AddArg2(v1, v2)
-		v.AddArg(v0)
-		return true
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			y := v_1
+			if !(!x.Type.IsSigned()) {
+				continue
+			}
+			v.reset(OpRISCV64SEQZ)
+			v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+			v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+			v1.AddArg(x)
+			v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+			v2.AddArg(y)
+			v0.AddArg2(v1, v2)
+			v.AddArg(v0)
+			return true
+		}
+		break
 	}
+	return false
 }
 func rewriteValueRISCV64_OpEq64(v *Value) bool {
 	v_1 := v.Args[1]
@@ -2942,17 +2992,13 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (Neq16 x y)
-	// result: (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+	// result: (Not (Eq16 x y))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpRISCV64SNEZ)
-		v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
-		v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
-		v2.AddArg(y)
-		v0.AddArg2(v1, v2)
+		v.reset(OpNot)
+		v0 := b.NewValue0(v.Pos, OpEq16, typ.Bool)
+		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
 	}
@@ -2963,17 +3009,13 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (Neq32 x y)
-	// result: (SNEZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+	// result: (Not (Eq32 x y))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpRISCV64SNEZ)
-		v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
-		v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v2.AddArg(y)
-		v0.AddArg2(v1, v2)
+		v.reset(OpNot)
+		v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
 	}
@@ -2982,13 +3024,14 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
+	typ := &b.Func.Config.Types
 	// match: (Neq64 x y)
-	// result: (SNEZ (SUB <x.Type> x y))
+	// result: (Not (Eq64 x y))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpRISCV64SNEZ)
-		v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+		v.reset(OpNot)
+		v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
@@ -3000,17 +3043,13 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (Neq8 x y)
-	// result: (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+	// result: (Not (Eq8 x y))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpRISCV64SNEZ)
-		v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
-		v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
-		v2.AddArg(y)
-		v0.AddArg2(v1, v2)
+		v.reset(OpNot)
+		v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool)
+		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
 	}
@@ -3038,12 +3077,12 @@
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (NeqPtr x y)
-	// result: (SNEZ (SUB <typ.Uintptr> x y))
+	// result: (Not (EqPtr x y))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpRISCV64SNEZ)
-		v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Uintptr)
+		v.reset(OpNot)
+		v0 := b.NewValue0(v.Pos, OpEqPtr, typ.Bool)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
@@ -3316,6 +3355,56 @@
 	}
 	return false
 }
+func rewriteValueRISCV64_OpRISCV64FADDD(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (FADDD a (FMULD x y))
+	// cond: a.Block.Func.useFMA(v)
+	// result: (FMADDD x y a)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			a := v_0
+			if v_1.Op != OpRISCV64FMULD {
+				continue
+			}
+			y := v_1.Args[1]
+			x := v_1.Args[0]
+			if !(a.Block.Func.useFMA(v)) {
+				continue
+			}
+			v.reset(OpRISCV64FMADDD)
+			v.AddArg3(x, y, a)
+			return true
+		}
+		break
+	}
+	return false
+}
+func rewriteValueRISCV64_OpRISCV64FADDS(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (FADDS a (FMULS x y))
+	// cond: a.Block.Func.useFMA(v)
+	// result: (FMADDS x y a)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			a := v_0
+			if v_1.Op != OpRISCV64FMULS {
+				continue
+			}
+			y := v_1.Args[1]
+			x := v_1.Args[0]
+			if !(a.Block.Func.useFMA(v)) {
+				continue
+			}
+			v.reset(OpRISCV64FMADDS)
+			v.AddArg3(x, y, a)
+			return true
+		}
+		break
+	}
+	return false
+}
 func rewriteValueRISCV64_OpRISCV64FMADDD(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
@@ -3361,6 +3450,51 @@
 	}
 	return false
 }
+func rewriteValueRISCV64_OpRISCV64FMADDS(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (FMADDS neg:(FNEGS x) y z)
+	// cond: neg.Uses == 1
+	// result: (FNMSUBS x y z)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			neg := v_0
+			if neg.Op != OpRISCV64FNEGS {
+				continue
+			}
+			x := neg.Args[0]
+			y := v_1
+			z := v_2
+			if !(neg.Uses == 1) {
+				continue
+			}
+			v.reset(OpRISCV64FNMSUBS)
+			v.AddArg3(x, y, z)
+			return true
+		}
+		break
+	}
+	// match: (FMADDS x y neg:(FNEGS z))
+	// cond: neg.Uses == 1
+	// result: (FMSUBS x y z)
+	for {
+		x := v_0
+		y := v_1
+		neg := v_2
+		if neg.Op != OpRISCV64FNEGS {
+			break
+		}
+		z := neg.Args[0]
+		if !(neg.Uses == 1) {
+			break
+		}
+		v.reset(OpRISCV64FMSUBS)
+		v.AddArg3(x, y, z)
+		return true
+	}
+	return false
+}
 func rewriteValueRISCV64_OpRISCV64FMSUBD(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
@@ -3406,6 +3540,51 @@
 	}
 	return false
 }
+func rewriteValueRISCV64_OpRISCV64FMSUBS(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (FMSUBS neg:(FNEGS x) y z)
+	// cond: neg.Uses == 1
+	// result: (FNMADDS x y z)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			neg := v_0
+			if neg.Op != OpRISCV64FNEGS {
+				continue
+			}
+			x := neg.Args[0]
+			y := v_1
+			z := v_2
+			if !(neg.Uses == 1) {
+				continue
+			}
+			v.reset(OpRISCV64FNMADDS)
+			v.AddArg3(x, y, z)
+			return true
+		}
+		break
+	}
+	// match: (FMSUBS x y neg:(FNEGS z))
+	// cond: neg.Uses == 1
+	// result: (FMADDS x y z)
+	for {
+		x := v_0
+		y := v_1
+		neg := v_2
+		if neg.Op != OpRISCV64FNEGS {
+			break
+		}
+		z := neg.Args[0]
+		if !(neg.Uses == 1) {
+			break
+		}
+		v.reset(OpRISCV64FMADDS)
+		v.AddArg3(x, y, z)
+		return true
+	}
+	return false
+}
 func rewriteValueRISCV64_OpRISCV64FNMADDD(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
@@ -3451,6 +3630,51 @@
 	}
 	return false
 }
+func rewriteValueRISCV64_OpRISCV64FNMADDS(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (FNMADDS neg:(FNEGS x) y z)
+	// cond: neg.Uses == 1
+	// result: (FMSUBS x y z)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			neg := v_0
+			if neg.Op != OpRISCV64FNEGS {
+				continue
+			}
+			x := neg.Args[0]
+			y := v_1
+			z := v_2
+			if !(neg.Uses == 1) {
+				continue
+			}
+			v.reset(OpRISCV64FMSUBS)
+			v.AddArg3(x, y, z)
+			return true
+		}
+		break
+	}
+	// match: (FNMADDS x y neg:(FNEGS z))
+	// cond: neg.Uses == 1
+	// result: (FNMSUBS x y z)
+	for {
+		x := v_0
+		y := v_1
+		neg := v_2
+		if neg.Op != OpRISCV64FNEGS {
+			break
+		}
+		z := neg.Args[0]
+		if !(neg.Uses == 1) {
+			break
+		}
+		v.reset(OpRISCV64FNMSUBS)
+		v.AddArg3(x, y, z)
+		return true
+	}
+	return false
+}
 func rewriteValueRISCV64_OpRISCV64FNMSUBD(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
@@ -3496,6 +3720,129 @@
 	}
 	return false
 }
+func rewriteValueRISCV64_OpRISCV64FNMSUBS(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (FNMSUBS neg:(FNEGS x) y z)
+	// cond: neg.Uses == 1
+	// result: (FMADDS x y z)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			neg := v_0
+			if neg.Op != OpRISCV64FNEGS {
+				continue
+			}
+			x := neg.Args[0]
+			y := v_1
+			z := v_2
+			if !(neg.Uses == 1) {
+				continue
+			}
+			v.reset(OpRISCV64FMADDS)
+			v.AddArg3(x, y, z)
+			return true
+		}
+		break
+	}
+	// match: (FNMSUBS x y neg:(FNEGS z))
+	// cond: neg.Uses == 1
+	// result: (FNMADDS x y z)
+	for {
+		x := v_0
+		y := v_1
+		neg := v_2
+		if neg.Op != OpRISCV64FNEGS {
+			break
+		}
+		z := neg.Args[0]
+		if !(neg.Uses == 1) {
+			break
+		}
+		v.reset(OpRISCV64FNMADDS)
+		v.AddArg3(x, y, z)
+		return true
+	}
+	return false
+}
+func rewriteValueRISCV64_OpRISCV64FSUBD(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (FSUBD a (FMULD x y))
+	// cond: a.Block.Func.useFMA(v)
+	// result: (FNMSUBD x y a)
+	for {
+		a := v_0
+		if v_1.Op != OpRISCV64FMULD {
+			break
+		}
+		y := v_1.Args[1]
+		x := v_1.Args[0]
+		if !(a.Block.Func.useFMA(v)) {
+			break
+		}
+		v.reset(OpRISCV64FNMSUBD)
+		v.AddArg3(x, y, a)
+		return true
+	}
+	// match: (FSUBD (FMULD x y) a)
+	// cond: a.Block.Func.useFMA(v)
+	// result: (FMSUBD x y a)
+	for {
+		if v_0.Op != OpRISCV64FMULD {
+			break
+		}
+		y := v_0.Args[1]
+		x := v_0.Args[0]
+		a := v_1
+		if !(a.Block.Func.useFMA(v)) {
+			break
+		}
+		v.reset(OpRISCV64FMSUBD)
+		v.AddArg3(x, y, a)
+		return true
+	}
+	return false
+}
+func rewriteValueRISCV64_OpRISCV64FSUBS(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (FSUBS a (FMULS x y))
+	// cond: a.Block.Func.useFMA(v)
+	// result: (FNMSUBS x y a)
+	for {
+		a := v_0
+		if v_1.Op != OpRISCV64FMULS {
+			break
+		}
+		y := v_1.Args[1]
+		x := v_1.Args[0]
+		if !(a.Block.Func.useFMA(v)) {
+			break
+		}
+		v.reset(OpRISCV64FNMSUBS)
+		v.AddArg3(x, y, a)
+		return true
+	}
+	// match: (FSUBS (FMULS x y) a)
+	// cond: a.Block.Func.useFMA(v)
+	// result: (FMSUBS x y a)
+	for {
+		if v_0.Op != OpRISCV64FMULS {
+			break
+		}
+		y := v_0.Args[1]
+		x := v_0.Args[0]
+		a := v_1
+		if !(a.Block.Func.useFMA(v)) {
+			break
+		}
+		v.reset(OpRISCV64FMSUBS)
+		v.AddArg3(x, y, a)
+		return true
+	}
+	return false
+}
 func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
@@ -5930,6 +6277,85 @@
 }
 func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool {
 	v_0 := v.Args[0]
+	b := v.Block
+	// match: (SRAI <t> [x] (MOVWreg y))
+	// cond: x >= 0 && x <= 31
+	// result: (SRAIW <t> [int64(x)] y)
+	for {
+		t := v.Type
+		x := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpRISCV64MOVWreg {
+			break
+		}
+		y := v_0.Args[0]
+		if !(x >= 0 && x <= 31) {
+			break
+		}
+		v.reset(OpRISCV64SRAIW)
+		v.Type = t
+		v.AuxInt = int64ToAuxInt(int64(x))
+		v.AddArg(y)
+		return true
+	}
+	// match: (SRAI <t> [x] (MOVBreg y))
+	// cond: x >= 8
+	// result: (SRAI [63] (SLLI <t> [56] y))
+	for {
+		t := v.Type
+		x := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpRISCV64MOVBreg {
+			break
+		}
+		y := v_0.Args[0]
+		if !(x >= 8) {
+			break
+		}
+		v.reset(OpRISCV64SRAI)
+		v.AuxInt = int64ToAuxInt(63)
+		v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+		v0.AuxInt = int64ToAuxInt(56)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	// match: (SRAI <t> [x] (MOVHreg y))
+	// cond: x >= 16
+	// result: (SRAI [63] (SLLI <t> [48] y))
+	for {
+		t := v.Type
+		x := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpRISCV64MOVHreg {
+			break
+		}
+		y := v_0.Args[0]
+		if !(x >= 16) {
+			break
+		}
+		v.reset(OpRISCV64SRAI)
+		v.AuxInt = int64ToAuxInt(63)
+		v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+		v0.AuxInt = int64ToAuxInt(48)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	// match: (SRAI <t> [x] (MOVWreg y))
+	// cond: x >= 32
+	// result: (SRAIW [31] y)
+	for {
+		x := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpRISCV64MOVWreg {
+			break
+		}
+		y := v_0.Args[0]
+		if !(x >= 32) {
+			break
+		}
+		v.reset(OpRISCV64SRAIW)
+		v.AuxInt = int64ToAuxInt(31)
+		v.AddArg(y)
+		return true
+	}
 	// match: (SRAI [x] (MOVDconst [y]))
 	// result: (MOVDconst [int64(y) >> uint32(x)])
 	for {
@@ -5944,6 +6370,24 @@
 	}
 	return false
 }
+func rewriteValueRISCV64_OpRISCV64SRAW(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (SRAW x (MOVDconst [val]))
+	// result: (SRAIW [int64(val&31)] x)
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVDconst {
+			break
+		}
+		val := auxIntToInt64(v_1.AuxInt)
+		v.reset(OpRISCV64SRAIW)
+		v.AuxInt = int64ToAuxInt(int64(val & 31))
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
 func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
@@ -5964,6 +6408,76 @@
 }
 func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool {
 	v_0 := v.Args[0]
+	// match: (SRLI <t> [x] (MOVWUreg y))
+	// cond: x >= 0 && x <= 31
+	// result: (SRLIW <t> [int64(x)] y)
+	for {
+		t := v.Type
+		x := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpRISCV64MOVWUreg {
+			break
+		}
+		y := v_0.Args[0]
+		if !(x >= 0 && x <= 31) {
+			break
+		}
+		v.reset(OpRISCV64SRLIW)
+		v.Type = t
+		v.AuxInt = int64ToAuxInt(int64(x))
+		v.AddArg(y)
+		return true
+	}
+	// match: (SRLI <t> [x] (MOVBUreg y))
+	// cond: x >= 8
+	// result: (MOVDconst <t> [0])
+	for {
+		t := v.Type
+		x := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpRISCV64MOVBUreg {
+			break
+		}
+		if !(x >= 8) {
+			break
+		}
+		v.reset(OpRISCV64MOVDconst)
+		v.Type = t
+		v.AuxInt = int64ToAuxInt(0)
+		return true
+	}
+	// match: (SRLI <t> [x] (MOVHUreg y))
+	// cond: x >= 16
+	// result: (MOVDconst <t> [0])
+	for {
+		t := v.Type
+		x := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpRISCV64MOVHUreg {
+			break
+		}
+		if !(x >= 16) {
+			break
+		}
+		v.reset(OpRISCV64MOVDconst)
+		v.Type = t
+		v.AuxInt = int64ToAuxInt(0)
+		return true
+	}
+	// match: (SRLI <t> [x] (MOVWUreg y))
+	// cond: x >= 32
+	// result: (MOVDconst <t> [0])
+	for {
+		t := v.Type
+		x := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpRISCV64MOVWUreg {
+			break
+		}
+		if !(x >= 32) {
+			break
+		}
+		v.reset(OpRISCV64MOVDconst)
+		v.Type = t
+		v.AuxInt = int64ToAuxInt(0)
+		return true
+	}
 	// match: (SRLI [x] (MOVDconst [y]))
 	// result: (MOVDconst [int64(uint64(y) >> uint32(x))])
 	for {
@@ -5978,6 +6492,24 @@
 	}
 	return false
 }
+func rewriteValueRISCV64_OpRISCV64SRLW(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (SRLW x (MOVDconst [val]))
+	// result: (SRLIW [int64(val&31)] x)
+	for {
+		x := v_0
+		if v_1.Op != OpRISCV64MOVDconst {
+			break
+		}
+		val := auxIntToInt64(v_1.AuxInt)
+		v.reset(OpRISCV64SRLIW)
+		v.AuxInt = int64ToAuxInt(int64(val & 31))
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
 func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
@@ -6594,7 +7126,7 @@
 	typ := &b.Func.Config.Types
 	// match: (Rsh32Ux16 <t> x y)
 	// cond: !shiftIsBounded(v)
-	// result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+	// result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
 	for {
 		t := v.Type
 		x := v_0
@@ -6603,33 +7135,29 @@
 			break
 		}
 		v.reset(OpRISCV64AND)
-		v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
-		v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v1.AddArg(x)
-		v0.AddArg2(v1, y)
-		v2 := b.NewValue0(v.Pos, OpNeg32, t)
-		v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
-		v3.AuxInt = int64ToAuxInt(64)
-		v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
-		v4.AddArg(y)
-		v3.AddArg(v4)
+		v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+		v0.AddArg2(x, y)
+		v1 := b.NewValue0(v.Pos, OpNeg32, t)
+		v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+		v2.AuxInt = int64ToAuxInt(32)
+		v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+		v3.AddArg(y)
 		v2.AddArg(v3)
-		v.AddArg2(v0, v2)
+		v1.AddArg(v2)
+		v.AddArg2(v0, v1)
 		return true
 	}
 	// match: (Rsh32Ux16 x y)
 	// cond: shiftIsBounded(v)
-	// result: (SRL (ZeroExt32to64 x) y)
+	// result: (SRLW x y)
 	for {
 		x := v_0
 		y := v_1
 		if !(shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRL)
-		v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v0.AddArg(x)
-		v.AddArg2(v0, y)
+		v.reset(OpRISCV64SRLW)
+		v.AddArg2(x, y)
 		return true
 	}
 	return false
@@ -6641,7 +7169,7 @@
 	typ := &b.Func.Config.Types
 	// match: (Rsh32Ux32 <t> x y)
 	// cond: !shiftIsBounded(v)
-	// result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+	// result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
 	for {
 		t := v.Type
 		x := v_0
@@ -6650,33 +7178,29 @@
 			break
 		}
 		v.reset(OpRISCV64AND)
-		v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
-		v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v1.AddArg(x)
-		v0.AddArg2(v1, y)
-		v2 := b.NewValue0(v.Pos, OpNeg32, t)
-		v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
-		v3.AuxInt = int64ToAuxInt(64)
-		v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v4.AddArg(y)
-		v3.AddArg(v4)
+		v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+		v0.AddArg2(x, y)
+		v1 := b.NewValue0(v.Pos, OpNeg32, t)
+		v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+		v2.AuxInt = int64ToAuxInt(32)
+		v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+		v3.AddArg(y)
 		v2.AddArg(v3)
-		v.AddArg2(v0, v2)
+		v1.AddArg(v2)
+		v.AddArg2(v0, v1)
 		return true
 	}
 	// match: (Rsh32Ux32 x y)
 	// cond: shiftIsBounded(v)
-	// result: (SRL (ZeroExt32to64 x) y)
+	// result: (SRLW x y)
 	for {
 		x := v_0
 		y := v_1
 		if !(shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRL)
-		v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v0.AddArg(x)
-		v.AddArg2(v0, y)
+		v.reset(OpRISCV64SRLW)
+		v.AddArg2(x, y)
 		return true
 	}
 	return false
@@ -6685,10 +7209,9 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
 	// match: (Rsh32Ux64 <t> x y)
 	// cond: !shiftIsBounded(v)
-	// result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+	// result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] y)))
 	for {
 		t := v.Type
 		x := v_0
@@ -6697,31 +7220,27 @@
 			break
 		}
 		v.reset(OpRISCV64AND)
-		v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
-		v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v1.AddArg(x)
-		v0.AddArg2(v1, y)
-		v2 := b.NewValue0(v.Pos, OpNeg32, t)
-		v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
-		v3.AuxInt = int64ToAuxInt(64)
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg2(v0, v2)
+		v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+		v0.AddArg2(x, y)
+		v1 := b.NewValue0(v.Pos, OpNeg32, t)
+		v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+		v2.AuxInt = int64ToAuxInt(32)
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg2(v0, v1)
 		return true
 	}
 	// match: (Rsh32Ux64 x y)
 	// cond: shiftIsBounded(v)
-	// result: (SRL (ZeroExt32to64 x) y)
+	// result: (SRLW x y)
 	for {
 		x := v_0
 		y := v_1
 		if !(shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRL)
-		v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v0.AddArg(x)
-		v.AddArg2(v0, y)
+		v.reset(OpRISCV64SRLW)
+		v.AddArg2(x, y)
 		return true
 	}
 	return false
@@ -6733,7 +7252,7 @@
 	typ := &b.Func.Config.Types
 	// match: (Rsh32Ux8 <t> x y)
 	// cond: !shiftIsBounded(v)
-	// result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+	// result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
 	for {
 		t := v.Type
 		x := v_0
@@ -6742,33 +7261,29 @@
 			break
 		}
 		v.reset(OpRISCV64AND)
-		v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
-		v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v1.AddArg(x)
-		v0.AddArg2(v1, y)
-		v2 := b.NewValue0(v.Pos, OpNeg32, t)
-		v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
-		v3.AuxInt = int64ToAuxInt(64)
-		v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
-		v4.AddArg(y)
-		v3.AddArg(v4)
+		v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+		v0.AddArg2(x, y)
+		v1 := b.NewValue0(v.Pos, OpNeg32, t)
+		v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+		v2.AuxInt = int64ToAuxInt(32)
+		v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+		v3.AddArg(y)
 		v2.AddArg(v3)
-		v.AddArg2(v0, v2)
+		v1.AddArg(v2)
+		v.AddArg2(v0, v1)
 		return true
 	}
 	// match: (Rsh32Ux8 x y)
 	// cond: shiftIsBounded(v)
-	// result: (SRL (ZeroExt32to64 x) y)
+	// result: (SRLW x y)
 	for {
 		x := v_0
 		y := v_1
 		if !(shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRL)
-		v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v0.AddArg(x)
-		v.AddArg2(v0, y)
+		v.reset(OpRISCV64SRLW)
+		v.AddArg2(x, y)
 		return true
 	}
 	return false
@@ -6780,7 +7295,7 @@
 	typ := &b.Func.Config.Types
 	// match: (Rsh32x16 <t> x y)
 	// cond: !shiftIsBounded(v)
-	// result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+	// result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
 	for {
 		t := v.Type
 		x := v_0
@@ -6788,36 +7303,32 @@
 		if !(!shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRA)
+		v.reset(OpRISCV64SRAW)
 		v.Type = t
-		v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
-		v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
-		v2.AuxInt = int64ToAuxInt(-1)
-		v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
-		v3.AuxInt = int64ToAuxInt(64)
-		v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
-		v4.AddArg(y)
-		v3.AddArg(v4)
+		v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+		v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+		v1.AuxInt = int64ToAuxInt(-1)
+		v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+		v2.AuxInt = int64ToAuxInt(32)
+		v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+		v3.AddArg(y)
 		v2.AddArg(v3)
-		v1.AddArg2(y, v2)
-		v.AddArg2(v0, v1)
+		v1.AddArg(v2)
+		v0.AddArg2(y, v1)
+		v.AddArg2(x, v0)
 		return true
 	}
 	// match: (Rsh32x16 x y)
 	// cond: shiftIsBounded(v)
-	// result: (SRA (SignExt32to64 x) y)
+	// result: (SRAW x y)
 	for {
 		x := v_0
 		y := v_1
 		if !(shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRA)
-		v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
-		v0.AddArg(x)
-		v.AddArg2(v0, y)
+		v.reset(OpRISCV64SRAW)
+		v.AddArg2(x, y)
 		return true
 	}
 	return false
@@ -6829,7 +7340,7 @@
 	typ := &b.Func.Config.Types
 	// match: (Rsh32x32 <t> x y)
 	// cond: !shiftIsBounded(v)
-	// result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+	// result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
 	for {
 		t := v.Type
 		x := v_0
@@ -6837,36 +7348,32 @@
 		if !(!shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRA)
+		v.reset(OpRISCV64SRAW)
 		v.Type = t
-		v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
-		v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
-		v2.AuxInt = int64ToAuxInt(-1)
-		v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
-		v3.AuxInt = int64ToAuxInt(64)
-		v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
-		v4.AddArg(y)
-		v3.AddArg(v4)
+		v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+		v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+		v1.AuxInt = int64ToAuxInt(-1)
+		v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+		v2.AuxInt = int64ToAuxInt(32)
+		v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+		v3.AddArg(y)
 		v2.AddArg(v3)
-		v1.AddArg2(y, v2)
-		v.AddArg2(v0, v1)
+		v1.AddArg(v2)
+		v0.AddArg2(y, v1)
+		v.AddArg2(x, v0)
 		return true
 	}
 	// match: (Rsh32x32 x y)
 	// cond: shiftIsBounded(v)
-	// result: (SRA (SignExt32to64 x) y)
+	// result: (SRAW x y)
 	for {
 		x := v_0
 		y := v_1
 		if !(shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRA)
-		v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
-		v0.AddArg(x)
-		v.AddArg2(v0, y)
+		v.reset(OpRISCV64SRAW)
+		v.AddArg2(x, y)
 		return true
 	}
 	return false
@@ -6875,10 +7382,9 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
 	// match: (Rsh32x64 <t> x y)
 	// cond: !shiftIsBounded(v)
-	// result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+	// result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
 	for {
 		t := v.Type
 		x := v_0
@@ -6886,34 +7392,30 @@
 		if !(!shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRA)
+		v.reset(OpRISCV64SRAW)
 		v.Type = t
-		v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
-		v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
-		v2.AuxInt = int64ToAuxInt(-1)
-		v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
-		v3.AuxInt = int64ToAuxInt(64)
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg2(y, v2)
-		v.AddArg2(v0, v1)
+		v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+		v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+		v1.AuxInt = int64ToAuxInt(-1)
+		v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+		v2.AuxInt = int64ToAuxInt(32)
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v0.AddArg2(y, v1)
+		v.AddArg2(x, v0)
 		return true
 	}
 	// match: (Rsh32x64 x y)
 	// cond: shiftIsBounded(v)
-	// result: (SRA (SignExt32to64 x) y)
+	// result: (SRAW x y)
 	for {
 		x := v_0
 		y := v_1
 		if !(shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRA)
-		v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
-		v0.AddArg(x)
-		v.AddArg2(v0, y)
+		v.reset(OpRISCV64SRAW)
+		v.AddArg2(x, y)
 		return true
 	}
 	return false
@@ -6925,7 +7427,7 @@
 	typ := &b.Func.Config.Types
 	// match: (Rsh32x8 <t> x y)
 	// cond: !shiftIsBounded(v)
-	// result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+	// result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
 	for {
 		t := v.Type
 		x := v_0
@@ -6933,36 +7435,32 @@
 		if !(!shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRA)
+		v.reset(OpRISCV64SRAW)
 		v.Type = t
-		v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
-		v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
-		v2.AuxInt = int64ToAuxInt(-1)
-		v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
-		v3.AuxInt = int64ToAuxInt(64)
-		v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
-		v4.AddArg(y)
-		v3.AddArg(v4)
+		v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+		v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+		v1.AuxInt = int64ToAuxInt(-1)
+		v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+		v2.AuxInt = int64ToAuxInt(32)
+		v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+		v3.AddArg(y)
 		v2.AddArg(v3)
-		v1.AddArg2(y, v2)
-		v.AddArg2(v0, v1)
+		v1.AddArg(v2)
+		v0.AddArg2(y, v1)
+		v.AddArg2(x, v0)
 		return true
 	}
 	// match: (Rsh32x8 x y)
 	// cond: shiftIsBounded(v)
-	// result: (SRA (SignExt32to64 x) y)
+	// result: (SRAW x y)
 	for {
 		x := v_0
 		y := v_1
 		if !(shiftIsBounded(v)) {
 			break
 		}
-		v.reset(OpRISCV64SRA)
-		v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
-		v0.AddArg(x)
-		v.AddArg2(v0, y)
+		v.reset(OpRISCV64SRAW)
+		v.AddArg2(x, y)
 		return true
 	}
 	return false
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index a3d6218..c2342c9 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -9060,7 +9060,7 @@
 		return true
 	}
 	// match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
-	// cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+	// cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)
 	// result: (STMG2 [i-8] {s} p w0 w1 mem)
 	for {
 		i := auxIntToInt32(v.AuxInt)
@@ -9076,7 +9076,7 @@
 			break
 		}
 		w0 := x.Args[1]
-		if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+		if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpS390XSTMG2)
@@ -9086,7 +9086,7 @@
 		return true
 	}
 	// match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
-	// cond: x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)
+	// cond: x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)
 	// result: (STMG3 [i-16] {s} p w0 w1 w2 mem)
 	for {
 		i := auxIntToInt32(v.AuxInt)
@@ -9103,7 +9103,7 @@
 		}
 		w0 := x.Args[1]
 		w1 := x.Args[2]
-		if !(x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)) {
+		if !(x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpS390XSTMG3)
@@ -9113,7 +9113,7 @@
 		return true
 	}
 	// match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
-	// cond: x.Uses == 1 && is20Bit(int64(i)-24) && clobber(x)
+	// cond: x.Uses == 1 && is20Bit(int64(i)-24) && setPos(v, x.Pos) && clobber(x)
 	// result: (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
 	for {
 		i := auxIntToInt32(v.AuxInt)
@@ -9131,7 +9131,7 @@
 		w0 := x.Args[1]
 		w1 := x.Args[2]
 		w2 := x.Args[3]
-		if !(x.Uses == 1 && is20Bit(int64(i)-24) && clobber(x)) {
+		if !(x.Uses == 1 && is20Bit(int64(i)-24) && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpS390XSTMG4)
@@ -10595,7 +10595,7 @@
 		return true
 	}
 	// match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
-	// cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && clobber(x)
+	// cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && setPos(v, x.Pos) && clobber(x)
 	// result: (STM2 [i-4] {s} p w0 w1 mem)
 	for {
 		i := auxIntToInt32(v.AuxInt)
@@ -10611,7 +10611,7 @@
 			break
 		}
 		w0 := x.Args[1]
-		if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && clobber(x)) {
+		if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpS390XSTM2)
@@ -10621,7 +10621,7 @@
 		return true
 	}
 	// match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
-	// cond: x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+	// cond: x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)
 	// result: (STM3 [i-8] {s} p w0 w1 w2 mem)
 	for {
 		i := auxIntToInt32(v.AuxInt)
@@ -10638,7 +10638,7 @@
 		}
 		w0 := x.Args[1]
 		w1 := x.Args[2]
-		if !(x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+		if !(x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpS390XSTM3)
@@ -10648,7 +10648,7 @@
 		return true
 	}
 	// match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
-	// cond: x.Uses == 1 && is20Bit(int64(i)-12) && clobber(x)
+	// cond: x.Uses == 1 && is20Bit(int64(i)-12) && setPos(v, x.Pos) && clobber(x)
 	// result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
 	for {
 		i := auxIntToInt32(v.AuxInt)
@@ -10666,7 +10666,7 @@
 		w0 := x.Args[1]
 		w1 := x.Args[2]
 		w2 := x.Args[3]
-		if !(x.Uses == 1 && is20Bit(int64(i)-12) && clobber(x)) {
+		if !(x.Uses == 1 && is20Bit(int64(i)-12) && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpS390XSTM4)
@@ -13107,7 +13107,7 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
-	// cond: x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+	// cond: x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)
 	// result: (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
 	for {
 		i := auxIntToInt32(v.AuxInt)
@@ -13125,7 +13125,7 @@
 		}
 		w0 := x.Args[1]
 		w1 := x.Args[2]
-		if !(x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+		if !(x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpS390XSTM4)
@@ -13162,7 +13162,7 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
-	// cond: x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)
+	// cond: x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)
 	// result: (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
 	for {
 		i := auxIntToInt32(v.AuxInt)
@@ -13180,7 +13180,7 @@
 		}
 		w0 := x.Args[1]
 		w1 := x.Args[2]
-		if !(x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)) {
+		if !(x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)) {
 			break
 		}
 		v.reset(OpS390XSTMG4)
diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go
index 5c04708..3c481ad 100644
--- a/src/cmd/compile/internal/ssa/rewritedec.go
+++ b/src/cmd/compile/internal/ssa/rewritedec.go
@@ -6,12 +6,18 @@
 
 func rewriteValuedec(v *Value) bool {
 	switch v.Op {
+	case OpArrayMake1:
+		return rewriteValuedec_OpArrayMake1(v)
+	case OpArraySelect:
+		return rewriteValuedec_OpArraySelect(v)
 	case OpComplexImag:
 		return rewriteValuedec_OpComplexImag(v)
 	case OpComplexReal:
 		return rewriteValuedec_OpComplexReal(v)
 	case OpIData:
 		return rewriteValuedec_OpIData(v)
+	case OpIMake:
+		return rewriteValuedec_OpIMake(v)
 	case OpITab:
 		return rewriteValuedec_OpITab(v)
 	case OpLoad:
@@ -30,11 +36,92 @@
 		return rewriteValuedec_OpStringLen(v)
 	case OpStringPtr:
 		return rewriteValuedec_OpStringPtr(v)
+	case OpStructMake1:
+		return rewriteValuedec_OpStructMake1(v)
+	case OpStructSelect:
+		return rewriteValuedec_OpStructSelect(v)
+	}
+	return false
+}
+func rewriteValuedec_OpArrayMake1(v *Value) bool {
+	v_0 := v.Args[0]
+	// match: (ArrayMake1 x)
+	// cond: x.Type.IsPtrShaped()
+	// result: x
+	for {
+		x := v_0
+		if !(x.Type.IsPtrShaped()) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	return false
+}
+func rewriteValuedec_OpArraySelect(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (ArraySelect [0] x)
+	// cond: x.Type.IsPtrShaped()
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		x := v_0
+		if !(x.Type.IsPtrShaped()) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	// match: (ArraySelect (ArrayMake1 x))
+	// result: x
+	for {
+		if v_0.Op != OpArrayMake1 {
+			break
+		}
+		x := v_0.Args[0]
+		v.copyOf(x)
+		return true
+	}
+	// match: (ArraySelect [0] (IData x))
+	// result: (IData x)
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+			break
+		}
+		x := v_0.Args[0]
+		v.reset(OpIData)
+		v.AddArg(x)
+		return true
+	}
+	// match: (ArraySelect [i] x:(Load <t> ptr mem))
+	// result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.Elem().Size()*i] ptr) mem)
+	for {
+		i := auxIntToInt64(v.AuxInt)
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, v.Type)
+		v.copyOf(v0)
+		v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo())
+		v1.AuxInt = int64ToAuxInt(t.Elem().Size() * i)
+		v1.AddArg(ptr)
+		v0.AddArg2(v1, mem)
+		return true
 	}
 	return false
 }
 func rewriteValuedec_OpComplexImag(v *Value) bool {
 	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
 	// match: (ComplexImag (ComplexMake _ imag ))
 	// result: imag
 	for {
@@ -45,10 +132,58 @@
 		v.copyOf(imag)
 		return true
 	}
+	// match: (ComplexImag x:(Load <t> ptr mem))
+	// cond: t.IsComplex() && t.Size() == 8
+	// result: @x.Block (Load <typ.Float32> (OffPtr <typ.Float32Ptr> [4] ptr) mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsComplex() && t.Size() == 8) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+		v.copyOf(v0)
+		v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
+		v1.AuxInt = int64ToAuxInt(4)
+		v1.AddArg(ptr)
+		v0.AddArg2(v1, mem)
+		return true
+	}
+	// match: (ComplexImag x:(Load <t> ptr mem))
+	// cond: t.IsComplex() && t.Size() == 16
+	// result: @x.Block (Load <typ.Float64> (OffPtr <typ.Float64Ptr> [8] ptr) mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsComplex() && t.Size() == 16) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+		v.copyOf(v0)
+		v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
+		v1.AuxInt = int64ToAuxInt(8)
+		v1.AddArg(ptr)
+		v0.AddArg2(v1, mem)
+		return true
+	}
 	return false
 }
 func rewriteValuedec_OpComplexReal(v *Value) bool {
 	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
 	// match: (ComplexReal (ComplexMake real _ ))
 	// result: real
 	for {
@@ -59,10 +194,53 @@
 		v.copyOf(real)
 		return true
 	}
+	// match: (ComplexReal x:(Load <t> ptr mem))
+	// cond: t.IsComplex() && t.Size() == 8
+	// result: @x.Block (Load <typ.Float32> ptr mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsComplex() && t.Size() == 8) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+		v.copyOf(v0)
+		v0.AddArg2(ptr, mem)
+		return true
+	}
+	// match: (ComplexReal x:(Load <t> ptr mem))
+	// cond: t.IsComplex() && t.Size() == 16
+	// result: @x.Block (Load <typ.Float64> ptr mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsComplex() && t.Size() == 16) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+		v.copyOf(v0)
+		v0.AddArg2(ptr, mem)
+		return true
+	}
 	return false
 }
 func rewriteValuedec_OpIData(v *Value) bool {
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
+	typ := &b.Func.Config.Types
 	// match: (IData (IMake _ data))
 	// result: data
 	for {
@@ -73,10 +251,52 @@
 		v.copyOf(data)
 		return true
 	}
+	// match: (IData x:(Load <t> ptr mem))
+	// cond: t.IsInterface()
+	// result: @x.Block (Load <typ.BytePtr> (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr) mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsInterface()) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+		v.copyOf(v0)
+		v1 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
+		v1.AuxInt = int64ToAuxInt(config.PtrSize)
+		v1.AddArg(ptr)
+		v0.AddArg2(v1, mem)
+		return true
+	}
+	return false
+}
+func rewriteValuedec_OpIMake(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (IMake _typ (StructMake1 val))
+	// result: (IMake _typ val)
+	for {
+		_typ := v_0
+		if v_1.Op != OpStructMake1 {
+			break
+		}
+		val := v_1.Args[0]
+		v.reset(OpIMake)
+		v.AddArg2(_typ, val)
+		return true
+	}
 	return false
 }
 func rewriteValuedec_OpITab(v *Value) bool {
 	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
 	// match: (ITab (IMake itab _))
 	// result: itab
 	for {
@@ -87,6 +307,26 @@
 		v.copyOf(itab)
 		return true
 	}
+	// match: (ITab x:(Load <t> ptr mem))
+	// cond: t.IsInterface()
+	// result: @x.Block (Load <typ.Uintptr> ptr mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsInterface()) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr)
+		v.copyOf(v0)
+		v0.AddArg2(ptr, mem)
+		return true
+	}
 	return false
 }
 func rewriteValuedec_OpLoad(v *Value) bool {
@@ -209,6 +449,9 @@
 }
 func rewriteValuedec_OpSliceCap(v *Value) bool {
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
+	typ := &b.Func.Config.Types
 	// match: (SliceCap (SliceMake _ _ cap))
 	// result: cap
 	for {
@@ -219,10 +462,36 @@
 		v.copyOf(cap)
 		return true
 	}
+	// match: (SliceCap x:(Load <t> ptr mem))
+	// cond: t.IsSlice()
+	// result: @x.Block (Load <typ.Int> (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr) mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsSlice()) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+		v.copyOf(v0)
+		v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+		v1.AuxInt = int64ToAuxInt(2 * config.PtrSize)
+		v1.AddArg(ptr)
+		v0.AddArg2(v1, mem)
+		return true
+	}
 	return false
 }
 func rewriteValuedec_OpSliceLen(v *Value) bool {
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
+	typ := &b.Func.Config.Types
 	// match: (SliceLen (SliceMake _ len _))
 	// result: len
 	for {
@@ -233,10 +502,34 @@
 		v.copyOf(len)
 		return true
 	}
+	// match: (SliceLen x:(Load <t> ptr mem))
+	// cond: t.IsSlice()
+	// result: @x.Block (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsSlice()) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+		v.copyOf(v0)
+		v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+		v1.AuxInt = int64ToAuxInt(config.PtrSize)
+		v1.AddArg(ptr)
+		v0.AddArg2(v1, mem)
+		return true
+	}
 	return false
 }
 func rewriteValuedec_OpSlicePtr(v *Value) bool {
 	v_0 := v.Args[0]
+	b := v.Block
 	// match: (SlicePtr (SliceMake ptr _ _ ))
 	// result: ptr
 	for {
@@ -247,6 +540,26 @@
 		v.copyOf(ptr)
 		return true
 	}
+	// match: (SlicePtr x:(Load <t> ptr mem))
+	// cond: t.IsSlice()
+	// result: @x.Block (Load <t.Elem().PtrTo()> ptr mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsSlice()) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo())
+		v.copyOf(v0)
+		v0.AddArg2(ptr, mem)
+		return true
+	}
 	return false
 }
 func rewriteValuedec_OpSlicePtrUnchecked(v *Value) bool {
@@ -270,6 +583,18 @@
 	b := v.Block
 	config := b.Func.Config
 	typ := &b.Func.Config.Types
+	// match: (Store {t} _ _ mem)
+	// cond: t.Size() == 0
+	// result: mem
+	for {
+		t := auxToType(v.Aux)
+		mem := v_2
+		if !(t.Size() == 0) {
+			break
+		}
+		v.copyOf(mem)
+		return true
+	}
 	// match: (Store {t} dst (ComplexMake real imag) mem)
 	// cond: t.Size() == 8
 	// result: (Store {typ.Float32} (OffPtr <typ.Float32Ptr> [4] dst) imag (Store {typ.Float32} dst real mem))
@@ -393,10 +718,141 @@
 		v.AddArg3(v0, data, v1)
 		return true
 	}
+	// match: (Store dst (StructMake1 <t> f0) mem)
+	// result: (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+	for {
+		dst := v_0
+		if v_1.Op != OpStructMake1 {
+			break
+		}
+		t := v_1.Type
+		f0 := v_1.Args[0]
+		mem := v_2
+		v.reset(OpStore)
+		v.Aux = typeToAux(t.FieldType(0))
+		v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+		v0.AuxInt = int64ToAuxInt(0)
+		v0.AddArg(dst)
+		v.AddArg3(v0, f0, mem)
+		return true
+	}
+	// match: (Store dst (StructMake2 <t> f0 f1) mem)
+	// result: (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))
+	for {
+		dst := v_0
+		if v_1.Op != OpStructMake2 {
+			break
+		}
+		t := v_1.Type
+		f1 := v_1.Args[1]
+		f0 := v_1.Args[0]
+		mem := v_2
+		v.reset(OpStore)
+		v.Aux = typeToAux(t.FieldType(1))
+		v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+		v0.AuxInt = int64ToAuxInt(t.FieldOff(1))
+		v0.AddArg(dst)
+		v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+		v1.Aux = typeToAux(t.FieldType(0))
+		v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+		v2.AuxInt = int64ToAuxInt(0)
+		v2.AddArg(dst)
+		v1.AddArg3(v2, f0, mem)
+		v.AddArg3(v0, f1, v1)
+		return true
+	}
+	// match: (Store dst (StructMake3 <t> f0 f1 f2) mem)
+	// result: (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)))
+	for {
+		dst := v_0
+		if v_1.Op != OpStructMake3 {
+			break
+		}
+		t := v_1.Type
+		f2 := v_1.Args[2]
+		f0 := v_1.Args[0]
+		f1 := v_1.Args[1]
+		mem := v_2
+		v.reset(OpStore)
+		v.Aux = typeToAux(t.FieldType(2))
+		v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+		v0.AuxInt = int64ToAuxInt(t.FieldOff(2))
+		v0.AddArg(dst)
+		v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+		v1.Aux = typeToAux(t.FieldType(1))
+		v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+		v2.AuxInt = int64ToAuxInt(t.FieldOff(1))
+		v2.AddArg(dst)
+		v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+		v3.Aux = typeToAux(t.FieldType(0))
+		v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+		v4.AuxInt = int64ToAuxInt(0)
+		v4.AddArg(dst)
+		v3.AddArg3(v4, f0, mem)
+		v1.AddArg3(v2, f1, v3)
+		v.AddArg3(v0, f2, v1)
+		return true
+	}
+	// match: (Store dst (StructMake4 <t> f0 f1 f2 f3) mem)
+	// result: (Store {t.FieldType(3)} (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst) f3 (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))))
+	for {
+		dst := v_0
+		if v_1.Op != OpStructMake4 {
+			break
+		}
+		t := v_1.Type
+		f3 := v_1.Args[3]
+		f0 := v_1.Args[0]
+		f1 := v_1.Args[1]
+		f2 := v_1.Args[2]
+		mem := v_2
+		v.reset(OpStore)
+		v.Aux = typeToAux(t.FieldType(3))
+		v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo())
+		v0.AuxInt = int64ToAuxInt(t.FieldOff(3))
+		v0.AddArg(dst)
+		v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+		v1.Aux = typeToAux(t.FieldType(2))
+		v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+		v2.AuxInt = int64ToAuxInt(t.FieldOff(2))
+		v2.AddArg(dst)
+		v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+		v3.Aux = typeToAux(t.FieldType(1))
+		v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+		v4.AuxInt = int64ToAuxInt(t.FieldOff(1))
+		v4.AddArg(dst)
+		v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+		v5.Aux = typeToAux(t.FieldType(0))
+		v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+		v6.AuxInt = int64ToAuxInt(0)
+		v6.AddArg(dst)
+		v5.AddArg3(v6, f0, mem)
+		v3.AddArg3(v4, f1, v5)
+		v1.AddArg3(v2, f2, v3)
+		v.AddArg3(v0, f3, v1)
+		return true
+	}
+	// match: (Store dst (ArrayMake1 e) mem)
+	// result: (Store {e.Type} dst e mem)
+	for {
+		dst := v_0
+		if v_1.Op != OpArrayMake1 {
+			break
+		}
+		e := v_1.Args[0]
+		mem := v_2
+		v.reset(OpStore)
+		v.Aux = typeToAux(e.Type)
+		v.AddArg3(dst, e, mem)
+		return true
+	}
 	return false
 }
 func rewriteValuedec_OpStringLen(v *Value) bool {
 	v_0 := v.Args[0]
+	b := v.Block
+	config := b.Func.Config
+	typ := &b.Func.Config.Types
 	// match: (StringLen (StringMake _ len))
 	// result: len
 	for {
@@ -407,10 +863,35 @@
 		v.copyOf(len)
 		return true
 	}
+	// match: (StringLen x:(Load <t> ptr mem))
+	// cond: t.IsString()
+	// result: @x.Block (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsString()) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+		v.copyOf(v0)
+		v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+		v1.AuxInt = int64ToAuxInt(config.PtrSize)
+		v1.AddArg(ptr)
+		v0.AddArg2(v1, mem)
+		return true
+	}
 	return false
 }
 func rewriteValuedec_OpStringPtr(v *Value) bool {
 	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
 	// match: (StringPtr (StringMake ptr _))
 	// result: ptr
 	for {
@@ -421,6 +902,191 @@
 		v.copyOf(ptr)
 		return true
 	}
+	// match: (StringPtr x:(Load <t> ptr mem))
+	// cond: t.IsString()
+	// result: @x.Block (Load <typ.BytePtr> ptr mem)
+	for {
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		if !(t.IsString()) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+		v.copyOf(v0)
+		v0.AddArg2(ptr, mem)
+		return true
+	}
+	return false
+}
+func rewriteValuedec_OpStructMake1(v *Value) bool {
+	v_0 := v.Args[0]
+	// match: (StructMake1 x)
+	// cond: x.Type.IsPtrShaped()
+	// result: x
+	for {
+		x := v_0
+		if !(x.Type.IsPtrShaped()) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	return false
+}
+func rewriteValuedec_OpStructSelect(v *Value) bool {
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (StructSelect [0] (IData x))
+	// result: (IData x)
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+			break
+		}
+		x := v_0.Args[0]
+		v.reset(OpIData)
+		v.AddArg(x)
+		return true
+	}
+	// match: (StructSelect (StructMake1 x))
+	// result: x
+	for {
+		if v_0.Op != OpStructMake1 {
+			break
+		}
+		x := v_0.Args[0]
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [0] (StructMake2 x _))
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake2 {
+			break
+		}
+		x := v_0.Args[0]
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [1] (StructMake2 _ x))
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake2 {
+			break
+		}
+		x := v_0.Args[1]
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [0] (StructMake3 x _ _))
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake3 {
+			break
+		}
+		x := v_0.Args[0]
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [1] (StructMake3 _ x _))
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake3 {
+			break
+		}
+		x := v_0.Args[1]
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [2] (StructMake3 _ _ x))
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake3 {
+			break
+		}
+		x := v_0.Args[2]
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [0] (StructMake4 x _ _ _))
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake4 {
+			break
+		}
+		x := v_0.Args[0]
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [1] (StructMake4 _ x _ _))
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake4 {
+			break
+		}
+		x := v_0.Args[1]
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [2] (StructMake4 _ _ x _))
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake4 {
+			break
+		}
+		x := v_0.Args[2]
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [3] (StructMake4 _ _ _ x))
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 3 || v_0.Op != OpStructMake4 {
+			break
+		}
+		x := v_0.Args[3]
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [0] x)
+	// cond: x.Type.IsPtrShaped()
+	// result: x
+	for {
+		if auxIntToInt64(v.AuxInt) != 0 {
+			break
+		}
+		x := v_0
+		if !(x.Type.IsPtrShaped()) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
+	// match: (StructSelect [i] x:(Load <t> ptr mem))
+	// result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+	for {
+		i := auxIntToInt64(v.AuxInt)
+		x := v_0
+		if x.Op != OpLoad {
+			break
+		}
+		t := x.Type
+		mem := x.Args[1]
+		ptr := x.Args[0]
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, v.Type)
+		v.copyOf(v0)
+		v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo())
+		v1.AuxInt = int64ToAuxInt(t.FieldOff(int(i)))
+		v1.AddArg(ptr)
+		v0.AddArg2(v1, mem)
+		return true
+	}
 	return false
 }
 func rewriteBlockdec(b *Block) bool {
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index e5bd8bc..a018ca0 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -397,6 +397,8 @@
 		return rewriteValuegeneric_OpSlicemask(v)
 	case OpSqrt:
 		return rewriteValuegeneric_OpSqrt(v)
+	case OpStaticCall:
+		return rewriteValuegeneric_OpStaticCall(v)
 	case OpStaticLECall:
 		return rewriteValuegeneric_OpStaticLECall(v)
 	case OpStore:
@@ -12585,7 +12587,6 @@
 	v_0 := v.Args[0]
 	b := v.Block
 	config := b.Func.Config
-	fe := b.Func.fe
 	// match: (Load <t1> p1 (Store {t2} p2 x _))
 	// cond: isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()
 	// result: x
@@ -12797,7 +12798,7 @@
 		return true
 	}
 	// match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ mem:(Zero [n] p3 _)))
-	// cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())
+	// cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())
 	// result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
 	for {
 		t1 := v.Type
@@ -12819,7 +12820,7 @@
 		}
 		n := auxIntToInt64(mem.AuxInt)
 		p3 := mem.Args[0]
-		if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())) {
+		if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())) {
 			break
 		}
 		b = mem.Block
@@ -12832,7 +12833,7 @@
 		return true
 	}
 	// match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ mem:(Zero [n] p4 _))))
-	// cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())
+	// cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())
 	// result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
 	for {
 		t1 := v.Type
@@ -12861,7 +12862,7 @@
 		}
 		n := auxIntToInt64(mem.AuxInt)
 		p4 := mem.Args[0]
-		if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) {
+		if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) {
 			break
 		}
 		b = mem.Block
@@ -12874,7 +12875,7 @@
 		return true
 	}
 	// match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _)))))
-	// cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())
+	// cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())
 	// result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p5) mem)
 	for {
 		t1 := v.Type
@@ -12910,7 +12911,7 @@
 		}
 		n := auxIntToInt64(mem.AuxInt)
 		p5 := mem.Args[0]
-		if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) {
+		if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) {
 			break
 		}
 		b = mem.Block
@@ -12923,7 +12924,7 @@
 		return true
 	}
 	// match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ mem:(Zero [n] p6 _))))))
-	// cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())
+	// cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())
 	// result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p6) mem)
 	for {
 		t1 := v.Type
@@ -12966,7 +12967,7 @@
 		}
 		n := auxIntToInt64(mem.AuxInt)
 		p6 := mem.Args[0]
-		if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())) {
+		if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())) {
 			break
 		}
 		b = mem.Block
@@ -13133,24 +13134,24 @@
 		return true
 	}
 	// match: (Load <t> _ _)
-	// cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)
+	// cond: t.IsStruct() && t.NumFields() == 0 && CanSSA(t)
 	// result: (StructMake0)
 	for {
 		t := v.Type
-		if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) {
+		if !(t.IsStruct() && t.NumFields() == 0 && CanSSA(t)) {
 			break
 		}
 		v.reset(OpStructMake0)
 		return true
 	}
 	// match: (Load <t> ptr mem)
-	// cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)
+	// cond: t.IsStruct() && t.NumFields() == 1 && CanSSA(t)
 	// result: (StructMake1 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
 	for {
 		t := v.Type
 		ptr := v_0
 		mem := v_1
-		if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) {
+		if !(t.IsStruct() && t.NumFields() == 1 && CanSSA(t)) {
 			break
 		}
 		v.reset(OpStructMake1)
@@ -13163,13 +13164,13 @@
 		return true
 	}
 	// match: (Load <t> ptr mem)
-	// cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)
+	// cond: t.IsStruct() && t.NumFields() == 2 && CanSSA(t)
 	// result: (StructMake2 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
 	for {
 		t := v.Type
 		ptr := v_0
 		mem := v_1
-		if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) {
+		if !(t.IsStruct() && t.NumFields() == 2 && CanSSA(t)) {
 			break
 		}
 		v.reset(OpStructMake2)
@@ -13187,13 +13188,13 @@
 		return true
 	}
 	// match: (Load <t> ptr mem)
-	// cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)
+	// cond: t.IsStruct() && t.NumFields() == 3 && CanSSA(t)
 	// result: (StructMake3 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
 	for {
 		t := v.Type
 		ptr := v_0
 		mem := v_1
-		if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) {
+		if !(t.IsStruct() && t.NumFields() == 3 && CanSSA(t)) {
 			break
 		}
 		v.reset(OpStructMake3)
@@ -13216,13 +13217,13 @@
 		return true
 	}
 	// match: (Load <t> ptr mem)
-	// cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)
+	// cond: t.IsStruct() && t.NumFields() == 4 && CanSSA(t)
 	// result: (StructMake4 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem) (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
 	for {
 		t := v.Type
 		ptr := v_0
 		mem := v_1
-		if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) {
+		if !(t.IsStruct() && t.NumFields() == 4 && CanSSA(t)) {
 			break
 		}
 		v.reset(OpStructMake4)
@@ -13261,13 +13262,13 @@
 		return true
 	}
 	// match: (Load <t> ptr mem)
-	// cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)
+	// cond: t.IsArray() && t.NumElem() == 1 && CanSSA(t)
 	// result: (ArrayMake1 (Load <t.Elem()> ptr mem))
 	for {
 		t := v.Type
 		ptr := v_0
 		mem := v_1
-		if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) {
+		if !(t.IsArray() && t.NumElem() == 1 && CanSSA(t)) {
 			break
 		}
 		v.reset(OpArrayMake1)
@@ -18967,79 +18968,84 @@
 	v_0 := v.Args[0]
 	b := v.Block
 	fe := b.Func.fe
-	// match: (NilCheck (GetG mem) mem)
-	// result: mem
+	// match: (NilCheck ptr:(GetG mem) mem)
+	// result: ptr
 	for {
-		if v_0.Op != OpGetG {
+		ptr := v_0
+		if ptr.Op != OpGetG {
 			break
 		}
-		mem := v_0.Args[0]
+		mem := ptr.Args[0]
 		if mem != v_1 {
 			break
 		}
-		v.copyOf(mem)
+		v.copyOf(ptr)
 		return true
 	}
-	// match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
+	// match: (NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
 	// cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
-	// result: (Invalid)
+	// result: ptr
 	for {
-		if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+		ptr := v_0
+		if ptr.Op != OpSelectN || auxIntToInt64(ptr.AuxInt) != 0 {
 			break
 		}
-		call := v_0.Args[0]
+		call := ptr.Args[0]
 		if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
 			break
 		}
-		v.reset(OpInvalid)
+		v.copyOf(ptr)
 		return true
 	}
-	// match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
+	// match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
 	// cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
-	// result: (Invalid)
+	// result: ptr
 	for {
-		if v_0.Op != OpOffPtr {
+		ptr := v_0
+		if ptr.Op != OpOffPtr {
 			break
 		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 {
+		ptr_0 := ptr.Args[0]
+		if ptr_0.Op != OpSelectN || auxIntToInt64(ptr_0.AuxInt) != 0 {
 			break
 		}
-		call := v_0_0.Args[0]
+		call := ptr_0.Args[0]
 		if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
 			break
 		}
-		v.reset(OpInvalid)
+		v.copyOf(ptr)
 		return true
 	}
-	// match: (NilCheck (Addr {_} (SB)) _)
-	// result: (Invalid)
+	// match: (NilCheck ptr:(Addr {_} (SB)) _)
+	// result: ptr
 	for {
-		if v_0.Op != OpAddr {
+		ptr := v_0
+		if ptr.Op != OpAddr {
 			break
 		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpSB {
+		ptr_0 := ptr.Args[0]
+		if ptr_0.Op != OpSB {
 			break
 		}
-		v.reset(OpInvalid)
+		v.copyOf(ptr)
 		return true
 	}
-	// match: (NilCheck (Convert (Addr {_} (SB)) _) _)
-	// result: (Invalid)
+	// match: (NilCheck ptr:(Convert (Addr {_} (SB)) _) _)
+	// result: ptr
 	for {
-		if v_0.Op != OpConvert {
+		ptr := v_0
+		if ptr.Op != OpConvert {
 			break
 		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpAddr {
+		ptr_0 := ptr.Args[0]
+		if ptr_0.Op != OpAddr {
 			break
 		}
-		v_0_0_0 := v_0_0.Args[0]
-		if v_0_0_0.Op != OpSB {
+		ptr_0_0 := ptr_0.Args[0]
+		if ptr_0_0.Op != OpSB {
 			break
 		}
-		v.reset(OpInvalid)
+		v.copyOf(ptr)
 		return true
 	}
 	return false
@@ -28219,6 +28225,31 @@
 	}
 	return false
 }
+func rewriteValuegeneric_OpStaticCall(v *Value) bool {
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (StaticCall {callAux} p q _ mem)
+	// cond: isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)
+	// result: (MakeResult (ConstBool <typ.Bool> [true]) mem)
+	for {
+		if len(v.Args) != 4 {
+			break
+		}
+		callAux := auxToCall(v.Aux)
+		mem := v.Args[3]
+		p := v.Args[0]
+		q := v.Args[1]
+		if !(isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)) {
+			break
+		}
+		v.reset(OpMakeResult)
+		v0 := b.NewValue0(v.Pos, OpConstBool, typ.Bool)
+		v0.AuxInt = boolToAuxInt(true)
+		v.AddArg2(v0, mem)
+		return true
+	}
+	return false
+}
 func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
 	b := v.Block
 	config := b.Func.Config
@@ -28506,6 +28537,26 @@
 		v.AddArg2(v0, mem)
 		return true
 	}
+	// match: (StaticLECall {callAux} p q _ mem)
+	// cond: isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)
+	// result: (MakeResult (ConstBool <typ.Bool> [true]) mem)
+	for {
+		if len(v.Args) != 4 {
+			break
+		}
+		callAux := auxToCall(v.Aux)
+		mem := v.Args[3]
+		p := v.Args[0]
+		q := v.Args[1]
+		if !(isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)) {
+			break
+		}
+		v.reset(OpMakeResult)
+		v0 := b.NewValue0(v.Pos, OpConstBool, typ.Bool)
+		v0.AuxInt = boolToAuxInt(true)
+		v.AddArg2(v0, mem)
+		return true
+	}
 	// match: (StaticLECall {callAux} _ (Const64 [0]) (Const64 [0]) mem)
 	// cond: isSameCall(callAux, "runtime.makeslice")
 	// result: (MakeResult (Addr <v.Type.FieldType(0)> {ir.Syms.Zerobase} (SB)) mem)
@@ -28563,7 +28614,6 @@
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	fe := b.Func.fe
 	// match: (Store {t1} p1 (Load <t2> p2 mem) mem)
 	// cond: isSamePtr(p1, p2) && t2.Size() == t1.Size()
 	// result: mem
@@ -28940,7 +28990,7 @@
 		return true
 	}
 	// match: (Store {t} dst (Load src mem) mem)
-	// cond: !fe.CanSSA(t)
+	// cond: !CanSSA(t)
 	// result: (Move {t} [t.Size()] dst src mem)
 	for {
 		t := auxToType(v.Aux)
@@ -28950,7 +29000,7 @@
 		}
 		mem := v_1.Args[1]
 		src := v_1.Args[0]
-		if mem != v_2 || !(!fe.CanSSA(t)) {
+		if mem != v_2 || !(!CanSSA(t)) {
 			break
 		}
 		v.reset(OpMove)
@@ -28960,7 +29010,7 @@
 		return true
 	}
 	// match: (Store {t} dst (Load src mem) (VarDef {x} mem))
-	// cond: !fe.CanSSA(t)
+	// cond: !CanSSA(t)
 	// result: (Move {t} [t.Size()] dst src (VarDef {x} mem))
 	for {
 		t := auxToType(v.Aux)
@@ -28974,7 +29024,7 @@
 			break
 		}
 		x := auxToSym(v_2.Aux)
-		if mem != v_2.Args[0] || !(!fe.CanSSA(t)) {
+		if mem != v_2.Args[0] || !(!CanSSA(t)) {
 			break
 		}
 		v.reset(OpMove)
@@ -29450,7 +29500,6 @@
 func rewriteValuegeneric_OpStructSelect(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
-	fe := b.Func.fe
 	// match: (StructSelect (StructMake1 x))
 	// result: x
 	for {
@@ -29552,7 +29601,7 @@
 		return true
 	}
 	// match: (StructSelect [i] x:(Load <t> ptr mem))
-	// cond: !fe.CanSSA(t)
+	// cond: !CanSSA(t)
 	// result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
 	for {
 		i := auxIntToInt64(v.AuxInt)
@@ -29563,7 +29612,7 @@
 		t := x.Type
 		mem := x.Args[1]
 		ptr := x.Args[0]
-		if !(!fe.CanSSA(t)) {
+		if !(!CanSSA(t)) {
 			break
 		}
 		b = x.Block
diff --git a/src/cmd/compile/internal/ssa/sccp.go b/src/cmd/compile/internal/ssa/sccp.go
new file mode 100644
index 0000000..77a6f50
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sccp.go
@@ -0,0 +1,585 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+	"fmt"
+)
+
+// ----------------------------------------------------------------------------
+// Sparse Conditional Constant Propagation
+//
+// Described in
+// Mark N. Wegman, F. Kenneth Zadeck: Constant Propagation with Conditional Branches.
+// TOPLAS 1991.
+//
+// This algorithm uses a three-level lattice for each SSA value:
+//
+//      Top        undefined
+//     / | \
+// .. 1  2  3 ..   constant
+//     \ | /
+//     Bottom      not constant
+//
+// It starts with optimistically assuming that all SSA values are initially Top
+// and then propagates constant facts only along reachable control flow paths.
+// Since some basic blocks may not have been visited yet, the corresponding inputs
+// of a phi remain Top, and we use meet(phi) to compute the phi's lattice:
+//
+// 	  Top ∩ any = any
+// 	  Bottom ∩ any = Bottom
+// 	  ConstantA ∩ ConstantA = ConstantA
+// 	  ConstantA ∩ ConstantB = Bottom
+//
+// Each lattice value is lowered at most twice (Top to Constant, Constant to Bottom)
+// because of the lattice depth, so the algorithm converges quickly.
+// In this way, sccp can discover optimization opportunities that cannot be found
+// by running constant folding, constant propagation, and dead code elimination
+// separately.
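+//
+// A small illustrative example (values chosen for exposition only): for
+//
+//	if true { x = 1 } else { x = 2 }
+//	y := x + 1
+//
+// the false edge is never added to the work list, so the phi merging x keeps its
+// untaken input at Top, meets to the constant 1, and y in turn folds to 2.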
+
+// The three-level lattice holds compile-time knowledge about an SSA value.
+const (
+	top      int8 = iota // undefined
+	constant             // constant
+	bottom               // not a constant
+)
+
+type lattice struct {
+	tag int8   // lattice type
+	val *Value // constant value
+}
+
+type worklist struct {
+	f            *Func               // the target function to be optimized
+	edges        []Edge              // propagate constant facts through edges
+	uses         []*Value            // re-visiting set
+	visited      map[Edge]bool       // visited edges
+	latticeCells map[*Value]lattice  // constant lattices
+	defUse       map[*Value][]*Value // def-use chains for some values
+	defBlock     map[*Value][]*Block // use blocks of def
+	visitedBlock []bool              // visited block
+}
+
+// sccp stands for sparse conditional constant propagation. It propagates constants
+// through the CFG conditionally and applies constant folding, constant replacement,
+// and dead code elimination all together.
+func sccp(f *Func) {
+	var t worklist
+	t.f = f
+	t.edges = make([]Edge, 0)
+	t.visited = make(map[Edge]bool)
+	t.edges = append(t.edges, Edge{f.Entry, 0})
+	t.defUse = make(map[*Value][]*Value)
+	t.defBlock = make(map[*Value][]*Block)
+	t.latticeCells = make(map[*Value]lattice)
+	t.visitedBlock = f.Cache.allocBoolSlice(f.NumBlocks())
+	defer f.Cache.freeBoolSlice(t.visitedBlock)
+
+	// build it early since we rely heavily on the def-use chain later
+	t.buildDefUses()
+
+	// pick up either an edge or an SSA value from the worklist and process it
+	for {
+		if len(t.edges) > 0 {
+			edge := t.edges[0]
+			t.edges = t.edges[1:]
+			if _, exist := t.visited[edge]; !exist {
+				dest := edge.b
+				destVisited := t.visitedBlock[dest.ID]
+
+				// mark edge as visited
+				t.visited[edge] = true
+				t.visitedBlock[dest.ID] = true
+				for _, val := range dest.Values {
+					if val.Op == OpPhi || !destVisited {
+						t.visitValue(val)
+					}
+				}
+				// propagate constant facts through the CFG, taking the condition
+				// test into account
+				if !destVisited {
+					t.propagate(dest)
+				}
+			}
+			continue
+		}
+		if len(t.uses) > 0 {
+			use := t.uses[0]
+			t.uses = t.uses[1:]
+			t.visitValue(use)
+			continue
+		}
+		break
+	}
+
+	// apply optimizations based on discovered constants
+	constCnt, rewireCnt := t.replaceConst()
+	if f.pass.debug > 0 {
+		if constCnt > 0 || rewireCnt > 0 {
+			fmt.Printf("Phase SCCP for %v : %v constants, %v dce\n", f.Name, constCnt, rewireCnt)
+		}
+	}
+}
+
+func equals(a, b lattice) bool {
+	if a == b {
+		// fast path
+		return true
+	}
+	if a.tag != b.tag {
+		return false
+	}
+	if a.tag == constant {
+		// Two distinct const Values may carry the same content, so we
+		// compare their op and auxInt instead of comparing pointers.
+		v1 := a.val
+		v2 := b.val
+		if v1.Op == v2.Op && v1.AuxInt == v2.AuxInt {
+			return true
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// possibleConst reports whether the Value can be folded to a constant. For Values
+// that can never become constants (e.g. StaticCall), we don't make futile efforts.
+func possibleConst(val *Value) bool {
+	if isConst(val) {
+		return true
+	}
+	switch val.Op {
+	case OpCopy:
+		return true
+	case OpPhi:
+		return true
+	case
+		// negate
+		OpNeg8, OpNeg16, OpNeg32, OpNeg64, OpNeg32F, OpNeg64F,
+		OpCom8, OpCom16, OpCom32, OpCom64,
+		// math
+		OpFloor, OpCeil, OpTrunc, OpRoundToEven, OpSqrt,
+		// conversion
+		OpTrunc16to8, OpTrunc32to8, OpTrunc32to16, OpTrunc64to8,
+		OpTrunc64to16, OpTrunc64to32, OpCvt32to32F, OpCvt32to64F,
+		OpCvt64to32F, OpCvt64to64F, OpCvt32Fto32, OpCvt32Fto64,
+		OpCvt64Fto32, OpCvt64Fto64, OpCvt32Fto64F, OpCvt64Fto32F,
+		OpCvtBoolToUint8,
+		OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, OpZeroExt16to32,
+		OpZeroExt16to64, OpZeroExt32to64, OpSignExt8to16, OpSignExt8to32,
+		OpSignExt8to64, OpSignExt16to32, OpSignExt16to64, OpSignExt32to64,
+		// bit
+		OpCtz8, OpCtz16, OpCtz32, OpCtz64,
+		// mask
+		OpSlicemask,
+		// safety check
+		OpIsNonNil,
+		// not
+		OpNot:
+		return true
+	case
+		// add
+		OpAdd64, OpAdd32, OpAdd16, OpAdd8,
+		OpAdd32F, OpAdd64F,
+		// sub
+		OpSub64, OpSub32, OpSub16, OpSub8,
+		OpSub32F, OpSub64F,
+		// mul
+		OpMul64, OpMul32, OpMul16, OpMul8,
+		OpMul32F, OpMul64F,
+		// div
+		OpDiv32F, OpDiv64F,
+		OpDiv8, OpDiv16, OpDiv32, OpDiv64,
+		OpDiv8u, OpDiv16u, OpDiv32u, OpDiv64u,
+		OpMod8, OpMod16, OpMod32, OpMod64,
+		OpMod8u, OpMod16u, OpMod32u, OpMod64u,
+		// compare
+		OpEq64, OpEq32, OpEq16, OpEq8,
+		OpEq32F, OpEq64F,
+		OpLess64, OpLess32, OpLess16, OpLess8,
+		OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+		OpLess32F, OpLess64F,
+		OpLeq64, OpLeq32, OpLeq16, OpLeq8,
+		OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U,
+		OpLeq32F, OpLeq64F,
+		OpEqB, OpNeqB,
+		// shift
+		OpLsh64x64, OpRsh64x64, OpRsh64Ux64, OpLsh32x64,
+		OpRsh32x64, OpRsh32Ux64, OpLsh16x64, OpRsh16x64,
+		OpRsh16Ux64, OpLsh8x64, OpRsh8x64, OpRsh8Ux64,
+		// safety check
+		OpIsInBounds, OpIsSliceInBounds,
+		// bit
+		OpAnd8, OpAnd16, OpAnd32, OpAnd64,
+		OpOr8, OpOr16, OpOr32, OpOr64,
+		OpXor8, OpXor16, OpXor32, OpXor64:
+		return true
+	default:
+		return false
+	}
+}
+
+func (t *worklist) getLatticeCell(val *Value) lattice {
+	if !possibleConst(val) {
+		// such values are always worst (Bottom)
+		return lattice{bottom, nil}
+	}
+	lt, exist := t.latticeCells[val]
+	if !exist {
+		return lattice{top, nil} // optimistic assumption for an unvisited value
+	}
+	return lt
+}
+
+func isConst(val *Value) bool {
+	switch val.Op {
+	case OpConst64, OpConst32, OpConst16, OpConst8,
+		OpConstBool, OpConst32F, OpConst64F:
+		return true
+	default:
+		return false
+	}
+}
+
+// buildDefUses builds def-use chains for some values early, because once the
+// lattice of a value changes, we need to update the lattices of its uses. We don't
+// need all of its uses, though: only uses that can themselves become constants are
+// added to the re-visit worklist, since no matter how many times the other uses are
+// revisited, their lattices stay unchanged, i.e. Bottom.
+func (t *worklist) buildDefUses() {
+	for _, block := range t.f.Blocks {
+		for _, val := range block.Values {
+			for _, arg := range val.Args {
+				// find its uses; only uses that can become constants are taken into account
+				if possibleConst(arg) && possibleConst(val) {
+					if _, exist := t.defUse[arg]; !exist {
+						t.defUse[arg] = make([]*Value, 0, arg.Uses)
+					}
+					t.defUse[arg] = append(t.defUse[arg], val)
+				}
+			}
+		}
+		for _, ctl := range block.ControlValues() {
+			// for control values that can become constants, find their use blocks
+			if possibleConst(ctl) {
+				t.defBlock[ctl] = append(t.defBlock[ctl], block)
+			}
+		}
+	}
+}
+
+// addUses finds all uses of the value and appends them to the worklist for further processing.
+func (t *worklist) addUses(val *Value) {
+	for _, use := range t.defUse[val] {
+		if val == use {
+			// A phi may use itself as an argument; ignore such uses to avoid
+			// re-visiting the phi, for performance reasons
+			continue
+		}
+		t.uses = append(t.uses, use)
+	}
+	for _, block := range t.defBlock[val] {
+		if t.visitedBlock[block.ID] {
+			t.propagate(block)
+		}
+	}
+}
+
+// meet meets all of the phi's arguments and computes the resulting lattice.
+func (t *worklist) meet(val *Value) lattice {
+	optimisticLt := lattice{top, nil}
+	for i := 0; i < len(val.Args); i++ {
+		edge := Edge{val.Block, i}
+		// If the incoming edge for the phi has not been visited, assume Top optimistically.
+		// According to rules of meet:
+		// 		Top ∩ any = any
+		// Top participates in meet() but does not affect the result, so here
+		// we will ignore Top and only take other lattices into consideration.
+		if _, exist := t.visited[edge]; exist {
+			lt := t.getLatticeCell(val.Args[i])
+			if lt.tag == constant {
+				if optimisticLt.tag == top {
+					optimisticLt = lt
+				} else {
+					if !equals(optimisticLt, lt) {
+						// ConstantA ∩ ConstantB = Bottom
+						return lattice{bottom, nil}
+					}
+				}
+			} else if lt.tag == bottom {
+				// Bottom ∩ any = Bottom
+				return lattice{bottom, nil}
+			} else {
+				// Top ∩ any = any
+			}
+		} else {
+			// Top ∩ any = any
+		}
+	}
+
+	// ConstantA ∩ ConstantA = ConstantA or Top ∩ any = any
+	return optimisticLt
+}
+
+func computeLattice(f *Func, val *Value, args ...*Value) lattice {
+	// In general, we need to perform constant evaluation based on constant args:
+	//
+	//  res := lattice{constant, nil}
+	// 	switch op {
+	// 	case OpAdd16:
+	//		res.val = newConst(argLt1.val.AuxInt16() + argLt2.val.AuxInt16())
+	// 	case OpAdd32:
+	// 		res.val = newConst(argLt1.val.AuxInt32() + argLt2.val.AuxInt32())
+	//	case OpDiv8:
+	//		if !isDivideByZero(argLt2.val.AuxInt8()) {
+	//			res.val = newConst(argLt1.val.AuxInt8() / argLt2.val.AuxInt8())
+	//		}
+	//  ...
+	// 	}
+	//
+	// However, this would create a huge switch over all opcodes that can be
+	// evaluated at compile time. Moreover, some operations can be evaluated
+	// only if their arguments satisfy additional conditions (e.g. no division by
+	// zero). That is fragile and error prone. Instead, we take a shortcut by
+	// reusing the existing generic rewrite rules for compile-time evaluation.
+	// But generic rules rewrite the original value, which is undesirable here,
+	// because the lattice of a value may change multiple times; once the value is
+	// rewritten, the rewrite is permanent and we cannot revise it later, which can
+	// lead to errors. For example, we cannot rewrite a Phi immediately after
+	// visiting it, because some of its incoming edges may not have been visited yet.
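+	//
+	// As a concrete illustration (example operands only): if val is (Add64 x y)
+	// and the argument lattices hold (Const64 [2]) and (Const64 [3]), the
+	// temporary (Add64 (Const64 [2]) (Const64 [3])) built below is folded by
+	// rewriteValuegeneric to (Const64 [5]) and the result is
+	// lattice{constant, (Const64 [5])}; if no rule matches or the fold does not
+	// yield a constant, the temporary is reset and the lattice falls to Bottom.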
+	constValue := f.newValue(val.Op, val.Type, f.Entry, val.Pos)
+	constValue.AddArgs(args...)
+	matched := rewriteValuegeneric(constValue)
+	if matched {
+		if isConst(constValue) {
+			return lattice{constant, constValue}
+		}
+	}
+	// Either we could not match a generic rule for the given value, or the value
+	// does not satisfy the additional constraints (e.g. division by zero); in these
+	// cases, clean up the temporary value immediately, as it may not be dominated
+	// by its args.
+	constValue.reset(OpInvalid)
+	return lattice{bottom, nil}
+}
+
+func (t *worklist) visitValue(val *Value) {
+	if !possibleConst(val) {
+		// fast fail for Values that are always worst, i.e. no lowering ever
+		// happens on them; their lattices start out as Bottom.
+		return
+	}
+
+	oldLt := t.getLatticeCell(val)
+	defer func() {
+		// re-visit all uses of value if its lattice is changed
+		newLt := t.getLatticeCell(val)
+		if !equals(newLt, oldLt) {
+			if int8(oldLt.tag) > int8(newLt.tag) {
+				t.f.Fatalf("Must lower lattice\n")
+			}
+			t.addUses(val)
+		}
+	}()
+
+	switch val.Op {
+	// they are constant values, aren't they?
+	case OpConst64, OpConst32, OpConst16, OpConst8,
+		OpConstBool, OpConst32F, OpConst64F: //TODO: support ConstNil ConstString etc
+		t.latticeCells[val] = lattice{constant, val}
+	// lattice value of copy(x) actually means lattice value of (x)
+	case OpCopy:
+		t.latticeCells[val] = t.getLatticeCell(val.Args[0])
+	// phi should be processed specially
+	case OpPhi:
+		t.latticeCells[val] = t.meet(val)
+	// fold 1-input operations:
+	case
+		// negate
+		OpNeg8, OpNeg16, OpNeg32, OpNeg64, OpNeg32F, OpNeg64F,
+		OpCom8, OpCom16, OpCom32, OpCom64,
+		// math
+		OpFloor, OpCeil, OpTrunc, OpRoundToEven, OpSqrt,
+		// conversion
+		OpTrunc16to8, OpTrunc32to8, OpTrunc32to16, OpTrunc64to8,
+		OpTrunc64to16, OpTrunc64to32, OpCvt32to32F, OpCvt32to64F,
+		OpCvt64to32F, OpCvt64to64F, OpCvt32Fto32, OpCvt32Fto64,
+		OpCvt64Fto32, OpCvt64Fto64, OpCvt32Fto64F, OpCvt64Fto32F,
+		OpCvtBoolToUint8,
+		OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, OpZeroExt16to32,
+		OpZeroExt16to64, OpZeroExt32to64, OpSignExt8to16, OpSignExt8to32,
+		OpSignExt8to64, OpSignExt16to32, OpSignExt16to64, OpSignExt32to64,
+		// bit
+		OpCtz8, OpCtz16, OpCtz32, OpCtz64,
+		// mask
+		OpSlicemask,
+		// safety check
+		OpIsNonNil,
+		// not
+		OpNot:
+		lt1 := t.getLatticeCell(val.Args[0])
+
+		if lt1.tag == constant {
+			// here we take a shortcut by reusing generic rules to fold constants
+			t.latticeCells[val] = computeLattice(t.f, val, lt1.val)
+		} else {
+			t.latticeCells[val] = lattice{lt1.tag, nil}
+		}
+	// fold 2-input operations
+	case
+		// add
+		OpAdd64, OpAdd32, OpAdd16, OpAdd8,
+		OpAdd32F, OpAdd64F,
+		// sub
+		OpSub64, OpSub32, OpSub16, OpSub8,
+		OpSub32F, OpSub64F,
+		// mul
+		OpMul64, OpMul32, OpMul16, OpMul8,
+		OpMul32F, OpMul64F,
+		// div
+		OpDiv32F, OpDiv64F,
+		OpDiv8, OpDiv16, OpDiv32, OpDiv64,
+		OpDiv8u, OpDiv16u, OpDiv32u, OpDiv64u, //TODO: support div128u
+		// mod
+		OpMod8, OpMod16, OpMod32, OpMod64,
+		OpMod8u, OpMod16u, OpMod32u, OpMod64u,
+		// compare
+		OpEq64, OpEq32, OpEq16, OpEq8,
+		OpEq32F, OpEq64F,
+		OpLess64, OpLess32, OpLess16, OpLess8,
+		OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+		OpLess32F, OpLess64F,
+		OpLeq64, OpLeq32, OpLeq16, OpLeq8,
+		OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U,
+		OpLeq32F, OpLeq64F,
+		OpEqB, OpNeqB,
+		// shift
+		OpLsh64x64, OpRsh64x64, OpRsh64Ux64, OpLsh32x64,
+		OpRsh32x64, OpRsh32Ux64, OpLsh16x64, OpRsh16x64,
+		OpRsh16Ux64, OpLsh8x64, OpRsh8x64, OpRsh8Ux64,
+		// safety check
+		OpIsInBounds, OpIsSliceInBounds,
+		// bit
+		OpAnd8, OpAnd16, OpAnd32, OpAnd64,
+		OpOr8, OpOr16, OpOr32, OpOr64,
+		OpXor8, OpXor16, OpXor32, OpXor64:
+		lt1 := t.getLatticeCell(val.Args[0])
+		lt2 := t.getLatticeCell(val.Args[1])
+
+		if lt1.tag == constant && lt2.tag == constant {
+			// here we take a shortcut by reusing generic rules to fold constants
+			t.latticeCells[val] = computeLattice(t.f, val, lt1.val, lt2.val)
+		} else {
+			if lt1.tag == bottom || lt2.tag == bottom {
+				t.latticeCells[val] = lattice{bottom, nil}
+			} else {
+				t.latticeCells[val] = lattice{top, nil}
+			}
+		}
+	default:
+		// Any other kind of value cannot be a constant; they are always worst (Bottom)
+	}
+}
+
+// propagate propagates constant facts through the CFG. If the block has a single
+// successor, that successor is always added. If the block has multiple successors,
+// only the branch destination corresponding to the lattice value of the condition
+// value is added.
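+//
+// For example (illustrative only): a BlockIf whose condition has lattice
+// {constant, (ConstBool [true])} contributes only Succs[0], the successor taken
+// when the condition is true, so the other successor may never be marked reachable.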
+func (t *worklist) propagate(block *Block) {
+	switch block.Kind {
+	case BlockExit, BlockRet, BlockRetJmp, BlockInvalid:
+		// control flow ends, nothing to do
+		break
+	case BlockDefer:
+		// we know nothing about control flow, add all branch destinations
+		t.edges = append(t.edges, block.Succs...)
+	case BlockFirst:
+		fallthrough // always takes the first branch
+	case BlockPlain:
+		t.edges = append(t.edges, block.Succs[0])
+	case BlockIf, BlockJumpTable:
+		cond := block.ControlValues()[0]
+		condLattice := t.getLatticeCell(cond)
+		if condLattice.tag == bottom {
+			// we know nothing about control flow, add all branch destinations
+			t.edges = append(t.edges, block.Succs...)
+		} else if condLattice.tag == constant {
+			// add only the branch destination selected by the condition value
+			var branchIdx int64
+			if block.Kind == BlockIf {
+				branchIdx = 1 - condLattice.val.AuxInt
+			} else {
+				branchIdx = condLattice.val.AuxInt
+			}
+			t.edges = append(t.edges, block.Succs[branchIdx])
+		} else {
+			// condition value is not visited yet, don't propagate it now
+		}
+	default:
+		t.f.Fatalf("All kind of block should be processed above.")
+	}
+}
+
+// rewireSuccessor rewires the block's successors according to the constant value
+// discovered by the preceding analysis. As a result, some successors become
+// unreachable and can thus be removed in a later deadcode phase.
+func rewireSuccessor(block *Block, constVal *Value) bool {
+	switch block.Kind {
+	case BlockIf:
+		block.removeEdge(int(constVal.AuxInt))
+		block.Kind = BlockPlain
+		block.Likely = BranchUnknown
+		block.ResetControls()
+		return true
+	case BlockJumpTable:
+		// Remove everything but the known taken branch.
+		idx := int(constVal.AuxInt)
+		if idx < 0 || idx >= len(block.Succs) {
+			// This can only happen in unreachable code,
+			// as an invariant of jump tables is that their
+			// input index is in range.
+			// See issue 64826.
+			return false
+		}
+		block.swapSuccessorsByIdx(0, idx)
+		for len(block.Succs) > 1 {
+			block.removeEdge(1)
+		}
+		block.Kind = BlockPlain
+		block.Likely = BranchUnknown
+		block.ResetControls()
+		return true
+	default:
+		return false
+	}
+}
+
+// replaceConst will replace non-constant values that have been proven by sccp
+// to be constants.
+func (t *worklist) replaceConst() (int, int) {
+	constCnt, rewireCnt := 0, 0
+	for val, lt := range t.latticeCells {
+		if lt.tag == constant {
+			if !isConst(val) {
+				if t.f.pass.debug > 0 {
+					fmt.Printf("Replace %v with %v\n", val.LongString(), lt.val.LongString())
+				}
+				val.reset(lt.val.Op)
+				val.AuxInt = lt.val.AuxInt
+				constCnt++
+			}
+			// If the const value controls this block, rewire the block's successors according to its value
+			ctrlBlock := t.defBlock[val]
+			for _, block := range ctrlBlock {
+				if rewireSuccessor(block, lt.val) {
+					rewireCnt++
+					if t.f.pass.debug > 0 {
+						fmt.Printf("Rewire %v %v successors\n", block.Kind, block)
+					}
+				}
+			}
+		}
+	}
+	return constCnt, rewireCnt
+}
diff --git a/src/cmd/compile/internal/ssa/sccp_test.go b/src/cmd/compile/internal/ssa/sccp_test.go
new file mode 100644
index 0000000..70c23e7
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sccp_test.go
@@ -0,0 +1,95 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+	"cmd/compile/internal/types"
+	"strings"
+	"testing"
+)
+
+func TestSCCPBasic(t *testing.T) {
+	c := testConfig(t)
+	fun := c.Fun("b1",
+		Bloc("b1",
+			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+			Valu("v1", OpConst64, c.config.Types.Int64, 20, nil),
+			Valu("v2", OpConst64, c.config.Types.Int64, 21, nil),
+			Valu("v3", OpConst64F, c.config.Types.Float64, 21.0, nil),
+			Valu("v4", OpConstBool, c.config.Types.Bool, 1, nil),
+			Valu("t1", OpAdd64, c.config.Types.Int64, 0, nil, "v1", "v2"),
+			Valu("t2", OpDiv64, c.config.Types.Int64, 0, nil, "t1", "v1"),
+			Valu("t3", OpAdd64, c.config.Types.Int64, 0, nil, "t1", "t2"),
+			Valu("t4", OpSub64, c.config.Types.Int64, 0, nil, "t3", "v2"),
+			Valu("t5", OpMul64, c.config.Types.Int64, 0, nil, "t4", "v2"),
+			Valu("t6", OpMod64, c.config.Types.Int64, 0, nil, "t5", "v2"),
+			Valu("t7", OpAnd64, c.config.Types.Int64, 0, nil, "t6", "v2"),
+			Valu("t8", OpOr64, c.config.Types.Int64, 0, nil, "t7", "v2"),
+			Valu("t9", OpXor64, c.config.Types.Int64, 0, nil, "t8", "v2"),
+			Valu("t10", OpNeg64, c.config.Types.Int64, 0, nil, "t9"),
+			Valu("t11", OpCom64, c.config.Types.Int64, 0, nil, "t10"),
+			Valu("t12", OpNeg64, c.config.Types.Int64, 0, nil, "t11"),
+			Valu("t13", OpFloor, c.config.Types.Float64, 0, nil, "v3"),
+			Valu("t14", OpSqrt, c.config.Types.Float64, 0, nil, "t13"),
+			Valu("t15", OpCeil, c.config.Types.Float64, 0, nil, "t14"),
+			Valu("t16", OpTrunc, c.config.Types.Float64, 0, nil, "t15"),
+			Valu("t17", OpRoundToEven, c.config.Types.Float64, 0, nil, "t16"),
+			Valu("t18", OpTrunc64to32, c.config.Types.Int64, 0, nil, "t12"),
+			Valu("t19", OpCvt64Fto64, c.config.Types.Float64, 0, nil, "t17"),
+			Valu("t20", OpCtz64, c.config.Types.Int64, 0, nil, "v2"),
+			Valu("t21", OpSlicemask, c.config.Types.Int64, 0, nil, "t20"),
+			Valu("t22", OpIsNonNil, c.config.Types.Int64, 0, nil, "v2"),
+			Valu("t23", OpNot, c.config.Types.Bool, 0, nil, "v4"),
+			Valu("t24", OpEq64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+			Valu("t25", OpLess64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+			Valu("t26", OpLeq64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+			Valu("t27", OpEqB, c.config.Types.Bool, 0, nil, "v4", "v4"),
+			Valu("t28", OpLsh64x64, c.config.Types.Int64, 0, nil, "v2", "v1"),
+			Valu("t29", OpIsInBounds, c.config.Types.Int64, 0, nil, "v2", "v1"),
+			Valu("t30", OpIsSliceInBounds, c.config.Types.Int64, 0, nil, "v2", "v1"),
+			Goto("b2")),
+		Bloc("b2",
+			Exit("mem")))
+	sccp(fun.f)
+	CheckFunc(fun.f)
+	for name, value := range fun.values {
+		if strings.HasPrefix(name, "t") {
+			if !isConst(value) {
+				t.Errorf("Must be constant: %v", value.LongString())
+			}
+		}
+	}
+}
+
+func TestSCCPIf(t *testing.T) {
+	c := testConfig(t)
+	fun := c.Fun("b1",
+		Bloc("b1",
+			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+			Valu("v1", OpConst64, c.config.Types.Int64, 0, nil),
+			Valu("v2", OpConst64, c.config.Types.Int64, 1, nil),
+			Valu("cmp", OpLess64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+			If("cmp", "b2", "b3")),
+		Bloc("b2",
+			Valu("v3", OpConst64, c.config.Types.Int64, 3, nil),
+			Goto("b4")),
+		Bloc("b3",
+			Valu("v4", OpConst64, c.config.Types.Int64, 4, nil),
+			Goto("b4")),
+		Bloc("b4",
+			Valu("merge", OpPhi, c.config.Types.Int64, 0, nil, "v3", "v4"),
+			Exit("mem")))
+	sccp(fun.f)
+	CheckFunc(fun.f)
+	for _, b := range fun.blocks {
+		for _, v := range b.Values {
+			if v == fun.values["merge"] {
+				if !isConst(v) {
+					t.Errorf("Must be constant: %v", v.LongString())
+				}
+			}
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index 19b98cc..fb38f40 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -7,7 +7,6 @@
 import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/types"
-	"cmd/internal/src"
 	"container/heap"
 	"sort"
 )
@@ -65,10 +64,6 @@
 	}
 
 	if x.Pos != y.Pos { // Favor in-order line stepping
-		if x.Block == x.Block.Func.Entry && x.Pos.IsStmt() != y.Pos.IsStmt() {
-			// In the entry block, put statement-marked instructions earlier.
-			return x.Pos.IsStmt() == src.PosIsStmt && y.Pos.IsStmt() != src.PosIsStmt
-		}
 		return x.Pos.Before(y.Pos)
 	}
 	if x.Op != OpPhi {
@@ -312,14 +307,21 @@
 	}
 
 	// Remove SPanchored now that we've scheduled.
+	// Also unlink nil checks now that ordering is assured
+	// between the nil check and the uses of the nil-checked pointer.
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
 			for i, a := range v.Args {
-				if a.Op == OpSPanchored {
+				if a.Op == OpSPanchored || opcodeTable[a.Op].nilCheck {
 					v.SetArg(i, a.Args[0])
 				}
 			}
 		}
+		for i, c := range b.ControlValues() {
+			if c.Op == OpSPanchored || opcodeTable[c.Op].nilCheck {
+				b.ReplaceControl(i, c.Args[0])
+			}
+		}
 	}
 	for _, b := range f.Blocks {
 		i := 0
@@ -332,6 +334,15 @@
 				v.resetArgs()
 				f.freeValue(v)
 			} else {
+				if opcodeTable[v.Op].nilCheck {
+					if v.Uses != 0 {
+						base.Fatalf("nilcheck still has %d uses", v.Uses)
+					}
+					// We can't delete the nil check, but we mark
+					// it as having void type so regalloc won't
+					// try to allocate a register for it.
+					v.Type = types.TypeVoid
+				}
 				b.Values[i] = v
 				i++
 			}
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
index 3e24b48..c9ca778 100644
--- a/src/cmd/compile/internal/ssa/stackalloc.go
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -280,7 +280,7 @@
 			// If there is no unused stack slot, allocate a new one.
 			if i == len(locs) {
 				s.nAuto++
-				locs = append(locs, LocalSlot{N: f.fe.Auto(v.Pos, v.Type), Type: v.Type, Off: 0})
+				locs = append(locs, LocalSlot{N: f.NewLocal(v.Pos, v.Type), Type: v.Type, Off: 0})
 				locations[v.Type] = locs
 			}
 			// Use the stack variable at that index for v.
diff --git a/src/cmd/compile/internal/ssa/stackframe.go b/src/cmd/compile/internal/ssa/stackframe.go
deleted file mode 100644
index 08be62a..0000000
--- a/src/cmd/compile/internal/ssa/stackframe.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// stackframe calls back into the frontend to assign frame offsets.
-func stackframe(f *Func) {
-	f.fe.AllocFrame(f)
-}
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index e89024b..4eaab40 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -552,7 +552,11 @@
 // if its use count drops to 0.
 func (v *Value) removeable() bool {
 	if v.Type.IsVoid() {
-		// Void ops, like nil pointer checks, must stay.
+		// Void ops (inline marks) must stay.
+		return false
+	}
+	if opcodeTable[v.Op].nilCheck {
+		// Nil pointer checks must stay.
 		return false
 	}
 	if v.Type.IsMemory() {
@@ -581,3 +585,36 @@
 	nameOff := v.Aux.(*AuxNameOffset)
 	return nameOff.Name, nameOff.Offset
 }
+
+// CanSSA reports whether values of type t can be represented as a Value.
+func CanSSA(t *types.Type) bool {
+	types.CalcSize(t)
+	if t.Size() > int64(4*types.PtrSize) {
+		// 4*Widthptr is an arbitrary constant. We want it
+		// to be at least 3*Widthptr so slices can be registerized.
+		// Too big and we'll introduce too much register pressure.
+		return false
+	}
+	switch t.Kind() {
+	case types.TARRAY:
+		// We can't do larger arrays because dynamic indexing is
+		// not supported on SSA variables.
+		// TODO: allow if all indexes are constant.
+		if t.NumElem() <= 1 {
+			return CanSSA(t.Elem())
+		}
+		return false
+	case types.TSTRUCT:
+		if t.NumFields() > MaxStruct {
+			return false
+		}
+		for _, t1 := range t.Fields() {
+			if !CanSSA(t1.Type) {
+				return false
+			}
+		}
+		return true
+	default:
+		return true
+	}
+}
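
CanSSA above is the compiler's predicate over *types.Type, moved here from ssagen (the old TypeOK). For intuition only, the same size-and-shape rules can be imitated over ordinary Go types with reflect; the four-word limit is taken from the code above, and the field cap is assumed to match ssa.MaxStruct:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

const maxStructFields = 4 // assumed to mirror ssa.MaxStruct

// canSSALike imitates the shape of ssa.CanSSA for ordinary Go types: small
// enough to fit in a few words, arrays of at most one element, and structs
// whose every field is itself representable.
func canSSALike(t reflect.Type) bool {
	ptrSize := unsafe.Sizeof(uintptr(0))
	if t.Size() > 4*ptrSize {
		return false
	}
	switch t.Kind() {
	case reflect.Array:
		if t.Len() <= 1 {
			return canSSALike(t.Elem())
		}
		return false
	case reflect.Struct:
		if t.NumField() > maxStructFields {
			return false
		}
		for i := 0; i < t.NumField(); i++ {
			if !canSSALike(t.Field(i).Type) {
				return false
			}
		}
		return true
	default:
		return true
	}
}

func main() {
	fmt.Println(canSSALike(reflect.TypeOf([]byte(nil))))                    // true: a slice is three words
	fmt.Println(canSSALike(reflect.TypeOf([8]int64{})))                     // false: too big, multi-element array
	fmt.Println(canSSALike(reflect.TypeOf(struct{ a, b, c, d, e bool }{}))) // false: more than maxStructFields fields
}
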
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index bd9e0b8..1caccb7 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -250,6 +250,7 @@
 		// to a new block.
 		var last *Value
 		var start, end int
+		var nonPtrStores int
 		values := b.Values
 	FindSeq:
 		for i := len(values) - 1; i >= 0; i-- {
@@ -261,8 +262,17 @@
 					last = w
 					end = i + 1
 				}
+				nonPtrStores = 0
 			case OpVarDef, OpVarLive:
 				continue
+			case OpStore:
+				if last == nil {
+					continue
+				}
+				nonPtrStores++
+				if nonPtrStores > 2 {
+					break FindSeq
+				}
 			default:
 				if last == nil {
 					continue
@@ -309,7 +319,7 @@
 						}
 
 						t := val.Type.Elem()
-						tmp := f.fe.Auto(w.Pos, t)
+						tmp := f.NewLocal(w.Pos, t)
 						mem = b.NewValue1A(w.Pos, OpVarDef, types.TypeMem, tmp, mem)
 						tmpaddr := b.NewValue2A(w.Pos, OpLocalAddr, t.PtrTo(), tmp, sp, mem)
 						siz := t.Size()
@@ -353,7 +363,7 @@
 		memThen := mem
 		var curCall *Value
 		var curPtr *Value
-		addEntry := func(v *Value) {
+		addEntry := func(pos src.XPos, v *Value) {
 			if curCall == nil || curCall.AuxInt == maxEntries {
 				t := types.NewTuple(types.Types[types.TUINTPTR].PtrTo(), types.TypeMem)
 				curCall = bThen.NewValue1(pos, OpWB, t, memThen)
@@ -394,7 +404,7 @@
 			val := w.Args[1]
 			if !srcs.contains(val.ID) && needWBsrc(val) {
 				srcs.add(val.ID)
-				addEntry(val)
+				addEntry(pos, val)
 			}
 			if !dsts.contains(ptr.ID) && needWBdst(ptr, w.Args[2], zeroes) {
 				dsts.add(ptr.ID)
@@ -407,7 +417,7 @@
 				// combine the read and the write.
 				oldVal := bThen.NewValue2(pos, OpLoad, types.Types[types.TUINTPTR], ptr, memThen)
 				// Save old value to write buffer.
-				addEntry(oldVal)
+				addEntry(pos, oldVal)
 			}
 			f.fe.Func().SetWBPos(pos)
 			nWBops--
@@ -449,6 +459,7 @@
 
 		// Do raw stores after merge point.
 		for _, w := range stores {
+			pos := w.Pos
 			switch w.Op {
 			case OpStoreWB:
 				ptr := w.Args[0]
@@ -483,6 +494,10 @@
 				mem.Aux = w.Aux
 			case OpVarDef, OpVarLive:
 				mem = bEnd.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, mem)
+			case OpStore:
+				ptr := w.Args[0]
+				val := w.Args[1]
+				mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem)
 			}
 		}
 
@@ -656,7 +671,7 @@
 	for i := 0; i < nargs; i++ {
 		argTypes[i] = typ
 	}
-	call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(nil, argTypes, nil)))
+	call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(argTypes, nil)))
 	call.AddArgs(args...)
 	call.AuxInt = int64(nargs) * typ.Size()
 	return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, call)
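
The new nonPtrStores counter lets a write-barrier sequence absorb up to two intervening plain OpStores instead of being cut at the first one; those stores are then re-emitted after the merge point by the new OpStore case above. The backward scan can be sketched over a plain op list like this (op names are stand-ins):

package main

import "fmt"

// findSeq scans ops from the end, like the FindSeq loop above: it grows the
// window while it sees write-barrier stores, tolerates at most two plain
// stores inside the window, and stops at anything else (or a third plain store).
func findSeq(ops []string) (start, end int) {
	last := -1
	nonPtrStores := 0
	end = len(ops)
	for i := len(ops) - 1; i >= 0; i-- {
		switch ops[i] {
		case "StoreWB", "MoveWB", "ZeroWB":
			start = i
			if last < 0 {
				last = i
				end = i + 1
			}
			nonPtrStores = 0
		case "VarDef", "VarLive":
			// ignored; never ends the sequence
		case "Store":
			if last < 0 {
				continue
			}
			nonPtrStores++
			if nonPtrStores > 2 {
				return start, end
			}
		default:
			if last < 0 {
				continue
			}
			return start, end
		}
	}
	if last < 0 {
		return 0, 0
	}
	return start, end
}

func main() {
	ops := []string{"Load", "StoreWB", "Store", "Store", "StoreWB", "Store"}
	s, e := findSeq(ops)
	fmt.Println(ops[s:e]) // [StoreWB Store Store StoreWB]
}
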
diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go
index c97d60b..56af9ce 100644
--- a/src/cmd/compile/internal/ssagen/abi.go
+++ b/src/cmd/compile/internal/ssagen/abi.go
@@ -41,12 +41,8 @@
 // both to use the full path, which matches compiler-generated linker
 // symbol names.
 func (s *SymABIs) canonicalize(linksym string) string {
-	// If the symbol is already prefixed with "", rewrite it to start
-	// with LocalPkg.Prefix.
-	//
-	// TODO(mdempsky): Have cmd/asm stop writing out symbols like this.
 	if strings.HasPrefix(linksym, `"".`) {
-		return types.LocalPkg.Prefix + linksym[2:]
+		panic("non-canonical symbol name: " + linksym)
 	}
 	return linksym
 }
@@ -125,11 +121,7 @@
 	// This may generate new decls for the wrappers, but we
 	// specifically *don't* want to visit those, lest we create
 	// wrappers for wrappers.
-	for _, fn := range typecheck.Target.Decls {
-		if fn.Op() != ir.ODCLFUNC {
-			continue
-		}
-		fn := fn.(*ir.Func)
+	for _, fn := range typecheck.Target.Funcs {
 		nam := fn.Nname
 		if ir.IsBlank(nam) {
 			continue
@@ -241,11 +233,10 @@
 
 	// Q: is this needed?
 	savepos := base.Pos
-	savedclcontext := typecheck.DeclContext
 	savedcurfn := ir.CurFunc
 
-	base.Pos = base.AutogeneratedPos
-	typecheck.DeclContext = ir.PEXTERN
+	pos := base.AutogeneratedPos
+	base.Pos = pos
 
 	// At the moment we don't support wrapping a method, we'd need machinery
 	// below to handle the receiver. Panic if we see this scenario.
@@ -256,10 +247,12 @@
 	}
 
 	// Reuse f's types.Sym to create a new ODCLFUNC/function.
-	fn := typecheck.DeclFunc(f.Nname.Sym(), nil,
-		typecheck.NewFuncParams(ft.Params(), true),
-		typecheck.NewFuncParams(ft.Results(), false))
+	// TODO(mdempsky): Means we can't set sym.Def in Declfunc, ugh.
+	fn := ir.NewFunc(pos, pos, f.Sym(), types.NewSignature(nil,
+		typecheck.NewFuncParams(ft.Params()),
+		typecheck.NewFuncParams(ft.Results())))
 	fn.ABI = wrapperABI
+	typecheck.DeclFunc(fn)
 
 	fn.SetABIWrapper(true)
 	fn.SetDupok(true)
@@ -328,15 +321,11 @@
 
 	typecheck.FinishFuncBody()
 
-	typecheck.Func(fn)
 	ir.CurFunc = fn
 	typecheck.Stmts(fn.Body)
 
-	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
-
 	// Restore previous context.
 	base.Pos = savepos
-	typecheck.DeclContext = savedclcontext
 	ir.CurFunc = savedcurfn
 }
 
@@ -443,7 +432,7 @@
 		//
 		// 	(import "a_module" "add" (func (param i32 i32) (result i32)))
 		abiConfig := AbiForBodylessFuncStackMap(f)
-		abiInfo := abiConfig.ABIAnalyzeFuncType(f.Type().FuncType())
+		abiInfo := abiConfig.ABIAnalyzeFuncType(f.Type())
 		wi.Params = paramsToWasmFields(f, abiInfo, abiInfo.InParams())
 		wi.Results = resultsToWasmFields(f, abiInfo, abiInfo.OutParams())
 	}
diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go
index 9de1b9a..b8756ee 100644
--- a/src/cmd/compile/internal/ssagen/nowb.go
+++ b/src/cmd/compile/internal/ssagen/nowb.go
@@ -56,11 +56,8 @@
 	// important to handle it for this check, so we model it
 	// directly. This has to happen before transforming closures in walk since
 	// it's a lot harder to work out the argument after.
-	for _, n := range typecheck.Target.Decls {
-		if n.Op() != ir.ODCLFUNC {
-			continue
-		}
-		c.curfn = n.(*ir.Func)
+	for _, n := range typecheck.Target.Funcs {
+		c.curfn = n
 		if c.curfn.ABIWrapper() {
 			// We only want "real" calls to these
 			// functions, not the generated ones within
@@ -78,14 +75,14 @@
 		return
 	}
 	n := nn.(*ir.CallExpr)
-	if n.X == nil || n.X.Op() != ir.ONAME {
+	if n.Fun == nil || n.Fun.Op() != ir.ONAME {
 		return
 	}
-	fn := n.X.(*ir.Name)
+	fn := n.Fun.(*ir.Name)
 	if fn.Class != ir.PFUNC || fn.Defn == nil {
 		return
 	}
-	if !types.IsRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
+	if types.RuntimeSymName(fn.Sym()) != "systemstack" {
 		return
 	}
 
@@ -101,9 +98,6 @@
 	default:
 		base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
 	}
-	if callee.Op() != ir.ODCLFUNC {
-		base.Fatalf("expected ODCLFUNC node, got %+v", callee)
-	}
 	c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
 }
 
@@ -139,12 +133,7 @@
 	// q is the queue of ODCLFUNC Nodes to visit in BFS order.
 	var q ir.NameQueue
 
-	for _, n := range typecheck.Target.Decls {
-		if n.Op() != ir.ODCLFUNC {
-			continue
-		}
-		fn := n.(*ir.Func)
-
+	for _, fn := range typecheck.Target.Funcs {
 		symToFunc[fn.LSym] = fn
 
 		// Make nowritebarrierrec functions BFS roots.
diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go
index 9fd3f2a..e7a0699 100644
--- a/src/cmd/compile/internal/ssagen/pgen.go
+++ b/src/cmd/compile/internal/ssagen/pgen.go
@@ -22,44 +22,65 @@
 )
 
 // cmpstackvarlt reports whether the stack variable a sorts before b.
-//
-// Sort the list of stack variables. Autos after anything else,
-// within autos, unused after used, within used, things with
-// pointers first, zeroed things first, and then decreasing size.
-// Because autos are laid out in decreasing addresses
-// on the stack, pointers first, zeroed things first and decreasing size
-// really means, in memory, things with pointers needing zeroing at
-// the top of the stack and increasing in size.
-// Non-autos sort on offset.
 func cmpstackvarlt(a, b *ir.Name) bool {
+	// Sort non-autos before autos.
 	if needAlloc(a) != needAlloc(b) {
 		return needAlloc(b)
 	}
 
+	// If both are non-auto (e.g., parameters, results), then sort by
+	// frame offset (defined by ABI).
 	if !needAlloc(a) {
 		return a.FrameOffset() < b.FrameOffset()
 	}
 
+	// From here on, a and b are both autos (i.e., local variables).
+
+	// Sort used before unused (so AllocFrame can truncate unused
+	// variables).
 	if a.Used() != b.Used() {
 		return a.Used()
 	}
 
+	// Sort pointer-typed before non-pointer types.
+	// Keeps the stack's GC bitmap compact.
 	ap := a.Type().HasPointers()
 	bp := b.Type().HasPointers()
 	if ap != bp {
 		return ap
 	}
 
+	// Group variables that need zeroing, so we can efficiently zero
+	// them all at once.
 	ap = a.Needzero()
 	bp = b.Needzero()
 	if ap != bp {
 		return ap
 	}
 
-	if a.Type().Size() != b.Type().Size() {
-		return a.Type().Size() > b.Type().Size()
+	// Sort variables in descending alignment order, so we can optimally
+	// pack variables into the frame.
+	if a.Type().Alignment() != b.Type().Alignment() {
+		return a.Type().Alignment() > b.Type().Alignment()
 	}
 
+	// Sort normal variables before open-coded-defer slots, so that the
+	// latter are grouped together and near the top of the frame (to
+	// minimize varint encoding of their varp offset).
+	if a.OpenDeferSlot() != b.OpenDeferSlot() {
+		return a.OpenDeferSlot()
+	}
+
+	// If a and b are both open-coded defer slots, then order them by
+	// index in descending order, so they'll be laid out in the frame in
+	// ascending order.
+	//
+	// Their index was saved in FrameOffset in state.openDeferSave.
+	if a.OpenDeferSlot() {
+		return a.FrameOffset() > b.FrameOffset()
+	}
+
+	// Tie breaker for stable results.
 	return a.Sym().Name < b.Sym().Name
 }
 
@@ -100,6 +121,14 @@
 
 	// Mark the PAUTO's unused.
 	for _, ln := range fn.Dcl {
+		if ln.OpenDeferSlot() {
+			// Open-coded defer slots have indices that were assigned
+			// upfront during SSA construction, but the defer statement can
+			// later get removed during deadcode elimination (#61895). To
+			// keep their relative offsets correct, treat them all as used.
+			continue
+		}
+
 		if needAlloc(ln) {
 			ln.SetUsed(false)
 		}
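
The rewritten cmpstackvarlt spells out the frame-layout ordering key by key: non-autos by ABI offset, then used before unused, pointer-carrying first, zero-needing first, larger alignment first, open-coded defer slots separated from other autos and ordered among themselves by their saved index, with the symbol name as the final tie-break. A standalone comparator over a toy record, usable with sort.SliceStable, might read like this (field names are illustrative, not the compiler's):

package main

import (
	"fmt"
	"sort"
)

// stackVar is a toy stand-in for *ir.Name.
type stackVar struct {
	name          string
	needAlloc     bool  // auto that needs a frame slot
	frameOffset   int64 // ABI offset (non-autos) or defer-slot index (defer slots)
	used          bool
	hasPointers   bool
	needZero      bool
	alignment     int64
	openDeferSlot bool
}

// less applies the same comparison keys, in the same order, as cmpstackvarlt above.
func less(a, b stackVar) bool {
	if a.needAlloc != b.needAlloc {
		return b.needAlloc
	}
	if !a.needAlloc {
		return a.frameOffset < b.frameOffset
	}
	if a.used != b.used {
		return a.used
	}
	if a.hasPointers != b.hasPointers {
		return a.hasPointers
	}
	if a.needZero != b.needZero {
		return a.needZero
	}
	if a.alignment != b.alignment {
		return a.alignment > b.alignment
	}
	if a.openDeferSlot != b.openDeferSlot {
		return a.openDeferSlot
	}
	if a.openDeferSlot {
		return a.frameOffset > b.frameOffset
	}
	return a.name < b.name
}

func main() {
	vars := []stackVar{
		{name: "x", needAlloc: true, used: true, alignment: 8},
		{name: "p", needAlloc: true, used: true, hasPointers: true, alignment: 8},
		{name: "arg0", frameOffset: 16},
		{name: "tmp", needAlloc: true, used: false, alignment: 8},
	}
	sort.SliceStable(vars, func(i, j int) bool { return less(vars[i], vars[j]) })
	for _, v := range vars {
		fmt.Println(v.name) // arg0, p, x, tmp
	}
}
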
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 597a196..c794d6f 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -27,6 +27,7 @@
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
+	"cmd/internal/objabi"
 	"cmd/internal/src"
 	"cmd/internal/sys"
 
@@ -86,6 +87,8 @@
 	_ = types.NewPtr(types.Types[types.TINT16])                             // *int16
 	_ = types.NewPtr(types.Types[types.TINT64])                             // *int64
 	_ = types.NewPtr(types.ErrorType)                                       // *error
+	_ = types.NewPtr(reflectdata.MapType())                                 // *runtime.hmap
+	_ = types.NewPtr(deferstruct())                                         // *runtime._defer
 	types.NewPtrCacheEnabled = false
 	ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
 	ssaConfig.Race = base.Flag.Race
@@ -100,6 +103,7 @@
 	ir.Syms.CgoCheckPtrWrite = typecheck.LookupRuntimeFunc("cgoCheckPtrWrite")
 	ir.Syms.CheckPtrAlignment = typecheck.LookupRuntimeFunc("checkptrAlignment")
 	ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
+	ir.Syms.Deferprocat = typecheck.LookupRuntimeFunc("deferprocat")
 	ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
 	ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
 	ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
@@ -114,6 +118,7 @@
 	ir.Syms.GCWriteBarrier[7] = typecheck.LookupRuntimeFunc("gcWriteBarrier8")
 	ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
 	ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
+	ir.Syms.InterfaceSwitch = typecheck.LookupRuntimeFunc("interfaceSwitch")
 	ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove")
 	ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
 	ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
@@ -128,10 +133,13 @@
 	ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
 	ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
 	ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
+	ir.Syms.Racefuncenter = typecheck.LookupRuntimeFunc("racefuncenter")
+	ir.Syms.Racefuncexit = typecheck.LookupRuntimeFunc("racefuncexit")
 	ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
 	ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
 	ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
 	ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
+	ir.Syms.TypeAssert = typecheck.LookupRuntimeFunc("typeAssert")
 	ir.Syms.WBZero = typecheck.LookupRuntimeFunc("wbZero")
 	ir.Syms.WBMove = typecheck.LookupRuntimeFunc("wbMove")
 	ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT")       // bool
@@ -248,30 +256,6 @@
 	return a
 }
 
-// dvarint writes a varint v to the funcdata in symbol x and returns the new offset.
-func dvarint(x *obj.LSym, off int, v int64) int {
-	if v < 0 || v > 1e9 {
-		panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
-	}
-	if v < 1<<7 {
-		return objw.Uint8(x, off, uint8(v))
-	}
-	off = objw.Uint8(x, off, uint8((v&127)|128))
-	if v < 1<<14 {
-		return objw.Uint8(x, off, uint8(v>>7))
-	}
-	off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
-	if v < 1<<21 {
-		return objw.Uint8(x, off, uint8(v>>14))
-	}
-	off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
-	if v < 1<<28 {
-		return objw.Uint8(x, off, uint8(v>>21))
-	}
-	off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
-	return objw.Uint8(x, off, uint8(v>>28))
-}
-
 // emitOpenDeferInfo emits FUNCDATA information about the defers in a function
 // that is using open-coded defers.  This funcdata is used to determine the active
 // defers in a function and execute those defers during panic processing.
@@ -282,47 +266,59 @@
 // top of the local variables) for their starting address. The format is:
 //
 //   - Offset of the deferBits variable
-//   - Number of defers in the function
-//   - Information about each defer call, in reverse order of appearance in the function:
-//   - Offset of the closure value to call
+//   - Offset of the first closure slot (the rest are laid out consecutively).
 func (s *state) emitOpenDeferInfo() {
+	firstOffset := s.openDefers[0].closureNode.FrameOffset()
+
+	// Verify that cmpstackvarlt laid out the slots in order.
+	for i, r := range s.openDefers {
+		have := r.closureNode.FrameOffset()
+		want := firstOffset + int64(i)*int64(types.PtrSize)
+		if have != want {
+			base.FatalfAt(s.curfn.Pos(), "unexpected frame offset for open-coded defer slot #%v: have %v, want %v", i, have, want)
+		}
+	}
+
 	x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
 	x.Set(obj.AttrContentAddressable, true)
 	s.curfn.LSym.Func().OpenCodedDeferInfo = x
+
 	off := 0
-	off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
-	off = dvarint(x, off, int64(len(s.openDefers)))
-
-	// Write in reverse-order, for ease of running in that order at runtime
-	for i := len(s.openDefers) - 1; i >= 0; i-- {
-		r := s.openDefers[i]
-		off = dvarint(x, off, -r.closureNode.FrameOffset())
-	}
-}
-
-func okOffset(offset int64) int64 {
-	if offset == types.BOGUS_FUNARG_OFFSET {
-		panic(fmt.Errorf("Bogus offset %d", offset))
-	}
-	return offset
+	off = objw.Uvarint(x, off, uint64(-s.deferBitsTemp.FrameOffset()))
+	off = objw.Uvarint(x, off, uint64(-firstOffset))
 }
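
The hand-rolled dvarint encoder that used to live in this file is gone; the open-defer funcdata is now just two offsets written with objw.Uvarint, i.e. the usual base-128 varint layout that encoding/binary also produces. A quick sketch of the resulting bytes, with invented frame offsets:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// The open-defer funcdata is now two uvarints:
	// -deferBitsOffset followed by -firstClosureSlotOffset.
	deferBitsOffset := int64(-8) // hypothetical frame offsets
	firstSlotOffset := int64(-16)

	var buf []byte
	buf = binary.AppendUvarint(buf, uint64(-deferBitsOffset))
	buf = binary.AppendUvarint(buf, uint64(-firstSlotOffset))
	fmt.Printf("% x\n", buf) // 08 10 — one byte each while the offsets stay below 128
}
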
 
 // buildssa builds an SSA function for fn.
 // worker indicates which of the backend workers is doing the processing.
 func buildssa(fn *ir.Func, worker int) *ssa.Func {
 	name := ir.FuncName(fn)
+
+	abiSelf := abiForFunc(fn, ssaConfig.ABI0, ssaConfig.ABI1)
+
 	printssa := false
-	if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
-		pkgDotName := base.Ctxt.Pkgpath + "." + name
-		printssa = name == ssaDump ||
-			strings.HasSuffix(pkgDotName, ssaDump) && (pkgDotName == ssaDump || strings.HasSuffix(pkgDotName, "/"+ssaDump))
+	// match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
+	// optionally allows an ABI suffix specification in the GOSSAHASH, e.g. "(*Reader).Reset<0>" etc
+	if strings.Contains(ssaDump, name) { // in all the cases the function name is entirely contained within the GOSSAFUNC string.
+		nameOptABI := name
+		if strings.Contains(ssaDump, ",") { // ABI specification
+			nameOptABI = ssa.FuncNameABI(name, abiSelf.Which())
+		} else if strings.HasSuffix(ssaDump, ">") { // if they use the linker syntax instead....
+			l := len(ssaDump)
+			if l >= 3 && ssaDump[l-3] == '<' {
+				nameOptABI = ssa.FuncNameABI(name, abiSelf.Which())
+				ssaDump = ssaDump[:l-3] + "," + ssaDump[l-2:l-1]
+			}
+		}
+		pkgDotName := base.Ctxt.Pkgpath + "." + nameOptABI
+		printssa = nameOptABI == ssaDump || // "(*Reader).Reset"
+			pkgDotName == ssaDump || // "compress/gzip.(*Reader).Reset"
+			strings.HasSuffix(pkgDotName, ssaDump) && strings.HasSuffix(pkgDotName, "/"+ssaDump) // "gzip.(*Reader).Reset"
 	}
+
 	var astBuf *bytes.Buffer
 	if printssa {
 		astBuf = &bytes.Buffer{}
-		ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
 		ir.FDumpList(astBuf, "buildssa-body", fn.Body)
-		ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
 		if ssaDumpStdout {
 			fmt.Println("generating SSA for", name)
 			fmt.Print(astBuf.String())
@@ -339,27 +335,36 @@
 	}
 	s.checkPtrEnabled = ir.ShouldCheckPtr(fn, 1)
 
+	if base.Flag.Cfg.Instrumenting && fn.Pragma&ir.Norace == 0 && !fn.Linksym().ABIWrapper() {
+		if !base.Flag.Race || !objabi.LookupPkgSpecial(fn.Sym().Pkg.Path).NoRaceFunc {
+			s.instrumentMemory = true
+		}
+		if base.Flag.Race {
+			s.instrumentEnterExit = true
+		}
+	}
+
 	fe := ssafn{
 		curfn: fn,
 		log:   printssa && ssaDumpStdout,
 	}
 	s.curfn = fn
 
-	s.f = ssa.NewFunc(&fe)
+	cache := &ssaCaches[worker]
+	cache.Reset()
+
+	s.f = ssaConfig.NewFunc(&fe, cache)
 	s.config = ssaConfig
 	s.f.Type = fn.Type()
-	s.f.Config = ssaConfig
-	s.f.Cache = &ssaCaches[worker]
-	s.f.Cache.Reset()
 	s.f.Name = name
 	s.f.PrintOrHtmlSSA = printssa
 	if fn.Pragma&ir.Nosplit != 0 {
 		s.f.NoSplit = true
 	}
-	s.f.ABI0 = ssaConfig.ABI0.Copy() // Make a copy to avoid racy map operations in type-register-width cache.
-	s.f.ABI1 = ssaConfig.ABI1.Copy()
-	s.f.ABIDefault = abiForFunc(nil, s.f.ABI0, s.f.ABI1)
-	s.f.ABISelf = abiForFunc(fn, s.f.ABI0, s.f.ABI1)
+	s.f.ABI0 = ssaConfig.ABI0
+	s.f.ABI1 = ssaConfig.ABI1
+	s.f.ABIDefault = abiForFunc(nil, ssaConfig.ABI0, ssaConfig.ABI1)
+	s.f.ABISelf = abiSelf
 
 	s.panics = map[funcLine]*ssa.Block{}
 	s.softFloat = s.config.SoftFloat
@@ -371,7 +376,7 @@
 	if printssa {
 		ssaDF := ssaDumpFile
 		if ssaDir != "" {
-			ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
+			ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+s.f.NameABI()+".html")
 			ssaD := filepath.Dir(ssaDF)
 			os.MkdirAll(ssaD, 0755)
 		}
@@ -396,16 +401,16 @@
 		// preceding the deferreturn/ret code that we don't track correctly.
 		s.hasOpenDefers = false
 	}
-	if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
-		// Skip doing open defers if there is any extra exit code (likely
-		// race detection), since we will not generate that code in the
-		// case of the extra deferreturn/ret segment.
+	if s.hasOpenDefers && s.instrumentEnterExit {
+		// Skip doing open defers if we need to instrument function
+		// returns for the race detector, since we will not generate that
+		// code in the case of the extra deferreturn/ret segment.
 		s.hasOpenDefers = false
 	}
 	if s.hasOpenDefers {
 		// Similarly, skip if there are any heap-allocated result
 		// parameters that need to be copied back to their stack slots.
-		for _, f := range s.curfn.Type().Results().FieldSlice() {
+		for _, f := range s.curfn.Type().Results() {
 			if !f.Nname.(*ir.Name).OnStack() {
 				s.hasOpenDefers = false
 				break
@@ -492,12 +497,12 @@
 			} else { // address was taken AND/OR too large for SSA
 				paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
 				if len(paramAssignment.Registers) > 0 {
-					if TypeOK(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
+					if ssa.CanSSA(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
 						v := s.newValue0A(ssa.OpArg, n.Type(), n)
 						s.store(n.Type(), s.decladdrs[n], v)
 					} else { // Too big for SSA.
 						// Brute force, and early, do a bunch of stores from registers
-						// TODO fix the nasty storeArgOrLoad recursion in ssa/expand_calls.go so this Just Works with store of a big Arg.
+						// Note that expand calls knows about this and doesn't trouble itself with larger-than-SSA-able Args in registers.
 						s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
 					}
 				}
@@ -527,7 +532,7 @@
 			// runtime calls that did (#43701). Since we don't
 			// convert Addrtaken variables to SSA anyway, no point
 			// in promoting them either.
-			if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
+			if n.Byval() && !n.Addrtaken() && ssa.CanSSA(n.Type()) {
 				n.Class = ir.PAUTO
 				fn.Dcl = append(fn.Dcl, n)
 				s.assign(n, s.load(n.Type(), ptr), false, 0)
@@ -542,7 +547,9 @@
 	}
 
 	// Convert the AST-based IR to the SSA-based IR
-	s.stmtList(fn.Enter)
+	if s.instrumentEnterExit {
+		s.rtcall(ir.Syms.Racefuncenter, true, nil, s.newValue0(ssa.OpGetCallerPC, types.Types[types.TUINTPTR]))
+	}
 	s.zeroResults()
 	s.paramsToHeap()
 	s.stmtList(fn.Body)
@@ -567,7 +574,9 @@
 	// Main call to ssa package to compile function
 	ssa.Compile(s.f)
 
-	if s.hasOpenDefers {
+	fe.AllocFrame(s.f)
+
+	if len(s.openDefers) != 0 {
 		s.emitOpenDeferInfo()
 	}
 
@@ -613,7 +622,7 @@
 // are always live, so we need to zero them before any allocations,
 // even allocations to move params/results to the heap.
 func (s *state) zeroResults() {
-	for _, f := range s.curfn.Type().Results().FieldSlice() {
+	for _, f := range s.curfn.Type().Results() {
 		n := f.Nname.(*ir.Name)
 		if !n.OnStack() {
 			// The local which points to the return value is the
@@ -622,7 +631,7 @@
 			continue
 		}
 		// Zero the stack location containing f.
-		if typ := n.Type(); TypeOK(typ) {
+		if typ := n.Type(); ssa.CanSSA(typ) {
 			s.assign(n, s.zeroVal(typ), false, 0)
 		} else {
 			if typ.HasPointers() {
@@ -636,8 +645,8 @@
 // paramsToHeap produces code to allocate memory for heap-escaped parameters
 // and to copy non-result parameters' values from the stack.
 func (s *state) paramsToHeap() {
-	do := func(params *types.Type) {
-		for _, f := range params.FieldSlice() {
+	do := func(params []*types.Field) {
+		for _, f := range params {
 			if f.Nname == nil {
 				continue // anonymous or blank parameter
 			}
@@ -671,12 +680,9 @@
 	}
 
 	// Declare variable to hold address.
-	addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
-	addr.SetType(types.NewPtr(n.Type()))
-	addr.Class = ir.PAUTO
+	sym := &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg}
+	addr := s.curfn.NewLocal(pos, sym, types.NewPtr(n.Type()))
 	addr.SetUsed(true)
-	addr.Curfn = s.curfn
-	s.curfn.Dcl = append(s.curfn.Dcl, addr)
 	types.CalcSize(addr.Type())
 
 	if n.Class == ir.PPARAMOUT {
@@ -883,11 +889,13 @@
 	// Used to deduplicate panic calls.
 	panics map[funcLine]*ssa.Block
 
-	cgoUnsafeArgs   bool
-	hasdefer        bool // whether the function contains a defer statement
-	softFloat       bool
-	hasOpenDefers   bool // whether we are doing open-coded defers
-	checkPtrEnabled bool // whether to insert checkptr instrumentation
+	cgoUnsafeArgs       bool
+	hasdefer            bool // whether the function contains a defer statement
+	softFloat           bool
+	hasOpenDefers       bool // whether we are doing open-coded defers
+	checkPtrEnabled     bool // whether to insert checkptr instrumentation
+	instrumentEnterExit bool // whether to instrument function enter/exit
+	instrumentMemory    bool // whether to instrument memory operations
 
 	// If doing open-coded defers, list of info about the defer calls in
 	// scanning order. Hence, at exit we should run these defers in reverse
@@ -935,7 +943,7 @@
 func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }
 
 func ssaMarker(name string) *ir.Name {
-	return typecheck.NewName(&types.Sym{Name: name})
+	return ir.NewNameAt(base.Pos, &types.Sym{Name: name}, nil)
 }
 
 var (
@@ -949,6 +957,7 @@
 	typVar       = ssaMarker("typ")
 	okVar        = ssaMarker("ok")
 	deferBitsVar = ssaMarker("deferBits")
+	hashVar      = ssaMarker("hash")
 )
 
 // startBlock sets the current block we're generating code in to b.
@@ -1245,7 +1254,7 @@
 		s.instrument(t, addr, kind)
 		return
 	}
-	for _, f := range t.Fields().Slice() {
+	for _, f := range t.Fields() {
 		if f.Sym.IsBlank() {
 			continue
 		}
@@ -1264,7 +1273,7 @@
 }
 
 func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
-	if !s.curfn.InstrumentBody() {
+	if !s.instrumentMemory {
 		return
 	}
 
@@ -1436,8 +1445,7 @@
 		n := n.(*ir.BlockStmt)
 		s.stmtList(n.List)
 
-	// No-ops
-	case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
+	case ir.OFALL: // no-op
 
 	// Expression statements
 	case ir.OCALLFUNC:
@@ -1451,9 +1459,9 @@
 	case ir.OCALLINTER:
 		n := n.(*ir.CallExpr)
 		s.callResult(n, callNormal)
-		if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC {
-			if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
-				n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap" || fn == "panicunsafeslicelen" || fn == "panicunsafeslicenilptr" || fn == "panicunsafestringlen" || fn == "panicunsafestringnilptr") {
+		if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME && n.Fun.(*ir.Name).Class == ir.PFUNC {
+			if fn := n.Fun.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
+				n.Fun.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap" || fn == "panicunsafeslicelen" || fn == "panicunsafeslicenilptr" || fn == "panicunsafestringlen" || fn == "panicunsafestringnilptr") {
 				m := s.mem()
 				b := s.endBlock()
 				b.Kind = ssa.BlockExit
@@ -1480,10 +1488,10 @@
 			s.openDeferRecord(n.Call.(*ir.CallExpr))
 		} else {
 			d := callDefer
-			if n.Esc() == ir.EscNever {
+			if n.Esc() == ir.EscNever && n.DeferAt == nil {
 				d = callDeferStack
 			}
-			s.callResult(n.Call.(*ir.CallExpr), d)
+			s.call(n.Call.(*ir.CallExpr), d, false, n.DeferAt)
 		}
 	case ir.OGO:
 		n := n.(*ir.GoDeferStmt)
@@ -1498,7 +1506,7 @@
 			res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true)
 		}
 		deref := false
-		if !TypeOK(n.Rhs[0].Type()) {
+		if !ssa.CanSSA(n.Rhs[0].Type()) {
 			if res.Op != ssa.OpLoad {
 				s.Fatalf("dottype of non-load")
 			}
@@ -1657,7 +1665,7 @@
 		}
 
 		var r *ssa.Value
-		deref := !TypeOK(t)
+		deref := !ssa.CanSSA(t)
 		if deref {
 			if rhs == nil {
 				r = nil // Signal assign to use OpZero.
@@ -1988,10 +1996,119 @@
 
 		s.startBlock(bEnd)
 
+	case ir.OINTERFACESWITCH:
+		n := n.(*ir.InterfaceSwitchStmt)
+		typs := s.f.Config.Types
+
+		t := s.expr(n.RuntimeType)
+		h := s.expr(n.Hash)
+		d := s.newValue1A(ssa.OpAddr, typs.BytePtr, n.Descriptor, s.sb)
+
+		// Check the cache first.
+		var merge *ssa.Block
+		if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) {
+			// Note: we can only use the cache if we have the right atomic load instruction.
+			// Double-check that here.
+			if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok {
+				s.Fatalf("atomic load not available")
+			}
+			merge = s.f.NewBlock(ssa.BlockPlain)
+			cacheHit := s.f.NewBlock(ssa.BlockPlain)
+			cacheMiss := s.f.NewBlock(ssa.BlockPlain)
+			loopHead := s.f.NewBlock(ssa.BlockPlain)
+			loopBody := s.f.NewBlock(ssa.BlockPlain)
+
+			// Pick right size ops.
+			var mul, and, add, zext ssa.Op
+			if s.config.PtrSize == 4 {
+				mul = ssa.OpMul32
+				and = ssa.OpAnd32
+				add = ssa.OpAdd32
+				zext = ssa.OpCopy
+			} else {
+				mul = ssa.OpMul64
+				and = ssa.OpAnd64
+				add = ssa.OpAdd64
+				zext = ssa.OpZeroExt32to64
+			}
+
+			// Load cache pointer out of descriptor, with an atomic load so
+			// we ensure that we see a fully written cache.
+			atomicLoad := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(typs.BytePtr, types.TypeMem), d, s.mem())
+			cache := s.newValue1(ssa.OpSelect0, typs.BytePtr, atomicLoad)
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, atomicLoad)
+
+			// Initialize hash variable.
+			s.vars[hashVar] = s.newValue1(zext, typs.Uintptr, h)
+
+			// Load mask from cache.
+			mask := s.newValue2(ssa.OpLoad, typs.Uintptr, cache, s.mem())
+			// Jump to loop head.
+			b := s.endBlock()
+			b.AddEdgeTo(loopHead)
+
+			// At loop head, get pointer to the cache entry.
+			//   e := &cache.Entries[hash&mask]
+			s.startBlock(loopHead)
+			entries := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, cache, s.uintptrConstant(uint64(s.config.PtrSize)))
+			idx := s.newValue2(and, typs.Uintptr, s.variable(hashVar, typs.Uintptr), mask)
+			idx = s.newValue2(mul, typs.Uintptr, idx, s.uintptrConstant(uint64(3*s.config.PtrSize)))
+			e := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, entries, idx)
+			//   hash++
+			s.vars[hashVar] = s.newValue2(add, typs.Uintptr, s.variable(hashVar, typs.Uintptr), s.uintptrConstant(1))
+
+			// Look for a cache hit.
+			//   if e.Typ == t { goto hit }
+			eTyp := s.newValue2(ssa.OpLoad, typs.Uintptr, e, s.mem())
+			cmp1 := s.newValue2(ssa.OpEqPtr, typs.Bool, t, eTyp)
+			b = s.endBlock()
+			b.Kind = ssa.BlockIf
+			b.SetControl(cmp1)
+			b.AddEdgeTo(cacheHit)
+			b.AddEdgeTo(loopBody)
+
+			// Look for an empty entry, the tombstone for this hash table.
+			//   if e.Typ == nil { goto miss }
+			s.startBlock(loopBody)
+			cmp2 := s.newValue2(ssa.OpEqPtr, typs.Bool, eTyp, s.constNil(typs.BytePtr))
+			b = s.endBlock()
+			b.Kind = ssa.BlockIf
+			b.SetControl(cmp2)
+			b.AddEdgeTo(cacheMiss)
+			b.AddEdgeTo(loopHead)
+
+			// On a hit, load the data fields of the cache entry.
+			//   Case = e.Case
+			//   Itab = e.Itab
+			s.startBlock(cacheHit)
+			eCase := s.newValue2(ssa.OpLoad, typs.Int, s.newValue1I(ssa.OpOffPtr, typs.IntPtr, s.config.PtrSize, e), s.mem())
+			eItab := s.newValue2(ssa.OpLoad, typs.BytePtr, s.newValue1I(ssa.OpOffPtr, typs.BytePtrPtr, 2*s.config.PtrSize, e), s.mem())
+			s.assign(n.Case, eCase, false, 0)
+			s.assign(n.Itab, eItab, false, 0)
+			b = s.endBlock()
+			b.AddEdgeTo(merge)
+
+			// On a miss, call into the runtime to get the answer.
+			s.startBlock(cacheMiss)
+		}
+
+		r := s.rtcall(ir.Syms.InterfaceSwitch, true, []*types.Type{typs.Int, typs.BytePtr}, d, t)
+		s.assign(n.Case, r[0], false, 0)
+		s.assign(n.Itab, r[1], false, 0)
+
+		if merge != nil {
+			// Cache hits merge in here.
+			b := s.endBlock()
+			b.Kind = ssa.BlockPlain
+			b.AddEdgeTo(merge)
+			s.startBlock(merge)
+		}
+
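
The cache emitted above is an open-addressed table keyed by the concrete type pointer: probe at hash&mask, walk forward, stop on a matching type (hit) or a nil entry (miss), and only on a miss call runtime.interfaceSwitch through the descriptor. In ordinary Go, with made-up types standing in for the runtime's layout, the lookup is roughly:

package main

import "fmt"

// cacheEntry and switchCache are illustrative stand-ins for the runtime's
// interface-switch cache layout (type pointer, case index, itab pointer).
type cacheEntry struct {
	typ  uintptr // 0 marks an empty slot, the miss sentinel
	kase int
	itab uintptr
}

type switchCache struct {
	mask    uintptr // len(entries)-1; the length is a power of two
	entries []cacheEntry
}

// lookup mirrors the generated probe loop: mask the hash into the table and
// walk forward until the type matches or an empty slot is found.
func (c *switchCache) lookup(typ, hash uintptr) (kase int, itab uintptr, ok bool) {
	for h := hash; ; h++ {
		e := &c.entries[h&c.mask]
		switch e.typ {
		case typ:
			return e.kase, e.itab, true // cache hit
		case 0:
			return 0, 0, false // miss: ask the runtime, which may grow the cache
		}
	}
}

func main() {
	c := &switchCache{mask: 3, entries: make([]cacheEntry, 4)}
	c.entries[2] = cacheEntry{typ: 0xdead, kase: 1, itab: 0xbeef}
	fmt.Println(c.lookup(0xdead, 2)) // 1 48879 true
	fmt.Println(c.lookup(0xf00d, 2)) // 0 0 false
}
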
 	case ir.OCHECKNIL:
 		n := n.(*ir.UnaryExpr)
 		p := s.expr(n.X)
-		s.nilCheck(p)
+		_ = s.nilCheck(p)
+		// TODO: check that throwing away the nilcheck result is ok.
 
 	case ir.OINLMARK:
 		n := n.(*ir.InlineMarkStmt)
@@ -2026,13 +2143,10 @@
 		}
 	}
 
-	var b *ssa.Block
-	var m *ssa.Value
 	// Do actual return.
 	// These currently turn into self-copies (in many cases).
-	resultFields := s.curfn.Type().Results().FieldSlice()
+	resultFields := s.curfn.Type().Results()
 	results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
-	m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
 	// Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
 	for i, f := range resultFields {
 		n := f.Nname.(*ir.Name)
@@ -2058,15 +2172,18 @@
 		}
 	}
 
-	// Run exit code. Today, this is just racefuncexit, in -race mode.
-	// TODO(register args) this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
-	// Spills in register allocation might just fix it.
-	s.stmtList(s.curfn.Exit)
+	// In -race mode, we need to call racefuncexit.
+	// Note: This has to happen after we load any heap-allocated results,
+	// otherwise races will be attributed to the caller instead.
+	if s.instrumentEnterExit {
+		s.rtcall(ir.Syms.Racefuncexit, true, nil)
+	}
 
 	results[len(results)-1] = s.mem()
+	m := s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
 	m.AddArgs(results...)
 
-	b = s.endBlock()
+	b := s.endBlock()
 	b.Kind = ssa.BlockRet
 	b.SetControl(m)
 	if s.hasdefer && s.hasOpenDefers {
@@ -2659,6 +2776,14 @@
 		n := n.(*ir.ConvExpr)
 		str := s.expr(n.X)
 		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
+		if !n.NonNil() {
+			// We need to ensure []byte("") evaluates to []byte{}, and not []byte(nil).
+			//
+			// TODO(mdempsky): Investigate using "len != 0" instead of "ptr != nil".
+			cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], ptr, s.constNil(ptr.Type))
+			zerobase := s.newValue1A(ssa.OpAddr, ptr.Type, ir.Syms.Zerobase, s.sb)
+			ptr = s.ternary(cond, ptr, zerobase)
+		}
 		len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
 		return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
 	case ir.OCFUNC:
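
The guard added above matters because a program can observe whether an empty byte slice is nil, and the conversion is pinned to the non-nil form (backed by zerobase) regardless of what the string header happens to hold. A small demonstration of the property being preserved; the first line is the expected output under the gc compiler with this change in place:

package main

import "fmt"

func main() {
	b := []byte("")
	var n []byte
	fmt.Println(b == nil, len(b), cap(b)) // false 0 0 — empty but non-nil
	fmt.Println(n == nil, len(n), cap(n)) // true 0 0 — the nil slice
}
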
@@ -2794,8 +2919,7 @@
 		}
 
 		// map <--> *hmap
-		if to.Kind() == types.TMAP && from.IsPtr() &&
-			to.MapType().Hmap == from.Elem() {
+		if to.Kind() == types.TMAP && from == types.NewPtr(reflectdata.MapType()) {
 			return v
 		}
 
@@ -3154,7 +3278,7 @@
 			p := s.addr(n)
 			return s.load(n.X.Type().Elem(), p)
 		case n.X.Type().IsArray():
-			if TypeOK(n.X.Type()) {
+			if ssa.CanSSA(n.X.Type()) {
 				// SSA can handle arrays of length at most 1.
 				bound := n.X.Type().NumElem()
 				a := s.expr(n.X)
@@ -3218,7 +3342,7 @@
 		a := s.expr(n.X)
 		return s.newValue1(ssa.OpIData, n.Type(), a)
 
-	case ir.OEFACE:
+	case ir.OMAKEFACE:
 		n := n.(*ir.BinaryExpr)
 		tab := s.expr(n.X)
 		data := s.expr(n.Y)
@@ -3357,7 +3481,7 @@
 	pa := aux.ParamAssignmentForResult(which)
 	// TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
 	// SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
-	if len(pa.Registers) == 0 && !TypeOK(t) {
+	if len(pa.Registers) == 0 && !ssa.CanSSA(t) {
 		addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
 		return s.rawLoad(t, addr)
 	}
@@ -3462,7 +3586,7 @@
 
 	// Call growslice
 	s.startBlock(grow)
-	taddr := s.expr(n.X)
+	taddr := s.expr(n.Fun)
 	r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr)
 
 	// Decompose output slice
@@ -3513,7 +3637,7 @@
 	}
 	args := make([]argRec, 0, len(n.Args[1:]))
 	for _, n := range n.Args[1:] {
-		if TypeOK(n.Type()) {
+		if ssa.CanSSA(n.Type()) {
 			args = append(args, argRec{v: s.expr(n), store: true})
 		} else {
 			v := s.addr(n)
@@ -3567,11 +3691,32 @@
 
 	if typ.IsFloat() || typ.IsString() {
 		// min/max semantics for floats are tricky because of NaNs and
-		// negative zero, so we let the runtime handle this instead.
+		// negative zero. Some architectures have instructions which
+		// we can use to generate the right result. For others we must
+		// call into the runtime instead.
 		//
 		// Strings are conceptually simpler, but we currently desugar
 		// string comparisons during walk, not ssagen.
 
+		if typ.IsFloat() {
+			switch Arch.LinkArch.Family {
+			case sys.AMD64, sys.ARM64:
+				var op ssa.Op
+				switch {
+				case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMIN:
+					op = ssa.OpMin64F
+				case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMAX:
+					op = ssa.OpMax64F
+				case typ.Kind() == types.TFLOAT32 && n.Op() == ir.OMIN:
+					op = ssa.OpMin32F
+				case typ.Kind() == types.TFLOAT32 && n.Op() == ir.OMAX:
+					op = ssa.OpMax32F
+				}
+				return fold(func(x, a *ssa.Value) *ssa.Value {
+					return s.newValue2(op, typ, x, a)
+				})
+			}
+		}
 		var name string
 		switch typ.Kind() {
 		case types.TFLOAT32:
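
On AMD64 and ARM64 the code above now lowers float min and max to dedicated ops; other targets keep calling the runtime because the built-ins must propagate NaNs and order negative zero below positive zero. Those corner cases, as specified for the min/max built-ins, are easy to spot-check:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	negZero := math.Copysign(0, -1)

	fmt.Println(min(1.0, nan))                   // NaN: any NaN operand wins
	fmt.Println(math.Signbit(min(negZero, 0.0))) // true: min prefers -0 over +0
	fmt.Println(math.Signbit(max(negZero, 0.0))) // false: max prefers +0 over -0
}
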
@@ -4077,7 +4222,6 @@
 			return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
 		},
 		sys.AMD64, sys.I386, sys.Loong64, sys.MIPS64, sys.RISCV64, sys.ARM64)
-	alias("runtime", "mulUintptr", "runtime/internal/math", "MulUintptr", all...)
 	add("runtime", "KeepAlive",
 		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
 			data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
@@ -4108,7 +4252,7 @@
 			s.vars[memVar] = s.newValue1(ssa.OpPubBarrier, types.TypeMem, s.mem())
 			return nil
 		},
-		sys.ARM64, sys.PPC64)
+		sys.ARM64, sys.PPC64, sys.RISCV64)
 
 	brev_arch := []sys.ArchFamily{sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X}
 	if buildcfg.GOPPC64 >= 10 {
@@ -4843,13 +4987,14 @@
 		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
 			return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
 		},
-		sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64)
+		sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64, sys.MIPS64)
 	alias("math/bits", "Add", "math/bits", "Add64", p8...)
+	alias("runtime/internal/math", "Add64", "math/bits", "Add64", all...)
 	addF("math/bits", "Sub64",
 		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
 			return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
 		},
-		sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64)
+		sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64, sys.MIPS64)
 	alias("math/bits", "Sub", "math/bits", "Sub64", p8...)
 	addF("math/bits", "Div64",
 		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
@@ -4950,7 +5095,7 @@
 	if n == nil {
 		return false
 	}
-	name, ok := n.X.(*ir.Name)
+	name, ok := n.Fun.(*ir.Name)
 	if !ok {
 		return false
 	}
@@ -4959,7 +5104,7 @@
 
 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
 func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
-	v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
+	v := findIntrinsic(n.Fun.Sym())(s, n, s.intrinsicArgs(n))
 	if ssa.IntrinsicsDebug > 0 {
 		x := v
 		if x == nil {
@@ -4968,7 +5113,7 @@
 		if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
 			x = x.Args[0]
 		}
-		base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
+		base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Fun.Sym().Name, x.LongString())
 	}
 	return v
 }
@@ -4989,14 +5134,14 @@
 // (as well as the deferBits variable), and this will enable us to run the proper
 // defer calls during panics.
 func (s *state) openDeferRecord(n *ir.CallExpr) {
-	if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
+	if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.Fun.Type().NumResults() != 0 {
 		s.Fatalf("defer call with arguments or results: %v", n)
 	}
 
 	opendefer := &openDeferInfo{
 		n: n,
 	}
-	fn := n.X
+	fn := n.Fun
 	// We must always store the function value in a stack slot for the
 	// runtime panic code to use. But in the defer exit code, we will
 	// call the function directly if it is a static function.
@@ -5023,7 +5168,7 @@
 // (therefore SSAable). val is the value to be stored. The function returns an SSA
 // value representing a pointer to the autotmp location.
 func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
-	if !TypeOK(t) {
+	if !ssa.CanSSA(t) {
 		s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
 	}
 	if !t.HasPointers() {
@@ -5032,6 +5177,7 @@
 	pos := val.Pos
 	temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
 	temp.SetOpenDeferSlot(true)
+	temp.SetFrameOffset(int64(len(s.openDefers))) // so cmpstackvarlt can order them
 	var addrTemp *ssa.Value
 	// Use OpVarLive to make sure stack slot for the closure is not removed by
 	// dead-store elimination
@@ -5109,7 +5255,7 @@
 		// Generate code to call the function call of the defer, using the
 		// closure that were stored in argtmps at the point of the defer
 		// statement.
-		fn := r.n.X
+		fn := r.n.Fun
 		stksize := fn.Type().ArgWidth()
 		var callArgs []*ssa.Value
 		var call *ssa.Value
@@ -5117,10 +5263,10 @@
 			v := s.load(r.closure.Type.Elem(), r.closure)
 			s.maybeNilCheckClosure(v, callDefer)
 			codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
-			aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
+			aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil))
 			call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
 		} else {
-			aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
+			aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil))
 			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
 		}
 		callArgs = append(callArgs, s.mem())
@@ -5141,29 +5287,30 @@
 }
 
 func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
-	return s.call(n, k, false)
+	return s.call(n, k, false, nil)
 }
 
 func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
-	return s.call(n, k, true)
+	return s.call(n, k, true, nil)
 }
 
 // Calls the function n using the specified call type.
 // Returns the address of the return value (or nil if none).
-func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
+func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool, deferExtra ir.Expr) *ssa.Value {
 	s.prevCall = nil
-	var callee *ir.Name    // target function (if static)
-	var closure *ssa.Value // ptr to closure to run (if dynamic)
-	var codeptr *ssa.Value // ptr to target code (if dynamic)
-	var rcvr *ssa.Value    // receiver to set
-	fn := n.X
+	var calleeLSym *obj.LSym // target function (if static)
+	var closure *ssa.Value   // ptr to closure to run (if dynamic)
+	var codeptr *ssa.Value   // ptr to target code (if dynamic)
+	var dextra *ssa.Value    // defer extra arg
+	var rcvr *ssa.Value      // receiver to set
+	fn := n.Fun
 	var ACArgs []*types.Type    // AuxCall args
 	var ACResults []*types.Type // AuxCall results
 	var callArgs []*ssa.Value   // For late-expansion, the args themselves (not stored, args to the call instead).
 
 	callABI := s.f.ABIDefault
 
-	if k != callNormal && k != callTail && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
+	if k != callNormal && k != callTail && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.Fun.Type().NumResults() != 0) {
 		s.Fatalf("go/defer call with arguments: %v", n)
 	}
 
@@ -5171,7 +5318,7 @@
 	case ir.OCALLFUNC:
 		if (k == callNormal || k == callTail) && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
 			fn := fn.(*ir.Name)
-			callee = fn
+			calleeLSym = callTargetLSym(fn)
 			if buildcfg.Experiment.RegabiArgs {
 				// This is a static call, so it may be
 				// a direct call to a non-ABIInternal
@@ -5210,12 +5357,15 @@
 			closure = iclosure
 		}
 	}
+	if deferExtra != nil {
+		dextra = s.expr(deferExtra)
+	}
 
-	params := callABI.ABIAnalyze(n.X.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
+	params := callABI.ABIAnalyze(n.Fun.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
 	types.CalcSize(fn.Type())
 	stksize := params.ArgWidth() // includes receiver, args, and results
 
-	res := n.X.Type().Results()
+	res := n.Fun.Type().Results()
 	if k == callNormal || k == callTail {
 		for _, p := range params.OutParams() {
 			ACResults = append(ACResults, p.Type)
@@ -5224,38 +5374,19 @@
 
 	var call *ssa.Value
 	if k == callDeferStack {
-		// Make a defer struct d on the stack.
 		if stksize != 0 {
 			s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
 		}
-
+		// Make a defer struct on the stack.
 		t := deferstruct()
-		d := typecheck.TempAt(n.Pos(), s.curfn, t)
-
-		if t.HasPointers() {
-			s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
-		}
-		addr := s.addr(d)
-
-		// Must match deferstruct() below and src/runtime/runtime2.go:_defer.
-		// 0: started, set in deferprocStack
-		// 1: heap, set in deferprocStack
-		// 2: openDefer
-		// 3: sp, set in deferprocStack
-		// 4: pc, set in deferprocStack
-		// 5: fn
+		_, addr := s.temp(n.Pos(), t)
 		s.store(closure.Type,
-			s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
+			s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(deferStructFnField), addr),
 			closure)
-		// 6: panic, set in deferprocStack
-		// 7: link, set in deferprocStack
-		// 8: fd
-		// 9: varp
-		// 10: framepc
 
 		// Call runtime.deferprocStack with pointer to _defer record.
 		ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
-		aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+		aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults))
 		callArgs = append(callArgs, addr, s.mem())
 		call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
 		call.AddArgs(callArgs...)
@@ -5271,6 +5402,13 @@
 			callArgs = append(callArgs, closure)
 			stksize += int64(types.PtrSize)
 			argStart += int64(types.PtrSize)
+			if dextra != nil {
+				// Extra token of type any for deferproc
+				ACArgs = append(ACArgs, types.Types[types.TINTER])
+				callArgs = append(callArgs, dextra)
+				stksize += 2 * int64(types.PtrSize)
+				argStart += 2 * int64(types.PtrSize)
+			}
 		}
 
 		// Set receiver (for interface calls).
@@ -5279,7 +5417,7 @@
 		}
 
 		// Write args.
-		t := n.X.Type()
+		t := n.Fun.Type()
 		args := n.Args
 
 		for _, p := range params.InParams() { // includes receiver for interface calls
@@ -5298,7 +5436,7 @@
 		}
 
 		for i, n := range args {
-			callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type))
+			callArgs = append(callArgs, s.putArg(n, t.Param(i).Type))
 		}
 
 		callArgs = append(callArgs, s.mem())
@@ -5306,11 +5444,15 @@
 		// call target
 		switch {
 		case k == callDefer:
-			aux := ssa.StaticAuxCall(ir.Syms.Deferproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) // TODO paramResultInfo for DeferProc
+			sym := ir.Syms.Deferproc
+			if dextra != nil {
+				sym = ir.Syms.Deferprocat
+			}
+			aux := ssa.StaticAuxCall(sym, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults)) // TODO paramResultInfo for Deferproc(at)
 			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
 		case k == callGo:
-			aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
-			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc
+			aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults))
+			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for Newproc
 		case closure != nil:
 			// rawLoad because loading the code pointer from a
 			// closure is always safe, but IsSanitizerSafeAddr
@@ -5318,14 +5460,14 @@
 			// critical that we not clobber any arguments already
 			// stored onto the stack.
 			codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
-			aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+			aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(ACArgs, ACResults))
 			call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
 		case codeptr != nil:
 			// Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
 			aux := ssa.InterfaceAuxCall(params)
 			call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
-		case callee != nil:
-			aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
+		case calleeLSym != nil:
+			aux := ssa.StaticAuxCall(calleeLSym, params)
 			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
 			if k == callTail {
 				call.Op = ssa.OpTailLECall
@@ -5368,11 +5510,11 @@
 		s.startBlock(bNext)
 	}
 
-	if res.NumFields() == 0 || k != callNormal {
+	if len(res) == 0 || k != callNormal {
 		// call has no return value. Continue with the next statement.
 		return nil
 	}
-	fp := res.Field(0)
+	fp := res[0]
 	if returnResultAddr {
 		return s.resultAddrOfCall(call, 0, fp.Type)
 	}
@@ -5552,7 +5694,7 @@
 	if n.Op() != ir.ONAME {
 		return false
 	}
-	return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
+	return s.canSSAName(n.(*ir.Name)) && ssa.CanSSA(n.Type())
 }
 
 func (s *state) canSSAName(name *ir.Name) bool {
@@ -5579,39 +5721,6 @@
 	// TODO: try to make more variables SSAable?
 }
 
-// TypeOK reports whether variables of type t are SSA-able.
-func TypeOK(t *types.Type) bool {
-	types.CalcSize(t)
-	if t.Size() > int64(4*types.PtrSize) {
-		// 4*Widthptr is an arbitrary constant. We want it
-		// to be at least 3*Widthptr so slices can be registerized.
-		// Too big and we'll introduce too much register pressure.
-		return false
-	}
-	switch t.Kind() {
-	case types.TARRAY:
-		// We can't do larger arrays because dynamic indexing is
-		// not supported on SSA variables.
-		// TODO: allow if all indexes are constant.
-		if t.NumElem() <= 1 {
-			return TypeOK(t.Elem())
-		}
-		return false
-	case types.TSTRUCT:
-		if t.NumFields() > ssa.MaxStruct {
-			return false
-		}
-		for _, t1 := range t.Fields().Slice() {
-			if !TypeOK(t1.Type) {
-				return false
-			}
-		}
-		return true
-	default:
-		return true
-	}
-}
-
 // exprPtr evaluates n to a pointer and nil-checks it.
 func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
 	p := s.expr(n)
@@ -5621,18 +5730,20 @@
 		}
 		return p
 	}
-	s.nilCheck(p)
+	p = s.nilCheck(p)
 	return p
 }
 
 // nilCheck generates nil pointer checking code.
 // Used only for automatically inserted nil checks,
 // not for user code like 'x != nil'.
-func (s *state) nilCheck(ptr *ssa.Value) {
+// Returns a "definitely not nil" copy of ptr to ensure proper ordering
+// of the uses of the post-nilcheck pointer.
+func (s *state) nilCheck(ptr *ssa.Value) *ssa.Value {
 	if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
-		return
+		return ptr
 	}
-	s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
+	return s.newValue2(ssa.OpNilCheck, ptr.Type, ptr, s.mem())
 }
 
 // boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
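
nilCheck now returns a value to use in place of the original pointer, so everything downstream is tied to the check by a data edge rather than by instruction order alone; the schedule.go change earlier in this diff strips that edge again once ordering is final. The shape of the API change, on a toy value type:

package main

import "fmt"

type value struct {
	op   string
	args []*value
}

// nilCheck returns a value that uses ptr, so later loads take the checked
// value as an argument instead of the raw pointer: ordering by data edge,
// not by side effect.
func nilCheck(ptr *value) *value {
	return &value{op: "NilCheck", args: []*value{ptr}}
}

func main() {
	p := &value{op: "Arg"}

	// Old shape: nilCheck(p); use p     — ordering relied on the scheduler.
	// New shape: p = nilCheck(p); use p — ordering is a data dependence.
	p = nilCheck(p)
	load := &value{op: "Load", args: []*value{p}}
	fmt.Println(load.args[0].op) // NilCheck: the load cannot float above the check
}
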
@@ -5791,7 +5902,7 @@
 
 	// Issue call
 	var call *ssa.Value
-	aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
+	aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(callArgTypes, results))
 	callArgs = append(callArgs, s.mem())
 	call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
 	call.AddArgs(callArgs...)
@@ -5937,7 +6048,7 @@
 // putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
 func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
 	var a *ssa.Value
-	if !TypeOK(t) {
+	if !ssa.CanSSA(t) {
 		a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
 	} else {
 		a = s.expr(n)
@@ -5955,7 +6066,7 @@
 		addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
 	}
 
-	if !TypeOK(t) {
+	if !ssa.CanSSA(t) {
 		a := s.addr(n)
 		s.move(t, addr, a)
 		return
@@ -5984,8 +6095,8 @@
 		if !t.Elem().IsArray() {
 			s.Fatalf("bad ptr to array in slice %v\n", t)
 		}
-		s.nilCheck(v)
-		ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
+		nv := s.nilCheck(v)
+		ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), nv)
 		len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
 		cap = len
 	default:
@@ -6397,7 +6508,7 @@
 	if n.ITab != nil {
 		targetItab = s.expr(n.ITab)
 	}
-	return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok)
+	return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok, n.Descriptor)
 }
 
 func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
@@ -6415,7 +6526,7 @@
 	} else {
 		target = s.expr(n.RType)
 	}
-	return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, source, target, targetItab, commaok)
+	return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, source, target, targetItab, commaok, nil)
 }
 
 // dottype1 implements a x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
@@ -6424,8 +6535,11 @@
 // target is the *runtime._type of dst.
 // If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
 // commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
-func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) {
-	byteptr := s.f.Config.Types.BytePtr
+// descriptor is a compiler-allocated internal/abi.TypeAssert whose address is passed to runtime.typeAssert when
+// the target type is a compile-time-known non-empty interface. It may be nil.
+func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, target, targetItab *ssa.Value, commaok bool, descriptor *obj.LSym) (res, resok *ssa.Value) {
+	typs := s.f.Config.Types
+	byteptr := typs.BytePtr
 	if dst.IsInterface() {
 		if dst.IsEmptyInterface() {
 			// Converting to an empty interface.
@@ -6500,23 +6614,156 @@
 		if base.Debug.TypeAssert > 0 {
 			base.WarnfAt(pos, "type assertion not inlined")
 		}
-		if !commaok {
-			fn := ir.Syms.AssertI2I
-			if src.IsEmptyInterface() {
+
+		itab := s.newValue1(ssa.OpITab, byteptr, iface)
+		data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
+
+		// First, check for nil.
+		bNil := s.f.NewBlock(ssa.BlockPlain)
+		bNonNil := s.f.NewBlock(ssa.BlockPlain)
+		bMerge := s.f.NewBlock(ssa.BlockPlain)
+		cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
+		b := s.endBlock()
+		b.Kind = ssa.BlockIf
+		b.SetControl(cond)
+		b.Likely = ssa.BranchLikely
+		b.AddEdgeTo(bNonNil)
+		b.AddEdgeTo(bNil)
+
+		s.startBlock(bNil)
+		if commaok {
+			s.vars[typVar] = itab // which will be nil
+			b := s.endBlock()
+			b.AddEdgeTo(bMerge)
+		} else {
+			// Panic if input is nil.
+			s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
+		}
+
+		// Get typ, possibly by loading out of itab.
+		s.startBlock(bNonNil)
+		typ := itab
+		if !src.IsEmptyInterface() {
+			typ = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab))
+		}
+
+		// Check the cache first.
+		var d *ssa.Value
+		if descriptor != nil {
+			d = s.newValue1A(ssa.OpAddr, byteptr, descriptor, s.sb)
+			if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) {
+				// Note: we can only use the cache if we have the right atomic load instruction.
+				// Double-check that here.
+				if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok {
+					s.Fatalf("atomic load not available")
+				}
+				// Pick right size ops.
+				var mul, and, add, zext ssa.Op
+				if s.config.PtrSize == 4 {
+					mul = ssa.OpMul32
+					and = ssa.OpAnd32
+					add = ssa.OpAdd32
+					zext = ssa.OpCopy
+				} else {
+					mul = ssa.OpMul64
+					and = ssa.OpAnd64
+					add = ssa.OpAdd64
+					zext = ssa.OpZeroExt32to64
+				}
+
+				loopHead := s.f.NewBlock(ssa.BlockPlain)
+				loopBody := s.f.NewBlock(ssa.BlockPlain)
+				cacheHit := s.f.NewBlock(ssa.BlockPlain)
+				cacheMiss := s.f.NewBlock(ssa.BlockPlain)
+
+				// Load cache pointer out of descriptor, with an atomic load so
+				// we ensure that we see a fully written cache.
+				atomicLoad := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(typs.BytePtr, types.TypeMem), d, s.mem())
+				cache := s.newValue1(ssa.OpSelect0, typs.BytePtr, atomicLoad)
+				s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, atomicLoad)
+
+				// Load hash from type or itab.
+				var hash *ssa.Value
+				if src.IsEmptyInterface() {
+					hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, typ), s.mem())
+				} else {
+					hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, itab), s.mem())
+				}
+				hash = s.newValue1(zext, typs.Uintptr, hash)
+				s.vars[hashVar] = hash
+				// Load mask from cache.
+				mask := s.newValue2(ssa.OpLoad, typs.Uintptr, cache, s.mem())
+				// Jump to loop head.
+				b := s.endBlock()
+				b.AddEdgeTo(loopHead)
+
+				// At loop head, get pointer to the cache entry.
+				//   e := &cache.Entries[hash&mask]
+				s.startBlock(loopHead)
+				idx := s.newValue2(and, typs.Uintptr, s.variable(hashVar, typs.Uintptr), mask)
+				idx = s.newValue2(mul, typs.Uintptr, idx, s.uintptrConstant(uint64(2*s.config.PtrSize)))
+				idx = s.newValue2(add, typs.Uintptr, idx, s.uintptrConstant(uint64(s.config.PtrSize)))
+				e := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, cache, idx)
+				//   hash++
+				s.vars[hashVar] = s.newValue2(add, typs.Uintptr, s.variable(hashVar, typs.Uintptr), s.uintptrConstant(1))
+
+				// Look for a cache hit.
+				//   if e.Typ == typ { goto hit }
+				eTyp := s.newValue2(ssa.OpLoad, typs.Uintptr, e, s.mem())
+				cmp1 := s.newValue2(ssa.OpEqPtr, typs.Bool, typ, eTyp)
+				b = s.endBlock()
+				b.Kind = ssa.BlockIf
+				b.SetControl(cmp1)
+				b.AddEdgeTo(cacheHit)
+				b.AddEdgeTo(loopBody)
+
+				// Look for an empty entry, the tombstone for this hash table.
+				//   if e.Typ == nil { goto miss }
+				s.startBlock(loopBody)
+				cmp2 := s.newValue2(ssa.OpEqPtr, typs.Bool, eTyp, s.constNil(typs.BytePtr))
+				b = s.endBlock()
+				b.Kind = ssa.BlockIf
+				b.SetControl(cmp2)
+				b.AddEdgeTo(cacheMiss)
+				b.AddEdgeTo(loopHead)
+
+				// On a hit, load the data fields of the cache entry.
+				//   Itab = e.Itab
+				s.startBlock(cacheHit)
+				eItab := s.newValue2(ssa.OpLoad, typs.BytePtr, s.newValue1I(ssa.OpOffPtr, typs.BytePtrPtr, s.config.PtrSize, e), s.mem())
+				s.vars[typVar] = eItab
+				b = s.endBlock()
+				b.AddEdgeTo(bMerge)
+
+				// On a miss, call into the runtime to get the answer.
+				s.startBlock(cacheMiss)
+			}
+		}
+
+		// Call into runtime to get itab for result.
+		if descriptor != nil {
+			itab = s.rtcall(ir.Syms.TypeAssert, true, []*types.Type{byteptr}, d, typ)[0]
+		} else {
+			var fn *obj.LSym
+			if commaok {
+				fn = ir.Syms.AssertE2I2
+			} else {
 				fn = ir.Syms.AssertE2I
 			}
-			data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
-			tab := s.newValue1(ssa.OpITab, byteptr, iface)
-			tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
-			return s.newValue2(ssa.OpIMake, dst, tab, data), nil
+			itab = s.rtcall(fn, true, []*types.Type{byteptr}, target, typ)[0]
 		}
-		fn := ir.Syms.AssertI2I2
-		if src.IsEmptyInterface() {
-			fn = ir.Syms.AssertE2I2
+		s.vars[typVar] = itab
+		b = s.endBlock()
+		b.AddEdgeTo(bMerge)
+
+		// Build resulting interface.
+		s.startBlock(bMerge)
+		itab = s.variable(typVar, byteptr)
+		var ok *ssa.Value
+		if commaok {
+			ok = s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
 		}
-		res = s.rtcall(fn, true, []*types.Type{dst}, target, iface)[0]
-		resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(dst))
-		return
+		return s.newValue2(ssa.OpIMake, dst, itab, data), ok
 	}
 
 	if base.Debug.TypeAssert > 0 {
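The probe loop emitted above computes the entry address as cache + (hash&mask)*2*PtrSize + PtrSize. A minimal sketch of the cache layout that arithmetic assumes, with illustrative field names (the authoritative declarations live in internal/abi):

	// Sketch of the type-assert cache layout assumed by the generated probe loop.
	// Field names here are illustrative; see internal/abi for the real definitions.
	type typeAssertCacheEntry struct {
		Typ  uintptr // *runtime._type of the value being asserted; 0 marks an empty slot
		Itab uintptr // itab (or type) to place in the result interface on a hit
	}

	type typeAssertCache struct {
		Mask    uintptr                 // len(Entries)-1, a power of two minus one
		Entries [1]typeAssertCacheEntry // variable length in practice
	}

	// &cache.Entries[hash&Mask] == cache + (hash&Mask)*2*PtrSize + PtrSize,
	// matching the mul/add/OpAddPtr sequence built above.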
@@ -6540,7 +6787,7 @@
 
 	var tmp ir.Node     // temporary for use with large types
 	var addr *ssa.Value // address of tmp
-	if commaok && !TypeOK(dst) {
+	if commaok && !ssa.CanSSA(dst) {
 		// unSSAable type, use temporary.
 		// TODO: get rid of some of these temporaries.
 		tmp, addr = s.temp(pos, dst)
@@ -6886,7 +7133,7 @@
 	n := 0
 	writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) }
 
-	// Write one non-aggrgate arg/field/element.
+	// Write one non-aggregate arg/field/element.
 	write1 := func(sz, offset int64) {
 		if offset >= _special {
 			writebyte(_offsetTooLarge)
@@ -6944,7 +7191,7 @@
 				n++ // {} counts as a component
 				break
 			}
-			for _, field := range t.Fields().Slice() {
+			for _, field := range t.Fields() {
 				if !visitType(baseOffset+field.Offset, field.Type, depth) {
 					break
 				}
@@ -7078,27 +7325,6 @@
 		s.lineRunStart = nil
 		s.SetPos(s.pp.Pos.WithNotStmt()) // It needs a non-empty Pos, but cannot be a statement boundary (yet).
 
-		// Attach a "default" liveness info. Normally this will be
-		// overwritten in the Values loop below for each Value. But
-		// for an empty block this will be used for its control
-		// instruction. We won't use the actual liveness map on a
-		// control instruction. Just mark it something that is
-		// preemptible, unless this function is "all unsafe", or
-		// the empty block is in a write barrier.
-		unsafe := liveness.IsUnsafe(f)
-		if b.Kind == ssa.BlockPlain {
-			// Empty blocks that are part of write barriers need
-			// to have their control instructions marked unsafe.
-			c := b.Succs[0].Block()
-			for _, v := range c.Values {
-				if v.Op == ssa.OpWBend {
-					unsafe = true
-					break
-				}
-			}
-		}
-		s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: unsafe}
-
 		if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
 			argLiveIdx = idx
 			p := s.pp.Prog(obj.APCDATA)
@@ -7158,6 +7384,7 @@
 				// Attach this safe point to the next
 				// instruction.
 				s.pp.NextLive = s.livenessMap.Get(v)
+				s.pp.NextUnsafe = s.livenessMap.GetUnsafe(v)
 
 				// let the backend handle it
 				Arch.SSAGenValue(&s, v)
@@ -7192,6 +7419,13 @@
 			}
 			b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
 		}
+
+		// Set unsafe mark for any end-of-block generated instructions
+		// (normally, conditional or unconditional branches).
+		// This is particularly important for empty blocks, as there
+		// are no values to inherit the unsafe mark from.
+		s.pp.NextUnsafe = s.livenessMap.GetUnsafeBlock(b)
+
 		// Emit control flow instructions for block
 		var next *ssa.Block
 		if i < len(f.Blocks)-1 && base.Flag.N == 0 {
@@ -7232,7 +7466,7 @@
 		// The results are already in memory, because they are not SSA'd
 		// when the function has defers (see canSSAName).
 		for _, o := range f.OwnAux.ABIInfo().OutParams() {
-			n := o.Name.(*ir.Name)
+			n := o.Name
 			rts, offs := o.RegisterTypesAndOffsets()
 			for i := range o.Registers {
 				Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
@@ -7344,9 +7578,9 @@
 		for i, b := range f.Blocks {
 			idToIdx[b.ID] = i
 		}
-		// Note that at this moment, Prog.Pc is a sequence number; it's
-		// not a real PC until after assembly, so this mapping has to
-		// be done later.
+		// Register a callback that will be used later to fill in PCs into location
+		// lists. At the moment, Prog.Pc is a sequence number; it's not a real PC
+		// until after assembly, so the translation needs to be deferred.
 		debugInfo.GetPC = func(b, v ssa.ID) int64 {
 			switch v {
 			case ssa.BlockStart.ID:
@@ -7537,7 +7771,7 @@
 				continue
 			}
 			n, off := ssa.AutoVar(v)
-			if n.Class != ir.PPARAM || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] {
+			if n.Class != ir.PPARAM || n.Addrtaken() || !ssa.CanSSA(n.Type()) || !s.partLiveArgs[n] {
 				continue
 			}
 			partLiveArgsSpilled[nameOff{n, off}] = true
@@ -7545,8 +7779,8 @@
 
 		// Then, insert code to spill registers if not already.
 		for _, a := range f.OwnAux.ABIInfo().InParams() {
-			n, ok := a.Name.(*ir.Name)
-			if !ok || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
+			n := a.Name
+			if n == nil || n.Addrtaken() || !ssa.CanSSA(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
 				continue
 			}
 			rts, offs := a.RegisterTypesAndOffsets()
@@ -7665,16 +7899,10 @@
 	case *ir.Name:
 		if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
 			a.Name = obj.NAME_PARAM
-			a.Sym = ir.Orig(n).(*ir.Name).Linksym()
-			a.Offset += n.FrameOffset()
-			break
-		}
-		a.Name = obj.NAME_AUTO
-		if n.Class == ir.PPARAMOUT {
-			a.Sym = ir.Orig(n).(*ir.Name).Linksym()
 		} else {
-			a.Sym = n.Linksym()
+			a.Name = obj.NAME_AUTO
 		}
+		a.Sym = n.Linksym()
 		a.Offset += n.FrameOffset()
 	default:
 		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
@@ -7917,7 +8145,7 @@
 		panic("ODOT's LHS is not a struct")
 	}
 
-	for i, f := range t.Fields().Slice() {
+	for i, f := range t.Fields() {
 		if f.Sym == n.Sel {
 			if f.Offset != n.Offset() {
 				panic("field offset doesn't match")
@@ -7962,10 +8190,6 @@
 	return data
 }
 
-func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
-	return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
-}
-
 // SplitSlot returns a slot representing the data of parent starting at offset.
 func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
 	node := parent.N
@@ -7975,23 +8199,14 @@
 		return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
 	}
 
-	s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
-	n := ir.NewNameAt(parent.N.Pos(), s)
-	s.Def = n
-	ir.AsNode(s.Def).Name().SetUsed(true)
-	n.SetType(t)
-	n.Class = ir.PAUTO
+	sym := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
+	n := e.curfn.NewLocal(parent.N.Pos(), sym, t)
+	n.SetUsed(true)
 	n.SetEsc(ir.EscNever)
-	n.Curfn = e.curfn
-	e.curfn.Dcl = append(e.curfn.Dcl, n)
 	types.CalcSize(t)
 	return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
 }
 
-func (e *ssafn) CanSSA(t *types.Type) bool {
-	return TypeOK(t)
-}
-
 // Logf logs a message from the compiler.
 func (e *ssafn) Logf(msg string, args ...interface{}) {
 	if e.log {
@@ -8043,10 +8258,6 @@
 	return nil
 }
 
-func (e *ssafn) MyImportPath() string {
-	return base.Ctxt.Pkgpath
-}
-
 func (e *ssafn) Func() *ir.Func {
 	return e.curfn
 }
@@ -8093,39 +8304,50 @@
 	return b
 }
 
-// deferstruct makes a runtime._defer structure.
+// deferStructFnField is the field index of _defer.fn.
+const deferStructFnField = 4
+
+var deferType *types.Type
+
+// deferstruct returns a type interchangeable with runtime._defer.
+// Make sure this stays in sync with runtime/runtime2.go:_defer.
 func deferstruct() *types.Type {
-	makefield := func(name string, typ *types.Type) *types.Field {
-		// Unlike the global makefield function, this one needs to set Pkg
-		// because these types might be compared (in SSA CSE sorting).
-		// TODO: unify this makefield and the global one above.
-		sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
-		return types.NewField(src.NoXPos, sym, typ)
+	if deferType != nil {
+		return deferType
 	}
-	// These fields must match the ones in runtime/runtime2.go:_defer and
-	// (*state).call above.
+
+	makefield := func(name string, t *types.Type) *types.Field {
+		sym := (*types.Pkg)(nil).Lookup(name)
+		return types.NewField(src.NoXPos, sym, t)
+	}
+
 	fields := []*types.Field{
-		makefield("started", types.Types[types.TBOOL]),
 		makefield("heap", types.Types[types.TBOOL]),
-		makefield("openDefer", types.Types[types.TBOOL]),
+		makefield("rangefunc", types.Types[types.TBOOL]),
 		makefield("sp", types.Types[types.TUINTPTR]),
 		makefield("pc", types.Types[types.TUINTPTR]),
 		// Note: the types here don't really matter. Defer structures
 		// are always scanned explicitly during stack copying and GC,
 		// so we make them uintptr type even though they are real pointers.
 		makefield("fn", types.Types[types.TUINTPTR]),
-		makefield("_panic", types.Types[types.TUINTPTR]),
 		makefield("link", types.Types[types.TUINTPTR]),
-		makefield("fd", types.Types[types.TUINTPTR]),
-		makefield("varp", types.Types[types.TUINTPTR]),
-		makefield("framepc", types.Types[types.TUINTPTR]),
+		makefield("head", types.Types[types.TUINTPTR]),
+	}
+	if name := fields[deferStructFnField].Sym.Name; name != "fn" {
+		base.Fatalf("deferStructFnField is %q, not fn", name)
 	}
 
+	n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("_defer"))
+	typ := types.NewNamed(n)
+	n.SetType(typ)
+	n.SetTypecheck(1)
+
 	// build struct holding the above fields
-	s := types.NewStruct(fields)
-	s.SetNoalg(true)
-	types.CalcStructSize(s)
-	return s
+	typ.SetUnderlying(types.NewStruct(fields))
+	types.CalcStructSize(typ)
+
+	deferType = typ
+	return typ
 }
 
 // SpillSlotAddr uses LocalSlot information to initialize an obj.Addr
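For orientation, the deferstruct definition above must stay field-for-field in sync with the runtime's record; under that assumption, the runtime-side declaration it mirrors looks roughly like the sketch below (the compiler's copy deliberately flattens pointer fields to uintptr, per the comment in deferstruct):

	// Rough shape of runtime._defer that deferstruct mirrors. This is a sketch
	// for orientation only, not the authoritative runtime declaration.
	type _defer struct {
		heap      bool    // allocated on the heap rather than on the stack
		rangefunc bool    // created for a range-over-func loop body
		sp        uintptr // sp at the time of the defer
		pc        uintptr // pc at the time of the defer
		fn        func()  // deferred function; deferStructFnField == 4 indexes this field
		link      *_defer // next defer on this goroutine's list
		head      uintptr // atomically accessed head of a rangefunc defer chain
	}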
diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go
index e39d0ee..78c332e 100644
--- a/src/cmd/compile/internal/staticdata/data.go
+++ b/src/cmd/compile/internal/staticdata/data.go
@@ -17,7 +17,6 @@
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
-	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/notsha256"
 	"cmd/internal/obj"
@@ -56,7 +55,7 @@
 	if nam.Op() != ir.ONAME {
 		base.Fatalf("InitSliceBytes %v", nam)
 	}
-	InitSlice(nam, off, slicedata(nam.Pos(), s).Linksym(), int64(len(s)))
+	InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
 }
 
 const (
@@ -97,6 +96,16 @@
 	return symdata
 }
 
+// StringSymNoCommon is like StringSym, but produces a symbol that is not content-
+// addressable. This symbol is not supposed to appear in the final binary, it is
+// only used to pass string arguments to the linker like R_USENAMEDMETHOD does.
+func StringSymNoCommon(s string) (data *obj.LSym) {
+	var nameSym obj.LSym
+	nameSym.WriteString(base.Ctxt, 0, len(s), s)
+	objw.Global(&nameSym, int32(len(s)), obj.RODATA)
+	return &nameSym
+}
+
 // maxFileSize is the maximum file size permitted by the linker
 // (see issue #9862).
 const maxFileSize = int64(2e9)
@@ -134,7 +143,7 @@
 		if readonly {
 			sym = StringSym(pos, string(data))
 		} else {
-			sym = slicedata(pos, string(data)).Linksym()
+			sym = slicedata(pos, string(data))
 		}
 		if len(hash) > 0 {
 			sum := notsha256.Sum256(data)
@@ -182,7 +191,7 @@
 	} else {
 		// Emit a zero-length data symbol
 		// and then fix up length and content to use file.
-		symdata = slicedata(pos, "").Linksym()
+		symdata = slicedata(pos, "")
 		symdata.Size = size
 		symdata.Type = objabi.SNOPTRDATA
 		info := symdata.NewFileInfo()
@@ -195,18 +204,14 @@
 
 var slicedataGen int
 
-func slicedata(pos src.XPos, s string) *ir.Name {
+func slicedata(pos src.XPos, s string) *obj.LSym {
 	slicedataGen++
 	symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
-	sym := types.LocalPkg.Lookup(symname)
-	symnode := typecheck.NewName(sym)
-	sym.Def = symnode
-
-	lsym := symnode.Linksym()
+	lsym := types.LocalPkg.Lookup(symname).LinksymABI(obj.ABI0)
 	off := dstringdata(lsym, 0, s, pos, "slice")
 	objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL)
 
-	return symnode
+	return lsym
 }
 
 func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
index 7d1dfcb..4191f69 100644
--- a/src/cmd/compile/internal/staticinit/sched.go
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -42,6 +42,11 @@
 
 	Plans map[ir.Node]*Plan
 	Temps map[ir.Node]*ir.Name
+
+	// seenMutation tracks whether we've seen an initialization
+	// expression that may have modified other package-scope variables
+	// within this package.
+	seenMutation bool
 }
 
 func (s *Schedule) append(n ir.Node) {
@@ -80,26 +85,57 @@
 	MapInitToVar[fn] = v
 }
 
+// allBlank reports whether every node in exprs is blank.
+func allBlank(exprs []ir.Node) bool {
+	for _, expr := range exprs {
+		if !ir.IsBlank(expr) {
+			return false
+		}
+	}
+	return true
+}
+
 // tryStaticInit attempts to statically execute an initialization
 // statement and reports whether it succeeded.
-func (s *Schedule) tryStaticInit(nn ir.Node) bool {
-	// Only worry about simple "l = r" assignments. Multiple
-	// variable/expression OAS2 assignments have already been
-	// replaced by multiple simple OAS assignments, and the other
-	// OAS2* assignments mostly necessitate dynamic execution
-	// anyway.
-	if nn.Op() != ir.OAS {
+func (s *Schedule) tryStaticInit(n ir.Node) bool {
+	var lhs []ir.Node
+	var rhs ir.Node
+
+	switch n.Op() {
+	default:
+		base.FatalfAt(n.Pos(), "unexpected initialization statement: %v", n)
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		lhs, rhs = []ir.Node{n.X}, n.Y
+	case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+		n := n.(*ir.AssignListStmt)
+		if len(n.Lhs) < 2 || len(n.Rhs) != 1 {
+			base.FatalfAt(n.Pos(), "unexpected shape for %v: %v", n.Op(), n)
+		}
+		lhs, rhs = n.Lhs, n.Rhs[0]
+	case ir.OCALLFUNC:
+		return false // outlined map init call; no mutations
+	}
+
+	if !s.seenMutation {
+		s.seenMutation = mayModifyPkgVar(rhs)
+	}
+
+	if allBlank(lhs) && !AnySideEffects(rhs) {
+		return true // discard
+	}
+
+	// Only worry about simple "l = r" assignments. The OAS2*
+	// assignments mostly necessitate dynamic execution anyway.
+	if len(lhs) > 1 {
 		return false
 	}
-	n := nn.(*ir.AssignStmt)
-	if ir.IsBlank(n.X) && !AnySideEffects(n.Y) {
-		// Discard.
-		return true
-	}
+
 	lno := ir.SetPos(n)
 	defer func() { base.Pos = lno }()
-	nam := n.X.(*ir.Name)
-	return s.StaticAssign(nam, 0, n.Y, nam.Type())
+
+	nam := lhs[0].(*ir.Name)
+	return s.StaticAssign(nam, 0, rhs, nam.Type())
 }
 
 // like staticassign but we are copying an already
@@ -113,6 +149,11 @@
 	if rn.Class != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
 		return false
 	}
+	if rn.Defn == nil {
+		// No explicit initialization value. Probably zeroed but perhaps
+		// supplied externally and of unknown value.
+		return false
+	}
 	if rn.Defn.Op() != ir.OAS {
 		return false
 	}
@@ -125,8 +166,16 @@
 	orig := rn
 	r := rn.Defn.(*ir.AssignStmt).Y
 	if r == nil {
-		// No explicit initialization value. Probably zeroed but perhaps
-		// supplied externally and of unknown value.
+		// types2.InitOrder doesn't include default initializers.
+		base.Fatalf("unexpected initializer: %v", rn.Defn)
+	}
+
+	// Variable may have been reassigned by a user-written function call
+	// that was invoked to initialize another global variable (#51913).
+	if s.seenMutation {
+		if base.Debug.StaticCopy != 0 {
+			base.WarnfAt(l.Pos(), "skipping static copy of %v+%v with %v", l, loff, r)
+		}
 		return false
 	}
 
@@ -373,9 +422,9 @@
 
 		var itab *ir.AddrExpr
 		if typ.IsEmptyInterface() {
-			itab = reflectdata.TypePtr(val.Type())
+			itab = reflectdata.TypePtrAt(base.Pos, val.Type())
 		} else {
-			itab = reflectdata.ITabAddr(val.Type(), typ)
+			itab = reflectdata.ITabAddrAt(base.Pos, val.Type(), typ)
 		}
 
 		// Create a copy of l to modify while we emit data.
@@ -677,10 +726,15 @@
 // Use readonlystaticname for read-only node.
 func StaticName(t *types.Type) *ir.Name {
 	// Don't use LookupNum; it interns the resulting string, but these are all unique.
-	n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
+	sym := typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))
 	statuniqgen++
-	typecheck.Declare(n, ir.PEXTERN)
-	n.SetType(t)
+
+	n := ir.NewNameAt(base.Pos, sym, t)
+	sym.Def = n
+
+	n.Class = ir.PEXTERN
+	typecheck.Target.Externs = append(typecheck.Target.Externs, n)
+
 	n.Linksym().Set(obj.AttrStatic, true)
 	return n
 }
@@ -821,6 +875,43 @@
 	return ir.Any(n, isSideEffect)
 }
 
+// mayModifyPkgVar reports whether expression n may modify any
+// package-scope variables declared within the current package.
+func mayModifyPkgVar(n ir.Node) bool {
+	// safeLHS reports whether the assigned-to variable lhs is either a
+	// local variable or a global from another package.
+	safeLHS := func(lhs ir.Node) bool {
+		v, ok := ir.OuterValue(lhs).(*ir.Name)
+		return ok && v.Op() == ir.ONAME && !(v.Class == ir.PEXTERN && v.Sym().Pkg == types.LocalPkg)
+	}
+
+	return ir.Any(n, func(n ir.Node) bool {
+		switch n.Op() {
+		case ir.OCALLFUNC, ir.OCALLINTER:
+			return !ir.IsFuncPCIntrinsic(n.(*ir.CallExpr))
+
+		case ir.OAPPEND, ir.OCLEAR, ir.OCOPY:
+			return true // could mutate a global array
+
+		case ir.OAS:
+			n := n.(*ir.AssignStmt)
+			if !safeLHS(n.X) {
+				return true
+			}
+
+		case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+			n := n.(*ir.AssignListStmt)
+			for _, lhs := range n.Lhs {
+				if !safeLHS(lhs) {
+					return true
+				}
+			}
+		}
+
+		return false
+	})
+}
+
 // canRepeat reports whether executing n multiple times has the same effect as
 // assigning n to a single variable and using that variable multiple times.
 func canRepeat(n ir.Node) bool {
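The seenMutation/mayModifyPkgVar guard above exists for initializers shaped like the following sketch (modeled on go.dev/issue/51913; names are illustrative). Once f runs during package init, statically copying b's declared value into c would bake in a stale 1, so the schedule must fall back to dynamic initialization:

	package p

	// Init order is b, then a (its initializer calls f, which references b), then c.
	var a = f() // f mutates b as a side effect
	var b = 1
	var c = b // must not be statically copied from b's declared value

	func f() int {
		b = 2 // modifies another package-scope variable
		return 0
	}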
@@ -942,7 +1033,7 @@
 		for _, c := range s {
 			strs = append(strs, ir.StringVal(c))
 		}
-		return typecheck.OrigConst(n, constant.MakeString(strings.Join(strs, "")))
+		return ir.NewConstExpr(constant.MakeString(strings.Join(strs, "")), n)
 	}
 	newList := make([]ir.Node, 0, need)
 	for i := 0; i < len(s); i++ {
@@ -955,9 +1046,7 @@
 				i2++
 			}
 
-			nl := ir.Copy(n).(*ir.AddStringExpr)
-			nl.List = s[i:i2]
-			newList = append(newList, typecheck.OrigConst(nl, constant.MakeString(strings.Join(strs, ""))))
+			newList = append(newList, ir.NewConstExpr(constant.MakeString(strings.Join(strs, "")), s[i]))
 			i = i2 - 1
 		} else {
 			newList = append(newList, s[i])
@@ -971,26 +1060,26 @@
 
 const wrapGlobalMapInitSizeThreshold = 20
 
-// tryWrapGlobalMapInit examines the node 'n' to see if it is a map
-// variable initialization, and if so, possibly returns the mapvar
-// being assigned, a new function containing the init code, and a call
-// to the function passing the mapvar. Returns will be nil if the
-// assignment is not to a map, or the map init is not big enough,
-// or if the expression being assigned to the map has side effects.
-func tryWrapGlobalMapInit(n ir.Node) (mapvar *ir.Name, genfn *ir.Func, call ir.Node) {
+// tryWrapGlobalInit returns a new outlined function to contain global
+// initializer statement n, if possible and worthwhile. Otherwise, it
+// returns nil.
+//
+// Currently, it outlines map assignment statements with large,
+// side-effect-free RHS expressions.
+func tryWrapGlobalInit(n ir.Node) *ir.Func {
 	// Look for "X = ..." where X has map type.
 	// FIXME: might also be worth trying to look for cases where
 	// the LHS is of interface type but RHS is map type.
 	if n.Op() != ir.OAS {
-		return nil, nil, nil
+		return nil
 	}
 	as := n.(*ir.AssignStmt)
 	if ir.IsBlank(as.X) || as.X.Op() != ir.ONAME {
-		return nil, nil, nil
+		return nil
 	}
 	nm := as.X.(*ir.Name)
 	if !nm.Type().IsMap() {
-		return nil, nil, nil
+		return nil
 	}
 
 	// Determine size of RHS.
@@ -1010,7 +1099,7 @@
 			fmt.Fprintf(os.Stderr, "=-= skipping %v size too small at %d\n",
 				nm, rsiz)
 		}
-		return nil, nil, nil
+		return nil
 	}
 
 	// Reject right hand sides with side effects.
@@ -1018,7 +1107,7 @@
 		if base.Debug.WrapGlobalMapDbg > 0 {
 			fmt.Fprintf(os.Stderr, "=-= rejected %v due to side effects\n", nm)
 		}
-		return nil, nil, nil
+		return nil
 	}
 
 	if base.Debug.WrapGlobalMapDbg > 1 {
@@ -1027,15 +1116,19 @@
 
 	// Create a new function that will (eventually) have this form:
 	//
-	//    func map.init.%d() {
-	//      globmapvar = <map initialization>
-	//    }
+	//	func map.init.%d() {
+	//		globmapvar = <map initialization>
+	//	}
 	//
+	// Note: cmd/link expects the function name to contain "map.init".
 	minitsym := typecheck.LookupNum("map.init.", mapinitgen)
 	mapinitgen++
-	newfn := typecheck.DeclFunc(minitsym, nil, nil, nil)
+
+	fn := ir.NewFunc(n.Pos(), n.Pos(), minitsym, types.NewSignature(nil, nil, nil))
+	fn.SetInlinabilityChecked(true) // suppress inlining (which would defeat the point)
+	typecheck.DeclFunc(fn)
 	if base.Debug.WrapGlobalMapDbg > 0 {
-		fmt.Fprintf(os.Stderr, "=-= generated func is %v\n", newfn)
+		fmt.Fprintf(os.Stderr, "=-= generated func is %v\n", fn)
 	}
 
 	// NB: we're relying on this phase being run before inlining;
@@ -1043,26 +1136,17 @@
 	// need code here that relocates or duplicates inline temps.
 
 	// Insert assignment into function body; mark body finished.
-	newfn.Body = append(newfn.Body, as)
+	fn.Body = []ir.Node{as}
 	typecheck.FinishFuncBody()
 
-	typecheck.Func(newfn)
-
-	const no = `
-	// Register new function with decls.
-	typecheck.Target.Decls = append(typecheck.Target.Decls, newfn)
-`
-
-	// Create call to function, passing mapvar.
-	fncall := ir.NewCallExpr(n.Pos(), ir.OCALL, newfn.Nname, nil)
-
 	if base.Debug.WrapGlobalMapDbg > 1 {
 		fmt.Fprintf(os.Stderr, "=-= mapvar is %v\n", nm)
-		fmt.Fprintf(os.Stderr, "=-= newfunc is %+v\n", newfn)
-		fmt.Fprintf(os.Stderr, "=-= call is %+v\n", fncall)
+		fmt.Fprintf(os.Stderr, "=-= newfunc is %+v\n", fn)
 	}
 
-	return nm, newfn, typecheck.Stmt(fncall)
+	recordFuncForVar(nm, fn)
+
+	return fn
 }
 
 // mapinitgen is a counter used to uniquify compiler-generated
@@ -1099,31 +1183,28 @@
 	varToMapInit = nil
 }
 
-// OutlineMapInits walks through a list of init statements (candidates
-// for inclusion in the package "init" function) and returns an
-// updated list in which items corresponding to map variable
-// initializations have been replaced with calls to outline "map init"
-// functions (if legal/profitable). Return value is an updated list
-// and a list of any newly generated "map init" functions.
-func OutlineMapInits(stmts []ir.Node) ([]ir.Node, []*ir.Func) {
+// OutlineMapInits replaces global map initializers with outlined
+// calls to separate "map init" functions (where possible and
+// profitable), to facilitate better dead-code elimination by the
+// linker.
+func OutlineMapInits(fn *ir.Func) {
 	if base.Debug.WrapGlobalMapCtl == 1 {
-		return stmts, nil
+		return
 	}
-	newfuncs := []*ir.Func{}
-	for i := range stmts {
-		s := stmts[i]
-		// Call the helper tryWrapGlobalMapInit to see if the LHS of
-		// this assignment is to a map var, and if so whether the RHS
-		// should be outlined into a separate init function. If the
-		// outline goes through, then replace the original init
-		// statement with the call to the outlined func, and append
-		// the new outlined func to our return list.
-		if mapvar, genfn, call := tryWrapGlobalMapInit(s); call != nil {
-			stmts[i] = call
-			newfuncs = append(newfuncs, genfn)
-			recordFuncForVar(mapvar, genfn)
+
+	outlined := 0
+	for i, stmt := range fn.Body {
+		// Attempt to outline stmt. If successful, replace it with a call
+		// to the returned wrapper function.
+		if wrapperFn := tryWrapGlobalInit(stmt); wrapperFn != nil {
+			ir.WithFunc(fn, func() {
+				fn.Body[i] = typecheck.Call(stmt.Pos(), wrapperFn.Nname, nil, false)
+			})
+			outlined++
 		}
 	}
 
-	return stmts, newfuncs
+	if base.Debug.WrapGlobalMapDbg > 1 {
+		fmt.Fprintf(os.Stderr, "=-= outlined %v map initializations\n", outlined)
+	}
 }
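To make the effect concrete, here is a hedged before/after sketch of the outlining that OutlineMapInits performs (the generated name map.init.0 and the entry count are illustrative; the real threshold is wrapGlobalMapInitSizeThreshold):

	// Before: the whole literal is assigned inside the package init body.
	var lookup = map[string]int{
		"alpha": 1, "beta": 2, // ... a large, side-effect-free literal ...
	}

	// After outlining, the compiler effectively generates (names illustrative):
	//
	//	func map.init.0() {
	//		lookup = map[string]int{"alpha": 1, "beta": 2 /* ... */}
	//	}
	//
	//	func init() {
	//		map.init.0() // replaces the inline assignment; dead-code-eliminated
	//		             // together with lookup if nothing references the map
	//	}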
diff --git a/src/cmd/compile/internal/syntax/nodes.go b/src/cmd/compile/internal/syntax/nodes.go
index 6580f05..de277fc 100644
--- a/src/cmd/compile/internal/syntax/nodes.go
+++ b/src/cmd/compile/internal/syntax/nodes.go
@@ -17,6 +17,7 @@
 	//    associated with that production; usually the left-most one
 	//    ('[' for IndexExpr, 'if' for IfStmt, etc.)
 	Pos() Pos
+	SetPos(Pos)
 	aNode()
 }
 
@@ -26,8 +27,9 @@
 	pos Pos
 }
 
-func (n *node) Pos() Pos { return n.pos }
-func (*node) aNode()     {}
+func (n *node) Pos() Pos       { return n.pos }
+func (n *node) SetPos(pos Pos) { n.pos = pos }
+func (*node) aNode()           {}
 
 // ----------------------------------------------------------------------------
 // Files
@@ -389,8 +391,9 @@
 	}
 
 	CallStmt struct {
-		Tok  token // Go or Defer
-		Call Expr
+		Tok     token // Go or Defer
+		Call    Expr
+		DeferAt Expr // argument to runtime.deferprocat
 		stmt
 	}
 
diff --git a/src/cmd/compile/internal/syntax/nodes_test.go b/src/cmd/compile/internal/syntax/nodes_test.go
index a39f08c..a86ae87 100644
--- a/src/cmd/compile/internal/syntax/nodes_test.go
+++ b/src/cmd/compile/internal/syntax/nodes_test.go
@@ -322,8 +322,5 @@
 func typeOf(n Node) string {
 	const prefix = "*syntax."
 	k := fmt.Sprintf("%T", n)
-	if strings.HasPrefix(k, prefix) {
-		return k[len(prefix):]
-	}
-	return k
+	return strings.TrimPrefix(k, prefix)
 }
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
index b5602fc..1569b5e 100644
--- a/src/cmd/compile/internal/syntax/parser.go
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -181,10 +181,9 @@
 }
 
 func trailingDigits(text string) (uint, uint, bool) {
-	// Want to use LastIndexByte below but it's not defined in Go1.4 and bootstrap fails.
-	i := strings.LastIndex(text, ":") // look from right (Windows filenames may contain ':')
+	i := strings.LastIndexByte(text, ':') // look from right (Windows filenames may contain ':')
 	if i < 0 {
-		return 0, 0, false // no ":"
+		return 0, 0, false // no ':'
 	}
 	// i >= 0
 	n, err := strconv.ParseUint(text[i+1:], 10, 0)
@@ -799,6 +798,9 @@
 		f.Name = p.name()
 		f.TParamList, f.Type = p.funcType(context)
 	} else {
+		f.Name = NewName(p.pos(), "_")
+		f.Type = new(FuncType)
+		f.Type.pos = p.pos()
 		msg := "expected name or ("
 		if context != "" {
 			msg = "expected name"
@@ -885,7 +887,7 @@
 			p.next()
 			// unaryExpr may have returned a parenthesized composite literal
 			// (see comment in operand) - remove parentheses if any
-			x.X = unparen(p.unaryExpr())
+			x.X = Unparen(p.unaryExpr())
 			return x
 		}
 
@@ -965,7 +967,7 @@
 	p.next()
 
 	x := p.pexpr(nil, p.tok == _Lparen) // keep_parens so we can report error below
-	if t := unparen(x); t != x {
+	if t := Unparen(x); t != x {
 		p.errorAt(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", s.Tok))
 		// already progressed, no need to advance
 		x = t
@@ -1145,7 +1147,7 @@
 			}
 
 			// x[i:...
-			// For better error message, don't simply use p.want(_Colon) here (issue #47704).
+			// For better error message, don't simply use p.want(_Colon) here (go.dev/issue/47704).
 			if !p.got(_Colon) {
 				p.syntaxError("expected comma, : or ]")
 				p.advance(_Comma, _Colon, _Rbrack)
@@ -1190,7 +1192,7 @@
 		case _Lbrace:
 			// operand may have returned a parenthesized complit
 			// type; accept it but complain if we have a complit
-			t := unparen(x)
+			t := Unparen(x)
 			// determine if '{' belongs to a composite literal or a block statement
 			complit_ok := false
 			switch t.(type) {
@@ -2019,7 +2021,7 @@
 
 	// distribute parameter types (len(list) > 0)
 	if named == 0 && !requireNames {
-		// all unnamed => found names are named types
+		// all unnamed and we're not in a type parameter list => found names are named types
 		for _, par := range list {
 			if typ := par.Name; typ != nil {
 				par.Type = typ
@@ -2027,40 +2029,50 @@
 			}
 		}
 	} else if named != len(list) {
-		// some named => all must have names and types
-		var pos Pos  // left-most error position (or unknown)
-		var typ Expr // current type (from right to left)
+		// some named or we're in a type parameter list => all must be named
+		var errPos Pos // left-most error position (or unknown)
+		var typ Expr   // current type (from right to left)
 		for i := len(list) - 1; i >= 0; i-- {
 			par := list[i]
 			if par.Type != nil {
 				typ = par.Type
 				if par.Name == nil {
-					pos = StartPos(typ)
-					par.Name = NewName(pos, "_")
+					errPos = StartPos(typ)
+					par.Name = NewName(errPos, "_")
 				}
 			} else if typ != nil {
 				par.Type = typ
 			} else {
 				// par.Type == nil && typ == nil => we only have a par.Name
-				pos = par.Name.Pos()
+				errPos = par.Name.Pos()
 				t := p.badExpr()
-				t.pos = pos // correct position
+				t.pos = errPos // correct position
 				par.Type = t
 			}
 		}
-		if pos.IsKnown() {
+		if errPos.IsKnown() {
 			var msg string
 			if requireNames {
+				// Not all parameters are named because named != len(list).
+				// If named == typed we must have parameters that have no types,
+				// and they must be at the end of the parameter list, otherwise
+				// the types would have been filled in by the right-to-left sweep
+				// above and we wouldn't have an error. Since we are in a type
+				// parameter list, the missing types are constraints.
 				if named == typed {
-					pos = end // position error at closing ]
+					errPos = end // position error at closing ]
 					msg = "missing type constraint"
 				} else {
-					msg = "type parameters must be named"
+					msg = "missing type parameter name"
+					// go.dev/issue/60812
+					if len(list) == 1 {
+						msg += " or invalid array length"
+					}
 				}
 			} else {
 				msg = "mixed named and unnamed parameters"
 			}
-			p.syntaxErrorAt(pos, msg)
+			p.syntaxErrorAt(errPos, msg)
 		}
 	}
 
@@ -2320,7 +2332,7 @@
 			// asking for a '{' rather than a ';' here leads to a better error message
 			p.want(_Lbrace)
 			if p.tok != _Lbrace {
-				p.advance(_Lbrace, _Rbrace) // for better synchronization (e.g., issue #22581)
+				p.advance(_Lbrace, _Rbrace) // for better synchronization (e.g., go.dev/issue/22581)
 			}
 		}
 		if keyword == _For {
@@ -2812,8 +2824,8 @@
 	return
 }
 
-// unparen removes all parentheses around an expression.
-func unparen(x Expr) Expr {
+// Unparen returns e with any enclosing parentheses stripped.
+func Unparen(x Expr) Expr {
 	for {
 		p, ok := x.(*ParenExpr)
 		if !ok {
@@ -2823,3 +2835,15 @@
 	}
 	return x
 }
+
+// UnpackListExpr unpacks a *ListExpr into a []Expr.
+func UnpackListExpr(x Expr) []Expr {
+	switch x := x.(type) {
+	case nil:
+		return nil
+	case *ListExpr:
+		return x.ElemList
+	default:
+		return []Expr{x}
+	}
+}
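A small usage sketch of the two helpers exported above, as it would read inside package syntax (behavior follows directly from their definitions):

	func exampleUnparenAndUnpack(x, a, b Expr) {
		_ = Unparen(&ParenExpr{X: &ParenExpr{X: x}}) // yields x: all enclosing parentheses stripped
		_ = UnpackListExpr(nil)                      // nil: no expressions
		_ = UnpackListExpr(x)                        // []Expr{x}: single expression wrapped in a slice
		_ = UnpackListExpr(&ListExpr{ElemList: []Expr{a, b}}) // []Expr{a, b}: element list returned as is
	}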
diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go
index d5d4290..538278b 100644
--- a/src/cmd/compile/internal/syntax/parser_test.go
+++ b/src/cmd/compile/internal/syntax/parser_test.go
@@ -374,3 +374,22 @@
 		}
 	}
 }
+
+// Test that typical uses of UnpackListExpr don't allocate.
+func TestUnpackListExprAllocs(t *testing.T) {
+	var x Expr = NewName(Pos{}, "x")
+	allocs := testing.AllocsPerRun(1000, func() {
+		list := UnpackListExpr(x)
+		if len(list) != 1 || list[0] != x {
+			t.Fatalf("unexpected result")
+		}
+	})
+
+	if allocs > 0 {
+		errorf := t.Errorf
+		if testenv.OptimizationOff() {
+			errorf = t.Logf // noopt builder disables inlining
+		}
+		errorf("UnpackListExpr allocated %v times", allocs)
+	}
+}
diff --git a/src/cmd/compile/internal/syntax/printer.go b/src/cmd/compile/internal/syntax/printer.go
index 62de68e..9f20db5 100644
--- a/src/cmd/compile/internal/syntax/printer.go
+++ b/src/cmd/compile/internal/syntax/printer.go
@@ -916,7 +916,7 @@
 			}
 			p.print(blank)
 		}
-		p.printNode(unparen(f.Type)) // no need for (extra) parentheses around parameter types
+		p.printNode(Unparen(f.Type)) // no need for (extra) parentheses around parameter types
 	}
 	// A type parameter list [P T] where the name P and the type expression T syntactically
 	// combine to another valid (value) expression requires a trailing comma, as in [P *T,]
diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go
index ceb512e..99baf7f 100644
--- a/src/cmd/compile/internal/syntax/printer_test.go
+++ b/src/cmd/compile/internal/syntax/printer_test.go
@@ -169,6 +169,7 @@
 	dup(`'a'`),
 	dup(`"foo"`),
 	dup("`bar`"),
+	dup("any"),
 
 	// func and composite literals
 	dup("func() {}"),
@@ -197,12 +198,18 @@
 	// new interfaces
 	dup("interface{int}"),
 	dup("interface{~int}"),
-	dup("interface{~int}"),
+
+	// generic constraints
+	dup("interface{~a | ~b | ~c; ~int | ~string; float64; m()}"),
 	dup("interface{int | string}"),
 	dup("interface{~int | ~string; float64; m()}"),
-	dup("interface{~a | ~b | ~c; ~int | ~string; float64; m()}"),
 	dup("interface{~T[int, string] | string}"),
 
+	// generic types
+	dup("x[T]"),
+	dup("x[N | A | S]"),
+	dup("x[N, A]"),
+
 	// non-type expressions
 	dup("(x)"),
 	dup("x.f"),
@@ -250,6 +257,12 @@
 	dup("f(s...)"),
 	dup("f(a, s...)"),
 
+	// generic functions
+	dup("f[T]()"),
+	dup("f[T](T)"),
+	dup("f[T, T1]()"),
+	dup("f[T, T1](T, T1)"),
+
 	dup("*x"),
 	dup("&x"),
 	dup("x + y"),
diff --git a/src/cmd/compile/internal/syntax/testdata/issue23434.go b/src/cmd/compile/internal/syntax/testdata/issue23434.go
index 5a72a7f..e436abf 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue23434.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue23434.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Test case for issue 23434: Better synchronization of
+// Test case for go.dev/issue/23434: Better synchronization of
 // parser after missing type. There should be exactly
 // one error each time, with no follow-on errors.
 
@@ -12,7 +12,7 @@
 
 type Map map[int] /* ERROR unexpected newline */
 
-// Examples from #23434:
+// Examples from go.dev/issue/23434:
 
 func g() {
 	m := make(map[string] /* ERROR unexpected ! */ !)
diff --git a/src/cmd/compile/internal/syntax/testdata/issue31092.go b/src/cmd/compile/internal/syntax/testdata/issue31092.go
index b1839b8..0bd40bd 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue31092.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue31092.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Test cases for issue 31092: Better synchronization of
+// Test cases for go.dev/issue/31092: Better synchronization of
 // parser after seeing an := rather than an = in a const,
 // type, or variable declaration.
 
diff --git a/src/cmd/compile/internal/syntax/testdata/issue43527.go b/src/cmd/compile/internal/syntax/testdata/issue43527.go
index dd2c9b1..99a8c09 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue43527.go
+++ b/src/cmd/compile/internal/syntax/testdata/issue43527.go
@@ -7,17 +7,17 @@
 type (
         // 0 and 1-element []-lists are syntactically valid
         _[A, B /* ERROR missing type constraint */ ] int
-        _[A, /* ERROR type parameters must be named */ interface{}] int
+        _[A, /* ERROR missing type parameter name */ interface{}] int
         _[A, B, C /* ERROR missing type constraint */ ] int
         _[A B, C /* ERROR missing type constraint */ ] int
-        _[A B, /* ERROR type parameters must be named */ interface{}] int
-        _[A B, /* ERROR type parameters must be named */ interface{}, C D] int
-        _[A B, /* ERROR type parameters must be named */ interface{}, C, D] int
-        _[A B, /* ERROR type parameters must be named */ interface{}, C, interface{}] int
-        _[A B, C interface{}, D, /* ERROR type parameters must be named */ interface{}] int
+        _[A B, /* ERROR missing type parameter name */ interface{}] int
+        _[A B, /* ERROR missing type parameter name */ interface{}, C D] int
+        _[A B, /* ERROR missing type parameter name */ interface{}, C, D] int
+        _[A B, /* ERROR missing type parameter name */ interface{}, C, interface{}] int
+        _[A B, C interface{}, D, /* ERROR missing type parameter name */ interface{}] int
 )
 
 // function type parameters use the same parsing routine - just have a couple of tests
 
 func _[A, B /* ERROR missing type constraint */ ]() {}
-func _[A, /* ERROR type parameters must be named */ interface{}]() {}
+func _[A, /* ERROR missing type parameter name */ interface{}]() {}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue63835.go b/src/cmd/compile/internal/syntax/testdata/issue63835.go
new file mode 100644
index 0000000..3d165c0
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue63835.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func (x string) /* ERROR syntax error: unexpected \[, expected name */ []byte {
+        return []byte(x)
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/map2.go b/src/cmd/compile/internal/syntax/testdata/map2.go
index 2833445..3d1cbfb 100644
--- a/src/cmd/compile/internal/syntax/testdata/map2.go
+++ b/src/cmd/compile/internal/syntax/testdata/map2.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// This file is like map.go2, but instead if importing chans, it contains
+// This file is like map.go, but instead of importing chans, it contains
 // the necessary functionality at the end of the file.
 
 // Package orderedmap provides an ordered map, implemented as a binary tree.
@@ -23,7 +23,7 @@
 
 // New returns a new map.
 func New[K, V any](compare func(K, K) int) *Map[K, V] {
-        return &Map[K, V]{compare: compare}
+	return &Map[K, V]{compare: compare}
 }
 
 // find looks up key in the map, and returns either a pointer
@@ -85,7 +85,7 @@
 		// Stop sending values if sender.Send returns false,
 		// meaning that nothing is listening at the receiver end.
 		return f(n.left) &&
-                        sender.Send(keyValue[K, V]{n.key, n.val}) &&
+			sender.Send(keyValue[K, V]{n.key, n.val}) &&
 			f(n.right)
 	}
 	go func() {
@@ -119,7 +119,7 @@
 // A sender is used to send values to a Receiver.
 type chans_Sender[T any] struct {
 	values chan<- T
-	done <-chan bool
+	done   <-chan bool
 }
 
 func (s *chans_Sender[T]) Send(v T) bool {
@@ -137,10 +137,10 @@
 
 type chans_Receiver[T any] struct {
 	values <-chan T
-	done chan<- bool
+	done   chan<- bool
 }
 
 func (r *chans_Receiver[T]) Next() (T, bool) {
 	v, ok := <-r.values
 	return v, ok
-}
\ No newline at end of file
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/tparams.go b/src/cmd/compile/internal/syntax/testdata/tparams.go
index 646fbbe..4b68a15 100644
--- a/src/cmd/compile/internal/syntax/testdata/tparams.go
+++ b/src/cmd/compile/internal/syntax/testdata/tparams.go
@@ -23,7 +23,7 @@
 
 func f[a b,  /* ERROR expected ] */ 0] ()
 
-// issue #49482
+// go.dev/issue/49482
 type (
 	t[a *[]int] struct{}
 	t[a *t,] struct{}
@@ -35,7 +35,7 @@
 	t[a *struct{}|~t] struct{}
 )
 
-// issue #51488
+// go.dev/issue/51488
 type (
 	t[a *t|t,] struct{}
 	t[a *t|t, b t] struct{}
@@ -44,3 +44,14 @@
 	t[a ([]t)] struct{}
 	t[a ([]t)|t] struct{}
 )
+
+// go.dev/issue/60812
+type (
+	t [t]struct{}
+	t [[]t]struct{}
+	t [[t]t]struct{}
+	t [/* ERROR missing type parameter name or invalid array length */ t[t]]struct{}
+	t [t t[t], /* ERROR missing type parameter name */ t[t]]struct{}
+	t [/* ERROR missing type parameter name */ t[t], t t[t]]struct{}
+	t [/* ERROR missing type parameter name */ t[t], t[t]]struct{} // report only first error
+)
diff --git a/src/cmd/compile/internal/syntax/testdata/typeset.go b/src/cmd/compile/internal/syntax/testdata/typeset.go
index fe5c3f4..819025c 100644
--- a/src/cmd/compile/internal/syntax/testdata/typeset.go
+++ b/src/cmd/compile/internal/syntax/testdata/typeset.go
@@ -44,15 +44,15 @@
         _[_ t|~struct{}] t
         _[_ ~t|~struct{}] t
 
-        // test cases for issue #49175
+        // test cases for go.dev/issue/49175
         _[_ []t]t
         _[_ [1]t]t
         _[_ ~[]t]t
         _[_ ~[1]t]t
-        t [ /* ERROR type parameters must be named */ t[0]]t
+        t [ /* ERROR missing type parameter name */ t[0]]t
 )
 
-// test cases for issue #49174
+// test cases for go.dev/issue/49174
 func _[_ t]() {}
 func _[_ []t]() {}
 func _[_ [1]t]() {}
@@ -81,11 +81,11 @@
 type (
         _[_ t, t /* ERROR missing type constraint */ ] t
         _[_ ~t, t /* ERROR missing type constraint */ ] t
-        _[_ t, /* ERROR type parameters must be named */ ~t] t
-        _[_ ~t, /* ERROR type parameters must be named */ ~t] t
+        _[_ t, /* ERROR missing type parameter name */ ~t] t
+        _[_ ~t, /* ERROR missing type parameter name */ ~t] t
 
-        _[_ t|t, /* ERROR type parameters must be named */ t|t] t
-        _[_ ~t|t, /* ERROR type parameters must be named */ t|t] t
-        _[_ t|t, /* ERROR type parameters must be named */ ~t|t] t
-        _[_ ~t|t, /* ERROR type parameters must be named */ ~t|t] t
+        _[_ t|t, /* ERROR missing type parameter name */ t|t] t
+        _[_ ~t|t, /* ERROR missing type parameter name */ t|t] t
+        _[_ t|t, /* ERROR missing type parameter name */ ~t|t] t
+        _[_ ~t|t, /* ERROR missing type parameter name */ ~t|t] t
 )
diff --git a/src/cmd/compile/internal/syntax/tokens.go b/src/cmd/compile/internal/syntax/tokens.go
index 6dece1a..b08f699 100644
--- a/src/cmd/compile/internal/syntax/tokens.go
+++ b/src/cmd/compile/internal/syntax/tokens.go
@@ -4,7 +4,9 @@
 
 package syntax
 
-type token uint
+type Token uint
+
+type token = Token
 
 //go:generate stringer -type token -linecomment tokens.go
 
diff --git a/src/cmd/compile/internal/syntax/type.go b/src/cmd/compile/internal/syntax/type.go
index 01eab7a..53132a4 100644
--- a/src/cmd/compile/internal/syntax/type.go
+++ b/src/cmd/compile/internal/syntax/type.go
@@ -39,25 +39,27 @@
 	exprFlags
 }
 
-type exprFlags uint8
+type exprFlags uint16
 
-func (f exprFlags) IsVoid() bool      { return f&1 != 0 }
-func (f exprFlags) IsType() bool      { return f&2 != 0 }
-func (f exprFlags) IsBuiltin() bool   { return f&4 != 0 }
-func (f exprFlags) IsValue() bool     { return f&8 != 0 }
-func (f exprFlags) IsNil() bool       { return f&16 != 0 }
-func (f exprFlags) Addressable() bool { return f&32 != 0 }
-func (f exprFlags) Assignable() bool  { return f&64 != 0 }
-func (f exprFlags) HasOk() bool       { return f&128 != 0 }
+func (f exprFlags) IsVoid() bool          { return f&1 != 0 }
+func (f exprFlags) IsType() bool          { return f&2 != 0 }
+func (f exprFlags) IsBuiltin() bool       { return f&4 != 0 } // a language builtin that resembles a function call, e.g., "make, append, new"
+func (f exprFlags) IsValue() bool         { return f&8 != 0 }
+func (f exprFlags) IsNil() bool           { return f&16 != 0 }
+func (f exprFlags) Addressable() bool     { return f&32 != 0 }
+func (f exprFlags) Assignable() bool      { return f&64 != 0 }
+func (f exprFlags) HasOk() bool           { return f&128 != 0 }
+func (f exprFlags) IsRuntimeHelper() bool { return f&256 != 0 } // a runtime function called from transformed syntax
 
-func (f *exprFlags) SetIsVoid()      { *f |= 1 }
-func (f *exprFlags) SetIsType()      { *f |= 2 }
-func (f *exprFlags) SetIsBuiltin()   { *f |= 4 }
-func (f *exprFlags) SetIsValue()     { *f |= 8 }
-func (f *exprFlags) SetIsNil()       { *f |= 16 }
-func (f *exprFlags) SetAddressable() { *f |= 32 }
-func (f *exprFlags) SetAssignable()  { *f |= 64 }
-func (f *exprFlags) SetHasOk()       { *f |= 128 }
+func (f *exprFlags) SetIsVoid()          { *f |= 1 }
+func (f *exprFlags) SetIsType()          { *f |= 2 }
+func (f *exprFlags) SetIsBuiltin()       { *f |= 4 }
+func (f *exprFlags) SetIsValue()         { *f |= 8 }
+func (f *exprFlags) SetIsNil()           { *f |= 16 }
+func (f *exprFlags) SetAddressable()     { *f |= 32 }
+func (f *exprFlags) SetAssignable()      { *f |= 64 }
+func (f *exprFlags) SetHasOk()           { *f |= 128 }
+func (f *exprFlags) SetIsRuntimeHelper() { *f |= 256 }
 
 // a typeAndValue contains the results of typechecking an expression.
 // It is embedded in expression nodes.
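Widening exprFlags from uint8 to uint16 is what makes room for bit 256; a minimal illustration of the accessor pattern, assuming it runs inside package syntax:

	func exampleExprFlags() {
		var f exprFlags         // zero value: no flags set
		f.SetIsRuntimeHelper()  // bit 256, which did not fit in the old uint8
		f.SetHasOk()            // bit 128, previously the highest bit
		_ = f.IsRuntimeHelper() // true
		_ = f.IsBuiltin()       // false: bit 4 was never set
	}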
diff --git a/src/cmd/compile/internal/test/abiutils_test.go b/src/cmd/compile/internal/test/abiutils_test.go
index 8ed7622..b500de9 100644
--- a/src/cmd/compile/internal/test/abiutils_test.go
+++ b/src/cmd/compile/internal/test/abiutils_test.go
@@ -22,7 +22,7 @@
 // AMD64 registers available:
 // - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, r10, R11
 // - floating point: X0 - X14
-var configAMD64 = abi.NewABIConfig(9, 15, 0)
+var configAMD64 = abi.NewABIConfig(9, 15, 0, 1)
 
 func TestMain(m *testing.M) {
 	ssagen.Arch.LinkArch = &x86.Linkamd64
@@ -157,7 +157,7 @@
 	i16 := types.Types[types.TINT16]
 	i32 := types.Types[types.TINT32]
 	i64 := types.Types[types.TINT64]
-	s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16})
+	s := mkstruct(i8, i8, mkstruct(), i8, i16)
 	ft := mkFuncType(nil, []*types.Type{i8, s, i64},
 		[]*types.Type{s, i8, i32})
 
@@ -181,8 +181,8 @@
 	//    (r1 fs, r2 fs)
 	f64 := types.Types[types.TFLOAT64]
 	i64 := types.Types[types.TINT64]
-	s := mkstruct([]*types.Type{i64, mkstruct([]*types.Type{})})
-	fs := mkstruct([]*types.Type{f64, s, mkstruct([]*types.Type{})})
+	s := mkstruct(i64, mkstruct())
+	fs := mkstruct(f64, s, mkstruct())
 	ft := mkFuncType(nil, []*types.Type{s, s, fs},
 		[]*types.Type{fs, fs})
 
@@ -213,9 +213,10 @@
 	ab2 := types.NewArray(tb, 2)
 	a2 := types.NewArray(i64, 2)
 	a3 := types.NewArray(i16, 3)
-	s := mkstruct([]*types.Type{a2, mkstruct([]*types.Type{})})
-	s2 := mkstruct([]*types.Type{a3, mkstruct([]*types.Type{})})
-	fs := mkstruct([]*types.Type{f64, s, mkstruct([]*types.Type{})})
+	empty := mkstruct()
+	s := mkstruct(a2, empty)
+	s2 := mkstruct(a3, empty)
+	fs := mkstruct(f64, s, empty)
 	ft := mkFuncType(nil, []*types.Type{s, ab2, s2, fs, fs},
 		[]*types.Type{fs, ab2, fs})
 
@@ -233,12 +234,11 @@
 
 	abitest(t, ft, exp)
 
-	// Check to make sure that NumParamRegs yields 2 and not 3
-	// for struct "s" (e.g. that it handles the padding properly).
-	nps := configAMD64.NumParamRegs(s)
-	if nps != 2 {
-		t.Errorf("NumParams(%v) returned %d expected %d\n",
-			s, nps, 2)
+	// Test that NumParamRegs doesn't assign registers to trailing padding.
+	typ := mkstruct(i64, i64, mkstruct())
+	have := configAMD64.NumParamRegs(typ)
+	if have != 2 {
+		t.Errorf("NumParams(%v): have %v, want %v", typ, have, 2)
 	}
 }
 
@@ -279,7 +279,7 @@
 	i16 := types.Types[types.TINT16]
 	i64 := types.Types[types.TINT64]
 	f64 := types.Types[types.TFLOAT64]
-	s1 := mkstruct([]*types.Type{i16, i16, i16})
+	s1 := mkstruct(i16, i16, i16)
 	ps1 := types.NewPtr(s1)
 	a7 := types.NewArray(ps1, 7)
 	ft := mkFuncType(s1, []*types.Type{ps1, a7, f64, i16, i16, i16},
@@ -316,7 +316,7 @@
 	nei := types.NewInterface([]*types.Field{field})
 	i16 := types.Types[types.TINT16]
 	tb := types.Types[types.TBOOL]
-	s1 := mkstruct([]*types.Type{i16, i16, tb})
+	s1 := mkstruct(i16, i16, tb)
 	ft := mkFuncType(nil, []*types.Type{s1, ei, ei, nei, pei, nei, i16},
 		[]*types.Type{ei, nei, pei})
 
@@ -347,8 +347,8 @@
 	c64 := types.Types[types.TCOMPLEX64]
 	c128 := types.Types[types.TCOMPLEX128]
 
-	s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16})
-	a := types.NewArray(s, 3)
+	s := mkstruct(i8, i8, mkstruct(), i8, i16)
+	a := mkstruct(s, s, s)
 
 	nrtest(t, i8, 1)
 	nrtest(t, i16, 1)
@@ -360,7 +360,6 @@
 	nrtest(t, c128, 2)
 	nrtest(t, s, 4)
 	nrtest(t, a, 12)
-
 }
 
 func TestABIUtilsComputePadding(t *testing.T) {
@@ -369,11 +368,11 @@
 	i16 := types.Types[types.TINT16]
 	i32 := types.Types[types.TINT32]
 	i64 := types.Types[types.TINT64]
-	emptys := mkstruct([]*types.Type{})
-	s1 := mkstruct([]*types.Type{i8, i16, emptys, i32, i64})
+	emptys := mkstruct()
+	s1 := mkstruct(i8, i16, emptys, i32, i64)
 	// func (p1 int32, p2 s1, p3 emptys, p4 [1]int32)
 	a1 := types.NewArray(i32, 1)
-	ft := mkFuncType(nil, []*types.Type{i32, s1, emptys, a1}, []*types.Type{})
+	ft := mkFuncType(nil, []*types.Type{i32, s1, emptys, a1}, nil)
 
 	// Run abitest() just to document what we're expected to see.
 	exp := makeExpectedDump(`
diff --git a/src/cmd/compile/internal/test/abiutilsaux_test.go b/src/cmd/compile/internal/test/abiutilsaux_test.go
index 07b8eb7..fb1c398 100644
--- a/src/cmd/compile/internal/test/abiutilsaux_test.go
+++ b/src/cmd/compile/internal/test/abiutilsaux_test.go
@@ -21,16 +21,15 @@
 
 func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field {
 	field := types.NewField(src.NoXPos, s, t)
-	n := typecheck.NewName(s)
+	n := ir.NewNameAt(src.NoXPos, s, t)
 	n.Class = which
 	field.Nname = n
-	n.SetType(t)
 	return field
 }
 
 // mkstruct is a helper routine to create a struct type with fields
 // of the types specified in 'fieldtypes'.
-func mkstruct(fieldtypes []*types.Type) *types.Type {
+func mkstruct(fieldtypes ...*types.Type) *types.Type {
 	fields := make([]*types.Field, len(fieldtypes))
 	for k, t := range fieldtypes {
 		if t == nil {
@@ -77,7 +76,7 @@
 }
 
 func verifyParamResultOffset(t *testing.T, f *types.Field, r abi.ABIParamAssignment, which string, idx int) int {
-	n := ir.AsNode(f.Nname).(*ir.Name)
+	n := f.Nname.(*ir.Name)
 	if n.FrameOffset() != int64(r.Offset()) {
 		t.Errorf("%s %d: got offset %d wanted %d t=%v",
 			which, idx, r.Offset(), n.Offset_, f.Type)
diff --git a/src/cmd/compile/internal/test/iface_test.go b/src/cmd/compile/internal/test/iface_test.go
index ebc4f89..db41eb8 100644
--- a/src/cmd/compile/internal/test/iface_test.go
+++ b/src/cmd/compile/internal/test/iface_test.go
@@ -124,3 +124,15 @@
 func i2int(i interface{}) int {
 	return i.(int)
 }
+
+func BenchmarkTypeAssert(b *testing.B) {
+	e := any(Int(0))
+	r := true
+	for i := 0; i < b.N; i++ {
+		_, ok := e.(I)
+		if !ok {
+			r = false
+		}
+	}
+	sink = r
+}
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
index 205b746..0ccc7b3 100644
--- a/src/cmd/compile/internal/test/inl_test.go
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -44,15 +44,16 @@
 			"chanbuf",
 			"evacuated",
 			"fastlog2",
-			"fastrand",
 			"float64bits",
 			"funcspdelta",
 			"getm",
 			"getMCache",
 			"isDirectIface",
 			"itabHashFunc",
+			"nextslicecap",
 			"noescape",
 			"pcvalueCacheKey",
+			"rand32",
 			"readUnaligned32",
 			"readUnaligned64",
 			"releasem",
@@ -72,11 +73,13 @@
 			"gclinkptr.ptr",
 			"guintptr.ptr",
 			"writeHeapBitsForAddr",
+			"heapBitsSlice",
 			"markBits.isMarked",
 			"muintptr.ptr",
 			"puintptr.ptr",
 			"spanOf",
 			"spanOfUnchecked",
+			"typePointers.nextFast",
 			"(*gcWork).putFast",
 			"(*gcWork).tryGetFast",
 			"(*guintptr).set",
@@ -85,10 +88,15 @@
 			"(*mspan).base",
 			"(*mspan).markBitsForBase",
 			"(*mspan).markBitsForIndex",
+			"(*mspan).writeUserArenaHeapBits",
 			"(*muintptr).set",
 			"(*puintptr).set",
 			"(*wbBuf).get1",
 			"(*wbBuf).get2",
+
+			// Trace-related ones.
+			"traceLocker.ok",
+			"traceEnabled",
 		},
 		"runtime/internal/sys": {},
 		"runtime/internal/math": {
@@ -107,6 +115,9 @@
 			"(*Buffer).UnreadByte",
 			"(*Buffer).tryGrowByReslice",
 		},
+		"internal/abi": {
+			"UseInterfaceSwitchCache",
+		},
 		"compress/flate": {
 			"byLiteral.Len",
 			"byLiteral.Less",
@@ -242,6 +253,10 @@
 		want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "TrailingZeros32")
 		want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32")
 	}
+	if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" || runtime.GOARCH == "loong64" || runtime.GOARCH == "mips" || runtime.GOARCH == "mips64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "s390x" {
+		// runtime/internal/atomic.Loaduintptr is only intrinsified on these platforms.
+		want["runtime"] = append(want["runtime"], "traceAcquire")
+	}
 	if bits.UintSize == 64 {
 		// mix is only defined on 64-bit architectures
 		want["runtime"] = append(want["runtime"], "mix")
diff --git a/src/cmd/compile/internal/test/logic_test.go b/src/cmd/compile/internal/test/logic_test.go
index 1d7043f..0e46b5f 100644
--- a/src/cmd/compile/internal/test/logic_test.go
+++ b/src/cmd/compile/internal/test/logic_test.go
@@ -1,3 +1,7 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package test
 
 import "testing"
diff --git a/src/cmd/compile/internal/test/math_test.go b/src/cmd/compile/internal/test/math_test.go
index 6bcb460..1febe9d 100644
--- a/src/cmd/compile/internal/test/math_test.go
+++ b/src/cmd/compile/internal/test/math_test.go
@@ -1,3 +1,7 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package test
 
 import (
diff --git a/src/cmd/compile/internal/test/memcombine_test.go b/src/cmd/compile/internal/test/memcombine_test.go
index c7e7a20..3fc4a00 100644
--- a/src/cmd/compile/internal/test/memcombine_test.go
+++ b/src/cmd/compile/internal/test/memcombine_test.go
@@ -71,3 +71,129 @@
 //go:noinline
 func nop() {
 }
+
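+// The loads and stores below are candidates for the compiler's
+// memory-combining rewrites, which may merge adjacent narrow accesses into a
+// single wider one; TestIssue64468 checks that the combined code is still correct.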
+type T32 struct {
+	a, b uint32
+}
+
+//go:noinline
+func (t *T32) bigEndianLoad() uint64 {
+	return uint64(t.a)<<32 | uint64(t.b)
+}
+
+//go:noinline
+func (t *T32) littleEndianLoad() uint64 {
+	return uint64(t.a) | (uint64(t.b) << 32)
+}
+
+//go:noinline
+func (t *T32) bigEndianStore(x uint64) {
+	t.a = uint32(x >> 32)
+	t.b = uint32(x)
+}
+
+//go:noinline
+func (t *T32) littleEndianStore(x uint64) {
+	t.a = uint32(x)
+	t.b = uint32(x >> 32)
+}
+
+type T16 struct {
+	a, b uint16
+}
+
+//go:noinline
+func (t *T16) bigEndianLoad() uint32 {
+	return uint32(t.a)<<16 | uint32(t.b)
+}
+
+//go:noinline
+func (t *T16) littleEndianLoad() uint32 {
+	return uint32(t.a) | (uint32(t.b) << 16)
+}
+
+//go:noinline
+func (t *T16) bigEndianStore(x uint32) {
+	t.a = uint16(x >> 16)
+	t.b = uint16(x)
+}
+
+//go:noinline
+func (t *T16) littleEndianStore(x uint32) {
+	t.a = uint16(x)
+	t.b = uint16(x >> 16)
+}
+
+type T8 struct {
+	a, b uint8
+}
+
+//go:noinline
+func (t *T8) bigEndianLoad() uint16 {
+	return uint16(t.a)<<8 | uint16(t.b)
+}
+
+//go:noinline
+func (t *T8) littleEndianLoad() uint16 {
+	return uint16(t.a) | (uint16(t.b) << 8)
+}
+
+//go:noinline
+func (t *T8) bigEndianStore(x uint16) {
+	t.a = uint8(x >> 8)
+	t.b = uint8(x)
+}
+
+//go:noinline
+func (t *T8) littleEndianStore(x uint16) {
+	t.a = uint8(x)
+	t.b = uint8(x >> 8)
+}
+
+func TestIssue64468(t *testing.T) {
+	t32 := T32{1, 2}
+	if got, want := t32.bigEndianLoad(), uint64(1<<32+2); got != want {
+		t.Errorf("T32.bigEndianLoad got %x want %x\n", got, want)
+	}
+	if got, want := t32.littleEndianLoad(), uint64(1+2<<32); got != want {
+		t.Errorf("T32.littleEndianLoad got %x want %x\n", got, want)
+	}
+	t16 := T16{1, 2}
+	if got, want := t16.bigEndianLoad(), uint32(1<<16+2); got != want {
+		t.Errorf("T16.bigEndianLoad got %x want %x\n", got, want)
+	}
+	if got, want := t16.littleEndianLoad(), uint32(1+2<<16); got != want {
+		t.Errorf("T16.littleEndianLoad got %x want %x\n", got, want)
+	}
+	t8 := T8{1, 2}
+	if got, want := t8.bigEndianLoad(), uint16(1<<8+2); got != want {
+		t.Errorf("T8.bigEndianLoad got %x want %x\n", got, want)
+	}
+	if got, want := t8.littleEndianLoad(), uint16(1+2<<8); got != want {
+		t.Errorf("T8.littleEndianLoad got %x want %x\n", got, want)
+	}
+	t32.bigEndianStore(1<<32 + 2)
+	if got, want := t32, (T32{1, 2}); got != want {
+		t.Errorf("T32.bigEndianStore got %x want %x\n", got, want)
+	}
+	t32.littleEndianStore(1<<32 + 2)
+	if got, want := t32, (T32{2, 1}); got != want {
+		t.Errorf("T32.littleEndianStore got %x want %x\n", got, want)
+	}
+	t16.bigEndianStore(1<<16 + 2)
+	if got, want := t16, (T16{1, 2}); got != want {
+		t.Errorf("T16.bigEndianStore got %x want %x\n", got, want)
+	}
+	t16.littleEndianStore(1<<16 + 2)
+	if got, want := t16, (T16{2, 1}); got != want {
+		t.Errorf("T16.littleEndianStore got %x want %x\n", got, want)
+	}
+	t8.bigEndianStore(1<<8 + 2)
+	if got, want := t8, (T8{1, 2}); got != want {
+		t.Errorf("T8.bigEndianStore got %x want %x\n", got, want)
+	}
+	t8.littleEndianStore(1<<8 + 2)
+	if got, want := t8, (T8{2, 1}); got != want {
+		t.Errorf("T8.littleEndianStore got %x want %x\n", got, want)
+	}
+}
diff --git a/src/cmd/compile/internal/test/pgo_devirtualize_test.go b/src/cmd/compile/internal/test/pgo_devirtualize_test.go
index 49e95e9..f451243 100644
--- a/src/cmd/compile/internal/test/pgo_devirtualize_test.go
+++ b/src/cmd/compile/internal/test/pgo_devirtualize_test.go
@@ -14,8 +14,13 @@
 	"testing"
 )
 
+type devirtualization struct {
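+	// pos is the source position of the call site and callee is the
+	// concrete callee the call is expected to be devirtualized to.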
+	pos    string
+	callee string
+}
+
 // testPGODevirtualize tests that specific PGO devirtualize rewrites are performed.
-func testPGODevirtualize(t *testing.T, dir string) {
+func testPGODevirtualize(t *testing.T, dir string, want []devirtualization) {
 	testenv.MustHaveGoRun(t)
 	t.Parallel()
 
@@ -23,17 +28,27 @@
 
 	// Add a go.mod so we have a consistent symbol names in this temp dir.
 	goMod := fmt.Sprintf(`module %s
-go 1.19
+go 1.21
 `, pkg)
 	if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644); err != nil {
 		t.Fatalf("error writing go.mod: %v", err)
 	}
 
+	// Run the test without PGO to ensure that the test assertions are
+	// correct even in the non-optimized version.
+	cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", "."))
+	cmd.Dir = dir
+	b, err := cmd.CombinedOutput()
+	t.Logf("Test without PGO:\n%s", b)
+	if err != nil {
+		t.Fatalf("Test failed without PGO: %v", err)
+	}
+
 	// Build the test with the profile.
 	pprof := filepath.Join(dir, "devirt.pprof")
-	gcflag := fmt.Sprintf("-gcflags=-m=2 -pgoprofile=%s -d=pgodebug=2", pprof)
+	gcflag := fmt.Sprintf("-gcflags=-m=2 -pgoprofile=%s -d=pgodebug=3", pprof)
 	out := filepath.Join(dir, "test.exe")
-	cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "build", "-o", out, gcflag, "."))
+	cmd = testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", "-o", out, gcflag, "."))
 	cmd.Dir = dir
 
 	pr, pw, err := os.Pipe()
@@ -50,25 +65,9 @@
 		t.Fatalf("error starting go test: %v", err)
 	}
 
-	type devirtualization struct {
-		pos    string
-		callee string
-	}
-
-	want := []devirtualization{
-		{
-			pos:    "./devirt.go:61:21",
-			callee: "mult.Mult.Multiply",
-		},
-		{
-			pos:    "./devirt.go:61:31",
-			callee: "Add.Add",
-		},
-	}
-
 	got := make(map[devirtualization]struct{})
 
-	devirtualizedLine := regexp.MustCompile(`(.*): PGO devirtualizing .* to (.*)`)
+	devirtualizedLine := regexp.MustCompile(`(.*): PGO devirtualizing \w+ call .* to (.*)`)
 
 	scanner := bufio.NewScanner(pr)
 	for scanner.Scan() {
@@ -102,6 +101,15 @@
 		}
 		t.Errorf("devirtualization %v missing; got %v", w, got)
 	}
+
+	// Run test with PGO to ensure the assertions are still true.
+	cmd = testenv.CleanCmdEnv(testenv.Command(t, out))
+	cmd.Dir = dir
+	b, err = cmd.CombinedOutput()
+	t.Logf("Test with PGO:\n%s", b)
+	if err != nil {
+		t.Fatalf("Test failed with PGO: %v", err)
+	}
 }
 
 // TestPGODevirtualize tests that specific functions are devirtualized when PGO
@@ -115,14 +123,139 @@
 
 	// Copy the module to a scratch location so we can add a go.mod.
 	dir := t.TempDir()
-	if err := os.Mkdir(filepath.Join(dir, "mult"), 0755); err != nil {
+	if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil {
 		t.Fatalf("error creating dir: %v", err)
 	}
-	for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult", "mult.go")} {
+	for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult.pkg", "mult.go")} {
 		if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
 			t.Fatalf("error copying %s: %v", file, err)
 		}
 	}
 
-	testPGODevirtualize(t, dir)
+	want := []devirtualization{
+		// ExerciseIface
+		{
+			pos:    "./devirt.go:101:20",
+			callee: "mult.Mult.Multiply",
+		},
+		{
+			pos:    "./devirt.go:101:39",
+			callee: "Add.Add",
+		},
+		// ExerciseFuncConcrete
+		{
+			pos:    "./devirt.go:173:36",
+			callee: "AddFn",
+		},
+		{
+			pos:    "./devirt.go:173:15",
+			callee: "mult.MultFn",
+		},
+		// ExerciseFuncField
+		{
+			pos:    "./devirt.go:207:35",
+			callee: "AddFn",
+		},
+		{
+			pos:    "./devirt.go:207:19",
+			callee: "mult.MultFn",
+		},
+		// ExerciseFuncClosure
+		// TODO(prattmic): Closure callees not implemented.
+		//{
+		//	pos:    "./devirt.go:249:27",
+		//	callee: "AddClosure.func1",
+		//},
+		//{
+		//	pos:    "./devirt.go:249:15",
+		//	callee: "mult.MultClosure.func1",
+		//},
+	}
+
+	testPGODevirtualize(t, dir, want)
+}
+
+// Regression test for https://go.dev/issue/65615. If a target function changes
+// from non-generic to generic, we can't devirtualize it (we don't know the type
+// parameters), but the compiler should not crash.
+func TestLookupFuncGeneric(t *testing.T) {
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("error getting wd: %v", err)
+	}
+	srcDir := filepath.Join(wd, "testdata", "pgo", "devirtualize")
+
+	// Copy the module to a scratch location so we can add a go.mod.
+	dir := t.TempDir()
+	if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil {
+		t.Fatalf("error creating dir: %v", err)
+	}
+	for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult.pkg", "mult.go")} {
+		if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
+			t.Fatalf("error copying %s: %v", file, err)
+		}
+	}
+
+	// Change MultFn from a concrete function to a parameterized function.
+	if err := convertMultToGeneric(filepath.Join(dir, "mult.pkg", "mult.go")); err != nil {
+		t.Fatalf("error editing mult.go: %v", err)
+	}
+
+	// Same as TestPGODevirtualize except for MultFn, which we cannot
+	// devirtualize to because it has become generic.
+	//
+	// Note that the important part of this test is that the build is
+	// successful, not the specific devirtualizations.
+	want := []devirtualization{
+		// ExerciseIface
+		{
+			pos:    "./devirt.go:101:20",
+			callee: "mult.Mult.Multiply",
+		},
+		{
+			pos:    "./devirt.go:101:39",
+			callee: "Add.Add",
+		},
+		// ExerciseFuncConcrete
+		{
+			pos:    "./devirt.go:173:36",
+			callee: "AddFn",
+		},
+		// ExerciseFuncField
+		{
+			pos:    "./devirt.go:207:35",
+			callee: "AddFn",
+		},
+		// ExerciseFuncClosure
+		// TODO(prattmic): Closure callees not implemented.
+		//{
+		//	pos:    "./devirt.go:249:27",
+		//	callee: "AddClosure.func1",
+		//},
+		//{
+		//	pos:    "./devirt.go:249:15",
+		//	callee: "mult.MultClosure.func1",
+		//},
+	}
+
+	testPGODevirtualize(t, dir, want)
+}
+
+var multFnRe = regexp.MustCompile(`func MultFn\(a, b int64\) int64`)
+
+func convertMultToGeneric(path string) error {
+	content, err := os.ReadFile(path)
+	if err != nil {
+		return fmt.Errorf("error opening: %w", err)
+	}
+
+	if !multFnRe.Match(content) {
+		return fmt.Errorf("MultFn not found; update regexp?")
+	}
+
+	// Users of MultFn shouldn't need adjustment; type inference should
+	// still work fine.
+	content = multFnRe.ReplaceAll(content, []byte(`func MultFn[T int32|int64](a, b T) T`))
+
+	return os.WriteFile(path, content, 0644)
 }
diff --git a/src/cmd/compile/internal/test/pgo_inl_test.go b/src/cmd/compile/internal/test/pgo_inl_test.go
index 4d6b5a1..da6c4a5 100644
--- a/src/cmd/compile/internal/test/pgo_inl_test.go
+++ b/src/cmd/compile/internal/test/pgo_inl_test.go
@@ -6,6 +6,7 @@
 
 import (
 	"bufio"
+	"bytes"
 	"fmt"
 	"internal/profile"
 	"internal/testenv"
@@ -17,11 +18,7 @@
 	"testing"
 )
 
-// testPGOIntendedInlining tests that specific functions are inlined.
-func testPGOIntendedInlining(t *testing.T, dir string) {
-	testenv.MustHaveGoRun(t)
-	t.Parallel()
-
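+// buildPGOInliningTest builds the test package in dir with the given
+// -gcflags value and returns the combined build output.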
+func buildPGOInliningTest(t *testing.T, dir string, gcflag string) []byte {
 	const pkg = "example.com/pgo/inline"
 
 	// Add a go.mod so we have a consistent symbol names in this temp dir.
@@ -32,6 +29,26 @@
 		t.Fatalf("error writing go.mod: %v", err)
 	}
 
+	exe := filepath.Join(dir, "test.exe")
+	args := []string{"test", "-c", "-o", exe, "-gcflags=" + gcflag}
+	cmd := testenv.Command(t, testenv.GoToolPath(t), args...)
+	cmd.Dir = dir
+	cmd = testenv.CleanCmdEnv(cmd)
+	t.Log(cmd)
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("build failed: %v, output:\n%s", err, out)
+	}
+	return out
+}
+
+// testPGOIntendedInlining tests that specific functions are inlined.
+func testPGOIntendedInlining(t *testing.T, dir string) {
+	testenv.MustHaveGoRun(t)
+	t.Parallel()
+
+	const pkg = "example.com/pgo/inline"
+
 	want := []string{
 		"(*BS).NS",
 	}
@@ -70,26 +87,10 @@
 	// Build the test with the profile. Use a smaller threshold to test.
 	// TODO: maybe adjust the test to work with default threshold.
 	pprof := filepath.Join(dir, "inline_hot.pprof")
-	gcflag := fmt.Sprintf("-gcflags=-m -m -pgoprofile=%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90", pprof)
-	out := filepath.Join(dir, "test.exe")
-	cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", "-c", "-o", out, gcflag, "."))
-	cmd.Dir = dir
+	gcflag := fmt.Sprintf("-m -m -pgoprofile=%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90", pprof)
+	out := buildPGOInliningTest(t, dir, gcflag)
 
-	pr, pw, err := os.Pipe()
-	if err != nil {
-		t.Fatalf("error creating pipe: %v", err)
-	}
-	defer pr.Close()
-	cmd.Stdout = pw
-	cmd.Stderr = pw
-
-	err = cmd.Start()
-	pw.Close()
-	if err != nil {
-		t.Fatalf("error starting go test: %v", err)
-	}
-
-	scanner := bufio.NewScanner(pr)
+	scanner := bufio.NewScanner(bytes.NewReader(out))
 	curPkg := ""
 	canInline := regexp.MustCompile(`: can inline ([^ ]*)`)
 	haveInlined := regexp.MustCompile(`: inlining call to ([^ ]*)`)
@@ -128,11 +129,8 @@
 			continue
 		}
 	}
-	if err := cmd.Wait(); err != nil {
-		t.Fatalf("error running go test: %v", err)
-	}
 	if err := scanner.Err(); err != nil {
-		t.Fatalf("error reading go test output: %v", err)
+		t.Fatalf("error reading output: %v", err)
 	}
 	for fullName, reason := range notInlinedReason {
 		t.Errorf("%s was not inlined: %s", fullName, reason)
@@ -297,3 +295,50 @@
 	_, err = io.Copy(d, s)
 	return err
 }
+
+// TestPGOHash tests that PGO optimization decisions can be selected by pgohash.
+func TestPGOHash(t *testing.T) {
+	testenv.MustHaveGoRun(t)
+	t.Parallel()
+
+	const pkg = "example.com/pgo/inline"
+
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("error getting wd: %v", err)
+	}
+	srcDir := filepath.Join(wd, "testdata/pgo/inline")
+
+	// Copy the module to a scratch location so we can add a go.mod.
+	dir := t.TempDir()
+
+	for _, file := range []string{"inline_hot.go", "inline_hot_test.go", "inline_hot.pprof"} {
+		if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
+			t.Fatalf("error copying %s: %v", file, err)
+		}
+	}
+
+	pprof := filepath.Join(dir, "inline_hot.pprof")
+	// build with -trimpath so the source location (thus the hash)
+	// does not depend on the temporary directory path.
+	gcflag0 := fmt.Sprintf("-pgoprofile=%s -trimpath %s=>%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90,pgodebug=1", pprof, dir, pkg)
+
+	// Check that a hash match allows PGO inlining.
+	const srcPos = "example.com/pgo/inline/inline_hot.go:81:19"
+	const hashMatch = "pgohash triggered " + srcPos + " (inline)"
+	pgoDebugRE := regexp.MustCompile(`hot-budget check allows inlining for call .* at ` + strings.ReplaceAll(srcPos, ".", "\\."))
+	hash := "v1" // 1 matches srcPos, v for verbose (print source location)
+	gcflag := gcflag0 + ",pgohash=" + hash
+	out := buildPGOInliningTest(t, dir, gcflag)
+	if !bytes.Contains(out, []byte(hashMatch)) || !pgoDebugRE.Match(out) {
+		t.Errorf("output does not contain expected source line, out:\n%s", out)
+	}
+
+	// Check that a hash mismatch turns off PGO inlining.
+	hash = "v0" // 0 should not match srcPos
+	gcflag = gcflag0 + ",pgohash=" + hash
+	out = buildPGOInliningTest(t, dir, gcflag)
+	if bytes.Contains(out, []byte(hashMatch)) || pgoDebugRE.Match(out) {
+		t.Errorf("output contains unexpected source line, out:\n%s", out)
+	}
+}
diff --git a/src/cmd/compile/internal/test/ssa_test.go b/src/cmd/compile/internal/test/ssa_test.go
index 5f8acdc..7f2faa1 100644
--- a/src/cmd/compile/internal/test/ssa_test.go
+++ b/src/cmd/compile/internal/test/ssa_test.go
@@ -169,7 +169,7 @@
 				continue
 			}
 			t.Run(fmt.Sprintf("%s%s", test.name[4:], flag), func(t *testing.T) {
-				out, err := testenv.Command(t, filepath.Join(tmpdir, "code.test"), "-test.run="+test.name).CombinedOutput()
+				out, err := testenv.Command(t, filepath.Join(tmpdir, "code.test"), "-test.run=^"+test.name+"$").CombinedOutput()
 				if err != nil || string(out) != "PASS\n" {
 					t.Errorf("Failed:\n%s\n", out)
 				}
diff --git a/src/cmd/compile/internal/test/switch_test.go b/src/cmd/compile/internal/test/switch_test.go
index 30dee62..1d12361 100644
--- a/src/cmd/compile/internal/test/switch_test.go
+++ b/src/cmd/compile/internal/test/switch_test.go
@@ -120,6 +120,165 @@
 	sink = n
 }
 
+func BenchmarkSwitchTypePredictable(b *testing.B) {
+	benchmarkSwitchType(b, true)
+}
+func BenchmarkSwitchTypeUnpredictable(b *testing.B) {
+	benchmarkSwitchType(b, false)
+}
+func benchmarkSwitchType(b *testing.B, predictable bool) {
+	a := []any{
+		int8(1),
+		int16(2),
+		int32(3),
+		int64(4),
+		uint8(5),
+		uint16(6),
+		uint32(7),
+		uint64(8),
+	}
+	n := 0
+	rng := newRNG()
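+	// The rng picks the switched-on value either predictably or
+	// pseudo-randomly, so the switch is measured under both
+	// branch-prediction regimes.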
+	for i := 0; i < b.N; i++ {
+		rng = rng.next(predictable)
+		switch a[rng.value()&7].(type) {
+		case int8:
+			n += 1
+		case int16:
+			n += 2
+		case int32:
+			n += 3
+		case int64:
+			n += 4
+		case uint8:
+			n += 5
+		case uint16:
+			n += 6
+		case uint32:
+			n += 7
+		case uint64:
+			n += 8
+		}
+	}
+	sink = n
+}
+
+func BenchmarkSwitchInterfaceTypePredictable(b *testing.B) {
+	benchmarkSwitchInterfaceType(b, true)
+}
+func BenchmarkSwitchInterfaceTypeUnpredictable(b *testing.B) {
+	benchmarkSwitchInterfaceType(b, false)
+}
+
+type SI0 interface {
+	si0()
+}
+type ST0 struct {
+}
+
+func (ST0) si0() {
+}
+
+type SI1 interface {
+	si1()
+}
+type ST1 struct {
+}
+
+func (ST1) si1() {
+}
+
+type SI2 interface {
+	si2()
+}
+type ST2 struct {
+}
+
+func (ST2) si2() {
+}
+
+type SI3 interface {
+	si3()
+}
+type ST3 struct {
+}
+
+func (ST3) si3() {
+}
+
+type SI4 interface {
+	si4()
+}
+type ST4 struct {
+}
+
+func (ST4) si4() {
+}
+
+type SI5 interface {
+	si5()
+}
+type ST5 struct {
+}
+
+func (ST5) si5() {
+}
+
+type SI6 interface {
+	si6()
+}
+type ST6 struct {
+}
+
+func (ST6) si6() {
+}
+
+type SI7 interface {
+	si7()
+}
+type ST7 struct {
+}
+
+func (ST7) si7() {
+}
+
+func benchmarkSwitchInterfaceType(b *testing.B, predictable bool) {
+	a := []any{
+		ST0{},
+		ST1{},
+		ST2{},
+		ST3{},
+		ST4{},
+		ST5{},
+		ST6{},
+		ST7{},
+	}
+	n := 0
+	rng := newRNG()
+	for i := 0; i < b.N; i++ {
+		rng = rng.next(predictable)
+		switch a[rng.value()&7].(type) {
+		case SI0:
+			n += 1
+		case SI1:
+			n += 2
+		case SI2:
+			n += 3
+		case SI3:
+			n += 4
+		case SI4:
+			n += 5
+		case SI5:
+			n += 6
+		case SI6:
+			n += 7
+		case SI7:
+			n += 8
+		}
+	}
+	sink = n
+}
+
 // A simple random number generator used to make switches conditionally predictable.
 type rng uint64
 
diff --git a/src/cmd/compile/internal/test/test.go b/src/cmd/compile/internal/test/test.go
index 56e5404..195c65a 100644
--- a/src/cmd/compile/internal/test/test.go
+++ b/src/cmd/compile/internal/test/test.go
@@ -1 +1,5 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package test
diff --git a/src/cmd/compile/internal/test/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go
index 2b8cd9f..cd7b5bc 100644
--- a/src/cmd/compile/internal/test/testdata/arith_test.go
+++ b/src/cmd/compile/internal/test/testdata/arith_test.go
@@ -268,6 +268,70 @@
 	}
 }
 
+//go:noinline
+func rsh64x64ConstOverflow8(x int8) int64 {
+	return int64(x) >> 9
+}
+
+//go:noinline
+func rsh64x64ConstOverflow16(x int16) int64 {
+	return int64(x) >> 17
+}
+
+//go:noinline
+func rsh64x64ConstOverflow32(x int32) int64 {
+	return int64(x) >> 33
+}
+
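+// testArithRightShiftConstOverflow checks arithmetic right shifts whose
+// constant shift count exceeds the width of the original value: non-negative
+// inputs must yield 0 and negative inputs must yield -1 (all bits set).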
+func testArithRightShiftConstOverflow(t *testing.T) {
+	allSet := int64(-1)
+	if got, want := rsh64x64ConstOverflow8(0x7f), int64(0); got != want {
+		t.Errorf("rsh64x64ConstOverflow8 failed: got %v, want %v", got, want)
+	}
+	if got, want := rsh64x64ConstOverflow16(0x7fff), int64(0); got != want {
+		t.Errorf("rsh64x64ConstOverflow16 failed: got %v, want %v", got, want)
+	}
+	if got, want := rsh64x64ConstOverflow32(0x7ffffff), int64(0); got != want {
+		t.Errorf("rsh64x64ConstOverflow32 failed: got %v, want %v", got, want)
+	}
+	if got, want := rsh64x64ConstOverflow8(int8(-1)), allSet; got != want {
+		t.Errorf("rsh64x64ConstOverflow8 failed: got %v, want %v", got, want)
+	}
+	if got, want := rsh64x64ConstOverflow16(int16(-1)), allSet; got != want {
+		t.Errorf("rsh64x64ConstOverflow16 failed: got %v, want %v", got, want)
+	}
+	if got, want := rsh64x64ConstOverflow32(int32(-1)), allSet; got != want {
+		t.Errorf("rsh64x64ConstOverflow32 failed: got %v, want %v", got, want)
+	}
+}
+
+//go:noinline
+func rsh64Ux64ConstOverflow8(x uint8) uint64 {
+	return uint64(x) >> 9
+}
+
+//go:noinline
+func rsh64Ux64ConstOverflow16(x uint16) uint64 {
+	return uint64(x) >> 17
+}
+
+//go:noinline
+func rsh64Ux64ConstOverflow32(x uint32) uint64 {
+	return uint64(x) >> 33
+}
+
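+// testRightShiftConstOverflow checks unsigned right shifts whose constant
+// shift count exceeds the width of the original value: the result must be 0.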
+func testRightShiftConstOverflow(t *testing.T) {
+	if got, want := rsh64Ux64ConstOverflow8(0xff), uint64(0); got != want {
+		t.Errorf("rsh64Ux64ConstOverflow8 failed: got %v, want %v", got, want)
+	}
+	if got, want := rsh64Ux64ConstOverflow16(0xffff), uint64(0); got != want {
+		t.Errorf("rsh64Ux64ConstOverflow16 failed: got %v, want %v", got, want)
+	}
+	if got, want := rsh64Ux64ConstOverflow32(0xffffffff), uint64(0); got != want {
+		t.Errorf("rsh64Ux64ConstOverflow32 failed: got %v, want %v", got, want)
+	}
+}
+
 // test64BitConstMult tests that rewrite rules don't fold 64 bit constants
 // into multiply instructions.
 func test64BitConstMult(t *testing.T) {
@@ -918,6 +982,8 @@
 	testShiftCX(t)
 	testSubConst(t)
 	testOverflowConstShift(t)
+	testArithRightShiftConstOverflow(t)
+	testRightShiftConstOverflow(t)
 	testArithConstShift(t)
 	testArithRshConst(t)
 	testLargeConst(t)
diff --git a/src/cmd/compile/internal/test/testdata/ctl_test.go b/src/cmd/compile/internal/test/testdata/ctl_test.go
index ff3a160..501f79e 100644
--- a/src/cmd/compile/internal/test/testdata/ctl_test.go
+++ b/src/cmd/compile/internal/test/testdata/ctl_test.go
@@ -70,7 +70,6 @@
 		ret += 1
 	}
 	return ret
-
 }
 
 func fallthrough_ssa(a int) int {
@@ -92,7 +91,6 @@
 		ret++
 	}
 	return ret
-
 }
 
 func testFallthrough(t *testing.T) {
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go
index 390b6c3..ac238f6 100644
--- a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go
@@ -11,7 +11,16 @@
 
 package devirt
 
-import "example.com/pgo/devirtualize/mult"
+// Devirtualization of callees from transitive dependencies should work even if
+// they aren't directly referenced in the package. See #61577.
+//
+// Dots in the last package path component are escaped in symbol names. Use one
+// to ensure the escaping doesn't break lookup.
+import (
+	"fmt"
+
+	"example.com/pgo/devirtualize/mult.pkg"
+)
 
 var sink int
 
@@ -37,15 +46,46 @@
 	return a - b
 }
 
-// Exercise calls mostly a1 and m1.
+// ExerciseIface calls mostly a1 and m1.
 //
 //go:noinline
-func Exercise(iter int, a1, a2 Adder, m1, m2 mult.Multiplier) {
+func ExerciseIface(iter int, a1, a2 Adder, m1, m2 mult.Multiplier) int {
+	// The call below must evaluate selectA() to determine the receiver to
+	// use. This should happen exactly once per iteration. Assert that is
+	// the case to ensure the IR manipulation does not result in over- or
+	// under-evaluation.
+	selectI := 0
+	selectA := func(gotI int) Adder {
+		if gotI != selectI {
+			panic(fmt.Sprintf("selectA not called once per iteration; got i %d want %d", gotI, selectI))
+		}
+		selectI++
+
+		if gotI%10 == 0 {
+			return a2
+		}
+		return a1
+	}
+	oneI := 0
+	one := func(gotI int) int {
+		if gotI != oneI {
+			panic(fmt.Sprintf("one not called once per iteration; got i %d want %d", gotI, oneI))
+		}
+		oneI++
+
+		// The function value must be evaluated before arguments, so
+		// selectI must have been incremented already.
+		if selectI != oneI {
+			panic(fmt.Sprintf("selectA not called before one; got i %d want %d", selectI, oneI))
+		}
+
+		return 1
+	}
+
+	val := 0
 	for i := 0; i < iter; i++ {
-		a := a1
 		m := m1
 		if i%10 == 0 {
-			a = a2
 			m = m2
 		}
 
@@ -58,16 +98,155 @@
 		// If they were not mutually exclusive (for example, two Add
 		// calls), then we could not definitively select the correct
 		// callee.
-		sink += m.Multiply(42, a.Add(1, 2))
+		val += m.Multiply(42, selectA(i).Add(one(i), 2))
 	}
+	return val
 }
 
-func init() {
-	// TODO: until https://golang.org/cl/497175 or similar lands,
-	// we need to create an explicit reference to callees
-	// in another package for devirtualization to work.
-	m := mult.Mult{}
-	m.Multiply(42, 0)
-	n := mult.NegMult{}
-	n.Multiply(42, 0)
+type AddFunc func(int, int) int
+
+func AddFn(a, b int) int {
+	for i := 0; i < 1000; i++ {
+		sink++
+	}
+	return a + b
+}
+
+func SubFn(a, b int) int {
+	for i := 0; i < 1000; i++ {
+		sink++
+	}
+	return a - b
+}
+
+// ExerciseFuncConcrete calls mostly a1 and m1.
+//
+//go:noinline
+func ExerciseFuncConcrete(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int {
+	// The call below must evaluate selectA() to determine the function to
+	// call. This should happen exactly once per iteration. Assert that is
+	// the case to ensure the IR manipulation does not result in over- or
+	// under-evaluation.
+	selectI := 0
+	selectA := func(gotI int) AddFunc {
+		if gotI != selectI {
+			panic(fmt.Sprintf("selectA not called once per iteration; got i %d want %d", gotI, selectI))
+		}
+		selectI++
+
+		if gotI%10 == 0 {
+			return a2
+		}
+		return a1
+	}
+	oneI := 0
+	one := func(gotI int) int {
+		if gotI != oneI {
+			panic(fmt.Sprintf("one not called once per iteration; got i %d want %d", gotI, oneI))
+		}
+		oneI++
+
+		// The function value must be evaluated before arguments, so
+		// selectI must have been incremented already.
+		if selectI != oneI {
+			panic(fmt.Sprintf("selectA not called before one; got i %d want %d", selectI, oneI))
+		}
+
+		return 1
+	}
+
+	val := 0
+	for i := 0; i < iter; i++ {
+		m := m1
+		if i%10 == 0 {
+			m = m2
+		}
+
+		// N.B. Profiles only distinguish calls on a per-line level,
+		// making the two calls ambiguous. However, because the
+		// function types are mutually exclusive, devirtualization can
+		// still select the correct callee for each.
+		//
+		// If they were not mutually exclusive (for example, two
+		// AddFunc calls), then we could not definitively select the
+		// correct callee.
+		val += int(m(42, int64(selectA(i)(one(i), 2))))
+	}
+	return val
+}
+
+// ExerciseFuncField calls mostly a1 and m1.
+//
+// This is a simplified version of ExerciseFuncConcrete, but accessing the
+// function values via a struct field.
+//
+//go:noinline
+func ExerciseFuncField(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int {
+	ops := struct {
+		a AddFunc
+		m mult.MultFunc
+	}{}
+
+	val := 0
+	for i := 0; i < iter; i++ {
+		ops.a = a1
+		ops.m = m1
+		if i%10 == 0 {
+			ops.a = a2
+			ops.m = m2
+		}
+
+		// N.B. Profiles only distinguish calls on a per-line level,
+		// making the two calls ambiguous. However, because the
+		// function types are mutually exclusive, devirtualization can
+		// still select the correct callee for each.
+		//
+		// If they were not mutually exclusive (for example, two
+		// AddFunc calls), then we could not definitively select the
+		// correct callee.
+		val += int(ops.m(42, int64(ops.a(1, 2))))
+	}
+	return val
+}
+
+//go:noinline
+func AddClosure() AddFunc {
+	// Implicit closure by capturing the receiver.
+	var a Add
+	return a.Add
+}
+
+//go:noinline
+func SubClosure() AddFunc {
+	var s Sub
+	return s.Add
+}
+
+// ExerciseFuncClosure calls mostly a1 and m1.
+//
+// This is a simplified version of ExerciseFuncConcrete, but we need two
+// distinct call sites to test two different types of function values.
+//
+//go:noinline
+func ExerciseFuncClosure(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int {
+	val := 0
+	for i := 0; i < iter; i++ {
+		a := a1
+		m := m1
+		if i%10 == 0 {
+			a = a2
+			m = m2
+		}
+
+		// N.B. Profiles only distinguish calls on a per-line level,
+		// making the two calls ambiguous. However, because the
+		// function types are mutually exclusive, devirtualization can
+		// still select the correct callee for each.
+		//
+		// If they were not mutually exclusive (for example, two
+		// AddFunc calls), then we could not definitively select the
+		// correct callee.
+		val += int(m(42, int64(a(1, 2))))
+	}
+	return val
 }
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof
index 5fe5dd6..2a27f1b 100644
--- a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof
Binary files differ
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go
index f4cbbb8..59b565d 100644
--- a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go
@@ -14,10 +14,10 @@
 import (
 	"testing"
 
-	"example.com/pgo/devirtualize/mult"
+	"example.com/pgo/devirtualize/mult.pkg"
 )
 
-func BenchmarkDevirt(b *testing.B) {
+func BenchmarkDevirtIface(b *testing.B) {
 	var (
 		a1 Add
 		a2 Sub
@@ -25,5 +25,49 @@
 		m2 mult.NegMult
 	)
 
-	Exercise(b.N, a1, a2, m1, m2)
+	ExerciseIface(b.N, a1, a2, m1, m2)
+}
+
+// Verify that devirtualization doesn't result in calls or side effects applying more than once.
+func TestDevirtIface(t *testing.T) {
+	var (
+		a1 Add
+		a2 Sub
+		m1 mult.Mult
+		m2 mult.NegMult
+	)
+
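+	// Nine iterations use Add/Mult (42*3 = 126 each) and the i == 0
+	// iteration uses Sub/NegMult (-1*42*-1 = 42), so ExerciseIface(10, ...)
+	// returns 9*126 + 42 = 1176.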
+	if v := ExerciseIface(10, a1, a2, m1, m2); v != 1176 {
+		t.Errorf("ExerciseIface(10) got %d want 1176", v)
+	}
+}
+
+func BenchmarkDevirtFuncConcrete(b *testing.B) {
+	ExerciseFuncConcrete(b.N, AddFn, SubFn, mult.MultFn, mult.NegMultFn)
+}
+
+func TestDevirtFuncConcrete(t *testing.T) {
+	if v := ExerciseFuncConcrete(10, AddFn, SubFn, mult.MultFn, mult.NegMultFn); v != 1176 {
+		t.Errorf("ExerciseFuncConcrete(10) got %d want 1176", v)
+	}
+}
+
+func BenchmarkDevirtFuncField(b *testing.B) {
+	ExerciseFuncField(b.N, AddFn, SubFn, mult.MultFn, mult.NegMultFn)
+}
+
+func TestDevirtFuncField(t *testing.T) {
+	if v := ExerciseFuncField(10, AddFn, SubFn, mult.MultFn, mult.NegMultFn); v != 1176 {
+		t.Errorf("ExerciseFuncField(10) got %d want 1176", v)
+	}
+}
+
+func BenchmarkDevirtFuncClosure(b *testing.B) {
+	ExerciseFuncClosure(b.N, AddClosure(), SubClosure(), mult.MultClosure(), mult.NegMultClosure())
+}
+
+func TestDevirtFuncClosure(t *testing.T) {
+	if v := ExerciseFuncClosure(10, AddClosure(), SubClosure(), mult.MultClosure(), mult.NegMultClosure()); v != 1176 {
+		t.Errorf("ExerciseFuncClosure(10) got %d want 1176", v)
+	}
 }
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go
new file mode 100644
index 0000000..113a5e1
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go
@@ -0,0 +1,72 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// WARNING: Please avoid updating this file.
+// See the warning in ../devirt.go for more details.
+
+package mult
+
+var sink int
+
+type Multiplier interface {
+	Multiply(a, b int) int
+}
+
+type Mult struct{}
+
+func (Mult) Multiply(a, b int) int {
+	for i := 0; i < 1000; i++ {
+		sink++
+	}
+	return a * b
+}
+
+type NegMult struct{}
+
+func (NegMult) Multiply(a, b int) int {
+	for i := 0; i < 1000; i++ {
+		sink++
+	}
+	return -1 * a * b
+}
+
+// N.B. Different types than AddFunc to test intra-line disambiguation.
+type MultFunc func(int64, int64) int64
+
+func MultFn(a, b int64) int64 {
+	for i := 0; i < 1000; i++ {
+		sink++
+	}
+	return a * b
+}
+
+func NegMultFn(a, b int64) int64 {
+	for i := 0; i < 1000; i++ {
+		sink++
+	}
+	return -1 * a * b
+}
+
+//go:noinline
+func MultClosure() MultFunc {
+	// Explicit closure to differentiate from AddClosure.
+	c := 1
+	return func(a, b int64) int64 {
+		for i := 0; i < 1000; i++ {
+			sink++
+		}
+		return a * b * int64(c)
+	}
+}
+
+//go:noinline
+func NegMultClosure() MultFunc {
+	c := 1
+	return func(a, b int64) int64 {
+		for i := 0; i < 1000; i++ {
+			sink++
+		}
+		return -1 * a * b * int64(c)
+	}
+}
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult/mult.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult/mult.go
deleted file mode 100644
index 8a026a5..0000000
--- a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult/mult.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// WARNING: Please avoid updating this file.
-// See the warning in ../devirt.go for more details.
-
-package mult
-
-var sink int
-
-type Multiplier interface {
-	Multiply(a, b int) int
-}
-
-type Mult struct{}
-
-func (Mult) Multiply(a, b int) int {
-	for i := 0; i < 1000; i++ {
-		sink++
-	}
-	return a * b
-}
-
-type NegMult struct{}
-
-func (NegMult) Multiply(a, b int) int {
-	for i := 0; i < 1000; i++ {
-		sink++
-	}
-	return -1 * a * b
-}
diff --git a/src/cmd/compile/internal/typebits/typebits.go b/src/cmd/compile/internal/typebits/typebits.go
index b533212..b07f437 100644
--- a/src/cmd/compile/internal/typebits/typebits.go
+++ b/src/cmd/compile/internal/typebits/typebits.go
@@ -86,7 +86,7 @@
 		}
 
 	case types.TSTRUCT:
-		for _, f := range t.Fields().Slice() {
+		for _, f := range t.Fields() {
 			set(f.Type, off+f.Offset, bv, skip)
 		}
 
diff --git a/src/cmd/compile/internal/typecheck/_builtin/coverage.go b/src/cmd/compile/internal/typecheck/_builtin/coverage.go
index ea4462d..0222635 100644
--- a/src/cmd/compile/internal/typecheck/_builtin/coverage.go
+++ b/src/cmd/compile/internal/typecheck/_builtin/coverage.go
@@ -7,7 +7,6 @@
 // to avoid depending on having a working compiler binary.
 
 //go:build ignore
-// +build ignore
 
 package coverage
 
diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
index 2e1e94b..4211529 100644
--- a/src/cmd/compile/internal/typecheck/_builtin/runtime.go
+++ b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
@@ -82,9 +82,6 @@
 func decoderune(string, int) (retv rune, retk int)
 func countrunes(string) int
 
-// Non-empty-interface to non-empty-interface conversion.
-func convI2I(typ *byte, itab *uintptr) (ret *uintptr)
-
 // Convert non-interface type to the data word of a (empty or nonempty) interface.
 func convT(typ *byte, elem *any) unsafe.Pointer
 
@@ -105,19 +102,27 @@
 
 // interface type assertions x.(T)
 func assertE2I(inter *byte, typ *byte) *byte
-func assertE2I2(inter *byte, eface any) (ret any)
-func assertI2I(inter *byte, tab *byte) *byte
-func assertI2I2(inter *byte, iface any) (ret any)
+func assertE2I2(inter *byte, typ *byte) *byte
 func panicdottypeE(have, want, iface *byte)
 func panicdottypeI(have, want, iface *byte)
 func panicnildottype(want *byte)
+func typeAssert(s *byte, typ *byte) *byte
+
+// interface switches
+func interfaceSwitch(s *byte, t *byte) (int, *byte)
 
 // interface equality. Type/itab pointers are already known to be equal, so
 // we only need to pass one.
 func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
 func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
 
-func fastrand() uint32
+// panic for iteration after exit in range func
+func panicrangeexit()
+
+// defer in range over func
+func deferrangefunc() interface{}
+
+func rand32() uint32
 
 // *byte is really *runtime.Type
 func makemap64(mapType *byte, hint int64, mapbuf *any) (hmap map[any]any)
@@ -158,7 +163,6 @@
 var writeBarrier struct {
 	enabled bool
 	pad     [3]byte
-	needed  bool
 	cgo     bool
 	alignme uint64
 }
@@ -186,8 +190,6 @@
 func panicunsafestringlen()
 func panicunsafestringnilptr()
 
-func mulUintptr(x, y uintptr) (uintptr, bool)
-
 func memmove(to *any, frm *any, length uintptr)
 func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
 func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
@@ -280,3 +282,5 @@
 var x86HasFMA bool
 var armHasVFPv4 bool
 var arm64HasATOMICS bool
+
+func asanregisterglobals(unsafe.Pointer, uintptr)
diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
index bf87b4d..09f60c6 100644
--- a/src/cmd/compile/internal/typecheck/builtin.go
+++ b/src/cmd/compile/internal/typecheck/builtin.go
@@ -86,24 +86,25 @@
 	{"slicecopy", funcTag, 54},
 	{"decoderune", funcTag, 55},
 	{"countrunes", funcTag, 56},
-	{"convI2I", funcTag, 58},
-	{"convT", funcTag, 59},
-	{"convTnoptr", funcTag, 59},
-	{"convT16", funcTag, 61},
-	{"convT32", funcTag, 63},
-	{"convT64", funcTag, 64},
-	{"convTstring", funcTag, 65},
-	{"convTslice", funcTag, 68},
-	{"assertE2I", funcTag, 69},
-	{"assertE2I2", funcTag, 70},
-	{"assertI2I", funcTag, 69},
-	{"assertI2I2", funcTag, 70},
-	{"panicdottypeE", funcTag, 71},
-	{"panicdottypeI", funcTag, 71},
-	{"panicnildottype", funcTag, 72},
-	{"ifaceeq", funcTag, 73},
-	{"efaceeq", funcTag, 73},
-	{"fastrand", funcTag, 74},
+	{"convT", funcTag, 57},
+	{"convTnoptr", funcTag, 57},
+	{"convT16", funcTag, 59},
+	{"convT32", funcTag, 61},
+	{"convT64", funcTag, 62},
+	{"convTstring", funcTag, 63},
+	{"convTslice", funcTag, 66},
+	{"assertE2I", funcTag, 67},
+	{"assertE2I2", funcTag, 67},
+	{"panicdottypeE", funcTag, 68},
+	{"panicdottypeI", funcTag, 68},
+	{"panicnildottype", funcTag, 69},
+	{"typeAssert", funcTag, 67},
+	{"interfaceSwitch", funcTag, 70},
+	{"ifaceeq", funcTag, 72},
+	{"efaceeq", funcTag, 72},
+	{"panicrangeexit", funcTag, 9},
+	{"deferrangefunc", funcTag, 73},
+	{"rand32", funcTag, 74},
 	{"makemap64", funcTag, 76},
 	{"makemap", funcTag, 77},
 	{"makemap_small", funcTag, 78},
@@ -155,86 +156,86 @@
 	{"unsafestringcheckptr", funcTag, 119},
 	{"panicunsafestringlen", funcTag, 9},
 	{"panicunsafestringnilptr", funcTag, 9},
-	{"mulUintptr", funcTag, 120},
-	{"memmove", funcTag, 121},
-	{"memclrNoHeapPointers", funcTag, 122},
-	{"memclrHasPointers", funcTag, 122},
-	{"memequal", funcTag, 123},
-	{"memequal0", funcTag, 124},
-	{"memequal8", funcTag, 124},
-	{"memequal16", funcTag, 124},
-	{"memequal32", funcTag, 124},
-	{"memequal64", funcTag, 124},
-	{"memequal128", funcTag, 124},
-	{"f32equal", funcTag, 125},
-	{"f64equal", funcTag, 125},
-	{"c64equal", funcTag, 125},
-	{"c128equal", funcTag, 125},
-	{"strequal", funcTag, 125},
-	{"interequal", funcTag, 125},
-	{"nilinterequal", funcTag, 125},
-	{"memhash", funcTag, 126},
-	{"memhash0", funcTag, 127},
-	{"memhash8", funcTag, 127},
-	{"memhash16", funcTag, 127},
-	{"memhash32", funcTag, 127},
-	{"memhash64", funcTag, 127},
-	{"memhash128", funcTag, 127},
-	{"f32hash", funcTag, 128},
-	{"f64hash", funcTag, 128},
-	{"c64hash", funcTag, 128},
-	{"c128hash", funcTag, 128},
-	{"strhash", funcTag, 128},
-	{"interhash", funcTag, 128},
-	{"nilinterhash", funcTag, 128},
-	{"int64div", funcTag, 129},
-	{"uint64div", funcTag, 130},
-	{"int64mod", funcTag, 129},
-	{"uint64mod", funcTag, 130},
-	{"float64toint64", funcTag, 131},
-	{"float64touint64", funcTag, 132},
-	{"float64touint32", funcTag, 133},
-	{"int64tofloat64", funcTag, 134},
-	{"int64tofloat32", funcTag, 136},
-	{"uint64tofloat64", funcTag, 137},
-	{"uint64tofloat32", funcTag, 138},
-	{"uint32tofloat64", funcTag, 139},
-	{"complex128div", funcTag, 140},
-	{"getcallerpc", funcTag, 141},
-	{"getcallersp", funcTag, 141},
+	{"memmove", funcTag, 120},
+	{"memclrNoHeapPointers", funcTag, 121},
+	{"memclrHasPointers", funcTag, 121},
+	{"memequal", funcTag, 122},
+	{"memequal0", funcTag, 123},
+	{"memequal8", funcTag, 123},
+	{"memequal16", funcTag, 123},
+	{"memequal32", funcTag, 123},
+	{"memequal64", funcTag, 123},
+	{"memequal128", funcTag, 123},
+	{"f32equal", funcTag, 124},
+	{"f64equal", funcTag, 124},
+	{"c64equal", funcTag, 124},
+	{"c128equal", funcTag, 124},
+	{"strequal", funcTag, 124},
+	{"interequal", funcTag, 124},
+	{"nilinterequal", funcTag, 124},
+	{"memhash", funcTag, 125},
+	{"memhash0", funcTag, 126},
+	{"memhash8", funcTag, 126},
+	{"memhash16", funcTag, 126},
+	{"memhash32", funcTag, 126},
+	{"memhash64", funcTag, 126},
+	{"memhash128", funcTag, 126},
+	{"f32hash", funcTag, 127},
+	{"f64hash", funcTag, 127},
+	{"c64hash", funcTag, 127},
+	{"c128hash", funcTag, 127},
+	{"strhash", funcTag, 127},
+	{"interhash", funcTag, 127},
+	{"nilinterhash", funcTag, 127},
+	{"int64div", funcTag, 128},
+	{"uint64div", funcTag, 129},
+	{"int64mod", funcTag, 128},
+	{"uint64mod", funcTag, 129},
+	{"float64toint64", funcTag, 130},
+	{"float64touint64", funcTag, 131},
+	{"float64touint32", funcTag, 132},
+	{"int64tofloat64", funcTag, 133},
+	{"int64tofloat32", funcTag, 135},
+	{"uint64tofloat64", funcTag, 136},
+	{"uint64tofloat32", funcTag, 137},
+	{"uint32tofloat64", funcTag, 138},
+	{"complex128div", funcTag, 139},
+	{"getcallerpc", funcTag, 140},
+	{"getcallersp", funcTag, 140},
 	{"racefuncenter", funcTag, 31},
 	{"racefuncexit", funcTag, 9},
 	{"raceread", funcTag, 31},
 	{"racewrite", funcTag, 31},
-	{"racereadrange", funcTag, 142},
-	{"racewriterange", funcTag, 142},
-	{"msanread", funcTag, 142},
-	{"msanwrite", funcTag, 142},
-	{"msanmove", funcTag, 143},
-	{"asanread", funcTag, 142},
-	{"asanwrite", funcTag, 142},
-	{"checkptrAlignment", funcTag, 144},
-	{"checkptrArithmetic", funcTag, 146},
-	{"libfuzzerTraceCmp1", funcTag, 147},
-	{"libfuzzerTraceCmp2", funcTag, 148},
-	{"libfuzzerTraceCmp4", funcTag, 149},
-	{"libfuzzerTraceCmp8", funcTag, 150},
-	{"libfuzzerTraceConstCmp1", funcTag, 147},
-	{"libfuzzerTraceConstCmp2", funcTag, 148},
-	{"libfuzzerTraceConstCmp4", funcTag, 149},
-	{"libfuzzerTraceConstCmp8", funcTag, 150},
-	{"libfuzzerHookStrCmp", funcTag, 151},
-	{"libfuzzerHookEqualFold", funcTag, 151},
-	{"addCovMeta", funcTag, 153},
+	{"racereadrange", funcTag, 141},
+	{"racewriterange", funcTag, 141},
+	{"msanread", funcTag, 141},
+	{"msanwrite", funcTag, 141},
+	{"msanmove", funcTag, 142},
+	{"asanread", funcTag, 141},
+	{"asanwrite", funcTag, 141},
+	{"checkptrAlignment", funcTag, 143},
+	{"checkptrArithmetic", funcTag, 145},
+	{"libfuzzerTraceCmp1", funcTag, 146},
+	{"libfuzzerTraceCmp2", funcTag, 147},
+	{"libfuzzerTraceCmp4", funcTag, 148},
+	{"libfuzzerTraceCmp8", funcTag, 149},
+	{"libfuzzerTraceConstCmp1", funcTag, 146},
+	{"libfuzzerTraceConstCmp2", funcTag, 147},
+	{"libfuzzerTraceConstCmp4", funcTag, 148},
+	{"libfuzzerTraceConstCmp8", funcTag, 149},
+	{"libfuzzerHookStrCmp", funcTag, 150},
+	{"libfuzzerHookEqualFold", funcTag, 150},
+	{"addCovMeta", funcTag, 152},
 	{"x86HasPOPCNT", varTag, 6},
 	{"x86HasSSE41", varTag, 6},
 	{"x86HasFMA", varTag, 6},
 	{"armHasVFPv4", varTag, 6},
 	{"arm64HasATOMICS", varTag, 6},
+	{"asanregisterglobals", funcTag, 121},
 }
 
 func runtimeTypes() []*types.Type {
-	var typs [154]*types.Type
+	var typs [153]*types.Type
 	typs[0] = types.ByteType
 	typs[1] = types.NewPtr(typs[0])
 	typs[2] = types.Types[types.TANY]
@@ -292,41 +293,41 @@
 	typs[54] = newSig(params(typs[3], typs[15], typs[3], typs[15], typs[5]), params(typs[15]))
 	typs[55] = newSig(params(typs[28], typs[15]), params(typs[46], typs[15]))
 	typs[56] = newSig(params(typs[28]), params(typs[15]))
-	typs[57] = types.NewPtr(typs[5])
-	typs[58] = newSig(params(typs[1], typs[57]), params(typs[57]))
-	typs[59] = newSig(params(typs[1], typs[3]), params(typs[7]))
-	typs[60] = types.Types[types.TUINT16]
+	typs[57] = newSig(params(typs[1], typs[3]), params(typs[7]))
+	typs[58] = types.Types[types.TUINT16]
+	typs[59] = newSig(params(typs[58]), params(typs[7]))
+	typs[60] = types.Types[types.TUINT32]
 	typs[61] = newSig(params(typs[60]), params(typs[7]))
-	typs[62] = types.Types[types.TUINT32]
-	typs[63] = newSig(params(typs[62]), params(typs[7]))
-	typs[64] = newSig(params(typs[24]), params(typs[7]))
-	typs[65] = newSig(params(typs[28]), params(typs[7]))
-	typs[66] = types.Types[types.TUINT8]
-	typs[67] = types.NewSlice(typs[66])
-	typs[68] = newSig(params(typs[67]), params(typs[7]))
-	typs[69] = newSig(params(typs[1], typs[1]), params(typs[1]))
-	typs[70] = newSig(params(typs[1], typs[2]), params(typs[2]))
-	typs[71] = newSig(params(typs[1], typs[1], typs[1]), nil)
-	typs[72] = newSig(params(typs[1]), nil)
-	typs[73] = newSig(params(typs[57], typs[7], typs[7]), params(typs[6]))
-	typs[74] = newSig(nil, params(typs[62]))
+	typs[62] = newSig(params(typs[24]), params(typs[7]))
+	typs[63] = newSig(params(typs[28]), params(typs[7]))
+	typs[64] = types.Types[types.TUINT8]
+	typs[65] = types.NewSlice(typs[64])
+	typs[66] = newSig(params(typs[65]), params(typs[7]))
+	typs[67] = newSig(params(typs[1], typs[1]), params(typs[1]))
+	typs[68] = newSig(params(typs[1], typs[1], typs[1]), nil)
+	typs[69] = newSig(params(typs[1]), nil)
+	typs[70] = newSig(params(typs[1], typs[1]), params(typs[15], typs[1]))
+	typs[71] = types.NewPtr(typs[5])
+	typs[72] = newSig(params(typs[71], typs[7], typs[7]), params(typs[6]))
+	typs[73] = newSig(nil, params(typs[10]))
+	typs[74] = newSig(nil, params(typs[60]))
 	typs[75] = types.NewMap(typs[2], typs[2])
 	typs[76] = newSig(params(typs[1], typs[22], typs[3]), params(typs[75]))
 	typs[77] = newSig(params(typs[1], typs[15], typs[3]), params(typs[75]))
 	typs[78] = newSig(nil, params(typs[75]))
 	typs[79] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3]))
-	typs[80] = newSig(params(typs[1], typs[75], typs[62]), params(typs[3]))
+	typs[80] = newSig(params(typs[1], typs[75], typs[60]), params(typs[3]))
 	typs[81] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3]))
 	typs[82] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3]))
 	typs[83] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3]))
 	typs[84] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3], typs[6]))
-	typs[85] = newSig(params(typs[1], typs[75], typs[62]), params(typs[3], typs[6]))
+	typs[85] = newSig(params(typs[1], typs[75], typs[60]), params(typs[3], typs[6]))
 	typs[86] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3], typs[6]))
 	typs[87] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3], typs[6]))
 	typs[88] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3], typs[6]))
 	typs[89] = newSig(params(typs[1], typs[75], typs[7]), params(typs[3]))
 	typs[90] = newSig(params(typs[1], typs[75], typs[3]), nil)
-	typs[91] = newSig(params(typs[1], typs[75], typs[62]), nil)
+	typs[91] = newSig(params(typs[1], typs[75], typs[60]), nil)
 	typs[92] = newSig(params(typs[1], typs[75], typs[24]), nil)
 	typs[93] = newSig(params(typs[1], typs[75], typs[28]), nil)
 	typs[94] = newSig(params(typs[3]), nil)
@@ -340,14 +341,14 @@
 	typs[102] = types.NewChan(typs[2], types.Csend)
 	typs[103] = newSig(params(typs[102], typs[3]), nil)
 	typs[104] = types.NewArray(typs[0], 3)
-	typs[105] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[104]), types.NewField(src.NoXPos, Lookup("needed"), typs[6]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
+	typs[105] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[104]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
 	typs[106] = newSig(params(typs[1], typs[3], typs[3]), nil)
 	typs[107] = newSig(params(typs[1], typs[3]), nil)
 	typs[108] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15]))
 	typs[109] = newSig(params(typs[102], typs[3]), params(typs[6]))
 	typs[110] = newSig(params(typs[3], typs[99]), params(typs[6], typs[6]))
-	typs[111] = newSig(params(typs[57]), nil)
-	typs[112] = newSig(params(typs[1], typs[1], typs[57], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
+	typs[111] = newSig(params(typs[71]), nil)
+	typs[112] = newSig(params(typs[1], typs[1], typs[71], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
 	typs[113] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7]))
 	typs[114] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7]))
 	typs[115] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7]))
@@ -355,40 +356,39 @@
 	typs[117] = newSig(params(typs[3], typs[15], typs[15], typs[15], typs[1]), params(typs[116]))
 	typs[118] = newSig(params(typs[1], typs[7], typs[22]), nil)
 	typs[119] = newSig(params(typs[7], typs[22]), nil)
-	typs[120] = newSig(params(typs[5], typs[5]), params(typs[5], typs[6]))
-	typs[121] = newSig(params(typs[3], typs[3], typs[5]), nil)
-	typs[122] = newSig(params(typs[7], typs[5]), nil)
-	typs[123] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
-	typs[124] = newSig(params(typs[3], typs[3]), params(typs[6]))
-	typs[125] = newSig(params(typs[7], typs[7]), params(typs[6]))
-	typs[126] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5]))
-	typs[127] = newSig(params(typs[7], typs[5]), params(typs[5]))
-	typs[128] = newSig(params(typs[3], typs[5]), params(typs[5]))
-	typs[129] = newSig(params(typs[22], typs[22]), params(typs[22]))
-	typs[130] = newSig(params(typs[24], typs[24]), params(typs[24]))
-	typs[131] = newSig(params(typs[20]), params(typs[22]))
-	typs[132] = newSig(params(typs[20]), params(typs[24]))
-	typs[133] = newSig(params(typs[20]), params(typs[62]))
-	typs[134] = newSig(params(typs[22]), params(typs[20]))
-	typs[135] = types.Types[types.TFLOAT32]
-	typs[136] = newSig(params(typs[22]), params(typs[135]))
-	typs[137] = newSig(params(typs[24]), params(typs[20]))
-	typs[138] = newSig(params(typs[24]), params(typs[135]))
-	typs[139] = newSig(params(typs[62]), params(typs[20]))
-	typs[140] = newSig(params(typs[26], typs[26]), params(typs[26]))
-	typs[141] = newSig(nil, params(typs[5]))
-	typs[142] = newSig(params(typs[5], typs[5]), nil)
-	typs[143] = newSig(params(typs[5], typs[5], typs[5]), nil)
-	typs[144] = newSig(params(typs[7], typs[1], typs[5]), nil)
-	typs[145] = types.NewSlice(typs[7])
-	typs[146] = newSig(params(typs[7], typs[145]), nil)
-	typs[147] = newSig(params(typs[66], typs[66], typs[17]), nil)
+	typs[120] = newSig(params(typs[3], typs[3], typs[5]), nil)
+	typs[121] = newSig(params(typs[7], typs[5]), nil)
+	typs[122] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
+	typs[123] = newSig(params(typs[3], typs[3]), params(typs[6]))
+	typs[124] = newSig(params(typs[7], typs[7]), params(typs[6]))
+	typs[125] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5]))
+	typs[126] = newSig(params(typs[7], typs[5]), params(typs[5]))
+	typs[127] = newSig(params(typs[3], typs[5]), params(typs[5]))
+	typs[128] = newSig(params(typs[22], typs[22]), params(typs[22]))
+	typs[129] = newSig(params(typs[24], typs[24]), params(typs[24]))
+	typs[130] = newSig(params(typs[20]), params(typs[22]))
+	typs[131] = newSig(params(typs[20]), params(typs[24]))
+	typs[132] = newSig(params(typs[20]), params(typs[60]))
+	typs[133] = newSig(params(typs[22]), params(typs[20]))
+	typs[134] = types.Types[types.TFLOAT32]
+	typs[135] = newSig(params(typs[22]), params(typs[134]))
+	typs[136] = newSig(params(typs[24]), params(typs[20]))
+	typs[137] = newSig(params(typs[24]), params(typs[134]))
+	typs[138] = newSig(params(typs[60]), params(typs[20]))
+	typs[139] = newSig(params(typs[26], typs[26]), params(typs[26]))
+	typs[140] = newSig(nil, params(typs[5]))
+	typs[141] = newSig(params(typs[5], typs[5]), nil)
+	typs[142] = newSig(params(typs[5], typs[5], typs[5]), nil)
+	typs[143] = newSig(params(typs[7], typs[1], typs[5]), nil)
+	typs[144] = types.NewSlice(typs[7])
+	typs[145] = newSig(params(typs[7], typs[144]), nil)
+	typs[146] = newSig(params(typs[64], typs[64], typs[17]), nil)
+	typs[147] = newSig(params(typs[58], typs[58], typs[17]), nil)
 	typs[148] = newSig(params(typs[60], typs[60], typs[17]), nil)
-	typs[149] = newSig(params(typs[62], typs[62], typs[17]), nil)
-	typs[150] = newSig(params(typs[24], typs[24], typs[17]), nil)
-	typs[151] = newSig(params(typs[28], typs[28], typs[17]), nil)
-	typs[152] = types.NewArray(typs[0], 16)
-	typs[153] = newSig(params(typs[7], typs[62], typs[152], typs[28], typs[15], typs[66], typs[66]), params(typs[62]))
+	typs[149] = newSig(params(typs[24], typs[24], typs[17]), nil)
+	typs[150] = newSig(params(typs[28], typs[28], typs[17]), nil)
+	typs[151] = types.NewArray(typs[0], 16)
+	typs[152] = newSig(params(typs[7], typs[60], typs[151], typs[28], typs[15], typs[64], typs[64]), params(typs[60]))
 	return typs[:]
 }
 
diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go
index f4fb614..e7f9ec5 100644
--- a/src/cmd/compile/internal/typecheck/const.go
+++ b/src/cmd/compile/internal/typecheck/const.go
@@ -8,7 +8,6 @@
 	"fmt"
 	"go/constant"
 	"go/token"
-	"internal/types/errors"
 	"math"
 	"math/big"
 	"unicode"
@@ -113,7 +112,7 @@
 		base.Fatalf("unexpected untyped expression: %v", n)
 
 	case ir.OLITERAL:
-		v := convertVal(n.Val(), t, explicit)
+		v := ConvertVal(n.Val(), t, explicit)
 		if v.Kind() == constant.Unknown {
 			n = ir.NewConstExpr(n.Val(), n)
 			break
@@ -219,12 +218,13 @@
 	return nil
 }
 
-// convertVal converts v into a representation appropriate for t. If
-// no such representation exists, it returns Val{} instead.
+// ConvertVal converts v into a representation appropriate for t. If
+// no such representation exists, it returns constant.MakeUnknown()
+// instead.
 //
 // If explicit is true, then conversions from integer to string are
 // also allowed.
-func convertVal(v constant.Value, t *types.Type, explicit bool) constant.Value {
+func ConvertVal(v constant.Value, t *types.Type, explicit bool) constant.Value {
 	switch ct := v.Kind(); ct {
 	case constant.Bool:
 		if t.IsBoolean() {
@@ -304,8 +304,7 @@
 	}
 
 	// Prevent follow-on errors.
-	// TODO(mdempsky): Use constant.MakeUnknown() instead.
-	return constant.MakeInt64(1)
+	return constant.MakeUnknown()
 }
 
 func tostr(v constant.Value) constant.Value {
@@ -319,35 +318,6 @@
 	return v
 }
 
-var tokenForOp = [...]token.Token{
-	ir.OPLUS:   token.ADD,
-	ir.ONEG:    token.SUB,
-	ir.ONOT:    token.NOT,
-	ir.OBITNOT: token.XOR,
-
-	ir.OADD:    token.ADD,
-	ir.OSUB:    token.SUB,
-	ir.OMUL:    token.MUL,
-	ir.ODIV:    token.QUO,
-	ir.OMOD:    token.REM,
-	ir.OOR:     token.OR,
-	ir.OXOR:    token.XOR,
-	ir.OAND:    token.AND,
-	ir.OANDNOT: token.AND_NOT,
-	ir.OOROR:   token.LOR,
-	ir.OANDAND: token.LAND,
-
-	ir.OEQ: token.EQL,
-	ir.ONE: token.NEQ,
-	ir.OLT: token.LSS,
-	ir.OLE: token.LEQ,
-	ir.OGT: token.GTR,
-	ir.OGE: token.GEQ,
-
-	ir.OLSH: token.SHL,
-	ir.ORSH: token.SHR,
-}
-
 func makeFloat64(f float64) constant.Value {
 	if math.IsInf(f, 0) {
 		base.Fatalf("infinity is not a valid constant")
@@ -359,50 +329,6 @@
 	return constant.BinaryOp(constant.ToFloat(real), token.ADD, constant.MakeImag(constant.ToFloat(imag)))
 }
 
-// For matching historical "constant OP overflow" error messages.
-// TODO(mdempsky): Replace with error messages like go/types uses.
-var overflowNames = [...]string{
-	ir.OADD:    "addition",
-	ir.OSUB:    "subtraction",
-	ir.OMUL:    "multiplication",
-	ir.OLSH:    "shift",
-	ir.OXOR:    "bitwise XOR",
-	ir.OBITNOT: "bitwise complement",
-}
-
-// OrigConst returns an OLITERAL with orig n and value v.
-func OrigConst(n ir.Node, v constant.Value) ir.Node {
-	lno := ir.SetPos(n)
-	v = convertVal(v, n.Type(), false)
-	base.Pos = lno
-
-	switch v.Kind() {
-	case constant.Int:
-		if constant.BitLen(v) <= ir.ConstPrec {
-			break
-		}
-		fallthrough
-	case constant.Unknown:
-		what := overflowNames[n.Op()]
-		if what == "" {
-			base.Fatalf("unexpected overflow: %v", n.Op())
-		}
-		base.ErrorfAt(n.Pos(), errors.NumericOverflow, "constant %v overflow", what)
-		n.SetType(nil)
-		return n
-	}
-
-	return ir.NewConstExpr(v, n)
-}
-
-func OrigBool(n ir.Node, v bool) ir.Node {
-	return OrigConst(n, constant.MakeBool(v))
-}
-
-func OrigInt(n ir.Node, v int64) ir.Node {
-	return OrigConst(n, constant.MakeInt64(v))
-}
-
 // DefaultLit on both nodes simultaneously;
 // if they're both ideal going in they better
 // get the same type going out.
@@ -544,9 +470,10 @@
 		ir.ONEW,
 		ir.OPANIC,
 		ir.OPRINT,
-		ir.OPRINTN,
+		ir.OPRINTLN,
 		ir.OREAL,
 		ir.ORECOVER,
+		ir.ORECOVERFP,
 		ir.ORECV,
 		ir.OUNSAFEADD,
 		ir.OUNSAFESLICE,
@@ -557,96 +484,3 @@
 	}
 	return false
 }
-
-// evalunsafe evaluates a package unsafe operation and returns the result.
-func evalunsafe(n ir.Node) int64 {
-	switch n.Op() {
-	case ir.OALIGNOF, ir.OSIZEOF:
-		n := n.(*ir.UnaryExpr)
-		n.X = Expr(n.X)
-		n.X = DefaultLit(n.X, nil)
-		tr := n.X.Type()
-		if tr == nil {
-			return 0
-		}
-		types.CalcSize(tr)
-		if n.Op() == ir.OALIGNOF {
-			return tr.Alignment()
-		}
-		return tr.Size()
-
-	case ir.OOFFSETOF:
-		// must be a selector.
-		n := n.(*ir.UnaryExpr)
-		// ODOT and ODOTPTR are allowed in case the OXDOT transformation has
-		// already happened (e.g. during -G=3 stenciling).
-		if n.X.Op() != ir.OXDOT && n.X.Op() != ir.ODOT && n.X.Op() != ir.ODOTPTR {
-			base.Errorf("invalid expression %v", n)
-			return 0
-		}
-		sel := n.X.(*ir.SelectorExpr)
-
-		// Remember base of selector to find it back after dot insertion.
-		// Since r->left may be mutated by typechecking, check it explicitly
-		// first to track it correctly.
-		sel.X = Expr(sel.X)
-		sbase := sel.X
-
-		// Implicit dot may already be resolved for instantiating generic function. So we
-		// need to remove any implicit dot until we reach the first non-implicit one, it's
-		// the right base selector. See issue #53137.
-		var clobberBase func(n ir.Node) ir.Node
-		clobberBase = func(n ir.Node) ir.Node {
-			if sel, ok := n.(*ir.SelectorExpr); ok && sel.Implicit() {
-				return clobberBase(sel.X)
-			}
-			return n
-		}
-		sbase = clobberBase(sbase)
-
-		tsel := Expr(sel)
-		n.X = tsel
-		if tsel.Type() == nil {
-			return 0
-		}
-		switch tsel.Op() {
-		case ir.ODOT, ir.ODOTPTR:
-			break
-		case ir.OMETHVALUE:
-			base.Errorf("invalid expression %v: argument is a method value", n)
-			return 0
-		default:
-			base.Errorf("invalid expression %v", n)
-			return 0
-		}
-
-		// Sum offsets for dots until we reach sbase.
-		var v int64
-		var next ir.Node
-		for r := tsel; r != sbase; r = next {
-			switch r.Op() {
-			case ir.ODOTPTR:
-				// For Offsetof(s.f), s may itself be a pointer,
-				// but accessing f must not otherwise involve
-				// indirection via embedded pointer types.
-				r := r.(*ir.SelectorExpr)
-				if r.X != sbase {
-					base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.X)
-					return 0
-				}
-				fallthrough
-			case ir.ODOT:
-				r := r.(*ir.SelectorExpr)
-				v += r.Offset()
-				next = r.X
-			default:
-				ir.Dump("unsafenmagic", tsel)
-				base.Fatalf("impossible %v node after dot insertion", r.Op())
-			}
-		}
-		return v
-	}
-
-	base.Fatalf("unexpected op %v", n.Op())
-	return 0
-}
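
ConvertVal's new contract (return constant.MakeUnknown() instead of a placeholder 1 when no representation exists) can be sketched outside the compiler using only the go/constant package. The helper below is illustrative only; toKind is not a compiler function, but it has the same convert-or-Unknown shape.

	package main

	import (
		"fmt"
		"go/constant"
	)

	// toKind converts v to the requested constant kind, or returns
	// constant.MakeUnknown() if v has no representation of that kind
	// (mirroring ConvertVal's "no such representation" case).
	func toKind(v constant.Value, kind constant.Kind) constant.Value {
		switch kind {
		case constant.Int:
			if w := constant.ToInt(v); w.Kind() == constant.Int {
				return w
			}
		case constant.Float:
			if w := constant.ToFloat(v); w.Kind() == constant.Float {
				return w
			}
		}
		return constant.MakeUnknown()
	}

	func main() {
		fmt.Println(toKind(constant.MakeInt64(3), constant.Float))   // 3
		fmt.Println(toKind(constant.MakeFloat64(1.5), constant.Int)) // unknown
	}
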
diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go
index 029c14f..4a847e8 100644
--- a/src/cmd/compile/internal/typecheck/dcl.go
+++ b/src/cmd/compile/internal/typecheck/dcl.go
@@ -6,7 +6,6 @@
 
 import (
 	"fmt"
-	"internal/types/errors"
 	"sync"
 
 	"cmd/compile/internal/base"
@@ -15,108 +14,26 @@
 	"cmd/internal/src"
 )
 
-var DeclContext ir.Class = ir.PEXTERN // PEXTERN/PAUTO
+var funcStack []*ir.Func // stack of previous values of ir.CurFunc
 
-func DeclFunc(sym *types.Sym, recv *ir.Field, params, results []*ir.Field) *ir.Func {
-	fn := ir.NewFunc(base.Pos)
-	fn.Nname = ir.NewNameAt(base.Pos, sym)
-	fn.Nname.Func = fn
+// DeclFunc declares the parameters for fn and adds it to
+// Target.Funcs.
+//
+// Before returning, it sets CurFunc to fn. When the caller is done
+// constructing fn, it must call FinishFuncBody to restore CurFunc.
+func DeclFunc(fn *ir.Func) {
+	fn.DeclareParams(true)
 	fn.Nname.Defn = fn
-	ir.MarkFunc(fn.Nname)
-	StartFuncBody(fn)
+	Target.Funcs = append(Target.Funcs, fn)
 
-	var recv1 *types.Field
-	if recv != nil {
-		recv1 = declareParam(ir.PPARAM, -1, recv)
-	}
-
-	typ := types.NewSignature(recv1, declareParams(ir.PPARAM, params), declareParams(ir.PPARAMOUT, results))
-	checkdupfields("argument", typ.Recvs().FieldSlice(), typ.Params().FieldSlice(), typ.Results().FieldSlice())
-	fn.Nname.SetType(typ)
-	fn.Nname.SetTypecheck(1)
-
-	return fn
-}
-
-// Declare records that Node n declares symbol n.Sym in the specified
-// declaration context.
-func Declare(n *ir.Name, ctxt ir.Class) {
-	if ir.IsBlank(n) {
-		return
-	}
-
-	s := n.Sym()
-
-	// kludgy: TypecheckAllowed means we're past parsing. Eg reflectdata.methodWrapper may declare out of package names later.
-	if !inimport && !TypecheckAllowed && s.Pkg != types.LocalPkg {
-		base.ErrorfAt(n.Pos(), 0, "cannot declare name %v", s)
-	}
-
-	if ctxt == ir.PEXTERN {
-		if s.Name == "init" {
-			base.ErrorfAt(n.Pos(), errors.InvalidInitDecl, "cannot declare init - must be func")
-		}
-		if s.Name == "main" && s.Pkg.Name == "main" {
-			base.ErrorfAt(n.Pos(), errors.InvalidMainDecl, "cannot declare main - must be func")
-		}
-		Target.Externs = append(Target.Externs, n)
-		s.Def = n
-	} else {
-		if ir.CurFunc == nil && ctxt == ir.PAUTO {
-			base.Pos = n.Pos()
-			base.Fatalf("automatic outside function")
-		}
-		if ir.CurFunc != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME {
-			ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
-		}
-		n.Curfn = ir.CurFunc
-	}
-
-	if ctxt == ir.PAUTO {
-		n.SetFrameOffset(0)
-	}
-
-	n.Class = ctxt
-	if ctxt == ir.PFUNC {
-		n.Sym().SetFunc(true)
-	}
-
-	autoexport(n, ctxt)
-}
-
-// Export marks n for export (or reexport).
-func Export(n *ir.Name) {
-	if n.Sym().OnExportList() {
-		return
-	}
-	n.Sym().SetOnExportList(true)
-
-	if base.Flag.E != 0 {
-		fmt.Printf("export symbol %v\n", n.Sym())
-	}
-
-	Target.Exports = append(Target.Exports, n)
-}
-
-// declare the function proper
-// and declare the arguments.
-// called in extern-declaration context
-// returns in auto-declaration context.
-func StartFuncBody(fn *ir.Func) {
-	// change the declaration context from extern to auto
-	funcStack = append(funcStack, funcStackEnt{ir.CurFunc, DeclContext})
+	funcStack = append(funcStack, ir.CurFunc)
 	ir.CurFunc = fn
-	DeclContext = ir.PAUTO
 }
 
-// finish the body.
-// called in auto-declaration context.
-// returns in extern-declaration context.
+// FinishFuncBody restores ir.CurFunc to its state before the last
+// call to DeclFunc.
 func FinishFuncBody() {
-	// change the declaration context from auto to previous context
-	var e funcStackEnt
-	funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
-	ir.CurFunc, DeclContext = e.curfn, e.dclcontext
+	funcStack, ir.CurFunc = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
 }
 
 func CheckFuncStack() {
@@ -125,150 +42,29 @@
 	}
 }
 
-func autoexport(n *ir.Name, ctxt ir.Class) {
-	if n.Sym().Pkg != types.LocalPkg {
-		return
-	}
-	if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || DeclContext != ir.PEXTERN {
-		return
-	}
-	if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
-		return
-	}
-
-	if types.IsExported(n.Sym().Name) || n.Sym().Name == "init" {
-		Export(n)
-	}
-	if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
-		n.Sym().SetAsm(true)
-		Target.Asms = append(Target.Asms, n)
-	}
-}
-
-// checkdupfields emits errors for duplicately named fields or methods in
-// a list of struct or interface types.
-func checkdupfields(what string, fss ...[]*types.Field) {
-	seen := make(map[*types.Sym]bool)
-	for _, fs := range fss {
-		for _, f := range fs {
-			if f.Sym == nil || f.Sym.IsBlank() {
-				continue
-			}
-			if seen[f.Sym] {
-				base.ErrorfAt(f.Pos, errors.DuplicateFieldAndMethod, "duplicate %s %s", what, f.Sym.Name)
-				continue
-			}
-			seen[f.Sym] = true
-		}
-	}
-}
-
-// structs, functions, and methods.
-// they don't belong here, but where do they belong?
-func checkembeddedtype(t *types.Type) {
-	if t == nil {
-		return
-	}
-
-	if t.Sym() == nil && t.IsPtr() {
-		t = t.Elem()
-		if t.IsInterface() {
-			base.Errorf("embedded type cannot be a pointer to interface")
-		}
-	}
-
-	if t.IsPtr() || t.IsUnsafePtr() {
-		base.Errorf("embedded type cannot be a pointer")
-	} else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() {
-		t.ForwardType().Embedlineno = base.Pos
-	}
-}
-
-var funcStack []funcStackEnt // stack of previous values of ir.CurFunc/DeclContext
-
-type funcStackEnt struct {
-	curfn      *ir.Func
-	dclcontext ir.Class
-}
-
-func declareParams(ctxt ir.Class, l []*ir.Field) []*types.Field {
-	fields := make([]*types.Field, len(l))
-	for i, n := range l {
-		fields[i] = declareParam(ctxt, i, n)
-	}
-	return fields
-}
-
-func declareParam(ctxt ir.Class, i int, param *ir.Field) *types.Field {
-	f := types.NewField(param.Pos, param.Sym, param.Type)
-	f.SetIsDDD(param.IsDDD)
-
-	sym := param.Sym
-	if ctxt == ir.PPARAMOUT {
-		if sym == nil {
-			// Name so that escape analysis can track it. ~r stands for 'result'.
-			sym = LookupNum("~r", i)
-		} else if sym.IsBlank() {
-			// Give it a name so we can assign to it during return. ~b stands for 'blank'.
-			// The name must be different from ~r above because if you have
-			//	func f() (_ int)
-			//	func g() int
-			// f is allowed to use a plain 'return' with no arguments, while g is not.
-			// So the two cases must be distinguished.
-			sym = LookupNum("~b", i)
-		}
-	}
-
-	if sym != nil {
-		name := ir.NewNameAt(param.Pos, sym)
-		name.SetType(f.Type)
-		name.SetTypecheck(1)
-		Declare(name, ctxt)
-
-		f.Nname = name
-	}
-
-	return f
-}
-
-func Temp(t *types.Type) *ir.Name {
-	return TempAt(base.Pos, ir.CurFunc, t)
-}
-
 // make a new Node off the books.
-func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
+func TempAt(pos src.XPos, curfn *ir.Func, typ *types.Type) *ir.Name {
 	if curfn == nil {
-		base.Fatalf("no curfn for TempAt")
+		base.FatalfAt(pos, "no curfn for TempAt")
 	}
-	if curfn.Op() == ir.OCLOSURE {
-		ir.Dump("TempAt", curfn)
-		base.Fatalf("adding TempAt to wrong closure function")
+	if typ == nil {
+		base.FatalfAt(pos, "TempAt called with nil type")
 	}
-	if t == nil {
-		base.Fatalf("TempAt called with nil type")
+	if typ.Kind() == types.TFUNC && typ.Recv() != nil {
+		base.FatalfAt(pos, "misuse of method type: %v", typ)
 	}
-	if t.Kind() == types.TFUNC && t.Recv() != nil {
-		base.Fatalf("misuse of method type: %v", t)
-	}
+	types.CalcSize(typ)
 
-	s := &types.Sym{
+	sym := &types.Sym{
 		Name: autotmpname(len(curfn.Dcl)),
 		Pkg:  types.LocalPkg,
 	}
-	n := ir.NewNameAt(pos, s)
-	s.Def = n
-	n.SetType(t)
-	n.SetTypecheck(1)
-	n.Class = ir.PAUTO
-	n.SetEsc(ir.EscNever)
-	n.Curfn = curfn
-	n.SetUsed(true)
-	n.SetAutoTemp(true)
-	curfn.Dcl = append(curfn.Dcl, n)
+	name := curfn.NewLocal(pos, sym, typ)
+	name.SetEsc(ir.EscNever)
+	name.SetUsed(true)
+	name.SetAutoTemp(true)
 
-	types.CalcSize(t)
-
-	return n
+	return name
 }
 
 var (
@@ -310,18 +106,18 @@
 	// TODO(mdempsky): Move this function to types.
 	// TODO(mdempsky): Preserve positions, names, and package from sig+recv.
 
-	params := make([]*types.Field, nrecvs+sig.Params().Fields().Len())
+	params := make([]*types.Field, nrecvs+sig.NumParams())
 	if recv != nil {
 		params[0] = types.NewField(base.Pos, nil, recv)
 	}
-	for i, param := range sig.Params().Fields().Slice() {
+	for i, param := range sig.Params() {
 		d := types.NewField(base.Pos, nil, param.Type)
 		d.SetIsDDD(param.IsDDD())
 		params[nrecvs+i] = d
 	}
 
-	results := make([]*types.Field, sig.Results().Fields().Len())
-	for i, t := range sig.Results().Fields().Slice() {
+	results := make([]*types.Field, sig.NumResults())
+	for i, t := range sig.Results() {
 		results[i] = types.NewField(base.Pos, nil, t.Type)
 	}
 
diff --git a/src/cmd/compile/internal/typecheck/export.go b/src/cmd/compile/internal/typecheck/export.go
index af56ea8..585c1b7 100644
--- a/src/cmd/compile/internal/typecheck/export.go
+++ b/src/cmd/compile/internal/typecheck/export.go
@@ -5,70 +5,29 @@
 package typecheck
 
 import (
-	"go/constant"
-
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
 )
 
-// importalias declares symbol s as an imported type alias with type t.
-// ipkg is the package being imported.
-func importalias(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
-	return importobj(pos, s, ir.OTYPE, ir.PEXTERN, t)
-}
-
-// importconst declares symbol s as an imported constant with type t and value val.
-// ipkg is the package being imported.
-func importconst(pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
-	n := importobj(pos, s, ir.OLITERAL, ir.PEXTERN, t)
-	n.SetVal(val)
-	return n
-}
-
 // importfunc declares symbol s as an imported function with type t.
-// ipkg is the package being imported.
-func importfunc(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
-	n := importobj(pos, s, ir.ONAME, ir.PFUNC, t)
-	n.Func = ir.NewFunc(pos)
-	n.Func.Nname = n
-	return n
-}
-
-// importobj declares symbol s as an imported object representable by op.
-// ipkg is the package being imported.
-func importobj(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
-	n := importsym(pos, s, op, ctxt)
-	n.SetType(t)
-	if ctxt == ir.PFUNC {
-		n.Sym().SetFunc(true)
-	}
-	return n
-}
-
-func importsym(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
-	if n := s.PkgDef(); n != nil {
-		base.Fatalf("importsym of symbol that already exists: %v", n)
-	}
-
-	n := ir.NewDeclNameAt(pos, op, s)
-	n.Class = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too?
-	s.SetPkgDef(n)
-	return n
-}
-
-// importtype returns the named type declared by symbol s.
-// If no such type has been declared yet, a forward declaration is returned.
-// ipkg is the package being imported.
-func importtype(pos src.XPos, s *types.Sym) *ir.Name {
-	n := importsym(pos, s, ir.OTYPE, ir.PEXTERN)
-	n.SetType(types.NewNamed(n))
-	return n
+func importfunc(s *types.Sym, t *types.Type) {
+	fn := ir.NewFunc(src.NoXPos, src.NoXPos, s, t)
+	importsym(fn.Nname)
 }
 
 // importvar declares symbol s as an imported variable with type t.
-// ipkg is the package being imported.
-func importvar(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
-	return importobj(pos, s, ir.ONAME, ir.PEXTERN, t)
+func importvar(s *types.Sym, t *types.Type) {
+	n := ir.NewNameAt(src.NoXPos, s, t)
+	n.Class = ir.PEXTERN
+	importsym(n)
+}
+
+func importsym(name *ir.Name) {
+	sym := name.Sym()
+	if sym.Def != nil {
+		base.Fatalf("importsym of symbol that already exists: %v", sym.Def)
+	}
+	sym.Def = name
 }
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
index 2d25f80..12d1743 100644
--- a/src/cmd/compile/internal/typecheck/expr.go
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -8,45 +8,15 @@
 	"fmt"
 	"go/constant"
 	"go/token"
+	"internal/types/errors"
 	"strings"
 
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
+	"cmd/internal/src"
 )
 
-// tcAddr typechecks an OADDR node.
-func tcAddr(n *ir.AddrExpr) ir.Node {
-	n.X = Expr(n.X)
-	if n.X.Type() == nil {
-		n.SetType(nil)
-		return n
-	}
-
-	switch n.X.Op() {
-	case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
-		n.SetOp(ir.OPTRLIT)
-
-	default:
-		checklvalue(n.X, "take the address of")
-		r := ir.OuterValue(n.X)
-		if r.Op() == ir.ONAME {
-			r := r.(*ir.Name)
-			if ir.Orig(r) != r {
-				base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
-			}
-		}
-		n.X = DefaultLit(n.X, nil)
-		if n.X.Type() == nil {
-			n.SetType(nil)
-			return n
-		}
-	}
-
-	n.SetType(types.NewPtr(n.X.Type()))
-	return n
-}
-
 func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
 	if l.Type() == nil || r.Type() == nil {
 		return l, r, nil
@@ -99,7 +69,7 @@
 		// The conversion allocates, so only do it if the concrete type is huge.
 		converted := false
 		if r.Type().Kind() != types.TBLANK {
-			aop, _ = Assignop(l.Type(), r.Type())
+			aop, _ = assignOp(l.Type(), r.Type())
 			if aop != ir.OXXX {
 				if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) {
 					base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type()))
@@ -118,7 +88,7 @@
 		}
 
 		if !converted && l.Type().Kind() != types.TBLANK {
-			aop, _ = Assignop(r.Type(), l.Type())
+			aop, _ = assignOp(r.Type(), l.Type())
 			if aop != ir.OXXX {
 				if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) {
 					base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type()))
@@ -200,9 +170,6 @@
 		base.Pos = lno
 	}()
 
-	// Save original node (including n.Right)
-	n.SetOrig(ir.Copy(n))
-
 	ir.SetPos(n)
 
 	t := n.Type()
@@ -274,7 +241,7 @@
 				// walkClosure(), because the instantiated
 				// function is compiled as if in the source
 				// package of the generic function.
-				if !(ir.CurFunc != nil && strings.Index(ir.CurFunc.Nname.Sym().Name, "[") >= 0) {
+				if !(ir.CurFunc != nil && strings.Contains(ir.CurFunc.Nname.Sym().Name, "[")) {
 					if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
 						base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
 					}
@@ -385,9 +352,12 @@
 		n.SetType(nil)
 		return n
 	}
-	op, why := Convertop(n.X.Op() == ir.OLITERAL, t, n.Type())
+	op, why := convertOp(n.X.Op() == ir.OLITERAL, t, n.Type())
 	if op == ir.OXXX {
-		base.Fatalf("cannot convert %L to type %v%s", n.X, n.Type(), why)
+		// Due to //go:nointerface, we may be stricter than types2 here (#63333).
+		base.ErrorfAt(n.Pos(), errors.InvalidConversion, "cannot convert %L to type %v%s", n.X, n.Type(), why)
+		n.SetType(nil)
+		return n
 	}
 
 	n.SetOp(op)
@@ -436,6 +406,66 @@
 	return n
 }
 
+// DotField returns a field selector expression that selects the
+// index'th field of the given expression, which must be of struct or
+// pointer-to-struct type.
+func DotField(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
+	op, typ := ir.ODOT, x.Type()
+	if typ.IsPtr() {
+		op, typ = ir.ODOTPTR, typ.Elem()
+	}
+	if !typ.IsStruct() {
+		base.FatalfAt(pos, "DotField of non-struct: %L", x)
+	}
+
+	// TODO(mdempsky): This is the backend's responsibility.
+	types.CalcSize(typ)
+
+	field := typ.Field(index)
+	return dot(pos, field.Type, op, x, field)
+}
+
+func dot(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node, selection *types.Field) *ir.SelectorExpr {
+	n := ir.NewSelectorExpr(pos, op, x, selection.Sym)
+	n.Selection = selection
+	n.SetType(typ)
+	n.SetTypecheck(1)
+	return n
+}
+
+// XDotField returns an expression representing the field selection
+// x.sym. If any implicit field selections are necessary, those are
+// inserted too.
+func XDotField(pos src.XPos, x ir.Node, sym *types.Sym) *ir.SelectorExpr {
+	n := Expr(ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)).(*ir.SelectorExpr)
+	if n.Op() != ir.ODOT && n.Op() != ir.ODOTPTR {
+		base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n)
+	}
+	return n
+}
+
+// XDotMethod returns an expression representing the method value
+// x.sym (i.e., x is a value, not a type). If any implicit field
+// selections are necessary, those are inserted too.
+//
+// If callee is true, the result is an ODOTMETH/ODOTINTER, otherwise
+// an OMETHVALUE.
+func XDotMethod(pos src.XPos, x ir.Node, sym *types.Sym, callee bool) *ir.SelectorExpr {
+	n := ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)
+	if callee {
+		n = Callee(n).(*ir.SelectorExpr)
+		if n.Op() != ir.ODOTMETH && n.Op() != ir.ODOTINTER {
+			base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n)
+		}
+	} else {
+		n = Expr(n).(*ir.SelectorExpr)
+		if n.Op() != ir.OMETHVALUE {
+			base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n)
+		}
+	}
+	return n
+}
+
 // tcDot typechecks an OXDOT or ODOT node.
 func tcDot(n *ir.SelectorExpr, top int) ir.Node {
 	if n.Op() == ir.OXDOT {
@@ -447,7 +477,7 @@
 		}
 	}
 
-	n.X = typecheck(n.X, ctxExpr|ctxType)
+	n.X = Expr(n.X)
 	n.X = DefaultLit(n.X, nil)
 
 	t := n.X.Type()
@@ -458,7 +488,7 @@
 	}
 
 	if n.X.Op() == ir.OTYPE {
-		return typecheckMethodExpr(n)
+		base.FatalfAt(n.Pos(), "use NewMethodExpr to construct OMETHEXPR")
 	}
 
 	if t.IsPtr() && !t.Elem().IsInterface() {
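
The new DotField/XDotField/XDotMethod constructors hand callers already-typechecked selector nodes. A hedged usage sketch (only meaningful inside cmd/compile, since these are internal packages; the helper itself is not part of the compiler):

	package example

	import (
		"cmd/compile/internal/ir"
		"cmd/compile/internal/typecheck"
		"cmd/compile/internal/types"
		"cmd/internal/src"
	)

	// selectors assumes x is an already-typechecked expression of struct (or
	// pointer-to-struct) type and that sym names one of its methods.
	func selectors(pos src.XPos, x ir.Node, sym *types.Sym) []ir.Node {
		field := typecheck.DotField(pos, x, 0)            // x.<field #0>: ODOT or ODOTPTR
		value := typecheck.XDotMethod(pos, x, sym, false) // method value x.sym: OMETHVALUE
		callee := typecheck.XDotMethod(pos, x, sym, true) // call target x.sym: ODOTMETH or ODOTINTER
		return []ir.Node{field, value, callee}
	}
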
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
index 47d6c1e..5c54a5b 100644
--- a/src/cmd/compile/internal/typecheck/func.go
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -17,18 +17,15 @@
 
 // MakeDotArgs packages all the arguments that match a ... T parameter into a []T.
 func MakeDotArgs(pos src.XPos, typ *types.Type, args []ir.Node) ir.Node {
-	var n ir.Node
 	if len(args) == 0 {
-		n = ir.NewNilExpr(pos)
-		n.SetType(typ)
-	} else {
-		args = append([]ir.Node(nil), args...)
-		lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, typ, args)
-		lit.SetImplicit(true)
-		n = lit
+		return ir.NewNilExpr(pos, typ)
 	}
 
-	n = Expr(n)
+	args = append([]ir.Node(nil), args...)
+	lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, typ, args)
+	lit.SetImplicit(true)
+
+	n := Expr(lit)
 	if n.Type() == nil {
 		base.FatalfAt(pos, "mkdotargslice: typecheck failed")
 	}
@@ -38,13 +35,13 @@
 // FixVariadicCall rewrites calls to variadic functions to use an
 // explicit ... argument if one is not already present.
 func FixVariadicCall(call *ir.CallExpr) {
-	fntype := call.X.Type()
+	fntype := call.Fun.Type()
 	if !fntype.IsVariadic() || call.IsDDD {
 		return
 	}
 
 	vi := fntype.NumParams() - 1
-	vt := fntype.Params().Field(vi).Type
+	vt := fntype.Param(vi).Type
 
 	args := call.Args
 	extra := args[vi:]
@@ -59,25 +56,25 @@
 
 // FixMethodCall rewrites a method call t.M(...) into a function call T.M(t, ...).
 func FixMethodCall(call *ir.CallExpr) {
-	if call.X.Op() != ir.ODOTMETH {
+	if call.Fun.Op() != ir.ODOTMETH {
 		return
 	}
 
-	dot := call.X.(*ir.SelectorExpr)
+	dot := call.Fun.(*ir.SelectorExpr)
 
-	fn := Expr(ir.NewSelectorExpr(dot.Pos(), ir.OXDOT, ir.TypeNode(dot.X.Type()), dot.Selection.Sym))
+	fn := NewMethodExpr(dot.Pos(), dot.X.Type(), dot.Selection.Sym)
 
 	args := make([]ir.Node, 1+len(call.Args))
 	args[0] = dot.X
 	copy(args[1:], call.Args)
 
 	call.SetOp(ir.OCALLFUNC)
-	call.X = fn
+	call.Fun = fn
 	call.Args = args
 }
 
 func AssertFixedCall(call *ir.CallExpr) {
-	if call.X.Type().IsVariadic() && !call.IsDDD {
+	if call.Fun.Type().IsVariadic() && !call.IsDDD {
 		base.FatalfAt(call.Pos(), "missed FixVariadicCall")
 	}
 	if call.Op() == ir.OCALLMETH {
@@ -130,82 +127,6 @@
 	return t
 }
 
-// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
-// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn *ir.Name) *types.Pkg {
-	if ir.IsMethod(fn) {
-		// method
-		rcvr := fn.Type().Recv().Type
-
-		if rcvr.IsPtr() {
-			rcvr = rcvr.Elem()
-		}
-		if rcvr.Sym() == nil {
-			base.Fatalf("receiver with no sym: [%v] %L  (%v)", fn.Sym(), fn, rcvr)
-		}
-		return rcvr.Sym().Pkg
-	}
-
-	// non-method
-	return fn.Sym().Pkg
-}
-
-// tcClosure typechecks an OCLOSURE node. It also creates the named
-// function associated with the closure.
-// TODO: This creation of the named function should probably really be done in a
-// separate pass from type-checking.
-func tcClosure(clo *ir.ClosureExpr, top int) ir.Node {
-	fn := clo.Func
-
-	// We used to allow IR builders to typecheck the underlying Func
-	// themselves, but that led to too much variety and inconsistency
-	// around who's responsible for naming the function, typechecking
-	// it, or adding it to Target.Decls.
-	//
-	// It's now all or nothing. Callers are still allowed to do these
-	// themselves, but then they assume responsibility for all of them.
-	if fn.Typecheck() == 1 {
-		base.FatalfAt(fn.Pos(), "underlying closure func already typechecked: %v", fn)
-	}
-
-	ir.NameClosure(clo, ir.CurFunc)
-	Func(fn)
-
-	// Type check the body now, but only if we're inside a function.
-	// At top level (in a variable initialization: curfn==nil) we're not
-	// ready to type check code yet; we'll check it later, because the
-	// underlying closure function we create is added to Target.Decls.
-	if ir.CurFunc != nil {
-		oldfn := ir.CurFunc
-		ir.CurFunc = fn
-		Stmts(fn.Body)
-		ir.CurFunc = oldfn
-	}
-
-	out := 0
-	for _, v := range fn.ClosureVars {
-		if v.Type() == nil {
-			// If v.Type is nil, it means v looked like it was going to be
-			// used in the closure, but isn't. This happens in struct
-			// literals like s{f: x} where we can't distinguish whether f is
-			// a field identifier or expression until resolving s.
-			continue
-		}
-
-		// type check closed variables outside the closure, so that the
-		// outer frame also captures them.
-		Expr(v.Outer)
-
-		fn.ClosureVars[out] = v
-		out++
-	}
-	fn.ClosureVars = fn.ClosureVars[:out]
-
-	clo.SetType(fn.Type())
-
-	return ir.UseClosure(clo, Target)
-}
-
 // type check function definition
 // To be called by typecheck, not directly.
 // (Call typecheck.Func instead.)
@@ -223,9 +144,9 @@
 // tcCall typechecks an OCALL node.
 func tcCall(n *ir.CallExpr, top int) ir.Node {
 	Stmts(n.Init()) // imported rewritten f(g()) calls (#30907)
-	n.X = typecheck(n.X, ctxExpr|ctxType|ctxCallee)
+	n.Fun = typecheck(n.Fun, ctxExpr|ctxType|ctxCallee)
 
-	l := n.X
+	l := n.Fun
 
 	if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 {
 		l := l.(*ir.Name)
@@ -238,16 +159,16 @@
 		default:
 			base.Fatalf("unknown builtin %v", l)
 
-		case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+		case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTLN, ir.ORECOVER:
 			n.SetOp(l.BuiltinOp)
-			n.X = nil
+			n.Fun = nil
 			n.SetTypecheck(0) // re-typechecking new op is OK, not a loop
 			return typecheck(n, top)
 
 		case ir.OCAP, ir.OCLEAR, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL, ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
 			typecheckargs(n)
 			fallthrough
-		case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+		case ir.ONEW:
 			arg, ok := needOneArg(n, "%v", n.Op())
 			if !ok {
 				n.SetType(nil)
@@ -269,8 +190,8 @@
 		panic("unreachable")
 	}
 
-	n.X = DefaultLit(n.X, nil)
-	l = n.X
+	n.Fun = DefaultLit(n.Fun, nil)
+	l = n.Fun
 	if l.Op() == ir.OTYPE {
 		if n.IsDDD {
 			base.Fatalf("invalid use of ... in type conversion to %v", l.Type())
@@ -318,7 +239,7 @@
 	default:
 		n.SetOp(ir.OCALLFUNC)
 		if t.Kind() != types.TFUNC {
-			if o := ir.Orig(l); o.Name() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
+			if o := l; o.Name() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
 				// be more specific when the non-function
 				// name matches a predeclared function
 				base.Errorf("cannot call non-function %L, declared at %s",
@@ -331,17 +252,17 @@
 		}
 	}
 
-	typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.X) })
+	typecheckaste(ir.OCALL, n.Fun, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.Fun) })
 	FixVariadicCall(n)
 	FixMethodCall(n)
 	if t.NumResults() == 0 {
 		return n
 	}
 	if t.NumResults() == 1 {
-		n.SetType(l.Type().Results().Field(0).Type)
+		n.SetType(l.Type().Result(0).Type)
 
-		if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {
-			if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" {
+		if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME {
+			if sym := n.Fun.(*ir.Name).Sym(); types.RuntimeSymName(sym) == "getg" {
 				// Emit code for runtime.getg() directly instead of calling function.
 				// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
 				// so that the ordering pass can make sure to preserve the semantics of the original code
@@ -360,7 +281,7 @@
 		return n
 	}
 
-	n.SetType(l.Type().Results())
+	n.SetType(l.Type().ResultsTuple())
 	return n
 }
 
@@ -834,22 +755,17 @@
 		return n
 	}
 
-	n.SetType(types.Types[types.TINTER])
-	return n
-}
-
-// tcRecoverFP typechecks an ORECOVERFP node.
-func tcRecoverFP(n *ir.CallExpr) ir.Node {
-	if len(n.Args) != 1 {
-		base.FatalfAt(n.Pos(), "wrong number of arguments: %v", n)
+	// FP is equal to caller's SP plus FixedFrameSize.
+	var fp ir.Node = ir.NewCallExpr(n.Pos(), ir.OGETCALLERSP, nil, nil)
+	if off := base.Ctxt.Arch.FixedFrameSize; off != 0 {
+		fp = ir.NewBinaryExpr(n.Pos(), ir.OADD, fp, ir.NewInt(base.Pos, off))
 	}
+	// TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
+	fp = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
 
-	n.Args[0] = Expr(n.Args[0])
-	if !n.Args[0].Type().IsPtrShaped() {
-		base.FatalfAt(n.Pos(), "%L is not pointer shaped", n.Args[0])
-	}
-
+	n.SetOp(ir.ORECOVERFP)
 	n.SetType(types.Types[types.TINTER])
+	n.Args = []ir.Node{Expr(fp)}
 	return n
 }
 
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index df579b7..83d35b3 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -235,69 +235,11 @@
 package typecheck
 
 import (
-	"go/constant"
-	"strconv"
 	"strings"
-
-	"cmd/compile/internal/base"
-	"cmd/compile/internal/ir"
-	"cmd/compile/internal/types"
 )
 
-// predeclReserved is the number of type offsets reserved for types
-// implicitly declared in the universe block.
-const predeclReserved = 32
-
-// An itag distinguishes the kind of type that was written into the
-// indexed export format.
-type itag uint64
-
-const (
-	// Types
-	definedType itag = iota
-	pointerType
-	sliceType
-	arrayType
-	chanType
-	mapType
-	signatureType
-	structType
-	interfaceType
-	typeParamType
-	instanceType // Instantiation of a generic type
-	unionType
-)
-
-const (
-	debug = false
-	magic = 0x6742937dc293105
-)
-
-// exportPath returns the path for pkg as it appears in the iexport
-// file format. For historical reasons (before cmd/compile required
-// the -p flag), the local package is represented as the empty string,
-// instead of its actual path.
-func exportPath(pkg *types.Pkg) string {
-	if pkg == types.LocalPkg {
-		return ""
-	}
-	return pkg.Path
-}
-
 const blankMarker = "$"
 
-// TparamExportName creates a unique name for type param in a method or a generic
-// type, using the specified unique prefix and the index of the type param. The index
-// is only used if the type param is blank, in which case the blank is replace by
-// "$<index>". A unique name is needed for later substitution in the compiler and
-// export/import that keeps blank type params associated with the correct constraint.
-func TparamExportName(prefix string, name string, index int) string {
-	if name == "_" {
-		name = blankMarker + strconv.Itoa(index)
-	}
-	return prefix + "." + name
-}
-
 // TparamName returns the real name of a type parameter, after stripping its
 // qualifying prefix and reverting blank-name encoding. See TparamExportName
 // for details.
@@ -314,83 +256,5 @@
 	return name
 }
 
-func constTypeOf(typ *types.Type) constant.Kind {
-	switch typ {
-	case types.UntypedInt, types.UntypedRune:
-		return constant.Int
-	case types.UntypedFloat:
-		return constant.Float
-	case types.UntypedComplex:
-		return constant.Complex
-	}
-
-	switch typ.Kind() {
-	case types.TBOOL:
-		return constant.Bool
-	case types.TSTRING:
-		return constant.String
-	case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64,
-		types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
-		return constant.Int
-	case types.TFLOAT32, types.TFLOAT64:
-		return constant.Float
-	case types.TCOMPLEX64, types.TCOMPLEX128:
-		return constant.Complex
-	}
-
-	base.Fatalf("unexpected constant type: %v", typ)
-	return 0
-}
-
-func intSize(typ *types.Type) (signed bool, maxBytes uint) {
-	if typ.IsUntyped() {
-		return true, ir.ConstPrec / 8
-	}
-
-	switch typ.Kind() {
-	case types.TFLOAT32, types.TCOMPLEX64:
-		return true, 3
-	case types.TFLOAT64, types.TCOMPLEX128:
-		return true, 7
-	}
-
-	signed = typ.IsSigned()
-	maxBytes = uint(typ.Size())
-
-	// The go/types API doesn't expose sizes to importers, so they
-	// don't know how big these types are.
-	switch typ.Kind() {
-	case types.TINT, types.TUINT, types.TUINTPTR:
-		maxBytes = 8
-	}
-
-	return
-}
-
-func isNonEmptyAssign(n ir.Node) bool {
-	switch n.Op() {
-	case ir.OAS:
-		if n.(*ir.AssignStmt).Y != nil {
-			return true
-		}
-	case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
-		return true
-	}
-	return false
-}
-func isNamedTypeSwitch(x ir.Node) bool {
-	guard, ok := x.(*ir.TypeSwitchGuard)
-	return ok && guard.Tag != nil
-}
-
-func simplifyForExport(n ir.Node) ir.Node {
-	switch n.Op() {
-	case ir.OPAREN:
-		n := n.(*ir.ParenExpr)
-		return simplifyForExport(n.X)
-	}
-	return n
-}
-
 // The name used for dictionary parameters or local variables.
 const LocalDictName = ".dict"
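
TparamName still has to undo the export encoding that the (now removed) TparamExportName produced: prefix + "." + name, with a blank name replaced by blankMarker plus its index. A standalone sketch of that round trip (encode and decode here are illustrative re-implementations, not the compiler's):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	const blankMarker = "$"

	func encode(prefix, name string, index int) string {
		if name == "_" {
			name = blankMarker + strconv.Itoa(index) // keep blank params distinct
		}
		return prefix + "." + name
	}

	func decode(exported string) string {
		name := exported[strings.LastIndex(exported, ".")+1:]
		if strings.HasPrefix(name, blankMarker) {
			return "_"
		}
		return name
	}

	func main() {
		e := encode("T.M", "_", 2)
		fmt.Println(e, "->", decode(e)) // T.M.$2 -> _
	}
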
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
index 9dea261..8d79248 100644
--- a/src/cmd/compile/internal/typecheck/stmt.go
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -20,70 +20,6 @@
 }
 
 func typecheckrangeExpr(n *ir.RangeStmt) {
-	n.X = Expr(n.X)
-	if n.X.Type() == nil {
-		return
-	}
-
-	t := RangeExprType(n.X.Type())
-	// delicate little dance.  see tcAssignList
-	if n.Key != nil && !ir.DeclaredBy(n.Key, n) {
-		n.Key = AssignExpr(n.Key)
-	}
-	if n.Value != nil && !ir.DeclaredBy(n.Value, n) {
-		n.Value = AssignExpr(n.Value)
-	}
-
-	var tk, tv *types.Type
-	toomany := false
-	switch t.Kind() {
-	default:
-		base.ErrorfAt(n.Pos(), errors.InvalidRangeExpr, "cannot range over %L", n.X)
-		return
-
-	case types.TARRAY, types.TSLICE:
-		tk = types.Types[types.TINT]
-		tv = t.Elem()
-
-	case types.TMAP:
-		tk = t.Key()
-		tv = t.Elem()
-
-	case types.TCHAN:
-		if !t.ChanDir().CanRecv() {
-			base.ErrorfAt(n.Pos(), errors.InvalidRangeExpr, "invalid operation: range %v (receive from send-only type %v)", n.X, n.X.Type())
-			return
-		}
-
-		tk = t.Elem()
-		tv = nil
-		if n.Value != nil {
-			toomany = true
-		}
-
-	case types.TSTRING:
-		tk = types.Types[types.TINT]
-		tv = types.RuneType
-	}
-
-	if toomany {
-		base.ErrorfAt(n.Pos(), errors.InvalidIterVar, "too many variables in range")
-	}
-
-	do := func(nn ir.Node, t *types.Type) {
-		if nn != nil {
-			if ir.DeclaredBy(nn, n) && nn.Type() == nil {
-				nn.SetType(t)
-			} else if nn.Type() != nil {
-				if op, why := Assignop(t, nn.Type()); op == ir.OXXX {
-					base.ErrorfAt(n.Pos(), errors.InvalidIterVar, "cannot assign type %v to %L in range%s", t, nn, why)
-				}
-			}
-			checkassign(nn)
-		}
-	}
-	do(n.Key, tk)
-	do(n.Value, tv)
 }
 
 // type check assignment.
@@ -127,7 +63,6 @@
 	// so that the conversion below happens).
 
 	checkLHS := func(i int, typ *types.Type) {
-		lhs[i] = Resolve(lhs[i])
 		if n := lhs[i]; typ != nil && ir.DeclaredBy(n, stmt) && n.Type() == nil {
 			base.Assertf(typ.Kind() == types.TNIL, "unexpected untyped nil")
 			n.SetType(defaultType(typ))
@@ -186,7 +121,7 @@
 	if len(lhs) != cr {
 		if r, ok := rhs[0].(*ir.CallExpr); ok && len(rhs) == 1 {
 			if r.Type() != nil {
-				base.ErrorfAt(stmt.Pos(), errors.WrongAssignCount, "assignment mismatch: %d variable%s but %v returns %d value%s", len(lhs), plural(len(lhs)), r.X, cr, plural(cr))
+				base.ErrorfAt(stmt.Pos(), errors.WrongAssignCount, "assignment mismatch: %d variable%s but %v returns %d value%s", len(lhs), plural(len(lhs)), r.Fun, cr, plural(cr))
 			}
 		} else {
 			base.ErrorfAt(stmt.Pos(), errors.WrongAssignCount, "assignment mismatch: %d variable%s but %v value%s", len(lhs), plural(len(lhs)), len(rhs), plural(len(rhs)))
@@ -263,57 +198,186 @@
 	return n
 }
 
+// tcGoDefer typechecks (normalizes) an OGO/ODEFER statement.
 func tcGoDefer(n *ir.GoDeferStmt) {
-	what := "defer"
-	if n.Op() == ir.OGO {
-		what = "go"
-	}
+	call := normalizeGoDeferCall(n.Pos(), n.Op(), n.Call, n.PtrInit())
+	call.GoDefer = true
+	n.Call = call
+}
 
-	switch n.Call.Op() {
-	// ok
-	case ir.OCALLINTER,
-		ir.OCALLMETH,
-		ir.OCALLFUNC,
-		ir.OCLEAR,
-		ir.OCLOSE,
-		ir.OCOPY,
-		ir.ODELETE,
-		ir.OMAX,
-		ir.OMIN,
-		ir.OPANIC,
-		ir.OPRINT,
-		ir.OPRINTN,
-		ir.ORECOVER:
-		return
+// normalizeGoDeferCall normalizes call into a normal function call
+// with no arguments and no results, suitable for use in an OGO/ODEFER
+// statement.
+//
+// For example, it normalizes:
+//
+//	f(x, y)
+//
+// into:
+//
+//	x1, y1 := x, y          // added to init
+//	func() { f(x1, y1) }()  // result
+func normalizeGoDeferCall(pos src.XPos, op ir.Op, call ir.Node, init *ir.Nodes) *ir.CallExpr {
+	init.Append(ir.TakeInit(call)...)
 
-	case ir.OAPPEND,
-		ir.OCAP,
-		ir.OCOMPLEX,
-		ir.OIMAG,
-		ir.OLEN,
-		ir.OMAKE,
-		ir.OMAKESLICE,
-		ir.OMAKECHAN,
-		ir.OMAKEMAP,
-		ir.ONEW,
-		ir.OREAL,
-		ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
-		if orig := ir.Orig(n.Call); orig.Op() == ir.OCONV {
-			break
+	if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
+		if sig := call.Fun.Type(); sig.NumParams()+sig.NumResults() == 0 {
+			return call // already in normal form
 		}
-		base.ErrorfAt(n.Pos(), errors.UnusedResults, "%s discards result of %v", what, n.Call)
-		return
 	}
 
-	// type is broken or missing, most likely a method call on a broken type
-	// we will warn about the broken type elsewhere. no need to emit a potentially confusing error
-	if n.Call.Type() == nil {
-		return
+	// Create a new wrapper function without parameters or results.
+	wrapperFn := ir.NewClosureFunc(pos, pos, op, types.NewSignature(nil, nil, nil), ir.CurFunc, Target)
+	wrapperFn.DeclareParams(true)
+	wrapperFn.SetWrapper(true)
+
+	// argps collects the list of operands within the call expression
+	// that must be evaluated at the go/defer statement.
+	var argps []*ir.Node
+
+	var visit func(argp *ir.Node)
+	visit = func(argp *ir.Node) {
+		arg := *argp
+		if arg == nil {
+			return
+		}
+
+		// Recognize a few common expressions that can be evaluated within
+		// the wrapper, so we don't need to allocate space for them within
+		// the closure.
+		switch arg.Op() {
+		case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR, ir.ONEW:
+			return
+		case ir.ONAME:
+			arg := arg.(*ir.Name)
+			if arg.Class == ir.PFUNC {
+				return // reference to global function
+			}
+		case ir.OADDR:
+			arg := arg.(*ir.AddrExpr)
+			if arg.X.Op() == ir.OLINKSYMOFFSET {
+				return // address of global symbol
+			}
+
+		case ir.OCONVNOP:
+			arg := arg.(*ir.ConvExpr)
+
+			// For unsafe.Pointer->uintptr conversion arguments, save the
+			// unsafe.Pointer argument. This is necessary to handle cases
+			// like fixedbugs/issue24491a.go correctly.
+			//
+			// TODO(mdempsky): Limit to static callees with
+			// //go:uintptr{escapes,keepalive}?
+			if arg.Type().IsUintptr() && arg.X.Type().IsUnsafePtr() {
+				visit(&arg.X)
+				return
+			}
+
+		case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
+			// TODO(mdempsky): For very large slices, it may be preferable
+			// to construct them at the go/defer statement instead.
+			list := arg.(*ir.CompLitExpr).List
+			for i, el := range list {
+				switch el := el.(type) {
+				case *ir.KeyExpr:
+					visit(&el.Value)
+				case *ir.StructKeyExpr:
+					visit(&el.Value)
+				default:
+					visit(&list[i])
+				}
+			}
+			return
+		}
+
+		argps = append(argps, argp)
 	}
 
-	// The syntax made sure it was a call, so this must be
-	// a conversion.
-	base.FatalfAt(n.Pos(), "%s requires function call, not conversion", what)
+	visitList := func(list []ir.Node) {
+		for i := range list {
+			visit(&list[i])
+		}
+	}
+
+	switch call.Op() {
+	default:
+		base.Fatalf("unexpected call op: %v", call.Op())
+
+	case ir.OCALLFUNC:
+		call := call.(*ir.CallExpr)
+
+		// If the callee is a named function, link to the original callee.
+		if wrapped := ir.StaticCalleeName(call.Fun); wrapped != nil {
+			wrapperFn.WrappedFunc = wrapped.Func
+		}
+
+		visit(&call.Fun)
+		visitList(call.Args)
+
+	case ir.OCALLINTER:
+		call := call.(*ir.CallExpr)
+		argps = append(argps, &call.Fun.(*ir.SelectorExpr).X) // must be first for OCHECKNIL; see below
+		visitList(call.Args)
+
+	case ir.OAPPEND, ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+		call := call.(*ir.CallExpr)
+		visitList(call.Args)
+		visit(&call.RType)
+
+	case ir.OCOPY:
+		call := call.(*ir.BinaryExpr)
+		visit(&call.X)
+		visit(&call.Y)
+		visit(&call.RType)
+
+	case ir.OCLEAR, ir.OCLOSE, ir.OPANIC:
+		call := call.(*ir.UnaryExpr)
+		visit(&call.X)
+	}
+
+	if len(argps) != 0 {
+		// Found one or more operands that need to be evaluated upfront
+		// and spilled to temporary variables, which can be captured by
+		// the wrapper function.
+
+		stmtPos := base.Pos
+		callPos := base.Pos
+
+		as := ir.NewAssignListStmt(callPos, ir.OAS2, make([]ir.Node, len(argps)), make([]ir.Node, len(argps)))
+		for i, argp := range argps {
+			arg := *argp
+
+			pos := callPos
+			if ir.HasUniquePos(arg) {
+				pos = arg.Pos()
+			}
+
+			// tmp := arg
+			tmp := TempAt(pos, ir.CurFunc, arg.Type())
+			init.Append(Stmt(ir.NewDecl(pos, ir.ODCL, tmp)))
+			tmp.Defn = as
+			as.Lhs[i] = tmp
+			as.Rhs[i] = arg
+
+			// Rewrite original expression to use/capture tmp.
+			*argp = ir.NewClosureVar(pos, wrapperFn, tmp)
+		}
+		init.Append(Stmt(as))
+
+		// For "go/defer iface.M()", if iface is nil, we need to panic at
+		// the point of the go/defer statement.
+		if call.Op() == ir.OCALLINTER {
+			iface := as.Lhs[0]
+			init.Append(Stmt(ir.NewUnaryExpr(stmtPos, ir.OCHECKNIL, ir.NewUnaryExpr(iface.Pos(), ir.OITAB, iface))))
+		}
+	}
+
+	// Move call into the wrapper function, now that it's safe to
+	// evaluate there.
+	wrapperFn.Body = []ir.Node{call}
+
+	// Finally, construct a call to the wrapper.
+	return Call(call.Pos(), wrapperFn.OClosure, nil, false).(*ir.CallExpr)
 }
 
 // tcIf typechecks an OIF node.
@@ -334,18 +398,23 @@
 
 // range
 func tcRange(n *ir.RangeStmt) {
-	// Typechecking order is important here:
-	// 0. first typecheck range expression (slice/map/chan),
-	//	it is evaluated only once and so logically it is not part of the loop.
-	// 1. typecheck produced values,
-	//	this part can declare new vars and so it must be typechecked before body,
-	//	because body can contain a closure that captures the vars.
-	// 2. decldepth++ to denote loop body.
-	// 3. typecheck body.
-	// 4. decldepth--.
-	typecheckrangeExpr(n)
+	n.X = Expr(n.X)
 
-	// second half of dance, the first half being typecheckrangeExpr
+	// delicate little dance.  see tcAssignList
+	if n.Key != nil {
+		if !ir.DeclaredBy(n.Key, n) {
+			n.Key = AssignExpr(n.Key)
+		}
+		checkassign(n.Key)
+	}
+	if n.Value != nil {
+		if !ir.DeclaredBy(n.Value, n) {
+			n.Value = AssignExpr(n.Value)
+		}
+		checkassign(n.Value)
+	}
+
+	// second half of dance
 	n.SetTypecheck(1)
 	if n.Key != nil && n.Key.Typecheck() == 0 {
 		n.Key = AssignExpr(n.Key)
@@ -359,17 +428,14 @@
 
 // tcReturn typechecks an ORETURN node.
 func tcReturn(n *ir.ReturnStmt) ir.Node {
-	typecheckargs(n)
 	if ir.CurFunc == nil {
-		base.Errorf("return outside function")
-		n.SetType(nil)
-		return n
+		base.FatalfAt(n.Pos(), "return outside function")
 	}
 
-	if ir.HasNamedResults(ir.CurFunc) && len(n.Results) == 0 {
-		return n
+	typecheckargs(n)
+	if len(n.Results) != 0 {
+		typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" })
 	}
-	typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" })
 	return n
 }
 
@@ -538,8 +604,8 @@
 			} else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) {
 				base.ErrorfAt(ncase.Pos(), errors.UndefinedOp, "invalid case %L in switch (incomparable type)", n1)
 			} else {
-				op1, _ := Assignop(n1.Type(), t)
-				op2, _ := Assignop(t, n1.Type())
+				op1, _ := assignOp(n1.Type(), t)
+				op2, _ := assignOp(t, n1.Type())
 				if op1 == ir.OXXX && op2 == ir.OXXX {
 					if n.Tag != nil {
 						base.ErrorfAt(ncase.Pos(), errors.MismatchedTypes, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t)
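
The normalization in tcGoDefer preserves the language rule it is built around: the operands of a go/defer call are evaluated when the statement executes, and only the call itself is delayed. A runnable reminder of that rule, independent of the compiler internals above:

	package main

	import "fmt"

	func main() {
		x := 1
		// x is evaluated here, at the defer statement, which is exactly
		// what the spilled temporaries in normalizeGoDeferCall make explicit.
		defer fmt.Println("deferred saw x =", x) // prints 1
		x = 2
		fmt.Println("current x =", x) // prints 2
	}
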
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
index 2bb978a..d64b0f0 100644
--- a/src/cmd/compile/internal/typecheck/subr.go
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -26,33 +26,14 @@
 }
 
 // Given funarg struct list, return list of fn args.
-func NewFuncParams(tl *types.Type, mustname bool) []*ir.Field {
-	var args []*ir.Field
-	gen := 0
-	for _, t := range tl.Fields().Slice() {
-		s := t.Sym
-		if mustname && (s == nil || s.Name == "_") {
-			// invent a name so that we can refer to it in the trampoline
-			s = LookupNum(".anon", gen)
-			gen++
-		} else if s != nil && s.Pkg != types.LocalPkg {
-			// TODO(mdempsky): Preserve original position, name, and package.
-			s = Lookup(s.Name)
-		}
-		a := ir.NewField(base.Pos, s, t.Type)
-		a.Pos = t.Pos
-		a.IsDDD = t.IsDDD()
-		args = append(args, a)
+func NewFuncParams(origs []*types.Field) []*types.Field {
+	res := make([]*types.Field, len(origs))
+	for i, orig := range origs {
+		p := types.NewField(orig.Pos, orig.Sym, orig.Type)
+		p.SetIsDDD(orig.IsDDD())
+		res[i] = p
 	}
-
-	return args
-}
-
-// NewName returns a new ONAME Node associated with symbol s.
-func NewName(s *types.Sym) *ir.Name {
-	n := ir.NewNameAt(base.Pos, s)
-	n.Curfn = ir.CurFunc
-	return n
+	return res
 }
 
 // NodAddr returns a node representing &n at base.Pos.
@@ -62,60 +43,7 @@
 
 // NodAddrAt returns a node representing &n at position pos.
 func NodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
-	n = markAddrOf(n)
-	return ir.NewAddrExpr(pos, n)
-}
-
-func markAddrOf(n ir.Node) ir.Node {
-	if IncrementalAddrtaken {
-		// We can only do incremental addrtaken computation when it is ok
-		// to typecheck the argument of the OADDR. That's only safe after the
-		// main typecheck has completed, and not loading the inlined body.
-		// The argument to OADDR needs to be typechecked because &x[i] takes
-		// the address of x if x is an array, but not if x is a slice.
-		// Note: OuterValue doesn't work correctly until n is typechecked.
-		n = typecheck(n, ctxExpr)
-		if x := ir.OuterValue(n); x.Op() == ir.ONAME {
-			x.Name().SetAddrtaken(true)
-		}
-	} else {
-		// Remember that we built an OADDR without computing the Addrtaken bit for
-		// its argument. We'll do that later in bulk using computeAddrtaken.
-		DirtyAddrtaken = true
-	}
-	return n
-}
-
-// If IncrementalAddrtaken is false, we do not compute Addrtaken for an OADDR Node
-// when it is built. The Addrtaken bits are set in bulk by computeAddrtaken.
-// If IncrementalAddrtaken is true, then when an OADDR Node is built the Addrtaken
-// field of its argument is updated immediately.
-var IncrementalAddrtaken = false
-
-// If DirtyAddrtaken is true, then there are OADDR whose corresponding arguments
-// have not yet been marked as Addrtaken.
-var DirtyAddrtaken = false
-
-func ComputeAddrtaken(top []ir.Node) {
-	for _, n := range top {
-		var doVisit func(n ir.Node)
-		doVisit = func(n ir.Node) {
-			if n.Op() == ir.OADDR {
-				if x := ir.OuterValue(n.(*ir.AddrExpr).X); x.Op() == ir.ONAME {
-					x.Name().SetAddrtaken(true)
-					if x.Name().IsClosureVar() {
-						// Mark the original variable as Addrtaken so that capturevars
-						// knows not to pass it by value.
-						x.Name().Defn.Name().SetAddrtaken(true)
-					}
-				}
-			}
-			if n.Op() == ir.OCLOSURE {
-				ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doVisit)
-			}
-		}
-		ir.Visit(n, doVisit)
-	}
+	return ir.NewAddrExpr(pos, Expr(n))
 }
 
 // LinksymAddr returns a new expression that evaluates to the address
@@ -126,9 +54,7 @@
 }
 
 func NodNil() ir.Node {
-	n := ir.NewNilExpr(base.Pos)
-	n.SetType(types.Types[types.TNIL])
-	return n
+	return ir.NewNilExpr(base.Pos, types.Types[types.TNIL])
 }
 
 // AddImplicitDots finds missing fields in obj.field that
@@ -170,13 +96,13 @@
 // CalcMethods calculates all the methods (including embedding) of a non-interface
 // type t.
 func CalcMethods(t *types.Type) {
-	if t == nil || t.AllMethods().Len() != 0 {
+	if t == nil || len(t.AllMethods()) != 0 {
 		return
 	}
 
 	// mark top-level method symbols
 	// so that expand1 doesn't consider them.
-	for _, f := range t.Methods().Slice() {
+	for _, f := range t.Methods() {
 		f.Sym.SetUniq(true)
 	}
 
@@ -213,11 +139,11 @@
 		ms = append(ms, f)
 	}
 
-	for _, f := range t.Methods().Slice() {
+	for _, f := range t.Methods() {
 		f.Sym.SetUniq(false)
 	}
 
-	ms = append(ms, t.Methods().Slice()...)
+	ms = append(ms, t.Methods()...)
 	sort.Sort(types.MethodsByName(ms))
 	t.SetAllMethods(ms)
 }
@@ -255,13 +181,13 @@
 		return c, false
 	}
 
-	var fields *types.Fields
+	var fields []*types.Field
 	if u.IsStruct() {
 		fields = u.Fields()
 	} else {
 		fields = u.AllMethods()
 	}
-	for _, f := range fields.Slice() {
+	for _, f := range fields {
 		if f.Embedded == 0 || f.Sym == nil {
 			continue
 		}
@@ -311,7 +237,7 @@
 		return n
 	}
 
-	op, why := Assignop(n.Type(), t)
+	op, why := assignOp(n.Type(), t)
 	if op == ir.OXXX {
 		base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
 		op = ir.OCONV
@@ -327,7 +253,7 @@
 // If so, return op code to use in conversion.
 // If not, return OXXX. In this case, the string return parameter may
 // hold a reason why. In all other cases, it'll be the empty string.
-func Assignop(src, dst *types.Type) (ir.Op, string) {
+func assignOp(src, dst *types.Type) (ir.Op, string) {
 	if src == dst {
 		return ir.OCONVNOP, ""
 	}
@@ -339,10 +265,7 @@
 	if types.Identical(src, dst) {
 		return ir.OCONVNOP, ""
 	}
-	return Assignop1(src, dst)
-}
 
-func Assignop1(src, dst *types.Type) (ir.Op, string) {
 	// 2. src and dst have identical underlying types and
 	//   a. either src or dst is not a named type, or
 	//   b. both are empty interface types, or
@@ -441,7 +364,7 @@
 // If not, return OXXX. In this case, the string return parameter may
 // hold a reason why. In all other cases, it'll be the empty string.
 // srcConstant indicates whether the value of type src is a constant.
-func Convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
+func convertOp(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
 	if src == dst {
 		return ir.OCONVNOP, ""
 	}
@@ -464,7 +387,7 @@
 	}
 
 	// 1. src can be assigned to dst.
-	op, why := Assignop(src, dst)
+	op, why := assignOp(src, dst)
 	if op != ir.OXXX {
 		return op, why
 	}
@@ -549,15 +472,7 @@
 		return ir.OCONVNOP, ""
 	}
 
-	// 10. src is map and dst is a pointer to corresponding hmap.
-	// This rule is needed for the implementation detail that
-	// go gc maps are implemented as a pointer to a hmap struct.
-	if src.Kind() == types.TMAP && dst.IsPtr() &&
-		src.MapType().Hmap == dst.Elem() {
-		return ir.OCONVNOP, ""
-	}
-
-	// 11. src is a slice and dst is an array or pointer-to-array.
+	// 10. src is a slice and dst is an array or pointer-to-array.
 	// They must have same element type.
 	if src.IsSlice() {
 		if dst.IsArray() && types.Identical(src.Elem(), dst.Elem()) {
@@ -612,7 +527,7 @@
 	}
 
 	if u.IsInterface() {
-		for _, f := range u.AllMethods().Slice() {
+		for _, f := range u.AllMethods() {
 			if f.Sym.Uniq() {
 				continue
 			}
@@ -625,7 +540,7 @@
 
 	u = types.ReceiverBaseType(t)
 	if u != nil {
-		for _, f := range u.Methods().Slice() {
+		for _, f := range u.Methods() {
 			if f.Sym.Uniq() {
 				continue
 			}
@@ -651,13 +566,13 @@
 	}
 
 	if u.IsStruct() || u.IsInterface() {
-		var fields *types.Fields
+		var fields []*types.Field
 		if u.IsStruct() {
 			fields = u.Fields()
 		} else {
 			fields = u.AllMethods()
 		}
-		for _, f := range fields.Slice() {
+		for _, f := range fields {
 			if f.Embedded == 0 {
 				continue
 			}
@@ -736,8 +651,8 @@
 
 	if t.IsInterface() {
 		i := 0
-		tms := t.AllMethods().Slice()
-		for _, im := range iface.AllMethods().Slice() {
+		tms := t.AllMethods()
+		for _, im := range iface.AllMethods() {
 			for i < len(tms) && tms[i].Sym != im.Sym {
 				i++
 			}
@@ -763,10 +678,10 @@
 	var tms []*types.Field
 	if t != nil {
 		CalcMethods(t)
-		tms = t.AllMethods().Slice()
+		tms = t.AllMethods()
 	}
 	i := 0
-	for _, im := range iface.AllMethods().Slice() {
+	for _, im := range iface.AllMethods() {
 		for i < len(tms) && tms[i].Sym != im.Sym {
 			i++
 		}
@@ -783,12 +698,10 @@
 			*ptr = 0
 			return false
 		}
-		followptr := tm.Embedded == 2
 
 		// if pointer receiver in method,
 		// the method does not exist for value types.
-		rcvr := tm.Type.Recv().Type
-		if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !types.IsInterfaceMethod(tm.Type) {
+		if !types.IsMethodApplicable(t0, tm) {
 			if false && base.Flag.LowerR != 0 {
 				base.Errorf("interface pointer mismatch")
 			}
@@ -831,13 +744,13 @@
 
 	c := 0
 	if u.IsStruct() || u.IsInterface() {
-		var fields *types.Fields
+		var fields []*types.Field
 		if u.IsStruct() {
 			fields = u.Fields()
 		} else {
 			fields = u.AllMethods()
 		}
-		for _, f := range fields.Slice() {
+		for _, f := range fields {
 			if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) {
 				if save != nil {
 					*save = f
@@ -854,7 +767,7 @@
 	}
 	u = types.ReceiverBaseType(u)
 	if u != nil {
-		for _, f := range u.Methods().Slice() {
+		for _, f := range u.Methods() {
 			if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
 				if save != nil {
 					*save = f
@@ -877,7 +790,3 @@
 type symlink struct {
 	field *types.Field
 }
-
-func assert(p bool) {
-	base.Assert(p)
-}
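
Rule 10 of convertOp (slice to array or pointer-to-array with identical element types) corresponds to conversions that are already expressible in source. A quick example; note the conversion panics at run time if the slice is shorter than the array:

	package main

	import "fmt"

	func main() {
		s := []int{1, 2, 3, 4}
		p := (*[4]int)(s) // slice to pointer-to-array (Go 1.17+)
		a := [4]int(s)    // slice to array (Go 1.20+)
		fmt.Println(a, *p)
	}
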
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go
index 7fe649f..a977b5e 100644
--- a/src/cmd/compile/internal/typecheck/syms.go
+++ b/src/cmd/compile/internal/typecheck/syms.go
@@ -9,30 +9,32 @@
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
-	"cmd/internal/src"
 )
 
-func LookupRuntime(name string) *ir.Name {
+// LookupRuntime returns a function or variable declared in
+// _builtin/runtime.go. If types_ is non-empty, successive occurrences
+// of the "any" placeholder type will be substituted.
+func LookupRuntime(name string, types_ ...*types.Type) *ir.Name {
 	s := ir.Pkgs.Runtime.Lookup(name)
 	if s == nil || s.Def == nil {
 		base.Fatalf("LookupRuntime: can't find runtime.%s", name)
 	}
-	return ir.AsNode(s.Def).(*ir.Name)
+	n := s.Def.(*ir.Name)
+	if len(types_) != 0 {
+		n = substArgTypes(n, types_...)
+	}
+	return n
 }
 
 // substArgTypes substitutes the given list of types for
 // successive occurrences of the "any" placeholder in the
 // type syntax expression n.Type.
-// The result of SubstArgTypes MUST be assigned back to old, e.g.
-//
-//	n.Left = SubstArgTypes(n.Left, t1, t2)
-func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
+func substArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
 	for _, t := range types_ {
 		types.CalcSize(t)
 	}
-	n := ir.NewNameAt(old.Pos(), old.Sym())
+	n := ir.NewNameAt(old.Pos(), old.Sym(), types.SubstAny(old.Type(), &types_))
 	n.Class = old.Class
-	n.SetType(types.SubstAny(old.Type(), &types_))
 	n.Func = old.Func
 	if len(types_) > 0 {
 		base.Fatalf("SubstArgTypes: too many argument types")
@@ -75,9 +77,9 @@
 		typ := typs[d.typ]
 		switch d.tag {
 		case funcTag:
-			importfunc(src.NoXPos, sym, typ)
+			importfunc(sym, typ)
 		case varTag:
-			importvar(src.NoXPos, sym, typ)
+			importvar(sym, typ)
 		default:
 			base.Fatalf("unhandled declaration tag %v", d.tag)
 		}
@@ -111,9 +113,9 @@
 		typ := typs[d.typ]
 		switch d.tag {
 		case funcTag:
-			importfunc(src.NoXPos, sym, typ)
+			importfunc(sym, typ)
 		case varTag:
-			importvar(src.NoXPos, sym, typ)
+			importvar(sym, typ)
 		default:
 			base.Fatalf("unhandled declaration tag %v", d.tag)
 		}
@@ -128,5 +130,5 @@
 	if sym == nil {
 		base.Fatalf("LookupCoverage: can't find runtime/coverage.%s", name)
 	}
-	return ir.AsNode(sym.Def).(*ir.Name)
+	return sym.Def.(*ir.Name)
 }
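
LookupRuntime now performs the placeholder substitution itself: the optional trailing types replace successive "any" placeholders, and the exported SubstArgTypes step is gone (substArgTypes is only called from LookupRuntime). A hedged sketch of the call-site migration, with "growslice" and elemType as illustrative stand-ins rather than a specific call site from this merge:

    // Before: lookup and substitution were separate steps, and the result
    // had to be assigned back.
    //     fn := typecheck.LookupRuntime("growslice")
    //     fn = typecheck.SubstArgTypes(fn, elemType)
    // After: the placeholder types are passed to the lookup directly.
    //     fn := typecheck.LookupRuntime("growslice", elemType)
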
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
index 6e4feec..b22e453 100644
--- a/src/cmd/compile/internal/typecheck/typecheck.go
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -8,7 +8,6 @@
 	"fmt"
 	"go/constant"
 	"go/token"
-	"internal/types/errors"
 	"strings"
 
 	"cmd/compile/internal/base"
@@ -17,18 +16,6 @@
 	"cmd/internal/src"
 )
 
-// Function collecting autotmps generated during typechecking,
-// to be included in the package-level init function.
-var InitTodoFunc = ir.NewFunc(base.Pos)
-
-var inimport bool // set during import
-
-var TypecheckAllowed bool
-
-var (
-	NeedRuntimeType = func(*types.Type) {}
-)
-
 func AssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) }
 func Expr(n ir.Node) ir.Node       { return typecheck(n, ctxExpr) }
 func Stmt(n ir.Node) ir.Node       { return typecheck(n, ctxStmt) }
@@ -105,20 +92,6 @@
 // marks variables that escape the local frame.
 // rewrites n.Op to be more specific in some cases.
 
-// Resolve resolves an ONONAME node to a definition, if any. If n is not an ONONAME node,
-// Resolve returns n unchanged. If n is an ONONAME node and not in the same package,
-// then n.Sym() is resolved using import data. Otherwise, Resolve returns
-// n.Sym().Def. An ONONAME node can be created using ir.NewIdent(), so an imported
-// symbol can be resolved via Resolve(ir.NewIdent(src.NoXPos, sym)).
-func Resolve(n ir.Node) (res ir.Node) {
-	if n == nil || n.Op() != ir.ONONAME {
-		return n
-	}
-
-	base.Fatalf("unexpected NONAME node: %+v", n)
-	panic("unreachable")
-}
-
 func typecheckslice(l []ir.Node, top int) {
 	for i := range l {
 		l[i] = typecheck(l[i], top)
@@ -170,56 +143,11 @@
 	return fmt.Sprintf("etype=%d", et)
 }
 
-func cycleFor(start ir.Node) []ir.Node {
-	// Find the start node in typecheck_tcstack.
-	// We know that it must exist because each time we mark
-	// a node with n.SetTypecheck(2) we push it on the stack,
-	// and each time we mark a node with n.SetTypecheck(2) we
-	// pop it from the stack. We hit a cycle when we encounter
-	// a node marked 2 in which case is must be on the stack.
-	i := len(typecheck_tcstack) - 1
-	for i > 0 && typecheck_tcstack[i] != start {
-		i--
-	}
-
-	// collect all nodes with same Op
-	var cycle []ir.Node
-	for _, n := range typecheck_tcstack[i:] {
-		if n.Op() == start.Op() {
-			cycle = append(cycle, n)
-		}
-	}
-
-	return cycle
-}
-
-func cycleTrace(cycle []ir.Node) string {
-	var s string
-	for i, n := range cycle {
-		s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)])
-	}
-	return s
-}
-
-var typecheck_tcstack []ir.Node
-
-func Func(fn *ir.Func) {
-	new := Stmt(fn)
-	if new != fn {
-		base.Fatalf("typecheck changed func")
-	}
-}
-
 // typecheck type checks node n.
 // The result of typecheck MUST be assigned back to n, e.g.
 //
 //	n.Left = typecheck(n.Left, top)
 func typecheck(n ir.Node, top int) (res ir.Node) {
-	// cannot type check until all the source has been parsed
-	if !TypecheckAllowed {
-		base.Fatalf("early typecheck")
-	}
-
 	if n == nil {
 		return nil
 	}
@@ -230,120 +158,33 @@
 	}
 
 	lno := ir.SetPos(n)
+	defer func() { base.Pos = lno }()
 
 	// Skip over parens.
 	for n.Op() == ir.OPAREN {
 		n = n.(*ir.ParenExpr).X
 	}
 
-	// Resolve definition of name and value of iota lazily.
-	n = Resolve(n)
-
 	// Skip typecheck if already done.
 	// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
 	if n.Typecheck() == 1 || n.Typecheck() == 3 {
 		switch n.Op() {
-		case ir.ONAME, ir.OTYPE, ir.OLITERAL:
+		case ir.ONAME:
 			break
 
 		default:
-			base.Pos = lno
 			return n
 		}
 	}
 
 	if n.Typecheck() == 2 {
-		// Typechecking loop. Trying printing a meaningful message,
-		// otherwise a stack trace of typechecking.
-		switch n.Op() {
-		// We can already diagnose variables used as types.
-		case ir.ONAME:
-			n := n.(*ir.Name)
-			if top&(ctxExpr|ctxType) == ctxType {
-				base.Errorf("%v is not a type", n)
-			}
-
-		case ir.OTYPE:
-			// Only report a type cycle if we are expecting a type.
-			// Otherwise let other code report an error.
-			if top&ctxType == ctxType {
-				// A cycle containing only alias types is an error
-				// since it would expand indefinitely when aliases
-				// are substituted.
-				cycle := cycleFor(n)
-				for _, n1 := range cycle {
-					if n1.Name() != nil && !n1.Name().Alias() {
-						// Cycle is ok. But if n is an alias type and doesn't
-						// have a type yet, we have a recursive type declaration
-						// with aliases that we can't handle properly yet.
-						// Report an error rather than crashing later.
-						if n.Name() != nil && n.Name().Alias() && n.Type() == nil {
-							base.Pos = n.Pos()
-							base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
-						}
-						base.Pos = lno
-						return n
-					}
-				}
-				base.ErrorfAt(n.Pos(), errors.InvalidDeclCycle, "invalid recursive type alias %v%s", n, cycleTrace(cycle))
-			}
-
-		case ir.OLITERAL:
-			if top&(ctxExpr|ctxType) == ctxType {
-				base.Errorf("%v is not a type", n)
-				break
-			}
-			base.ErrorfAt(n.Pos(), errors.InvalidInitCycle, "constant definition loop%s", cycleTrace(cycleFor(n)))
-		}
-
-		if base.Errors() == 0 {
-			var trace string
-			for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
-				x := typecheck_tcstack[i]
-				trace += fmt.Sprintf("\n\t%v %v", ir.Line(x), x)
-			}
-			base.Errorf("typechecking loop involving %v%s", n, trace)
-		}
-
-		base.Pos = lno
-		return n
+		base.FatalfAt(n.Pos(), "typechecking loop")
 	}
 
-	typecheck_tcstack = append(typecheck_tcstack, n)
-
 	n.SetTypecheck(2)
 	n = typecheck1(n, top)
 	n.SetTypecheck(1)
 
-	last := len(typecheck_tcstack) - 1
-	typecheck_tcstack[last] = nil
-	typecheck_tcstack = typecheck_tcstack[:last]
-
-	_, isExpr := n.(ir.Expr)
-	_, isStmt := n.(ir.Stmt)
-	isMulti := false
-	switch n.Op() {
-	case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
-		n := n.(*ir.CallExpr)
-		if t := n.X.Type(); t != nil && t.Kind() == types.TFUNC {
-			nr := t.NumResults()
-			isMulti = nr > 1
-			if nr == 0 {
-				isExpr = false
-			}
-		}
-	case ir.OAPPEND, ir.OMIN, ir.OMAX:
-		// Must be used.
-		isStmt = false
-	case ir.OCLEAR, ir.OCLOSE, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN:
-		// Must not be used.
-		isExpr = false
-		isStmt = true
-	case ir.OCOPY, ir.ORECOVER, ir.ORECV:
-		// Can be used or not.
-		isStmt = true
-	}
-
 	t := n.Type()
 	if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
 		switch t.Kind() {
@@ -356,25 +197,6 @@
 		}
 	}
 
-	// TODO(rsc): Lots of the complexity here is because typecheck can
-	// see OTYPE, ONAME, and OLITERAL nodes multiple times.
-	// Once we make the IR a proper tree, we should be able to simplify
-	// this code a bit, especially the final case.
-	switch {
-	case top&(ctxStmt|ctxExpr) == ctxExpr && !isExpr && n.Op() != ir.OTYPE && !isMulti:
-		base.Fatalf("%v used as value", n)
-
-	case top&ctxType == 0 && n.Op() == ir.OTYPE && t != nil:
-		base.Fatalf("type %v is not an expression", n.Type())
-
-	case top&(ctxStmt|ctxExpr) == ctxStmt && !isStmt && t != nil:
-		base.Fatalf("%v evaluated but not used", n)
-
-	case top&(ctxType|ctxExpr) == ctxType && n.Op() != ir.OTYPE && n.Op() != ir.ONONAME && (t != nil || n.Op() == ir.ONAME):
-		base.Fatalf("%v is not a type", n)
-	}
-
-	base.Pos = lno
 	return n
 }
 
@@ -400,22 +222,6 @@
 		base.Fatalf("typecheck %v", n.Op())
 		panic("unreachable")
 
-	case ir.OLITERAL:
-		if n.Sym() == nil && n.Type() == nil {
-			base.Fatalf("literal missing type: %v", n)
-		}
-		return n
-
-	case ir.ONIL:
-		return n
-
-	// names
-	case ir.ONONAME:
-		// Note: adderrorname looks for this string and
-		// adds context about the outer expression
-		base.FatalfAt(n.Pos(), "undefined: %v", n.Sym())
-		panic("unreachable")
-
 	case ir.ONAME:
 		n := n.(*ir.Name)
 		if n.BuiltinOp != 0 {
@@ -437,14 +243,6 @@
 		}
 		return n
 
-	case ir.OLINKSYMOFFSET:
-		// type already set
-		return n
-
-	// types (ODEREF is with exprs)
-	case ir.OTYPE:
-		return n
-
 	// type or expr
 	case ir.ODEREF:
 		n := n.(*ir.StarExpr)
@@ -548,10 +346,6 @@
 		return tcUnaryArith(n)
 
 	// exprs
-	case ir.OADDR:
-		n := n.(*ir.AddrExpr)
-		return tcAddr(n)
-
 	case ir.OCOMPLIT:
 		return tcCompLit(n.(*ir.CompLitExpr))
 
@@ -596,11 +390,6 @@
 		n := n.(*ir.CallExpr)
 		return tcCall(n, top)
 
-	case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
-		n := n.(*ir.UnaryExpr)
-		n.SetType(types.Types[types.TUINTPTR])
-		return OrigInt(n, evalunsafe(n))
-
 	case ir.OCAP, ir.OLEN:
 		n := n.(*ir.UnaryExpr)
 		return tcLenCap(n)
@@ -649,7 +438,7 @@
 		n := n.(*ir.UnaryExpr)
 		return tcNew(n)
 
-	case ir.OPRINT, ir.OPRINTN:
+	case ir.OPRINT, ir.OPRINTLN:
 		n := n.(*ir.CallExpr)
 		return tcPrint(n)
 
@@ -661,10 +450,6 @@
 		n := n.(*ir.CallExpr)
 		return tcRecover(n)
 
-	case ir.ORECOVERFP:
-		n := n.(*ir.CallExpr)
-		return tcRecoverFP(n)
-
 	case ir.OUNSAFEADD:
 		n := n.(*ir.BinaryExpr)
 		return tcUnsafeAdd(n)
@@ -685,10 +470,6 @@
 		n := n.(*ir.UnaryExpr)
 		return tcUnsafeData(n)
 
-	case ir.OCLOSURE:
-		n := n.(*ir.ClosureExpr)
-		return tcClosure(n, top)
-
 	case ir.OITAB:
 		n := n.(*ir.UnaryExpr)
 		return tcITab(n)
@@ -806,17 +587,6 @@
 	case ir.ODCLFUNC:
 		tcFunc(n.(*ir.Func))
 		return n
-
-	case ir.ODCLCONST:
-		n := n.(*ir.Decl)
-		n.X = Expr(n.X).(*ir.Name)
-		return n
-
-	case ir.ODCLTYPE:
-		n := n.(*ir.Decl)
-		n.X = typecheck(n.X, ctxType).(*ir.Name)
-		types.CheckSize(n.X.Type())
-		return n
 	}
 
 	// No return n here!
@@ -849,11 +619,6 @@
 		return
 	}
 
-	// Save n as n.Orig for fmt.go.
-	if ir.Orig(n) == n {
-		n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
-	}
-
 	// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
 	RewriteMultiValueCall(n, list[0])
 }
@@ -861,10 +626,7 @@
 // RewriteNonNameCall replaces non-Name call expressions with temps,
 // rewriting f()(...) to t0 := f(); t0(...).
 func RewriteNonNameCall(n *ir.CallExpr) {
-	np := &n.X
-	if inst, ok := (*np).(*ir.InstExpr); ok && inst.Op() == ir.OFUNCINST {
-		np = &inst.X
-	}
+	np := &n.Fun
 	if dot, ok := (*np).(*ir.SelectorExpr); ok && (dot.Op() == ir.ODOTMETH || dot.Op() == ir.ODOTINTER || dot.Op() == ir.OMETHVALUE) {
 		np = &dot.X // peel away method selector
 	}
@@ -876,49 +638,26 @@
 		return
 	}
 
-	// See comment (1) in RewriteMultiValueCall.
-	static := ir.CurFunc == nil
-	if static {
-		ir.CurFunc = InitTodoFunc
-	}
-
-	tmp := Temp((*np).Type())
+	tmp := TempAt(base.Pos, ir.CurFunc, (*np).Type())
 	as := ir.NewAssignStmt(base.Pos, tmp, *np)
 	as.PtrInit().Append(Stmt(ir.NewDecl(n.Pos(), ir.ODCL, tmp)))
 	*np = tmp
 
-	if static {
-		ir.CurFunc = nil
-	}
-
 	n.PtrInit().Append(Stmt(as))
 }
 
 // RewriteMultiValueCall rewrites multi-valued f() to use temporaries,
 // so the backend wouldn't need to worry about tuple-valued expressions.
 func RewriteMultiValueCall(n ir.InitNode, call ir.Node) {
-	// If we're outside of function context, then this call will
-	// be executed during the generated init function. However,
-	// init.go hasn't yet created it. Instead, associate the
-	// temporary variables with  InitTodoFunc for now, and init.go
-	// will reassociate them later when it's appropriate. (1)
-	static := ir.CurFunc == nil
-	if static {
-		ir.CurFunc = InitTodoFunc
-	}
-
 	as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, []ir.Node{call})
-	results := call.Type().FieldSlice()
+	results := call.Type().Fields()
 	list := make([]ir.Node, len(results))
 	for i, result := range results {
-		tmp := Temp(result.Type)
+		tmp := TempAt(base.Pos, ir.CurFunc, result.Type)
 		as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, tmp))
 		as.Lhs.Append(tmp)
 		list[i] = tmp
 	}
-	if static {
-		ir.CurFunc = nil
-	}
 
 	n.PtrInit().Append(Stmt(as))
 
@@ -1034,9 +773,9 @@
 // the matching field or nil. If dostrcmp is 0, it matches the symbols. If
 // dostrcmp is 1, it matches by name exactly. If dostrcmp is 2, it matches names
 // with case folding.
-func Lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+func Lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs []*types.Field, dostrcmp int) *types.Field {
 	var r *types.Field
-	for _, f := range fs.Slice() {
+	for _, f := range fs {
 		if dostrcmp != 0 && f.Sym.Name == s.Name {
 			return f
 		}
@@ -1063,64 +802,35 @@
 	return r
 }
 
-// typecheckMethodExpr checks selector expressions (ODOT) where the
-// base expression is a type expression (OTYPE).
-func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
-	if base.EnableTrace && base.Flag.LowerT {
-		defer tracePrint("typecheckMethodExpr", n)(&res)
-	}
-
-	t := n.X.Type()
-
-	// Compute the method set for t.
-	var ms *types.Fields
-	if t.IsInterface() {
-		ms = t.AllMethods()
+// NewMethodExpr returns an OMETHEXPR node representing method
+// expression "recv.sym".
+func NewMethodExpr(pos src.XPos, recv *types.Type, sym *types.Sym) *ir.SelectorExpr {
+	// Compute the method set for recv.
+	var ms []*types.Field
+	if recv.IsInterface() {
+		ms = recv.AllMethods()
 	} else {
-		mt := types.ReceiverBaseType(t)
+		mt := types.ReceiverBaseType(recv)
 		if mt == nil {
-			base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sel)
-			n.SetType(nil)
-			return n
+			base.FatalfAt(pos, "type %v has no receiver base type", recv)
 		}
 		CalcMethods(mt)
 		ms = mt.AllMethods()
-
-		// The method expression T.m requires a wrapper when T
-		// is different from m's declared receiver type. We
-		// normally generate these wrappers while writing out
-		// runtime type descriptors, which is always done for
-		// types declared at package scope. However, we need
-		// to make sure to generate wrappers for anonymous
-		// receiver types too.
-		if mt.Sym() == nil {
-			NeedRuntimeType(t)
-		}
 	}
 
-	s := n.Sel
-	m := Lookdot1(n, s, t, ms, 0)
+	m := Lookdot1(nil, sym, recv, ms, 0)
 	if m == nil {
-		if Lookdot1(n, s, t, ms, 1) != nil {
-			base.Errorf("%v undefined (cannot refer to unexported method %v)", n, s)
-		} else if _, ambig := dotpath(s, t, nil, false); ambig {
-			base.Errorf("%v undefined (ambiguous selector)", n) // method or field
-		} else {
-			base.Errorf("%v undefined (type %v has no method %v)", n, t, s)
-		}
-		n.SetType(nil)
-		return n
+		base.FatalfAt(pos, "type %v has no method %v", recv, sym)
 	}
 
-	if !types.IsMethodApplicable(t, m) {
-		base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
-		n.SetType(nil)
-		return n
+	if !types.IsMethodApplicable(recv, m) {
+		base.FatalfAt(pos, "invalid method expression %v.%v (needs pointer receiver)", recv, sym)
 	}
 
-	n.SetOp(ir.OMETHEXPR)
+	n := ir.NewSelectorExpr(pos, ir.OMETHEXPR, ir.TypeNode(recv), sym)
 	n.Selection = m
-	n.SetType(NewMethodType(m.Type, n.X.Type()))
+	n.SetType(NewMethodType(m.Type, recv))
+	n.SetTypecheck(1)
 	return n
 }
 
@@ -1260,8 +970,9 @@
 	return true
 }
 
-func hasddd(t *types.Type) bool {
-	for _, tl := range t.Fields().Slice() {
+func hasddd(params []*types.Field) bool {
+	// TODO(mdempsky): Simply check the last param.
+	for _, tl := range params {
 		if tl.IsDDD() {
 			return true
 		}
@@ -1271,7 +982,7 @@
 }
 
 // typecheck assignment: type list = expression list
-func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, params []*types.Field, nl ir.Nodes, desc func() string) {
 	var t *types.Type
 	var i int
 
@@ -1283,9 +994,9 @@
 		n = nl[0]
 	}
 
-	n1 := tstruct.NumFields()
+	n1 := len(params)
 	n2 := len(nl)
-	if !hasddd(tstruct) {
+	if !hasddd(params) {
 		if isddd {
 			goto invalidddd
 		}
@@ -1311,7 +1022,7 @@
 	}
 
 	i = 0
-	for _, tl := range tstruct.Fields().Slice() {
+	for _, tl := range params {
 		t = tl.Type
 		if tl.IsDDD() {
 			if isddd {
@@ -1367,98 +1078,12 @@
 
 notenough:
 	if n == nil || n.Type() != nil {
-		details := errorDetails(nl, tstruct, isddd)
-		if call != nil {
-			// call is the expression being called, not the overall call.
-			// Method expressions have the form T.M, and the compiler has
-			// rewritten those to ONAME nodes but left T in Left.
-			if call.Op() == ir.OMETHEXPR {
-				call := call.(*ir.SelectorExpr)
-				base.Errorf("not enough arguments in call to method expression %v%s", call, details)
-			} else {
-				base.Errorf("not enough arguments in call to %v%s", call, details)
-			}
-		} else {
-			base.Errorf("not enough arguments to %v%s", op, details)
-		}
-		if n != nil {
-			base.Fatalf("invalid call")
-		}
+		base.Fatalf("not enough arguments to %v", op)
 	}
 	return
 
 toomany:
-	details := errorDetails(nl, tstruct, isddd)
-	if call != nil {
-		base.Errorf("too many arguments in call to %v%s", call, details)
-	} else {
-		base.Errorf("too many arguments to %v%s", op, details)
-	}
-}
-
-func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string {
-	// Suppress any return message signatures if:
-	//
-	// (1) We don't know any type at a call site (see #19012).
-	// (2) Any node has an unknown type.
-	// (3) Invalid type for variadic parameter (see #46957).
-	if tstruct == nil {
-		return "" // case 1
-	}
-
-	if isddd && !nl[len(nl)-1].Type().IsSlice() {
-		return "" // case 3
-	}
-
-	for _, n := range nl {
-		if n.Type() == nil {
-			return "" // case 2
-		}
-	}
-	return fmt.Sprintf("\n\thave %s\n\twant %v", fmtSignature(nl, isddd), tstruct)
-}
-
-// sigrepr is a type's representation to the outside world,
-// in string representations of return signatures
-// e.g in error messages about wrong arguments to return.
-func sigrepr(t *types.Type, isddd bool) string {
-	switch t {
-	case types.UntypedString:
-		return "string"
-	case types.UntypedBool:
-		return "bool"
-	}
-
-	if t.Kind() == types.TIDEAL {
-		// "untyped number" is not commonly used
-		// outside of the compiler, so let's use "number".
-		// TODO(mdempsky): Revisit this.
-		return "number"
-	}
-
-	// Turn []T... argument to ...T for clearer error message.
-	if isddd {
-		if !t.IsSlice() {
-			base.Fatalf("bad type for ... argument: %v", t)
-		}
-		return "..." + t.Elem().String()
-	}
-	return t.String()
-}
-
-// fmtSignature returns the signature of the types at the call or return.
-func fmtSignature(nl ir.Nodes, isddd bool) string {
-	if len(nl) < 1 {
-		return "()"
-	}
-
-	var typeStrings []string
-	for i, n := range nl {
-		isdddArg := isddd && i == len(nl)-1
-		typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg))
-	}
-
-	return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
+	base.Fatalf("too many arguments to %v", op)
 }
 
 // type check composite.
@@ -1584,7 +1209,7 @@
 		return
 	}
 
-	if op, why := Assignop(src, dst.Type()); op == ir.OXXX {
+	if op, why := assignOp(src, dst.Type()); op == ir.OXXX {
 		base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
 		return
 	}
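
NewMethodExpr above fatals when the method is not applicable to the receiver type, mirroring the language rule that pointer-receiver methods are not in the value type's method set. A minimal standalone program (ordinary user code, not compiler internals) showing the rule that check enforces:

    package main

    import "fmt"

    type T struct{ n int }

    func (t *T) Inc() { t.n++ }

    func main() {
    	inc := (*T).Inc // method expression with the pointer receiver spelled out
    	// T.Inc would not compile: Inc has a pointer receiver,
    	// so it is not in T's method set.
    	var t T
    	inc(&t)
    	fmt.Println(t.n) // prints 1
    }
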
diff --git a/src/cmd/compile/internal/typecheck/universe.go b/src/cmd/compile/internal/typecheck/universe.go
index e43bede..4c4487c 100644
--- a/src/cmd/compile/internal/typecheck/universe.go
+++ b/src/cmd/compile/internal/typecheck/universe.go
@@ -14,7 +14,6 @@
 
 var (
 	okfor [ir.OEND][]bool
-	iscmp [ir.OEND]bool
 )
 
 var (
@@ -47,7 +46,7 @@
 	{"new", ir.ONEW},
 	{"panic", ir.OPANIC},
 	{"print", ir.OPRINT},
-	{"println", ir.OPRINTN},
+	{"println", ir.OPRINTLN},
 	{"real", ir.OREAL},
 	{"recover", ir.ORECOVER},
 }
@@ -57,9 +56,6 @@
 	op   ir.Op
 }{
 	{"Add", ir.OUNSAFEADD},
-	{"Alignof", ir.OALIGNOF},
-	{"Offsetof", ir.OOFFSETOF},
-	{"Sizeof", ir.OSIZEOF},
 	{"Slice", ir.OUNSAFESLICE},
 	{"SliceData", ir.OUNSAFESLICEDATA},
 	{"String", ir.OUNSAFESTRING},
@@ -71,22 +67,17 @@
 	types.InitTypes(func(sym *types.Sym, typ *types.Type) types.Object {
 		n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, sym)
 		n.SetType(typ)
+		n.SetTypecheck(1)
 		sym.Def = n
 		return n
 	})
 
 	for _, s := range &builtinFuncs {
-		s2 := types.BuiltinPkg.Lookup(s.name)
-		def := NewName(s2)
-		def.BuiltinOp = s.op
-		s2.Def = def
+		ir.NewBuiltin(types.BuiltinPkg.Lookup(s.name), s.op)
 	}
 
 	for _, s := range &unsafeFuncs {
-		s2 := types.UnsafePkg.Lookup(s.name)
-		def := NewName(s2)
-		def.BuiltinOp = s.op
-		s2.Def = def
+		ir.NewBuiltin(types.UnsafePkg.Lookup(s.name), s.op)
 	}
 
 	s := types.BuiltinPkg.Lookup("true")
@@ -97,14 +88,11 @@
 
 	s = Lookup("_")
 	types.BlankSym = s
-	s.Def = NewName(s)
-	ir.BlankNode = ir.AsNode(s.Def)
-	ir.BlankNode.SetType(types.Types[types.TBLANK])
-	ir.BlankNode.SetTypecheck(1)
+	ir.BlankNode = ir.NewNameAt(src.NoXPos, s, types.Types[types.TBLANK])
+	s.Def = ir.BlankNode
 
 	s = types.BuiltinPkg.Lookup("_")
-	s.Def = NewName(s)
-	ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
+	s.Def = ir.NewNameAt(src.NoXPos, s, types.Types[types.TBLANK])
 
 	s = types.BuiltinPkg.Lookup("nil")
 	s.Def = NodNil()
@@ -207,22 +195,3 @@
 	okfor[ir.OCAP] = okforcap[:]
 	okfor[ir.OLEN] = okforlen[:]
 }
-
-// DeclareUniverse makes the universe block visible within the current package.
-func DeclareUniverse() {
-	// Operationally, this is similar to a dot import of builtinpkg, except
-	// that we silently skip symbols that are already declared in the
-	// package block rather than emitting a redeclared symbol error.
-
-	for _, s := range types.BuiltinPkg.Syms {
-		if s.Def == nil {
-			continue
-		}
-		s1 := Lookup(s.Name)
-		if s1.Def != nil {
-			continue
-		}
-
-		s1.Def = s.Def
-	}
-}
diff --git a/src/cmd/compile/internal/types/alg.go b/src/cmd/compile/internal/types/alg.go
index 8d56dec..d3b4462 100644
--- a/src/cmd/compile/internal/types/alg.go
+++ b/src/cmd/compile/internal/types/alg.go
@@ -103,7 +103,7 @@
 		return ASPECIAL, nil
 
 	case TSTRUCT:
-		fields := t.FieldSlice()
+		fields := t.Fields()
 
 		// One-field struct is same as that one field alone.
 		if len(fields) == 1 && !fields[0].Sym.IsBlank() {
@@ -147,7 +147,7 @@
 
 // IncomparableField returns an incomparable Field of struct Type t, if any.
 func IncomparableField(t *Type) *Field {
-	for _, f := range t.FieldSlice() {
+	for _, f := range t.Fields() {
 		if !IsComparable(f.Type) {
 			return f
 		}
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
index c5d9941..c9b9853 100644
--- a/src/cmd/compile/internal/types/fmt.go
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -8,9 +8,7 @@
 	"bytes"
 	"encoding/binary"
 	"fmt"
-	"go/constant"
 	"strconv"
-	"strings"
 	"sync"
 
 	"cmd/compile/internal/base"
@@ -29,31 +27,6 @@
 // BlankSym is the blank (_) symbol.
 var BlankSym *Sym
 
-// OrigSym returns the original symbol written by the user.
-func OrigSym(s *Sym) *Sym {
-	if s == nil {
-		return nil
-	}
-
-	if len(s.Name) > 1 && s.Name[0] == '~' {
-		switch s.Name[1] {
-		case 'r': // originally an unnamed result
-			return nil
-		case 'b': // originally the blank identifier _
-			// TODO(mdempsky): Does s.Pkg matter here?
-			return BlankSym
-		}
-		return s
-	}
-
-	if strings.HasPrefix(s.Name, ".anon") {
-		// originally an unnamed or _ name (see subr.go: NewFuncParams)
-		return nil
-	}
-
-	return s
-}
-
 // numImport tracks how often a package with a given name is imported.
 // It is used to provide a better error message (by using the package
 // path to disambiguate) if a package that appears multiple times with
@@ -346,14 +319,6 @@
 			}
 		}
 		sconv2(b, sym, verb, mode)
-
-		// TODO(mdempsky): Investigate including Vargen in fmtTypeIDName
-		// output too. It seems like it should, but that mode is currently
-		// used in string representation used by reflection, which is
-		// user-visible and doesn't expect this.
-		if mode == fmtTypeID && t.vargen != 0 {
-			fmt.Fprintf(b, "·%d", t.vargen)
-		}
 		return
 	}
 
@@ -452,7 +417,7 @@
 			break
 		}
 		b.WriteString("interface {")
-		for i, f := range t.AllMethods().Slice() {
+		for i, f := range t.AllMethods() {
 			if i != 0 {
 				b.WriteByte(';')
 			}
@@ -472,7 +437,7 @@
 			}
 			tconv2(b, f.Type, 'S', mode, visited)
 		}
-		if t.AllMethods().Len() != 0 {
+		if len(t.AllMethods()) != 0 {
 			b.WriteByte(' ')
 		}
 		b.WriteByte('}')
@@ -483,12 +448,12 @@
 		} else {
 			if t.Recv() != nil {
 				b.WriteString("method")
-				tconv2(b, t.Recvs(), 0, mode, visited)
+				formatParams(b, t.Recvs(), mode, visited)
 				b.WriteByte(' ')
 			}
 			b.WriteString("func")
 		}
-		tconv2(b, t.Params(), 0, mode, visited)
+		formatParams(b, t.Params(), mode, visited)
 
 		switch t.NumResults() {
 		case 0:
@@ -496,11 +461,11 @@
 
 		case 1:
 			b.WriteByte(' ')
-			tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type
+			tconv2(b, t.Result(0).Type, 0, mode, visited) // struct->field->field's type
 
 		default:
 			b.WriteByte(' ')
-			tconv2(b, t.Results(), 0, mode, visited)
+			formatParams(b, t.Results(), mode, visited)
 		}
 
 	case TSTRUCT:
@@ -511,10 +476,6 @@
 			switch t {
 			case mt.Bucket:
 				b.WriteString("map.bucket[")
-			case mt.Hmap:
-				b.WriteString("map.hdr[")
-			case mt.Hiter:
-				b.WriteString("map.iter[")
 			default:
 				base.Fatalf("unknown internal map type")
 			}
@@ -524,39 +485,18 @@
 			break
 		}
 
-		if funarg := t.StructType().Funarg; funarg != FunargNone {
-			open, close := '(', ')'
-			if funarg == FunargTparams {
-				open, close = '[', ']'
+		b.WriteString("struct {")
+		for i, f := range t.Fields() {
+			if i != 0 {
+				b.WriteByte(';')
 			}
-			b.WriteByte(byte(open))
-			fieldVerb := 'v'
-			switch mode {
-			case fmtTypeID, fmtTypeIDName, fmtGo:
-				// no argument names on function signature, and no "noescape"/"nosplit" tags
-				fieldVerb = 'S'
-			}
-			for i, f := range t.Fields().Slice() {
-				if i != 0 {
-					b.WriteString(", ")
-				}
-				fldconv(b, f, fieldVerb, mode, visited, funarg)
-			}
-			b.WriteByte(byte(close))
-		} else {
-			b.WriteString("struct {")
-			for i, f := range t.Fields().Slice() {
-				if i != 0 {
-					b.WriteByte(';')
-				}
-				b.WriteByte(' ')
-				fldconv(b, f, 'L', mode, visited, funarg)
-			}
-			if t.NumFields() != 0 {
-				b.WriteByte(' ')
-			}
-			b.WriteByte('}')
+			b.WriteByte(' ')
+			fldconv(b, f, 'L', mode, visited, false)
 		}
+		if t.NumFields() != 0 {
+			b.WriteByte(' ')
+		}
+		b.WriteByte('}')
 
 	case TFORW:
 		b.WriteString("undefined")
@@ -581,7 +521,24 @@
 	}
 }
 
-func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Type]int, funarg Funarg) {
+func formatParams(b *bytes.Buffer, params []*Field, mode fmtMode, visited map[*Type]int) {
+	b.WriteByte('(')
+	fieldVerb := 'v'
+	switch mode {
+	case fmtTypeID, fmtTypeIDName, fmtGo:
+		// no argument names on function signature, and no "noescape"/"nosplit" tags
+		fieldVerb = 'S'
+	}
+	for i, param := range params {
+		if i != 0 {
+			b.WriteString(", ")
+		}
+		fldconv(b, param, fieldVerb, mode, visited, true)
+	}
+	b.WriteByte(')')
+}
+
+func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Type]int, isParam bool) {
 	if f == nil {
 		b.WriteString("<T>")
 		return
@@ -592,11 +549,6 @@
 	if verb != 'S' {
 		s := f.Sym
 
-		// Take the name from the original.
-		if mode == fmtGo {
-			s = OrigSym(s)
-		}
-
 		// Using type aliases and embedded fields, it's possible to
 		// construct types that can't be directly represented as a
 		// type literal. For example, given "type Int = int" (#50190),
@@ -638,7 +590,7 @@
 		}
 
 		if s != nil {
-			if funarg != FunargNone {
+			if isParam {
 				name = fmt.Sprint(f.Nname)
 			} else if verb == 'L' {
 				name = s.Name
@@ -667,7 +619,7 @@
 		tconv2(b, f.Type, 0, mode, visited)
 	}
 
-	if verb != 'S' && funarg == FunargNone && f.Note != "" {
+	if verb != 'S' && !isParam && f.Note != "" {
 		b.WriteString(" ")
 		b.WriteString(strconv.Quote(f.Note))
 	}
@@ -688,41 +640,6 @@
 	return name, ""
 }
 
-// Val
-
-func FmtConst(v constant.Value, sharp bool) string {
-	if !sharp && v.Kind() == constant.Complex {
-		real, imag := constant.Real(v), constant.Imag(v)
-
-		var re string
-		sre := constant.Sign(real)
-		if sre != 0 {
-			re = real.String()
-		}
-
-		var im string
-		sim := constant.Sign(imag)
-		if sim != 0 {
-			im = imag.String()
-		}
-
-		switch {
-		case sre == 0 && sim == 0:
-			return "0"
-		case sre == 0:
-			return im + "i"
-		case sim == 0:
-			return re
-		case sim < 0:
-			return fmt.Sprintf("(%s%si)", re, im)
-		default:
-			return fmt.Sprintf("(%s+%si)", re, im)
-		}
-	}
-
-	return v.String()
-}
-
 // TypeHash computes a hash value for type t to use in type switch statements.
 func TypeHash(t *Type) uint32 {
 	p := t.LinkString()
diff --git a/src/cmd/compile/internal/types/goversion.go b/src/cmd/compile/internal/types/goversion.go
index c57493a..ac08a49 100644
--- a/src/cmd/compile/internal/types/goversion.go
+++ b/src/cmd/compile/internal/types/goversion.go
@@ -34,7 +34,7 @@
 }
 
 // ParseLangFlag verifies that the -lang flag holds a valid value, and
-// exits if not. It initializes data used by langSupported.
+// exits if not. It initializes data used by AllowsGoVersion.
 func ParseLangFlag() {
 	if base.Flag.Lang == "" {
 		return
@@ -59,6 +59,10 @@
 
 // parseLang parses a -lang option into a langVer.
 func parseLang(s string) (lang, error) {
+	if s == "go1" { // cmd/go's new spelling of "go1.0" (#65528)
+		s = "go1.0"
+	}
+
 	matches := goVersionRE.FindStringSubmatch(s)
 	if matches == nil {
 		return lang{}, fmt.Errorf(`should be something like "go1.12"`)
diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go
index 6b3bc2d..fa28c03 100644
--- a/src/cmd/compile/internal/types/identity.go
+++ b/src/cmd/compile/internal/types/identity.go
@@ -92,11 +92,11 @@
 		return true
 
 	case TINTER:
-		if t1.AllMethods().Len() != t2.AllMethods().Len() {
+		if len(t1.AllMethods()) != len(t2.AllMethods()) {
 			return false
 		}
-		for i, f1 := range t1.AllMethods().Slice() {
-			f2 := t2.AllMethods().Index(i)
+		for i, f1 := range t1.AllMethods() {
+			f2 := t2.AllMethods()[i]
 			if f1.Sym != f2.Sym || !identical(f1.Type, f2.Type, flags, assumedEqual) {
 				return false
 			}
@@ -107,7 +107,7 @@
 		if t1.NumFields() != t2.NumFields() {
 			return false
 		}
-		for i, f1 := range t1.FieldSlice() {
+		for i, f1 := range t1.Fields() {
 			f2 := t2.Field(i)
 			if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !identical(f1.Type, f2.Type, flags, assumedEqual) {
 				return false
@@ -122,18 +122,18 @@
 		// Check parameters and result parameters for type equality.
 		// We intentionally ignore receiver parameters for type
 		// equality, because they're never relevant.
-		for _, f := range ParamsResults {
-			// Loop over fields in structs, ignoring argument names.
-			fs1, fs2 := f(t1).FieldSlice(), f(t2).FieldSlice()
-			if len(fs1) != len(fs2) {
+		if t1.NumParams() != t2.NumParams() ||
+			t1.NumResults() != t2.NumResults() ||
+			t1.IsVariadic() != t2.IsVariadic() {
+			return false
+		}
+
+		fs1 := t1.ParamsResults()
+		fs2 := t2.ParamsResults()
+		for i, f1 := range fs1 {
+			if !identical(f1.Type, fs2[i].Type, flags, assumedEqual) {
 				return false
 			}
-			for i, f1 := range fs1 {
-				f2 := fs2[i]
-				if f1.IsDDD() != f2.IsDDD() || !identical(f1.Type, f2.Type, flags, assumedEqual) {
-					return false
-				}
-			}
 		}
 		return true
 
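
The rewritten TFUNC case compares parameter count, result count, and variadic-ness up front instead of walking per-funarg structs. Variadic-ness really is part of a function type's identity, as this small standalone program illustrates:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    func main() {
    	var fixed func([]int)
    	var variadic func(...int)
    	// func([]int) and func(...int) are distinct types, even though the
    	// variadic parameter is represented as a slice at run time.
    	fmt.Println(reflect.TypeOf(fixed) == reflect.TypeOf(variadic)) // false
    }
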
diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go
index 9a21494..c6ce788 100644
--- a/src/cmd/compile/internal/types/pkg.go
+++ b/src/cmd/compile/internal/types/pkg.go
@@ -8,7 +8,6 @@
 	"cmd/internal/obj"
 	"cmd/internal/objabi"
 	"fmt"
-	"sort"
 	"strconv"
 	"sync"
 )
@@ -55,25 +54,10 @@
 	return p
 }
 
-// ImportedPkgList returns the list of directly imported packages.
-// The list is sorted by package path.
-func ImportedPkgList() []*Pkg {
-	var list []*Pkg
-	for _, p := range pkgMap {
-		if p.Direct {
-			list = append(list, p)
-		}
-	}
-	sort.Sort(byPath(list))
-	return list
+func PkgMap() map[string]*Pkg {
+	return pkgMap
 }
 
-type byPath []*Pkg
-
-func (a byPath) Len() int           { return len(a) }
-func (a byPath) Less(i, j int) bool { return a[i].Path < a[j].Path }
-func (a byPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-
 var nopkg = &Pkg{
 	Syms: make(map[string]*Sym),
 }
@@ -122,6 +106,14 @@
 	return pkg.LookupBytes(b)
 }
 
+// Selector looks up a selector identifier.
+func (pkg *Pkg) Selector(name string) *Sym {
+	if IsExported(name) {
+		pkg = LocalPkg
+	}
+	return pkg.Lookup(name)
+}
+
 var (
 	internedStringsmu sync.Mutex // protects internedStrings
 	internedStrings   = map[string]string{}
@@ -137,12 +129,3 @@
 	internedStringsmu.Unlock()
 	return s
 }
-
-// CleanroomDo invokes f in an environment with no preexisting packages.
-// For testing of import/export only.
-func CleanroomDo(f func()) {
-	saved := pkgMap
-	pkgMap = make(map[string]*Pkg)
-	f()
-	pkgMap = saved
-}
diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go
deleted file mode 100644
index 438a3f9..0000000
--- a/src/cmd/compile/internal/types/scope.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types
-
-// PkgDef returns the definition associated with s at package scope.
-func (s *Sym) PkgDef() Object { return s.Def }
-
-// SetPkgDef sets the definition associated with s at package scope.
-func (s *Sym) SetPkgDef(n Object) { s.Def = n }
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
index 1c20350..6ba2b91 100644
--- a/src/cmd/compile/internal/types/size.go
+++ b/src/cmd/compile/internal/types/size.go
@@ -5,6 +5,7 @@
 package types
 
 import (
+	"math"
 	"sort"
 
 	"cmd/compile/internal/base"
@@ -91,7 +92,7 @@
 	}
 
 	{
-		methods := t.Methods().Slice()
+		methods := t.Methods()
 		sort.SliceStable(methods, func(i, j int) bool {
 			mi, mj := methods[i], methods[j]
 
@@ -110,7 +111,7 @@
 		})
 	}
 
-	for _, m := range t.Methods().Slice() {
+	for _, m := range t.Methods() {
 		if m.Sym == nil {
 			continue
 		}
@@ -119,7 +120,7 @@
 		addMethod(m, true)
 	}
 
-	for _, m := range t.Methods().Slice() {
+	for _, m := range t.Methods() {
 		if m.Sym != nil || m.Type == nil {
 			continue
 		}
@@ -133,7 +134,7 @@
 
 		// Embedded interface: duplicate all methods
 		// and add to t's method set.
-		for _, t1 := range m.Type.AllMethods().Slice() {
+		for _, t1 := range m.Type.AllMethods() {
 			f := NewField(m.Pos, t1.Sym, t1.Type)
 			addMethod(f, false)
 
@@ -157,90 +158,48 @@
 	t.SetAllMethods(methods)
 }
 
-func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
-	// flag is 0 (receiver), 1 (actual struct), or RegSize (in/out parameters)
-	isStruct := flag == 1
-	starto := o
-	maxalign := int32(flag)
-	if maxalign < 1 {
-		maxalign = 1
-	}
-	// Special case: sync/atomic.align64 is an empty struct we recognize
-	// as a signal that the struct it contains must be 64-bit-aligned.
-	//
-	// This logic is duplicated in go/types and cmd/compile/internal/types2.
-	if isStruct && t.NumFields() == 0 && t.Sym() != nil && t.Sym().Name == "align64" && isAtomicStdPkg(t.Sym().Pkg) {
-		maxalign = 8
-	}
-	lastzero := int64(0)
-	for _, f := range t.Fields().Slice() {
-		if f.Type == nil {
-			// broken field, just skip it so that other valid fields
-			// get a width.
-			continue
-		}
-
+// calcStructOffset computes the offsets of a sequence of fields,
+// starting at the given offset. It returns the offset immediately
+// past the last field.
+func calcStructOffset(t *Type, fields []*Field, offset int64) int64 {
+	for _, f := range fields {
 		CalcSize(f.Type)
-		// If type T contains a field F marked as not-in-heap,
-		// then T must also be a not-in-heap type. Otherwise,
-		// you could heap allocate T and then get a pointer F,
-		// which would be a heap pointer to a not-in-heap type.
-		if f.Type.NotInHeap() {
-			t.SetNotInHeap(true)
-		}
-		if int32(f.Type.align) > maxalign {
-			maxalign = int32(f.Type.align)
-		}
-		if f.Type.align > 0 {
-			o = RoundUp(o, int64(f.Type.align))
-		}
-		if isStruct { // For receiver/args/results, do not set, it depends on ABI
-			f.Offset = o
+		offset = RoundUp(offset, int64(f.Type.align))
+
+		if t.IsStruct() { // param offsets depend on ABI
+			f.Offset = offset
+
+			// If type T contains a field F marked as not-in-heap,
+			// then T must also be a not-in-heap type. Otherwise,
+			// you could heap allocate T and then get a pointer F,
+			// which would be a heap pointer to a not-in-heap type.
+			if f.Type.NotInHeap() {
+				t.SetNotInHeap(true)
+			}
 		}
 
-		w := f.Type.width
-		if w < 0 {
-			base.Fatalf("invalid width %d", f.Type.width)
-		}
-		if w == 0 {
-			lastzero = o
-		}
-		o += w
+		offset += f.Type.width
+
 		maxwidth := MaxWidth
 		// On 32-bit systems, reflect tables impose an additional constraint
 		// that each field start offset must fit in 31 bits.
 		if maxwidth < 1<<32 {
 			maxwidth = 1<<31 - 1
 		}
-		if o >= maxwidth {
-			base.ErrorfAt(typePos(errtype), 0, "type %L too large", errtype)
-			o = 8 // small but nonzero
+		if offset >= maxwidth {
+			base.ErrorfAt(typePos(t), 0, "type %L too large", t)
+			offset = 8 // small but nonzero
 		}
 	}
 
-	// For nonzero-sized structs which end in a zero-sized thing, we add
-	// an extra byte of padding to the type. This padding ensures that
-	// taking the address of the zero-sized thing can't manufacture a
-	// pointer to the next object in the heap. See issue 9401.
-	if flag == 1 && o > starto && o == lastzero {
-		o++
-	}
-
-	// final width is rounded
-	if flag != 0 {
-		o = RoundUp(o, int64(maxalign))
-	}
-	t.align = uint8(maxalign)
-
-	// type width only includes back to first field's offset
-	t.width = o - starto
-
-	return o
+	return offset
 }
 
 func isAtomicStdPkg(p *Pkg) bool {
-	return (p.Prefix == "sync/atomic" || p.Prefix == `""` && base.Ctxt.Pkgpath == "sync/atomic") ||
-		(p.Prefix == "runtime/internal/atomic" || p.Prefix == `""` && base.Ctxt.Pkgpath == "runtime/internal/atomic")
+	if p.Prefix == `""` {
+		panic("bad package prefix")
+	}
+	return p.Prefix == "sync/atomic" || p.Prefix == "runtime/internal/atomic"
 }
 
 // CalcSize calculates and stores the size and alignment for t.
@@ -309,39 +268,58 @@
 	case TINT8, TUINT8, TBOOL:
 		// bool is int8
 		w = 1
+		t.intRegs = 1
 
 	case TINT16, TUINT16:
 		w = 2
+		t.intRegs = 1
 
-	case TINT32, TUINT32, TFLOAT32:
+	case TINT32, TUINT32:
 		w = 4
+		t.intRegs = 1
 
-	case TINT64, TUINT64, TFLOAT64:
+	case TINT64, TUINT64:
 		w = 8
 		t.align = uint8(RegSize)
+		t.intRegs = uint8(8 / RegSize)
+
+	case TFLOAT32:
+		w = 4
+		t.floatRegs = 1
+
+	case TFLOAT64:
+		w = 8
+		t.align = uint8(RegSize)
+		t.floatRegs = 1
 
 	case TCOMPLEX64:
 		w = 8
 		t.align = 4
+		t.floatRegs = 2
 
 	case TCOMPLEX128:
 		w = 16
 		t.align = uint8(RegSize)
+		t.floatRegs = 2
 
 	case TPTR:
 		w = int64(PtrSize)
+		t.intRegs = 1
 		CheckSize(t.Elem())
 
 	case TUNSAFEPTR:
 		w = int64(PtrSize)
+		t.intRegs = 1
 
 	case TINTER: // implemented as 2 pointers
 		w = 2 * int64(PtrSize)
 		t.align = uint8(PtrSize)
+		t.intRegs = 2
 		expandiface(t)
 
 	case TCHAN: // implemented as pointer
 		w = int64(PtrSize)
+		t.intRegs = 1
 
 		CheckSize(t.Elem())
 
@@ -365,15 +343,14 @@
 
 	case TMAP: // implemented as pointer
 		w = int64(PtrSize)
+		t.intRegs = 1
 		CheckSize(t.Elem())
 		CheckSize(t.Key())
 
 	case TFORW: // should have been filled in
 		base.Fatalf("invalid recursive type %v", t)
-		w = 1 // anything will do
 
-	case TANY:
-		// not a real type; should be replaced before use.
+	case TANY: // not a real type; should be replaced before use.
 		base.Fatalf("CalcSize any")
 
 	case TSTRING:
@@ -382,6 +359,7 @@
 		}
 		w = StringSize
 		t.align = uint8(PtrSize)
+		t.intRegs = 2
 
 	case TARRAY:
 		if t.Elem() == nil {
@@ -399,6 +377,20 @@
 		w = t.NumElem() * t.Elem().width
 		t.align = t.Elem().align
 
+		// ABIInternal only allows "trivial" arrays (i.e., length 0 or 1)
+		// to be passed by register.
+		switch t.NumElem() {
+		case 0:
+			t.intRegs = 0
+			t.floatRegs = 0
+		case 1:
+			t.intRegs = t.Elem().intRegs
+			t.floatRegs = t.Elem().floatRegs
+		default:
+			t.intRegs = math.MaxUint8
+			t.floatRegs = math.MaxUint8
+		}
+
 	case TSLICE:
 		if t.Elem() == nil {
 			break
@@ -406,16 +398,14 @@
 		w = SliceSize
 		CheckSize(t.Elem())
 		t.align = uint8(PtrSize)
+		t.intRegs = 3
 
 	case TSTRUCT:
 		if t.IsFuncArgStruct() {
 			base.Fatalf("CalcSize fn struct %v", t)
 		}
-		// Recognize and mark runtime/internal/sys.nih as not-in-heap.
-		if sym := t.Sym(); sym != nil && sym.Pkg.Path == "runtime/internal/sys" && sym.Name == "nih" {
-			t.SetNotInHeap(true)
-		}
-		w = calcStructOffset(t, t, 0, 1)
+		CalcStructSize(t)
+		w = t.width
 
 	// make fake type to check later to
 	// trigger function argument computation.
@@ -423,18 +413,19 @@
 		t1 := NewFuncArgs(t)
 		CheckSize(t1)
 		w = int64(PtrSize) // width of func type is pointer
+		t.intRegs = 1
 
 	// function is 3 cated structures;
 	// compute their widths as side-effect.
 	case TFUNCARGS:
 		t1 := t.FuncArgs()
-		w = calcStructOffset(t1, t1.Recvs(), 0, 0)
-		w = calcStructOffset(t1, t1.Params(), w, RegSize)
-		w = calcStructOffset(t1, t1.Results(), w, RegSize)
+		// TODO(mdempsky): Should package abi be responsible for computing argwid?
+		w = calcStructOffset(t1, t1.Recvs(), 0)
+		w = calcStructOffset(t1, t1.Params(), w)
+		w = RoundUp(w, int64(RegSize))
+		w = calcStructOffset(t1, t1.Results(), w)
+		w = RoundUp(w, int64(RegSize))
 		t1.extra.(*Func).Argwid = w
-		if w%int64(RegSize) != 0 {
-			base.Warn("bad type %v %d\n", t1, w)
-		}
 		t.align = 1
 	}
 
@@ -455,19 +446,62 @@
 	ResumeCheckSize()
 }
 
-// CalcStructSize calculates the size of s,
-// filling in s.Width and s.Align,
+// CalcStructSize calculates the size of t,
+// filling in t.width, t.align, t.intRegs, and t.floatRegs,
 // even if size calculation is otherwise disabled.
-func CalcStructSize(s *Type) {
-	s.width = calcStructOffset(s, s, 0, 1) // sets align
-}
+func CalcStructSize(t *Type) {
+	var maxAlign uint8 = 1
 
-// RecalcSize is like CalcSize, but recalculates t's size even if it
-// has already been calculated before. It does not recalculate other
-// types.
-func RecalcSize(t *Type) {
-	t.align = 0
-	CalcSize(t)
+	// Recognize special types. This logic is duplicated in go/types and
+	// cmd/compile/internal/types2.
+	if sym := t.Sym(); sym != nil {
+		switch {
+		case sym.Name == "align64" && isAtomicStdPkg(sym.Pkg):
+			maxAlign = 8
+		case sym.Pkg.Path == "runtime/internal/sys" && sym.Name == "nih":
+			t.SetNotInHeap(true)
+		}
+	}
+
+	fields := t.Fields()
+	size := calcStructOffset(t, fields, 0)
+
+	// For non-zero-sized structs which end in a zero-sized field, we
+	// add an extra byte of padding to the type. This padding ensures
+	// that taking the address of a zero-sized field can't manufacture a
+	// pointer to the next object in the heap. See issue 9401.
+	if size > 0 && fields[len(fields)-1].Type.width == 0 {
+		size++
+	}
+
+	var intRegs, floatRegs uint64
+	for _, field := range fields {
+		typ := field.Type
+
+		// The alignment of a struct type is the maximum alignment of its
+		// field types.
+		if align := typ.align; align > maxAlign {
+			maxAlign = align
+		}
+
+		// Each field needs its own registers.
+		// We sum in uint64 to avoid possible overflows.
+		intRegs += uint64(typ.intRegs)
+		floatRegs += uint64(typ.floatRegs)
+	}
+
+	// Final size includes trailing padding.
+	size = RoundUp(size, int64(maxAlign))
+
+	if intRegs > math.MaxUint8 || floatRegs > math.MaxUint8 {
+		intRegs = math.MaxUint8
+		floatRegs = math.MaxUint8
+	}
+
+	t.width = size
+	t.align = maxAlign
+	t.intRegs = uint8(intRegs)
+	t.floatRegs = uint8(floatRegs)
 }
 
 func (t *Type) widthCalculated() bool {
@@ -583,7 +617,7 @@
 
 	case TSTRUCT:
 		// Find the last field that has pointers, if any.
-		fs := t.Fields().Slice()
+		fs := t.Fields()
 		for i := len(fs) - 1; i >= 0; i-- {
 			if size := PtrDataSize(fs[i].Type); size > 0 {
 				return fs[i].Offset + size
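
CalcStructSize keeps the issue 9401 rule: a non-zero-sized struct that ends in a zero-sized field gets one extra byte of padding, so taking the last field's address cannot yield a pointer just past the object. The effect is visible from ordinary Go:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    type padded struct {
    	n int64
    	_ struct{} // zero-sized trailing field triggers the extra padding byte
    }

    type plain struct {
    	n int64
    }

    func main() {
    	// On a 64-bit platform this prints 16 and 8: the padding byte is added
    	// after the zero-sized field and the size is rounded up to the alignment.
    	fmt.Println(unsafe.Sizeof(padded{}), unsafe.Sizeof(plain{}))
    }
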
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
index 76ccbd5..8a6f241 100644
--- a/src/cmd/compile/internal/types/sizeof_test.go
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -22,9 +22,9 @@
 	}{
 		{Sym{}, 32, 64},
 		{Type{}, 56, 96},
-		{Map{}, 20, 40},
+		{Map{}, 12, 24},
 		{Forward{}, 20, 32},
-		{Func{}, 20, 32},
+		{Func{}, 32, 56},
 		{Struct{}, 12, 24},
 		{Interface{}, 0, 0},
 		{Chan{}, 8, 16},
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index c390b81..2777b4f 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -6,8 +6,10 @@
 
 import (
 	"cmd/compile/internal/base"
+	"cmd/internal/objabi"
 	"cmd/internal/src"
 	"fmt"
+	"go/constant"
 	"internal/types/errors"
 	"sync"
 )
@@ -127,6 +129,25 @@
 	UntypedComplex = newType(TIDEAL)
 )
 
+// UntypedTypes maps from a constant.Kind to its untyped Type
+// representation.
+var UntypedTypes = [...]*Type{
+	constant.Bool:    UntypedBool,
+	constant.String:  UntypedString,
+	constant.Int:     UntypedInt,
+	constant.Float:   UntypedFloat,
+	constant.Complex: UntypedComplex,
+}
+
+// DefaultKinds maps from a constant.Kind to its default Kind.
+var DefaultKinds = [...]Kind{
+	constant.Bool:    TBOOL,
+	constant.String:  TSTRING,
+	constant.Int:     TINT,
+	constant.Float:   TFLOAT64,
+	constant.Complex: TCOMPLEX128,
+}
+
 // A Type represents a Go type.
 //
 // There may be multiple unnamed types with identical structure. However, there must
@@ -135,7 +156,7 @@
 // package.Lookup(name)) and checking sym.Def. If sym.Def is non-nil, the type
 // already exists at package scope and is available at sym.Def.(*ir.Name).Type().
 // Local types (which may have the same name as a package-level type) are
-// distinguished by the value of vargen.
+// distinguished by their vargen, which is embedded in their symbol name.
 type Type struct {
 	// extra contains extra etype-specific fields.
 	// As an optimization, those etype-specific structs which contain exactly
@@ -159,9 +180,9 @@
 	width int64 // valid if Align > 0
 
 	// list of base methods (excluding embedding)
-	methods Fields
+	methods fields
 	// list of all methods (including embedding)
-	allMethods Fields
+	allMethods fields
 
 	// canonical OTYPE node for a named type (should be an ir.Name node with same sym)
 	obj Object
@@ -174,11 +195,11 @@
 		slice *Type // []T, or nil
 	}
 
-	vargen int32 // unique name for OTYPE/ONAME
-
 	kind  Kind  // kind of type
 	align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed)
 
+	intRegs, floatRegs uint8 // registers needed for ABIInternal
+
 	flags bitset8
 
 	// For defined (named) generic types, a pointer to the list of type params
@@ -192,6 +213,17 @@
 	rparams *[]*Type
 }
 
+// Registers returns the number of integer and floating-point
+// registers required to represent a parameter of this type under the
+// ABIInternal calling conventions.
+//
+// If t must be passed by memory, Registers returns (math.MaxUint8,
+// math.MaxUint8).
+func (t *Type) Registers() (uint8, uint8) {
+	CalcSize(t)
+	return t.intRegs, t.floatRegs
+}
+
 func (*Type) CanBeAnSSAAux() {}
 
 const (
@@ -276,8 +308,6 @@
 	Elem *Type // Val (elem) type
 
 	Bucket *Type // internal struct type representing a hash bucket
-	Hmap   *Type // internal struct type representing the Hmap (map header object)
-	Hiter  *Type // internal struct type representing hash iterator state
 }
 
 // MapType returns t's extra map-specific fields.
@@ -292,17 +322,20 @@
 	Embedlineno src.XPos // first use of this type as an embedded type
 }
 
-// ForwardType returns t's extra forward-type-specific fields.
-func (t *Type) ForwardType() *Forward {
+// forwardType returns t's extra forward-type-specific fields.
+func (t *Type) forwardType() *Forward {
 	t.wantEtype(TFORW)
 	return t.extra.(*Forward)
 }
 
 // Func contains Type fields specific to func types.
 type Func struct {
-	Receiver *Type // function receiver
-	Results  *Type // function results
-	Params   *Type // function params
+	allParams []*Field // slice of all parameters, in receiver/params/results order
+
+	startParams  int // index of the start of the (regular) parameters section
+	startResults int // index of the start of the results section
+
+	resultsTuple *Type // struct-like type representing multi-value results
 
 	// Argwid is the total width of the function receiver, params, and results.
 	// It gets calculated via a temporary TFUNCARGS type.
@@ -310,34 +343,29 @@
 	Argwid int64
 }
 
-// FuncType returns t's extra func-specific fields.
-func (t *Type) FuncType() *Func {
+func (ft *Func) recvs() []*Field         { return ft.allParams[:ft.startParams] }
+func (ft *Func) params() []*Field        { return ft.allParams[ft.startParams:ft.startResults] }
+func (ft *Func) results() []*Field       { return ft.allParams[ft.startResults:] }
+func (ft *Func) recvParams() []*Field    { return ft.allParams[:ft.startResults] }
+func (ft *Func) paramsResults() []*Field { return ft.allParams[ft.startParams:] }
+
+// funcType returns t's extra func-specific fields.
+func (t *Type) funcType() *Func {
 	t.wantEtype(TFUNC)
 	return t.extra.(*Func)
 }
 
 // StructType contains Type fields specific to struct types.
 type Struct struct {
-	fields Fields
+	fields fields
 
 	// Maps have three associated internal structs (see struct MapType).
 	// Map links such structs back to their map type.
 	Map *Type
 
-	Funarg Funarg // type of function arguments for arg struct
+	ParamTuple bool // whether this struct is actually a tuple of signature parameters
 }
 
-// Funarg records the kind of function argument
-type Funarg uint8
-
-const (
-	FunargNone    Funarg = iota
-	FunargRcvr           // receiver
-	FunargParams         // input parameters
-	FunargResults        // output results
-	FunargTparams        // type params
-)
-
 // StructType returns t's extra struct-specific fields.
 func (t *Type) StructType() *Struct {
 	t.wantEtype(TSTRUCT)
@@ -369,8 +397,8 @@
 	Dir  ChanDir // channel direction
 }
 
-// ChanType returns t's extra channel-specific fields.
-func (t *Type) ChanType() *Chan {
+// chanType returns t's extra channel-specific fields.
+func (t *Type) chanType() *Chan {
 	t.wantEtype(TCHAN)
 	return t.extra.(*Chan)
 }
@@ -421,8 +449,7 @@
 	Nname Object
 
 	// Offset in bytes of this field or method within its enclosing struct
-	// or interface Type.  Exception: if field is function receiver, arg or
-	// result, then this is BOGUS_FUNARG_OFFSET; types does not know the Abi.
+	// or interface Type. For parameters, this is BADWIDTH.
 	Offset int64
 }
 
@@ -447,39 +474,25 @@
 	return f.Type.kind == TFUNC && f.Type.Recv() != nil
 }
 
-// Fields is a pointer to a slice of *Field.
+// fields is a pointer to a slice of *Field.
 // This saves space in Types that do not have fields or methods
 // compared to a simple slice of *Field.
-type Fields struct {
+type fields struct {
 	s *[]*Field
 }
 
-// Len returns the number of entries in f.
-func (f *Fields) Len() int {
-	if f.s == nil {
-		return 0
-	}
-	return len(*f.s)
-}
-
 // Slice returns the entries in f as a slice.
 // Changes to the slice entries will be reflected in f.
-func (f *Fields) Slice() []*Field {
+func (f *fields) Slice() []*Field {
 	if f.s == nil {
 		return nil
 	}
 	return *f.s
 }
 
-// Index returns the i'th element of Fields.
-// It panics if f does not have at least i+1 elements.
-func (f *Fields) Index(i int) *Field {
-	return (*f.s)[i]
-}
-
 // Set sets f to a slice.
 // This takes ownership of the slice.
-func (f *Fields) Set(s []*Field) {
+func (f *fields) Set(s []*Field) {
 	if len(s) == 0 {
 		f.s = nil
 	} else {
@@ -490,14 +503,6 @@
 	}
 }
 
-// Append appends entries to f.
-func (f *Fields) Append(s ...*Field) {
-	if f.s == nil {
-		f.s = new([]*Field)
-	}
-	*f.s = append(*f.s, s...)
-}
-
 // newType returns a new Type of the specified kind.
 func newType(et Kind) *Type {
 	t := &Type{
@@ -570,7 +575,7 @@
 // NewChan returns a new chan Type with direction dir.
 func NewChan(elem *Type, dir ChanDir) *Type {
 	t := newType(TCHAN)
-	ct := t.ChanType()
+	ct := t.chanType()
 	ct.Elem = elem
 	ct.Dir = dir
 	if elem.HasShape() {
@@ -645,6 +650,7 @@
 	t.extra = Ptr{Elem: elem}
 	t.width = int64(PtrSize)
 	t.align = uint8(PtrSize)
+	t.intRegs = 1
 	if NewPtrCacheEnabled {
 		elem.cache.ptr = t
 	}
@@ -737,34 +743,40 @@
 		}
 
 	case TFUNC:
-		recvs := SubstAny(t.Recvs(), types)
-		params := SubstAny(t.Params(), types)
-		results := SubstAny(t.Results(), types)
-		if recvs != t.Recvs() || params != t.Params() || results != t.Results() {
-			t = t.copy()
-			t.FuncType().Receiver = recvs
-			t.FuncType().Results = results
-			t.FuncType().Params = params
-		}
+		ft := t.funcType()
+		allParams := substFields(ft.allParams, types)
+
+		t = t.copy()
+		ft = t.funcType()
+		ft.allParams = allParams
+
+		rt := ft.resultsTuple
+		rt = rt.copy()
+		ft.resultsTuple = rt
+		rt.setFields(t.Results())
 
 	case TSTRUCT:
 		// Make a copy of all fields, including ones whose type does not change.
 		// This prevents aliasing across functions, which can lead to later
 		// fields getting their Offset incorrectly overwritten.
-		fields := t.FieldSlice()
-		nfs := make([]*Field, len(fields))
-		for i, f := range fields {
-			nft := SubstAny(f.Type, types)
-			nfs[i] = f.Copy()
-			nfs[i].Type = nft
-		}
+		nfs := substFields(t.Fields(), types)
 		t = t.copy()
-		t.SetFields(nfs)
+		t.setFields(nfs)
 	}
 
 	return t
 }
 
+func substFields(fields []*Field, types *[]*Type) []*Field {
+	nfs := make([]*Field, len(fields))
+	for i, f := range fields {
+		nft := SubstAny(f.Type, types)
+		nfs[i] = f.Copy()
+		nfs[i].Type = nft
+	}
+	return nfs
+}
+
 // copy returns a shallow copy of the Type.
 func (t *Type) copy() *Type {
 	if t == nil {
@@ -815,45 +827,56 @@
 	}
 }
 
-func (t *Type) Recvs() *Type   { return t.FuncType().Receiver }
-func (t *Type) Params() *Type  { return t.FuncType().Params }
-func (t *Type) Results() *Type { return t.FuncType().Results }
+// ResultsTuple returns the result type of signature type t as a tuple.
+// This can be used as the type of multi-valued call expressions.
+func (t *Type) ResultsTuple() *Type { return t.funcType().resultsTuple }
 
-func (t *Type) NumRecvs() int   { return t.FuncType().Receiver.NumFields() }
-func (t *Type) NumParams() int  { return t.FuncType().Params.NumFields() }
-func (t *Type) NumResults() int { return t.FuncType().Results.NumFields() }
+// Recvs returns a slice of receiver parameters of signature type t.
+// The returned slice always has length 0 or 1.
+func (t *Type) Recvs() []*Field { return t.funcType().recvs() }
+
+// Params returns a slice of regular parameters of signature type t.
+func (t *Type) Params() []*Field { return t.funcType().params() }
+
+// Results returns a slice of result parameters of signature type t.
+func (t *Type) Results() []*Field { return t.funcType().results() }
+
+// RecvParamsResults returns a slice containing all of the
+// signature's parameters in receiver (if any), (normal) parameters,
+// and then results.
+func (t *Type) RecvParamsResults() []*Field { return t.funcType().allParams }
+
+// RecvParams returns a slice containing the signature's receiver (if
+// any) followed by its (normal) parameters.
+func (t *Type) RecvParams() []*Field { return t.funcType().recvParams() }
+
+// ParamsResults returns a slice containing the signature's (normal)
+// parameters followed by its results.
+func (t *Type) ParamsResults() []*Field { return t.funcType().paramsResults() }
+
+func (t *Type) NumRecvs() int   { return len(t.Recvs()) }
+func (t *Type) NumParams() int  { return len(t.Params()) }
+func (t *Type) NumResults() int { return len(t.Results()) }
 
 // IsVariadic reports whether function type t is variadic.
 func (t *Type) IsVariadic() bool {
 	n := t.NumParams()
-	return n > 0 && t.Params().Field(n-1).IsDDD()
+	return n > 0 && t.Param(n-1).IsDDD()
 }
 
 // Recv returns the receiver of function type t, if any.
 func (t *Type) Recv() *Field {
-	s := t.Recvs()
-	if s.NumFields() == 0 {
-		return nil
+	if s := t.Recvs(); len(s) == 1 {
+		return s[0]
 	}
-	return s.Field(0)
+	return nil
 }
 
-// RecvsParamsResults stores the accessor functions for a function Type's
-// receiver, parameters, and result parameters, in that order.
-// It can be used to iterate over all of a function's parameter lists.
-var RecvsParamsResults = [3]func(*Type) *Type{
-	(*Type).Recvs, (*Type).Params, (*Type).Results,
-}
+// Param returns the i'th parameter of signature type t.
+func (t *Type) Param(i int) *Field { return t.Params()[i] }
 
-// RecvsParams is like RecvsParamsResults, but omits result parameters.
-var RecvsParams = [2]func(*Type) *Type{
-	(*Type).Recvs, (*Type).Params,
-}
-
-// ParamsResults is like RecvsParamsResults, but omits receiver parameters.
-var ParamsResults = [2]func(*Type) *Type{
-	(*Type).Params, (*Type).Results,
-}
+// Result returns the i'th result of signature type t.
+func (t *Type) Result(i int) *Field { return t.Results()[i] }
 
 // Key returns the key type of map type t.
 func (t *Type) Key() *Type {
@@ -894,55 +917,56 @@
 
 // IsFuncArgStruct reports whether t is a struct representing function parameters or results.
 func (t *Type) IsFuncArgStruct() bool {
-	return t.kind == TSTRUCT && t.extra.(*Struct).Funarg != FunargNone
+	return t.kind == TSTRUCT && t.extra.(*Struct).ParamTuple
 }
 
 // Methods returns a pointer to the base methods (excluding embedding) for type t.
 // These can either be concrete methods (for non-interface types) or interface
 // methods (for interface types).
-func (t *Type) Methods() *Fields {
-	return &t.methods
+func (t *Type) Methods() []*Field {
+	return t.methods.Slice()
 }
 
 // AllMethods returns a pointer to all the methods (including embedding) for type t.
 // For an interface type, this is the set of methods that are typically iterated
 // over. For non-interface types, AllMethods() only returns a valid result after
 // CalcMethods() has been called at least once.
-func (t *Type) AllMethods() *Fields {
+func (t *Type) AllMethods() []*Field {
 	if t.kind == TINTER {
 		// Calculate the full method set of an interface type on the fly
 		// now, if not done yet.
 		CalcSize(t)
 	}
-	return &t.allMethods
+	return t.allMethods.Slice()
 }
 
-// SetAllMethods sets the set of all methods (including embedding) for type t.
-// Use this method instead of t.AllMethods().Set(), which might call CalcSize() on
-// an uninitialized interface type.
+// SetMethods sets the direct method set for type t (i.e., *not*
+// including promoted methods from embedded types).
+func (t *Type) SetMethods(fs []*Field) {
+	t.methods.Set(fs)
+}
+
+// SetAllMethods sets the set of all methods for type t (i.e.,
+// including promoted methods from embedded types).
 func (t *Type) SetAllMethods(fs []*Field) {
 	t.allMethods.Set(fs)
 }
 
-// Fields returns the fields of struct type t.
-func (t *Type) Fields() *Fields {
+// fields returns the fields of struct type t.
+func (t *Type) fields() *fields {
 	t.wantEtype(TSTRUCT)
 	return &t.extra.(*Struct).fields
 }
 
 // Field returns the i'th field of struct type t.
-func (t *Type) Field(i int) *Field {
-	return t.Fields().Slice()[i]
-}
+func (t *Type) Field(i int) *Field { return t.Fields()[i] }
 
-// FieldSlice returns a slice of containing all fields of
+// Fields returns a slice containing all fields of
 // a struct type t.
-func (t *Type) FieldSlice() []*Field {
-	return t.Fields().Slice()
-}
+func (t *Type) Fields() []*Field { return t.fields().Slice() }
 
-// SetFields sets struct type t's fields to fields.
-func (t *Type) SetFields(fields []*Field) {
+// setFields sets struct type t's fields to fields.
+func (t *Type) setFields(fields []*Field) {
 	// If we've calculated the width of t before,
 	// then some other type such as a function signature
 	// might now have the wrong type.
@@ -953,13 +977,13 @@
 		base.Fatalf("SetFields of %v: width previously calculated", t)
 	}
 	t.wantEtype(TSTRUCT)
-	t.Fields().Set(fields)
+	t.fields().Set(fields)
 }
 
 // SetInterface sets the base methods of an interface type t.
 func (t *Type) SetInterface(methods []*Field) {
 	t.wantEtype(TINTER)
-	t.Methods().Set(methods)
+	t.methods.Set(methods)
 }
 
 // ArgWidth returns the total aligned argument size for a function.
@@ -1102,10 +1126,6 @@
 	}
 
 	if x.obj != nil {
-		// Syms non-nil, if vargens match then equal.
-		if t.vargen != x.vargen {
-			return cmpForNe(t.vargen < x.vargen)
-		}
 		return CMPeq
 	}
 	// both syms nil, look at structure below.
@@ -1187,8 +1207,8 @@
 			return CMPgt // bucket maps are least
 		} // If t != t.Map.Bucket, fall through to general case
 
-		tfs := t.FieldSlice()
-		xfs := x.FieldSlice()
+		tfs := t.Fields()
+		xfs := x.Fields()
 		for i := 0; i < len(tfs) && i < len(xfs); i++ {
 			t1, x1 := tfs[i], xfs[i]
 			if t1.Embedded != x1.Embedded {
@@ -1210,8 +1230,8 @@
 		return CMPeq
 
 	case TINTER:
-		tfs := t.AllMethods().Slice()
-		xfs := x.AllMethods().Slice()
+		tfs := t.AllMethods()
+		xfs := x.AllMethods()
 		for i := 0; i < len(tfs) && i < len(xfs); i++ {
 			t1, x1 := tfs[i], xfs[i]
 			if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq {
@@ -1227,22 +1247,24 @@
 		return CMPeq
 
 	case TFUNC:
-		for _, f := range RecvsParamsResults {
-			// Loop over fields in structs, ignoring argument names.
-			tfs := f(t).FieldSlice()
-			xfs := f(x).FieldSlice()
-			for i := 0; i < len(tfs) && i < len(xfs); i++ {
-				ta := tfs[i]
-				tb := xfs[i]
-				if ta.IsDDD() != tb.IsDDD() {
-					return cmpForNe(!ta.IsDDD())
-				}
-				if c := ta.Type.cmp(tb.Type); c != CMPeq {
-					return c
-				}
-			}
-			if len(tfs) != len(xfs) {
-				return cmpForNe(len(tfs) < len(xfs))
+		if tn, xn := t.NumRecvs(), x.NumRecvs(); tn != xn {
+			return cmpForNe(tn < xn)
+		}
+		if tn, xn := t.NumParams(), x.NumParams(); tn != xn {
+			return cmpForNe(tn < xn)
+		}
+		if tn, xn := t.NumResults(), x.NumResults(); tn != xn {
+			return cmpForNe(tn < xn)
+		}
+		if tv, xv := t.IsVariadic(), x.IsVariadic(); tv != xv {
+			return cmpForNe(!tv)
+		}
+
+		tfs := t.RecvParamsResults()
+		xfs := x.RecvParamsResults()
+		for i, tf := range tfs {
+			if c := tf.Type.cmp(xfs[i].Type); c != CMPeq {
+				return c
 			}
 		}
 		return CMPeq
@@ -1399,7 +1421,7 @@
 
 // IsEmptyInterface reports whether t is an empty interface type.
 func (t *Type) IsEmptyInterface() bool {
-	return t.IsInterface() && t.AllMethods().Len() == 0
+	return t.IsInterface() && len(t.AllMethods()) == 0
 }
 
 // IsScalar reports whether 't' is a scalar Go type, e.g.
@@ -1424,7 +1446,7 @@
 	if t.kind == TRESULTS {
 		return len(t.extra.(*Results).Types)
 	}
-	return t.Fields().Len()
+	return len(t.Fields())
 }
 func (t *Type) FieldType(i int) *Type {
 	if t.kind == TTUPLE {
@@ -1449,6 +1471,21 @@
 	return t.Field(i).Sym.Name
 }
 
+// OffsetOf reports the offset of the field with the given name
+// within struct type t.
+func (t *Type) OffsetOf(name string) int64 {
+	if t.kind != TSTRUCT {
+		base.Fatalf("can't call OffsetOf on non-struct %v", t)
+	}
+	for _, f := range t.Fields() {
+		if f.Sym.Name == name {
+			return f.Offset
+		}
+	}
+	base.Fatalf("couldn't find field %s in %v", name, t)
+	return -1
+}
+
 func (t *Type) NumElem() int64 {
 	t.wantEtype(TARRAY)
 	return t.extra.(*Array).Bound
@@ -1474,7 +1511,7 @@
 			base.Fatalf("NumComponents func arg struct")
 		}
 		var n int64
-		for _, f := range t.FieldSlice() {
+		for _, f := range t.Fields() {
 			if countBlank == IgnoreBlankFields && f.Sym.IsBlank() {
 				continue
 			}
@@ -1603,42 +1640,25 @@
 	return t.obj
 }
 
-// typeGen tracks the number of function-scoped defined types that
-// have been declared. It's used to generate unique linker symbols for
-// their runtime type descriptors.
-var typeGen int32
-
-// SetVargen assigns a unique generation number to type t, which must
-// be a defined type declared within function scope. The generation
-// number is used to distinguish it from other similarly spelled
-// defined types from the same package.
-//
-// TODO(mdempsky): Come up with a better solution.
-func (t *Type) SetVargen() {
-	base.Assertf(t.Sym() != nil, "SetVargen on anonymous type %v", t)
-	base.Assertf(t.vargen == 0, "type %v already has Vargen %v", t, t.vargen)
-
-	typeGen++
-	t.vargen = typeGen
-}
-
 // SetUnderlying sets the underlying type of an incomplete type (i.e. type whose kind
 // is currently TFORW). SetUnderlying automatically updates any types that were waiting
 // for this type to be completed.
 func (t *Type) SetUnderlying(underlying *Type) {
 	if underlying.kind == TFORW {
 		// This type isn't computed yet; when it is, update n.
-		underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t)
+		underlying.forwardType().Copyto = append(underlying.forwardType().Copyto, t)
 		return
 	}
 
-	ft := t.ForwardType()
+	ft := t.forwardType()
 
 	// TODO(mdempsky): Fix Type rekinding.
 	t.kind = underlying.kind
 	t.extra = underlying.extra
 	t.width = underlying.width
 	t.align = underlying.align
+	t.intRegs = underlying.intRegs
+	t.floatRegs = underlying.floatRegs
 	t.underlying = underlying.underlying
 
 	if underlying.NotInHeap() {
@@ -1700,40 +1720,38 @@
 	return t
 }
 
-const BOGUS_FUNARG_OFFSET = -1000000000
-
-func unzeroFieldOffsets(f []*Field) {
-	for i := range f {
-		f[i].Offset = BOGUS_FUNARG_OFFSET // This will cause an explosion if it is not corrected
-	}
-}
-
 // NewSignature returns a new function type for the given receiver,
 // parameters, and results, any of which may be nil.
 func NewSignature(recv *Field, params, results []*Field) *Type {
-	var recvs []*Field
+	startParams := 0
 	if recv != nil {
-		recvs = []*Field{recv}
+		startParams = 1
 	}
+	startResults := startParams + len(params)
+
+	allParams := make([]*Field, startResults+len(results))
+	if recv != nil {
+		allParams[0] = recv
+	}
+	copy(allParams[startParams:], params)
+	copy(allParams[startResults:], results)
 
 	t := newType(TFUNC)
-	ft := t.FuncType()
+	ft := t.funcType()
 
-	funargs := func(fields []*Field, funarg Funarg) *Type {
+	funargs := func(fields []*Field) *Type {
 		s := NewStruct(fields)
-		s.StructType().Funarg = funarg
+		s.StructType().ParamTuple = true
 		return s
 	}
 
-	if recv != nil {
-		recv.Offset = BOGUS_FUNARG_OFFSET
-	}
-	unzeroFieldOffsets(params)
-	unzeroFieldOffsets(results)
-	ft.Receiver = funargs(recvs, FunargRcvr)
-	ft.Params = funargs(params, FunargParams)
-	ft.Results = funargs(results, FunargResults)
-	if fieldsHasShape(recvs) || fieldsHasShape(params) || fieldsHasShape(results) {
+	ft.allParams = allParams
+	ft.startParams = startParams
+	ft.startResults = startResults
+
+	ft.resultsTuple = funargs(allParams[startResults:])
+
+	if fieldsHasShape(allParams) {
 		t.SetHasShape(true)
 	}
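
Aside, not from the patch: the rewritten NewSignature above packs the receiver, parameters, and results into a single backing slice and derives the three views by index. Below is a minimal, runnable sketch of that layout; the names field, sig, and newSig are simplified stand-ins, not the compiler's own Field/Type.

	package main

	import "fmt"

	// field and sig are stand-ins mirroring funcType's
	// allParams/startParams/startResults layout.
	type field struct{ name string }

	type sig struct {
		allParams                 []field
		startParams, startResults int
	}

	func newSig(recv *field, params, results []field) sig {
		var s sig
		if recv != nil {
			s.allParams = append(s.allParams, *recv)
			s.startParams = 1
		}
		s.allParams = append(s.allParams, params...)
		s.startResults = s.startParams + len(params)
		s.allParams = append(s.allParams, results...)
		return s
	}

	func (s sig) recvs() []field   { return s.allParams[:s.startParams] }
	func (s sig) params() []field  { return s.allParams[s.startParams:s.startResults] }
	func (s sig) results() []field { return s.allParams[s.startResults:] }

	func main() {
		s := newSig(&field{"r"}, []field{{"x"}, {"y"}}, []field{{"z"}})
		fmt.Println(s.recvs(), s.params(), s.results()) // [{r}] [{x} {y}] [{z}]
	}
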
 
@@ -1743,7 +1761,7 @@
 // NewStruct returns a new struct with the given fields.
 func NewStruct(fields []*Field) *Type {
 	t := newType(TSTRUCT)
-	t.SetFields(fields)
+	t.setFields(fields)
 	if fieldsHasShape(fields) {
 		t.SetHasShape(true)
 	}
@@ -1792,7 +1810,7 @@
 		return IsReflexive(t.Elem())
 
 	case TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
+		for _, t1 := range t.Fields() {
 			if !IsReflexive(t1.Type) {
 				return false
 			}
@@ -1845,44 +1863,34 @@
 	return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || IsInterfaceMethod(m.Type) || m.Embedded == 2
 }
 
-// IsRuntimePkg reports whether p is package runtime.
-func IsRuntimePkg(p *Pkg) bool {
-	if base.Flag.CompilingRuntime && p == LocalPkg {
-		return true
+// RuntimeSymName returns the name of s if it's in package "runtime"; otherwise
+// it returns "".
+func RuntimeSymName(s *Sym) string {
+	if s.Pkg.Path == "runtime" {
+		return s.Name
 	}
-	return p.Path == "runtime"
+	return ""
 }
 
-// IsReflectPkg reports whether p is package reflect.
-func IsReflectPkg(p *Pkg) bool {
-	return p.Path == "reflect"
-}
-
-// IsTypePkg reports whether p is pesudo package type.
-func IsTypePkg(p *Pkg) bool {
-	return p == typepkg
+// ReflectSymName returns the name of s if it's in package "reflect"; otherwise
+// it returns "".
+func ReflectSymName(s *Sym) string {
+	if s.Pkg.Path == "reflect" {
+		return s.Name
+	}
+	return ""
 }
 
 // IsNoInstrumentPkg reports whether p is a package that
 // should not be instrumented.
 func IsNoInstrumentPkg(p *Pkg) bool {
-	for _, np := range base.NoInstrumentPkgs {
-		if p.Path == np {
-			return true
-		}
-	}
-	return false
+	return objabi.LookupPkgSpecial(p.Path).NoInstrument
 }
 
 // IsNoRacePkg reports whether p is a package that
 // should not be race instrumented.
 func IsNoRacePkg(p *Pkg) bool {
-	for _, np := range base.NoRacePkgs {
-		if p.Path == np {
-			return true
-		}
-	}
-	return false
+	return objabi.LookupPkgSpecial(p.Path).NoRaceFunc
 }
 
 // ReceiverBaseType returns the underlying type, if any,
diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go
new file mode 100644
index 0000000..06dfba1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/alias.go
@@ -0,0 +1,88 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "fmt"
+
+// An Alias represents an alias type.
+// Whether or not Alias types are created is controlled by the
+// gotypesalias setting of the GODEBUG environment variable.
+// For gotypesalias=1, alias declarations produce an Alias type.
+// Otherwise, the alias information is only in the type name,
+// which points directly to the actual (aliased) type.
+type Alias struct {
+	obj     *TypeName // corresponding declared alias object
+	fromRHS Type      // RHS of type alias declaration; may be an alias
+	actual  Type      // actual (aliased) type; never an alias
+}
+
+// NewAlias creates a new Alias type with the given type name and rhs.
+// rhs must not be nil.
+func NewAlias(obj *TypeName, rhs Type) *Alias {
+	alias := (*Checker)(nil).newAlias(obj, rhs)
+	// Ensure that alias.actual is set (#65455).
+	unalias(alias)
+	return alias
+}
+
+func (a *Alias) Obj() *TypeName   { return a.obj }
+func (a *Alias) Underlying() Type { return unalias(a).Underlying() }
+func (a *Alias) String() string   { return TypeString(a, nil) }
+
+// Type accessors
+
+// Unalias returns t if it is not an alias type;
+// otherwise it follows t's alias chain until it
+// reaches a non-alias type which is then returned.
+// Consequently, the result is never an alias type.
+func Unalias(t Type) Type {
+	if a0, _ := t.(*Alias); a0 != nil {
+		return unalias(a0)
+	}
+	return t
+}
+
+func unalias(a0 *Alias) Type {
+	if a0.actual != nil {
+		return a0.actual
+	}
+	var t Type
+	for a := a0; a != nil; a, _ = t.(*Alias) {
+		t = a.fromRHS
+	}
+	if t == nil {
+		panic(fmt.Sprintf("non-terminated alias %s", a0.obj.name))
+	}
+	a0.actual = t
+	return t
+}
+
+// asNamed returns t as *Named if that is t's
+// actual type. It returns nil otherwise.
+func asNamed(t Type) *Named {
+	n, _ := Unalias(t).(*Named)
+	return n
+}
+
+// newAlias creates a new Alias type with the given type name and rhs.
+// rhs must not be nil.
+func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias {
+	assert(rhs != nil)
+	a := &Alias{obj, rhs, nil}
+	if obj.typ == nil {
+		obj.typ = a
+	}
+
+	// Ensure that a.actual is set at the end of type checking.
+	if check != nil {
+		check.needsCleanup(a)
+	}
+
+	return a
+}
+
+func (a *Alias) cleanup() {
+	Unalias(a)
+}
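
Aside, not from the patch: types2 is internal to the compiler, but the exported go/types package gained the same Alias, NewAlias, and Unalias API in Go 1.22, so the intended behavior can be sketched against it (assuming the two packages mirror each other here). Note that the gotypesalias GODEBUG setting only controls whether the type checker itself creates Alias nodes for alias declarations; an explicit NewAlias always does.

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
	)

	func main() {
		// Build the alias A = int by hand and resolve it.
		obj := types.NewTypeName(token.NoPos, nil, "A", nil)
		alias := types.NewAlias(obj, types.Typ[types.Int])

		fmt.Println(types.Unalias(alias))                       // int (never an alias type)
		fmt.Println(alias.Underlying() == types.Typ[types.Int]) // true
	}
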
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index d0c0cdb..bb02d91 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -268,6 +268,15 @@
 	// scope, the function scopes are embedded in the file scope of the file
 	// containing the function declaration.
 	//
+	// The Scope of a function contains the declarations of any
+	// type parameters, parameters, and named results, plus any
+	// local declarations in the body block.
+	// It is coextensive with the complete extent of the
+	// function's syntax ([*syntax.FuncDecl] or [*syntax.FuncLit]).
+	// The Scopes mapping does not contain an entry for the
+	// function body ([*syntax.BlockStmt]); the function's scope is
+	// associated with the [*syntax.FuncType].
+	//
 	// The following node types may appear in Scopes:
 	//
 	//     *syntax.File
@@ -288,6 +297,13 @@
 	// in source order. Variables without an initialization expression do not
 	// appear in this list.
 	InitOrder []*Initializer
+
+	// FileVersions maps a file to its Go version string.
+	// If the file doesn't specify a version, the reported
+	// string is Config.GoVersion.
+	// Version strings begin with “go”, like “go1.21”, and
+	// are suitable for use with the [go/version] package.
+	FileVersions map[*syntax.PosBase]string
 }
 
 func (info *Info) recordTypes() bool {
@@ -330,6 +346,23 @@
 	return info.Uses[id]
 }
 
+// PkgNameOf returns the local package name defined by the import,
+// or nil if not found.
+//
+// For dot-imports, the package name is ".".
+//
+// Precondition: the Defs and Implicits maps are populated.
+func (info *Info) PkgNameOf(imp *syntax.ImportDecl) *PkgName {
+	var obj Object
+	if imp.LocalPkgName != nil {
+		obj = info.Defs[imp.LocalPkgName]
+	} else {
+		obj = info.Implicits[imp]
+	}
+	pkgname, _ := obj.(*PkgName)
+	return pkgname
+}
+
 // TypeAndValue reports the type and value (for constants)
 // of the corresponding expression.
 type TypeAndValue struct {
@@ -436,80 +469,3 @@
 	pkg := NewPackage(path, "")
 	return pkg, NewChecker(conf, pkg, info).Files(files)
 }
-
-// AssertableTo reports whether a value of type V can be asserted to have type T.
-//
-// The behavior of AssertableTo is unspecified in three cases:
-//   - if T is Typ[Invalid]
-//   - if V is a generalized interface; i.e., an interface that may only be used
-//     as a type constraint in Go code
-//   - if T is an uninstantiated generic type
-func AssertableTo(V *Interface, T Type) bool {
-	// Checker.newAssertableTo suppresses errors for invalid types, so we need special
-	// handling here.
-	if T.Underlying() == Typ[Invalid] {
-		return false
-	}
-	return (*Checker)(nil).newAssertableTo(nopos, V, T, nil)
-}
-
-// AssignableTo reports whether a value of type V is assignable to a variable
-// of type T.
-//
-// The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an
-// uninstantiated generic type.
-func AssignableTo(V, T Type) bool {
-	x := operand{mode: value, typ: V}
-	ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
-	return ok
-}
-
-// ConvertibleTo reports whether a value of type V is convertible to a value of
-// type T.
-//
-// The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an
-// uninstantiated generic type.
-func ConvertibleTo(V, T Type) bool {
-	x := operand{mode: value, typ: V}
-	return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
-}
-
-// Implements reports whether type V implements interface T.
-//
-// The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated
-// generic type.
-func Implements(V Type, T *Interface) bool {
-	if T.Empty() {
-		// All types (even Typ[Invalid]) implement the empty interface.
-		return true
-	}
-	// Checker.implements suppresses errors for invalid types, so we need special
-	// handling here.
-	if V.Underlying() == Typ[Invalid] {
-		return false
-	}
-	return (*Checker)(nil).implements(nopos, V, T, false, nil)
-}
-
-// Satisfies reports whether type V satisfies the constraint T.
-//
-// The behavior of Satisfies is unspecified if V is Typ[Invalid] or an uninstantiated
-// generic type.
-func Satisfies(V Type, T *Interface) bool {
-	return (*Checker)(nil).implements(nopos, V, T, true, nil)
-}
-
-// Identical reports whether x and y are identical types.
-// Receivers of Signature types are ignored.
-func Identical(x, y Type) bool {
-	var c comparer
-	return c.identical(x, y, nil)
-}
-
-// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
-// Receivers of Signature types are ignored.
-func IdenticalIgnoreTags(x, y Type) bool {
-	var c comparer
-	c.ignoreTags = true
-	return c.identical(x, y, nil)
-}
diff --git a/src/cmd/compile/internal/types2/api_predicates.go b/src/cmd/compile/internal/types2/api_predicates.go
new file mode 100644
index 0000000..480f711
--- /dev/null
+++ b/src/cmd/compile/internal/types2/api_predicates.go
@@ -0,0 +1,84 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements exported type predicates.
+
+package types2
+
+// AssertableTo reports whether a value of type V can be asserted to have type T.
+//
+// The behavior of AssertableTo is unspecified in three cases:
+//   - if T is Typ[Invalid]
+//   - if V is a generalized interface; i.e., an interface that may only be used
+//     as a type constraint in Go code
+//   - if T is an uninstantiated generic type
+func AssertableTo(V *Interface, T Type) bool {
+	// Checker.newAssertableTo suppresses errors for invalid types, so we need special
+	// handling here.
+	if !isValid(T.Underlying()) {
+		return false
+	}
+	return (*Checker)(nil).newAssertableTo(nopos, V, T, nil)
+}
+
+// AssignableTo reports whether a value of type V is assignable to a variable
+// of type T.
+//
+// The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
+func AssignableTo(V, T Type) bool {
+	x := operand{mode: value, typ: V}
+	ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
+	return ok
+}
+
+// ConvertibleTo reports whether a value of type V is convertible to a value of
+// type T.
+//
+// The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
+func ConvertibleTo(V, T Type) bool {
+	x := operand{mode: value, typ: V}
+	return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
+}
+
+// Implements reports whether type V implements interface T.
+//
+// The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated
+// generic type.
+func Implements(V Type, T *Interface) bool {
+	if T.Empty() {
+		// All types (even Typ[Invalid]) implement the empty interface.
+		return true
+	}
+	// Checker.implements suppresses errors for invalid types, so we need special
+	// handling here.
+	if !isValid(V.Underlying()) {
+		return false
+	}
+	return (*Checker)(nil).implements(nopos, V, T, false, nil)
+}
+
+// Satisfies reports whether type V satisfies the constraint T.
+//
+// The behavior of Satisfies is unspecified if V is Typ[Invalid] or an uninstantiated
+// generic type.
+func Satisfies(V Type, T *Interface) bool {
+	return (*Checker)(nil).implements(nopos, V, T, true, nil)
+}
+
+// Identical reports whether x and y are identical types.
+// Receivers of [Signature] types are ignored.
+func Identical(x, y Type) bool {
+	var c comparer
+	return c.identical(x, y, nil)
+}
+
+// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
+// Receivers of [Signature] types are ignored.
+func IdenticalIgnoreTags(x, y Type) bool {
+	var c comparer
+	c.ignoreTags = true
+	return c.identical(x, y, nil)
+}
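
Aside, not from the patch: the predicates collected in api_predicates.go are mirrored by the exported go/types package, which allows a quick, runnable sanity check of their behavior (assuming go/types and types2 agree here).

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		Int := types.Typ[types.Int]
		Float := types.Typ[types.Float64]

		fmt.Println(types.AssignableTo(Int, Float))  // false: no implicit conversion
		fmt.Println(types.ConvertibleTo(Int, Float)) // true: int -> float64 is a valid conversion
		fmt.Println(types.Identical(Int, Float))     // false
		fmt.Println(types.Identical(Int, Int))       // true
	}
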
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
index 0f50650..bacba71 100644
--- a/src/cmd/compile/internal/types2/api_test.go
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -8,11 +8,13 @@
 	"cmd/compile/internal/syntax"
 	"errors"
 	"fmt"
+	"internal/goversion"
 	"internal/testenv"
 	"reflect"
 	"regexp"
 	"sort"
 	"strings"
+	"sync"
 	"testing"
 
 	. "cmd/compile/internal/types2"
@@ -957,6 +959,80 @@
 	}
 }
 
+func TestPkgNameOf(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+
+	const src = `
+package p
+
+import (
+	. "os"
+	_ "io"
+	"math"
+	"path/filepath"
+	snort "sort"
+)
+
+// avoid imported and not used errors
+var (
+	_ = Open // os.Open
+	_ = math.Sin
+	_ = filepath.Abs
+	_ = snort.Ints
+)
+`
+
+	var tests = []struct {
+		path string // path string enclosed in "'s
+		want string
+	}{
+		{`"os"`, "."},
+		{`"io"`, "_"},
+		{`"math"`, "math"},
+		{`"path/filepath"`, "filepath"},
+		{`"sort"`, "snort"},
+	}
+
+	f := mustParse(src)
+	info := Info{
+		Defs:      make(map[*syntax.Name]Object),
+		Implicits: make(map[syntax.Node]Object),
+	}
+	var conf Config
+	conf.Importer = defaultImporter()
+	_, err := conf.Check("p", []*syntax.File{f}, &info)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// map import paths to importDecl
+	imports := make(map[string]*syntax.ImportDecl)
+	for _, d := range f.DeclList {
+		if imp, _ := d.(*syntax.ImportDecl); imp != nil {
+			imports[imp.Path.Value] = imp
+		}
+	}
+
+	for _, test := range tests {
+		imp := imports[test.path]
+		if imp == nil {
+			t.Fatalf("invalid test case: import path %s not found", test.path)
+		}
+		got := info.PkgNameOf(imp)
+		if got == nil {
+			t.Fatalf("import %s: package name not found", test.path)
+		}
+		if got.Name() != test.want {
+			t.Errorf("import %s: got %s; want %s", test.path, got.Name(), test.want)
+		}
+	}
+
+	// test non-existing importDecl
+	if got := info.PkgNameOf(new(syntax.ImportDecl)); got != nil {
+		t.Errorf("got %s for non-existing import declaration", got.Name())
+	}
+}
+
 func predString(tv TypeAndValue) string {
 	var buf strings.Builder
 	pred := func(b bool, s string) {
@@ -1816,12 +1892,12 @@
 type T struct{}
 var Y, _ = lib.X, X
 
-func F(){
+func F[T *U, U any](param1, param2 int) /*param1=undef*/ (res1 /*res1=undef*/, res2 int) /*param1=var:12*/ /*res1=var:12*/ /*U=typename:12*/ {
 	const pi, e = 3.1415, /*pi=undef*/ 2.71828 /*pi=const:13*/ /*e=const:13*/
 	type /*t=undef*/ t /*t=typename:14*/ *t
 	print(Y) /*Y=var:10*/
 	x, Y := Y, /*x=undef*/ /*Y=var:10*/ Pi /*x=var:16*/ /*Y=var:16*/ ; _ = x; _ = Y
-	var F = /*F=func:12*/ F /*F=var:17*/ ; _ = F
+	var F = /*F=func:12*/ F[*int, int] /*F=var:17*/ ; _ = F
 
 	var a []int
 	for i, x := range a /*i=undef*/ /*x=var:16*/ { _ = i; _ = x }
@@ -1840,6 +1916,10 @@
         	println(int)
         default /*int=var:31*/ :
         }
+
+	_ = param1
+	_ = res1
+	return
 }
 /*main=undef*/
 `
@@ -1905,7 +1985,29 @@
 
 		_, gotObj := inner.LookupParent(id.Value, id.Pos())
 		if gotObj != wantObj {
-			t.Errorf("%s: got %v, want %v", id.Pos(), gotObj, wantObj)
+			// Print the scope tree of mainScope in case of error.
+			var printScopeTree func(indent string, s *Scope)
+			printScopeTree = func(indent string, s *Scope) {
+				t.Logf("%sscope %s %v-%v = %v",
+					indent,
+					ScopeComment(s),
+					s.Pos(),
+					s.End(),
+					s.Names())
+				for i := range s.NumChildren() {
+					printScopeTree(indent+"  ", s.Child(i))
+				}
+			}
+			printScopeTree("", mainScope)
+
+			t.Errorf("%s: Scope(%s).LookupParent(%s@%v) got %v, want %v [scopePos=%v]",
+				id.Pos(),
+				ScopeComment(inner),
+				id.Value,
+				id.Pos(),
+				gotObj,
+				wantObj,
+				ObjectScopePos(wantObj))
 			continue
 		}
 	}
@@ -2093,6 +2195,12 @@
 	iface.NumMethods() // unlike go/types, there is no Complete() method, so we complete implicitly
 }
 
+func TestNewAlias_Issue65455(t *testing.T) {
+	obj := NewTypeName(nopos, nil, "A", nil)
+	alias := NewAlias(obj, Typ[Int])
+	alias.Underlying() // must not panic
+}
+
 func TestIssue15305(t *testing.T) {
 	const src = "package p; func f() int16; var _ = f(undef)"
 	f := mustParse(src)
@@ -2318,6 +2426,60 @@
 	}
 }
 
+func TestInstantiateConcurrent(t *testing.T) {
+	const src = `package p
+
+type I[P any] interface {
+	m(P)
+	n() P
+}
+
+type J = I[int]
+
+type Nested[P any] *interface{b(P)}
+
+type K = Nested[string]
+`
+	pkg := mustTypecheck(src, nil, nil)
+
+	insts := []*Interface{
+		pkg.Scope().Lookup("J").Type().Underlying().(*Interface),
+		pkg.Scope().Lookup("K").Type().Underlying().(*Pointer).Elem().(*Interface),
+	}
+
+	// Use the interface instances concurrently.
+	for _, inst := range insts {
+		var (
+			counts  [2]int      // method counts
+			methods [2][]string // method strings
+		)
+		var wg sync.WaitGroup
+		for i := 0; i < 2; i++ {
+			i := i
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+
+				counts[i] = inst.NumMethods()
+				for mi := 0; mi < counts[i]; mi++ {
+					methods[i] = append(methods[i], inst.Method(mi).String())
+				}
+			}()
+		}
+		wg.Wait()
+
+		if counts[0] != counts[1] {
+			t.Errorf("mismatching method counts for %s: %d vs %d", inst, counts[0], counts[1])
+			continue
+		}
+		for i := 0; i < counts[0]; i++ {
+			if m0, m1 := methods[0][i], methods[1][i]; m0 != m1 {
+				t.Errorf("mismatching methods for %s: %s vs %s", inst, m0, m1)
+			}
+		}
+	}
+}
+
 func TestInstantiateErrors(t *testing.T) {
 	tests := []struct {
 		src    string // by convention, T must be the type being instantiated
@@ -2709,3 +2871,69 @@
 		t.Errorf("src1: unexpected error: got %v", err)
 	}
 }
+
+func TestModuleVersion(t *testing.T) {
+	// version go1.dd must be able to typecheck go1.dd.0, go1.dd.1, etc.
+	goversion := fmt.Sprintf("go1.%d", goversion.Version)
+	for _, v := range []string{
+		goversion,
+		goversion + ".0",
+		goversion + ".1",
+		goversion + ".rc",
+	} {
+		conf := Config{GoVersion: v}
+		pkg := mustTypecheck("package p", &conf, nil)
+		if pkg.GoVersion() != conf.GoVersion {
+			t.Errorf("got %s; want %s", pkg.GoVersion(), conf.GoVersion)
+		}
+	}
+}
+
+func TestFileVersions(t *testing.T) {
+	for _, test := range []struct {
+		goVersion   string
+		fileVersion string
+		wantVersion string
+	}{
+		{"", "", ""},                   // no versions specified
+		{"go1.19", "", "go1.19"},       // module version specified
+		{"", "go1.20", ""},             // file upgrade ignored
+		{"go1.19", "go1.20", "go1.20"}, // file upgrade permitted
+		{"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted
+		{"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21)
+
+		// versions containing release numbers
+		// (file versions containing release numbers are considered invalid)
+		{"go1.19.0", "", "go1.19.0"},         // no file version specified
+		{"go1.20", "go1.20.1", "go1.20"},     // file upgrade ignored
+		{"go1.20.1", "go1.20", "go1.20.1"},   // file upgrade ignored
+		{"go1.20.1", "go1.21", "go1.21"},     // file upgrade permitted
+		{"go1.20.1", "go1.19", "go1.20.1"},   // file downgrade not permitted
+		{"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version)
+		{"go1.21.1", "go1.19", "go1.19"},     // file downgrade permitted (module version is >= go1.21)
+	} {
+		var src string
+		if test.fileVersion != "" {
+			src = "//go:build " + test.fileVersion + "\n"
+		}
+		src += "package p"
+
+		conf := Config{GoVersion: test.goVersion}
+		versions := make(map[*syntax.PosBase]string)
+		var info Info
+		info.FileVersions = versions
+		mustTypecheck(src, &conf, &info)
+
+		n := 0
+		for _, v := range info.FileVersions {
+			want := test.wantVersion
+			if v != want {
+				t.Errorf("%q: unexpected file version: got %v, want %v", src, v, want)
+			}
+			n++
+		}
+		if n != 1 {
+			t.Errorf("%q: incorrect number of map entries: got %d", src, n)
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go
index cba102e..8abafdb 100644
--- a/src/cmd/compile/internal/types2/assignments.go
+++ b/src/cmd/compile/internal/types2/assignments.go
@@ -102,7 +102,7 @@
 }
 
 func (check *Checker) initConst(lhs *Const, x *operand) {
-	if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
+	if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) {
 		if lhs.typ == nil {
 			lhs.typ = Typ[Invalid]
 		}
@@ -137,7 +137,7 @@
 // or Typ[Invalid] in case of an error.
 // If the initialization check fails, x.mode is set to invalid.
 func (check *Checker) initVar(lhs *Var, x *operand, context string) {
-	if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
+	if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) {
 		if lhs.typ == nil {
 			lhs.typ = Typ[Invalid]
 		}
@@ -170,7 +170,7 @@
 // and Typ[Invalid] if it is an invalid lhs expression.
 func (check *Checker) lhsVar(lhs syntax.Expr) Type {
 	// Determine if the lhs is a (possibly parenthesized) identifier.
-	ident, _ := unparen(lhs).(*syntax.Name)
+	ident, _ := syntax.Unparen(lhs).(*syntax.Name)
 
 	// Don't evaluate lhs if it is the blank identifier.
 	if ident != nil && ident.Value == "_" {
@@ -202,7 +202,7 @@
 		v.used = v_used // restore v.used
 	}
 
-	if x.mode == invalid || x.typ == Typ[Invalid] {
+	if x.mode == invalid || !isValid(x.typ) {
 		return Typ[Invalid]
 	}
 
@@ -232,9 +232,9 @@
 // assignVar checks the assignment lhs = rhs (if x == nil), or lhs = x (if x != nil).
 // If x != nil, it must be the evaluation of rhs (and rhs will be ignored).
 // If the assignment check fails and x != nil, x.mode is set to invalid.
-func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand) {
+func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand, context string) {
 	T := check.lhsVar(lhs) // nil if lhs is _
-	if T == Typ[Invalid] {
+	if !isValid(T) {
 		if x != nil {
 			x.mode = invalid
 		} else {
@@ -244,12 +244,18 @@
 	}
 
 	if x == nil {
+		var target *target
+		// avoid calling syntax.String if not needed
+		if T != nil {
+			if _, ok := under(T).(*Signature); ok {
+				target = newTarget(T, syntax.String(lhs))
+			}
+		}
 		x = new(operand)
-		check.expr(T, x, rhs)
+		check.expr(target, x, rhs)
 	}
 
-	context := "assignment"
-	if T == nil {
+	if T == nil && context == "assignment" {
 		context = "assignment to _ identifier"
 	}
 	check.assignment(x, T, context)
@@ -282,7 +288,7 @@
 		switch {
 		case t == nil:
 			fallthrough // should not happen but be cautious
-		case t == Typ[Invalid]:
+		case !isValid(t):
 			s = "unknown type"
 		case isUntyped(t):
 			if isNumeric(t) {
@@ -320,7 +326,7 @@
 	rhs0 := rhs[0]
 
 	if len(rhs) == 1 {
-		if call, _ := unparen(rhs0).(*syntax.CallExpr); call != nil {
+		if call, _ := syntax.Unparen(rhs0).(*syntax.CallExpr); call != nil {
 			check.errorf(rhs0, WrongAssignCount, "assignment mismatch: %s but %s returns %s", vars, call.Fun, vals)
 			return
 		}
@@ -361,7 +367,7 @@
 	// error message don't handle it as n:n mapping below.
 	isCall := false
 	if r == 1 {
-		_, isCall = unparen(orig_rhs[0]).(*syntax.CallExpr)
+		_, isCall = syntax.Unparen(orig_rhs[0]).(*syntax.CallExpr)
 	}
 
 	// If we have a n:n mapping from lhs variable to rhs expression,
@@ -369,7 +375,11 @@
 	if l == r && !isCall {
 		var x operand
 		for i, lhs := range lhs {
-			check.expr(lhs.typ, &x, orig_rhs[i])
+			desc := lhs.name
+			if returnStmt != nil && desc == "" {
+				desc = "result variable"
+			}
+			check.expr(newTarget(lhs.typ, desc), &x, orig_rhs[i])
 			check.initVar(lhs, &x, context)
 		}
 		return
@@ -436,14 +446,14 @@
 	// error message don't handle it as n:n mapping below.
 	isCall := false
 	if r == 1 {
-		_, isCall = unparen(orig_rhs[0]).(*syntax.CallExpr)
+		_, isCall = syntax.Unparen(orig_rhs[0]).(*syntax.CallExpr)
 	}
 
 	// If we have a n:n mapping from lhs variable to rhs expression,
 	// each value can be assigned to its corresponding variable.
 	if l == r && !isCall {
 		for i, lhs := range lhs {
-			check.assignVar(lhs, orig_rhs[i], nil)
+			check.assignVar(lhs, orig_rhs[i], nil, "assignment")
 		}
 		return
 	}
@@ -464,7 +474,7 @@
 	r = len(rhs)
 	if l == r {
 		for i, lhs := range lhs {
-			check.assignVar(lhs, nil, rhs[i])
+			check.assignVar(lhs, nil, rhs[i], "assignment")
 		}
 		// Only record comma-ok expression if both assignments succeeded
 		// (go.dev/issue/59371).
@@ -483,21 +493,6 @@
 	// orig_rhs[0] was already evaluated
 }
 
-// unpackExpr unpacks a *syntax.ListExpr into a list of syntax.Expr.
-// Helper introduced for the go/types -> types2 port.
-// TODO(gri) Should find a more efficient solution that doesn't
-// require introduction of a new slice for simple
-// expressions.
-func unpackExpr(x syntax.Expr) []syntax.Expr {
-	if x, _ := x.(*syntax.ListExpr); x != nil {
-		return x.ElemList
-	}
-	if x != nil {
-		return []syntax.Expr{x}
-	}
-	return nil
-}
-
 func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) {
 	top := len(check.delayed)
 	scope := check.scope
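
Aside, not from the patch: the target value threaded through assignVar and initVar above feeds reverse type inference, where the type of the assignment's left-hand side supplies missing type arguments for a generic function on the right (a Go 1.21 language feature). A small program that exercises the pattern:

	package main

	import "fmt"

	func id[T any](x T) T { return x }

	func main() {
		// T is inferred as int from the type of the assignment target f.
		var f func(int) int = id
		fmt.Println(f(41) + 1) // 42
	}
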
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
index 7a209e7..60f6d7f 100644
--- a/src/cmd/compile/internal/types2/builtins.go
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -206,7 +206,7 @@
 
 		if mode == invalid {
 			// avoid error if underlying type is invalid
-			if under(x.typ) != Typ[Invalid] {
+			if isValid(under(x.typ)) {
 				code := InvalidCap
 				if id == _Len {
 					code = InvalidLen
@@ -490,7 +490,7 @@
 		// (no argument evaluated yet)
 		arg0 := argList[0]
 		T := check.varType(arg0)
-		if T == Typ[Invalid] {
+		if !isValid(T) {
 			return
 		}
 
@@ -600,7 +600,7 @@
 		// new(T)
 		// (no argument evaluated yet)
 		T := check.varType(argList[0])
-		if T == Typ[Invalid] {
+		if !isValid(T) {
 			return
 		}
 
@@ -706,7 +706,7 @@
 		// unsafe.Offsetof(x T) uintptr, where x must be a selector
 		// (no argument evaluated yet)
 		arg0 := argList[0]
-		selx, _ := unparen(arg0).(*syntax.SelectorExpr)
+		selx, _ := syntax.Unparen(arg0).(*syntax.SelectorExpr)
 		if selx == nil {
 			check.errorf(arg0, BadOffsetofSyntax, invalidArg+"%s is not a selector expression", arg0)
 			check.use(arg0)
@@ -799,7 +799,7 @@
 		// unsafe.Slice(ptr *T, len IntegerType) []T
 		check.verifyVersionf(call.Fun, go1_17, "unsafe.Slice")
 
-		ptr, _ := under(x.typ).(*Pointer) // TODO(gri) should this be coreType rather than under?
+		ptr, _ := coreType(x.typ).(*Pointer)
 		if ptr == nil {
 			check.errorf(x, InvalidUnsafeSlice, invalidArg+"%s is not a pointer", x)
 			return
@@ -820,7 +820,7 @@
 		// unsafe.SliceData(slice []T) *T
 		check.verifyVersionf(call.Fun, go1_20, "unsafe.SliceData")
 
-		slice, _ := under(x.typ).(*Slice) // TODO(gri) should this be coreType rather than under?
+		slice, _ := coreType(x.typ).(*Slice)
 		if slice == nil {
 			check.errorf(x, InvalidUnsafeSliceData, invalidArg+"%s is not a slice", x)
 			return
@@ -923,7 +923,7 @@
 	// Cycles are only possible through *Named types.
 	// The seen map is used to detect cycles and track
 	// the results of previously seen types.
-	if named, _ := t.(*Named); named != nil {
+	if named := asNamed(t); named != nil {
 		if v, ok := seen[named]; ok {
 			return v
 		}
@@ -954,7 +954,7 @@
 }
 
 // applyTypeFunc applies f to x. If x is a type parameter,
-// the result is a type parameter constrained by an new
+// the result is a type parameter constrained by a new
 // interface bound. The type bounds for that interface
 // are computed by applying f to each of the type bounds
 // of x. If any of these applications of f return nil,
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
index 24f54c3..db7d86e 100644
--- a/src/cmd/compile/internal/types2/call.go
+++ b/src/cmd/compile/internal/types2/call.go
@@ -16,8 +16,8 @@
 // funcInst type-checks a function instantiation.
 // The incoming x must be a generic function.
 // If inst != nil, it provides some or all of the type arguments (inst.Index).
-// If target type tsig != nil, the signature may be used to infer missing type
-// arguments of x, if any. At least one of tsig or inst must be provided.
+// If T != nil, it may be used to infer missing type arguments of x, if any.
+// At least one of T or inst must be provided.
 //
 // There are two modes of operation:
 //
@@ -32,8 +32,8 @@
 //
 // If an error (other than a version error) occurs in any case, it is reported
 // and x.mode is set to invalid.
-func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst *syntax.IndexExpr, infer bool) ([]Type, []syntax.Expr) {
-	assert(tsig != nil || inst != nil)
+func (check *Checker) funcInst(T *target, pos syntax.Pos, x *operand, inst *syntax.IndexExpr, infer bool) ([]Type, []syntax.Expr) {
+	assert(T != nil || inst != nil)
 
 	var instErrPos poser
 	if inst != nil {
@@ -47,7 +47,7 @@
 	var targs []Type
 	var xlist []syntax.Expr
 	if inst != nil {
-		xlist = unpackExpr(inst.Index)
+		xlist = syntax.UnpackListExpr(inst.Index)
 		targs = check.typeList(xlist)
 		if targs == nil {
 			x.mode = invalid
@@ -87,7 +87,8 @@
 		//
 		var args []*operand
 		var params []*Var
-		if tsig != nil && sig.tparams != nil {
+		var reverse bool
+		if T != nil && sig.tparams != nil {
 			if !versionErr && !check.allowVersion(check.pkg, instErrPos, go1_21) {
 				if inst != nil {
 					check.versionErrorf(instErrPos, go1_21, "partially instantiated function in assignment")
@@ -100,15 +101,16 @@
 			// The type of the argument operand is tsig, which is the type of the LHS in an assignment
 			// or the result type in a return statement. Create a pseudo-expression for that operand
 			// that makes sense when reported in error messages from infer, below.
-			expr := syntax.NewName(x.Pos(), "variable in assignment")
-			args = []*operand{{mode: value, expr: expr, typ: tsig}}
+			expr := syntax.NewName(x.Pos(), T.desc)
+			args = []*operand{{mode: value, expr: expr, typ: T.sig}}
+			reverse = true
 		}
 
 		// Rename type parameters to avoid problems with recursive instantiations.
 		// Note that NewTuple(params...) below is (*Tuple)(nil) if len(params) == 0, as desired.
 		tparams, params2 := check.renameTParams(pos, sig.TypeParams().list(), NewTuple(params...))
 
-		targs = check.infer(pos, tparams, targs, params2.(*Tuple), args)
+		targs = check.infer(pos, tparams, targs, params2.(*Tuple), args, reverse)
 		if targs == nil {
 			// error was already reported
 			x.mode = invalid
@@ -258,7 +260,7 @@
 	var xlist []syntax.Expr
 	var targs []Type
 	if inst != nil {
-		xlist = unpackExpr(inst.Index)
+		xlist = syntax.UnpackListExpr(inst.Index)
 		targs = check.typeList(xlist)
 		if targs == nil {
 			check.use(call.ArgList...)
@@ -575,8 +577,7 @@
 				// Before we change the type (type parameter renaming, below), make
 				// a clone of it as otherwise we implicitly modify the object's type
 				// (go.dev/issues/63260).
-				clone := *asig
-				asig = &clone
+				asig = clone(asig)
 				// Rename type parameters for cases like f(g, g); this gives each
 				// generic function argument a unique type identity (go.dev/issues/59956).
 				// TODO(gri) Consider only doing this if a function argument appears
@@ -609,7 +610,7 @@
 
 	// infer missing type arguments of callee and function arguments
 	if len(tparams) > 0 {
-		targs = check.infer(call.Pos(), tparams, targs, sigParams, args)
+		targs = check.infer(call.Pos(), tparams, targs, sigParams, args, false)
 		if targs == nil {
 			// TODO(gri) If infer inferred the first targs[:n], consider instantiating
 			//           the call signature for better error messages/gopls behavior.
@@ -666,7 +667,7 @@
 	"_Cmacro_", // function to evaluate the expanded expression
 }
 
-func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *Named, wantType bool) {
+func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName, wantType bool) {
 	// these must be declared before the "goto Error" statements
 	var (
 		obj      Object
@@ -767,8 +768,8 @@
 	switch x.mode {
 	case typexpr:
 		// don't crash for "type T T.x" (was go.dev/issue/51509)
-		if def != nil && x.typ == def {
-			check.cycleError([]Object{def.obj})
+		if def != nil && def.typ == x.typ {
+			check.cycleError([]Object{def})
 			goto Error
 		}
 	case builtin:
@@ -800,7 +801,7 @@
 	obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
 	if obj == nil {
 		// Don't report another error if the underlying type was invalid (go.dev/issue/49541).
-		if under(x.typ) == Typ[Invalid] {
+		if !isValid(under(x.typ)) {
 			goto Error
 		}
 
@@ -961,7 +962,7 @@
 func (check *Checker) use1(e syntax.Expr, lhs bool) bool {
 	var x operand
 	x.mode = value // anything but invalid
-	switch n := unparen(e).(type) {
+	switch n := syntax.Unparen(e).(type) {
 	case nil:
 		// nothing to do
 	case *syntax.Name:
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
index 0a2a490..0582367 100644
--- a/src/cmd/compile/internal/types2/check.go
+++ b/src/cmd/compile/internal/types2/check.go
@@ -11,7 +11,7 @@
 	"errors"
 	"fmt"
 	"go/constant"
-	"internal/goversion"
+	"internal/godebug"
 	. "internal/types/errors"
 )
 
@@ -21,6 +21,9 @@
 // debugging/development support
 const debug = false // leave on during development
 
+// gotypesalias controls the use of Alias types.
+var gotypesalias = godebug.New("#gotypesalias")
+
 // exprInfo stores information about an untyped expression.
 type exprInfo struct {
 	isLhs bool // expression is lhs operand of a shift with delayed type-check
@@ -93,11 +96,17 @@
 type Checker struct {
 	// package information
 	// (initialized by NewChecker, valid for the life-time of checker)
+
+	// If enableAlias is set, alias declarations produce an Alias type.
+	// Otherwise the alias information is only in the type name, which
+	// points directly to the actual (aliased) type.
+	enableAlias bool
+
 	conf *Config
 	ctxt *Context // context for de-duplicating instances
 	pkg  *Package
 	*Info
-	version version                // accepted language version
+	version goVersion              // accepted language version
 	nextID  uint64                 // unique Id for type parameters (first valid Id is 1)
 	objMap  map[Object]*declInfo   // maps package-level objects and (non-interface) methods to declaration info
 	impMap  map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
@@ -117,7 +126,7 @@
 	// (initialized by Files, valid only for the duration of check.Files;
 	// maps and lists are allocated on demand)
 	files         []*syntax.File              // list of package files
-	posVers       map[*syntax.PosBase]version // Pos -> Go version mapping
+	versions      map[*syntax.PosBase]string  // maps file bases to version strings (each file has an entry)
 	imports       []*PkgName                  // list of imported packages
 	dotImportMap  map[dotImportKey]*PkgName   // maps dot-imported objects to the package they were dot-imported through
 	recvTParamMap map[*syntax.Name]*TypeParam // maps blank receiver type parameters to their type
@@ -152,9 +161,14 @@
 	from.addDep(to)
 }
 
+// Note: The following three alias-related functions are only used
+//       when Alias types are not enabled.
+
 // brokenAlias records that alias doesn't have a determined type yet.
 // It also sets alias.typ to Typ[Invalid].
+// Not used if check.enableAlias is set.
 func (check *Checker) brokenAlias(alias *TypeName) {
+	assert(!check.enableAlias)
 	if check.brokenAliases == nil {
 		check.brokenAliases = make(map[*TypeName]bool)
 	}
@@ -164,13 +178,15 @@
 
 // validAlias records that alias has the valid type typ (possibly Typ[Invalid]).
 func (check *Checker) validAlias(alias *TypeName, typ Type) {
+	assert(!check.enableAlias)
 	delete(check.brokenAliases, alias)
 	alias.typ = typ
 }
 
 // isBrokenAlias reports whether alias doesn't have a determined type yet.
 func (check *Checker) isBrokenAlias(alias *TypeName) bool {
-	return alias.typ == Typ[Invalid] && check.brokenAliases[alias]
+	assert(!check.enableAlias)
+	return check.brokenAliases[alias]
 }
 
 func (check *Checker) rememberUntyped(e syntax.Expr, lhs bool, mode operandMode, typ *Basic, val constant.Value) {
@@ -239,12 +255,14 @@
 	// (previously, pkg.goVersion was mutated here: go.dev/issue/61212)
 
 	return &Checker{
-		conf:   conf,
-		ctxt:   conf.Context,
-		pkg:    pkg,
-		Info:   info,
-		objMap: make(map[Object]*declInfo),
-		impMap: make(map[importKey]*Package),
+		enableAlias: gotypesalias.Value() == "1",
+		conf:        conf,
+		ctxt:        conf.Context,
+		pkg:         pkg,
+		Info:        info,
+		version:     asGoVersion(conf.GoVersion),
+		objMap:      make(map[Object]*declInfo),
+		impMap:      make(map[importKey]*Package),
 	}
 }
 
@@ -284,33 +302,51 @@
 		}
 	}
 
+	// reuse Info.FileVersions if provided
+	versions := check.Info.FileVersions
+	if versions == nil {
+		versions = make(map[*syntax.PosBase]string)
+	}
+	check.versions = versions
+
+	pkgVersionOk := check.version.isValid()
+	downgradeOk := check.version.cmp(go1_21) >= 0
+
+	// determine Go version for each file
 	for _, file := range check.files {
-		v, _ := parseGoVersion(file.GoVersion)
-		if v.major > 0 {
-			if v.equal(check.version) {
-				continue
+		// use unaltered Config.GoVersion by default
+		// (This version string may contain dot-release numbers as in go1.20.1,
+		// unlike file versions which are Go language versions only, if valid.)
+		v := check.conf.GoVersion
+		// use the file version, if applicable
+		// (file versions are either the empty string or of the form go1.dd)
+		if pkgVersionOk {
+			fileVersion := asGoVersion(file.GoVersion)
+			if fileVersion.isValid() {
+				cmp := fileVersion.cmp(check.version)
+				// Go 1.21 introduced the feature of setting the go.mod
+				// go line to an early version of Go and allowing //go:build lines
+				// to “upgrade” (cmp > 0) the Go version in a given file.
+				// We can do that backwards compatibly.
+				//
+				// Go 1.21 also introduced the feature of allowing //go:build lines
+				// to “downgrade” (cmp < 0) the Go version in a given file.
+				// That can't be done compatibly in general, since before the
+				// build lines were ignored and code got the module's Go version.
+				// To work around this, downgrades are only allowed when the
+				// module's Go version is Go 1.21 or later.
+				//
+				// If there is no valid check.version, then we don't really know what
+				// Go version to apply.
+				// Legacy tools may do this, and they historically have accepted everything.
+				// Preserve that behavior by ignoring //go:build constraints entirely in that
+				// case (!pkgVersionOk).
+				if cmp > 0 || cmp < 0 && downgradeOk {
+					v = file.GoVersion
+				}
 			}
-			// Go 1.21 introduced the feature of setting the go.mod
-			// go line to an early version of Go and allowing //go:build lines
-			// to “upgrade” the Go version in a given file.
-			// We can do that backwards compatibly.
-			// Go 1.21 also introduced the feature of allowing //go:build lines
-			// to “downgrade” the Go version in a given file.
-			// That can't be done compatibly in general, since before the
-			// build lines were ignored and code got the module's Go version.
-			// To work around this, downgrades are only allowed when the
-			// module's Go version is Go 1.21 or later.
-			// If there is no check.version, then we don't really know what Go version to apply.
-			// Legacy tools may do this, and they historically have accepted everything.
-			// Preserve that behavior by ignoring //go:build constraints entirely in that case.
-			if (v.before(check.version) && check.version.before(version{1, 21})) || check.version.equal(version{0, 0}) {
-				continue
-			}
-			if check.posVers == nil {
-				check.posVers = make(map[*syntax.PosBase]version)
-			}
-			check.posVers[base(file.Pos())] = v
 		}
+		versions[base(file.Pos())] = v // base(file.Pos()) may be nil for tests
 	}
 }
 
@@ -341,11 +377,8 @@
 		return nil
 	}
 
-	check.version, err = parseGoVersion(check.conf.GoVersion)
-	if err != nil {
-		return err
-	}
-	if check.version.after(version{1, goversion.Version}) {
+	// Note: NewChecker doesn't return an error, so we need to check the version here.
+	if check.version.cmp(go_current) > 0 {
 		return fmt.Errorf("package requires newer Go version %v", check.version)
 	}
 	if check.conf.FakeImportC && check.conf.go115UsesCgo {
@@ -496,7 +529,7 @@
 		assert(val != nil)
 		// We check allBasic(typ, IsConstType) here as constant expressions may be
 		// recorded as type parameters.
-		assert(typ == Typ[Invalid] || allBasic(typ, IsConstType))
+		assert(!isValid(typ) || allBasic(typ, IsConstType))
 	}
 	if m := check.Types; m != nil {
 		m[x] = TypeAndValue{mode, typ, val}
diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go
index 8cb3000..a9d6202 100644
--- a/src/cmd/compile/internal/types2/check_test.go
+++ b/src/cmd/compile/internal/types2/check_test.go
@@ -34,11 +34,13 @@
 	"cmd/compile/internal/syntax"
 	"flag"
 	"fmt"
+	"internal/buildcfg"
 	"internal/testenv"
 	"os"
 	"path/filepath"
 	"reflect"
 	"regexp"
+	"runtime"
 	"strconv"
 	"strings"
 	"testing"
@@ -110,6 +112,8 @@
 
 // testFiles type-checks the package consisting of the given files, and
 // compares the resulting errors with the ERROR annotations in the source.
+// Except for manual tests, each package is type-checked twice, once without
+// use of Alias types, and once with Alias types.
 //
 // The srcs slice contains the file content for the files named in the
 // filenames slice. The colDelta parameter specifies the tolerance for position
@@ -118,25 +122,25 @@
 //
 // If provided, opts may be used to mutate the Config before type-checking.
 func testFiles(t *testing.T, filenames []string, srcs [][]byte, colDelta uint, manual bool, opts ...func(*Config)) {
+	// Alias types are disabled by default
+	testFilesImpl(t, filenames, srcs, colDelta, manual, opts...)
+	if !manual {
+		t.Setenv("GODEBUG", "gotypesalias=1")
+		testFilesImpl(t, filenames, srcs, colDelta, manual, opts...)
+	}
+}
+
+func testFilesImpl(t *testing.T, filenames []string, srcs [][]byte, colDelta uint, manual bool, opts ...func(*Config)) {
 	if len(filenames) == 0 {
 		t.Fatal("no source files")
 	}
 
-	var conf Config
-	flags := flag.NewFlagSet("", flag.PanicOnError)
-	flags.StringVar(&conf.GoVersion, "lang", "", "")
-	flags.BoolVar(&conf.FakeImportC, "fakeImportC", false, "")
-	if err := parseFlags(srcs[0], flags); err != nil {
-		t.Fatal(err)
-	}
-
+	// parse files
 	files, errlist := parseFiles(t, filenames, srcs, 0)
-
 	pkgName := "<no package>"
 	if len(files) > 0 {
 		pkgName = files[0].PkgName.Value
 	}
-
 	listErrors := manual && !*verifyErrors
 	if listErrors && len(errlist) > 0 {
 		t.Errorf("--- %s:", pkgName)
@@ -145,7 +149,8 @@
 		}
 	}
 
-	// typecheck and collect typechecker errors
+	// set up typechecker
+	var conf Config
 	conf.Trace = manual && testing.Verbose()
 	conf.Importer = defaultImporter()
 	conf.Error = func(err error) {
@@ -159,12 +164,51 @@
 		errlist = append(errlist, err)
 	}
 
+	// apply custom configuration
 	for _, opt := range opts {
 		opt(&conf)
 	}
 
-	conf.Check(pkgName, files, nil)
+	// apply flag setting (overrides custom configuration)
+	var goexperiment, gotypesalias string
+	flags := flag.NewFlagSet("", flag.PanicOnError)
+	flags.StringVar(&conf.GoVersion, "lang", "", "")
+	flags.StringVar(&goexperiment, "goexperiment", "", "")
+	flags.BoolVar(&conf.FakeImportC, "fakeImportC", false, "")
+	flags.StringVar(&gotypesalias, "gotypesalias", "", "")
+	if err := parseFlags(srcs[0], flags); err != nil {
+		t.Fatal(err)
+	}
 
+	exp, err := buildcfg.ParseGOEXPERIMENT(runtime.GOOS, runtime.GOARCH, goexperiment)
+	if err != nil {
+		t.Fatal(err)
+	}
+	old := buildcfg.Experiment
+	defer func() {
+		buildcfg.Experiment = old
+	}()
+	buildcfg.Experiment = *exp
+
+	// By default, gotypesalias is not set.
+	if gotypesalias != "" {
+		t.Setenv("GODEBUG", "gotypesalias="+gotypesalias)
+	}
+
+	// Provide Config.Info with all maps so that info recording is tested.
+	info := Info{
+		Types:        make(map[syntax.Expr]TypeAndValue),
+		Instances:    make(map[*syntax.Name]Instance),
+		Defs:         make(map[*syntax.Name]Object),
+		Uses:         make(map[*syntax.Name]Object),
+		Implicits:    make(map[syntax.Node]Object),
+		Selections:   make(map[*syntax.SelectorExpr]*Selection),
+		Scopes:       make(map[syntax.Node]*Scope),
+		FileVersions: make(map[*syntax.PosBase]string),
+	}
+
+	// typecheck
+	conf.Check(pkgName, files, &info)
 	if listErrors {
 		return
 	}
@@ -345,6 +389,12 @@
 }
 
 func TestCheck(t *testing.T) {
+	old := buildcfg.Experiment.RangeFunc
+	defer func() {
+		buildcfg.Experiment.RangeFunc = old
+	}()
+	buildcfg.Experiment.RangeFunc = true
+
 	DefPredeclaredTestFuncs()
 	testDirFiles(t, "../../../../internal/types/testdata/check", 50, false) // TODO(gri) narrow column tolerance
 }
diff --git a/src/cmd/compile/internal/types2/context.go b/src/cmd/compile/internal/types2/context.go
index ae39c7b..7723124 100644
--- a/src/cmd/compile/internal/types2/context.go
+++ b/src/cmd/compile/internal/types2/context.go
@@ -79,7 +79,7 @@
 		h.typeList(targs)
 	}
 
-	return strings.Replace(buf.String(), " ", "#", -1) // ReplaceAll is not available in Go1.4
+	return strings.ReplaceAll(buf.String(), " ", "#")
 }
 
 // lookup returns an existing instantiation of orig with targs, if it exists.
diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go
index ef0094d..8027092 100644
--- a/src/cmd/compile/internal/types2/conversions.go
+++ b/src/cmd/compile/internal/types2/conversions.go
@@ -42,6 +42,14 @@
 	case constArg && isConstType(T):
 		// constant conversion
 		ok = constConvertibleTo(T, &x.val)
+		// A conversion from an integer constant to an integer type
+		// can only fail if there's overflow. Give a concise error.
+		// (go.dev/issue/63563)
+		if !ok && isInteger(x.typ) && isInteger(T) {
+			check.errorf(x, InvalidConversion, "constant %s overflows %s", x.val, T)
+			x.mode = invalid
+			return
+		}
 	case constArg && isTypeParam(T):
 		// x is convertible to T if it is convertible
 		// to each specific type in the type set of T.
@@ -58,7 +66,12 @@
 				return true
 			}
 			if !constConvertibleTo(u, nil) {
-				cause = check.sprintf("cannot convert %s to type %s (in %s)", x, u, T)
+				if isInteger(x.typ) && isInteger(u) {
+					// see comment above on constant conversion
+					cause = check.sprintf("constant %s overflows %s (in %s)", x.val, u, T)
+				} else {
+					cause = check.sprintf("cannot convert %s to type %s (in %s)", x, u, T)
+				}
 				return false
 			}
 			return true
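For reference, the new branch turns an out-of-range constant conversion into a short overflow message instead of the generic conversion error. A small example that hits both the ordinary and the type-parameter case (error text shown approximately):

package p

const c = 300

func _() {
	_ = byte(c) // error: constant 300 overflows byte
}

func _[T ~int8]() {
	_ = T(c) // error (abbreviated): constant 300 overflows int8 (in T)
}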
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index 88864cb..f3e3418 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -55,7 +55,7 @@
 
 // objDecl type-checks the declaration of obj in its respective (file) environment.
 // For the meaning of def, see Checker.definedType, in typexpr.go.
-func (check *Checker) objDecl(obj Object, def *Named) {
+func (check *Checker) objDecl(obj Object, def *TypeName) {
 	if check.conf.Trace && obj.Type() == nil {
 		if check.indent == 0 {
 			fmt.Println() // empty line between top-level objects for readability
@@ -251,10 +251,14 @@
 			// the syntactic information. We should consider storing
 			// this information explicitly in the object.
 			var alias bool
-			if d := check.objMap[obj]; d != nil {
-				alias = d.tdecl.Alias // package-level object
+			if check.enableAlias {
+				alias = obj.IsAlias()
 			} else {
-				alias = obj.IsAlias() // function local object
+				if d := check.objMap[obj]; d != nil {
+					alias = d.tdecl.Alias // package-level object
+				} else {
+					alias = obj.IsAlias() // function local object
+				}
 			}
 			if !alias {
 				ndef++
@@ -322,7 +326,11 @@
 	// If obj is a type alias, mark it as valid (not broken) in order to avoid follow-on errors.
 	tname, _ := obj.(*TypeName)
 	if tname != nil && tname.IsAlias() {
-		check.validAlias(tname, Typ[Invalid])
+		// If we use Alias nodes, it is initialized with Typ[Invalid].
+		// TODO(gri) Adjust this code if we initialize with nil.
+		if !check.enableAlias {
+			check.validAlias(tname, Typ[Invalid])
+		}
 	}
 
 	// report a more concise error for self references
@@ -387,7 +395,7 @@
 		if !isConstType(t) {
 			// don't report an error if the type is an invalid C (defined) type
 			// (go.dev/issue/22090)
-			if under(t) != Typ[Invalid] {
+			if isValid(under(t)) {
 				check.errorf(typ, InvalidConstType, "invalid constant type %s", t)
 			}
 			obj.typ = Typ[Invalid]
@@ -441,7 +449,7 @@
 	if lhs == nil || len(lhs) == 1 {
 		assert(lhs == nil || lhs[0] == obj)
 		var x operand
-		check.expr(obj.typ, &x, init)
+		check.expr(newTarget(obj.typ, obj.name), &x, init)
 		check.initVar(obj, &x, "variable declaration")
 		return
 	}
@@ -475,7 +483,7 @@
 
 // isImportedConstraint reports whether typ is an imported type constraint.
 func (check *Checker) isImportedConstraint(typ Type) bool {
-	named, _ := typ.(*Named)
+	named := asNamed(typ)
 	if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil {
 		return false
 	}
@@ -483,38 +491,50 @@
 	return u != nil && !u.IsMethodSet()
 }
 
-func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named) {
+func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeName) {
 	assert(obj.typ == nil)
 
 	var rhs Type
 	check.later(func() {
-		if t, _ := obj.typ.(*Named); t != nil { // type may be invalid
+		if t := asNamed(obj.typ); t != nil { // type may be invalid
 			check.validType(t)
 		}
 		// If typ is local, an error was already reported where typ is specified/defined.
 		_ = check.isImportedConstraint(rhs) && check.verifyVersionf(tdecl.Type, go1_18, "using type constraint %s", rhs)
 	}).describef(obj, "validType(%s)", obj.Name())
 
-	alias := tdecl.Alias
-	if alias && tdecl.TParamList != nil {
+	aliasDecl := tdecl.Alias
+	if aliasDecl && tdecl.TParamList != nil {
 		// The parser will ensure this but we may still get an invalid AST.
 		// Complain and continue as regular type definition.
 		check.error(tdecl, BadDecl, "generic type cannot be alias")
-		alias = false
+		aliasDecl = false
 	}
 
 	// alias declaration
-	if alias {
+	if aliasDecl {
 		check.verifyVersionf(tdecl, go1_9, "type aliases")
-		check.brokenAlias(obj)
-		rhs = check.typ(tdecl.Type)
-		check.validAlias(obj, rhs)
+		if check.enableAlias {
+			// TODO(gri) Should be able to use nil instead of Typ[Invalid] to mark
+			//           the alias as incomplete. Currently this causes problems
+			//           with certain cycles. Investigate.
+			alias := check.newAlias(obj, Typ[Invalid])
+			setDefType(def, alias)
+			rhs = check.definedType(tdecl.Type, obj)
+			assert(rhs != nil)
+			alias.fromRHS = rhs
+			Unalias(alias) // resolve alias.actual
+		} else {
+			check.brokenAlias(obj)
+			rhs = check.typ(tdecl.Type)
+			check.validAlias(obj, rhs)
+		}
 		return
 	}
 
 	// type definition or generic type declaration
 	named := check.newNamed(obj, nil, nil)
-	def.setUnderlying(named)
+	setDefType(def, named)
 
 	if tdecl.TParamList != nil {
 		check.openScope(tdecl, "type parameters")
@@ -523,7 +543,7 @@
 	}
 
 	// determine underlying type of named
-	rhs = check.definedType(tdecl.Type, named)
+	rhs = check.definedType(tdecl.Type, obj)
 	assert(rhs != nil)
 	named.fromRHS = rhs
 
@@ -550,8 +570,11 @@
 	// Declare type parameters up-front.
 	// The scope of type parameters starts at the beginning of the type parameter
 	// list (so we can have mutually recursive parameterized type bounds).
-	for i, f := range list {
-		tparams[i] = check.declareTypeParam(f.Name)
+	if len(list) > 0 {
+		scopePos := list[0].Pos()
+		for i, f := range list {
+			tparams[i] = check.declareTypeParam(f.Name, scopePos)
+		}
 	}
 
 	// Set the type parameters before collecting the type constraints because
@@ -608,7 +631,7 @@
 	return check.typ(x)
 }
 
-func (check *Checker) declareTypeParam(name *syntax.Name) *TypeParam {
+func (check *Checker) declareTypeParam(name *syntax.Name, scopePos syntax.Pos) *TypeParam {
 	// Use Typ[Invalid] for the type constraint to ensure that a type
 	// is present even if the actual constraint has not been assigned
 	// yet.
@@ -616,8 +639,8 @@
 	//           constraints to make sure we don't rely on them if they
 	//           are not properly set yet.
 	tname := NewTypeName(name.Pos(), check.pkg, name.Value, nil)
-	tpar := check.newTypeParam(tname, Typ[Invalid])          // assigns type to tname as a side-effect
-	check.declare(check.scope, name, tname, check.scope.pos) // TODO(gri) check scope position
+	tpar := check.newTypeParam(tname, Typ[Invalid]) // assigns type to tname as a side-effect
+	check.declare(check.scope, name, tname, scopePos)
 	return tpar
 }
 
@@ -638,7 +661,7 @@
 
 	// spec: "If the base type is a struct type, the non-blank method
 	// and field names must be distinct."
-	base, _ := obj.typ.(*Named) // shouldn't fail but be conservative
+	base := asNamed(obj.typ) // shouldn't fail but be conservative
 	if base != nil {
 		assert(base.TypeArgs().Len() == 0) // collectMethods should not be called on an instantiated type
 
@@ -730,6 +753,11 @@
 	check.funcType(sig, fdecl.Recv, fdecl.TParamList, fdecl.Type)
 	obj.color_ = saved
 
+	// Set the scope's extent to the complete "func (...) { ... }"
+	// so that Scope.Innermost works correctly.
+	sig.scope.pos = fdecl.Pos()
+	sig.scope.end = syntax.EndPos(fdecl)
+
 	if len(fdecl.TParamList) > 0 && fdecl.Body == nil {
 		check.softErrorf(fdecl, BadDecl, "generic function is missing function body")
 	}
@@ -777,7 +805,7 @@
 
 			// declare all constants
 			lhs := make([]*Const, len(s.NameList))
-			values := unpackExpr(last.Values)
+			values := syntax.UnpackListExpr(last.Values)
 			for i, name := range s.NameList {
 				obj := NewConst(name.Pos(), pkg, name.Value, nil, iota)
 				lhs[i] = obj
@@ -814,7 +842,7 @@
 			}
 
 			// initialize all variables
-			values := unpackExpr(s.Values)
+			values := syntax.UnpackListExpr(s.Values)
 			for i, obj := range lhs0 {
 				var lhs []*Var
 				var init syntax.Expr
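The aliasDecl handling above covers both the new Alias-node path and the legacy path; the error branch corresponds to source like the following (in this release an alias declaration cannot carry its own type parameters):

package p

type ok = map[string]int // a plain alias is fine (requires go1.9 or later)

type bad[T any] = []T // error: generic type cannot be alias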
diff --git a/src/cmd/compile/internal/types2/errorcalls_test.go b/src/cmd/compile/internal/types2/errorcalls_test.go
index 6153b42..ba4dc87 100644
--- a/src/cmd/compile/internal/types2/errorcalls_test.go
+++ b/src/cmd/compile/internal/types2/errorcalls_test.go
@@ -1,6 +1,6 @@
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE ast.
+// license that can be found in the LICENSE file.
 
 package types2_test
 
diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go
index 7db06d9..b8414b4 100644
--- a/src/cmd/compile/internal/types2/errors.go
+++ b/src/cmd/compile/internal/types2/errors.go
@@ -250,7 +250,7 @@
 		pos = check.errpos
 	}
 
-	// If we have an URL for error codes, add a link to the first line.
+	// If we have a URL for error codes, add a link to the first line.
 	if code != 0 && check.conf.ErrorURL != "" {
 		u := fmt.Sprintf(check.conf.ErrorURL, code)
 		if i := strings.Index(msg, "\n"); i >= 0 {
@@ -297,7 +297,7 @@
 	check.err(at, code, check.sprintf(format, args...), true)
 }
 
-func (check *Checker) versionErrorf(at poser, v version, format string, args ...interface{}) {
+func (check *Checker) versionErrorf(at poser, v goVersion, format string, args ...interface{}) {
 	msg := check.sprintf(format, args...)
 	msg = fmt.Sprintf("%s requires %s or later", msg, v)
 	check.err(at, UnsupportedFeature, msg, true)
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
index 67afbfb..124d970 100644
--- a/src/cmd/compile/internal/types2/expr.go
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -147,7 +147,7 @@
 	case syntax.And:
 		// spec: "As an exception to the addressability
 		// requirement x may also be a composite literal."
-		if _, ok := unparen(e.X).(*syntax.CompositeLit); !ok && x.mode != variable {
+		if _, ok := syntax.Unparen(e.X).(*syntax.CompositeLit); !ok && x.mode != variable {
 			check.errorf(x, UnaddressableOperand, invalidOp+"cannot take address of %s", x)
 			x.mode = invalid
 			return
@@ -392,7 +392,7 @@
 // If x is a constant operand, the returned constant.Value will be the
 // representation of x in this context.
 func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, constant.Value, Code) {
-	if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] {
+	if x.mode == invalid || isTyped(x.typ) || !isValid(target) {
 		return x.typ, nil, 0
 	}
 	// x is untyped
@@ -474,7 +474,7 @@
 // If switchCase is true, the operator op is ignored.
 func (check *Checker) comparison(x, y *operand, op syntax.Operator, switchCase bool) {
 	// Avoid spurious errors if any of the operands has an invalid type (go.dev/issue/54405).
-	if x.typ == Typ[Invalid] || y.typ == Typ[Invalid] {
+	if !isValid(x.typ) || !isValid(y.typ) {
 		x.mode = invalid
 		return
 	}
@@ -828,7 +828,7 @@
 	if !Identical(x.typ, y.typ) {
 		// only report an error if we have valid types
 		// (otherwise we had an error reported elsewhere already)
-		if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] {
+		if isValid(x.typ) && isValid(y.typ) {
 			if e != nil {
 				check.errorf(x, MismatchedTypes, invalidOp+"%s (mismatched types %s and %s)", e, x.typ, y.typ)
 			} else {
@@ -956,18 +956,32 @@
 	statement
 )
 
-// TODO(gri) In rawExpr below, consider using T instead of hint and
-//           some sort of "operation mode" instead of allowGeneric.
-//           May be clearer and less error-prone.
+// target represents the (signature) type and description of the LHS
+// variable of an assignment, or of a function result variable.
+type target struct {
+	sig  *Signature
+	desc string
+}
+
+// newTarget creates a new target for the given type and description.
+// The result is nil if typ is not a signature.
+func newTarget(typ Type, desc string) *target {
+	if typ != nil {
+		if sig, _ := under(typ).(*Signature); sig != nil {
+			return &target{sig, desc}
+		}
+	}
+	return nil
+}
 
 // rawExpr typechecks expression e and initializes x with the expression
 // value or type. If an error occurred, x.mode is set to invalid.
-// If a non-nil target type T is given and e is a generic function
-// or function call, T is used to infer the type arguments for e.
+// If a non-nil target T is given and e is a generic function,
+// T is used to infer the type arguments for e.
 // If hint != nil, it is the type of a composite literal element.
 // If allowGeneric is set, the operand type may be an uninstantiated
 // parameterized type or function value.
-func (check *Checker) rawExpr(T Type, x *operand, e syntax.Expr, hint Type, allowGeneric bool) exprKind {
+func (check *Checker) rawExpr(T *target, x *operand, e syntax.Expr, hint Type, allowGeneric bool) exprKind {
 	if check.conf.Trace {
 		check.trace(e.Pos(), "-- expr %s", e)
 		check.indent++
@@ -989,9 +1003,9 @@
 }
 
 // If x is a generic type, or a generic function whose type arguments cannot be inferred
-// from a non-nil target type T, nonGeneric reports an error and invalidates x.mode and x.typ.
+// from a non-nil target T, nonGeneric reports an error and invalidates x.mode and x.typ.
 // Otherwise it leaves x alone.
-func (check *Checker) nonGeneric(T Type, x *operand) {
+func (check *Checker) nonGeneric(T *target, x *operand) {
 	if x.mode == invalid || x.mode == novalue {
 		return
 	}
@@ -1004,10 +1018,8 @@
 	case *Signature:
 		if t.tparams != nil {
 			if enableReverseTypeInference && T != nil {
-				if tsig, _ := under(T).(*Signature); tsig != nil {
-					check.funcInst(tsig, x.Pos(), x, nil, true)
-					return
-				}
+				check.funcInst(T, x.Pos(), x, nil, true)
+				return
 			}
 			what = "function"
 		}
@@ -1022,7 +1034,7 @@
 // exprInternal contains the core of type checking of expressions.
 // Must only be called by rawExpr.
 // (See rawExpr for an explanation of the parameters.)
-func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type) exprKind {
+func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Type) exprKind {
 	// make sure x has a valid state in case of bailout
 	// (was go.dev/issue/5770)
 	x.mode = invalid
@@ -1081,6 +1093,10 @@
 
 	case *syntax.FuncLit:
 		if sig, ok := check.typ(e.Type).(*Signature); ok {
+			// Set the Scope's extent to the complete "func (...) {...}"
+			// so that Scope.Innermost works correctly.
+			sig.scope.pos = e.Pos()
+			sig.scope.end = syntax.EndPos(e)
 			if !check.conf.IgnoreFuncBodies && e.Body != nil {
 				// Anonymous functions are considered part of the
 				// init expression/func declaration which contains
@@ -1308,7 +1324,7 @@
 				check.use(e)
 			}
 			// if utyp is invalid, an error was reported before
-			if utyp != Typ[Invalid] {
+			if isValid(utyp) {
 				check.errorf(e, InvalidLit, "invalid composite literal type %s", typ)
 				goto Error
 			}
@@ -1328,11 +1344,10 @@
 
 	case *syntax.IndexExpr:
 		if check.indexExpr(x, e) {
-			var tsig *Signature
-			if enableReverseTypeInference && T != nil {
-				tsig, _ = under(T).(*Signature)
+			if !enableReverseTypeInference {
+				T = nil
 			}
-			check.funcInst(tsig, e.Pos(), x, e, true)
+			check.funcInst(T, e.Pos(), x, e, true)
 		}
 		if x.mode == invalid {
 			goto Error
@@ -1363,7 +1378,7 @@
 			goto Error
 		}
 		T := check.varType(e.Type)
-		if T == Typ[Invalid] {
+		if !isValid(T) {
 			goto Error
 		}
 		check.typeAssertion(e, x, T, false)
@@ -1543,11 +1558,11 @@
 }
 
 // expr typechecks expression e and initializes x with the expression value.
-// If a non-nil target type T is given and e is a generic function
-// or function call, T is used to infer the type arguments for e.
+// If a non-nil target T is given and e is a generic function or
+// a function call, T is used to infer the type arguments for e.
 // The result must be a single value.
 // If an error occurred, x.mode is set to invalid.
-func (check *Checker) expr(T Type, x *operand, e syntax.Expr) {
+func (check *Checker) expr(T *target, x *operand, e syntax.Expr) {
 	check.rawExpr(T, x, e, nil, false)
 	check.exclude(x, 1<<novalue|1<<builtin|1<<typexpr)
 	check.singleValue(x)
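The target type introduced here is what carries an assignment's left-hand-side signature into funcInst, enabling so-called reverse type inference. A sketch of what that buys at the language level (names are illustrative):

package main

import "fmt"

func apply[S ~[]E, E any](s S, f func(E) E) S {
	r := make(S, len(s))
	for i, x := range s {
		r[i] = f(x)
	}
	return r
}

func double(x int) int { return x * 2 }

func main() {
	// The type arguments of apply are inferred from the assignment target:
	// g has type func([]int, func(int) int) []int, so S = []int and E = int.
	var g func([]int, func(int) int) []int = apply
	fmt.Println(g([]int{1, 2, 3}, double)) // [2 4 6]
}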
diff --git a/src/cmd/compile/internal/types2/gcsizes.go b/src/cmd/compile/internal/types2/gcsizes.go
new file mode 100644
index 0000000..d204d9f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/gcsizes.go
@@ -0,0 +1,170 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+type gcSizes struct {
+	WordSize int64 // word size in bytes - must be >= 4 (32bits)
+	MaxAlign int64 // maximum alignment in bytes - must be >= 1
+}
+
+func (s *gcSizes) Alignof(T Type) (result int64) {
+	defer func() {
+		assert(result >= 1)
+	}()
+
+	// For arrays and structs, alignment is defined in terms
+	// of alignment of the elements and fields, respectively.
+	switch t := under(T).(type) {
+	case *Array:
+		// spec: "For a variable x of array type: unsafe.Alignof(x)
+		// is the same as unsafe.Alignof(x[0]), but at least 1."
+		return s.Alignof(t.elem)
+	case *Struct:
+		if len(t.fields) == 0 && IsSyncAtomicAlign64(T) {
+			// Special case: sync/atomic.align64 is an
+			// empty struct we recognize as a signal that
+			// the struct it contains must be
+			// 64-bit-aligned.
+			//
+			// This logic is equivalent to the logic in
+			// cmd/compile/internal/types/size.go:calcStructOffset
+			return 8
+		}
+
+		// spec: "For a variable x of struct type: unsafe.Alignof(x)
+		// is the largest of the values unsafe.Alignof(x.f) for each
+		// field f of x, but at least 1."
+		max := int64(1)
+		for _, f := range t.fields {
+			if a := s.Alignof(f.typ); a > max {
+				max = a
+			}
+		}
+		return max
+	case *Slice, *Interface:
+		// Multiword data structures are effectively structs
+		// in which each element has size WordSize.
+		// Type parameters lead to variable sizes/alignments;
+		// StdSizes.Alignof won't be called for them.
+		assert(!isTypeParam(T))
+		return s.WordSize
+	case *Basic:
+		// Strings are like slices and interfaces.
+		if t.Info()&IsString != 0 {
+			return s.WordSize
+		}
+	case *TypeParam, *Union:
+		unreachable()
+	}
+	a := s.Sizeof(T) // may be 0 or negative
+	// spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
+	if a < 1 {
+		return 1
+	}
+	// complex{64,128} are aligned like [2]float{32,64}.
+	if isComplex(T) {
+		a /= 2
+	}
+	if a > s.MaxAlign {
+		return s.MaxAlign
+	}
+	return a
+}
+
+func (s *gcSizes) Offsetsof(fields []*Var) []int64 {
+	offsets := make([]int64, len(fields))
+	var offs int64
+	for i, f := range fields {
+		if offs < 0 {
+			// all remaining offsets are too large
+			offsets[i] = -1
+			continue
+		}
+		// offs >= 0
+		a := s.Alignof(f.typ)
+		offs = align(offs, a) // possibly < 0 if align overflows
+		offsets[i] = offs
+		if d := s.Sizeof(f.typ); d >= 0 && offs >= 0 {
+			offs += d // ok to overflow to < 0
+		} else {
+			offs = -1 // f.typ or offs is too large
+		}
+	}
+	return offsets
+}
+
+func (s *gcSizes) Sizeof(T Type) int64 {
+	switch t := under(T).(type) {
+	case *Basic:
+		assert(isTyped(T))
+		k := t.kind
+		if int(k) < len(basicSizes) {
+			if s := basicSizes[k]; s > 0 {
+				return int64(s)
+			}
+		}
+		if k == String {
+			return s.WordSize * 2
+		}
+	case *Array:
+		n := t.len
+		if n <= 0 {
+			return 0
+		}
+		// n > 0
+		esize := s.Sizeof(t.elem)
+		if esize < 0 {
+			return -1 // element too large
+		}
+		if esize == 0 {
+			return 0 // 0-size element
+		}
+		// esize > 0
+		// Final size is esize * n; and size must be <= maxInt64.
+		const maxInt64 = 1<<63 - 1
+		if esize > maxInt64/n {
+			return -1 // esize * n overflows
+		}
+		return esize * n
+	case *Slice:
+		return s.WordSize * 3
+	case *Struct:
+		n := t.NumFields()
+		if n == 0 {
+			return 0
+		}
+		offsets := s.Offsetsof(t.fields)
+		offs := offsets[n-1]
+		size := s.Sizeof(t.fields[n-1].typ)
+		if offs < 0 || size < 0 {
+			return -1 // type too large
+		}
+		// gc: The last field of a non-zero-sized struct is not allowed to
+		// have size 0.
+		if offs > 0 && size == 0 {
+			size = 1
+		}
+		// gc: Size includes alignment padding.
+		return align(offs+size, s.Alignof(t)) // may overflow to < 0 which is ok
+	case *Interface:
+		// Type parameters lead to variable sizes/alignments;
+		// StdSizes.Sizeof won't be called for them.
+		assert(!isTypeParam(T))
+		return s.WordSize * 2
+	case *TypeParam, *Union:
+		unreachable()
+	}
+	return s.WordSize // catch-all
+}
+
+// gcSizesFor returns the Sizes used by gc for an architecture.
+// The result is a nil *gcSizes pointer (which is not a valid types.Sizes)
+// if a compiler/architecture pair is not known.
+func gcSizesFor(compiler, arch string) *gcSizes {
+	if compiler != "gc" {
+		return nil
+	}
+	return gcArchSizes[arch]
+}
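gcSizes reproduces the layout rules of the gc compiler, including the extra byte reserved when a non-zero-sized struct ends in a zero-sized field. From the exported side these rules are reachable through go/types.SizesFor; a small sketch, assuming an amd64 target:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// struct { a int32; b int64; c struct{} }
	fields := []*types.Var{
		types.NewField(token.NoPos, nil, "a", types.Typ[types.Int32], false),
		types.NewField(token.NoPos, nil, "b", types.Typ[types.Int64], false),
		types.NewField(token.NoPos, nil, "c", types.NewStruct(nil, nil), false),
	}
	s := types.NewStruct(fields, nil)

	sizes := types.SizesFor("gc", "amd64")
	// With gc layout rules the trailing zero-sized field forces the struct to
	// reserve an extra padded byte, so the size comes out to 24 rather than
	// the 16 a naive layout would give; alignment is 8.
	fmt.Println(sizes.Sizeof(s), sizes.Alignof(s))
}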
diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go
index 3ebe851..4db2213 100644
--- a/src/cmd/compile/internal/types2/index.go
+++ b/src/cmd/compile/internal/types2/index.go
@@ -29,7 +29,7 @@
 		x.mode = invalid
 		// TODO(gri) here we re-evaluate e.X - try to avoid this
 		x.typ = check.varType(e)
-		if x.typ != Typ[Invalid] {
+		if isValid(x.typ) {
 			x.mode = typexpr
 		}
 		return false
@@ -428,7 +428,7 @@
 		validIndex := false
 		eval := e
 		if kv, _ := e.(*syntax.KeyValueExpr); kv != nil {
-			if typ, i := check.index(kv.Key, length); typ != Typ[Invalid] {
+			if typ, i := check.index(kv.Key, length); isValid(typ) {
 				if i >= 0 {
 					index = i
 					validIndex = true
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index 5eb916c..a520f70 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -24,10 +24,16 @@
 // based on the given type parameters tparams, type arguments targs, function parameters params, and
 // function arguments args, if any. There must be at least one type parameter, no more type arguments
 // than type parameters, and params and args must match in number (incl. zero).
+// If reverse is set, the two sides of an error message are swapped, which produces a better
+// message for some errors related to reverse type inference (where the function call is synthetic).
 // If successful, infer returns the complete list of given and inferred type arguments, one for each
 // type parameter. Otherwise the result is nil and appropriate errors will be reported.
-func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand) (inferred []Type) {
-	if debug {
+func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand, reverse bool) (inferred []Type) {
+	// Don't verify result conditions if there's no error handler installed:
+	// in that case, an error leads to an exit panic and the result value may
+	// be incorrect. But in that case it doesn't matter because callers won't
+	// be able to use it either.
+	if check.conf.Error != nil {
 		defer func() {
 			assert(inferred == nil || len(inferred) == len(tparams) && !containsNil(inferred))
 		}()
@@ -52,6 +58,14 @@
 		return targs
 	}
 
+	// If we have invalid (ordinary) arguments, an error was reported before.
+	// Avoid additional inference errors and exit early (go.dev/issue/60434).
+	for _, arg := range args {
+		if arg.mode == invalid {
+			return nil
+		}
+	}
+
 	// Make sure we have a "full" list of type arguments, some of which may
 	// be nil (unknown). Make a copy so as to not clobber the incoming slice.
 	if len(targs) < n {
@@ -98,7 +112,7 @@
 	// Terminology: generic parameter = function parameter with a type-parameterized type
 	u := newUnifier(tparams, targs, check.allowVersion(check.pkg, pos, go1_21))
 
-	errorf := func(kind string, tpar, targ Type, arg *operand) {
+	errorf := func(tpar, targ Type, arg *operand) {
 		// provide a better error message if we can
 		targs := u.inferred(tparams)
 		if targs[0] == nil {
@@ -113,7 +127,7 @@
 				}
 			}
 			if allFailed {
-				check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match %s (cannot infer %s)", kind, targ, arg.expr, tpar, typeParamsString(tparams))
+				check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match %s (cannot infer %s)", targ, arg.expr, tpar, typeParamsString(tparams))
 				return
 			}
 		}
@@ -125,9 +139,13 @@
 		// InvalidTypeArg). We can't differentiate these cases, so fall back on
 		// the more general CannotInferTypeArgs.
 		if inferred != tpar {
-			check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match inferred type %s for %s", kind, targ, arg.expr, inferred, tpar)
+			if reverse {
+				check.errorf(arg, CannotInferTypeArgs, "inferred type %s for %s does not match type %s of %s", inferred, tpar, targ, arg.expr)
+			} else {
+				check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match inferred type %s for %s", targ, arg.expr, inferred, tpar)
+			}
 		} else {
-			check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match %s", kind, targ, arg.expr, tpar)
+			check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match %s", targ, arg.expr, tpar)
 		}
 	}
 
@@ -156,7 +174,7 @@
 			// Collect the indices of untyped arguments and handle them later.
 			if isTyped(arg.typ) {
 				if !u.unify(par.typ, arg.typ, assign) {
-					errorf("type", par.typ, arg.typ, arg)
+					errorf(par.typ, arg.typ, arg)
 					return nil
 				}
 			} else if _, ok := par.typ.(*TypeParam); ok && !arg.isNil() {
@@ -538,6 +556,9 @@
 	case *Basic:
 		// nothing to do
 
+	case *Alias:
+		return w.isParameterized(Unalias(t))
+
 	case *Array:
 		return w.isParameterized(t.elem)
 
@@ -689,6 +710,9 @@
 	case *Basic:
 		// nothing to do
 
+	case *Alias:
+		w.typ(Unalias(t))
+
 	case *Array:
 		w.typ(t.elem)
 
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
index 6024035..e33d4b4 100644
--- a/src/cmd/compile/internal/types2/instantiate.go
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -122,7 +122,8 @@
 		assert(expanding == nil) // function instances cannot be reached from Named types
 
 		tparams := orig.TypeParams()
-		if !check.validateTArgLen(pos, tparams.Len(), len(targs)) {
+		// TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here)
+		if !check.validateTArgLen(pos, orig.String(), tparams.Len(), len(targs)) {
 			return Typ[Invalid]
 		}
 		if tparams.Len() == 0 {
@@ -150,19 +151,27 @@
 	return updateContexts(res)
 }
 
-// validateTArgLen verifies that the length of targs and tparams matches,
-// reporting an error if not. If validation fails and check is nil,
-// validateTArgLen panics.
-func (check *Checker) validateTArgLen(pos syntax.Pos, ntparams, ntargs int) bool {
-	if ntargs != ntparams {
-		// TODO(gri) provide better error message
-		if check != nil {
-			check.errorf(pos, WrongTypeArgCount, "got %d arguments but %d type parameters", ntargs, ntparams)
-			return false
-		}
-		panic(fmt.Sprintf("%v: got %d arguments but %d type parameters", pos, ntargs, ntparams))
+// validateTArgLen checks that the number of type arguments (got) matches the
+// number of type parameters (want); if they don't match an error is reported.
+// If validation fails and check is nil, validateTArgLen panics.
+func (check *Checker) validateTArgLen(pos syntax.Pos, name string, want, got int) bool {
+	var qual string
+	switch {
+	case got < want:
+		qual = "not enough"
+	case got > want:
+		qual = "too many"
+	default:
+		return true
 	}
-	return true
+
+	msg := check.sprintf("%s type arguments for type %s: have %d, want %d", qual, name, got, want)
+	if check != nil {
+		check.error(atPos(pos), WrongTypeArgCount, msg)
+		return false
+	}
+
+	panic(fmt.Sprintf("%v: %s", pos, msg))
 }
 
 func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type, ctxt *Context) (int, error) {
@@ -192,10 +201,10 @@
 func (check *Checker) implements(pos syntax.Pos, V, T Type, constraint bool, cause *string) bool {
 	Vu := under(V)
 	Tu := under(T)
-	if Vu == Typ[Invalid] || Tu == Typ[Invalid] {
+	if !isValid(Vu) || !isValid(Tu) {
 		return true // avoid follow-on errors
 	}
-	if p, _ := Vu.(*Pointer); p != nil && under(p.base) == Typ[Invalid] {
+	if p, _ := Vu.(*Pointer); p != nil && !isValid(under(p.base)) {
 		return true // avoid follow-on errors (see go.dev/issue/49541 for an example)
 	}
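The reworked validateTArgLen phrases count mismatches as "not enough"/"too many" type arguments; for example (messages shown approximately):

package p

type Pair[K, V any] struct {
	key K
	val V
}

var _ Pair[int]               // error: not enough type arguments for type Pair: have 1, want 2
var _ Pair[int, string, bool] // error: too many type arguments for type Pair: have 3, want 2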
 
diff --git a/src/cmd/compile/internal/types2/interface.go b/src/cmd/compile/internal/types2/interface.go
index 872a321..4072098 100644
--- a/src/cmd/compile/internal/types2/interface.go
+++ b/src/cmd/compile/internal/types2/interface.go
@@ -112,11 +112,12 @@
 // Implementation
 
 func (t *Interface) cleanup() {
+	t.typeSet() // any interface that escapes type checking must be safe for concurrent use
 	t.check = nil
 	t.embedPos = nil
 }
 
-func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *Named) {
+func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *TypeName) {
 	addEmbedded := func(pos syntax.Pos, typ Type) {
 		ityp.embeddeds = append(ityp.embeddeds, typ)
 		if ityp.embedPos == nil {
@@ -142,7 +143,7 @@
 		typ := check.typ(f.Type)
 		sig, _ := typ.(*Signature)
 		if sig == nil {
-			if typ != Typ[Invalid] {
+			if isValid(typ) {
 				check.errorf(f.Type, InvalidSyntaxTree, "%s is not a method signature", typ)
 			}
 			continue // ignore
@@ -151,7 +152,9 @@
 		// use named receiver type if available (for better error messages)
 		var recvTyp Type = ityp
 		if def != nil {
-			recvTyp = def
+			if named := asNamed(def.typ); named != nil {
+				recvTyp = named
+			}
 		}
 		sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp)
 
diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go
index 3ac3457..0117571 100644
--- a/src/cmd/compile/internal/types2/issues_test.go
+++ b/src/cmd/compile/internal/types2/issues_test.go
@@ -921,6 +921,22 @@
 	conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // must not panic
 }
 
+func TestIssue61938(t *testing.T) {
+	const src = `
+package p
+
+func f[T any]() {}
+func _()        { f() }
+`
+	// no error handler provided (this issue)
+	var conf Config
+	typecheck(src, &conf, nil) // must not panic
+
+	// with error handler (sanity check)
+	conf.Error = func(error) {}
+	typecheck(src, &conf, nil) // must not panic
+}
+
 func TestIssue63260(t *testing.T) {
 	const src = `
 package p
@@ -964,3 +980,116 @@
 		t.Fatalf("types of v and T are not pointer-identical: %p != %p", v.Type().(*TypeParam), T)
 	}
 }
+
+func TestIssue44410(t *testing.T) {
+	const src = `
+package p
+
+type A = []int
+type S struct{ A }
+`
+
+	t.Setenv("GODEBUG", "gotypesalias=1")
+	pkg := mustTypecheck(src, nil, nil)
+
+	S := pkg.Scope().Lookup("S")
+	if S == nil {
+		t.Fatal("object S not found")
+	}
+
+	got := S.String()
+	const want = "type p.S struct{p.A}"
+	if got != want {
+		t.Fatalf("got %q; want %q", got, want)
+	}
+}
+
+func TestIssue59831(t *testing.T) {
+	// Package a exports a type S with an unexported method m;
+	// the tests check the error messages when m is not found.
+	const asrc = `package a; type S struct{}; func (S) m() {}`
+	apkg := mustTypecheck(asrc, nil, nil)
+
+	// Package b exports a type S with an exported method m;
+	// the tests check the error messages when M is not found.
+	const bsrc = `package b; type S struct{}; func (S) M() {}`
+	bpkg := mustTypecheck(bsrc, nil, nil)
+
+	tests := []struct {
+		imported *Package
+		src, err string
+	}{
+		// tests importing a (or nothing)
+		{apkg, `package a1; import "a"; var _ interface { M() } = a.S{}`,
+			"a.S does not implement interface{M()} (missing method M) have m() want M()"},
+
+		{apkg, `package a2; import "a"; var _ interface { m() } = a.S{}`,
+			"a.S does not implement interface{m()} (unexported method m)"}, // test for issue
+
+		{nil, `package a3; type S struct{}; func (S) m(); var _ interface { M() } = S{}`,
+			"S does not implement interface{M()} (missing method M) have m() want M()"},
+
+		{nil, `package a4; type S struct{}; func (S) m(); var _ interface { m() } = S{}`,
+			""}, // no error expected
+
+		{nil, `package a5; type S struct{}; func (S) m(); var _ interface { n() } = S{}`,
+			"S does not implement interface{n()} (missing method n)"},
+
+		// tests importing b (or nothing)
+		{bpkg, `package b1; import "b"; var _ interface { m() } = b.S{}`,
+			"b.S does not implement interface{m()} (missing method m) have M() want m()"},
+
+		{bpkg, `package b2; import "b"; var _ interface { M() } = b.S{}`,
+			""}, // no error expected
+
+		{nil, `package b3; type S struct{}; func (S) M(); var _ interface { M() } = S{}`,
+			""}, // no error expected
+
+		{nil, `package b4; type S struct{}; func (S) M(); var _ interface { m() } = S{}`,
+			"S does not implement interface{m()} (missing method m) have M() want m()"},
+
+		{nil, `package b5; type S struct{}; func (S) M(); var _ interface { n() } = S{}`,
+			"S does not implement interface{n()} (missing method n)"},
+	}
+
+	for _, test := range tests {
+		// typecheck test source
+		conf := Config{Importer: importHelper{pkg: test.imported}}
+		pkg, err := typecheck(test.src, &conf, nil)
+		if err == nil {
+			if test.err != "" {
+				t.Errorf("package %s: got no error, want %q", pkg.Name(), test.err)
+			}
+			continue
+		}
+		if test.err == "" {
+			t.Errorf("package %s: got %q, want not error", pkg.Name(), err.Error())
+		}
+
+		// flatten reported error message
+		errmsg := strings.ReplaceAll(err.Error(), "\n", " ")
+		errmsg = strings.ReplaceAll(errmsg, "\t", "")
+
+		// verify error message
+		if !strings.Contains(errmsg, test.err) {
+			t.Errorf("package %s: got %q, want %q", pkg.Name(), errmsg, test.err)
+		}
+	}
+}
+
+func TestIssue64759(t *testing.T) {
+	const src = `
+//go:build go1.18
+package p
+
+func f[S ~[]E, E any](S) {}
+
+func _() {
+	f([]string{})
+}
+`
+	// Per the go:build directive, the source must typecheck
+	// even though the (module) Go version is set to go1.17.
+	conf := Config{GoVersion: "go1.17"}
+	mustTypecheck(src, &conf, nil)
+}
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
index b7370ca..bc47c15 100644
--- a/src/cmd/compile/internal/types2/lookup.go
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -54,7 +54,7 @@
 	// Thus, if we have a named pointer type, proceed with the underlying
 	// pointer type but discard the result if it is a method since we would
 	// not have found it for T (see also go.dev/issue/8590).
-	if t, _ := T.(*Named); t != nil {
+	if t := asNamed(T); t != nil {
 		if p, _ := t.Underlying().(*Pointer); p != nil {
 			obj, index, indirect = lookupFieldOrMethodImpl(p, false, pkg, name, false)
 			if _, ok := obj.(*Func); ok {
@@ -96,7 +96,7 @@
 // and missingMethod (the latter doesn't care about struct fields).
 //
 // If foldCase is true, method names are considered equal if they are equal
-// with case folding.
+// with case folding, irrespective of which package they are in.
 //
 // The resulting object may not be fully type-checked.
 func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string, foldCase bool) (obj Object, index []int, indirect bool) {
@@ -138,7 +138,7 @@
 
 			// If we have a named type, we may have associated methods.
 			// Look for those first.
-			if named, _ := typ.(*Named); named != nil {
+			if named := asNamed(typ); named != nil {
 				if alt := seen.lookup(named); alt != nil {
 					// We have seen this type before, at a more shallow depth
 					// (note that multiples of this type at the current depth
@@ -343,6 +343,7 @@
 		ok = iota
 		notFound
 		wrongName
+		unexported
 		wrongSig
 		ambigSel
 		ptrRecv
@@ -388,6 +389,11 @@
 					f, _ = obj.(*Func)
 					if f != nil {
 						state = wrongName
+						if f.name == m.name {
+							// If the names are equal, f must be unexported
+							// (otherwise the package wouldn't matter).
+							state = unexported
+						}
 					}
 				}
 				break
@@ -436,8 +442,9 @@
 			}
 		case wrongName:
 			fs, ms := check.funcString(f, false), check.funcString(m, false)
-			*cause = check.sprintf("(missing method %s)\n\t\thave %s\n\t\twant %s",
-				m.Name(), fs, ms)
+			*cause = check.sprintf("(missing method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms)
+		case unexported:
+			*cause = check.sprintf("(unexported method %s)", m.Name())
 		case wrongSig:
 			fs, ms := check.funcString(f, false), check.funcString(m, false)
 			if fs == ms {
@@ -445,8 +452,18 @@
 				// Add package information to disambiguate (go.dev/issue/54258).
 				fs, ms = check.funcString(f, true), check.funcString(m, true)
 			}
-			*cause = check.sprintf("(wrong type for method %s)\n\t\thave %s\n\t\twant %s",
-				m.Name(), fs, ms)
+			if fs == ms {
+				// We still have "want Foo, have Foo".
+				// This is most likely due to different type parameters with
+				// the same name appearing in the instantiated signatures
+				// (go.dev/issue/61685).
+				// Rather than reporting this misleading error cause, for now
+				// just point out that the method signature is incorrect.
+				// TODO(gri) should find a good way to report the root cause
+				*cause = check.sprintf("(wrong type for method %s)", m.Name())
+				break
+			}
+			*cause = check.sprintf("(wrong type for method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms)
 		case ambigSel:
 			*cause = check.sprintf("(ambiguous selector %s.%s)", V, m.Name())
 		case ptrRecv:
@@ -527,7 +544,7 @@
 // with an underlying pointer type!) and returns its base and true.
 // Otherwise it returns (typ, false).
 func deref(typ Type) (Type, bool) {
-	if p, _ := typ.(*Pointer); p != nil {
+	if p, _ := Unalias(typ).(*Pointer); p != nil {
 		// p.base should never be nil, but be conservative
 		if p.base == nil {
 			if debug {
@@ -572,11 +589,12 @@
 }
 
 // lookupMethod returns the index of and method with matching package and name, or (-1, nil).
-// If foldCase is true, method names are considered equal if they are equal with case folding.
+// If foldCase is true, method names are considered equal if they are equal with case folding
+// and their packages are ignored (e.g., pkg1.m, pkg1.M, pkg2.m, and pkg2.M are all equal).
 func lookupMethod(methods []*Func, pkg *Package, name string, foldCase bool) (int, *Func) {
 	if name != "_" {
 		for i, m := range methods {
-			if (m.name == name || foldCase && strings.EqualFold(m.name, name)) && m.sameId(pkg, m.name) {
+			if m.sameId(pkg, name) || foldCase && strings.EqualFold(m.name, name) {
 				return i, m
 			}
 		}
diff --git a/src/cmd/compile/internal/types2/mono.go b/src/cmd/compile/internal/types2/mono.go
index 5b68f2a..dae9230 100644
--- a/src/cmd/compile/internal/types2/mono.go
+++ b/src/cmd/compile/internal/types2/mono.go
@@ -208,7 +208,7 @@
 	// type parameters.
 	var do func(typ Type)
 	do = func(typ Type) {
-		switch typ := typ.(type) {
+		switch typ := Unalias(typ).(type) {
 		default:
 			panic("unexpected type")
 
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
index 5408c7e..893247d 100644
--- a/src/cmd/compile/internal/types2/named.go
+++ b/src/cmd/compile/internal/types2/named.go
@@ -141,7 +141,7 @@
 // If the given type name obj doesn't have a type yet, its type is set to the returned named type.
 // The underlying type must not be a *Named.
 func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
-	if _, ok := underlying.(*Named); ok {
+	if asNamed(underlying) != nil {
 		panic("underlying type must not be *Named")
 	}
 	return (*Checker)(nil).newNamed(obj, underlying, methods)
@@ -224,7 +224,7 @@
 	atomic.StoreUint32(&n.state_, uint32(state))
 }
 
-// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
+// newNamed is like NewNamed but with a *Checker receiver.
 func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
 	typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods}
 	if obj.typ == nil {
@@ -434,7 +434,7 @@
 	if underlying == nil {
 		panic("underlying type must not be nil")
 	}
-	if _, ok := underlying.(*Named); ok {
+	if asNamed(underlying) != nil {
 		panic("underlying type must not be *Named")
 	}
 	t.resolve().underlying = underlying
@@ -453,7 +453,8 @@
 	}
 }
 
-func (t *Named) Underlying() Type { return t.resolve().underlying }
+// TODO(gri) Investigate if Unalias can be moved to where underlying is set.
+func (t *Named) Underlying() Type { return Unalias(t.resolve().underlying) }
 func (t *Named) String() string   { return TypeString(t, nil) }
 
 // ----------------------------------------------------------------------------
@@ -550,12 +551,6 @@
 	return u
 }
 
-func (n *Named) setUnderlying(typ Type) {
-	if n != nil {
-		n.underlying = typ
-	}
-}
-
 func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
 	n.resolve()
 	// If n is an instance, we may not have yet instantiated all of its methods.
@@ -598,7 +593,7 @@
 	orig := n.inst.orig
 	targs := n.inst.targs
 
-	if _, unexpanded := orig.underlying.(*Named); unexpanded {
+	if asNamed(orig.underlying) != nil {
 		// We should only get a Named underlying type here during type checking
 		// (for example, in recursive type declarations).
 		assert(check != nil)
@@ -633,11 +628,18 @@
 				old := iface
 				iface = check.newInterface()
 				iface.embeddeds = old.embeddeds
+				assert(old.complete) // otherwise we are copying incomplete data
 				iface.complete = old.complete
 				iface.implicit = old.implicit // should be false but be conservative
 				underlying = iface
 			}
 			iface.methods = methods
+			iface.tset = nil // recompute type set with new methods
+
+			// If check != nil, check.newInterface will have saved the interface for later completion.
+			if check == nil { // golang/go#61561: all newly created interfaces must be fully evaluated
+				iface.typeSet()
+			}
 		}
 	}
 
@@ -649,7 +651,7 @@
 //
 // TODO(rfindley): eliminate this function or give it a better name.
 func safeUnderlying(typ Type) Type {
-	if t, _ := typ.(*Named); t != nil {
+	if t := asNamed(typ); t != nil {
 		return t.underlying
 	}
 	return typ.Underlying()
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
index 5c0ea8c..2515872 100644
--- a/src/cmd/compile/internal/types2/object.go
+++ b/src/cmd/compile/internal/types2/object.go
@@ -285,6 +285,8 @@
 	switch t := obj.typ.(type) {
 	case nil:
 		return false
+	// case *Alias:
+	//	handled by default case
 	case *Basic:
 		// unsafe.Pointer is not an alias.
 		if obj.pkg == Unsafe {
@@ -406,6 +408,12 @@
 	return obj
 }
 
+// Pkg returns the package to which the function belongs.
+//
+// The result is nil for methods of types in the Universe scope,
+// like method Error of the error built-in interface type.
+func (obj *Func) Pkg() *Package { return obj.object.Pkg() }
+
 // hasPtrRecv reports whether the receiver is of the form *T for the given method obj.
 func (obj *Func) hasPtrRecv() bool {
 	// If a method's receiver type is set, use that as the source of truth for the receiver.
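Func.Pkg is also part of the exported go/types API, where the nil case is easy to observe for the built-in error interface:

package main

import (
	"fmt"
	"go/types"
)

func main() {
	// error is declared in the Universe scope, so its Error method
	// has no package associated with it.
	errType := types.Universe.Lookup("error").Type()
	iface := errType.Underlying().(*types.Interface)
	m := iface.Method(0)                             // the Error() string method
	fmt.Printf("%s.Pkg() = %v\n", m.Name(), m.Pkg()) // Error.Pkg() = <nil>
}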
diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go
index 0469b00..3f15100 100644
--- a/src/cmd/compile/internal/types2/operand.go
+++ b/src/cmd/compile/internal/types2/operand.go
@@ -172,7 +172,7 @@
 
 	// <typ>
 	if hasType {
-		if x.typ != Typ[Invalid] {
+		if isValid(x.typ) {
 			var intro string
 			if isGeneric(x.typ) {
 				intro = " of generic type "
@@ -245,7 +245,7 @@
 // if assignableTo is invoked through an exported API call, i.e., when all
 // methods have been type-checked.
 func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Code) {
-	if x.mode == invalid || T == Typ[Invalid] {
+	if x.mode == invalid || !isValid(T) {
 		return true, 0 // avoid spurious errors
 	}
 
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
index 13a3bf8..7a096e3 100644
--- a/src/cmd/compile/internal/types2/predicates.go
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -6,6 +6,9 @@
 
 package types2
 
+// isValid reports whether t is a valid type.
+func isValid(t Type) bool { return Unalias(t) != Typ[Invalid] }
+
 // The isX predicates below report whether t is an X.
 // If t is a type parameter the result is false; i.e.,
 // these predicates don't look inside a type parameter.
@@ -47,7 +50,7 @@
 // for all specific types of the type parameter's type set.
 // allBasic(t, info) is an optimized version of isBasic(coreType(t), info).
 func allBasic(t Type, info BasicInfo) bool {
-	if tpar, _ := t.(*TypeParam); tpar != nil {
+	if tpar, _ := Unalias(t).(*TypeParam); tpar != nil {
 		return tpar.is(func(t *term) bool { return t != nil && isBasic(t.typ, info) })
 	}
 	return isBasic(t, info)
@@ -57,7 +60,7 @@
 // predeclared types, defined types, and type parameters.
 // hasName may be called with types that are not fully set up.
 func hasName(t Type) bool {
-	switch t.(type) {
+	switch Unalias(t).(type) {
 	case *Basic, *Named, *TypeParam:
 		return true
 	}
@@ -68,7 +71,7 @@
 // This includes all non-defined types, but also basic types.
 // isTypeLit may be called with types that are not fully set up.
 func isTypeLit(t Type) bool {
-	switch t.(type) {
+	switch Unalias(t).(type) {
 	case *Named, *TypeParam:
 		return false
 	}
@@ -79,8 +82,10 @@
 // constant or boolean. isTyped may be called with types that
 // are not fully set up.
 func isTyped(t Type) bool {
-	// isTyped is called with types that are not fully
-	// set up. Must not call under()!
+	// Alias or Named types cannot denote untyped types,
+	// thus we don't need to call Unalias or under
+	// (which would be unsafe to do for types that are
+	// not fully set up).
 	b, _ := t.(*Basic)
 	return b == nil || b.info&IsUntyped == 0
 }
@@ -103,7 +108,7 @@
 
 // isTypeParam reports whether t is a type parameter.
 func isTypeParam(t Type) bool {
-	_, ok := t.(*TypeParam)
+	_, ok := Unalias(t).(*TypeParam)
 	return ok
 }
 
@@ -112,7 +117,7 @@
 // use anywhere, but it may report a false negative if the type set has not been
 // computed yet.
 func hasEmptyTypeset(t Type) bool {
-	if tpar, _ := t.(*TypeParam); tpar != nil && tpar.bound != nil {
+	if tpar, _ := Unalias(t).(*TypeParam); tpar != nil && tpar.bound != nil {
 		iface, _ := safeUnderlying(tpar.bound).(*Interface)
 		return iface != nil && iface.tset != nil && iface.tset.IsEmpty()
 	}
@@ -124,7 +129,7 @@
 // TODO(gri) should we include signatures or assert that they are not present?
 func isGeneric(t Type) bool {
 	// A parameterized type is only generic if it doesn't have an instantiation already.
-	named, _ := t.(*Named)
+	named := asNamed(t)
 	return named != nil && named.obj != nil && named.inst == nil && named.TypeParams().Len() > 0
 }
 
@@ -218,11 +223,14 @@
 
 // For changes to this code the corresponding changes should be made to unifier.nify.
 func (c *comparer) identical(x, y Type, p *ifacePair) bool {
+	x = Unalias(x)
+	y = Unalias(y)
+
 	if x == y {
 		return true
 	}
 
-	if c.ignoreInvalids && (x == Typ[Invalid] || y == Typ[Invalid]) {
+	if c.ignoreInvalids && (!isValid(x) || !isValid(y)) {
 		return true
 	}
 
@@ -435,7 +443,7 @@
 		// Two named types are identical if their type names originate
 		// in the same type declaration; if they are instantiated they
 		// must have identical type argument lists.
-		if y, ok := y.(*Named); ok {
+		if y := asNamed(y); y != nil {
 			// check type arguments before origins to match unifier
 			// (for correct source code we need to do all checks so
 			// order doesn't matter)
@@ -449,7 +457,7 @@
 					return false
 				}
 			}
-			return indenticalOrigin(x, y)
+			return identicalOrigin(x, y)
 		}
 
 	case *TypeParam:
@@ -466,7 +474,7 @@
 }
 
 // identicalOrigin reports whether x and y originated in the same declaration.
-func indenticalOrigin(x, y *Named) bool {
+func identicalOrigin(x, y *Named) bool {
 	// TODO(gri) is this correct?
 	return x.Origin().obj == y.Origin().obj
 }
@@ -492,7 +500,7 @@
 // it returns the incoming type for all other types. The default type
 // for untyped nil is untyped nil.
 func Default(t Type) Type {
-	if t, ok := t.(*Basic); ok {
+	if t, ok := Unalias(t).(*Basic); ok {
 		switch t.kind {
 		case UntypedBool:
 			return Typ[Bool]
@@ -530,3 +538,9 @@
 	}
 	return nil
 }
+
+// clone makes a "flat copy" of *p and returns a pointer to the copy.
+func clone[P *T, T any](p P) P {
+	c := *p
+	return &c
+}
diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go
index d051fb5..0cf7c91 100644
--- a/src/cmd/compile/internal/types2/resolver.go
+++ b/src/cmd/compile/internal/types2/resolver.go
@@ -351,7 +351,7 @@
 				}
 
 				// declare all constants
-				values := unpackExpr(last.Values)
+				values := syntax.UnpackListExpr(last.Values)
 				for i, name := range s.NameList {
 					obj := NewConst(name.Pos(), pkg, name.Value, nil, iota)
 
@@ -382,7 +382,7 @@
 				}
 
 				// declare all variables
-				values := unpackExpr(s.Values)
+				values := syntax.UnpackListExpr(s.Values)
 				for i, name := range s.NameList {
 					obj := NewVar(name.Pos(), pkg, name.Value, nil)
 					lhs[i] = obj
@@ -538,7 +538,7 @@
 	if ptyp, _ := rtyp.(*syntax.IndexExpr); ptyp != nil {
 		rtyp = ptyp.X
 		if unpackParams {
-			for _, arg := range unpackExpr(ptyp.Index) {
+			for _, arg := range syntax.UnpackListExpr(ptyp.Index) {
 				var par *syntax.Name
 				switch arg := arg.(type) {
 				case *syntax.Name:
@@ -588,7 +588,7 @@
 				return false, nil
 			}
 			ptr = true
-			typ = unparen(pexpr.X) // continue with pointer base type
+			typ = syntax.Unparen(pexpr.X) // continue with pointer base type
 		}
 
 		// typ must be a name, or a C.name cgo selector.
@@ -677,32 +677,39 @@
 		}
 	}
 
-	// We process non-alias type declarations first, followed by alias declarations,
-	// and then everything else. This appears to avoid most situations where the type
-	// of an alias is needed before it is available.
-	// There may still be cases where this is not good enough (see also go.dev/issue/25838).
-	// In those cases Checker.ident will report an error ("invalid use of type alias").
-	var aliasList []*TypeName
-	var othersList []Object // everything that's not a type
-	// phase 1: non-alias type declarations
-	for _, obj := range objList {
-		if tname, _ := obj.(*TypeName); tname != nil {
-			if check.objMap[tname].tdecl.Alias {
-				aliasList = append(aliasList, tname)
-			} else {
-				check.objDecl(obj, nil)
-			}
-		} else {
-			othersList = append(othersList, obj)
+	if check.enableAlias {
+		// With Alias nodes we can process declarations in any order.
+		for _, obj := range objList {
+			check.objDecl(obj, nil)
 		}
-	}
-	// phase 2: alias type declarations
-	for _, obj := range aliasList {
-		check.objDecl(obj, nil)
-	}
-	// phase 3: all other declarations
-	for _, obj := range othersList {
-		check.objDecl(obj, nil)
+	} else {
+		// Without Alias nodes, we process non-alias type declarations first, followed by
+		// alias declarations, and then everything else. This appears to avoid most situations
+		// where the type of an alias is needed before it is available.
+		// There may still be cases where this is not good enough (see also go.dev/issue/25838).
+		// In those cases Checker.ident will report an error ("invalid use of type alias").
+		var aliasList []*TypeName
+		var othersList []Object // everything that's not a type
+		// phase 1: non-alias type declarations
+		for _, obj := range objList {
+			if tname, _ := obj.(*TypeName); tname != nil {
+				if check.objMap[tname].tdecl.Alias {
+					aliasList = append(aliasList, tname)
+				} else {
+					check.objDecl(obj, nil)
+				}
+			} else {
+				othersList = append(othersList, obj)
+			}
+		}
+		// phase 2: alias type declarations
+		for _, obj := range aliasList {
+			check.objDecl(obj, nil)
+		}
+		// phase 3: all other declarations
+		for _, obj := range othersList {
+			check.objDecl(obj, nil)
+		}
 	}
 
 	// At this point we may have a non-empty check.methods map; this means that not all
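The enableAlias split above is driven by the gotypesalias GODEBUG setting. Through the exported go/types API the visible difference is whether an alias is kept as a *types.Alias node; a rough sketch (run with GODEBUG=gotypesalias=1 to see the alias preserved; Unalias resolves it either way):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p; type A = []int; var X A`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	X := pkg.Scope().Lookup("X")
	// With gotypesalias=1, X's type prints as the alias p.A and Unalias
	// resolves it to []int; with gotypesalias=0 it is already []int.
	fmt.Println(X.Type(), types.Unalias(X.Type()))
}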
diff --git a/src/cmd/compile/internal/types2/return.go b/src/cmd/compile/internal/types2/return.go
index ab611ef..01988b0 100644
--- a/src/cmd/compile/internal/types2/return.go
+++ b/src/cmd/compile/internal/types2/return.go
@@ -27,7 +27,7 @@
 
 	case *syntax.ExprStmt:
 		// calling the predeclared (possibly parenthesized) panic() function is terminating
-		if call, ok := unparen(s.X).(*syntax.CallExpr); ok && check.isPanic[call] {
+		if call, ok := syntax.Unparen(s.X).(*syntax.CallExpr); ok && check.isPanic[call] {
 			return true
 		}
 
diff --git a/src/cmd/compile/internal/types2/selection.go b/src/cmd/compile/internal/types2/selection.go
index c820a29..dfbf3a0 100644
--- a/src/cmd/compile/internal/types2/selection.go
+++ b/src/cmd/compile/internal/types2/selection.go
@@ -13,6 +13,39 @@
 
 // SelectionKind describes the kind of a selector expression x.f
 // (excluding qualified identifiers).
+//
+// If x is a struct or *struct, a selector expression x.f may denote a
+// sequence of selection operations x.a.b.c.f. The SelectionKind
+// describes the kind of the final (explicit) operation; all the
+// previous (implicit) operations are always field selections.
+// Each element of Indices specifies an implicit field (a, b, c)
+// by its index in the struct type of the field selection operand.
+//
+// For a FieldVal operation, the final selection refers to the field
+// specified by Selection.Obj.
+//
+// For a MethodVal operation, the final selection refers to a method.
+// If the "pointerness" of the method's declared receiver does not
+// match that of the effective receiver after implicit field
+// selection, then an & or * operation is implicitly applied to the
+// receiver variable or value.
+// So, x.f denotes (&x.a.b.c).f when f requires a pointer receiver but
+// x.a.b.c is a non-pointer variable; and it denotes (*x.a.b.c).f when
+// f requires a non-pointer receiver but x.a.b.c is a pointer value.
+//
+// All pointer indirections, whether due to implicit or explicit field
+// selections or * operations inserted for "pointerness", panic if
+// applied to a nil pointer, so a method call x.f() may panic even
+// before the function call.
+//
+// By contrast, a MethodExpr operation T.f is essentially equivalent
+// to a function literal of the form:
+//
+//	func(x T, args) (results) { return x.f(args) }
+//
+// Consequently, any implicit field selections and * operations
+// inserted for "pointerness" are not evaluated until the function is
+// called, so a T.f or (*T).f expression never panics.
 type SelectionKind int
 
 const (
@@ -102,6 +135,11 @@
 
 // Indirect reports whether any pointer indirection was required to get from
 // x to f in x.f.
+//
+// Beware: Indirect spuriously returns true (Go issue #8353) for a
+// MethodVal selection in which the receiver argument and parameter
+// both have type *T so there is no indirection.
+// Unfortunately, a fix is too risky.
 func (s *Selection) Indirect() bool { return s.indirect }
 
 func (s *Selection) String() string { return SelectionString(s, nil) }
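The expanded SelectionKind documentation is easiest to see in a small program: for a MethodVal selection the implicit & or * is applied at the selection itself, while a MethodExpr defers everything to the eventual call:

package main

import "fmt"

type T struct{ n int }

func (t *T) Inc() { t.n++ }

func main() {
	var x T
	x.Inc() // MethodVal: &x is taken implicitly because Inc has a pointer receiver

	f := (*T).Inc // MethodExpr: behaves like func(t *T) { t.Inc() }
	f(&x)

	fmt.Println(x.n) // 2
}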
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
index 8e0dfe2..18a64ec 100644
--- a/src/cmd/compile/internal/types2/signature.go
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -108,9 +108,12 @@
 		// - the receiver specification acts as local declaration for its type parameters, which may be blank
 		_, rname, rparams := check.unpackRecv(recvPar.Type, true)
 		if len(rparams) > 0 {
+			// The scope of the type parameter T in "func (r T[T]) f()"
+			// starts after f, not at "r"; see #52038.
+			scopePos := ftyp.Pos()
 			tparams := make([]*TypeParam, len(rparams))
 			for i, rparam := range rparams {
-				tparams[i] = check.declareTypeParam(rparam)
+				tparams[i] = check.declareTypeParam(rparam, scopePos)
 			}
 			sig.rparams = bindTParams(tparams)
 			// Blank identifiers don't get declared, so naive type-checking of the
@@ -136,7 +139,7 @@
 				// Also: Don't report an error via genericType since it will be reported
 				//       again when we type-check the signature.
 				// TODO(gri) maybe the receiver should be marked as invalid instead?
-				if recv, _ := check.genericType(rname, nil).(*Named); recv != nil {
+				if recv := asNamed(check.genericType(rname, nil)); recv != nil {
 					recvTParams = recv.TypeParams().list()
 				}
 			}
@@ -167,16 +170,21 @@
 		check.collectTypeParams(&sig.tparams, tparams)
 	}
 
-	// Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
-	// declarations and then squash that scope into the parent scope (and report any redeclarations at
-	// that time).
+	// Use a temporary scope for all parameter declarations and then
+	// squash that scope into the parent scope (and report any
+	// redeclarations at that time).
+	//
+	// TODO(adonovan): now that each declaration has the correct
+	// scopePos, there should be no need for scope squashing.
+	// Audit to ensure all lookups honor scopePos and simplify.
 	scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
-	var recvList []*Var // TODO(gri) remove the need for making a list here
+	scopePos := syntax.EndPos(ftyp) // all parameters' scopes start after the signature
+	var recvList []*Var             // TODO(gri) remove the need for making a list here
 	if recvPar != nil {
-		recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, false) // use rewritten receiver type, if any
+		recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, false, scopePos) // use rewritten receiver type, if any
 	}
-	params, variadic := check.collectParams(scope, ftyp.ParamList, true)
-	results, _ := check.collectParams(scope, ftyp.ResultList, false)
+	params, variadic := check.collectParams(scope, ftyp.ParamList, true, scopePos)
+	results, _ := check.collectParams(scope, ftyp.ResultList, false, scopePos)
 	scope.Squash(func(obj, alt Object) {
 		var err error_
 		err.code = DuplicateDecl
@@ -208,13 +216,14 @@
 		check.later(func() {
 			// spec: "The receiver type must be of the form T or *T where T is a type name."
 			rtyp, _ := deref(recv.typ)
-			if rtyp == Typ[Invalid] {
+			atyp := Unalias(rtyp)
+			if !isValid(atyp) {
 				return // error was reported before
 			}
 			// spec: "The type denoted by T is called the receiver base type; it must not
 			// be a pointer or interface type and it must be declared in the same package
 			// as the method."
-			switch T := rtyp.(type) {
+			switch T := atyp.(type) {
 			case *Named:
 				// The receiver type may be an instantiated type referred to
 				// by an alias (which cannot have receiver parameters for now).
@@ -258,7 +267,7 @@
 
 // collectParams declares the parameters of list in scope and returns the corresponding
 // variable list.
-func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadicOk bool) (params []*Var, variadic bool) {
+func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadicOk bool, scopePos syntax.Pos) (params []*Var, variadic bool) {
 	if list == nil {
 		return
 	}
@@ -293,7 +302,7 @@
 				// ok to continue
 			}
 			par := NewParam(field.Name.Pos(), check.pkg, name, typ)
-			check.declare(scope, field.Name, par, scope.pos)
+			check.declare(scope, field.Name, par, scopePos)
 			params = append(params, par)
 			named = true
 		} else {
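
A small illustration of the scoping fix referenced above (#52038): in a declaration like func (r T[T]) f(), the receiver's type parameter comes into scope only after the method name, not at the receiver, so the first T in T[T] still resolves to the package-level generic type. A hedged sketch, not taken from the patch:

	package main

	type T[P any] struct{ v P }

	// The second T declares the receiver's type parameter and shadows the
	// generic type T from the signature onward; the first T in T[T] still
	// denotes the generic type declared above.
	func (r T[T]) m() T { return r.v }

	func main() {
		var x T[int]
		_ = x.m()
	}
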
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
index 59f600a..486c05c 100644
--- a/src/cmd/compile/internal/types2/sizes.go
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -112,8 +112,8 @@
 }
 
 func IsSyncAtomicAlign64(T Type) bool {
-	named, ok := T.(*Named)
-	if !ok {
+	named := asNamed(T)
+	if named == nil {
 		return false
 	}
 	obj := named.Obj()
@@ -227,7 +227,7 @@
 }
 
 // common architecture word sizes and alignments
-var gcArchSizes = map[string]*StdSizes{
+var gcArchSizes = map[string]*gcSizes{
 	"386":      {4, 4},
 	"amd64":    {8, 8},
 	"amd64p32": {4, 8},
@@ -255,20 +255,17 @@
 // "386", "amd64", "amd64p32", "arm", "arm64", "loong64", "mips", "mipsle",
 // "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm".
 func SizesFor(compiler, arch string) Sizes {
-	var m map[string]*StdSizes
 	switch compiler {
 	case "gc":
-		m = gcArchSizes
+		if s := gcSizesFor(compiler, arch); s != nil {
+			return Sizes(s)
+		}
 	case "gccgo":
-		m = gccgoArchSizes
-	default:
-		return nil
+		if s, ok := gccgoArchSizes[arch]; ok {
+			return Sizes(s)
+		}
 	}
-	s, ok := m[arch]
-	if !ok {
-		return nil
-	}
-	return s
+	return nil
 }
 
 // stdSizes is used if Config.Sizes == nil.
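
SizesFor is mirrored by the exported go/types.SizesFor; a quick sanity check of the gc/amd64 entry using that public API (illustrative, not part of the patch):

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		sizes := types.SizesFor("gc", "amd64")
		if sizes == nil {
			fmt.Println("unknown compiler/arch pair")
			return
		}
		t := types.Typ[types.Int64]
		fmt.Println(sizes.Sizeof(t), sizes.Alignof(t)) // 8 8
	}
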
diff --git a/src/cmd/compile/internal/types2/sizes_test.go b/src/cmd/compile/internal/types2/sizes_test.go
index 7af8958..9a772f4 100644
--- a/src/cmd/compile/internal/types2/sizes_test.go
+++ b/src/cmd/compile/internal/types2/sizes_test.go
@@ -133,3 +133,62 @@
 		})
 	}
 }
+
+type gcSizeTest struct {
+	name string
+	src  string
+}
+
+var gcSizesTests = []gcSizeTest{
+	{
+		"issue60431",
+		`
+package main
+
+import "unsafe"
+
+// The foo struct size is expected to be rounded up to 16 bytes.
+type foo struct {
+	a int64
+	b bool
+}
+
+func main() {
+	assert(unsafe.Sizeof(foo{}) == 16)
+}`,
+	},
+	{
+		"issue60734",
+		`
+package main
+
+import (
+	"unsafe"
+)
+
+// The Data struct size is expected to be rounded up to 16 bytes.
+type Data struct {
+	Value  uint32   // 4 bytes
+	Label  [10]byte // 10 bytes
+	Active bool     // 1 byte
+	// padded with 1 byte to make it align
+}
+
+func main() {
+	assert(unsafe.Sizeof(Data{}) == 16)
+}
+`,
+	},
+}
+
+func TestGCSizes(t *testing.T) {
+	types2.DefPredeclaredTestFuncs()
+	for _, tc := range gcSizesTests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			conf := types2.Config{Importer: defaultImporter(), Sizes: types2.SizesFor("gc", "amd64")}
+			mustTypecheck(tc.src, &conf, nil)
+		})
+	}
+}
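
Both test programs above hinge on trailing padding: a struct's size is rounded up to a multiple of its largest field alignment. The same arithmetic as a standalone program (sizes shown assume gc on amd64, as in the test's SizesFor call):

	package main

	import (
		"fmt"
		"unsafe"
	)

	type foo struct {
		a int64 // 8 bytes, 8-byte alignment
		b bool  // 1 byte
		// 7 bytes of trailing padding round the size up to 16,
		// a multiple of the largest field alignment (8).
	}

	func main() {
		fmt.Println(unsafe.Sizeof(foo{}), unsafe.Alignof(foo{})) // 16 8
	}
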
diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go
index ee852f5..405af78 100644
--- a/src/cmd/compile/internal/types2/stdlib_test.go
+++ b/src/cmd/compile/internal/types2/stdlib_test.go
@@ -191,7 +191,7 @@
 		}
 		text = strings.TrimSpace(text[2:])
 
-		if strings.HasPrefix(text, "+build ") {
+		if strings.HasPrefix(text, "go:build ") {
 			panic("skip")
 		}
 		if first == "" {
@@ -233,6 +233,9 @@
 		filename := filepath.Join(path, f.Name())
 		goVersion := ""
 		if comment := firstComment(filename); comment != "" {
+			if strings.Contains(comment, "-goexperiment") {
+				continue // ignore this file
+			}
 			fields := strings.Fields(comment)
 			switch fields[0] {
 			case "skip", "compiledir":
@@ -308,6 +311,7 @@
 
 	testTestDir(t, filepath.Join(testenv.GOROOT(t), "test", "fixedbugs"),
 		"bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore
+		"bug398.go",      // types2 doesn't check for anonymous interface cycles (go.dev/issue/56103)
 		"issue6889.go",   // gc-specific test
 		"issue11362.go",  // canonical import path check
 		"issue16369.go",  // types2 handles this correctly - not an issue
@@ -325,6 +329,7 @@
 		"issue49767.go",  // go/types does not have constraints on channel element size
 		"issue49814.go",  // go/types does not have constraints on array size
 		"issue56103.go",  // anonymous interface cycles; will be a type checker error in 1.22
+		"issue52697.go",  // types2 does not have constraints on stack size
 
 		// These tests requires runtime/cgo.Incomplete, which is only available on some platforms.
 		// However, types2 does not know about build constraints.
diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go
index f13ab69..c9713da 100644
--- a/src/cmd/compile/internal/types2/stmt.go
+++ b/src/cmd/compile/internal/types2/stmt.go
@@ -9,6 +9,7 @@
 import (
 	"cmd/compile/internal/syntax"
 	"go/constant"
+	"internal/buildcfg"
 	. "internal/types/errors"
 	"sort"
 )
@@ -22,10 +23,6 @@
 		check.trace(body.Pos(), "-- %s: %s", name, sig)
 	}
 
-	// set function scope extent
-	sig.scope.pos = body.Pos()
-	sig.scope.end = syntax.EndPos(body)
-
 	// save/restore current environment and set up function environment
 	// (and use 0 indentation at function start)
 	defer func(env environment, indent int) {
@@ -279,7 +276,7 @@
 // isNil reports whether the expression e denotes the predeclared value nil.
 func (check *Checker) isNil(e syntax.Expr) bool {
 	// The only way to express the nil value is by literally writing nil (possibly in parentheses).
-	if name, _ := unparen(e).(*syntax.Name); name != nil {
+	if name, _ := syntax.Unparen(e).(*syntax.Name); name != nil {
 		_, ok := check.lookup(name.Value).(*Nil)
 		return ok
 	}
@@ -297,7 +294,7 @@
 			check.expr(nil, &dummy, e) // run e through expr so we get the usual Info recordings
 		} else {
 			T = check.varType(e)
-			if T == Typ[Invalid] {
+			if !isValid(T) {
 				continue L
 			}
 		}
@@ -341,7 +338,7 @@
 // 			hash = "<nil>" // avoid collision with a type named nil
 // 		} else {
 // 			T = check.varType(e)
-// 			if T == Typ[Invalid] {
+// 			if !isValid(T) {
 // 				continue L
 // 			}
 // 			hash = typeHash(T, nil)
@@ -458,12 +455,12 @@
 				check.errorf(s.Lhs, NonNumericIncDec, invalidOp+"%s%s%s (non-numeric type %s)", s.Lhs, s.Op, s.Op, x.typ)
 				return
 			}
-			check.assignVar(s.Lhs, nil, &x)
+			check.assignVar(s.Lhs, nil, &x, "assignment")
 			return
 		}
 
-		lhs := unpackExpr(s.Lhs)
-		rhs := unpackExpr(s.Rhs)
+		lhs := syntax.UnpackListExpr(s.Lhs)
+		rhs := syntax.UnpackListExpr(s.Rhs)
 		switch s.Op {
 		case 0:
 			check.assignVars(lhs, rhs)
@@ -481,7 +478,7 @@
 
 		var x operand
 		check.binary(&x, nil, lhs[0], rhs[0], s.Op)
-		check.assignVar(lhs[0], nil, &x)
+		check.assignVar(lhs[0], nil, &x, "assignment")
 
 	case *syntax.CallStmt:
 		kind := "go"
@@ -494,7 +491,7 @@
 		res := check.sig.results
 		// Return with implicit results allowed for function with named results.
 		// (If one is named, all are named.)
-		results := unpackExpr(s.Results)
+		results := syntax.UnpackListExpr(s.Results)
 		if len(results) == 0 && res.Len() > 0 && res.vars[0].name != "" {
 			// spec: "Implementation restriction: A compiler may disallow an empty expression
 			// list in a "return" statement if a different entity (constant, type, or variable)
@@ -621,7 +618,7 @@
 
 			// if present, rhs must be a receive operation
 			if rhs != nil {
-				if x, _ := unparen(rhs).(*syntax.Operation); x != nil && x.Y == nil && x.Op == syntax.Recv {
+				if x, _ := syntax.Unparen(rhs).(*syntax.Operation); x != nil && x.Y == nil && x.Op == syntax.Recv {
 					valid = true
 				}
 			}
@@ -718,7 +715,7 @@
 		} else {
 			inner |= finalSwitchCase
 		}
-		check.caseValues(&x, unpackExpr(clause.Cases), seen)
+		check.caseValues(&x, syntax.UnpackListExpr(clause.Cases), seen)
 		check.openScopeUntil(clause, end, "case")
 		check.stmtList(inner, clause.Body)
 		check.closeScope()
@@ -778,7 +775,7 @@
 			end = s.Body[i+1].Pos()
 		}
 		// Check each type in this type switch case.
-		cases := unpackExpr(clause.Cases)
+		cases := syntax.UnpackListExpr(clause.Cases)
 		T := check.caseTypes(sx, cases, seen)
 		check.openScopeUntil(clause, end, "case")
 		// If lhs exists, declare a corresponding variable in the case-local scope.
@@ -828,7 +825,10 @@
 }
 
 func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *syntax.RangeClause) {
-	// determine lhs, if any
+	// Convert syntax form to local variables.
+	type Expr = syntax.Expr
+	type identType = syntax.Name
+	identName := func(n *identType) string { return n.Value }
 	sKey := rclause.Lhs // possibly nil
 	var sValue, sExtra syntax.Expr
 	if p, _ := sKey.(*syntax.ListExpr); p != nil {
@@ -844,43 +844,50 @@
 			sExtra = p.ElemList[2]
 		}
 	}
+	isDef := rclause.Def
+	rangeVar := rclause.X
+	noNewVarPos := s
+
+	// Do not use rclause anymore.
+	rclause = nil
+
+	// Everything from here on is shared between cmd/compile/internal/types2 and go/types.
 
 	// check expression to iterate over
 	var x operand
-	check.expr(nil, &x, rclause.X)
+	check.expr(nil, &x, rangeVar)
 
 	// determine key/value types
 	var key, val Type
 	if x.mode != invalid {
 		// Ranging over a type parameter is permitted if it has a core type.
-		var cause string
-		u := coreType(x.typ)
-		if t, _ := u.(*Chan); t != nil {
-			if sValue != nil {
-				check.softErrorf(sValue, InvalidIterVar, "range over %s permits only one iteration variable", &x)
-				// ok to continue
+		k, v, cause, isFunc, ok := rangeKeyVal(x.typ, func(v goVersion) bool {
+			return check.allowVersion(check.pkg, x.expr, v)
+		})
+		switch {
+		case !ok && cause != "":
+			check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s: %s", &x, cause)
+		case !ok:
+			check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s", &x)
+		case k == nil && sKey != nil:
+			check.softErrorf(sKey, InvalidIterVar, "range over %s permits no iteration variables", &x)
+		case v == nil && sValue != nil:
+			check.softErrorf(sValue, InvalidIterVar, "range over %s permits only one iteration variable", &x)
+		case sExtra != nil:
+			check.softErrorf(sExtra, InvalidIterVar, "range clause permits at most two iteration variables")
+		case isFunc && ((k == nil) != (sKey == nil) || (v == nil) != (sValue == nil)):
+			var count string
+			switch {
+			case k == nil:
+				count = "no iteration variables"
+			case v == nil:
+				count = "one iteration variable"
+			default:
+				count = "two iteration variables"
 			}
-			if t.dir == SendOnly {
-				cause = "receive from send-only channel"
-			}
-		} else {
-			if sExtra != nil {
-				check.softErrorf(sExtra, InvalidIterVar, "range clause permits at most two iteration variables")
-				// ok to continue
-			}
-			if u == nil {
-				cause = check.sprintf("%s has no core type", x.typ)
-			}
+			check.softErrorf(&x, InvalidIterVar, "range over %s must have %s", &x, count)
 		}
-		key, val = rangeKeyVal(u)
-		if key == nil || cause != "" {
-			if cause == "" {
-				check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s", &x)
-			} else {
-				check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s (%s)", &x, cause)
-			}
-			// ok to continue
-		}
+		key, val = k, v
 	}
 
 	// Open the for-statement block scope now, after the range clause.
@@ -892,10 +899,12 @@
 	// (irregular assignment, cannot easily map to existing assignment checks)
 
 	// lhs expressions and initialization value (rhs) types
-	lhs := [2]syntax.Expr{sKey, sValue}
-	rhs := [2]Type{key, val} // key, val may be nil
+	lhs := [2]Expr{sKey, sValue} // sKey, sValue may be nil
+	rhs := [2]Type{key, val}     // key, val may be nil
 
-	if rclause.Def {
+	constIntRange := x.mode == constant_ && isInteger(x.typ)
+
+	if isDef {
 		// short variable declaration
 		var vars []*Var
 		for i, lhs := range lhs {
@@ -905,9 +914,9 @@
 
 			// determine lhs variable
 			var obj *Var
-			if ident, _ := lhs.(*syntax.Name); ident != nil {
+			if ident, _ := lhs.(*identType); ident != nil {
 				// declare new variable
-				name := ident.Value
+				name := identName(ident)
 				obj = NewVar(ident.Pos(), check.pkg, name, nil)
 				check.recordDef(ident, obj)
 				// _ variables don't count as new variables
@@ -920,11 +929,13 @@
 			}
 
 			// initialize lhs variable
-			if typ := rhs[i]; typ != nil {
+			if constIntRange {
+				check.initVar(obj, &x, "range clause")
+			} else if typ := rhs[i]; typ != nil {
 				x.mode = value
 				x.expr = lhs // we don't have a better rhs expression to use here
 				x.typ = typ
-				check.initVar(obj, &x, "range clause")
+				check.initVar(obj, &x, "assignment") // error is on variable, use "assignment" not "range clause"
 			} else {
 				obj.typ = Typ[Invalid]
 				obj.used = true // don't complain about unused variable
@@ -938,43 +949,111 @@
 				check.declare(check.scope, nil /* recordDef already called */, obj, scopePos)
 			}
 		} else {
-			check.error(s, NoNewVar, "no new variables on left side of :=")
+			check.error(noNewVarPos, NoNewVar, "no new variables on left side of :=")
 		}
-	} else {
+	} else if sKey != nil /* lhs[0] != nil */ {
 		// ordinary assignment
 		for i, lhs := range lhs {
 			if lhs == nil {
 				continue
 			}
-			if typ := rhs[i]; typ != nil {
+
+			if constIntRange {
+				check.assignVar(lhs, nil, &x, "range clause")
+			} else if typ := rhs[i]; typ != nil {
 				x.mode = value
 				x.expr = lhs // we don't have a better rhs expression to use here
 				x.typ = typ
-				check.assignVar(lhs, nil, &x)
+				check.assignVar(lhs, nil, &x, "assignment") // error is on variable, use "assignment" not "range clause"
 			}
 		}
+	} else if constIntRange {
+		// If we don't have any iteration variables, we still need to
+		// check that a (possibly untyped) integer range expression x
+		// is valid.
+		// We do this by checking the assignment _ = x. This ensures
+		// that an untyped x can be converted to a value of type int.
+		check.assignment(&x, nil, "range clause")
 	}
 
 	check.stmt(inner, s.Body)
 }
 
+// RangeKeyVal returns the key and value types for a range over typ.
+// Exported for use by the compiler (does not exist in go/types).
+func RangeKeyVal(typ Type) (Type, Type) {
+	key, val, _, _, _ := rangeKeyVal(typ, nil)
+	return key, val
+}
+
 // rangeKeyVal returns the key and value type produced by a range clause
-// over an expression of type typ. If the range clause is not permitted
-// the results are nil.
-func rangeKeyVal(typ Type) (key, val Type) {
-	switch typ := arrayPtrDeref(typ).(type) {
+// over an expression of type typ.
+// If allowVersion != nil, it is used to check the required language version.
+// If the range clause is not permitted, rangeKeyVal returns ok = false.
+// When ok = false, rangeKeyVal may also return a reason in cause.
+func rangeKeyVal(typ Type, allowVersion func(goVersion) bool) (key, val Type, cause string, isFunc, ok bool) {
+	bad := func(cause string) (Type, Type, string, bool, bool) {
+		return Typ[Invalid], Typ[Invalid], cause, false, false
+	}
+	toSig := func(t Type) *Signature {
+		sig, _ := coreType(t).(*Signature)
+		return sig
+	}
+
+	orig := typ
+	switch typ := arrayPtrDeref(coreType(typ)).(type) {
+	case nil:
+		return bad("no core type")
 	case *Basic:
 		if isString(typ) {
-			return Typ[Int], universeRune // use 'rune' name
+			return Typ[Int], universeRune, "", false, true // use 'rune' name
+		}
+		if isInteger(typ) {
+			if allowVersion != nil && !allowVersion(go1_22) {
+				return bad("requires go1.22 or later")
+			}
+			return orig, nil, "", false, true
 		}
 	case *Array:
-		return Typ[Int], typ.elem
+		return Typ[Int], typ.elem, "", false, true
 	case *Slice:
-		return Typ[Int], typ.elem
+		return Typ[Int], typ.elem, "", false, true
 	case *Map:
-		return typ.key, typ.elem
+		return typ.key, typ.elem, "", false, true
 	case *Chan:
-		return typ.elem, Typ[Invalid]
+		if typ.dir == SendOnly {
+			return bad("receive from send-only channel")
+		}
+		return typ.elem, nil, "", false, true
+	case *Signature:
+		// TODO(gri) when this becomes enabled permanently, add version check
+		if !buildcfg.Experiment.RangeFunc {
+			break
+		}
+		assert(typ.Recv() == nil)
+		switch {
+		case typ.Params().Len() != 1:
+			return bad("func must be func(yield func(...) bool): wrong argument count")
+		case toSig(typ.Params().At(0).Type()) == nil:
+			return bad("func must be func(yield func(...) bool): argument is not func")
+		case typ.Results().Len() != 0:
+			return bad("func must be func(yield func(...) bool): unexpected results")
+		}
+		cb := toSig(typ.Params().At(0).Type())
+		assert(cb.Recv() == nil)
+		switch {
+		case cb.Params().Len() > 2:
+			return bad("func must be func(yield func(...) bool): yield func has too many parameters")
+		case cb.Results().Len() != 1 || !isBoolean(cb.Results().At(0).Type()):
+			return bad("func must be func(yield func(...) bool): yield func does not return bool")
+		}
+		if cb.Params().Len() >= 1 {
+			key = cb.Params().At(0).Type()
+		}
+		if cb.Params().Len() >= 2 {
+			val = cb.Params().At(1).Type()
+		}
+		return key, val, "", true, true
 	}
 	return
 }
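
The rewritten rangeKeyVal above admits two new range forms: ranging over an integer (gated on go1.22) and ranging over a func(yield func(...) bool), which at this point is still behind GOEXPERIMENT=rangefunc. A hedged sketch of what those forms look like in source:

	package main

	import "fmt"

	// pairs is a range-over-func iterator: yield receives the key and value,
	// and returning false from yield stops the iteration early.
	func pairs(yield func(int, string) bool) {
		for i, s := range []string{"a", "b", "c"} {
			if !yield(i, s) {
				return
			}
		}
	}

	func main() {
		// Range over an integer: i takes the values 0 through 4.
		for i := range 5 {
			fmt.Println(i)
		}

		// Range over a function: the iteration variables correspond to
		// yield's parameters, matching the signature checks above.
		for k, v := range pairs {
			fmt.Println(k, v)
		}
	}
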
diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go
index 125e946..9e46b34 100644
--- a/src/cmd/compile/internal/types2/struct.go
+++ b/src/cmd/compile/internal/types2/struct.go
@@ -147,7 +147,7 @@
 				t, isPtr := deref(embeddedTyp)
 				switch u := under(t).(type) {
 				case *Basic:
-					if t == Typ[Invalid] {
+					if !isValid(t) {
 						// error was reported before
 						return
 					}
diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go
index 74d6294..09dc585 100644
--- a/src/cmd/compile/internal/types2/subst.go
+++ b/src/cmd/compile/internal/types2/subst.go
@@ -169,7 +169,9 @@
 		if mcopied || ecopied {
 			iface := subst.check.newInterface()
 			iface.embeddeds = embeddeds
+			iface.embedPos = t.embedPos
 			iface.implicit = t.implicit
+			assert(t.complete) // otherwise we are copying incomplete data
 			iface.complete = t.complete
 			// If we've changed the interface type, we may need to replace its
 			// receiver if the receiver type is the original interface. Receivers of
@@ -185,6 +187,11 @@
 			// need to create new interface methods to hold the instantiated
 			// receiver. This is handled by Named.expandUnderlying.
 			iface.methods, _ = replaceRecvType(methods, t, iface)
+
+			// If check != nil, check.newInterface will have saved the interface for later completion.
+			if subst.check == nil { // golang/go#61561: all newly created interfaces must be completed
+				iface.typeSet()
+			}
 			return iface
 		}
 
diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go
index aebbec2..5c6030b 100644
--- a/src/cmd/compile/internal/types2/typeparam.go
+++ b/src/cmd/compile/internal/types2/typeparam.go
@@ -9,11 +9,11 @@
 // Note: This is a uint32 rather than a uint64 because the
 // respective 64 bit atomic instructions are not available
 // on all platforms.
-var lastID uint32
+var lastID atomic.Uint32
 
 // nextID returns a value increasing monotonically by 1 with
 // each call, starting with 1. It may be called concurrently.
-func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }
+func nextID() uint64 { return uint64(lastID.Add(1)) }
 
 // A TypeParam represents a type parameter type.
 type TypeParam struct {
@@ -108,7 +108,7 @@
 	var ityp *Interface
 	switch u := under(bound).(type) {
 	case *Basic:
-		if u == Typ[Invalid] {
+		if !isValid(u) {
 			// error is reported elsewhere
 			return &emptyInterface
 		}
@@ -132,7 +132,7 @@
 		// pos is used for tracing output; start with the type parameter position.
 		pos := t.obj.pos
 		// use the (original or possibly instantiated) type bound position if we have one
-		if n, _ := bound.(*Named); n != nil {
+		if n := asNamed(bound); n != nil {
 			pos = n.obj.pos
 		}
 		computeInterfaceTypeSet(t.check, pos, ityp)
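
The ID counter now uses the typed sync/atomic wrapper instead of the function-style AddUint32; the same idiom in isolation:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	var lastID atomic.Uint32

	// nextID returns monotonically increasing IDs starting at 1 and is
	// safe for concurrent use, mirroring the pattern above.
	func nextID() uint64 { return uint64(lastID.Add(1)) }

	func main() {
		fmt.Println(nextID(), nextID(), nextID()) // 1 2 3
	}
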
diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go
index 70b9e36..a6ccfdb 100644
--- a/src/cmd/compile/internal/types2/typeset.go
+++ b/src/cmd/compile/internal/types2/typeset.go
@@ -290,7 +290,7 @@
 			assert(len(tset.methods) == 0)
 			terms = tset.terms
 		default:
-			if u == Typ[Invalid] {
+			if !isValid(u) {
 				continue
 			}
 			if check != nil && !check.verifyVersionf(pos, go1_18, "embedding non-interface type %s", typ) {
@@ -304,7 +304,6 @@
 		// separately. Here we only need to intersect the term lists and comparable bits.
 		allTerms, allComparable = intersectTermLists(allTerms, allComparable, terms, comparable)
 	}
-	ityp.embedPos = nil // not needed anymore (errors have been reported)
 
 	ityp.tset.comparable = allComparable
 	if len(allMethods) != 0 {
@@ -389,7 +388,7 @@
 			// For now we don't permit type parameters as constraints.
 			assert(!isTypeParam(t.typ))
 			terms = computeInterfaceTypeSet(check, pos, ui).terms
-		} else if u == Typ[Invalid] {
+		} else if !isValid(u) {
 			continue
 		} else {
 			if t.tilde && !Identical(t.typ, u) {
diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go
index 2f4fb52..4b410af 100644
--- a/src/cmd/compile/internal/types2/typestring.go
+++ b/src/cmd/compile/internal/types2/typestring.go
@@ -218,7 +218,7 @@
 				w.string("any")
 				break
 			}
-			if t == universeComparable.Type().(*Named).underlying {
+			if t == asNamed(universeComparable.Type()).underlying {
 				w.string("interface{comparable}")
 				break
 			}
@@ -322,10 +322,17 @@
 			// error messages. This doesn't need to be super-elegant; we just
 			// need a clear indication that this is not a predeclared name.
 			if w.ctxt == nil && Universe.Lookup(t.obj.name) != nil {
-				w.string(sprintf(nil, false, " /* with %s declared at %s */", t.obj.name, t.obj.Pos()))
+				w.string(fmt.Sprintf(" /* with %s declared at %s */", t.obj.name, t.obj.Pos()))
 			}
 		}
 
+	case *Alias:
+		w.typeName(t.obj)
+		if w.ctxt != nil {
+			// TODO(gri) do we need to print the alias type name, too?
+			w.typ(Unalias(t.obj.typ))
+		}
+
 	default:
 		// For externally defined implementations of Type.
 		// Note: In this case cycles won't be caught.
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
index ca717fe..81adcbd 100644
--- a/src/cmd/compile/internal/types2/typexpr.go
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -18,7 +18,7 @@
 // If an error occurred, x.mode is set to invalid.
 // For the meaning of def, see Checker.definedType, below.
 // If wantType is set, the identifier e is expected to denote a type.
-func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType bool) {
+func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType bool) {
 	x.mode = invalid
 	x.expr = e
 
@@ -78,7 +78,7 @@
 
 	case *Const:
 		check.addDeclDep(obj)
-		if typ == Typ[Invalid] {
+		if !isValid(typ) {
 			return
 		}
 		if obj == universeIota {
@@ -94,7 +94,7 @@
 		x.mode = constant_
 
 	case *TypeName:
-		if check.isBrokenAlias(obj) {
+		if !check.enableAlias && check.isBrokenAlias(obj) {
 			check.errorf(e, InvalidDeclCycle, "invalid use of type alias %s in recursive type (see go.dev/issue/50729)", obj.name)
 			return
 		}
@@ -108,7 +108,7 @@
 			obj.used = true
 		}
 		check.addDeclDep(obj)
-		if typ == Typ[Invalid] {
+		if !isValid(typ) {
 			return
 		}
 		x.mode = variable
@@ -173,10 +173,10 @@
 }
 
 // definedType is like typ but also accepts a type name def.
-// If def != nil, e is the type specification for the defined type def, declared
-// in a type declaration, and def.underlying will be set to the type of e before
-// any components of e are type-checked.
-func (check *Checker) definedType(e syntax.Expr, def *Named) Type {
+// If def != nil, e is the type specification for the type named def, declared
+// in a type declaration, and def.typ.underlying will be set to the type of e
+// before any components of e are type-checked.
+func (check *Checker) definedType(e syntax.Expr, def *TypeName) Type {
 	typ := check.typInternal(e, def)
 	assert(isTyped(typ))
 	if isGeneric(typ) {
@@ -193,7 +193,7 @@
 func (check *Checker) genericType(e syntax.Expr, cause *string) Type {
 	typ := check.typInternal(e, nil)
 	assert(isTyped(typ))
-	if typ != Typ[Invalid] && !isGeneric(typ) {
+	if isValid(typ) && !isGeneric(typ) {
 		if cause != nil {
 			*cause = check.sprintf("%s is not a generic type", typ)
 		}
@@ -207,12 +207,12 @@
 // goTypeName returns the Go type name for typ and
 // removes any occurrences of "types2." from that name.
 func goTypeName(typ Type) string {
-	return strings.Replace(fmt.Sprintf("%T", typ), "types2.", "", -1) // strings.ReplaceAll is not available in Go 1.4
+	return strings.ReplaceAll(fmt.Sprintf("%T", typ), "types2.", "")
 }
 
 // typInternal drives type checking of types.
 // Must only be called by definedType or genericType.
-func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
+func (check *Checker) typInternal(e0 syntax.Expr, def *TypeName) (T Type) {
 	if check.conf.Trace {
 		check.trace(e0.Pos(), "-- type %s", e0)
 		check.indent++
@@ -243,7 +243,7 @@
 		switch x.mode {
 		case typexpr:
 			typ := x.typ
-			def.setUnderlying(typ)
+			setDefType(def, typ)
 			return typ
 		case invalid:
 			// ignore - error reported before
@@ -260,7 +260,7 @@
 		switch x.mode {
 		case typexpr:
 			typ := x.typ
-			def.setUnderlying(typ)
+			setDefType(def, typ)
 			return typ
 		case invalid:
 			// ignore - error reported before
@@ -272,7 +272,7 @@
 
 	case *syntax.IndexExpr:
 		check.verifyVersionf(e, go1_18, "type instantiation")
-		return check.instantiatedType(e.X, unpackExpr(e.Index), def)
+		return check.instantiatedType(e.X, syntax.UnpackListExpr(e.Index), def)
 
 	case *syntax.ParenExpr:
 		// Generic types must be instantiated before they can be used in any form.
@@ -281,7 +281,7 @@
 
 	case *syntax.ArrayType:
 		typ := new(Array)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 		if e.Len != nil {
 			typ.len = check.arrayLength(e.Len)
 		} else {
@@ -297,7 +297,7 @@
 
 	case *syntax.SliceType:
 		typ := new(Slice)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 		typ.elem = check.varType(e.Elem)
 		return typ
 
@@ -309,7 +309,7 @@
 
 	case *syntax.StructType:
 		typ := new(Struct)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 		check.structType(typ, e)
 		return typ
 
@@ -317,13 +317,13 @@
 		if e.Op == syntax.Mul && e.Y == nil {
 			typ := new(Pointer)
 			typ.base = Typ[Invalid] // avoid nil base in invalid recursive type declaration
-			def.setUnderlying(typ)
+			setDefType(def, typ)
 			typ.base = check.varType(e.X)
 			// If typ.base is invalid, it's unlikely that *base is particularly
 			// useful - even a valid dereferenciation will lead to an invalid
 			// type again, and in some cases we get unexpected follow-on errors
 			// (e.g., go.dev/issue/49005). Return an invalid type instead.
-			if typ.base == Typ[Invalid] {
+			if !isValid(typ.base) {
 				return Typ[Invalid]
 			}
 			return typ
@@ -334,19 +334,19 @@
 
 	case *syntax.FuncType:
 		typ := new(Signature)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 		check.funcType(typ, nil, nil, e)
 		return typ
 
 	case *syntax.InterfaceType:
 		typ := check.newInterface()
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 		check.interfaceType(typ, e, def)
 		return typ
 
 	case *syntax.MapType:
 		typ := new(Map)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 
 		typ.key = check.varType(e.Key)
 		typ.elem = check.varType(e.Value)
@@ -371,7 +371,7 @@
 
 	case *syntax.ChanType:
 		typ := new(Chan)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 
 		dir := SendRecv
 		switch e.Dir {
@@ -396,11 +396,31 @@
 	}
 
 	typ := Typ[Invalid]
-	def.setUnderlying(typ)
+	setDefType(def, typ)
 	return typ
 }
 
-func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *Named) (res Type) {
+func setDefType(def *TypeName, typ Type) {
+	if def != nil {
+		switch t := def.typ.(type) {
+		case *Alias:
+			// t.fromRHS should always be set, either to an invalid type
+			// in the beginning, or to typ in certain cyclic declarations.
+			if t.fromRHS != Typ[Invalid] && t.fromRHS != typ {
+				panic(sprintf(nil, true, "t.fromRHS = %s, typ = %s\n", t.fromRHS, typ))
+			}
+			t.fromRHS = typ
+		case *Basic:
+			assert(t == Typ[Invalid])
+		case *Named:
+			t.underlying = typ
+		default:
+			panic(fmt.Sprintf("unexpected type %T", t))
+		}
+	}
+}
+
+func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *TypeName) (res Type) {
 	if check.conf.Trace {
 		check.trace(x.Pos(), "-- instantiating type %s with %s", x, xlist)
 		check.indent++
@@ -416,11 +436,11 @@
 	if cause != "" {
 		check.errorf(x, NotAGenericType, invalidOp+"%s%s (%s)", x, xlist, cause)
 	}
-	if gtyp == Typ[Invalid] {
+	if !isValid(gtyp) {
 		return gtyp // error already reported
 	}
 
-	orig, _ := gtyp.(*Named)
+	orig := asNamed(gtyp)
 	if orig == nil {
 		panic(fmt.Sprintf("%v: cannot instantiate %v", x.Pos(), gtyp))
 	}
@@ -428,13 +448,13 @@
 	// evaluate arguments
 	targs := check.typeList(xlist)
 	if targs == nil {
-		def.setUnderlying(Typ[Invalid]) // avoid errors later due to lazy instantiation
+		setDefType(def, Typ[Invalid]) // avoid errors later due to lazy instantiation
 		return Typ[Invalid]
 	}
 
 	// create the instance
-	inst := check.instance(x.Pos(), orig, targs, nil, check.context()).(*Named)
-	def.setUnderlying(inst)
+	inst := asNamed(check.instance(x.Pos(), orig, targs, nil, check.context()))
+	setDefType(def, inst)
 
 	// orig.tparams may not be set up, so we need to do expansion later.
 	check.later(func() {
@@ -443,7 +463,7 @@
 		// errors.
 		check.recordInstance(x, inst.TypeArgs().list(), inst)
 
-		if check.validateTArgLen(x.Pos(), inst.TypeParams().Len(), inst.TypeArgs().Len()) {
+		if check.validateTArgLen(x.Pos(), inst.obj.name, inst.TypeParams().Len(), inst.TypeArgs().Len()) {
 			if i, err := check.verify(x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), check.context()); err != nil {
 				// best position for error reporting
 				pos := x.Pos()
@@ -520,7 +540,7 @@
 	res := make([]Type, len(list)) // res != nil even if len(list) == 0
 	for i, x := range list {
 		t := check.varType(x)
-		if t == Typ[Invalid] {
+		if !isValid(t) {
 			res = nil
 		}
 		if res != nil {
diff --git a/src/cmd/compile/internal/types2/under.go b/src/cmd/compile/internal/types2/under.go
index 887f781..6b24399 100644
--- a/src/cmd/compile/internal/types2/under.go
+++ b/src/cmd/compile/internal/types2/under.go
@@ -9,7 +9,7 @@
 // under must only be called when a type is known
 // to be fully set up.
 func under(t Type) Type {
-	if t, _ := t.(*Named); t != nil {
+	if t := asNamed(t); t != nil {
 		return t.under()
 	}
 	return t.Underlying()
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
index 4e9c771..8218939 100644
--- a/src/cmd/compile/internal/types2/unify.go
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -291,6 +291,9 @@
 		u.depth--
 	}()
 
+	x = Unalias(x)
+	y = Unalias(y)
+
 	// nothing to do if x == y
 	if x == y {
 		return true
@@ -311,7 +314,7 @@
 	// Ensure that if we have at least one
 	// - defined type, make sure one is in y
 	// - type parameter recorded with u, make sure one is in x
-	if _, ok := x.(*Named); ok || u.asTypeParam(y) != nil {
+	if asNamed(x) != nil || u.asTypeParam(y) != nil {
 		if traceInference {
 			u.tracef("%s ≡ %s\t// swap", y, x)
 		}
@@ -335,7 +338,7 @@
 	// we will fail at function instantiation or argument assignment time.
 	//
 	// If we have at least one defined type, there is one in y.
-	if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
+	if ny := asNamed(y); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
 		if traceInference {
 			u.tracef("%s ≡ under %s", x, ny)
 		}
@@ -372,8 +375,8 @@
 				// We have a match, possibly through underlying types.
 				xi := asInterface(x)
 				yi := asInterface(y)
-				_, xn := x.(*Named)
-				_, yn := y.(*Named)
+				xn := asNamed(x) != nil
+				yn := asNamed(y) != nil
 				// If we have two interfaces, what to do depends on
 				// whether they are named and their method sets.
 				if xi != nil && yi != nil {
@@ -448,13 +451,6 @@
 	// x != y if we get here
 	assert(x != y)
 
-	// Type elements (array, slice, etc. elements) use emode for unification.
-	// Element types must match exactly if the types are used in an assignment.
-	emode := mode
-	if mode&assign != 0 {
-		emode |= exact
-	}
-
 	// If u.EnableInterfaceInference is set and we don't require exact unification,
 	// if both types are interfaces, one interface must have a subset of the
 	// methods of the other and corresponding method signatures must unify.
@@ -570,6 +566,13 @@
 		x, y = y, x
 	}
 
+	// Type elements (array, slice, etc. elements) use emode for unification.
+	// Element types must match exactly if the types are used in an assignment.
+	emode := mode
+	if mode&assign != 0 {
+		emode |= exact
+	}
+
 	switch x := x.(type) {
 	case *Basic:
 		// Basic types are singletons except for the rune and byte
@@ -728,7 +731,7 @@
 	case *Named:
 		// Two named types unify if their type names originate in the same type declaration.
 		// If they are instantiated, their type argument lists must unify.
-		if y, ok := y.(*Named); ok {
+		if y := asNamed(y); y != nil {
 			// Check type arguments before origins so they unify
 			// even if the origins don't match; for better error
 			// messages (see go.dev/issue/53692).
@@ -742,7 +745,7 @@
 					return false
 				}
 			}
-			return indenticalOrigin(x, y)
+			return identicalOrigin(x, y)
 		}
 
 	case *TypeParam:
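
Unaliasing x and y up front means unification (and thus type inference) sees through alias declarations. At the source level, inference treats an aliased slice type like the type it denotes; a small hedged example:

	package main

	import "fmt"

	type Bytes = []byte // alias

	func first[T any](s []T) T { return s[0] }

	func main() {
		var b Bytes = []byte{1, 2, 3}
		// Bytes unifies with []T after unaliasing, so T is inferred as byte.
		fmt.Println(first(b)) // 1
	}
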
diff --git a/src/cmd/compile/internal/types2/union.go b/src/cmd/compile/internal/types2/union.go
index 8f354a7..1bf4353 100644
--- a/src/cmd/compile/internal/types2/union.go
+++ b/src/cmd/compile/internal/types2/union.go
@@ -66,7 +66,7 @@
 			return term.typ // typ already recorded through check.typ in parseTilde
 		}
 		if len(terms) >= maxTermCount {
-			if u != Typ[Invalid] {
+			if isValid(u) {
 				check.errorf(x, InvalidUnion, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
 				u = Typ[Invalid]
 			}
@@ -80,7 +80,7 @@
 		}
 	}
 
-	if u == Typ[Invalid] {
+	if !isValid(u) {
 		return u
 	}
 
@@ -89,7 +89,7 @@
 	// Note: This is a quadratic algorithm, but unions tend to be short.
 	check.later(func() {
 		for i, t := range terms {
-			if t.typ == Typ[Invalid] {
+			if !isValid(t.typ) {
 				continue
 			}
 
diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go
index 79cd8cb..c8be81b 100644
--- a/src/cmd/compile/internal/types2/universe.go
+++ b/src/cmd/compile/internal/types2/universe.go
@@ -265,7 +265,7 @@
 		return // nothing to do
 	}
 	// fix Obj link for named types
-	if typ, _ := obj.Type().(*Named); typ != nil {
+	if typ := asNamed(obj.Type()); typ != nil {
 		typ.obj = obj.(*TypeName)
 	}
 	// exported identifiers go into package unsafe
diff --git a/src/cmd/compile/internal/types2/util_test.go b/src/cmd/compile/internal/types2/util_test.go
index 4cbd002..70058aa 100644
--- a/src/cmd/compile/internal/types2/util_test.go
+++ b/src/cmd/compile/internal/types2/util_test.go
@@ -7,6 +7,11 @@
 
 package types2
 
-import "cmd/compile/internal/syntax"
+import (
+	"cmd/compile/internal/syntax"
+)
 
 func CmpPos(p, q syntax.Pos) int { return cmpPos(p, q) }
+
+func ScopeComment(s *Scope) string         { return s.comment }
+func ObjectScopePos(obj Object) syntax.Pos { return obj.scopePos() }
diff --git a/src/cmd/compile/internal/types2/validtype.go b/src/cmd/compile/internal/types2/validtype.go
index dbe91dc..a880a3d 100644
--- a/src/cmd/compile/internal/types2/validtype.go
+++ b/src/cmd/compile/internal/types2/validtype.go
@@ -23,7 +23,7 @@
 // (say S->F->S) we have an invalid recursive type. The path list is the full
 // path of named types in a cycle, it is only needed for error reporting.
 func (check *Checker) validType0(typ Type, nest, path []*Named) bool {
-	switch t := typ.(type) {
+	switch t := Unalias(typ).(type) {
 	case nil:
 		// We should never see a nil type but be conservative and panic
 		// only in debug mode.
@@ -68,7 +68,7 @@
 		// Don't report a 2nd error if we already know the type is invalid
 		// (e.g., if a cycle was detected earlier, via under).
 		// Note: ensure that t.orig is fully resolved by calling Underlying().
-		if t.Underlying() == Typ[Invalid] {
+		if !isValid(t.Underlying()) {
 			return false
 		}
 
diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go
index e525f16..5aa3c80 100644
--- a/src/cmd/compile/internal/types2/version.go
+++ b/src/cmd/compile/internal/types2/version.go
@@ -7,90 +7,47 @@
 import (
 	"cmd/compile/internal/syntax"
 	"fmt"
+	"go/version"
+	"internal/goversion"
 	"strings"
 )
 
-// A version represents a released Go version.
-type version struct {
-	major, minor int
+// A goVersion is a Go language version string of the form "go1.%d"
+// where d is the minor version number. goVersion strings don't
+// contain release numbers ("go1.20.1" is not a valid goVersion).
+type goVersion string
+
+// asGoVersion returns v as a goVersion (e.g., "go1.20.1" becomes "go1.20").
+// If v is not a valid Go version, the result is the empty string.
+func asGoVersion(v string) goVersion {
+	return goVersion(version.Lang(v))
 }
 
-func (v version) String() string {
-	return fmt.Sprintf("go%d.%d", v.major, v.minor)
+// isValid reports whether v is a valid Go version.
+func (v goVersion) isValid() bool {
+	return v != ""
 }
 
-func (v version) equal(u version) bool {
-	return v.major == u.major && v.minor == u.minor
+// cmp returns -1, 0, or +1 depending on whether x < y, x == y, or x > y,
+// interpreted as Go versions.
+func (x goVersion) cmp(y goVersion) int {
+	return version.Compare(string(x), string(y))
 }
 
-func (v version) before(u version) bool {
-	return v.major < u.major || v.major == u.major && v.minor < u.minor
-}
-
-func (v version) after(u version) bool {
-	return v.major > u.major || v.major == u.major && v.minor > u.minor
-}
-
-// Go versions that introduced language changes.
 var (
-	go0_0  = version{0, 0} // no version specified
-	go1_9  = version{1, 9}
-	go1_13 = version{1, 13}
-	go1_14 = version{1, 14}
-	go1_17 = version{1, 17}
-	go1_18 = version{1, 18}
-	go1_20 = version{1, 20}
-	go1_21 = version{1, 21}
-)
+	// Go versions that introduced language changes
+	go1_9  = asGoVersion("go1.9")
+	go1_13 = asGoVersion("go1.13")
+	go1_14 = asGoVersion("go1.14")
+	go1_17 = asGoVersion("go1.17")
+	go1_18 = asGoVersion("go1.18")
+	go1_20 = asGoVersion("go1.20")
+	go1_21 = asGoVersion("go1.21")
+	go1_22 = asGoVersion("go1.22")
 
-// parseGoVersion parses a Go version string (such as "go1.12")
-// and returns the version, or an error. If s is the empty
-// string, the version is 0.0.
-func parseGoVersion(s string) (v version, err error) {
-	bad := func() (version, error) {
-		return version{}, fmt.Errorf("invalid Go version syntax %q", s)
-	}
-	if s == "" {
-		return
-	}
-	if !strings.HasPrefix(s, "go") {
-		return bad()
-	}
-	s = s[len("go"):]
-	i := 0
-	for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
-		if i >= 10 || i == 0 && s[i] == '0' {
-			return bad()
-		}
-		v.major = 10*v.major + int(s[i]) - '0'
-	}
-	if i > 0 && i == len(s) {
-		return
-	}
-	if i == 0 || s[i] != '.' {
-		return bad()
-	}
-	s = s[i+1:]
-	if s == "0" {
-		// We really should not accept "go1.0",
-		// but we didn't reject it from the start
-		// and there are now programs that use it.
-		// So accept it.
-		return
-	}
-	i = 0
-	for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
-		if i >= 10 || i == 0 && s[i] == '0' {
-			return bad()
-		}
-		v.minor = 10*v.minor + int(s[i]) - '0'
-	}
-	// Accept any suffix after the minor number.
-	// We are only looking for the language version (major.minor)
-	// but want to accept any valid Go version, like go1.21.0
-	// and go1.21rc2.
-	return
-}
+	// current (deployed) Go version
+	go_current = asGoVersion(fmt.Sprintf("go1.%d", goversion.Version))
+)
 
 // langCompat reports an error if the representation of a numeric
 // literal is not compatible with the current language version.
@@ -121,30 +78,30 @@
 	}
 }
 
-// allowVersion reports whether the given package
-// is allowed to use version major.minor.
-func (check *Checker) allowVersion(pkg *Package, at poser, v version) bool {
+// allowVersion reports whether the given package is allowed to use version v.
+func (check *Checker) allowVersion(pkg *Package, at poser, v goVersion) bool {
 	// We assume that imported packages have all been checked,
 	// so we only have to check for the local package.
 	if pkg != check.pkg {
 		return true
 	}
 
-	// If the source file declares its Go version, use that to decide.
-	if check.posVers != nil {
-		if src, ok := check.posVers[base(at.Pos())]; ok && src.major >= 1 {
-			return !src.before(v)
-		}
+	// If no explicit file version is specified,
+	// fileVersion corresponds to the module version.
+	var fileVersion goVersion
+	if pos := at.Pos(); pos.IsKnown() {
+		// We need version.Lang below because file versions
+		// can be (unaltered) Config.GoVersion strings that
+		// may contain dot-release information.
+		fileVersion = asGoVersion(check.versions[base(pos)])
 	}
-
-	// Otherwise fall back to the version in the checker.
-	return check.version.equal(go0_0) || !check.version.before(v)
+	return !fileVersion.isValid() || fileVersion.cmp(v) >= 0
 }
 
 // verifyVersionf is like allowVersion but also accepts a format string and arguments
 // which are used to report a version error if allowVersion returns false. It uses the
 // current package.
-func (check *Checker) verifyVersionf(at poser, v version, format string, args ...interface{}) bool {
+func (check *Checker) verifyVersionf(at poser, v goVersion, format string, args ...interface{}) bool {
 	if !check.allowVersion(check.pkg, at, v) {
 		check.versionErrorf(at, v, format, args...)
 		return false
@@ -154,7 +111,9 @@
 
 // base finds the underlying PosBase of the source file containing pos,
 // skipping over intermediate PosBase layers created by //line directives.
+// The positions must be known.
 func base(pos syntax.Pos) *syntax.PosBase {
+	assert(pos.IsKnown())
 	b := pos.Base()
 	for {
 		bb := b.Pos().Base()
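
The new goVersion helpers delegate to the go/version package added in Go 1.22 rather than hand-rolled parsing. A brief example of the two calls used above:

	package main

	import (
		"fmt"
		"go/version"
	)

	func main() {
		// Lang strips release and prerelease suffixes, as asGoVersion does.
		fmt.Println(version.Lang("go1.21.0"))      // go1.21
		fmt.Println(version.Lang("go1.21rc2"))     // go1.21
		fmt.Println(version.Lang("not-a-version")) // "" (invalid)

		// Compare orders language versions, as goVersion.cmp does.
		fmt.Println(version.Compare("go1.21", "go1.22")) // -1
	}
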
diff --git a/src/cmd/compile/internal/types2/version_test.go b/src/cmd/compile/internal/types2/version_test.go
deleted file mode 100644
index 651758e..0000000
--- a/src/cmd/compile/internal/types2/version_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types2
-
-import "testing"
-
-var parseGoVersionTests = []struct {
-	in  string
-	out version
-}{
-	{"go1.21", version{1, 21}},
-	{"go1.21.0", version{1, 21}},
-	{"go1.21rc2", version{1, 21}},
-}
-
-func TestParseGoVersion(t *testing.T) {
-	for _, tt := range parseGoVersionTests {
-		if out, err := parseGoVersion(tt.in); out != tt.out || err != nil {
-			t.Errorf("parseGoVersion(%q) = %v, %v, want %v, nil", tt.in, out, err, tt.out)
-		}
-	}
-}
diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
index 4207b48..fc3b858 100644
--- a/src/cmd/compile/internal/walk/assign.go
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -6,6 +6,7 @@
 
 import (
 	"go/constant"
+	"internal/abi"
 
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
@@ -103,7 +104,7 @@
 			// Left in place for back end.
 			// Do not add a new write barrier.
 			// Set up address of type for back end.
-			r.X = reflectdata.AppendElemRType(base.Pos, r)
+			r.Fun = reflectdata.AppendElemRType(base.Pos, r)
 			return as
 		}
 		// Otherwise, lowered for race detector.
@@ -168,13 +169,13 @@
 	a := n.Lhs[0]
 
 	var call *ir.CallExpr
-	if w := t.Elem().Size(); w <= zeroValSize {
+	if w := t.Elem().Size(); w <= abi.ZeroValSize {
 		fn := mapfn(mapaccess2[fast], t, false)
-		call = mkcall1(fn, fn.Type().Results(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key)
+		call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key)
 	} else {
 		fn := mapfn("mapaccess2_fat", t, true)
 		z := reflectdata.ZeroAddr(w)
-		call = mkcall1(fn, fn.Type().Results(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key, z)
+		call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key, z)
 	}
 
 	// mapaccess2* returns a typed bool, but due to spec changes,
@@ -191,7 +192,7 @@
 		return walkExpr(typecheck.Stmt(n), init)
 	}
 
-	var_ := typecheck.Temp(types.NewPtr(t.Elem()))
+	var_ := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(t.Elem()))
 	var_.SetTypecheck(1)
 	var_.MarkNonNil() // mapaccess always returns a non-nil pointer
 
@@ -230,7 +231,7 @@
 		return n
 	}
 
-	results := fn.Type().Results().FieldSlice()
+	results := fn.Type().Results()
 	dsts := make([]ir.Node, len(results))
 	for i, v := range results {
 		// TODO(mdempsky): typecheck should have already checked the result variables.
@@ -392,7 +393,7 @@
 			continue
 		}
 
-		if sym := types.OrigSym(name.Sym()); sym == nil || sym.IsBlank() {
+		if ir.IsBlank(name) {
 			// We can ignore assignments to blank or anonymous result parameters.
 			// These can't appear in expressions anyway.
 			continue
@@ -432,7 +433,6 @@
 		ir.OBITNOT,
 		ir.OCONV,
 		ir.OCONVIFACE,
-		ir.OCONVIDATA,
 		ir.OCONVNOP,
 		ir.ODIV,
 		ir.ODOT,
@@ -484,7 +484,7 @@
 	var nodes ir.Nodes
 
 	// var s []T
-	s := typecheck.Temp(l1.Type())
+	s := typecheck.TempAt(base.Pos, ir.CurFunc, l1.Type())
 	nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1
 
 	elemtype := s.Type().Elem()
@@ -498,7 +498,7 @@
 	num := ir.NewUnaryExpr(base.Pos, ir.OLEN, l2)
 
 	// newLen := oldLen + num
-	newLen := typecheck.Temp(types.Types[types.TINT])
+	newLen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
 	nodes.Append(ir.NewAssignStmt(base.Pos, newLen, ir.NewBinaryExpr(base.Pos, ir.OADD, oldLen, num)))
 
 	// if uint(newLen) <= uint(oldCap)
@@ -513,12 +513,8 @@
 	slice.SetBounded(true)
 	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, slice)}
 
-	// func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) []T
-	fn := typecheck.LookupRuntime("growslice")
-	fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
-
 	// else { s = growslice(oldPtr, newLen, oldCap, num, T) }
-	call := mkcall1(fn, s.Type(), nif.PtrInit(), oldPtr, newLen, oldCap, num, reflectdata.TypePtr(elemtype))
+	call := walkGrowslice(s, nif.PtrInit(), oldPtr, newLen, oldCap, num)
 	nif.Else = []ir.Node{ir.NewAssignStmt(base.Pos, s, call)}
 
 	nodes.Append(nif)
@@ -541,8 +537,7 @@
 		ir.CurFunc.SetWBPos(n.Pos())
 
 		// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
-		fn := typecheck.LookupRuntime("typedslicecopy")
-		fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
+		fn := typecheck.LookupRuntime("typedslicecopy", l1.Type().Elem(), l2.Type().Elem())
 		ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
 		ptr2, len2 := backingArrayPtrLen(l2)
 		ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.AppendElemRType(base.Pos, n), ptr1, len1, ptr2, len2)
@@ -557,8 +552,7 @@
 		ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
 		ptr2, len2 := backingArrayPtrLen(l2)
 
-		fn := typecheck.LookupRuntime("slicecopy")
-		fn = typecheck.SubstArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
+		fn := typecheck.LookupRuntime("slicecopy", ptr1.Type().Elem(), ptr2.Type().Elem())
 		ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(base.Pos, elemtype.Size()))
 	} else {
 		// memmove(&s[idx], &l2[0], len(l2)*sizeof(T))
@@ -572,8 +566,7 @@
 		nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(base.Pos, elemtype.Size()))
 
 		// instantiate func memmove(to *any, frm *any, length uintptr)
-		fn := typecheck.LookupRuntime("memmove")
-		fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+		fn := typecheck.LookupRuntime("memmove", elemtype, elemtype)
 		ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid)
 	}
 	ln := append(nodes, ncopy)
@@ -675,13 +668,13 @@
 	nodes = append(nodes, nifneg)
 
 	// s := l1
-	s := typecheck.Temp(l1.Type())
+	s := typecheck.TempAt(base.Pos, ir.CurFunc, l1.Type())
 	nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1))
 
 	elemtype := s.Type().Elem()
 
 	// n := s.len + l2
-	nn := typecheck.Temp(types.Types[types.TINT])
+	nn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
 	nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)))
 
 	// if uint(n) <= uint(s.cap)
@@ -695,18 +688,13 @@
 	nt.SetBounded(true)
 	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, nt)}
 
-	// instantiate growslice(oldPtr *any, newLen, oldCap, num int, typ *type) []any
-	fn := typecheck.LookupRuntime("growslice")
-	fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
-
 	// else { s = growslice(s.ptr, n, s.cap, l2, T) }
 	nif.Else = []ir.Node{
-		ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(),
+		ir.NewAssignStmt(base.Pos, s, walkGrowslice(s, nif.PtrInit(),
 			ir.NewUnaryExpr(base.Pos, ir.OSPTR, s),
 			nn,
 			ir.NewUnaryExpr(base.Pos, ir.OCAP, s),
-			l2,
-			reflectdata.TypePtr(elemtype))),
+			l2)),
 	}
 
 	nodes = append(nodes, nif)
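
The append-with-spread walk above lowers s = append(s, l2...) into a length computation, a capacity check, a growslice call on the slow path, and a bulk copy. A rough source-level sketch of that shape (illustrative only: the real lowering operates on compiler IR, calls runtime.growslice via walkGrowslice, and picks memmove, slicecopy, or typedslicecopy depending on the element type):

	package main

	// appendSpread sketches the generated shape for s = append(s, l2...).
	func appendSpread(s, l2 []int) []int {
		oldLen := len(s)
		newLen := oldLen + len(l2)
		if uint(newLen) <= uint(cap(s)) {
			s = s[:newLen] // fast path: reuse the backing array
		} else {
			// stands in for the runtime.growslice call; the real growth
			// policy lives in the runtime, not here
			grown := make([]int, newLen)
			copy(grown, s[:oldLen])
			s = grown
		}
		copy(s[oldLen:], l2) // the memmove / slicecopy step
		return s
	}

	func main() {
		println(len(appendSpread([]int{1, 2}, []int{3, 4, 5}))) // 5
	}
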
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
index 0bb5018..37143ba 100644
--- a/src/cmd/compile/internal/walk/builtin.go
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -79,14 +79,14 @@
 	var l []ir.Node
 
 	// s = slice to append to
-	s := typecheck.Temp(nsrc.Type())
+	s := typecheck.TempAt(base.Pos, ir.CurFunc, nsrc.Type())
 	l = append(l, ir.NewAssignStmt(base.Pos, s, nsrc))
 
 	// num = number of things to append
 	num := ir.NewInt(base.Pos, int64(argc))
 
 	// newLen := s.len + num
-	newLen := typecheck.Temp(types.Types[types.TINT])
+	newLen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
 	l = append(l, ir.NewAssignStmt(base.Pos, newLen, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), num)))
 
 	// if uint(newLen) <= uint(s.cap)
@@ -101,17 +101,13 @@
 		ir.NewAssignStmt(base.Pos, s, slice),
 	}
 
-	fn := typecheck.LookupRuntime("growslice") //   growslice(ptr *T, newLen, oldCap, num int, <type>) (ret []T)
-	fn = typecheck.SubstArgTypes(fn, s.Type().Elem(), s.Type().Elem())
-
 	// else { s = growslice(s.ptr, n, s.cap, a, T) }
 	nif.Else = []ir.Node{
-		ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(),
+		ir.NewAssignStmt(base.Pos, s, walkGrowslice(s, nif.PtrInit(),
 			ir.NewUnaryExpr(base.Pos, ir.OSPTR, s),
 			newLen,
 			ir.NewUnaryExpr(base.Pos, ir.OCAP, s),
-			num,
-			reflectdata.TypePtr(s.Type().Elem()))),
+			num)),
 	}
 
 	l = append(l, nif)
@@ -130,6 +126,14 @@
 	return s
 }
 
+// growslice(ptr *T, newLen, oldCap, num int, <type>) (ret []T)
+func walkGrowslice(slice *ir.Name, init *ir.Nodes, oldPtr, newLen, oldCap, num ir.Node) *ir.CallExpr {
+	elemtype := slice.Type().Elem()
+	fn := typecheck.LookupRuntime("growslice", elemtype, elemtype)
+	elemtypeptr := reflectdata.TypePtrAt(base.Pos, elemtype)
+	return mkcall1(fn, slice.Type(), init, oldPtr, newLen, oldCap, num, elemtypeptr)
+}
+
 // walkClear walks an OCLEAR node.
 func walkClear(n *ir.UnaryExpr) ir.Node {
 	typ := n.X.Type()
@@ -149,8 +153,7 @@
 // walkClose walks an OCLOSE node.
 func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
 	// cannot use chanfn - closechan takes any, not chan any
-	fn := typecheck.LookupRuntime("closechan")
-	fn = typecheck.SubstArgTypes(fn, n.X.Type())
+	fn := typecheck.LookupRuntime("closechan", n.X.Type())
 	return mkcall1(fn, nil, init, n.X)
 }
 
@@ -185,16 +188,15 @@
 		n.Y = cheapExpr(n.Y, init)
 		ptrR, lenR := backingArrayPtrLen(n.Y)
 
-		fn := typecheck.LookupRuntime("slicecopy")
-		fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
+		fn := typecheck.LookupRuntime("slicecopy", ptrL.Type().Elem(), ptrR.Type().Elem())
 
 		return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(base.Pos, n.X.Type().Elem().Size()))
 	}
 
 	n.X = walkExpr(n.X, init)
 	n.Y = walkExpr(n.Y, init)
-	nl := typecheck.Temp(n.X.Type())
-	nr := typecheck.Temp(n.Y.Type())
+	nl := typecheck.TempAt(base.Pos, ir.CurFunc, n.X.Type())
+	nr := typecheck.TempAt(base.Pos, ir.CurFunc, n.Y.Type())
 	var l []ir.Node
 	l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
 	l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))
@@ -202,7 +204,7 @@
 	nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
 	nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)
 
-	nlen := typecheck.Temp(types.Types[types.TINT])
+	nlen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
 
 	// n = len(to)
 	l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))
@@ -219,9 +221,8 @@
 	ne.Likely = true
 	l = append(l, ne)
 
-	fn := typecheck.LookupRuntime("memmove")
-	fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
-	nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR]))
+	fn := typecheck.LookupRuntime("memmove", nl.Type().Elem(), nl.Type().Elem())
+	nwid := ir.Node(typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR]))
 	setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
 	ne.Body.Append(setwid)
 	nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(base.Pos, nl.Type().Elem().Size()))
@@ -273,7 +274,7 @@
 	}
 	if t.IsArray() {
 		safeExpr(n.X, init)
-		con := typecheck.OrigInt(n, t.NumElem())
+		con := ir.NewConstExpr(constant.MakeInt64(t.NumElem()), n)
 		con.SetTypecheck(1)
 		return con
 	}
@@ -302,7 +303,7 @@
 // walkMakeMap walks an OMAKEMAP node.
 func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
 	t := n.Type()
-	hmapType := reflectdata.MapType(t)
+	hmapType := reflectdata.MapType()
 	hint := n.Len
 
 	// var h *hmap
@@ -340,7 +341,7 @@
 
 			// h.buckets = b
 			bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
-			na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
+			na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR]))
 			nif.Body.Append(na)
 			appendWalkStmt(init, nif)
 		}
@@ -357,16 +358,15 @@
 		if n.Esc() == ir.EscNone {
 			// Only need to initialize h.hash0 since
 			// hmap h has been allocated on the stack already.
-			// h.hash0 = fastrand()
-			rand := mkcall("fastrand", types.Types[types.TUINT32], init)
+			// h.hash0 = rand32()
+			rand := mkcall("rand32", types.Types[types.TUINT32], init)
 			hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
 			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
 			return typecheck.ConvNop(h, t)
 		}
 		// Call runtime.makehmap to allocate an
 		// hmap on the heap and initialize hmap's hash0 field.
-		fn := typecheck.LookupRuntime("makemap_small")
-		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+		fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
 		return mkcall1(fn, n.Type(), init)
 	}
 
@@ -392,8 +392,7 @@
 		argtype = types.Types[types.TINT]
 	}
 
-	fn := typecheck.LookupRuntime(fnname)
-	fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
+	fn := typecheck.LookupRuntime(fnname, hmapType, t.Key(), t.Elem())
 	return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h)
 }
 
@@ -434,7 +433,7 @@
 		init.Append(typecheck.Stmt(nif))
 
 		t = types.NewArray(t.Elem(), i) // [r]T
-		var_ := typecheck.Temp(t)
+		var_ := typecheck.TempAt(base.Pos, ir.CurFunc, t)
 		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil))  // zero temp
 		r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_, nil, l, nil) // arr[:l]
 		// The conv is necessary in case n.Type is named.
@@ -497,14 +496,13 @@
 		ptr.MarkNonNil()
 		sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)
 
-		s := typecheck.Temp(t)
+		s := typecheck.TempAt(base.Pos, ir.CurFunc, t)
 		r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
 		r = walkExpr(r, init)
 		init.Append(r)
 
 		// instantiate memmove(to *any, frm *any, size uintptr)
-		fn = typecheck.LookupRuntime("memmove")
-		fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
+		fn = typecheck.LookupRuntime("memmove", t.Elem(), t.Elem())
 		ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
 		init.Append(walkExpr(typecheck.Stmt(ncopy), init))
 
@@ -548,7 +546,7 @@
 	walkExprListCheap(nn.Args, init)
 
 	// For println, add " " between elements and "\n" at the end.
-	if nn.Op() == ir.OPRINTN {
+	if nn.Op() == ir.OPRINTLN {
 		s := nn.Args
 		t := make([]ir.Node, 0, len(s)*2)
 		for i, n := range s {
@@ -609,11 +607,10 @@
 		switch n.Type().Kind() {
 		case types.TINTER:
 			if n.Type().IsEmptyInterface() {
-				on = typecheck.LookupRuntime("printeface")
+				on = typecheck.LookupRuntime("printeface", n.Type())
 			} else {
-				on = typecheck.LookupRuntime("printiface")
+				on = typecheck.LookupRuntime("printiface", n.Type())
 			}
-			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
 		case types.TPTR:
 			if n.Type().Elem().NotInHeap() {
 				on = typecheck.LookupRuntime("printuintptr")
@@ -625,13 +622,11 @@
 			}
 			fallthrough
 		case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
-			on = typecheck.LookupRuntime("printpointer")
-			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+			on = typecheck.LookupRuntime("printpointer", n.Type())
 		case types.TSLICE:
-			on = typecheck.LookupRuntime("printslice")
-			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+			on = typecheck.LookupRuntime("printslice", n.Type())
 		case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
-			if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
+			if types.RuntimeSymName(n.Type().Sym()) == "hex" {
 				on = typecheck.LookupRuntime("printhex")
 			} else {
 				on = typecheck.LookupRuntime("printuint")
@@ -663,7 +658,7 @@
 		}
 
 		r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
-		if params := on.Type().Params().FieldSlice(); len(params) > 0 {
+		if params := on.Type().Params(); len(params) > 0 {
 			t := params[0].Type
 			n = typecheck.Conv(n, t)
 			r.Args.Append(n)
@@ -753,11 +748,23 @@
 			return walkExpr(typecheck.Expr(h), init)
 		}
 
-		// mem, overflow := runtime.mulUintptr(et.size, len)
-		mem := typecheck.Temp(types.Types[types.TUINTPTR])
-		overflow := typecheck.Temp(types.Types[types.TBOOL])
-		fn := typecheck.LookupRuntime("mulUintptr")
-		call := mkcall1(fn, fn.Type().Results(), init, ir.NewInt(base.Pos, sliceType.Elem().Size()), typecheck.Conv(typecheck.Conv(len, lenType), types.Types[types.TUINTPTR]))
+		// mem, overflow := math.MulUintptr(et.size, len)
+		mem := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
+		overflow := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
+
+		decl := types.NewSignature(nil,
+			[]*types.Field{
+				types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+				types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+			},
+			[]*types.Field{
+				types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+				types.NewField(base.Pos, nil, types.Types[types.TBOOL]),
+			})
+
+		fn := ir.NewFunc(n.Pos(), n.Pos(), math_MulUintptr, decl)
+
+		call := mkcall1(fn.Nname, fn.Type().ResultsTuple(), init, ir.NewInt(base.Pos, sliceType.Elem().Size()), typecheck.Conv(typecheck.Conv(len, lenType), types.Types[types.TUINTPTR]))
 		appendWalkStmt(init, ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{mem, overflow}, []ir.Node{call}))
 
 		// if overflow || mem > -uintptr(ptr) {
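// Illustrative sketch (user-level Go) of the overflow check being built above
// for unsafe.Slice(ptr, len): element size times length is computed with a
// MulUintptr-style helper, and the slice is rejected when the product
// overflows. mulUintptr here is a hypothetical stand-in for
// runtime/internal/math.MulUintptr, which also has a small-operand fast path.
package main

import (
	"fmt"
	"math/bits"
	"unsafe"
)

// mulUintptr multiplies a and b and reports whether the result overflowed.
func mulUintptr(a, b uintptr) (uintptr, bool) {
	hi, lo := bits.Mul(uint(a), uint(b))
	return uintptr(lo), hi != 0
}

func main() {
	arr := [4]int64{1, 2, 3, 4}
	s := unsafe.Slice(&arr[0], len(arr)) // 4 * 8 bytes, no overflow
	fmt.Println(s)

	mem, overflow := mulUintptr(unsafe.Sizeof(arr[0]), uintptr(len(arr)))
	fmt.Println("bytes:", mem, "overflow:", overflow)
}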
@@ -783,6 +790,8 @@
 	return walkExpr(typecheck.Expr(h), init)
 }
 
+var math_MulUintptr = &types.Sym{Pkg: types.NewPkg("runtime/internal/math", "math"), Name: "MulUintptr"}
+
 func walkUnsafeString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
 	ptr := safeExpr(n.X, init)
 	len := safeExpr(n.Y, init)
@@ -863,9 +872,7 @@
 }
 
 func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
-	fn := typecheck.LookupRuntime(name)
-	fn = typecheck.SubstArgTypes(fn, l, r)
-	return fn
+	return typecheck.LookupRuntime(name, l, r)
 }
 
 // isRuneCount reports whether n is of the form len([]rune(string)).
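// Illustrative sketch (user-level Go) of the two make(map) paths chosen by
// walkMakeMap above: a small or absent hint can be served by makemap_small
// (or, for a non-escaping map, a stack hmap whose hash0 is seeded with
// rand32), while a larger hint calls makemap so buckets are sized up front.
// The choice is invisible at the source level; only the call shapes differ.
package main

import "fmt"

func main() {
	small := make(map[string]int)     // small/no hint
	big := make(map[int]string, 1024) // large hint: buckets preallocated

	small["answer"] = 42
	for i := 0; i < 1024; i++ {
		big[i] = fmt.Sprintf("v%d", i)
	}
	fmt.Println(len(small), len(big))
}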
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
index 1fa3ac0..38c6c03 100644
--- a/src/cmd/compile/internal/walk/closure.go
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -30,7 +30,7 @@
 //		(*&byref)++
 //	}(byval, &byref, 42)
 func directClosureCall(n *ir.CallExpr) {
-	clo := n.X.(*ir.ClosureExpr)
+	clo := n.Fun.(*ir.ClosureExpr)
 	clofn := clo.Func
 
 	if ir.IsTrivialClosure(clo) {
@@ -47,9 +47,8 @@
 			// and v remains PAUTOHEAP with &v heapaddr
 			// (accesses will implicitly deref &v).
 
-			addr := ir.NewNameAt(clofn.Pos(), typecheck.Lookup("&"+v.Sym().Name))
+			addr := ir.NewNameAt(clofn.Pos(), typecheck.Lookup("&"+v.Sym().Name), types.NewPtr(v.Type()))
 			addr.Curfn = clofn
-			addr.SetType(types.NewPtr(v.Type()))
 			v.Heapaddr = addr
 			v = addr
 		}
@@ -68,12 +67,12 @@
 
 	// Create new function type with parameters prepended, and
 	// then update type and declarations.
-	typ = types.NewSignature(nil, append(params, typ.Params().FieldSlice()...), typ.Results().FieldSlice())
+	typ = types.NewSignature(nil, append(params, typ.Params()...), typ.Results())
 	f.SetType(typ)
 	clofn.Dcl = append(decls, clofn.Dcl...)
 
 	// Rewrite call.
-	n.X = f
+	n.Fun = f
 	n.Args.Prepend(closureArgs(clo)...)
 
 	// Update the call expression's type. We need to do this
@@ -81,9 +80,9 @@
 	// node, but we only rewrote the ONAME node's type. Logically,
 	// they're the same, but the stack offsets probably changed.
 	if typ.NumResults() == 1 {
-		n.SetType(typ.Results().Field(0).Type)
+		n.SetType(typ.Result(0).Type)
 	} else {
-		n.SetType(typ.Results())
+		n.SetType(typ.ResultsTuple())
 	}
 
 	// Add to Closures for enqueueFunc. It's no longer a proper
@@ -145,7 +144,7 @@
 	return walkExpr(cfn, init)
 }
 
-// closureArgs returns a slice of expressions that an be used to
+// closureArgs returns a slice of expressions that can be used to
 // initialize the given closure's free variables. These correspond
 // one-to-one with the variables in clo.Func.ClosureVars, and will be
 // either an ONAME node (if the variable is captured by value) or an
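// Illustrative sketch (user-level Go) of what directClosureCall above does to
// a directly-called closure: captured variables become explicit leading
// parameters, passed by value or by address. The name lifted is hypothetical;
// in the compiler the closure body itself becomes the called function.
package main

import "fmt"

func main() {
	byval := 1
	byref := 2

	// Original form: closure literal called directly.
	func(x int) {
		fmt.Println(byval, byref, x)
		byref++
	}(42)

	// Rewritten form: captures are prepended as parameters.
	lifted := func(byval int, byref *int, x int) {
		fmt.Println(byval, *byref, x)
		*byref++
	}
	lifted(byval, &byref, 42)

	fmt.Println(byref) // 4: both calls incremented it
}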
diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
index 6330530..adc44ca 100644
--- a/src/cmd/compile/internal/walk/complit.go
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -7,7 +7,7 @@
 import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
-	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/staticdata"
 	"cmd/compile/internal/staticinit"
 	"cmd/compile/internal/typecheck"
@@ -18,7 +18,7 @@
 // walkCompLit walks a composite literal node:
 // OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr).
 func walkCompLit(n ir.Node, init *ir.Nodes) ir.Node {
-	if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) {
+	if isStaticCompositeLiteral(n) && !ssa.CanSSA(n.Type()) {
 		n := n.(*ir.CompLitExpr) // not OPTRLIT
 		// n can be directly represented in the read-only data section.
 		// Make direct reference to the static data. See issue 12841.
@@ -26,7 +26,7 @@
 		fixedlit(inInitFunction, initKindStatic, n, vstat, init)
 		return typecheck.Expr(vstat)
 	}
-	var_ := typecheck.Temp(n.Type())
+	var_ := typecheck.TempAt(base.Pos, ir.CurFunc, n.Type())
 	anylit(n, var_, init)
 	return var_
 }
@@ -341,7 +341,7 @@
 	}
 
 	// make new auto *array (3 declare)
-	vauto := typecheck.Temp(types.NewPtr(t))
+	vauto := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(t))
 
 	// set auto to point at new temp or heap (3 assign)
 	var a ir.Node
@@ -352,7 +352,7 @@
 		}
 		a = initStackTemp(init, x, vstat)
 	} else if n.Esc() == ir.EscNone {
-		a = initStackTemp(init, typecheck.Temp(t), vstat)
+		a = initStackTemp(init, typecheck.TempAt(base.Pos, ir.CurFunc, t), vstat)
 	} else {
 		a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
 	}
@@ -464,7 +464,7 @@
 		// for i = 0; i < len(vstatk); i++ {
 		//	map[vstatk[i]] = vstate[i]
 		// }
-		i := typecheck.Temp(types.Types[types.TINT])
+		i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
 		rhs := ir.NewIndexExpr(base.Pos, vstate, i)
 		rhs.SetBounded(true)
 
@@ -497,8 +497,8 @@
 	// Use temporaries so that mapassign1 can have addressable key, elem.
 	// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
 	// TODO(khr): assign these temps in order phase so we can reuse them across multiple maplits?
-	tmpkey := typecheck.Temp(m.Type().Key())
-	tmpelem := typecheck.Temp(m.Type().Elem())
+	tmpkey := typecheck.TempAt(base.Pos, ir.CurFunc, m.Type().Key())
+	tmpelem := typecheck.TempAt(base.Pos, ir.CurFunc, m.Type().Elem())
 
 	for _, r := range entries {
 		r := r.(*ir.KeyExpr)
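// Illustrative sketch (user-level Go) of the large-map-literal lowering the
// maplit code above implements: instead of one assignment per entry, the
// compiler emits two parallel static arrays and a loop. The names vstatk and
// vstate mirror the comments in the code; the example itself is hypothetical.
package main

import "fmt"

func main() {
	// Conceptually: m := map[string]int{"a": 1, "b": 2, "c": 3}
	vstatk := [...]string{"a", "b", "c"}
	vstate := [...]int{1, 2, 3}

	m := make(map[string]int, len(vstatk))
	for i := 0; i < len(vstatk); i++ {
		m[vstatk[i]] = vstate[i]
	}
	fmt.Println(len(m), m["b"])
}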
diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go
index bfa0c54..280b3b6 100644
--- a/src/cmd/compile/internal/walk/convert.go
+++ b/src/cmd/compile/internal/walk/convert.go
@@ -57,7 +57,7 @@
 
 	if !fromType.IsInterface() {
 		typeWord := reflectdata.ConvIfaceTypeWord(base.Pos, n)
-		l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, dataWord(n, init))
+		l := ir.NewBinaryExpr(base.Pos, ir.OMAKEFACE, typeWord, dataWord(n, init))
 		l.SetType(toType)
 		l.SetTypecheck(n.Typecheck())
 		return l
@@ -67,18 +67,9 @@
 	}
 
 	// Evaluate the input interface.
-	c := typecheck.Temp(fromType)
+	c := typecheck.TempAt(base.Pos, ir.CurFunc, fromType)
 	init.Append(ir.NewAssignStmt(base.Pos, c, n.X))
 
-	// Grab its parts.
-	itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, c)
-	itab.SetType(types.Types[types.TUINTPTR].PtrTo())
-	itab.SetTypecheck(1)
-	data := ir.NewUnaryExpr(n.Pos(), ir.OIDATA, c)
-	data.SetType(types.Types[types.TUINT8].PtrTo()) // Type is generic pointer - we're just passing it through.
-	data.SetTypecheck(1)
-
-	var typeWord ir.Node
 	if toType.IsEmptyInterface() {
 		// Implement interface to empty interface conversion:
 		//
@@ -87,27 +78,50 @@
 		// if res != nil {
 		//    res = res.type
 		// }
-		typeWord = typecheck.Temp(types.NewPtr(types.Types[types.TUINT8]))
+
+		// Grab its parts.
+		itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, c)
+		itab.SetType(types.Types[types.TUINTPTR].PtrTo())
+		itab.SetTypecheck(1)
+		data := ir.NewUnaryExpr(n.Pos(), ir.OIDATA, c)
+		data.SetType(types.Types[types.TUINT8].PtrTo()) // Type is generic pointer - we're just passing it through.
+		data.SetTypecheck(1)
+
+		typeWord := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(types.Types[types.TUINT8]))
 		init.Append(ir.NewAssignStmt(base.Pos, typeWord, typecheck.Conv(typecheck.Conv(itab, types.Types[types.TUNSAFEPTR]), typeWord.Type())))
 		nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, typeWord, typecheck.NodNil())), nil, nil)
 		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, typeWord, itabType(typeWord))}
 		init.Append(nif)
-	} else {
-		// Must be converting I2I (more specific to less specific interface).
-		// res = convI2I(toType, itab)
-		fn := typecheck.LookupRuntime("convI2I")
-		types.CalcSize(fn.Type())
-		call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
-		call.Args = []ir.Node{reflectdata.ConvIfaceTypeWord(base.Pos, n), itab}
-		typeWord = walkExpr(typecheck.Expr(call), init)
+
+		// Build the result.
+		// e = iface{typeWord, data}
+		e := ir.NewBinaryExpr(base.Pos, ir.OMAKEFACE, typeWord, data)
+		e.SetType(toType) // assign type manually, typecheck doesn't understand OMAKEFACE.
+		e.SetTypecheck(1)
+		return e
 	}
 
-	// Build the result.
-	// e = iface{typeWord, data}
-	e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, data)
-	e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
-	e.SetTypecheck(1)
-	return e
+	// Must be converting I2I (more specific to less specific interface).
+	// Use the same code as e, _ = c.(T).
+	var rhs ir.Node
+	if n.TypeWord == nil || n.TypeWord.Op() == ir.OADDR && n.TypeWord.(*ir.AddrExpr).X.Op() == ir.OLINKSYMOFFSET {
+		// Fixed (not loaded from a dictionary) type.
+		ta := ir.NewTypeAssertExpr(base.Pos, c, toType)
+		ta.SetOp(ir.ODOTTYPE2)
+		// Allocate a descriptor for this conversion to pass to the runtime.
+		ta.Descriptor = makeTypeAssertDescriptor(toType, true)
+		rhs = ta
+	} else {
+		ta := ir.NewDynamicTypeAssertExpr(base.Pos, ir.ODYNAMICDOTTYPE2, c, n.TypeWord)
+		rhs = ta
+	}
+	rhs.SetType(toType)
+	rhs.SetTypecheck(1)
+
+	res := typecheck.TempAt(base.Pos, ir.CurFunc, toType)
+	as := ir.NewAssignListStmt(base.Pos, ir.OAS2DOTTYPE, []ir.Node{res, ir.BlankNode}, []ir.Node{rhs})
+	init.Append(as)
+	return res
 }
 
 // Returns the data word (the second word) used to represent conv.X in
@@ -155,7 +169,7 @@
 		value = n
 	case conv.Esc() == ir.EscNone && fromType.Size() <= 1024:
 		// n does not escape. Use a stack temporary initialized to n.
-		value = typecheck.Temp(fromType)
+		value = typecheck.TempAt(base.Pos, ir.CurFunc, fromType)
 		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n)))
 	}
 	if value != nil {
@@ -165,7 +179,7 @@
 
 	// Time to do an allocation. We'll call into the runtime for that.
 	fnname, argType, needsaddr := dataWordFuncName(fromType)
-	fn := typecheck.LookupRuntime(fnname)
+	var fn *ir.Name
 
 	var args []ir.Node
 	if needsaddr {
@@ -178,11 +192,12 @@
 		if !ir.IsAddressable(n) {
 			n = copyExpr(n, fromType, init)
 		}
-		fn = typecheck.SubstArgTypes(fn, fromType)
+		fn = typecheck.LookupRuntime(fnname, fromType)
 		args = []ir.Node{reflectdata.ConvIfaceSrcRType(base.Pos, conv), typecheck.NodAddr(n)}
 	} else {
 		// Use a specialized conversion routine that takes the type being
 		// converted by value, not by pointer.
+		fn = typecheck.LookupRuntime(fnname)
 		var arg ir.Node
 		switch {
 		case fromType == argType:
@@ -211,12 +226,6 @@
 	return safeExpr(walkExpr(typecheck.Expr(call), init), init)
 }
 
-// walkConvIData walks an OCONVIDATA node.
-func walkConvIData(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
-	n.X = walkExpr(n.X, init)
-	return dataWord(n, init)
-}
-
 // walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
 func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
 	a := typecheck.NodNil()
@@ -276,7 +285,7 @@
 			a.SetTypecheck(1)
 			a.MarkNonNil()
 		}
-		p := typecheck.Temp(t.PtrTo()) // *[n]byte
+		p := typecheck.TempAt(base.Pos, ir.CurFunc, t.PtrTo()) // *[n]byte
 		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a)))
 
 		// Copy from the static string data to the [n]byte.
@@ -414,11 +423,11 @@
 		case n.Type().IsStruct():
 			if n.Type().Field(0).Sym.IsBlank() {
 				// Treat blank fields as the zero value as the Go language requires.
-				n = typecheck.Temp(n.Type().Field(0).Type)
+				n = typecheck.TempAt(base.Pos, ir.CurFunc, n.Type().Field(0).Type)
 				appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n, nil))
 				continue
 			}
-			n = typecheck.Expr(ir.NewSelectorExpr(n.Pos(), ir.OXDOT, n, n.Type().Field(0).Sym))
+			n = typecheck.DotField(n.Pos(), n, 0)
 		case n.Type().IsArray():
 			n = typecheck.Expr(ir.NewIndexExpr(n.Pos(), n, ir.NewInt(base.Pos, 0)))
 		default:
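// Illustrative sketch (user-level Go) of the I2I rewrite in walkConvInterface
// above: converting a non-empty interface value to another non-empty
// interface type is now lowered to the same shape as a comma-ok type
// assertion whose failure case cannot fire. The interface and type names here
// are hypothetical.
package main

import "fmt"

type ReadCloser interface {
	Read() string
	Close() error
}

type Reader interface {
	Read() string
}

type file struct{}

func (file) Read() string { return "data" }
func (file) Close() error { return nil }

func main() {
	var c ReadCloser = file{}

	// Written conversion:
	var r1 Reader = c

	// Roughly the lowered form:
	r2, _ := c.(Reader)

	fmt.Println(r1.Read(), r2.Read())
}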
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index 909e7d6..268f793 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -7,16 +7,20 @@
 import (
 	"fmt"
 	"go/constant"
+	"internal/abi"
 	"internal/buildcfg"
 	"strings"
 
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/rttype"
 	"cmd/compile/internal/staticdata"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
+	"cmd/internal/objabi"
 )
 
 // The result of walkExpr MUST be assigned back to n, e.g.
@@ -122,7 +126,7 @@
 		n.X = walkExpr(n.X, init)
 		return n
 
-	case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH,
+	case ir.OMAKEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH,
 		ir.OUNSAFEADD:
 		n := n.(*ir.BinaryExpr)
 		n.X = walkExpr(n.X, init)
@@ -171,7 +175,7 @@
 		n := n.(*ir.LogicalExpr)
 		return walkLogical(n, init)
 
-	case ir.OPRINT, ir.OPRINTN:
+	case ir.OPRINT, ir.OPRINTLN:
 		return walkPrint(n.(*ir.CallExpr), init)
 
 	case ir.OPANIC:
@@ -223,10 +227,6 @@
 		n := n.(*ir.ConvExpr)
 		return walkConvInterface(n, init)
 
-	case ir.OCONVIDATA:
-		n := n.(*ir.ConvExpr)
-		return walkConvIData(n, init)
-
 	case ir.OCONV, ir.OCONVNOP:
 		n := n.(*ir.ConvExpr)
 		return walkConv(n, init)
@@ -459,7 +459,7 @@
 }
 
 func copyExpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
-	l := typecheck.Temp(t)
+	l := typecheck.TempAt(base.Pos, ir.CurFunc, t)
 	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n))
 	return l
 }
@@ -535,7 +535,7 @@
 	if n.Op() == ir.OCALLMETH {
 		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
 	}
-	if n.Op() == ir.OCALLINTER || n.X.Op() == ir.OMETHEXPR {
+	if n.Op() == ir.OCALLINTER || n.Fun.Op() == ir.OMETHEXPR {
 		// We expect both interface call reflect.Type.Method and concrete
 		// call reflect.(*rtype).Method.
 		usemethod(n)
@@ -544,14 +544,14 @@
 		reflectdata.MarkUsedIfaceMethod(n)
 	}
 
-	if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE {
+	if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.OCLOSURE {
 		directClosureCall(n)
 	}
 
-	if isFuncPCIntrinsic(n) {
+	if ir.IsFuncPCIntrinsic(n) {
 		// For internal/abi.FuncPCABIxxx(fn), if fn is a defined function, rewrite
 		// it to the address of the function of the ABI fn is defined.
-		name := n.X.(*ir.Name).Sym().Name
+		name := n.Fun.(*ir.Name).Sym().Name
 		arg := n.Args[0]
 		var wantABI obj.ABI
 		switch name {
@@ -560,30 +560,22 @@
 		case "FuncPCABIInternal":
 			wantABI = obj.ABIInternal
 		}
-		if isIfaceOfFunc(arg) {
-			fn := arg.(*ir.ConvExpr).X.(*ir.Name)
-			abi := fn.Func.ABI
-			if abi != wantABI {
-				base.ErrorfAt(n.Pos(), 0, "internal/abi.%s expects an %v function, %s is defined as %v", name, wantABI, fn.Sym().Name, abi)
-			}
-			var e ir.Node = ir.NewLinksymExpr(n.Pos(), fn.Sym().LinksymABI(abi), types.Types[types.TUINTPTR])
-			e = ir.NewAddrExpr(n.Pos(), e)
-			e.SetType(types.Types[types.TUINTPTR].PtrTo())
-			return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONVNOP, n.Type(), e))
+		if n.Type() != types.Types[types.TUINTPTR] {
+			base.FatalfAt(n.Pos(), "FuncPC intrinsic should return uintptr, got %v", n.Type()) // as expected by typecheck.FuncPC.
 		}
-		// fn is not a defined function. It must be ABIInternal.
-		// Read the address from func value, i.e. *(*uintptr)(idata(fn)).
-		if wantABI != obj.ABIInternal {
-			base.ErrorfAt(n.Pos(), 0, "internal/abi.%s does not accept func expression, which is ABIInternal", name)
+		n := ir.FuncPC(n.Pos(), arg, wantABI)
+		return walkExpr(n, init)
+	}
+
+	if name, ok := n.Fun.(*ir.Name); ok {
+		sym := name.Sym()
+		if sym.Pkg.Path == "go.runtime" && sym.Name == "deferrangefunc" {
+			// Call to runtime.deferrangefunc is being shared with a range-over-func
+			// body that might add defers to this frame, so we cannot use open-coded defers
+			// and we need to call deferreturn even if we don't see any other explicit defers.
+			ir.CurFunc.SetHasDefer(true)
+			ir.CurFunc.SetOpenCodedDeferDisallowed(true)
 		}
-		arg = walkExpr(arg, init)
-		var e ir.Node = ir.NewUnaryExpr(n.Pos(), ir.OIDATA, arg)
-		e.SetType(n.Type().PtrTo())
-		e.SetTypecheck(1)
-		e = ir.NewStarExpr(n.Pos(), e)
-		e.SetType(n.Type())
-		e.SetTypecheck(1)
-		return e
 	}
 
 	walkCall1(n, init)
@@ -601,14 +593,14 @@
 	}
 
 	args := n.Args
-	params := n.X.Type().Params()
+	params := n.Fun.Type().Params()
 
-	n.X = walkExpr(n.X, init)
+	n.Fun = walkExpr(n.Fun, init)
 	walkExprList(args, init)
 
 	for i, arg := range args {
 		// Validate argument and parameter types match.
-		param := params.Field(i)
+		param := params[i]
 		if !types.Identical(arg.Type(), param.Type) {
 			base.FatalfAt(n.Pos(), "assigning %L to parameter %v (type %v)", arg, param.Sym, param.Type)
 		}
@@ -618,14 +610,14 @@
 		// to prevent that calls from clobbering arguments already on the stack.
 		if mayCall(arg) {
 			// assignment of arg to Temp
-			tmp := typecheck.Temp(param.Type)
+			tmp := typecheck.TempAt(base.Pos, ir.CurFunc, param.Type)
 			init.Append(convas(typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, arg)).(*ir.AssignStmt), init))
 			// replace arg with temp
 			args[i] = tmp
 		}
 	}
 
-	funSym := n.X.Sym()
+	funSym := n.Fun.Sym()
 	if base.Debug.Libfuzzer != 0 && funSym != nil {
 		if hook, found := hooks[funSym.Pkg.Path+"."+funSym.Name]; found {
 			if len(args) != hook.argsNum {
@@ -714,16 +706,50 @@
 	n.X = walkExpr(n.X, init)
 	// Set up interface type addresses for back end.
 	if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
-		n.ITab = reflectdata.ITabAddr(n.Type(), n.X.Type())
+		n.ITab = reflectdata.ITabAddrAt(base.Pos, n.Type(), n.X.Type())
+	}
+	if n.X.Type().IsInterface() && n.Type().IsInterface() && !n.Type().IsEmptyInterface() {
+		// This kind of conversion needs a runtime call. Allocate
+		// a descriptor for that call.
+		n.Descriptor = makeTypeAssertDescriptor(n.Type(), n.Op() == ir.ODOTTYPE2)
 	}
 	return n
 }
 
+func makeTypeAssertDescriptor(target *types.Type, canFail bool) *obj.LSym {
+	// Converting from an interface to a non-empty interface needs a runtime call.
+	// Allocate an internal/abi.TypeAssert descriptor for that call.
+	lsym := types.LocalPkg.Lookup(fmt.Sprintf(".typeAssert.%d", typeAssertGen)).LinksymABI(obj.ABI0)
+	typeAssertGen++
+	c := rttype.NewCursor(lsym, 0, rttype.TypeAssert)
+	c.Field("Cache").WritePtr(typecheck.LookupRuntimeVar("emptyTypeAssertCache"))
+	c.Field("Inter").WritePtr(reflectdata.TypeSym(target).Linksym())
+	c.Field("CanFail").WriteBool(canFail)
+	objw.Global(lsym, int32(rttype.TypeAssert.Size()), obj.LOCAL)
+	lsym.Gotype = reflectdata.TypeLinksym(rttype.TypeAssert)
+	return lsym
+}
+
+var typeAssertGen int
+
 // walkDynamicDotType walks an ODYNAMICDOTTYPE or ODYNAMICDOTTYPE2 node.
 func walkDynamicDotType(n *ir.DynamicTypeAssertExpr, init *ir.Nodes) ir.Node {
 	n.X = walkExpr(n.X, init)
 	n.RType = walkExpr(n.RType, init)
 	n.ITab = walkExpr(n.ITab, init)
+	// Convert to non-dynamic if we can.
+	if n.RType != nil && n.RType.Op() == ir.OADDR {
+		addr := n.RType.(*ir.AddrExpr)
+		if addr.X.Op() == ir.OLINKSYMOFFSET {
+			r := ir.NewTypeAssertExpr(n.Pos(), n.X, n.Type())
+			if n.Op() == ir.ODYNAMICDOTTYPE2 {
+				r.SetOp(ir.ODOTTYPE2)
+			}
+			r.SetType(n.Type())
+			r.SetTypecheck(1)
+			return walkExpr(r, init)
+		}
+	}
 	return n
 }
 
@@ -800,7 +826,7 @@
 	switch {
 	case n.Assigned:
 		mapFn = mapfn(mapassign[fast], t, false)
-	case t.Elem().Size() > zeroValSize:
+	case t.Elem().Size() > abi.ZeroValSize:
 		args = append(args, reflectdata.ZeroAddr(t.Elem().Size()))
 		mapFn = mapfn("mapaccess1_fat", t, true)
 	default:
@@ -945,57 +971,87 @@
 	return false
 }
 
-// usemethod checks calls for uses of reflect.Type.{Method,MethodByName}.
+// usemethod checks calls for uses of Method and MethodByName of reflect.Value,
+// reflect.Type, reflect.(*rtype), and reflect.(*interfaceType).
 func usemethod(n *ir.CallExpr) {
 	// Don't mark reflect.(*rtype).Method, etc. themselves in the reflect package.
 	// Those functions may be alive via the itab, which should not cause all methods
 	// alive. We only want to mark their callers.
 	if base.Ctxt.Pkgpath == "reflect" {
-		switch ir.CurFunc.Nname.Sym().Name { // TODO: is there a better way than hardcoding the names?
-		case "(*rtype).Method", "(*rtype).MethodByName", "(*interfaceType).Method", "(*interfaceType).MethodByName":
+		// TODO: is there a better way than hardcoding the names?
+		switch fn := ir.CurFunc.Nname.Sym().Name; {
+		case fn == "(*rtype).Method", fn == "(*rtype).MethodByName":
+			return
+		case fn == "(*interfaceType).Method", fn == "(*interfaceType).MethodByName":
+			return
+		case fn == "Value.Method", fn == "Value.MethodByName":
 			return
 		}
 	}
 
-	dot, ok := n.X.(*ir.SelectorExpr)
+	dot, ok := n.Fun.(*ir.SelectorExpr)
 	if !ok {
 		return
 	}
 
-	// Looking for either direct method calls and interface method calls of:
-	//	reflect.Type.Method       - func(int) reflect.Method
-	//	reflect.Type.MethodByName - func(string) (reflect.Method, bool)
-	var pKind types.Kind
-
-	switch dot.Sel.Name {
-	case "Method":
-		pKind = types.TINT
-	case "MethodByName":
-		pKind = types.TSTRING
-	default:
-		return
-	}
-
+	// Looking for either direct method calls or interface method calls of:
+	//	reflect.Type.Method        - func(int) reflect.Method
+	//	reflect.Type.MethodByName  - func(string) (reflect.Method, bool)
+	//
+	//	reflect.Value.Method       - func(int) reflect.Value
+	//	reflect.Value.MethodByName - func(string) reflect.Value
+	methodName := dot.Sel.Name
 	t := dot.Selection.Type
-	if t.NumParams() != 1 || t.Params().Field(0).Type.Kind() != pKind {
+
+	// Check the number of arguments and return values.
+	if t.NumParams() != 1 || (t.NumResults() != 1 && t.NumResults() != 2) {
 		return
 	}
-	switch t.NumResults() {
-	case 1:
-		// ok
-	case 2:
-		if t.Results().Field(1).Type.Kind() != types.TBOOL {
-			return
+
+	// Check the type of the argument.
+	switch pKind := t.Param(0).Type.Kind(); {
+	case methodName == "Method" && pKind == types.TINT,
+		methodName == "MethodByName" && pKind == types.TSTRING:
+
+	default:
+		// not a call to Method or MethodByName of reflect.{Type,Value}.
+		return
+	}
+
+	// Check that first result type is "reflect.Method" or "reflect.Value".
+	// Note that we have to check sym name and sym package separately, as
+	// we can't check for exact string "reflect.Method" reliably
+	// (e.g., see #19028 and #38515).
+	switch s := t.Result(0).Type.Sym(); {
+	case s != nil && types.ReflectSymName(s) == "Method",
+		s != nil && types.ReflectSymName(s) == "Value":
+
+	default:
+		// not a call to Method or MethodByName of reflect.{Type,Value}.
+		return
+	}
+
+	var targetName ir.Node
+	switch dot.Op() {
+	case ir.ODOTINTER:
+		if methodName == "MethodByName" {
+			targetName = n.Args[0]
+		}
+	case ir.OMETHEXPR:
+		if methodName == "MethodByName" {
+			targetName = n.Args[1]
 		}
 	default:
-		return
+		base.FatalfAt(dot.Pos(), "usemethod: unexpected dot.Op() %s", dot.Op())
 	}
 
-	// Check that first result type is "reflect.Method". Note that we have to check sym name and sym package
-	// separately, as we can't check for exact string "reflect.Method" reliably (e.g., see #19028 and #38515).
-	if s := t.Results().Field(0).Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) {
-		ir.CurFunc.SetReflectMethod(true)
-		// The LSym is initialized at this point. We need to set the attribute on the LSym.
+	if ir.IsConst(targetName, constant.String) {
+		name := constant.StringVal(targetName.Val())
+
+		r := obj.Addrel(ir.CurFunc.LSym)
+		r.Type = objabi.R_USENAMEDMETHOD
+		r.Sym = staticdata.StringSymNoCommon(name)
+	} else {
 		ir.CurFunc.LSym.Set(obj.AttrReflectMethod, true)
 	}
 }
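// Illustrative sketch (user-level Go) of the distinction usemethod draws
// above: a constant method name can be recorded with the new
// R_USENAMEDMETHOD relocation so the linker keeps only that method, while a
// non-constant name falls back to the conservative AttrReflectMethod path.
// The behavior below is identical either way; only linker pruning differs.
package main

import (
	"fmt"
	"reflect"
)

type T struct{}

func (T) Ping() string  { return "pong" }
func (T) Other() string { return "other" }

func main() {
	v := reflect.ValueOf(T{})

	// Constant name: the referenced method is known at compile time.
	fmt.Println(v.MethodByName("Ping").Call(nil)[0].Interface())

	// Dynamic name: the compiler cannot tell which method is needed.
	name := fmt.Sprintf("Pi%s", "ng")
	fmt.Println(v.MethodByName(name).Call(nil)[0].Interface())
}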
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index 057e0b7..179fbdb 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -11,6 +11,7 @@
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/staticinit"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
@@ -73,7 +74,7 @@
 		}
 		o.free[key] = a[:len(a)-1]
 	} else {
-		v = typecheck.Temp(t)
+		v = typecheck.TempAt(base.Pos, ir.CurFunc, t)
 	}
 	if clear {
 		o.append(ir.NewAssignStmt(base.Pos, v, nil))
@@ -128,7 +129,7 @@
 		if l == n.X {
 			return n
 		}
-		a := ir.SepCopy(n).(*ir.UnaryExpr)
+		a := ir.Copy(n).(*ir.UnaryExpr)
 		a.X = l
 		return typecheck.Expr(a)
 	}
@@ -154,7 +155,7 @@
 		if l == n.X {
 			return n
 		}
-		a := ir.SepCopy(n).(*ir.UnaryExpr)
+		a := ir.Copy(n).(*ir.UnaryExpr)
 		a.X = l
 		return typecheck.Expr(a)
 
@@ -164,7 +165,7 @@
 		if l == n.X {
 			return n
 		}
-		a := ir.SepCopy(n).(*ir.SelectorExpr)
+		a := ir.Copy(n).(*ir.SelectorExpr)
 		a.X = l
 		return typecheck.Expr(a)
 
@@ -174,7 +175,7 @@
 		if l == n.X {
 			return n
 		}
-		a := ir.SepCopy(n).(*ir.SelectorExpr)
+		a := ir.Copy(n).(*ir.SelectorExpr)
 		a.X = l
 		return typecheck.Expr(a)
 
@@ -184,7 +185,7 @@
 		if l == n.X {
 			return n
 		}
-		a := ir.SepCopy(n).(*ir.StarExpr)
+		a := ir.Copy(n).(*ir.StarExpr)
 		a.X = l
 		return typecheck.Expr(a)
 
@@ -200,7 +201,7 @@
 		if l == n.X && r == n.Index {
 			return n
 		}
-		a := ir.SepCopy(n).(*ir.IndexExpr)
+		a := ir.Copy(n).(*ir.IndexExpr)
 		a.X = l
 		a.Index = r
 		return typecheck.Expr(a)
@@ -231,14 +232,29 @@
 		vstat = typecheck.Expr(vstat).(*ir.Name)
 		return vstat
 	}
+
+	// Prevent taking the address of an SSA-able local variable (#63332).
+	//
+	// TODO(mdempsky): Note that OuterValue unwraps OCONVNOPs, but
+	// IsAddressable does not. It should be possible to skip copying for
+	// at least some of these OCONVNOPs (e.g., reinsert them after the
+	// OADDR operation), but at least walkCompare needs to be fixed to
+	// support that (see trybot failures on go.dev/cl/541715, PS1).
 	if ir.IsAddressable(n) {
+		if name, ok := ir.OuterValue(n).(*ir.Name); ok && name.Op() == ir.ONAME {
+			if name.Class == ir.PAUTO && !name.Addrtaken() && ssa.CanSSA(name.Type()) {
+				goto Copy
+			}
+		}
+
 		return n
 	}
+
+Copy:
 	return o.copyExpr(n)
 }
 
 // mapKeyTemp prepares n to be a key in a map runtime call and returns n.
-// It should only be used for map runtime calls which have *_fast* versions.
 // The first parameter is the position of n's containing node, for use in case
 // that n's position is not unique (e.g., if n is an ONAME).
 func (o *orderState) mapKeyTemp(outerPos src.XPos, t *types.Type, n ir.Node) ir.Node {
@@ -538,14 +554,14 @@
 	n := nn.(*ir.CallExpr)
 	typecheck.AssertFixedCall(n)
 
-	if isFuncPCIntrinsic(n) && isIfaceOfFunc(n.Args[0]) {
+	if ir.IsFuncPCIntrinsic(n) && ir.IsIfaceOfFunc(n.Args[0]) != nil {
 		// For internal/abi.FuncPCABIxxx(fn), if fn is a defined function,
 		// do not introduce temporaries here, so it is easier to rewrite it
 		// to symbol address reference later in walk.
 		return
 	}
 
-	n.X = o.expr(n.X, nil)
+	n.Fun = o.expr(n.Fun, nil)
 	o.exprList(n.Args)
 }
 
@@ -603,8 +619,38 @@
 	case ir.OAS:
 		n := n.(*ir.AssignStmt)
 		t := o.markTemp()
+
+		// There's a delicate interaction here between two OINDEXMAP
+		// optimizations.
+		//
+		// First, we want to handle m[k] = append(m[k], ...) with a single
+		// runtime call to mapassign. This requires the m[k] expressions to
+		// satisfy ir.SameSafeExpr in walkAssign.
+		//
+		// But if k is a slow map key type that's passed by reference (e.g.,
+		// byte), then we want to avoid marking user variables as addrtaken,
+		// if that might prevent the compiler from keeping k in a register.
+		//
+		// TODO(mdempsky): It would be better if walk was responsible for
+		// inserting temporaries as needed.
+		mapAppend := n.X.Op() == ir.OINDEXMAP && n.Y.Op() == ir.OAPPEND &&
+			ir.SameSafeExpr(n.X, n.Y.(*ir.CallExpr).Args[0])
+
 		n.X = o.expr(n.X, nil)
-		n.Y = o.expr(n.Y, n.X)
+		if mapAppend {
+			indexLHS := n.X.(*ir.IndexExpr)
+			indexLHS.X = o.cheapExpr(indexLHS.X)
+			indexLHS.Index = o.cheapExpr(indexLHS.Index)
+
+			call := n.Y.(*ir.CallExpr)
+			indexRHS := call.Args[0].(*ir.IndexExpr)
+			indexRHS.X = indexLHS.X
+			indexRHS.Index = indexLHS.Index
+
+			o.exprList(call.Args[1:])
+		} else {
+			n.Y = o.expr(n.Y, n.X)
+		}
 		o.mapAssign(n)
 		o.popTemp(t)
 
@@ -713,8 +759,6 @@
 	case ir.OBREAK,
 		ir.OCONTINUE,
 		ir.ODCL,
-		ir.ODCLCONST,
-		ir.ODCLTYPE,
 		ir.OFALL,
 		ir.OGOTO,
 		ir.OLABEL,
@@ -755,7 +799,7 @@
 		o.out = append(o.out, n)
 		o.popTemp(t)
 
-	case ir.OPRINT, ir.OPRINTN, ir.ORECOVERFP:
+	case ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
 		n := n.(*ir.CallExpr)
 		t := o.markTemp()
 		o.call(n)
@@ -817,8 +861,14 @@
 		// Mark []byte(str) range expression to reuse string backing storage.
 		// It is safe because the storage cannot be mutated.
 		n := n.(*ir.RangeStmt)
-		if n.X.Op() == ir.OSTR2BYTES {
-			n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
+		if x, ok := n.X.(*ir.ConvExpr); ok {
+			switch x.Op() {
+			case ir.OSTR2BYTES:
+				x.SetOp(ir.OSTR2BYTESTMP)
+				fallthrough
+			case ir.OSTR2BYTESTMP:
+				x.MarkNonNil() // "range []byte(nil)" is fine
+			}
 		}
 
 		t := o.markTemp()
@@ -826,11 +876,14 @@
 
 		orderBody := true
 		xt := typecheck.RangeExprType(n.X.Type())
-		switch xt.Kind() {
+		switch k := xt.Kind(); {
 		default:
 			base.Fatalf("order.stmt range %v", n.Type())
 
-		case types.TARRAY, types.TSLICE:
+		case types.IsInt[k]:
+			// Used only once, no need to copy.
+
+		case k == types.TARRAY, k == types.TSLICE:
 			if n.Value == nil || ir.IsBlank(n.Value) {
 				// for i := range x will only use x once, to compute len(x).
 				// No need to copy it.
@@ -838,7 +891,7 @@
 			}
 			fallthrough
 
-		case types.TCHAN, types.TSTRING:
+		case k == types.TCHAN, k == types.TSTRING:
 			// chan, string, slice, array ranges use value multiple times.
 			// make copy.
 			r := n.X
@@ -851,7 +904,7 @@
 
 			n.X = o.copyExpr(r)
 
-		case types.TMAP:
+		case k == types.TMAP:
 			if isMapClear(n) {
 				// Preserve the body of the map clear pattern so it can
 				// be detected during walk. The loop body will not be used
@@ -868,7 +921,7 @@
 
 			// n.Prealloc is the temp for the iterator.
 			// MapIterType contains pointers and needs to be zeroed.
-			n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true)
+			n.Prealloc = o.newTemp(reflectdata.MapIterType(), true)
 		}
 		n.Key = o.exprInPlace(n.Key)
 		n.Value = o.exprInPlace(n.Value)
@@ -1151,7 +1204,7 @@
 			}
 		}
 
-		// key must be addressable
+		// key may need to be addressable
 		n.Index = o.mapKeyTemp(n.Pos(), n.X.Type(), n.Index)
 		if needCopy {
 			return o.copyExpr(n)
@@ -1160,7 +1213,7 @@
 
 	// concrete type (not interface) argument might need an addressable
 	// temporary to pass to the runtime conversion routine.
-	case ir.OCONVIFACE, ir.OCONVIDATA:
+	case ir.OCONVIFACE:
 		n := n.(*ir.ConvExpr)
 		n.X = o.expr(n.X, nil)
 		if n.X.Type().IsInterface() {
@@ -1169,7 +1222,7 @@
 		if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
 			// Need a temp if we need to pass the address to the conversion function.
 			// We also process static composite literal node here, making a named static global
-			// whose address we can put directly in an interface (see OCONVIFACE/OCONVIDATA case in walk).
+			// whose address we can put directly in an interface (see OCONVIFACE case in walk).
 			n.X = o.addrTemp(n.X)
 		}
 		return n
@@ -1495,18 +1548,3 @@
 	o.out = append(o.out, n)
 	o.stmt(typecheck.Stmt(as))
 }
-
-// isFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions.
-func isFuncPCIntrinsic(n *ir.CallExpr) bool {
-	if n.Op() != ir.OCALLFUNC || n.X.Op() != ir.ONAME {
-		return false
-	}
-	fn := n.X.(*ir.Name).Sym()
-	return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") &&
-		(fn.Pkg.Path == "internal/abi" || fn.Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "internal/abi")
-}
-
-// isIfaceOfFunc returns whether n is an interface conversion from a direct reference of a func.
-func isIfaceOfFunc(n ir.Node) bool {
-	return n.Op() == ir.OCONVIFACE && n.(*ir.ConvExpr).X.Op() == ir.ONAME && n.(*ir.ConvExpr).X.(*ir.Name).Class == ir.PFUNC
-}
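// Illustrative sketch (user-level Go) of the m[k] = append(m[k], ...) case
// special-cased in order.stmt above: both index expressions must stay
// textually identical (ir.SameSafeExpr) so walkAssign can fuse the lookup and
// the store into a single mapassign call. The fusion is invisible here; the
// example only shows which source shapes qualify.
package main

import "fmt"

func main() {
	m := map[string][]int{}
	k := "hits"

	// Recognized form: one map access is shared by the read and the write.
	m[k] = append(m[k], 1)
	m[k] = append(m[k], 2, 3)

	// Not recognized: the key expressions differ, so no fusion applies.
	other := k
	m[k] = append(m[other], 4)

	fmt.Println(m[k]) // [1 2 3 4]
}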
diff --git a/src/cmd/compile/internal/walk/race.go b/src/cmd/compile/internal/walk/race.go
deleted file mode 100644
index 859e5c5..0000000
--- a/src/cmd/compile/internal/walk/race.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package walk
-
-import (
-	"cmd/compile/internal/base"
-	"cmd/compile/internal/ir"
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-)
-
-func instrument(fn *ir.Func) {
-	if fn.Pragma&ir.Norace != 0 || (fn.Linksym() != nil && fn.Linksym().ABIWrapper()) {
-		return
-	}
-
-	if !base.Flag.Race || !base.Compiling(base.NoRacePkgs) {
-		fn.SetInstrumentBody(true)
-	}
-
-	if base.Flag.Race {
-		lno := base.Pos
-		base.Pos = src.NoXPos
-		var init ir.Nodes
-		fn.Enter.Prepend(mkcallstmt("racefuncenter", mkcall("getcallerpc", types.Types[types.TUINTPTR], &init)))
-		if len(init) != 0 {
-			base.Fatalf("race walk: unexpected init for getcallerpc")
-		}
-		fn.Exit.Append(mkcallstmt("racefuncexit"))
-		base.Pos = lno
-	}
-}
diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go
index 38479b3..93898b3 100644
--- a/src/cmd/compile/internal/walk/range.go
+++ b/src/cmd/compile/internal/walk/range.go
@@ -74,11 +74,25 @@
 
 	var body []ir.Node
 	var init []ir.Node
-	switch t.Kind() {
+	switch k := t.Kind(); {
 	default:
 		base.Fatalf("walkRange")
 
-	case types.TARRAY, types.TSLICE, types.TPTR: // TPTR is pointer-to-array
+	case types.IsInt[k]:
+		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t)
+		hn := typecheck.TempAt(base.Pos, ir.CurFunc, t)
+
+		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+		init = append(init, ir.NewAssignStmt(base.Pos, hn, a))
+
+		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
+		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))
+
+		if v1 != nil {
+			body = []ir.Node{rangeAssign(nrange, hv1)}
+		}
+
+	case k == types.TARRAY, k == types.TSLICE, k == types.TPTR: // TPTR is pointer-to-array
 		if nn := arrayRangeClear(nrange, v1, v2, a); nn != nil {
 			base.Pos = lno
 			return nn
@@ -96,8 +110,8 @@
 		// order.stmt arranged for a copy of the array/slice variable if needed.
 		ha := a
 
-		hv1 := typecheck.Temp(types.Types[types.TINT])
-		hn := typecheck.Temp(types.Types[types.TINT])
+		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+		hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
 
 		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
 		init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))
@@ -196,14 +210,14 @@
 		ptr.SetBounded(true)
 		huVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], ptr)
 		huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINTPTR], huVal)
-		hu := typecheck.Temp(types.Types[types.TUINTPTR])
+		hu := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
 		init = append(init, ir.NewAssignStmt(base.Pos, hu, huVal))
 
 		// Convert hu to hp at the top of the loop (after the condition has been checked).
 		hpVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], hu)
 		hpVal.SetCheckPtr(true) // disable checkptr on this conversion
 		hpVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, elem.PtrTo(), hpVal)
-		hp := typecheck.Temp(elem.PtrTo())
+		hp := typecheck.TempAt(base.Pos, ir.CurFunc, elem.PtrTo())
 		body = append(body, ir.NewAssignStmt(base.Pos, hp, hpVal))
 
 		// Assign variables on the LHS of the range statement. Use *hp to get the element.
@@ -219,7 +233,7 @@
 		as := ir.NewAssignStmt(base.Pos, hu, ir.NewBinaryExpr(base.Pos, ir.OADD, huVal, ir.NewInt(base.Pos, elem.Size())))
 		nfor.Post = ir.NewBlockStmt(base.Pos, []ir.Node{nfor.Post, as})
 
-	case types.TMAP:
+	case k == types.TMAP:
 		// order.stmt allocated the iterator for us.
 		// we only use a once, so no copy needed.
 		ha := a
@@ -231,36 +245,33 @@
 		keysym := th.Field(0).Sym
 		elemsym := th.Field(1).Sym // ditto
 
-		fn := typecheck.LookupRuntime("mapiterinit")
-
-		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
+		fn := typecheck.LookupRuntime("mapiterinit", t.Key(), t.Elem(), th)
 		init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))
 		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
 
-		fn = typecheck.LookupRuntime("mapiternext")
-		fn = typecheck.SubstArgTypes(fn, th)
+		fn = typecheck.LookupRuntime("mapiternext", th)
 		nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit))
 
-		key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym))
+		key := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), types.NewPtr(t.Key())))
 		if v1 == nil {
 			body = nil
 		} else if v2 == nil {
 			body = []ir.Node{rangeAssign(nrange, key)}
 		} else {
-			elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym))
+			elem := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym), types.NewPtr(t.Elem())))
 			body = []ir.Node{rangeAssign2(nrange, key, elem)}
 		}
 
-	case types.TCHAN:
+	case k == types.TCHAN:
 		// order.stmt arranged for a copy of the channel variable.
 		ha := a
 
-		hv1 := typecheck.Temp(t.Elem())
+		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t.Elem())
 		hv1.SetTypecheck(1)
 		if t.Elem().HasPointers() {
 			init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
 		}
-		hb := typecheck.Temp(types.Types[types.TBOOL])
+		hb := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
 
 		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(base.Pos, false))
 		lhs := []ir.Node{hv1, hb}
@@ -278,7 +289,7 @@
 		// See issue 15281.
 		body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))
 
-	case types.TSTRING:
+	case k == types.TSTRING:
 		// Transform string range statements like "for v1, v2 = range a" into
 		//
 		// ha := a
@@ -297,9 +308,9 @@
 		// order.stmt arranged for a copy of the string variable.
 		ha := a
 
-		hv1 := typecheck.Temp(types.Types[types.TINT])
-		hv1t := typecheck.Temp(types.Types[types.TINT])
-		hv2 := typecheck.Temp(types.RuneType)
+		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+		hv1t := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+		hv2 := typecheck.TempAt(base.Pos, ir.CurFunc, types.RuneType)
 
 		// hv1 := 0
 		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
@@ -327,7 +338,7 @@
 		// } else {
 		// hv2, hv1 = decoderune(ha, hv1)
 		fn := typecheck.LookupRuntime("decoderune")
-		call := mkcall1(fn, fn.Type().Results(), &nif.Else, ha, hv1)
+		call := mkcall1(fn, fn.Type().ResultsTuple(), &nif.Else, ha, hv1)
 		a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{hv2, hv1}, []ir.Node{call})
 		nif.Else.Append(a)
 
@@ -454,8 +465,7 @@
 	t := m.Type()
 
 	// instantiate mapclear(typ *type, hmap map[any]any)
-	fn := typecheck.LookupRuntime("mapclear")
-	fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+	fn := typecheck.LookupRuntime("mapclear", t.Key(), t.Elem())
 	n := mkcallstmt1(fn, rtyp, m)
 	return walkStmt(typecheck.Stmt(n))
 }
@@ -529,7 +539,7 @@
 	n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, 0))
 
 	// hp = &a[0]
-	hp := typecheck.Temp(types.Types[types.TUNSAFEPTR])
+	hp := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUNSAFEPTR])
 
 	ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(base.Pos, 0))
 	ix.SetBounded(true)
@@ -537,7 +547,7 @@
 	n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))
 
 	// hn = len(a) * sizeof(elem(a))
-	hn := typecheck.Temp(types.Types[types.TUINTPTR])
+	hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
 	mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, elemsize)), types.Types[types.TUINTPTR])
 	n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))
 
@@ -564,18 +574,3 @@
 	typecheck.Stmts(n.Body)
 	return walkStmt(n)
 }
-
-// addptr returns (*T)(uintptr(p) + n).
-func addptr(p ir.Node, n int64) ir.Node {
-	t := p.Type()
-
-	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
-	p.SetType(types.Types[types.TUINTPTR])
-
-	p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, ir.NewInt(base.Pos, n))
-
-	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
-	p.SetType(t)
-
-	return p
-}
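// Illustrative sketch (user-level Go, requires go1.22) of the new
// range-over-int case added to walkRange above: `for v := range n` is lowered
// to an ordinary counted loop with two hidden temporaries, called hv1 and hn
// in the compiler.
package main

import "fmt"

func main() {
	n := 3

	// Written form:
	for v := range n {
		fmt.Println("range:", v)
	}

	// Approximate lowered form:
	hv1, hn := 0, n
	for hv1 < hn {
		v := hv1
		fmt.Println("lowered:", v)
		hv1++
	}
}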
diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go
index c676a76..ca6a76a 100644
--- a/src/cmd/compile/internal/walk/select.go
+++ b/src/cmd/compile/internal/walk/select.go
@@ -9,6 +9,7 @@
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
+	"cmd/internal/src"
 )
 
 func walkSelect(sel *ir.SelectStmt) {
@@ -125,9 +126,9 @@
 			if ir.IsBlank(elem) {
 				elem = typecheck.NodNil()
 			}
-			cond = typecheck.Temp(types.Types[types.TBOOL])
+			cond = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
 			fn := chanfn("selectnbrecv", 2, ch.Type())
-			call := mkcall1(fn, fn.Type().Results(), r.PtrInit(), elem, ch)
+			call := mkcall1(fn, fn.Type().ResultsTuple(), r.PtrInit(), elem, ch)
 			as := ir.NewAssignListStmt(r.Pos(), ir.OAS2, []ir.Node{cond, n.Lhs[1]}, []ir.Node{call})
 			r.PtrInit().Append(typecheck.Stmt(as))
 		}
@@ -148,15 +149,15 @@
 
 	// generate sel-struct
 	base.Pos = sellineno
-	selv := typecheck.Temp(types.NewArray(scasetype(), int64(ncas)))
+	selv := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(scasetype(), int64(ncas)))
 	init = append(init, typecheck.Stmt(ir.NewAssignStmt(base.Pos, selv, nil)))
 
 	// No initialization for order; runtime.selectgo is responsible for that.
-	order := typecheck.Temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
+	order := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
 
 	var pc0, pcs ir.Node
 	if base.Flag.Race {
-		pcs = typecheck.Temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
+		pcs = typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
 		pc0 = typecheck.Expr(typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(base.Pos, 0))))
 	} else {
 		pc0 = typecheck.NodNil()
@@ -220,13 +221,13 @@
 
 	// run the select
 	base.Pos = sellineno
-	chosen := typecheck.Temp(types.Types[types.TINT])
-	recvOK := typecheck.Temp(types.Types[types.TBOOL])
+	chosen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+	recvOK := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
 	r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
 	r.Lhs = []ir.Node{chosen, recvOK}
 	fn := typecheck.LookupRuntime("selectgo")
 	var fnInit ir.Nodes
-	r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(base.Pos, int64(nsends)), ir.NewInt(base.Pos, int64(nrecvs)), ir.NewBool(base.Pos, dflt == nil))}
+	r.Rhs = []ir.Node{mkcall1(fn, fn.Type().ResultsTuple(), &fnInit, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(base.Pos, int64(nsends)), ir.NewInt(base.Pos, int64(nrecvs)), ir.NewBool(base.Pos, dflt == nil))}
 	init = append(init, fnInit...)
 	init = append(init, typecheck.Stmt(r))
 
@@ -287,11 +288,15 @@
 // Keep in sync with src/runtime/select.go.
 func scasetype() *types.Type {
 	if scase == nil {
-		scase = types.NewStruct([]*types.Field{
+		n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("scase"))
+		scase = types.NewNamed(n)
+		n.SetType(scase)
+		n.SetTypecheck(1)
+
+		scase.SetUnderlying(types.NewStruct([]*types.Field{
 			types.NewField(base.Pos, typecheck.Lookup("c"), types.Types[types.TUNSAFEPTR]),
 			types.NewField(base.Pos, typecheck.Lookup("elem"), types.Types[types.TUNSAFEPTR]),
-		})
-		scase.SetNoalg(true)
+		}))
 	}
 	return scase
 }
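// Illustrative sketch (user-level Go) of the single-case-plus-default receive
// handled in walkSelect above: it is lowered to a call shaped like
// runtime.selectnbrecv, which reports both whether the case fired and the
// usual comma-ok result. tryRecv is a hypothetical stand-in written with an
// ordinary select, not the runtime routine.
package main

import "fmt"

func tryRecv[T any](ch <-chan T) (v T, selected, ok bool) {
	select {
	case v, ok = <-ch:
		return v, true, ok
	default:
		return v, false, false
	}
}

func main() {
	ch := make(chan int, 1)
	ch <- 7

	if v, selected, ok := tryRecv(ch); selected {
		fmt.Println("got", v, "ok:", ok)
	}
	if _, selected, _ := tryRecv(ch); !selected {
		fmt.Println("empty channel: default case taken")
	}
}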
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
index c6a03d2..b2a226e 100644
--- a/src/cmd/compile/internal/walk/stmt.go
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -48,13 +48,14 @@
 		ir.ODELETE,
 		ir.OSEND,
 		ir.OPRINT,
-		ir.OPRINTN,
+		ir.OPRINTLN,
 		ir.OPANIC,
 		ir.ORECOVERFP,
 		ir.OGETG:
 		if n.Typecheck() == 0 {
 			base.Fatalf("missing typecheck: %+v", n)
 		}
+
 		init := ir.TakeInit(n)
 		n = walkExpr(n, &init)
 		if n.Op() == ir.ONAME {
@@ -87,9 +88,8 @@
 		ir.OGOTO,
 		ir.OLABEL,
 		ir.OJUMPTABLE,
+		ir.OINTERFACESWITCH,
 		ir.ODCL,
-		ir.ODCLCONST,
-		ir.ODCLTYPE,
 		ir.OCHECKNIL:
 		return n
 
@@ -106,10 +106,11 @@
 		n := n.(*ir.GoDeferStmt)
 		ir.CurFunc.SetHasDefer(true)
 		ir.CurFunc.NumDefers++
-		if ir.CurFunc.NumDefers > maxOpenDefers {
+		if ir.CurFunc.NumDefers > maxOpenDefers || n.DeferAt != nil {
 			// Don't allow open-coded defers if there are more than
 			// 8 defers in the function, since we use a single
 			// byte to record active defers.
+			// Also don't allow if we need to use deferprocat.
 			ir.CurFunc.SetOpenCodedDeferDisallowed(true)
 		}
 		if n.Esc() != ir.EscNever {
@@ -138,7 +139,7 @@
 		n := n.(*ir.TailCallStmt)
 
 		var init ir.Nodes
-		n.Call.X = walkExpr(n.Call.X, &init)
+		n.Call.Fun = walkExpr(n.Call.Fun, &init)
 
 		if len(init) > 0 {
 			init.Append(n)
@@ -195,7 +196,7 @@
 // call without arguments or results.
 func validGoDeferCall(call ir.Node) bool {
 	if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC && len(call.KeepAlive) == 0 {
-		sig := call.X.Type()
+		sig := call.Fun.Type()
 		return sig.NumParams()+sig.NumResults() == 0
 	}
 	return false
@@ -210,7 +211,7 @@
 	var init ir.Nodes
 
 	call := n.Call.(*ir.CallExpr)
-	call.X = walkExpr(call.X, &init)
+	call.Fun = walkExpr(call.Fun, &init)
 
 	if len(init) > 0 {
 		init.Append(n)
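// Illustrative sketch (user-level Go) of the open-coded defer limit enforced
// above: more than maxOpenDefers (8) defers in one function, or a defer that
// must go through deferprocat, disables open-coding and falls back to defer
// records. Semantics are unchanged; defers still run in LIFO order.
package main

import "fmt"

// nineDefers exceeds the open-coding budget of eight defer statements.
func nineDefers() {
	defer fmt.Println(1)
	defer fmt.Println(2)
	defer fmt.Println(3)
	defer fmt.Println(4)
	defer fmt.Println(5)
	defer fmt.Println(6)
	defer fmt.Println(7)
	defer fmt.Println(8)
	defer fmt.Println(9)
}

func main() {
	nineDefers() // prints 9 down to 1
}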
diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go
index 1a167d3..b67d011 100644
--- a/src/cmd/compile/internal/walk/switch.go
+++ b/src/cmd/compile/internal/walk/switch.go
@@ -5,15 +5,21 @@
 package walk
 
 import (
+	"fmt"
 	"go/constant"
 	"go/token"
+	"math/bits"
 	"sort"
 
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/rttype"
 	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
+	"cmd/internal/obj"
 	"cmd/internal/src"
 )
 
@@ -232,7 +238,7 @@
 			s.done.Append(ir.NewBranchStmt(pos, ir.OGOTO, endLabel))
 
 			// Add length case to outer switch.
-			cas := ir.NewBasicLit(pos, constant.MakeInt64(runLen(run)))
+			cas := ir.NewInt(pos, runLen(run))
 			jmp := ir.NewBranchStmt(pos, ir.OGOTO, label)
 			outer.Add(pos, cas, nil, jmp)
 		}
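// Illustrative sketch (user-level Go) of the string-switch lowering the hunk
// above feeds: cases are grouped into runs by length, an outer switch
// dispatches on len(s), and each run is then compared by value. The function
// and case values here are hypothetical.
package main

import "fmt"

func classify(s string) string {
	// Original form:
	//   switch s { case "c", "go", "rust": return "compiled"; case "python": return "interpreted" }
	// Approximate lowered form:
	switch len(s) {
	case 1:
		if s == "c" {
			return "compiled"
		}
	case 2:
		if s == "go" {
			return "compiled"
		}
	case 4:
		if s == "rust" {
			return "compiled"
		}
	case 6:
		if s == "python" {
			return "interpreted"
		}
	}
	return "unknown"
}

func main() {
	fmt.Println(classify("go"), classify("python"), classify("perl"))
}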
@@ -378,17 +384,19 @@
 // type switch.
 func walkSwitchType(sw *ir.SwitchStmt) {
 	var s typeSwitch
-	s.facename = sw.Tag.(*ir.TypeSwitchGuard).X
-	sw.Tag = nil
-
-	s.facename = walkExpr(s.facename, sw.PtrInit())
-	s.facename = copyExpr(s.facename, s.facename.Type(), &sw.Compiled)
-	s.okname = typecheck.Temp(types.Types[types.TBOOL])
+	s.srcName = sw.Tag.(*ir.TypeSwitchGuard).X
+	s.srcName = walkExpr(s.srcName, sw.PtrInit())
+	s.srcName = copyExpr(s.srcName, s.srcName.Type(), &sw.Compiled)
+	s.okName = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
+	s.itabName = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINT8].PtrTo())
 
 	// Get interface descriptor word.
 	// For empty interfaces this will be the type.
 	// For non-empty interfaces this will be the itab.
-	itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.facename)
+	srcItab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.srcName)
+	srcData := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s.srcName)
+	srcData.SetType(types.Types[types.TUINT8].PtrTo())
+	srcData.SetTypecheck(1)
 
 	// For empty interfaces, do:
 	//     if e._type == nil {
@@ -397,42 +405,49 @@
 	//     h := e._type.hash
 	// Use a similar strategy for non-empty interfaces.
 	ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil)
-	ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, typecheck.NodNil())
+	ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, srcItab, typecheck.NodNil())
 	base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
 	ifNil.Cond = typecheck.Expr(ifNil.Cond)
 	ifNil.Cond = typecheck.DefaultLit(ifNil.Cond, nil)
-	// ifNil.Nbody assigned at end.
+	// ifNil.Nbody assigned later.
 	sw.Compiled.Append(ifNil)
 
 	// Load hash from type or itab.
-	dotHash := typeHashFieldOf(base.Pos, itab)
-	s.hashname = copyExpr(dotHash, dotHash.Type(), &sw.Compiled)
+	dotHash := typeHashFieldOf(base.Pos, srcItab)
+	s.hashName = copyExpr(dotHash, dotHash.Type(), &sw.Compiled)
 
+	// Make a label for each case body.
+	labels := make([]*types.Sym, len(sw.Cases))
+	for i := range sw.Cases {
+		labels[i] = typecheck.AutoLabel(".s")
+	}
+
+	// "jump" to execute if no case matches.
 	br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+
+	// Assemble a list of all the types we're looking for.
+	// This pass flattens the case lists and also handles
+	// some unusual cases, like default and nil cases.
+	type oneCase struct {
+		pos src.XPos
+		jmp ir.Node // jump to body of selected case
+
+		// The case we're matching. Normally the type we're looking for
+		// is typ.Type(), but when typ is ODYNAMICTYPE the actual type
+		// we're looking for is not a compile-time constant (typ.Type()
+		// will be its shape).
+		typ ir.Node
+	}
+	var cases []oneCase
 	var defaultGoto, nilGoto ir.Node
-	var body ir.Nodes
-	for _, ncase := range sw.Cases {
-		caseVar := ncase.Var
-
-		// For single-type cases with an interface type,
-		// we initialize the case variable as part of the type assertion.
-		// In other cases, we initialize it in the body.
-		var singleType *types.Type
-		if len(ncase.List) == 1 && ncase.List[0].Op() == ir.OTYPE {
-			singleType = ncase.List[0].Type()
-		}
-		caseVarInitialized := false
-
-		label := typecheck.AutoLabel(".s")
-		jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
-
+	for i, ncase := range sw.Cases {
+		jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, labels[i])
 		if len(ncase.List) == 0 { // default:
 			if defaultGoto != nil {
 				base.Fatalf("duplicate default case not detected during typechecking")
 			}
 			defaultGoto = jmp
 		}
-
 		for _, n1 := range ncase.List {
 			if ir.IsNil(n1) { // case nil:
 				if nilGoto != nil {
@@ -441,45 +456,30 @@
 				nilGoto = jmp
 				continue
 			}
-
-			if singleType != nil && singleType.IsInterface() {
-				s.Add(ncase.Pos(), n1, caseVar, jmp)
-				caseVarInitialized = true
-			} else {
-				s.Add(ncase.Pos(), n1, nil, jmp)
-			}
-		}
-
-		body.Append(ir.NewLabelStmt(ncase.Pos(), label))
-		if caseVar != nil && !caseVarInitialized {
-			val := s.facename
-			if singleType != nil {
-				// We have a single concrete type. Extract the data.
-				if singleType.IsInterface() {
-					base.Fatalf("singleType interface should have been handled in Add")
+			if n1.Op() == ir.ODYNAMICTYPE {
+				// Convert dynamic to static, if the dynamic is actually static.
+				// TODO: why isn't this OTYPE to begin with?
+				dt := n1.(*ir.DynamicType)
+				if dt.RType != nil && dt.RType.Op() == ir.OADDR {
+					addr := dt.RType.(*ir.AddrExpr)
+					if addr.X.Op() == ir.OLINKSYMOFFSET {
+						n1 = ir.TypeNode(n1.Type())
+					}
 				}
-				val = ifaceData(ncase.Pos(), s.facename, singleType)
+				if dt.ITab != nil && dt.ITab.Op() == ir.OADDR {
+					addr := dt.ITab.(*ir.AddrExpr)
+					if addr.X.Op() == ir.OLINKSYMOFFSET {
+						n1 = ir.TypeNode(n1.Type())
+					}
+				}
 			}
-			if len(ncase.List) == 1 && ncase.List[0].Op() == ir.ODYNAMICTYPE {
-				dt := ncase.List[0].(*ir.DynamicType)
-				x := ir.NewDynamicTypeAssertExpr(ncase.Pos(), ir.ODYNAMICDOTTYPE, val, dt.RType)
-				x.ITab = dt.ITab
-				x.SetType(caseVar.Type())
-				x.SetTypecheck(1)
-				val = x
-			}
-			l := []ir.Node{
-				ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar),
-				ir.NewAssignStmt(ncase.Pos(), caseVar, val),
-			}
-			typecheck.Stmts(l)
-			body.Append(l...)
+			cases = append(cases, oneCase{
+				pos: ncase.Pos(),
+				typ: n1,
+				jmp: jmp,
+			})
 		}
-		body.Append(ncase.Body...)
-		body.Append(br)
 	}
-	sw.Cases = nil
-
 	if defaultGoto == nil {
 		defaultGoto = br
 	}
@@ -488,13 +488,201 @@
 	}
 	ifNil.Body = []ir.Node{nilGoto}
 
-	s.Emit(&sw.Compiled)
-	sw.Compiled.Append(defaultGoto)
-	sw.Compiled.Append(body.Take()...)
+	// Now go through the list of cases, processing groups as we find them.
+	var concreteCases []oneCase
+	var interfaceCases []oneCase
+	flush := func() {
+		// Process all the concrete types first. Because we handle shadowing
+		// below, it is correct to do all the concrete types before all of
+		// the interface types.
+		// The concrete cases can all be handled without a runtime call.
+		if len(concreteCases) > 0 {
+			var clauses []typeClause
+			for _, c := range concreteCases {
+				as := ir.NewAssignListStmt(c.pos, ir.OAS2,
+					[]ir.Node{ir.BlankNode, s.okName},                               // _, ok =
+					[]ir.Node{ir.NewTypeAssertExpr(c.pos, s.srcName, c.typ.Type())}) // iface.(type)
+				nif := ir.NewIfStmt(c.pos, s.okName, []ir.Node{c.jmp}, nil)
+				clauses = append(clauses, typeClause{
+					hash: types.TypeHash(c.typ.Type()),
+					body: []ir.Node{typecheck.Stmt(as), typecheck.Stmt(nif)},
+				})
+			}
+			s.flush(clauses, &sw.Compiled)
+			concreteCases = concreteCases[:0]
+		}
+
+		// The "any" case, if it exists, must be the last interface case, because
+		// it would shadow all subsequent cases. Strip it off here so the runtime
+		// call only needs to handle non-empty interfaces.
+		var anyGoto ir.Node
+		if len(interfaceCases) > 0 && interfaceCases[len(interfaceCases)-1].typ.Type().IsEmptyInterface() {
+			anyGoto = interfaceCases[len(interfaceCases)-1].jmp
+			interfaceCases = interfaceCases[:len(interfaceCases)-1]
+		}
+
+		// Next, process all the interface types with a single call to the runtime.
+		if len(interfaceCases) > 0 {
+
+			// Build an internal/abi.InterfaceSwitch descriptor to pass to the runtime.
+			lsym := types.LocalPkg.Lookup(fmt.Sprintf(".interfaceSwitch.%d", interfaceSwitchGen)).LinksymABI(obj.ABI0)
+			interfaceSwitchGen++
+			c := rttype.NewCursor(lsym, 0, rttype.InterfaceSwitch)
+			c.Field("Cache").WritePtr(typecheck.LookupRuntimeVar("emptyInterfaceSwitchCache"))
+			c.Field("NCases").WriteInt(int64(len(interfaceCases)))
+			array, sizeDelta := c.Field("Cases").ModifyArray(len(interfaceCases))
+			for i, c := range interfaceCases {
+				array.Elem(i).WritePtr(reflectdata.TypeSym(c.typ.Type()).Linksym())
+			}
+			objw.Global(lsym, int32(rttype.InterfaceSwitch.Size()+sizeDelta), obj.LOCAL)
+			// The GC only needs to see the first pointer in the structure (all the others
+			// are to static locations). So the InterfaceSwitch type itself is fine, even
+			// though it might not cover the whole array we wrote above.
+			lsym.Gotype = reflectdata.TypeLinksym(rttype.InterfaceSwitch)
+
+			// Call runtime to do switch
+			// case, itab = runtime.interfaceSwitch(&descriptor, typeof(arg))
+			var typeArg ir.Node
+			if s.srcName.Type().IsEmptyInterface() {
+				typeArg = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINT8].PtrTo(), srcItab)
+			} else {
+				typeArg = itabType(srcItab)
+			}
+			caseVar := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+			isw := ir.NewInterfaceSwitchStmt(base.Pos, caseVar, s.itabName, typeArg, dotHash, lsym)
+			sw.Compiled.Append(isw)
+
+			// Switch on the result of the call (or cache lookup).
+			var newCases []*ir.CaseClause
+			for i, c := range interfaceCases {
+				newCases = append(newCases, &ir.CaseClause{
+					List: []ir.Node{ir.NewInt(base.Pos, int64(i))},
+					Body: []ir.Node{c.jmp},
+				})
+			}
+			// TODO: add len(newCases) case, mark switch as bounded
+			sw2 := ir.NewSwitchStmt(base.Pos, caseVar, newCases)
+			sw.Compiled.Append(typecheck.Stmt(sw2))
+			interfaceCases = interfaceCases[:0]
+		}
+
+		if anyGoto != nil {
+			// We've already handled the nil case, so everything
+			// that reaches here matches the "any" case.
+			sw.Compiled.Append(anyGoto)
+		}
+	}
+caseLoop:
+	for _, c := range cases {
+		if c.typ.Op() == ir.ODYNAMICTYPE {
+			flush() // process all previous cases
+			dt := c.typ.(*ir.DynamicType)
+			dot := ir.NewDynamicTypeAssertExpr(c.pos, ir.ODYNAMICDOTTYPE, s.srcName, dt.RType)
+			dot.ITab = dt.ITab
+			dot.SetType(c.typ.Type())
+			dot.SetTypecheck(1)
+
+			as := ir.NewAssignListStmt(c.pos, ir.OAS2, nil, nil)
+			as.Lhs = []ir.Node{ir.BlankNode, s.okName} // _, ok =
+			as.Rhs = []ir.Node{dot}
+			typecheck.Stmt(as)
+
+			nif := ir.NewIfStmt(c.pos, s.okName, []ir.Node{c.jmp}, nil)
+			sw.Compiled.Append(as, nif)
+			continue
+		}
+
+		// Check for shadowing (a case that will never fire because
+		// a previous case would have always fired first). This check
+		// allows us to reorder concrete and interface cases.
+		// (TODO: these should be vet failures, maybe?)
+		for _, ic := range interfaceCases {
+			// An interface type case will shadow all
+			// subsequent types that implement that interface.
+			if typecheck.Implements(c.typ.Type(), ic.typ.Type()) {
+				continue caseLoop
+			}
+			// Note that we don't need to worry about:
+			// 1. Two concrete types shadowing each other. That's
+			//    disallowed by the spec.
+			// 2. A concrete type shadowing an interface type.
+			//    That can never happen, as interface types can
+			//    be satisfied by an infinite set of concrete types.
+			// The correctness of this step also depends on handling
+			// the dynamic type cases separately, as we do above.
+		}
+
+		if c.typ.Type().IsInterface() {
+			interfaceCases = append(interfaceCases, c)
+		} else {
+			concreteCases = append(concreteCases, c)
+		}
+	}
+	flush()
+
+	sw.Compiled.Append(defaultGoto) // if none of the cases matched
+
+	// Now generate all the case bodies
+	for i, ncase := range sw.Cases {
+		sw.Compiled.Append(ir.NewLabelStmt(ncase.Pos(), labels[i]))
+		if caseVar := ncase.Var; caseVar != nil {
+			val := s.srcName
+			if len(ncase.List) == 1 {
+				// single type. We have to downcast the input value to the target type.
+				if ncase.List[0].Op() == ir.OTYPE { // single compile-time known type
+					t := ncase.List[0].Type()
+					if t.IsInterface() {
+						// This case is an interface. Build case value from input interface.
+						// The data word will always be the same, but the itab/type changes.
+						if t.IsEmptyInterface() {
+							var typ ir.Node
+							if s.srcName.Type().IsEmptyInterface() {
+								// E->E, nothing to do, type is already correct.
+								typ = srcItab
+							} else {
+								// I->E, load type out of itab
+								typ = itabType(srcItab)
+								typ.SetPos(ncase.Pos())
+							}
+							val = ir.NewBinaryExpr(ncase.Pos(), ir.OMAKEFACE, typ, srcData)
+						} else {
+							// The itab we need was returned by a runtime.interfaceSwitch call.
+							val = ir.NewBinaryExpr(ncase.Pos(), ir.OMAKEFACE, s.itabName, srcData)
+						}
+					} else {
+						// This case is a concrete type, just read its value out of the interface.
+						val = ifaceData(ncase.Pos(), s.srcName, t)
+					}
+				} else if ncase.List[0].Op() == ir.ODYNAMICTYPE { // single runtime known type
+					dt := ncase.List[0].(*ir.DynamicType)
+					x := ir.NewDynamicTypeAssertExpr(ncase.Pos(), ir.ODYNAMICDOTTYPE, val, dt.RType)
+					x.ITab = dt.ITab
+					val = x
+				} else if ir.IsNil(ncase.List[0]) {
+				} else {
+					base.Fatalf("unhandled type switch case %v", ncase.List[0])
+				}
+				val.SetType(caseVar.Type())
+				val.SetTypecheck(1)
+			}
+			l := []ir.Node{
+				ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar),
+				ir.NewAssignStmt(ncase.Pos(), caseVar, val),
+			}
+			typecheck.Stmts(l)
+			sw.Compiled.Append(l...)
+		}
+		sw.Compiled.Append(ncase.Body...)
+		sw.Compiled.Append(br)
+	}
 
 	walkStmtList(sw.Compiled)
+	sw.Tag = nil
+	sw.Cases = nil
 }
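
The shadowing check in the caseLoop above leans on a language-level rule: in a type switch the first matching case wins, so an interface case shadows every later case whose type implements that interface. A minimal standalone sketch of that rule in ordinary user code (not compiler internals; the identifiers below are illustrative only):

package main

import (
	"bytes"
	"fmt"
	"io"
)

func describe(v any) string {
	switch v.(type) {
	case io.Reader:
		// *bytes.Buffer implements io.Reader, so this case matches first;
		// the concrete case below can never fire for *bytes.Buffer values.
		return "io.Reader"
	case *bytes.Buffer:
		return "*bytes.Buffer" // shadowed by the io.Reader case above
	default:
		return "other"
	}
}

func main() {
	fmt.Println(describe(new(bytes.Buffer))) // prints "io.Reader"
}

Because flush handles all the concrete cases before the interface cases, skipping a concrete case that is shadowed by an earlier interface case is what preserves this source-order behavior after the reordering.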
 
+var interfaceSwitchGen int
+
 // typeHashFieldOf returns an expression to select the type hash field
 // from an interface's descriptor word (whether a *runtime._type or
 // *runtime.itab pointer).
@@ -506,7 +694,7 @@
 	if itab.X.Type().IsEmptyInterface() {
 		// runtime._type's hash field
 		if rtypeHashField == nil {
-			rtypeHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32])
+			rtypeHashField = runtimeField("hash", rttype.Type.OffsetOf("Hash"), types.Types[types.TUINT32])
 		}
 		hashField = rtypeHashField
 	} else {
@@ -524,12 +712,10 @@
 // A typeSwitch walks a type switch.
 type typeSwitch struct {
 	// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
-	facename ir.Node // value being type-switched on
-	hashname ir.Node // type hash of the value being type-switched on
-	okname   ir.Node // boolean used for comma-ok type assertions
-
-	done    ir.Nodes
-	clauses []typeClause
+	srcName  ir.Node // value being type-switched on
+	hashName ir.Node // type hash of the value being type-switched on
+	okName   ir.Node // boolean used for comma-ok type assertions
+	itabName ir.Node // itab value to use for first word of non-empty interface
 }
 
 type typeClause struct {
@@ -537,68 +723,7 @@
 	body ir.Nodes
 }
 
-func (s *typeSwitch) Add(pos src.XPos, n1 ir.Node, caseVar *ir.Name, jmp ir.Node) {
-	typ := n1.Type()
-	var body ir.Nodes
-	if caseVar != nil {
-		l := []ir.Node{
-			ir.NewDecl(pos, ir.ODCL, caseVar),
-			ir.NewAssignStmt(pos, caseVar, nil),
-		}
-		typecheck.Stmts(l)
-		body.Append(l...)
-	} else {
-		caseVar = ir.BlankNode.(*ir.Name)
-	}
-
-	// cv, ok = iface.(type)
-	as := ir.NewAssignListStmt(pos, ir.OAS2, nil, nil)
-	as.Lhs = []ir.Node{caseVar, s.okname} // cv, ok =
-	switch n1.Op() {
-	case ir.OTYPE:
-		// Static type assertion (non-generic)
-		dot := ir.NewTypeAssertExpr(pos, s.facename, typ) // iface.(type)
-		as.Rhs = []ir.Node{dot}
-	case ir.ODYNAMICTYPE:
-		// Dynamic type assertion (generic)
-		dt := n1.(*ir.DynamicType)
-		dot := ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, s.facename, dt.RType)
-		dot.ITab = dt.ITab
-		dot.SetType(typ)
-		dot.SetTypecheck(1)
-		as.Rhs = []ir.Node{dot}
-	default:
-		base.Fatalf("unhandled type case %s", n1.Op())
-	}
-	appendWalkStmt(&body, as)
-
-	// if ok { goto label }
-	nif := ir.NewIfStmt(pos, nil, nil, nil)
-	nif.Cond = s.okname
-	nif.Body = []ir.Node{jmp}
-	body.Append(nif)
-
-	if n1.Op() == ir.OTYPE && !typ.IsInterface() {
-		// Defer static, noninterface cases so they can be binary searched by hash.
-		s.clauses = append(s.clauses, typeClause{
-			hash: types.TypeHash(n1.Type()),
-			body: body,
-		})
-		return
-	}
-
-	s.flush()
-	s.done.Append(body.Take()...)
-}
-
-func (s *typeSwitch) Emit(out *ir.Nodes) {
-	s.flush()
-	out.Append(s.done.Take()...)
-}
-
-func (s *typeSwitch) flush() {
-	cc := s.clauses
-	s.clauses = nil
+func (s *typeSwitch) flush(cc []typeClause, compiled *ir.Nodes) {
 	if len(cc) == 0 {
 		return
 	}
@@ -617,21 +742,100 @@
 	}
 	cc = merged
 
-	// TODO: figure out if we could use a jump table using some low bits of the type hashes.
-	binarySearch(len(cc), &s.done,
+	if s.tryJumpTable(cc, compiled) {
+		return
+	}
+	binarySearch(len(cc), compiled,
 		func(i int) ir.Node {
-			return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashname, ir.NewInt(base.Pos, int64(cc[i-1].hash)))
+			return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashName, ir.NewInt(base.Pos, int64(cc[i-1].hash)))
 		},
 		func(i int, nif *ir.IfStmt) {
 			// TODO(mdempsky): Omit hash equality check if
 			// there's only one type.
 			c := cc[i]
-			nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, ir.NewInt(base.Pos, int64(c.hash)))
+			nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashName, ir.NewInt(base.Pos, int64(c.hash)))
 			nif.Body.Append(c.body.Take()...)
 		},
 	)
 }
 
+// Try to implement the clauses with a jump table. Returns true if successful.
+func (s *typeSwitch) tryJumpTable(cc []typeClause, out *ir.Nodes) bool {
+	const minCases = 5 // have at least minCases cases in the switch
+	if base.Flag.N != 0 || !ssagen.Arch.LinkArch.CanJumpTable || base.Ctxt.Retpoline {
+		return false
+	}
+	if len(cc) < minCases {
+		return false // not enough cases for it to be worth it
+	}
+	hashes := make([]uint32, len(cc))
+	// b = # of bits to use. Start with the minimum number of
+	// bits possible, but try a few larger sizes if needed.
+	b0 := bits.Len(uint(len(cc) - 1))
+	for b := b0; b < b0+3; b++ {
+	pickI:
+		for i := 0; i <= 32-b; i++ { // starting bit position
+			// Compute the hash we'd get from all the cases,
+			// selecting b bits starting at bit i.
+			hashes = hashes[:0]
+			for _, c := range cc {
+				h := c.hash >> i & (1<<b - 1)
+				hashes = append(hashes, h)
+			}
+			// Order by increasing hash.
+			sort.Slice(hashes, func(j, k int) bool {
+				return hashes[j] < hashes[k]
+			})
+			for j := 1; j < len(hashes); j++ {
+				if hashes[j] == hashes[j-1] {
+					// There is a duplicate hash; try a different b/i pair.
+					continue pickI
+				}
+			}
+
+			// All hashes are distinct. Use these values of b and i.
+			h := s.hashName
+			if i != 0 {
+				h = ir.NewBinaryExpr(base.Pos, ir.ORSH, h, ir.NewInt(base.Pos, int64(i)))
+			}
+			h = ir.NewBinaryExpr(base.Pos, ir.OAND, h, ir.NewInt(base.Pos, int64(1<<b-1)))
+			h = typecheck.Expr(h)
+
+			// Build jump table.
+			jt := ir.NewJumpTableStmt(base.Pos, h)
+			jt.Cases = make([]constant.Value, 1<<b)
+			jt.Targets = make([]*types.Sym, 1<<b)
+			out.Append(jt)
+
+			// Start with all hashes going to the didn't-match target.
+			noMatch := typecheck.AutoLabel(".s")
+			for j := 0; j < 1<<b; j++ {
+				jt.Cases[j] = constant.MakeInt64(int64(j))
+				jt.Targets[j] = noMatch
+			}
+			// This statement is not reachable, but it will make it obvious that we don't
+			// fall through to the first case.
+			out.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, noMatch))
+
+			// Emit each of the actual cases.
+			for _, c := range cc {
+				h := c.hash >> i & (1<<b - 1)
+				label := typecheck.AutoLabel(".s")
+				jt.Targets[h] = label
+				out.Append(ir.NewLabelStmt(base.Pos, label))
+				out.Append(c.body...)
+				// We reach here if the hash matches but the type equality test fails.
+				out.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, noMatch))
+			}
+			// Emit point to go to if type doesn't match any case.
+			out.Append(ir.NewLabelStmt(base.Pos, noMatch))
+			return true
+		}
+	}
+	// Couldn't find a perfect hash. Fall back to binary search.
+	return false
+}
+
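
The bit-window search above can be exercised on its own. A minimal standalone sketch (plain Go over ordinary uint32 hashes rather than the compiler's typeClause data; the function name is made up for illustration):

package main

import (
	"fmt"
	"math/bits"
	"sort"
)

// findWindow looks for a width b and starting bit i such that the b-bit
// slice of every hash starting at bit i is distinct across all hashes,
// mirroring the loop structure of tryJumpTable: try the minimum width
// first, then a couple of wider windows.
func findWindow(hashes []uint32) (start, width int, ok bool) {
	b0 := bits.Len(uint(len(hashes) - 1))
	for b := b0; b < b0+3; b++ {
	pickI:
		for i := 0; i <= 32-b; i++ {
			sub := make([]uint32, 0, len(hashes))
			for _, h := range hashes {
				sub = append(sub, h>>i&(1<<b-1))
			}
			sort.Slice(sub, func(j, k int) bool { return sub[j] < sub[k] })
			for j := 1; j < len(sub); j++ {
				if sub[j] == sub[j-1] {
					continue pickI // collision; try the next starting bit
				}
			}
			return i, b, true
		}
	}
	return 0, 0, false // no perfect window; the compiler falls back to binary search
}

func main() {
	hashes := []uint32{0x1a2b3c4d, 0x2b3c4d5e, 0x99aabbcc, 0x01020304, 0xdeadbeef}
	fmt.Println(findWindow(hashes))
}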
 // binarySearch constructs a binary search tree for handling n cases,
 // and appends it to out. It's used for efficiently implementing
 // switch statements.
@@ -736,6 +940,7 @@
 	// Convert expr to a []int8
 	slice := ir.NewConvExpr(base.Pos, ir.OSTR2BYTESTMP, types.NewSlice(types.Types[types.TINT8]), expr)
 	slice.SetTypecheck(1) // legacy typechecker doesn't handle this op
+	slice.MarkNonNil()
 	// Load the byte we're splitting on.
 	load := ir.NewIndexExpr(base.Pos, slice, ir.NewInt(base.Pos, int64(bestIdx)))
 	// Compare with the value we're splitting on.
diff --git a/src/cmd/compile/internal/walk/temp.go b/src/cmd/compile/internal/walk/temp.go
index d2ffb22..886b5be 100644
--- a/src/cmd/compile/internal/walk/temp.go
+++ b/src/cmd/compile/internal/walk/temp.go
@@ -25,7 +25,7 @@
 // allocated temporary variable of the given type. Statements to
 // zero-initialize tmp are appended to init.
 func stackTempAddr(init *ir.Nodes, typ *types.Type) *ir.AddrExpr {
-	return initStackTemp(init, typecheck.Temp(typ), nil)
+	return initStackTemp(init, typecheck.TempAt(base.Pos, ir.CurFunc, typ), nil)
 }
 
 // stackBufAddr returns the expression &tmp, where tmp is a newly
@@ -35,6 +35,6 @@
 	if elem.HasPointers() {
 		base.FatalfAt(base.Pos, "%v has pointers", elem)
 	}
-	tmp := typecheck.Temp(types.NewArray(elem, len))
+	tmp := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(elem, len))
 	return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
 }
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index 149e23a..001edcc 100644
--- a/src/cmd/compile/internal/walk/walk.go
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -5,7 +5,6 @@
 package walk
 
 import (
-	"errors"
 	"fmt"
 
 	"cmd/compile/internal/base"
@@ -19,7 +18,6 @@
 
 // The constant is known to runtime.
 const tmpstringbufsize = 32
-const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
 
 func Walk(fn *ir.Func) {
 	ir.CurFunc = fn
@@ -46,10 +44,6 @@
 		ir.DumpList(s, ir.CurFunc.Body)
 	}
 
-	if base.Flag.Cfg.Instrumenting {
-		instrument(fn)
-	}
-
 	// Eagerly compute sizes of all variables for SSA.
 	for _, n := range fn.Dcl {
 		types.CalcSize(n.Type())
@@ -98,8 +92,6 @@
 	return n
 }
 
-var stop = errors.New("stop")
-
 func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
 	if init == nil {
 		base.Fatalf("mkcall with nil init: %v", fn)
@@ -144,42 +136,34 @@
 	if !t.IsChan() {
 		base.Fatalf("chanfn %v", t)
 	}
-	fn := typecheck.LookupRuntime(name)
 	switch n {
-	default:
-		base.Fatalf("chanfn %d", n)
 	case 1:
-		fn = typecheck.SubstArgTypes(fn, t.Elem())
+		return typecheck.LookupRuntime(name, t.Elem())
 	case 2:
-		fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
+		return typecheck.LookupRuntime(name, t.Elem(), t.Elem())
 	}
-	return fn
+	base.Fatalf("chanfn %d", n)
+	return nil
 }
 
 func mapfn(name string, t *types.Type, isfat bool) ir.Node {
 	if !t.IsMap() {
 		base.Fatalf("mapfn %v", t)
 	}
-	fn := typecheck.LookupRuntime(name)
 	if mapfast(t) == mapslow || isfat {
-		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
-	} else {
-		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Elem())
+		return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key(), t.Elem())
 	}
-	return fn
+	return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Elem())
 }
 
 func mapfndel(name string, t *types.Type) ir.Node {
 	if !t.IsMap() {
 		base.Fatalf("mapfn %v", t)
 	}
-	fn := typecheck.LookupRuntime(name)
 	if mapfast(t) == mapslow {
-		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key())
-	} else {
-		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+		return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key())
 	}
-	return fn
+	return typecheck.LookupRuntime(name, t.Key(), t.Elem())
 }
 
 const (
@@ -344,7 +328,7 @@
 			return n.Type().IsString() || n.Type().IsFloat()
 
 		case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR,
-			ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OEFACE,
+			ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OMAKEFACE,
 			ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS,
 			ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
 			ir.OCONVNOP, ir.ODOT,
diff --git a/src/cmd/covdata/doc.go b/src/cmd/covdata/doc.go
index 1836bb8..ae2e4e4 100644
--- a/src/cmd/covdata/doc.go
+++ b/src/cmd/covdata/doc.go
@@ -18,7 +18,7 @@
 Run covdata via "go tool covdata <mode>", where 'mode' is a subcommand
 selecting a specific reporting, merging, or data manipulation operation.
 Descriptions on the various modes (run "go tool cover <mode> -help" for
-specifics on usage of a given mode:
+specifics on usage of a given mode):
 
 1. Report percent of statements covered in each profiled package
 
diff --git a/src/cmd/cover/cfg_test.go b/src/cmd/cover/cfg_test.go
index 6782ec8..701de61 100644
--- a/src/cmd/cover/cfg_test.go
+++ b/src/cmd/cover/cfg_test.go
@@ -5,9 +5,9 @@
 package main_test
 
 import (
+	"cmd/internal/cov/covcmd"
 	"encoding/json"
 	"fmt"
-	"internal/coverage"
 	"internal/testenv"
 	"os"
 	"path/filepath"
@@ -21,14 +21,15 @@
 	}
 }
 
-func writePkgConfig(t *testing.T, outdir, tag, ppath, pname string, gran string) string {
+func writePkgConfig(t *testing.T, outdir, tag, ppath, pname string, gran string, mpath string) string {
 	incfg := filepath.Join(outdir, tag+"incfg.txt")
 	outcfg := filepath.Join(outdir, "outcfg.txt")
-	p := coverage.CoverPkgConfig{
-		PkgPath:     ppath,
-		PkgName:     pname,
-		Granularity: gran,
-		OutConfig:   outcfg,
+	p := covcmd.CoverPkgConfig{
+		PkgPath:      ppath,
+		PkgName:      pname,
+		Granularity:  gran,
+		OutConfig:    outcfg,
+		EmitMetaFile: mpath,
 	}
 	data, err := json.Marshal(p)
 	if err != nil {
@@ -74,10 +75,6 @@
 	}
 }
 
-// Set to true when debugging unit test (to inspect debris, etc).
-// Note that this functionality does not work on windows.
-const debugWorkDir = false
-
 func TestCoverWithCfg(t *testing.T) {
 	testenv.MustHaveGoRun(t)
 
@@ -85,29 +82,7 @@
 
 	// Subdir in testdata that has our input files of interest.
 	tpath := filepath.Join("testdata", "pkgcfg")
-
-	// Helper to collect input paths (go files) for a subdir in 'pkgcfg'
-	pfiles := func(subdir string) []string {
-		de, err := os.ReadDir(filepath.Join(tpath, subdir))
-		if err != nil {
-			t.Fatalf("reading subdir %s: %v", subdir, err)
-		}
-		paths := []string{}
-		for _, e := range de {
-			if !strings.HasSuffix(e.Name(), ".go") || strings.HasSuffix(e.Name(), "_test.go") {
-				continue
-			}
-			paths = append(paths, filepath.Join(tpath, subdir, e.Name()))
-		}
-		return paths
-	}
-
 	dir := tempDir(t)
-	if debugWorkDir {
-		dir = "/tmp/qqq"
-		os.RemoveAll(dir)
-		os.Mkdir(dir, 0777)
-	}
 	instdira := filepath.Join(dir, "insta")
 	if err := os.Mkdir(instdira, 0777); err != nil {
 		t.Fatal(err)
@@ -131,6 +106,7 @@
 	}
 
 	var incfg string
+	apkgfiles := []string{filepath.Join(tpath, "a", "a.go")}
 	for _, scenario := range scenarios {
 		// Instrument package "a", producing a set of instrumented output
 		// files and an 'output config' file to pass on to the compiler.
@@ -139,9 +115,9 @@
 		mode := scenario.mode
 		gran := scenario.gran
 		tag := mode + "_" + gran
-		incfg = writePkgConfig(t, instdira, tag, ppath, pname, gran)
+		incfg = writePkgConfig(t, instdira, tag, ppath, pname, gran, "")
 		ofs, outcfg, _ := runPkgCover(t, instdira, tag, incfg, mode,
-			pfiles("a"), false)
+			apkgfiles, false)
 		t.Logf("outfiles: %+v\n", ofs)
 
 		// Run the compiler on the files to make sure the result is
@@ -161,7 +137,7 @@
 	errExpected := true
 	tag := "errors"
 	_, _, errmsg := runPkgCover(t, instdira, tag, "/not/a/file", mode,
-		pfiles("a"), errExpected)
+		apkgfiles, errExpected)
 	want := "error reading pkgconfig file"
 	if !strings.Contains(errmsg, want) {
 		t.Errorf("'bad config file' test: wanted %s got %s", want, errmsg)
@@ -171,7 +147,7 @@
 	t.Logf("mangling in config")
 	writeFile(t, incfg, []byte("blah=foo\n"))
 	_, _, errmsg = runPkgCover(t, instdira, tag, incfg, mode,
-		pfiles("a"), errExpected)
+		apkgfiles, errExpected)
 	want = "error reading pkgconfig file"
 	if !strings.Contains(errmsg, want) {
 		t.Errorf("'bad config file' test: wanted %s got %s", want, errmsg)
@@ -181,8 +157,115 @@
 	t.Logf("writing empty config")
 	writeFile(t, incfg, []byte("\n"))
 	_, _, errmsg = runPkgCover(t, instdira, tag, incfg, mode,
-		pfiles("a"), errExpected)
+		apkgfiles, errExpected)
 	if !strings.Contains(errmsg, want) {
 		t.Errorf("'bad config file' test: wanted %s got %s", want, errmsg)
 	}
 }
+
+func TestCoverOnPackageWithNoTestFiles(t *testing.T) {
+	testenv.MustHaveGoRun(t)
+
+	// For packages with no test files, the new "go test -cover"
+	// strategy is to run cmd/cover on the package in a special
+	// "EmitMetaFile" mode. When running in this mode, cmd/cover walks
+	// the package doing instrumentation, but when finished, instead of
+	// writing out instrumented source files, it directly emits a
+	// meta-data file for the package in question, essentially
+	// simulating the effect that you would get if you added a dummy
+	// "no-op" x_test.go file and then did a build and run of the test.
+
+	t.Run("YesFuncsNoTests", func(t *testing.T) {
+		testCoverNoTestsYesFuncs(t)
+	})
+	t.Run("NoFuncsNoTests", func(t *testing.T) {
+		testCoverNoTestsNoFuncs(t)
+	})
+}
+
+func testCoverNoTestsYesFuncs(t *testing.T) {
+	t.Parallel()
+	dir := tempDir(t)
+
+	// Run the cover command with "emit meta" enabled on a package
+	// with functions but no test files.
+	tpath := filepath.Join("testdata", "pkgcfg")
+	pkg1files := []string{filepath.Join(tpath, "yesFuncsNoTests", "yfnt.go")}
+	ppath := "cfg/yesFuncsNoTests"
+	pname := "yesFuncsNoTests"
+	mode := "count"
+	gran := "perblock"
+	tag := mode + "_" + gran
+	instdir := filepath.Join(dir, "inst")
+	if err := os.Mkdir(instdir, 0777); err != nil {
+		t.Fatal(err)
+	}
+	mdir := filepath.Join(dir, "meta")
+	if err := os.Mkdir(mdir, 0777); err != nil {
+		t.Fatal(err)
+	}
+	mpath := filepath.Join(mdir, "covmeta.xxx")
+	incfg := writePkgConfig(t, instdir, tag, ppath, pname, gran, mpath)
+	_, _, errmsg := runPkgCover(t, instdir, tag, incfg, mode,
+		pkg1files, false)
+	if errmsg != "" {
+		t.Fatalf("runPkgCover err: %q", errmsg)
+	}
+
+	// Check for existence of meta-data file.
+	if inf, err := os.Open(mpath); err != nil {
+		t.Fatalf("meta-data file not created: %v", err)
+	} else {
+		inf.Close()
+	}
+
+	// Make sure it is digestible.
+	cdargs := []string{"tool", "covdata", "percent", "-i", mdir}
+	cmd := testenv.Command(t, testenv.GoToolPath(t), cdargs...)
+	run(cmd, t)
+}
+
+func testCoverNoTestsNoFuncs(t *testing.T) {
+	t.Parallel()
+	dir := tempDir(t)
+
+	// Run the cover command with "emit meta" enabled on a package
+	// with no functions and no test files.
+	tpath := filepath.Join("testdata", "pkgcfg")
+	pkgfiles := []string{filepath.Join(tpath, "noFuncsNoTests", "nfnt.go")}
+	pname := "noFuncsNoTests"
+	mode := "count"
+	gran := "perblock"
+	ppath := "cfg/" + pname
+	tag := mode + "_" + gran
+	instdir := filepath.Join(dir, "inst2")
+	if err := os.Mkdir(instdir, 0777); err != nil {
+		t.Fatal(err)
+	}
+	mdir := filepath.Join(dir, "meta2")
+	if err := os.Mkdir(mdir, 0777); err != nil {
+		t.Fatal(err)
+	}
+	mpath := filepath.Join(mdir, "covmeta.yyy")
+	incfg := writePkgConfig(t, instdir, tag, ppath, pname, gran, mpath)
+	_, _, errmsg := runPkgCover(t, instdir, tag, incfg, mode,
+		pkgfiles, false)
+	if errmsg != "" {
+		t.Fatalf("runPkgCover err: %q", errmsg)
+	}
+
+	// We expect to see an empty meta-data file in this case.
+	if inf, err := os.Open(mpath); err != nil {
+		t.Fatalf("opening meta-data file: error %v", err)
+	} else {
+		defer inf.Close()
+		fi, err := inf.Stat()
+		if err != nil {
+			t.Fatalf("stat meta-data file: %v", err)
+		}
+		if fi.Size() != 0 {
+			t.Fatalf("want zero-sized meta-data file got size %d",
+				fi.Size())
+		}
+	}
+}
diff --git a/src/cmd/cover/cover.go b/src/cmd/cover/cover.go
index a4b837c..ba7694b 100644
--- a/src/cmd/cover/cover.go
+++ b/src/cmd/cover/cover.go
@@ -6,6 +6,7 @@
 
 import (
 	"bytes"
+	"cmd/internal/cov/covcmd"
 	"encoding/json"
 	"flag"
 	"fmt"
@@ -50,7 +51,7 @@
 package name, module path, and related info from "go build",
 and -outfilelist points to a file containing the filenames
 of the instrumented output files (one per input file).
-See https://pkg.go.dev/internal/coverage#CoverPkgConfig for
+See https://pkg.go.dev/cmd/internal/cov/covcmd#CoverPkgConfig for
 more on the package config.
 `
 
@@ -63,30 +64,22 @@
 }
 
 var (
-	mode        = flag.String("mode", "", "coverage mode: set, count, atomic")
-	varVar      = flag.String("var", "GoCover", "name of coverage variable to generate")
-	output      = flag.String("o", "", "file for output")
-	outfilelist = flag.String("outfilelist", "", "file containing list of output files (one per line) if -pkgcfg is in use")
-	htmlOut     = flag.String("html", "", "generate HTML representation of coverage profile")
-	funcOut     = flag.String("func", "", "output coverage profile information for each function")
-	pkgcfg      = flag.String("pkgcfg", "", "enable full-package instrumentation mode using params from specified config file")
+	mode             = flag.String("mode", "", "coverage mode: set, count, atomic")
+	varVar           = flag.String("var", "GoCover", "name of coverage variable to generate")
+	output           = flag.String("o", "", "file for output")
+	outfilelist      = flag.String("outfilelist", "", "file containing list of output files (one per line) if -pkgcfg is in use")
+	htmlOut          = flag.String("html", "", "generate HTML representation of coverage profile")
+	funcOut          = flag.String("func", "", "output coverage profile information for each function")
+	pkgcfg           = flag.String("pkgcfg", "", "enable full-package instrumentation mode using params from specified config file")
+	pkgconfig        covcmd.CoverPkgConfig
+	outputfiles      []string // list of *.cover.go instrumented outputs to write, one per input (set when -pkgcfg is in use)
+	profile          string   // The profile to read; the value of -html or -func
+	counterStmt      func(*File, string) string
+	covervarsoutfile string // an additional Go source file into which we'll write definitions of coverage counter variables + meta data variables (set when -pkgcfg is in use).
+	cmode            coverage.CounterMode
+	cgran            coverage.CounterGranularity
 )
 
-var pkgconfig coverage.CoverPkgConfig
-
-// outputfiles is the list of *.cover.go instrumented outputs to write,
-// one per input (set when -pkgcfg is in use)
-var outputfiles []string
-
-// covervarsoutfile is an additional Go source file into which we'll
-// write definitions of coverage counter variables + meta data variables
-// (set when -pkgcfg is in use).
-var covervarsoutfile string
-
-var profile string // The profile to read; the value of -html or -func
-
-var counterStmt func(*File, string) string
-
 const (
 	atomicPackagePath = "sync/atomic"
 	atomicPackageName = "_cover_atomic_"
@@ -151,12 +144,19 @@
 		switch *mode {
 		case "set":
 			counterStmt = setCounterStmt
+			cmode = coverage.CtrModeSet
 		case "count":
 			counterStmt = incCounterStmt
+			cmode = coverage.CtrModeCount
 		case "atomic":
 			counterStmt = atomicCounterStmt
-		case "regonly", "testmain":
+			cmode = coverage.CtrModeAtomic
+		case "regonly":
 			counterStmt = nil
+			cmode = coverage.CtrModeRegOnly
+		case "testmain":
+			counterStmt = nil
+			cmode = coverage.CtrModeTestMain
 		default:
 			return fmt.Errorf("unknown -mode %v", *mode)
 		}
@@ -214,7 +214,12 @@
 	if err := json.Unmarshal(data, &pkgconfig); err != nil {
 		return fmt.Errorf("error reading pkgconfig file %q: %v", path, err)
 	}
-	if pkgconfig.Granularity != "perblock" && pkgconfig.Granularity != "perfunc" {
+	switch pkgconfig.Granularity {
+	case "perblock":
+		cgran = coverage.CtrGranularityPerBlock
+	case "perfunc":
+		cgran = coverage.CtrGranularityPerFunc
+	default:
 		return fmt.Errorf(`%s: pkgconfig requires perblock/perfunc value`, path)
 	}
 	return nil
@@ -1087,6 +1092,14 @@
 		return
 	}
 
+	// If the "EmitMetaFile" path has been set, invoke a helper
+	// that will write out a pre-cooked meta-data file for this package
+	// to the specified location, in effect simulating the execution
+	// of a test binary that doesn't do any testing to speak of.
+	if pkgconfig.EmitMetaFile != "" {
+		p.emitMetaFile(pkgconfig.EmitMetaFile)
+	}
+
 	// Something went wrong if regonly/testmain mode is in effect and
 	// we have instrumented functions.
 	if counterStmt == nil && len(p.counterLengths) != 0 {
@@ -1122,7 +1135,7 @@
 	}
 	fmt.Fprintf(w, "}\n")
 
-	fixcfg := coverage.CoverFixupConfig{
+	fixcfg := covcmd.CoverFixupConfig{
 		Strategy:           "normal",
 		MetaVar:            mkMetaVar(),
 		MetaLen:            len(payload),
@@ -1157,3 +1170,40 @@
 	}
 	return atomicPackageName + "."
 }
+
+func (p *Package) emitMetaFile(outpath string) {
+	// Open output file.
+	of, err := os.OpenFile(outpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+	if err != nil {
+		log.Fatalf("opening covmeta %s: %v", outpath, err)
+	}
+
+	if len(p.counterLengths) == 0 {
+		// This corresponds to the case where we have no functions
+		// in the package to instrument. Leave the file empty if
+		// this happens.
+		if err = of.Close(); err != nil {
+			log.Fatalf("closing meta-data file: %v", err)
+		}
+		return
+	}
+
+	// Encode meta-data.
+	var sws slicewriter.WriteSeeker
+	digest, err := p.mdb.Emit(&sws)
+	if err != nil {
+		log.Fatalf("encoding meta-data: %v", err)
+	}
+	payload := sws.BytesWritten()
+	blobs := [][]byte{payload}
+
+	// Write meta-data file directly.
+	mfw := encodemeta.NewCoverageMetaFileWriter(outpath, of)
+	err = mfw.Write(digest, blobs, cmode, cgran)
+	if err != nil {
+		log.Fatalf("writing meta-data file: %v", err)
+	}
+	if err = of.Close(); err != nil {
+		log.Fatalf("closing meta-data file: %v", err)
+	}
+}
diff --git a/src/cmd/cover/doc.go b/src/cmd/cover/doc.go
index 82580cd..f5b9b1c 100644
--- a/src/cmd/cover/doc.go
+++ b/src/cmd/cover/doc.go
@@ -11,7 +11,7 @@
 is referred to as "instrumentation"). Cover can operate in "legacy mode"
 on a single Go source file at a time, or when invoked by the Go tool
 it will process all the source files in a single package at a time
-(package-scope instrumentation is enabled via "-pkgcfg" option,
+(package-scope instrumentation is enabled via "-pkgcfg" option).
 
 When generating instrumented code, the cover tool computes approximate
 basic block information by studying the source. It is thus more
diff --git a/src/cmd/cover/testdata/pkgcfg/b/b.go b/src/cmd/cover/testdata/pkgcfg/b/b.go
deleted file mode 100644
index 9e330ee..0000000
--- a/src/cmd/cover/testdata/pkgcfg/b/b.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package b
-
-func B(x int) int {
-	if x == 0 {
-		return 22
-	} else if x == 1 {
-		return 33
-	}
-	return 44
-}
diff --git a/src/cmd/cover/testdata/pkgcfg/b/b_test.go b/src/cmd/cover/testdata/pkgcfg/b/b_test.go
deleted file mode 100644
index 7bdb73b..0000000
--- a/src/cmd/cover/testdata/pkgcfg/b/b_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package b
-
-import "testing"
-
-func TestB(t *testing.T) {
-	B(0)
-	B(1)
-	B(2)
-}
diff --git a/src/cmd/cover/testdata/pkgcfg/main/main.go b/src/cmd/cover/testdata/pkgcfg/main/main.go
deleted file mode 100644
index a908931..0000000
--- a/src/cmd/cover/testdata/pkgcfg/main/main.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package main
-
-import (
-	"cfg/a"
-	"cfg/b"
-)
-
-func main() {
-	a.A(2)
-	a.A(1)
-	a.A(0)
-	b.B(1)
-	b.B(0)
-	println("done")
-}
diff --git a/src/cmd/cover/testdata/pkgcfg/noFuncsNoTests/nfnt.go b/src/cmd/cover/testdata/pkgcfg/noFuncsNoTests/nfnt.go
new file mode 100644
index 0000000..52df23c
--- /dev/null
+++ b/src/cmd/cover/testdata/pkgcfg/noFuncsNoTests/nfnt.go
@@ -0,0 +1,8 @@
+package noFuncsNoTests
+
+const foo = 1
+
+var G struct {
+	x int
+	y bool
+}
diff --git a/src/cmd/cover/testdata/pkgcfg/yesFuncsNoTests/yfnt.go b/src/cmd/cover/testdata/pkgcfg/yesFuncsNoTests/yfnt.go
new file mode 100644
index 0000000..4e536b0
--- /dev/null
+++ b/src/cmd/cover/testdata/pkgcfg/yesFuncsNoTests/yfnt.go
@@ -0,0 +1,13 @@
+package yesFuncsNoTests
+
+func F1() {
+	println("hi")
+}
+
+func F2(x int) int {
+	if x < 0 {
+		return 9
+	} else {
+		return 10
+	}
+}
diff --git a/src/cmd/dist/README b/src/cmd/dist/README
index 673c0f3..0f99284 100644
--- a/src/cmd/dist/README
+++ b/src/cmd/dist/README
@@ -4,24 +4,18 @@
 in Go, making bootstrapping a little more involved than in the past.
 The approach is to build the current release of Go with an earlier one.
 
-The process to install Go 1.x, for x ≥ 20, is:
+The process to install Go 1.x, for x ≥ 22, is:
 
-1. Build cmd/dist with Go 1.17.13.
-2. Using dist, build Go 1.x compiler toolchain with Go 1.17.13.
+1. Build cmd/dist with Go 1.20.6.
+2. Using dist, build Go 1.x compiler toolchain with Go 1.20.6.
 3. Using dist, rebuild Go 1.x compiler toolchain with itself.
 4. Using dist, build Go 1.x cmd/go (as go_bootstrap) with Go 1.x compiler toolchain.
 5. Using go_bootstrap, build the remaining Go 1.x standard library and commands.
 
-NOTE: During the transition from the old C-based toolchain to the Go-based one,
-step 2 also builds the parts of the toolchain written in C, and step 3 does not
-recompile those.
+Because of backward compatibility, although the steps above say Go 1.20.6,
+in practice any release ≥ Go 1.20.6 but < Go 1.x will work as the bootstrap base.
+Releases ≥ Go 1.x are very likely to work as well.
 
-Because of backward compatibility, although the steps above say Go 1.17.13,
-in practice any release ≥ Go 1.17.13 but < Go 1.x will work as the bootstrap base.
+See https://go.dev/s/go15bootstrap for more details about the original bootstrap
+and https://go.dev/issue/54265 for details about later bootstrap version bumps.
 
-See golang.org/s/go15bootstrap for more details.
-
-Compared to Go 1.4 and earlier, dist will also take over much of what used to
-be done by make.bash/make.bat/make.rc and all of what used to be done by
-run.bash/run.bat/run.rc, because it is nicer to implement that logic in Go
-than in three different scripting languages simultaneously.
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index 8973a87..32e59b4 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -576,9 +576,7 @@
 func mustLinkExternal(goos, goarch string, cgoEnabled bool) bool {
 	if cgoEnabled {
 		switch goarch {
-		case "loong64",
-			"mips", "mipsle", "mips64", "mips64le",
-			"riscv64":
+		case "loong64", "mips", "mipsle", "mips64", "mips64le":
 			// Internally linking cgo is incomplete on some architectures.
 			// https://golang.org/issue/14449
 			return true
@@ -589,7 +587,9 @@
 			}
 		case "ppc64":
 			// Big Endian PPC64 cgo internal linking is not implemented for aix or linux.
-			return true
+			if goos == "aix" || goos == "linux" {
+				return true
+			}
 		}
 
 		switch goos {
@@ -892,9 +892,6 @@
 		}
 	}
 	goasmh := pathf("%s/go_asm.h", workdir)
-	if IsRuntimePackagePath(pkg) {
-		asmArgs = append(asmArgs, "-compiling-runtime")
-	}
 
 	// Collect symabis from assembly code.
 	var symabis string
@@ -949,9 +946,6 @@
 	if gogcflags != "" {
 		compile = append(compile, strings.Fields(gogcflags)...)
 	}
-	if pkg == "runtime" {
-		compile = append(compile, "-+")
-	}
 	if len(sfiles) > 0 {
 		compile = append(compile, "-asmhdr", goasmh)
 	}
@@ -1577,6 +1571,9 @@
 		ok[f] = true
 	}
 	for _, f := range binFiles {
+		if gohostos == "darwin" && filepath.Base(f) == ".DS_Store" {
+			continue // unfortunate but not unexpected
+		}
 		elem := strings.TrimSuffix(filepath.Base(f), ".exe")
 		if !ok[f] && elem != "go" && elem != "gofmt" && elem != goos+"_"+goarch {
 			fatalf("unexpected new file in $GOROOT/bin: %s", elem)
@@ -1732,6 +1729,7 @@
 	"openbsd/arm64":   true,
 	"openbsd/mips64":  true,
 	"openbsd/ppc64":   false,
+	"openbsd/riscv64": false,
 	"plan9/386":       false,
 	"plan9/amd64":     false,
 	"plan9/arm":       false,
@@ -1747,9 +1745,9 @@
 // get filtered out of cgoEnabled for 'dist list'.
 // See go.dev/issue/56679.
 var broken = map[string]bool{
-	"linux/sparc64":  true, // An incomplete port. See CL 132155.
-	"openbsd/ppc64":  true, // An incomplete port: go.dev/issue/56001.
-	"openbsd/mips64": true, // Broken: go.dev/issue/58110.
+	"linux/sparc64":   true, // An incomplete port. See CL 132155.
+	"openbsd/mips64":  true, // Broken: go.dev/issue/58110.
+	"openbsd/riscv64": true, // An incomplete port: go.dev/issue/55999.
 }
 
 // List of platforms which are first class ports. See go.dev/issue/38874.
@@ -1944,29 +1942,6 @@
 	}
 }
 
-// IsRuntimePackagePath examines 'pkgpath' and returns TRUE if it
-// belongs to the collection of "runtime-related" packages, including
-// "runtime" itself, "reflect", "syscall", and the
-// "runtime/internal/*" packages.
-//
-// Keep in sync with cmd/internal/objabi/path.go:IsRuntimePackagePath.
-func IsRuntimePackagePath(pkgpath string) bool {
-	rval := false
-	switch pkgpath {
-	case "runtime":
-		rval = true
-	case "reflect":
-		rval = true
-	case "syscall":
-		rval = true
-	case "internal/bytealg":
-		rval = true
-	default:
-		rval = strings.HasPrefix(pkgpath, "runtime/internal")
-	}
-	return rval
-}
-
 func setNoOpt() {
 	for _, gcflag := range strings.Split(gogcflags, " ") {
 		if gcflag == "-N" || gcflag == "-l" {
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index a528d7a..3232896 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -31,6 +31,7 @@
 // include all packages within subdirectories as well.
 // These will be imported during bootstrap as bootstrap/name, like bootstrap/math/big.
 var bootstrapDirs = []string{
+	"cmp",
 	"cmd/asm",
 	"cmd/asm/internal/...",
 	"cmd/cgo",
@@ -61,14 +62,17 @@
 	"debug/pe",
 	"go/build/constraint",
 	"go/constant",
+	"go/version",
 	"internal/abi",
 	"internal/coverage",
+	"cmd/internal/cov/covcmd",
 	"internal/bisect",
 	"internal/buildcfg",
 	"internal/goarch",
 	"internal/godebugs",
 	"internal/goexperiment",
 	"internal/goroot",
+	"internal/gover",
 	"internal/goversion",
 	// internal/lazyregexp is provided by Go 1.17, which permits it to
 	// be imported by other packages in this list, but is not provided
@@ -85,10 +89,8 @@
 	"internal/unsafeheader",
 	"internal/xcoff",
 	"internal/zstd",
-	"math/big",
 	"math/bits",
 	"sort",
-	"strconv",
 }
 
 // File prefixes that are ignored by go/build anyway, and cause
diff --git a/src/cmd/dist/exec.go b/src/cmd/dist/exec.go
new file mode 100644
index 0000000..602b812
--- /dev/null
+++ b/src/cmd/dist/exec.go
@@ -0,0 +1,40 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"os/exec"
+	"strings"
+)
+
+// setDir sets cmd.Dir to dir, and also adds PWD=dir to cmd's environment.
+func setDir(cmd *exec.Cmd, dir string) {
+	cmd.Dir = dir
+	if cmd.Env != nil {
+		// os/exec won't set PWD automatically.
+		setEnv(cmd, "PWD", dir)
+	}
+}
+
+// setEnv sets cmd.Env so that key = value.
+func setEnv(cmd *exec.Cmd, key, value string) {
+	cmd.Env = append(cmd.Environ(), key+"="+value)
+}
+
+// unsetEnv sets cmd.Env so that key is not present in the environment.
+func unsetEnv(cmd *exec.Cmd, key string) {
+	cmd.Env = cmd.Environ()
+
+	prefix := key + "="
+	newEnv := []string{}
+	for _, entry := range cmd.Env {
+		if strings.HasPrefix(entry, prefix) {
+			continue
+		}
+		newEnv = append(newEnv, entry)
+		// key may appear multiple times, so keep going.
+	}
+	cmd.Env = newEnv
+}
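
A brief usage sketch for the helpers above, assuming a hypothetical caller in the same package main (os/exec is already imported by this file; the command, directory, and variable names are arbitrary examples):

func runGoVersion(dir string) ([]byte, error) {
	cmd := exec.Command("go", "version")
	setEnv(cmd, "GOFLAGS", "") // populates cmd.Env from the parent environment and pins GOFLAGS
	unsetEnv(cmd, "GODEBUG")   // drops any inherited GODEBUG entries
	setDir(cmd, dir)           // sets cmd.Dir; since cmd.Env is now non-nil, PWD is recorded too
	return cmd.CombinedOutput()
}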
diff --git a/src/cmd/dist/exec_118.go b/src/cmd/dist/exec_118.go
deleted file mode 100644
index a1c3c64..0000000
--- a/src/cmd/dist/exec_118.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.19
-// +build !go1.19
-
-package main
-
-import (
-	"os"
-	"os/exec"
-	"strings"
-)
-
-// setDir sets cmd.Dir to dir, and also adds PWD=dir to cmd's environment.
-func setDir(cmd *exec.Cmd, dir string) {
-	cmd.Dir = dir
-	setEnv(cmd, "PWD", dir)
-}
-
-// setEnv sets cmd.Env so that key = value.
-func setEnv(cmd *exec.Cmd, key, value string) {
-	kv := key + "=" + value
-	if cmd.Env == nil {
-		cmd.Env = os.Environ()
-	}
-	cmd.Env = append(cmd.Env, kv)
-}
-
-// unsetEnv sets cmd.Env so that key is not present in the environment.
-func unsetEnv(cmd *exec.Cmd, key string) {
-	if cmd.Env == nil {
-		cmd.Env = os.Environ()
-	}
-
-	prefix := key + "="
-	newEnv := []string{}
-	for _, entry := range cmd.Env {
-		if strings.HasPrefix(entry, prefix) {
-			continue
-		}
-		newEnv = append(newEnv, entry)
-		// key may appear multiple times, so keep going.
-	}
-	cmd.Env = newEnv
-}
diff --git a/src/cmd/dist/exec_119.go b/src/cmd/dist/exec_119.go
deleted file mode 100644
index 0b4baa0..0000000
--- a/src/cmd/dist/exec_119.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.19
-// +build go1.19
-
-package main
-
-import (
-	"os/exec"
-	"strings"
-)
-
-// setDir sets cmd.Dir to dir, and also adds PWD=dir to cmd's environment.
-func setDir(cmd *exec.Cmd, dir string) {
-	cmd.Dir = dir
-	if cmd.Env != nil {
-		// os/exec won't set PWD automatically.
-		setEnv(cmd, "PWD", dir)
-	}
-}
-
-// setEnv sets cmd.Env so that key = value.
-func setEnv(cmd *exec.Cmd, key, value string) {
-	cmd.Env = append(cmd.Environ(), key+"="+value)
-}
-
-// unsetEnv sets cmd.Env so that key is not present in the environment.
-func unsetEnv(cmd *exec.Cmd, key string) {
-	cmd.Env = cmd.Environ()
-
-	prefix := key + "="
-	newEnv := []string{}
-	for _, entry := range cmd.Env {
-		if strings.HasPrefix(entry, prefix) {
-			continue
-		}
-		newEnv = append(newEnv, entry)
-		// key may appear multiple times, so keep going.
-	}
-	cmd.Env = newEnv
-}
diff --git a/src/cmd/dist/notgo117.go b/src/cmd/dist/notgo117.go
deleted file mode 100644
index 8d551df..0000000
--- a/src/cmd/dist/notgo117.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Go 1.20 and later requires Go 1.17 as the bootstrap toolchain.
-// If cmd/dist is built using an earlier Go version, this file will be
-// included in the build and cause an error like:
-//
-// % GOROOT_BOOTSTRAP=$HOME/sdk/go1.16 ./make.bash
-// Building Go cmd/dist using /Users/rsc/sdk/go1.16. (go1.16 darwin/amd64)
-// found packages main (build.go) and building_Go_requires_Go_1_17_13_or_later (notgo117.go) in /Users/rsc/go/src/cmd/dist
-// %
-//
-// which is the best we can do under the circumstances.
-//
-// See go.dev/issue/44505 for more background on
-// why Go moved on from Go 1.4 for bootstrap.
-
-//go:build !go1.17
-// +build !go1.17
-
-package building_Go_requires_Go_1_17_13_or_later
diff --git a/src/cmd/dist/notgo120.go b/src/cmd/dist/notgo120.go
new file mode 100644
index 0000000..0b89ab3
--- /dev/null
+++ b/src/cmd/dist/notgo120.go
@@ -0,0 +1,21 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Go 1.22 and later requires Go 1.20 as the bootstrap toolchain.
+// If cmd/dist is built using an earlier Go version, this file will be
+// included in the build and cause an error like:
+//
+// % GOROOT_BOOTSTRAP=$HOME/sdk/go1.16 ./make.bash
+// Building Go cmd/dist using /Users/rsc/sdk/go1.16. (go1.16 darwin/amd64)
+// found packages main (build.go) and building_Go_requires_Go_1_20_6_or_later (notgo120.go) in /Users/rsc/go/src/cmd/dist
+// %
+//
+// which is the best we can do under the circumstances.
+//
+// See go.dev/issue/44505 for more background on
+// why Go moved on from Go 1.4 for bootstrap.
+
+//go:build !go1.20
+
+package building_Go_requires_Go_1_20_6_or_later
diff --git a/src/cmd/dist/sys_default.go b/src/cmd/dist/sys_default.go
index e87c84c..ae10227 100644
--- a/src/cmd/dist/sys_default.go
+++ b/src/cmd/dist/sys_default.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !windows
-// +build !windows
 
 package main
 
diff --git a/src/cmd/dist/sys_windows.go b/src/cmd/dist/sys_windows.go
index 265f729..37dffb8 100644
--- a/src/cmd/dist/sys_windows.go
+++ b/src/cmd/dist/sys_windows.go
@@ -14,7 +14,7 @@
 	procGetSystemInfo = modkernel32.NewProc("GetSystemInfo")
 )
 
-// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx
+// see https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-system_info
 type systeminfo struct {
 	wProcessorArchitecture      uint16
 	wReserved                   uint16
@@ -29,7 +29,7 @@
 	wProcessorRevision          uint16
 }
 
-// See https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-system_info
+// See https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-system_info
 const (
 	PROCESSOR_ARCHITECTURE_AMD64 = 9
 	PROCESSOR_ARCHITECTURE_INTEL = 0
diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go
index 36a20e8..5e62bbf 100644
--- a/src/cmd/dist/test.go
+++ b/src/cmd/dist/test.go
@@ -71,7 +71,6 @@
 
 	short      bool
 	cgoEnabled bool
-	partial    bool
 	json       bool
 
 	tests        []distTest // use addTest to extend
@@ -81,34 +80,21 @@
 	worklist []*work
 }
 
+// work tracks command execution for a test.
 type work struct {
-	dt    *distTest
-	cmd   *exec.Cmd // Must write stdout/stderr to work.out
-	flush func()    // If non-nil, called after cmd.Run
-	start chan bool
-	out   bytes.Buffer
-	err   error
-	end   chan bool
+	dt    *distTest     // unique test name, etc.
+	cmd   *exec.Cmd     // must write stdout/stderr to out
+	flush func()        // if non-nil, called after cmd.Run
+	start chan bool     // a true means to start, a false means to skip
+	out   bytes.Buffer  // combined stdout/stderr from cmd
+	err   error         // work result
+	end   chan struct{} // a value means cmd ended (or was skipped)
 }
 
 // printSkip prints a skip message for all of work.
 func (w *work) printSkip(t *tester, msg string) {
 	if t.json {
-		type event struct {
-			Time    time.Time
-			Action  string
-			Package string
-			Output  string `json:",omitempty"`
-		}
-		enc := json.NewEncoder(&w.out)
-		ev := event{Time: time.Now(), Package: w.dt.name, Action: "start"}
-		enc.Encode(ev)
-		ev.Action = "output"
-		ev.Output = msg
-		enc.Encode(ev)
-		ev.Action = "skip"
-		ev.Output = ""
-		enc.Encode(ev)
+		synthesizeSkipEvent(json.NewEncoder(&w.out), w.dt.name, msg)
 		return
 	}
 	fmt.Fprintln(&w.out, msg)
@@ -248,11 +234,13 @@
 		}
 	}
 
+	var anyIncluded, someExcluded bool
 	for _, dt := range t.tests {
 		if !t.shouldRunTest(dt.name) {
-			t.partial = true
+			someExcluded = true
 			continue
 		}
+		anyIncluded = true
 		dt := dt // dt used in background after this iteration
 		if err := dt.fn(&dt); err != nil {
 			t.runPending(&dt) // in case that hasn't been done yet
@@ -270,7 +258,11 @@
 	if !t.json {
 		if t.failed {
 			fmt.Println("\nFAILED")
-		} else if t.partial {
+		} else if !anyIncluded {
+			fmt.Println()
+			errprintf("go tool dist: warning: %q matched no tests; use the -list flag to list available tests\n", t.runRxStr)
+			fmt.Println("NO TESTS TO RUN")
+		} else if someExcluded {
 			fmt.Println("\nALL TESTS PASSED (some were excluded)")
 		} else {
 			fmt.Println("\nALL TESTS PASSED")
@@ -525,6 +517,18 @@
 	return pkgs
 }
 
+// printSkip prints a skip message for all of goTest.
+func (opts *goTest) printSkip(t *tester, msg string) {
+	if t.json {
+		enc := json.NewEncoder(os.Stdout)
+		for _, pkg := range opts.packages() {
+			synthesizeSkipEvent(enc, pkg, msg)
+		}
+		return
+	}
+	fmt.Println(msg)
+}
+
 // ranGoTest and stdMatches are state closed over by the stdlib
 // testing func in registerStdTest below. The tests are run
 // sequentially, so there's no need for locks.
@@ -624,9 +628,22 @@
 			}
 		}
 	} else {
-		// Use a format string to only list packages and commands that have tests.
-		const format = "{{if (or .TestGoFiles .XTestGoFiles)}}{{.ImportPath}}{{end}}"
-		cmd := exec.Command(gorootBinGo, "list", "-f", format)
+		// Use 'go list std cmd' to get a list of all Go packages
+		// that running 'go test std cmd' could find problems in.
+		// (In race test mode, also set -tags=race.)
+		//
+		// In long test mode, this includes vendored packages and other
+		// packages without tests so that 'dist test' finds if any of
+		// them don't build, have a problem reported by high-confidence
+		// vet checks that come with 'go test', and anything else it
+		// may check in the future. See go.dev/issue/60463.
+		cmd := exec.Command(gorootBinGo, "list")
+		if t.short {
+			// In short test mode, use a format string to only
+			// list packages and commands that have tests.
+			const format = "{{if (or .TestGoFiles .XTestGoFiles)}}{{.ImportPath}}{{end}}"
+			cmd.Args = append(cmd.Args, "-f", format)
+		}
 		if t.race {
 			cmd.Args = append(cmd.Args, "-tags=race")
 		}
@@ -702,8 +719,33 @@
 			})
 	}
 
-	// morestack tests. We only run these on in long-test mode
-	// (with GO_TEST_SHORT=false) because the runtime test is
+	// GOEXPERIMENT=rangefunc tests
+	if !t.compileOnly {
+		t.registerTest("GOEXPERIMENT=rangefunc go test iter",
+			&goTest{
+				variant: "iter",
+				short:   t.short,
+				env:     []string{"GOEXPERIMENT=rangefunc"},
+				pkg:     "iter",
+			})
+	}
+
+	// GODEBUG=gcstoptheworld=2 tests. We only run these in long-test
+	// mode (with GO_TEST_SHORT=0) because this is just testing a
+	// non-critical debug setting.
+	if !t.compileOnly && !t.short {
+		t.registerTest("GODEBUG=gcstoptheworld=2 archive/zip",
+			&goTest{
+				variant: "runtime:gcstoptheworld2",
+				timeout: 300 * time.Second,
+				short:   true,
+				env:     []string{"GODEBUG=gcstoptheworld=2"},
+				pkg:     "archive/zip",
+			})
+	}
+
+	// morestack tests. We only run these in long-test mode
+	// (with GO_TEST_SHORT=0) because the runtime test is
 	// already quite long and mayMoreStackMove makes it about
 	// twice as slow.
 	if !t.compileOnly && !t.short {
@@ -928,7 +970,7 @@
 			if skipFunc != nil {
 				msg, skip := skipFunc(dt)
 				if skip {
-					t.printSkip(test, msg)
+					test.printSkip(t, msg)
 					return nil
 				}
 			}
@@ -956,30 +998,6 @@
 	}
 }
 
-func (t *tester) printSkip(test *goTest, msg string) {
-	if !t.json {
-		fmt.Println(msg)
-		return
-	}
-	type event struct {
-		Time    time.Time
-		Action  string
-		Package string
-		Output  string `json:",omitempty"`
-	}
-	out := json.NewEncoder(os.Stdout)
-	for _, pkg := range test.packages() {
-		ev := event{Time: time.Now(), Package: testName(pkg, test.variant), Action: "start"}
-		out.Encode(ev)
-		ev.Action = "output"
-		ev.Output = msg
-		out.Encode(ev)
-		ev.Action = "skip"
-		ev.Output = ""
-		out.Encode(ev)
-	}
-}
-
 // dirCmd constructs a Cmd intended to be run in the foreground.
 // The command will be run in dir, and Stdout and Stderr will go to os.Stdout
 // and os.Stderr.
@@ -1241,8 +1259,8 @@
 	}
 }
 
-// run pending test commands, in parallel, emitting headers as appropriate.
-// When finished, emit header for nextTest, which is going to run after the
+// runPending runs pending test commands, in parallel, emitting headers as appropriate.
+// When finished, it emits a header for nextTest, which is going to run after the
 // pending commands are done (and runPending returns).
 // A test should call runPending if it wants to make sure that it is not
 // running in parallel with earlier tests, or if it has some other reason
@@ -1252,7 +1270,7 @@
 	t.worklist = nil
 	for _, w := range worklist {
 		w.start = make(chan bool)
-		w.end = make(chan bool)
+		w.end = make(chan struct{})
 		// w.cmd must be set up to write to w.out. We can't check that, but we
 		// can check for easy mistakes.
 		if w.cmd.Stdout == nil || w.cmd.Stdout == os.Stdout || w.cmd.Stderr == nil || w.cmd.Stderr == os.Stderr {
@@ -1278,7 +1296,7 @@
 				}
 			}
 			timelog("end", w.dt.name)
-			w.end <- true
+			w.end <- struct{}{}
 		}(w)
 	}
 
@@ -1526,7 +1544,7 @@
 // internal/platform.RaceDetectorSupported, which can't be used here
 // because cmd/dist can not import internal packages during bootstrap.
 // The race detector only supports 48-bit VMA on arm64. But we don't have
-// a good solution to check VMA size(See https://golang.org/issue/29948)
+// a good solution to check VMA size (see https://go.dev/issue/29948).
 // raceDetectorSupported will always return true for arm64. But race
 // detector tests may abort on a non-48-bit VMA configuration; in that case
 // the tests will be marked as "skipped".
@@ -1620,7 +1638,7 @@
 
 	case "plugin":
 		switch platform {
-		case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/s390x", "linux/ppc64le",
+		case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/loong64", "linux/s390x", "linux/ppc64le",
 			"android/amd64", "android/386",
 			"darwin/amd64", "darwin/arm64",
 			"freebsd/amd64":
@@ -1635,10 +1653,10 @@
 
 // isUnsupportedVMASize reports whether the failure is caused by an unsupported
 // VMA for the race detector (for example, running the race detector on an
-// arm64 machine configured with 39-bit VMA)
+// arm64 machine configured with 39-bit VMA).
 func isUnsupportedVMASize(w *work) bool {
 	unsupportedVMA := []byte("unsupported VMA range")
-	return w.dt.name == "race" && bytes.Contains(w.out.Bytes(), unsupportedVMA)
+	return strings.Contains(w.dt.name, ":race") && bytes.Contains(w.out.Bytes(), unsupportedVMA)
 }
 
 // isEnvSet reports whether the environment variable evar is
diff --git a/src/cmd/dist/testjson.go b/src/cmd/dist/testjson.go
index 7408f95..6204593 100644
--- a/src/cmd/dist/testjson.go
+++ b/src/cmd/dist/testjson.go
@@ -11,6 +11,7 @@
 	"fmt"
 	"io"
 	"sync"
+	"time"
 )
 
 // lockedWriter serializes Write calls to an underlying Writer.
@@ -184,3 +185,20 @@
 	err := marshal1(v)
 	return buf.Bytes(), err
 }
+
+func synthesizeSkipEvent(enc *json.Encoder, pkg, msg string) {
+	type event struct {
+		Time    time.Time
+		Action  string
+		Package string
+		Output  string `json:",omitempty"`
+	}
+	ev := event{Time: time.Now(), Package: pkg, Action: "start"}
+	enc.Encode(ev)
+	ev.Action = "output"
+	ev.Output = msg
+	enc.Encode(ev)
+	ev.Action = "skip"
+	ev.Output = ""
+	enc.Encode(ev)
+}
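
For reference, a minimal standalone sketch (not part of the patch) of the event stream a helper like synthesizeSkipEvent produces — three test2json-style events ("start", "output", "skip") per package — using only the standard library. The package name and message below are made up for illustration.

// Standalone sketch, not part of the patch: emit the same three-event
// sequence ("start", "output", "skip") that synthesizeSkipEvent writes,
// so a skipped test still shows up in `go test -json`-style output.
package main

import (
	"encoding/json"
	"os"
	"time"
)

type event struct {
	Time    time.Time
	Action  string
	Package string
	Output  string `json:",omitempty"`
}

func main() {
	enc := json.NewEncoder(os.Stdout)
	// Hypothetical package name and skip message, for illustration only.
	pkg, msg := "archive/zip:example", "skipping: example skip message"
	ev := event{Time: time.Now(), Package: pkg, Action: "start"}
	enc.Encode(ev)
	ev.Action = "output"
	ev.Output = msg
	enc.Encode(ev)
	ev.Action = "skip"
	ev.Output = ""
	enc.Encode(ev)
}
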
diff --git a/src/cmd/dist/util_gc.go b/src/cmd/dist/util_gc.go
index 875784d..6efdf23 100644
--- a/src/cmd/dist/util_gc.go
+++ b/src/cmd/dist/util_gc.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 package main
 
diff --git a/src/cmd/dist/util_gccgo.go b/src/cmd/dist/util_gccgo.go
index 3255b80..2f7af7e 100644
--- a/src/cmd/dist/util_gccgo.go
+++ b/src/cmd/dist/util_gccgo.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gccgo
-// +build gccgo
 
 package main
 
diff --git a/src/cmd/dist/vfp_arm.s b/src/cmd/dist/vfp_arm.s
index 525ee9b..37fb406 100644
--- a/src/cmd/dist/vfp_arm.s
+++ b/src/cmd/dist/vfp_arm.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/dist/vfp_default.s b/src/cmd/dist/vfp_default.s
index 0c1e16b0..a766eda 100644
--- a/src/cmd/dist/vfp_default.s
+++ b/src/cmd/dist/vfp_default.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !arm
-// +build gc,!arm
 
 #include "textflag.h"
 
diff --git a/src/cmd/distpack/archive.go b/src/cmd/distpack/archive.go
index 24ed077..e52dae1 100644
--- a/src/cmd/distpack/archive.go
+++ b/src/cmd/distpack/archive.go
@@ -91,13 +91,29 @@
 	})
 }
 
+func nameLess(x, y string) bool {
+	for i := 0; i < len(x) && i < len(y); i++ {
+		if x[i] != y[i] {
+			// foo/bar/baz before foo/bar.go, because foo/bar is before foo/bar.go
+			if x[i] == '/' {
+				return true
+			}
+			if y[i] == '/' {
+				return false
+			}
+			return x[i] < y[i]
+		}
+	}
+	return len(x) < len(y)
+}
+
 // Sort sorts the files in the archive.
 // It is only necessary to call Sort after calling Add or RenameGoMod.
-// ArchiveDir returns a sorted archive, and the other methods
+// NewArchive returns a sorted archive, and the other methods
 // preserve the sorting of the archive.
 func (a *Archive) Sort() {
 	sort.Slice(a.Files, func(i, j int) bool {
-		return a.Files[i].Name < a.Files[j].Name
+		return nameLess(a.Files[i].Name, a.Files[j].Name)
 	})
 }
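
For reference, a minimal standalone sketch (not part of the patch) showing the ordering this kind of '/'-aware comparator produces: at the first differing byte a '/' sorts before anything else, so a directory's contents land before a sibling file that shares the prefix, unlike plain string comparison. The path strings are invented for illustration.

// Standalone sketch, not part of the patch: sort a few archive-style names
// with a '/'-aware comparator equivalent to nameLess above.
package main

import (
	"fmt"
	"sort"
)

func less(x, y string) bool {
	for i := 0; i < len(x) && i < len(y); i++ {
		if x[i] != y[i] {
			// A '/' sorts before any other byte, so "go/src/cmd/..."
			// comes before "go/src/cmd.go".
			if x[i] == '/' {
				return true
			}
			if y[i] == '/' {
				return false
			}
			return x[i] < y[i]
		}
	}
	return len(x) < len(y)
}

func main() {
	names := []string{"go/src/cmd.go", "go/src/cmd/go/main.go", "go/src/cmd/dist/test.go"}
	sort.Slice(names, func(i, j int) bool { return less(names[i], names[j]) })
	// Plain string order would put "go/src/cmd.go" first ('.' < '/');
	// this comparator puts the cmd/ subtree first instead.
	fmt.Println(names)
}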
 
diff --git a/src/cmd/distpack/test.go b/src/cmd/distpack/test.go
index 4544d72..22b54b5 100644
--- a/src/cmd/distpack/test.go
+++ b/src/cmd/distpack/test.go
@@ -26,7 +26,7 @@
 	{name: "go/VERSION"},
 	{name: "go/src/cmd/go/main.go"},
 	{name: "go/src/bytes/bytes.go"},
-	{name: "go/.DS_Store", exclude: true},
+	{name: "**/.DS_Store", exclude: true},
 	{name: "go/.git", exclude: true},
 	{name: "go/.gitattributes", exclude: true},
 	{name: "go/.github", exclude: true},
@@ -44,7 +44,7 @@
 	{name: "go/src/cmd/go/main.go"},
 	{name: "go/src/bytes/bytes.go"},
 
-	{name: "go/.DS_Store", exclude: true},
+	{name: "**/.DS_Store", exclude: true},
 	{name: "go/.git", exclude: true},
 	{name: "go/.gitattributes", exclude: true},
 	{name: "go/.github", exclude: true},
@@ -73,7 +73,7 @@
 	{name: "golang.org/toolchain@*/src/cmd/go/main.go"},
 	{name: "golang.org/toolchain@*/src/bytes/bytes.go"},
 
-	{name: "golang.org/toolchain@*/.DS_Store", exclude: true},
+	{name: "**/.DS_Store", exclude: true},
 	{name: "golang.org/toolchain@*/.git", exclude: true},
 	{name: "golang.org/toolchain@*/.gitattributes", exclude: true},
 	{name: "golang.org/toolchain@*/.github", exclude: true},
diff --git a/src/cmd/doc/doc_test.go b/src/cmd/doc/doc_test.go
index 7690a93..354adc8 100644
--- a/src/cmd/doc/doc_test.go
+++ b/src/cmd/doc/doc_test.go
@@ -215,6 +215,7 @@
 			`type SimpleConstraint interface {`,
 			`type TildeConstraint interface {`,
 			`type StructConstraint interface {`,
+			`BUG: function body note`,
 		},
 		[]string{
 			`constThree`,
diff --git a/src/cmd/doc/main.go b/src/cmd/doc/main.go
index ae1b757..273d7fe 100644
--- a/src/cmd/doc/main.go
+++ b/src/cmd/doc/main.go
@@ -147,12 +147,6 @@
 			panic(e)
 		}()
 
-		// We have a package.
-		if showAll && symbol == "" {
-			pkg.allDoc()
-			return
-		}
-
 		switch {
 		case symbol == "":
 			pkg.packageDoc() // The package exists, so we got some output.
@@ -161,13 +155,10 @@
 			if pkg.symbolDoc(symbol) {
 				return
 			}
-		default:
-			if pkg.methodDoc(symbol, method) {
-				return
-			}
-			if pkg.fieldDoc(symbol, method) {
-				return
-			}
+		case pkg.printMethodDoc(symbol, method):
+			return
+		case pkg.printFieldDoc(symbol, method):
+			return
 		}
 	}
 }
diff --git a/src/cmd/doc/pkg.go b/src/cmd/doc/pkg.go
index 9779275..a21d8a4 100644
--- a/src/cmd/doc/pkg.go
+++ b/src/cmd/doc/pkg.go
@@ -43,9 +43,9 @@
 	buf         pkgBuffer
 }
 
-func (p *Package) ToText(w io.Writer, text, prefix, codePrefix string) {
-	d := p.doc.Parser().Parse(text)
-	pr := p.doc.Printer()
+func (pkg *Package) ToText(w io.Writer, text, prefix, codePrefix string) {
+	d := pkg.doc.Parser().Parse(text)
+	pr := pkg.doc.Printer()
 	pr.TextPrefix = prefix
 	pr.TextCodePrefix = codePrefix
 	w.Write(pr.Text(d))
@@ -467,87 +467,110 @@
 	return strings.Join(ss, ", ")
 }
 
-// allDoc prints all the docs for the package.
-func (pkg *Package) allDoc() {
-	pkg.Printf("") // Trigger the package clause; we know the package exists.
-	pkg.ToText(&pkg.buf, pkg.doc.Doc, "", indent)
-	pkg.newlines(1)
+// printHeader prints a header for the section named s, adding a blank line on each side.
+func (pkg *Package) printHeader(s string) {
+	pkg.Printf("\n%s\n\n", s)
+}
 
-	printed := make(map[*ast.GenDecl]bool)
-
-	hdr := ""
-	printHdr := func(s string) {
-		if hdr != s {
-			pkg.Printf("\n%s\n\n", s)
-			hdr = s
-		}
-	}
-
-	// Constants.
+// constsDoc prints all const documentation, if any, including a header.
+// The one argument is the valueDoc registry.
+func (pkg *Package) constsDoc(printed map[*ast.GenDecl]bool) {
+	var header bool
 	for _, value := range pkg.doc.Consts {
 		// Constants and variables come in groups, and valueDoc prints
 		// all the items in the group. We only need to find one exported symbol.
 		for _, name := range value.Names {
 			if isExported(name) && !pkg.typedValue[value] {
-				printHdr("CONSTANTS")
+				if !header {
+					pkg.printHeader("CONSTANTS")
+					header = true
+				}
 				pkg.valueDoc(value, printed)
 				break
 			}
 		}
 	}
+}
 
-	// Variables.
+// varsDoc prints all var documentation, if any, including a header.
+// Printed is the valueDoc registry.
+func (pkg *Package) varsDoc(printed map[*ast.GenDecl]bool) {
+	var header bool
 	for _, value := range pkg.doc.Vars {
 		// Constants and variables come in groups, and valueDoc prints
 		// all the items in the group. We only need to find one exported symbol.
 		for _, name := range value.Names {
 			if isExported(name) && !pkg.typedValue[value] {
-				printHdr("VARIABLES")
+				if !header {
+					pkg.printHeader("VARIABLES")
+					header = true
+				}
 				pkg.valueDoc(value, printed)
 				break
 			}
 		}
 	}
+}
 
-	// Functions.
+// funcsDoc prints all func documentation, if any, including a header.
+func (pkg *Package) funcsDoc() {
+	var header bool
 	for _, fun := range pkg.doc.Funcs {
 		if isExported(fun.Name) && !pkg.constructor[fun] {
-			printHdr("FUNCTIONS")
+			if !header {
+				pkg.printHeader("FUNCTIONS")
+				header = true
+			}
 			pkg.emit(fun.Doc, fun.Decl)
 		}
 	}
+}
 
-	// Types.
+// typesDoc prints all type documentation, if any, including a header.
+func (pkg *Package) typesDoc() {
+	var header bool
 	for _, typ := range pkg.doc.Types {
 		if isExported(typ.Name) {
-			printHdr("TYPES")
+			if !header {
+				pkg.printHeader("TYPES")
+				header = true
+			}
 			pkg.typeDoc(typ)
 		}
 	}
 }
 
-// packageDoc prints the docs for the package (package doc plus one-liners of the rest).
+// packageDoc prints the docs for the package.
 func (pkg *Package) packageDoc() {
 	pkg.Printf("") // Trigger the package clause; we know the package exists.
-	if !short {
+	if showAll || !short {
 		pkg.ToText(&pkg.buf, pkg.doc.Doc, "", indent)
 		pkg.newlines(1)
 	}
 
-	if pkg.pkg.Name == "main" && !showCmd {
+	switch {
+	case showAll:
+		printed := make(map[*ast.GenDecl]bool) // valueDoc registry
+		pkg.constsDoc(printed)
+		pkg.varsDoc(printed)
+		pkg.funcsDoc()
+		pkg.typesDoc()
+
+	case pkg.pkg.Name == "main" && !showCmd:
 		// Show only package docs for commands.
 		return
+
+	default:
+		if !short {
+			pkg.newlines(2) // Guarantee blank line before the components.
+		}
+		pkg.valueSummary(pkg.doc.Consts, false)
+		pkg.valueSummary(pkg.doc.Vars, false)
+		pkg.funcSummary(pkg.doc.Funcs, false)
+		pkg.typeSummary()
 	}
 
 	if !short {
-		pkg.newlines(2) // Guarantee blank line before the components.
-	}
-
-	pkg.valueSummary(pkg.doc.Consts, false)
-	pkg.valueSummary(pkg.doc.Vars, false)
-	pkg.funcSummary(pkg.doc.Funcs, false)
-	pkg.typeSummary()
-	if !short {
 		pkg.bugs()
 	}
 }
@@ -732,11 +755,7 @@
 	// Constants and variables behave the same.
 	values := pkg.findValues(symbol, pkg.doc.Consts)
 	values = append(values, pkg.findValues(symbol, pkg.doc.Vars)...)
-	// A declaration like
-	//	const ( c = 1; C = 2 )
-	// could be printed twice if the -u flag is set, as it matches twice.
-	// So we remember which declarations we've printed to avoid duplication.
-	printed := make(map[*ast.GenDecl]bool)
+	printed := make(map[*ast.GenDecl]bool) // valueDoc registry
 	for _, value := range values {
 		pkg.valueDoc(value, printed)
 		found = true
@@ -755,7 +774,13 @@
 	return true
 }
 
-// valueDoc prints the docs for a constant or variable.
+// valueDoc prints the docs for a constant or variable. The printed map records
+// which values have been printed already to avoid duplication. Otherwise, a
+// declaration like:
+//
+//	const ( c = 1; C = 2 )
+//
+// … could be printed twice if the -u flag is set, as it matches twice.
 func (pkg *Package) valueDoc(value *doc.Value, printed map[*ast.GenDecl]bool) {
 	if printed[value.Decl] {
 		return
@@ -815,7 +840,7 @@
 	pkg.newlines(2)
 	// Show associated methods, constants, etc.
 	if showAll {
-		printed := make(map[*ast.GenDecl]bool)
+		printed := make(map[*ast.GenDecl]bool) // valueDoc registry
 		// We can use append here to print consts, then vars. Ditto for funcs and methods.
 		values := typ.Consts
 		values = append(values, typ.Vars...)
@@ -1105,16 +1130,6 @@
 	return found
 }
 
-// methodDoc prints the docs for matches of symbol.method.
-func (pkg *Package) methodDoc(symbol, method string) bool {
-	return pkg.printMethodDoc(symbol, method)
-}
-
-// fieldDoc prints the docs for matches of symbol.field.
-func (pkg *Package) fieldDoc(symbol, field string) bool {
-	return pkg.printFieldDoc(symbol, field)
-}
-
 // match reports whether the user's symbol matches the program's.
 // A lower-case character in the user's string matches either case in the program's.
 // The program string must be exported.
diff --git a/src/cmd/doc/testdata/pkg.go b/src/cmd/doc/testdata/pkg.go
index 1b1b8fb..4d269ff 100644
--- a/src/cmd/doc/testdata/pkg.go
+++ b/src/cmd/doc/testdata/pkg.go
@@ -57,6 +57,7 @@
 
 // Comment about exported function.
 func ExportedFunc(a int) bool {
+	// BUG(me): function body note
 	return true != false
 }
 
diff --git a/src/cmd/fix/cftype.go b/src/cmd/fix/cftype.go
index e4988b1..d4fcc44 100644
--- a/src/cmd/fix/cftype.go
+++ b/src/cmd/fix/cftype.go
@@ -60,8 +60,8 @@
 	// There's no easy way to map from an ast.Expr to all the places that use them, so
 	// we use reflect to find all such references.
 	if len(badNils) > 0 {
-		exprType := reflect.TypeOf((*ast.Expr)(nil)).Elem()
-		exprSliceType := reflect.TypeOf(([]ast.Expr)(nil))
+		exprType := reflect.TypeFor[ast.Expr]()
+		exprSliceType := reflect.TypeFor[[]ast.Expr]()
 		walk(f, func(n any) {
 			if n == nil {
 				return
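
For reference, a small standalone sketch (not part of the patch): reflect.TypeFor[T](), added in Go 1.21, yields the same reflect.Type as the older TypeOf((*T)(nil)).Elem() idiom, including for interface types such as ast.Expr used above.

// Standalone sketch, not part of the patch: the two idioms produce the
// identical reflect.Type value for an interface type.
package main

import (
	"fmt"
	"go/ast"
	"reflect"
)

func main() {
	oldStyle := reflect.TypeOf((*ast.Expr)(nil)).Elem()
	newStyle := reflect.TypeFor[ast.Expr]()
	fmt.Println(oldStyle == newStyle) // true
	fmt.Println(newStyle)             // ast.Expr
}
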
diff --git a/src/cmd/go.mod b/src/cmd/go.mod
index ac8c9e4..7a42688 100644
--- a/src/cmd/go.mod
+++ b/src/cmd/go.mod
@@ -1,15 +1,15 @@
 module cmd
 
-go 1.21
+go 1.22
 
 require (
-	github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26
-	golang.org/x/arch v0.4.0
-	golang.org/x/mod v0.12.0
-	golang.org/x/sync v0.3.0
-	golang.org/x/sys v0.10.0
-	golang.org/x/term v0.10.0
-	golang.org/x/tools v0.11.1-0.20230712164437-1ca21856af7b
+	github.com/google/pprof v0.0.0-20230811205829-9131a7e9cc17
+	golang.org/x/arch v0.6.0
+	golang.org/x/mod v0.14.0
+	golang.org/x/sync v0.5.0
+	golang.org/x/sys v0.15.0
+	golang.org/x/term v0.15.0
+	golang.org/x/tools v0.16.2-0.20231218185909-83bceaf2424d
 )
 
-require github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2 // indirect
+require github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab // indirect
diff --git a/src/cmd/go.sum b/src/cmd/go.sum
index b7245ea..8ea3d75 100644
--- a/src/cmd/go.sum
+++ b/src/cmd/go.sum
@@ -1,16 +1,16 @@
-github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
-github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
-github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2 h1:rcanfLhLDA8nozr/K289V1zcntHr3V+SHlXwzz1ZI2g=
-github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
-golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
-golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
-golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
-golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
-golang.org/x/tools v0.11.1-0.20230712164437-1ca21856af7b h1:KIZCni6lCdxd4gxHx49Zp9mhckTFRbI/ZPDbR3jKu90=
-golang.org/x/tools v0.11.1-0.20230712164437-1ca21856af7b/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
+github.com/google/pprof v0.0.0-20230811205829-9131a7e9cc17 h1:0h35ESZ02+hN/MFZb7XZOXg+Rl9+Rk8fBIf5YLws9gA=
+github.com/google/pprof v0.0.0-20230811205829-9131a7e9cc17/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
+github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab h1:BA4a7pe6ZTd9F8kXETBoijjFJ/ntaa//1wiH9BZu4zU=
+github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
+golang.org/x/arch v0.6.0 h1:S0JTfE48HbRj80+4tbvZDYsJ3tGv6BUU3XxyZ7CirAc=
+golang.org/x/arch v0.6.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/tools v0.16.2-0.20231218185909-83bceaf2424d h1:9YOyUBubvYqtjjtZBnI62JT9/QB9jfPwOQ7xLeyuOIU=
+golang.org/x/tools v0.16.2-0.20231218185909-83bceaf2424d/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index bb28756..e61e865 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Code generated by 'go test cmd/go -v -run=TestDocsUpToDate -fixdocs'; DO NOT EDIT.
+// Code generated by 'go test cmd/go -v -run=^TestDocsUpToDate$ -fixdocs'; DO NOT EDIT.
 // Edit the documentation in other files and then execute 'go generate cmd/go' to generate this one.
 
 // Go is a tool for managing Go source code.
@@ -44,11 +44,9 @@
 //	filetype        file types
 //	go.mod          the go.mod file
 //	gopath          GOPATH environment variable
-//	gopath-get      legacy GOPATH go get
 //	goproxy         module proxy protocol
 //	importpath      import path syntax
 //	modules         modules, module versions, and more
-//	module-get      module-aware go get
 //	module-auth     module authentication using go.sum
 //	packages        package lists and patterns
 //	private         configuration for downloading non-public code
@@ -81,11 +79,16 @@
 //
 // When compiling packages, build ignores files that end in '_test.go'.
 //
-// When compiling a single main package, build writes
-// the resulting executable to an output file named after
-// the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe')
-// or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe').
-// The '.exe' suffix is added when writing a Windows executable.
+// When compiling a single main package, build writes the resulting
+// executable to an output file named after the last non-major-version
+// component of the package import path. The '.exe' suffix is added
+// when writing a Windows executable.
+// So 'go build example/sam' writes 'sam' or 'sam.exe'.
+// 'go build example.com/foo/v2' writes 'foo' or 'foo.exe', not 'v2.exe'.
+//
+// When compiling a package from a list of .go files, the executable
+// is named after the first source file.
+// 'go build ed.go rx.go' writes 'ed' or 'ed.exe'.
 //
 // When compiling multiple packages or a single non-main package,
 // build compiles the packages but discards the resulting object,
@@ -119,14 +122,15 @@
 //		linux/ppc64le and linux/arm64 (only for 48-bit VMA).
 //	-msan
 //		enable interoperation with memory sanitizer.
-//		Supported only on linux/amd64, linux/arm64, freebsd/amd64
+//		Supported only on linux/amd64, linux/arm64, linux/loong64, freebsd/amd64
 //		and only with Clang/LLVM as the host C compiler.
 //		PIE build mode will be used on all platforms except linux/amd64.
 //	-asan
 //		enable interoperation with address sanitizer.
-//		Supported only on linux/arm64, linux/amd64.
-//		Supported only on linux/amd64 or linux/arm64 and only with GCC 7 and higher
+//		Supported only on linux/arm64, linux/amd64, linux/loong64.
+//		Supported on linux/amd64 or linux/arm64 and only with GCC 7 and higher
 //		or Clang/LLVM 9 and higher.
+//		And supported on linux/loong64 only with Clang/LLVM 16 and higher.
 //	-cover
 //		enable code coverage instrumentation.
 //	-covermode set,count,atomic
@@ -1320,9 +1324,6 @@
 // using import comments in .go files, vendoring tool configuration files (like
 // Gopkg.lock), and the current directory (if in GOPATH).
 //
-// If a configuration file for a vendoring tool is present, init will attempt to
-// import module requirements from it.
-//
 // See https://golang.org/ref/mod#go-mod-init for more about 'go mod init'.
 //
 // # Add missing and remove unused modules
@@ -1504,6 +1505,7 @@
 //	init        initialize workspace file
 //	sync        sync workspace build list to modules
 //	use         add modules to workspace file
+//	vendor      make vendored copy of dependencies
 //
 // Use "go help work <command>" for more information about a command.
 //
@@ -1652,6 +1654,27 @@
 // See the workspaces reference at https://go.dev/ref/mod#workspaces
 // for more information.
 //
+// # Make vendored copy of dependencies
+//
+// Usage:
+//
+//	go work vendor [-e] [-v] [-o outdir]
+//
+// Vendor resets the workspace's vendor directory to include all packages
+// needed to build and test all the workspace's packages.
+// It does not include test code for vendored packages.
+//
+// The -v flag causes vendor to print the names of vendored
+// modules and packages to standard error.
+//
+// The -e flag causes vendor to attempt to proceed despite errors
+// encountered while loading packages.
+//
+// The -o flag causes vendor to create the vendor directory at the given
+// path instead of "vendor". The go command can only use a vendor directory
+// named "vendor" within the module root directory, so this flag is
+// primarily useful for other tools.
+//
 // # Compile and run Go program
 //
 // Usage:
@@ -1900,7 +1923,7 @@
 //
 // Constraints may appear in any kind of source file (not just Go), but
 // they must appear near the top of the file, preceded
-// only by blank lines and other line comments. These rules mean that in Go
+// only by blank lines and other comments. These rules mean that in Go
 // files a build constraint must appear before the package clause.
 //
 // To distinguish build constraints from package documentation,
@@ -2259,6 +2282,8 @@
 //	GOARM
 //		For GOARCH=arm, the ARM architecture for which to compile.
 //		Valid values are 5, 6, 7.
+//		The value can be followed by an option specifying how to implement floating point instructions.
+//		Valid options are ,softfloat (default for 5) and ,hardfloat (default for 6 and 7).
 //	GO386
 //		For GOARCH=386, how to implement floating point instructions.
 //		Valid values are sse2 (default), softfloat.
@@ -2546,70 +2571,6 @@
 //
 // See https://golang.org/s/go15vendor for details.
 //
-// # Legacy GOPATH go get
-//
-// The 'go get' command changes behavior depending on whether the
-// go command is running in module-aware mode or legacy GOPATH mode.
-// This help text, accessible as 'go help gopath-get' even in module-aware mode,
-// describes 'go get' as it operates in legacy GOPATH mode.
-//
-// Usage: go get [-d] [-f] [-t] [-u] [-v] [-fix] [build flags] [packages]
-//
-// Get downloads the packages named by the import paths, along with their
-// dependencies. It then installs the named packages, like 'go install'.
-//
-// The -d flag instructs get to stop after downloading the packages; that is,
-// it instructs get not to install the packages.
-//
-// The -f flag, valid only when -u is set, forces get -u not to verify that
-// each package has been checked out from the source control repository
-// implied by its import path. This can be useful if the source is a local fork
-// of the original.
-//
-// The -fix flag instructs get to run the fix tool on the downloaded packages
-// before resolving dependencies or building the code.
-//
-// The -t flag instructs get to also download the packages required to build
-// the tests for the specified packages.
-//
-// The -u flag instructs get to use the network to update the named packages
-// and their dependencies. By default, get uses the network to check out
-// missing packages but does not use it to look for updates to existing packages.
-//
-// The -v flag enables verbose progress and debug output.
-//
-// Get also accepts build flags to control the installation. See 'go help build'.
-//
-// When checking out a new package, get creates the target directory
-// GOPATH/src/<import-path>. If the GOPATH contains multiple entries,
-// get uses the first one. For more details see: 'go help gopath'.
-//
-// When checking out or updating a package, get looks for a branch or tag
-// that matches the locally installed version of Go. The most important
-// rule is that if the local installation is running version "go1", get
-// searches for a branch or tag named "go1". If no such version exists
-// it retrieves the default branch of the package.
-//
-// When go get checks out or updates a Git repository,
-// it also updates any git submodules referenced by the repository.
-//
-// Get never checks out or updates code stored in vendor directories.
-//
-// For more about build flags, see 'go help build'.
-//
-// For more about specifying packages, see 'go help packages'.
-//
-// For more about how 'go get' finds source code to
-// download, see 'go help importpath'.
-//
-// This text describes the behavior of get when using GOPATH
-// to manage source code and dependencies.
-// If instead the go command is running in module-aware mode,
-// the details of get's flags and effects change, as does 'go help get'.
-// See 'go help modules' and 'go help module-get'.
-//
-// See also: go build, go install, go clean.
-//
 // # Module proxy protocol
 //
 // A Go module proxy is any web server that can respond to GET requests for
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index 54249f6..3282295 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -391,7 +391,6 @@
 	tempdir        string
 	ran            bool
 	inParallel     bool
-	hasNet         bool
 	stdout, stderr bytes.Buffer
 	execDir        string // dir for tg.run
 }
@@ -434,9 +433,6 @@
 	if tg.ran {
 		tg.t.Fatal("internal testsuite error: call to parallel after run")
 	}
-	if tg.hasNet {
-		tg.t.Fatal("internal testsuite error: call to parallel after acquireNet")
-	}
 	for _, e := range tg.env {
 		if strings.HasPrefix(e, "GOROOT=") || strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
 			val := e[strings.Index(e, "=")+1:]
@@ -449,25 +445,6 @@
 	tg.t.Parallel()
 }
 
-// acquireNet skips t if the network is unavailable, and otherwise acquires a
-// netTestSem token for t to be released at the end of the test.
-//
-// t.Parallel must not be called after acquireNet.
-func (tg *testgoData) acquireNet() {
-	tg.t.Helper()
-	if tg.hasNet {
-		return
-	}
-
-	testenv.MustHaveExternalNetwork(tg.t)
-	if netTestSem != nil {
-		netTestSem <- struct{}{}
-		tg.t.Cleanup(func() { <-netTestSem })
-	}
-	tg.setenv("TESTGONETWORK", "")
-	tg.hasNet = true
-}
-
 // pwd returns the current directory.
 func (tg *testgoData) pwd() string {
 	tg.t.Helper()
@@ -579,31 +556,6 @@
 	}
 }
 
-// runGit runs a git command, and expects it to succeed.
-func (tg *testgoData) runGit(dir string, args ...string) {
-	tg.t.Helper()
-	cmd := testenv.Command(tg.t, "git", args...)
-	tg.stdout.Reset()
-	tg.stderr.Reset()
-	cmd.Stdout = &tg.stdout
-	cmd.Stderr = &tg.stderr
-	cmd.Dir = dir
-	cmd.Env = tg.env
-	status := cmd.Run()
-	if tg.stdout.Len() > 0 {
-		tg.t.Log("git standard output:")
-		tg.t.Log(tg.stdout.String())
-	}
-	if tg.stderr.Len() > 0 {
-		tg.t.Log("git standard error:")
-		tg.t.Log(tg.stderr.String())
-	}
-	if status != nil {
-		tg.t.Logf("git %v failed unexpectedly: %v", args, status)
-		tg.t.FailNow()
-	}
-}
-
 // getStdout returns standard output of the testgo run as a string.
 func (tg *testgoData) getStdout() string {
 	tg.t.Helper()
@@ -816,19 +768,6 @@
 	}
 }
 
-// mustHaveContent succeeds if filePath is a path to a file,
-// and that file is readable and not empty.
-func (tg *testgoData) mustHaveContent(filePath string) {
-	tg.mustExist(filePath)
-	f, err := os.Stat(filePath)
-	if err != nil {
-		tg.t.Fatal(err)
-	}
-	if f.Size() == 0 {
-		tg.t.Fatalf("expected %s to have data, but is empty", filePath)
-	}
-}
-
 // wantExecutable fails with msg if path is not executable.
 func (tg *testgoData) wantExecutable(path, msg string) {
 	tg.t.Helper()
@@ -926,18 +865,6 @@
 	return robustio.RemoveAll(dir)
 }
 
-// failSSH puts an ssh executable in the PATH that always fails.
-// This is to stub out uses of ssh by go get.
-func (tg *testgoData) failSSH() {
-	tg.t.Helper()
-	wd, err := os.Getwd()
-	if err != nil {
-		tg.t.Fatal(err)
-	}
-	fail := filepath.Join(wd, "testdata/failssh")
-	tg.setenv("PATH", fmt.Sprintf("%v%c%v", fail, filepath.ListSeparator, os.Getenv("PATH")))
-}
-
 func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping lengthy test in short mode")
@@ -954,22 +881,17 @@
 
 	// Copy the runtime packages into a temporary GOROOT
 	// so that we can change files.
-	for _, copydir := range []string{
-		"src/runtime",
-		"src/internal/abi",
-		"src/internal/bytealg",
-		"src/internal/coverage/rtcov",
-		"src/internal/cpu",
-		"src/internal/goarch",
-		"src/internal/godebugs",
-		"src/internal/goexperiment",
-		"src/internal/goos",
-		"src/internal/coverage/rtcov",
-		"src/math/bits",
-		"src/unsafe",
+	var dirs []string
+	tg.run("list", "-deps", "runtime")
+	pkgs := strings.Split(strings.TrimSpace(tg.getStdout()), "\n")
+	for _, pkg := range pkgs {
+		dirs = append(dirs, filepath.Join("src", pkg))
+	}
+	dirs = append(dirs,
 		filepath.Join("pkg/tool", goHostOS+"_"+goHostArch),
 		"pkg/include",
-	} {
+	)
+	for _, copydir := range dirs {
 		srcdir := filepath.Join(testGOROOT, copydir)
 		tg.tempDir(filepath.Join("goroot", copydir))
 		err := filepath.WalkDir(srcdir,
@@ -985,6 +907,9 @@
 					return err
 				}
 				dest := filepath.Join("goroot", copydir, srcrel)
+				if _, err := os.Stat(dest); err == nil {
+					return nil
+				}
 				data, err := os.ReadFile(path)
 				if err != nil {
 					return err
@@ -1055,77 +980,6 @@
 	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release")
 }
 
-// cmd/go: custom import path checking should not apply to Go packages without import comment.
-func TestIssue10952(t *testing.T) {
-	testenv.MustHaveExecPath(t, "git")
-
-	tg := testgo(t)
-	defer tg.cleanup()
-	tg.parallel()
-	tg.acquireNet()
-
-	tg.tempDir("src")
-	tg.setenv("GOPATH", tg.path("."))
-	const importPath = "github.com/zombiezen/go-get-issue-10952"
-	tg.run("get", "-d", "-u", importPath)
-	repoDir := tg.path("src/" + importPath)
-	tg.runGit(repoDir, "remote", "set-url", "origin", "https://"+importPath+".git")
-	tg.run("get", "-d", "-u", importPath)
-}
-
-// Test git clone URL that uses SCP-like syntax and custom import path checking.
-func TestIssue11457(t *testing.T) {
-	testenv.MustHaveExecPath(t, "git")
-
-	tg := testgo(t)
-	defer tg.cleanup()
-	tg.parallel()
-	tg.acquireNet()
-
-	tg.tempDir("src")
-	tg.setenv("GOPATH", tg.path("."))
-	const importPath = "rsc.io/go-get-issue-11457"
-	tg.run("get", "-d", "-u", importPath)
-	repoDir := tg.path("src/" + importPath)
-	tg.runGit(repoDir, "remote", "set-url", "origin", "git@github.com:rsc/go-get-issue-11457")
-
-	// At this time, custom import path checking compares remotes verbatim (rather than
-	// just the host and path, skipping scheme and user), so we expect go get -u to fail.
-	// However, the goal of this test is to verify that gitRemoteRepo correctly parsed
-	// the SCP-like syntax, and we expect it to appear in the error message.
-	tg.runFail("get", "-d", "-u", importPath)
-	want := " is checked out from ssh://git@github.com/rsc/go-get-issue-11457"
-	if !strings.HasSuffix(strings.TrimSpace(tg.getStderr()), want) {
-		t.Error("expected clone URL to appear in stderr")
-	}
-}
-
-func TestGetGitDefaultBranch(t *testing.T) {
-	testenv.MustHaveExecPath(t, "git")
-
-	tg := testgo(t)
-	defer tg.cleanup()
-	tg.parallel()
-	tg.acquireNet()
-
-	tg.tempDir("src")
-	tg.setenv("GOPATH", tg.path("."))
-
-	// This repo has two branches, master and another-branch.
-	// The another-branch is the default that you get from 'git clone'.
-	// The go get command variants should not override this.
-	const importPath = "github.com/rsc/go-get-default-branch"
-
-	tg.run("get", "-d", importPath)
-	repoDir := tg.path("src/" + importPath)
-	tg.runGit(repoDir, "branch", "--contains", "HEAD")
-	tg.grepStdout(`\* another-branch`, "not on correct default branch")
-
-	tg.run("get", "-d", "-u", importPath)
-	tg.runGit(repoDir, "branch", "--contains", "HEAD")
-	tg.grepStdout(`\* another-branch`, "not on correct default branch")
-}
-
 func TestPackageMainTestCompilerFlags(t *testing.T) {
 	tg := testgo(t)
 	defer tg.cleanup()
@@ -1442,35 +1296,6 @@
 	tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go/")
 }
 
-func TestDefaultGOPATHGet(t *testing.T) {
-	testenv.MustHaveExecPath(t, "git")
-
-	tg := testgo(t)
-	defer tg.cleanup()
-	tg.parallel()
-	tg.acquireNet()
-
-	tg.setenv("GOPATH", "")
-	tg.tempDir("home")
-	tg.setenv(homeEnvName(), tg.path("home"))
-
-	// warn for creating directory
-	tg.run("get", "-v", "github.com/golang/example/hello")
-	tg.grepStderr("created GOPATH="+regexp.QuoteMeta(tg.path("home/go"))+"; see 'go help gopath'", "did not create GOPATH")
-
-	// no warning if directory already exists
-	tg.must(robustio.RemoveAll(tg.path("home/go")))
-	tg.tempDir("home/go")
-	tg.run("get", "github.com/golang/example/hello")
-	tg.grepStderrNot(".", "expected no output on standard error")
-
-	// error if $HOME/go is a file
-	tg.must(robustio.RemoveAll(tg.path("home/go")))
-	tg.tempFile("home/go", "")
-	tg.runFail("get", "github.com/golang/example/hello")
-	tg.grepStderr(`mkdir .*[/\\]go: .*(not a directory|cannot find the path)`, "expected error because $HOME/go is a file")
-}
-
 func TestDefaultGOPATHPrintedSearchList(t *testing.T) {
 	tg := testgo(t)
 	defer tg.cleanup()
diff --git a/src/cmd/go/go_unix_test.go b/src/cmd/go/go_unix_test.go
index d04e496..a6b21b8 100644
--- a/src/cmd/go/go_unix_test.go
+++ b/src/cmd/go/go_unix_test.go
@@ -23,12 +23,27 @@
 	// Do not use tg.parallel; avoid other tests seeing umask manipulation.
 	mask := syscall.Umask(0077) // prohibit low bits
 	defer syscall.Umask(mask)
+
 	tg := testgo(t)
 	defer tg.cleanup()
 	tg.tempFile("x.go", `package main; func main() {}`)
-	// Make sure artifact will be output to /tmp/... in case the user
-	// has POSIX acl's on their go source tree.
-	// See issue 17909.
+
+	// We have set a umask, but if the parent directory happens to have a default
+	// ACL, the umask may be ignored. To prevent spurious failures from an ACL,
+	// we compare the file created by "go build" against a file written explicitly
+	// by os.WriteFile.
+	//
+	// (See https://go.dev/issue/62724, https://go.dev/issue/17909.)
+	control := tg.path("control")
+	tg.creatingTemp(control)
+	if err := os.WriteFile(control, []byte("#!/bin/sh\nexit 0"), 0777); err != nil {
+		t.Fatal(err)
+	}
+	cfi, err := os.Stat(control)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	exe := tg.path("x")
 	tg.creatingTemp(exe)
 	tg.run("build", "-o", exe, tg.path("x.go"))
@@ -36,8 +51,11 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	if mode := fi.Mode(); mode&0077 != 0 {
-		t.Fatalf("wrote x with mode=%v, wanted no 0077 bits", mode)
+	got, want := fi.Mode(), cfi.Mode()
+	if got == want {
+		t.Logf("wrote x with mode %v", got)
+	} else {
+		t.Fatalf("wrote x with mode %v, wanted no 0077 bits (%v)", got, want)
 	}
 }
 
diff --git a/src/cmd/go/internal/cache/cache.go b/src/cmd/go/internal/cache/cache.go
index 4a82d27..14b2dec 100644
--- a/src/cmd/go/internal/cache/cache.go
+++ b/src/cmd/go/internal/cache/cache.go
@@ -477,7 +477,7 @@
 	return nil
 }
 
-// noVerifyReadSeeker is a io.ReadSeeker wrapper sentinel type
+// noVerifyReadSeeker is an io.ReadSeeker wrapper sentinel type
 // that says that Cache.Put should skip the verify check
 // (from GODEBUG=goverifycache=1).
 type noVerifyReadSeeker struct {
diff --git a/src/cmd/go/internal/cache/prog.go b/src/cmd/go/internal/cache/prog.go
index 30f69b3..8d826f0 100644
--- a/src/cmd/go/internal/cache/prog.go
+++ b/src/cmd/go/internal/cache/prog.go
@@ -229,7 +229,7 @@
 			if c.closing.Load() {
 				return // quietly
 			}
-			if errors.Is(err, io.EOF) {
+			if err == io.EOF {
 				c.mu.Lock()
 				inFlight := len(c.inFlight)
 				c.mu.Unlock()
diff --git a/src/cmd/go/internal/cfg/bench_test.go b/src/cmd/go/internal/cfg/bench_test.go
new file mode 100644
index 0000000..2dd9931
--- /dev/null
+++ b/src/cmd/go/internal/cfg/bench_test.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cfg
+
+import (
+	"internal/testenv"
+	"testing"
+)
+
+func BenchmarkLookPath(b *testing.B) {
+	testenv.MustHaveExecPath(b, "go")
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := LookPath("go")
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go
index 8caa22a..a8daa2d 100644
--- a/src/cmd/go/internal/cfg/cfg.go
+++ b/src/cmd/go/internal/cfg/cfg.go
@@ -15,7 +15,6 @@
 	"internal/cfg"
 	"io"
 	"os"
-	"os/exec"
 	"path/filepath"
 	"runtime"
 	"strings"
@@ -161,7 +160,7 @@
 		if ctxt.CgoEnabled {
 			if os.Getenv("CC") == "" {
 				cc := DefaultCC(ctxt.GOOS, ctxt.GOARCH)
-				if _, err := exec.LookPath(cc); err != nil {
+				if _, err := LookPath(cc); err != nil {
 					ctxt.CgoEnabled = false
 				}
 			}
diff --git a/src/cmd/go/internal/cfg/lookpath.go b/src/cmd/go/internal/cfg/lookpath.go
new file mode 100644
index 0000000..1b0fdc7
--- /dev/null
+++ b/src/cmd/go/internal/cfg/lookpath.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cfg
+
+import (
+	"cmd/go/internal/par"
+	"os/exec"
+)
+
+var lookPathCache par.ErrCache[string, string]
+
+// LookPath wraps exec.LookPath and caches the result,
+// so it can safely be called by multiple goroutines at the same time.
+func LookPath(file string) (path string, err error) {
+	return lookPathCache.Do(file,
+		func() (string, error) {
+			return exec.LookPath(file)
+		})
+}
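
For reference, a standalone sketch (not part of the patch) of the same caching idea using only the standard library, since par.ErrCache is internal to cmd/go: each executable name is resolved at most once even under concurrent callers, and the error is cached along with the path. The helper name cachedLookPath is invented for illustration.

// Standalone sketch, not part of the patch: a concurrency-safe, cached
// LookPath built on sync.Map plus a per-entry sync.Once.
package main

import (
	"fmt"
	"os/exec"
	"sync"
)

type lookupResult struct {
	once sync.Once
	path string
	err  error
}

var cache sync.Map // map[string]*lookupResult

// cachedLookPath resolves file via exec.LookPath at most once per name.
func cachedLookPath(file string) (string, error) {
	v, _ := cache.LoadOrStore(file, new(lookupResult))
	r := v.(*lookupResult)
	r.once.Do(func() {
		r.path, r.err = exec.LookPath(file)
	})
	return r.path, r.err
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			p, err := cachedLookPath("go") // only the first call hits the filesystem
			fmt.Println(p, err)
		}()
	}
	wg.Wait()
}
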
diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go
index e011237..b021b78 100644
--- a/src/cmd/go/internal/clean/clean.go
+++ b/src/cmd/go/internal/clean/clean.go
@@ -150,8 +150,7 @@
 		}
 	}
 
-	var b work.Builder
-	b.Print = fmt.Print
+	sh := work.NewShell("", fmt.Print)
 
 	if cleanCache {
 		dir := cache.DefaultDir()
@@ -163,30 +162,16 @@
 			subdirs, _ := filepath.Glob(filepath.Join(str.QuoteGlob(dir), "[0-9a-f][0-9a-f]"))
 			printedErrors := false
 			if len(subdirs) > 0 {
-				if cfg.BuildN || cfg.BuildX {
-					b.Showcmd("", "rm -r %s", strings.Join(subdirs, " "))
-				}
-				if !cfg.BuildN {
-					for _, d := range subdirs {
-						// Only print the first error - there may be many.
-						// This also mimics what os.RemoveAll(dir) would do.
-						if err := os.RemoveAll(d); err != nil && !printedErrors {
-							printedErrors = true
-							base.Error(err)
-						}
-					}
+				if err := sh.RemoveAll(subdirs...); err != nil && !printedErrors {
+					printedErrors = true
+					base.Error(err)
 				}
 			}
 
 			logFile := filepath.Join(dir, "log.txt")
-			if cfg.BuildN || cfg.BuildX {
-				b.Showcmd("", "rm -f %s", logFile)
-			}
-			if !cfg.BuildN {
-				if err := os.RemoveAll(logFile); err != nil && !printedErrors {
-					printedErrors = true
-					base.Error(err)
-				}
+			if err := sh.RemoveAll(logFile); err != nil && !printedErrors {
+				printedErrors = true
+				base.Error(err)
 			}
 		}
 	}
@@ -226,7 +211,7 @@
 			base.Fatalf("go: cannot clean -modcache without a module cache")
 		}
 		if cfg.BuildN || cfg.BuildX {
-			b.Showcmd("", "rm -rf %s", cfg.GOMODCACHE)
+			sh.ShowCmd("", "rm -rf %s", cfg.GOMODCACHE)
 		}
 		if !cfg.BuildN {
 			if err := modfetch.RemoveAll(cfg.GOMODCACHE); err != nil {
@@ -237,13 +222,8 @@
 
 	if cleanFuzzcache {
 		fuzzDir := cache.Default().FuzzDir()
-		if cfg.BuildN || cfg.BuildX {
-			b.Showcmd("", "rm -rf %s", fuzzDir)
-		}
-		if !cfg.BuildN {
-			if err := os.RemoveAll(fuzzDir); err != nil {
-				base.Error(err)
-			}
+		if err := sh.RemoveAll(fuzzDir); err != nil {
+			base.Error(err)
 		}
 	}
 }
@@ -289,8 +269,7 @@
 		return
 	}
 
-	var b work.Builder
-	b.Print = fmt.Print
+	sh := work.NewShell("", fmt.Print)
 
 	packageFile := map[string]bool{}
 	if p.Name != "main" {
@@ -353,7 +332,7 @@
 	}
 
 	if cfg.BuildN || cfg.BuildX {
-		b.Showcmd(p.Dir, "rm -f %s", strings.Join(allRemove, " "))
+		sh.ShowCmd(p.Dir, "rm -f %s", strings.Join(allRemove, " "))
 	}
 
 	toRemove := map[string]bool{}
@@ -365,13 +344,7 @@
 		if dir.IsDir() {
 			// TODO: Remove once Makefiles are forgotten.
 			if cleanDir[name] {
-				if cfg.BuildN || cfg.BuildX {
-					b.Showcmd(p.Dir, "rm -r %s", name)
-					if cfg.BuildN {
-						continue
-					}
-				}
-				if err := os.RemoveAll(filepath.Join(p.Dir, name)); err != nil {
+				if err := sh.RemoveAll(filepath.Join(p.Dir, name)); err != nil {
 					base.Error(err)
 				}
 			}
@@ -389,7 +362,7 @@
 
 	if cleanI && p.Target != "" {
 		if cfg.BuildN || cfg.BuildX {
-			b.Showcmd("", "rm -f %s", p.Target)
+			sh.ShowCmd("", "rm -f %s", p.Target)
 		}
 		if !cfg.BuildN {
 			removeFile(p.Target)
diff --git a/src/cmd/go/internal/fsys/fsys.go b/src/cmd/go/internal/fsys/fsys.go
index b83c5a3..06159db 100644
--- a/src/cmd/go/internal/fsys/fsys.go
+++ b/src/cmd/go/internal/fsys/fsys.go
@@ -690,7 +690,7 @@
 	if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
 		return 2
 	}
-	// is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
+	// is it UNC? https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file
 	if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
 		!isSlash(path[2]) && path[2] != '.' {
 		// first, leading `\\` and next shouldn't be `\`. its server name.
diff --git a/src/cmd/go/internal/fsys/fsys_test.go b/src/cmd/go/internal/fsys/fsys_test.go
index 2ab2bb2..612c521 100644
--- a/src/cmd/go/internal/fsys/fsys_test.go
+++ b/src/cmd/go/internal/fsys/fsys_test.go
@@ -7,7 +7,6 @@
 import (
 	"encoding/json"
 	"errors"
-	"fmt"
 	"internal/testenv"
 	"internal/txtar"
 	"io"
@@ -38,7 +37,6 @@
 		t.Fatal(err)
 	}
 	t.Cleanup(func() {
-		overlay = nil
 		if err := os.Chdir(prevwd); err != nil {
 			t.Fatal(err)
 		}
@@ -57,10 +55,13 @@
 
 	var overlayJSON OverlayJSON
 	if err := json.Unmarshal(a.Comment, &overlayJSON); err != nil {
-		t.Fatal(fmt.Errorf("parsing overlay JSON: %v", err))
+		t.Fatal("parsing overlay JSON:", err)
 	}
 
-	initFromJSON(overlayJSON)
+	if err := initFromJSON(overlayJSON); err != nil {
+		t.Fatal(err)
+	}
+	t.Cleanup(func() { overlay = nil })
 }
 
 func TestIsDir(t *testing.T) {
diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go
index f1f4a6c..6371353 100644
--- a/src/cmd/go/internal/generate/generate.go
+++ b/src/cmd/go/internal/generate/generate.go
@@ -181,6 +181,8 @@
 }
 
 func runGenerate(ctx context.Context, cmd *base.Command, args []string) {
+	modload.InitWorkfile()
+
 	if generateRunFlag != "" {
 		var err error
 		generateRunRE, err = regexp.Compile(generateRunFlag)
@@ -210,6 +212,13 @@
 			continue
 		}
 
+		if pkg.Error != nil && len(pkg.InternalAllGoFiles()) == 0 {
+			// A directory only contains a Go package if it has at least
+			// one .go source file, so the fact that there are no files
+			// implies that the package couldn't be found.
+			base.Errorf("%v", pkg.Error)
+		}
+
 		for _, file := range pkg.InternalGoFiles() {
 			if !generate(file) {
 				break
@@ -222,6 +231,7 @@
 			}
 		}
 	}
+	base.ExitIfErrors()
 }
 
 // generate runs the generation directives for a single file.
@@ -479,7 +489,7 @@
 		// intends to use the same 'go' as 'go generate' itself.
 		// Prefer to resolve the binary from GOROOT/bin, and for consistency
 		// prefer to resolve any other commands there too.
-		gorootBinPath, err := exec.LookPath(filepath.Join(cfg.GOROOTbin, path))
+		gorootBinPath, err := cfg.LookPath(filepath.Join(cfg.GOROOTbin, path))
 		if err == nil {
 			path = gorootBinPath
 		}
diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go
deleted file mode 100644
index 06b567a..0000000
--- a/src/cmd/go/internal/get/get.go
+++ /dev/null
@@ -1,640 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package get implements the “go get” command.
-package get
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strings"
-
-	"cmd/go/internal/base"
-	"cmd/go/internal/cfg"
-	"cmd/go/internal/load"
-	"cmd/go/internal/search"
-	"cmd/go/internal/str"
-	"cmd/go/internal/vcs"
-	"cmd/go/internal/web"
-	"cmd/go/internal/work"
-
-	"golang.org/x/mod/module"
-)
-
-var CmdGet = &base.Command{
-	UsageLine: "go get [-d] [-f] [-t] [-u] [-v] [-fix] [build flags] [packages]",
-	Short:     "download and install packages and dependencies",
-	Long: `
-Get downloads the packages named by the import paths, along with their
-dependencies. It then installs the named packages, like 'go install'.
-
-The -d flag instructs get to stop after downloading the packages; that is,
-it instructs get not to install the packages.
-
-The -f flag, valid only when -u is set, forces get -u not to verify that
-each package has been checked out from the source control repository
-implied by its import path. This can be useful if the source is a local fork
-of the original.
-
-The -fix flag instructs get to run the fix tool on the downloaded packages
-before resolving dependencies or building the code.
-
-The -t flag instructs get to also download the packages required to build
-the tests for the specified packages.
-
-The -u flag instructs get to use the network to update the named packages
-and their dependencies. By default, get uses the network to check out
-missing packages but does not use it to look for updates to existing packages.
-
-The -v flag enables verbose progress and debug output.
-
-Get also accepts build flags to control the installation. See 'go help build'.
-
-When checking out a new package, get creates the target directory
-GOPATH/src/<import-path>. If the GOPATH contains multiple entries,
-get uses the first one. For more details see: 'go help gopath'.
-
-When checking out or updating a package, get looks for a branch or tag
-that matches the locally installed version of Go. The most important
-rule is that if the local installation is running version "go1", get
-searches for a branch or tag named "go1". If no such version exists
-it retrieves the default branch of the package.
-
-When go get checks out or updates a Git repository,
-it also updates any git submodules referenced by the repository.
-
-Get never checks out or updates code stored in vendor directories.
-
-For more about build flags, see 'go help build'.
-
-For more about specifying packages, see 'go help packages'.
-
-For more about how 'go get' finds source code to
-download, see 'go help importpath'.
-
-This text describes the behavior of get when using GOPATH
-to manage source code and dependencies.
-If instead the go command is running in module-aware mode,
-the details of get's flags and effects change, as does 'go help get'.
-See 'go help modules' and 'go help module-get'.
-
-See also: go build, go install, go clean.
-	`,
-}
-
-var HelpGopathGet = &base.Command{
-	UsageLine: "gopath-get",
-	Short:     "legacy GOPATH go get",
-	Long: `
-The 'go get' command changes behavior depending on whether the
-go command is running in module-aware mode or legacy GOPATH mode.
-This help text, accessible as 'go help gopath-get' even in module-aware mode,
-describes 'go get' as it operates in legacy GOPATH mode.
-
-Usage: ` + CmdGet.UsageLine + `
-` + CmdGet.Long,
-}
-
-var (
-	getD        = CmdGet.Flag.Bool("d", false, "")
-	getF        = CmdGet.Flag.Bool("f", false, "")
-	getT        = CmdGet.Flag.Bool("t", false, "")
-	getU        = CmdGet.Flag.Bool("u", false, "")
-	getFix      = CmdGet.Flag.Bool("fix", false, "")
-	getInsecure = CmdGet.Flag.Bool("insecure", false, "")
-)
-
-func init() {
-	work.AddBuildFlags(CmdGet, work.OmitModFlag|work.OmitModCommonFlags)
-	CmdGet.Run = runGet // break init loop
-}
-
-func runGet(ctx context.Context, cmd *base.Command, args []string) {
-	if cfg.ModulesEnabled {
-		// Should not happen: main.go should install the separate module-enabled get code.
-		base.Fatalf("go: modules not implemented")
-	}
-
-	work.BuildInit()
-
-	if *getF && !*getU {
-		base.Fatalf("go: cannot use -f flag without -u")
-	}
-	if *getInsecure {
-		base.Fatalf("go: -insecure flag is no longer supported; use GOINSECURE instead")
-	}
-
-	// Disable any prompting for passwords by Git itself.
-	// Only has an effect for 2.3.0 or later, but avoiding
-	// the prompt in earlier versions is just too hard.
-	// If user has explicitly set GIT_TERMINAL_PROMPT=1, keep
-	// prompting.
-	// See golang.org/issue/9341 and golang.org/issue/12706.
-	if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
-		os.Setenv("GIT_TERMINAL_PROMPT", "0")
-	}
-
-	// Also disable prompting for passwords by the 'ssh' subprocess spawned by
-	// Git, because apparently GIT_TERMINAL_PROMPT isn't sufficient to do that.
-	// Adding '-o BatchMode=yes' should do the trick.
-	//
-	// If a Git subprocess forks a child into the background to cache a new connection,
-	// that child keeps stdout/stderr open. After the Git subprocess exits,
-	// os /exec expects to be able to read from the stdout/stderr pipe
-	// until EOF to get all the data that the Git subprocess wrote before exiting.
-	// The EOF doesn't come until the child exits too, because the child
-	// is holding the write end of the pipe.
-	// This is unfortunate, but it has come up at least twice
-	// (see golang.org/issue/13453 and golang.org/issue/16104)
-	// and confuses users when it does.
-	// If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND,
-	// assume they know what they are doing and don't step on it.
-	// But default to turning off ControlMaster.
-	if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
-		os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no -o BatchMode=yes")
-	}
-
-	// And one more source of Git prompts: the Git Credential Manager Core for Windows.
-	//
-	// See https://github.com/microsoft/Git-Credential-Manager-Core/blob/master/docs/environment.md#gcm_interactive.
-	if os.Getenv("GCM_INTERACTIVE") == "" {
-		os.Setenv("GCM_INTERACTIVE", "never")
-	}
-
-	// Phase 1. Download/update.
-	var stk load.ImportStack
-	mode := 0
-	if *getT {
-		mode |= load.GetTestDeps
-	}
-	for _, pkg := range downloadPaths(args) {
-		download(ctx, pkg, nil, &stk, mode)
-	}
-	base.ExitIfErrors()
-
-	// Phase 2. Rescan packages and re-evaluate args list.
-
-	// Code we downloaded and all code that depends on it
-	// needs to be evicted from the package cache so that
-	// the information will be recomputed. Instead of keeping
-	// track of the reverse dependency information, evict
-	// everything.
-	load.ClearPackageCache()
-
-	pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args)
-	load.CheckPackageErrors(pkgs)
-
-	// Phase 3. Install.
-	if *getD {
-		// Download only.
-		// Check delayed until now so that downloadPaths
-		// and CheckPackageErrors have a chance to print errors.
-		return
-	}
-
-	work.InstallPackages(ctx, args, pkgs)
-}
-
-// downloadPaths prepares the list of paths to pass to download.
-// It expands ... patterns that can be expanded. If there is no match
-// for a particular pattern, downloadPaths leaves it in the result list,
-// in the hope that we can figure out the repository from the
-// initial ...-free prefix.
-func downloadPaths(patterns []string) []string {
-	for _, arg := range patterns {
-		if strings.Contains(arg, "@") {
-			base.Fatalf("go: can only use path@version syntax with 'go get' and 'go install' in module-aware mode")
-		}
-
-		// Guard against 'go get x.go', a common mistake.
-		// Note that package and module paths may end with '.go', so only print an error
-		// if the argument has no slash or refers to an existing file.
-		if strings.HasSuffix(arg, ".go") {
-			if !strings.Contains(arg, "/") {
-				base.Errorf("go: %s: arguments must be package or module paths", arg)
-				continue
-			}
-			if fi, err := os.Stat(arg); err == nil && !fi.IsDir() {
-				base.Errorf("go: %s exists as a file, but 'go get' requires package arguments", arg)
-			}
-		}
-	}
-	base.ExitIfErrors()
-
-	var pkgs []string
-	noModRoots := []string{}
-	for _, m := range search.ImportPathsQuiet(patterns, noModRoots) {
-		if len(m.Pkgs) == 0 && strings.Contains(m.Pattern(), "...") {
-			pkgs = append(pkgs, m.Pattern())
-		} else {
-			pkgs = append(pkgs, m.Pkgs...)
-		}
-	}
-	return pkgs
-}
-
-// downloadCache records the import paths we have already
-// considered during the download, to avoid duplicate work when
-// there is more than one dependency sequence leading to
-// a particular package.
-var downloadCache = map[string]bool{}
-
-// downloadRootCache records the version control repository
-// root directories we have already considered during the download.
-// For example, all the packages in the github.com/google/codesearch repo
-// share the same root (the directory for that path), and we only need
-// to run the hg commands to consider each repository once.
-var downloadRootCache = map[string]bool{}
-
-// download runs the download half of the get command
-// for the package or pattern named by the argument.
-func download(ctx context.Context, arg string, parent *load.Package, stk *load.ImportStack, mode int) {
-	if mode&load.ResolveImport != 0 {
-		// Caller is responsible for expanding vendor paths.
-		panic("internal error: download mode has useVendor set")
-	}
-	load1 := func(path string, mode int) *load.Package {
-		if parent == nil {
-			mode := 0 // don't do module or vendor resolution
-			return load.LoadPackage(ctx, load.PackageOpts{}, path, base.Cwd(), stk, nil, mode)
-		}
-		p, err := load.LoadImport(ctx, load.PackageOpts{}, path, parent.Dir, parent, stk, nil, mode|load.ResolveModule)
-		if err != nil {
-			base.Errorf("%s", err)
-		}
-		return p
-	}
-
-	p := load1(arg, mode)
-	if p.Error != nil && p.Error.Hard {
-		base.Errorf("%s", p.Error)
-		return
-	}
-
-	// loadPackage inferred the canonical ImportPath from arg.
-	// Use that in the following to prevent hysteresis effects
-	// in e.g. downloadCache and packageCache.
-	// This allows invocations such as:
-	//   mkdir -p $GOPATH/src/github.com/user
-	//   cd $GOPATH/src/github.com/user
-	//   go get ./foo
-	// see: golang.org/issue/9767
-	arg = p.ImportPath
-
-	// There's nothing to do if this is a package in the standard library.
-	if p.Standard {
-		return
-	}
-
-	// Only process each package once.
-	// (Unless we're fetching test dependencies for this package,
-	// in which case we want to process it again.)
-	if downloadCache[arg] && mode&load.GetTestDeps == 0 {
-		return
-	}
-	downloadCache[arg] = true
-
-	pkgs := []*load.Package{p}
-	wildcardOkay := len(*stk) == 0
-	isWildcard := false
-
-	// Download if the package is missing, or update if we're using -u.
-	if p.Dir == "" || *getU {
-		// The actual download.
-		stk.Push(arg)
-		err := downloadPackage(p)
-		if err != nil {
-			base.Errorf("%s", &load.PackageError{ImportStack: stk.Copy(), Err: err})
-			stk.Pop()
-			return
-		}
-		stk.Pop()
-
-		args := []string{arg}
-		// If the argument has a wildcard in it, re-evaluate the wildcard.
-		// We delay this until after reloadPackage so that the old entry
-		// for p has been replaced in the package cache.
-		if wildcardOkay && strings.Contains(arg, "...") {
-			match := search.NewMatch(arg)
-			if match.IsLocal() {
-				noModRoots := []string{} // We're in gopath mode, so there are no modroots.
-				match.MatchDirs(noModRoots)
-				args = match.Dirs
-			} else {
-				match.MatchPackages()
-				args = match.Pkgs
-			}
-			for _, err := range match.Errs {
-				base.Errorf("%s", err)
-			}
-			isWildcard = true
-		}
-
-		// Clear all relevant package cache entries before
-		// doing any new loads.
-		load.ClearPackageCachePartial(args)
-
-		pkgs = pkgs[:0]
-		for _, arg := range args {
-			// Note: load calls loadPackage or loadImport,
-			// which push arg onto stk already.
-			// Do not push here too, or else stk will say arg imports arg.
-			p := load1(arg, mode)
-			if p.Error != nil {
-				base.Errorf("%s", p.Error)
-				continue
-			}
-			pkgs = append(pkgs, p)
-		}
-	}
-
-	// Process package, which might now be multiple packages
-	// due to wildcard expansion.
-	for _, p := range pkgs {
-		if *getFix {
-			files := base.RelPaths(p.InternalAllGoFiles())
-			base.Run(cfg.BuildToolexec, str.StringList(base.Tool("fix"), files))
-
-			// The imports might have changed, so reload again.
-			p = load.ReloadPackageNoFlags(arg, stk)
-			if p.Error != nil {
-				base.Errorf("%s", p.Error)
-				return
-			}
-		}
-
-		if isWildcard {
-			// Report both the real package and the
-			// wildcard in any error message.
-			stk.Push(p.ImportPath)
-		}
-
-		// Process dependencies, now that we know what they are.
-		imports := p.Imports
-		if mode&load.GetTestDeps != 0 {
-			// Process test dependencies when -t is specified.
-			// (But don't get test dependencies for test dependencies:
-			// we always pass mode 0 to the recursive calls below.)
-			imports = str.StringList(imports, p.TestImports, p.XTestImports)
-		}
-		for i, path := range imports {
-			if path == "C" {
-				continue
-			}
-			// Fail fast on import naming full vendor path.
-			// Otherwise expand path as needed for test imports.
-			// Note that p.Imports can have additional entries beyond p.Internal.Build.Imports.
-			orig := path
-			if i < len(p.Internal.Build.Imports) {
-				orig = p.Internal.Build.Imports[i]
-			}
-			if j, ok := load.FindVendor(orig); ok {
-				stk.Push(path)
-				err := &load.PackageError{
-					ImportStack: stk.Copy(),
-					Err:         load.ImportErrorf(path, "%s must be imported as %s", path, path[j+len("vendor/"):]),
-				}
-				stk.Pop()
-				base.Errorf("%s", err)
-				continue
-			}
-			// If this is a test import, apply module and vendor lookup now.
-			// We cannot pass ResolveImport to download, because
-			// download does caching based on the value of path,
-			// so it must be the fully qualified path already.
-			if i >= len(p.Imports) {
-				path = load.ResolveImportPath(p, path)
-			}
-			download(ctx, path, p, stk, 0)
-		}
-
-		if isWildcard {
-			stk.Pop()
-		}
-	}
-}
-
-// downloadPackage runs the create or download command
-// to make the first copy of or update a copy of the given package.
-func downloadPackage(p *load.Package) error {
-	var (
-		vcsCmd                  *vcs.Cmd
-		repo, rootPath, repoDir string
-		err                     error
-		blindRepo               bool // set if the repo has unusual configuration
-	)
-
-	// p can be either a real package, or a pseudo-package whose “import path” is
-	// actually a wildcard pattern.
-	// Trim the path at the element containing the first wildcard,
-	// and hope that it applies to the wildcarded parts too.
-	// This makes 'go get rsc.io/pdf/...' work in a fresh GOPATH.
-	importPrefix := p.ImportPath
-	if i := strings.Index(importPrefix, "..."); i >= 0 {
-		slash := strings.LastIndexByte(importPrefix[:i], '/')
-		if slash < 0 {
-			return fmt.Errorf("cannot expand ... in %q", p.ImportPath)
-		}
-		importPrefix = importPrefix[:slash]
-	}
-	if err := checkImportPath(importPrefix); err != nil {
-		return fmt.Errorf("%s: invalid import path: %v", p.ImportPath, err)
-	}
-	security := web.SecureOnly
-	if module.MatchPrefixPatterns(cfg.GOINSECURE, importPrefix) {
-		security = web.Insecure
-	}
-
-	if p.Internal.Build.SrcRoot != "" {
-		// Directory exists. Look for checkout along path to src.
-		const allowNesting = false
-		repoDir, vcsCmd, err = vcs.FromDir(p.Dir, p.Internal.Build.SrcRoot, allowNesting)
-		if err != nil {
-			return err
-		}
-		if !str.HasFilePathPrefix(repoDir, p.Internal.Build.SrcRoot) {
-			panic(fmt.Sprintf("repository %q not in source root %q", repo, p.Internal.Build.SrcRoot))
-		}
-		rootPath = str.TrimFilePathPrefix(repoDir, p.Internal.Build.SrcRoot)
-		if err := vcs.CheckGOVCS(vcsCmd, rootPath); err != nil {
-			return err
-		}
-
-		repo = "<local>" // should be unused; make distinctive
-
-		// Double-check where it came from.
-		if *getU && vcsCmd.RemoteRepo != nil {
-			dir := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath))
-			remote, err := vcsCmd.RemoteRepo(vcsCmd, dir)
-			if err != nil {
-				// Proceed anyway. The package is present; we likely just don't understand
-				// the repo configuration (e.g. unusual remote protocol).
-				blindRepo = true
-			}
-			repo = remote
-			if !*getF && err == nil {
-				if rr, err := vcs.RepoRootForImportPath(importPrefix, vcs.IgnoreMod, security); err == nil {
-					repo := rr.Repo
-					if rr.VCS.ResolveRepo != nil {
-						resolved, err := rr.VCS.ResolveRepo(rr.VCS, dir, repo)
-						if err == nil {
-							repo = resolved
-						}
-					}
-					if remote != repo && rr.IsCustom {
-						return fmt.Errorf("%s is a custom import path for %s, but %s is checked out from %s", rr.Root, repo, dir, remote)
-					}
-				}
-			}
-		}
-	} else {
-		// Analyze the import path to determine the version control system,
-		// repository, and the import path for the root of the repository.
-		rr, err := vcs.RepoRootForImportPath(importPrefix, vcs.IgnoreMod, security)
-		if err != nil {
-			return err
-		}
-		vcsCmd, repo, rootPath = rr.VCS, rr.Repo, rr.Root
-	}
-	if !blindRepo && !vcsCmd.IsSecure(repo) && security != web.Insecure {
-		return fmt.Errorf("cannot download: %v uses insecure protocol", repo)
-	}
-
-	if p.Internal.Build.SrcRoot == "" {
-		// Package not found. Put in first directory of $GOPATH.
-		list := filepath.SplitList(cfg.BuildContext.GOPATH)
-		if len(list) == 0 {
-			return fmt.Errorf("cannot download: $GOPATH not set. For more details see: 'go help gopath'")
-		}
-		// Guard against people setting GOPATH=$GOROOT.
-		if filepath.Clean(list[0]) == filepath.Clean(cfg.GOROOT) {
-			return fmt.Errorf("cannot download: $GOPATH must not be set to $GOROOT. For more details see: 'go help gopath'")
-		}
-		if _, err := os.Stat(filepath.Join(list[0], "src/cmd/go/alldocs.go")); err == nil {
-			return fmt.Errorf("cannot download: %s is a GOROOT, not a GOPATH. For more details see: 'go help gopath'", list[0])
-		}
-		p.Internal.Build.Root = list[0]
-		p.Internal.Build.SrcRoot = filepath.Join(list[0], "src")
-		p.Internal.Build.PkgRoot = filepath.Join(list[0], "pkg")
-	}
-	root := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath))
-
-	if err := vcs.CheckNested(vcsCmd, root, p.Internal.Build.SrcRoot); err != nil {
-		return err
-	}
-
-	// If we've considered this repository already, don't do it again.
-	if downloadRootCache[root] {
-		return nil
-	}
-	downloadRootCache[root] = true
-
-	if cfg.BuildV {
-		fmt.Fprintf(os.Stderr, "%s (download)\n", rootPath)
-	}
-
-	// Check that this is an appropriate place for the repo to be checked out.
-	// The target directory must either not exist or have a repo checked out already.
-	meta := filepath.Join(root, "."+vcsCmd.Cmd)
-	if _, err := os.Stat(meta); err != nil {
-		// Metadata file or directory does not exist. Prepare to checkout new copy.
-		// Some version control tools require the target directory not to exist.
-		// We require that too, just to avoid stepping on existing work.
-		if _, err := os.Stat(root); err == nil {
-			return fmt.Errorf("%s exists but %s does not - stale checkout?", root, meta)
-		}
-
-		_, err := os.Stat(p.Internal.Build.Root)
-		gopathExisted := err == nil
-
-		// Some version control tools require the parent of the target to exist.
-		parent, _ := filepath.Split(root)
-		if err = os.MkdirAll(parent, 0777); err != nil {
-			return err
-		}
-		if cfg.BuildV && !gopathExisted && p.Internal.Build.Root == cfg.BuildContext.GOPATH {
-			fmt.Fprintf(os.Stderr, "created GOPATH=%s; see 'go help gopath'\n", p.Internal.Build.Root)
-		}
-
-		if err = vcsCmd.Create(root, repo); err != nil {
-			return err
-		}
-	} else {
-		// Metadata directory does exist; download incremental updates.
-		if err = vcsCmd.Download(root); err != nil {
-			return err
-		}
-	}
-
-	if cfg.BuildN {
-		// Do not show tag sync in -n; it's noise more than anything,
-		// and since we're not running commands, no tag will be found.
-		// But avoid printing nothing.
-		fmt.Fprintf(os.Stderr, "# cd %s; %s sync/update\n", root, vcsCmd.Cmd)
-		return nil
-	}
-
-	// Select and sync to appropriate version of the repository.
-	tags, err := vcsCmd.Tags(root)
-	if err != nil {
-		return err
-	}
-	vers := runtime.Version()
-	if i := strings.Index(vers, " "); i >= 0 {
-		vers = vers[:i]
-	}
-	if err := vcsCmd.TagSync(root, selectTag(vers, tags)); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// selectTag returns the closest matching tag for a given version.
-// Closest means the latest one that is not after the current release.
-// Version "goX" (or "goX.Y" or "goX.Y.Z") matches tags of the same form.
-// Version "release.rN" matches tags of the form "go.rN" (N being a floating-point number).
-// Version "weekly.YYYY-MM-DD" matches tags like "go.weekly.YYYY-MM-DD".
-//
-// NOTE(rsc): Eventually we will need to decide on some logic here.
-// For now, there is only "go1". This matches the docs in go help get.
-func selectTag(goVersion string, tags []string) (match string) {
-	for _, t := range tags {
-		if t == "go1" {
-			return "go1"
-		}
-	}
-	return ""
-}
-
-// checkImportPath is like module.CheckImportPath, but it forbids leading dots
-// in path elements. This can lead to 'go get' creating .git and other VCS
-// directories in places we might run VCS tools later.
-func checkImportPath(path string) error {
-	if err := module.CheckImportPath(path); err != nil {
-		return err
-	}
-	checkElem := func(elem string) error {
-		if elem[0] == '.' {
-			return fmt.Errorf("malformed import path %q: leading dot in path element", path)
-		}
-		return nil
-	}
-	elemStart := 0
-	for i, r := range path {
-		if r == '/' {
-			if err := checkElem(path[elemStart:]); err != nil {
-				return err
-			}
-			elemStart = i + 1
-		}
-	}
-	if err := checkElem(path[elemStart:]); err != nil {
-		return err
-	}
-	return nil
-}
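
The deleted checkImportPath above layers one extra rule on module.CheckImportPath: no path element may start with a dot, so GOPATH-mode 'go get' never creates directories such as .git that later VCS probing could misread. A tiny standalone sketch of that element walk (hasDotElement is my own illustrative helper, not part of cmd/go):

package main

import (
	"fmt"
	"strings"
)

// hasDotElement reports whether any slash-separated element of path
// begins with a dot, e.g. "example.com/.git/x".
func hasDotElement(path string) bool {
	for _, elem := range strings.Split(path, "/") {
		if elem != "" && elem[0] == '.' {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasDotElement("example.com/user/repo")) // false
	fmt.Println(hasDotElement("example.com/.git/x"))    // true
}
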
diff --git a/src/cmd/go/internal/get/tag_test.go b/src/cmd/go/internal/get/tag_test.go
deleted file mode 100644
index 9a25dfa..0000000
--- a/src/cmd/go/internal/get/tag_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package get
-
-import "testing"
-
-var selectTagTestTags = []string{
-	"go.r58",
-	"go.r58.1",
-	"go.r59",
-	"go.r59.1",
-	"go.r61",
-	"go.r61.1",
-	"go.weekly.2010-01-02",
-	"go.weekly.2011-10-12",
-	"go.weekly.2011-10-12.1",
-	"go.weekly.2011-10-14",
-	"go.weekly.2011-11-01",
-	"go1",
-	"go1.0.1",
-	"go1.999",
-	"go1.9.2",
-	"go5",
-
-	// these should be ignored:
-	"release.r59",
-	"release.r59.1",
-	"release",
-	"weekly.2011-10-12",
-	"weekly.2011-10-12.1",
-	"weekly",
-	"foo",
-	"bar",
-	"go.f00",
-	"go!r60",
-	"go.1999-01-01",
-	"go.2x",
-	"go.20000000000000",
-	"go.2.",
-	"go.2.0",
-	"go2x",
-	"go20000000000000",
-	"go2.",
-	"go2.0",
-}
-
-var selectTagTests = []struct {
-	version  string
-	selected string
-}{
-	/*
-		{"release.r57", ""},
-		{"release.r58.2", "go.r58.1"},
-		{"release.r59", "go.r59"},
-		{"release.r59.1", "go.r59.1"},
-		{"release.r60", "go.r59.1"},
-		{"release.r60.1", "go.r59.1"},
-		{"release.r61", "go.r61"},
-		{"release.r66", "go.r61.1"},
-		{"weekly.2010-01-01", ""},
-		{"weekly.2010-01-02", "go.weekly.2010-01-02"},
-		{"weekly.2010-01-02.1", "go.weekly.2010-01-02"},
-		{"weekly.2010-01-03", "go.weekly.2010-01-02"},
-		{"weekly.2011-10-12", "go.weekly.2011-10-12"},
-		{"weekly.2011-10-12.1", "go.weekly.2011-10-12.1"},
-		{"weekly.2011-10-13", "go.weekly.2011-10-12.1"},
-		{"weekly.2011-10-14", "go.weekly.2011-10-14"},
-		{"weekly.2011-10-14.1", "go.weekly.2011-10-14"},
-		{"weekly.2011-11-01", "go.weekly.2011-11-01"},
-		{"weekly.2014-01-01", "go.weekly.2011-11-01"},
-		{"weekly.3000-01-01", "go.weekly.2011-11-01"},
-		{"go1", "go1"},
-		{"go1.1", "go1.0.1"},
-		{"go1.998", "go1.9.2"},
-		{"go1.1000", "go1.999"},
-		{"go6", "go5"},
-
-		// faulty versions:
-		{"release.f00", ""},
-		{"weekly.1999-01-01", ""},
-		{"junk", ""},
-		{"", ""},
-		{"go2x", ""},
-		{"go200000000000", ""},
-		{"go2.", ""},
-		{"go2.0", ""},
-	*/
-	{"anything", "go1"},
-}
-
-func TestSelectTag(t *testing.T) {
-	for _, c := range selectTagTests {
-		selected := selectTag(c.version, selectTagTestTags)
-		if selected != c.selected {
-			t.Errorf("selectTag(%q) = %q, want %q", c.version, selected, c.selected)
-		}
-	}
-}
diff --git a/src/cmd/go/internal/gover/gover.go b/src/cmd/go/internal/gover/gover.go
index b2a8261..19c6f67 100644
--- a/src/cmd/go/internal/gover/gover.go
+++ b/src/cmd/go/internal/gover/gover.go
@@ -11,68 +11,23 @@
 package gover
 
 import (
-	"cmp"
+	"internal/gover"
 )
 
-// A version is a parsed Go version: major[.minor[.patch]][kind[pre]]
-// The numbers are the original decimal strings to avoid integer overflows
-// and since there is very little actual math. (Probably overflow doesn't matter in practice,
-// but at the time this code was written, there was an existing test that used
-// go1.99999999999, which does not fit in an int on 32-bit platforms.
-// The "big decimal" representation avoids the problem entirely.)
-type version struct {
-	major string // decimal
-	minor string // decimal or ""
-	patch string // decimal or ""
-	kind  string // "", "alpha", "beta", "rc"
-	pre   string // decimal or ""
-}
-
 // Compare returns -1, 0, or +1 depending on whether
 // x < y, x == y, or x > y, interpreted as toolchain versions.
 // The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
 // Malformed versions compare less than well-formed versions and equal to each other.
 // The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
 func Compare(x, y string) int {
-	vx := parse(x)
-	vy := parse(y)
-
-	if c := cmpInt(vx.major, vy.major); c != 0 {
-		return c
-	}
-	if c := cmpInt(vx.minor, vy.minor); c != 0 {
-		return c
-	}
-	if c := cmpInt(vx.patch, vy.patch); c != 0 {
-		return c
-	}
-	if c := cmp.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
-		return c
-	}
-	if c := cmpInt(vx.pre, vy.pre); c != 0 {
-		return c
-	}
-	return 0
+	return gover.Compare(x, y)
 }
 
 // Max returns the maximum of x and y interpreted as toolchain versions,
 // compared using Compare.
 // If x and y compare equal, Max returns x.
 func Max(x, y string) string {
-	if Compare(x, y) < 0 {
-		return y
-	}
-	return x
-}
-
-// Toolchain returns the maximum of x and y interpreted as toolchain names,
-// compared using Compare(FromToolchain(x), FromToolchain(y)).
-// If x and y compare equal, Max returns x.
-func ToolchainMax(x, y string) string {
-	if Compare(FromToolchain(x), FromToolchain(y)) < 0 {
-		return y
-	}
-	return x
+	return gover.Max(x, y)
 }
 
 // IsLang reports whether v denotes the overall Go language version
@@ -85,22 +40,17 @@
 // meaning that Go 1.21rc1 and Go 1.21.0 will both handle go.mod files that
 // say "go 1.21", but Go 1.21rc1 will not handle files that say "go 1.21.0".
 func IsLang(x string) bool {
-	v := parse(x)
-	return v != version{} && v.patch == "" && v.kind == "" && v.pre == ""
+	return gover.IsLang(x)
 }
 
 // Lang returns the Go language version. For example, Lang("1.2.3") == "1.2".
 func Lang(x string) string {
-	v := parse(x)
-	if v.minor == "" {
-		return v.major
-	}
-	return v.major + "." + v.minor
+	return gover.Lang(x)
 }
 
 // IsPrerelease reports whether v denotes a Go prerelease version.
 func IsPrerelease(x string) bool {
-	return parse(x).kind != ""
+	return gover.Parse(x).Kind != ""
 }
 
 // Prev returns the Go major release immediately preceding v,
@@ -112,143 +62,14 @@
 //	Prev("1.2") = "1.1"
 //	Prev("1.3rc4") = "1.2"
 func Prev(x string) string {
-	v := parse(x)
-	if cmpInt(v.minor, "1") <= 0 {
-		return v.major
+	v := gover.Parse(x)
+	if gover.CmpInt(v.Minor, "1") <= 0 {
+		return v.Major
 	}
-	return v.major + "." + decInt(v.minor)
+	return v.Major + "." + gover.DecInt(v.Minor)
 }
 
 // IsValid reports whether the version x is valid.
 func IsValid(x string) bool {
-	return parse(x) != version{}
-}
-
-// parse parses the Go version string x into a version.
-// It returns the zero version if x is malformed.
-func parse(x string) version {
-	var v version
-
-	// Parse major version.
-	var ok bool
-	v.major, x, ok = cutInt(x)
-	if !ok {
-		return version{}
-	}
-	if x == "" {
-		// Interpret "1" as "1.0.0".
-		v.minor = "0"
-		v.patch = "0"
-		return v
-	}
-
-	// Parse . before minor version.
-	if x[0] != '.' {
-		return version{}
-	}
-
-	// Parse minor version.
-	v.minor, x, ok = cutInt(x[1:])
-	if !ok {
-		return version{}
-	}
-	if x == "" {
-		// Patch missing is same as "0" for older versions.
-		// Starting in Go 1.21, patch missing is different from explicit .0.
-		if cmpInt(v.minor, "21") < 0 {
-			v.patch = "0"
-		}
-		return v
-	}
-
-	// Parse patch if present.
-	if x[0] == '.' {
-		v.patch, x, ok = cutInt(x[1:])
-		if !ok || x != "" {
-			// Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
-			// Allowing them would be a bit confusing because we already have:
-			//	1.21 < 1.21rc1
-			// But a prerelease of a patch would have the opposite effect:
-			//	1.21.3rc1 < 1.21.3
-			// We've never needed them before, so let's not start now.
-			return version{}
-		}
-		return v
-	}
-
-	// Parse prerelease.
-	i := 0
-	for i < len(x) && (x[i] < '0' || '9' < x[i]) {
-		if x[i] < 'a' || 'z' < x[i] {
-			return version{}
-		}
-		i++
-	}
-	if i == 0 {
-		return version{}
-	}
-	v.kind, x = x[:i], x[i:]
-	if x == "" {
-		return v
-	}
-	v.pre, x, ok = cutInt(x)
-	if !ok || x != "" {
-		return version{}
-	}
-
-	return v
-}
-
-// cutInt scans the leading decimal number at the start of x to an integer
-// and returns that value and the rest of the string.
-func cutInt(x string) (n, rest string, ok bool) {
-	i := 0
-	for i < len(x) && '0' <= x[i] && x[i] <= '9' {
-		i++
-	}
-	if i == 0 || x[0] == '0' && i != 1 {
-		return "", "", false
-	}
-	return x[:i], x[i:], true
-}
-
-// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
-// (Copied from golang.org/x/mod/semver's compareInt.)
-func cmpInt(x, y string) int {
-	if x == y {
-		return 0
-	}
-	if len(x) < len(y) {
-		return -1
-	}
-	if len(x) > len(y) {
-		return +1
-	}
-	if x < y {
-		return -1
-	} else {
-		return +1
-	}
-}
-
-// decInt returns the decimal string decremented by 1, or the empty string
-// if the decimal is all zeroes.
-// (Copied from golang.org/x/mod/module's decDecimal.)
-func decInt(decimal string) string {
-	// Scan right to left turning 0s to 9s until you find a digit to decrement.
-	digits := []byte(decimal)
-	i := len(digits) - 1
-	for ; i >= 0 && digits[i] == '0'; i-- {
-		digits[i] = '9'
-	}
-	if i < 0 {
-		// decimal is all zeros
-		return ""
-	}
-	if i == 0 && digits[i] == '1' && len(digits) > 1 {
-		digits = digits[1:]
-	} else {
-		digits[i]--
-	}
-	return string(digits)
+	return gover.IsValid(x)
 }
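
The wrappers above now forward to internal/gover, whose ordering rules Go 1.22 also exposes publicly as the go/version package (its functions expect a "go" prefix). A minimal sketch, standard library only, of the behavior the Compare documentation describes: malformed versions sort lowest, and the language version sorts below its release candidates and point releases.

package main

import (
	"fmt"
	"go/version"
)

func main() {
	fmt.Println(version.Compare("go1.21", "go1.21rc1"))   // -1: language version < release candidate
	fmt.Println(version.Compare("go1.21rc1", "go1.21.0")) // -1: release candidate < release
	fmt.Println(version.Compare("garbage", "go1.21"))     // -1: malformed < well-formed

	fmt.Println(version.Lang("go1.21.3"))     // go1.21
	fmt.Println(version.IsValid("go1.21rc2")) // true
}
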
diff --git a/src/cmd/go/internal/gover/gover_test.go b/src/cmd/go/internal/gover/gover_test.go
index 3a0bf10..68fd56f 100644
--- a/src/cmd/go/internal/gover/gover_test.go
+++ b/src/cmd/go/internal/gover/gover_test.go
@@ -39,31 +39,13 @@
 	{"1.99999999999999998", "1.99999999999999999", -1},
 }
 
-func TestParse(t *testing.T) { test1(t, parseTests, "parse", parse) }
-
-var parseTests = []testCase1[string, version]{
-	{"1", version{"1", "0", "0", "", ""}},
-	{"1.2", version{"1", "2", "0", "", ""}},
-	{"1.2.3", version{"1", "2", "3", "", ""}},
-	{"1.2rc3", version{"1", "2", "", "rc", "3"}},
-	{"1.20", version{"1", "20", "0", "", ""}},
-	{"1.21", version{"1", "21", "", "", ""}},
-	{"1.21rc3", version{"1", "21", "", "rc", "3"}},
-	{"1.21.0", version{"1", "21", "0", "", ""}},
-	{"1.24", version{"1", "24", "", "", ""}},
-	{"1.24rc3", version{"1", "24", "", "rc", "3"}},
-	{"1.24.0", version{"1", "24", "0", "", ""}},
-	{"1.999testmod", version{"1", "999", "", "testmod", ""}},
-	{"1.99999999999999999", version{"1", "99999999999999999", "", "", ""}},
-}
-
 func TestLang(t *testing.T) { test1(t, langTests, "Lang", Lang) }
 
 var langTests = []testCase1[string, string]{
 	{"1.2rc3", "1.2"},
 	{"1.2.3", "1.2"},
 	{"1.2", "1.2"},
-	{"1", "1.0"},
+	{"1", "1"},
 	{"1.999testmod", "1.999"},
 }
 
diff --git a/src/cmd/go/internal/gover/toolchain.go b/src/cmd/go/internal/gover/toolchain.go
index a24df98..43b117e 100644
--- a/src/cmd/go/internal/gover/toolchain.go
+++ b/src/cmd/go/internal/gover/toolchain.go
@@ -52,6 +52,16 @@
 	return FromToolchain(name)
 }
 
+// ToolchainMax returns the maximum of x and y interpreted as toolchain names,
+// compared using Compare(FromToolchain(x), FromToolchain(y)).
+// If x and y compare equal, Max returns x.
+func ToolchainMax(x, y string) string {
+	if Compare(FromToolchain(x), FromToolchain(y)) < 0 {
+		return y
+	}
+	return x
+}
+
 // Startup records the information that went into the startup-time version switch.
 // It is initialized by switchGoToolchain.
 var Startup struct {
diff --git a/src/cmd/go/internal/help/help.go b/src/cmd/go/internal/help/help.go
index aeaba78..501f08e 100644
--- a/src/cmd/go/internal/help/help.go
+++ b/src/cmd/go/internal/help/help.go
@@ -16,7 +16,6 @@
 	"unicode/utf8"
 
 	"cmd/go/internal/base"
-	"cmd/go/internal/modload"
 )
 
 // Help implements the 'help' command.
@@ -27,7 +26,7 @@
 		fmt.Fprintln(w, "// Use of this source code is governed by a BSD-style")
 		fmt.Fprintln(w, "// license that can be found in the LICENSE file.")
 		fmt.Fprintln(w)
-		fmt.Fprintln(w, "// Code generated by 'go test cmd/go -v -run=TestDocsUpToDate -fixdocs'; DO NOT EDIT.")
+		fmt.Fprintln(w, "// Code generated by 'go test cmd/go -v -run=^TestDocsUpToDate$ -fixdocs'; DO NOT EDIT.")
 		fmt.Fprintln(w, "// Edit the documentation in other files and then execute 'go generate cmd/go' to generate this one.")
 		fmt.Fprintln(w)
 		buf := new(strings.Builder)
@@ -35,12 +34,6 @@
 		usage := &base.Command{Long: buf.String()}
 		cmds := []*base.Command{usage}
 		for _, cmd := range base.Go.Commands {
-			// Avoid duplication of the "get" documentation.
-			if cmd.UsageLine == "module-get" && modload.Enabled() {
-				continue
-			} else if cmd.UsageLine == "gopath-get" && !modload.Enabled() {
-				continue
-			}
 			cmds = append(cmds, cmd)
 			cmds = append(cmds, cmd.Commands...)
 		}
diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go
index 68ac4d2..c5d1e2a 100644
--- a/src/cmd/go/internal/help/helpdoc.go
+++ b/src/cmd/go/internal/help/helpdoc.go
@@ -601,6 +601,8 @@
 	GOARM
 		For GOARCH=arm, the ARM architecture for which to compile.
 		Valid values are 5, 6, 7.
+		The value can be followed by an option specifying how to implement floating point instructions.
+		Valid options are ,softfloat (default for 5) and ,hardfloat (default for 6 and 7).
 	GO386
 		For GOARCH=386, how to implement floating point instructions.
 		Valid values are sse2 (default), softfloat.
@@ -829,7 +831,7 @@
 
 Constraints may appear in any kind of source file (not just Go), but
 they must appear near the top of the file, preceded
-only by blank lines and other line comments. These rules mean that in Go
+only by blank lines and other comments. These rules mean that in Go
 files a build constraint must appear before the package clause.
 
 To distinguish build constraints from package documentation,
diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go
index 92020da..d9b0907 100644
--- a/src/cmd/go/internal/list/list.go
+++ b/src/cmd/go/internal/list/list.go
@@ -780,9 +780,7 @@
 					p.Imports[i] = new
 				}
 			}
-			for old := range m {
-				delete(m, old)
-			}
+			clear(m)
 		}
 	}
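
The delete loop replaced here (and the identical one in load.ClearPackageCache further down) is exactly what the clear builtin, added in Go 1.21, does for maps. A tiny standalone illustration:

package main

import "fmt"

func main() {
	m := map[string]string{"old": "a", "new": "b"}

	// Pre-1.21 idiom:
	//	for k := range m { delete(m, k) }
	// clear removes every entry in one statement.
	clear(m)

	fmt.Println(len(m)) // 0
}
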
 
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index c0e6265..1549800 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -18,7 +18,6 @@
 	"internal/platform"
 	"io/fs"
 	"os"
-	"os/exec"
 	pathpkg "path"
 	"path/filepath"
 	"runtime"
@@ -228,9 +227,8 @@
 	LocalPrefix       string               // interpret ./ and ../ imports relative to this prefix
 	ExeName           string               // desired name for temporary executable
 	FuzzInstrument    bool                 // package should be instrumented for fuzzing
-	CoverMode         string               // preprocess Go source files with the coverage tool in this mode
+	Cover             CoverSetup           // coverage mode and other setup info if -cover is being applied to this package
 	CoverVars         map[string]*CoverVar // variables created by coverage analysis
-	CoverageCfg       string               // coverage info config file path (passed to compiler)
 	OmitDebug         bool                 // tell linker not to write debug information
 	GobinSubdir       bool                 // install target would be subdir of GOBIN
 	BuildInfo         *debug.BuildInfo     // add this info to package main
@@ -377,6 +375,13 @@
 	Var  string // name of count struct
 }
 
+// CoverSetup holds parameters related to coverage setup for a given package (covermode, etc).
+type CoverSetup struct {
+	Mode    string // coverage mode for this package
+	Cfg     string // path to config file to pass to "go tool cover"
+	GenMeta bool   // ask the cover tool to emit static meta-data if set
+}
+
 func (p *Package) copyBuild(opts PackageOpts, pp *build.Package) {
 	p.Internal.Build = pp
 
@@ -603,9 +608,7 @@
 // It is only for use by GOPATH-based "go get".
 // TODO(jayconrod): When GOPATH-based "go get" is removed, delete this function.
 func ClearPackageCache() {
-	for name := range packageCache {
-		delete(packageCache, name)
-	}
+	clear(packageCache)
 	resolvedImportCache.Clear()
 	packageDataCache.Clear()
 }
@@ -969,7 +972,7 @@
 					// accepting them.
 					//
 					// TODO(#41410: Figure out how this actually ought to work and fix
-					// this mess.
+					// this mess).
 				} else {
 					data.err = r.err
 				}
@@ -1922,7 +1925,12 @@
 
 		// The linker loads implicit dependencies.
 		if p.Name == "main" && !p.Internal.ForceLibrary {
-			for _, dep := range LinkerDeps(p) {
+			ldDeps, err := LinkerDeps(p)
+			if err != nil {
+				setError(err)
+				return
+			}
+			for _, dep := range ldDeps {
 				addImport(dep, false)
 			}
 		}
@@ -2389,10 +2397,10 @@
 			appendSetting("-ldflags", ldflags)
 		}
 	}
-	// N.B. -pgo added later by setPGOProfilePath.
 	if cfg.BuildMSan {
 		appendSetting("-msan", "true")
 	}
+	// N.B. -pgo added later by setPGOProfilePath.
 	if cfg.BuildRace {
 		appendSetting("-race", "true")
 	}
@@ -2480,7 +2488,7 @@
 			goto omitVCS
 		}
 		if cfg.BuildBuildvcs == "auto" && vcsCmd != nil && vcsCmd.Cmd != "" {
-			if _, err := exec.LookPath(vcsCmd.Cmd); err != nil {
+			if _, err := cfg.LookPath(vcsCmd.Cmd); err != nil {
 				// We found a repository, but the required VCS tool is not present.
 				// "-buildvcs=auto" means that we should silently drop the VCS metadata.
 				goto omitVCS
@@ -2558,12 +2566,15 @@
 }
 
 // LinkerDeps returns the list of linker-induced dependencies for main package p.
-func LinkerDeps(p *Package) []string {
+func LinkerDeps(p *Package) ([]string, error) {
 	// Everything links runtime.
 	deps := []string{"runtime"}
 
 	// External linking mode forces an import of runtime/cgo.
-	if externalLinkingForced(p) && cfg.BuildContext.Compiler != "gccgo" {
+	if what := externalLinkingReason(p); what != "" && cfg.BuildContext.Compiler != "gccgo" {
+		if !cfg.BuildContext.CgoEnabled {
+			return nil, fmt.Errorf("%s requires external (cgo) linking, but cgo is not enabled", what)
+		}
 		deps = append(deps, "runtime/cgo")
 	}
 	// On ARM with GOARM=5, it forces an import of math, for soft floating point.
@@ -2587,30 +2598,27 @@
 		deps = append(deps, "runtime/coverage")
 	}
 
-	return deps
+	return deps, nil
 }
 
-// externalLinkingForced reports whether external linking is being
-// forced even for programs that do not use cgo.
-func externalLinkingForced(p *Package) bool {
-	if !cfg.BuildContext.CgoEnabled {
-		return false
-	}
-
+// externalLinkingReason reports the reason external linking is required
+// even for programs that do not use cgo, or the empty string if external
+// linking is not required.
+func externalLinkingReason(p *Package) (what string) {
 	// Some targets must use external linking even inside GOROOT.
-	if platform.MustLinkExternal(cfg.BuildContext.GOOS, cfg.BuildContext.GOARCH, false) {
-		return true
+	if platform.MustLinkExternal(cfg.Goos, cfg.Goarch, false) {
+		return cfg.Goos + "/" + cfg.Goarch
 	}
 
 	// Some build modes always require external linking.
 	switch cfg.BuildBuildmode {
 	case "c-shared", "plugin":
-		return true
+		return "-buildmode=" + cfg.BuildBuildmode
 	}
 
 	// Using -linkshared always requires external linking.
 	if cfg.BuildLinkshared {
-		return true
+		return "-linkshared"
 	}
 
 	// Decide whether we are building a PIE,
@@ -2625,27 +2633,29 @@
 	// that does not support PIE with internal linking mode,
 	// then we must use external linking.
 	if isPIE && !platform.InternalLinkPIESupported(cfg.BuildContext.GOOS, cfg.BuildContext.GOARCH) {
-		return true
+		if cfg.BuildBuildmode == "pie" {
+			return "-buildmode=pie"
+		}
+		return "default PIE binary"
 	}
 
 	// Using -ldflags=-linkmode=external forces external linking.
 	// If there are multiple -linkmode options, the last one wins.
-	linkmodeExternal := false
 	if p != nil {
 		ldflags := BuildLdflags.For(p)
 		for i := len(ldflags) - 1; i >= 0; i-- {
 			a := ldflags[i]
 			if a == "-linkmode=external" ||
 				a == "-linkmode" && i+1 < len(ldflags) && ldflags[i+1] == "external" {
-				linkmodeExternal = true
-				break
+				return a
 			} else if a == "-linkmode=internal" ||
 				a == "-linkmode" && i+1 < len(ldflags) && ldflags[i+1] == "internal" {
-				break
+				return ""
 			}
 		}
 	}
-	return linkmodeExternal
+
+	return ""
 }
 
 // mkAbs rewrites list, which must be paths relative to p.Dir,
@@ -2930,6 +2940,10 @@
 		} else {
 			appendBuildSetting(p.Internal.BuildInfo, "-pgo", file)
 		}
+		// Adding -pgo breaks the sort order in BuildInfo.Settings. Restore it.
+		slices.SortFunc(p.Internal.BuildInfo.Settings, func(x, y debug.BuildSetting) int {
+			return strings.Compare(x.Key, y.Key)
+		})
 	}
 
 	switch cfg.BuildPGO {
@@ -3487,7 +3501,7 @@
 		}
 
 		// Mark package for instrumentation.
-		p.Internal.CoverMode = cmode
+		p.Internal.Cover.Mode = cmode
 		covered = append(covered, p)
 
 		// Force import of sync/atomic into package if atomic mode.
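
Because setPGOProfilePath appends "-pgo" after the other settings, the hunk above re-establishes the by-key ordering of BuildInfo.Settings with slices.SortFunc. A self-contained sketch of the same pattern on a plain []debug.BuildSetting:

package main

import (
	"fmt"
	"runtime/debug"
	"slices"
	"strings"
)

func main() {
	settings := []debug.BuildSetting{
		{Key: "-ldflags", Value: "-w"},
		{Key: "vcs", Value: "git"},
		{Key: "-pgo", Value: "default.pgo"}, // appended last, out of order
	}

	// Restore the sorted-by-key invariant that BuildInfo consumers expect.
	slices.SortFunc(settings, func(x, y debug.BuildSetting) int {
		return strings.Compare(x.Key, y.Key)
	})

	for _, s := range settings {
		fmt.Println(s.Key, s.Value)
	}
}
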
diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go
index e9ed0d3..d09abab 100644
--- a/src/cmd/go/internal/load/test.go
+++ b/src/cmd/go/internal/load/test.go
@@ -298,7 +298,11 @@
 	// Also the linker introduces implicit dependencies reported by LinkerDeps.
 	stk.Push("testmain")
 	deps := TestMainDeps // cap==len, so safe for append
-	for _, d := range LinkerDeps(p) {
+	ldDeps, err := LinkerDeps(p)
+	if err != nil && pmain.Error == nil {
+		pmain.Error = &PackageError{Err: err}
+	}
+	for _, d := range ldDeps {
 		deps = append(deps, d)
 	}
 	for _, dep := range deps {
@@ -384,15 +388,15 @@
 				// it contains p's Go files), whereas pmain contains only
 				// test harness code (don't want to instrument it, and
 				// we don't want coverage hooks in the pkg init).
-				ptest.Internal.CoverMode = p.Internal.CoverMode
-				pmain.Internal.CoverMode = "testmain"
+				ptest.Internal.Cover.Mode = p.Internal.Cover.Mode
+				pmain.Internal.Cover.Mode = "testmain"
 			}
 			// Should we apply coverage analysis locally, only for this
 			// package and only for this test? Yes, if -cover is on but
 			// -coverpkg has not specified a list of packages for global
 			// coverage.
 			if cover.Local {
-				ptest.Internal.CoverMode = cover.Mode
+				ptest.Internal.Cover.Mode = cover.Mode
 
 				if !cfg.Experiment.CoverageRedesign {
 					var coverFiles []string
diff --git a/src/cmd/go/internal/lockedfile/lockedfile_test.go b/src/cmd/go/internal/lockedfile/lockedfile_test.go
index 8dea8f7..a9fa40b 100644
--- a/src/cmd/go/internal/lockedfile/lockedfile_test.go
+++ b/src/cmd/go/internal/lockedfile/lockedfile_test.go
@@ -238,7 +238,7 @@
 		t.Fatal(err)
 	}
 
-	cmd := testenv.Command(t, os.Args[0], "-test.run="+t.Name())
+	cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$")
 	cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir))
 
 	qDone := make(chan struct{})
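
The re-exec'd test binary now receives an anchored -test.run pattern, so only the named test runs instead of every test that shares the name as a prefix. A small sketch of building such a pattern; regexp.QuoteMeta is my own addition and is unnecessary for ordinary test names, but it makes the exact-match intent explicit:

package main

import (
	"fmt"
	"regexp"
)

// exactTestPattern returns a -test.run value matching only the given test name,
// e.g. TestLock but not TestLockExcludesLock.
func exactTestPattern(name string) string {
	return "^" + regexp.QuoteMeta(name) + "$"
}

func main() {
	fmt.Println(exactTestPattern("TestTransform")) // ^TestTransform$
}
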
diff --git a/src/cmd/go/internal/mmap/mmap.go b/src/cmd/go/internal/mmap/mmap.go
index 0cad9ca..fcbd3e0 100644
--- a/src/cmd/go/internal/mmap/mmap.go
+++ b/src/cmd/go/internal/mmap/mmap.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors.  All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/src/cmd/go/internal/mmap/mmap_other.go b/src/cmd/go/internal/mmap/mmap_other.go
index 22e9395..4d2844f 100644
--- a/src/cmd/go/internal/mmap/mmap_other.go
+++ b/src/cmd/go/internal/mmap/mmap_other.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Go Authors.  All rights reserved.
+// Copyright 2022 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/src/cmd/go/internal/mmap/mmap_unix.go b/src/cmd/go/internal/mmap/mmap_unix.go
index 53bcbb9..5dce872 100644
--- a/src/cmd/go/internal/mmap/mmap_unix.go
+++ b/src/cmd/go/internal/mmap/mmap_unix.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors.  All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/src/cmd/go/internal/mmap/mmap_windows.go b/src/cmd/go/internal/mmap/mmap_windows.go
index 1cf62fe..d00bef7 100644
--- a/src/cmd/go/internal/mmap/mmap_windows.go
+++ b/src/cmd/go/internal/mmap/mmap_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors.  All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go
index e49cd9f..373acce 100644
--- a/src/cmd/go/internal/modcmd/download.go
+++ b/src/cmd/go/internal/modcmd/download.go
@@ -10,6 +10,7 @@
 	"errors"
 	"os"
 	"runtime"
+	"sync"
 
 	"cmd/go/internal/base"
 	"cmd/go/internal/cfg"
@@ -17,6 +18,7 @@
 	"cmd/go/internal/modfetch"
 	"cmd/go/internal/modfetch/codehost"
 	"cmd/go/internal/modload"
+	"cmd/go/internal/toolchain"
 
 	"golang.org/x/mod/module"
 )
@@ -153,7 +155,10 @@
 				// 'go mod graph', and similar commands.
 				_, err := modload.LoadModGraph(ctx, "")
 				if err != nil {
-					base.Fatal(err)
+					// TODO(#64008): call base.Fatalf instead of toolchain.SwitchOrFatal
+					// here, since we can only reach this point with an outdated toolchain
+					// if the go.mod file is inconsistent.
+					toolchain.SwitchOrFatal(ctx, err)
 				}
 
 				for _, m := range modFile.Require {
@@ -194,8 +199,26 @@
 	// from the resulting TooNewError), all before we try the actual full download
 	// of each module.
 	//
-	// For now, we just let it fail: the user can explicitly set GOTOOLCHAIN
-	// and retry if they want to.
+	// For now, we go ahead and try all the downloads and collect the errors, and
+	// if any download failed due to a TooNewError, we switch toolchains and try
+	// again. Any downloads that already succeeded will still be in cache.
+	// That won't give optimal concurrency (we'll do two batches of concurrent
+	// downloads instead of all in one batch), and it might add a little overhead
+	// to look up the downloads from the first batch in the module cache when
+	// we see them again in the second batch. On the other hand, it's way simpler
+	// to implement, and not really any more expensive if the user is requesting
+	// no explicit arguments (their go.mod file should already list an appropriate
+	// toolchain version) or only one module (as is used by the Go Module Proxy).
+
+	if infosErr != nil {
+		var sw toolchain.Switcher
+		sw.Error(infosErr)
+		if sw.NeedSwitch() {
+			sw.Switch(ctx)
+		}
+		// Otherwise, wait to report infosErr after we have downloaded
+		// when we can.
+		// what we can.
 
 	if !haveExplicitArgs && modload.WorkFilePath() == "" {
 		// 'go mod download' is sometimes run without arguments to pre-populate the
@@ -205,7 +228,7 @@
 		// (golang.org/issue/45332). We do still fix inconsistencies in go.mod
 		// though.
 		//
-		// TODO(#45551): In the future, report an error if go.mod or go.sum need to
+		// TODO(#64008): In the future, report an error if go.mod or go.sum need to
 		// be updated after loading the build list. This may require setting
 		// the mode to "mod" or "readonly" depending on haveExplicitArgs.
 		if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil {
@@ -213,6 +236,7 @@
 		}
 	}
 
+	var downloadErrs sync.Map
 	for _, info := range infos {
 		if info.Replace != nil {
 			info = info.Replace
@@ -239,7 +263,11 @@
 		}
 		sem <- token{}
 		go func() {
-			DownloadModule(ctx, m)
+			err := DownloadModule(ctx, m)
+			if err != nil {
+				downloadErrs.Store(m, err)
+				m.Error = err.Error()
+			}
 			<-sem
 		}()
 	}
@@ -249,6 +277,39 @@
 		sem <- token{}
 	}
 
+	// If there were explicit arguments
+	// (like 'go mod download golang.org/x/tools@latest'),
+	// check whether we need to upgrade the toolchain in order to download them.
+	//
+	// (If invoked without arguments, we expect the module graph to already
+	// be tidy and the go.mod file to declare a 'go' version that satisfies
+	// transitive requirements. If that invariant holds, then we should have
+	// already upgraded when we loaded the module graph, and should not need
+	// an additional check here. See https://go.dev/issue/45551.)
+	//
+	// We also allow upgrades if in a workspace because in workspace mode
+	// with no arguments we download the module pattern "all",
+	// which may include dependencies that are normally pruned out
+	// of the individual modules in the workspace.
+	if haveExplicitArgs || modload.WorkFilePath() != "" {
+		var sw toolchain.Switcher
+		// Add errors to the Switcher in deterministic order so that they will be
+		// logged deterministically.
+		for _, m := range mods {
+			if erri, ok := downloadErrs.Load(m); ok {
+				sw.Error(erri.(error))
+			}
+		}
+		// Only call sw.Switch if it will actually switch.
+		// Otherwise, we may want to write the errors as JSON
+		// (instead of using base.Error as sw.Switch would),
+		// and we may also have other errors to report from the
+		// initial infos returned by ListModules.
+		if sw.NeedSwitch() {
+			sw.Switch(ctx)
+		}
+	}
+
 	if *downloadJSON {
 		for _, m := range mods {
 			b, err := json.MarshalIndent(m, "", "\t")
@@ -302,34 +363,27 @@
 
 // DownloadModule runs 'go mod download' for m.Path@m.Version,
 // leaving the results (including any error) in m itself.
-func DownloadModule(ctx context.Context, m *ModuleJSON) {
+func DownloadModule(ctx context.Context, m *ModuleJSON) error {
 	var err error
 	_, file, err := modfetch.InfoFile(ctx, m.Path, m.Version)
 	if err != nil {
-		m.Error = err.Error()
-		return
+		return err
 	}
 	m.Info = file
 	m.GoMod, err = modfetch.GoModFile(ctx, m.Path, m.Version)
 	if err != nil {
-		m.Error = err.Error()
-		return
+		return err
 	}
 	m.GoModSum, err = modfetch.GoModSum(ctx, m.Path, m.Version)
 	if err != nil {
-		m.Error = err.Error()
-		return
+		return err
 	}
 	mod := module.Version{Path: m.Path, Version: m.Version}
 	m.Zip, err = modfetch.DownloadZip(ctx, mod)
 	if err != nil {
-		m.Error = err.Error()
-		return
+		return err
 	}
 	m.Sum = modfetch.Sum(ctx, mod)
 	m.Dir, err = modfetch.Download(ctx, mod)
-	if err != nil {
-		m.Error = err.Error()
-		return
-	}
+	return err
 }
diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go
index 96bd608..db131b0 100644
--- a/src/cmd/go/internal/modcmd/edit.go
+++ b/src/cmd/go/internal/modcmd/edit.go
@@ -315,6 +315,9 @@
 // parsePathVersionOptional parses path[@version], using adj to
 // describe any errors.
 func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version string, err error) {
+	if allowDirPath && modfile.IsDirectoryPath(arg) {
+		return arg, "", nil
+	}
 	before, after, found := strings.Cut(arg, "@")
 	if !found {
 		path = arg
@@ -322,9 +325,7 @@
 		path, version = strings.TrimSpace(before), strings.TrimSpace(after)
 	}
 	if err := module.CheckImportPath(path); err != nil {
-		if !allowDirPath || !modfile.IsDirectoryPath(path) {
-			return path, version, fmt.Errorf("invalid %s path: %v", adj, err)
-		}
+		return path, version, fmt.Errorf("invalid %s path: %v", adj, err)
 	}
 	if path != arg && !allowedVersionArg(version) {
 		return path, version, fmt.Errorf("invalid %s version: %q", adj, version)
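
The new early return depends on modfile.IsDirectoryPath (already imported by this file) to recognize local directory arguments before any path@version splitting. A short illustration of which arguments it treats as directories:

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	args := []string{"./local", "../sibling", "/abs/path", "example.com/m", "example.com/m@v1.2.3"}
	for _, arg := range args {
		fmt.Printf("%-24s directory path: %v\n", arg, modfile.IsDirectoryPath(arg))
	}
	// The first three report true and bypass version parsing;
	// the module paths report false and still go through the '@' split above.
}
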
diff --git a/src/cmd/go/internal/modcmd/init.go b/src/cmd/go/internal/modcmd/init.go
index e4be73f..facdaa9 100644
--- a/src/cmd/go/internal/modcmd/init.go
+++ b/src/cmd/go/internal/modcmd/init.go
@@ -25,9 +25,6 @@
 using import comments in .go files, vendoring tool configuration files (like
 Gopkg.lock), and the current directory (if in GOPATH).
 
-If a configuration file for a vendoring tool is present, init will attempt to
-import module requirements from it.
-
 See https://golang.org/ref/mod#go-mod-init for more about 'go mod init'.
 `,
 	Run: runInit,
diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go
index 1a0d69e..3db85bd 100644
--- a/src/cmd/go/internal/modcmd/vendor.go
+++ b/src/cmd/go/internal/modcmd/vendor.go
@@ -66,6 +66,14 @@
 }
 
 func runVendor(ctx context.Context, cmd *base.Command, args []string) {
+	modload.InitWorkfile()
+	if modload.WorkFilePath() != "" {
+		base.Fatalf("go: 'go mod vendor' cannot be run in workspace mode. Run 'go work vendor' to vendor the workspace or set 'GOWORK=off' to exit workspace mode.")
+	}
+	RunVendor(ctx, vendorE, vendorO, args)
+}
+
+func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) {
 	if len(args) != 0 {
 		base.Fatalf("go: 'go mod vendor' accepts no arguments")
 	}
@@ -98,7 +106,7 @@
 	modpkgs := make(map[module.Version][]string)
 	for _, pkg := range pkgs {
 		m := modload.PackageModule(pkg)
-		if m.Path == "" || m.Version == "" && modload.MainModules.Contains(m.Path) {
+		if m.Path == "" || modload.MainModules.Contains(m.Path) {
 			continue
 		}
 		modpkgs[m] = append(modpkgs[m], pkg)
@@ -107,21 +115,25 @@
 	includeAllReplacements := false
 	includeGoVersions := false
 	isExplicit := map[module.Version]bool{}
-	if gv := modload.ModFile().Go; gv != nil {
-		if gover.Compare(gv.Version, "1.14") >= 0 {
-			// If the Go version is at least 1.14, annotate all explicit 'require' and
-			// 'replace' targets found in the go.mod file so that we can perform a
-			// stronger consistency check when -mod=vendor is set.
-			for _, r := range modload.ModFile().Require {
-				isExplicit[r.Mod] = true
+	gv := modload.MainModules.GoVersion()
+	if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(base.Cwd()) != "" || modload.ModFile().Go != nil) {
+		// If the Go version is at least 1.14, annotate all explicit 'require' and
+		// 'replace' targets found in the go.mod file so that we can perform a
+		// stronger consistency check when -mod=vendor is set.
+		for _, m := range modload.MainModules.Versions() {
+			if modFile := modload.MainModules.ModFile(m); modFile != nil {
+				for _, r := range modFile.Require {
+					isExplicit[r.Mod] = true
+				}
 			}
-			includeAllReplacements = true
+
 		}
-		if gover.Compare(gv.Version, "1.17") >= 0 {
-			// If the Go version is at least 1.17, annotate all modules with their
-			// 'go' version directives.
-			includeGoVersions = true
-		}
+		includeAllReplacements = true
+	}
+	if gover.Compare(gv, "1.17") >= 0 {
+		// If the Go version is at least 1.17, annotate all modules with their
+		// 'go' version directives.
+		includeGoVersions = true
 	}
 
 	var vendorMods []module.Version
@@ -143,9 +155,15 @@
 		w = io.MultiWriter(&buf, os.Stderr)
 	}
 
+	if modload.MainModules.WorkFile() != nil {
+		fmt.Fprintf(w, "## workspace\n")
+	}
+
+	replacementWritten := make(map[module.Version]bool)
 	for _, m := range vendorMods {
 		replacement := modload.Replacement(m)
 		line := moduleLine(m, replacement)
+		replacementWritten[m] = true
 		io.WriteString(w, line)
 
 		goVersion := ""
@@ -173,17 +191,41 @@
 		// Record unused and wildcard replacements at the end of the modules.txt file:
 		// without access to the complete build list, the consumer of the vendor
 		// directory can't otherwise determine that those replacements had no effect.
-		for _, r := range modload.ModFile().Replace {
-			if len(modpkgs[r.Old]) > 0 {
-				// We we already recorded this replacement in the entry for the replaced
-				// module with the packages it provides.
-				continue
-			}
+		for _, m := range modload.MainModules.Versions() {
+			if workFile := modload.MainModules.WorkFile(); workFile != nil {
+				for _, r := range workFile.Replace {
+					if replacementWritten[r.Old] {
+						// We already recorded this replacement.
+						continue
+					}
+					replacementWritten[r.Old] = true
 
-			line := moduleLine(r.Old, r.New)
-			buf.WriteString(line)
-			if cfg.BuildV {
-				os.Stderr.WriteString(line)
+					line := moduleLine(r.Old, r.New)
+					buf.WriteString(line)
+					if cfg.BuildV {
+						os.Stderr.WriteString(line)
+					}
+				}
+			}
+			if modFile := modload.MainModules.ModFile(m); modFile != nil {
+				for _, r := range modFile.Replace {
+					if replacementWritten[r.Old] {
+						// We already recorded this replacement.
+						continue
+					}
+					replacementWritten[r.Old] = true
+					rNew := modload.Replacement(r.Old)
+					if rNew == (module.Version{}) {
+						// There is no replacement. Don't try to write it.
+						continue
+					}
+
+					line := moduleLine(r.Old, rNew)
+					buf.WriteString(line)
+					if cfg.BuildV {
+						os.Stderr.WriteString(line)
+					}
+				}
 			}
 		}
 	}
@@ -257,7 +299,7 @@
 	// a MultiplePackageError on an otherwise valid package: the package could
 	// have different names for GOOS=windows and GOOS=mac for example. On the
 	// other hand if there's a NoGoError, the package might have source files
-	// specifying "// +build ignore" those packages should be skipped because
+	// specifying "//go:build ignore"; those packages should be skipped because
 	// embeds from ignored files can't be used.
 	// TODO(#42504): Find a better way to avoid errors from ImportDir. We'll
 	// need to figure this out when we switch to PackagesAndErrors as per the
@@ -271,7 +313,15 @@
 			base.Fatalf("internal error: failed to find embedded files of %s: %v\n", pkg, err)
 		}
 	}
-	embedPatterns := str.StringList(bp.EmbedPatterns, bp.TestEmbedPatterns, bp.XTestEmbedPatterns)
+	var embedPatterns []string
+	if gover.Compare(modload.MainModules.GoVersion(), "1.22") >= 0 {
+		embedPatterns = bp.EmbedPatterns
+	} else {
+		// Maintain the behavior of https://github.com/golang/go/issues/63473
+		// so that we continue to agree with older versions of the go command
+		// about the contents of vendor directories in existing modules
+		embedPatterns = str.StringList(bp.EmbedPatterns, bp.TestEmbedPatterns, bp.XTestEmbedPatterns)
+	}
 	embeds, err := load.ResolveEmbed(bp.Dir, embedPatterns)
 	if err != nil {
 		base.Fatal(err)
@@ -367,7 +417,7 @@
 		return false
 	}
 	if info.Name() == "go.mod" || info.Name() == "go.sum" {
-		if gv := modload.ModFile().Go; gv != nil && gover.Compare(gv.Version, "1.17") >= 0 {
+		if gv := modload.MainModules.GoVersion(); gover.Compare(gv, "1.17") >= 0 {
 			// As of Go 1.17, we strip go.mod and go.sum files from dependency modules.
 			// Otherwise, 'go' commands invoked within the vendor subtree may misidentify
 			// an arbitrary directory within the vendor tree as a module root.
diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go
index 4552ed1..d07f730 100644
--- a/src/cmd/go/internal/modcmd/verify.go
+++ b/src/cmd/go/internal/modcmd/verify.go
@@ -61,7 +61,7 @@
 	if err != nil {
 		base.Fatal(err)
 	}
-	mods := mg.BuildList()[modload.MainModules.Len():]
+	mods := mg.BuildList()
 	// Use a slice of result channels, so that the output is deterministic.
 	errsChans := make([]<-chan []error, len(mods))
 
@@ -94,6 +94,9 @@
 		// "go" and "toolchain" have no disk footprint; nothing to verify.
 		return nil
 	}
+	if modload.MainModules.Contains(mod.Path) {
+		return nil
+	}
 	var errs []error
 	zip, zipErr := modfetch.CachePath(ctx, mod, "zip")
 	if zipErr == nil {
diff --git a/src/cmd/go/internal/modconv/convert.go b/src/cmd/go/internal/modconv/convert.go
deleted file mode 100644
index 9c861f8..0000000
--- a/src/cmd/go/internal/modconv/convert.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"fmt"
-	"os"
-	"runtime"
-	"sort"
-	"strings"
-
-	"cmd/go/internal/base"
-
-	"golang.org/x/mod/modfile"
-	"golang.org/x/mod/module"
-	"golang.org/x/mod/semver"
-)
-
-// ConvertLegacyConfig converts legacy config to modfile.
-// The file argument is slash-delimited.
-func ConvertLegacyConfig(f *modfile.File, file string, data []byte, queryPackage func(path, rev string) (module.Version, error)) error {
-	i := strings.LastIndex(file, "/")
-	j := -2
-	if i >= 0 {
-		j = strings.LastIndex(file[:i], "/")
-	}
-	convert := Converters[file[i+1:]]
-	if convert == nil && j != -2 {
-		convert = Converters[file[j+1:]]
-	}
-	if convert == nil {
-		return fmt.Errorf("unknown legacy config file %s", file)
-	}
-	mf, err := convert(file, data)
-	if err != nil {
-		return fmt.Errorf("parsing %s: %v", file, err)
-	}
-
-	// Convert requirements block, which may use raw SHA1 hashes as versions,
-	// to valid semver requirement list, respecting major versions.
-	versions := make([]module.Version, len(mf.Require))
-	replace := make(map[string]*modfile.Replace)
-
-	for _, r := range mf.Replace {
-		replace[r.New.Path] = r
-		replace[r.Old.Path] = r
-	}
-
-	type token struct{}
-	sem := make(chan token, runtime.GOMAXPROCS(0))
-	for i, r := range mf.Require {
-		m := r.Mod
-		if m.Path == "" {
-			continue
-		}
-		if re, ok := replace[m.Path]; ok {
-			m = re.New
-		}
-		sem <- token{}
-		go func(i int, m module.Version) {
-			defer func() { <-sem }()
-			version, err := queryPackage(m.Path, m.Version)
-			if err != nil {
-				fmt.Fprintf(os.Stderr, "go: converting %s: stat %s@%s: %v\n", base.ShortPath(file), m.Path, m.Version, err)
-				return
-			}
-
-			versions[i] = version
-		}(i, m)
-	}
-	// Fill semaphore channel to wait for all tasks to finish.
-	for n := cap(sem); n > 0; n-- {
-		sem <- token{}
-	}
-
-	need := map[string]string{}
-	for _, v := range versions {
-		if v.Path == "" {
-			continue
-		}
-		// Don't use semver.Max here; need to preserve +incompatible suffix.
-		if needv, ok := need[v.Path]; !ok || semver.Compare(needv, v.Version) < 0 {
-			need[v.Path] = v.Version
-		}
-	}
-	paths := make([]string, 0, len(need))
-	for path := range need {
-		paths = append(paths, path)
-	}
-	sort.Strings(paths)
-	for _, path := range paths {
-		if re, ok := replace[path]; ok {
-			err := f.AddReplace(re.Old.Path, re.Old.Version, path, need[path])
-			if err != nil {
-				return fmt.Errorf("add replace: %v", err)
-			}
-		}
-		f.AddNewRequire(path, need[path], false)
-	}
-
-	f.Cleanup()
-	return nil
-}
diff --git a/src/cmd/go/internal/modconv/dep.go b/src/cmd/go/internal/modconv/dep.go
deleted file mode 100644
index 9bea761..0000000
--- a/src/cmd/go/internal/modconv/dep.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"fmt"
-	"internal/lazyregexp"
-	"net/url"
-	"path"
-	"strconv"
-	"strings"
-
-	"golang.org/x/mod/modfile"
-	"golang.org/x/mod/module"
-	"golang.org/x/mod/semver"
-)
-
-func ParseGopkgLock(file string, data []byte) (*modfile.File, error) {
-	type pkg struct {
-		Path    string
-		Version string
-		Source  string
-	}
-	mf := new(modfile.File)
-	var list []pkg
-	var r *pkg
-	for lineno, line := range strings.Split(string(data), "\n") {
-		lineno++
-		if i := strings.Index(line, "#"); i >= 0 {
-			line = line[:i]
-		}
-		line = strings.TrimSpace(line)
-		if line == "[[projects]]" {
-			list = append(list, pkg{})
-			r = &list[len(list)-1]
-			continue
-		}
-		if strings.HasPrefix(line, "[") {
-			r = nil
-			continue
-		}
-		if r == nil {
-			continue
-		}
-		before, after, found := strings.Cut(line, "=")
-		if !found {
-			continue
-		}
-		key := strings.TrimSpace(before)
-		val := strings.TrimSpace(after)
-		if len(val) >= 2 && val[0] == '"' && val[len(val)-1] == '"' {
-			q, err := strconv.Unquote(val) // Go unquoting, but close enough for now
-			if err != nil {
-				return nil, fmt.Errorf("%s:%d: invalid quoted string: %v", file, lineno, err)
-			}
-			val = q
-		}
-		switch key {
-		case "name":
-			r.Path = val
-		case "source":
-			r.Source = val
-		case "revision", "version":
-			// Note: key "version" should take priority over "revision",
-			// and it does, because dep writes toml keys in alphabetical order,
-			// so we see version (if present) second.
-			if key == "version" {
-				if !semver.IsValid(val) || semver.Canonical(val) != val {
-					break
-				}
-			}
-			r.Version = val
-		}
-	}
-	for _, r := range list {
-		if r.Path == "" || r.Version == "" {
-			return nil, fmt.Errorf("%s: empty [[projects]] stanza (%s)", file, r.Path)
-		}
-		mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: r.Path, Version: r.Version}})
-
-		if r.Source != "" {
-			// Convert "source" to import path, such as
-			// git@test.com:x/y.git and https://test.com/x/y.git.
-			// We get "test.com/x/y" at last.
-			source, err := decodeSource(r.Source)
-			if err != nil {
-				return nil, err
-			}
-			old := module.Version{Path: r.Path, Version: r.Version}
-			new := module.Version{Path: source, Version: r.Version}
-			mf.Replace = append(mf.Replace, &modfile.Replace{Old: old, New: new})
-		}
-	}
-	return mf, nil
-}
-
-var scpSyntaxReg = lazyregexp.New(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`)
-
-func decodeSource(source string) (string, error) {
-	var u *url.URL
-	var p string
-	if m := scpSyntaxReg.FindStringSubmatch(source); m != nil {
-		// Match SCP-like syntax and convert it to a URL.
-		// Eg, "git@github.com:user/repo" becomes
-		// "ssh://git@github.com/user/repo".
-		u = &url.URL{
-			Scheme: "ssh",
-			User:   url.User(m[1]),
-			Host:   m[2],
-			Path:   "/" + m[3],
-		}
-	} else {
-		var err error
-		u, err = url.Parse(source)
-		if err != nil {
-			return "", fmt.Errorf("%q is not a valid URI", source)
-		}
-	}
-
-	// If no scheme was passed, then the entire path will have been put into
-	// u.Path. Either way, construct the normalized path correctly.
-	if u.Host == "" {
-		p = source
-	} else {
-		p = path.Join(u.Host, u.Path)
-	}
-	p = strings.TrimSuffix(p, ".git")
-	p = strings.TrimSuffix(p, ".hg")
-	return p, nil
-}
diff --git a/src/cmd/go/internal/modconv/glide.go b/src/cmd/go/internal/modconv/glide.go
deleted file mode 100644
index d1de3f7..0000000
--- a/src/cmd/go/internal/modconv/glide.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"strings"
-
-	"golang.org/x/mod/modfile"
-	"golang.org/x/mod/module"
-)
-
-func ParseGlideLock(file string, data []byte) (*modfile.File, error) {
-	mf := new(modfile.File)
-	imports := false
-	name := ""
-	for _, line := range strings.Split(string(data), "\n") {
-		if line == "" {
-			continue
-		}
-		if strings.HasPrefix(line, "imports:") {
-			imports = true
-		} else if line[0] != '-' && line[0] != ' ' && line[0] != '\t' {
-			imports = false
-		}
-		if !imports {
-			continue
-		}
-		if strings.HasPrefix(line, "- name:") {
-			name = strings.TrimSpace(line[len("- name:"):])
-		}
-		if strings.HasPrefix(line, "  version:") {
-			version := strings.TrimSpace(line[len("  version:"):])
-			if name != "" && version != "" {
-				mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: name, Version: version}})
-			}
-		}
-	}
-	return mf, nil
-}
diff --git a/src/cmd/go/internal/modconv/glock.go b/src/cmd/go/internal/modconv/glock.go
deleted file mode 100644
index b8dc204..0000000
--- a/src/cmd/go/internal/modconv/glock.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"strings"
-
-	"golang.org/x/mod/modfile"
-	"golang.org/x/mod/module"
-)
-
-func ParseGLOCKFILE(file string, data []byte) (*modfile.File, error) {
-	mf := new(modfile.File)
-	for _, line := range strings.Split(string(data), "\n") {
-		f := strings.Fields(line)
-		if len(f) >= 2 && f[0] != "cmd" {
-			mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: f[0], Version: f[1]}})
-		}
-	}
-	return mf, nil
-}
diff --git a/src/cmd/go/internal/modconv/godeps.go b/src/cmd/go/internal/modconv/godeps.go
deleted file mode 100644
index 09c0fa3..0000000
--- a/src/cmd/go/internal/modconv/godeps.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"encoding/json"
-
-	"golang.org/x/mod/modfile"
-	"golang.org/x/mod/module"
-)
-
-func ParseGodepsJSON(file string, data []byte) (*modfile.File, error) {
-	var cfg struct {
-		ImportPath string
-		Deps       []struct {
-			ImportPath string
-			Rev        string
-		}
-	}
-	if err := json.Unmarshal(data, &cfg); err != nil {
-		return nil, err
-	}
-	mf := new(modfile.File)
-	for _, d := range cfg.Deps {
-		mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: d.ImportPath, Version: d.Rev}})
-	}
-	return mf, nil
-}
diff --git a/src/cmd/go/internal/modconv/modconv.go b/src/cmd/go/internal/modconv/modconv.go
deleted file mode 100644
index dc06072..0000000
--- a/src/cmd/go/internal/modconv/modconv.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import "golang.org/x/mod/modfile"
-
-var Converters = map[string]func(string, []byte) (*modfile.File, error){
-	"GLOCKFILE":          ParseGLOCKFILE,
-	"Godeps/Godeps.json": ParseGodepsJSON,
-	"Gopkg.lock":         ParseGopkgLock,
-	"dependencies.tsv":   ParseDependenciesTSV,
-	"glide.lock":         ParseGlideLock,
-	"vendor.conf":        ParseVendorConf,
-	"vendor.yml":         ParseVendorYML,
-	"vendor/manifest":    ParseVendorManifest,
-	"vendor/vendor.json": ParseVendorJSON,
-}
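The Converters table above keys each legacy dependency file name to its parser. Below is a hedged sketch of how a caller outside cmd/go might drive such a table; the converters map and the convertLegacy helper are illustrative stand-ins introduced here, since the real parsers live in the internal modconv package.

// Sketch: probing a repository for any known legacy config file and parsing it.
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/mod/modfile"
)

// converters is a stand-in for modconv.Converters; in the real table each key
// maps to a parser such as ParseGopkgLock or ParseGlideLock.
var converters = map[string]func(string, []byte) (*modfile.File, error){}

// convertLegacy parses any known legacy config file present under dir
// (map iteration order is unspecified) and returns nil if none exist.
func convertLegacy(dir string) (*modfile.File, error) {
	for name, parse := range converters {
		data, err := os.ReadFile(filepath.Join(dir, filepath.FromSlash(name)))
		if err != nil {
			continue // file not present; try the next known format
		}
		return parse(name, data)
	}
	return nil, nil
}

func main() {
	mf, err := convertLegacy(".")
	if err != nil {
		fmt.Fprintln(os.Stderr, "convert:", err)
		return
	}
	if mf == nil {
		fmt.Println("no known legacy config file found")
		return
	}
	for _, r := range mf.Require {
		fmt.Println(r.Mod.Path, r.Mod.Version)
	}
}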
diff --git a/src/cmd/go/internal/modconv/modconv_test.go b/src/cmd/go/internal/modconv/modconv_test.go
deleted file mode 100644
index 750525d..0000000
--- a/src/cmd/go/internal/modconv/modconv_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"path/filepath"
-	"testing"
-)
-
-var extMap = map[string]string{
-	".dep":       "Gopkg.lock",
-	".glide":     "glide.lock",
-	".glock":     "GLOCKFILE",
-	".godeps":    "Godeps/Godeps.json",
-	".tsv":       "dependencies.tsv",
-	".vconf":     "vendor.conf",
-	".vjson":     "vendor/vendor.json",
-	".vyml":      "vendor.yml",
-	".vmanifest": "vendor/manifest",
-}
-
-func Test(t *testing.T) {
-	tests, _ := filepath.Glob("testdata/*")
-	if len(tests) == 0 {
-		t.Fatalf("no tests found")
-	}
-	for _, test := range tests {
-		file := filepath.Base(test)
-		ext := filepath.Ext(file)
-		if ext == ".out" {
-			continue
-		}
-		t.Run(file, func(t *testing.T) {
-			if extMap[ext] == "" {
-				t.Fatal("unknown extension")
-			}
-			if Converters[extMap[ext]] == nil {
-				t.Fatalf("Converters[%q] == nil", extMap[ext])
-			}
-			data, err := os.ReadFile(test)
-			if err != nil {
-				t.Fatal(err)
-			}
-			out, err := Converters[extMap[ext]](test, data)
-			if err != nil {
-				t.Fatal(err)
-			}
-			want, err := os.ReadFile(test[:len(test)-len(ext)] + ".out")
-			if err != nil {
-				t.Error(err)
-			}
-			var buf bytes.Buffer
-			for _, r := range out.Require {
-				fmt.Fprintf(&buf, "%s %s\n", r.Mod.Path, r.Mod.Version)
-			}
-			for _, r := range out.Replace {
-				fmt.Fprintf(&buf, "replace: %s %s %s %s\n", r.Old.Path, r.Old.Version, r.New.Path, r.New.Version)
-			}
-			if !bytes.Equal(buf.Bytes(), want) {
-				t.Errorf("have:\n%s\nwant:\n%s", buf.Bytes(), want)
-			}
-		})
-	}
-}
diff --git a/src/cmd/go/internal/modconv/testdata/cockroach.glock b/src/cmd/go/internal/modconv/testdata/cockroach.glock
deleted file mode 100644
index 221c8ac..0000000
--- a/src/cmd/go/internal/modconv/testdata/cockroach.glock
+++ /dev/null
@@ -1,41 +0,0 @@
-cmd github.com/cockroachdb/c-protobuf/cmd/protoc
-cmd github.com/cockroachdb/yacc
-cmd github.com/gogo/protobuf/protoc-gen-gogo
-cmd github.com/golang/lint/golint
-cmd github.com/jteeuwen/go-bindata/go-bindata
-cmd github.com/kisielk/errcheck
-cmd github.com/robfig/glock
-cmd github.com/tebeka/go2xunit
-cmd golang.org/x/tools/cmd/goimports
-cmd golang.org/x/tools/cmd/stringer
-github.com/agtorre/gocolorize f42b554bf7f006936130c9bb4f971afd2d87f671
-github.com/biogo/store e1f74b3c58befe661feed7fa4cf52436de753128
-github.com/cockroachdb/c-lz4 6e71f140a365017bbe0904710007f8725fd3f809
-github.com/cockroachdb/c-protobuf 0f9ab7b988ca7474cf76b9a961ab03c0552abcb3
-github.com/cockroachdb/c-rocksdb 7fc876fe79b96de0e25069c9ae27e6444637bd54
-github.com/cockroachdb/c-snappy 618733f9e5bab8463b9049117a335a7a1bfc9fd5
-github.com/cockroachdb/yacc 572e006f8e6b0061ebda949d13744f5108389514
-github.com/coreos/etcd 18ecc297bc913bed6fc093d66b1fa22020dba7dc
-github.com/docker/docker 7374852be9def787921aea2ca831771982badecf
-github.com/elazarl/go-bindata-assetfs 3dcc96556217539f50599357fb481ac0dc7439b9
-github.com/gogo/protobuf 98e73e511a62a9c232152f94999112c80142a813
-github.com/golang/lint 7b7f4364ff76043e6c3610281525fabc0d90f0e4
-github.com/google/btree cc6329d4279e3f025a53a83c397d2339b5705c45
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-github.com/jteeuwen/go-bindata dce55d09e24ac40a6e725c8420902b86554f8046
-github.com/julienschmidt/httprouter 6aacfd5ab513e34f7e64ea9627ab9670371b34e7
-github.com/kisielk/errcheck 50b84cf7fa18ee2985b8c63ba3de5edd604b9259
-github.com/kisielk/gotool d678387370a2eb9b5b0a33218bc8c9d8de15b6be
-github.com/lib/pq a8d8d01c4f91602f876bf5aa210274e8203a6b45
-github.com/montanaflynn/stats 44fb56da2a2a67d394dec0e18a82dd316f192529
-github.com/peterh/liner 1bb0d1c1a25ed393d8feb09bab039b2b1b1fbced
-github.com/robfig/glock cb3c3ec56de988289cab7bbd284eddc04dfee6c9
-github.com/samalba/dockerclient 12570e600d71374233e5056ba315f657ced496c7
-github.com/spf13/cobra 66816bcd0378e248c613e3c443c020f544c28804
-github.com/spf13/pflag 67cbc198fd11dab704b214c1e629a97af392c085
-github.com/tebeka/go2xunit d45000af2242dd0e7b8c7b07d82a1068adc5fd40
-golang.org/x/crypto cc04154d65fb9296747569b107cfd05380b1ea3e
-golang.org/x/net 8bfde94a845cb31000de3266ac83edbda58dab09
-golang.org/x/text d4cc1b1e16b49d6dafc4982403b40fe89c512cd5
-golang.org/x/tools d02228d1857b9f49cd0252788516ff5584266eb6
-gopkg.in/yaml.v1 9f9df34309c04878acc86042b16630b0f696e1de
diff --git a/src/cmd/go/internal/modconv/testdata/cockroach.out b/src/cmd/go/internal/modconv/testdata/cockroach.out
deleted file mode 100644
index 30cdbb7..0000000
--- a/src/cmd/go/internal/modconv/testdata/cockroach.out
+++ /dev/null
@@ -1,31 +0,0 @@
-github.com/agtorre/gocolorize f42b554bf7f006936130c9bb4f971afd2d87f671
-github.com/biogo/store e1f74b3c58befe661feed7fa4cf52436de753128
-github.com/cockroachdb/c-lz4 6e71f140a365017bbe0904710007f8725fd3f809
-github.com/cockroachdb/c-protobuf 0f9ab7b988ca7474cf76b9a961ab03c0552abcb3
-github.com/cockroachdb/c-rocksdb 7fc876fe79b96de0e25069c9ae27e6444637bd54
-github.com/cockroachdb/c-snappy 618733f9e5bab8463b9049117a335a7a1bfc9fd5
-github.com/cockroachdb/yacc 572e006f8e6b0061ebda949d13744f5108389514
-github.com/coreos/etcd 18ecc297bc913bed6fc093d66b1fa22020dba7dc
-github.com/docker/docker 7374852be9def787921aea2ca831771982badecf
-github.com/elazarl/go-bindata-assetfs 3dcc96556217539f50599357fb481ac0dc7439b9
-github.com/gogo/protobuf 98e73e511a62a9c232152f94999112c80142a813
-github.com/golang/lint 7b7f4364ff76043e6c3610281525fabc0d90f0e4
-github.com/google/btree cc6329d4279e3f025a53a83c397d2339b5705c45
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-github.com/jteeuwen/go-bindata dce55d09e24ac40a6e725c8420902b86554f8046
-github.com/julienschmidt/httprouter 6aacfd5ab513e34f7e64ea9627ab9670371b34e7
-github.com/kisielk/errcheck 50b84cf7fa18ee2985b8c63ba3de5edd604b9259
-github.com/kisielk/gotool d678387370a2eb9b5b0a33218bc8c9d8de15b6be
-github.com/lib/pq a8d8d01c4f91602f876bf5aa210274e8203a6b45
-github.com/montanaflynn/stats 44fb56da2a2a67d394dec0e18a82dd316f192529
-github.com/peterh/liner 1bb0d1c1a25ed393d8feb09bab039b2b1b1fbced
-github.com/robfig/glock cb3c3ec56de988289cab7bbd284eddc04dfee6c9
-github.com/samalba/dockerclient 12570e600d71374233e5056ba315f657ced496c7
-github.com/spf13/cobra 66816bcd0378e248c613e3c443c020f544c28804
-github.com/spf13/pflag 67cbc198fd11dab704b214c1e629a97af392c085
-github.com/tebeka/go2xunit d45000af2242dd0e7b8c7b07d82a1068adc5fd40
-golang.org/x/crypto cc04154d65fb9296747569b107cfd05380b1ea3e
-golang.org/x/net 8bfde94a845cb31000de3266ac83edbda58dab09
-golang.org/x/text d4cc1b1e16b49d6dafc4982403b40fe89c512cd5
-golang.org/x/tools d02228d1857b9f49cd0252788516ff5584266eb6
-gopkg.in/yaml.v1 9f9df34309c04878acc86042b16630b0f696e1de
diff --git a/src/cmd/go/internal/modconv/testdata/dockermachine.godeps b/src/cmd/go/internal/modconv/testdata/dockermachine.godeps
deleted file mode 100644
index a551002..0000000
--- a/src/cmd/go/internal/modconv/testdata/dockermachine.godeps
+++ /dev/null
@@ -1,159 +0,0 @@
-{
-	"ImportPath": "github.com/docker/machine",
-	"GoVersion": "go1.4.2",
-	"Deps": [
-		{
-			"ImportPath": "code.google.com/p/goauth2/oauth",
-			"Comment": "weekly-56",
-			"Rev": "afe77d958c701557ec5dc56f6936fcc194d15520"
-		},
-		{
-			"ImportPath": "github.com/MSOpenTech/azure-sdk-for-go",
-			"Comment": "v1.1-17-g515f3ec",
-			"Rev": "515f3ec74ce6a5b31e934cefae997c97bd0a1b1e"
-		},
-		{
-			"ImportPath": "github.com/cenkalti/backoff",
-			"Rev": "9831e1e25c874e0a0601b6dc43641071414eec7a"
-		},
-		{
-			"ImportPath": "github.com/codegangsta/cli",
-			"Comment": "1.2.0-64-ge1712f3",
-			"Rev": "e1712f381785e32046927f64a7c86fe569203196"
-		},
-		{
-			"ImportPath": "github.com/digitalocean/godo",
-			"Comment": "v0.5.0",
-			"Rev": "5478aae80694de1d2d0e02c386bbedd201266234"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/dockerversion",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/engine",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/archive",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/fileutils",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/ioutils",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/mflag",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/parsers",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/pools",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/promise",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/system",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/term",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/timeutils",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/units",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/version",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/libtrust",
-			"Rev": "c54fbb67c1f1e68d7d6f8d2ad7c9360404616a41"
-		},
-		{
-			"ImportPath": "github.com/google/go-querystring/query",
-			"Rev": "30f7a39f4a218feb5325f3aebc60c32a572a8274"
-		},
-		{
-			"ImportPath": "github.com/mitchellh/mapstructure",
-			"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
-		},
-		{
-			"ImportPath": "github.com/rackspace/gophercloud",
-			"Comment": "v1.0.0-558-ce0f487",
-			"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
-		},
-		{
-			"ImportPath": "github.com/skarademir/naturalsort",
-			"Rev": "983d4d86054d80f91fd04dd62ec52c1d078ce403"
-		},
-		{
-			"ImportPath": "github.com/smartystreets/go-aws-auth",
-			"Rev": "1f0db8c0ee6362470abe06a94e3385927ed72a4b"
-		},
-		{
-			"ImportPath": "github.com/stretchr/testify/assert",
-			"Rev": "e4ec8152c15fc46bd5056ce65997a07c7d415325"
-		},
-		{
-			"ImportPath": "github.com/pyr/egoscale/src/egoscale",
-			"Rev": "bbaa67324aeeacc90430c1fe0a9c620d3929512e"
-		},
-		{
-			"ImportPath": "github.com/tent/http-link-go",
-			"Rev": "ac974c61c2f990f4115b119354b5e0b47550e888"
-		},
-		{
-			"ImportPath": "github.com/vmware/govcloudair",
-			"Comment": "v0.0.2",
-			"Rev": "66a23eaabc61518f91769939ff541886fe1dceef"
-		},
-		{
-			"ImportPath": "golang.org/x/crypto/ssh",
-			"Rev": "1fbbd62cfec66bd39d91e97749579579d4d3037e"
-		},
-		{
-			"ImportPath": "google.golang.org/api/compute/v1",
-			"Rev": "aa91ac681e18e52b1a0dfe29b9d8354e88c0dcf5"
-		},
-		{
-			"ImportPath": "google.golang.org/api/googleapi",
-			"Rev": "aa91ac681e18e52b1a0dfe29b9d8354e88c0dcf5"
-		}
-	]
-}
diff --git a/src/cmd/go/internal/modconv/testdata/dockermachine.out b/src/cmd/go/internal/modconv/testdata/dockermachine.out
deleted file mode 100644
index 0b39cea..0000000
--- a/src/cmd/go/internal/modconv/testdata/dockermachine.out
+++ /dev/null
@@ -1,33 +0,0 @@
-code.google.com/p/goauth2/oauth afe77d958c701557ec5dc56f6936fcc194d15520
-github.com/MSOpenTech/azure-sdk-for-go 515f3ec74ce6a5b31e934cefae997c97bd0a1b1e
-github.com/cenkalti/backoff 9831e1e25c874e0a0601b6dc43641071414eec7a
-github.com/codegangsta/cli e1712f381785e32046927f64a7c86fe569203196
-github.com/digitalocean/godo 5478aae80694de1d2d0e02c386bbedd201266234
-github.com/docker/docker/dockerversion a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/engine a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/archive a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/fileutils a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/ioutils a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/mflag a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/parsers a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/pools a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/promise a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/system a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/term a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/timeutils a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/units a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/pkg/version a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar a8a31eff10544860d2188dddabdee4d727545796
-github.com/docker/libtrust c54fbb67c1f1e68d7d6f8d2ad7c9360404616a41
-github.com/google/go-querystring/query 30f7a39f4a218feb5325f3aebc60c32a572a8274
-github.com/mitchellh/mapstructure 740c764bc6149d3f1806231418adb9f52c11bcbf
-github.com/rackspace/gophercloud ce0f487f6747ab43c4e4404722df25349385bebd
-github.com/skarademir/naturalsort 983d4d86054d80f91fd04dd62ec52c1d078ce403
-github.com/smartystreets/go-aws-auth 1f0db8c0ee6362470abe06a94e3385927ed72a4b
-github.com/stretchr/testify/assert e4ec8152c15fc46bd5056ce65997a07c7d415325
-github.com/pyr/egoscale/src/egoscale bbaa67324aeeacc90430c1fe0a9c620d3929512e
-github.com/tent/http-link-go ac974c61c2f990f4115b119354b5e0b47550e888
-github.com/vmware/govcloudair 66a23eaabc61518f91769939ff541886fe1dceef
-golang.org/x/crypto/ssh 1fbbd62cfec66bd39d91e97749579579d4d3037e
-google.golang.org/api/compute/v1 aa91ac681e18e52b1a0dfe29b9d8354e88c0dcf5
-google.golang.org/api/googleapi aa91ac681e18e52b1a0dfe29b9d8354e88c0dcf5
diff --git a/src/cmd/go/internal/modconv/testdata/dockerman.glide b/src/cmd/go/internal/modconv/testdata/dockerman.glide
deleted file mode 100644
index 5ec765a..0000000
--- a/src/cmd/go/internal/modconv/testdata/dockerman.glide
+++ /dev/null
@@ -1,52 +0,0 @@
-hash: ead3ea293a6143fe41069ebec814bf197d8c43a92cc7666b1f7e21a419b46feb
-updated: 2016-06-20T21:53:35.420817456Z
-imports:
-- name: github.com/BurntSushi/toml
-  version: f0aeabca5a127c4078abb8c8d64298b147264b55
-- name: github.com/cpuguy83/go-md2man
-  version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa
-  subpackages:
-  - md2man
-- name: github.com/fsnotify/fsnotify
-  version: 30411dbcefb7a1da7e84f75530ad3abe4011b4f8
-- name: github.com/hashicorp/hcl
-  version: da486364306ed66c218be9b7953e19173447c18b
-  subpackages:
-  - hcl/ast
-  - hcl/parser
-  - hcl/token
-  - json/parser
-  - hcl/scanner
-  - hcl/strconv
-  - json/scanner
-  - json/token
-- name: github.com/inconshreveable/mousetrap
-  version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-- name: github.com/magiconair/properties
-  version: c265cfa48dda6474e208715ca93e987829f572f8
-- name: github.com/mitchellh/mapstructure
-  version: d2dd0262208475919e1a362f675cfc0e7c10e905
-- name: github.com/russross/blackfriday
-  version: 1d6b8e9301e720b08a8938b8c25c018285885438
-- name: github.com/shurcooL/sanitized_anchor_name
-  version: 10ef21a441db47d8b13ebcc5fd2310f636973c77
-- name: github.com/spf13/cast
-  version: 27b586b42e29bec072fe7379259cc719e1289da6
-- name: github.com/spf13/jwalterweatherman
-  version: 33c24e77fb80341fe7130ee7c594256ff08ccc46
-- name: github.com/spf13/pflag
-  version: dabebe21bf790f782ea4c7bbd2efc430de182afd
-- name: github.com/spf13/viper
-  version: c1ccc378a054ea8d4e38d8c67f6938d4760b53dd
-- name: golang.org/x/sys
-  version: 62bee037599929a6e9146f29d10dd5208c43507d
-  subpackages:
-  - unix
-- name: gopkg.in/yaml.v2
-  version: a83829b6f1293c91addabc89d0571c246397bbf4
-- name: github.com/spf13/cobra
-  repo: https://github.com/dnephin/cobra
-  subpackages:
-  - doc
-  version: v1.3
-devImports: []
diff --git a/src/cmd/go/internal/modconv/testdata/dockerman.out b/src/cmd/go/internal/modconv/testdata/dockerman.out
deleted file mode 100644
index 5e6370b..0000000
--- a/src/cmd/go/internal/modconv/testdata/dockerman.out
+++ /dev/null
@@ -1,16 +0,0 @@
-github.com/BurntSushi/toml f0aeabca5a127c4078abb8c8d64298b147264b55
-github.com/cpuguy83/go-md2man a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa
-github.com/fsnotify/fsnotify 30411dbcefb7a1da7e84f75530ad3abe4011b4f8
-github.com/hashicorp/hcl da486364306ed66c218be9b7953e19173447c18b
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-github.com/magiconair/properties c265cfa48dda6474e208715ca93e987829f572f8
-github.com/mitchellh/mapstructure d2dd0262208475919e1a362f675cfc0e7c10e905
-github.com/russross/blackfriday 1d6b8e9301e720b08a8938b8c25c018285885438
-github.com/shurcooL/sanitized_anchor_name 10ef21a441db47d8b13ebcc5fd2310f636973c77
-github.com/spf13/cast 27b586b42e29bec072fe7379259cc719e1289da6
-github.com/spf13/jwalterweatherman 33c24e77fb80341fe7130ee7c594256ff08ccc46
-github.com/spf13/pflag dabebe21bf790f782ea4c7bbd2efc430de182afd
-github.com/spf13/viper c1ccc378a054ea8d4e38d8c67f6938d4760b53dd
-golang.org/x/sys 62bee037599929a6e9146f29d10dd5208c43507d
-gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
-github.com/spf13/cobra v1.3
diff --git a/src/cmd/go/internal/modconv/testdata/govmomi.out b/src/cmd/go/internal/modconv/testdata/govmomi.out
deleted file mode 100644
index 188c458..0000000
--- a/src/cmd/go/internal/modconv/testdata/govmomi.out
+++ /dev/null
@@ -1,5 +0,0 @@
-github.com/davecgh/go-xdr/xdr2 4930550ba2e22f87187498acfd78348b15f4e7a8
-github.com/google/uuid 6a5e28554805e78ea6141142aba763936c4761c0
-github.com/kr/pretty 2ee9d7453c02ef7fa518a83ae23644eb8872186a
-github.com/kr/pty 95d05c1eef33a45bd58676b6ce28d105839b8d0b
-github.com/vmware/vmw-guestinfo 25eff159a728be87e103a0b8045e08273f4dbec4
diff --git a/src/cmd/go/internal/modconv/testdata/govmomi.vmanifest b/src/cmd/go/internal/modconv/testdata/govmomi.vmanifest
deleted file mode 100644
index b89e4ab..0000000
--- a/src/cmd/go/internal/modconv/testdata/govmomi.vmanifest
+++ /dev/null
@@ -1,46 +0,0 @@
-{
-	"version": 0,
-	"dependencies": [
-		{
-			"importpath": "github.com/davecgh/go-xdr/xdr2",
-			"repository": "https://github.com/rasky/go-xdr",
-			"vcs": "git",
-			"revision": "4930550ba2e22f87187498acfd78348b15f4e7a8",
-			"branch": "improvements",
-			"path": "/xdr2",
-			"notests": true
-		},
-		{
-			"importpath": "github.com/google/uuid",
-			"repository": "https://github.com/google/uuid",
-			"vcs": "git",
-			"revision": "6a5e28554805e78ea6141142aba763936c4761c0",
-			"branch": "master",
-			"notests": true
-		},
-		{
-			"importpath": "github.com/kr/pretty",
-			"repository": "https://github.com/dougm/pretty",
-			"vcs": "git",
-			"revision": "2ee9d7453c02ef7fa518a83ae23644eb8872186a",
-			"branch": "govmomi",
-			"notests": true
-		},
-		{
-			"importpath": "github.com/kr/pty",
-			"repository": "https://github.com/kr/pty",
-			"vcs": "git",
-			"revision": "95d05c1eef33a45bd58676b6ce28d105839b8d0b",
-			"branch": "master",
-			"notests": true
-		},
-		{
-			"importpath": "github.com/vmware/vmw-guestinfo",
-			"repository": "https://github.com/vmware/vmw-guestinfo",
-			"vcs": "git",
-			"revision": "25eff159a728be87e103a0b8045e08273f4dbec4",
-			"branch": "master",
-			"notests": true
-		}
-	]
-}
diff --git a/src/cmd/go/internal/modconv/testdata/juju.out b/src/cmd/go/internal/modconv/testdata/juju.out
deleted file mode 100644
index c2430b1..0000000
--- a/src/cmd/go/internal/modconv/testdata/juju.out
+++ /dev/null
@@ -1,106 +0,0 @@
-github.com/Azure/azure-sdk-for-go 902d95d9f311ae585ee98cfd18f418b467d60d5a
-github.com/Azure/go-autorest 6f40a8acfe03270d792cb8155e2942c09d7cff95
-github.com/ajstarks/svgo 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518
-github.com/altoros/gosigma 31228935eec685587914528585da4eb9b073c76d
-github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
-github.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c
-github.com/coreos/go-systemd 7b2428fec40033549c68f54e26e89e7ca9a9ce31
-github.com/dgrijalva/jwt-go 01aeca54ebda6e0fbfafd0a524d234159c05ec20
-github.com/dustin/go-humanize 145fabdb1ab757076a70a886d092a3af27f66f4c
-github.com/godbus/dbus 32c6cc29c14570de4cf6d7e7737d68fb2d01ad15
-github.com/golang/protobuf 4bd1920723d7b7c925de087aa32e2187708897f7
-github.com/google/go-querystring 9235644dd9e52eeae6fa48efd539fdc351a0af53
-github.com/gorilla/schema 08023a0215e7fc27a9aecd8b8c50913c40019478
-github.com/gorilla/websocket 804cb600d06b10672f2fbc0a336a7bee507a428e
-github.com/gosuri/uitable 36ee7e946282a3fb1cfecd476ddc9b35d8847e42
-github.com/joyent/gocommon ade826b8b54e81a779ccb29d358a45ba24b7809c
-github.com/joyent/gosdc 2f11feadd2d9891e92296a1077c3e2e56939547d
-github.com/joyent/gosign 0da0d5f1342065321c97812b1f4ac0c2b0bab56c
-github.com/juju/ansiterm b99631de12cf04a906c1d4e4ec54fb86eae5863d
-github.com/juju/blobstore 06056004b3d7b54bbb7984d830c537bad00fec21
-github.com/juju/bundlechanges 7725027b95e0d54635e0fb11efc2debdcdf19f75
-github.com/juju/cmd 9425a576247f348b9b40afe3b60085de63470de5
-github.com/juju/description d3742c23561884cd7d759ef7142340af1d22cab0
-github.com/juju/errors 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3
-github.com/juju/gnuflag 4e76c56581859c14d9d87e1ddbe29e1c0f10195f
-github.com/juju/go4 40d72ab9641a2a8c36a9c46a51e28367115c8e59
-github.com/juju/gojsonpointer afe8b77aa08f272b49e01b82de78510c11f61500
-github.com/juju/gojsonreference f0d24ac5ee330baa21721cdff56d45e4ee42628e
-github.com/juju/gojsonschema e1ad140384f254c82f89450d9a7c8dd38a632838
-github.com/juju/gomaasapi cfbc096bd45f276c17a391efc4db710b60ae3ad7
-github.com/juju/httpprof 14bf14c307672fd2456bdbf35d19cf0ccd3cf565
-github.com/juju/httprequest 266fd1e9debf09c037a63f074d099a2da4559ece
-github.com/juju/idmclient 4dc25171f675da4206b71695d3fd80e519ad05c1
-github.com/juju/jsonschema a0ef8b74ebcffeeff9fc374854deb4af388f037e
-github.com/juju/loggo 21bc4c63e8b435779a080e39e592969b7b90b889
-github.com/juju/mempool 24974d6c264fe5a29716e7d56ea24c4bd904b7cc
-github.com/juju/mutex 59c26ee163447c5c57f63ff71610d433862013de
-github.com/juju/persistent-cookiejar 5243747bf8f2d0897f6c7a52799327dc97d585e8
-github.com/juju/pubsub 9dcaca7eb4340dbf685aa7b3ad4cc4f8691a33d4
-github.com/juju/replicaset 6b5becf2232ce76656ea765d8d915d41755a1513
-github.com/juju/retry 62c62032529169c7ec02fa48f93349604c345e1f
-github.com/juju/rfc ebdbbdb950cd039a531d15cdc2ac2cbd94f068ee
-github.com/juju/romulus 98d6700423d63971f10ca14afea9ecf2b9b99f0f
-github.com/juju/schema 075de04f9b7d7580d60a1e12a0b3f50bb18e6998
-github.com/juju/terms-client 9b925afd677234e4146dde3cb1a11e187cbed64e
-github.com/juju/testing fce9bc4ebf7a77310c262ac4884e03b778eae06a
-github.com/juju/txn 28898197906200d603394d8e4ce537436529f1c5
-github.com/juju/usso 68a59c96c178fbbad65926e7f93db50a2cd14f33
-github.com/juju/utils 9f8aeb9b09e2d8c769be8317ccfa23f7eec62c26
-github.com/juju/version 1f41e27e54f21acccf9b2dddae063a782a8a7ceb
-github.com/juju/webbrowser 54b8c57083b4afb7dc75da7f13e2967b2606a507
-github.com/juju/xml eb759a627588d35166bc505fceb51b88500e291e
-github.com/juju/zip f6b1e93fa2e29a1d7d49b566b2b51efb060c982a
-github.com/julienschmidt/httprouter 77a895ad01ebc98a4dc95d8355bc825ce80a56f6
-github.com/lestrrat/go-jspointer f4881e611bdbe9fb413a7780721ef8400a1f2341
-github.com/lestrrat/go-jsref e452c7b5801d1c6494c9e7e0cbc7498c0f88dfd1
-github.com/lestrrat/go-jsschema b09d7650b822d2ea3dc83d5091a5e2acd8330051
-github.com/lestrrat/go-jsval b1258a10419fe0693f7b35ad65cd5074bc0ba1e5
-github.com/lestrrat/go-pdebug 2e6eaaa5717f81bda41d27070d3c966f40a1e75f
-github.com/lestrrat/go-structinfo f74c056fe41f860aa6264478c664a6fff8a64298
-github.com/lunixbochs/vtclean 4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36
-github.com/lxc/lxd 23da0234979fa6299565b91b529a6dbeb42ee36d
-github.com/masterzen/azure-sdk-for-go ee4f0065d00cd12b542f18f5bc45799e88163b12
-github.com/masterzen/simplexml 4572e39b1ab9fe03ee513ce6fc7e289e98482190
-github.com/masterzen/winrm 7a535cd943fccaeed196718896beec3fb51aff41
-github.com/masterzen/xmlpath 13f4951698adc0fa9c1dda3e275d489a24201161
-github.com/mattn/go-colorable ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8
-github.com/mattn/go-isatty 66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8
-github.com/mattn/go-runewidth d96d1bd051f2bd9e7e43d602782b37b93b1b5666
-github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
-github.com/nu7hatch/gouuid 179d4d0c4d8d407a32af483c2354df1d2c91e6c3
-github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
-github.com/prometheus/client_golang 575f371f7862609249a1be4c9145f429fe065e32
-github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common dd586c1c5abb0be59e60f942c22af711a2008cb4
-github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
-github.com/rogpeppe/fastuuid 6724a57986aff9bff1a1770e9347036def7c89f6
-github.com/vmware/govmomi c0c7ce63df7edd78e713257b924c89d9a2dac119
-golang.org/x/crypto 8e06e8ddd9629eb88639aba897641bff8031f1d3
-golang.org/x/net ea47fc708ee3e20177f3ca3716217c4ab75942cb
-golang.org/x/oauth2 11c60b6f71a6ad48ed6f93c65fa4c6f9b1b5b46a
-golang.org/x/sys 7a6e5648d140666db5d920909e082ca00a87ba2c
-golang.org/x/text 2910a502d2bf9e43193af9d68ca516529614eed3
-google.golang.org/api 0d3983fb069cb6651353fc44c5cb604e263f2a93
-google.golang.org/cloud f20d6dcccb44ed49de45ae3703312cb46e627db1
-gopkg.in/amz.v3 8c3190dff075bf5442c9eedbf8f8ed6144a099e7
-gopkg.in/check.v1 4f90aeace3a26ad7021961c297b22c42160c7b25
-gopkg.in/errgo.v1 442357a80af5c6bf9b6d51ae791a39c3421004f3
-gopkg.in/goose.v1 ac43167b647feacdd9a1e34ee81e574551bc748d
-gopkg.in/ini.v1 776aa739ce9373377cd16f526cdf06cb4c89b40f
-gopkg.in/juju/blobstore.v2 51fa6e26128d74e445c72d3a91af555151cc3654
-gopkg.in/juju/charm.v6-unstable 83771c4919d6810bce5b7e63f46bea5fbfed0b93
-gopkg.in/juju/charmrepo.v2-unstable e79aa298df89ea887c9bffec46063c24bfb730f7
-gopkg.in/juju/charmstore.v5-unstable fd1eef3002fc6b6daff5e97efab6f5056d22dcc7
-gopkg.in/juju/environschema.v1 7359fc7857abe2b11b5b3e23811a9c64cb6b01e0
-gopkg.in/juju/jujusvg.v2 d82160011935ef79fc7aca84aba2c6f74700fe75
-gopkg.in/juju/names.v2 0847c26d322a121e52614f969fb82eae2820c715
-gopkg.in/juju/worker.v1 6965b9d826717287bb002e02d1fd4d079978083e
-gopkg.in/macaroon-bakery.v1 469b44e6f1f9479e115c8ae879ef80695be624d5
-gopkg.in/macaroon.v1 ab3940c6c16510a850e1c2dd628b919f0f3f1464
-gopkg.in/mgo.v2 f2b6f6c918c452ad107eec89615f074e3bd80e33
-gopkg.in/natefinch/lumberjack.v2 514cbda263a734ae8caac038dadf05f8f3f9f738
-gopkg.in/natefinch/npipe.v2 c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6
-gopkg.in/retry.v1 c09f6b86ba4d5d2cf5bdf0665364aec9fd4815db
-gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
-gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
diff --git a/src/cmd/go/internal/modconv/testdata/juju.tsv b/src/cmd/go/internal/modconv/testdata/juju.tsv
deleted file mode 100644
index 0bddcef..0000000
--- a/src/cmd/go/internal/modconv/testdata/juju.tsv
+++ /dev/null
@@ -1,106 +0,0 @@
-github.com/Azure/azure-sdk-for-go	git	902d95d9f311ae585ee98cfd18f418b467d60d5a	2016-07-20T05:16:58Z
-github.com/Azure/go-autorest	git	6f40a8acfe03270d792cb8155e2942c09d7cff95	2016-07-19T23:14:56Z
-github.com/ajstarks/svgo	git	89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518	2014-10-04T21:11:59Z
-github.com/altoros/gosigma	git	31228935eec685587914528585da4eb9b073c76d	2015-04-08T14:52:32Z
-github.com/beorn7/perks	git	3ac7bf7a47d159a033b107610db8a1b6575507a4	2016-02-29T21:34:45Z
-github.com/bmizerany/pat	git	c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c	2016-02-17T10:32:42Z
-github.com/coreos/go-systemd	git	7b2428fec40033549c68f54e26e89e7ca9a9ce31	2016-02-02T21:14:25Z
-github.com/dgrijalva/jwt-go	git	01aeca54ebda6e0fbfafd0a524d234159c05ec20	2016-07-05T20:30:06Z
-github.com/dustin/go-humanize	git	145fabdb1ab757076a70a886d092a3af27f66f4c	2014-12-28T07:11:48Z
-github.com/godbus/dbus	git	32c6cc29c14570de4cf6d7e7737d68fb2d01ad15	2016-05-06T22:25:50Z
-github.com/golang/protobuf	git	4bd1920723d7b7c925de087aa32e2187708897f7	2016-11-09T07:27:36Z
-github.com/google/go-querystring	git	9235644dd9e52eeae6fa48efd539fdc351a0af53	2016-04-01T23:30:42Z
-github.com/gorilla/schema	git	08023a0215e7fc27a9aecd8b8c50913c40019478	2016-04-26T23:15:12Z
-github.com/gorilla/websocket	git	804cb600d06b10672f2fbc0a336a7bee507a428e	2017-02-14T17:41:18Z
-github.com/gosuri/uitable	git	36ee7e946282a3fb1cfecd476ddc9b35d8847e42	2016-04-04T20:39:58Z
-github.com/joyent/gocommon	git	ade826b8b54e81a779ccb29d358a45ba24b7809c	2016-03-20T19:31:33Z
-github.com/joyent/gosdc	git	2f11feadd2d9891e92296a1077c3e2e56939547d	2014-05-24T00:08:15Z
-github.com/joyent/gosign	git	0da0d5f1342065321c97812b1f4ac0c2b0bab56c	2014-05-24T00:07:34Z
-github.com/juju/ansiterm	git	b99631de12cf04a906c1d4e4ec54fb86eae5863d	2016-09-07T23:45:32Z
-github.com/juju/blobstore	git	06056004b3d7b54bbb7984d830c537bad00fec21	2015-07-29T11:18:58Z
-github.com/juju/bundlechanges	git	7725027b95e0d54635e0fb11efc2debdcdf19f75	2016-12-15T16:06:52Z
-github.com/juju/cmd	git	9425a576247f348b9b40afe3b60085de63470de5	2017-03-20T01:37:09Z
-github.com/juju/description	git	d3742c23561884cd7d759ef7142340af1d22cab0	2017-03-20T07:46:40Z
-github.com/juju/errors	git	1b5e39b83d1835fa480e0c2ddefb040ee82d58b3	2015-09-16T12:56:42Z
-github.com/juju/gnuflag	git	4e76c56581859c14d9d87e1ddbe29e1c0f10195f	2016-08-09T16:52:14Z
-github.com/juju/go4	git	40d72ab9641a2a8c36a9c46a51e28367115c8e59	2016-02-22T16:32:58Z
-github.com/juju/gojsonpointer	git	afe8b77aa08f272b49e01b82de78510c11f61500	2015-02-04T19:46:29Z
-github.com/juju/gojsonreference	git	f0d24ac5ee330baa21721cdff56d45e4ee42628e	2015-02-04T19:46:33Z
-github.com/juju/gojsonschema	git	e1ad140384f254c82f89450d9a7c8dd38a632838	2015-03-12T17:00:16Z
-github.com/juju/gomaasapi	git	cfbc096bd45f276c17a391efc4db710b60ae3ad7	2017-02-27T07:51:07Z
-github.com/juju/httpprof	git	14bf14c307672fd2456bdbf35d19cf0ccd3cf565	2014-12-17T16:00:36Z
-github.com/juju/httprequest	git	266fd1e9debf09c037a63f074d099a2da4559ece	2016-10-06T15:09:09Z
-github.com/juju/idmclient	git	4dc25171f675da4206b71695d3fd80e519ad05c1	2017-02-09T16:27:49Z
-github.com/juju/jsonschema	git	a0ef8b74ebcffeeff9fc374854deb4af388f037e	2016-11-02T18:19:19Z
-github.com/juju/loggo	git	21bc4c63e8b435779a080e39e592969b7b90b889	2017-02-22T12:20:47Z
-github.com/juju/mempool	git	24974d6c264fe5a29716e7d56ea24c4bd904b7cc	2016-02-05T10:49:27Z
-github.com/juju/mutex	git	59c26ee163447c5c57f63ff71610d433862013de	2016-06-17T01:09:07Z
-github.com/juju/persistent-cookiejar	git	5243747bf8f2d0897f6c7a52799327dc97d585e8	2016-11-15T13:33:28Z
-github.com/juju/pubsub	git	9dcaca7eb4340dbf685aa7b3ad4cc4f8691a33d4	2016-07-28T03:00:34Z
-github.com/juju/replicaset	git	6b5becf2232ce76656ea765d8d915d41755a1513	2016-11-25T16:08:49Z
-github.com/juju/retry	git	62c62032529169c7ec02fa48f93349604c345e1f	2015-10-29T02:48:21Z
-github.com/juju/rfc	git	ebdbbdb950cd039a531d15cdc2ac2cbd94f068ee	2016-07-11T02:42:13Z
-github.com/juju/romulus	git	98d6700423d63971f10ca14afea9ecf2b9b99f0f	2017-01-23T14:29:29Z
-github.com/juju/schema	git	075de04f9b7d7580d60a1e12a0b3f50bb18e6998	2016-04-20T04:42:03Z
-github.com/juju/terms-client	git	9b925afd677234e4146dde3cb1a11e187cbed64e	2016-08-09T13:19:00Z
-github.com/juju/testing	git	fce9bc4ebf7a77310c262ac4884e03b778eae06a	2017-02-22T09:01:19Z
-github.com/juju/txn	git	28898197906200d603394d8e4ce537436529f1c5	2016-11-16T04:07:55Z
-github.com/juju/usso	git	68a59c96c178fbbad65926e7f93db50a2cd14f33	2016-04-01T10:44:24Z
-github.com/juju/utils	git	9f8aeb9b09e2d8c769be8317ccfa23f7eec62c26	2017-02-15T08:19:00Z
-github.com/juju/version	git	1f41e27e54f21acccf9b2dddae063a782a8a7ceb	2016-10-31T05:19:06Z
-github.com/juju/webbrowser	git	54b8c57083b4afb7dc75da7f13e2967b2606a507	2016-03-09T14:36:29Z
-github.com/juju/xml	git	eb759a627588d35166bc505fceb51b88500e291e	2015-04-13T13:11:21Z
-github.com/juju/zip	git	f6b1e93fa2e29a1d7d49b566b2b51efb060c982a	2016-02-05T10:52:21Z
-github.com/julienschmidt/httprouter	git	77a895ad01ebc98a4dc95d8355bc825ce80a56f6	2015-10-13T22:55:20Z
-github.com/lestrrat/go-jspointer	git	f4881e611bdbe9fb413a7780721ef8400a1f2341	2016-02-29T02:13:54Z
-github.com/lestrrat/go-jsref	git	e452c7b5801d1c6494c9e7e0cbc7498c0f88dfd1	2016-06-01T01:32:40Z
-github.com/lestrrat/go-jsschema	git	b09d7650b822d2ea3dc83d5091a5e2acd8330051	2016-09-03T13:19:57Z
-github.com/lestrrat/go-jsval	git	b1258a10419fe0693f7b35ad65cd5074bc0ba1e5	2016-10-12T04:57:17Z
-github.com/lestrrat/go-pdebug	git	2e6eaaa5717f81bda41d27070d3c966f40a1e75f	2016-08-17T06:33:33Z
-github.com/lestrrat/go-structinfo	git	f74c056fe41f860aa6264478c664a6fff8a64298	2016-03-08T13:11:05Z
-github.com/lunixbochs/vtclean	git	4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36	2016-01-25T03:51:06Z
-github.com/lxc/lxd	git	23da0234979fa6299565b91b529a6dbeb42ee36d	2017-02-16T05:29:42Z
-github.com/masterzen/azure-sdk-for-go	git	ee4f0065d00cd12b542f18f5bc45799e88163b12	2016-10-14T13:56:28Z
-github.com/masterzen/simplexml	git	4572e39b1ab9fe03ee513ce6fc7e289e98482190	2016-06-08T18:30:07Z
-github.com/masterzen/winrm	git	7a535cd943fccaeed196718896beec3fb51aff41	2016-10-14T15:10:40Z
-github.com/masterzen/xmlpath	git	13f4951698adc0fa9c1dda3e275d489a24201161	2014-02-18T18:59:01Z
-github.com/mattn/go-colorable	git	ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8	2016-07-31T23:54:17Z
-github.com/mattn/go-isatty	git	66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8	2016-08-06T12:27:52Z
-github.com/mattn/go-runewidth	git	d96d1bd051f2bd9e7e43d602782b37b93b1b5666	2015-11-18T07:21:59Z
-github.com/matttproud/golang_protobuf_extensions	git	c12348ce28de40eed0136aa2b644d0ee0650e56c	2016-04-24T11:30:07Z
-github.com/nu7hatch/gouuid	git	179d4d0c4d8d407a32af483c2354df1d2c91e6c3	2013-12-21T20:05:32Z
-github.com/pkg/errors	git	839d9e913e063e28dfd0e6c7b7512793e0a48be9	2016-10-02T05:25:12Z
-github.com/prometheus/client_golang	git	575f371f7862609249a1be4c9145f429fe065e32	2016-11-24T15:57:32Z
-github.com/prometheus/client_model	git	fa8ad6fec33561be4280a8f0514318c79d7f6cb6	2015-02-12T10:17:44Z
-github.com/prometheus/common	git	dd586c1c5abb0be59e60f942c22af711a2008cb4	2016-05-03T22:05:32Z
-github.com/prometheus/procfs	git	abf152e5f3e97f2fafac028d2cc06c1feb87ffa5	2016-04-11T19:08:41Z
-github.com/rogpeppe/fastuuid	git	6724a57986aff9bff1a1770e9347036def7c89f6	2015-01-06T09:32:20Z
-github.com/vmware/govmomi	git	c0c7ce63df7edd78e713257b924c89d9a2dac119	2016-06-30T15:37:42Z
-golang.org/x/crypto	git	8e06e8ddd9629eb88639aba897641bff8031f1d3	2016-09-22T17:06:29Z
-golang.org/x/net	git	ea47fc708ee3e20177f3ca3716217c4ab75942cb	2015-08-29T23:03:18Z
-golang.org/x/oauth2	git	11c60b6f71a6ad48ed6f93c65fa4c6f9b1b5b46a	2015-03-25T02:00:22Z
-golang.org/x/sys	git	7a6e5648d140666db5d920909e082ca00a87ba2c	2017-02-01T05:12:45Z
-golang.org/x/text	git	2910a502d2bf9e43193af9d68ca516529614eed3	2016-07-26T16:48:57Z
-google.golang.org/api	git	0d3983fb069cb6651353fc44c5cb604e263f2a93	2014-12-10T23:51:26Z
-google.golang.org/cloud	git	f20d6dcccb44ed49de45ae3703312cb46e627db1	2015-03-19T22:36:35Z
-gopkg.in/amz.v3	git	8c3190dff075bf5442c9eedbf8f8ed6144a099e7	2016-12-15T13:08:49Z
-gopkg.in/check.v1	git	4f90aeace3a26ad7021961c297b22c42160c7b25	2016-01-05T16:49:36Z
-gopkg.in/errgo.v1	git	442357a80af5c6bf9b6d51ae791a39c3421004f3	2016-12-22T12:58:16Z
-gopkg.in/goose.v1	git	ac43167b647feacdd9a1e34ee81e574551bc748d	2017-02-15T01:56:23Z
-gopkg.in/ini.v1	git	776aa739ce9373377cd16f526cdf06cb4c89b40f	2016-02-22T23:24:41Z
-gopkg.in/juju/blobstore.v2	git	51fa6e26128d74e445c72d3a91af555151cc3654	2016-01-25T02:37:03Z
-gopkg.in/juju/charm.v6-unstable	git	83771c4919d6810bce5b7e63f46bea5fbfed0b93	2016-10-03T20:31:18Z
-gopkg.in/juju/charmrepo.v2-unstable	git	e79aa298df89ea887c9bffec46063c24bfb730f7	2016-11-17T15:25:28Z
-gopkg.in/juju/charmstore.v5-unstable	git	fd1eef3002fc6b6daff5e97efab6f5056d22dcc7	2016-09-16T10:09:07Z
-gopkg.in/juju/environschema.v1	git	7359fc7857abe2b11b5b3e23811a9c64cb6b01e0	2015-11-04T11:58:10Z
-gopkg.in/juju/jujusvg.v2	git	d82160011935ef79fc7aca84aba2c6f74700fe75	2016-06-09T10:52:15Z
-gopkg.in/juju/names.v2	git	0847c26d322a121e52614f969fb82eae2820c715	2016-11-02T13:43:03Z
-gopkg.in/juju/worker.v1	git	6965b9d826717287bb002e02d1fd4d079978083e	2017-03-08T00:24:58Z
-gopkg.in/macaroon-bakery.v1	git	469b44e6f1f9479e115c8ae879ef80695be624d5	2016-06-22T12:14:21Z
-gopkg.in/macaroon.v1	git	ab3940c6c16510a850e1c2dd628b919f0f3f1464	2015-01-21T11:42:31Z
-gopkg.in/mgo.v2	git	f2b6f6c918c452ad107eec89615f074e3bd80e33	2016-08-18T01:52:18Z
-gopkg.in/natefinch/lumberjack.v2	git	514cbda263a734ae8caac038dadf05f8f3f9f738	2016-01-25T11:17:49Z
-gopkg.in/natefinch/npipe.v2	git	c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6	2016-06-21T03:49:01Z
-gopkg.in/retry.v1	git	c09f6b86ba4d5d2cf5bdf0665364aec9fd4815db	2016-10-25T18:14:30Z
-gopkg.in/tomb.v1	git	dd632973f1e7218eb1089048e0798ec9ae7dceb8	2014-10-24T13:56:13Z
-gopkg.in/yaml.v2	git	a3f3340b5840cee44f372bddb5880fcbc419b46a	2017-02-08T14:18:51Z
diff --git a/src/cmd/go/internal/modconv/testdata/moby.out b/src/cmd/go/internal/modconv/testdata/moby.out
deleted file mode 100644
index 2cb2e05..0000000
--- a/src/cmd/go/internal/modconv/testdata/moby.out
+++ /dev/null
@@ -1,105 +0,0 @@
-github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
-github.com/Microsoft/hcsshim v0.6.5
-github.com/Microsoft/go-winio v0.4.5
-github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
-github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
-github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609
-github.com/gorilla/context v1.1
-github.com/gorilla/mux v1.1
-github.com/Microsoft/opengcs v0.3.4
-github.com/kr/pty 5cf931ef8f
-github.com/mattn/go-shellwords v1.0.3
-github.com/sirupsen/logrus v1.0.3
-github.com/tchap/go-patricia v2.2.6
-github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
-golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
-golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
-github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
-github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
-golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
-github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
-github.com/pmezard/go-difflib v1.0.0
-github.com/gotestyourself/gotestyourself v1.1.0
-github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
-github.com/imdario/mergo 0.2.1
-golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
-github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
-github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
-github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
-github.com/docker/libnetwork 68f1039f172434709a4550fe92e3e058406c74ce
-github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
-github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
-github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
-github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
-github.com/hashicorp/memberlist v0.1.0
-github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
-github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
-github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
-github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
-github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
-github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
-github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
-github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
-github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
-github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
-github.com/coreos/etcd v3.2.1
-github.com/coreos/go-semver v0.2.0
-github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
-github.com/hashicorp/consul v0.5.2
-github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
-github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
-github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
-github.com/vbatts/tar-split v0.10.1
-github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
-github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
-github.com/pborman/uuid v1.0
-google.golang.org/grpc v1.3.0
-github.com/opencontainers/runc 0351df1c5a66838d0c392b4ac4cf9450de844e2d
-github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
-github.com/opencontainers/runtime-spec v1.0.0
-github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
-github.com/coreos/go-systemd v4
-github.com/godbus/dbus v4.0.0
-github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
-github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
-github.com/Graylog2/go-gelf v2
-github.com/fluent/fluent-logger-golang v1.2.1
-github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
-github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
-github.com/fsnotify/fsnotify v1.4.2
-github.com/aws/aws-sdk-go v1.4.22
-github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0
-github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
-github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf
-golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0
-google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823
-cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525
-github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
-google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
-github.com/containerd/containerd 06b9cb35161009dcb7123345749fef02f7cea8e0
-github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
-github.com/docker/swarmkit 872861d2ae46958af7ead1d5fffb092c73afbaf0
-github.com/gogo/protobuf v0.4
-github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
-github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
-golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
-golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
-github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
-github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
-github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
-github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
-github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
-github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
-github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
-github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
-github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
-github.com/matttproud/golang_protobuf_extensions v1.0.0
-github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
-github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
-github.com/spf13/cobra v1.5.1
-github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c
-github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
-github.com/opencontainers/selinux v1.0.0-rc1
diff --git a/src/cmd/go/internal/modconv/testdata/moby.vconf b/src/cmd/go/internal/modconv/testdata/moby.vconf
deleted file mode 100644
index 53b90d1..0000000
--- a/src/cmd/go/internal/modconv/testdata/moby.vconf
+++ /dev/null
@@ -1,149 +0,0 @@
-# the following lines are in sorted order, FYI
-github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
-github.com/Microsoft/hcsshim v0.6.5
-github.com/Microsoft/go-winio v0.4.5
-github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
-github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
-github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
-github.com/gorilla/context v1.1
-github.com/gorilla/mux v1.1
-github.com/Microsoft/opengcs v0.3.4
-github.com/kr/pty 5cf931ef8f
-github.com/mattn/go-shellwords v1.0.3
-github.com/sirupsen/logrus v1.0.3
-github.com/tchap/go-patricia v2.2.6
-github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
-golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
-golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
-github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
-github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
-golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
-github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
-github.com/pmezard/go-difflib v1.0.0
-github.com/gotestyourself/gotestyourself v1.1.0
-
-github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
-github.com/imdario/mergo 0.2.1
-golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
-
-github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
-github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
-github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
-
-#get libnetwork packages
-github.com/docker/libnetwork 68f1039f172434709a4550fe92e3e058406c74ce 
-github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
-github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
-github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
-github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
-github.com/hashicorp/memberlist v0.1.0
-github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
-github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
-github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
-github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
-github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
-github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
-github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
-github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
-github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
-github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
-github.com/coreos/etcd v3.2.1
-github.com/coreos/go-semver v0.2.0
-github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
-github.com/hashicorp/consul v0.5.2
-github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
-github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
-
-# get graph and distribution packages
-github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
-github.com/vbatts/tar-split v0.10.1
-github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
-
-# get go-zfs packages
-github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
-github.com/pborman/uuid v1.0
-
-google.golang.org/grpc v1.3.0
-
-# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
-github.com/opencontainers/runc 0351df1c5a66838d0c392b4ac4cf9450de844e2d
-github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
-github.com/opencontainers/runtime-spec v1.0.0
-
-github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
-
-# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
-github.com/coreos/go-systemd v4
-github.com/godbus/dbus v4.0.0
-github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
-github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
-
-# gelf logging driver deps
-github.com/Graylog2/go-gelf v2
-
-github.com/fluent/fluent-logger-golang v1.2.1
-# fluent-logger-golang deps
-github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
-github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
-
-# fsnotify
-github.com/fsnotify/fsnotify v1.4.2
-
-# awslogs deps
-github.com/aws/aws-sdk-go v1.4.22
-github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0
-github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
-
-# logentries
-github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf
-
-# gcplogs deps
-golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0
-google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823
-cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525
-github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
-google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
-
-# containerd
-github.com/containerd/containerd 06b9cb35161009dcb7123345749fef02f7cea8e0
-github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
-
-# cluster
-github.com/docker/swarmkit 872861d2ae46958af7ead1d5fffb092c73afbaf0
-github.com/gogo/protobuf v0.4
-github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
-github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
-golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
-golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
-github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
-github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
-github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
-github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
-github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
-github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
-github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
-github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
-github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
-github.com/matttproud/golang_protobuf_extensions v1.0.0
-github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
-github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
-
-# cli
-github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git
-github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
-
-# metrics
-github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
-
-github.com/opencontainers/selinux v1.0.0-rc1
-
-# archive/tar
-# mkdir -p ./vendor/archive
-# git clone git://github.com/tonistiigi/go-1.git ./go
-# git --git-dir ./go/.git --work-tree ./go checkout revert-prefix-ignore
-# cp -a go/src/archive/tar ./vendor/archive/tar
-# rm -rf ./go
-# vndr
diff --git a/src/cmd/go/internal/modconv/testdata/panicparse.out b/src/cmd/go/internal/modconv/testdata/panicparse.out
deleted file mode 100644
index 8830033..0000000
--- a/src/cmd/go/internal/modconv/testdata/panicparse.out
+++ /dev/null
@@ -1,8 +0,0 @@
-github.com/kr/pretty 737b74a46c4bf788349f72cb256fed10aea4d0ac
-github.com/kr/text 7cafcd837844e784b526369c9bce262804aebc60
-github.com/maruel/ut a9c9f15ccfa6f8b90182a53df32f4745586fbae3
-github.com/mattn/go-colorable 9056b7a9f2d1f2d96498d6d146acd1f9d5ed3d59
-github.com/mattn/go-isatty 56b76bdf51f7708750eac80fa38b952bb9f32639
-github.com/mgutz/ansi c286dcecd19ff979eeb73ea444e479b903f2cfcb
-github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
-golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
diff --git a/src/cmd/go/internal/modconv/testdata/panicparse.vyml b/src/cmd/go/internal/modconv/testdata/panicparse.vyml
deleted file mode 100644
index ff3d43f..0000000
--- a/src/cmd/go/internal/modconv/testdata/panicparse.vyml
+++ /dev/null
@@ -1,17 +0,0 @@
-vendors:
-- path: github.com/kr/pretty
-  rev: 737b74a46c4bf788349f72cb256fed10aea4d0ac
-- path: github.com/kr/text
-  rev: 7cafcd837844e784b526369c9bce262804aebc60
-- path: github.com/maruel/ut
-  rev: a9c9f15ccfa6f8b90182a53df32f4745586fbae3
-- path: github.com/mattn/go-colorable
-  rev: 9056b7a9f2d1f2d96498d6d146acd1f9d5ed3d59
-- path: github.com/mattn/go-isatty
-  rev: 56b76bdf51f7708750eac80fa38b952bb9f32639
-- path: github.com/mgutz/ansi
-  rev: c286dcecd19ff979eeb73ea444e479b903f2cfcb
-- path: github.com/pmezard/go-difflib
-  rev: 792786c7400a136282c1664665ae0a8db921c6c2
-- path: golang.org/x/sys
-  rev: a646d33e2ee3172a661fc09bca23bb4889a41bc8
diff --git a/src/cmd/go/internal/modconv/testdata/prometheus.out b/src/cmd/go/internal/modconv/testdata/prometheus.out
deleted file mode 100644
index d11b8ec..0000000
--- a/src/cmd/go/internal/modconv/testdata/prometheus.out
+++ /dev/null
@@ -1,258 +0,0 @@
-cloud.google.com/go/compute/metadata c589d0c9f0d81640c518354c7bcae77d99820aa3
-cloud.google.com/go/internal c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/Azure/azure-sdk-for-go/arm/compute bd73d950fa4440dae889bd9917bff7cef539f86e
-github.com/Azure/azure-sdk-for-go/arm/network bd73d950fa4440dae889bd9917bff7cef539f86e
-github.com/Azure/go-autorest/autorest 8a25372bbfec739b8719a9e3987400d15ef9e179
-github.com/Azure/go-autorest/autorest/azure 8a25372bbfec739b8719a9e3987400d15ef9e179
-github.com/Azure/go-autorest/autorest/date 8a25372bbfec739b8719a9e3987400d15ef9e179
-github.com/Azure/go-autorest/autorest/to 8a25372bbfec739b8719a9e3987400d15ef9e179
-github.com/Azure/go-autorest/autorest/validation 8a25372bbfec739b8719a9e3987400d15ef9e179
-github.com/PuerkitoBio/purell c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/PuerkitoBio/urlesc c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/asaskevich/govalidator 7b3beb6df3c42abd3509abfc3bcacc0fbfb7c877
-github.com/aws/aws-sdk-go/aws 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/awserr 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/awsutil 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/client 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/client/metadata 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/corehandlers 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/credentials 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/credentials/endpointcreds 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/credentials/stscreds 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/defaults 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/ec2metadata 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/request 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/session 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/aws/signer/v4 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/private/endpoints 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/private/protocol 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/private/protocol/ec2query 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/private/protocol/query 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/private/protocol/query/queryutil 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/private/protocol/rest 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/private/waiter 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/service/ec2 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/aws/aws-sdk-go/service/sts 707203bc55114ed114446bf57949c5c211d8b7c0
-github.com/beorn7/perks/quantile 3ac7bf7a47d159a033b107610db8a1b6575507a4
-github.com/blang/semver c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/coreos/go-oidc/http c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/coreos/go-oidc/jose c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/coreos/go-oidc/key c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/coreos/go-oidc/oauth2 c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/coreos/go-oidc/oidc c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/coreos/pkg/health c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/coreos/pkg/httputil c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/coreos/pkg/timeutil c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/davecgh/go-spew/spew c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/dgrijalva/jwt-go 9ed569b5d1ac936e6494082958d63a6aa4fff99a
-github.com/docker/distribution/digest c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/docker/distribution/reference c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/emicklei/go-restful c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/emicklei/go-restful/log c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/emicklei/go-restful/swagger c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/ghodss/yaml c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/go-ini/ini 6e4869b434bd001f6983749881c7ead3545887d8
-github.com/go-openapi/jsonpointer c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/go-openapi/jsonreference c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/go-openapi/spec c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/go-openapi/swag c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/gogo/protobuf/proto c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/gogo/protobuf/sortkeys c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/golang/glog c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/golang/protobuf/proto 98fa357170587e470c5f27d3c3ea0947b71eb455
-github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
-github.com/google/gofuzz c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/hashicorp/consul/api daacc4be8bee214e3fc4b32a6dd385f5ef1b4c36
-github.com/hashicorp/go-cleanhttp ad28ea4487f05916463e2423a55166280e8254b5
-github.com/hashicorp/serf/coordinate 1d4fa605f6ff3ed628d7ae5eda7c0e56803e72a5
-github.com/influxdb/influxdb/client 291aaeb9485b43b16875c238482b2f7d0a22a13b
-github.com/influxdb/influxdb/tsdb 291aaeb9485b43b16875c238482b2f7d0a22a13b
-github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
-github.com/jonboulle/clockwork c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/juju/ratelimit c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/julienschmidt/httprouter 109e267447e95ad1bb48b758e40dd7453eb7b039
-github.com/mailru/easyjson/buffer c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/mailru/easyjson/jlexer c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/mailru/easyjson/jwriter c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/matttproud/golang_protobuf_extensions/pbutil fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
-github.com/miekg/dns 58f52c57ce9df13460ac68200cef30a008b9c468
-github.com/pborman/uuid c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/pmezard/go-difflib/difflib d77da356e56a7428ad25149ca77381849a6a5232
-github.com/prometheus/client_golang/prometheus c5b7fccd204277076155f10851dad72b76a49317
-github.com/prometheus/client_model/go fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common/expfmt 85637ea67b04b5c3bb25e671dacded2977f8f9f6
-github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg 85637ea67b04b5c3bb25e671dacded2977f8f9f6
-github.com/prometheus/common/log 85637ea67b04b5c3bb25e671dacded2977f8f9f6
-github.com/prometheus/common/model 85637ea67b04b5c3bb25e671dacded2977f8f9f6
-github.com/prometheus/common/route 85637ea67b04b5c3bb25e671dacded2977f8f9f6
-github.com/prometheus/common/version 85637ea67b04b5c3bb25e671dacded2977f8f9f6
-github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
-github.com/samuel/go-zookeeper/zk 177002e16a0061912f02377e2dd8951a8b3551bc
-github.com/spf13/pflag c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/stretchr/testify/assert d77da356e56a7428ad25149ca77381849a6a5232
-github.com/stretchr/testify/require d77da356e56a7428ad25149ca77381849a6a5232
-github.com/syndtr/goleveldb/leveldb 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/cache 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/comparer 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/errors 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/filter 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/iterator 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/journal 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/memdb 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/opt 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/storage 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/table 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/syndtr/goleveldb/leveldb/util 6b4daa5362b502898ddf367c5c11deb9e7a5c727
-github.com/ugorji/go/codec c589d0c9f0d81640c518354c7bcae77d99820aa3
-github.com/vaughan0/go-ini a98ad7ee00ec53921f08832bc06ecf7fd600e6a1
-golang.org/x/net/context b336a971b799939dd16ae9b1df8334cb8b977c4d
-golang.org/x/net/context/ctxhttp b336a971b799939dd16ae9b1df8334cb8b977c4d
-golang.org/x/net/http2 c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/net/http2/hpack c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/net/idna c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/net/internal/timeseries 6250b412798208e6c90b03b7c4f226de5aa299e2
-golang.org/x/net/lex/httplex c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/net/netutil bc3663df0ac92f928d419e31e0d2af22e683a5a2
-golang.org/x/oauth2 65a8d08c6292395d47053be10b3c5e91960def76
-golang.org/x/oauth2/google 65a8d08c6292395d47053be10b3c5e91960def76
-golang.org/x/oauth2/internal 65a8d08c6292395d47053be10b3c5e91960def76
-golang.org/x/oauth2/jws 65a8d08c6292395d47053be10b3c5e91960def76
-golang.org/x/oauth2/jwt 65a8d08c6292395d47053be10b3c5e91960def76
-golang.org/x/sys/unix c200b10b5d5e122be351b67af224adc6128af5bf
-golang.org/x/sys/windows c200b10b5d5e122be351b67af224adc6128af5bf
-golang.org/x/sys/windows/registry c200b10b5d5e122be351b67af224adc6128af5bf
-golang.org/x/sys/windows/svc/eventlog c200b10b5d5e122be351b67af224adc6128af5bf
-golang.org/x/text/cases c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/text/internal/tag c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/text/language c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/text/runes c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/text/secure/bidirule c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/text/secure/precis c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/text/transform c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/text/unicode/bidi c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/text/unicode/norm c589d0c9f0d81640c518354c7bcae77d99820aa3
-golang.org/x/text/width c589d0c9f0d81640c518354c7bcae77d99820aa3
-google.golang.org/api/compute/v1 63ade871fd3aec1225809d496e81ec91ab76ea29
-google.golang.org/api/gensupport 63ade871fd3aec1225809d496e81ec91ab76ea29
-google.golang.org/api/googleapi 63ade871fd3aec1225809d496e81ec91ab76ea29
-google.golang.org/api/googleapi/internal/uritemplates 63ade871fd3aec1225809d496e81ec91ab76ea29
-google.golang.org/appengine 267c27e7492265b84fc6719503b14a1e17975d79
-google.golang.org/appengine/internal 267c27e7492265b84fc6719503b14a1e17975d79
-google.golang.org/appengine/internal/app_identity 267c27e7492265b84fc6719503b14a1e17975d79
-google.golang.org/appengine/internal/base 267c27e7492265b84fc6719503b14a1e17975d79
-google.golang.org/appengine/internal/datastore 267c27e7492265b84fc6719503b14a1e17975d79
-google.golang.org/appengine/internal/log 267c27e7492265b84fc6719503b14a1e17975d79
-google.golang.org/appengine/internal/modules 267c27e7492265b84fc6719503b14a1e17975d79
-google.golang.org/appengine/internal/remote_api 4f7eeb5305a4ba1966344836ba4af9996b7b4e05
-google.golang.org/appengine/internal/urlfetch 267c27e7492265b84fc6719503b14a1e17975d79
-google.golang.org/appengine/urlfetch 267c27e7492265b84fc6719503b14a1e17975d79
-google.golang.org/cloud/compute/metadata 0a83eba2cadb60eb22123673c8fb6fca02b03c94
-google.golang.org/cloud/internal 0a83eba2cadb60eb22123673c8fb6fca02b03c94
-gopkg.in/fsnotify.v1 30411dbcefb7a1da7e84f75530ad3abe4011b4f8
-gopkg.in/inf.v0 c589d0c9f0d81640c518354c7bcae77d99820aa3
-gopkg.in/yaml.v2 7ad95dd0798a40da1ccdff6dff35fd177b5edf40
-k8s.io/client-go/1.5/discovery c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/apps/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/authentication/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/authorization/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/autoscaling/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/batch/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/certificates/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/core/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/extensions/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/policy/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/rbac/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/kubernetes/typed/storage/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/api c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/api/errors c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/api/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/api/meta c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/api/meta/metatypes c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/api/resource c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/api/unversioned c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/api/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/api/validation/path c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apimachinery c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apimachinery/announced c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apimachinery/registered c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/apps c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/apps/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/apps/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/authentication c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/authentication/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/authentication/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/authorization c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/authorization/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/authorization/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/autoscaling c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/autoscaling/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/autoscaling/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/batch c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/batch/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/batch/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/batch/v2alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/certificates c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/certificates/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/certificates/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/extensions c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/extensions/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/extensions/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/policy c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/policy/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/policy/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/rbac c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/rbac/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/rbac/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/storage c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/storage/install c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/apis/storage/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/auth/user c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/conversion c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/conversion/queryparams c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/fields c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/genericapiserver/openapi/common c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/labels c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/runtime c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/runtime/serializer c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/runtime/serializer/json c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/runtime/serializer/protobuf c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/runtime/serializer/recognizer c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/runtime/serializer/streaming c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/runtime/serializer/versioning c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/selection c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/third_party/forked/golang/reflect c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/types c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/cert c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/clock c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/errors c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/flowcontrol c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/framer c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/integer c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/intstr c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/json c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/labels c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/net c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/parsers c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/rand c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/runtime c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/sets c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/uuid c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/validation c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/validation/field c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/wait c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/util/yaml c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/version c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/watch c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/pkg/watch/versioned c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/plugin/pkg/client/auth c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/plugin/pkg/client/auth/gcp c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/plugin/pkg/client/auth/oidc c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/rest c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/tools/cache c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/tools/clientcmd/api c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/tools/metrics c589d0c9f0d81640c518354c7bcae77d99820aa3
-k8s.io/client-go/1.5/transport c589d0c9f0d81640c518354c7bcae77d99820aa3
diff --git a/src/cmd/go/internal/modconv/testdata/prometheus.vjson b/src/cmd/go/internal/modconv/testdata/prometheus.vjson
deleted file mode 100644
index 648bec4..0000000
--- a/src/cmd/go/internal/modconv/testdata/prometheus.vjson
+++ /dev/null
@@ -1,1605 +0,0 @@
-{
-	"comment": "",
-	"ignore": "test appengine",
-	"package": [
-		{
-			"checksumSHA1": "Cslv4/ITyQmgjSUhNXFu8q5bqOU=",
-			"origin": "k8s.io/client-go/1.5/vendor/cloud.google.com/go/compute/metadata",
-			"path": "cloud.google.com/go/compute/metadata",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "hiJXjkFEGy+sDFf6O58Ocdy9Rnk=",
-			"origin": "k8s.io/client-go/1.5/vendor/cloud.google.com/go/internal",
-			"path": "cloud.google.com/go/internal",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "oIt4tXgFYnZJBsCac1BQLnTWALM=",
-			"path": "github.com/Azure/azure-sdk-for-go/arm/compute",
-			"revision": "bd73d950fa4440dae889bd9917bff7cef539f86e",
-			"revisionTime": "2016-10-28T18:31:11Z"
-		},
-		{
-			"checksumSHA1": "QKi6LiSyD5GnRK8ExpMgZl4XiMI=",
-			"path": "github.com/Azure/azure-sdk-for-go/arm/network",
-			"revision": "bd73d950fa4440dae889bd9917bff7cef539f86e",
-			"revisionTime": "2016-10-28T18:31:11Z"
-		},
-		{
-			"checksumSHA1": "eVSHe6GIHj9/ziFrQLZ1SC7Nn6k=",
-			"path": "github.com/Azure/go-autorest/autorest",
-			"revision": "8a25372bbfec739b8719a9e3987400d15ef9e179",
-			"revisionTime": "2016-10-25T18:07:34Z"
-		},
-		{
-			"checksumSHA1": "0sYi0JprevG/PZjtMbOh8h0pt0g=",
-			"path": "github.com/Azure/go-autorest/autorest/azure",
-			"revision": "8a25372bbfec739b8719a9e3987400d15ef9e179",
-			"revisionTime": "2016-10-25T18:07:34Z"
-		},
-		{
-			"checksumSHA1": "q9Qz8PAxK5FTOZwgYKe5Lj38u4c=",
-			"path": "github.com/Azure/go-autorest/autorest/date",
-			"revision": "8a25372bbfec739b8719a9e3987400d15ef9e179",
-			"revisionTime": "2016-10-25T18:07:34Z"
-		},
-		{
-			"checksumSHA1": "Ev8qCsbFjDlMlX0N2tYAhYQFpUc=",
-			"path": "github.com/Azure/go-autorest/autorest/to",
-			"revision": "8a25372bbfec739b8719a9e3987400d15ef9e179",
-			"revisionTime": "2016-10-25T18:07:34Z"
-		},
-		{
-			"checksumSHA1": "oBixceM+55gdk47iff8DSEIh3po=",
-			"path": "github.com/Azure/go-autorest/autorest/validation",
-			"revision": "8a25372bbfec739b8719a9e3987400d15ef9e179",
-			"revisionTime": "2016-10-25T18:07:34Z"
-		},
-		{
-			"checksumSHA1": "IatnluZB5jTVUncMN134e4VOV34=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/PuerkitoBio/purell",
-			"path": "github.com/PuerkitoBio/purell",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "E/Tz8z0B/gaR551g+XqPKAhcteM=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/PuerkitoBio/urlesc",
-			"path": "github.com/PuerkitoBio/urlesc",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "BdLdZP/C2uOO3lqk9X3NCKFpXa4=",
-			"path": "github.com/asaskevich/govalidator",
-			"revision": "7b3beb6df3c42abd3509abfc3bcacc0fbfb7c877",
-			"revisionTime": "2016-10-01T16:31:30Z"
-		},
-		{
-			"checksumSHA1": "WNfR3yhLjRC5/uccgju/bwrdsxQ=",
-			"path": "github.com/aws/aws-sdk-go/aws",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=",
-			"path": "github.com/aws/aws-sdk-go/aws/awserr",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "+q4vdl3l1Wom8K1wfIpJ4jlFsbY=",
-			"path": "github.com/aws/aws-sdk-go/aws/awsutil",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "/232RBWA3KnT7U+wciPS2+wmvR0=",
-			"path": "github.com/aws/aws-sdk-go/aws/client",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=",
-			"path": "github.com/aws/aws-sdk-go/aws/client/metadata",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "c1N3Loy3AS9zD+m5CzpPNAED39U=",
-			"path": "github.com/aws/aws-sdk-go/aws/corehandlers",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "zu5C95rmCZff6NYZb62lEaT5ibE=",
-			"path": "github.com/aws/aws-sdk-go/aws/credentials",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=",
-			"path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=",
-			"path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "4Ipx+5xN0gso+cENC2MHMWmQlR4=",
-			"path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "DwhFsNluCFEwqzyp3hbJR3q2Wqs=",
-			"path": "github.com/aws/aws-sdk-go/aws/defaults",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "8E0fEBUJY/1lJOyVxzTxMGQGInk=",
-			"path": "github.com/aws/aws-sdk-go/aws/ec2metadata",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "5Ac22YMTBmrX/CXaEIXzWljr8UY=",
-			"path": "github.com/aws/aws-sdk-go/aws/request",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "eOo6evLMAxQfo7Qkc5/h5euN1Sw=",
-			"path": "github.com/aws/aws-sdk-go/aws/session",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "diXvBs1LRC0RJ9WK6sllWKdzC04=",
-			"path": "github.com/aws/aws-sdk-go/aws/signer/v4",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "Esab5F8KswqkTdB4TtjSvZgs56k=",
-			"path": "github.com/aws/aws-sdk-go/private/endpoints",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=",
-			"path": "github.com/aws/aws-sdk-go/private/protocol",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=",
-			"path": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=",
-			"path": "github.com/aws/aws-sdk-go/private/protocol/query",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=",
-			"path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=",
-			"path": "github.com/aws/aws-sdk-go/private/protocol/rest",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "eUEkjyMPAuekKBE4ou+nM9tXEas=",
-			"path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=",
-			"path": "github.com/aws/aws-sdk-go/private/waiter",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "6h4tJ9wVtbYb9wG4srtUxyPoAYM=",
-			"path": "github.com/aws/aws-sdk-go/service/ec2",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "ouwhxcAsIYQ6oJbMRdLW/Ys/iyg=",
-			"path": "github.com/aws/aws-sdk-go/service/sts",
-			"revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
-			"revisionTime": "2016-11-02T21:59:28Z"
-		},
-		{
-			"checksumSHA1": "4QnLdmB1kG3N+KlDd1N+G9TWAGQ=",
-			"path": "github.com/beorn7/perks/quantile",
-			"revision": "3ac7bf7a47d159a033b107610db8a1b6575507a4",
-			"revisionTime": "2016-02-29T21:34:45Z"
-		},
-		{
-			"checksumSHA1": "n+s4YwtzpMWW5Rt0dEaQa7NHDGQ=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/blang/semver",
-			"path": "github.com/blang/semver",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "Z2AOGSmDKKvI6nuxa+UPjQWpIeM=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/go-oidc/http",
-			"path": "github.com/coreos/go-oidc/http",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "8yvt1xKCgNwuuavJdxRnvaIjrIc=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/go-oidc/jose",
-			"path": "github.com/coreos/go-oidc/jose",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "zhXKrWBSSJLqZxVE/Xsw0M9ynFQ=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/go-oidc/key",
-			"path": "github.com/coreos/go-oidc/key",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "bkW0mnXvmHQwHprW/6wrbpP7lAk=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/go-oidc/oauth2",
-			"path": "github.com/coreos/go-oidc/oauth2",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "E1x2k5FdhJ+dzFrh3kCmC6aJfVw=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/go-oidc/oidc",
-			"path": "github.com/coreos/go-oidc/oidc",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "O0UMBRCOD9ItMayDqLQ2MJEjkVE=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/pkg/health",
-			"path": "github.com/coreos/pkg/health",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "74vyZz/d49FZXMbFaHOfCGvSLj0=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/pkg/httputil",
-			"path": "github.com/coreos/pkg/httputil",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "etBdQ0LN6ojGunfvUt6B5C3FNrQ=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/pkg/timeutil",
-			"path": "github.com/coreos/pkg/timeutil",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "SdSd7pyjONWWTHc5XE3AhglLo34=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/davecgh/go-spew/spew",
-			"path": "github.com/davecgh/go-spew/spew",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "2Fy1Y6Z3lRRX1891WF/+HT4XS2I=",
-			"path": "github.com/dgrijalva/jwt-go",
-			"revision": "9ed569b5d1ac936e6494082958d63a6aa4fff99a",
-			"revisionTime": "2016-11-01T19:39:35Z"
-		},
-		{
-			"checksumSHA1": "f1wARLDzsF/JoyN01yoxXEwFIp8=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/docker/distribution/digest",
-			"path": "github.com/docker/distribution/digest",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "PzXRTLmmqWXxmDqdIXLcRYBma18=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/docker/distribution/reference",
-			"path": "github.com/docker/distribution/reference",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "1vQR+ZyudsjKio6RNKmWhwzGTb0=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/emicklei/go-restful",
-			"path": "github.com/emicklei/go-restful",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "3xWz4fZ9xW+CfADpYoPFcZCYJ4E=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/emicklei/go-restful/log",
-			"path": "github.com/emicklei/go-restful/log",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "J7CtF9gIs2yH9A7lPQDDrhYxiRk=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/emicklei/go-restful/swagger",
-			"path": "github.com/emicklei/go-restful/swagger",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "ww7LVo7jNJ1o6sfRcromEHKyY+o=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/ghodss/yaml",
-			"path": "github.com/ghodss/yaml",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "cVyhKIRI2gQrgpn5qrBeAqErmWM=",
-			"path": "github.com/go-ini/ini",
-			"revision": "6e4869b434bd001f6983749881c7ead3545887d8",
-			"revisionTime": "2016-08-27T06:11:18Z"
-		},
-		{
-			"checksumSHA1": "NaZnW0tKj/b0k5WzcMD0twrLbrE=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/jsonpointer",
-			"path": "github.com/go-openapi/jsonpointer",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "3LJXjMDxPY+veIqzQtiAvK3hXnY=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/jsonreference",
-			"path": "github.com/go-openapi/jsonreference",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "faeB3fny260hQ/gEfEXa1ZQTGtk=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/spec",
-			"path": "github.com/go-openapi/spec",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "wGpZwJ5HZtReou8A3WEV1Gdxs6k=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/swag",
-			"path": "github.com/go-openapi/swag",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "BIyZQL97iG7mzZ2UMR3XpiXbZdc=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/gogo/protobuf/proto",
-			"path": "github.com/gogo/protobuf/proto",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "e6cMbpJj41MpihS5eP4SIliRBK4=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/gogo/protobuf/sortkeys",
-			"path": "github.com/gogo/protobuf/sortkeys",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "URsJa4y/sUUw/STmbeYx9EKqaYE=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/golang/glog",
-			"path": "github.com/golang/glog",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "yDh5kmmr0zEF1r+rvYqbZcR7iLs=",
-			"path": "github.com/golang/protobuf/proto",
-			"revision": "98fa357170587e470c5f27d3c3ea0947b71eb455",
-			"revisionTime": "2016-10-12T20:53:35Z"
-		},
-		{
-			"checksumSHA1": "2a/SsTUBMKtcM6VtpbdPGO+c6c8=",
-			"path": "github.com/golang/snappy",
-			"revision": "d9eb7a3d35ec988b8585d4a0068e462c27d28380",
-			"revisionTime": "2016-05-29T05:00:41Z"
-		},
-		{
-			"checksumSHA1": "/yFfUp3tGt6cK22UVzbq8SjPDCU=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/google/gofuzz",
-			"path": "github.com/google/gofuzz",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "LclVLJYrBi03PBjsVPpgoMbUDQ8=",
-			"path": "github.com/hashicorp/consul/api",
-			"revision": "daacc4be8bee214e3fc4b32a6dd385f5ef1b4c36",
-			"revisionTime": "2016-10-28T04:06:46Z"
-		},
-		{
-			"checksumSHA1": "Uzyon2091lmwacNsl1hCytjhHtg=",
-			"path": "github.com/hashicorp/go-cleanhttp",
-			"revision": "ad28ea4487f05916463e2423a55166280e8254b5",
-			"revisionTime": "2016-04-07T17:41:26Z"
-		},
-		{
-			"checksumSHA1": "E3Xcanc9ouQwL+CZGOUyA/+giLg=",
-			"path": "github.com/hashicorp/serf/coordinate",
-			"revision": "1d4fa605f6ff3ed628d7ae5eda7c0e56803e72a5",
-			"revisionTime": "2016-10-07T00:41:22Z"
-		},
-		{
-			"path": "github.com/influxdb/influxdb/client",
-			"revision": "291aaeb9485b43b16875c238482b2f7d0a22a13b",
-			"revisionTime": "2015-09-16T14:41:53+02:00"
-		},
-		{
-			"path": "github.com/influxdb/influxdb/tsdb",
-			"revision": "291aaeb9485b43b16875c238482b2f7d0a22a13b",
-			"revisionTime": "2015-09-16T14:41:53+02:00"
-		},
-		{
-			"checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=",
-			"path": "github.com/jmespath/go-jmespath",
-			"revision": "bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d",
-			"revisionTime": "2016-08-03T19:07:31Z"
-		},
-		{
-			"checksumSHA1": "9ZVOEbIXnTuYpVqce4en8rwlkPE=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/jonboulle/clockwork",
-			"path": "github.com/jonboulle/clockwork",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "gA95N2LM2hEJLoqrTPaFsSWDJ2Y=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/juju/ratelimit",
-			"path": "github.com/juju/ratelimit",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "Farach1xcmsQYrhiUfkwF2rbIaE=",
-			"path": "github.com/julienschmidt/httprouter",
-			"revision": "109e267447e95ad1bb48b758e40dd7453eb7b039",
-			"revisionTime": "2015-09-05T19:25:33+02:00"
-		},
-		{
-			"checksumSHA1": "urY45++NYCue4nh4k8OjUFnIGfU=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/mailru/easyjson/buffer",
-			"path": "github.com/mailru/easyjson/buffer",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "yTDKAM4KBgOvXRsZC50zg0OChvM=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/mailru/easyjson/jlexer",
-			"path": "github.com/mailru/easyjson/jlexer",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "4+d+6rhM1pei6lBguhqSEW7LaXs=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/mailru/easyjson/jwriter",
-			"path": "github.com/mailru/easyjson/jwriter",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "Q2vw4HZBbnU8BLFt8VrzStwqSJg=",
-			"path": "github.com/matttproud/golang_protobuf_extensions/pbutil",
-			"revision": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a",
-			"revisionTime": "2015-04-06T19:39:34+02:00"
-		},
-		{
-			"checksumSHA1": "Wahi4g/9XiHhSLAJ+8jskg71PCU=",
-			"path": "github.com/miekg/dns",
-			"revision": "58f52c57ce9df13460ac68200cef30a008b9c468",
-			"revisionTime": "2016-10-18T06:08:08Z"
-		},
-		{
-			"checksumSHA1": "3YJklSuzSE1Rt8A+2dhiWSmf/fw=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/pborman/uuid",
-			"path": "github.com/pborman/uuid",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "zKKp5SZ3d3ycKe4EKMNT0BqAWBw=",
-			"origin": "github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib",
-			"path": "github.com/pmezard/go-difflib/difflib",
-			"revision": "d77da356e56a7428ad25149ca77381849a6a5232",
-			"revisionTime": "2016-06-15T09:26:46Z"
-		},
-		{
-			"checksumSHA1": "KkB+77Ziom7N6RzSbyUwYGrmDeU=",
-			"path": "github.com/prometheus/client_golang/prometheus",
-			"revision": "c5b7fccd204277076155f10851dad72b76a49317",
-			"revisionTime": "2016-08-17T15:48:24Z"
-		},
-		{
-			"checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=",
-			"path": "github.com/prometheus/client_model/go",
-			"revision": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6",
-			"revisionTime": "2015-02-12T10:17:44Z"
-		},
-		{
-			"checksumSHA1": "mHyjbJ3BWOfUV6q9f5PBt0gaY1k=",
-			"path": "github.com/prometheus/common/expfmt",
-			"revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
-			"revisionTime": "2016-10-02T21:02:34Z"
-		},
-		{
-			"checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=",
-			"path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
-			"revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
-			"revisionTime": "2016-10-02T21:02:34Z"
-		},
-		{
-			"checksumSHA1": "UU6hIfhVjnAYDADQEfE/3T7Ddm8=",
-			"path": "github.com/prometheus/common/log",
-			"revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
-			"revisionTime": "2016-10-02T21:02:34Z"
-		},
-		{
-			"checksumSHA1": "nFie+rxcX5WdIv1diZ+fu3aj6lE=",
-			"path": "github.com/prometheus/common/model",
-			"revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
-			"revisionTime": "2016-10-02T21:02:34Z"
-		},
-		{
-			"checksumSHA1": "QQKJYoGcY10nIHxhBEHwjwUZQzk=",
-			"path": "github.com/prometheus/common/route",
-			"revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
-			"revisionTime": "2016-10-02T21:02:34Z"
-		},
-		{
-			"checksumSHA1": "91KYK0SpvkaMJJA2+BcxbVnyRO0=",
-			"path": "github.com/prometheus/common/version",
-			"revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
-			"revisionTime": "2016-10-02T21:02:34Z"
-		},
-		{
-			"checksumSHA1": "W218eJZPXJG783fUr/z6IaAZyes=",
-			"path": "github.com/prometheus/procfs",
-			"revision": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5",
-			"revisionTime": "2016-04-11T19:08:41Z"
-		},
-		{
-			"checksumSHA1": "+49Vr4Me28p3cR+gxX5SUQHbbas=",
-			"path": "github.com/samuel/go-zookeeper/zk",
-			"revision": "177002e16a0061912f02377e2dd8951a8b3551bc",
-			"revisionTime": "2015-08-17T10:50:50-07:00"
-		},
-		{
-			"checksumSHA1": "YuPBOVkkE3uuBh4RcRUTF0n+frs=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/spf13/pflag",
-			"path": "github.com/spf13/pflag",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "iydUphwYqZRq3WhstEdGsbvBAKs=",
-			"path": "github.com/stretchr/testify/assert",
-			"revision": "d77da356e56a7428ad25149ca77381849a6a5232",
-			"revisionTime": "2016-06-15T09:26:46Z"
-		},
-		{
-			"checksumSHA1": "P9FJpir2c4G5PA46qEkaWy3l60U=",
-			"path": "github.com/stretchr/testify/require",
-			"revision": "d77da356e56a7428ad25149ca77381849a6a5232",
-			"revisionTime": "2016-06-15T09:26:46Z"
-		},
-		{
-			"checksumSHA1": "VhcnDY37sYAnL8WjfYQN9YYl+W4=",
-			"path": "github.com/syndtr/goleveldb/leveldb",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "EKIow7XkgNdWvR/982ffIZxKG8Y=",
-			"path": "github.com/syndtr/goleveldb/leveldb/cache",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=",
-			"path": "github.com/syndtr/goleveldb/leveldb/comparer",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "1DRAxdlWzS4U0xKN/yQ/fdNN7f0=",
-			"path": "github.com/syndtr/goleveldb/leveldb/errors",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=",
-			"path": "github.com/syndtr/goleveldb/leveldb/filter",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "8dXuAVIsbtaMiGGuHjzGR6Ny/5c=",
-			"path": "github.com/syndtr/goleveldb/leveldb/iterator",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "gJY7bRpELtO0PJpZXgPQ2BYFJ88=",
-			"path": "github.com/syndtr/goleveldb/leveldb/journal",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "j+uaQ6DwJ50dkIdfMQu1TXdlQcY=",
-			"path": "github.com/syndtr/goleveldb/leveldb/memdb",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "UmQeotV+m8/FduKEfLOhjdp18rs=",
-			"path": "github.com/syndtr/goleveldb/leveldb/opt",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "/Wvv9HeJTN9UUjdjwUlz7X4ioIo=",
-			"path": "github.com/syndtr/goleveldb/leveldb/storage",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "JTJA+u8zk7EXy1UUmpFPNGvtO2A=",
-			"path": "github.com/syndtr/goleveldb/leveldb/table",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "4zil8Gwg8VPkDn1YzlgCvtukJFU=",
-			"path": "github.com/syndtr/goleveldb/leveldb/util",
-			"revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
-			"revisionTime": "2016-10-11T05:00:08Z"
-		},
-		{
-			"checksumSHA1": "f6Aew+ZA+HBAXCw6/xTST3mB0Lw=",
-			"origin": "k8s.io/client-go/1.5/vendor/github.com/ugorji/go/codec",
-			"path": "github.com/ugorji/go/codec",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "sFD8LpJPQtWLwGda3edjf5mNUbs=",
-			"path": "github.com/vaughan0/go-ini",
-			"revision": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1",
-			"revisionTime": "2013-09-23T16:52:12+02:00"
-		},
-		{
-			"checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=",
-			"path": "golang.org/x/net/context",
-			"revision": "b336a971b799939dd16ae9b1df8334cb8b977c4d",
-			"revisionTime": "2016-10-27T19:58:04Z"
-		},
-		{
-			"checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=",
-			"path": "golang.org/x/net/context/ctxhttp",
-			"revision": "b336a971b799939dd16ae9b1df8334cb8b977c4d",
-			"revisionTime": "2016-10-27T19:58:04Z"
-		},
-		{
-			"checksumSHA1": "SPYGC6DQrH9jICccUsOfbvvhB4g=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/net/http2",
-			"path": "golang.org/x/net/http2",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "EYNaHp7XdLWRydUCE0amEkKAtgk=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/net/http2/hpack",
-			"path": "golang.org/x/net/http2/hpack",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "gXiSniT8fevWOVPVKopYgrdzi60=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/net/idna",
-			"path": "golang.org/x/net/idna",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "/k7k6eJDkxXx6K9Zpo/OwNm58XM=",
-			"path": "golang.org/x/net/internal/timeseries",
-			"revision": "6250b412798208e6c90b03b7c4f226de5aa299e2",
-			"revisionTime": "2016-08-24T22:20:41Z"
-		},
-		{
-			"checksumSHA1": "yhndhWXMs/VSEDLks4dNyFMQStA=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/net/lex/httplex",
-			"path": "golang.org/x/net/lex/httplex",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "7WASrg0PEueWDDRHkFhEEN6Qrms=",
-			"path": "golang.org/x/net/netutil",
-			"revision": "bc3663df0ac92f928d419e31e0d2af22e683a5a2",
-			"revisionTime": "2016-06-21T20:48:10Z"
-		},
-		{
-			"checksumSHA1": "mktBVED98G2vv+OKcSgtnFVZC1Y=",
-			"path": "golang.org/x/oauth2",
-			"revision": "65a8d08c6292395d47053be10b3c5e91960def76",
-			"revisionTime": "2016-06-07T03:33:14Z"
-		},
-		{
-			"checksumSHA1": "2rk6lthfQa5Rfydj8j7+dilKGbo=",
-			"path": "golang.org/x/oauth2/google",
-			"revision": "65a8d08c6292395d47053be10b3c5e91960def76",
-			"revisionTime": "2016-06-07T03:33:14Z"
-		},
-		{
-			"checksumSHA1": "W/GiDqzsagBnR7/yEvxatMhUDBs=",
-			"path": "golang.org/x/oauth2/internal",
-			"revision": "65a8d08c6292395d47053be10b3c5e91960def76",
-			"revisionTime": "2016-06-07T03:33:14Z"
-		},
-		{
-			"checksumSHA1": "CPTYHWrVL4jA0B1IuC0hvgcE2AQ=",
-			"path": "golang.org/x/oauth2/jws",
-			"revision": "65a8d08c6292395d47053be10b3c5e91960def76",
-			"revisionTime": "2016-06-07T03:33:14Z"
-		},
-		{
-			"checksumSHA1": "xifBSq0Pn6pIoPA/o3tyzq8X4Ds=",
-			"path": "golang.org/x/oauth2/jwt",
-			"revision": "65a8d08c6292395d47053be10b3c5e91960def76",
-			"revisionTime": "2016-06-07T03:33:14Z"
-		},
-		{
-			"checksumSHA1": "aVgPDgwY3/t4J/JOw9H3FVMHqh0=",
-			"path": "golang.org/x/sys/unix",
-			"revision": "c200b10b5d5e122be351b67af224adc6128af5bf",
-			"revisionTime": "2016-10-22T18:22:21Z"
-		},
-		{
-			"checksumSHA1": "fpW2dhGFC6SrVzipJx7fjg2DIH8=",
-			"path": "golang.org/x/sys/windows",
-			"revision": "c200b10b5d5e122be351b67af224adc6128af5bf",
-			"revisionTime": "2016-10-22T18:22:21Z"
-		},
-		{
-			"checksumSHA1": "PjYlbMS0ttyZYlaevvjA/gV3g1c=",
-			"path": "golang.org/x/sys/windows/registry",
-			"revision": "c200b10b5d5e122be351b67af224adc6128af5bf",
-			"revisionTime": "2016-10-22T18:22:21Z"
-		},
-		{
-			"checksumSHA1": "uVlUSSKplihZG7N+QJ6fzDZ4Kh8=",
-			"path": "golang.org/x/sys/windows/svc/eventlog",
-			"revision": "c200b10b5d5e122be351b67af224adc6128af5bf",
-			"revisionTime": "2016-10-22T18:22:21Z"
-		},
-		{
-			"checksumSHA1": "QQpKbWuqvhmxVr/hfEYdWzzcXRM=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/cases",
-			"path": "golang.org/x/text/cases",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "iAsGo/kxvnwILbJVUCd0ZcqZO/Q=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/internal/tag",
-			"path": "golang.org/x/text/internal/tag",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "mQ6PCGHY7K0oPjKbYD8wsTjm/P8=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/language",
-			"path": "golang.org/x/text/language",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "WpeH2TweiuiZAQVTJNO5vyZAQQA=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/runes",
-			"path": "golang.org/x/text/runes",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "1VjEPyjdi0xOiIN/Alkqiad/B/c=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/secure/bidirule",
-			"path": "golang.org/x/text/secure/bidirule",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "FcK7VslktIAWj5jnWVnU2SesBq0=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/secure/precis",
-			"path": "golang.org/x/text/secure/precis",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "nwlu7UTwYbCj9l5f3a7t2ROwNzM=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/transform",
-			"path": "golang.org/x/text/transform",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "nWJ9R1+Xw41f/mM3b7BYtv77CfI=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/unicode/bidi",
-			"path": "golang.org/x/text/unicode/bidi",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "BAZ96wCGUj6HdY9sG60Yw09KWA4=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/unicode/norm",
-			"path": "golang.org/x/text/unicode/norm",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "AZMILKWqLP99UilLgbGZ+uzIVrM=",
-			"origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/width",
-			"path": "golang.org/x/text/width",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "AjdmRXf0fiy6Bec9mNlsGsmZi1k=",
-			"path": "google.golang.org/api/compute/v1",
-			"revision": "63ade871fd3aec1225809d496e81ec91ab76ea29",
-			"revisionTime": "2016-05-31T06:42:46Z"
-		},
-		{
-			"checksumSHA1": "OtsMVXY89Hc/bBXdDp84atFQawM=",
-			"path": "google.golang.org/api/gensupport",
-			"revision": "63ade871fd3aec1225809d496e81ec91ab76ea29",
-			"revisionTime": "2016-05-31T06:42:46Z"
-		},
-		{
-			"checksumSHA1": "yQREK/OWrz9PLljbr127+xFk6J0=",
-			"path": "google.golang.org/api/googleapi",
-			"revision": "63ade871fd3aec1225809d496e81ec91ab76ea29",
-			"revisionTime": "2016-05-31T06:42:46Z"
-		},
-		{
-			"checksumSHA1": "ii4ET3JHk3vkMUEcg+9t/1RZSUU=",
-			"path": "google.golang.org/api/googleapi/internal/uritemplates",
-			"revision": "63ade871fd3aec1225809d496e81ec91ab76ea29",
-			"revisionTime": "2016-05-31T06:42:46Z"
-		},
-		{
-			"checksumSHA1": "N3KZEuQ9O1QwJXcCJbe7Czwroo4=",
-			"path": "google.golang.org/appengine",
-			"revision": "267c27e7492265b84fc6719503b14a1e17975d79",
-			"revisionTime": "2016-06-21T05:59:22Z"
-		},
-		{
-			"checksumSHA1": "G9Xp1ScdsfcKsw+PcWunivRRP3o=",
-			"path": "google.golang.org/appengine/internal",
-			"revision": "267c27e7492265b84fc6719503b14a1e17975d79",
-			"revisionTime": "2016-06-21T05:59:22Z"
-		},
-		{
-			"checksumSHA1": "x6Thdfyasqd68dWZWqzWWeIfAfI=",
-			"path": "google.golang.org/appengine/internal/app_identity",
-			"revision": "267c27e7492265b84fc6719503b14a1e17975d79",
-			"revisionTime": "2016-06-21T05:59:22Z"
-		},
-		{
-			"checksumSHA1": "TsNO8P0xUlLNyh3Ic/tzSp/fDWM=",
-			"path": "google.golang.org/appengine/internal/base",
-			"revision": "267c27e7492265b84fc6719503b14a1e17975d79",
-			"revisionTime": "2016-06-21T05:59:22Z"
-		},
-		{
-			"checksumSHA1": "5QsV5oLGSfKZqTCVXP6NRz5T4Tw=",
-			"path": "google.golang.org/appengine/internal/datastore",
-			"revision": "267c27e7492265b84fc6719503b14a1e17975d79",
-			"revisionTime": "2016-06-21T05:59:22Z"
-		},
-		{
-			"checksumSHA1": "Gep2T9zmVYV8qZfK2gu3zrmG6QE=",
-			"path": "google.golang.org/appengine/internal/log",
-			"revision": "267c27e7492265b84fc6719503b14a1e17975d79",
-			"revisionTime": "2016-06-21T05:59:22Z"
-		},
-		{
-			"checksumSHA1": "eLZVX1EHLclFtQnjDIszsdyWRHo=",
-			"path": "google.golang.org/appengine/internal/modules",
-			"revision": "267c27e7492265b84fc6719503b14a1e17975d79",
-			"revisionTime": "2016-06-21T05:59:22Z"
-		},
-		{
-			"checksumSHA1": "a1XY7rz3BieOVqVI2Et6rKiwQCk=",
-			"path": "google.golang.org/appengine/internal/remote_api",
-			"revision": "4f7eeb5305a4ba1966344836ba4af9996b7b4e05",
-			"revisionTime": "2016-08-19T23:33:10Z"
-		},
-		{
-			"checksumSHA1": "QtAbHtHmDzcf6vOV9eqlCpKgjiw=",
-			"path": "google.golang.org/appengine/internal/urlfetch",
-			"revision": "267c27e7492265b84fc6719503b14a1e17975d79",
-			"revisionTime": "2016-06-21T05:59:22Z"
-		},
-		{
-			"checksumSHA1": "akOV9pYnCbcPA8wJUutSQVibdyg=",
-			"path": "google.golang.org/appengine/urlfetch",
-			"revision": "267c27e7492265b84fc6719503b14a1e17975d79",
-			"revisionTime": "2016-06-21T05:59:22Z"
-		},
-		{
-			"checksumSHA1": "Wp8g9MHRmK8SwcyGVCoGtPx+5Lo=",
-			"path": "google.golang.org/cloud/compute/metadata",
-			"revision": "0a83eba2cadb60eb22123673c8fb6fca02b03c94",
-			"revisionTime": "2016-06-21T15:59:29Z"
-		},
-		{
-			"checksumSHA1": "U7dGDNwEHORvJFMoNSXErKE7ITg=",
-			"path": "google.golang.org/cloud/internal",
-			"revision": "0a83eba2cadb60eb22123673c8fb6fca02b03c94",
-			"revisionTime": "2016-06-21T15:59:29Z"
-		},
-		{
-			"checksumSHA1": "JfVmsMwyeeepbdw4q4wpN07BuFg=",
-			"path": "gopkg.in/fsnotify.v1",
-			"revision": "30411dbcefb7a1da7e84f75530ad3abe4011b4f8",
-			"revisionTime": "2016-04-12T13:37:56Z"
-		},
-		{
-			"checksumSHA1": "pfQwQtWlFezJq0Viroa/L+v+yDM=",
-			"origin": "k8s.io/client-go/1.5/vendor/gopkg.in/inf.v0",
-			"path": "gopkg.in/inf.v0",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "KgT+peLCcuh0/m2mpoOZXuxXmwc=",
-			"path": "gopkg.in/yaml.v2",
-			"revision": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40",
-			"revisionTime": "2015-06-24T11:29:02+01:00"
-		},
-		{
-			"checksumSHA1": "st0Nbu4zwLcP3mz03lDOJVZtn8Y=",
-			"path": "k8s.io/client-go/1.5/discovery",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "S+OzpkipMb46LGZoWuveqSLAcoM=",
-			"path": "k8s.io/client-go/1.5/kubernetes",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "yCBn8ig1TUMrk+ljtK0nDr7E5Vo=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/apps/v1alpha1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "ZRnUz5NrpvJsXAjtnRdEv5UYhSI=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/authentication/v1beta1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "TY55Np20olmPMzXgfVlIUIyqv04=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/authorization/v1beta1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "FRByJsFff/6lPH20FtJPaK1NPWI=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/autoscaling/v1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "3Cy2as7HnQ2FDcvpNbatpFWx0P4=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/batch/v1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "RUKywApIbSLLsfkYxXzifh7HIvs=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/certificates/v1alpha1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "4+Lsxu+sYgzsS2JOHP7CdrZLSKc=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/core/v1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "H8jzevN03YUfmf2krJt0qj2P9sU=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/extensions/v1beta1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "hrpA6xxtwj3oMcQbFxI2cDhO2ZA=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/policy/v1alpha1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "B2+F12NeMwrOHvHK2ALyEcr3UGA=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/rbac/v1alpha1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "h2eSNUym87RWPlez7UKujShwrUQ=",
-			"path": "k8s.io/client-go/1.5/kubernetes/typed/storage/v1beta1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "+oIykJ3A0wYjAWbbrGo0jNnMLXw=",
-			"path": "k8s.io/client-go/1.5/pkg/api",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "UsUsIdhuy5Ej2vI0hbmSsrimoaQ=",
-			"path": "k8s.io/client-go/1.5/pkg/api/errors",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "Eo6LLHFqG6YznIAKr2mVjuqUj6k=",
-			"path": "k8s.io/client-go/1.5/pkg/api/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "dYznkLcCEai21z1dX8kZY7uDsck=",
-			"path": "k8s.io/client-go/1.5/pkg/api/meta",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "b06esG4xMj/YNFD85Lqq00cx+Yo=",
-			"path": "k8s.io/client-go/1.5/pkg/api/meta/metatypes",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "L9svak1yut0Mx8r9VLDOwpqZzBk=",
-			"path": "k8s.io/client-go/1.5/pkg/api/resource",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "m7jGshKDLH9kdokfa6MwAqzxRQk=",
-			"path": "k8s.io/client-go/1.5/pkg/api/unversioned",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "iI6s5WAexr1PEfqrbvuscB+oVik=",
-			"path": "k8s.io/client-go/1.5/pkg/api/v1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "ikac34qI/IkTWHnfi8pPl9irPyo=",
-			"path": "k8s.io/client-go/1.5/pkg/api/validation/path",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "MJyygSPp8N6z+7SPtcROz4PEwas=",
-			"path": "k8s.io/client-go/1.5/pkg/apimachinery",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "EGb4IcSTQ1VXCmX0xcyG5GpWId8=",
-			"path": "k8s.io/client-go/1.5/pkg/apimachinery/announced",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "vhSyuINHQhCsDKTyBmvJT1HzDHI=",
-			"path": "k8s.io/client-go/1.5/pkg/apimachinery/registered",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "rXeBnwLg8ZFe6m5/Ki7tELVBYDk=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/apps",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "KzHaG858KV1tBh5cuLInNcm+G5s=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/apps/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "fynWdchlRbPaxuST2oGDKiKLTqE=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/apps/v1alpha1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "hreIYssoH4Ef/+Aglpitn3GNLR4=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/authentication",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "EgUqJH4CqB9vXVg6T8II2OEt5LE=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/authentication/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "Z3DKgomzRPGcBv/8hlL6pfnIpXI=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/authentication/v1beta1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "GpuScB2Z+NOT4WIQg1mVvVSDUts=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/authorization",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "+u3UD+HY9lBH+PFi/2B4W564JEw=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/authorization/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "zIFzgWjmlWNLHGHMpCpDCvoLtKY=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/authorization/v1beta1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "tdpzQFQyVkt5kCLTvtKTVqT+maE=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/autoscaling",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "nb6LbYGS5tv8H8Ovptg6M7XuDZ4=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/autoscaling/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "DNb1/nl/5RDdckRrJoXBRagzJXs=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/autoscaling/v1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "4bLhH2vNl5l4Qp6MjLhWyWVAPE0=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/batch",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "RpAAEynmxlvOlLLZK1KEUQRnYzk=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/batch/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "uWJ2BHmjL/Gq4FFlNkqiN6vvPyM=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/batch/v1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "mHWt/p724dKeP1vqLtWQCye7zaE=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/batch/v2alpha1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "6dJ1dGfXkB3A42TOtMaY/rvv4N8=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/certificates",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "Bkrhm6HbFYANwtzUE8eza9SWBk0=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/certificates/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "nRRPIBQ5O3Ad24kscNtK+gPC+fk=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/certificates/v1alpha1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "KUMhoaOg9GXHN/aAVvSLO18SgqU=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/extensions",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "eSo2VhNAYtesvmpEPqn05goW4LY=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/extensions/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "DunWIPrCC5iGMWzkaaugMOxD+hg=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/extensions/v1beta1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "rVGYi2ko0E7vL5OZSMYX+NAGPYw=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/policy",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "llJHd2H0LzABGB6BcletzIHnexo=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/policy/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "j44bqyY13ldnuCtysYE8nRkMD7o=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/policy/v1alpha1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "vT7rFxowcKMTYc55mddePqUFRgE=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/rbac",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "r1MzUXsG+Zyn30aU8I5R5dgrJPA=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/rbac/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "aNfO8xn8VDO3fM9CpVCe6EIB+GA=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/rbac/v1alpha1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "rQCxrbisCXmj2wymlYG63kcTL9I=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/storage",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "wZyxh5nt5Eh6kF7YNAIYukKWWy0=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/storage/install",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "P8ANOt/I4Cs3QtjVXWmDA/gpQdg=",
-			"path": "k8s.io/client-go/1.5/pkg/apis/storage/v1beta1",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "qnVPwzvNLz2mmr3BXdU9qIhQXXU=",
-			"path": "k8s.io/client-go/1.5/pkg/auth/user",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "KrIchxhapSs242yAy8yrTS1XlZo=",
-			"path": "k8s.io/client-go/1.5/pkg/conversion",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "weZqKFcOhcnF47eDDHXzluCKSF0=",
-			"path": "k8s.io/client-go/1.5/pkg/conversion/queryparams",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "T3EMfyXZX5939/OOQ1JU+Nmbk4k=",
-			"path": "k8s.io/client-go/1.5/pkg/fields",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "2v11s3EBH8UBl2qfImT29tQN2kM=",
-			"path": "k8s.io/client-go/1.5/pkg/genericapiserver/openapi/common",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "GvBlph6PywK3zguou/T9kKNNdoQ=",
-			"path": "k8s.io/client-go/1.5/pkg/labels",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "Vtrgy827r0rWzIAgvIWY4flu740=",
-			"path": "k8s.io/client-go/1.5/pkg/runtime",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "SEcZqRATexhgHvDn+eHvMc07UJs=",
-			"path": "k8s.io/client-go/1.5/pkg/runtime/serializer",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "qzYKG9YZSj8l/W1QVTOrGAry/BM=",
-			"path": "k8s.io/client-go/1.5/pkg/runtime/serializer/json",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "F7h+8zZ0JPLYkac4KgSVljguBE4=",
-			"path": "k8s.io/client-go/1.5/pkg/runtime/serializer/protobuf",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "CvySOL8C85e3y7EWQ+Au4cwUZJM=",
-			"path": "k8s.io/client-go/1.5/pkg/runtime/serializer/recognizer",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "eCitoKeIun+lJzYFhAfdSIIicSM=",
-			"path": "k8s.io/client-go/1.5/pkg/runtime/serializer/streaming",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "kVWvZuLGltJ4YqQsiaCLRRLDDK0=",
-			"path": "k8s.io/client-go/1.5/pkg/runtime/serializer/versioning",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "m51+LAeQ9RK1KHX+l2iGcwbVCKs=",
-			"path": "k8s.io/client-go/1.5/pkg/selection",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "dp4IWcC3U6a0HeOdVCDQWODWCbw=",
-			"path": "k8s.io/client-go/1.5/pkg/third_party/forked/golang/reflect",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "ER898XJD1ox4d71gKZD8TLtTSpM=",
-			"path": "k8s.io/client-go/1.5/pkg/types",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "BVdXtnLDlmBQksRPfHOIG+qdeVg=",
-			"path": "k8s.io/client-go/1.5/pkg/util",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "nnh8Sa4dCupxRI4bbKaozGp1d/A=",
-			"path": "k8s.io/client-go/1.5/pkg/util/cert",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "S32d5uduNlwouM8+mIz+ALpliUQ=",
-			"path": "k8s.io/client-go/1.5/pkg/util/clock",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "Y6rWC0TUw2/uUeUjJ7kazyEUzBQ=",
-			"path": "k8s.io/client-go/1.5/pkg/util/errors",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "C7IfEAdCOePw3/IraaZCNXuYXLw=",
-			"path": "k8s.io/client-go/1.5/pkg/util/flowcontrol",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "EuslQHnhBSRXaWimYqLEqhMPV48=",
-			"path": "k8s.io/client-go/1.5/pkg/util/framer",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "ByO18NbZwiifFr8qtLyfJAHXguA=",
-			"path": "k8s.io/client-go/1.5/pkg/util/integer",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "ww+RfsoIlUBDwThg2oqC5QVz33Y=",
-			"path": "k8s.io/client-go/1.5/pkg/util/intstr",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "7E8f8dLlXW7u6r9sggMjvB4HEiw=",
-			"path": "k8s.io/client-go/1.5/pkg/util/json",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "d0pFZxMJG9j95acNmaIM1l+X+QU=",
-			"path": "k8s.io/client-go/1.5/pkg/util/labels",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "wCN7u1lE+25neM9jXeI7aE8EAfk=",
-			"path": "k8s.io/client-go/1.5/pkg/util/net",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "g+kBkxcb+tYmFtRRly+VE+JAIfw=",
-			"path": "k8s.io/client-go/1.5/pkg/util/parsers",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "S4wUnE5VkaWWrkLbgPL/1oNLJ4g=",
-			"path": "k8s.io/client-go/1.5/pkg/util/rand",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "8j9c2PqTKybtnymXbStNYRexRj8=",
-			"path": "k8s.io/client-go/1.5/pkg/util/runtime",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "aAz4e8hLGs0+ZAz1TdA5tY/9e1A=",
-			"path": "k8s.io/client-go/1.5/pkg/util/sets",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "P/fwh6QZ5tsjVyHTaASDWL3WaGs=",
-			"path": "k8s.io/client-go/1.5/pkg/util/uuid",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "P9Bq/1qbF4SvnN9HyCTRpbUz7sQ=",
-			"path": "k8s.io/client-go/1.5/pkg/util/validation",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "D0JIEjlP69cuPOZEdsSKeFgsnI8=",
-			"path": "k8s.io/client-go/1.5/pkg/util/validation/field",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "T7ba8t8i+BtgClMgL+aMZM94fcI=",
-			"path": "k8s.io/client-go/1.5/pkg/util/wait",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "6RCTv/KDiw7as4KeyrgU3XrUSQI=",
-			"path": "k8s.io/client-go/1.5/pkg/util/yaml",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "OwKlsSeKtz1FBVC9cQ5gWRL5pKc=",
-			"path": "k8s.io/client-go/1.5/pkg/version",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "Oil9WGw/dODbpBopn6LWQGS3DYg=",
-			"path": "k8s.io/client-go/1.5/pkg/watch",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "r5alnRCbLaPsbTeJjjTVn/bt6uw=",
-			"path": "k8s.io/client-go/1.5/pkg/watch/versioned",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "X1+ltyfHui/XCwDupXIf39+9gWQ=",
-			"path": "k8s.io/client-go/1.5/plugin/pkg/client/auth",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "KYy+js37AS0ZT08g5uBr1ZoMPmE=",
-			"path": "k8s.io/client-go/1.5/plugin/pkg/client/auth/gcp",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "wQ9G5++lbQpejqCzGHo037N3YcY=",
-			"path": "k8s.io/client-go/1.5/plugin/pkg/client/auth/oidc",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "ABe8YfZVEDoRpAUqp2BKP8o1VIA=",
-			"path": "k8s.io/client-go/1.5/rest",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "Gbe0Vs9hkI7X5hhbXUuWdRFffSI=",
-			"path": "k8s.io/client-go/1.5/tools/cache",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "K/oOznXABjqSS1c2Fs407c5F8KA=",
-			"path": "k8s.io/client-go/1.5/tools/clientcmd/api",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "c1PQ4WJRfpA9BYcFHW2+46hu5IE=",
-			"path": "k8s.io/client-go/1.5/tools/metrics",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		},
-		{
-			"checksumSHA1": "e4W2q+6wvjejv3V0UCI1mewTTro=",
-			"path": "k8s.io/client-go/1.5/transport",
-			"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
-			"revisionTime": "2016-09-30T00:14:02Z"
-		}
-	],
-	"rootPath": "github.com/prometheus/prometheus"
-}
diff --git a/src/cmd/go/internal/modconv/testdata/traefik.dep b/src/cmd/go/internal/modconv/testdata/traefik.dep
deleted file mode 100644
index 8510f0f..0000000
--- a/src/cmd/go/internal/modconv/testdata/traefik.dep
+++ /dev/null
@@ -1,79 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-[[projects]]
-  name = "github.com/Nvveen/Gotty"
-  packages = ["."]
-  revision = "a8b993ba6abdb0e0c12b0125c603323a71c7790c"
-  source = "github.com/ijc25/Gotty"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/OpenDNS/vegadns2client"
-  packages = ["."]
-  revision = "a3fa4a771d87bda2514a90a157e1fed1b6897d2e"
-
-[[projects]]
-  name = "github.com/PuerkitoBio/purell"
-  packages = ["."]
-  revision = "8a290539e2e8629dbc4e6bad948158f790ec31f4"
-  version = "v1.0.0"
-
-[[projects]]
-  name = "github.com/PuerkitoBio/urlesc"
-  packages = ["."]
-  revision = "5bd2802263f21d8788851d5305584c82a5c75d7e"
-
-[[projects]]
-  name = "github.com/Shopify/sarama"
-  packages = ["."]
-  revision = "70f6a705d4a17af059acbc6946fb2bd30762acd7"
-
-[[projects]]
-  name = "github.com/VividCortex/gohistogram"
-  packages = ["."]
-  revision = "51564d9861991fb0ad0f531c99ef602d0f9866e6"
-  version = "v1.0.0"
-
-[[projects]]
-  branch = "containous-fork"
-  name = "github.com/abbot/go-http-auth"
-  packages = ["."]
-  revision = "65b0cdae8d7fe5c05c7430e055938ef6d24a66c9"
-  source = "github.com/containous/go-http-auth"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/abronan/valkeyrie"
-  packages = [
-    ".",
-    "store",
-    "store/boltdb",
-    "store/consul",
-    "store/etcd/v2",
-    "store/etcd/v3",
-    "store/zookeeper"
-  ]
-  revision = "063d875e3c5fd734fa2aa12fac83829f62acfc70"
-  
-[[projects]]
-  branch = "master"
-  name = "github.com/mesosphere/mesos-dns"
-  packages = [
-    "detect",
-    "errorutil",
-    "logging",
-    "models",
-    "records",
-    "records/labels",
-    "records/state",
-    "util"
-  ]
-  revision = "b47dc4c19f215e98da687b15b4c64e70f629bea5"
-  source = "git@github.com:containous/mesos-dns.git"
-
-  [[projects]]
-  name = "gopkg.in/fsnotify.v1"
-  packages = ["."]
-  revision = "629574ca2a5df945712d3079857300b5e4da0236"
-  source = "github.com/fsnotify/fsnotify"
-  version = "v1.4.2"
\ No newline at end of file
diff --git a/src/cmd/go/internal/modconv/testdata/traefik.out b/src/cmd/go/internal/modconv/testdata/traefik.out
deleted file mode 100644
index 5054295..0000000
--- a/src/cmd/go/internal/modconv/testdata/traefik.out
+++ /dev/null
@@ -1,14 +0,0 @@
-github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c
-github.com/OpenDNS/vegadns2client a3fa4a771d87bda2514a90a157e1fed1b6897d2e
-github.com/PuerkitoBio/purell v1.0.0
-github.com/PuerkitoBio/urlesc 5bd2802263f21d8788851d5305584c82a5c75d7e
-github.com/Shopify/sarama 70f6a705d4a17af059acbc6946fb2bd30762acd7
-github.com/VividCortex/gohistogram v1.0.0
-github.com/abbot/go-http-auth 65b0cdae8d7fe5c05c7430e055938ef6d24a66c9
-github.com/abronan/valkeyrie 063d875e3c5fd734fa2aa12fac83829f62acfc70
-github.com/mesosphere/mesos-dns b47dc4c19f215e98da687b15b4c64e70f629bea5
-gopkg.in/fsnotify.v1 v1.4.2
-replace: github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c github.com/ijc25/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c
-replace: github.com/abbot/go-http-auth 65b0cdae8d7fe5c05c7430e055938ef6d24a66c9 github.com/containous/go-http-auth 65b0cdae8d7fe5c05c7430e055938ef6d24a66c9
-replace: github.com/mesosphere/mesos-dns b47dc4c19f215e98da687b15b4c64e70f629bea5 github.com/containous/mesos-dns b47dc4c19f215e98da687b15b4c64e70f629bea5
-replace: gopkg.in/fsnotify.v1 v1.4.2 github.com/fsnotify/fsnotify v1.4.2
diff --git a/src/cmd/go/internal/modconv/testdata/upspin.dep b/src/cmd/go/internal/modconv/testdata/upspin.dep
deleted file mode 100644
index be77bcb..0000000
--- a/src/cmd/go/internal/modconv/testdata/upspin.dep
+++ /dev/null
@@ -1,57 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  branch = "master"
-  name = "bazil.org/fuse"
-  packages = [".","fs","fuseutil"]
-  revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/NYTimes/gziphandler"
-  packages = ["."]
-  revision = "97ae7fbaf81620fe97840685304a78a306a39c64"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/golang/protobuf"
-  packages = ["proto"]
-  revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/russross/blackfriday"
-  packages = ["."]
-  revision = "6d1ef893fcb01b4f50cb6e57ed7df3e2e627b6b2"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/crypto"
-  packages = ["acme","acme/autocert","hkdf"]
-  revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/net"
-  packages = ["context"]
-  revision = "4b14673ba32bee7f5ac0f990a48f033919fd418b"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/text"
-  packages = ["cases","internal","internal/gen","internal/tag","internal/triegen","internal/ucd","language","runes","secure/bidirule","secure/precis","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
-  revision = "6eab0e8f74e86c598ec3b6fad4888e0c11482d48"
-
-[[projects]]
-  branch = "v2"
-  name = "gopkg.in/yaml.v2"
-  packages = ["."]
-  revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  inputs-digest = "2246e647ba1c78b0b9f948f9fb072fff1467284fb138709c063e99736f646b90"
-  solver-name = "gps-cdcl"
-  solver-version = 1
diff --git a/src/cmd/go/internal/modconv/testdata/upspin.out b/src/cmd/go/internal/modconv/testdata/upspin.out
deleted file mode 100644
index 00597db..0000000
--- a/src/cmd/go/internal/modconv/testdata/upspin.out
+++ /dev/null
@@ -1,8 +0,0 @@
-bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748
-github.com/NYTimes/gziphandler 97ae7fbaf81620fe97840685304a78a306a39c64
-github.com/golang/protobuf 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9
-github.com/russross/blackfriday 6d1ef893fcb01b4f50cb6e57ed7df3e2e627b6b2
-golang.org/x/crypto 13931e22f9e72ea58bb73048bc752b48c6d4d4ac
-golang.org/x/net 4b14673ba32bee7f5ac0f990a48f033919fd418b
-golang.org/x/text 6eab0e8f74e86c598ec3b6fad4888e0c11482d48
-gopkg.in/yaml.v2 eb3733d160e74a9c7e442f435eb3bea458e1d19f
diff --git a/src/cmd/go/internal/modconv/tsv.go b/src/cmd/go/internal/modconv/tsv.go
deleted file mode 100644
index 4649579..0000000
--- a/src/cmd/go/internal/modconv/tsv.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"strings"
-
-	"golang.org/x/mod/modfile"
-	"golang.org/x/mod/module"
-)
-
-func ParseDependenciesTSV(file string, data []byte) (*modfile.File, error) {
-	mf := new(modfile.File)
-	for _, line := range strings.Split(string(data), "\n") {
-		f := strings.Split(line, "\t")
-		if len(f) >= 3 {
-			mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: f[0], Version: f[2]}})
-		}
-	}
-	return mf, nil
-}
diff --git a/src/cmd/go/internal/modconv/vconf.go b/src/cmd/go/internal/modconv/vconf.go
deleted file mode 100644
index 9bad2ba..0000000
--- a/src/cmd/go/internal/modconv/vconf.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"strings"
-
-	"golang.org/x/mod/modfile"
-	"golang.org/x/mod/module"
-)
-
-func ParseVendorConf(file string, data []byte) (*modfile.File, error) {
-	mf := new(modfile.File)
-	for _, line := range strings.Split(string(data), "\n") {
-		if i := strings.Index(line, "#"); i >= 0 {
-			line = line[:i]
-		}
-		f := strings.Fields(line)
-		if len(f) >= 2 {
-			mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: f[0], Version: f[1]}})
-		}
-	}
-	return mf, nil
-}
diff --git a/src/cmd/go/internal/modconv/vjson.go b/src/cmd/go/internal/modconv/vjson.go
deleted file mode 100644
index 1bd025c..0000000
--- a/src/cmd/go/internal/modconv/vjson.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"encoding/json"
-
-	"golang.org/x/mod/modfile"
-	"golang.org/x/mod/module"
-)
-
-func ParseVendorJSON(file string, data []byte) (*modfile.File, error) {
-	var cfg struct {
-		Package []struct {
-			Path     string
-			Revision string
-		}
-	}
-	if err := json.Unmarshal(data, &cfg); err != nil {
-		return nil, err
-	}
-	mf := new(modfile.File)
-	for _, d := range cfg.Package {
-		mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: d.Path, Version: d.Revision}})
-	}
-	return mf, nil
-}
diff --git a/src/cmd/go/internal/modconv/vmanifest.go b/src/cmd/go/internal/modconv/vmanifest.go
deleted file mode 100644
index bcf0008..0000000
--- a/src/cmd/go/internal/modconv/vmanifest.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"encoding/json"
-
-	"golang.org/x/mod/modfile"
-	"golang.org/x/mod/module"
-)
-
-func ParseVendorManifest(file string, data []byte) (*modfile.File, error) {
-	var cfg struct {
-		Dependencies []struct {
-			ImportPath string
-			Revision   string
-		}
-	}
-	if err := json.Unmarshal(data, &cfg); err != nil {
-		return nil, err
-	}
-	mf := new(modfile.File)
-	for _, d := range cfg.Dependencies {
-		mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: d.ImportPath, Version: d.Revision}})
-	}
-	return mf, nil
-}
diff --git a/src/cmd/go/internal/modconv/vyml.go b/src/cmd/go/internal/modconv/vyml.go
deleted file mode 100644
index cfa4194..0000000
--- a/src/cmd/go/internal/modconv/vyml.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modconv
-
-import (
-	"strings"
-
-	"golang.org/x/mod/modfile"
-	"golang.org/x/mod/module"
-)
-
-func ParseVendorYML(file string, data []byte) (*modfile.File, error) {
-	mf := new(modfile.File)
-	vendors := false
-	path := ""
-	for _, line := range strings.Split(string(data), "\n") {
-		if line == "" {
-			continue
-		}
-		if strings.HasPrefix(line, "vendors:") {
-			vendors = true
-		} else if line[0] != '-' && line[0] != ' ' && line[0] != '\t' {
-			vendors = false
-		}
-		if !vendors {
-			continue
-		}
-		if strings.HasPrefix(line, "- path:") {
-			path = strings.TrimSpace(line[len("- path:"):])
-		}
-		if strings.HasPrefix(line, "  rev:") {
-			rev := strings.TrimSpace(line[len("  rev:"):])
-			if path != "" && rev != "" {
-				mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: path, Version: rev}})
-			}
-		}
-	}
-	return mf, nil
-}
diff --git a/src/cmd/go/internal/modfetch/codehost/codehost.go b/src/cmd/go/internal/modfetch/codehost/codehost.go
index ca57762..69a3c57 100644
--- a/src/cmd/go/internal/modfetch/codehost/codehost.go
+++ b/src/cmd/go/internal/modfetch/codehost/codehost.go
@@ -95,6 +95,8 @@
 	URL    string `json:",omitempty"` // URL of repository
 	Subdir string `json:",omitempty"` // subdirectory in repo
 
+	Hash string `json:",omitempty"` // commit hash or ID
+
 	// If TagSum is non-empty, then the resolution of this module version
 	// depends on the set of tags present in the repo, specifically the tags
 	// of the form TagPrefix + a valid semver version.
@@ -111,8 +113,7 @@
 	// and the Hash is the Git object hash the ref maps to.
 	// Other VCS might choose differently, but the idea is that Ref is the name
 	// with a mutable meaning while Hash is a name with an immutable meaning.
-	Ref  string `json:",omitempty"`
-	Hash string `json:",omitempty"`
+	Ref string `json:",omitempty"`
 
 	// If RepoSum is non-empty, then the resolution of this module version
 	// failed due to the repo being available but the version not being present.
@@ -121,21 +122,6 @@
 	RepoSum string `json:",omitempty"`
 }
 
-// Checkable reports whether the Origin contains anything that can be checked.
-// If not, the Origin is purely informational and should fail a CheckReuse call.
-func (o *Origin) Checkable() bool {
-	return o.TagSum != "" || o.Ref != "" || o.Hash != "" || o.RepoSum != ""
-}
-
-// ClearCheckable clears the Origin enough to make Checkable return false.
-func (o *Origin) ClearCheckable() {
-	o.TagSum = ""
-	o.TagPrefix = ""
-	o.Ref = ""
-	o.Hash = ""
-	o.RepoSum = ""
-}
-
 // A Tags describes the available tags in a code repository.
 type Tags struct {
 	Origin *Origin
diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go
index d1a18a8..7d9e5d8 100644
--- a/src/cmd/go/internal/modfetch/codehost/git.go
+++ b/src/cmd/go/internal/modfetch/codehost/git.go
@@ -18,6 +18,7 @@
 	"os/exec"
 	"path/filepath"
 	"runtime"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -154,7 +155,7 @@
 	refsErr error
 
 	localTagsOnce sync.Once
-	localTags     map[string]bool
+	localTags     sync.Map // map[string]bool
 }
 
 const (
@@ -166,7 +167,6 @@
 
 // loadLocalTags loads tag references from the local git cache
 // into the map r.localTags.
-// Should only be called as r.localTagsOnce.Do(r.loadLocalTags).
 func (r *gitRepo) loadLocalTags(ctx context.Context) {
 	// The git protocol sends all known refs and ls-remote filters them on the client side,
 	// so we might as well record both heads and tags in one shot.
@@ -176,10 +176,9 @@
 		return
 	}
 
-	r.localTags = make(map[string]bool)
 	for _, line := range strings.Split(string(out), "\n") {
 		if line != "" {
-			r.localTags[line] = true
+			r.localTags.Store(line, true)
 		}
 	}
 }
@@ -430,7 +429,7 @@
 	// Maybe rev is a tag we already have locally.
 	// (Note that we're excluding branches, which can be stale.)
 	r.localTagsOnce.Do(func() { r.loadLocalTags(ctx) })
-	if r.localTags[rev] {
+	if _, ok := r.localTags.Load(rev); ok {
 		return r.statLocal(ctx, rev, "refs/tags/"+rev)
 	}
 
@@ -506,11 +505,18 @@
 	// Either way, try a local stat before falling back to network I/O.
 	if !didStatLocal {
 		if info, err := r.statLocal(ctx, rev, hash); err == nil {
-			if after, found := strings.CutPrefix(ref, "refs/tags/"); found {
-				// Make sure tag exists, so it will be in localTags next time the go command is run.
-				Run(ctx, r.dir, "git", "tag", after, hash)
+			tag, fromTag := strings.CutPrefix(ref, "refs/tags/")
+			if fromTag && !slices.Contains(info.Tags, tag) {
+				// The local repo includes the commit hash we want, but it is missing
+				// the corresponding tag. Add that tag and try again.
+				_, err := Run(ctx, r.dir, "git", "tag", tag, hash)
+				if err != nil {
+					return nil, err
+				}
+				r.localTags.Store(tag, true)
+				return r.statLocal(ctx, rev, ref)
 			}
-			return info, nil
+			return info, err
 		}
 	}
 
@@ -524,13 +530,7 @@
 	if r.fetchLevel <= fetchSome && ref != "" && hash != "" && !r.local {
 		r.fetchLevel = fetchSome
 		var refspec string
-		if ref != "" && ref != "HEAD" {
-			// If we do know the ref name, save the mapping locally
-			// so that (if it is a tag) it can show up in localTags
-			// on a future call. Also, some servers refuse to allow
-			// full hashes in ref specs, so prefer a ref name if known.
-			refspec = ref + ":" + ref
-		} else {
+		if ref == "HEAD" {
 			// Fetch the hash but give it a local name (refs/dummy),
 			// because that triggers the fetch behavior of creating any
 			// other known remote tags for the hash. We never use
@@ -538,13 +538,23 @@
 			// overwritten in the next command, and that's fine.
 			ref = hash
 			refspec = hash + ":refs/dummy"
+		} else {
+			// If we do know the ref name, save the mapping locally
+			// so that (if it is a tag) it can show up in localTags
+			// on a future call. Also, some servers refuse to allow
+			// full hashes in ref specs, so prefer a ref name if known.
+			refspec = ref + ":" + ref
 		}
 
 		release, err := base.AcquireNet()
 		if err != nil {
 			return nil, err
 		}
-		_, err = Run(ctx, r.dir, "git", "fetch", "-f", "--depth=1", r.remote, refspec)
+		// We explicitly set protocol.version=2 for this command to work around
+		// an apparent Git bug introduced in Git 2.21 (commit 61c771),
+		// which causes the handler for protocol version 1 to sometimes miss
+		// tags that point to the requested commit (see https://go.dev/issue/56881).
+		_, err = Run(ctx, r.dir, "git", "fetch", "-f", "-c", "protocol.version=2", "--depth=1", r.remote, refspec)
 		release()
 
 		if err == nil {
diff --git a/src/cmd/go/internal/modfetch/codehost/git_test.go b/src/cmd/go/internal/modfetch/codehost/git_test.go
index 328ab5b..dba9935 100644
--- a/src/cmd/go/internal/modfetch/codehost/git_test.go
+++ b/src/cmd/go/internal/modfetch/codehost/git_test.go
@@ -280,9 +280,6 @@
 				t.Fatal(err)
 			}
 			if !reflect.DeepEqual(info, tt.info) {
-				if !reflect.DeepEqual(info.Tags, tt.info.Tags) {
-					testenv.SkipFlaky(t, 56881)
-				}
 				t.Errorf("Latest: incorrect info\nhave %+v (origin %+v)\nwant %+v (origin %+v)", info, info.Origin, tt.info, tt.info.Origin)
 			}
 		}
@@ -661,9 +658,6 @@
 			}
 			info.Origin = nil // TestLatest and ../../../testdata/script/reuse_git.txt test Origin well enough
 			if !reflect.DeepEqual(info, tt.info) {
-				if !reflect.DeepEqual(info.Tags, tt.info.Tags) {
-					testenv.SkipFlaky(t, 56881)
-				}
 				t.Errorf("Stat: incorrect info\nhave %+v\nwant %+v", *info, *tt.info)
 			}
 		}
diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go
index 8fe432a..75c34e9 100644
--- a/src/cmd/go/internal/modfetch/coderepo.go
+++ b/src/cmd/go/internal/modfetch/coderepo.go
@@ -322,6 +322,9 @@
 func (r *codeRepo) Latest(ctx context.Context) (*RevInfo, error) {
 	info, err := r.code.Latest(ctx)
 	if err != nil {
+		if info != nil {
+			return &RevInfo{Origin: info.Origin}, err
+		}
 		return nil, err
 	}
 	return r.convert(ctx, info, "")
@@ -332,7 +335,44 @@
 //
 // If statVers is a valid module version, it is used for the Version field.
 // Otherwise, the Version is derived from the passed-in info and recent tags.
-func (r *codeRepo) convert(ctx context.Context, info *codehost.RevInfo, statVers string) (*RevInfo, error) {
+func (r *codeRepo) convert(ctx context.Context, info *codehost.RevInfo, statVers string) (revInfo *RevInfo, err error) {
+	defer func() {
+		if info.Origin == nil {
+			return
+		}
+		if revInfo == nil {
+			revInfo = new(RevInfo)
+		} else if revInfo.Origin != nil {
+			panic("internal error: RevInfo Origin unexpectedly already populated")
+		}
+
+		origin := *info.Origin
+		revInfo.Origin = &origin
+		origin.Subdir = r.codeDir
+
+		v := revInfo.Version
+		if module.IsPseudoVersion(v) && (v != statVers || !strings.HasPrefix(v, "v0.0.0-")) {
+			// Add tags that are relevant to pseudo-version calculation to origin.
+			prefix := r.codeDir
+			if prefix != "" {
+				prefix += "/"
+			}
+			if r.pathMajor != "" { // "/v2" or "/.v2"
+				prefix += r.pathMajor[1:] + "." // += "v2."
+			}
+			tags, tagsErr := r.code.Tags(ctx, prefix)
+			if tagsErr != nil {
+				revInfo.Origin = nil
+				if err == nil {
+					err = tagsErr
+				}
+			} else {
+				origin.TagPrefix = tags.Origin.TagPrefix
+				origin.TagSum = tags.Origin.TagSum
+			}
+		}
+	}()
+
 	// If this is a plain tag (no dir/ prefix)
 	// and the module path is unversioned,
 	// and if the underlying file tree has no go.mod,
@@ -463,31 +503,7 @@
 			return nil, errIncompatible
 		}
 
-		origin := info.Origin
-		if origin != nil {
-			o := *origin
-			origin = &o
-			origin.Subdir = r.codeDir
-			if module.IsPseudoVersion(v) && (v != statVers || !strings.HasPrefix(v, "v0.0.0-")) {
-				// Add tags that are relevant to pseudo-version calculation to origin.
-				prefix := r.codeDir
-				if prefix != "" {
-					prefix += "/"
-				}
-				if r.pathMajor != "" { // "/v2" or "/.v2"
-					prefix += r.pathMajor[1:] + "." // += "v2."
-				}
-				tags, err := r.code.Tags(ctx, prefix)
-				if err != nil {
-					return nil, err
-				}
-				origin.TagPrefix = tags.Origin.TagPrefix
-				origin.TagSum = tags.Origin.TagSum
-			}
-		}
-
 		return &RevInfo{
-			Origin:  origin,
 			Name:    info.Name,
 			Short:   info.Short,
 			Time:    info.Time,
@@ -498,10 +514,20 @@
 	// Determine version.
 
 	if module.IsPseudoVersion(statVers) {
+		// Validate the go.mod location and major version before
+		// we check for an ancestor tagged with the pseudo-version base.
+		//
+		// We can rule out an invalid subdirectory or major version with only
+		// shallow commit information, but checking the pseudo-version base may
+		// require downloading a (potentially more expensive) full history.
+		revInfo, err = checkCanonical(statVers)
+		if err != nil {
+			return revInfo, err
+		}
 		if err := r.validatePseudoVersion(ctx, info, statVers); err != nil {
 			return nil, err
 		}
-		return checkCanonical(statVers)
+		return revInfo, nil
 	}
 
 	// statVers is not a pseudo-version, so we need to either resolve it to a
diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go
index 4279686..eeab6da 100644
--- a/src/cmd/go/internal/modfetch/fetch.go
+++ b/src/cmd/go/internal/modfetch/fetch.go
@@ -504,7 +504,7 @@
 
 // readGoSum parses data, which is the content of file,
 // and adds it to goSum.m. The goSum lock must be held.
-func readGoSum(dst map[module.Version][]string, file string, data []byte) error {
+func readGoSum(dst map[module.Version][]string, file string, data []byte) {
 	lineno := 0
 	for len(data) > 0 {
 		var line []byte
@@ -521,7 +521,12 @@
 			continue
 		}
 		if len(f) != 3 {
-			return fmt.Errorf("malformed go.sum:\n%s:%d: wrong number of fields %v", file, lineno, len(f))
+			if cfg.CmdName == "mod tidy" {
+				// ignore malformed line so that go mod tidy can fix go.sum
+				continue
+			} else {
+				base.Fatalf("malformed go.sum:\n%s:%d: wrong number of fields %v\n", file, lineno, len(f))
+			}
 		}
 		if f[2] == emptyGoModHash {
 			// Old bug; drop it.
@@ -530,7 +535,6 @@
 		mod := module.Version{Path: f[0], Version: f[1]}
 		dst[mod] = append(dst[mod], f[2])
 	}
-	return nil
 }
 
 // HaveSum returns true if the go.sum file contains an entry for mod.
diff --git a/src/cmd/go/internal/modfetch/proxy.go b/src/cmd/go/internal/modfetch/proxy.go
index dd37ba9..e0efb09 100644
--- a/src/cmd/go/internal/modfetch/proxy.go
+++ b/src/cmd/go/internal/modfetch/proxy.go
@@ -185,9 +185,9 @@
 }
 
 type proxyRepo struct {
-	url         *url.URL
-	path        string
-	redactedURL string
+	url          *url.URL // The combined module proxy URL joined with the module path.
+	path         string   // The module path (unescaped).
+	redactedBase string   // The base module proxy URL in [url.URL.Redacted] form.
 
 	listLatestOnce sync.Once
 	listLatest     *RevInfo
@@ -195,31 +195,35 @@
 }
 
 func newProxyRepo(baseURL, path string) (Repo, error) {
+	// Parse the base proxy URL.
 	base, err := url.Parse(baseURL)
 	if err != nil {
 		return nil, err
 	}
+	redactedBase := base.Redacted()
 	switch base.Scheme {
 	case "http", "https":
 		// ok
 	case "file":
 		if *base != (url.URL{Scheme: base.Scheme, Path: base.Path, RawPath: base.RawPath}) {
-			return nil, fmt.Errorf("invalid file:// proxy URL with non-path elements: %s", base.Redacted())
+			return nil, fmt.Errorf("invalid file:// proxy URL with non-path elements: %s", redactedBase)
 		}
 	case "":
-		return nil, fmt.Errorf("invalid proxy URL missing scheme: %s", base.Redacted())
+		return nil, fmt.Errorf("invalid proxy URL missing scheme: %s", redactedBase)
 	default:
-		return nil, fmt.Errorf("invalid proxy URL scheme (must be https, http, file): %s", base.Redacted())
+		return nil, fmt.Errorf("invalid proxy URL scheme (must be https, http, file): %s", redactedBase)
 	}
 
+	// Append the module path to the URL.
+	url := base
 	enc, err := module.EscapePath(path)
 	if err != nil {
 		return nil, err
 	}
-	redactedURL := base.Redacted()
-	base.Path = strings.TrimSuffix(base.Path, "/") + "/" + enc
-	base.RawPath = strings.TrimSuffix(base.RawPath, "/") + "/" + pathEscape(enc)
-	return &proxyRepo{base, path, redactedURL, sync.Once{}, nil, nil}, nil
+	url.Path = strings.TrimSuffix(base.Path, "/") + "/" + enc
+	url.RawPath = strings.TrimSuffix(base.RawPath, "/") + "/" + pathEscape(enc)
+
+	return &proxyRepo{url, path, redactedBase, sync.Once{}, nil, nil}, nil
 }
 
 func (p *proxyRepo) ModulePath() string {
@@ -253,7 +257,7 @@
 }
 
 func (p *proxyRepo) getBytes(ctx context.Context, path string) ([]byte, error) {
-	body, err := p.getBody(ctx, path)
+	body, redactedURL, err := p.getBody(ctx, path)
 	if err != nil {
 		return nil, err
 	}
@@ -261,14 +265,14 @@
 
 	b, err := io.ReadAll(body)
 	if err != nil {
-		// net/http doesn't add context to Body errors, so add it here.
+		// net/http doesn't add context to Body read errors, so add it here.
 		// (See https://go.dev/issue/52727.)
-		return b, &url.Error{Op: "read", URL: strings.TrimSuffix(p.redactedURL, "/") + "/" + path, Err: err}
+		return b, &url.Error{Op: "read", URL: redactedURL, Err: err}
 	}
 	return b, nil
 }
 
-func (p *proxyRepo) getBody(ctx context.Context, path string) (r io.ReadCloser, err error) {
+func (p *proxyRepo) getBody(ctx context.Context, path string) (r io.ReadCloser, redactedURL string, err error) {
 	fullPath := pathpkg.Join(p.url.Path, path)
 
 	target := *p.url
@@ -277,13 +281,13 @@
 
 	resp, err := web.Get(web.DefaultSecurity, &target)
 	if err != nil {
-		return nil, err
+		return nil, "", err
 	}
 	if err := resp.Err(); err != nil {
 		resp.Body.Close()
-		return nil, err
+		return nil, "", err
 	}
-	return resp.Body, nil
+	return resp.Body, resp.URL, nil
 }
 
 func (p *proxyRepo) Versions(ctx context.Context, prefix string) (*Versions, error) {
@@ -370,7 +374,7 @@
 	}
 	info := new(RevInfo)
 	if err := json.Unmarshal(data, info); err != nil {
-		return nil, p.versionError(rev, fmt.Errorf("invalid response from proxy %q: %w", p.redactedURL, err))
+		return nil, p.versionError(rev, fmt.Errorf("invalid response from proxy %q: %w", p.redactedBase, err))
 	}
 	if info.Version != rev && rev == module.CanonicalVersion(rev) && module.Check(p.path, rev) == nil {
 		// If we request a correct, appropriate version for the module path, the
@@ -391,7 +395,7 @@
 	}
 	info := new(RevInfo)
 	if err := json.Unmarshal(data, info); err != nil {
-		return nil, p.versionError("", fmt.Errorf("invalid response from proxy %q: %w", p.redactedURL, err))
+		return nil, p.versionError("", fmt.Errorf("invalid response from proxy %q: %w", p.redactedBase, err))
 	}
 	return info, nil
 }
@@ -422,7 +426,7 @@
 		return p.versionError(version, err)
 	}
 	path := "@v/" + encVer + ".zip"
-	body, err := p.getBody(ctx, path)
+	body, redactedURL, err := p.getBody(ctx, path)
 	if err != nil {
 		return p.versionError(version, err)
 	}
@@ -430,9 +434,9 @@
 
 	lr := &io.LimitedReader{R: body, N: codehost.MaxZipFile + 1}
 	if _, err := io.Copy(dst, lr); err != nil {
-		// net/http doesn't add context to Body errors, so add it here.
+		// net/http doesn't add context to Body read errors, so add it here.
 		// (See https://go.dev/issue/52727.)
-		err = &url.Error{Op: "read", URL: pathpkg.Join(p.redactedURL, path), Err: err}
+		err = &url.Error{Op: "read", URL: redactedURL, Err: err}
 		return p.versionError(version, err)
 	}
 	if lr.N <= 0 {
diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go
index 37befa3..d9f0c6a 100644
--- a/src/cmd/go/internal/modget/get.go
+++ b/src/cmd/go/internal/modget/get.go
@@ -128,23 +128,6 @@
 	`,
 }
 
-// Note that this help text is a stopgap to make the module-aware get help text
-// available even in non-module settings. It should be deleted when the old get
-// is deleted. It should NOT be considered to set a precedent of having hierarchical
-// help names with dashes.
-var HelpModuleGet = &base.Command{
-	UsageLine: "module-get",
-	Short:     "module-aware go get",
-	Long: `
-The 'go get' command changes behavior depending on whether the
-go command is running in module-aware mode or legacy GOPATH mode.
-This help text, accessible as 'go help module-get' even in legacy GOPATH mode,
-describes 'go get' as it operates in module-aware mode.
-
-Usage: ` + CmdGet.UsageLine + `
-` + CmdGet.Long,
-}
-
 var HelpVCS = &base.Command{
 	UsageLine: "vcs",
 	Short:     "controlling version control with GOVCS",
@@ -344,7 +327,7 @@
 			// The result of any version query for a given module — even "upgrade" or
 			// "patch" — is always relative to the build list at the start of
 			// the 'go get' command, not an intermediate state, and is therefore
-			// deterministic and therefore cachable, and the constraints on the
+			// deterministic and therefore cacheable, and the constraints on the
 			// selected version of each module can only narrow as we iterate.
 			//
 			// "all" is functionally very similar to a wildcard pattern. The set of
diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go
index b78c1c4..498ba6c 100644
--- a/src/cmd/go/internal/modget/query.go
+++ b/src/cmd/go/internal/modget/query.go
@@ -55,7 +55,7 @@
 	// path.
 	matchWildcard func(path string) bool
 
-	// canMatchWildcard, if non-nil, reports whether the module with the given
+	// canMatchWildcardInModule, if non-nil, reports whether the module with the given
 	// path could lexically contain a package matching pattern, which must be a
 	// wildcard.
 	canMatchWildcardInModule func(mPath string) bool
diff --git a/src/cmd/go/internal/modindex/build.go b/src/cmd/go/internal/modindex/build.go
index b57f2f6..0b06373 100644
--- a/src/cmd/go/internal/modindex/build.go
+++ b/src/cmd/go/internal/modindex/build.go
@@ -622,6 +622,11 @@
 			continue
 		}
 
+		// #cgo (nocallback|noescape) <function name>
+		if fields := strings.Fields(line); len(fields) == 3 && (fields[1] == "nocallback" || fields[1] == "noescape") {
+			continue
+		}
+
 		// Split at colon.
 		line, argstr, ok := strings.Cut(strings.TrimSpace(line[4:]), ":")
 		if !ok {
diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go
index bb513ea..5cf1487 100644
--- a/src/cmd/go/internal/modload/build.go
+++ b/src/cmd/go/internal/modload/build.go
@@ -161,53 +161,63 @@
 	}
 }
 
-// mergeOrigin merges two origins,
-// returning and possibly modifying one of its arguments.
-// If the two origins conflict, mergeOrigin returns a non-specific one
-// that will not pass CheckReuse.
-// If m1 or m2 is nil, the other is returned unmodified.
-// But if m1 or m2 is non-nil and uncheckable, the result is also uncheckable,
-// to preserve uncheckability.
+// mergeOrigin returns the union of data from two origins,
+// returning either a new origin or one of its unmodified arguments.
+// If the two origins conflict (including when either is nil),
+// mergeOrigin returns nil.
 func mergeOrigin(m1, m2 *codehost.Origin) *codehost.Origin {
-	if m1 == nil {
-		return m2
-	}
-	if m2 == nil {
-		return m1
-	}
-	if !m1.Checkable() {
-		return m1
-	}
-	if !m2.Checkable() {
-		return m2
+	if m1 == nil || m2 == nil {
+		return nil
 	}
 
-	merged := new(codehost.Origin)
-	*merged = *m1 // Clone to avoid overwriting fields in cached results.
+	if m2.VCS != m1.VCS ||
+		m2.URL != m1.URL ||
+		m2.Subdir != m1.Subdir {
+		return nil
+	}
 
+	merged := *m1
+	if m2.Hash != "" {
+		if m1.Hash != "" && m1.Hash != m2.Hash {
+			return nil
+		}
+		merged.Hash = m2.Hash
+	}
 	if m2.TagSum != "" {
 		if m1.TagSum != "" && (m1.TagSum != m2.TagSum || m1.TagPrefix != m2.TagPrefix) {
-			merged.ClearCheckable()
-			return merged
+			return nil
 		}
 		merged.TagSum = m2.TagSum
 		merged.TagPrefix = m2.TagPrefix
 	}
-	if m2.Hash != "" {
-		if m1.Hash != "" && (m1.Hash != m2.Hash || m1.Ref != m2.Ref) {
-			merged.ClearCheckable()
-			return merged
+	if m2.Ref != "" {
+		if m1.Ref != "" && m1.Ref != m2.Ref {
+			return nil
 		}
-		merged.Hash = m2.Hash
 		merged.Ref = m2.Ref
 	}
-	return merged
+
+	switch {
+	case merged == *m1:
+		return m1
+	case merged == *m2:
+		return m2
+	default:
+		// Clone the result to avoid an alloc for merged
+		// if the result is equal to one of the arguments.
+		clone := merged
+		return &clone
+	}
 }
 
 // addVersions fills in m.Versions with the list of known versions.
 // Excluded versions will be omitted. If listRetracted is false, retracted
 // versions will also be omitted.
 func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted bool) {
+	// TODO(bcmills): Would it make sense to check for reuse here too?
+	// Perhaps that doesn't buy us much, though: we would always have to fetch
+	// all of the version tags to list the available versions anyway.
+
 	allowed := CheckAllowed
 	if listRetracted {
 		allowed = CheckExclusions
@@ -313,21 +323,23 @@
 			return
 		}
 
-		if old := reuse[module.Version{Path: m.Path, Version: m.Version}]; old != nil {
-			if err := checkReuse(ctx, m.Path, old.Origin); err == nil {
-				*m = *old
-				m.Query = ""
-				m.Dir = ""
-				return
-			}
-		}
-
 		checksumOk := func(suffix string) bool {
 			return rs == nil || m.Version == "" || !mustHaveSums() ||
 				modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix})
 		}
 
+		mod := module.Version{Path: m.Path, Version: m.Version}
+
 		if m.Version != "" {
+			if old := reuse[mod]; old != nil {
+				if err := checkReuse(ctx, mod, old.Origin); err == nil {
+					*m = *old
+					m.Query = ""
+					m.Dir = ""
+					return
+				}
+			}
+
 			if q, err := Query(ctx, m.Path, m.Version, "", nil); err != nil {
 				m.Error = &modinfo.ModuleError{Err: err.Error()}
 			} else {
@@ -335,7 +347,6 @@
 				m.Time = &q.Time
 			}
 		}
-		mod := module.Version{Path: m.Path, Version: m.Version}
 
 		if m.GoVersion == "" && checksumOk("/go.mod") {
 			// Load the go.mod file to determine the Go version, since it hasn't
diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go
index 8d3af08..d72a24f 100644
--- a/src/cmd/go/internal/modload/buildlist.go
+++ b/src/cmd/go/internal/modload/buildlist.go
@@ -174,17 +174,20 @@
 // requirements.
 func (rs *Requirements) initVendor(vendorList []module.Version) {
 	rs.graphOnce.Do(func() {
+		roots := MainModules.Versions()
+		if inWorkspaceMode() {
+			// Use rs.rootModules to pull in the go and toolchain roots
+			// from the go.work file and preserve the invariant that all
+			// of rs.rootModules are in mg.g.
+			roots = rs.rootModules
+		}
 		mg := &ModuleGraph{
-			g: mvs.NewGraph(cmpVersion, MainModules.Versions()),
+			g: mvs.NewGraph(cmpVersion, roots),
 		}
 
-		if MainModules.Len() != 1 {
-			panic("There should be exactly one main module in Vendor mode.")
-		}
-		mainModule := MainModules.Versions()[0]
-
 		if rs.pruning == pruned {
-			// The roots of a pruned module should already include every module in the
+			mainModule := MainModules.mustGetSingleMainModule()
+			// The roots of a single pruned module should already include every module in the
 			// vendor list, because the vendored modules are the same as those needed
 			// for graph pruning.
 			//
@@ -215,8 +218,18 @@
 			// graph, but still distinguishes between direct and indirect
 			// dependencies.
 			vendorMod := module.Version{Path: "vendor/modules.txt", Version: ""}
-			mg.g.Require(mainModule, append(rs.rootModules, vendorMod))
-			mg.g.Require(vendorMod, vendorList)
+			if inWorkspaceMode() {
+				for _, m := range MainModules.Versions() {
+					reqs, _ := rootsFromModFile(m, MainModules.ModFile(m), omitToolchainRoot)
+					mg.g.Require(m, append(reqs, vendorMod))
+				}
+				mg.g.Require(vendorMod, vendorList)
+
+			} else {
+				mainModule := MainModules.mustGetSingleMainModule()
+				mg.g.Require(mainModule, append(rs.rootModules, vendorMod))
+				mg.g.Require(vendorMod, vendorList)
+			}
 		}
 
 		rs.graph.Store(&cachedGraph{mg, nil})
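
A conceptual sketch of the graph shape initVendor builds above in workspace vendor mode, using plain maps rather than the real mvs.Graph API (module paths are hypothetical): each main module requires its own go.mod roots plus the synthetic vendor/modules.txt node, and that node requires the full vendor list.

package main

import "fmt"

type modVersion struct{ Path, Version string }

func main() {
	mainA := modVersion{Path: "example.com/a"}
	mainB := modVersion{Path: "example.com/b"}
	vendorMod := modVersion{Path: "vendor/modules.txt"} // synthetic node
	depX := modVersion{Path: "example.com/x", Version: "v1.0.0"}
	depY := modVersion{Path: "example.com/y", Version: "v1.2.0"}

	require := map[modVersion][]modVersion{
		// Each main module keeps its own go.mod requirements and also
		// requires the synthetic vendor node...
		mainA: {depX, vendorMod},
		mainB: {depY, vendorMod},
		// ...and the synthetic node requires everything in vendor/modules.txt,
		// so the whole vendor list is reachable from every main module.
		vendorMod: {depX, depY},
	}
	for m, reqs := range require {
		fmt.Println(m.Path, "->", reqs)
	}
}
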
@@ -1075,7 +1088,7 @@
 			// relevant dependencies, and we explicitly don't want to pull in
 			// requirements on *irrelevant* requirements that happen to occur in the
 			// go.mod files for these transitive-test-only dependencies. (See the test
-			// in mod_lazy_test_horizon.txt for a concrete example.
+			// in mod_lazy_test_horizon.txt for a concrete example).
 			//
 			// The “goldilocks zone” seems to be to spot-check exactly the same
 			// modules that we promote to explicit roots: namely, those that provide
diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go
index 83b9ad4..7cd5fcf 100644
--- a/src/cmd/go/internal/modload/import.go
+++ b/src/cmd/go/internal/modload/import.go
@@ -318,30 +318,41 @@
 		mods = append(mods, module.Version{})
 	}
 	// -mod=vendor is special.
-	// Everything must be in the main module or the main module's vendor directory.
+	// Everything must be in the main modules or the main module's or workspace's vendor directory.
 	if cfg.BuildMod == "vendor" {
-		mainModule := MainModules.mustGetSingleMainModule()
-		modRoot := MainModules.ModRoot(mainModule)
 		var mainErr error
-		if modRoot != "" {
-			mainDir, mainOK, err := dirInModule(path, MainModules.PathPrefix(mainModule), modRoot, true)
-			mainErr = err
-			if mainOK {
-				mods = append(mods, mainModule)
-				dirs = append(dirs, mainDir)
-				roots = append(roots, modRoot)
+		for _, mainModule := range MainModules.Versions() {
+			modRoot := MainModules.ModRoot(mainModule)
+			if modRoot != "" {
+				dir, mainOK, err := dirInModule(path, MainModules.PathPrefix(mainModule), modRoot, true)
+				if mainErr == nil {
+					mainErr = err
+				}
+				if mainOK {
+					mods = append(mods, mainModule)
+					dirs = append(dirs, dir)
+					roots = append(roots, modRoot)
+				}
 			}
-			vendorDir, vendorOK, _ := dirInModule(path, "", filepath.Join(modRoot, "vendor"), false)
+		}
+
+		if HasModRoot() {
+			vendorDir := VendorDir()
+			dir, vendorOK, _ := dirInModule(path, "", vendorDir, false)
 			if vendorOK {
-				readVendorList(mainModule)
+				readVendorList(vendorDir)
+			// TODO(#60922): It's possible for a package to have been manually added to the
+				// vendor directory, causing the dirInModule to succeed, but no vendorPkgModule
+				// to exist, causing an empty module path to be reported. Do better checking
+				// here.
 				mods = append(mods, vendorPkgModule[path])
-				dirs = append(dirs, vendorDir)
-				roots = append(roots, modRoot)
+				dirs = append(dirs, dir)
+				roots = append(roots, vendorDir)
 			}
 		}
 
 		if len(dirs) > 1 {
-			return module.Version{}, modRoot, "", nil, &AmbiguousImportError{importPath: path, Dirs: dirs}
+			return module.Version{}, "", "", nil, &AmbiguousImportError{importPath: path, Dirs: dirs}
 		}
 
 		if mainErr != nil {
@@ -349,7 +360,7 @@
 		}
 
 		if len(dirs) == 0 {
-			return module.Version{}, modRoot, "", nil, &ImportMissingError{Path: path}
+			return module.Version{}, "", "", nil, &ImportMissingError{Path: path}
 		}
 
 		return mods[0], roots[0], dirs[0], nil, nil
@@ -695,7 +706,7 @@
 	// Now committed to returning dir (not "").
 
 	// Are there Go source files in the directory?
-	// We don't care about build tags, not even "+build ignore".
+	// We don't care about build tags, not even "go:build ignore".
 	// We're just looking for a plausible directory.
 	haveGoFiles, err = haveGoFilesCache.Do(dir, func() (bool, error) {
 		// modindex.GetPackage will return ErrNotIndexed for any directories which
diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go
index 1c6f7d9..f4f4a68 100644
--- a/src/cmd/go/internal/modload/init.go
+++ b/src/cmd/go/internal/modload/init.go
@@ -11,6 +11,7 @@
 	"errors"
 	"fmt"
 	"internal/lazyregexp"
+	"io"
 	"os"
 	"path"
 	"path/filepath"
@@ -24,7 +25,6 @@
 	"cmd/go/internal/fsys"
 	"cmd/go/internal/gover"
 	"cmd/go/internal/lockedfile"
-	"cmd/go/internal/modconv"
 	"cmd/go/internal/modfetch"
 	"cmd/go/internal/search"
 
@@ -200,6 +200,10 @@
 	return mms.modFiles[m]
 }
 
+func (mms *MainModuleSet) WorkFile() *modfile.WorkFile {
+	return mms.workFile
+}
+
 func (mms *MainModuleSet) Len() int {
 	if mms == nil {
 		return 0
@@ -410,7 +414,7 @@
 	// Disable any ssh connection pooling by Git.
 	// If a Git subprocess forks a child into the background to cache a new connection,
 	// that child keeps stdout/stderr open. After the Git subprocess exits,
-	// os /exec expects to be able to read from the stdout/stderr pipe
+	// os/exec expects to be able to read from the stdout/stderr pipe
 	// until EOF to get all the data that the Git subprocess wrote before exiting.
 	// The EOF doesn't come until the child exits too, because the child
 	// is holding the write end of the pipe.
@@ -481,7 +485,13 @@
 	if len(list) > 0 && list[0] != "" {
 		gopath = list[0]
 		if _, err := fsys.Stat(filepath.Join(gopath, "go.mod")); err == nil {
-			base.Fatalf("$GOPATH/go.mod exists but should not")
+			fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in $GOPATH %v\n", gopath)
+			if RootMode == NeedRoot {
+				base.Fatal(ErrNoModRoot)
+			}
+			if !mustUseModules {
+				return
+			}
 		}
 	}
 }
@@ -553,7 +563,17 @@
 }
 
 func VendorDir() string {
-	return filepath.Join(MainModules.ModRoot(MainModules.mustGetSingleMainModule()), "vendor")
+	if inWorkspaceMode() {
+		return filepath.Join(filepath.Dir(WorkFilePath()), "vendor")
+	}
+	// Even if -mod=vendor, we could be operating with no mod root (and thus no
+	// vendor directory). As long as there are no dependencies, that is expected
+	// to work. See script/vendor_outside_module.txt.
+	modRoot := MainModules.ModRoot(MainModules.mustGetSingleMainModule())
+	if modRoot == "" {
+		panic("vendor directory does not exist when in single module mode outside of a module")
+	}
+	return filepath.Join(modRoot, "vendor")
 }
 
 func inWorkspaceMode() bool {
@@ -914,23 +934,28 @@
 	setDefaultBuildMod() // possibly enable automatic vendoring
 	rs := requirementsFromModFiles(ctx, workFile, modFiles, opts)
 
+	if cfg.BuildMod == "vendor" {
+		readVendorList(VendorDir())
+		var indexes []*modFileIndex
+		var modFiles []*modfile.File
+		var modRoots []string
+		for _, m := range MainModules.Versions() {
+			indexes = append(indexes, MainModules.Index(m))
+			modFiles = append(modFiles, MainModules.ModFile(m))
+			modRoots = append(modRoots, MainModules.ModRoot(m))
+		}
+		checkVendorConsistency(indexes, modFiles, modRoots)
+		rs.initVendor(vendorList)
+	}
+
 	if inWorkspaceMode() {
-		// We don't need to do anything for vendor or update the mod file so
-		// return early.
+		// We don't need to update the mod file, so return early.
 		requirements = rs
 		return rs, nil
 	}
 
 	mainModule := MainModules.mustGetSingleMainModule()
 
-	if cfg.BuildMod == "vendor" {
-		readVendorList(mainModule)
-		index := MainModules.Index(mainModule)
-		modFile := MainModules.ModFile(mainModule)
-		checkVendorConsistency(index, modFile)
-		rs.initVendor(vendorList)
-	}
-
 	if rs.hasRedundantRoot() {
 		// If any module path appears more than once in the roots, we know that the
 		// go.mod file needs to be updated even though we have not yet loaded any
@@ -1029,16 +1054,8 @@
 	MainModules = makeMainModules([]module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil)
 	addGoStmt(modFile, modFile.Module.Mod, gover.Local()) // Add the go directive before converted module requirements.
 
-	convertedFrom, err := convertLegacyConfig(modFile, modRoot)
-	if convertedFrom != "" {
-		fmt.Fprintf(os.Stderr, "go: copying requirements from %s\n", base.ShortPath(convertedFrom))
-	}
-	if err != nil {
-		base.Fatal(err)
-	}
-
 	rs := requirementsFromModFiles(ctx, nil, []*modfile.File{modFile}, nil)
-	rs, err = updateRoots(ctx, rs.direct, rs, nil, nil, false)
+	rs, err := updateRoots(ctx, rs.direct, rs, nil, nil, false)
 	if err != nil {
 		base.Fatal(err)
 	}
@@ -1243,44 +1260,69 @@
 	var roots []module.Version
 	direct := map[string]bool{}
 	var pruning modPruning
-	var goVersion, toolchain string
 	if inWorkspaceMode() {
 		pruning = workspace
 		roots = make([]module.Version, len(MainModules.Versions()), 2+len(MainModules.Versions()))
 		copy(roots, MainModules.Versions())
-		goVersion = gover.FromGoWork(workFile)
+		goVersion := gover.FromGoWork(workFile)
+		var toolchain string
 		if workFile.Toolchain != nil {
 			toolchain = workFile.Toolchain.Name
 		}
+		roots = appendGoAndToolchainRoots(roots, goVersion, toolchain, direct)
 	} else {
 		pruning = pruningForGoVersion(MainModules.GoVersion())
 		if len(modFiles) != 1 {
 			panic(fmt.Errorf("requirementsFromModFiles called with %v modfiles outside workspace mode", len(modFiles)))
 		}
 		modFile := modFiles[0]
-		roots = make([]module.Version, 0, 2+len(modFile.Require))
-		mm := MainModules.mustGetSingleMainModule()
-		for _, r := range modFile.Require {
-			if index := MainModules.Index(mm); index != nil && index.exclude[r.Mod] {
-				if cfg.BuildMod == "mod" {
-					fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
-				} else {
-					fmt.Fprintf(os.Stderr, "go: ignoring requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
-				}
-				continue
-			}
-
-			roots = append(roots, r.Mod)
-			if !r.Indirect {
-				direct[r.Mod.Path] = true
-			}
-		}
-		goVersion = gover.FromGoMod(modFile)
-		if modFile.Toolchain != nil {
-			toolchain = modFile.Toolchain.Name
-		}
+		roots, direct = rootsFromModFile(MainModules.mustGetSingleMainModule(), modFile, withToolchainRoot)
 	}
 
+	gover.ModSort(roots)
+	rs := newRequirements(pruning, roots, direct)
+	return rs
+}
+
+type addToolchainRoot bool
+
+const (
+	omitToolchainRoot addToolchainRoot = false
+	withToolchainRoot                  = true
+)
+
+func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot addToolchainRoot) (roots []module.Version, direct map[string]bool) {
+	direct = make(map[string]bool)
+	padding := 2 // Add padding for the toolchain and go version, added upon return.
+	if !addToolchainRoot {
+		padding = 1
+	}
+	roots = make([]module.Version, 0, padding+len(modFile.Require))
+	for _, r := range modFile.Require {
+		if index := MainModules.Index(m); index != nil && index.exclude[r.Mod] {
+			if cfg.BuildMod == "mod" {
+				fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
+			} else {
+				fmt.Fprintf(os.Stderr, "go: ignoring requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
+			}
+			continue
+		}
+
+		roots = append(roots, r.Mod)
+		if !r.Indirect {
+			direct[r.Mod.Path] = true
+		}
+	}
+	goVersion := gover.FromGoMod(modFile)
+	var toolchain string
+	if addToolchainRoot && modFile.Toolchain != nil {
+		toolchain = modFile.Toolchain.Name
+	}
+	roots = appendGoAndToolchainRoots(roots, goVersion, toolchain, direct)
+	return roots, direct
+}
+
+func appendGoAndToolchainRoots(roots []module.Version, goVersion, toolchain string, direct map[string]bool) []module.Version {
 	// Add explicit go and toolchain versions, inferring as needed.
 	roots = append(roots, module.Version{Path: "go", Version: goVersion})
 	direct["go"] = true // Every module directly uses the language and runtime.
@@ -1293,19 +1335,16 @@
 		// automatically if the 'go' version is changed so that it implies the exact
 		// same toolchain.
 	}
-
-	gover.ModSort(roots)
-	rs := newRequirements(pruning, roots, direct)
-	return rs
+	return roots
 }
 
 // setDefaultBuildMod sets a default value for cfg.BuildMod if the -mod flag
 // wasn't provided. setDefaultBuildMod may be called multiple times.
 func setDefaultBuildMod() {
 	if cfg.BuildModExplicit {
-		if inWorkspaceMode() && cfg.BuildMod != "readonly" {
-			base.Fatalf("go: -mod may only be set to readonly when in workspace mode, but it is set to %q"+
-				"\n\tRemove the -mod flag to use the default readonly value,"+
+		if inWorkspaceMode() && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" {
+			base.Fatalf("go: -mod may only be set to readonly or vendor when in workspace mode, but it is set to %q"+
+				"\n\tRemove the -mod flag to use the default readonly value, "+
 				"\n\tor set GOWORK=off to disable workspace mode.", cfg.BuildMod)
 		}
 		// Don't override an explicit '-mod=' argument.
@@ -1327,7 +1366,7 @@
 		// to work in buggy situations.
 		cfg.BuildMod = "mod"
 		return
-	case "mod vendor":
+	case "mod vendor", "work vendor":
 		cfg.BuildMod = "readonly"
 		return
 	}
@@ -1340,63 +1379,98 @@
 		return
 	}
 
-	if len(modRoots) == 1 && !inWorkspaceMode() {
-		index := MainModules.GetSingleIndexOrNil()
-		if fi, err := fsys.Stat(filepath.Join(modRoots[0], "vendor")); err == nil && fi.IsDir() {
+	if len(modRoots) >= 1 {
+		var goVersion string
+		var versionSource string
+		if inWorkspaceMode() {
+			versionSource = "go.work"
+			if wfg := MainModules.WorkFile().Go; wfg != nil {
+				goVersion = wfg.Version
+			}
+		} else {
+			versionSource = "go.mod"
+			index := MainModules.GetSingleIndexOrNil()
+			if index != nil {
+				goVersion = index.goVersion
+			}
+		}
+		vendorDir := ""
+		if workFilePath != "" {
+			vendorDir = filepath.Join(filepath.Dir(workFilePath), "vendor")
+		} else {
+			if len(modRoots) != 1 {
+				panic(fmt.Errorf("outside workspace mode, but have %v modRoots", modRoots))
+			}
+			vendorDir = filepath.Join(modRoots[0], "vendor")
+		}
+		if fi, err := fsys.Stat(vendorDir); err == nil && fi.IsDir() {
 			modGo := "unspecified"
-			if index != nil && index.goVersion != "" {
-				if gover.Compare(index.goVersion, "1.14") >= 0 {
-					// The Go version is at least 1.14, and a vendor directory exists.
-					// Set -mod=vendor by default.
-					cfg.BuildMod = "vendor"
-					cfg.BuildModReason = "Go version in go.mod is at least 1.14 and vendor directory exists."
-					return
+			if goVersion != "" {
+				if gover.Compare(goVersion, "1.14") < 0 {
+					// The go version is less than 1.14. Don't set -mod=vendor by default.
+					// Since a vendor directory exists, we should record why we didn't use it.
+					// This message won't normally be shown, but it may appear with import errors.
+					cfg.BuildModReason = fmt.Sprintf("Go version in "+versionSource+" is %s, so vendor directory was not used.", modGo)
 				} else {
-					modGo = index.goVersion
+					vendoredWorkspace, err := modulesTextIsForWorkspace(vendorDir)
+					if err != nil {
+						base.Fatalf("go: reading modules.txt for vendor directory: %v", err)
+					}
+					if vendoredWorkspace != (versionSource == "go.work") {
+						if vendoredWorkspace {
+							cfg.BuildModReason = "Outside workspace mode, but vendor directory is for a workspace."
+						} else {
+							cfg.BuildModReason = "In workspace mode, but vendor directory is not for a workspace"
+						}
+					} else {
+						// The Go version is at least 1.14, a vendor directory exists, and
+						// the modules.txt was generated in the same mode the command is running in.
+						// Set -mod=vendor by default.
+						cfg.BuildMod = "vendor"
+						cfg.BuildModReason = "Go version in " + versionSource + " is at least 1.14 and vendor directory exists."
+						return
+					}
 				}
+				modGo = goVersion
 			}
 
-			// Since a vendor directory exists, we should record why we didn't use it.
-			// This message won't normally be shown, but it may appear with import errors.
-			cfg.BuildModReason = fmt.Sprintf("Go version in go.mod is %s, so vendor directory was not used.", modGo)
 		}
 	}
 
 	cfg.BuildMod = "readonly"
 }
 
-func mustHaveCompleteRequirements() bool {
-	return cfg.BuildMod != "mod" && !inWorkspaceMode()
+func modulesTextIsForWorkspace(vendorDir string) (bool, error) {
+	f, err := fsys.Open(filepath.Join(vendorDir, "modules.txt"))
+	if errors.Is(err, os.ErrNotExist) {
+		// Some vendor directories exist that don't contain modules.txt.
+		// This mostly happens when converting to modules.
+		// We want to preserve the behavior that mod=vendor is set (even though
+		// readVendorList does nothing in that case).
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	var buf [512]byte
+	n, err := f.Read(buf[:])
+	if err != nil && err != io.EOF {
+		return false, err
+	}
+	line, _, _ := strings.Cut(string(buf[:n]), "\n")
+	if annotations, ok := strings.CutPrefix(line, "## "); ok {
+		for _, entry := range strings.Split(annotations, ";") {
+			entry = strings.TrimSpace(entry)
+			if entry == "workspace" {
+				return true, nil
+			}
+		}
+	}
+	return false, nil
 }
 
-// convertLegacyConfig imports module requirements from a legacy vendoring
-// configuration file, if one is present.
-func convertLegacyConfig(modFile *modfile.File, modRoot string) (from string, err error) {
-	noneSelected := func(path string) (version string) { return "none" }
-	queryPackage := func(path, rev string) (module.Version, error) {
-		pkgMods, modOnly, err := QueryPattern(context.Background(), path, rev, noneSelected, nil)
-		if err != nil {
-			return module.Version{}, err
-		}
-		if len(pkgMods) > 0 {
-			return pkgMods[0].Mod, nil
-		}
-		return modOnly.Mod, nil
-	}
-	for _, name := range altConfigs {
-		cfg := filepath.Join(modRoot, name)
-		data, err := os.ReadFile(cfg)
-		if err == nil {
-			convert := modconv.Converters[name]
-			if convert == nil {
-				return "", nil
-			}
-			cfg = filepath.ToSlash(cfg)
-			err := modconv.ConvertLegacyConfig(modFile, cfg, data, queryPackage)
-			return name, err
-		}
-	}
-	return "", nil
+func mustHaveCompleteRequirements() bool {
+	return cfg.BuildMod != "mod" && !inWorkspaceMode()
 }
 
 // addGoStmt adds a go directive to the go.mod file if it does not already
@@ -1417,17 +1491,6 @@
 }
 
 var altConfigs = []string{
-	"Gopkg.lock",
-
-	"GLOCKFILE",
-	"Godeps/Godeps.json",
-	"dependencies.tsv",
-	"glide.lock",
-	"vendor.conf",
-	"vendor.yml",
-	"vendor/manifest",
-	"vendor/vendor.json",
-
 	".git/config",
 }
 
diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go
index e8872ba..ef93c25 100644
--- a/src/cmd/go/internal/modload/list.go
+++ b/src/cmd/go/internal/modload/list.go
@@ -57,8 +57,7 @@
 				}
 				return nil, fmt.Errorf("parsing %s: %v", reuseFile, err)
 			}
-			if m.Origin == nil || !m.Origin.Checkable() {
-				// Nothing to check to validate reuse.
+			if m.Origin == nil {
 				continue
 			}
 			m.Reuse = true
diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go
index a993fe8..51eb141 100644
--- a/src/cmd/go/internal/modload/load.go
+++ b/src/cmd/go/internal/modload/load.go
@@ -571,7 +571,7 @@
 					return "", fmt.Errorf("without -mod=vendor, directory %s has no package path", absDir)
 				}
 
-				readVendorList(mainModule)
+				readVendorList(VendorDir())
 				if _, ok := vendorPkgModule[pkg]; !ok {
 					return "", fmt.Errorf("directory %s is not a package listed in vendor/modules.txt", absDir)
 				}
@@ -777,7 +777,7 @@
 				longestPrefixVersion = v
 				suffix := filepath.ToSlash(str.TrimFilePathPrefix(dir, modRoot))
 				if strings.HasPrefix(suffix, "vendor/") {
-					longestPrefixPath = strings.TrimPrefix(suffix, "vendor/")
+					longestPrefixPath = suffix[len("vendor/"):]
 					continue
 				}
 				longestPrefixPath = pathpkg.Join(mms.PathPrefix(v), suffix)
@@ -1354,6 +1354,15 @@
 				// In workspace mode / workspace pruning mode, the roots are the main modules
 				// rather than the main module's direct dependencies. The check below on the selected
 				// roots does not apply.
+				if cfg.BuildMod == "vendor" {
+					// In workspace vendor mode, we don't need to load the requirements of the workspace
+					// modules' dependencies, so the check below doesn't work. But that's okay, because
+					// checking whether modules are required directly for the purposes of pruning is
+					// less important in vendor mode: if we were able to load the package, we have
+					// everything we need to build the package, and dependencies' tests are pruned out
+					// of the vendor directory anyway.
+					continue
+				}
 				if mg, err := rs.Graph(ctx); err != nil {
 					return false, err
 				} else if _, ok := mg.RequiredBy(dep.mod); !ok {
diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go
index d6c395f..899f1b3 100644
--- a/src/cmd/go/internal/modload/modfile.go
+++ b/src/cmd/go/internal/modload/modfile.go
@@ -318,15 +318,22 @@
 // module.Version is relative it's relative to the single main module outside
 // workspace mode, or the workspace's directory in workspace mode.
 func Replacement(mod module.Version) module.Version {
+	r, foundModRoot, _ := replacementFrom(mod)
+	return canonicalizeReplacePath(r, foundModRoot)
+}
+
+// replacementFrom returns the replacement for mod, if any, the modroot of the replacement if it appeared in a go.mod,
+// and the source of the replacement. The replacement is relative to the go.work or go.mod file it appears in.
+func replacementFrom(mod module.Version) (r module.Version, modroot string, fromFile string) {
 	foundFrom, found, foundModRoot := "", module.Version{}, ""
 	if MainModules == nil {
-		return module.Version{}
+		return module.Version{}, "", ""
 	} else if MainModules.Contains(mod.Path) && mod.Version == "" {
 		// Don't replace the workspace version of the main module.
-		return module.Version{}
+		return module.Version{}, "", ""
 	}
 	if _, r, ok := replacement(mod, MainModules.WorkFileReplaceMap()); ok {
-		return r
+		return r, "", workFilePath
 	}
 	for _, v := range MainModules.Versions() {
 		if index := MainModules.Index(v); index != nil {
@@ -335,13 +342,13 @@
 				if foundModRoot != "" && foundFrom != from && found != r {
 					base.Errorf("conflicting replacements found for %v in workspace modules defined by %v and %v",
 						mod, modFilePath(foundModRoot), modFilePath(modRoot))
-					return canonicalizeReplacePath(found, foundModRoot)
+					return found, foundModRoot, modFilePath(foundModRoot)
 				}
 				found, foundModRoot = r, modRoot
 			}
 		}
 	}
-	return canonicalizeReplacePath(found, foundModRoot)
+	return found, foundModRoot, modFilePath(foundModRoot)
 }
 
 func replaceRelativeTo() string {
@@ -355,7 +362,7 @@
 // are relative to the workspace directory (in workspace mode) or to the module's
 // directory (in module mode, as they already are).
 func canonicalizeReplacePath(r module.Version, modRoot string) module.Version {
-	if filepath.IsAbs(r.Path) || r.Version != "" {
+	if filepath.IsAbs(r.Path) || r.Version != "" || modRoot == "" {
 		return r
 	}
 	workFilePath := WorkFilePath()
@@ -364,11 +371,11 @@
 	}
 	abs := filepath.Join(modRoot, r.Path)
 	if rel, err := filepath.Rel(filepath.Dir(workFilePath), abs); err == nil {
-		return module.Version{Path: rel, Version: r.Version}
+		return module.Version{Path: ToDirectoryPath(rel), Version: r.Version}
 	}
 	// We couldn't make the version's path relative to the workspace's path,
 	// so just return the absolute path. It's the best we can do.
-	return module.Version{Path: abs, Version: r.Version}
+	return module.Version{Path: ToDirectoryPath(abs), Version: r.Version}
 }
 
 // resolveReplacement returns the module actually used to load the source code
@@ -549,7 +556,7 @@
 			module: module.Version{Path: m.Path},
 		}
 
-		readVendorList(MainModules.mustGetSingleMainModule())
+		readVendorList(VendorDir())
 		if vendorVersion[m.Path] != m.Version {
 			// This module is not vendored, so packages cannot be loaded from it and
 			// it cannot be relevant to the build.
@@ -797,7 +804,7 @@
 // an absolute path or a relative path starting with a '.' or '..'
 // path component.
 func ToDirectoryPath(path string) string {
-	if path == "." || modfile.IsDirectoryPath(path) {
+	if modfile.IsDirectoryPath(path) {
 		return path
 	}
 	// The path is not a relative path or an absolute path, so make it relative
diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go
index f8ddf11..c4cf554 100644
--- a/src/cmd/go/internal/modload/query.go
+++ b/src/cmd/go/internal/modload/query.go
@@ -98,18 +98,83 @@
 	return info, err
 }
 
-// checkReuse checks whether a revision of a given module or a version list
+// checkReuse checks whether a revision
 // for a given module may be reused, according to the information in origin.
-func checkReuse(ctx context.Context, path string, old *codehost.Origin) error {
+func checkReuse(ctx context.Context, m module.Version, old *codehost.Origin) error {
 	return modfetch.TryProxies(func(proxy string) error {
-		repo, err := lookupRepo(ctx, proxy, path)
+		repo, err := lookupRepo(ctx, proxy, m.Path)
 		if err != nil {
 			return err
 		}
-		return repo.CheckReuse(ctx, old)
+		return checkReuseRepo(ctx, repo, m.Path, m.Version, old)
 	})
 }
 
+func checkReuseRepo(ctx context.Context, repo versionRepo, path, query string, origin *codehost.Origin) error {
+	if origin == nil {
+		return errors.New("nil Origin")
+	}
+
+	// Ensure that the Origin actually includes enough fields to resolve the query.
+	// If we got the previous Origin data from a proxy, it may be missing something
+	// that we would have needed to resolve the query directly from the repo.
+	switch {
+	case origin.RepoSum != "":
+		// A RepoSum is always acceptable, since it incorporates everything
+		// (and is often associated with an error result).
+
+	case query == module.CanonicalVersion(query):
+		// This query refers to a specific version, and Go module versions
+		// are supposed to be cacheable and immutable (confirmed with checksums).
+		// If the version exists at all, we shouldn't need any extra information
+		// to identify which commit it resolves to.
+		//
+		// It may be associated with a Ref for a semantic-version tag, but if so
+		// we don't expect that tag to change in the future. We also don't need a
+		// TagSum: if a tag is removed from some ancestor commit, the version may
+		// change from valid to invalid, but we're ok with keeping stale versions
+		// as long as they were valid at some point in the past.
+		//
+		// If the version did not successfully resolve, the origin may indicate
+		// a TagSum and/or RepoSum instead of a Hash, in which case we still need
+		// to check those to ensure that the error is still applicable.
+		if origin.Hash == "" && origin.Ref == "" && origin.TagSum == "" {
+			return errors.New("no Origin information to check")
+		}
+
+	case IsRevisionQuery(path, query):
+		// This query may refer to a branch, non-version tag, or commit ID.
+		//
+		// If it is a commit ID, we expect to see a Hash in the Origin data. On
+		// the other hand, if it is not a commit ID, we expect to see either a Ref
+		// (for a positive result) or a RepoSum (for a negative result), since
+		// we don't expect refs in general to remain stable over time.
+		if origin.Hash == "" && origin.Ref == "" {
+			return fmt.Errorf("query %q requires a Hash or Ref", query)
+		}
+		// Once we resolve the query to a particular commit, we will need to
+		// also identify the most appropriate version to assign to that commit.
+		// (It may correspond to more than one valid version.)
+		//
+		// The most appropriate version depends on the tags associated with
+		// both the commit itself (if the commit is a tagged version)
+		// and its ancestors (if we need to produce a pseudo-version for it).
+		if origin.TagSum == "" {
+			return fmt.Errorf("query %q requires a TagSum", query)
+		}
+
+	default:
+		// The query may be "latest" or a version inequality or prefix.
+		// Its result depends on the absence of higher tags matching the query,
+		// not just the state of an individual ref or tag.
+		if origin.TagSum == "" {
+			return fmt.Errorf("query %q requires a TagSum", query)
+		}
+	}
+
+	return repo.CheckReuse(ctx, origin)
+}
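
A rough standalone approximation of these field requirements (it uses golang.org/x/mod for version canonicalization, does not model IsRevisionQuery, which is internal to cmd/go, and the origin type is a simplified stand-in):

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

// origin is a simplified stand-in for codehost.Origin.
type origin struct {
	Hash, Ref, TagSum, RepoSum string
}

// enoughToReuse approximates the checks above: a RepoSum is always sufficient;
// a canonical version needs any of Hash, Ref, or TagSum; everything else
// ("latest", branches, inequalities) is treated here as needing a TagSum.
func enoughToReuse(query string, o origin) bool {
	switch {
	case o.RepoSum != "":
		return true
	case query == module.CanonicalVersion(query):
		return o.Hash != "" || o.Ref != "" || o.TagSum != ""
	default:
		return o.TagSum != ""
	}
}

func main() {
	cached := origin{Ref: "refs/tags/v1.2.3"}
	fmt.Println(enoughToReuse("v1.2.3", cached)) // true: specific versions are immutable
	fmt.Println(enoughToReuse("latest", cached)) // false: "latest" depends on the full tag set
}
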
+
 // AllowedFunc is used by Query and other functions to filter out unsuitable
 // versions, for example, those listed in exclude directives in the main
 // module's go.mod file.
@@ -164,7 +229,7 @@
 	}
 
 	if old := reuse[module.Version{Path: path, Version: query}]; old != nil {
-		if err := repo.CheckReuse(ctx, old.Origin); err == nil {
+		if err := checkReuseRepo(ctx, repo, path, query, old.Origin); err == nil {
 			info := &modfetch.RevInfo{
 				Version: old.Version,
 				Origin:  old.Origin,
@@ -216,34 +281,35 @@
 	if err != nil {
 		return nil, err
 	}
-	revErr := &modfetch.RevInfo{Origin: versions.Origin} // RevInfo to return with error
+	origin := versions.Origin
+
+	revWithOrigin := func(rev *modfetch.RevInfo) *modfetch.RevInfo {
+		if rev == nil {
+			if origin == nil {
+				return nil
+			}
+			return &modfetch.RevInfo{Origin: origin}
+		}
+
+		clone := *rev
+		clone.Origin = origin
+		return &clone
+	}
 
 	releases, prereleases, err := qm.filterVersions(ctx, versions.List)
 	if err != nil {
-		return revErr, err
-	}
-
-	mergeRevOrigin := func(rev *modfetch.RevInfo, origin *codehost.Origin) *modfetch.RevInfo {
-		merged := mergeOrigin(rev.Origin, origin)
-		if merged == rev.Origin {
-			return rev
-		}
-		clone := new(modfetch.RevInfo)
-		*clone = *rev
-		clone.Origin = merged
-		return clone
+		return revWithOrigin(nil), err
 	}
 
 	lookup := func(v string) (*modfetch.RevInfo, error) {
 		rev, err := repo.Stat(ctx, v)
-		// Stat can return a non-nil rev and a non-nil err,
-		// in order to provide origin information to make the error cacheable.
-		if rev == nil && err != nil {
-			return revErr, err
+		if rev != nil {
+			// Note that Stat can return a non-nil rev and a non-nil err,
+			// in order to provide origin information to make the error cacheable.
+			origin = mergeOrigin(origin, rev.Origin)
 		}
-		rev = mergeRevOrigin(rev, versions.Origin)
 		if err != nil {
-			return rev, err
+			return revWithOrigin(nil), err
 		}
 
 		if (query == "upgrade" || query == "patch") && module.IsPseudoVersion(current) && !rev.Time.IsZero() {
@@ -268,18 +334,20 @@
 			currentTime, err := module.PseudoVersionTime(current)
 			if err == nil && rev.Time.Before(currentTime) {
 				if err := allowed(ctx, module.Version{Path: path, Version: current}); errors.Is(err, ErrDisallowed) {
-					return revErr, err
+					return revWithOrigin(nil), err
 				}
 				rev, err = repo.Stat(ctx, current)
-				if rev == nil && err != nil {
-					return revErr, err
+				if rev != nil {
+					origin = mergeOrigin(origin, rev.Origin)
 				}
-				rev = mergeRevOrigin(rev, versions.Origin)
-				return rev, err
+				if err != nil {
+					return revWithOrigin(nil), err
+				}
+				return revWithOrigin(rev), nil
 			}
 		}
 
-		return rev, nil
+		return revWithOrigin(rev), nil
 	}
 
 	if qm.preferLower {
@@ -300,24 +368,27 @@
 
 	if qm.mayUseLatest {
 		latest, err := repo.Latest(ctx)
+		if latest != nil {
+			origin = mergeOrigin(origin, latest.Origin)
+		}
 		if err == nil {
 			if qm.allowsVersion(ctx, latest.Version) {
 				return lookup(latest.Version)
 			}
 		} else if !errors.Is(err, fs.ErrNotExist) {
-			return revErr, err
+			return revWithOrigin(nil), err
 		}
 	}
 
 	if (query == "upgrade" || query == "patch") && current != "" && current != "none" {
 		// "upgrade" and "patch" may stay on the current version if allowed.
 		if err := allowed(ctx, module.Version{Path: path, Version: current}); errors.Is(err, ErrDisallowed) {
-			return nil, err
+			return revWithOrigin(nil), err
 		}
 		return lookup(current)
 	}
 
-	return revErr, &NoMatchingVersionError{query: query, current: current}
+	return revWithOrigin(nil), &NoMatchingVersionError{query: query, current: current}
 }
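
A small self-contained sketch of the revWithOrigin pattern introduced above (the types and the stat callback are simplified stand-ins): origin data gathered from repository calls is attached to every return value, including error returns, so that negative results stay cacheable.

package main

import (
	"errors"
	"fmt"
)

// revInfo is a simplified stand-in for modfetch.RevInfo; Origin here is just
// a list of strings rather than a codehost.Origin.
type revInfo struct {
	Version string
	Origin  []string
}

// resolve attaches the accumulated origin metadata to whatever it returns,
// success or failure, mirroring the shape of the revWithOrigin helper.
func resolve(query string, stat func(string) (*revInfo, error)) (*revInfo, error) {
	var origin []string

	revWithOrigin := func(rev *revInfo) *revInfo {
		if rev == nil {
			return &revInfo{Origin: origin}
		}
		clone := *rev
		clone.Origin = origin
		return &clone
	}

	rev, err := stat(query)
	if rev != nil {
		// Keep whatever origin data the call produced, even if it also failed.
		origin = append(origin, "stat "+query)
	}
	if err != nil {
		return revWithOrigin(nil), err
	}
	return revWithOrigin(rev), nil
}

func main() {
	missing := func(string) (*revInfo, error) {
		return &revInfo{}, errors.New("unknown revision")
	}
	rev, err := resolve("v9.9.9", missing)
	fmt.Println(err, rev.Origin) // the error result still carries origin data
}
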
 
 // IsRevisionQuery returns true if vers is a version query that may refer to
@@ -820,11 +891,12 @@
 	// is most likely to find helpful: the most useful class of error at the
 	// longest matching path.
 	var (
-		noPackage   *PackageNotInModuleError
-		noVersion   *NoMatchingVersionError
-		noPatchBase *NoPatchBaseError
-		invalidPath *module.InvalidPathError // see comment in case below
-		notExistErr error
+		noPackage      *PackageNotInModuleError
+		noVersion      *NoMatchingVersionError
+		noPatchBase    *NoPatchBaseError
+		invalidPath    *module.InvalidPathError // see comment in case below
+		invalidVersion error
+		notExistErr    error
 	)
 	for _, r := range results {
 		switch rErr := r.err.(type) {
@@ -860,6 +932,10 @@
 				if notExistErr == nil {
 					notExistErr = rErr
 				}
+			} else if iv := (*module.InvalidVersionError)(nil); errors.As(rErr, &iv) {
+				if invalidVersion == nil {
+					invalidVersion = rErr
+				}
 			} else if err == nil {
 				if len(found) > 0 || noPackage != nil {
 					// golang.org/issue/34094: If we have already found a module that
@@ -890,6 +966,8 @@
 			err = noPatchBase
 		case invalidPath != nil:
 			err = invalidPath
+		case invalidVersion != nil:
+			err = invalidVersion
 		case notExistErr != nil:
 			err = notExistErr
 		default:
diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go
index cb03b69..d392b5b 100644
--- a/src/cmd/go/internal/modload/search.go
+++ b/src/cmd/go/internal/modload/search.go
@@ -164,10 +164,13 @@
 	}
 
 	if cfg.BuildMod == "vendor" {
-		mod := MainModules.mustGetSingleMainModule()
-		if modRoot := MainModules.ModRoot(mod); modRoot != "" {
-			walkPkgs(modRoot, MainModules.PathPrefix(mod), pruneGoMod|pruneVendor)
-			walkPkgs(filepath.Join(modRoot, "vendor"), "", pruneVendor)
+		for _, mod := range MainModules.Versions() {
+			if modRoot := MainModules.ModRoot(mod); modRoot != "" {
+				walkPkgs(modRoot, MainModules.PathPrefix(mod), pruneGoMod|pruneVendor)
+			}
+		}
+		if HasModRoot() {
+			walkPkgs(VendorDir(), "", pruneVendor)
 		}
 		return
 	}
@@ -210,8 +213,6 @@
 		}
 		walkPkgs(root, modPrefix, prune)
 	}
-
-	return
 }
 
 // walkFromIndex matches packages in a module using the module index. modroot
diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go
index ffc79bb..b2cb441 100644
--- a/src/cmd/go/internal/modload/vendor.go
+++ b/src/cmd/go/internal/modload/vendor.go
@@ -37,13 +37,13 @@
 }
 
 // readVendorList reads the list of vendored modules from vendor/modules.txt.
-func readVendorList(mainModule module.Version) {
+func readVendorList(vendorDir string) {
 	vendorOnce.Do(func() {
 		vendorList = nil
 		vendorPkgModule = make(map[string]module.Version)
 		vendorVersion = make(map[string]string)
 		vendorMeta = make(map[module.Version]vendorMetadata)
-		vendorFile := filepath.Join(MainModules.ModRoot(mainModule), "vendor/modules.txt")
+		vendorFile := filepath.Join(vendorDir, "modules.txt")
 		data, err := os.ReadFile(vendorFile)
 		if err != nil {
 			if !errors.Is(err, fs.ErrNotExist) {
@@ -140,15 +140,31 @@
 // checkVendorConsistency verifies that the vendor/modules.txt file matches (if
 // go 1.14) or at least does not contradict (go 1.13 or earlier) the
 // requirements and replacements listed in the main module's go.mod file.
-func checkVendorConsistency(index *modFileIndex, modFile *modfile.File) {
-	readVendorList(MainModules.mustGetSingleMainModule())
+func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, modRoots []string) {
+	// readVendorList only needs the main module to get the directory
+	// the vendor directory is in.
+	readVendorList(VendorDir())
+
+	if len(modFiles) < 1 {
+		// We should never get here if there are zero modfiles. Either
+		// we're in single module mode and there's a single module, or
+		// we're in workspace mode, and we fail earlier reporting that
+		// "no modules were found in the current workspace".
+		panic("checkVendorConsistency called with zero modfiles")
+	}
 
 	pre114 := false
-	if gover.Compare(index.goVersion, "1.14") < 0 {
-		// Go versions before 1.14 did not include enough information in
-		// vendor/modules.txt to check for consistency.
-		// If we know that we're on an earlier version, relax the consistency check.
-		pre114 = true
+	if !inWorkspaceMode() { // workspace mode was added after Go 1.14
+		if len(indexes) != 1 {
+			panic(fmt.Errorf("not in workspace mode but number of indexes is %v, not 1", len(indexes)))
+		}
+		index := indexes[0]
+		if gover.Compare(index.goVersion, "1.14") < 0 {
+			// Go versions before 1.14 did not include enough information in
+			// vendor/modules.txt to check for consistency.
+			// If we know that we're on an earlier version, relax the consistency check.
+			pre114 = true
+		}
 	}
 
 	vendErrors := new(strings.Builder)
@@ -163,18 +179,20 @@
 
 	// Iterate over the Require directives in their original (not indexed) order
 	// so that the errors match the original file.
-	for _, r := range modFile.Require {
-		if !vendorMeta[r.Mod].Explicit {
-			if pre114 {
-				// Before 1.14, modules.txt did not indicate whether modules were listed
-				// explicitly in the main module's go.mod file.
-				// However, we can at least detect a version mismatch if packages were
-				// vendored from a non-matching version.
-				if vv, ok := vendorVersion[r.Mod.Path]; ok && vv != r.Mod.Version {
-					vendErrorf(r.Mod, fmt.Sprintf("is explicitly required in go.mod, but vendor/modules.txt indicates %s@%s", r.Mod.Path, vv))
+	for _, modFile := range modFiles {
+		for _, r := range modFile.Require {
+			if !vendorMeta[r.Mod].Explicit {
+				if pre114 {
+					// Before 1.14, modules.txt did not indicate whether modules were listed
+					// explicitly in the main module's go.mod file.
+					// However, we can at least detect a version mismatch if packages were
+					// vendored from a non-matching version.
+					if vv, ok := vendorVersion[r.Mod.Path]; ok && vv != r.Mod.Version {
+						vendErrorf(r.Mod, fmt.Sprintf("is explicitly required in go.mod, but vendor/modules.txt indicates %s@%s", r.Mod.Path, vv))
+					}
+				} else {
+					vendErrorf(r.Mod, "is explicitly required in go.mod, but not marked as explicit in vendor/modules.txt")
 				}
-			} else {
-				vendErrorf(r.Mod, "is explicitly required in go.mod, but not marked as explicit in vendor/modules.txt")
 			}
 		}
 	}
@@ -190,42 +208,77 @@
 	// don't directly apply to any module in the vendor list, the replacement
 	// go.mod file can affect the selected versions of other (transitive)
 	// dependencies
-	for _, r := range modFile.Replace {
-		vr := vendorMeta[r.Old].Replacement
-		if vr == (module.Version{}) {
-			if pre114 && (r.Old.Version == "" || vendorVersion[r.Old.Path] != r.Old.Version) {
-				// Before 1.14, modules.txt omitted wildcard replacements and
-				// replacements for modules that did not have any packages to vendor.
-			} else {
-				vendErrorf(r.Old, "is replaced in go.mod, but not marked as replaced in vendor/modules.txt")
+	seenrep := make(map[module.Version]bool)
+	checkReplace := func(replaces []*modfile.Replace) {
+		for _, r := range replaces {
+			if seenrep[r.Old] {
+				continue // Don't print the same error more than once
 			}
-		} else if vr != r.New {
-			vendErrorf(r.Old, "is replaced by %s in go.mod, but marked as replaced by %s in vendor/modules.txt", describe(r.New), describe(vr))
+			seenrep[r.Old] = true
+			rNew, modRoot, replacementSource := replacementFrom(r.Old)
+			rNewCanonical := canonicalizeReplacePath(rNew, modRoot)
+			vr := vendorMeta[r.Old].Replacement
+			if vr == (module.Version{}) {
+				if rNewCanonical == (module.Version{}) {
+					// r.Old is not actually replaced. It might be a main module.
+					// Don't return an error.
+				} else if pre114 && (r.Old.Version == "" || vendorVersion[r.Old.Path] != r.Old.Version) {
+					// Before 1.14, modules.txt omitted wildcard replacements and
+					// replacements for modules that did not have any packages to vendor.
+				} else {
+					vendErrorf(r.Old, "is replaced in %s, but not marked as replaced in vendor/modules.txt", base.ShortPath(replacementSource))
+				}
+			} else if vr != rNewCanonical {
+				vendErrorf(r.Old, "is replaced by %s in %s, but marked as replaced by %s in vendor/modules.txt", describe(rNew), base.ShortPath(replacementSource), describe(vr))
+			}
 		}
 	}
+	for _, modFile := range modFiles {
+		checkReplace(modFile.Replace)
+	}
+	if MainModules.workFile != nil {
+		checkReplace(MainModules.workFile.Replace)
+	}
 
 	for _, mod := range vendorList {
 		meta := vendorMeta[mod]
 		if meta.Explicit {
-			if _, inGoMod := index.require[mod]; !inGoMod {
-				vendErrorf(mod, "is marked as explicit in vendor/modules.txt, but not explicitly required in go.mod")
+			// in workspace mode, check that it's required by at least one of the main modules
+			var foundRequire bool
+			for _, index := range indexes {
+				if _, inGoMod := index.require[mod]; inGoMod {
+					foundRequire = true
+				}
 			}
+			if !foundRequire {
+				article := ""
+				if inWorkspaceMode() {
+					article = "a "
+				}
+				vendErrorf(mod, "is marked as explicit in vendor/modules.txt, but not explicitly required in %vgo.mod", article)
+			}
+
 		}
 	}
 
 	for _, mod := range vendorReplaced {
 		r := Replacement(mod)
+		replacementSource := "go.mod"
+		if inWorkspaceMode() {
+			replacementSource = "the workspace"
+		}
 		if r == (module.Version{}) {
-			vendErrorf(mod, "is marked as replaced in vendor/modules.txt, but not replaced in go.mod")
+			vendErrorf(mod, "is marked as replaced in vendor/modules.txt, but not replaced in %s", replacementSource)
 			continue
 		}
-		if meta := vendorMeta[mod]; r != meta.Replacement {
-			vendErrorf(mod, "is marked as replaced by %s in vendor/modules.txt, but replaced by %s in go.mod", describe(meta.Replacement), describe(r))
-		}
+		// If both replacements exist, we've already reported that they're different above.
 	}
 
 	if vendErrors.Len() > 0 {
-		modRoot := MainModules.ModRoot(MainModules.mustGetSingleMainModule())
-		base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo mod vendor", modRoot, vendErrors)
+		subcmd := "mod"
+		if inWorkspaceMode() {
+			subcmd = "work"
+		}
+		base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir()), vendErrors, subcmd)
 	}
 }
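
A toy version of the explicit-requirement check above, showing why workspace mode loops over all of the main modules' indexes (module paths are hypothetical): a module marked explicit in vendor/modules.txt only has to be required by at least one go.mod in the workspace.

package main

import "fmt"

func main() {
	requireByMainModule := []map[string]bool{
		{"example.com/x": true},                        // requirements of main module a
		{"example.com/y": true, "example.com/z": true}, // requirements of main module b
	}
	explicitInVendor := []string{"example.com/x", "example.com/w"}

	for _, mod := range explicitInVendor {
		found := false
		for _, require := range requireByMainModule {
			if require[mod] {
				found = true
			}
		}
		if !found {
			fmt.Printf("%s is marked as explicit in vendor/modules.txt, but not explicitly required in a go.mod\n", mod)
		}
	}
}
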
diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go
index 4a3dcf0..a97d975 100644
--- a/src/cmd/go/internal/run/run.go
+++ b/src/cmd/go/internal/run/run.go
@@ -7,9 +7,7 @@
 
 import (
 	"context"
-	"fmt"
 	"go/build"
-	"os"
 	"path"
 	"path/filepath"
 	"strings"
@@ -75,10 +73,6 @@
 	CmdRun.Flag.Var((*base.StringsFlag)(&work.ExecCmd), "exec", "")
 }
 
-func printStderr(args ...any) (int, error) {
-	return fmt.Fprint(os.Stderr, args...)
-}
-
 func runRun(ctx context.Context, cmd *base.Command, args []string) {
 	if shouldUseOutsideModuleMode(args) {
 		// Set global module flags for 'go run cmd@version'.
@@ -100,7 +94,6 @@
 			base.Fatal(err)
 		}
 	}()
-	b.Print = printStderr
 
 	i := 0
 	for i < len(args) && strings.HasSuffix(args[i], ".go") {
@@ -208,7 +201,7 @@
 func buildRunProgram(b *work.Builder, ctx context.Context, a *work.Action) error {
 	cmdline := str.StringList(work.FindExecCmd(), a.Deps[0].Target, a.Args)
 	if cfg.BuildN || cfg.BuildX {
-		b.Showcmd("", "%s", strings.Join(cmdline, " "))
+		b.Shell(a).ShowCmd("", "%s", strings.Join(cmdline, " "))
 		if cfg.BuildN {
 			return nil
 		}
diff --git a/src/cmd/go/internal/script/cmds.go b/src/cmd/go/internal/script/cmds.go
index 36e16c5..ecd35ff 100644
--- a/src/cmd/go/internal/script/cmds.go
+++ b/src/cmd/go/internal/script/cmds.go
@@ -5,6 +5,7 @@
 package script
 
 import (
+	"cmd/go/internal/cfg"
 	"cmd/go/internal/robustio"
 	"errors"
 	"fmt"
@@ -824,7 +825,7 @@
 		},
 		func(s *State, args ...string) (WaitFunc, error) {
 			lookPathOnce.Do(func() {
-				path, pathErr = exec.LookPath(name)
+				path, pathErr = cfg.LookPath(name)
 			})
 			if pathErr != nil {
 				return nil, pathErr
diff --git a/src/cmd/go/internal/script/engine.go b/src/cmd/go/internal/script/engine.go
index 43054a2..ba82171 100644
--- a/src/cmd/go/internal/script/engine.go
+++ b/src/cmd/go/internal/script/engine.go
@@ -322,7 +322,7 @@
 	background bool     // command should run in background (ends with a trailing &)
 }
 
-// A expectedStatus describes the expected outcome of a command.
+// An expectedStatus describes the expected outcome of a command.
 // Script execution halts when a command does not match its expected status.
 type expectedStatus string
 
diff --git a/src/cmd/go/internal/script/scripttest/scripttest.go b/src/cmd/go/internal/script/scripttest/scripttest.go
index 0696624..6d7bd78 100644
--- a/src/cmd/go/internal/script/scripttest/scripttest.go
+++ b/src/cmd/go/internal/script/scripttest/scripttest.go
@@ -7,10 +7,10 @@
 
 import (
 	"bufio"
+	"cmd/go/internal/cfg"
 	"cmd/go/internal/script"
 	"errors"
 	"io"
-	"os/exec"
 	"strings"
 	"testing"
 )
@@ -137,7 +137,7 @@
 	return script.CachedCondition(
 		"<suffix> names an executable in the test binary's PATH",
 		func(name string) (bool, error) {
-			_, err := exec.LookPath(name)
+			_, err := cfg.LookPath(name)
 			return err == nil, nil
 		})
 }
diff --git a/src/cmd/go/internal/test/flagdefs.go b/src/cmd/go/internal/test/flagdefs.go
index 947c27e..baa0cdf 100644
--- a/src/cmd/go/internal/test/flagdefs.go
+++ b/src/cmd/go/internal/test/flagdefs.go
@@ -40,6 +40,7 @@
 }
 
 var passAnalyzersToVet = map[string]bool{
+	"appends":          true,
 	"asmdecl":          true,
 	"assign":           true,
 	"atomic":           true,
@@ -50,6 +51,7 @@
 	"cgocall":          true,
 	"composites":       true,
 	"copylocks":        true,
+	"defers":           true,
 	"directive":        true,
 	"errorsas":         true,
 	"framepointer":     true,
diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go
index 3bce026..8a40547 100644
--- a/src/cmd/go/internal/test/test.go
+++ b/src/cmd/go/internal/test/test.go
@@ -9,6 +9,7 @@
 	"context"
 	"errors"
 	"fmt"
+	"internal/coverage"
 	"internal/platform"
 	"io"
 	"io/fs"
@@ -848,6 +849,7 @@
 	}()
 
 	var builds, runs, prints []*work.Action
+	var writeCoverMetaAct *work.Action
 
 	if cfg.BuildCoverPkg != nil {
 		match := make([]func(*load.Package) bool, len(cfg.BuildCoverPkg))
@@ -859,6 +861,61 @@
 		// patterns.
 		plist := load.TestPackageList(ctx, pkgOpts, pkgs)
 		testCoverPkgs = load.SelectCoverPackages(plist, match, "test")
+		if cfg.Experiment.CoverageRedesign && len(testCoverPkgs) > 0 {
+			// create a new singleton action that will collect up the
+			// meta-data files from all of the packages mentioned in
+			// "-coverpkg" and write them to a summary file. This new
+			// action will depend on all the build actions for the
+			// test packages, and all the run actions for these
+			// packages will depend on it. Motivating example:
+			// suppose we have a top level directory with three
+			// package subdirs, "a", "b", and "c", and
+			// from the top level, a user runs "go test -coverpkg=./... ./...".
+			// This will result in (roughly) the following action graph:
+			//
+			//	build("a")       build("b")         build("c")
+			//	    |               |                   |
+			//	link("a.test")   link("b.test")     link("c.test")
+			//	    |               |                   |
+			//	run("a.test")    run("b.test")      run("c.test")
+			//	    |               |                   |
+			//	  print          print              print
+			//
+			// When -coverpkg=<pattern> is in effect, we want to
+			// express the coverage percentage for each package as a
+			// fraction of *all* the statements that match the
+			// pattern, hence if "c" doesn't import "a", we need to
+			// pass a meta-data file for "a" (emitted during the
+			// package "a" build) to the package "c" run action, so
+			// that it can be incorporated with "c"'s regular
+			// metadata. To do this, we add edges from each compile
+			// action to a "writeCoverMeta" action, then from the
+			// writeCoverMeta action to each run action. Updated
+			// graph:
+			//
+			//	build("a")       build("b")         build("c")
+			//	    |   \       /   |               /   |
+			//	    |    v     v    |              /    |
+			//	    |   writemeta <-|-------------+     |
+			//	    |         |||   |                   |
+			//	    |         ||\   |                   |
+			//	link("a.test")/\ \  link("b.test")      link("c.test")
+			//	    |        /  \ +-|--------------+    |
+			//	    |       /    \  |               \   |
+			//	    |      v      v |                v  |
+			//	run("a.test")    run("b.test")      run("c.test")
+			//	    |               |                   |
+			//	  print          print              print
+			//
+			writeCoverMetaAct = &work.Action{
+				Mode:   "write coverage meta-data file",
+				Actor:  work.ActorFunc(work.WriteCoverMetaFilesFile),
+				Objdir: b.NewObjdir(),
+			}
+			for _, p := range testCoverPkgs {
+				p.Internal.Cover.GenMeta = true
+			}
+		}
 	}
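
The action graph in the comment above can be modeled with the following toy construction (a simplified stand-in for work.Action, with the link steps omitted and package names invented): every per-package build feeds the singleton write-meta action, and every run action consumes it.

package main

import "fmt"

// action is a simplified stand-in for work.Action: a description plus deps.
type action struct {
	Mode string
	Deps []*action
}

func main() {
	// Hypothetical packages a, b, c tested with "go test -coverpkg=./... ./...".
	writeMeta := &action{Mode: "write coverage meta-data file"}
	var runs []*action
	for _, pkg := range []string{"a", "b", "c"} {
		build := &action{Mode: "build " + pkg}
		// The singleton meta-data action depends on every package build...
		writeMeta.Deps = append(writeMeta.Deps, build)
		// ...and every run depends on its own build plus the meta-data action,
		// so each test binary sees meta-data for all packages matched by -coverpkg.
		runs = append(runs, &action{Mode: "run " + pkg + ".test", Deps: []*action{build, writeMeta}})
	}
	for _, r := range runs {
		for _, d := range r.Deps {
			fmt.Println(r.Mode, "depends on", d.Mode)
		}
	}
}
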
 
 	// Inform the compiler that it should instrument the binary at
@@ -896,17 +953,39 @@
 		}
 	}
 
+	if cfg.BuildCover {
+		for _, p := range pkgs {
+			// sync/atomic import is inserted by the cover tool if
+			// we're using atomic mode (and not compiling
+			// sync/atomic package itself). See #18486 and #57445.
+			// Note that this needs to be done prior to any of the
+			// builderTest invocations below, due to the fact that
+			// a given package in the 'pkgs' list may import
+			// package Q which appears later in the list (if this
+			// happens we'll wind up building the Q compile action
+			// before updating its deps to include sync/atomic).
+			if cfg.BuildCoverMode == "atomic" && p.ImportPath != "sync/atomic" {
+				load.EnsureImport(p, "sync/atomic")
+			}
+			// Tag the package for static meta-data generation if no
+			// test files (this works only with the new coverage
+			// design). Do this here (as opposed to in builderTest) so
+			// as to handle the case where we're testing multiple
+			// packages and one of the earlier packages imports a
+			// later package. Note that if -coverpkg is in effect
+			// p.Internal.Cover.GenMeta will wind up being set for
+			// all matching packages.
+			if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 &&
+				cfg.BuildCoverPkg == nil &&
+				cfg.Experiment.CoverageRedesign {
+				p.Internal.Cover.GenMeta = true
+			}
+		}
+	}
+
 	// Prepare build + run + print actions for all packages being tested.
 	for _, p := range pkgs {
-		// sync/atomic import is inserted by the cover tool if we're
-		// using atomic mode (and not compiling sync/atomic package itself).
-		// See #18486 and #57445.
-		if cfg.BuildCover && cfg.BuildCoverMode == "atomic" &&
-			p.ImportPath != "sync/atomic" {
-			load.EnsureImport(p, "sync/atomic")
-		}
-
-		buildTest, runTest, printTest, err := builderTest(b, ctx, pkgOpts, p, allImports[p])
+		buildTest, runTest, printTest, err := builderTest(b, ctx, pkgOpts, p, allImports[p], writeCoverMetaAct)
 		if err != nil {
 			str := err.Error()
 			str = strings.TrimPrefix(str, "\n")
@@ -968,16 +1047,39 @@
 	"update",
 }
 
-func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package, imported bool) (buildAction, runAction, printAction *work.Action, err error) {
+func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package, imported bool, writeCoverMetaAct *work.Action) (buildAction, runAction, printAction *work.Action, err error) {
 	if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
+		if cfg.BuildCover && cfg.Experiment.CoverageRedesign {
+			if p.Internal.Cover.GenMeta {
+				p.Internal.Cover.Mode = cfg.BuildCoverMode
+			}
+		}
 		build := b.CompileAction(work.ModeBuild, work.ModeBuild, p)
 		run := &work.Action{
 			Mode:       "test run",
 			Actor:      new(runTestActor),
 			Deps:       []*work.Action{build},
+			Objdir:     b.NewObjdir(),
 			Package:    p,
 			IgnoreFail: true, // run (prepare output) even if build failed
 		}
+		if writeCoverMetaAct != nil {
+			// There is no real "run" for this package (since there
+			// are no tests), but if coverage is turned on, we can
+			// collect coverage data for the code in the package by
+			// asking cmd/cover for a static meta-data file as part of
+			// the package build. This static meta-data file is then
+			// consumed by a pseudo-action (writeCoverMetaAct) that
+			// adds it to a summary file, then this summary file is
+			// consumed by the various "run test" actions. Below we
+			// add a dependence edge between the build action and the
+			// "write meta files" pseudo-action, and then another dep
+			// from writeCoverMetaAct to the run action. See the
+			// comment in runTest() at the definition of
+			// writeCoverMetaAct for more details.
+			run.Deps = append(run.Deps, writeCoverMetaAct)
+			writeCoverMetaAct.Deps = append(writeCoverMetaAct.Deps, build)
+		}
 		addTestVet(b, p, run, nil)
 		print := &work.Action{
 			Mode:       "test print",
@@ -1020,12 +1122,17 @@
 	testBinary := testBinaryName(p)
 
 	testDir := b.NewObjdir()
-	if err := b.Mkdir(testDir); err != nil {
+	if err := b.BackgroundShell().Mkdir(testDir); err != nil {
 		return nil, nil, nil, err
 	}
 
 	pmain.Dir = testDir
 	pmain.Internal.OmitDebug = !testC && !testNeedBinary()
+	if pmain.ImportPath == "runtime.test" {
+		// The runtime package needs a symbolized binary for its tests.
+		// See runtime/unsafepoint_test.go.
+		pmain.Internal.OmitDebug = false
+	}
 
 	if !cfg.BuildN {
 		// writeTestmain writes _testmain.go,
@@ -1110,22 +1217,42 @@
 			runAction = installAction // make sure runAction != nil even if not running test
 		}
 	}
+
 	var vetRunAction *work.Action
 	if testC {
 		printAction = &work.Action{Mode: "test print (nop)", Package: p, Deps: []*work.Action{runAction}} // nop
 		vetRunAction = printAction
 	} else {
 		// run test
-		r := new(runTestActor)
+		rta := &runTestActor{
+			writeCoverMetaAct: writeCoverMetaAct,
+		}
 		runAction = &work.Action{
 			Mode:       "test run",
-			Actor:      r,
+			Actor:      rta,
 			Deps:       []*work.Action{buildAction},
 			Package:    p,
 			IgnoreFail: true, // run (prepare output) even if build failed
-			TryCache:   r.c.tryCache,
-			Objdir:     testDir,
+			TryCache:   rta.c.tryCache,
 		}
+		if writeCoverMetaAct != nil {
+			// If writeCoverMetaAct != nil, this indicates that our
+			// "go test -coverpkg" run actions will need to read the
+			// meta-files summary file written by writeCoverMetaAct,
+			// so add a dependence edge from writeCoverMetaAct to the
+			// run action.
+			runAction.Deps = append(runAction.Deps, writeCoverMetaAct)
+			if !p.IsTestOnly() {
+				// Package p is not test only, meaning that the build
+				// action for p may generate a static meta-data file.
+				// Add a dependence edge from p to writeCoverMetaAct,
+				// which needs to know the name of that meta-data
+				// file.
+				compileAction := b.CompileAction(work.ModeBuild, work.ModeBuild, p)
+				writeCoverMetaAct.Deps = append(writeCoverMetaAct.Deps, compileAction)
+			}
+		}
+		runAction.Objdir = testDir
 		vetRunAction = runAction
 		cleanAction = &work.Action{
 			Mode:       "test clean",
@@ -1187,6 +1314,12 @@
 type runTestActor struct {
 	c runCache
 
+	// writeCoverMetaAct points to the pseudo-action for collecting
+	// coverage meta-data files for selected -cover test runs. See the
+	// comment in runTest at the definition of writeCoverMetaAct for
+	// more details.
+	writeCoverMetaAct *work.Action
+
 	// sequencing of json start messages, to preserve test order
 	prev <-chan struct{} // wait to start until prev is closed
 	next chan<- struct{} // close next once the next test can start.
@@ -1217,6 +1350,8 @@
 }
 
 func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action) error {
+	sh := b.Shell(a)
+
 	// Wait for previous test to get started and print its first json line.
 	select {
 	case <-r.prev:
@@ -1228,18 +1363,6 @@
 		return nil
 	}
 
-	if a.Failed {
-		// We were unable to build the binary.
-		a.Failed = false
-		a.TestOutput = new(bytes.Buffer)
-		fmt.Fprintf(a.TestOutput, "FAIL\t%s [build failed]\n", a.Package.ImportPath)
-		base.SetExitStatus(1)
-
-		// release next test to start
-		close(r.next)
-		return nil
-	}
-
 	var stdout io.Writer = os.Stdout
 	var err error
 	if testJSON {
@@ -1254,8 +1377,53 @@
 	// Release next test to start (test2json.NewConverter writes the start event).
 	close(r.next)
 
+	if a.Failed {
+		// We were unable to build the binary.
+		a.Failed = false
+		fmt.Fprintf(stdout, "FAIL\t%s [build failed]\n", a.Package.ImportPath)
+		// Tell the JSON converter that this was a failure, not a passing run.
+		err = errors.New("build failed")
+		base.SetExitStatus(1)
+		return nil
+	}
+
+	coverProfTempFile := func(a *work.Action) string {
+		if a.Objdir == "" {
+			panic("internal error: objdir not set in coverProfTempFile")
+		}
+		return a.Objdir + "_cover_.out"
+	}
+
 	if p := a.Package; len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
-		fmt.Fprintf(stdout, "?   \t%s\t[no test files]\n", p.ImportPath)
+		reportNoTestFiles := true
+		if cfg.BuildCover && cfg.Experiment.CoverageRedesign {
+			if err := sh.Mkdir(a.Objdir); err != nil {
+				return err
+			}
+			mf, err := work.BuildActionCoverMetaFile(a)
+			if err != nil {
+				return err
+			} else if mf != "" {
+				reportNoTestFiles = false
+				// Write out "percent statements covered".
+				if err := work.WriteCoveragePercent(b, a, mf, stdout); err != nil {
+					return err
+				}
+				// If -coverprofile is in effect, then generate a
+				// coverage profile fragment for this package and
+				// merge it with the final -coverprofile output file.
+				if coverMerge.f != nil {
+					cp := coverProfTempFile(a)
+					if err := work.WriteCoverageProfile(b, a, mf, cp, stdout); err != nil {
+						return err
+					}
+					mergeCoverProfile(stdout, cp)
+				}
+			}
+		}
+		if reportNoTestFiles {
+			fmt.Fprintf(stdout, "?   \t%s\t[no test files]\n", p.ImportPath)
+		}
 		return nil
 	}
 
@@ -1325,7 +1493,7 @@
 	addToEnv := ""
 	if cfg.BuildCover {
 		gcd := filepath.Join(a.Objdir, "gocoverdir")
-		if err := b.Mkdir(gcd); err != nil {
+		if err := sh.Mkdir(gcd); err != nil {
 			// If we can't create a temp dir, terminate immediately
 			// with an error as opposed to returning an error to the
 			// caller; failed MkDir most likely indicates that we're
@@ -1334,6 +1502,16 @@
 			base.Fatalf("failed to create temporary dir: %v", err)
 		}
 		coverdirArg = append(coverdirArg, "-test.gocoverdir="+gcd)
+		if r.writeCoverMetaAct != nil {
+			// Copy the meta-files file over into the test's coverdir
+			// directory so that the coverage runtime support will be
+			// able to find it.
+			src := r.writeCoverMetaAct.Objdir + coverage.MetaFilesFileName
+			dst := filepath.Join(gcd, coverage.MetaFilesFileName)
+			if err := sh.CopyFile(dst, src, 0666, false); err != nil {
+				return err
+			}
+		}
 		// Even though we are passing the -test.gocoverdir option to
 		// the test binary, also set GOCOVERDIR as well. This is
 		// intended to help with tests that run "go build" to build
@@ -1346,13 +1524,13 @@
 		// Write coverage to temporary profile, for merging later.
 		for i, arg := range args {
 			if strings.HasPrefix(arg, "-test.coverprofile=") {
-				args[i] = "-test.coverprofile=" + a.Objdir + "_cover_.out"
+				args[i] = "-test.coverprofile=" + coverProfTempFile(a)
 			}
 		}
 	}
 
 	if cfg.BuildN || cfg.BuildX {
-		b.Showcmd("", "%s", strings.Join(args, " "))
+		sh.ShowCmd("", "%s", strings.Join(args, " "))
 		if cfg.BuildN {
 			return nil
 		}
@@ -1395,25 +1573,6 @@
 		cmd.Stdout = stdout
 		cmd.Stderr = stdout
 
-		// If there are any local SWIG dependencies, we want to load
-		// the shared library from the build directory.
-		if a.Package.UsesSwig() {
-			env := cmd.Env
-			found := false
-			prefix := "LD_LIBRARY_PATH="
-			for i, v := range env {
-				if strings.HasPrefix(v, prefix) {
-					env[i] = v + ":."
-					found = true
-					break
-				}
-			}
-			if !found {
-				env = append(env, "LD_LIBRARY_PATH=.")
-			}
-			cmd.Env = env
-		}
-
 		cmd.Cancel = func() error {
 			if base.SignalTrace == nil {
 				err := cmd.Process.Kill()
@@ -1882,10 +2041,7 @@
 	if cfg.BuildWork {
 		return nil
 	}
-	if cfg.BuildX {
-		b.Showcmd("", "rm -r %s", a.Objdir)
-	}
-	os.RemoveAll(a.Objdir)
+	b.Shell(a).RemoveAll(a.Objdir)
 	return nil
 }
 
diff --git a/src/cmd/go/internal/toolchain/select.go b/src/cmd/go/internal/toolchain/select.go
index a44f393..dcf3be9 100644
--- a/src/cmd/go/internal/toolchain/select.go
+++ b/src/cmd/go/internal/toolchain/select.go
@@ -8,12 +8,12 @@
 import (
 	"context"
 	"errors"
+	"flag"
 	"fmt"
 	"go/build"
 	"io/fs"
 	"log"
 	"os"
-	"os/exec"
 	"path/filepath"
 	"runtime"
 	"strconv"
@@ -25,6 +25,7 @@
 	"cmd/go/internal/modfetch"
 	"cmd/go/internal/modload"
 	"cmd/go/internal/run"
+	"cmd/go/internal/work"
 
 	"golang.org/x/mod/module"
 )
@@ -168,12 +169,14 @@
 			gover.Startup.AutoToolchain = toolchain
 		} else {
 			if toolchain != "" {
-				// Accept toolchain only if it is >= our min.
+				// Accept toolchain only if it is > our min.
+				// (If it is equal, then min satisfies it anyway: that can matter if min
+				// has a suffix like "go1.21.1-foo" and toolchain is "go1.21.1".)
 				toolVers := gover.FromToolchain(toolchain)
 				if toolVers == "" || (!strings.HasPrefix(toolchain, "go") && !strings.Contains(toolchain, "-go")) {
 					base.Fatalf("invalid toolchain %q in %s", toolchain, base.ShortPath(file))
 				}
-				if gover.Compare(toolVers, minVers) >= 0 {
+				if gover.Compare(toolVers, minVers) > 0 {
 					gotoolchain = toolchain
 					minVers = toolVers
 					gover.Startup.AutoToolchain = toolchain
@@ -283,7 +286,7 @@
 	// Look in PATH for the toolchain before we download one.
 	// This allows custom toolchains as well as reuse of toolchains
 	// already installed using go install golang.org/dl/go1.2.3@latest.
-	if exe, err := exec.LookPath(gotoolchain); err == nil {
+	if exe, err := cfg.LookPath(gotoolchain); err == nil {
 		execGoToolchain(gotoolchain, "", exe)
 	}
 
@@ -485,74 +488,132 @@
 	// Note: We assume there are no flags between 'go' and 'install' or 'run'.
 	// During testing there are some debugging flags that are accepted
 	// in that position, but in production go binaries there are not.
-	if len(os.Args) < 3 || (os.Args[1] != "install" && os.Args[1] != "run") {
+	if len(os.Args) < 3 {
 		return false
 	}
 
-	// Check for pkg@version.
-	var arg string
+	var cmdFlags *flag.FlagSet
 	switch os.Args[1] {
 	default:
+		// Command doesn't support a pkg@version as the main module.
 		return false
 	case "install":
-		// We would like to let 'go install -newflag pkg@version' work even
-		// across a toolchain switch. To make that work, assume the pkg@version
-		// is the last argument and skip the flag parsing.
-		arg = os.Args[len(os.Args)-1]
+		cmdFlags = &work.CmdInstall.Flag
 	case "run":
-		// For run, the pkg@version can be anywhere on the command line,
-		// because it is preceded by run flags and followed by arguments to the
-		// program being run. To handle that precisely, we have to interpret the
-		// flags a little bit, to know whether each flag takes an optional argument.
-		// We can still allow unknown flags as long as they have an explicit =value.
-		args := os.Args[2:]
-		for i := 0; i < len(args); i++ {
-			a := args[i]
-			if !strings.HasPrefix(a, "-") {
-				arg = a
-				break
-			}
-			if a == "-" {
-				// non-flag but also non-pkg@version
+		cmdFlags = &run.CmdRun.Flag
+	}
+
+	// The modcacherw flag is unique in that it affects how we fetch the
+	// requested module to even figure out what toolchain it needs.
+	// We need to actually set it before we check the toolchain version.
+	// (See https://go.dev/issue/64282.)
+	modcacherwFlag := cmdFlags.Lookup("modcacherw")
+	if modcacherwFlag == nil {
+		base.Fatalf("internal error: modcacherw flag not registered for command")
+	}
+	modcacherwVal, ok := modcacherwFlag.Value.(interface {
+		IsBoolFlag() bool
+		flag.Value
+	})
+	if !ok || !modcacherwVal.IsBoolFlag() {
+		base.Fatalf("internal error: modcacherw is not a boolean flag")
+	}
+
+	// Make a best effort to parse the command's args to find the pkg@version
+	// argument and the -modcacherw flag.
+	var (
+		pkgArg         string
+		modcacherwSeen bool
+	)
+	for args := os.Args[2:]; len(args) > 0; {
+		a := args[0]
+		args = args[1:]
+		if a == "--" {
+			if len(args) == 0 {
 				return false
 			}
-			if a == "--" {
-				if i+1 >= len(args) {
-					return false
+			pkgArg = args[0]
+			break
+		}
+
+		a, ok := strings.CutPrefix(a, "-")
+		if !ok {
+			// Not a flag argument. Must be a package.
+			pkgArg = a
+			break
+		}
+		a = strings.TrimPrefix(a, "-") // Treat --flag as -flag.
+
+		name, val, hasEq := strings.Cut(a, "=")
+
+		if name == "modcacherw" {
+			if !hasEq {
+				val = "true"
+			}
+			if err := modcacherwVal.Set(val); err != nil {
+				return false
+			}
+			modcacherwSeen = true
+			continue
+		}
+
+		if hasEq {
+			// Already has a value; don't bother parsing it.
+			continue
+		}
+
+		f := run.CmdRun.Flag.Lookup(a)
+		if f == nil {
+			// We don't know whether this flag is a boolean.
+			if os.Args[1] == "run" {
+				// We don't know where to find the pkg@version argument.
+				// For run, the pkg@version can be anywhere on the command line,
+				// because it is preceded by run flags and followed by arguments to the
+				// program being run. Since we don't know whether this flag takes
+				// an argument, we can't reliably identify the end of the run flags.
+				// Just give up and let the user clarify using the "=" form.
+				return false
+			}
+
+			// We would like to let 'go install -newflag pkg@version' work even
+			// across a toolchain switch. To make that work, assume by default that
+			// the pkg@version is the last argument and skip the remaining args unless
+			// we spot a plausible "-modcacherw" flag.
+			for len(args) > 0 {
+				a := args[0]
+				name, _, _ := strings.Cut(a, "=")
+				if name == "-modcacherw" || name == "--modcacherw" {
+					break
 				}
-				arg = args[i+1]
-				break
+				if len(args) == 1 && !strings.HasPrefix(a, "-") {
+					pkgArg = a
+				}
+				args = args[1:]
 			}
-			a = strings.TrimPrefix(a, "-")
-			a = strings.TrimPrefix(a, "-")
-			if strings.HasPrefix(a, "-") {
-				// non-flag but also non-pkg@version
-				return false
-			}
-			if strings.Contains(a, "=") {
-				// already has value
-				continue
-			}
-			f := run.CmdRun.Flag.Lookup(a)
-			if f == nil {
-				// Unknown flag. Give up. The command is going to fail in flag parsing.
-				return false
-			}
-			if bf, ok := f.Value.(interface{ IsBoolFlag() bool }); ok && bf.IsBoolFlag() {
-				// Does not take value.
-				continue
-			}
-			i++ // Does take a value; skip it.
+			continue
+		}
+
+		if bf, ok := f.Value.(interface{ IsBoolFlag() bool }); !ok || !bf.IsBoolFlag() {
+			// The next arg is the value for this flag. Skip it.
+			args = args[1:]
+			continue
 		}
 	}
-	if !strings.Contains(arg, "@") || build.IsLocalImport(arg) || filepath.IsAbs(arg) {
+
+	if !strings.Contains(pkgArg, "@") || build.IsLocalImport(pkgArg) || filepath.IsAbs(pkgArg) {
 		return false
 	}
-	path, version, _ := strings.Cut(arg, "@")
+	path, version, _ := strings.Cut(pkgArg, "@")
 	if path == "" || version == "" || gover.IsToolchain(path) {
 		return false
 	}
 
+	if !modcacherwSeen && base.InGOFLAGS("-modcacherw") {
+		fs := flag.NewFlagSet("goInstallVersion", flag.ExitOnError)
+		fs.Var(modcacherwVal, "modcacherw", modcacherwFlag.Usage)
+		base.SetFromGOFLAGS(fs)
+	}
+
 	// It would be correct to simply return true here, bypassing use
 	// of the current go.mod or go.work, and let "go run" or "go install"
 	// do the rest, including a toolchain switch.
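
The rewritten scan above has to guess at flag shapes without fully parsing the command line, so it only trusts flags it can look up and gives up when it cannot tell where the run flags end. The following standalone sketch is not the real cmd/go code: scanArgs and the isBool callback are illustrative stand-ins for the lookup against the command's registered FlagSet, and unknown flags are handled more simply than above.

package main

import (
	"fmt"
	"strings"
)

// scanArgs is a simplified sketch of the pkg@version / -modcacherw scan.
// isBool reports whether a flag takes no separate value; the real code answers
// that question by looking the flag up in the command's FlagSet.
func scanArgs(args []string, isBool func(name string) bool) (pkgArg string, modcacherw bool) {
	for len(args) > 0 {
		a := args[0]
		args = args[1:]
		if a == "--" {
			if len(args) > 0 {
				pkgArg = args[0]
			}
			return
		}
		name, ok := strings.CutPrefix(a, "-")
		if !ok {
			pkgArg = a // not a flag, so treat it as the package argument
			return
		}
		name = strings.TrimPrefix(name, "-") // treat --flag like -flag
		name, val, hasEq := strings.Cut(name, "=")
		if name == "modcacherw" {
			modcacherw = !hasEq || val == "true"
			continue
		}
		if hasEq || isBool(name) {
			continue // value already attached, or the flag takes none
		}
		// Assume the flag takes a separate value and skip it. The real code is
		// more conservative: for "go run" it gives up on unknown flags, since
		// it cannot tell where the run flags end.
		if len(args) > 0 {
			args = args[1:]
		}
	}
	return
}

func main() {
	pkg, rw := scanArgs(
		[]string{"-modcacherw", "-tags=netgo", "example.com/cmd@v1.2.3", "arg"},
		func(name string) bool { return name == "x" || name == "v" },
	)
	fmt.Println(pkg, rw) // example.com/cmd@v1.2.3 true
}
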
diff --git a/src/cmd/go/internal/trace/trace.go b/src/cmd/go/internal/trace/trace.go
index d69dc4f..f96aa40 100644
--- a/src/cmd/go/internal/trace/trace.go
+++ b/src/cmd/go/internal/trace/trace.go
@@ -5,10 +5,10 @@
 package trace
 
 import (
-	"cmd/internal/traceviewer"
 	"context"
 	"encoding/json"
 	"errors"
+	"internal/trace/traceviewer/format"
 	"os"
 	"strings"
 	"sync/atomic"
@@ -47,7 +47,7 @@
 		return ctx, nil
 	}
 	childSpan := &Span{t: tc.t, name: name, tid: tc.tid, start: time.Now()}
-	tc.t.writeEvent(&traceviewer.Event{
+	tc.t.writeEvent(&format.Event{
 		Name:  childSpan.name,
 		Time:  float64(childSpan.start.UnixNano()) / float64(time.Microsecond),
 		TID:   childSpan.tid,
@@ -77,7 +77,7 @@
 	}
 
 	id := tc.t.getNextFlowID()
-	tc.t.writeEvent(&traceviewer.Event{
+	tc.t.writeEvent(&format.Event{
 		Name:     from.name + " -> " + to.name,
 		Category: "flow",
 		ID:       id,
@@ -85,7 +85,7 @@
 		Phase:    phaseFlowStart,
 		TID:      from.tid,
 	})
-	tc.t.writeEvent(&traceviewer.Event{
+	tc.t.writeEvent(&format.Event{
 		Name:      from.name + " -> " + to.name,
 		Category:  "flow", // TODO(matloob): Add Category to Flow?
 		ID:        id,
@@ -110,7 +110,7 @@
 		return
 	}
 	s.end = time.Now()
-	s.t.writeEvent(&traceviewer.Event{
+	s.t.writeEvent(&format.Event{
 		Name:  s.name,
 		Time:  float64(s.end.UnixNano()) / float64(time.Microsecond),
 		TID:   s.tid,
@@ -121,11 +121,11 @@
 type tracer struct {
 	file chan traceFile // 1-buffered
 
-	nextTID    uint64
-	nextFlowID uint64
+	nextTID    atomic.Uint64
+	nextFlowID atomic.Uint64
 }
 
-func (t *tracer) writeEvent(ev *traceviewer.Event) error {
+func (t *tracer) writeEvent(ev *format.Event) error {
 	f := <-t.file
 	defer func() { t.file <- f }()
 	var err error
@@ -161,11 +161,11 @@
 }
 
 func (t *tracer) getNextTID() uint64 {
-	return atomic.AddUint64(&t.nextTID, 1)
+	return t.nextTID.Add(1)
 }
 
 func (t *tracer) getNextFlowID() uint64 {
-	return atomic.AddUint64(&t.nextFlowID, 1)
+	return t.nextFlowID.Add(1)
 }
 
 // traceKey is the context key for tracing information. It is unexported to prevent collisions with context keys defined in
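
The counter change replaces plain uint64 fields updated via atomic.AddUint64 with the typed sync/atomic.Uint64 wrapper (available since Go 1.19), which rules out accidental non-atomic reads or writes of the same field. A minimal sketch of the pattern, with idGen as an illustrative type:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type idGen struct {
	next atomic.Uint64 // typed atomic: cannot be accessed non-atomically by mistake
}

func (g *idGen) nextID() uint64 { return g.next.Add(1) }

func main() {
	var g idGen
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); g.nextID() }()
	}
	wg.Wait()
	fmt.Println(g.next.Load()) // 100
}
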
diff --git a/src/cmd/go/internal/vcs/vcs.go b/src/cmd/go/internal/vcs/vcs.go
index c65dd0f..8550f2a 100644
--- a/src/cmd/go/internal/vcs/vcs.go
+++ b/src/cmd/go/internal/vcs/vcs.go
@@ -283,15 +283,13 @@
 var scpSyntaxRe = lazyregexp.New(`^(\w+)@([\w.-]+):(.*)$`)
 
 func gitRemoteRepo(vcsGit *Cmd, rootDir string) (remoteRepo string, err error) {
-	cmd := "config remote.origin.url"
-	errParse := errors.New("unable to parse output of git " + cmd)
-	errRemoteOriginNotFound := errors.New("remote origin not found")
+	const cmd = "config remote.origin.url"
 	outb, err := vcsGit.run1(rootDir, cmd, nil, false)
 	if err != nil {
 		// if it doesn't output any message, it means the config argument is correct,
 		// but the config value itself doesn't exist
 		if outb != nil && len(outb) == 0 {
-			return "", errRemoteOriginNotFound
+			return "", errors.New("remote origin not found")
 		}
 		return "", err
 	}
@@ -323,7 +321,7 @@
 			return repoURL.String(), nil
 		}
 	}
-	return "", errParse
+	return "", errors.New("unable to parse output of git " + cmd)
 }
 
 func gitStatus(vcsGit *Cmd, rootDir string) (Status, error) {
@@ -680,7 +678,7 @@
 		args = args[2:]
 	}
 
-	_, err := exec.LookPath(v.Cmd)
+	_, err := cfg.LookPath(v.Cmd)
 	if err != nil {
 		fmt.Fprintf(os.Stderr,
 			"go: missing %s command. See https://golang.org/s/gogetcmd\n",
@@ -1015,11 +1013,11 @@
 	{"public", []string{"git", "hg"}},
 }
 
-// CheckGOVCS checks whether the policy defined by the environment variable
+// checkGOVCS checks whether the policy defined by the environment variable
 // GOVCS allows the given vcs command to be used with the given repository
 // root path. Note that root may not be a real package or module path; it's
 // the same as the root path in the go-import meta tag.
-func CheckGOVCS(vcs *Cmd, root string) error {
+func checkGOVCS(vcs *Cmd, root string) error {
 	if vcs == vcsMod {
 		// Direct module (proxy protocol) fetches don't
 		// involve an external version control system
@@ -1047,37 +1045,6 @@
 	return nil
 }
 
-// CheckNested checks for an incorrectly-nested VCS-inside-VCS
-// situation for dir, checking parents up until srcRoot.
-func CheckNested(vcs *Cmd, dir, srcRoot string) error {
-	if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator {
-		return fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
-	}
-
-	otherDir := dir
-	for len(otherDir) > len(srcRoot) {
-		for _, otherVCS := range vcsList {
-			if isVCSRoot(otherDir, otherVCS.RootNames) {
-				// Allow expected vcs in original dir.
-				if otherDir == dir && otherVCS == vcs {
-					continue
-				}
-				// Otherwise, we have one VCS inside a different VCS.
-				return fmt.Errorf("directory %q uses %s, but parent %q uses %s", dir, vcs.Cmd, otherDir, otherVCS.Cmd)
-			}
-		}
-		// Move to parent.
-		newDir := filepath.Dir(otherDir)
-		if len(newDir) >= len(otherDir) {
-			// Shouldn't happen, but just in case, stop.
-			break
-		}
-		otherDir = newDir
-	}
-
-	return nil
-}
-
 // RepoRoot describes the repository root for a tree of source code.
 type RepoRoot struct {
 	Repo     string // repository URL, including scheme
@@ -1193,7 +1160,7 @@
 		if vcs == nil {
 			return nil, fmt.Errorf("unknown version control system %q", match["vcs"])
 		}
-		if err := CheckGOVCS(vcs, match["root"]); err != nil {
+		if err := checkGOVCS(vcs, match["root"]); err != nil {
 			return nil, err
 		}
 		var repoURL string
@@ -1204,18 +1171,31 @@
 			var ok bool
 			repoURL, ok = interceptVCSTest(repo, vcs, security)
 			if !ok {
-				scheme := vcs.Scheme[0] // default to first scheme
-				if vcs.PingCmd != "" {
-					// If we know how to test schemes, scan to find one.
+				scheme, err := func() (string, error) {
 					for _, s := range vcs.Scheme {
 						if security == web.SecureOnly && !vcs.isSecureScheme(s) {
 							continue
 						}
-						if vcs.Ping(s, repo) == nil {
-							scheme = s
-							break
+
+						// If we know how to ping URL schemes for this VCS,
+						// check that this repo works.
+						// Otherwise, default to the first scheme
+						// that meets the requested security level.
+						if vcs.PingCmd == "" {
+							return s, nil
+						}
+						if err := vcs.Ping(s, repo); err == nil {
+							return s, nil
 						}
 					}
+					securityFrag := ""
+					if security == web.SecureOnly {
+						securityFrag = "secure "
+					}
+					return "", fmt.Errorf("no %sprotocol found for repository", securityFrag)
+				}()
+				if err != nil {
+					return nil, err
 				}
 				repoURL = scheme + "://" + repo
 			}
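
The closure introduced above turns scheme selection into an explicit error path: previously a repository with no usable scheme silently fell back to vcs.Scheme[0]. A simplified sketch of the selection logic, with pickScheme, isSecure, and ping as hypothetical stand-ins for the Cmd fields and methods used in the real code:

package main

import (
	"errors"
	"fmt"
)

// pickScheme walks the supported schemes in order, skips insecure ones when
// only secure fetches are allowed, and uses ping (when available) to confirm
// the repository answers on that scheme.
func pickScheme(schemes []string, secureOnly bool, isSecure func(string) bool, ping func(string) error) (string, error) {
	for _, s := range schemes {
		if secureOnly && !isSecure(s) {
			continue
		}
		if ping == nil {
			return s, nil // no way to probe; take the first acceptable scheme
		}
		if err := ping(s); err == nil {
			return s, nil
		}
	}
	frag := ""
	if secureOnly {
		frag = "secure "
	}
	return "", fmt.Errorf("no %sprotocol found for repository", frag)
}

func main() {
	isSecure := func(s string) bool { return s == "https" }
	ping := func(s string) error {
		if s == "https" {
			return nil
		}
		return errors.New("unreachable")
	}
	s, err := pickScheme([]string{"git", "https"}, true, isSecure, ping)
	fmt.Println(s, err) // https <nil>
}
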
@@ -1369,7 +1349,7 @@
 		}
 	}
 
-	if err := CheckGOVCS(vcs, mmi.Prefix); err != nil {
+	if err := checkGOVCS(vcs, mmi.Prefix); err != nil {
 		return nil, err
 	}
 
@@ -1474,7 +1454,7 @@
 	Prefix, VCS, RepoRoot string
 }
 
-// A ImportMismatchError is returned where metaImport/s are present
+// An ImportMismatchError is returned where metaImport/s are present
 // but none match our import path.
 type ImportMismatchError struct {
 	importPath string
diff --git a/src/cmd/go/internal/vcweb/git.go b/src/cmd/go/internal/vcweb/git.go
index 316c238..d1e0563 100644
--- a/src/cmd/go/internal/vcweb/git.go
+++ b/src/cmd/go/internal/vcweb/git.go
@@ -37,16 +37,35 @@
 		return nil, ServerNotInstalledError{name: "git"}
 	}
 
-	handler := &cgi.Handler{
-		Path:   h.gitPath,
-		Logger: logger,
-		Args:   []string{"http-backend"},
-		Dir:    dir,
-		Env: append(slices.Clip(env),
-			"GIT_PROJECT_ROOT="+dir,
-			"GIT_HTTP_EXPORT_ALL=1",
-		),
-	}
+	baseEnv := append(slices.Clip(env),
+		"GIT_PROJECT_ROOT="+dir,
+		"GIT_HTTP_EXPORT_ALL=1",
+	)
+
+	handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		// The Git client sends the requested Git protocol version as a
+		// "Git-Protocol" HTTP request header, which the CGI host then converts
+		// to an environment variable (HTTP_GIT_PROTOCOL).
+		//
+		// However, versions of Git older than 2.34.0 don't recognize the
+		// HTTP_GIT_PROTOCOL variable, and instead need that value to be set in the
+		// GIT_PROTOCOL variable. We do so here so that vcweb can work reliably
+		// with older Git releases. (As of the time of writing, the Go project's
+		// builders were on Git version 2.30.2.)
+		env := slices.Clip(baseEnv)
+		if p := req.Header.Get("Git-Protocol"); p != "" {
+			env = append(env, "GIT_PROTOCOL="+p)
+		}
+
+		h := &cgi.Handler{
+			Path:   h.gitPath,
+			Logger: logger,
+			Args:   []string{"http-backend"},
+			Dir:    dir,
+			Env:    env,
+		}
+		h.ServeHTTP(w, req)
+	})
 
 	return handler, nil
 }
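
The vcweb change wraps the CGI handler per request so the client's Git-Protocol header can be forwarded in the GIT_PROTOCOL environment variable for pre-2.34 Git. A self-contained sketch of the same wrapping outside cmd/go; gitBackend, the paths, and the listen address are illustrative, and the real handler also carries a logger and the script environment:

package main

import (
	"log"
	"net/http"
	"net/http/cgi"
	"os"
)

// gitBackend serves a Git repository rooted at dir through "git http-backend",
// copying the client's Git-Protocol request header into GIT_PROTOCOL so that
// older Git versions see the requested protocol.
func gitBackend(gitPath, dir string) http.Handler {
	baseEnv := append(os.Environ(),
		"GIT_PROJECT_ROOT="+dir,
		"GIT_HTTP_EXPORT_ALL=1",
	)
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		env := append([]string(nil), baseEnv...) // copy so requests don't share a slice
		if p := req.Header.Get("Git-Protocol"); p != "" {
			env = append(env, "GIT_PROTOCOL="+p)
		}
		h := &cgi.Handler{
			Path: gitPath,
			Args: []string{"http-backend"},
			Dir:  dir,
			Env:  env,
		}
		h.ServeHTTP(w, req)
	})
}

func main() {
	log.Fatal(http.ListenAndServe("localhost:8080", gitBackend("/usr/bin/git", "/srv/git")))
}
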
diff --git a/src/cmd/go/internal/web/http.go b/src/cmd/go/internal/web/http.go
index 4fc939a..bd5f828 100644
--- a/src/cmd/go/internal/web/http.go
+++ b/src/cmd/go/internal/web/http.go
@@ -173,7 +173,7 @@
 		}
 	}
 
-	fetch := func(url *urlpkg.URL) (*urlpkg.URL, *http.Response, error) {
+	fetch := func(url *urlpkg.URL) (*http.Response, error) {
 		// Note: The -v build flag does not mean "print logging information",
 		// despite its historical misuse for this in GOPATH-based go get.
 		// We print extra logging in -x mode instead, which traces what
@@ -184,7 +184,7 @@
 
 		req, err := http.NewRequest("GET", url.String(), nil)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 		if url.Scheme == "https" {
 			auth.AddCredentials(req)
@@ -197,7 +197,7 @@
 
 		release, err := base.AcquireNet()
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 
 		var res *http.Response
@@ -218,7 +218,7 @@
 			// CheckRedirect fails, and even then the returned Response.Body is
 			// already closed.”
 			release()
-			return nil, nil, err
+			return nil, err
 		}
 
 		// “If the returned error is nil, the Response will contain a non-nil Body
@@ -228,7 +228,7 @@
 			ReadCloser: body,
 			afterClose: release,
 		}
-		return url, res, err
+		return res, err
 	}
 
 	var (
@@ -241,8 +241,10 @@
 		*secure = *url
 		secure.Scheme = "https"
 
-		fetched, res, err = fetch(secure)
-		if err != nil {
+		res, err = fetch(secure)
+		if err == nil {
+			fetched = secure
+		} else {
 			if cfg.BuildX {
 				fmt.Fprintf(os.Stderr, "# get %s: %v\n", secure.Redacted(), err)
 			}
@@ -284,8 +286,10 @@
 			return nil, fmt.Errorf("refusing to pass credentials to insecure URL: %s", insecure.Redacted())
 		}
 
-		fetched, res, err = fetch(insecure)
-		if err != nil {
+		res, err = fetch(insecure)
+		if err == nil {
+			fetched = insecure
+		} else {
 			if cfg.BuildX {
 				fmt.Fprintf(os.Stderr, "# get %s: %v\n", insecure.Redacted(), err)
 			}
diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go
index d4d0a71..a59072e 100644
--- a/src/cmd/go/internal/work/action.go
+++ b/src/cmd/go/internal/work/action.go
@@ -9,6 +9,7 @@
 import (
 	"bufio"
 	"bytes"
+	"cmd/internal/cov/covcmd"
 	"container/heap"
 	"context"
 	"debug/elf"
@@ -37,10 +38,8 @@
 type Builder struct {
 	WorkDir            string                    // the temporary work directory (ends in filepath.Separator)
 	actionCache        map[cacheKey]*Action      // a cache of already-constructed actions
-	mkdirCache         map[string]bool           // a cache of created directories
 	flagCache          map[[2]string]bool        // a cache of supported compiler flags
 	gccCompilerIDCache map[string]cache.ActionID // cache for gccCompilerID
-	Print              func(args ...any) (int, error)
 
 	IsCmdList           bool // running as part of go list; set p.Stale and additional fields below
 	NeedError           bool // list needs p.Error
@@ -51,8 +50,7 @@
 	objdirSeq int // counter for NewObjdir
 	pkgSeq    int
 
-	output    sync.Mutex
-	scriptDir string // current directory in printed script
+	backgroundSh *Shell // Shell that per-Action Shells are derived from
 
 	exec      sync.Mutex
 	readySema chan bool
@@ -107,6 +105,8 @@
 	vetCfg    *vetConfig // vet config
 	output    []byte     // output redirect buffer (nil means use b.Print)
 
+	sh *Shell // lazily created per-Action shell; see Builder.Shell
+
 	// Execution state.
 	pending      int               // number of deps yet to complete
 	priority     int               // relative execution priority
@@ -265,11 +265,7 @@
 func NewBuilder(workDir string) *Builder {
 	b := new(Builder)
 
-	b.Print = func(a ...any) (int, error) {
-		return fmt.Fprint(os.Stderr, a...)
-	}
 	b.actionCache = make(map[cacheKey]*Action)
-	b.mkdirCache = make(map[string]bool)
 	b.toolIDCache = make(map[string]string)
 	b.buildIDCache = make(map[string]string)
 
@@ -300,6 +296,8 @@
 		}
 	}
 
+	b.backgroundSh = NewShell(b.WorkDir, nil)
+
 	if err := CheckGOOSARCHPair(cfg.Goos, cfg.Goarch); err != nil {
 		fmt.Fprintf(os.Stderr, "go: %v\n", err)
 		base.SetExitStatus(2)
@@ -436,6 +434,32 @@
 	return b.CompileAction(mode, depMode, p)
 }
 
+// buildActor implements the Actor interface for package build
+// actions. For most package builds this simply means invoking the
+// *Builder.build method; in the case of "go test -cover" for
+// a package with no test files, we store some additional state
+// information in the build actor to help with reporting.
+type buildActor struct {
+	// name of static meta-data file fragment emitted by the cover
+	// tool as part of the package build action, for selected
+	// "go test -cover" runs.
+	covMetaFileName string
+}
+
+// newBuildActor returns a new buildActor object, setting up the
+// covMetaFileName field if the 'genCoverMeta' flag is set.
+func newBuildActor(p *load.Package, genCoverMeta bool) *buildActor {
+	ba := &buildActor{}
+	if genCoverMeta {
+		ba.covMetaFileName = covcmd.MetaFileForPackage(p.ImportPath)
+	}
+	return ba
+}
+
+func (ba *buildActor) Act(b *Builder, ctx context.Context, a *Action) error {
+	return b.build(ctx, a)
+}
+
 // CompileAction returns the action for compiling and possibly installing
 // (according to mode) the given package. The resulting action is only
 // for building packages (archives), never for linking executables.
@@ -459,7 +483,7 @@
 		a := &Action{
 			Mode:    "build",
 			Package: p,
-			Actor:   ActorFunc((*Builder).build),
+			Actor:   newBuildActor(p, p.Internal.Cover.GenMeta),
 			Objdir:  b.NewObjdir(),
 		}
 
@@ -853,7 +877,11 @@
 
 			// The linker step still needs all the usual linker deps.
 			// (For example, the linker always opens runtime.a.)
-			for _, dep := range load.LinkerDeps(nil) {
+			ldDeps, err := load.LinkerDeps(nil)
+			if err != nil {
+				base.Error(err)
+			}
+			for _, dep := range ldDeps {
 				add(a, dep, true)
 			}
 		}
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
index e2e0e07..408edb5 100644
--- a/src/cmd/go/internal/work/build.go
+++ b/src/cmd/go/internal/work/build.go
@@ -11,7 +11,6 @@
 	"fmt"
 	"go/build"
 	"os"
-	"os/exec"
 	"path/filepath"
 	"runtime"
 	"strconv"
@@ -38,11 +37,16 @@
 
 When compiling packages, build ignores files that end in '_test.go'.
 
-When compiling a single main package, build writes
-the resulting executable to an output file named after
-the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe')
-or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe').
-The '.exe' suffix is added when writing a Windows executable.
+When compiling a single main package, build writes the resulting
+executable to an output file named after the last non-major-version
+component of the package import path. The '.exe' suffix is added
+when writing a Windows executable.
+So 'go build example/sam' writes 'sam' or 'sam.exe'.
+'go build example.com/foo/v2' writes 'foo' or 'foo.exe', not 'v2.exe'.
+
+When compiling a package from a list of .go files, the executable
+is named after the first source file.
+'go build ed.go rx.go' writes 'ed' or 'ed.exe'.
 
 When compiling multiple packages or a single non-main package,
 build compiles the packages but discards the resulting object,
@@ -76,14 +80,15 @@
 		linux/ppc64le and linux/arm64 (only for 48-bit VMA).
 	-msan
 		enable interoperation with memory sanitizer.
-		Supported only on linux/amd64, linux/arm64, freebsd/amd64
+		Supported only on linux/amd64, linux/arm64, linux/loong64, freebsd/amd64
 		and only with Clang/LLVM as the host C compiler.
 		PIE build mode will be used on all platforms except linux/amd64.
 	-asan
 		enable interoperation with address sanitizer.
-		Supported only on linux/arm64, linux/amd64.
-		Supported only on linux/amd64 or linux/arm64 and only with GCC 7 and higher
+		Supported only on linux/arm64, linux/amd64, linux/loong64.
+		Supported on linux/amd64 or linux/arm64 and only with GCC 7 and higher
 		or Clang/LLVM 9 and higher.
+		And supported on linux/loong64 only with Clang/LLVM 16 and higher.
 	-cover
 		enable code coverage instrumentation.
 	-covermode set,count,atomic
@@ -898,7 +903,7 @@
 	if cfg.Goos == runtime.GOOS && cfg.Goarch == runtime.GOARCH {
 		return ExecCmd
 	}
-	path, err := exec.LookPath(fmt.Sprintf("go_%s_%s_exec", cfg.Goos, cfg.Goarch))
+	path, err := cfg.LookPath(fmt.Sprintf("go_%s_%s_exec", cfg.Goos, cfg.Goarch))
 	if err == nil {
 		ExecCmd = []string{path}
 	}
diff --git a/src/cmd/go/internal/work/build_test.go b/src/cmd/go/internal/work/build_test.go
index 91648a3..f3059f2 100644
--- a/src/cmd/go/internal/work/build_test.go
+++ b/src/cmd/go/internal/work/build_test.go
@@ -222,15 +222,13 @@
 // directory.
 // See https://golang.org/issue/18878.
 func TestRespectSetgidDir(t *testing.T) {
-	var b Builder
-
 	// Check that `cp` is called instead of `mv` by looking at the output
-	// of `(*Builder).ShowCmd` afterwards as a sanity check.
+	// of `(*Shell).ShowCmd` afterwards as a sanity check.
 	cfg.BuildX = true
 	var cmdBuf strings.Builder
-	b.Print = func(a ...any) (int, error) {
+	sh := NewShell("", func(a ...any) (int, error) {
 		return cmdBuf.WriteString(fmt.Sprint(a...))
-	}
+	})
 
 	setgiddir, err := os.MkdirTemp("", "SetGroupID")
 	if err != nil {
@@ -271,12 +269,12 @@
 	defer pkgfile.Close()
 
 	dirGIDFile := filepath.Join(setgiddir, "setgid")
-	if err := b.moveOrCopyFile(dirGIDFile, pkgfile.Name(), 0666, true); err != nil {
+	if err := sh.moveOrCopyFile(dirGIDFile, pkgfile.Name(), 0666, true); err != nil {
 		t.Fatalf("moveOrCopyFile: %v", err)
 	}
 
 	got := strings.TrimSpace(cmdBuf.String())
-	want := b.fmtcmd("", "cp %s %s", pkgfile.Name(), dirGIDFile)
+	want := sh.fmtCmd("", "cp %s %s", pkgfile.Name(), dirGIDFile)
 	if got != want {
 		t.Fatalf("moveOrCopyFile(%q, %q): want %q, got %q", dirGIDFile, pkgfile.Name(), want, got)
 	}
diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go
index a1d7599..bf923d0 100644
--- a/src/cmd/go/internal/work/buildid.go
+++ b/src/cmd/go/internal/work/buildid.go
@@ -238,8 +238,28 @@
 	version := ""
 	lines := strings.Split(string(out), "\n")
 	for _, line := range lines {
-		if fields := strings.Fields(line); len(fields) > 1 && fields[1] == "version" || len(fields) > 2 && fields[2] == "version" {
-			version = line
+		fields := strings.Fields(line)
+		for i, field := range fields {
+			if strings.HasSuffix(field, ":") {
+				// Avoid parsing fields of lines like "Configured with: …", which may
+				// contain arbitrary substrings.
+				break
+			}
+			if field == "version" && i < len(fields)-1 {
+				// Check that the next field is plausibly a version number.
+				// We require only that it begins with an ASCII digit,
+				// since we don't know what version numbering schemes a given
+				// C compiler may use. (Clang and GCC mostly seem to follow the scheme X.Y.Z,
+				// but in https://go.dev/issue/64619 we saw "8.3 [DragonFly]", and who knows
+				// what other C compilers like "zig cc" might report?)
+				next := fields[i+1]
+				if len(next) > 0 && next[0] >= '0' && next[0] <= '9' {
+					version = line
+					break
+				}
+			}
+		}
+		if version != "" {
 			break
 		}
 	}
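
The version-detection loop above no longer keys off a fixed field position: it looks for a bare "version" token followed by something starting with an ASCII digit, and stops scanning a line once it hits a "key:" style field. A standalone sketch of that logic; versionLine is an illustrative name and the sample banner is made up in the style of `gcc -v` output:

package main

import (
	"fmt"
	"strings"
)

// versionLine returns the first line containing a standalone "version" field
// whose next field begins with a digit, ignoring the rest of a line once a
// field ending in ":" is seen (e.g. "Configured with: ..." lines).
func versionLine(out string) string {
	for _, line := range strings.Split(out, "\n") {
		fields := strings.Fields(line)
		for i, field := range fields {
			if strings.HasSuffix(field, ":") {
				break // arbitrary "key: value" text; don't trust it
			}
			if field == "version" && i < len(fields)-1 {
				if next := fields[i+1]; len(next) > 0 && next[0] >= '0' && next[0] <= '9' {
					return line
				}
			}
		}
	}
	return ""
}

func main() {
	out := "Configured with: ../configure --enable-version-specific-runtime-libs\n" +
		"gcc version 13.2.0 (GCC)\n"
	fmt.Println(versionLine(out)) // gcc version 13.2.0 (GCC)
}
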
@@ -270,7 +290,7 @@
 		}
 		exe = fields[0]
 		if !strings.ContainsAny(exe, `/\`) {
-			if lp, err := exec.LookPath(exe); err == nil {
+			if lp, err := cfg.LookPath(exe); err == nil {
 				exe = lp
 			}
 		}
@@ -343,16 +363,7 @@
 		fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",%s`+"\n", secType)
 	}
 
-	if cfg.BuildN || cfg.BuildX {
-		for _, line := range bytes.Split(buf.Bytes(), []byte("\n")) {
-			b.Showcmd("", "echo '%s' >> %s", line, sfile)
-		}
-		if cfg.BuildN {
-			return sfile, nil
-		}
-	}
-
-	if err := os.WriteFile(sfile, buf.Bytes(), 0666); err != nil {
+	if err := b.Shell(a).writeFile(sfile, buf.Bytes()); err != nil {
 		return "", err
 	}
 
@@ -476,8 +487,8 @@
 					// If it doesn't work, it doesn't work: reusing the cached binary is more
 					// important than reprinting diagnostic information.
 					if printOutput {
-						showStdout(b, c, a.actionID, "stdout")      // compile output
-						showStdout(b, c, a.actionID, "link-stdout") // link output
+						showStdout(b, c, a, "stdout")      // compile output
+						showStdout(b, c, a, "link-stdout") // link output
 					}
 
 					// Poison a.Target to catch uses later in the build.
@@ -504,8 +515,8 @@
 		// If it doesn't work, it doesn't work: reusing the test result is more
 		// important than reprinting diagnostic information.
 		if printOutput {
-			showStdout(b, c, a.Deps[0].actionID, "stdout")      // compile output
-			showStdout(b, c, a.Deps[0].actionID, "link-stdout") // link output
+			showStdout(b, c, a.Deps[0], "stdout")      // compile output
+			showStdout(b, c, a.Deps[0], "link-stdout") // link output
 		}
 
 		// Poison a.Target to catch uses later in the build.
@@ -518,7 +529,7 @@
 	if file, _, err := cache.GetFile(c, actionHash); err == nil {
 		if buildID, err := buildid.ReadFile(file); err == nil {
 			if printOutput {
-				showStdout(b, c, a.actionID, "stdout")
+				showStdout(b, c, a, "stdout")
 			}
 			a.built = file
 			a.Target = "DO NOT USE - using cache"
@@ -560,20 +571,21 @@
 	return false
 }
 
-func showStdout(b *Builder, c cache.Cache, actionID cache.ActionID, key string) error {
+func showStdout(b *Builder, c cache.Cache, a *Action, key string) error {
+	actionID := a.actionID
+
 	stdout, stdoutEntry, err := cache.GetBytes(c, cache.Subkey(actionID, key))
 	if err != nil {
 		return err
 	}
 
 	if len(stdout) > 0 {
+		sh := b.Shell(a)
 		if cfg.BuildX || cfg.BuildN {
-			b.Showcmd("", "%s  # internal", joinUnambiguously(str.StringList("cat", c.OutputFile(stdoutEntry.OutputID))))
+			sh.ShowCmd("", "%s  # internal", joinUnambiguously(str.StringList("cat", c.OutputFile(stdoutEntry.OutputID))))
 		}
 		if !cfg.BuildN {
-			b.output.Lock()
-			defer b.output.Unlock()
-			b.Print(string(stdout))
+			sh.Print(string(stdout))
 		}
 	}
 	return nil
@@ -581,9 +593,7 @@
 
 // flushOutput flushes the output being queued in a.
 func (b *Builder) flushOutput(a *Action) {
-	b.output.Lock()
-	defer b.output.Unlock()
-	b.Print(string(a.output))
+	b.Shell(a).Print(string(a.output))
 	a.output = nil
 }
 
@@ -596,9 +606,11 @@
 //
 // Keep in sync with src/cmd/buildid/buildid.go
 func (b *Builder) updateBuildID(a *Action, target string, rewrite bool) error {
+	sh := b.Shell(a)
+
 	if cfg.BuildX || cfg.BuildN {
 		if rewrite {
-			b.Showcmd("", "%s # internal", joinUnambiguously(str.StringList(base.Tool("buildid"), "-w", target)))
+			sh.ShowCmd("", "%s # internal", joinUnambiguously(str.StringList(base.Tool("buildid"), "-w", target)))
 		}
 		if cfg.BuildN {
 			return nil
@@ -687,7 +699,7 @@
 			outputID, _, err := c.Put(a.actionID, r)
 			r.Close()
 			if err == nil && cfg.BuildX {
-				b.Showcmd("", "%s # internal", joinUnambiguously(str.StringList("cp", target, c.OutputFile(outputID))))
+				sh.ShowCmd("", "%s # internal", joinUnambiguously(str.StringList("cp", target, c.OutputFile(outputID))))
 			}
 			if b.NeedExport {
 				if err != nil {
diff --git a/src/cmd/go/internal/work/cover.go b/src/cmd/go/internal/work/cover.go
new file mode 100644
index 0000000..c0acc61
--- /dev/null
+++ b/src/cmd/go/internal/work/cover.go
@@ -0,0 +1,150 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Action graph execution methods related to coverage.
+
+package work
+
+import (
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/str"
+	"cmd/internal/cov/covcmd"
+	"context"
+	"encoding/json"
+	"fmt"
+	"internal/coverage"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// CovData invokes "go tool covdata" with the specified arguments
+// as part of the execution of action 'a'.
+func (b *Builder) CovData(a *Action, cmdargs ...any) ([]byte, error) {
+	cmdline := str.StringList(cmdargs...)
+	args := append([]string{}, cfg.BuildToolexec...)
+	args = append(args, base.Tool("covdata"))
+	args = append(args, cmdline...)
+	return b.Shell(a).runOut(a.Objdir, nil, args)
+}
+
+// BuildActionCoverMetaFile locates and returns the path of the
+// meta-data file written by the "go tool cover" step as part of the
+// build action for the "go test -cover" run action 'runAct'. Note
+// that if the package has no functions the meta-data file will exist
+// but will be empty; in this case the return is an empty string.
+func BuildActionCoverMetaFile(runAct *Action) (string, error) {
+	p := runAct.Package
+	for i := range runAct.Deps {
+		pred := runAct.Deps[i]
+		if pred.Mode != "build" || pred.Package == nil {
+			continue
+		}
+		if pred.Package.ImportPath == p.ImportPath {
+			metaFile := pred.Objdir + covcmd.MetaFileForPackage(p.ImportPath)
+			f, err := os.Open(metaFile)
+			if err != nil {
+				return "", err
+			}
+			defer f.Close()
+			fi, err2 := f.Stat()
+			if err2 != nil {
+				return "", err2
+			}
+			if fi.Size() == 0 {
+				return "", nil
+			}
+			return metaFile, nil
+		}
+	}
+	return "", fmt.Errorf("internal error: unable to locate build action for package %q run action", p.ImportPath)
+}
+
+// WriteCoveragePercent writes out to the writer 'w' a "percent
+// statements covered" for the package whose test-run action is
+// 'runAct', based on the meta-data file 'mf'. This helper is used in
+// cases where a user runs "go test -cover" on a package that has
+// functions but no tests; in the normal case (package has tests)
+// the percentage is written by the test binary when it runs.
+func WriteCoveragePercent(b *Builder, runAct *Action, mf string, w io.Writer) error {
+	dir := filepath.Dir(mf)
+	output, cerr := b.CovData(runAct, "percent", "-i", dir)
+	if cerr != nil {
+		return b.Shell(runAct).reportCmd("", "", output, cerr)
+	}
+	_, werr := w.Write(output)
+	return werr
+}
+
+// WriteCoverageProfile writes out a coverage profile fragment for the
+// package whose test-run action is 'runAct'; content is written to
+// the file 'outf' based on the coverage meta-data info found in
+// 'mf'. This helper is used in cases where a user runs "go test
+// -cover" on a package that has functions but no tests.
+func WriteCoverageProfile(b *Builder, runAct *Action, mf, outf string, w io.Writer) error {
+	dir := filepath.Dir(mf)
+	output, err := b.CovData(runAct, "textfmt", "-i", dir, "-o", outf)
+	if err != nil {
+		return b.Shell(runAct).reportCmd("", "", output, err)
+	}
+	_, werr := w.Write(output)
+	return werr
+}
+
+// WriteCoverMetaFilesFile writes out a summary file ("meta-files
+// file") as part of the action function for the "writeCoverMeta"
+// pseudo action employed during "go test -coverpkg" runs where there
+// are multiple tests and multiple packages covered. It builds up a
+// table mapping package import path to meta-data file fragment and
+// writes it out to a file where it can be read by the various test
+// run actions. Note that this function has to be called A) after the
+// build actions are complete for all packages being tested, and B)
+// before any of the "run test" actions for those packages happen.
+// This requirement is enforced by making this action ("a")
+// dependent on all test package build actions, and making all test
+// run actions dependent on this action.
+func WriteCoverMetaFilesFile(b *Builder, ctx context.Context, a *Action) error {
+	sh := b.Shell(a)
+
+	// Build the metafilecollection object.
+	var collection coverage.MetaFileCollection
+	for i := range a.Deps {
+		dep := a.Deps[i]
+		if dep.Mode != "build" {
+			panic("unexpected mode " + dep.Mode)
+		}
+		metaFilesFile := dep.Objdir + covcmd.MetaFileForPackage(dep.Package.ImportPath)
+		// Check to make sure the meta-data file fragment exists
+		// and has content (may be empty if package has no functions).
+		if fi, err := os.Stat(metaFilesFile); err != nil {
+			continue
+		} else if fi.Size() == 0 {
+			continue
+		}
+		collection.ImportPaths = append(collection.ImportPaths, dep.Package.ImportPath)
+		collection.MetaFileFragments = append(collection.MetaFileFragments, metaFilesFile)
+	}
+
+	// Serialize it.
+	data, err := json.Marshal(collection)
+	if err != nil {
+		return fmt.Errorf("marshal MetaFileCollection: %v", err)
+	}
+	data = append(data, '\n') // makes -x output more readable
+
+	// Create the directory for this action's objdir and
+	// then write out the serialized collection
+	// to a file in the directory.
+	if err := sh.Mkdir(a.Objdir); err != nil {
+		return err
+	}
+	mfpath := a.Objdir + coverage.MetaFilesFileName
+	if err := sh.writeFile(mfpath, data); err != nil {
+		return fmt.Errorf("writing metafiles file: %v", err)
+	}
+
+	// We're done.
+	return nil
+}
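
For reference, the summary written here is a small JSON object listing, per covered package, the import path and the objdir path of its static meta-data fragment; runTest later copies it into the test's GOCOVERDIR (the sh.CopyFile call earlier in test.go) so the coverage runtime can locate every fragment. The sketch below shows a consumer-side read of that file: metaFileCollection merely mirrors the two fields populated above and assumes the struct marshals with its plain Go field names, and readMetaFilesFile is not a real cmd/go helper.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// metaFileCollection mirrors the fields of internal/coverage.MetaFileCollection
// that WriteCoverMetaFilesFile populates; redeclared here only so the sketch
// compiles outside the standard library.
type metaFileCollection struct {
	ImportPaths       []string
	MetaFileFragments []string
}

// readMetaFilesFile loads the summary file and maps each covered package's
// import path to its static meta-data fragment.
func readMetaFilesFile(path string) (map[string]string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var c metaFileCollection
	if err := json.Unmarshal(data, &c); err != nil {
		return nil, err
	}
	m := make(map[string]string, len(c.ImportPaths))
	for i, ip := range c.ImportPaths {
		m[ip] = c.MetaFileFragments[i]
	}
	return m, nil
}

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: readmeta <metafiles file>")
		os.Exit(2)
	}
	m, err := readMetaFilesFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for ip, frag := range m {
		fmt.Printf("%s -> %s\n", ip, frag)
	}
}
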
diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
index 13d2a78..e05471b 100644
--- a/src/cmd/go/internal/work/exec.go
+++ b/src/cmd/go/internal/work/exec.go
@@ -8,13 +8,13 @@
 
 import (
 	"bytes"
+	"cmd/internal/cov/covcmd"
 	"context"
 	"crypto/sha256"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"go/token"
-	"internal/coverage"
 	"internal/lazyregexp"
 	"io"
 	"io/fs"
@@ -164,6 +164,7 @@
 			if b.AllowErrors && a.Package != nil {
 				if a.Package.Error == nil {
 					a.Package.Error = &load.PackageError{Err: err}
+					a.Package.Incomplete = true
 				}
 			} else {
 				var ipe load.ImportPathError
@@ -309,8 +310,8 @@
 		}
 		// TODO(rsc): Should we include the SWIG version?
 	}
-	if p.Internal.CoverMode != "" {
-		fmt.Fprintf(h, "cover %q %q\n", p.Internal.CoverMode, b.toolID("cover"))
+	if p.Internal.Cover.Mode != "" {
+		fmt.Fprintf(h, "cover %q %q\n", p.Internal.Cover.Mode, b.toolID("cover"))
 	}
 	if p.Internal.FuzzInstrument {
 		if fuzzFlags := fuzzInstrumentFlags(); fuzzFlags != nil {
@@ -440,6 +441,7 @@
 	needCgoHdr
 	needVet
 	needCompiledGoFiles
+	needCovMetaFile
 	needStale
 )
 
@@ -447,6 +449,7 @@
 // Note that any new influence on this logic must be reported in b.buildActionID above as well.
 func (b *Builder) build(ctx context.Context, a *Action) (err error) {
 	p := a.Package
+	sh := b.Shell(a)
 
 	bit := func(x uint32, b bool) uint32 {
 		if b {
@@ -456,9 +459,11 @@
 	}
 
 	cachedBuild := false
+	needCovMeta := p.Internal.Cover.GenMeta
 	need := bit(needBuild, !b.IsCmdList && a.needBuild || b.NeedExport) |
 		bit(needCgoHdr, b.needCgoHdr(a)) |
 		bit(needVet, a.needVet) |
+		bit(needCovMetaFile, needCovMeta) |
 		bit(needCompiledGoFiles, b.NeedCompiledGoFiles)
 
 	if !p.BinaryOnly {
@@ -507,11 +512,11 @@
 		// different sections of the bootstrap script have to
 		// be merged, the banners give patch something
 		// to use to find its context.
-		b.Print("\n#\n# " + p.ImportPath + "\n#\n\n")
+		sh.Print("\n#\n# " + p.ImportPath + "\n#\n\n")
 	}
 
 	if cfg.BuildV {
-		b.Print(p.ImportPath + "\n")
+		sh.Print(p.ImportPath + "\n")
 	}
 
 	if p.Error != nil {
@@ -537,7 +542,7 @@
 		return err
 	}
 
-	if err := b.Mkdir(a.Objdir); err != nil {
+	if err := sh.Mkdir(a.Objdir); err != nil {
 		return err
 	}
 	objdir := a.Objdir
@@ -549,6 +554,15 @@
 		}
 	}
 
+	// Load cached coverage meta-data file fragment, but only if we're
+	// skipping the main build (cachedBuild==true).
+	if cachedBuild && need&needCovMetaFile != 0 {
+		bact := a.Actor.(*buildActor)
+		if err := b.loadCachedObjdirFile(a, cache.Default(), bact.covMetaFileName); err == nil {
+			need &^= needCovMetaFile
+		}
+	}
+
 	// Load cached vet config, but only if that's all we have left
 	// (need == needVet, not testing just the one bit).
 	// If we are going to do a full build anyway,
@@ -569,7 +583,7 @@
 	// make target directory
 	dir, _ := filepath.Split(a.Target)
 	if dir != "" {
-		if err := b.Mkdir(dir); err != nil {
+		if err := sh.Mkdir(dir); err != nil {
 			return err
 		}
 	}
@@ -582,7 +596,7 @@
 	var objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string
 
 	if p.UsesCgo() || p.UsesSwig() {
-		if pcCFLAGS, pcLDFLAGS, err = b.getPkgConfigFlags(p); err != nil {
+		if pcCFLAGS, pcLDFLAGS, err = b.getPkgConfigFlags(a); err != nil {
 			return
 		}
 	}
@@ -607,7 +621,7 @@
 				from := mkAbs(p.Dir, fs[i])
 				opath, _ := fsys.OverlayPath(from)
 				dst := objdir + filepath.Base(fs[i])
-				if err := b.copyFile(dst, opath, 0666, false); err != nil {
+				if err := sh.CopyFile(dst, opath, 0666, false); err != nil {
 					return err
 				}
 				a.nonGoOverlay[from] = dst
@@ -615,21 +629,8 @@
 		}
 	}
 
-	// Run SWIG on each .swig and .swigcxx file.
-	// Each run will generate two files, a .go file and a .c or .cxx file.
-	// The .go file will use import "C" and is to be processed by cgo.
-	if p.UsesSwig() {
-		outGo, outC, outCXX, err := b.swig(a, p, objdir, pcCFLAGS)
-		if err != nil {
-			return err
-		}
-		cgofiles = append(cgofiles, outGo...)
-		cfiles = append(cfiles, outC...)
-		cxxfiles = append(cxxfiles, outCXX...)
-	}
-
 	// If we're doing coverage, preprocess the .go files and put them in the work directory
-	if p.Internal.CoverMode != "" {
+	if p.Internal.Cover.Mode != "" {
 		outfiles := []string{}
 		infiles := []string{}
 		for i, file := range str.StringList(gofiles, cgofiles) {
@@ -684,7 +685,7 @@
 				// users to break things.
 				sum := sha256.Sum256([]byte(a.Package.ImportPath))
 				coverVar := fmt.Sprintf("goCover_%x_", sum[:6])
-				mode := a.Package.Internal.CoverMode
+				mode := a.Package.Internal.Cover.Mode
 				if mode == "" {
 					panic("covermode should be set at this point")
 				}
@@ -700,11 +701,30 @@
 				// the package with the compiler, so set covermode to
 				// the empty string so as to signal that we need to do
 				// that.
-				p.Internal.CoverMode = ""
+				p.Internal.Cover.Mode = ""
+			}
+			if ba, ok := a.Actor.(*buildActor); ok && ba.covMetaFileName != "" {
+				b.cacheObjdirFile(a, cache.Default(), ba.covMetaFileName)
 			}
 		}
 	}
 
+	// Run SWIG on each .swig and .swigcxx file.
+	// Each run will generate two files, a .go file and a .c or .cxx file.
+	// The .go file will use import "C" and is to be processed by cgo.
+	// For -cover test or build runs, this needs to happen after the cover
+	// tool is run; we don't want to instrument swig-generated Go files,
+	// see issue #64661.
+	if p.UsesSwig() {
+		outGo, outC, outCXX, err := b.swig(a, objdir, pcCFLAGS)
+		if err != nil {
+			return err
+		}
+		cgofiles = append(cgofiles, outGo...)
+		cfiles = append(cfiles, outC...)
+		cxxfiles = append(cxxfiles, outCXX...)
+	}
+
 	// Run cgo.
 	if p.UsesCgo() || p.UsesSwig() {
 		// In a package using cgo, cgo compiles the C, C++ and assembly files with gcc.
@@ -843,7 +863,7 @@
 	if p.Internal.BuildInfo != nil && cfg.ModulesEnabled {
 		prog := modload.ModInfoProg(p.Internal.BuildInfo.String(), cfg.BuildToolchainName == "gccgo")
 		if len(prog) > 0 {
-			if err := b.writeFile(objdir+"_gomod_.go", prog); err != nil {
+			if err := sh.writeFile(objdir+"_gomod_.go", prog); err != nil {
 				return err
 			}
 			gofiles = append(gofiles, objdir+"_gomod_.go")
@@ -853,15 +873,7 @@
 	// Compile Go.
 	objpkg := objdir + "_pkg_.a"
 	ofile, out, err := BuildToolchain.gc(b, a, objpkg, icfg.Bytes(), embedcfg, symabis, len(sfiles) > 0, gofiles)
-	if len(out) > 0 {
-		output := b.processOutput(out)
-		if err != nil {
-			return formatOutput(b.WorkDir, p.Dir, p.ImportPath, p.Desc(), output)
-		} else {
-			b.showOutput(a, p.Dir, p.Desc(), output)
-		}
-	}
-	if err != nil {
+	if err := sh.reportCmd("", "", out, err); err != nil {
 		return err
 	}
 	if ofile != objpkg {
@@ -879,17 +891,17 @@
 		switch {
 		case strings.HasSuffix(name, _goos_goarch):
 			targ := file[:len(name)-len(_goos_goarch)] + "_GOOS_GOARCH." + ext
-			if err := b.copyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
+			if err := sh.CopyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
 				return err
 			}
 		case strings.HasSuffix(name, _goarch):
 			targ := file[:len(name)-len(_goarch)] + "_GOARCH." + ext
-			if err := b.copyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
+			if err := sh.CopyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
 				return err
 			}
 		case strings.HasSuffix(name, _goos):
 			targ := file[:len(name)-len(_goos)] + "_GOOS." + ext
-			if err := b.copyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
+			if err := sh.CopyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
 				return err
 			}
 		}
@@ -985,8 +997,11 @@
 		}
 	}
 	if msg != nil {
-		return formatOutput(b.WorkDir, p.Dir, p.ImportPath, p.Desc(), b.processOutput(msg.Bytes()))
-
+		// We pass a non-nil error to reportCmd to trigger the failure reporting
+		// path, but the content of the error doesn't matter because msg is
+		// non-empty.
+		err := errors.New("invalid directive")
+		return b.Shell(a).reportCmd("", "", msg.Bytes(), err)
 	}
 	return nil
 }
@@ -1014,7 +1029,7 @@
 	if err != nil {
 		return err
 	}
-	return b.copyFile(a.Objdir+name, cached, 0666, true)
+	return b.Shell(a).CopyFile(a.Objdir+name, cached, 0666, true)
 }
 
 func (b *Builder) cacheCgoHdr(a *Action) {
@@ -1219,6 +1234,8 @@
 		return fmt.Errorf("vet config not found")
 	}
 
+	sh := b.Shell(a)
+
 	vcfg.VetxOnly = a.VetxOnly
 	vcfg.VetxOutput = a.Objdir + "vet.out"
 	vcfg.PackageVetx = make(map[string]string)
@@ -1250,7 +1267,7 @@
 		// that vet doesn't like in low-level packages
 		// like runtime, sync, and reflect.
 		// Note that $GOROOT/src/buildall.bash
-		// does the same for the misc-compile trybots
+		// does the same
 		// and should be updated if these flags are
 		// changed here.
 		vetFlags = []string{"-unsafeptr=false"}
@@ -1260,7 +1277,7 @@
 		// like hard-coded forced returns or panics that make
 		// code unreachable. It's unreasonable to insist on files
 		// not having any unreachable code during "go test".
-		// (buildall.bash still runs with -unreachable enabled
+		// (buildall.bash still has -unreachable enabled
 		// for the overall whole-tree scan.)
 		if cfg.CmdName == "test" {
 			vetFlags = append(vetFlags, "-unreachable=false")
@@ -1296,7 +1313,7 @@
 		return fmt.Errorf("internal error marshaling vet config: %v", err)
 	}
 	js = append(js, '\n')
-	if err := b.writeFile(a.Objdir+"vet.cfg", js); err != nil {
+	if err := sh.writeFile(a.Objdir+"vet.cfg", js); err != nil {
 		return err
 	}
 
@@ -1311,7 +1328,7 @@
 	if tool == "" {
 		tool = base.Tool("vet")
 	}
-	runErr := b.run(a, p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg")
+	runErr := sh.run(p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg")
 
 	// If vet wrote export data, save it for input to future vets.
 	if f, err := os.Open(vcfg.VetxOutput); err == nil {
@@ -1418,7 +1435,8 @@
 	}
 	defer b.flushOutput(a)
 
-	if err := b.Mkdir(a.Objdir); err != nil {
+	sh := b.Shell(a)
+	if err := sh.Mkdir(a.Objdir); err != nil {
 		return err
 	}
 
@@ -1434,7 +1452,7 @@
 	// make target directory
 	dir, _ := filepath.Split(a.Target)
 	if dir != "" {
-		if err := b.Mkdir(dir); err != nil {
+		if err := sh.Mkdir(dir); err != nil {
 			return err
 		}
 	}
@@ -1485,7 +1503,7 @@
 		info = a.Package.Internal.BuildInfo.String()
 	}
 	fmt.Fprintf(&icfg, "modinfo %q\n", modload.ModInfoData(info))
-	return b.writeFile(file, icfg.Bytes())
+	return b.Shell(a).writeFile(file, icfg.Bytes())
 }
 
 // PkgconfigCmd returns a pkg-config binary name
@@ -1601,8 +1619,10 @@
 	return flags, nil
 }
 
-// Calls pkg-config if needed and returns the cflags/ldflags needed to build the package.
-func (b *Builder) getPkgConfigFlags(p *load.Package) (cflags, ldflags []string, err error) {
+// Calls pkg-config if needed and returns the cflags/ldflags needed to build a's package.
+func (b *Builder) getPkgConfigFlags(a *Action) (cflags, ldflags []string, err error) {
+	p := a.Package
+	sh := b.Shell(a)
 	if pcargs := p.CgoPkgConfig; len(pcargs) > 0 {
 		// pkg-config permits arguments to appear anywhere in
 		// the command line. Move them all to the front, before --.
@@ -1623,10 +1643,10 @@
 			}
 		}
 		var out []byte
-		out, err = b.runOut(nil, p.Dir, nil, b.PkgconfigCmd(), "--cflags", pcflags, "--", pkgs)
+		out, err = sh.runOut(p.Dir, nil, b.PkgconfigCmd(), "--cflags", pcflags, "--", pkgs)
 		if err != nil {
-			err = formatOutput(b.WorkDir, p.Dir, p.ImportPath, b.PkgconfigCmd()+" --cflags "+strings.Join(pcflags, " ")+" -- "+strings.Join(pkgs, " "), string(out)+err.Error())
-			return nil, nil, err
+			desc := b.PkgconfigCmd() + " --cflags " + strings.Join(pcflags, " ") + " -- " + strings.Join(pkgs, " ")
+			return nil, nil, sh.reportCmd(desc, "", out, err)
 		}
 		if len(out) > 0 {
 			cflags, err = splitPkgConfigOutput(bytes.TrimSpace(out))
@@ -1637,10 +1657,10 @@
 				return nil, nil, err
 			}
 		}
-		out, err = b.runOut(nil, p.Dir, nil, b.PkgconfigCmd(), "--libs", pcflags, "--", pkgs)
+		out, err = sh.runOut(p.Dir, nil, b.PkgconfigCmd(), "--libs", pcflags, "--", pkgs)
 		if err != nil {
-			err = formatOutput(b.WorkDir, p.Dir, p.ImportPath, b.PkgconfigCmd()+" --libs "+strings.Join(pcflags, " ")+" -- "+strings.Join(pkgs, " "), string(out)+err.Error())
-			return nil, nil, err
+			desc := b.PkgconfigCmd() + " --libs " + strings.Join(pcflags, " ") + " -- " + strings.Join(pkgs, " ")
+			return nil, nil, sh.reportCmd(desc, "", out, err)
 		}
 		if len(out) > 0 {
 			// We need to handle path with spaces so that C:/Program\ Files can pass
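The two sh.runOut calls above are the pair of pkg-config invocations behind a "#cgo pkg-config:" directive, one for --cflags and one for --libs. A minimal, hypothetical cgo file that would exercise this path (the libpng package name and header are illustrative, not taken from this diff):

	// png.go (hypothetical). Building it makes the go command run, in effect:
	//   pkg-config --cflags -- libpng
	//   pkg-config --libs   -- libpng
	package png

	// #cgo pkg-config: libpng
	// #include <png.h>
	import "C"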
@@ -1663,19 +1683,14 @@
 		return err
 	}
 
-	// TODO: BuildN
+	sh := b.Shell(a)
 	a1 := a.Deps[0]
-	if err := b.Mkdir(filepath.Dir(a.Target)); err != nil {
-		return err
+	if !cfg.BuildN {
+		if err := sh.Mkdir(filepath.Dir(a.Target)); err != nil {
+			return err
+		}
 	}
-	err := os.WriteFile(a.Target, []byte(filepath.Base(a1.Target)+"\n"), 0666)
-	if err != nil {
-		return err
-	}
-	if cfg.BuildX {
-		b.Showcmd("", "echo '%s' > %s # internal", filepath.Base(a1.Target), a.Target)
-	}
-	return nil
+	return sh.writeFile(a.Target, []byte(filepath.Base(a1.Target)+"\n"))
 }
 
 func (b *Builder) linkSharedActionID(a *Action) cache.ActionID {
@@ -1720,7 +1735,7 @@
 		return err
 	}
 
-	if err := b.Mkdir(a.Objdir); err != nil {
+	if err := b.Shell(a).Mkdir(a.Objdir); err != nil {
 		return err
 	}
 
@@ -1749,6 +1764,7 @@
 			err = fmt.Errorf("go %s%s%s: %v", cfg.CmdName, sep, path, err)
 		}
 	}()
+	sh := b.Shell(a)
 
 	a1 := a.Deps[0]
 	a.buildID = a1.buildID
@@ -1786,7 +1802,7 @@
 		// to date).
 		if !a.buggyInstall && !b.IsCmdList {
 			if cfg.BuildN {
-				b.Showcmd("", "touch %s", a.Target)
+				sh.ShowCmd("", "touch %s", a.Target)
 			} else if err := AllowInstall(a); err == nil {
 				now := time.Now()
 				os.Chtimes(a.Target, now, now)
@@ -1805,7 +1821,7 @@
 		return err
 	}
 
-	if err := b.Mkdir(a.Objdir); err != nil {
+	if err := sh.Mkdir(a.Objdir); err != nil {
 		return err
 	}
 
@@ -1821,7 +1837,7 @@
 	// make target directory
 	dir, _ := filepath.Split(a.Target)
 	if dir != "" {
-		if err := b.Mkdir(dir); err != nil {
+		if err := sh.Mkdir(dir); err != nil {
 			return err
 		}
 	}
@@ -1830,7 +1846,7 @@
 		defer b.cleanup(a1)
 	}
 
-	return b.moveOrCopyFile(a.Target, a1.built, perm, false)
+	return sh.moveOrCopyFile(a.Target, a1.built, perm, false)
 }
 
 // AllowInstall returns a non-nil error if this invocation of the go command is
@@ -1846,150 +1862,14 @@
 // this keeps the intermediate objects from hitting the disk.
 func (b *Builder) cleanup(a *Action) {
 	if !cfg.BuildWork {
-		if cfg.BuildX {
-			// Don't say we are removing the directory if
-			// we never created it.
-			if _, err := os.Stat(a.Objdir); err == nil || cfg.BuildN {
-				b.Showcmd("", "rm -r %s", a.Objdir)
-			}
-		}
-		os.RemoveAll(a.Objdir)
+		b.Shell(a).RemoveAll(a.Objdir)
 	}
 }
 
-// moveOrCopyFile is like 'mv src dst' or 'cp src dst'.
-func (b *Builder) moveOrCopyFile(dst, src string, perm fs.FileMode, force bool) error {
-	if cfg.BuildN {
-		b.Showcmd("", "mv %s %s", src, dst)
-		return nil
-	}
-
-	// If we can update the mode and rename to the dst, do it.
-	// Otherwise fall back to standard copy.
-
-	// If the source is in the build cache, we need to copy it.
-	if strings.HasPrefix(src, cache.DefaultDir()) {
-		return b.copyFile(dst, src, perm, force)
-	}
-
-	// On Windows, always copy the file, so that we respect the NTFS
-	// permissions of the parent folder. https://golang.org/issue/22343.
-	// What matters here is not cfg.Goos (the system we are building
-	// for) but runtime.GOOS (the system we are building on).
-	if runtime.GOOS == "windows" {
-		return b.copyFile(dst, src, perm, force)
-	}
-
-	// If the destination directory has the group sticky bit set,
-	// we have to copy the file to retain the correct permissions.
-	// https://golang.org/issue/18878
-	if fi, err := os.Stat(filepath.Dir(dst)); err == nil {
-		if fi.IsDir() && (fi.Mode()&fs.ModeSetgid) != 0 {
-			return b.copyFile(dst, src, perm, force)
-		}
-	}
-
-	// The perm argument is meant to be adjusted according to umask,
-	// but we don't know what the umask is.
-	// Create a dummy file to find out.
-	// This avoids build tags and works even on systems like Plan 9
-	// where the file mask computation incorporates other information.
-	mode := perm
-	f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
-	if err == nil {
-		fi, err := f.Stat()
-		if err == nil {
-			mode = fi.Mode() & 0777
-		}
-		name := f.Name()
-		f.Close()
-		os.Remove(name)
-	}
-
-	if err := os.Chmod(src, mode); err == nil {
-		if err := os.Rename(src, dst); err == nil {
-			if cfg.BuildX {
-				b.Showcmd("", "mv %s %s", src, dst)
-			}
-			return nil
-		}
-	}
-
-	return b.copyFile(dst, src, perm, force)
-}
-
-// copyFile is like 'cp src dst'.
-func (b *Builder) copyFile(dst, src string, perm fs.FileMode, force bool) error {
-	if cfg.BuildN || cfg.BuildX {
-		b.Showcmd("", "cp %s %s", src, dst)
-		if cfg.BuildN {
-			return nil
-		}
-	}
-
-	sf, err := os.Open(src)
-	if err != nil {
-		return err
-	}
-	defer sf.Close()
-
-	// Be careful about removing/overwriting dst.
-	// Do not remove/overwrite if dst exists and is a directory
-	// or a non-empty non-object file.
-	if fi, err := os.Stat(dst); err == nil {
-		if fi.IsDir() {
-			return fmt.Errorf("build output %q already exists and is a directory", dst)
-		}
-		if !force && fi.Mode().IsRegular() && fi.Size() != 0 && !isObject(dst) {
-			return fmt.Errorf("build output %q already exists and is not an object file", dst)
-		}
-	}
-
-	// On Windows, remove lingering ~ file from last attempt.
-	if runtime.GOOS == "windows" {
-		if _, err := os.Stat(dst + "~"); err == nil {
-			os.Remove(dst + "~")
-		}
-	}
-
-	mayberemovefile(dst)
-	df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
-	if err != nil && runtime.GOOS == "windows" {
-		// Windows does not allow deletion of a binary file
-		// while it is executing. Try to move it out of the way.
-		// If the move fails, which is likely, we'll try again the
-		// next time we do an install of this binary.
-		if err := os.Rename(dst, dst+"~"); err == nil {
-			os.Remove(dst + "~")
-		}
-		df, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
-	}
-	if err != nil {
-		return fmt.Errorf("copying %s: %w", src, err) // err should already refer to dst
-	}
-
-	_, err = io.Copy(df, sf)
-	df.Close()
-	if err != nil {
-		mayberemovefile(dst)
-		return fmt.Errorf("copying %s to %s: %v", src, dst, err)
-	}
-	return nil
-}
-
-// writeFile writes the text to file.
-func (b *Builder) writeFile(file string, text []byte) error {
-	if cfg.BuildN || cfg.BuildX {
-		b.Showcmd("", "cat >%s << 'EOF' # internal\n%sEOF", file, text)
-	}
-	if cfg.BuildN {
-		return nil
-	}
-	return os.WriteFile(file, text, 0666)
-}
-
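The writeFile removed here (together with moveOrCopyFile and copyFile above it) is what the sh.writeFile call sites elsewhere in this diff now stand in for. As a reminder of the contract those call sites rely on, the same behavior as a self-contained sketch, with the -n/-x flags passed explicitly instead of read from cfg (a restatement of the removed code, not the real Shell method):

	package main

	import (
		"fmt"
		"os"
	)

	// writeFile prints the would-be command under -x, prints and skips the
	// write under -n, and otherwise writes the file, mirroring the removed
	// Builder.writeFile above.
	func writeFile(dryRun, verbose bool, file string, text []byte) error {
		if dryRun || verbose {
			fmt.Printf("cat >%s << 'EOF' # internal\n%sEOF\n", file, text)
		}
		if dryRun {
			return nil
		}
		return os.WriteFile(file, text, 0666)
	}

	func main() {
		_ = writeFile(true, false, "example.cfg", []byte("key=value\n"))
	}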
 // Install the cgo export header file, if there is one.
 func (b *Builder) installHeader(ctx context.Context, a *Action) error {
+	sh := b.Shell(a)
+
 	src := a.Objdir + "_cgo_install.h"
 	if _, err := os.Stat(src); os.IsNotExist(err) {
 		// If the file does not exist, there are no exported
@@ -1998,7 +1878,7 @@
 		// at the right times (not missing rebuilds), here we should
 		// probably delete the installed header, if any.
 		if cfg.BuildX {
-			b.Showcmd("", "# %s not created", src)
+			sh.ShowCmd("", "# %s not created", src)
 		}
 		return nil
 	}
@@ -2009,22 +1889,22 @@
 
 	dir, _ := filepath.Split(a.Target)
 	if dir != "" {
-		if err := b.Mkdir(dir); err != nil {
+		if err := sh.Mkdir(dir); err != nil {
 			return err
 		}
 	}
 
-	return b.moveOrCopyFile(a.Target, src, 0666, true)
+	return sh.moveOrCopyFile(a.Target, src, 0666, true)
 }
 
 // cover runs, in effect,
 //
 //	go tool cover -mode=b.coverMode -var="varName" -o dst.go src.go
 func (b *Builder) cover(a *Action, dst, src string, varName string) error {
-	return b.run(a, a.Objdir, "cover "+a.Package.ImportPath, nil,
+	return b.Shell(a).run(a.Objdir, "", nil,
 		cfg.BuildToolexec,
 		base.Tool("cover"),
-		"-mode", a.Package.Internal.CoverMode,
+		"-mode", a.Package.Internal.Cover.Mode,
 		"-var", varName,
 		"-o", dst,
 		src)
@@ -2054,7 +1934,7 @@
 		"-outfilelist", covoutputs,
 	}
 	args = append(args, infiles...)
-	if err := b.run(a, a.Objdir, "cover "+a.Package.ImportPath, nil,
+	if err := b.Shell(a).run(a.Objdir, "", nil,
 		cfg.BuildToolexec, args); err != nil {
 		return nil, err
 	}
@@ -2062,9 +1942,10 @@
 }
 
 func (b *Builder) writeCoverPkgInputs(a *Action, pconfigfile string, covoutputsfile string, outfiles []string) error {
+	sh := b.Shell(a)
 	p := a.Package
-	p.Internal.CoverageCfg = a.Objdir + "coveragecfg"
-	pcfg := coverage.CoverPkgConfig{
+	p.Internal.Cover.Cfg = a.Objdir + "coveragecfg"
+	pcfg := covcmd.CoverPkgConfig{
 		PkgPath: p.ImportPath,
 		PkgName: p.Name,
 		// Note: coverage granularity is currently hard-wired to
@@ -2072,9 +1953,12 @@
 		// test -cover" to select it. This may change in the future
 		// depending on user demand.
 		Granularity: "perblock",
-		OutConfig:   p.Internal.CoverageCfg,
+		OutConfig:   p.Internal.Cover.Cfg,
 		Local:       p.Internal.Local,
 	}
+	if ba, ok := a.Actor.(*buildActor); ok && ba.covMetaFileName != "" {
+		pcfg.EmitMetaFile = a.Objdir + ba.covMetaFileName
+	}
 	if a.Package.Module != nil {
 		pcfg.ModulePath = a.Package.Module.Path
 	}
@@ -2082,14 +1966,15 @@
 	if err != nil {
 		return err
 	}
-	if err := b.writeFile(pconfigfile, data); err != nil {
+	data = append(data, '\n')
+	if err := sh.writeFile(pconfigfile, data); err != nil {
 		return err
 	}
 	var sb strings.Builder
 	for i := range outfiles {
 		fmt.Fprintf(&sb, "%s\n", outfiles[i])
 	}
-	return b.writeFile(covoutputsfile, []byte(sb.String()))
+	return sh.writeFile(covoutputsfile, []byte(sb.String()))
 }
 
 var objectMagic = [][]byte{
@@ -2126,321 +2011,6 @@
 	return false
 }
 
-// mayberemovefile removes a file only if it is a regular file
-// When running as a user with sufficient privileges, we may delete
-// even device files, for example, which is not intended.
-func mayberemovefile(s string) {
-	if fi, err := os.Lstat(s); err == nil && !fi.Mode().IsRegular() {
-		return
-	}
-	os.Remove(s)
-}
-
-// fmtcmd formats a command in the manner of fmt.Sprintf but also:
-//
-//	If dir is non-empty and the script is not in dir right now,
-//	fmtcmd inserts "cd dir\n" before the command.
-//
-//	fmtcmd replaces the value of b.WorkDir with $WORK.
-//	fmtcmd replaces the value of goroot with $GOROOT.
-//	fmtcmd replaces the value of b.gobin with $GOBIN.
-//
-//	fmtcmd replaces the name of the current directory with dot (.)
-//	but only when it is at the beginning of a space-separated token.
-func (b *Builder) fmtcmd(dir string, format string, args ...any) string {
-	cmd := fmt.Sprintf(format, args...)
-	if dir != "" && dir != "/" {
-		dot := " ."
-		if dir[len(dir)-1] == filepath.Separator {
-			dot += string(filepath.Separator)
-		}
-		cmd = strings.ReplaceAll(" "+cmd, " "+dir, dot)[1:]
-		if b.scriptDir != dir {
-			b.scriptDir = dir
-			cmd = "cd " + dir + "\n" + cmd
-		}
-	}
-	if b.WorkDir != "" && !strings.HasPrefix(cmd, "cat ") {
-		cmd = strings.ReplaceAll(cmd, b.WorkDir, "$WORK")
-		escaped := strconv.Quote(b.WorkDir)
-		escaped = escaped[1 : len(escaped)-1] // strip quote characters
-		if escaped != b.WorkDir {
-			cmd = strings.ReplaceAll(cmd, escaped, "$WORK")
-		}
-	}
-	return cmd
-}
-
-// Showcmd prints the given command to standard output
-// for the implementation of -n or -x.
-func (b *Builder) Showcmd(dir string, format string, args ...any) {
-	b.output.Lock()
-	defer b.output.Unlock()
-	b.Print(b.fmtcmd(dir, format, args...) + "\n")
-}
-
-// showOutput prints "# desc" followed by the given output.
-// The output is expected to contain references to 'dir', usually
-// the source directory for the package that has failed to build.
-// showOutput rewrites mentions of dir with a relative path to dir
-// when the relative path is shorter. This is usually more pleasant.
-// For example, if fmt doesn't compile and we are in src/html,
-// the output is
-//
-//	$ go build
-//	# fmt
-//	../fmt/print.go:1090: undefined: asdf
-//	$
-//
-// instead of
-//
-//	$ go build
-//	# fmt
-//	/usr/gopher/go/src/fmt/print.go:1090: undefined: asdf
-//	$
-//
-// showOutput also replaces references to the work directory with $WORK.
-//
-// If a is not nil and a.output is not nil, showOutput appends to that slice instead of
-// printing to b.Print.
-func (b *Builder) showOutput(a *Action, dir, desc, out string) {
-	importPath := ""
-	if a != nil && a.Package != nil {
-		importPath = a.Package.ImportPath
-	}
-	psErr := formatOutput(b.WorkDir, dir, importPath, desc, out)
-	if a != nil && a.output != nil {
-		a.output = append(a.output, psErr.prefix...)
-		a.output = append(a.output, psErr.suffix...)
-		return
-	}
-
-	b.output.Lock()
-	defer b.output.Unlock()
-	b.Print(psErr.prefix, psErr.suffix)
-}
-
-// A prefixSuffixError is an error formatted by formatOutput.
-type prefixSuffixError struct {
-	importPath     string
-	prefix, suffix string
-}
-
-func (e *prefixSuffixError) Error() string {
-	if e.importPath != "" && !strings.HasPrefix(strings.TrimPrefix(e.prefix, "# "), e.importPath) {
-		return fmt.Sprintf("go build %s:\n%s%s", e.importPath, e.prefix, e.suffix)
-	}
-	return e.prefix + e.suffix
-}
-
-func (e *prefixSuffixError) ImportPath() string {
-	return e.importPath
-}
-
-// formatOutput prints "# desc" followed by the given output.
-// The output is expected to contain references to 'dir', usually
-// the source directory for the package that has failed to build.
-// formatOutput rewrites mentions of dir with a relative path to dir
-// when the relative path is shorter. This is usually more pleasant.
-// For example, if fmt doesn't compile and we are in src/html,
-// the output is
-//
-//	$ go build
-//	# fmt
-//	../fmt/print.go:1090: undefined: asdf
-//	$
-//
-// instead of
-//
-//	$ go build
-//	# fmt
-//	/usr/gopher/go/src/fmt/print.go:1090: undefined: asdf
-//	$
-//
-// formatOutput also replaces references to the work directory with $WORK.
-// formatOutput returns the output in a prefix with the description and a
-// suffix with the actual output.
-func formatOutput(workDir, dir, importPath, desc, out string) *prefixSuffixError {
-	prefix := "# " + desc
-	suffix := "\n" + out
-
-	suffix = strings.ReplaceAll(suffix, " "+workDir, " $WORK")
-
-	for {
-		// Note that dir starts out long, something like
-		// /foo/bar/baz/root/a
-		// The target string to be reduced is something like
-		// (blah-blah-blah) /foo/bar/baz/root/sibling/whatever.go:blah:blah
-		// /foo/bar/baz/root/a doesn't match /foo/bar/baz/root/sibling, but the prefix
-		// /foo/bar/baz/root does.  And there may be other niblings sharing shorter
-		// prefixes, the only way to find them is to look.
-		// This doesn't always produce a relative path --
-		// /foo is shorter than ../../.., for example.
-		//
-		if reldir := base.ShortPath(dir); reldir != dir {
-			suffix = strings.ReplaceAll(suffix, " "+dir, " "+reldir)
-			suffix = strings.ReplaceAll(suffix, "\n"+dir, "\n"+reldir)
-			suffix = strings.ReplaceAll(suffix, "\n\t"+dir, "\n\t"+reldir)
-			if filepath.Separator == '\\' {
-				// Don't know why, sometimes this comes out with slashes, not backslashes.
-				wdir := strings.ReplaceAll(dir, "\\", "/")
-				suffix = strings.ReplaceAll(suffix, " "+wdir, " "+reldir)
-				suffix = strings.ReplaceAll(suffix, "\n"+wdir, "\n"+reldir)
-				suffix = strings.ReplaceAll(suffix, "\n\t"+wdir, "\n\t"+reldir)
-			}
-		}
-		dirP := filepath.Dir(dir)
-		if dir == dirP {
-			break
-		}
-		dir = dirP
-	}
-
-	return &prefixSuffixError{importPath: importPath, prefix: prefix, suffix: suffix}
-}
-
-var cgoLine = lazyregexp.New(`\[[^\[\]]+\.(cgo1|cover)\.go:[0-9]+(:[0-9]+)?\]`)
-var cgoTypeSigRe = lazyregexp.New(`\b_C2?(type|func|var|macro)_\B`)
-
-// run runs the command given by cmdline in the directory dir.
-// If the command fails, run prints information about the failure
-// and returns a non-nil error.
-func (b *Builder) run(a *Action, dir string, desc string, env []string, cmdargs ...any) error {
-	out, err := b.runOut(a, dir, env, cmdargs...)
-	if len(out) > 0 {
-		if desc == "" {
-			desc = b.fmtcmd(dir, "%s", strings.Join(str.StringList(cmdargs...), " "))
-		}
-		if err != nil {
-			err = formatOutput(b.WorkDir, dir, a.Package.ImportPath, desc, b.processOutput(out))
-		} else {
-			b.showOutput(a, dir, desc, b.processOutput(out))
-		}
-	}
-	return err
-}
-
-// processOutput prepares the output of runOut to be output to the console.
-func (b *Builder) processOutput(out []byte) string {
-	if out[len(out)-1] != '\n' {
-		out = append(out, '\n')
-	}
-	messages := string(out)
-	// Fix up output referring to cgo-generated code to be more readable.
-	// Replace x.go:19[/tmp/.../x.cgo1.go:18] with x.go:19.
-	// Replace *[100]_Ctype_foo with *[100]C.foo.
-	// If we're using -x, assume we're debugging and want the full dump, so disable the rewrite.
-	if !cfg.BuildX && cgoLine.MatchString(messages) {
-		messages = cgoLine.ReplaceAllString(messages, "")
-		messages = cgoTypeSigRe.ReplaceAllString(messages, "C.")
-	}
-	return messages
-}
-
-// runOut runs the command given by cmdline in the directory dir.
-// It returns the command output and any errors that occurred.
-// It accumulates execution time in a.
-func (b *Builder) runOut(a *Action, dir string, env []string, cmdargs ...any) ([]byte, error) {
-	cmdline := str.StringList(cmdargs...)
-
-	for _, arg := range cmdline {
-		// GNU binutils commands, including gcc and gccgo, interpret an argument
-		// @foo anywhere in the command line (even following --) as meaning
-		// "read and insert arguments from the file named foo."
-		// Don't say anything that might be misinterpreted that way.
-		if strings.HasPrefix(arg, "@") {
-			return nil, fmt.Errorf("invalid command-line argument %s in command: %s", arg, joinUnambiguously(cmdline))
-		}
-	}
-
-	if cfg.BuildN || cfg.BuildX {
-		var envcmdline string
-		for _, e := range env {
-			if j := strings.IndexByte(e, '='); j != -1 {
-				if strings.ContainsRune(e[j+1:], '\'') {
-					envcmdline += fmt.Sprintf("%s=%q", e[:j], e[j+1:])
-				} else {
-					envcmdline += fmt.Sprintf("%s='%s'", e[:j], e[j+1:])
-				}
-				envcmdline += " "
-			}
-		}
-		envcmdline += joinUnambiguously(cmdline)
-		b.Showcmd(dir, "%s", envcmdline)
-		if cfg.BuildN {
-			return nil, nil
-		}
-	}
-
-	var buf bytes.Buffer
-	cmd := exec.Command(cmdline[0], cmdline[1:]...)
-	if cmd.Path != "" {
-		cmd.Args[0] = cmd.Path
-	}
-	cmd.Stdout = &buf
-	cmd.Stderr = &buf
-	cleanup := passLongArgsInResponseFiles(cmd)
-	defer cleanup()
-	if dir != "." {
-		cmd.Dir = dir
-	}
-	cmd.Env = cmd.Environ() // Pre-allocate with correct PWD.
-
-	// Add the TOOLEXEC_IMPORTPATH environment variable for -toolexec tools.
-	// It doesn't really matter if -toolexec isn't being used.
-	// Note that a.Package.Desc is not really an import path,
-	// but this is consistent with 'go list -f {{.ImportPath}}'.
-	// Plus, it is useful to uniquely identify packages in 'go list -json'.
-	if a != nil && a.Package != nil {
-		cmd.Env = append(cmd.Env, "TOOLEXEC_IMPORTPATH="+a.Package.Desc())
-	}
-
-	cmd.Env = append(cmd.Env, env...)
-	start := time.Now()
-	err := cmd.Run()
-	if a != nil && a.json != nil {
-		aj := a.json
-		aj.Cmd = append(aj.Cmd, joinUnambiguously(cmdline))
-		aj.CmdReal += time.Since(start)
-		if ps := cmd.ProcessState; ps != nil {
-			aj.CmdUser += ps.UserTime()
-			aj.CmdSys += ps.SystemTime()
-		}
-	}
-
-	// err can be something like 'exit status 1'.
-	// Add information about what program was running.
-	// Note that if buf.Bytes() is non-empty, the caller usually
-	// shows buf.Bytes() and does not print err at all, so the
-	// prefix here does not make most output any more verbose.
-	if err != nil {
-		err = errors.New(cmdline[0] + ": " + err.Error())
-	}
-	return buf.Bytes(), err
-}
-
-// joinUnambiguously prints the slice, quoting where necessary to make the
-// output unambiguous.
-// TODO: See issue 5279. The printing of commands needs a complete redo.
-func joinUnambiguously(a []string) string {
-	var buf strings.Builder
-	for i, s := range a {
-		if i > 0 {
-			buf.WriteByte(' ')
-		}
-		q := strconv.Quote(s)
-		// A gccgo command line can contain -( and -).
-		// Make sure we quote them since they are special to the shell.
-		// The trimpath argument can also contain > (part of =>) and ;. Quote those too.
-		if s == "" || strings.ContainsAny(s, " ()>;") || len(q) > len(s)+2 {
-			buf.WriteString(q)
-		} else {
-			buf.WriteString(s)
-		}
-	}
-	return buf.String()
-}
-
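joinUnambiguously is removed here along with the rest of the Builder command helpers, but the same quoting rules keep showing up in the -n/-x output printed by the Shell methods used throughout this diff. A self-contained copy of the removed function with a worked example (the sample arguments are illustrative):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	// joinUnambiguously is copied from the lines removed above: it quotes the
	// empty string, anything containing characters special to the shell
	// (space, parentheses, '>', ';'), and anything whose strconv.Quote form
	// adds more than the two surrounding quote characters.
	func joinUnambiguously(a []string) string {
		var buf strings.Builder
		for i, s := range a {
			if i > 0 {
				buf.WriteByte(' ')
			}
			q := strconv.Quote(s)
			if s == "" || strings.ContainsAny(s, " ()>;") || len(q) > len(s)+2 {
				buf.WriteString(q)
			} else {
				buf.WriteString(s)
			}
		}
		return buf.String()
	}

	func main() {
		fmt.Println(joinUnambiguously([]string{"gcc", "-Wl,-(", "a b.o", ""}))
		// Prints: gcc "-Wl,-(" "a b.o" ""
	}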
 // cCompilerEnv returns environment variables to set when running the
 // C compiler. This is needed to disable escape codes in clang error
 // messages that confuse tools like cgo.
@@ -2448,51 +2018,6 @@
 	return []string{"TERM=dumb"}
 }
 
-// Mkdir makes the named directory.
-func (b *Builder) Mkdir(dir string) error {
-	// Make Mkdir(a.Objdir) a no-op instead of an error when a.Objdir == "".
-	if dir == "" {
-		return nil
-	}
-
-	b.exec.Lock()
-	defer b.exec.Unlock()
-	// We can be a little aggressive about being
-	// sure directories exist. Skip repeated calls.
-	if b.mkdirCache[dir] {
-		return nil
-	}
-	b.mkdirCache[dir] = true
-
-	if cfg.BuildN || cfg.BuildX {
-		b.Showcmd("", "mkdir -p %s", dir)
-		if cfg.BuildN {
-			return nil
-		}
-	}
-
-	if err := os.MkdirAll(dir, 0777); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Symlink creates a symlink newname -> oldname.
-func (b *Builder) Symlink(oldname, newname string) error {
-	// It's not an error to try to recreate an existing symlink.
-	if link, err := os.Readlink(newname); err == nil && link == oldname {
-		return nil
-	}
-
-	if cfg.BuildN || cfg.BuildX {
-		b.Showcmd("", "ln -s %s %s", oldname, newname)
-		if cfg.BuildN {
-			return nil
-		}
-	}
-	return os.Symlink(oldname, newname)
-}
-
 // mkAbs returns an absolute path corresponding to
 // evaluating f in the directory dir.
 // We always pass absolute paths of source files so that
@@ -2527,9 +2052,9 @@
 	// typically it is run in the object directory.
 	pack(b *Builder, a *Action, afile string, ofiles []string) error
 	// ld runs the linker to create an executable starting at mainpkg.
-	ld(b *Builder, root *Action, out, importcfg, mainpkg string) error
+	ld(b *Builder, root *Action, targetPath, importcfg, mainpkg string) error
 	// ldShared runs the linker to create a shared library containing the pkgs built by toplevelactions
-	ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error
+	ldShared(b *Builder, root *Action, toplevelactions []*Action, targetPath, importcfg string, allactions []*Action) error
 
 	compiler() string
 	linker() string
@@ -2568,11 +2093,11 @@
 	return noCompiler()
 }
 
-func (noToolchain) ld(b *Builder, root *Action, out, importcfg, mainpkg string) error {
+func (noToolchain) ld(b *Builder, root *Action, targetPath, importcfg, mainpkg string) error {
 	return noCompiler()
 }
 
-func (noToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error {
+func (noToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, targetPath, importcfg string, allactions []*Action) error {
 	return noCompiler()
 }
 
@@ -2581,22 +2106,27 @@
 }
 
 // gcc runs the gcc C compiler to create an object from a single C file.
-func (b *Builder) gcc(a *Action, p *load.Package, workdir, out string, flags []string, cfile string) error {
-	return b.ccompile(a, p, out, flags, cfile, b.GccCmd(p.Dir, workdir))
+func (b *Builder) gcc(a *Action, workdir, out string, flags []string, cfile string) error {
+	p := a.Package
+	return b.ccompile(a, out, flags, cfile, b.GccCmd(p.Dir, workdir))
 }
 
 // gxx runs the g++ C++ compiler to create an object from a single C++ file.
-func (b *Builder) gxx(a *Action, p *load.Package, workdir, out string, flags []string, cxxfile string) error {
-	return b.ccompile(a, p, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir))
+func (b *Builder) gxx(a *Action, workdir, out string, flags []string, cxxfile string) error {
+	p := a.Package
+	return b.ccompile(a, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir))
 }
 
 // gfortran runs the gfortran Fortran compiler to create an object from a single Fortran file.
-func (b *Builder) gfortran(a *Action, p *load.Package, workdir, out string, flags []string, ffile string) error {
-	return b.ccompile(a, p, out, flags, ffile, b.gfortranCmd(p.Dir, workdir))
+func (b *Builder) gfortran(a *Action, workdir, out string, flags []string, ffile string) error {
+	p := a.Package
+	return b.ccompile(a, out, flags, ffile, b.gfortranCmd(p.Dir, workdir))
 }
 
 // ccompile runs the given C or C++ compiler and creates an object from a single source file.
-func (b *Builder) ccompile(a *Action, p *load.Package, outfile string, flags []string, file string, compiler []string) error {
+func (b *Builder) ccompile(a *Action, outfile string, flags []string, file string, compiler []string) error {
+	p := a.Package
+	sh := b.Shell(a)
 	file = mkAbs(p.Dir, file)
 	outfile = mkAbs(p.Dir, outfile)
 
@@ -2661,40 +2191,41 @@
 	if p, ok := a.nonGoOverlay[overlayPath]; ok {
 		overlayPath = p
 	}
-	output, err := b.runOut(a, filepath.Dir(overlayPath), b.cCompilerEnv(), compiler, flags, "-o", outfile, "-c", filepath.Base(overlayPath))
-	if len(output) > 0 {
-		// On FreeBSD 11, when we pass -g to clang 3.8 it
-		// invokes its internal assembler with -dwarf-version=2.
-		// When it sees .section .note.GNU-stack, it warns
-		// "DWARF2 only supports one section per compilation unit".
-		// This warning makes no sense, since the section is empty,
-		// but it confuses people.
-		// We work around the problem by detecting the warning
-		// and dropping -g and trying again.
-		if bytes.Contains(output, []byte("DWARF2 only supports one section per compilation unit")) {
-			newFlags := make([]string, 0, len(flags))
-			for _, f := range flags {
-				if !strings.HasPrefix(f, "-g") {
-					newFlags = append(newFlags, f)
-				}
-			}
-			if len(newFlags) < len(flags) {
-				return b.ccompile(a, p, outfile, newFlags, file, compiler)
+	output, err := sh.runOut(filepath.Dir(overlayPath), b.cCompilerEnv(), compiler, flags, "-o", outfile, "-c", filepath.Base(overlayPath))
+
+	// On FreeBSD 11, when we pass -g to clang 3.8 it
+	// invokes its internal assembler with -dwarf-version=2.
+	// When it sees .section .note.GNU-stack, it warns
+	// "DWARF2 only supports one section per compilation unit".
+	// This warning makes no sense, since the section is empty,
+	// but it confuses people.
+	// We work around the problem by detecting the warning
+	// and dropping -g and trying again.
+	if bytes.Contains(output, []byte("DWARF2 only supports one section per compilation unit")) {
+		newFlags := make([]string, 0, len(flags))
+		for _, f := range flags {
+			if !strings.HasPrefix(f, "-g") {
+				newFlags = append(newFlags, f)
 			}
 		}
-
-		if err != nil || os.Getenv("GO_BUILDER_NAME") != "" {
-			err = formatOutput(b.WorkDir, p.Dir, p.ImportPath, p.Desc(), b.processOutput(output))
-		} else {
-			b.showOutput(a, p.Dir, p.Desc(), b.processOutput(output))
+		if len(newFlags) < len(flags) {
+			return b.ccompile(a, outfile, newFlags, file, compiler)
 		}
 	}
-	return err
+
+	if len(output) > 0 && err == nil && os.Getenv("GO_BUILDER_NAME") != "" {
+		output = append(output, "C compiler warning promoted to error on Go builders\n"...)
+		err = errors.New("warning promoted to error")
+	}
+
+	return sh.reportCmd("", "", output, err)
 }
 
 // gccld runs the gcc linker to create an executable from a set of object files.
 // Any error output is only displayed for BuildN or BuildX.
-func (b *Builder) gccld(a *Action, p *load.Package, objdir, outfile string, flags []string, objs []string) error {
+func (b *Builder) gccld(a *Action, objdir, outfile string, flags []string, objs []string) error {
+	p := a.Package
+	sh := b.Shell(a)
 	var cmd []string
 	if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 {
 		cmd = b.GxxCmd(p.Dir, objdir)
@@ -2703,8 +2234,7 @@
 	}
 
 	cmdargs := []any{cmd, "-o", outfile, objs, flags}
-	dir := p.Dir
-	out, err := b.runOut(a, base.Cwd(), b.cCompilerEnv(), cmdargs...)
+	out, err := sh.runOut(base.Cwd(), b.cCompilerEnv(), cmdargs...)
 
 	if len(out) > 0 {
 		// Filter out useless linker warnings caused by bugs outside Go.
@@ -2739,9 +2269,11 @@
 			save = append(save, line)
 		}
 		out = bytes.Join(save, nil)
-		if len(out) > 0 && (cfg.BuildN || cfg.BuildX) {
-			b.showOutput(nil, dir, p.ImportPath, b.processOutput(out))
-		}
+	}
+	// Note that failure is an expected outcome here, so we report output only
+	// in debug mode and don't report the error.
+	if cfg.BuildN || cfg.BuildX {
+		sh.reportCmd("", "", out, nil)
 	}
 	return err
 }
@@ -2869,6 +2401,11 @@
 
 // gccSupportsFlag checks to see if the compiler supports a flag.
 func (b *Builder) gccSupportsFlag(compiler []string, flag string) bool {
+	// We use the background shell for operations here because, while this is
+	// triggered by some Action, it's not really about that Action, and often we
+	// just get the results from the global cache.
+	sh := b.BackgroundShell()
+
 	key := [2]string{compiler[0], flag}
 
 	// We used to write an empty C file, but that gets complicated with go
@@ -2917,7 +2454,7 @@
 	cmdArgs = append(cmdArgs, "-x", "c", "-", "-o", tmp)
 
 	if cfg.BuildN {
-		b.Showcmd(b.WorkDir, "%s || true", joinUnambiguously(cmdArgs))
+		sh.ShowCmd(b.WorkDir, "%s || true", joinUnambiguously(cmdArgs))
 		return false
 	}
 
@@ -2945,7 +2482,7 @@
 	}
 
 	if cfg.BuildX {
-		b.Showcmd(b.WorkDir, "%s || true", joinUnambiguously(cmdArgs))
+		sh.ShowCmd(b.WorkDir, "%s || true", joinUnambiguously(cmdArgs))
 	}
 	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
 	cmd.Dir = b.WorkDir
@@ -2987,8 +2524,13 @@
 // Other parts of cmd/go can use the id as a hash
 // of the installed compiler version.
 func (b *Builder) gccCompilerID(compiler string) (id cache.ActionID, ok bool) {
+	// We use the background shell for operations here because, while this is
+	// triggered by some Action, it's not really about that Action, and often we
+	// just get the results from the global cache.
+	sh := b.BackgroundShell()
+
 	if cfg.BuildN {
-		b.Showcmd(b.WorkDir, "%s || true", joinUnambiguously([]string{compiler, "--version"}))
+		sh.ShowCmd(b.WorkDir, "%s || true", joinUnambiguously([]string{compiler, "--version"}))
 		return cache.ActionID{}, false
 	}
 
@@ -3013,7 +2555,7 @@
 	//
 	// Otherwise, we compute a new validation description
 	// and compiler id (below).
-	exe, err := exec.LookPath(compiler)
+	exe, err := cfg.LookPath(compiler)
 	if err != nil {
 		return cache.ActionID{}, false
 	}
@@ -3169,6 +2711,8 @@
 
 func (b *Builder) cgo(a *Action, cgoExe, objdir string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles, ffiles []string) (outGo, outObj []string, err error) {
 	p := a.Package
+	sh := b.Shell(a)
+
 	cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS, err := b.CFlags(p)
 	if err != nil {
 		return nil, nil, err
@@ -3213,13 +2757,8 @@
 	flagLists := [][]string{cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS}
 	if flagsNotCompatibleWithInternalLinking(flagSources, flagLists) {
 		tokenFile := objdir + "preferlinkext"
-		if cfg.BuildN || cfg.BuildX {
-			b.Showcmd("", "echo > %s", tokenFile)
-		}
-		if !cfg.BuildN {
-			if err := os.WriteFile(tokenFile, nil, 0666); err != nil {
-				return nil, nil, err
-			}
+		if err := sh.writeFile(tokenFile, nil); err != nil {
+			return nil, nil, err
 		}
 		outObj = append(outObj, tokenFile)
 	}
@@ -3281,7 +2820,7 @@
 		if pkgpath := gccgoPkgpath(p); pkgpath != "" {
 			cgoflags = append(cgoflags, "-gccgopkgpath="+pkgpath)
 		}
-		if !BuildToolchain.(gccgoToolchain).supportsCgoIncomplete(b) {
+		if !BuildToolchain.(gccgoToolchain).supportsCgoIncomplete(b, a) {
 			cgoflags = append(cgoflags, "-gccgo_define_cgoincomplete")
 		}
 	}
@@ -3308,7 +2847,7 @@
 		cgoflags = append(cgoflags, "-trimpath", strings.Join(trimpath, ";"))
 	}
 
-	if err := b.run(a, p.Dir, p.ImportPath, cgoenv, cfg.BuildToolexec, cgoExe, "-objdir", objdir, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoCFLAGS, cgofiles); err != nil {
+	if err := sh.run(p.Dir, p.ImportPath, cgoenv, cfg.BuildToolexec, cgoExe, "-objdir", objdir, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoCFLAGS, cgofiles); err != nil {
 		return nil, nil, err
 	}
 	outGo = append(outGo, gofiles...)
@@ -3329,7 +2868,7 @@
 	cflags := str.StringList(cgoCPPFLAGS, cgoCFLAGS)
 	for _, cfile := range cfiles {
 		ofile := nextOfile()
-		if err := b.gcc(a, p, a.Objdir, ofile, cflags, objdir+cfile); err != nil {
+		if err := b.gcc(a, a.Objdir, ofile, cflags, objdir+cfile); err != nil {
 			return nil, nil, err
 		}
 		outObj = append(outObj, ofile)
@@ -3337,7 +2876,7 @@
 
 	for _, file := range gccfiles {
 		ofile := nextOfile()
-		if err := b.gcc(a, p, a.Objdir, ofile, cflags, file); err != nil {
+		if err := b.gcc(a, a.Objdir, ofile, cflags, file); err != nil {
 			return nil, nil, err
 		}
 		outObj = append(outObj, ofile)
@@ -3346,7 +2885,7 @@
 	cxxflags := str.StringList(cgoCPPFLAGS, cgoCXXFLAGS)
 	for _, file := range gxxfiles {
 		ofile := nextOfile()
-		if err := b.gxx(a, p, a.Objdir, ofile, cxxflags, file); err != nil {
+		if err := b.gxx(a, a.Objdir, ofile, cxxflags, file); err != nil {
 			return nil, nil, err
 		}
 		outObj = append(outObj, ofile)
@@ -3354,7 +2893,7 @@
 
 	for _, file := range mfiles {
 		ofile := nextOfile()
-		if err := b.gcc(a, p, a.Objdir, ofile, cflags, file); err != nil {
+		if err := b.gcc(a, a.Objdir, ofile, cflags, file); err != nil {
 			return nil, nil, err
 		}
 		outObj = append(outObj, ofile)
@@ -3363,7 +2902,7 @@
 	fflags := str.StringList(cgoCPPFLAGS, cgoFFLAGS)
 	for _, file := range ffiles {
 		ofile := nextOfile()
-		if err := b.gfortran(a, p, a.Objdir, ofile, fflags, file); err != nil {
+		if err := b.gfortran(a, a.Objdir, ofile, fflags, file); err != nil {
 			return nil, nil, err
 		}
 		outObj = append(outObj, ofile)
@@ -3372,7 +2911,7 @@
 	switch cfg.BuildToolchainName {
 	case "gc":
 		importGo := objdir + "_cgo_import.go"
-		dynOutGo, dynOutObj, err := b.dynimport(a, p, objdir, importGo, cgoExe, cflags, cgoLDFLAGS, outObj)
+		dynOutGo, dynOutObj, err := b.dynimport(a, objdir, importGo, cgoExe, cflags, cgoLDFLAGS, outObj)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -3496,10 +3035,13 @@
 // dynamically imported by the object files outObj.
 // dynOutGo, if not empty, is a new Go file to build as part of the package.
 // dynOutObj, if not empty, is a new file to add to the generated archive.
-func (b *Builder) dynimport(a *Action, p *load.Package, objdir, importGo, cgoExe string, cflags, cgoLDFLAGS, outObj []string) (dynOutGo, dynOutObj string, err error) {
+func (b *Builder) dynimport(a *Action, objdir, importGo, cgoExe string, cflags, cgoLDFLAGS, outObj []string) (dynOutGo, dynOutObj string, err error) {
+	p := a.Package
+	sh := b.Shell(a)
+
 	cfile := objdir + "_cgo_main.c"
 	ofile := objdir + "_cgo_main.o"
-	if err := b.gcc(a, p, objdir, ofile, cflags, cfile); err != nil {
+	if err := b.gcc(a, objdir, ofile, cflags, cfile); err != nil {
 		return "", "", err
 	}
 
@@ -3544,7 +3086,7 @@
 			ldflags = n
 		}
 	}
-	if err := b.gccld(a, p, objdir, dynobj, ldflags, linkobj); err != nil {
+	if err := b.gccld(a, objdir, dynobj, ldflags, linkobj); err != nil {
 		// We only need this information for internal linking.
 		// If this link fails, mark the object as requiring
 		// external linking. This link can fail for things like
@@ -3552,13 +3094,8 @@
 		// cmd/link explicitly looks for the name "dynimportfail".
 		// See issue #52863.
 		fail := objdir + "dynimportfail"
-		if cfg.BuildN || cfg.BuildX {
-			b.Showcmd("", "echo > %s", fail)
-		}
-		if !cfg.BuildN {
-			if err := os.WriteFile(fail, nil, 0666); err != nil {
-				return "", "", err
-			}
+		if err := sh.writeFile(fail, nil); err != nil {
+			return "", "", err
 		}
 		return "", fail, nil
 	}
@@ -3568,7 +3105,7 @@
 	if p.Standard && p.ImportPath == "runtime/cgo" {
 		cgoflags = []string{"-dynlinker"} // record path to dynamic linker
 	}
-	err = b.run(a, base.Cwd(), p.ImportPath, b.cCompilerEnv(), cfg.BuildToolexec, cgoExe, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags)
+	err = sh.run(base.Cwd(), p.ImportPath, b.cCompilerEnv(), cfg.BuildToolexec, cgoExe, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags)
 	if err != nil {
 		return "", "", err
 	}
@@ -3578,7 +3115,9 @@
 // Run SWIG on all SWIG input files.
 // TODO: Don't build a shared library, once SWIG emits the necessary
 // pragmas for external linking.
-func (b *Builder) swig(a *Action, p *load.Package, objdir string, pcCFLAGS []string) (outGo, outC, outCXX []string, err error) {
+func (b *Builder) swig(a *Action, objdir string, pcCFLAGS []string) (outGo, outC, outCXX []string, err error) {
+	p := a.Package
+
 	if err := b.swigVersionCheck(); err != nil {
 		return nil, nil, nil, err
 	}
@@ -3589,7 +3128,7 @@
 	}
 
 	for _, f := range p.SwigFiles {
-		goFile, cFile, err := b.swigOne(a, p, f, objdir, pcCFLAGS, false, intgosize)
+		goFile, cFile, err := b.swigOne(a, f, objdir, pcCFLAGS, false, intgosize)
 		if err != nil {
 			return nil, nil, nil, err
 		}
@@ -3601,7 +3140,7 @@
 		}
 	}
 	for _, f := range p.SwigCXXFiles {
-		goFile, cxxFile, err := b.swigOne(a, p, f, objdir, pcCFLAGS, true, intgosize)
+		goFile, cxxFile, err := b.swigOne(a, f, objdir, pcCFLAGS, true, intgosize)
 		if err != nil {
 			return nil, nil, nil, err
 		}
@@ -3622,7 +3161,8 @@
 )
 
 func (b *Builder) swigDoVersionCheck() error {
-	out, err := b.runOut(nil, ".", nil, "swig", "-version")
+	sh := b.BackgroundShell()
+	out, err := sh.runOut(".", nil, "swig", "-version")
 	if err != nil {
 		return err
 	}
@@ -3724,7 +3264,10 @@
 }
 
 // Run SWIG on one SWIG input file.
-func (b *Builder) swigOne(a *Action, p *load.Package, file, objdir string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) {
+func (b *Builder) swigOne(a *Action, file, objdir string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) {
+	p := a.Package
+	sh := b.Shell(a)
+
 	cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _, err := b.CFlags(p)
 	if err != nil {
 		return "", "", err
@@ -3777,19 +3320,12 @@
 		args = append(args, "-c++")
 	}
 
-	out, err := b.runOut(a, p.Dir, nil, "swig", args, file)
-	if err != nil {
-		if len(out) > 0 {
-			if bytes.Contains(out, []byte("-intgosize")) || bytes.Contains(out, []byte("-cgo")) {
-				return "", "", errors.New("must have SWIG version >= 3.0.6")
-			}
-			// swig error
-			err = formatOutput(b.WorkDir, p.Dir, p.ImportPath, p.Desc(), b.processOutput(out))
-		}
-		return "", "", err
+	out, err := sh.runOut(p.Dir, nil, "swig", args, file)
+	if err != nil && (bytes.Contains(out, []byte("-intgosize")) || bytes.Contains(out, []byte("-cgo"))) {
+		return "", "", errors.New("must have SWIG version >= 3.0.6")
 	}
-	if len(out) > 0 {
-		b.showOutput(a, p.Dir, p.Desc(), b.processOutput(out)) // swig warning
+	if err := sh.reportCmd("", "", out, err); err != nil {
+		return "", "", err
 	}
 
 	// If the input was x.swig, the output is x.go in the objdir.
@@ -3803,7 +3339,7 @@
 	goFile = objdir + goFile
 	newGoFile := objdir + "_" + base + "_swig.go"
 	if cfg.BuildX || cfg.BuildN {
-		b.Showcmd("", "mv %s %s", goFile, newGoFile)
+		sh.ShowCmd("", "mv %s %s", goFile, newGoFile)
 	}
 	if !cfg.BuildN {
 		if err := os.Rename(goFile, newGoFile); err != nil {
diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go
index 26b4e0f..e2a5456 100644
--- a/src/cmd/go/internal/work/gc.go
+++ b/src/cmd/go/internal/work/gc.go
@@ -22,7 +22,6 @@
 	"cmd/go/internal/gover"
 	"cmd/go/internal/load"
 	"cmd/go/internal/str"
-	"cmd/internal/objabi"
 	"cmd/internal/quoted"
 	"crypto/sha1"
 )
@@ -33,20 +32,6 @@
 // The 'path' used for GOROOT_FINAL when -trimpath is specified
 const trimPathGoRootFinal string = "$GOROOT"
 
-var runtimePackages = map[string]struct{}{
-	"internal/abi":             struct{}{},
-	"internal/bytealg":         struct{}{},
-	"internal/coverage/rtcov":  struct{}{},
-	"internal/cpu":             struct{}{},
-	"internal/goarch":          struct{}{},
-	"internal/goos":            struct{}{},
-	"runtime":                  struct{}{},
-	"runtime/internal/atomic":  struct{}{},
-	"runtime/internal/math":    struct{}{},
-	"runtime/internal/sys":     struct{}{},
-	"runtime/internal/syscall": struct{}{},
-}
-
 // The Go toolchain.
 
 type gcToolchain struct{}
@@ -72,6 +57,7 @@
 
 func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) {
 	p := a.Package
+	sh := b.Shell(a)
 	objdir := a.Objdir
 	if archive != "" {
 		ofile = archive
@@ -94,14 +80,6 @@
 	if p.Standard {
 		defaultGcFlags = append(defaultGcFlags, "-std")
 	}
-	_, compilingRuntime := runtimePackages[p.ImportPath]
-	compilingRuntime = compilingRuntime && p.Standard
-	if compilingRuntime {
-		// runtime compiles with a special gc flag to check for
-		// memory allocations that are invalid in the runtime package,
-		// and to implement some special compiler pragmas.
-		defaultGcFlags = append(defaultGcFlags, "-+")
-	}
 
 	// If we're giving the compiler the entire package (no C etc files), tell it that,
 	// so that it can give good error messages about forward declarations.
@@ -133,8 +111,8 @@
 	if strings.HasPrefix(ToolchainVersion, "go1") && !strings.Contains(os.Args[0], "go_bootstrap") {
 		defaultGcFlags = append(defaultGcFlags, "-goversion", ToolchainVersion)
 	}
-	if p.Internal.CoverageCfg != "" {
-		defaultGcFlags = append(defaultGcFlags, "-coveragecfg="+p.Internal.CoverageCfg)
+	if p.Internal.Cover.Cfg != "" {
+		defaultGcFlags = append(defaultGcFlags, "-coveragecfg="+p.Internal.Cover.Cfg)
 	}
 	if p.Internal.PGOProfile != "" {
 		defaultGcFlags = append(defaultGcFlags, "-pgoprofile="+p.Internal.PGOProfile)
@@ -147,18 +125,6 @@
 	if p.Internal.FuzzInstrument {
 		gcflags = append(gcflags, fuzzInstrumentFlags()...)
 	}
-	if compilingRuntime {
-		// Remove -N, if present.
-		// It is not possible to build the runtime with no optimizations,
-		// because the compiler cannot eliminate enough write barriers.
-		for i := 0; i < len(gcflags); i++ {
-			if gcflags[i] == "-N" {
-				copy(gcflags[i:], gcflags[i+1:])
-				gcflags = gcflags[:len(gcflags)-1]
-				i--
-			}
-		}
-	}
 	// Add -c=N to use concurrent backend compilation, if possible.
 	if c := gcBackendConcurrency(gcflags); c > 1 {
 		defaultGcFlags = append(defaultGcFlags, fmt.Sprintf("-c=%d", c))
@@ -171,13 +137,13 @@
 		args = append(args, "-D", p.Internal.LocalPrefix)
 	}
 	if importcfg != nil {
-		if err := b.writeFile(objdir+"importcfg", importcfg); err != nil {
+		if err := sh.writeFile(objdir+"importcfg", importcfg); err != nil {
 			return "", nil, err
 		}
 		args = append(args, "-importcfg", objdir+"importcfg")
 	}
 	if embedcfg != nil {
-		if err := b.writeFile(objdir+"embedcfg", embedcfg); err != nil {
+		if err := sh.writeFile(objdir+"embedcfg", embedcfg); err != nil {
 			return "", nil, err
 		}
 		args = append(args, "-embedcfg", objdir+"embedcfg")
@@ -209,7 +175,7 @@
 		args = append(args, f)
 	}
 
-	output, err = b.runOut(a, base.Cwd(), nil, args...)
+	output, err = sh.runOut(base.Cwd(), nil, args...)
 	return ofile, output, err
 }
 
@@ -359,9 +325,6 @@
 			}
 		}
 	}
-	if objabi.IsRuntimePackagePath(pkgpath) {
-		args = append(args, "-compiling-runtime")
-	}
 
 	if cfg.Goarch == "386" {
 		// Define GO386_value from cfg.GO386.
@@ -398,6 +361,20 @@
 		}
 	}
 
+	if cfg.Goarch == "arm" {
+		// Define GOARM_value from cfg.GOARM.
+		switch cfg.GOARM {
+		case "7":
+			args = append(args, "-D", "GOARM_7")
+			fallthrough
+		case "6":
+			args = append(args, "-D", "GOARM_6")
+			fallthrough
+		default:
+			args = append(args, "-D", "GOARM_5")
+		}
+	}
+
 	return args
 }
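The fallthrough in the new GOARM switch is deliberate: the defines are cumulative, so an assembly file can test a minimum level with a single #ifdef GOARM_n. A runnable sketch of the same logic, extracted from the switch added above:

	package main

	import "fmt"

	// armDefines mirrors the added switch: each case falls through to the
	// lower levels, so GOARM=7 yields all three defines.
	func armDefines(goarm string) []string {
		var args []string
		switch goarm {
		case "7":
			args = append(args, "-D", "GOARM_7")
			fallthrough
		case "6":
			args = append(args, "-D", "GOARM_6")
			fallthrough
		default:
			args = append(args, "-D", "GOARM_5")
		}
		return args
	}

	func main() {
		fmt.Println(armDefines("7")) // [-D GOARM_7 -D GOARM_6 -D GOARM_5]
		fmt.Println(armDefines("6")) // [-D GOARM_6 -D GOARM_5]
		fmt.Println(armDefines("5")) // [-D GOARM_5]
	}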
 
@@ -411,7 +388,7 @@
 		ofile := a.Objdir + sfile[:len(sfile)-len(".s")] + ".o"
 		ofiles = append(ofiles, ofile)
 		args1 := append(args, "-o", ofile, overlayPath)
-		if err := b.run(a, p.Dir, p.ImportPath, nil, args1...); err != nil {
+		if err := b.Shell(a).run(p.Dir, p.ImportPath, nil, args1...); err != nil {
 			return nil, err
 		}
 	}
@@ -419,6 +396,8 @@
 }
 
 func (gcToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) {
+	sh := b.Shell(a)
+
 	mkSymabis := func(p *load.Package, sfiles []string, path string) error {
 		args := asmArgs(a, p)
 		args = append(args, "-gensymabis", "-o", path)
@@ -433,11 +412,11 @@
 		// Supply an empty go_asm.h as if the compiler had been run.
 		// -gensymabis parsing is lax enough that we don't need the
 		// actual definitions that would appear in go_asm.h.
-		if err := b.writeFile(a.Objdir+"go_asm.h", nil); err != nil {
+		if err := sh.writeFile(a.Objdir+"go_asm.h", nil); err != nil {
 			return err
 		}
 
-		return b.run(a, p.Dir, p.ImportPath, nil, args...)
+		return sh.run(p.Dir, p.ImportPath, nil, args...)
 	}
 
 	var symabis string // Only set if we actually create the file
@@ -460,7 +439,7 @@
 	copy(newArgs, args)
 	newArgs[1] = base.Tool(newTool)
 	newArgs[3] = ofile + ".new" // x.6 becomes x.6.new
-	if err := b.run(a, p.Dir, p.ImportPath, nil, newArgs...); err != nil {
+	if err := b.Shell(a).run(p.Dir, p.ImportPath, nil, newArgs...); err != nil {
 		return err
 	}
 	data1, err := os.ReadFile(ofile)
@@ -494,15 +473,16 @@
 	}
 
 	p := a.Package
+	sh := b.Shell(a)
 	if cfg.BuildN || cfg.BuildX {
 		cmdline := str.StringList(base.Tool("pack"), "r", absAfile, absOfiles)
-		b.Showcmd(p.Dir, "%s # internal", joinUnambiguously(cmdline))
+		sh.ShowCmd(p.Dir, "%s # internal", joinUnambiguously(cmdline))
 	}
 	if cfg.BuildN {
 		return nil
 	}
 	if err := packInternal(absAfile, absOfiles); err != nil {
-		return formatOutput(b.WorkDir, p.Dir, p.ImportPath, p.Desc(), err.Error()+"\n")
+		return sh.reportCmd("", "", nil, err)
 	}
 	return nil
 }
@@ -610,7 +590,7 @@
 	return fmt.Sprintf("plugin/unnamed-%x", h.Sum(nil))
 }
 
-func (gcToolchain) ld(b *Builder, root *Action, out, importcfg, mainpkg string) error {
+func (gcToolchain) ld(b *Builder, root *Action, targetPath, importcfg, mainpkg string) error {
 	cxx := len(root.Package.CXXFiles) > 0 || len(root.Package.SwigCXXFiles) > 0
 	for _, a := range root.Deps {
 		if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) {
@@ -679,17 +659,17 @@
 	// the output file path is recorded in the .gnu.version_d section.
 	dir := "."
 	if cfg.BuildBuildmode == "c-shared" || cfg.BuildBuildmode == "plugin" {
-		dir, out = filepath.Split(out)
+		dir, targetPath = filepath.Split(targetPath)
 	}
 
 	env := []string{}
 	if cfg.BuildTrimpath {
 		env = append(env, "GOROOT_FINAL="+trimPathGoRootFinal)
 	}
-	return b.run(root, dir, root.Package.ImportPath, env, cfg.BuildToolexec, base.Tool("link"), "-o", out, "-importcfg", importcfg, ldflags, mainpkg)
+	return b.Shell(root).run(dir, root.Package.ImportPath, env, cfg.BuildToolexec, base.Tool("link"), "-o", targetPath, "-importcfg", importcfg, ldflags, mainpkg)
 }
 
-func (gcToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error {
+func (gcToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, targetPath, importcfg string, allactions []*Action) error {
 	ldflags := []string{"-installsuffix", cfg.BuildContext.InstallSuffix}
 	ldflags = append(ldflags, "-buildmode=shared")
 	ldflags = append(ldflags, forcedLdflags...)
@@ -720,7 +700,7 @@
 		}
 		ldflags = append(ldflags, d.Package.ImportPath+"="+d.Target)
 	}
-	return b.run(root, ".", out, nil, cfg.BuildToolexec, base.Tool("link"), "-o", out, "-importcfg", importcfg, ldflags)
+	return b.Shell(root).run(".", targetPath, nil, cfg.BuildToolexec, base.Tool("link"), "-o", targetPath, "-importcfg", importcfg, ldflags)
 }
 
 func (gcToolchain) cc(b *Builder, a *Action, ofile, cfile string) error {
diff --git a/src/cmd/go/internal/work/gccgo.go b/src/cmd/go/internal/work/gccgo.go
index a048b7f..2dce9f1 100644
--- a/src/cmd/go/internal/work/gccgo.go
+++ b/src/cmd/go/internal/work/gccgo.go
@@ -5,6 +5,7 @@
 package work
 
 import (
+	"bytes"
 	"fmt"
 	"os"
 	"os/exec"
@@ -32,7 +33,7 @@
 	if GccgoName == "" {
 		GccgoName = "gccgo"
 	}
-	GccgoBin, gccgoErr = exec.LookPath(GccgoName)
+	GccgoBin, gccgoErr = cfg.LookPath(GccgoName)
 }
 
 func (gccgoToolchain) compiler() string {
@@ -45,12 +46,8 @@
 	return GccgoBin
 }
 
-func (gccgoToolchain) ar() string {
-	ar := cfg.Getenv("AR")
-	if ar == "" {
-		ar = "ar"
-	}
-	return ar
+func (gccgoToolchain) ar() []string {
+	return envList("AR", "ar")
 }
 
 func checkGccgoBin() {
@@ -64,6 +61,7 @@
 
 func (tools gccgoToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) {
 	p := a.Package
+	sh := b.Shell(a)
 	objdir := a.Objdir
 	out := "_go_.o"
 	ofile = objdir + out
@@ -81,20 +79,20 @@
 	args := str.StringList(tools.compiler(), "-c", gcargs, "-o", ofile, forcedGccgoflags)
 	if importcfg != nil {
 		if b.gccSupportsFlag(args[:1], "-fgo-importcfg=/dev/null") {
-			if err := b.writeFile(objdir+"importcfg", importcfg); err != nil {
+			if err := sh.writeFile(objdir+"importcfg", importcfg); err != nil {
 				return "", nil, err
 			}
 			args = append(args, "-fgo-importcfg="+objdir+"importcfg")
 		} else {
 			root := objdir + "_importcfgroot_"
-			if err := buildImportcfgSymlinks(b, root, importcfg); err != nil {
+			if err := buildImportcfgSymlinks(sh, root, importcfg); err != nil {
 				return "", nil, err
 			}
 			args = append(args, "-I", root)
 		}
 	}
 	if embedcfg != nil && b.gccSupportsFlag(args[:1], "-fgo-embedcfg=/dev/null") {
-		if err := b.writeFile(objdir+"embedcfg", embedcfg); err != nil {
+		if err := sh.writeFile(objdir+"embedcfg", embedcfg); err != nil {
 			return "", nil, err
 		}
 		args = append(args, "-fgo-embedcfg="+objdir+"embedcfg")
@@ -132,7 +130,7 @@
 		args = append(args, f)
 	}
 
-	output, err = b.runOut(a, p.Dir, nil, args)
+	output, err = sh.runOut(p.Dir, nil, args)
 	return ofile, output, err
 }
 
@@ -141,7 +139,7 @@
 // This serves as a temporary transition mechanism until
 // we can depend on gccgo reading an importcfg directly.
 // (The Go 1.9 and later gc compilers already do.)
-func buildImportcfgSymlinks(b *Builder, root string, importcfg []byte) error {
+func buildImportcfgSymlinks(sh *Shell, root string, importcfg []byte) error {
 	for lineNum, line := range strings.Split(string(importcfg), "\n") {
 		lineNum++ // 1-based
 		line = strings.TrimSpace(line)
@@ -166,10 +164,10 @@
 				return fmt.Errorf(`importcfg:%d: invalid packagefile: syntax is "packagefile path=filename": %s`, lineNum, line)
 			}
 			archive := gccgoArchive(root, before)
-			if err := b.Mkdir(filepath.Dir(archive)); err != nil {
+			if err := sh.Mkdir(filepath.Dir(archive)); err != nil {
 				return err
 			}
-			if err := b.Symlink(after, archive); err != nil {
+			if err := sh.Symlink(after, archive); err != nil {
 				return err
 			}
 		case "importmap":
@@ -178,13 +176,13 @@
 			}
 			beforeA := gccgoArchive(root, before)
 			afterA := gccgoArchive(root, after)
-			if err := b.Mkdir(filepath.Dir(beforeA)); err != nil {
+			if err := sh.Mkdir(filepath.Dir(beforeA)); err != nil {
 				return err
 			}
-			if err := b.Mkdir(filepath.Dir(afterA)); err != nil {
+			if err := sh.Mkdir(filepath.Dir(afterA)); err != nil {
 				return err
 			}
-			if err := b.Symlink(afterA, beforeA); err != nil {
+			if err := sh.Symlink(afterA, beforeA); err != nil {
 				return err
 			}
 		case "packageshlib":
@@ -208,7 +206,7 @@
 		}
 		defs = tools.maybePIC(defs)
 		defs = append(defs, b.gccArchArgs()...)
-		err := b.run(a, p.Dir, p.ImportPath, nil, tools.compiler(), "-xassembler-with-cpp", "-I", a.Objdir, "-c", "-o", ofile, defs, sfile)
+		err := b.Shell(a).run(p.Dir, p.ImportPath, nil, tools.compiler(), "-xassembler-with-cpp", "-I", a.Objdir, "-c", "-o", ofile, defs, sfile)
 		if err != nil {
 			return nil, err
 		}
@@ -229,6 +227,7 @@
 
 func (tools gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error {
 	p := a.Package
+	sh := b.Shell(a)
 	objdir := a.Objdir
 	var absOfiles []string
 	for _, f := range ofiles {
@@ -242,20 +241,18 @@
 	}
 	absAfile := mkAbs(objdir, afile)
 	// Try with D modifier first, then without if that fails.
-	output, err := b.runOut(a, p.Dir, nil, tools.ar(), arArgs, "rcD", absAfile, absOfiles)
+	output, err := sh.runOut(p.Dir, nil, tools.ar(), arArgs, "rcD", absAfile, absOfiles)
 	if err != nil {
-		return b.run(a, p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rc", absAfile, absOfiles)
+		return sh.run(p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rc", absAfile, absOfiles)
 	}
 
-	if len(output) > 0 {
-		// Show the output if there is any even without errors.
-		b.showOutput(a, p.Dir, p.ImportPath, b.processOutput(output))
-	}
-
-	return nil
+	// Show the output if there is any even without errors.
+	return sh.reportCmd("", "", output, nil)
 }
 
 func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string, allactions []*Action, buildmode, desc string) error {
+	sh := b.Shell(root)
+
 	// gccgo needs explicit linking with all package dependencies,
 	// and all LDFLAGS from cgo dependencies.
 	afiles := []string{}
@@ -303,11 +300,11 @@
 	readAndRemoveCgoFlags := func(archive string) (string, error) {
 		newID++
 		newArchive := root.Objdir + fmt.Sprintf("_pkg%d_.a", newID)
-		if err := b.copyFile(newArchive, archive, 0666, false); err != nil {
+		if err := sh.CopyFile(newArchive, archive, 0666, false); err != nil {
 			return "", err
 		}
 		if cfg.BuildN || cfg.BuildX {
-			b.Showcmd("", "ar d %s _cgo_flags", newArchive)
+			sh.ShowCmd("", "ar d %s _cgo_flags", newArchive)
 			if cfg.BuildN {
 				// TODO(rsc): We could do better about showing the right _cgo_flags even in -n mode.
 				// Either the archive is already built and we can read them out,
@@ -316,11 +313,11 @@
 				return "", nil
 			}
 		}
-		err := b.run(root, root.Objdir, desc, nil, tools.ar(), arArgs, "x", newArchive, "_cgo_flags")
+		err := sh.run(root.Objdir, desc, nil, tools.ar(), arArgs, "x", newArchive, "_cgo_flags")
 		if err != nil {
 			return "", err
 		}
-		err = b.run(root, ".", desc, nil, tools.ar(), arArgs, "d", newArchive, "_cgo_flags")
+		err = sh.run(".", desc, nil, tools.ar(), arArgs, "d", newArchive, "_cgo_flags")
 		if err != nil {
 			return "", err
 		}
@@ -386,16 +383,9 @@
 	}
 
 	for _, a := range allactions {
-		// Gather CgoLDFLAGS, but not from standard packages.
-		// The go tool can dig up runtime/cgo from GOROOT and
-		// think that it should use its CgoLDFLAGS, but gccgo
-		// doesn't use runtime/cgo.
 		if a.Package == nil {
 			continue
 		}
-		if !a.Package.Standard {
-			cgoldflags = append(cgoldflags, a.Package.CgoLDFLAGS...)
-		}
 		if len(a.Package.CgoFiles) > 0 {
 			usesCgo = true
 		}
@@ -425,9 +415,6 @@
 
 	ldflags = append(ldflags, cgoldflags...)
 	ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...)
-	if root.Package != nil {
-		ldflags = append(ldflags, root.Package.CgoLDFLAGS...)
-	}
 	if cfg.Goos != "aix" {
 		ldflags = str.StringList("-Wl,-(", ldflags, "-Wl,-)")
 	}
@@ -531,25 +518,25 @@
 		}
 	}
 
-	if err := b.run(root, ".", desc, nil, tools.linker(), "-o", out, ldflags, forcedGccgoflags, root.Package.Internal.Gccgoflags); err != nil {
+	if err := sh.run(".", desc, nil, tools.linker(), "-o", out, ldflags, forcedGccgoflags, root.Package.Internal.Gccgoflags); err != nil {
 		return err
 	}
 
 	switch buildmode {
 	case "c-archive":
-		if err := b.run(root, ".", desc, nil, tools.ar(), arArgs, "rc", realOut, out); err != nil {
+		if err := sh.run(".", desc, nil, tools.ar(), arArgs, "rc", realOut, out); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func (tools gccgoToolchain) ld(b *Builder, root *Action, out, importcfg, mainpkg string) error {
-	return tools.link(b, root, out, importcfg, root.Deps, ldBuildmode, root.Package.ImportPath)
+func (tools gccgoToolchain) ld(b *Builder, root *Action, targetPath, importcfg, mainpkg string) error {
+	return tools.link(b, root, targetPath, importcfg, root.Deps, ldBuildmode, root.Package.ImportPath)
 }
 
-func (tools gccgoToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error {
-	return tools.link(b, root, out, importcfg, allactions, "shared", out)
+func (tools gccgoToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, targetPath, importcfg string, allactions []*Action) error {
+	return tools.link(b, root, targetPath, importcfg, allactions, "shared", targetPath)
 }
 
 func (tools gccgoToolchain) cc(b *Builder, a *Action, ofile, cfile string) error {
@@ -575,7 +562,7 @@
 	if b.gccSupportsFlag(compiler, "-gno-record-gcc-switches") {
 		defs = append(defs, "-gno-record-gcc-switches")
 	}
-	return b.run(a, p.Dir, p.ImportPath, nil, compiler, "-Wall", "-g",
+	return b.Shell(a).run(p.Dir, p.ImportPath, nil, compiler, "-Wall", "-g",
 		"-I", a.Objdir, "-I", inc, "-o", ofile, defs, "-c", cfile)
 }
 
@@ -631,8 +618,13 @@
 
 // supportsCgoIncomplete reports whether the gccgo/GoLLVM compiler
 // being used supports cgo.Incomplete, which was added in GCC 13.
-func (tools gccgoToolchain) supportsCgoIncomplete(b *Builder) bool {
+//
+// This takes an Action only for output reporting purposes.
+// The result value is unrelated to the Action.
+func (tools gccgoToolchain) supportsCgoIncomplete(b *Builder, a *Action) bool {
 	gccgoSupportsCgoIncompleteOnce.Do(func() {
+		sh := b.Shell(a)
+
 		fail := func(err error) {
 			fmt.Fprintf(os.Stderr, "cmd/go: %v\n", err)
 			base.SetExitStatus(2)
@@ -657,21 +649,24 @@
 
 		on := strings.TrimSuffix(fn, ".go") + ".o"
 		if cfg.BuildN || cfg.BuildX {
-			b.Showcmd(tmpdir, "%s -c -o %s %s || true", tools.compiler(), on, fn)
+			sh.ShowCmd(tmpdir, "%s -c -o %s %s || true", tools.compiler(), on, fn)
 			// Since this function affects later builds,
 			// and only generates temporary files,
 			// we run the command even with -n.
 		}
 		cmd := exec.Command(tools.compiler(), "-c", "-o", on, fn)
 		cmd.Dir = tmpdir
-		var buf strings.Builder
+		var buf bytes.Buffer
 		cmd.Stdout = &buf
 		cmd.Stderr = &buf
 		err = cmd.Run()
-		if out := buf.String(); len(out) > 0 {
-			b.showOutput(nil, tmpdir, b.fmtcmd(tmpdir, "%s -c -o %s %s", tools.compiler(), on, fn), buf.String())
-		}
 		gccgoSupportsCgoIncomplete = err == nil
+		if cfg.BuildN || cfg.BuildX {
+			// Show output. We always pass a nil err because errors are an
+			// expected outcome in this case.
+			desc := sh.fmtCmd(tmpdir, "%s -c -o %s %s", tools.compiler(), on, fn)
+			sh.reportCmd(desc, tmpdir, buf.Bytes(), nil)
+		}
 	})
 	return gccgoSupportsCgoIncomplete
 }
diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go
index 29116cb..7d0921f 100644
--- a/src/cmd/go/internal/work/init.go
+++ b/src/cmd/go/internal/work/init.go
@@ -246,8 +246,8 @@
 			pkgsFilter = oneMainPkg
 		}
 	case "pie":
-		if cfg.BuildRace {
-			base.Fatalf("-buildmode=pie not supported when -race is enabled")
+		if cfg.BuildRace && !platform.DefaultPIE(cfg.Goos, cfg.Goarch, cfg.BuildRace) {
+			base.Fatalf("-buildmode=pie not supported when -race is enabled on %s/%s", cfg.Goos, cfg.Goarch)
 		}
 		if gccgo {
 			codegenArg = "-fPIE"
diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go
index 270a34e..88504be 100644
--- a/src/cmd/go/internal/work/security.go
+++ b/src/cmd/go/internal/work/security.go
@@ -59,7 +59,10 @@
 	re(`-f(no-)builtin-[a-zA-Z0-9_]*`),
 	re(`-f(no-)?common`),
 	re(`-f(no-)?constant-cfstrings`),
+	re(`-fdebug-prefix-map=([^@]+)=([^@]+)`),
 	re(`-fdiagnostics-show-note-include-stack`),
+	re(`-ffile-prefix-map=([^@]+)=([^@]+)`),
+	re(`-fno-canonical-system-headers`),
 	re(`-f(no-)?eliminate-unused-debug-types`),
 	re(`-f(no-)?exceptions`),
 	re(`-f(no-)?fast-math`),
@@ -114,6 +117,7 @@
 	re(`-mthumb(-interwork)?`),
 	re(`-mthreads`),
 	re(`-mwindows`),
+	re(`-no-canonical-prefixes`),
 	re(`--param=ssp-buffer-size=[0-9]*`),
 	re(`-pedantic(-errors)?`),
 	re(`-pipe`),
diff --git a/src/cmd/go/internal/work/shell.go b/src/cmd/go/internal/work/shell.go
new file mode 100644
index 0000000..6089170
--- /dev/null
+++ b/src/cmd/go/internal/work/shell.go
@@ -0,0 +1,678 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+	"bytes"
+	"cmd/go/internal/base"
+	"cmd/go/internal/cache"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/load"
+	"cmd/go/internal/par"
+	"cmd/go/internal/str"
+	"errors"
+	"fmt"
+	"internal/lazyregexp"
+	"io"
+	"io/fs"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// A Shell runs shell commands and performs shell-like file system operations.
+//
+// Shell tracks context related to running commands, and forms a tree much like
+// context.Context.
+type Shell struct {
+	action       *Action // nil for the root shell
+	*shellShared         // per-Builder state shared across Shells
+}
+
+// shellShared is Shell state shared across all Shells derived from a single
+// root shell (generally a single Builder).
+type shellShared struct {
+	workDir string // $WORK, immutable
+
+	printLock sync.Mutex
+	printFunc func(args ...any) (int, error)
+	scriptDir string // current directory in printed script
+
+	mkdirCache par.Cache[string, error] // a cache of created directories
+}
+
+// NewShell returns a new Shell.
+//
+// Shell will internally serialize calls to the print function.
+// If print is nil, it defaults to printing to stderr.
+func NewShell(workDir string, print func(a ...any) (int, error)) *Shell {
+	if print == nil {
+		print = func(a ...any) (int, error) {
+			return fmt.Fprint(os.Stderr, a...)
+		}
+	}
+	shared := &shellShared{
+		workDir:   workDir,
+		printFunc: print,
+	}
+	return &Shell{shellShared: shared}
+}
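+
+// An illustrative sketch of how a Shell is created and bound (not part of this
+// change; b and a stand in for an existing Builder and Action):
+//
+//	sh := NewShell(b.WorkDir, nil) // nil print defaults to os.Stderr
+//	ash := sh.WithAction(a)        // per-Action view sharing the same state
+//	if err := ash.Mkdir(a.Objdir); err != nil {
+//		// handle error
+//	}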
+
+// Print prints the arguments a to this Shell's output stream, formatting them like fmt.Print.
+// It is safe to call concurrently.
+func (sh *Shell) Print(a ...any) {
+	sh.printLock.Lock()
+	defer sh.printLock.Unlock()
+	sh.printFunc(a...)
+}
+
+func (sh *Shell) printLocked(a ...any) {
+	sh.printFunc(a...)
+}
+
+// WithAction returns a Shell identical to sh, but bound to Action a.
+func (sh *Shell) WithAction(a *Action) *Shell {
+	sh2 := *sh
+	sh2.action = a
+	return &sh2
+}
+
+// Shell returns a shell for running commands on behalf of Action a.
+func (b *Builder) Shell(a *Action) *Shell {
+	if a == nil {
+		// The root shell has a nil Action. The point of this method is to
+		// create a Shell bound to an Action, so disallow nil Actions here.
+		panic("nil Action")
+	}
+	if a.sh == nil {
+		a.sh = b.backgroundSh.WithAction(a)
+	}
+	return a.sh
+}
+
+// BackgroundShell returns a Builder-wide Shell that's not bound to any Action.
+// Try not to use this unless there's really no sensible Action available.
+func (b *Builder) BackgroundShell() *Shell {
+	return b.backgroundSh
+}
+
+// moveOrCopyFile is like 'mv src dst' or 'cp src dst'.
+func (sh *Shell) moveOrCopyFile(dst, src string, perm fs.FileMode, force bool) error {
+	if cfg.BuildN {
+		sh.ShowCmd("", "mv %s %s", src, dst)
+		return nil
+	}
+
+	// If we can update the mode and rename to the dst, do it.
+	// Otherwise fall back to standard copy.
+
+	// If the source is in the build cache, we need to copy it.
+	if strings.HasPrefix(src, cache.DefaultDir()) {
+		return sh.CopyFile(dst, src, perm, force)
+	}
+
+	// On Windows, always copy the file, so that we respect the NTFS
+	// permissions of the parent folder. https://golang.org/issue/22343.
+	// What matters here is not cfg.Goos (the system we are building
+	// for) but runtime.GOOS (the system we are building on).
+	if runtime.GOOS == "windows" {
+		return sh.CopyFile(dst, src, perm, force)
+	}
+
+	// If the destination directory has the group sticky bit set,
+	// we have to copy the file to retain the correct permissions.
+	// https://golang.org/issue/18878
+	if fi, err := os.Stat(filepath.Dir(dst)); err == nil {
+		if fi.IsDir() && (fi.Mode()&fs.ModeSetgid) != 0 {
+			return sh.CopyFile(dst, src, perm, force)
+		}
+	}
+
+	// The perm argument is meant to be adjusted according to umask,
+	// but we don't know what the umask is.
+	// Create a dummy file to find out.
+	// This avoids build tags and works even on systems like Plan 9
+	// where the file mask computation incorporates other information.
+	mode := perm
+	f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
+	if err == nil {
+		fi, err := f.Stat()
+		if err == nil {
+			mode = fi.Mode() & 0777
+		}
+		name := f.Name()
+		f.Close()
+		os.Remove(name)
+	}
+
+	if err := os.Chmod(src, mode); err == nil {
+		if err := os.Rename(src, dst); err == nil {
+			if cfg.BuildX {
+				sh.ShowCmd("", "mv %s %s", src, dst)
+			}
+			return nil
+		}
+	}
+
+	return sh.CopyFile(dst, src, perm, force)
+}
+
+// CopyFile is like 'cp src dst'.
+func (sh *Shell) CopyFile(dst, src string, perm fs.FileMode, force bool) error {
+	if cfg.BuildN || cfg.BuildX {
+		sh.ShowCmd("", "cp %s %s", src, dst)
+		if cfg.BuildN {
+			return nil
+		}
+	}
+
+	sf, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer sf.Close()
+
+	// Be careful about removing/overwriting dst.
+	// Do not remove/overwrite if dst exists and is a directory
+	// or a non-empty non-object file.
+	if fi, err := os.Stat(dst); err == nil {
+		if fi.IsDir() {
+			return fmt.Errorf("build output %q already exists and is a directory", dst)
+		}
+		if !force && fi.Mode().IsRegular() && fi.Size() != 0 && !isObject(dst) {
+			return fmt.Errorf("build output %q already exists and is not an object file", dst)
+		}
+	}
+
+	// On Windows, remove lingering ~ file from last attempt.
+	if runtime.GOOS == "windows" {
+		if _, err := os.Stat(dst + "~"); err == nil {
+			os.Remove(dst + "~")
+		}
+	}
+
+	mayberemovefile(dst)
+	df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil && runtime.GOOS == "windows" {
+		// Windows does not allow deletion of a binary file
+		// while it is executing. Try to move it out of the way.
+		// If the move fails, which is likely, we'll try again the
+		// next time we do an install of this binary.
+		if err := os.Rename(dst, dst+"~"); err == nil {
+			os.Remove(dst + "~")
+		}
+		df, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	}
+	if err != nil {
+		return fmt.Errorf("copying %s: %w", src, err) // err should already refer to dst
+	}
+
+	_, err = io.Copy(df, sf)
+	df.Close()
+	if err != nil {
+		mayberemovefile(dst)
+		return fmt.Errorf("copying %s to %s: %v", src, dst, err)
+	}
+	return nil
+}
+
+// mayberemovefile removes a file only if it is a regular file.
+// When running as a user with sufficient privileges, we may delete
+// even device files, for example, which is not intended.
+func mayberemovefile(s string) {
+	if fi, err := os.Lstat(s); err == nil && !fi.Mode().IsRegular() {
+		return
+	}
+	os.Remove(s)
+}
+
+// writeFile writes the text to file.
+func (sh *Shell) writeFile(file string, text []byte) error {
+	if cfg.BuildN || cfg.BuildX {
+		switch {
+		case len(text) == 0:
+			sh.ShowCmd("", "echo -n > %s # internal", file)
+		case bytes.IndexByte(text, '\n') == len(text)-1:
+			// One line. Use a simpler "echo" command.
+			sh.ShowCmd("", "echo '%s' > %s # internal", bytes.TrimSuffix(text, []byte("\n")), file)
+		default:
+			// Use the most general form.
+			sh.ShowCmd("", "cat >%s << 'EOF' # internal\n%sEOF", file, text)
+		}
+	}
+	if cfg.BuildN {
+		return nil
+	}
+	return os.WriteFile(file, text, 0666)
+}
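+
+// A hedged usage sketch (the file name and the icfg buffer are invented):
+// callers write generated metadata through the shell so that -n and -x can
+// show an equivalent command:
+//
+//	if err := sh.writeFile(a.Objdir+"importcfg", icfg.Bytes()); err != nil {
+//		return err
+//	}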
+
+// Mkdir makes the named directory.
+func (sh *Shell) Mkdir(dir string) error {
+	// Make Mkdir(a.Objdir) a no-op instead of an error when a.Objdir == "".
+	if dir == "" {
+		return nil
+	}
+
+	// We can be a little aggressive about being
+	// sure directories exist. Skip repeated calls.
+	return sh.mkdirCache.Do(dir, func() error {
+		if cfg.BuildN || cfg.BuildX {
+			sh.ShowCmd("", "mkdir -p %s", dir)
+			if cfg.BuildN {
+				return nil
+			}
+		}
+
+		return os.MkdirAll(dir, 0777)
+	})
+}
+
+// RemoveAll is like 'rm -rf'. It attempts to remove all paths even if there's
+// an error, and returns the first error.
+func (sh *Shell) RemoveAll(paths ...string) error {
+	if cfg.BuildN || cfg.BuildX {
+		// Don't say we are removing the directory if we never created it.
+		show := func() bool {
+			for _, path := range paths {
+				if _, ok := sh.mkdirCache.Get(path); ok {
+					return true
+				}
+				if _, err := os.Stat(path); !os.IsNotExist(err) {
+					return true
+				}
+			}
+			return false
+		}
+		if show() {
+			sh.ShowCmd("", "rm -rf %s", strings.Join(paths, " "))
+		}
+	}
+	if cfg.BuildN {
+		return nil
+	}
+
+	var err error
+	for _, path := range paths {
+		if err2 := os.RemoveAll(path); err2 != nil && err == nil {
+			err = err2
+		}
+	}
+	return err
+}
+
+// Symlink creates a symlink newname -> oldname.
+func (sh *Shell) Symlink(oldname, newname string) error {
+	// It's not an error to try to recreate an existing symlink.
+	if link, err := os.Readlink(newname); err == nil && link == oldname {
+		return nil
+	}
+
+	if cfg.BuildN || cfg.BuildX {
+		sh.ShowCmd("", "ln -s %s %s", oldname, newname)
+		if cfg.BuildN {
+			return nil
+		}
+	}
+	return os.Symlink(oldname, newname)
+}
+
+// fmtCmd formats a command in the manner of fmt.Sprintf but also:
+//
+//	fmtCmd replaces the value of sh.workDir with $WORK.
+func (sh *Shell) fmtCmd(dir string, format string, args ...any) string {
+	cmd := fmt.Sprintf(format, args...)
+	if sh.workDir != "" && !strings.HasPrefix(cmd, "cat ") {
+		cmd = strings.ReplaceAll(cmd, sh.workDir, "$WORK")
+		escaped := strconv.Quote(sh.workDir)
+		escaped = escaped[1 : len(escaped)-1] // strip quote characters
+		if escaped != sh.workDir {
+			cmd = strings.ReplaceAll(cmd, escaped, "$WORK")
+		}
+	}
+	return cmd
+}
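+
+// For example (the path is invented), with workDir set to /tmp/go-build123:
+//
+//	sh.fmtCmd("", "mkdir -p %s", "/tmp/go-build123/b001/")
+//
+// returns "mkdir -p $WORK/b001/".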
+
+// ShowCmd prints the given command to standard output
+// for the implementation of -n or -x.
+//
+// ShowCmd also replaces the name of the current script directory with dot (.)
+// but only when it is at the beginning of a space-separated token.
+//
+// If dir is not "" or "/" and not the current script directory, ShowCmd first
+// prints a "cd" command to switch to dir and updates the script directory.
+func (sh *Shell) ShowCmd(dir string, format string, args ...any) {
+	// Use the output lock directly so we can manage scriptDir.
+	sh.printLock.Lock()
+	defer sh.printLock.Unlock()
+
+	cmd := sh.fmtCmd(dir, format, args...)
+
+	if dir != "" && dir != "/" {
+		if dir != sh.scriptDir {
+			// Show changing to dir and update the current directory.
+			sh.printLocked(sh.fmtCmd("", "cd %s\n", dir))
+			sh.scriptDir = dir
+		}
+		// scriptDir is our working directory. Replace it
+		// with "." in the command.
+		dot := " ."
+		if dir[len(dir)-1] == filepath.Separator {
+			dot += string(filepath.Separator)
+		}
+		cmd = strings.ReplaceAll(" "+cmd, " "+dir, dot)[1:]
+	}
+
+	sh.printLocked(cmd + "\n")
+}
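+
+// As an illustration (directory and commands invented), two successive calls
+// with dir set to /home/gopher/src/m print:
+//
+//	cd /home/gopher/src/m
+//	gcc -c ./x.c
+//	gcc -c ./y.c
+//
+// rather than repeating the absolute directory on every line.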
+
+// reportCmd reports the output and exit status of a command. The cmdOut and
+// cmdErr arguments are the output and exit error of the command, respectively.
+//
+// The exact reporting behavior is as follows:
+//
+//	cmdOut  cmdErr  Result
+//	""      nil     print nothing, return nil
+//	!=""    nil     print output, return nil
+//	""      !=nil   print nothing, return cmdErr (later printed)
+//	!=""    !=nil   print nothing, ignore err, return output as error (later printed)
+//
+// reportCmd returns a non-nil error if and only if cmdErr != nil. It assumes
+// that the command output, if non-empty, is more detailed than the command
+// error (which is usually just an exit status), so it prefers using the output
+// as the ultimate error. Typically, the caller should return this error from an
+// Action, where it will be printed by the Builder.
+//
+// reportCmd formats the output as "# desc" followed by the given output. The
+// output is expected to contain references to 'dir', usually the source
+// directory for the package that has failed to build. reportCmd rewrites
+// mentions of dir with a relative path to dir when the relative path is
+// shorter. This is usually more pleasant. For example, if fmt doesn't compile
+// and we are in src/html, the output is
+//
+//	$ go build
+//	# fmt
+//	../fmt/print.go:1090: undefined: asdf
+//	$
+//
+// instead of
+//
+//	$ go build
+//	# fmt
+//	/usr/gopher/go/src/fmt/print.go:1090: undefined: asdf
+//	$
+//
+// reportCmd also replaces references to the work directory with $WORK, replaces
+// cgo file paths with the original file path, and replaces cgo-mangled names
+// with "C.name".
+//
+// desc is optional. If "", a.Package.Desc() is used.
+//
+// dir is optional. If "", a.Package.Dir is used.
+func (sh *Shell) reportCmd(desc, dir string, cmdOut []byte, cmdErr error) error {
+	if len(cmdOut) == 0 && cmdErr == nil {
+		// Common case
+		return nil
+	}
+	if len(cmdOut) == 0 && cmdErr != nil {
+		// Just return the error.
+		//
+		// TODO: This is what we've done for a long time, but it may be a
+		// mistake because it loses all of the extra context and results in
+		// ultimately less descriptive output. We should probably just take the
+		// text of cmdErr as the output in this case and do everything we
+		// otherwise would. We could chain the errors if we feel like it.
+		return cmdErr
+	}
+
+	// Fetch defaults from the package.
+	var p *load.Package
+	a := sh.action
+	if a != nil {
+		p = a.Package
+	}
+	var importPath string
+	if p != nil {
+		importPath = p.ImportPath
+		if desc == "" {
+			desc = p.Desc()
+		}
+		if dir == "" {
+			dir = p.Dir
+		}
+	}
+
+	out := string(cmdOut)
+
+	if !strings.HasSuffix(out, "\n") {
+		out = out + "\n"
+	}
+
+	// Replace workDir with $WORK
+	out = replacePrefix(out, sh.workDir, "$WORK")
+
+	// Rewrite mentions of dir with a relative path to dir
+	// when the relative path is shorter.
+	for {
+		// Note that dir starts out long, something like
+		// /foo/bar/baz/root/a
+		// The target string to be reduced is something like
+		// (blah-blah-blah) /foo/bar/baz/root/sibling/whatever.go:blah:blah
+		// /foo/bar/baz/root/a doesn't match /foo/bar/baz/root/sibling, but the prefix
+		// /foo/bar/baz/root does.  And there may be other niblings sharing shorter
+		// prefixes, the only way to find them is to look.
+		// This doesn't always produce a relative path --
+		// /foo is shorter than ../../.., for example.
+		if reldir := base.ShortPath(dir); reldir != dir {
+			out = replacePrefix(out, dir, reldir)
+			if filepath.Separator == '\\' {
+				// Don't know why, sometimes this comes out with slashes, not backslashes.
+				wdir := strings.ReplaceAll(dir, "\\", "/")
+				out = replacePrefix(out, wdir, reldir)
+			}
+		}
+		dirP := filepath.Dir(dir)
+		if dir == dirP {
+			break
+		}
+		dir = dirP
+	}
+
+	// Fix up output referring to cgo-generated code to be more readable.
+	// Replace x.go:19[/tmp/.../x.cgo1.go:18] with x.go:19.
+	// Replace *[100]_Ctype_foo with *[100]C.foo.
+	// If we're using -x, assume we're debugging and want the full dump, so disable the rewrite.
+	if !cfg.BuildX && cgoLine.MatchString(out) {
+		out = cgoLine.ReplaceAllString(out, "")
+		out = cgoTypeSigRe.ReplaceAllString(out, "C.")
+	}
+
+	// Usually desc is already p.Desc(), but if not, signal cmdError.Error to
+	// add a line explicitly mentioning the import path.
+	needsPath := importPath != "" && p != nil && desc != p.Desc()
+
+	err := &cmdError{desc, out, importPath, needsPath}
+	if cmdErr != nil {
+		// The command failed. Report the output up as an error.
+		return err
+	}
+	// The command didn't fail, so just print the output as appropriate.
+	if a != nil && a.output != nil {
+		// The Action is capturing output.
+		a.output = append(a.output, err.Error()...)
+	} else {
+		// Write directly to the Builder output.
+		sh.Print(err.Error())
+	}
+	return nil
+}
+
+// replacePrefix is like strings.ReplaceAll, but only replaces instances of old
+// that are preceded by ' ', '\t', or appear at the beginning of a line.
+func replacePrefix(s, old, new string) string {
+	n := strings.Count(s, old)
+	if n == 0 {
+		return s
+	}
+
+	s = strings.ReplaceAll(s, " "+old, " "+new)
+	s = strings.ReplaceAll(s, "\n"+old, "\n"+new)
+	s = strings.ReplaceAll(s, "\n\t"+old, "\n\t"+new)
+	if strings.HasPrefix(s, old) {
+		s = new + s[len(old):]
+	}
+	return s
+}
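+
+// For instance (the paths are invented),
+//
+//	replacePrefix("/work/src/fmt/print.go:1: undefined: x\n", "/work/src/fmt", "../fmt")
+//
+// returns "../fmt/print.go:1: undefined: x\n", while an occurrence embedded in
+// a longer token such as "-I/work/src/fmt" is left untouched.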
+
+type cmdError struct {
+	desc       string
+	text       string
+	importPath string
+	needsPath  bool // Set if desc does not already include the import path
+}
+
+func (e *cmdError) Error() string {
+	var msg string
+	if e.needsPath {
+		// Ensure the import path is part of the message.
+		// Clearly distinguish the description from the import path.
+		msg = fmt.Sprintf("# %s\n# [%s]\n", e.importPath, e.desc)
+	} else {
+		msg = "# " + e.desc + "\n"
+	}
+	return msg + e.text
+}
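+
+// For illustration (import path, description, and output invented), a cmdError
+// with needsPath set renders as:
+//
+//	# example.com/m/p
+//	# [gcc -c x.c]
+//	x.c:1:2: error: expected ';'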
+
+func (e *cmdError) ImportPath() string {
+	return e.importPath
+}
+
+var cgoLine = lazyregexp.New(`\[[^\[\]]+\.(cgo1|cover)\.go:[0-9]+(:[0-9]+)?\]`)
+var cgoTypeSigRe = lazyregexp.New(`\b_C2?(type|func|var|macro)_\B`)
+
+// run runs the command given by cmdline in the directory dir.
+// If the command fails, run prints information about the failure
+// and returns a non-nil error.
+func (sh *Shell) run(dir string, desc string, env []string, cmdargs ...any) error {
+	out, err := sh.runOut(dir, env, cmdargs...)
+	if desc == "" {
+		desc = sh.fmtCmd(dir, "%s", strings.Join(str.StringList(cmdargs...), " "))
+	}
+	return sh.reportCmd(desc, dir, out, err)
+}
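+
+// A hedged usage sketch (the variables are placeholders): cmdargs may mix
+// strings and string slices, which str.StringList flattens into a single
+// command line, mirroring the call sites in this package:
+//
+//	err := sh.run(p.Dir, p.ImportPath, nil, compiler, "-o", ofile, defs, "-c", cfile)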
+
+// runOut runs the command given by cmdline in the directory dir.
+// It returns the command output and any errors that occurred.
+// It accumulates execution time in the Shell's Action, if any.
+func (sh *Shell) runOut(dir string, env []string, cmdargs ...any) ([]byte, error) {
+	a := sh.action
+
+	cmdline := str.StringList(cmdargs...)
+
+	for _, arg := range cmdline {
+		// GNU binutils commands, including gcc and gccgo, interpret an argument
+		// @foo anywhere in the command line (even following --) as meaning
+		// "read and insert arguments from the file named foo."
+		// Don't say anything that might be misinterpreted that way.
+		if strings.HasPrefix(arg, "@") {
+			return nil, fmt.Errorf("invalid command-line argument %s in command: %s", arg, joinUnambiguously(cmdline))
+		}
+	}
+
+	if cfg.BuildN || cfg.BuildX {
+		var envcmdline string
+		for _, e := range env {
+			if j := strings.IndexByte(e, '='); j != -1 {
+				if strings.ContainsRune(e[j+1:], '\'') {
+					envcmdline += fmt.Sprintf("%s=%q", e[:j], e[j+1:])
+				} else {
+					envcmdline += fmt.Sprintf("%s='%s'", e[:j], e[j+1:])
+				}
+				envcmdline += " "
+			}
+		}
+		envcmdline += joinUnambiguously(cmdline)
+		sh.ShowCmd(dir, "%s", envcmdline)
+		if cfg.BuildN {
+			return nil, nil
+		}
+	}
+
+	var buf bytes.Buffer
+	path, err := cfg.LookPath(cmdline[0])
+	if err != nil {
+		return nil, err
+	}
+	cmd := exec.Command(path, cmdline[1:]...)
+	if cmd.Path != "" {
+		cmd.Args[0] = cmd.Path
+	}
+	cmd.Stdout = &buf
+	cmd.Stderr = &buf
+	cleanup := passLongArgsInResponseFiles(cmd)
+	defer cleanup()
+	if dir != "." {
+		cmd.Dir = dir
+	}
+	cmd.Env = cmd.Environ() // Pre-allocate with correct PWD.
+
+	// Add the TOOLEXEC_IMPORTPATH environment variable for -toolexec tools.
+	// It doesn't really matter if -toolexec isn't being used.
+	// Note that a.Package.Desc is not really an import path,
+	// but this is consistent with 'go list -f {{.ImportPath}}'.
+	// Plus, it is useful to uniquely identify packages in 'go list -json'.
+	if a != nil && a.Package != nil {
+		cmd.Env = append(cmd.Env, "TOOLEXEC_IMPORTPATH="+a.Package.Desc())
+	}
+
+	cmd.Env = append(cmd.Env, env...)
+	start := time.Now()
+	err = cmd.Run()
+	if a != nil && a.json != nil {
+		aj := a.json
+		aj.Cmd = append(aj.Cmd, joinUnambiguously(cmdline))
+		aj.CmdReal += time.Since(start)
+		if ps := cmd.ProcessState; ps != nil {
+			aj.CmdUser += ps.UserTime()
+			aj.CmdSys += ps.SystemTime()
+		}
+	}
+
+	// err can be something like 'exit status 1'.
+	// Add information about what program was running.
+	// Note that if buf.Bytes() is non-empty, the caller usually
+	// shows buf.Bytes() and does not print err at all, so the
+	// prefix here does not make most output any more verbose.
+	if err != nil {
+		err = errors.New(cmdline[0] + ": " + err.Error())
+	}
+	return buf.Bytes(), err
+}
+
+// joinUnambiguously prints the slice, quoting where necessary to make the
+// output unambiguous.
+// TODO: See issue 5279. The printing of commands needs a complete redo.
+func joinUnambiguously(a []string) string {
+	var buf strings.Builder
+	for i, s := range a {
+		if i > 0 {
+			buf.WriteByte(' ')
+		}
+		q := strconv.Quote(s)
+		// A gccgo command line can contain -( and -).
+		// Make sure we quote them since they are special to the shell.
+		// The trimpath argument can also contain > (part of =>) and ;. Quote those too.
+		if s == "" || strings.ContainsAny(s, " ()>;") || len(q) > len(s)+2 {
+			buf.WriteString(q)
+		} else {
+			buf.WriteString(s)
+		}
+	}
+	return buf.String()
+}
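+
+// For example (arguments invented), joinUnambiguously([]string{"ar", "-(", "x y", ""})
+// returns `ar "-(" "x y" ""`: tokens containing shell-special characters or
+// spaces, and empty tokens, are quoted; plain tokens are left as-is.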
diff --git a/src/cmd/go/internal/workcmd/vendor.go b/src/cmd/go/internal/workcmd/vendor.go
new file mode 100644
index 0000000..f9f0cc0
--- /dev/null
+++ b/src/cmd/go/internal/workcmd/vendor.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package workcmd
+
+import (
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/modcmd"
+	"cmd/go/internal/modload"
+	"context"
+)
+
+var cmdVendor = &base.Command{
+	UsageLine: "go work vendor [-e] [-v] [-o outdir]",
+	Short:     "make vendored copy of dependencies",
+	Long: `
+Vendor resets the workspace's vendor directory to include all packages
+needed to build and test all the workspace's packages.
+It does not include test code for vendored packages.
+
+The -v flag causes vendor to print the names of vendored
+modules and packages to standard error.
+
+The -e flag causes vendor to attempt to proceed despite errors
+encountered while loading packages.
+
+The -o flag causes vendor to create the vendor directory at the given
+path instead of "vendor". The go command can only use a vendor directory
+named "vendor" within the module root directory, so this flag is
+primarily useful for other tools.`,
+
+	Run: runVendor,
+}
+
+var vendorE bool   // if true, report errors but proceed anyway
+var vendorO string // if set, overrides the default output directory
+
+func init() {
+	cmdVendor.Flag.BoolVar(&cfg.BuildV, "v", false, "")
+	cmdVendor.Flag.BoolVar(&vendorE, "e", false, "")
+	cmdVendor.Flag.StringVar(&vendorO, "o", "", "")
+	base.AddChdirFlag(&cmdVendor.Flag)
+	base.AddModCommonFlags(&cmdVendor.Flag)
+}
+
+func runVendor(ctx context.Context, cmd *base.Command, args []string) {
+	modload.InitWorkfile()
+	if modload.WorkFilePath() == "" {
+		base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
+	}
+
+	modcmd.RunVendor(ctx, vendorE, vendorO, args)
+}
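+
+// Illustrative invocations (the output directory name is invented, not taken
+// from this change):
+//
+//	go work vendor                        # vendor workspace deps into ./vendor
+//	go work vendor -v                     # also list vendored modules/packages
+//	go work vendor -o third_party/vendor  # write elsewhere, for other tools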
diff --git a/src/cmd/go/internal/workcmd/work.go b/src/cmd/go/internal/workcmd/work.go
index c99cc2a..bfbed83 100644
--- a/src/cmd/go/internal/workcmd/work.go
+++ b/src/cmd/go/internal/workcmd/work.go
@@ -74,5 +74,6 @@
 		cmdInit,
 		cmdSync,
 		cmdUse,
+		cmdVendor,
 	},
 }
diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go
index 7b73642..d380aae 100644
--- a/src/cmd/go/main.go
+++ b/src/cmd/go/main.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:generate go test cmd/go -v -run=TestDocsUpToDate -fixdocs
+//go:generate go test cmd/go -v -run=^TestDocsUpToDate$ -fixdocs
 
 package main
 
@@ -16,7 +16,6 @@
 	"log"
 	"os"
 	"path/filepath"
-	"runtime"
 	rtrace "runtime/trace"
 	"slices"
 	"strings"
@@ -30,7 +29,6 @@
 	"cmd/go/internal/fix"
 	"cmd/go/internal/fmtcmd"
 	"cmd/go/internal/generate"
-	"cmd/go/internal/get"
 	"cmd/go/internal/help"
 	"cmd/go/internal/list"
 	"cmd/go/internal/modcmd"
@@ -75,11 +73,9 @@
 		help.HelpFileType,
 		modload.HelpGoMod,
 		help.HelpGopath,
-		get.HelpGopathGet,
 		modfetch.HelpGoproxy,
 		help.HelpImportPath,
 		modload.HelpModules,
-		modget.HelpModuleGet,
 		modfetch.HelpModuleAuth,
 		help.HelpPackages,
 		modfetch.HelpPrivate,
@@ -104,23 +100,25 @@
 		base.Usage()
 	}
 
-	if args[0] == "get" || args[0] == "help" {
-		if !modload.WillBeEnabled() {
-			// Replace module-aware get with GOPATH get if appropriate.
-			*modget.CmdGet = *get.CmdGet
-		}
-	}
-
 	cfg.CmdName = args[0] // for error messages
 	if args[0] == "help" {
 		help.Help(os.Stdout, args[1:])
 		return
 	}
 
+	if cfg.GOROOT == "" {
+		fmt.Fprintf(os.Stderr, "go: cannot find GOROOT directory: 'go' binary is trimmed and GOROOT is not set\n")
+		os.Exit(2)
+	}
+	if fi, err := os.Stat(cfg.GOROOT); err != nil || !fi.IsDir() {
+		fmt.Fprintf(os.Stderr, "go: cannot find GOROOT directory: %v\n", cfg.GOROOT)
+		os.Exit(2)
+	}
+
 	// Diagnose common mistake: GOPATH==GOROOT.
 	// This setting is equivalent to not setting GOPATH at all,
 	// which is not what most people want when they do it.
-	if gopath := cfg.BuildContext.GOPATH; filepath.Clean(gopath) == filepath.Clean(runtime.GOROOT()) {
+	if gopath := cfg.BuildContext.GOPATH; filepath.Clean(gopath) == filepath.Clean(cfg.GOROOT) {
 		fmt.Fprintf(os.Stderr, "warning: GOPATH set to GOROOT (%s) has no effect\n", gopath)
 	} else {
 		for _, p := range filepath.SplitList(gopath) {
@@ -149,15 +147,6 @@
 		}
 	}
 
-	if cfg.GOROOT == "" {
-		fmt.Fprintf(os.Stderr, "go: cannot find GOROOT directory: 'go' binary is trimmed and GOROOT is not set\n")
-		os.Exit(2)
-	}
-	if fi, err := os.Stat(cfg.GOROOT); err != nil || !fi.IsDir() {
-		fmt.Fprintf(os.Stderr, "go: cannot find GOROOT directory: %v\n", cfg.GOROOT)
-		os.Exit(2)
-	}
-
 	cmd, used := lookupCmd(args)
 	cfg.CmdName = strings.Join(args[:used], " ")
 	if len(cmd.Commands) > 0 {
diff --git a/src/cmd/go/testdata/addmod.go b/src/cmd/go/testdata/addmod.go
index 0045d50..7ef68b3 100644
--- a/src/cmd/go/testdata/addmod.go
+++ b/src/cmd/go/testdata/addmod.go
@@ -20,6 +20,7 @@
 
 import (
 	"bytes"
+	"cmd/go/internal/str"
 	"flag"
 	"fmt"
 	"internal/txtar"
diff --git a/src/cmd/go/testdata/failssh/ssh b/src/cmd/go/testdata/failssh/ssh
deleted file mode 100755
index ecdbef9..0000000
--- a/src/cmd/go/testdata/failssh/ssh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-exit 1
diff --git a/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-ppc64.txt b/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-ppc64.txt
new file mode 100644
index 0000000..60c73b7
--- /dev/null
+++ b/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-ppc64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.openbsd-ppc64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.openbsd-ppc64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-riscv64.txt b/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-riscv64.txt
new file mode 100644
index 0000000..978be3b
--- /dev/null
+++ b/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-riscv64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.openbsd-riscv64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.openbsd-riscv64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/src/cmd/go/testdata/savedir.go b/src/cmd/go/testdata/savedir.go
index 9a3ed50..bd42c3e 100644
--- a/src/cmd/go/testdata/savedir.go
+++ b/src/cmd/go/testdata/savedir.go
@@ -15,6 +15,7 @@
 package main
 
 import (
+	"cmd/go/internal/str"
 	"flag"
 	"fmt"
 	"internal/txtar"
diff --git a/src/cmd/go/testdata/script/build_cc_cache_issue64423.txt b/src/cmd/go/testdata/script/build_cc_cache_issue64423.txt
new file mode 100644
index 0000000..f1bc2c3
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_cc_cache_issue64423.txt
@@ -0,0 +1,121 @@
+# Regression test for https://go.dev/issue/64423:
+#
+# When we parse the version for a Clang binary, we should accept
+# an arbitrary vendor prefix, which (as of 2023) may be injected
+# by defining CLANG_VENDOR when building clang itself.
+#
+# Since we don't want to actually rebuild the Clang toolchain in
+# this test, we instead simulate it by injecting a fake "clang"
+# binary that runs the real one as a subprocess.
+
+[!cgo] skip
+[short] skip 'builds and links a fake clang binary'
+[!cc:clang] skip 'test is specific to clang version parsing'
+
+# Save the location of the real clang command for our fake one to use.
+go run ./which clang
+cp stdout $WORK/.realclang
+
+# Build a fake clang and ensure that it is the one in $PATH.
+mkdir $WORK/bin
+go build -o $WORK/bin/clang$GOEXE ./fakeclang
+[!GOOS:plan9] env PATH=$WORK${/}bin
+[GOOS:plan9] env path=$WORK${/}bin
+
+# Force CGO_ENABLED=1 so that the following commands should error
+# out if the fake clang doesn't work.
+env CGO_ENABLED=1
+
+# The bug in https://go.dev/issue/64423 resulted in cache keys that
+# didn't contain any information about the C compiler.
+# Since the bug was in cache key computation, isolate the cache:
+# if we change the way caching works, we want the test to fail
+# instead of accidentally reusing the cached information from a
+# previous test run.
+env GOCACHE=$WORK${/}.cache
+mkdir $GOCACHE
+
+go build -x runtime/cgo
+
+	# Tell our fake clang to stop working.
+	# Previously, 'go build -x runtime/cgo' would continue to
+	# succeed because both the broken clang and the non-broken one
+	# resulted in a cache key with no clang version information.
+env GO_BREAK_CLANG=1
+! go build -x runtime/cgo
+stderr '# runtime/cgo\nGO_BREAK_CLANG is set'
+
+-- go.mod --
+module example/issue64423
+go 1.20
+-- which/main.go --
+package main
+
+import (
+	"os"
+	"os/exec"
+)
+
+func main() {
+	path, err := exec.LookPath(os.Args[1])
+	if err != nil {
+		panic(err)
+	}
+	os.Stdout.WriteString(path)
+}
+-- fakeclang/main.go --
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+)
+
+func main() {
+	if os.Getenv("GO_BREAK_CLANG") != "" {
+		os.Stderr.WriteString("GO_BREAK_CLANG is set\n")
+		os.Exit(1)
+	}
+
+	b, err := os.ReadFile(filepath.Join(os.Getenv("WORK"), ".realclang"))
+	if err != nil {
+		log.Fatal(err)
+	}
+	clang := string(bytes.TrimSpace(b))
+	cmd := exec.Command(clang, os.Args[1:]...)
+	cmd.Stdout = os.Stdout
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if err := cmd.Start(); err != nil {
+		log.Fatal(err)
+	}
+
+	r := bufio.NewReader(stderr)
+	for {
+		line, err := r.ReadString('\n')
+		if line != "" {
+			if strings.Contains(line, "clang version") {
+				// Simulate a clang version string with an arbitrary vendor prefix.
+				const vendorString = "Gopher Solutions Unlimited "
+				os.Stderr.WriteString(vendorString)
+			}
+			os.Stderr.WriteString(line)
+		}
+		if err != nil {
+			break
+		}
+	}
+	os.Stderr.Close()
+
+	if err := cmd.Wait(); err != nil {
+		os.Exit(1)
+	}
+}
diff --git a/src/cmd/go/testdata/script/build_issue62156.txt b/src/cmd/go/testdata/script/build_issue62156.txt
new file mode 100644
index 0000000..d241570
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_issue62156.txt
@@ -0,0 +1,27 @@
+# Regression test for https://go.dev/issue/62156:
+# DWARF generation for inlined functions may require more runtime type
+# descriptors to be written.
+
+go build
+
+-- go.mod --
+module m
+
+go 1.20
+-- main.go --
+package main
+
+import "m/sub"
+
+func main() { sub.F() }
+-- sub/sub.go --
+package sub
+
+type iface interface{ m() }
+
+func F() {
+	f := func(rt []iface) []iface {
+		return append([]iface{}, rt...)
+	}
+	f(nil)
+}
diff --git a/src/cmd/go/testdata/script/build_issue_65528.txt b/src/cmd/go/testdata/script/build_issue_65528.txt
new file mode 100644
index 0000000..ab4d62b
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_issue_65528.txt
@@ -0,0 +1,9 @@
+go build
+
+-- go.mod --
+module test
+
+go 1.0
+
+-- p.go --
+package p
diff --git a/src/cmd/go/testdata/script/build_pgo_auto.txt b/src/cmd/go/testdata/script/build_pgo_auto.txt
index 5dd799a..509be0d 100644
--- a/src/cmd/go/testdata/script/build_pgo_auto.txt
+++ b/src/cmd/go/testdata/script/build_pgo_auto.txt
@@ -14,6 +14,9 @@
 # if the first arg starts with - it is a grep flag.
 stderr 'build\\t-pgo=.*default\.pgo'
 
+# check also that -pgo appears with the other flags, before non-flag settings
+! stderr 'build\\t[A-Za-z].*build\\t-pgo'
+
 # use default.pgo for ... with a single main package
 go build -n -pgo=auto ./a/...
 stderr 'compile.*-pgoprofile=.*default\.pgo.*a1.go'
diff --git a/src/cmd/go/testdata/script/build_pie_race.txt b/src/cmd/go/testdata/script/build_pie_race.txt
new file mode 100644
index 0000000..39bea05
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_pie_race.txt
@@ -0,0 +1,30 @@
+# go build -buildmode=pie -race main.go on Darwin should work without errors
+
+[!race] skip 'test requires race detector support'
+
+[!GOOS:darwin] ! go build -buildmode=pie -race
+[!GOOS:darwin] stderr '^-buildmode=pie not supported when -race is enabled on '$GOOS'/'$GOARCH'$'
+[!GOOS:darwin] stop 'not testing -buildmode=pie -race on platform that does not support it'
+
+go build -buildmode=pie -race bytes
+! stderr .
+
+[short] stop 'not linking a binary in -short mode'
+
+go build -buildmode=pie -race main.go
+! stderr .
+exec ./main
+stdout 'Hello, 世界'
+
+-- go.mod --
+module m
+
+go 1.21
+-- main.go --
+package main
+
+import "fmt"
+
+func main() {
+	fmt.Println("Hello, 世界")
+}
diff --git a/src/cmd/go/testdata/script/cgo_stale_precompiled.txt b/src/cmd/go/testdata/script/cgo_stale_precompiled.txt
index eb7e105..b2a0e0c 100644
--- a/src/cmd/go/testdata/script/cgo_stale_precompiled.txt
+++ b/src/cmd/go/testdata/script/cgo_stale_precompiled.txt
@@ -29,8 +29,12 @@
 # no longer installed anyway! Since we're requiring a C compiler in order to
 # build and use cgo libraries in the standard library, we should make sure it
 # matches what's in the cache.
-[!abscc] env CGO_ENABLED=1
-[!abscc] [!GOOS:plan9] env PATH=''  # Guaranteed not to include $(go env CC)!
-[!abscc] [GOOS:plan9] env path=''
-[!abscc] ! go build -x runtime/cgo
-[!abscc] stderr 'C compiler .* not found'
+
+[abscc] stop
+
+env CGO_ENABLED=1
+env CC=''
+[!GOOS:plan9] env PATH=''  # Guaranteed not to include $(go env CC)!
+[GOOS:plan9] env path=''
+! go build -x runtime/cgo
+stderr 'C compiler .* not found'
diff --git a/src/cmd/go/testdata/script/cgo_suspect_flag_force_external.txt b/src/cmd/go/testdata/script/cgo_suspect_flag_force_external.txt
index e5bcdc6..6dc30be 100644
--- a/src/cmd/go/testdata/script/cgo_suspect_flag_force_external.txt
+++ b/src/cmd/go/testdata/script/cgo_suspect_flag_force_external.txt
@@ -55,6 +55,32 @@
 env CGO_CFLAGS=-fprofile-instr-generate
 go build -x -n -o dummy.exe ./usesInternalCgo
 stderr preferlinkext
+
+# The -fdebug-prefix-map=path is permitted for internal linking.
+env CGO_CFLAGS=-fdebug-prefix-map=/some/sandbox/execroot/workspace=/tmp/new
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+env CGO_CFLAGS=-fdebug-prefix-map=/Users/someone/.cache/bazel/_bazel_someone/3fa7e4650c43657ead684537951f49e2/sandbox/linux-sandbox/10/execroot/rules_go_static=.
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+# The -ffile-prefix-map=path is permitted for internal linking too.
+env CGO_CFLAGS=-ffile-prefix-map=/Users/someone/.cache/bazel/_bazel_someone/3fa7e4650c43657ead684537951f49e2/sandbox/linux-sandbox/10/execroot/rules_go_static/bazel-out/aarch64-fastbuild-ST-b33d65c724e6/bin/external/io_bazel_rules_go/stdlib_=.
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+# Verifying that -fdebug-prefix-map=path, -ffile-prefix-map, -no-canonical-prefixes
+# and -fno-canonical-system-headers are permitted for internal linking.
+env CGO_CFLAGS=-fdebug-prefix-map=old=/tmp/new
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+env CGO_CFLAGS=-ffile-prefix-map=/Users/someone/_11233/things=new
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+env CGO_CFLAGS=-no-canonical-prefixes
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+env CGO_CFLAGS=-fno-canonical-system-headers
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
 env CGO_CFLAGS=
 
 [short] skip
@@ -95,6 +121,9 @@
 [cgolinkext] go list ./usesInternalCgo
 [!cgolinkext] go build '-ldflags=-tmpdir=tmp5 -linkmode=internal' -o $devnull ./usesInternalCgo &
 
+# Sixth build: explicit CGO use in a non-main package.
+go build -o p.a ./nonMainPackageUsesExplicitCgo &
+
 wait
 
 # Check first build: no external linking expected
@@ -113,6 +142,10 @@
 # Fifth build: explicit CGO, -linkmode=internal.
 ! exists tmp5/go.o
 
+# Sixth build: make sure that "go tool nm" doesn't get confused
+# by the presence of the "preferlinkext" sentinel.
+go tool nm p.a
+
 -- go.mod --
 
 module cgo.example
@@ -153,3 +186,16 @@
 func main() {
      println(C.meaningOfLife())
 }
+
+-- nonMainPackageUsesExplicitCgo/main.go --
+
+package p
+
+/*
+int meaningOfLife() { return 42; }
+*/
+import "C"
+
+func PrintIt() {
+     println(C.meaningOfLife())
+}
diff --git a/src/cmd/go/testdata/script/cover_coverpkg_partial.txt b/src/cmd/go/testdata/script/cover_coverpkg_partial.txt
new file mode 100644
index 0000000..5240241
--- /dev/null
+++ b/src/cmd/go/testdata/script/cover_coverpkg_partial.txt
@@ -0,0 +1,141 @@
+
+# Testcase related to #58770 and #24570. This is intended to ensure
+# that coverage collection works in situations where we're testing a
+# collection of packages and supplying a -coverpkg pattern that
+# matches some but not all of the collection. In addition, some of the
+# packages have Go code but no tests, and other packages have tests
+# but no Go code. Package breakdown:
+#
+# Package         Code?           Tests?          Stmts           Imports
+# a               yes             yes             2               f
+# b               yes             yes             1               a, d
+# c               yes             yes             3               ---
+# d               yes             no              1               ---
+# e               no              yes             0               a, b
+# f               yes             no              3               ---
+#
+
+[short] skip
+[!GOEXPERIMENT:coverageredesign] skip
+
+# Test all packages with -coverpkg=./...
+go test -coverprofile=cov.p -coverpkg=./... ./...
+stdout '^ok\s+M/a\s+\S+\s+coverage: 50.0% of statements in ./...'
+stdout '^ok\s+M/b\s+\S+\s+coverage: 60.0% of statements in ./...'
+stdout '^ok\s+M/c\s+\S+\s+coverage: 30.0% of statements in ./...'
+stdout '^\s*M/d\s+coverage: 0.0% of statements'
+stdout '^\s*M/f\s+coverage: 0.0% of statements'
+
+# Test just the test-only package ./e but with -coverpkg=./...
+# Total number of statements should be 7 (i.e. a/b/d/f but not c)
+# and covered percent should be 6/7 (we hit everything in the
+# coverpkg pattern except the func in "d").
+go test -coverprofile=bar.p -coverpkg=./... ./e
+stdout '^ok\s+M/e\s+\S+\s+coverage: 85.7% of statements in ./...'
+
+# Test b and f with -coverpkg set to a/d/f. Total of 6 statements
+# in a/d/f, again we hit everything except DFunc.
+go test -coverprofile=baz.p -coverpkg=./a,./d,./f ./b ./f
+stdout '^ok\s+M/b\s+\S+\s+coverage: 83.3% of statements in ./a, ./d, ./f'
+stdout '^\s*M/f\s+coverage: 0.0% of statements'
+
+-- a/a.go --
+package a
+
+import "M/f"
+
+var G int
+
+func AFunc() int {
+	G = 1
+	return f.Id()
+}
+-- a/a_test.go --
+package a
+
+import "testing"
+
+func TestA(t *testing.T) {
+	if AFunc() != 42 {
+		t.Fatalf("bad!")
+	}
+}
+-- b/b.go --
+package b
+
+import (
+	"M/a"
+	"M/d"
+)
+
+func BFunc() int {
+	return -d.FortyTwo + a.AFunc()
+}
+-- b/b_test.go --
+package b
+
+import "testing"
+
+func TestB(t *testing.T) {
+	if BFunc() == 1010101 {
+		t.Fatalf("bad!")
+	}
+}
+-- c/c.go --
+package c
+
+var G int
+
+func CFunc(x, y int) int {
+	G += x
+	G -= y
+	return x + y
+}
+-- c/c_test.go --
+package c
+
+import "testing"
+
+func TestC(t *testing.T) {
+	if CFunc(10, 10) == 1010101 {
+		t.Fatalf("bad!")
+	}
+}
+-- d/d.go --
+package d
+
+const FortyTwo = 42
+
+func DFunc() int {
+  return FortyTwo
+}
+
+-- e/e_test.go --
+package e
+
+import (
+	"M/a"
+	"M/b"
+	"testing"
+)
+
+func TestBlah(t *testing.T) {
+	if b.BFunc() == 1010101 {
+		t.Fatalf("bad")
+	}
+	a.AFunc()
+}
+-- f/f.go --
+package f
+
+var F int
+
+func Id() int {
+	F += 9
+	F *= 2
+	return 42
+}
+-- go.mod --
+module M
+
+go 1.21
diff --git a/src/cmd/go/testdata/script/cover_coverpkg_with_init.txt b/src/cmd/go/testdata/script/cover_coverpkg_with_init.txt
new file mode 100644
index 0000000..7a89102
--- /dev/null
+++ b/src/cmd/go/testdata/script/cover_coverpkg_with_init.txt
@@ -0,0 +1,130 @@
+
+# Testcase inspired by issue #58770, intended to verify that we're
+# doing the right thing when running "go test -coverpkg=./... ./..."
+# on a collection of packages where some have init functions and some
+# do not, some have tests and some do not.
+
+[short] skip
+[!GOEXPERIMENT:coverageredesign] skip
+
+# Verify correct statements percentages. We have a total of 10
+# statements in the packages matched by "./..."; package "a" (for
+# example) has two statements so we expect 20.0% stmts covered. Go
+# 1.19 would print 50% here (due to force importing of all ./...
+# packages); prior to the fix for #58770 Go 1.20 would show 100%
+# coverage. For packages "x" and "f" (which have no tests), check for
+# 0% stmts covered (as opposed to "no test files").
+
+go test -count=1 -coverprofile=cov.dat -coverpkg=./... ./...
+stdout '^\s*\?\s+M/n\s+\[no test files\]'
+stdout '^\s*M/x\s+coverage: 0.0% of statements'
+stdout '^\s*M/f\s+coverage: 0.0% of statements'
+stdout '^ok\s+M/a\s+\S+\s+coverage: 30.0% of statements in ./...'
+stdout '^ok\s+M/b\s+\S+\s+coverage: 20.0% of statements in ./...'
+stdout '^ok\s+M/main\s+\S+\s+coverage: 80.0% of statements in ./...'
+
+# Check for selected elements in the collected coverprofile as well.
+
+go tool cover -func=cov.dat
+stdout '^M/x/x.go:3:\s+XFunc\s+0.0%'
+stdout '^M/b/b.go:7:\s+BFunc\s+100.0%'
+stdout '^total:\s+\(statements\)\s+80.0%'
+
+-- go.mod --
+module M
+
+go 1.21
+-- a/a.go --
+package a
+
+import "M/f"
+
+func init() {
+	println("package 'a' init: launch the missiles!")
+}
+
+func AFunc() int {
+	return f.Id()
+}
+-- a/a_test.go --
+package a
+
+import "testing"
+
+func TestA(t *testing.T) {
+	if AFunc() != 42 {
+		t.Fatalf("bad!")
+	}
+}
+-- b/b.go --
+package b
+
+func init() {
+	println("package 'b' init: release the kraken")
+}
+
+func BFunc() int {
+	return -42
+}
+-- b/b_test.go --
+package b
+
+import "testing"
+
+func TestB(t *testing.T) {
+	if BFunc() != -42 {
+		t.Fatalf("bad!")
+	}
+}
+-- f/f.go --
+package f
+
+func Id() int {
+     return 42
+}
+-- main/main.go --
+package main
+
+import (
+	"M/a"
+	"M/b"
+)
+
+func MFunc() string {
+	return "42"
+}
+
+func M2Func() int {
+	return a.AFunc() + b.BFunc()
+}
+
+func init() {
+	println("package 'main' init")
+}
+
+func main() {
+	println(a.AFunc() + b.BFunc())
+}
+-- main/main_test.go --
+package main
+
+import "testing"
+
+func TestMain(t *testing.T) {
+	if MFunc() != "42" {
+		t.Fatalf("bad!")
+	}
+	if M2Func() != 0 {
+		t.Fatalf("also bad!")
+	}
+}
+-- n/n.go --
+package n
+
+type N int
+-- x/x.go --
+package x
+
+func XFunc() int {
+	return 2 * 2
+}
diff --git a/src/cmd/go/testdata/script/cover_coverprofile_multipkg.txt b/src/cmd/go/testdata/script/cover_coverprofile_multipkg.txt
new file mode 100644
index 0000000..543626f
--- /dev/null
+++ b/src/cmd/go/testdata/script/cover_coverprofile_multipkg.txt
@@ -0,0 +1,193 @@
+
+# Testcase for #63356. This exercises a "go test -coverprofile" run over a
+# collection of mostly independent packages (hence tests can be run in
+# parallel). In the original bug, temp coverage profile files were not
+# being properly qualified and were colliding, resulting in a corrupted
+# final profile. Actual content of the packages doesn't especially matter
+# as long as we have a mix of packages with tests and multiple packages
+# without tests.
+
+[short] skip
+
+# Kick off test.
+go test -p=10 -vet=off -count=1 -coverprofile=cov.p ./...
+
+# Make sure resulting profile is digestible.
+go tool cover -func=cov.p
+
+# No extraneous extra files please.
+! exists _cover_.out
+
+-- a/a.go --
+package a
+
+func init() {
+	println("package 'a' init: launch the missiles!")
+}
+
+func AFunc() int {
+	return 42
+}
+-- a/a_test.go --
+package a
+
+import "testing"
+
+func TestA(t *testing.T) {
+	if AFunc() != 42 {
+		t.Fatalf("bad!")
+	}
+}
+-- aa/aa.go --
+package aa
+
+import "M/it"
+
+func AA(y int) int {
+	c := it.Conc{}
+	x := it.Callee(&c)
+	println(x, y)
+	return 0
+}
+-- aa/aa_test.go --
+package aa
+
+import "testing"
+
+func TestMumble(t *testing.T) {
+	AA(3)
+}
+-- b/b.go --
+package b
+
+func init() {
+	println("package 'b' init: release the kraken")
+}
+
+func BFunc() int {
+	return -42
+}
+-- b/b_test.go --
+package b
+
+import "testing"
+
+func TestB(t *testing.T) {
+	if BFunc() != -42 {
+		t.Fatalf("bad!")
+	}
+}
+-- deadstuff/deadstuff.go --
+package deadstuff
+
+func downStreamOfPanic(x int) {
+	panic("bad")
+	if x < 10 {
+		println("foo")
+	}
+}
+-- deadstuff/deadstuff_test.go --
+package deadstuff
+
+import "testing"
+
+func TestMumble(t *testing.T) {
+	defer func() {
+		if x := recover(); x != nil {
+			println("recovered")
+		}
+	}()
+	downStreamOfPanic(10)
+}
+-- go.mod --
+module M
+
+go 1.21
+-- it/it.go --
+package it
+
+type Ctr interface {
+	Count() int
+}
+
+type Conc struct {
+	X int
+}
+
+func (c *Conc) Count() int {
+	return c.X
+}
+
+func DoCall(c *Conc) {
+	c2 := Callee(c)
+	println(c2.Count())
+}
+
+func Callee(ii Ctr) Ctr {
+	q := ii.Count()
+	return &Conc{X: q}
+}
+-- main/main.go --
+package main
+
+import (
+	"M/a"
+	"M/b"
+)
+
+func MFunc() string {
+	return "42"
+}
+
+func M2Func() int {
+	return a.AFunc() + b.BFunc()
+}
+
+func init() {
+	println("package 'main' init")
+}
+
+func main() {
+	println(a.AFunc() + b.BFunc())
+}
+-- main/main_test.go --
+package main
+
+import "testing"
+
+func TestMain(t *testing.T) {
+	if MFunc() != "42" {
+		t.Fatalf("bad!")
+	}
+	if M2Func() != 0 {
+		t.Fatalf("also bad!")
+	}
+}
+-- n/n.go --
+package n
+
+type N int
+-- onlytest/mumble_test.go --
+package onlytest
+
+import "testing"
+
+func TestFoo(t *testing.T) {
+	t.Logf("Whee\n")
+}
+-- x/x.go --
+package x
+
+func XFunc() int {
+	return 2 * 2
+}
+-- xinternal/i.go --
+package i
+
+func I() int { return 32 }
+-- xinternal/q/q.go --
+package q
+
+func Q() int {
+	return 42
+}
diff --git a/src/cmd/go/testdata/script/cover_statements.txt b/src/cmd/go/testdata/script/cover_statements.txt
index 4f3c9ca..030177c 100644
--- a/src/cmd/go/testdata/script/cover_statements.txt
+++ b/src/cmd/go/testdata/script/cover_statements.txt
@@ -1,10 +1,45 @@
 [short] skip
+
+# Workaround for issue 64014 -- for the portion of this test that
+# verifies that caching works correctly, the cache should theoretically
+# always behave reliably/deterministically. However, if other tests are
+# concurrently accessing the cache while this test is running, that can
+# lead to cache lookup failures, which manifest as test failures here.
+# To avoid such flakes, use a separate isolated GOCACHE for this test.
+env GOCACHE=$WORK/cache
+
+# Initial run with simple coverage.
 go test -cover ./pkg1 ./pkg2 ./pkg3 ./pkg4
-stdout 'pkg1	\[no test files\]'
+[!GOEXPERIMENT:coverageredesign] stdout 'pkg1	\[no test files\]'
+[GOEXPERIMENT:coverageredesign] stdout 'pkg1		coverage: 0.0% of statements'
 stdout 'pkg2	\S+	coverage: 0.0% of statements \[no tests to run\]'
 stdout 'pkg3	\S+	coverage: 100.0% of statements'
 stdout 'pkg4	\S+	coverage: \[no statements\]'
 
+# Second run to make sure that caching works properly.
+go test -x -cover ./pkg1 ./pkg2 ./pkg3 ./pkg4
+[!GOEXPERIMENT:coverageredesign] stdout 'pkg1	\[no test files\]'
+[GOEXPERIMENT:coverageredesign] stdout 'pkg1		coverage: 0.0% of statements'
+stdout 'pkg2	\S+	coverage: 0.0% of statements \[no tests to run\]'
+stdout 'pkg3	\S+	coverage: 100.0% of statements'
+stdout 'pkg4	\S+	coverage: \[no statements\]'
+[GOEXPERIMENT:coverageredesign] ! stderr 'link(\.exe"?)? -'
+! stderr 'compile(\.exe"?)? -'
+! stderr 'cover(\.exe"?)? -'
+[GOEXPERIMENT:coverageredesign] stderr 'covdata(\.exe"?)? percent'
+
+# Now add in -coverprofile.
+go test -cover -coverprofile=cov.dat ./pkg1 ./pkg2 ./pkg3 ./pkg4
+[!GOEXPERIMENT:coverageredesign] stdout 'pkg1	\[no test files\]'
+[GOEXPERIMENT:coverageredesign] stdout 'pkg1		coverage: 0.0% of statements'
+stdout 'pkg2	\S+	coverage: 0.0% of statements \[no tests to run\]'
+stdout 'pkg3	\S+	coverage: 100.0% of statements'
+stdout 'pkg4	\S+	coverage: \[no statements\]'
+
+# Validate
+go tool cover -func=cov.dat
+[GOEXPERIMENT:coverageredesign] stdout 'pkg1/a.go:5:\s+F\s+0.0%'
+
 -- go.mod --
 module m
 
diff --git a/src/cmd/go/testdata/script/cover_swig.txt b/src/cmd/go/testdata/script/cover_swig.txt
new file mode 100644
index 0000000..decb29a
--- /dev/null
+++ b/src/cmd/go/testdata/script/cover_swig.txt
@@ -0,0 +1,72 @@
+
+# Testcase for issue 64661.  This testcase is intended to verify that
+# we don't try to send swig-generated Go files through the cover tool
+# for "go test -cover" runs on packages that have *.swig source files.
+
+[!exec:swig] skip
+[!cgo] skip
+
+go test -v -count=1 -coverprofile=foo.p
+stdout 'coverage: 100.0% of statements'
+
+-- go.mod --
+module simple
+
+go 1.21
+-- main.c --
+/* A global variable */
+double Foo = 3.0;
+
+/* Compute the greatest common divisor of positive integers */
+int gcd(int x, int y) {
+  int g;
+  g = y;
+  while (x > 0) {
+    g = x;
+    x = y % x;
+    y = g;
+  }
+  return g;
+}
+
+
+-- main.go --
+package main
+
+import (
+	"fmt"
+)
+
+func main() {
+	// Call our gcd() function
+	x := 42
+	y := 105
+	g := Gcd(x, y)
+	fmt.Println("The gcd of", x, "and", y, "is", g)
+
+	// Manipulate the Foo global variable
+
+	// Output its current value
+	fmt.Println("Foo =", GetFoo())
+
+	// Change its value
+	SetFoo(3.1415926)
+
+	// See if the change took effect
+	fmt.Println("Foo =", GetFoo())
+}
+-- main.swig --
+%module main
+
+%inline %{
+extern int    gcd(int x, int y);
+extern double Foo;
+%}
+-- main_test.go --
+package main
+
+import "testing"
+
+func TestSwigFuncs(t *testing.T) {
+	main()
+}
diff --git a/src/cmd/go/testdata/script/env_issue46807.txt b/src/cmd/go/testdata/script/env_issue46807.txt
new file mode 100644
index 0000000..e37bc63
--- /dev/null
+++ b/src/cmd/go/testdata/script/env_issue46807.txt
@@ -0,0 +1,12 @@
+! go mod tidy
+stderr '^go: warning: ignoring go.mod in \$GOPATH'
+stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules'''
+
+go env
+stdout 'GOPATH='
+stderr '^go: warning: ignoring go.mod in \$GOPATH'
+
+-- $GOPATH/go.mod --
+module bug
+
+go 1.21
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/gccgo_link_c.txt b/src/cmd/go/testdata/script/gccgo_link_c.txt
index b9a4c70..d37cb66 100644
--- a/src/cmd/go/testdata/script/gccgo_link_c.txt
+++ b/src/cmd/go/testdata/script/gccgo_link_c.txt
@@ -3,9 +3,11 @@
 
 [!cgo] skip
 [!exec:gccgo] skip
+[cross] skip  # gccgo can't necessarily cross-compile, so don't assume it will reach the step where we expect it to fail
 
-go build -n -compiler gccgo
+! go build -x -compiler gccgo
 stderr 'gccgo.*\-L [^ ]*alibpath \-lalib' # make sure that Go-inline "#cgo LDFLAGS:" ("-L alibpath -lalib") passed to gccgo linking stage
+! stderr 'gccgo.*-lalib.*-lalib' # make sure -lalib is only passed once
 
 -- go.mod --
 module m
diff --git a/src/cmd/go/testdata/script/generate_invalid.txt b/src/cmd/go/testdata/script/generate_invalid.txt
index e18e62c..3bede32 100644
--- a/src/cmd/go/testdata/script/generate_invalid.txt
+++ b/src/cmd/go/testdata/script/generate_invalid.txt
@@ -6,8 +6,13 @@
 env PATH=$GOBIN${:}$PATH
 
 # Test go generate for directory with no go files
-go generate ./nogo
+! go generate ./nogo
 ! stdout 'Fail'
+stderr 'no Go files'
+
+# Test that go generate for a module which doesn't exist fails
+! go generate foo.bar/nothing
+stderr 'no required module provides package foo.bar/nothing'
 
 # Test go generate for package where all .go files are excluded by build
 # constraints
diff --git a/src/cmd/go/testdata/script/generate_workspace.txt b/src/cmd/go/testdata/script/generate_workspace.txt
new file mode 100644
index 0000000..5ba2393
--- /dev/null
+++ b/src/cmd/go/testdata/script/generate_workspace.txt
@@ -0,0 +1,27 @@
+# This is a regression test for Issue #56098: Go generate
+# wasn't initializing workspace mode.
+
+[short] skip
+
+go generate ./mod
+cmp ./mod/got.txt want.txt
+
+-- go.work --
+go 1.22
+
+use ./mod
+-- mod/go.mod --
+module example.com/mod
+-- mod/gen.go --
+//go:generate go run gen.go got.txt
+
+package main
+
+import "os"
+
+func main() {
+    outfile := os.Args[1]
+    os.WriteFile(outfile, []byte("Hello World!\n"), 0644)
+}
+-- want.txt --
+Hello World!
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/get_404_meta.txt b/src/cmd/go/testdata/script/get_404_meta.txt
index 4ffbdeb..7665155 100644
--- a/src/cmd/go/testdata/script/get_404_meta.txt
+++ b/src/cmd/go/testdata/script/get_404_meta.txt
@@ -4,9 +4,6 @@
 [!git] skip
 
 env GONOSUMDB=bazil.org,github.com,golang.org
-env GO111MODULE=off
-go get -d bazil.org/fuse/fs/fstestutil
-
 env GO111MODULE=on
 env GOPROXY=direct
 go get bazil.org/fuse/fs/fstestutil
diff --git a/src/cmd/go/testdata/script/get_brace.txt b/src/cmd/go/testdata/script/get_brace.txt
deleted file mode 100644
index 34f66a6..0000000
--- a/src/cmd/go/testdata/script/get_brace.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-env GO111MODULE=off
-
-[!git] skip
-
-# Set up some empty repositories.
-cd $WORK/_origin/foo
-exec git init
-exec git config user.name 'Nameless Gopher'
-exec git config user.email 'nobody@golang.org'
-exec git commit --allow-empty -m 'create master branch'
-
-cd $WORK
-cd '_origin/{confusing}'
-exec git init
-exec git config user.name 'Nameless Gopher'
-exec git config user.email 'nobody@golang.org'
-exec git commit --allow-empty -m 'create master branch'
-
-# Clone the empty repositories into GOPATH.
-# This tells the Go command where to find them: it takes the place of a user's meta-tag redirector.
-mkdir $GOPATH/src/example.com
-cd $GOPATH/src/example.com
-exec git clone $WORK/_origin/foo
-exec git clone $WORK/_origin/{confusing}
-
-# Commit contents to the repositories.
-cd $WORK/_origin/foo
-exec git add main.go
-exec git commit -m 'add main'
-
-cd $WORK
-cd '_origin/{confusing}'
-exec git add confusing.go
-exec git commit -m 'just try to delete this!'
-
-# 'go get' should refuse to download or update the confusingly-named repo.
-cd $GOPATH/src/example.com/foo
-! go get -u 'example.com/{confusing}'
-stderr 'invalid char'
-! go get -u example.com/foo
-stderr 'invalid import path'
-! exists example.com/{confusing}
-
--- $WORK/_origin/foo/main.go --
-package main
-import _ "example.com/{confusing}"
-
-func main() {}
-
--- $WORK/_origin/{confusing}/confusing.go --
-package confusing
diff --git a/src/cmd/go/testdata/script/get_custom_domain_wildcard.txt b/src/cmd/go/testdata/script/get_custom_domain_wildcard.txt
deleted file mode 100644
index 45ab524..0000000
--- a/src/cmd/go/testdata/script/get_custom_domain_wildcard.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-[!net:rsc.io] skip
-[!git] skip
-env GO111MODULE=off
-
-go get -u rsc.io/pdf/...
-exists $GOPATH/bin/pdfpasswd$GOEXE
diff --git a/src/cmd/go/testdata/script/get_dash_t.txt b/src/cmd/go/testdata/script/get_dash_t.txt
deleted file mode 100644
index 8f3a036..0000000
--- a/src/cmd/go/testdata/script/get_dash_t.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Tests issue 8181
-
-[!net:github.com] skip
-[!git] skip
-env GO111MODULE=off
-
-go get -v -t github.com/rsc/go-get-issue-8181/a github.com/rsc/go-get-issue-8181/b
-go list -test -deps github.com/rsc/go-get-issue-8181/b
-stdout 'x/build/gerrit'
diff --git a/src/cmd/go/testdata/script/get_domain_root.txt b/src/cmd/go/testdata/script/get_domain_root.txt
deleted file mode 100644
index dfcea86..0000000
--- a/src/cmd/go/testdata/script/get_domain_root.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-# Tests issue #9357
-# go get foo.io (not foo.io/subdir) was not working consistently.
-
-[!net:go-get-issue-9357.appspot.com] skip
-[!git] skip
-env GO111MODULE=off
-
-# go-get-issue-9357.appspot.com is running
-# the code at github.com/rsc/go-get-issue-9357,
-# a trivial Go on App Engine app that serves a
-# <meta> tag for the domain root.
-go get -d go-get-issue-9357.appspot.com
-go get go-get-issue-9357.appspot.com
-go get -u go-get-issue-9357.appspot.com
-
-rm $GOPATH/src/go-get-issue-9357.appspot.com
-go get go-get-issue-9357.appspot.com
-
-rm $GOPATH/src/go-get-issue-9357.appspot.com
-go get -u go-get-issue-9357.appspot.com
diff --git a/src/cmd/go/testdata/script/get_dot_slash_download.txt b/src/cmd/go/testdata/script/get_dot_slash_download.txt
deleted file mode 100644
index 6dbd118..0000000
--- a/src/cmd/go/testdata/script/get_dot_slash_download.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-[!net:rsc.io] skip
-[!git] skip
-env GO111MODULE=off
-
-# Tests Issues #9797 and #19769
-
-mkdir $WORK/tmp/src/rsc.io
-env GOPATH=$WORK/tmp
-cd $WORK/tmp/src/rsc.io
-go get ./pprof_mac_fix
diff --git a/src/cmd/go/testdata/script/get_dotfiles.txt b/src/cmd/go/testdata/script/get_dotfiles.txt
deleted file mode 100644
index 676a044..0000000
--- a/src/cmd/go/testdata/script/get_dotfiles.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-env GO111MODULE=off
-[short] skip
-
-[!git] skip
-
-# Set up a benign repository and a repository with a dotfile name.
-cd $WORK/_origin/foo
-exec git init
-exec git config user.name 'Nameless Gopher'
-exec git config user.email 'nobody@golang.org'
-exec git commit --allow-empty -m 'create master branch'
-
-cd $WORK/_origin/.hidden
-exec git init
-exec git config user.name 'Nameless Gopher'
-exec git config user.email 'nobody@golang.org'
-exec git commit --allow-empty -m 'create master branch'
-
-# Clone the empty repositories into GOPATH.
-# This tells the Go command where to find them: it takes the place of a user's meta-tag redirector.
-mkdir $GOPATH/src/example.com
-cd $GOPATH/src/example.com
-exec git clone $WORK/_origin/foo
-exec git clone $WORK/_origin/.hidden
-
-# Add a benign commit.
-cd $WORK/_origin/foo
-cp _ok/main.go main.go
-exec git add main.go
-exec git commit -m 'add ok'
-
-# 'go get' should install the benign commit.
-cd $GOPATH
-go get -u example.com/foo
-
-# Now sneak in an import of a dotfile path.
-cd $WORK/_origin/.hidden
-exec git add hidden.go
-exec git commit -m 'nothing to see here, move along'
-
-cd $WORK/_origin/foo
-cp _sneaky/main.go main.go
-exec git add main.go
-exec git commit -m 'fix typo (heh heh heh)'
-
-# 'go get -u' should refuse to download or update the dotfile-named repo.
-cd $GOPATH/src/example.com/foo
-! go get -u example.com/foo
-stderr 'leading dot'
-! exists example.com/.hidden/hidden.go
-
--- $WORK/_origin/foo/_ok/main.go --
-package main
-
-func main() {}
-
--- $WORK/_origin/foo/_sneaky/main.go --
-package main
-import _ "example.com/.hidden"
-
-func main() {}
-
--- $WORK/_origin/.hidden/hidden.go --
-package hidden
diff --git a/src/cmd/go/testdata/script/get_go_file.txt b/src/cmd/go/testdata/script/get_go_file.txt
deleted file mode 100644
index f6d0de4..0000000
--- a/src/cmd/go/testdata/script/get_go_file.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-# Tests Issue #38478
-# Tests that go get in GOPATH mode returns a specific error if the argument
-# ends with '.go', and either has no slash or refers to an existing file.
-
-env GO111MODULE=off
-
-# argument doesn't have .go suffix
-go get -d test
-
-# argument has .go suffix, is a file and exists
-! go get -d test.go
-stderr 'go: test.go: arguments must be package or module paths'
-
-# argument has .go suffix, doesn't exist and has no slashes
-! go get -d test_missing.go
-stderr 'go: test_missing.go: arguments must be package or module paths'
-
-# argument has .go suffix, is a file and exists in sub-directory
-! go get -d test/test.go
-stderr 'go: test/test.go exists as a file, but ''go get'' requires package arguments'
-
-# argument has .go suffix, doesn't exist and has slashes
-! go get -d test/test_missing.go
-! stderr 'arguments must be package or module paths'
-! stderr 'exists as a file, but ''go get'' requires package arguments'
-
-# argument has .go suffix, is a symlink and exists
-[symlink] symlink test_sym.go -> test.go
-[symlink] ! go get -d test_sym.go
-[symlink] stderr 'go: test_sym.go: arguments must be package or module paths'
-[symlink] rm test_sym.go
-
-# argument has .go suffix, is a symlink and exists in sub-directory
-[symlink] symlink test/test_sym.go -> test.go
-[symlink] ! go get -d test/test_sym.go
-[symlink] stderr 'go: test/test_sym.go exists as a file, but ''go get'' requires package arguments'
-[symlink] rm test_sym.go
-
-# argument has .go suffix, is a directory and exists
-mkdir test_dir.go
-! go get -d test_dir.go
-stderr 'go: test_dir.go: arguments must be package or module paths'
-rm test_dir.go
-
-# argument has .go suffix, is a directory and exists in sub-directory
-mkdir test/test_dir.go
-! go get -d test/test_dir.go
-! stderr 'arguments must be package or module paths'
-! stderr 'exists as a file, but ''go get'' requires package arguments'
-rm test/test_dir.go
-
-
--- test.go --
-package main
-func main() {println("test")}
-
-
--- test/test.go --
-package main
-func main() {println("test")}
diff --git a/src/cmd/go/testdata/script/get_goroot.txt b/src/cmd/go/testdata/script/get_goroot.txt
deleted file mode 100644
index 7510692..0000000
--- a/src/cmd/go/testdata/script/get_goroot.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-[!net:github.com] skip
-env GO111MODULE=off
-
-# Issue 4186. go get cannot be used to download packages to $GOROOT.
-# Test that without GOPATH set, go get should fail.
-
-# Fails because GOROOT=GOPATH
-env GOPATH=$WORK/tmp
-env GOROOT=$WORK/tmp
-! go get -d github.com/golang/example/hello
-stderr 'warning: GOPATH set to GOROOT'
-stderr '\$GOPATH must not be set to \$GOROOT'
-
-# Fails because GOROOT=GOPATH after cleaning.
-env GOPATH=$WORK/tmp/
-env GOROOT=$WORK/tmp
-! go get -d github.com/golang/example/hello
-stderr 'warning: GOPATH set to GOROOT'
-stderr '\$GOPATH must not be set to \$GOROOT'
-
-env GOPATH=$WORK/tmp
-env GOROOT=$WORK/tmp/
-! go get -d github.com/golang/example/hello
-stderr 'warning: GOPATH set to GOROOT'
-stderr '\$GOPATH must not be set to \$GOROOT'
-
-# Make a home directory
-mkdir $WORK/home/go
-
-# Fails because GOROOT=$HOME/go so default GOPATH unset.
-[GOOS:windows] env USERPROFILE=$WORK/home
-[GOOS:plan9] env home=$WORK/home
-[!GOOS:windows] [!GOOS:plan9] env HOME=$WORK/home
-env GOPATH=
-env GOROOT=$WORK/home/go
-! go get -d github.com/golang/example/hello
-stderr '\$GOPATH not set'
-
-[GOOS:windows] env USERPROFILE=$WORK/home/
-[GOOS:plan9] env home=$WORK/home/
-[!GOOS:windows] [!GOOS:plan9] env HOME=$WORK/home/
-env GOPATH=
-env GOROOT=$WORK/home/go
-! go get -d github.com/golang/example/hello
-stderr '\$GOPATH not set'
-
-[GOOS:windows] env USERPROFILE=$WORK/home
-[GOOS:plan9] env home=$WORK/home
-[!GOOS:windows] [!GOOS:plan9] env HOME=$WORK/home
-env GOPATH=
-env GOROOT=$WORK/home/go/
-! go get -d github.com/golang/example/hello
-stderr '\$GOPATH not set'
diff --git a/src/cmd/go/testdata/script/get_insecure.txt b/src/cmd/go/testdata/script/get_insecure.txt
index afe64b8..f29ec2d 100644
--- a/src/cmd/go/testdata/script/get_insecure.txt
+++ b/src/cmd/go/testdata/script/get_insecure.txt
@@ -1,25 +1,9 @@
-# TODO(matloob): Split this test into two? It's one of the slowest tests we have.
-
 [!net:insecure.go-get-issue-15410.appspot.com] skip
 [!git] skip
 
 env PATH=$WORK/tmp/bin${:}$PATH
 go build -o $WORK/tmp/bin/ssh ssh.go
 
-# GOPATH: Set up
-env GO111MODULE=off
-
-# GOPATH: Try go get -d of HTTP-only repo (should fail).
-! go get -d insecure.go-get-issue-15410.appspot.com/pkg/p
-
-# GOPATH: Try again with GOINSECURE (should succeed).
-env GOINSECURE=insecure.go-get-issue-15410.appspot.com
-go get -d insecure.go-get-issue-15410.appspot.com/pkg/p
-
-# GOPATH: Try updating without GOINSECURE (should fail).
-env GOINSECURE=''
-! go get -d -u -f insecure.go-get-issue-15410.appspot.com/pkg/p
-
 # Modules: Set up
 env GOPATH=$WORK/m/gp
 mkdir $WORK/m
diff --git a/src/cmd/go/testdata/script/get_insecure_custom_domain.txt b/src/cmd/go/testdata/script/get_insecure_custom_domain.txt
deleted file mode 100644
index 4b3c9d6..0000000
--- a/src/cmd/go/testdata/script/get_insecure_custom_domain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-[!net:insecure.go-get-issue-15410.appspot.com] skip
-[!git] skip
-env GO111MODULE=off
-
-! go get -d insecure.go-get-issue-15410.appspot.com/pkg/p
-
-env GOINSECURE=insecure.go-get-issue-15410.appspot.com
-go get -d insecure.go-get-issue-15410.appspot.com/pkg/p
diff --git a/src/cmd/go/testdata/script/get_insecure_env.txt b/src/cmd/go/testdata/script/get_insecure_env.txt
deleted file mode 100644
index 98e7053..0000000
--- a/src/cmd/go/testdata/script/get_insecure_env.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-[!net:insecure.go-get-issue-15410.appspot.com] skip
-[!git] skip
-
-# GOPATH: Set up
-env GO111MODULE=off
-
-# GOPATH: Try go get -d of HTTP-only repo (should fail).
-! go get -d insecure.go-get-issue-15410.appspot.com/pkg/p
-
-# GOPATH: Try again with invalid GOINSECURE (should fail).
-env GOINSECURE=insecure.go-get-issue-15410.appspot.com/pkg/q
-! go get -d insecure.go-get-issue-15410.appspot.com/pkg/p
-
-# GOPATH: Try with correct GOINSECURE (should succeed).
-env GOINSECURE=insecure.go-get-issue-15410.appspot.com/pkg/p
-go get -d insecure.go-get-issue-15410.appspot.com/pkg/p
-
-# GOPATH: Try updating without GOINSECURE (should fail).
-env GOINSECURE=
-! go get -d -u -f insecure.go-get-issue-15410.appspot.com/pkg/p
-
-# GOPATH: Try updating with GOINSECURE glob (should succeed).
-env GOINSECURE=*.go-get-*.appspot.com
-go get -d -u -f insecure.go-get-issue-15410.appspot.com/pkg/p
-
-# GOPATH: Try updating with GOINSECURE base URL (should succeed).
-env GOINSECURE=insecure.go-get-issue-15410.appspot.com
-go get -d -u -f insecure.go-get-issue-15410.appspot.com/pkg/p
-
diff --git a/src/cmd/go/testdata/script/get_insecure_redirect.txt b/src/cmd/go/testdata/script/get_insecure_redirect.txt
deleted file mode 100644
index 2e53c58..0000000
--- a/src/cmd/go/testdata/script/get_insecure_redirect.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# golang.org/issue/29591: 'go get' was following plain-HTTP redirects even without -insecure (now replaced by GOINSECURE).
-# golang.org/issue/34049: 'go get' would panic in case of an insecure redirect in GOPATH mode
-
-[!git] skip
-
-env GO111MODULE=off
-
-! go get -d vcs-test.golang.org/insecure/go/insecure
-stderr 'redirected .* to insecure URL'
-
-[short] stop 'builds a git repo'
-
-env GOINSECURE=vcs-test.golang.org/insecure/go/insecure
-go get -d vcs-test.golang.org/insecure/go/insecure
diff --git a/src/cmd/go/testdata/script/get_insecure_update.txt b/src/cmd/go/testdata/script/get_insecure_update.txt
deleted file mode 100644
index 7cddd6b..0000000
--- a/src/cmd/go/testdata/script/get_insecure_update.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-[!net:github.com] skip
-[!git] skip
-env GO111MODULE=off
-
-# Clone the repo via HTTP manually.
-exec git clone -q http://github.com/golang/example github.com/golang/example
-
-# Update without GOINSECURE should fail.
-# We need -f to ignore import comments.
-! go get -d -u -f github.com/golang/example/hello
-
-# Update with GOINSECURE should succeed.
-env GOINSECURE=github.com/golang/example/hello
-go get -d -u -f github.com/golang/example/hello
diff --git a/src/cmd/go/testdata/script/get_internal_wildcard.txt b/src/cmd/go/testdata/script/get_internal_wildcard.txt
deleted file mode 100644
index b25e746..0000000
--- a/src/cmd/go/testdata/script/get_internal_wildcard.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-[!net:github.com] skip
-[!git] skip
-env GO111MODULE=off
-
-# This used to fail with errors about internal packages
-go get github.com/rsc/go-get-issue-11960/...
diff --git a/src/cmd/go/testdata/script/get_issue11307.txt b/src/cmd/go/testdata/script/get_issue11307.txt
deleted file mode 100644
index d490959..0000000
--- a/src/cmd/go/testdata/script/get_issue11307.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# go get -u was not working except in checkout directory
-
-[!net:github.com] skip
-[!git] skip
-env GO111MODULE=off
-
-env GOPATH=$WORK/tmp/gopath
-go get github.com/rsc/go-get-issue-11307
-go get -u github.com/rsc/go-get-issue-11307 # was failing
diff --git a/src/cmd/go/testdata/script/get_issue16471.txt b/src/cmd/go/testdata/script/get_issue16471.txt
deleted file mode 100644
index 1aeae58..0000000
--- a/src/cmd/go/testdata/script/get_issue16471.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-[!net:rsc.io] skip
-[!net:github.com] skip
-[!git] skip
-
-env GO111MODULE=off
-
-cd rsc.io/go-get-issue-10952
-
-exec git init
-exec git add foo.go
-exec git config user.name Gopher
-exec git config user.email gopher@golang.org
-exec git commit -a -m 'initial commit'
-exec git remote add origin https://github.com/golang/go-get-issue-10952
-
-exec git status
-
-! go get -x -u rsc.io/go-get-issue-10952
-stderr '^package rsc.io/go-get-issue-10952: rsc\.io/go-get-issue-10952 is a custom import path for https://github.com/rsc/go-get-issue-10952, but .* is checked out from https://github.com/golang/go-get-issue-10952$'
-
--- rsc.io/go-get-issue-10952/foo.go --
-// Junk package to test go get.
-package foo
diff --git a/src/cmd/go/testdata/script/get_issue22125.txt b/src/cmd/go/testdata/script/get_issue22125.txt
deleted file mode 100644
index 086081f..0000000
--- a/src/cmd/go/testdata/script/get_issue22125.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# This test verifies a fix for a security issue; see https://go.dev/issue/22125.
-
-[short] skip
-[!git] skip
-[!exec:svn] skip
-
-env GO111MODULE=off
-
-cd $GOPATH
-! go get -u vcs-test.golang.org/go/test1-svn-git
-stderr 'src'${/}'vcs-test.* uses git, but parent .*src'${/}'vcs-test.* uses svn'
-
-[!case-sensitive] ! go get -u vcs-test.golang.org/go/test2-svn-git/test2main
-[!case-sensitive] stderr 'src'${/}'vcs-test.* uses git, but parent .*src'${/}'vcs-test.* uses svn'
diff --git a/src/cmd/go/testdata/script/get_issue53955.txt b/src/cmd/go/testdata/script/get_issue53955.txt
new file mode 100644
index 0000000..685c6fa
--- /dev/null
+++ b/src/cmd/go/testdata/script/get_issue53955.txt
@@ -0,0 +1,79 @@
+# Regression test for https://go.dev/issue/53955.
+# New remote tags were erroneously added to the local clone of a repo
+# only *after* extracting version information for a locally-cached commit,
+# causing the version information to have incomplete Tags and Version fields.
+
+[short] skip 'constructs a local git repo'
+[!git] skip
+[!net:github.com] skip 'does not actually use github.com because of insteadOf, but silence network check just in case'
+
+# Redirect git to a test-specific .gitconfig.
+# GIT_CONFIG_GLOBAL suffices for git 2.32.0 and newer.
+# For older git versions we also set $HOME.
+env GIT_CONFIG_GLOBAL=$WORK${/}home${/}gopher${/}.gitconfig
+env HOME=$WORK${/}home${/}gopher
+exec git config --global --show-origin user.name
+stdout 'Go Gopher'
+
+# Inject a local repo in place of a remote one, so that we can
+# add commits to the repo partway through the test.
+env GIT_ALLOW_PROTOCOL=file
+env GOPRIVATE=github.com/golang/issue53955
+
+[!GOOS:windows] exec git config --global 'url.file://'$WORK'/repo.insteadOf' 'https://github.com/golang/issue53955'
+[GOOS:windows]  exec git config --global 'url.file:///'$WORK'/repo.insteadOf' 'https://github.com/golang/issue53955'
+
+cd $WORK/repo
+
+env GIT_AUTHOR_NAME='Go Gopher'
+env GIT_AUTHOR_EMAIL='gopher@golang.org'
+env GIT_COMMITTER_NAME=$GIT_AUTHOR_NAME
+env GIT_COMMITTER_EMAIL=$GIT_AUTHOR_EMAIL
+
+exec git init
+
+env GIT_COMMITTER_DATE=2022-07-19T11:07:00-04:00
+env GIT_AUTHOR_DATE=2022-07-19T11:07:00-04:00
+exec git add go.mod issue53955.go
+exec git commit -m 'initial commit'
+exec git branch -m main
+exec git tag v1.0.9
+
+env GIT_COMMITTER_DATE=2022-07-19T11:07:01-04:00
+env GIT_AUTHOR_DATE=2022-07-19T11:07:01-04:00
+exec git add extra.go
+exec git commit -m 'next commit'
+exec git show-ref --tags --heads
+cmp stdout $WORK/.git-refs-1
+
+cd $WORK/m
+go get -x github.com/golang/issue53955@2cb3d49f
+stderr '^go: added github.com/golang/issue53955 v1.0.10-0.20220719150701-2cb3d49f8874$'
+
+cd $WORK/repo
+exec git tag v1.0.10
+
+cd $WORK/m
+go get -x github.com/golang/issue53955@v1.0.10
+! stderr 'v1\.0\.10 is not a tag'
+stderr '^go: upgraded github.com/golang/issue53955 v.* => v1\.0\.10$'
+
+-- $WORK/repo/go.mod --
+module github.com/golang/issue53955
+
+go 1.18
+-- $WORK/repo/issue53955.go --
+package issue53955
+-- $WORK/repo/extra.go --
+package issue53955
+-- $WORK/.git-refs-1 --
+2cb3d49f8874b9362ed0ddd2a6512e4108bbf6b1 refs/heads/main
+050526ebf5883191e990529eb3cc9345abaf838c refs/tags/v1.0.9
+-- $WORK/m/go.mod --
+module m
+
+go 1.18
+-- $WORK/home/gopher/.gitconfig --
+[user]
+	name = Go Gopher
+	email = gopher@golang.org
diff --git a/src/cmd/go/testdata/script/get_legacy.txt b/src/cmd/go/testdata/script/get_legacy.txt
deleted file mode 100644
index 2909b73..0000000
--- a/src/cmd/go/testdata/script/get_legacy.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-# This test was converted from a test in vendor_test.go (which no longer exists).
-# That seems to imply that it's about vendoring semantics, but the test doesn't
-# use 'go -mod=vendor' (and none of the fetched repos have vendor folders).
-# The test still seems to be useful as a test of direct-mode go get.
-
-[short] skip
-[!git] skip
-env GO111MODULE=off
-
-env GOPATH=$WORK/tmp/d1
-go get vcs-test.golang.org/git/modlegacy1-old.git/p1
-go list -f '{{.Deps}}' vcs-test.golang.org/git/modlegacy1-old.git/p1
-stdout 'new.git/p2' # old/p1 should depend on new/p2
-! stdout new.git/v2/p2 # old/p1 should NOT depend on new/v2/p2
-go build vcs-test.golang.org/git/modlegacy1-old.git/p1 vcs-test.golang.org/git/modlegacy1-new.git/p1
-! stdout .
-
-env GOPATH=$WORK/tmp/d2
-
-rm $GOPATH
-go get github.com/rsc/vgotest5
-go get github.com/rsc/vgotest4
-go get github.com/myitcv/vgo_example_compat
-
-rm $GOPATH
-go get github.com/rsc/vgotest4
-go get github.com/rsc/vgotest5
-go get github.com/myitcv/vgo_example_compat
-
-rm $GOPATH
-go get github.com/rsc/vgotest4 github.com/rsc/vgotest5
-go get github.com/myitcv/vgo_example_compat
-
-rm $GOPATH
-go get github.com/rsc/vgotest5 github.com/rsc/vgotest4
-go get github.com/myitcv/vgo_example_compat
-
-rm $GOPATH
-go get github.com/myitcv/vgo_example_compat
-go get github.com/rsc/vgotest5 github.com/rsc/vgotest4
-
-rm $GOPATH
-go get github.com/myitcv/vgo_example_compat github.com/rsc/vgotest4 github.com/rsc/vgotest5
-
-rm $GOPATH
-go get github.com/myitcv/vgo_example_compat github.com/rsc/vgotest5 github.com/rsc/vgotest4
-
-rm $GOPATH
-go get github.com/rsc/vgotest4 github.com/myitcv/vgo_example_compat github.com/rsc/vgotest5
-
-rm $GOPATH
-go get github.com/rsc/vgotest4 github.com/rsc/vgotest5 github.com/myitcv/vgo_example_compat
-
-rm $GOPATH
-go get github.com/rsc/vgotest5 github.com/myitcv/vgo_example_compat github.com/rsc/vgotest4
-
-rm $GOPATH
-go get github.com/rsc/vgotest5 github.com/rsc/vgotest4 github.com/myitcv/vgo_example_compat
diff --git a/src/cmd/go/testdata/script/get_non_pkg.txt b/src/cmd/go/testdata/script/get_non_pkg.txt
deleted file mode 100644
index 5202e88..0000000
--- a/src/cmd/go/testdata/script/get_non_pkg.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-[!net:golang.org] skip
-[!git] skip
-
-env GOBIN=$WORK/tmp/gobin
-env GO111MODULE=off
-
-! go get -d golang.org/x/tools
-stderr 'golang.org/x/tools: no Go files'
-
-! go get -d -u golang.org/x/tools
-stderr 'golang.org/x/tools: no Go files'
-
-! go get -d golang.org/x/tools
-stderr 'golang.org/x/tools: no Go files'
diff --git a/src/cmd/go/testdata/script/get_race.txt b/src/cmd/go/testdata/script/get_race.txt
deleted file mode 100644
index 1e06c80..0000000
--- a/src/cmd/go/testdata/script/get_race.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-# Tests issue #20502
-
-[!net:github.com] skip
-[!git] skip
-[!race] skip
-env GO111MODULE=off
-
-go get -race github.com/rsc/go-get-issue-9224-cmd
diff --git a/src/cmd/go/testdata/script/get_test_only.txt b/src/cmd/go/testdata/script/get_test_only.txt
deleted file mode 100644
index af90f74..0000000
--- a/src/cmd/go/testdata/script/get_test_only.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-[!net:golang.org] skip
-[!git] skip
-env GO111MODULE=off
-
-go get golang.org/x/tour/content...
-go get -t golang.org/x/tour/content...
diff --git a/src/cmd/go/testdata/script/get_tilde.txt b/src/cmd/go/testdata/script/get_tilde.txt
deleted file mode 100644
index 1c3a029..0000000
--- a/src/cmd/go/testdata/script/get_tilde.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-env GO111MODULE=off
-
-# Paths containing windows short names should be rejected before attempting to fetch.
-! go get vcs-test.golang.org/longna~1.dir/thing
-stderr 'trailing tilde and digits'
-! go get vcs-test.golang.org/longna~1/thing
-stderr 'trailing tilde and digits'
-! go get vcs-test.golang.org/~9999999/thing
-stderr 'trailing tilde and digits'
-
-[short] stop
-
-# A path containing an element that is just a tilde, or a tilde followed by non-digits,
-# should attempt to resolve.
-! go get vcs-test.golang.org/~glenda/notfound
-! stderr 'trailing tilde and digits'
-stderr 'unrecognized import path'
-
-! go get vcs-test.golang.org/~glenda2/notfound
-! stderr 'trailing tilde and digits'
-stderr 'unrecognized import path'
-
-! go get vcs-test.golang.org/~/notfound
-! stderr 'trailing tilde and digits'
-stderr 'unrecognized import path'
diff --git a/src/cmd/go/testdata/script/get_update.txt b/src/cmd/go/testdata/script/get_update.txt
deleted file mode 100644
index a70a80d..0000000
--- a/src/cmd/go/testdata/script/get_update.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-# Tests Issue #9224
-# The recursive updating was trying to walk to
-# former dependencies, not current ones.
-
-[!net:github.com] skip
-[!git] skip
-env GO111MODULE=off
-
-# Rewind
-go get github.com/rsc/go-get-issue-9224-cmd
-cd $GOPATH/src/github.com/rsc/go-get-issue-9224-lib
-exec git reset --hard HEAD~
-cd $GOPATH/src
-
-# Run get
-go get -u 'github.com/rsc/go-get-issue-9224-cmd'
-
-# (Again with -d -u) Rewind
-go get github.com/rsc/go-get-issue-9224-cmd
-cd $GOPATH/src/github.com/rsc/go-get-issue-9224-lib
-exec git reset --hard HEAD~
-cd $GOPATH/src
-
-# (Again with -d -u) Run get
-go get -d -u 'github.com/rsc/go-get-issue-9224-cmd'
diff --git a/src/cmd/go/testdata/script/get_update_all.txt b/src/cmd/go/testdata/script/get_update_all.txt
deleted file mode 100644
index 22fe3ed..0000000
--- a/src/cmd/go/testdata/script/get_update_all.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# Issue 14444: go get -u .../ duplicate loads errors
-# Check that go get update -u ... does not try to load duplicates
-
-env GO111MODULE=off
-
-go get -u -n .../
-! stderr 'duplicate loads of' # make sure old packages are removed from cache
diff --git a/src/cmd/go/testdata/script/get_update_unknown_protocol.txt b/src/cmd/go/testdata/script/get_update_unknown_protocol.txt
deleted file mode 100644
index 714ed6a..0000000
--- a/src/cmd/go/testdata/script/get_update_unknown_protocol.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-[!net:github.com] skip
-[!git] skip
-env GO111MODULE=off
-
-# Clone the repo via HTTPS manually.
-exec git clone -q https://github.com/golang/example github.com/golang/example
-
-# Configure the repo to use a protocol unknown to cmd/go
-# that still actually works.
-cd github.com/golang/example
-exec git remote set-url origin xyz://github.com/golang/example
-exec git config --local url.https://github.com/.insteadOf xyz://github.com/
-
-go get -d -u -f github.com/golang/example/hello
diff --git a/src/cmd/go/testdata/script/get_update_wildcard.txt b/src/cmd/go/testdata/script/get_update_wildcard.txt
deleted file mode 100644
index c833783..0000000
--- a/src/cmd/go/testdata/script/get_update_wildcard.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-# Issue 14450: go get -u .../ tried to import not downloaded package
-
-[!net:github.com] skip
-[!git] skip
-env GO111MODULE=off
-
-go get github.com/tmwh/go-get-issue-14450/a
-! go get -u .../
-stderr 'cannot find package.*d-dependency/e'
-
-# Even though get -u failed, the source for others should be downloaded.
-exists github.com/tmwh/go-get-issue-14450/b
-exists github.com/tmwh/go-get-issue-14450-b-dependency/c
-exists github.com/tmwh/go-get-issue-14450-b-dependency/d
-
-! exists github.com/tmwh/go-get-issue-14450-c-dependency/e
diff --git a/src/cmd/go/testdata/script/get_vcs_error_message.txt b/src/cmd/go/testdata/script/get_vcs_error_message.txt
deleted file mode 100644
index 8dc84fc..0000000
--- a/src/cmd/go/testdata/script/get_vcs_error_message.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Test that the Version Control error message includes the correct directory
-env GO111MODULE=off
-! go get -u foo
-stderr gopath(\\\\|/)src(\\\\|/)foo
-
--- foo/foo.go --
-package foo
--- math/math.go --
-package math
diff --git a/src/cmd/go/testdata/script/get_vendor.txt b/src/cmd/go/testdata/script/get_vendor.txt
deleted file mode 100644
index 179456d..0000000
--- a/src/cmd/go/testdata/script/get_vendor.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-[short] skip
-env GO111MODULE=off
-
-cd $GOPATH/src/v
-go run m.go
-go test
-go list -f '{{.Imports}}'
-stdout 'v/vendor/vendor.org/p'
-go list -f '{{.TestImports}}'
-stdout 'v/vendor/vendor.org/p'
-go get -d
-go get -t -d
-
-[!net:github.com] stop
-[!git] stop
-
-cd $GOPATH/src
-
-# Update
-go get 'github.com/rsc/go-get-issue-11864'
-go get -u 'github.com/rsc/go-get-issue-11864'
-exists github.com/rsc/go-get-issue-11864/vendor
-
-# get -u
-rm $GOPATH
-mkdir $GOPATH/src
-go get -u 'github.com/rsc/go-get-issue-11864'
-exists github.com/rsc/go-get-issue-11864/vendor
-
-# get -t -u
-rm $GOPATH
-mkdir $GOPATH/src
-go get -t -u 'github.com/rsc/go-get-issue-11864/...'
-exists github.com/rsc/go-get-issue-11864/vendor
-
-# Submodules
-rm $GOPATH
-mkdir $GOPATH/src
-go get -d 'github.com/rsc/go-get-issue-12612'
-go get -u -d 'github.com/rsc/go-get-issue-12612'
-exists github.com/rsc/go-get-issue-12612/vendor/golang.org/x/crypto/.git
-
-# Bad vendor (bad/imp)
-rm $GOPATH
-mkdir $GOPATH/src
-! go get -t -u 'github.com/rsc/go-get-issue-18219/bad/imp'
-stderr 'must be imported as'
-! exists github.com/rsc/go-get-issue-11864/vendor
-
-# Bad vendor (bad/imp2)
-rm $GOPATH
-mkdir $GOPATH/src
-! go get -t -u 'github.com/rsc/go-get-issue-18219/bad/imp2'
-stderr 'must be imported as'
-! exists github.com/rsc/go-get-issue-11864/vendor
-
-# Bad vendor (bad/imp3)
-rm $GOPATH
-mkdir $GOPATH/src
-! go get -t -u 'github.com/rsc/go-get-issue-18219/bad/imp3'
-stderr 'must be imported as'
-! exists github.com/rsc/go-get-issue-11864/vendor
-
-# Bad vendor (bad/...)
-rm $GOPATH
-mkdir $GOPATH/src
-! go get -t -u 'github.com/rsc/go-get-issue-18219/bad/...'
-stderr 'must be imported as'
-! exists github.com/rsc/go-get-issue-11864/vendor
-
--- v/m.go --
-package main
-
-import (
-	"fmt"
-	"vendor.org/p"
-)
-
-func main() {
-	fmt.Println(p.C)
-}
--- v/m_test.go --
-package main
-import (
-	"fmt"
-	"testing"
-	"vendor.org/p"
-)
-
-func TestNothing(t *testing.T) {
-	fmt.Println(p.C)
-}
--- v/vendor/vendor.org/p/p.go --
-package p
-const C = 1
diff --git a/src/cmd/go/testdata/script/get_with_git_trace.txt b/src/cmd/go/testdata/script/get_with_git_trace.txt
deleted file mode 100644
index 6f1305a..0000000
--- a/src/cmd/go/testdata/script/get_with_git_trace.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-env GO111MODULE=off
-
-env GIT_TRACE=1
-
-[!net:golang.org] skip
-[!git] skip
-
-# go get should be success when GIT_TRACE set
-go get golang.org/x/text
diff --git a/src/cmd/go/testdata/script/gopath_moved_repo.txt b/src/cmd/go/testdata/script/gopath_moved_repo.txt
deleted file mode 100644
index 8108d9b..0000000
--- a/src/cmd/go/testdata/script/gopath_moved_repo.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-env GO111MODULE=off
-
-# Test that 'go get -u' reports packages whose VCS configurations do not
-# match their import paths.
-
-[!net:rsc.io] skip
-[short] skip
-
-# We need to execute a custom Go program to break the config files.
-#
-# git will ask for a username and password when we run 'go get -d -f -u',
-# so we also need to set GIT_ASKPASS. Conveniently, a single binary can
-# perform both tasks!
-
-go build -o replace.exe replace
-env GIT_ASKPASS=$PWD/replace.exe
-
-
-# Test that 'go get -u' reports moved git packages.
-
-[git] go get -d rsc.io/pdf
-[git] go get -d -u rsc.io/pdf
-[git] exec ./replace.exe pdf rsc.io/pdf/.git/config
-
-[git] ! go get -d -u rsc.io/pdf
-[git] stderr 'is a custom import path for'
-[git] ! go get -d -f -u rsc.io/pdf
-[git] stderr 'validating server certificate|[nN]ot [fF]ound'
-
-
-# Test that 'go get -u' reports moved Mercurial packages.
-
-[exec:hg] go get -d vcs-test.golang.org/go/custom-hg-hello
-[exec:hg] go get -d -u vcs-test.golang.org/go/custom-hg-hello
-[exec:hg] exec ./replace.exe custom-hg-hello vcs-test.golang.org/go/custom-hg-hello/.hg/hgrc
-
-[exec:hg] ! go get -d -u vcs-test.golang.org/go/custom-hg-hello
-[exec:hg] stderr 'is a custom import path for'
-[exec:hg] ! go get -d -f -u vcs-test.golang.org/go/custom-hg-hello
-[exec:hg] stderr 'validating server certificate|[nN]ot [fF]ound'
-
-
--- replace/replace.go --
-package main
-
-import (
-	"bytes"
-	"log"
-	"os"
-)
-
-func main() {
-	if len(os.Args) < 3 {
-		return
-	}
-
-	base := []byte(os.Args[1])
-	path := os.Args[2]
-	data, err := os.ReadFile(path)
-	if err != nil {
-		log.Fatal(err)
-	}
-	err = os.WriteFile(path, bytes.ReplaceAll(data, base, append(base, "XXX"...)), 0644)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
diff --git a/src/cmd/go/testdata/script/goroot_executable_trimpath.txt b/src/cmd/go/testdata/script/goroot_executable_trimpath.txt
index dc1e25e..a3f0c39 100644
--- a/src/cmd/go/testdata/script/goroot_executable_trimpath.txt
+++ b/src/cmd/go/testdata/script/goroot_executable_trimpath.txt
@@ -29,12 +29,20 @@
 env TESTGOROOT=$GOROOT
 env GOROOT=
 
+# Unset GOPATH and any variables that its default may be derived from,
+# so that we can check for a spurious warning.
+env GOPATH=
+env HOME=''
+env USERPROFILE=''
+env home=''
+
 # Relocated Executable
 # Since we built with -trimpath and the binary isn't installed in a
 # normal-looking GOROOT, this command should fail.
 
 ! exec $WORK/new/bin/go$GOEXE env GOROOT
 stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
+! stderr 'GOPATH set to GOROOT'
 
 # Cross-compiled binaries in cmd are installed to a ${GOOS}_${GOARCH} subdirectory,
 # so we also want to try a copy there.
@@ -44,6 +52,7 @@
 cp $WORK/new/bin/go$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE
 ! exec $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE env GOROOT
 stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
+! stderr 'GOPATH set to GOROOT'
 
 # Relocated Tree:
 # If the binary is sitting in a bin dir next to ../pkg/tool, that counts as a GOROOT,
@@ -51,6 +60,7 @@
 mkdir $WORK/new/pkg/tool
 exec $WORK/bin/check$GOEXE $WORK/new/bin/go$GOEXE $WORK/new
 exec $WORK/bin/check$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE $WORK/new
+! stderr 'GOPATH set to GOROOT'
 
 -- check.go --
 package main
diff --git a/src/cmd/go/testdata/script/gotoolchain_local.txt b/src/cmd/go/testdata/script/gotoolchain_local.txt
index 93f5570..db7e082 100644
--- a/src/cmd/go/testdata/script/gotoolchain_local.txt
+++ b/src/cmd/go/testdata/script/gotoolchain_local.txt
@@ -163,14 +163,27 @@
 go version
 stdout go1.500-bigcorp
 
+go mod edit -go=1.499 -toolchain=go1.499
+go version
+stdout go1.500-bigcorp
+
 go mod edit -go=1.500 -toolchain=none
 go version
 stdout go1.500-bigcorp
 
+go mod edit -go=1.500 -toolchain=go1.500
+go version
+stdout go1.500-bigcorp
+
 go mod edit -go=1.501 -toolchain=none
 go version
 stdout go1.501
 
+	# If toolchain > go, we must upgrade to the indicated toolchain (not just the go version).
+go mod edit -go=1.499 -toolchain=go1.501
+go version
+stdout go1.501
+
 env TESTGO_VERSION='go1.500 (bigcorp)'
 go mod edit -go=1.499 -toolchain=none
 go version
diff --git a/src/cmd/go/testdata/script/gotoolchain_modcmds.txt b/src/cmd/go/testdata/script/gotoolchain_modcmds.txt
index 83b75f0..1edd6d8 100644
--- a/src/cmd/go/testdata/script/gotoolchain_modcmds.txt
+++ b/src/cmd/go/testdata/script/gotoolchain_modcmds.txt
@@ -3,25 +3,18 @@
 
 # If the main module's go.mod file lists a version lower than the version
 # required by its dependencies, the commands that fetch and diagnose the module
-# graph (such as 'go mod download' and 'go mod graph') should fail explicitly:
+# graph (such as 'go mod graph' and 'go mod verify') should fail explicitly:
 # they can't interpret the graph themselves, and they aren't allowed to update
 # the go.mod file to record a specific, stable toolchain version that can.
 
-! go mod download rsc.io/future@v1.0.0
-stderr '^go: rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21.0\)'
-
-! go mod download rsc.io/future
-stderr '^go: rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21.0\)'
-
-! go mod download
-stderr '^go: rsc.io/future@v1.0.0: module rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21.0\)'
-
 ! go mod verify
 stderr '^go: rsc.io/future@v1.0.0: module rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21.0\)'
 
 ! go mod graph
 stderr '^go: rsc.io/future@v1.0.0: module rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21.0\)'
 
+# TODO(#64008): 'go mod download' without arguments should fail too.
+
 
 # 'go get' should update the main module's go.mod file to a version compatible with the
 # go version required for rsc.io/future, not fail.
@@ -33,8 +26,6 @@
 
 # Now, the various 'go mod' subcommands should succeed.
 
-go mod download rsc.io/future@v1.0.0
-go mod download rsc.io/future
 go mod download
 
 go mod verify
diff --git a/src/cmd/go/testdata/script/gotoolchain_net.txt b/src/cmd/go/testdata/script/gotoolchain_net.txt
index 35f6696..1d6473c 100644
--- a/src/cmd/go/testdata/script/gotoolchain_net.txt
+++ b/src/cmd/go/testdata/script/gotoolchain_net.txt
@@ -1,6 +1,11 @@
 # This test only checks that basic network lookups work.
 # The full test of toolchain version selection is in gotoolchain.txt.
 
+# This test is sensitive to "new" Go experiments, so
+# update the environment to remove any existing GOEXPERIMENT
+# setting; see #62016 for more on this.
+env GOEXPERIMENT=''
+
 env TESTGO_VERSION=go1.21actual
 
 # GOTOOLCHAIN from network, does not exist
diff --git a/src/cmd/go/testdata/script/govcs.txt b/src/cmd/go/testdata/script/govcs.txt
index dd128cc..876f606 100644
--- a/src/cmd/go/testdata/script/govcs.txt
+++ b/src/cmd/go/testdata/script/govcs.txt
@@ -84,89 +84,6 @@
 ! go get rsc.io/nonexist.hg/hello
 stderr '^go: rsc.io/nonexist.hg/hello: GOVCS disallows using hg for public rsc.io/nonexist.hg; see ''go help vcs''$'
 
-# Repeat in GOPATH mode. Error texts slightly different.
-
-env GO111MODULE=off
-
-# GOVCS stops go get
-env GOVCS='*:none'
-! go get github.com/google/go-cmp
-stderr '^package github.com/google/go-cmp: GOVCS disallows using git for public github.com/google/go-cmp; see ''go help vcs''$'
-env GOPRIVATE='github.com/google'
-! go get github.com/google/go-cmp
-stderr '^package github.com/google/go-cmp: GOVCS disallows using git for private github.com/google/go-cmp; see ''go help vcs''$'
-
-# public pattern works
-env GOPRIVATE='github.com/google'
-env GOVCS='public:all,private:none'
-! go get github.com/google/go-cmp
-stderr '^package github.com/google/go-cmp: GOVCS disallows using git for private github.com/google/go-cmp; see ''go help vcs''$'
-
-# private pattern works
-env GOPRIVATE='hubgit.com/google'
-env GOVCS='private:all,public:none'
-! go get github.com/google/go-cmp
-stderr '^package github.com/google/go-cmp: GOVCS disallows using git for public github.com/google/go-cmp; see ''go help vcs''$'
-
-# other patterns work (for more patterns, see TestGOVCS)
-env GOPRIVATE=
-env GOVCS='github.com:svn|hg'
-! go get github.com/google/go-cmp
-stderr '^package github.com/google/go-cmp: GOVCS disallows using git for public github.com/google/go-cmp; see ''go help vcs''$'
-env GOVCS='github.com/google/go-cmp/inner:git,github.com:svn|hg'
-! go get github.com/google/go-cmp
-stderr '^package github.com/google/go-cmp: GOVCS disallows using git for public github.com/google/go-cmp; see ''go help vcs''$'
-
-# bad patterns are reported (for more bad patterns, see TestGOVCSErrors)
-env GOVCS='git'
-! go get github.com/google/go-cmp
-stderr '^package github.com/google/go-cmp: malformed entry in GOVCS \(missing colon\): "git"$'
-
-env GOVCS=github.com:hg,github.com:git
-! go get github.com/google/go-cmp
-stderr '^package github.com/google/go-cmp: unreachable pattern in GOVCS: "github.com:git" after "github.com:hg"$'
-
-# bad GOVCS patterns do not stop commands that do not need to check VCS
-go list
-
-# svn is disallowed by default
-env GOPRIVATE=
-env GOVCS=
-! go get rsc.io/nonexist.svn/hello
-stderr '^package rsc.io/nonexist.svn/hello: GOVCS disallows using svn for public rsc.io/nonexist.svn; see ''go help vcs''$'
-
-# fossil is disallowed by default
-env GOPRIVATE=
-env GOVCS=
-! go get rsc.io/nonexist.fossil/hello
-stderr '^package rsc.io/nonexist.fossil/hello: GOVCS disallows using fossil for public rsc.io/nonexist.fossil; see ''go help vcs''$'
-
-# bzr is disallowed by default
-env GOPRIVATE=
-env GOVCS=
-! go get rsc.io/nonexist.bzr/hello
-stderr '^package rsc.io/nonexist.bzr/hello: GOVCS disallows using bzr for public rsc.io/nonexist.bzr; see ''go help vcs''$'
-
-# git is OK by default
-env GOVCS=
-env GONOSUMDB='*'
-[net:rsc.io] [git] [!short] go get rsc.io/sampler
-
-# hg is OK by default
-env GOVCS=
-env GONOSUMDB='*'
-[exec:hg] [!short] go get vcs-test.golang.org/go/custom-hg-hello
-
-# git can be disallowed
-env GOVCS=public:hg
-! go get rsc.io/nonexist.git/hello
-stderr '^package rsc.io/nonexist.git/hello: GOVCS disallows using git for public rsc.io/nonexist.git; see ''go help vcs''$'
-
-# hg can be disallowed
-env GOVCS=public:git
-! go get rsc.io/nonexist.hg/hello
-stderr '^package rsc.io/nonexist.hg/hello: GOVCS disallows using hg for public rsc.io/nonexist.hg; see ''go help vcs''$'
-
 -- go.mod --
 module m
 
diff --git a/src/cmd/go/testdata/script/help.txt b/src/cmd/go/testdata/script/help.txt
index 26a0194..fb15e93 100644
--- a/src/cmd/go/testdata/script/help.txt
+++ b/src/cmd/go/testdata/script/help.txt
@@ -48,4 +48,4 @@
 # go help get shows usage for get
 go help get
 stdout 'usage: go get'
-stdout 'get when using GOPATH'
+stdout 'specific module versions'
diff --git a/src/cmd/go/testdata/script/install_modcacherw_issue64282.txt b/src/cmd/go/testdata/script/install_modcacherw_issue64282.txt
new file mode 100644
index 0000000..3e1e6e5
--- /dev/null
+++ b/src/cmd/go/testdata/script/install_modcacherw_issue64282.txt
@@ -0,0 +1,45 @@
+# Regression test for https://go.dev/issue/64282.
+#
+# 'go install' and 'go run' with pkg@version arguments should make
+# a best effort to parse flags relevant to downloading modules
+# (currently only -modcacherw) before actually downloading the module
+# to identify which toolchain version to use.
+#
+# However, the best-effort flag parsing should not interfere with
+# actual flag parsing if we don't switch toolchains. In particular,
+# unrecognized flags should still be diagnosed after the module for
+# the requested package has been downloaded and checked for toolchain
+# upgrades.
+
+
+! go install -cake=delicious -modcacherw example.com/printversion@v0.1.0
+stderr '^flag provided but not defined: -cake$'
+	# Because the -modcacherw flag was set, we should be able to modify the contents
+	# of a directory within the module cache.
+cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go
+go clean -modcache
+
+
+! go install -unknownflag -tags -modcacherw example.com/printversion@v0.1.0
+stderr '^flag provided but not defined: -unknownflag$'
+cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go
+go clean -modcache
+
+
+# Also try it with a 'go install' that succeeds.
+# (But skip in short mode, because linking a binary is expensive.)
+[!short] go install -modcacherw example.com/printversion@v0.1.0
+[!short] cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go
+[!short] go clean -modcache
+
+
+# The flag should also be applied if given in GOFLAGS
+# instead of on the command line.
+env GOFLAGS=-modcacherw
+! go install -cake=delicious example.com/printversion@v0.1.0
+stderr '^flag provided but not defined: -cake$'
+cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go
+
+
+-- $WORK/extraneous.txt --
+This is not a Go source file.
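
To make the "best effort to parse flags relevant to downloading modules" concrete: before deciding whether to switch toolchains, the go command only needs to know whether -modcacherw was requested, not to validate every flag. The sketch below is a minimal, hypothetical illustration of such a pre-scan (scanModcacheRW and its exact tolerance for flag spellings are assumptions for illustration, not the cmd/go implementation).

package main

import (
	"fmt"
	"os"
	"strings"
)

// scanModcacheRW reports whether -modcacherw appears among args, tolerating
// a single or double dash and an explicit =true. Everything else, including
// unknown flags like -cake, is ignored here and left for the real flag
// parser to diagnose later.
func scanModcacheRW(args []string) bool {
	for _, a := range args {
		a = strings.TrimPrefix(a, "-")
		a = strings.TrimPrefix(a, "-")
		if a == "modcacherw" || a == "modcacherw=true" {
			return true
		}
	}
	return false
}

func main() {
	// e.g. go install -cake=delicious -modcacherw example.com/printversion@v0.1.0
	fmt.Println(scanModcacheRW(os.Args[1:]))
}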
diff --git a/src/cmd/go/testdata/script/list_export_e.txt b/src/cmd/go/testdata/script/list_export_e.txt
index 8e4c361..6d5dd39 100644
--- a/src/cmd/go/testdata/script/list_export_e.txt
+++ b/src/cmd/go/testdata/script/list_export_e.txt
@@ -6,6 +6,10 @@
 ! stderr '.'
 stdout '^# example.com/p2\np2'${/}'main\.go:7:.*'
 
+go list -export -e -f '{{.ImportPath}} -- {{.Incomplete}} -- {{.Error}}' ./...
+stdout 'example.com/p1 -- false -- <nil>'
+stdout 'example.com/p2 -- true -- # example.com/p2'
+
 go list -e -export -json=Error ./...
 stdout '"Err": "# example.com/p2'
 
diff --git a/src/cmd/go/testdata/script/list_pkgconfig_error.txt b/src/cmd/go/testdata/script/list_pkgconfig_error.txt
index 7d671a6..f554d2a 100644
--- a/src/cmd/go/testdata/script/list_pkgconfig_error.txt
+++ b/src/cmd/go/testdata/script/list_pkgconfig_error.txt
@@ -2,7 +2,7 @@
 [!exec:pkg-config] skip 'test requires pkg-config tool'
 
 ! go list -export .
-stderr '^go build example:\n# pkg-config (.*\n)+pkg-config: exit status \d+$'
+stderr '^# example\n# \[pkg-config .*\]\n(.*\n)*Package .* not found'
 
 -- go.mod --
 module example
diff --git a/src/cmd/go/testdata/script/malformed_gosum_issue62345.txt b/src/cmd/go/testdata/script/malformed_gosum_issue62345.txt
new file mode 100644
index 0000000..23c41be
--- /dev/null
+++ b/src/cmd/go/testdata/script/malformed_gosum_issue62345.txt
@@ -0,0 +1,51 @@
+! go mod download
+stderr '^malformed go.sum:\n.*go.sum:3: wrong number of fields 5\n$'
+
+go mod tidy
+cmp go.sum go.sum.after-tidy
+
+-- go.mod --
+module m
+
+go 1.20
+
+require rsc.io/quote v1.5.2
+
+require (
+	golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect
+	rsc.io/sampler v1.3.0 // indirect
+	rsc.io/testonly v1.0.0 // indirect
+)
+
+-- go.sum --
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:pvCbr/wm8HzDD3fVywevekufpn6tCGPY3spdHeZJEsw=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+rsc.io/quote v1.5.2 # invalid line
+rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
+rsc.io/sampler v1.3.0 h1:HLGR/BgEtI3r0uymSP/nl2uPLsUnNJX8toRyhfpBTII=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+rsc.io/testonly v1.0.0 h1:K/VWHdO+Jv7woUXG0GzVNx1czBXUt3Ib1deaMn+xk64=
+rsc.io/testonly v1.0.0/go.mod h1:OqmGbIFOcF+XrFReLOGZ6BhMM7uMBiQwZsyNmh74SzY=
+
+-- main.go --
+package main
+
+import (
+	"fmt"
+
+	"rsc.io/quote"
+)
+
+func main() {
+	fmt.Println(quote.Hello())
+}
+
+-- go.sum.after-tidy --
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:pvCbr/wm8HzDD3fVywevekufpn6tCGPY3spdHeZJEsw=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+rsc.io/quote v1.5.2 h1:3fEykkD9k7lYzXqCYrwGAf7iNhbk4yCjHmKBN9td4L0=
+rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
+rsc.io/sampler v1.3.0 h1:HLGR/BgEtI3r0uymSP/nl2uPLsUnNJX8toRyhfpBTII=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+rsc.io/testonly v1.0.0 h1:K/VWHdO+Jv7woUXG0GzVNx1czBXUt3Ib1deaMn+xk64=
+rsc.io/testonly v1.0.0/go.mod h1:OqmGbIFOcF+XrFReLOGZ6BhMM7uMBiQwZsyNmh74SzY=
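
The "wrong number of fields 5" message follows from the go.sum line format: each non-blank line is exactly three whitespace-separated fields (module path, version optionally suffixed with /go.mod, and hash), so the commented line above splits into five fields. A small sketch of that shape check, as an assumption-level illustration rather than the actual module code:

package main

import (
	"fmt"
	"strings"
)

// checkGoSumLine enforces the three-field shape of a go.sum line.
func checkGoSumLine(lineno int, line string) error {
	if strings.TrimSpace(line) == "" {
		return nil // blank lines are tolerated
	}
	f := strings.Fields(line)
	if len(f) != 3 {
		return fmt.Errorf("go.sum:%d: wrong number of fields %d", lineno, len(f))
	}
	return nil
}

func main() {
	fmt.Println(checkGoSumLine(3, "rsc.io/quote v1.5.2 # invalid line"))
	// go.sum:3: wrong number of fields 5
}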
diff --git a/src/cmd/go/testdata/script/mod_convert.txt b/src/cmd/go/testdata/script/mod_convert.txt
deleted file mode 100644
index 922d924..0000000
--- a/src/cmd/go/testdata/script/mod_convert.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-[!net:github.com] skip
-[!net:golang.org] skip
-[!net:gopkg.in] skip
-[!git] skip
-
-env GO111MODULE=on
-env GOPROXY=
-env GOSUMDB=
-
-go mod download github.com/docker/distribution@v0.0.0-20150410205453-85de3967aa93
-mkdir x/Godeps
-cp $GOPATH/pkg/mod/github.com/docker/distribution@v0.0.0-20150410205453-85de3967aa93/Godeps/Godeps.json x/Godeps
-cd x
-go mod init github.com/docker/distribution
-cmpenv go.mod go.mod.want
-
-[!net:google.golang.org] skip
-[!net:cloud.google.com] skip
-
-go mod download github.com/fishy/gcsbucket@v0.0.0-20180217031846-618d60fe84e0
-cp $GOPATH/pkg/mod/github.com/fishy/gcsbucket@v0.0.0-20180217031846-618d60fe84e0/Gopkg.lock ../y
-cd ../y
-go mod init github.com/fishy/gcsbucket
-cmpenv go.mod go.mod.want
-
--- x/go.mod.want --
-module github.com/docker/distribution
-
-go $goversion
-
-require (
-	github.com/AdRoll/goamz v0.0.0-20150130162828-d3664b76d905
-	github.com/MSOpenTech/azure-sdk-for-go v0.0.0-20150323223030-d90753bcad2e
-	github.com/Sirupsen/logrus v0.7.3
-	github.com/bugsnag/bugsnag-go v1.0.3-0.20141110184014-b1d153021fcd
-	github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b
-	github.com/bugsnag/panicwrap v0.0.0-20141110184334-e5f9854865b9
-	github.com/codegangsta/cli v1.4.2-0.20150131031259-6086d7927ec3
-	github.com/docker/docker v1.4.2-0.20150204013315-165ea5c158cf
-	github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1
-	github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7
-	github.com/gorilla/context v0.0.0-20140604161150-14f550f51af5
-	github.com/gorilla/handlers v0.0.0-20140825150757-0e84b7d810c1
-	github.com/gorilla/mux v0.0.0-20140926153814-e444e69cbd2e
-	github.com/jlhawn/go-crypto v0.0.0-20150401213827-cd738dde20f0
-	github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43
-	github.com/yvasiyarov/gorelic v0.0.7-0.20141212073537-a9bba5b9ab50
-	github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f
-	golang.org/x/net v0.0.0-20150202051010-1dfe7915deaf
-	gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789
-	gopkg.in/yaml.v2 v2.0.0-20150116202057-bef53efd0c76
-)
--- y/go.mod.want --
-module github.com/fishy/gcsbucket
-
-go $goversion
-
-require (
-	cloud.google.com/go v0.18.0
-	github.com/fishy/fsdb v0.0.0-20180217030800-5527ded01371
-	github.com/golang/protobuf v1.0.0
-	github.com/googleapis/gax-go v2.0.0+incompatible
-	golang.org/x/net v0.0.0-20180216171745-136a25c244d3
-	golang.org/x/oauth2 v0.0.0-20180207181906-543e37812f10
-	golang.org/x/text v0.3.1-0.20180208041248-4e4a3210bb54
-	google.golang.org/api v0.0.0-20180217000815-c7a403bb5fe1
-	google.golang.org/appengine v1.0.0
-	google.golang.org/genproto v0.0.0-20180206005123-2b5a72b8730b
-	google.golang.org/grpc v1.10.0
-)
diff --git a/src/cmd/go/testdata/script/mod_convert_dep.txt b/src/cmd/go/testdata/script/mod_convert_dep.txt
deleted file mode 100644
index 875a836..0000000
--- a/src/cmd/go/testdata/script/mod_convert_dep.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-env GO111MODULE=on
-
-# We should not create a go.mod file unless the user ran 'go mod init' explicitly.
-# However, we should suggest 'go mod init' if we can find an alternate config file.
-cd $WORK/test/x
-! go list .
-stderr 'found Gopkg.lock in .*[/\\]test'
-stderr '\s*cd \.\. && go mod init'
-
-# The command we suggested should succeed.
-cd ..
-go mod init
-go list -m all
-stdout '^m$'
-
-# In Plan 9, directories are automatically created in /n.
-# For example, /n/Gopkg.lock always exists, but it's a directory.
-# Test that we ignore directories when trying to find alternate config files.
-cd $WORK/gopkgdir/x
-! go list .
-stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$'
-! stderr 'Gopkg.lock'
-
--- $WORK/test/Gopkg.lock --
--- $WORK/test/x/x.go --
-package x // import "m/x"
--- $WORK/gopkgdir/Gopkg.lock/README.txt --
-../Gopkg.lock is a directory, not a file.
--- $WORK/gopkgdir/x/x.go --
-package x // import "m/x"
diff --git a/src/cmd/go/testdata/script/mod_convert_glide.txt b/src/cmd/go/testdata/script/mod_convert_glide.txt
deleted file mode 100644
index 9f1fff5..0000000
--- a/src/cmd/go/testdata/script/mod_convert_glide.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-env GO111MODULE=on
-
-# We should not create a go.mod file unless the user ran 'go mod init' explicitly.
-# However, we should suggest 'go mod init' if we can find an alternate config file.
-cd $WORK/test/x
-! go list .
-stderr 'found glide.lock in .*[/\\]test'
-stderr '\s*cd \.\. && go mod init'
-
-# The command we suggested should succeed.
-cd ..
-go mod init
-go list -m all
-stdout '^m$'
-
--- $WORK/test/glide.lock --
--- $WORK/test/x/x.go --
-package x // import "m/x"
diff --git a/src/cmd/go/testdata/script/mod_convert_glockfile.txt b/src/cmd/go/testdata/script/mod_convert_glockfile.txt
deleted file mode 100644
index 6aa0794..0000000
--- a/src/cmd/go/testdata/script/mod_convert_glockfile.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-env GO111MODULE=on
-
-# We should not create a go.mod file unless the user ran 'go mod init' explicitly.
-# However, we should suggest 'go mod init' if we can find an alternate config file.
-cd $WORK/test/x
-! go list .
-stderr 'found GLOCKFILE in .*[/\\]test'
-stderr '\s*cd \.\. && go mod init'
-
-# The command we suggested should succeed.
-cd ..
-go mod init
-go list -m all
-stdout '^m$'
-
--- $WORK/test/GLOCKFILE --
--- $WORK/test/x/x.go --
-package x // import "m/x"
diff --git a/src/cmd/go/testdata/script/mod_convert_godeps.txt b/src/cmd/go/testdata/script/mod_convert_godeps.txt
deleted file mode 100644
index da7b6c1..0000000
--- a/src/cmd/go/testdata/script/mod_convert_godeps.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-env GO111MODULE=on
-
-# We should not create a go.mod file unless the user ran 'go mod init' explicitly.
-# However, we should suggest 'go mod init' if we can find an alternate config file.
-cd $WORK/test/x
-! go list .
-stderr 'found Godeps/Godeps.json in .*[/\\]test'
-stderr '\s*cd \.\. && go mod init'
-
-# The command we suggested should succeed.
-cd ..
-go mod init
-go list -m all
-stdout '^m$'
-
--- $WORK/test/Godeps/Godeps.json --
-{}
--- $WORK/test/x/x.go --
-package x // import "m/x"
diff --git a/src/cmd/go/testdata/script/mod_convert_tsv.txt b/src/cmd/go/testdata/script/mod_convert_tsv.txt
deleted file mode 100644
index 6015ac8..0000000
--- a/src/cmd/go/testdata/script/mod_convert_tsv.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-env GO111MODULE=on
-
-# We should not create a go.mod file unless the user ran 'go mod init' explicitly.
-# However, we should suggest 'go mod init' if we can find an alternate config file.
-cd $WORK/test/x
-! go list .
-stderr 'found dependencies.tsv in .*[/\\]test'
-stderr '\s*cd \.\. && go mod init'
-
-# The command we suggested should succeed.
-cd ..
-go mod init
-go list -m all
-stdout '^m$'
-
--- $WORK/test/dependencies.tsv --
--- $WORK/test/x/x.go --
-package x // import "m/x"
diff --git a/src/cmd/go/testdata/script/mod_convert_tsv_insecure.txt b/src/cmd/go/testdata/script/mod_convert_tsv_insecure.txt
deleted file mode 100644
index 6ff6993..0000000
--- a/src/cmd/go/testdata/script/mod_convert_tsv_insecure.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-env GO111MODULE=on
-env GOPROXY=direct
-env GOSUMDB=off
-
-[short] skip
-[!git] skip
-
-# secure fetch should report insecure warning
-cd $WORK/test
-go mod init
-stderr 'redirected .* to insecure URL'
-
-# insecure fetch should not
-env GOINSECURE=*.golang.org
-rm go.mod
-go mod init
-! stderr 'redirected .* to insecure URL'
-
-# insecure fetch invalid path should report insecure warning
-env GOINSECURE=foo.golang.org
-rm go.mod
-go mod init
-stderr 'redirected .* to insecure URL'
-
--- $WORK/test/dependencies.tsv --
-vcs-test.golang.org/insecure/go/insecure	git	6fecd21f7c0c	2019-09-04T18:39:48Z 
-
--- $WORK/test/x.go --
-package x // import "m"
diff --git a/src/cmd/go/testdata/script/mod_convert_vendor_conf.txt b/src/cmd/go/testdata/script/mod_convert_vendor_conf.txt
deleted file mode 100644
index 57ec419..0000000
--- a/src/cmd/go/testdata/script/mod_convert_vendor_conf.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-env GO111MODULE=on
-
-# We should not create a go.mod file unless the user ran 'go mod init' explicitly.
-# However, we should suggest 'go mod init' if we can find an alternate config file.
-cd $WORK/test/x
-! go list .
-stderr 'found vendor.conf in .*[/\\]test'
-stderr '\s*cd \.\. && go mod init'
-
-# The command we suggested should succeed.
-cd ..
-go mod init
-go list -m all
-stdout '^m$'
-
--- $WORK/test/vendor.conf --
--- $WORK/test/x/x.go --
-package x // import "m/x"
diff --git a/src/cmd/go/testdata/script/mod_convert_vendor_json.txt b/src/cmd/go/testdata/script/mod_convert_vendor_json.txt
deleted file mode 100644
index df6db36..0000000
--- a/src/cmd/go/testdata/script/mod_convert_vendor_json.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-env GO111MODULE=on
-
-# We should not create a go.mod file unless the user ran 'go mod init' explicitly.
-# However, we should suggest 'go mod init' if we can find an alternate config file.
-cd $WORK/test/x
-! go list .
-stderr 'found vendor/vendor.json in .*[/\\]test'
-stderr '\s*cd \.\. && go mod init'
-
-# The command we suggested should succeed.
-cd ..
-go mod init
-go list -m
-stdout '^m$'
-
--- $WORK/test/vendor/vendor.json --
-{}
--- $WORK/test/x/x.go --
-package x // import "m/x"
diff --git a/src/cmd/go/testdata/script/mod_convert_vendor_manifest.txt b/src/cmd/go/testdata/script/mod_convert_vendor_manifest.txt
deleted file mode 100644
index 8b6a141..0000000
--- a/src/cmd/go/testdata/script/mod_convert_vendor_manifest.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-env GO111MODULE=on
-
-# We should not create a go.mod file unless the user ran 'go mod init' explicitly.
-# However, we should suggest 'go mod init' if we can find an alternate config file.
-cd $WORK/test/x
-! go list .
-stderr 'found vendor/manifest in .*[/\\]test'
-stderr '\s*cd \.\. && go mod init'
-
-# The command we suggested should succeed.
-cd ..
-go mod init
-go list -m
-stdout '^m$'
-
--- $WORK/test/vendor/manifest --
-{}
--- $WORK/test/x/x.go --
-package x // import "m/x"
diff --git a/src/cmd/go/testdata/script/mod_convert_vendor_yml.txt b/src/cmd/go/testdata/script/mod_convert_vendor_yml.txt
deleted file mode 100644
index 4ed140a..0000000
--- a/src/cmd/go/testdata/script/mod_convert_vendor_yml.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-env GO111MODULE=on
-
-# We should not create a go.mod file unless the user ran 'go mod init' explicitly.
-# However, we should suggest 'go mod init' if we can find an alternate config file.
-cd $WORK/test/x
-! go list .
-stderr 'found vendor.yml in .*[/\\]test'
-stderr '\s*cd \.\. && go mod init'
-
-# The command we suggested should succeed.
-cd ..
-go mod init
-go list -m all
-stdout '^m$'
-
--- $WORK/test/vendor.yml --
--- $WORK/test/x/x.go --
-package x // import "m/x"
diff --git a/src/cmd/go/testdata/script/mod_download_exec_toolchain.txt b/src/cmd/go/testdata/script/mod_download_exec_toolchain.txt
new file mode 100644
index 0000000..6cf863b
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_download_exec_toolchain.txt
@@ -0,0 +1,107 @@
+env TESTGO_VERSION=go1.21
+env TESTGO_VERSION_SWITCH=switch
+
+# First, test 'go mod download' outside of a module.
+#
+# There is no go.mod file into which we can record the selected toolchain,
+# so unfortunately these version switches won't be as reproducible as other
+# go commands, but that's still preferable to failing entirely or downloading
+# a module zip that we don't understand.
+
+# GOTOOLCHAIN=auto should run the newer toolchain
+env GOTOOLCHAIN=auto
+go mod download rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
+stderr '^go: rsc.io/needall@v0.0.1 requires go >= 1.23; switching to go1.23.9$'
+! stderr '\(running'
+
+# GOTOOLCHAIN=min+auto should run the newer toolchain
+env GOTOOLCHAIN=go1.21+auto
+go mod download rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
+stderr '^go: rsc.io/needall@v0.0.1 requires go >= 1.23; switching to go1.23.9$'
+! stderr '\(running'
+
+# GOTOOLCHAIN=go1.21 should NOT run the newer toolchain
+env GOTOOLCHAIN=go1.21
+! go mod download rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
+! stderr switching
+stderr 'rsc.io/needgo122@v0.0.1 requires go >= 1.22'
+stderr 'rsc.io/needgo123@v0.0.1 requires go >= 1.23'
+stderr 'rsc.io/needall@v0.0.1 requires go >= 1.23'
+stderr 'requires go >= 1.23'
+! stderr 'requires go >= 1.21' # that's us!
+
+
+# JSON output should be emitted exactly once,
+# and non-JSON output should go to stderr instead of stdout.
+env GOTOOLCHAIN=auto
+go mod download -json rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
+stderr '^go: rsc.io/needall@v0.0.1 requires go >= 1.23; switching to go1.23.9$'
+! stderr '\(running'
+stdout -count=1 '"Path": "rsc.io/needgo121",'
+stdout -count=1 '"Path": "rsc.io/needgo122",'
+stdout -count=1 '"Path": "rsc.io/needgo123",'
+stdout -count=1 '"Path": "rsc.io/needall",'
+
+# GOTOOLCHAIN=go1.21 should write the errors in the JSON Error fields, not to stderr.
+env GOTOOLCHAIN=go1.21
+! go mod download -json rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
+! stderr switching
+stdout -count=1 '"Error": "rsc.io/needgo122@v0.0.1 requires go .*= 1.22 \(running go 1.21; GOTOOLCHAIN=go1.21\)"'
+stdout -count=1 '"Error": "rsc.io/needgo123@v0.0.1 requires go .*= 1.23 \(running go 1.21; GOTOOLCHAIN=go1.21\)"'
+stdout -count=1 '"Error": "rsc.io/needall@v0.0.1 requires go .*= 1.23 \(running go 1.21; GOTOOLCHAIN=go1.21\)"'
+! stdout '"Error": "rsc.io/needgo121'  # We can handle this one.
+! stderr .
+
+
+# Within a module, 'go mod download' of explicit versions should upgrade if
+# needed to perform the download, but should not change the main module's
+# toolchain version (because the downloaded modules are still not required by
+# the main module).
+
+cd example
+cp go.mod go.mod.orig
+
+env GOTOOLCHAIN=auto
+go mod download rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
+stderr '^go: rsc.io/needall@v0.0.1 requires go >= 1.23; switching to go1.23.9$'
+! stderr '\(running'
+cmp go.mod go.mod.orig
+
+
+# However, 'go mod download' without arguments should fix up the
+# 'go' and 'toolchain' lines to be consistent with the existing
+# requirements in the module graph.
+
+go mod edit -require=rsc.io/needall@v0.0.1
+cp go.mod go.mod.121
+
+# If an upgrade is needed, GOTOOLCHAIN=go1.21 should cause
+# the command to fail without changing go.mod.
+
+env GOTOOLCHAIN=go1.21
+! go mod download
+stderr 'rsc.io/needall@v0.0.1 requires go >= 1.23'
+! stderr switching
+cmp go.mod go.mod.121
+
+# If an upgrade is needed, GOTOOLCHAIN=auto should perform
+# the upgrade and record the resulting toolchain version.
+
+env GOTOOLCHAIN=auto
+go mod download
+stderr '^go: module rsc.io/needall@v0.0.1 requires go >= 1.23; switching to go1.23.9$'
+cmp go.mod go.mod.final
+
+
+-- example/go.mod --
+module example
+
+go 1.21
+-- example/go.mod.final --
+module example
+
+go 1.23
+
+toolchain go1.23.9
+
+require rsc.io/needall v0.0.1
diff --git a/src/cmd/go/testdata/script/mod_download_git_bareRepository.txt b/src/cmd/go/testdata/script/mod_download_git_bareRepository.txt
index 8050461..a61283c 100644
--- a/src/cmd/go/testdata/script/mod_download_git_bareRepository.txt
+++ b/src/cmd/go/testdata/script/mod_download_git_bareRepository.txt
@@ -1,8 +1,14 @@
 [short] skip
 [!git] skip
-[!GOOS:linux] skip  # Uses XDG_CONFIG_HOME
 
-env GIT_CONFIG_GLOBAL=$WORK/.gitconfig
+# Redirect git to a test-specific .gitconfig.
+# GIT_CONFIG_GLOBAL suffices for git 2.32.0 and newer.
+# For older git versions we also set $HOME.
+env GIT_CONFIG_GLOBAL=$WORK${/}home${/}gopher${/}.gitconfig
+env HOME=$WORK${/}home${/}gopher
+exec git config --global --show-origin user.name
+stdout 'Go Gopher'
+
 env GOPRIVATE=vcs-test.golang.org
 
 go mod download -x
@@ -14,6 +20,9 @@
 
 require vcs-test.golang.org/git/gitrepo1.git v1.2.3
 
--- $WORK/.gitconfig --
+-- $WORK/home/gopher/.gitconfig --
+[user]
+	name = Go Gopher
+	email = gopher@golang.org
 [safe]
-bareRepository = explicit
+	bareRepository = explicit
diff --git a/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt b/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt
index 080ccf0..9afd347 100644
--- a/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt
+++ b/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt
@@ -3,12 +3,15 @@
 [short] skip
 [!git] skip
 
+# Redirect git to a test-specific .gitconfig.
+# GIT_CONFIG_GLOBAL suffices for git 2.32.0 and newer.
+# For older git versions we also set $HOME.
+env GIT_CONFIG_GLOBAL=$WORK${/}home${/}gopher${/}.gitconfig
+env HOME=$WORK${/}home${/}gopher
+exec git config --global --show-origin user.name
+stdout 'Go Gopher'
+
 env GOPROXY=direct
-env HOME=$WORK/home/gopher
-
-
-go env GOPROXY
-stdout 'direct'
 
 exec git config --get log.decorate
 stdout 'full'
@@ -24,5 +27,8 @@
 stdout 'vcs-test.golang.org/git/gitrepo1.git v1.2.3'
 
 -- $WORK/home/gopher/.gitconfig --
+[user]
+	name = Go Gopher
+	email = gopher@golang.org
 [log]
 	decorate = full
diff --git a/src/cmd/go/testdata/script/mod_download_issue51114.txt b/src/cmd/go/testdata/script/mod_download_issue51114.txt
index 4d274d6..a28d467 100644
--- a/src/cmd/go/testdata/script/mod_download_issue51114.txt
+++ b/src/cmd/go/testdata/script/mod_download_issue51114.txt
@@ -1,8 +1,14 @@
 [!net:github.com] skip
 [!git] skip
-[!GOOS:linux] skip  # Uses XDG_CONFIG_HOME
 
-env GIT_CONFIG_GLOBAL=$WORK/.gitconfig
+# Redirect git to a test-specific .gitconfig.
+# GIT_CONFIG_GLOBAL suffices for git 2.32.0 and newer.
+# For older git versions we also set $HOME.
+env GIT_CONFIG_GLOBAL=$WORK${/}home${/}gopher${/}.gitconfig
+env HOME=$WORK${/}home${/}gopher
+exec git config --global --show-origin user.name
+stdout 'Go Gopher'
+
 env GOPROXY=direct
 
 ! go mod download
@@ -15,6 +21,9 @@
 
 require github.com/golang/notexist/subdir v0.1.0
 
--- $WORK/.gitconfig --
+-- $WORK/home/gopher/.gitconfig --
+[user]
+	name = Go Gopher
+	email = gopher@golang.org
 [url "git@github.com:"]
 	insteadOf = https://github.com/
diff --git a/src/cmd/go/testdata/script/mod_download_private_vcs.txt b/src/cmd/go/testdata/script/mod_download_private_vcs.txt
index 2f72a42..5c8d93a 100644
--- a/src/cmd/go/testdata/script/mod_download_private_vcs.txt
+++ b/src/cmd/go/testdata/script/mod_download_private_vcs.txt
@@ -5,6 +5,14 @@
 [!git] skip
 env GOPROXY=direct
 
+# Redirect git to a test-specific .gitconfig.
+# GIT_CONFIG_GLOBAL suffices for git 2.32.0 and newer.
+# For older git versions we also set $HOME.
+env GIT_CONFIG_GLOBAL=$WORK${/}home${/}gopher${/}.gitconfig
+env HOME=$WORK${/}home${/}gopher
+exec git config --global --show-origin user.name
+stdout 'Go Gopher'
+
 ! go mod download github.com/golang/nonexist@latest
 stderr 'Confirm the import path was entered correctly.'
 stderr 'If this is a private repository, see https://golang.org/doc/faq#git_https for additional information.'
@@ -27,7 +35,7 @@
 # Test that Git clone errors will be shown to the user instead of a generic
 # "unknown revision" error. To do this we want to force git ls-remote to return
 # an error we don't already have special handling for. See golang/go#42751.
-env HOME=$WORK${/}home${/}gopher
+exec git config --global url.git@github.com.insteadOf https://github.com/
 env GIT_SSH_COMMAND=false
 ! go install github.com/golang/nonexist@master
 stderr 'fatal: Could not read from remote repository.'
@@ -35,5 +43,6 @@
 ! stdout .
 
 -- $WORK/home/gopher/.gitconfig --
-[url "git@github.com:"]
-	insteadOf = https://github.com/
+[user]
+	name = Go Gopher
+	email = gopher@golang.org
diff --git a/src/cmd/go/testdata/script/mod_edit.txt b/src/cmd/go/testdata/script/mod_edit.txt
index ebc032a..2d09b06 100644
--- a/src/cmd/go/testdata/script/mod_edit.txt
+++ b/src/cmd/go/testdata/script/mod_edit.txt
@@ -61,6 +61,10 @@
 cmpenv go.mod $WORK/go.mod.edit4
 go mod edit -dropreplace=x.1
 cmpenv go.mod $WORK/go.mod.edit5
+go mod edit -replace=x.1=../y.1/@v2
+cmpenv go.mod $WORK/go.mod.edit6
+! go mod edit -replace=x.1=y.1/@v2
+stderr '^go: -replace=x.1=y.1/@v2: invalid new path: malformed import path "y.1/": trailing slash$'
 
 # go mod edit -fmt
 cp $WORK/go.mod.badfmt go.mod
@@ -218,6 +222,21 @@
 )
 
 require x.3 v1.99.0
+-- $WORK/go.mod.edit6 --
+module x.x/y/z
+
+go $goversion
+
+exclude x.1 v1.2.0
+
+retract (
+	v1.6.0
+	[v1.3.0, v1.4.0]
+)
+
+require x.3 v1.99.0
+
+replace x.1 => ../y.1/@v2
 -- $WORK/local/go.mod.edit --
 module local-only
 
diff --git a/src/cmd/go/testdata/script/mod_get_future.txt b/src/cmd/go/testdata/script/mod_get_future.txt
index 72c0b97..3f1c777 100644
--- a/src/cmd/go/testdata/script/mod_get_future.txt
+++ b/src/cmd/go/testdata/script/mod_get_future.txt
@@ -1,6 +1,7 @@
 env TESTGO_VERSION=go1.21
+env GOTOOLCHAIN=local
 ! go mod download rsc.io/future@v1.0.0
-stderr '^go: rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21\)$'
+stderr '^go: rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21; GOTOOLCHAIN=local\)$'
 
 -- go.mod --
 module m
diff --git a/src/cmd/go/testdata/script/mod_get_issue47650.txt b/src/cmd/go/testdata/script/mod_get_issue47650.txt
new file mode 100644
index 0000000..8561b21
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_get_issue47650.txt
@@ -0,0 +1,29 @@
+# Regression test for https://go.dev/issue/47650:
+# 'go get' with a pseudo-version of a non-root package within a module
+# erroneously rejected the pseudo-version as invalid, because it did not fetch
+# enough commit history to validate the pseudo-version base.
+
+[short] skip 'creates and uses a git repository'
+[!git] skip
+
+env GOPRIVATE=vcs-test.golang.org
+
+# If we request a package in a subdirectory of a module by commit hash, we
+# successfully resolve it to a pseudo-version derived from a tag on the parent
+# commit.
+cp go.mod go.mod.orig
+go get -x vcs-test.golang.org/git/issue47650.git/cmd/issue47650@21535ef346c3
+stderr '^go: added vcs-test.golang.org/git/issue47650.git v0.1.1-0.20210811175200-21535ef346c3$'
+
+# Explicitly requesting that same version should succeed, fetching additional
+# history for the requested commit as needed in order to validate the
+# pseudo-version base.
+go clean -modcache
+cp go.mod.orig go.mod
+go get -x vcs-test.golang.org/git/issue47650.git/cmd/issue47650@v0.1.1-0.20210811175200-21535ef346c3
+stderr '^go: added vcs-test.golang.org/git/issue47650.git v0.1.1-0.20210811175200-21535ef346c3$'
+
+-- go.mod --
+module example
+
+go 1.20
diff --git a/src/cmd/go/testdata/script/mod_init_dep.txt b/src/cmd/go/testdata/script/mod_init_dep.txt
deleted file mode 100644
index 76b4867..0000000
--- a/src/cmd/go/testdata/script/mod_init_dep.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-env GO111MODULE=on
-env GOFLAGS=-mod=mod
-
-# go mod init should populate go.mod from Gopkg.lock
-go mod init x
-stderr 'copying requirements from Gopkg.lock'
-go list -m all
-stdout 'rsc.io/sampler v1.0.0'
-
-# test dep replacement
-cd y
-go mod init
-cmpenv go.mod go.mod.replace
-
--- x.go --
-package x
-
--- Gopkg.lock --
-[[projects]]
-  name = "rsc.io/sampler"
-  version = "v1.0.0"
-
--- y/Gopkg.lock --
-[[projects]]
-  name = "z"
-  revision = "v1.0.0"
-  source = "rsc.io/quote"
-
--- y/y.go --
-package y // import "y"
-import _ "z"
-
--- y/go.mod.replace --
-module y
-
-go $goversion
-
-replace z v1.0.0 => rsc.io/quote v1.0.0
-
-require rsc.io/quote v1.0.0
diff --git a/src/cmd/go/testdata/script/mod_init_glide.txt b/src/cmd/go/testdata/script/mod_init_glide.txt
deleted file mode 100644
index 0d087eb..0000000
--- a/src/cmd/go/testdata/script/mod_init_glide.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-[!net:github.com] skip
-[!git] skip
-
-env GO111MODULE=on
-env GOPROXY=direct
-env GOSUMDB=
-
-# Regression test for golang.org/issue/32161:
-# 'go mod init' did not locate tags when resolving a commit to a pseudo-version.
-go mod init x
-cmpenv go.mod go.mod.out
-
--- main.go --
-package main
-
-import (
-	_ "github.com/rsc/legacytest"
-)
-
-func main() {}
-
--- glide.lock --
-imports:
-- name: github.com/rsc/legacytest
-  version: fb3c628075e32f7f3c248a3abbdafd69ad6e21e1
-
--- glide.yaml --
-package: x
-
--- go.mod.out --
-module x
-
-go $goversion
-
-require github.com/rsc/legacytest v1.1.0-pre.0.20180717164849-fb3c628075e3
diff --git a/src/cmd/go/testdata/script/mod_insecure_issue63845.txt b/src/cmd/go/testdata/script/mod_insecure_issue63845.txt
new file mode 100644
index 0000000..c051c05
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_insecure_issue63845.txt
@@ -0,0 +1,28 @@
+# Regression test for https://go.dev/issue/63845:
+# If 'git ls-remote' fails for all secure protocols,
+# we should fail instead of falling back to an arbitrary protocol.
+#
+# Note that this test does not use the local vcweb test server
+# (vcs-test.golang.org), because the hook for redirecting to that
+# server bypasses the "ping to determine protocol" logic
+# in cmd/go/internal/vcs.
+
+[!net:golang.org] skip
+[!git] skip
+[short] skip 'tries to access a nonexistent external Git repo'
+
+env GOPRIVATE=golang.org
+env CURLOPT_TIMEOUT_MS=100
+env GIT_SSH_COMMAND=false
+
+! go get -x golang.org/nonexist.git@latest
+stderr '^git ls-remote https://golang.org/nonexist$'
+stderr '^git ls-remote git\+ssh://golang.org/nonexist'
+stderr '^git ls-remote ssh://golang.org/nonexist$'
+! stderr 'git://'
+stderr '^go: golang.org/nonexist.git@latest: no secure protocol found for repository$'
+
+-- go.mod --
+module example
+
+go 1.19
diff --git a/src/cmd/go/testdata/script/mod_list_issue61415.txt b/src/cmd/go/testdata/script/mod_list_issue61415.txt
new file mode 100644
index 0000000..e763fae
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_list_issue61415.txt
@@ -0,0 +1,76 @@
+[short] skip 'generates a vcstest git repo'
+[!git] skip
+
+env GOPROXY=direct
+
+# Control case: fetching a nested module at a tag that exists should
+# emit Origin metadata for that tag and commit, and the origin should
+# be reusable for that tag.
+
+go list -json -m --versions -e vcs-test.golang.org/git/issue61415.git/nested@has-nested
+cp stdout has-nested.json
+stdout '"Origin":'
+stdout '"VCS": "git"'
+stdout '"URL":'  # randomly-chosen vcweb localhost URL
+stdout '"Subdir": "nested"'
+stdout '"TagPrefix": "nested/"'
+stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="'
+stdout '"Ref": "refs/tags/has-nested"'
+stdout '"Hash": "08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a"'
+
+go list -reuse=has-nested.json -json -m --versions -e vcs-test.golang.org/git/issue61415.git/nested@has-nested
+stdout '"Origin":'
+stdout '"VCS": "git"'
+stdout '"URL":'  # randomly-chosen vcweb localhost URL
+stdout '"Subdir": "nested"'
+stdout '"TagPrefix": "nested/"'
+stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="'
+stdout '"Ref": "refs/tags/has-nested"'
+stdout '"Hash": "08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a"'
+stdout '"Reuse": true'
+
+
+# Experiment case: if the nested module doesn't exist at "latest",
+# the Origin metadata should include the ref that we tried to resolve
+# (HEAD for a repo without version tags) and the hash to which it refers,
+# so that changing the HEAD ref will invalidate the result.
+
+go list -json -m --versions -e vcs-test.golang.org/git/issue61415.git/nested@latest
+cp stdout no-nested.json
+stdout '"Err": "module vcs-test.golang.org/git/issue61415.git/nested: no matching versions for query \\"latest\\""'
+stdout '"URL":'  # randomly-chosen vcweb localhost URL
+stdout '"Subdir": "nested"'
+stdout '"TagPrefix": "nested/"'
+stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="'
+
+stdout '"Ref": "HEAD"'
+stdout '"Hash": "f213069baa68ec26412fb373c7cf6669db1f8e69"'
+
+# The error result should be reusable.
+
+go list -reuse=no-nested.json -json -m --versions -e vcs-test.golang.org/git/issue61415.git/nested@latest
+
+stdout '"Err": "module vcs-test.golang.org/git/issue61415.git/nested: no matching versions for query \\"latest\\""'
+stdout '"URL":'  # randomly-chosen vcweb localhost URL
+stdout '"Subdir": "nested"'
+stdout '"TagPrefix": "nested/"'
+stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="'
+stdout '"Ref": "HEAD"'
+stdout '"Hash": "f213069baa68ec26412fb373c7cf6669db1f8e69"'
+stdout '"Reuse": true'
+
+
+# If the hash refers to some other commit instead, the
+# result should not be reused.
+
+replace f213069baa68ec26412fb373c7cf6669db1f8e69 08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a no-nested.json
+
+go list -reuse=no-nested.json -json -m --versions -e vcs-test.golang.org/git/issue61415.git/nested@latest
+stdout '"Err": "module vcs-test.golang.org/git/issue61415.git/nested: no matching versions for query \\"latest\\""'
+stdout '"URL":'  # randomly-chosen vcweb localhost URL
+stdout '"Subdir": "nested"'
+stdout '"TagPrefix": "nested/"'
+stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="'
+stdout '"Ref": "HEAD"'
+stdout '"Hash": "f213069baa68ec26412fb373c7cf6669db1f8e69"'
+! stdout '"Reuse"'
diff --git a/src/cmd/go/testdata/script/mod_list_issue61423.txt b/src/cmd/go/testdata/script/mod_list_issue61423.txt
new file mode 100644
index 0000000..2888391
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_list_issue61423.txt
@@ -0,0 +1,100 @@
+[short] skip 'generates a vcstest git repo'
+[!git] skip
+
+mkdir $WORK/mod1
+mkdir $WORK/mod2
+env GONOSUMDB=vcs-test.golang.org
+
+env GOPROXY=direct
+env GOMODCACHE=$WORK/mod1
+
+
+# If we query a module version from a git repo, we expect its
+# Origin data to be reusable.
+
+go list -m -json vcs-test.golang.org/git/issue61415.git@latest
+cp stdout git-latest.json
+stdout '"Version": "v0.0.0-20231114180001-f213069baa68"'
+stdout '"Origin":'
+stdout '"VCS": "git"'
+stdout '"Hash": "f213069baa68ec26412fb373c7cf6669db1f8e69"'
+stdout '"Ref": "HEAD"'
+stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="'
+
+go list -reuse=git-latest.json -m -json vcs-test.golang.org/git/issue61415.git@latest
+stdout '"Version": "v0.0.0-20231114180001-f213069baa68"'
+stdout '"Origin":'
+stdout '"VCS": "git"'
+stdout '"Hash": "f213069baa68ec26412fb373c7cf6669db1f8e69"'
+stdout '"Ref": "HEAD"'
+stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="'
+stdout '"Reuse": true'
+
+
+# Now we construct a filesystem-based module proxy that
+# contains only an older commit.
+
+go clean -modcache
+
+go mod download -json vcs-test.golang.org/git/issue61415.git@08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a
+stdout '"Version": "v0.0.0-20231114180000-08a4fa6bb9c0"'
+stdout '"Origin":'
+stdout '"VCS": "git"'
+stdout '"Hash": "08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a"'
+
+[GOOS:windows] env GOPROXY=file:///$WORK/mod1/cache/download
+[!GOOS:windows] env GOPROXY=file://$WORK/mod1/cache/download
+env GOMODCACHE=$WORK/modcache2
+
+
+# If we resolve the "latest" version query using a proxy,
+# it is only going to have Git origin information about the one
+# commit — not the other tags that would go into resolving
+# the underlying version list.
+# 'go list' should not emit the partial information,
+# since it isn't enough to reconstruct the result.
+
+go list -m -json vcs-test.golang.org/git/issue61415.git@latest
+cp stdout proxy-latest.json
+stdout '"Version": "v0.0.0-20231114180000-08a4fa6bb9c0"'
+! stdout '"Origin":'
+
+# However, if we list a specific, stable version, we should get
+# whatever origin metadata the proxy has for the version.
+
+go list -m -json vcs-test.golang.org/git/issue61415.git@v0.0.0-20231114180000-08a4fa6bb9c0
+cp stdout proxy-version.json
+stdout '"Version": "v0.0.0-20231114180000-08a4fa6bb9c0"'
+stdout '"Origin":'
+stdout '"VCS": "git"'
+stdout '"Hash": "08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a"'
+! stdout '"Ref":'
+! stdout '"TagSum":'
+
+# The -reuse flag has no effect with a proxy, since the proxy can serve
+# metadata about a given module version cheaply anyway.
+
+go list -reuse=proxy-version.json -m -json vcs-test.golang.org/git/issue61415.git@v0.0.0-20231114180000-08a4fa6bb9c0
+stdout '"Version": "v0.0.0-20231114180000-08a4fa6bb9c0"'
+stdout '"Origin":'
+stdout '"VCS": "git"'
+stdout '"Hash": "08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a"'
+! stdout '"Ref":'
+! stdout '"TagSum":'
+! stdout '"Reuse":'
+
+
+# With GOPROXY=direct, the -reuse flag has an effect, but
+# the Origin data from the proxy should not be sufficient
+# for the proxy response to be reused.
+
+env GOPROXY=direct
+
+go list -reuse=proxy-latest.json -m -json vcs-test.golang.org/git/issue61415.git@latest
+stdout '"Version": "v0.0.0-20231114180001-f213069baa68"'
+stdout '"Origin":'
+stdout '"VCS": "git"'
+stdout '"Hash": "f213069baa68ec26412fb373c7cf6669db1f8e69"'
+stdout '"Ref": "HEAD"'
+stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="'
+! stdout '"Reuse":'
diff --git a/src/cmd/go/testdata/script/mod_vendor_embed.txt b/src/cmd/go/testdata/script/mod_vendor_embed.txt
index b14fd99..1a3b2fe 100644
--- a/src/cmd/go/testdata/script/mod_vendor_embed.txt
+++ b/src/cmd/go/testdata/script/mod_vendor_embed.txt
@@ -12,6 +12,36 @@
 ! go mod vendor
 stderr 'go: pattern ../foo.txt: invalid pattern syntax'
 
+cd ../embed_go122
+go mod vendor
+cmp vendor/example.com/a/samedir_embed.txt ../a/samedir_embed.txt
+cmp vendor/example.com/a/subdir/embed.txt ../a/subdir/embed.txt
+! exists vendor/example.com/a/subdir/test/embed.txt
+! exists vendor/example.com/a/subdir/test/xtest/embed.txt
+-- embed_go122/go.mod --
+module example.com/foo
+go 1.22
+
+require (
+	example.com/a v0.1.0
+)
+
+replace (
+	example.com/a v0.1.0 => ../a
+)
+-- embed_go122/foo.go --
+package main
+
+import (
+	"fmt"
+
+	"example.com/a"
+)
+
+func main() {
+    fmt.Println(a.Str())
+}
+
 # matchPotentialSourceFile prunes out tests and unbuilt code.
 # Make sure that they are vendored if they are embedded files.
 cd ../embed_unbuilt
diff --git a/src/cmd/go/testdata/script/mod_verify_work.txt b/src/cmd/go/testdata/script/mod_verify_work.txt
new file mode 100644
index 0000000..d9f5a54
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_verify_work.txt
@@ -0,0 +1,24 @@
+# Regression test for Issue #62663: we would filter out the toolchain and
+# main modules from the build list incorrectly, leading to the workspace
+# modules being checked for correct sums. Specifically, this would happen when
+# a main module's name sorted after the virtual 'go' version module name: that
+# module could not be chopped off when we removed the first MainModules.Len()
+# modules from the build list, and we would remove the 'go' module instead.
+
+go mod verify
+
+-- go.work --
+go 1.21
+
+use (
+    ./a
+    ./b
+)
+-- a/go.mod --
+module hexample.com/a // important for test that module name sorts after 'go'
+
+go 1.21
+-- b/go.mod --
+module hexample.com/b // important for test that module name sorts after 'go'
+
+go 1.21
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/mod_versions.txt b/src/cmd/go/testdata/script/mod_versions.txt
index 9e6322b..aaa4216 100644
--- a/src/cmd/go/testdata/script/mod_versions.txt
+++ b/src/cmd/go/testdata/script/mod_versions.txt
@@ -1,7 +1,7 @@
 # Test rejection of pkg@version in GOPATH mode.
 env GO111MODULE=off
 ! go get rsc.io/quote@v1.5.1
-stderr '^go: can only use path@version syntax with ''go get'' and ''go install'' in module-aware mode$'
+stderr '^go: modules disabled by GO111MODULE=off'
 ! go build rsc.io/quote@v1.5.1
 stderr '^package rsc.io/quote@v1.5.1: can only use path@version syntax with ''go get'' and ''go install'' in module-aware mode$'
 
diff --git a/src/cmd/go/testdata/script/reuse_git.txt b/src/cmd/go/testdata/script/reuse_git.txt
index 0357d67..432f5a9 100644
--- a/src/cmd/go/testdata/script/reuse_git.txt
+++ b/src/cmd/go/testdata/script/reuse_git.txt
@@ -55,7 +55,9 @@
 stdout '"Error":.*no matching versions'
 ! stdout '"TagPrefix"'
 stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
-! stdout '"(Ref|Hash|RepoSum)":'
+stdout '"Ref": "HEAD"'
+stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
+! stdout 'RepoSum'
 
 # go mod download vcstest/hello/sub/v9 should also fail, print origin info with TagPrefix
 ! go mod download -x -json vcs-test.golang.org/git/hello.git/sub/v9@latest
@@ -64,7 +66,9 @@
 stdout '"Error":.*no matching versions'
 stdout '"TagPrefix": "sub/"'
 stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
-! stdout '"(Ref|Hash|RepoSum)":'
+stdout '"Ref": "HEAD"'
+stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
+! stdout 'RepoSum'
 
 # go mod download vcstest/hello@nonexist should fail, still print origin info
 ! go mod download -x -json vcs-test.golang.org/git/hello.git@nonexist
@@ -200,7 +204,8 @@
 stdout '"Error":.*no matching versions'
 ! stdout '"TagPrefix"'
 stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
-! stdout '"(Ref|Hash)":'
+stdout '"Ref": "HEAD"'
+stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
 ! stdout '"(Dir|Info|GoMod|Zip)"'
 
 # reuse go mod download vcstest/hello/sub/v9 error result
@@ -210,7 +215,8 @@
 stdout '"Error":.*no matching versions'
 stdout '"TagPrefix": "sub/"'
 stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
-! stdout '"(Ref|Hash)":'
+stdout '"Ref": "HEAD"'
+stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
 ! stdout '"(Dir|Info|GoMod|Zip)"'
 
 # reuse go mod download vcstest/hello@nonexist
diff --git a/src/cmd/go/testdata/script/test_android_issue62123.txt b/src/cmd/go/testdata/script/test_android_issue62123.txt
new file mode 100644
index 0000000..2f46a6b
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_android_issue62123.txt
@@ -0,0 +1,19 @@
+env GOOS=android GOARCH=amd64 CGO_ENABLED=0
+
+! go build -o $devnull cmd/buildid
+stderr 'android/amd64 requires external \(cgo\) linking, but cgo is not enabled'
+! stderr 'cannot find runtime/cgo'
+
+! go test -c -o $devnull os
+stderr '# os\nandroid/amd64 requires external \(cgo\) linking, but cgo is not enabled'
+! stderr 'cannot find runtime/cgo'
+
+env GOOS=ios GOARCH=arm64 CGO_ENABLED=0
+
+! go build -o $devnull cmd/buildid
+stderr 'ios/arm64 requires external \(cgo\) linking, but cgo is not enabled'
+! stderr 'cannot find runtime/cgo'
+
+! go test -c -o $devnull os
+stderr '# os\nios/arm64 requires external \(cgo\) linking, but cgo is not enabled'
+! stderr 'cannot find runtime/cgo'
diff --git a/src/cmd/go/testdata/script/test_fuzz_limit_dup_entry.txt b/src/cmd/go/testdata/script/test_fuzz_limit_dup_entry.txt
index 01217ae..d69f6e0 100644
--- a/src/cmd/go/testdata/script/test_fuzz_limit_dup_entry.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_limit_dup_entry.txt
@@ -4,7 +4,7 @@
 
 # FuzzA attempts to cause the mutator to create duplicate inputs that generate
 # new coverage. Previously this would trigger a corner case when the fuzzer
-# had a execution limit, causing it to deadlock and sit in the coordinator
+# had an execution limit, causing it to deadlock and sit in the coordinator
 # loop indefinitely, failing to exit once the limit had been exhausted.
 
 go test -fuzz=FuzzA -fuzztime=100x -parallel=1
diff --git a/src/cmd/go/testdata/script/test_fuzz_minimize_dirty_cov.txt b/src/cmd/go/testdata/script/test_fuzz_minimize_dirty_cov.txt
index 1279f6e..c8af9be 100644
--- a/src/cmd/go/testdata/script/test_fuzz_minimize_dirty_cov.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_minimize_dirty_cov.txt
@@ -1,6 +1,6 @@
 # Test that minimization doesn't use dirty coverage snapshots when it
 # is unable to actually minimize the input. We do this by checking that
-# a expected value appears in the cache. If a dirty coverage map is used
+# an expected value appears in the cache. If a dirty coverage map is used
 # (i.e. the coverage map generated during the last minimization step,
 # rather than the map provided with the initial input) then this value
 # is unlikely to appear in the cache, since the map generated during
diff --git a/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt b/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt
index e61c4f9..11aaaca 100644
--- a/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt
@@ -21,7 +21,7 @@
 # Test that minimization occurs for a crash that appears while minimizing a
 # newly found interesting input. There must be only one worker for this test to
 # be flaky like we want.
-! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerCrashInMinimization -test.run=XXX -test.fuzztime=10000x -test.parallel=1
+! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerCrashInMinimization -test.run=^$ -test.fuzztime=10000x -test.parallel=1
 ! stdout '^ok'
 stdout -count=1 'got the minimum size!'
 stdout -count=1 'bad input'
@@ -32,7 +32,7 @@
 
 # Test that a nonrecoverable error that occurs while minimizing an interesting
 # input is reported correctly.
-! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerNonrecoverableCrashInMinimization -test.run=XXX -test.fuzztime=10000x -test.parallel=1
+! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerNonrecoverableCrashInMinimization -test.run=^$ -test.fuzztime=10000x -test.parallel=1
 ! stdout '^ok'
 stdout -count=1 'fuzzing process hung or terminated unexpectedly while minimizing'
 stdout -count=1 'EOF'
diff --git a/src/cmd/go/testdata/script/test_json_issue35169.txt b/src/cmd/go/testdata/script/test_json_issue35169.txt
new file mode 100644
index 0000000..fdb5755
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_json_issue35169.txt
@@ -0,0 +1,28 @@
+! go test -json .
+
+	# We should see only JSON output on stdout, no non-JSON.
+	# To simplify the check, we just look for non-curly-braces, since
+	# every JSON entry has them and they're unlikely to occur
+	# in other error messages.
+! stdout '^[^{]'
+! stdout '[^}]\n$'
+
+	# Since the only test we requested failed to build, we should
+	# not see any "pass" actions in the JSON stream.
+! stdout '\{.*"Action":"pass".*\}'
+
+	# TODO(#62067): emit this as a build event instead of a test event.
+stdout '\{.*"Action":"output","Package":"example","Output":"FAIL\\texample \[build failed\]\\n"\}'
+stdout '\{.*"Action":"fail","Package":"example",.*\}'
+
+-- go.mod --
+module example
+go 1.19
+-- example.go --
+package example
+
+This is not valid Go source.
+-- example_test.go --
+package example
+
+func Test(*testing.T) {}
diff --git a/src/cmd/go/testdata/script/test_skip.txt b/src/cmd/go/testdata/script/test_skip.txt
index 94d20b9..2e5f4d6 100644
--- a/src/cmd/go/testdata/script/test_skip.txt
+++ b/src/cmd/go/testdata/script/test_skip.txt
@@ -13,13 +13,20 @@
 go test -v -skip 2/3 skip_test.go
 stdout RUN.*Test1
 stdout RUN.*Test2
+stdout RUN.*ExampleTest1
 ! stdout Test2/3
 
 go test -v -skip 2/4 skip_test.go
 stdout RUN.*Test1
 stdout RUN.*Test2
 stdout RUN.*Test2/3
+stdout RUN.*ExampleTest1
 
+go test -v -skip Example skip_test.go
+stdout RUN.*Test1
+stdout RUN.*Test2
+stdout RUN.*Test2/3
+! stdout ExampleTest1
 
 -- skip_test.go --
 package skip_test
@@ -32,3 +39,7 @@
 func Test2(t *testing.T) {
 	t.Run("3", func(t *testing.T) {})
 }
+
+func ExampleTest1() {
+	// Output:
+}
diff --git a/src/cmd/go/testdata/script/vendor_list_issue11977.txt b/src/cmd/go/testdata/script/vendor_list_issue11977.txt
index f519175..f1ed613 100644
--- a/src/cmd/go/testdata/script/vendor_list_issue11977.txt
+++ b/src/cmd/go/testdata/script/vendor_list_issue11977.txt
@@ -1,9 +1,5 @@
-[!net:github.com] skip
-[!git] skip
 env GO111MODULE=off
 
-go get github.com/rsc/go-get-issue-11864
-
 go list -f '{{join .TestImports "\n"}}' github.com/rsc/go-get-issue-11864/t
 stdout 'go-get-issue-11864/vendor/vendor.org/p'
 
@@ -15,3 +11,77 @@
 
 go list -f '{{join .XTestImports "\n"}}' github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx3
 stdout 'go-get-issue-11864/vendor/vendor.org/tx3'
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/m.go --
+package g
+
+import _ "vendor.org/p"
+import _ "vendor.org/p1"
+
+func main() {}
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/t/t_test.go --
+package t
+
+import _ "vendor.org/p"
+import _ "vendor.org/p1"
+import "testing"
+
+func TestNop(t *testing.T) {}
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/t/t.go --
+package t
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/tx/tx_test.go --
+package tx_test
+
+import _ "vendor.org/p"
+import _ "vendor.org/p1"
+import "testing"
+
+func TestNop(t *testing.T) {}
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/tx/tx.go --
+package tx
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/p1/p1.go --
+package p1 // import "vendor.org/p1"
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx3/tx3_test.go --
+package tx3_test
+
+import "vendor.org/tx3"
+import "testing"
+
+var Found = tx3.Exported
+
+func TestNop(t *testing.T) {}
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx3/export_test.go --
+package tx3
+
+var Exported = true
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx3/tx3.go --
+package tx3
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx2/tx2_test.go --
+package tx2_test
+
+import . "vendor.org/tx2"
+import "testing"
+
+var Found = Exported
+
+func TestNop(t *testing.T) {}
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx2/export_test.go --
+package tx2
+
+var Exported = true
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx2/tx2.go --
+package tx2
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/p/p.go --
+package p
diff --git a/src/cmd/go/testdata/script/vendor_test_issue11864.txt b/src/cmd/go/testdata/script/vendor_test_issue11864.txt
index 9e34811..90c9c59 100644
--- a/src/cmd/go/testdata/script/vendor_test_issue11864.txt
+++ b/src/cmd/go/testdata/script/vendor_test_issue11864.txt
@@ -1,12 +1,83 @@
-[!net:github.com] skip
-[!git] skip
+[short] skip
 env GO111MODULE=off
 
-go get github.com/rsc/go-get-issue-11864
-
 # test should work too
 go test github.com/rsc/go-get-issue-11864
 go test github.com/rsc/go-get-issue-11864/t
 
 # external tests should observe internal test exports (golang.org/issue/11977)
 go test github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx2
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/m.go --
+package g
+
+import _ "vendor.org/p"
+import _ "vendor.org/p1"
+
+func main() {}
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/t/t_test.go --
+package t
+
+import _ "vendor.org/p"
+import _ "vendor.org/p1"
+import "testing"
+
+func TestNop(t *testing.T) {}
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/t/t.go --
+package t
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/tx/tx_test.go --
+package tx_test
+
+import _ "vendor.org/p"
+import _ "vendor.org/p1"
+import "testing"
+
+func TestNop(t *testing.T) {}
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/tx/tx.go --
+package tx
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/p1/p1.go --
+package p1 // import "vendor.org/p1"
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx3/tx3_test.go --
+package tx3_test
+
+import "vendor.org/tx3"
+import "testing"
+
+var Found = tx3.Exported
+
+func TestNop(t *testing.T) {}
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx3/export_test.go --
+package tx3
+
+var Exported = true
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx3/tx3.go --
+package tx3
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx2/tx2_test.go --
+package tx2_test
+
+import . "vendor.org/tx2"
+import "testing"
+
+var Found = Exported
+
+func TestNop(t *testing.T) {}
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx2/export_test.go --
+package tx2
+
+var Exported = true
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx2/tx2.go --
+package tx2
+
+-- $GOPATH/src/github.com/rsc/go-get-issue-11864/vendor/vendor.org/p/p.go --
+package p
diff --git a/src/cmd/go/testdata/script/vendor_test_issue14613.txt b/src/cmd/go/testdata/script/vendor_test_issue14613.txt
index 9535fc1..d2f05c9 100644
--- a/src/cmd/go/testdata/script/vendor_test_issue14613.txt
+++ b/src/cmd/go/testdata/script/vendor_test_issue14613.txt
@@ -1,11 +1,6 @@
-[!net:github.com] skip
-[!git] skip
+[short] skip
 env GO111MODULE=off
 
-cd $GOPATH
-
-go get github.com/clsung/go-vendor-issue-14613
-
 # test folder should work
 go test github.com/clsung/go-vendor-issue-14613
 
@@ -16,3 +11,42 @@
 # test with imported and not used
 ! go test github.com/clsung/go-vendor-issue-14613/vendor/mylibtesttest/myapp/myapp_test.go
 stderr 'imported and not used'
+
+-- $GOPATH/src/github.com/clsung/go-vendor-issue-14613/./vendor_test.go --
+package main
+
+import (
+	"testing"
+
+	"github.com/clsung/fake"
+)
+
+func TestVendor(t *testing.T) {
+	ret := fake.DoNothing()
+	expected := "Ok"
+	if expected != ret {
+		t.Errorf("fake returned %q, expected %q", ret, expected)
+	}
+}
+
+-- $GOPATH/src/github.com/clsung/go-vendor-issue-14613/./vendor/mylibtesttest/myapp/myapp_test.go --
+package myapp
+import (
+   "mylibtesttest/rds"
+)
+
+-- $GOPATH/src/github.com/clsung/go-vendor-issue-14613/./vendor/mylibtesttest/rds/rds.go --
+package rds
+
+-- $GOPATH/src/github.com/clsung/go-vendor-issue-14613/./vendor/github.com/clsung/fake/fake.go --
+package fake
+
+func DoNothing() string {
+	return "Ok"
+}
+
+-- $GOPATH/src/github.com/clsung/go-vendor-issue-14613/./m.go --
+package main
+
+func main() {}
+
diff --git a/src/cmd/go/testdata/script/work.txt b/src/cmd/go/testdata/script/work.txt
index e229ab6..69391ef 100644
--- a/src/cmd/go/testdata/script/work.txt
+++ b/src/cmd/go/testdata/script/work.txt
@@ -32,7 +32,7 @@
 # -mod can only be set to readonly in workspace mode
 go list -mod=readonly all
 ! go list -mod=mod all
-stderr '^go: -mod may only be set to readonly when in workspace mode'
+stderr '^go: -mod may only be set to readonly or vendor when in workspace mode'
 env GOWORK=off
 go list -mod=mod all
 env GOWORK=
diff --git a/src/cmd/go/testdata/script/work_init_path.txt b/src/cmd/go/testdata/script/work_init_path.txt
index e397788..0a2d372 100644
--- a/src/cmd/go/testdata/script/work_init_path.txt
+++ b/src/cmd/go/testdata/script/work_init_path.txt
@@ -1,17 +1,33 @@
 # Regression test for https://go.dev/issue/51448.
-# 'go work init . foo/bar' should produce a go.work file
-# with the same paths as 'go work init; go work use -r .'.
+# 'go work init . .. foo/bar' should produce a go.work file
+# with the same paths as 'go work init; go work use -r ..',
+# and it should have 'use .' rather than 'use ./.' inside.
 
-go work init . foo/bar
+cd dir
+
+go work init . .. foo/bar
 mv go.work go.work.init
 
 go work init
-go work use -r .
+go work use -r ..
 cmp go.work go.work.init
 
+cmpenv go.work $WORK/go.work.want
+
 -- go.mod --
 module example
 go 1.18
--- foo/bar/go.mod --
+-- dir/go.mod --
 module example
 go 1.18
+-- dir/foo/bar/go.mod --
+module example
+go 1.18
+-- $WORK/go.work.want --
+go $goversion
+
+use (
+	.
+	..
+	./foo/bar
+)
diff --git a/src/cmd/go/testdata/script/work_vendor_empty.txt b/src/cmd/go/testdata/script/work_vendor_empty.txt
new file mode 100644
index 0000000..3c0c7ed
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_vendor_empty.txt
@@ -0,0 +1,16 @@
+go work vendor
+stderr 'go: no dependencies to vendor'
+! exists vendor/modules.txt
+! go list .
+stderr 'go: no modules were found in the current workspace'
+mkdir vendor
+mv bad_modules.txt vendor/modules.txt
+! go list .
+stderr 'go: no modules were found in the current workspace'
+
+-- bad_modules.txt --
+# a/module
+a/package
+-- go.work --
+go 1.21
+
diff --git a/src/cmd/go/testdata/script/work_vendor_main_module_replaced.txt b/src/cmd/go/testdata/script/work_vendor_main_module_replaced.txt
new file mode 100644
index 0000000..70446c7
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_vendor_main_module_replaced.txt
@@ -0,0 +1,46 @@
+# This is a test that if one of the main modules replaces the other
+# the vendor consistency checks still pass. The replacement is ignored
+# because it is of a main module, but it is still recorded in
+# vendor/modules.txt.
+
+go work vendor
+go list all # make sure the consistency checks pass
+! stderr .
+
+# Removing the replace causes consistency checks to fail
+cp a_go_mod_no_replace a/go.mod
+! go list all # consistency checks fail
+stderr 'example.com/b@v0.0.0: is marked as replaced in vendor/modules.txt, but not replaced in the workspace'
+
+
+-- a_go_mod_no_replace --
+module example.com/a
+
+go 1.21
+
+require example.com/b v0.0.0
+-- go.work --
+go 1.21
+
+use (
+    a
+    b
+)
+-- a/go.mod --
+module example.com/a
+
+go 1.21
+
+require example.com/b v0.0.0
+
+replace example.com/b => ../b
+-- a/a.go --
+package a
+
+import _ "example.com/b"
+-- b/go.mod --
+module example.com/b
+
+go 1.21
+-- b/b.go --
+package b
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/work_vendor_modules_txt_conditional.txt b/src/cmd/go/testdata/script/work_vendor_modules_txt_conditional.txt
new file mode 100644
index 0000000..3d671eb
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_vendor_modules_txt_conditional.txt
@@ -0,0 +1,62 @@
+# This test checks to see if we only start in workspace vendor
+# mode if the modules.txt specifies ## workspace (and only in
+# standard vendor if it doesn't).
+
+# vendor directory produced for workspace, workspace mode
+# runs in mod=vendor
+go work vendor
+cmp vendor/modules.txt want_workspace_modules_txt
+go list -f {{.Dir}} example.com/b
+stdout $GOPATH[\\/]src[\\/]vendor[\\/]example.com[\\/]b
+
+# vendor directory produced for workspace, module mode
+# runs in mod=readonly
+env GOWORK=off
+go list -f {{.Dir}} example.com/b
+stdout $GOPATH[\\/]src[\\/]b
+
+# vendor directory produced for module, module mode
+# runs in mod=vendor
+go mod vendor
+cmp vendor/modules.txt want_module_modules_txt
+go list -f {{.Dir}} example.com/b
+stdout $GOPATH[\\/]src[\\/]vendor[\\/]example.com[\\/]b
+
+# vendor directory produced for module, workspace mode
+# runs in mod=readonly
+env GOWORK=
+go list -f {{.Dir}} example.com/b
+stdout $GOPATH[\\/]src[\\/]b
+
+-- want_workspace_modules_txt --
+## workspace
+# example.com/b v0.0.0 => ./b
+## explicit; go 1.21
+example.com/b
+# example.com/b => ./b
+-- want_module_modules_txt --
+# example.com/b v0.0.0 => ./b
+## explicit; go 1.21
+example.com/b
+# example.com/b => ./b
+-- go.work --
+go 1.21
+
+use .
+-- go.mod --
+module example.com/a
+
+go 1.21
+
+require example.com/b v0.0.0
+replace example.com/b => ./b
+-- a.go --
+package a
+
+import _ "example.com/b"
+-- b/go.mod --
+module example.com/b
+
+go 1.21
+-- b/b.go --
+package b
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/work_vendor_modules_txt_consistent.txt b/src/cmd/go/testdata/script/work_vendor_modules_txt_consistent.txt
new file mode 100644
index 0000000..bc0f068
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_vendor_modules_txt_consistent.txt
@@ -0,0 +1,141 @@
+go work vendor
+cmp modules.txt.want vendor/modules.txt
+go list example.com/a example.com/b
+
+# Module required in go.mod but not marked explicit in modules.txt
+cp modules.txt.required_but_not_explicit vendor/modules.txt
+! go list example.com/a example.com/b
+cmpenv stderr required_but_not_explicit_error.txt
+
+# Replacement in go.mod but no replacement in modules.txt
+cp modules.txt.missing_replacement vendor/modules.txt
+! go list example.com/a example.com/b
+cmpenv stderr missing_replacement_error.txt
+
+# Replacement in go.mod but different replacement target in modules.txt
+cp modules.txt.different_replacement vendor/modules.txt
+! go list example.com/a example.com/b
+cmpenv stderr different_replacement_error.txt
+
+# Module marked explicit in modules.txt but not required in go.mod
+cp modules.txt.extra_explicit vendor/modules.txt
+! go list example.com/a example.com/b
+cmpenv stderr extra_explicit_error.txt
+
+# Replacement in modules.txt but not in go.mod
+cp modules.txt.extra_replacement vendor/modules.txt
+! go list example.com/a example.com/b
+cmpenv stderr extra_replacement_error.txt
+
+-- modules.txt.want --
+## workspace
+# example.com/p v1.0.0 => ./p
+## explicit; go 1.21
+# example.com/q v1.0.0 => ./q
+## explicit; go 1.21
+-- modules.txt.required_but_not_explicit --
+## workspace
+# example.com/p v1.0.0 => ./p
+## go 1.21
+# example.com/q v1.0.0 => ./q
+## explicit; go 1.21
+-- required_but_not_explicit_error.txt --
+go: inconsistent vendoring in $GOPATH${/}src:
+	example.com/p@v1.0.0: is explicitly required in go.mod, but not marked as explicit in vendor/modules.txt
+
+	To ignore the vendor directory, use -mod=readonly or -mod=mod.
+	To sync the vendor directory, run:
+		go work vendor
+-- modules.txt.missing_replacement --
+## workspace
+# example.com/p v1.0.0
+## explicit; go 1.21
+# example.com/q v1.0.0 => ./q
+## explicit; go 1.21
+-- missing_replacement_error.txt --
+go: inconsistent vendoring in $GOPATH${/}src:
+	example.com/p@v1.0.0: is replaced in a${/}go.mod, but not marked as replaced in vendor/modules.txt
+
+	To ignore the vendor directory, use -mod=readonly or -mod=mod.
+	To sync the vendor directory, run:
+		go work vendor
+-- modules.txt.different_replacement --
+## workspace
+# example.com/p v1.0.0 => ./r
+## explicit; go 1.21
+# example.com/q v1.0.0 => ./q
+## explicit; go 1.21
+-- different_replacement_error.txt --
+go: inconsistent vendoring in $GOPATH${/}src:
+	example.com/p@v1.0.0: is replaced by ../p in a${/}go.mod, but marked as replaced by ./r in vendor/modules.txt
+
+	To ignore the vendor directory, use -mod=readonly or -mod=mod.
+	To sync the vendor directory, run:
+		go work vendor
+-- modules.txt.extra_explicit --
+## workspace
+# example.com/p v1.0.0 => ./p
+## explicit; go 1.21
+# example.com/q v1.0.0 => ./q
+## explicit; go 1.21
+# example.com/r v1.0.0
+example.com/r
+## explicit; go 1.21
+-- extra_explicit_error.txt --
+go: inconsistent vendoring in $GOPATH${/}src:
+	example.com/r@v1.0.0: is marked as explicit in vendor/modules.txt, but not explicitly required in a go.mod
+
+	To ignore the vendor directory, use -mod=readonly or -mod=mod.
+	To sync the vendor directory, run:
+		go work vendor
+-- modules.txt.extra_replacement --
+## workspace
+# example.com/p v1.0.0 => ./p
+## explicit; go 1.21
+# example.com/q v1.0.0 => ./q
+## explicit; go 1.21
+# example.com/r v1.0.0 => ./r
+example.com/r
+## go 1.21
+-- extra_replacement_error.txt --
+go: inconsistent vendoring in $GOPATH${/}src:
+	example.com/r@v1.0.0: is marked as replaced in vendor/modules.txt, but not replaced in the workspace
+
+	To ignore the vendor directory, use -mod=readonly or -mod=mod.
+	To sync the vendor directory, run:
+		go work vendor
+-- go.work --
+go 1.21
+
+use (
+    ./a
+    ./b
+)
+-- a/go.mod --
+module example.com/a
+
+go 1.21
+
+require example.com/p v1.0.0
+
+replace example.com/p v1.0.0 => ../p
+-- a/a.go --
+package p
+-- b/go.mod --
+module example.com/b
+
+go 1.21
+
+require example.com/q v1.0.0
+
+replace example.com/q v1.0.0 => ../q
+-- b/b.go --
+package b
+-- p/go.mod --
+module example.com/p
+
+go 1.21
+-- q/go.mod --
+module example.com/q
+
+go 1.21
diff --git a/src/cmd/go/testdata/script/work_vendor_prune.txt b/src/cmd/go/testdata/script/work_vendor_prune.txt
new file mode 100644
index 0000000..424b4d5
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_vendor_prune.txt
@@ -0,0 +1,116 @@
+# This test exercises that vendoring works properly using the workspace in
+# the work_prune test case.
+
+go work vendor
+cmp vendor/modules.txt modules.txt.want
+cmp vendor/example.com/b/b.go b/b.go
+cmp vendor/example.com/q/q.go q1_1_0/q.go
+go list -m -f '{{.Version}}' example.com/q
+stdout '^v1.1.0$'
+
+go list -f '{{.Dir}}' example.com/q
+stdout $GOPATH[\\/]src[\\/]vendor[\\/]example.com[\\/]q
+go list -f '{{.Dir}}' example.com/b
+stdout $GOPATH[\\/]src[\\/]vendor[\\/]example.com[\\/]b
+
+[short] skip
+
+rm b
+rm q1_0_0
+rm q1_1_0
+go run example.com/p
+stdout 'version 1.1.0'
+
+-- modules.txt.want --
+## workspace
+# example.com/b v1.0.0 => ./b
+## explicit; go 1.18
+example.com/b
+# example.com/q v1.0.0 => ./q1_0_0
+## explicit; go 1.18
+# example.com/q v1.1.0 => ./q1_1_0
+## go 1.18
+example.com/q
+-- go.work --
+go 1.18
+
+use (
+	./a
+	./p
+)
+-- a/go.mod --
+module example.com/a
+
+go 1.18
+
+require example.com/b v1.0.0
+
+replace example.com/b v1.0.0 => ../b
+-- a/foo.go --
+package main
+
+import "example.com/b"
+
+func main() {
+	b.B()
+}
+-- b/go.mod --
+module example.com/b
+
+go 1.18
+
+require example.com/q v1.1.0
+-- b/b.go --
+package b
+
+func B() {
+}
+-- b/b_test.go --
+package b
+
+import "example.com/q"
+
+func TestB() {
+	q.PrintVersion()
+}
+-- p/go.mod --
+module example.com/p
+
+go 1.18
+
+require example.com/q v1.0.0
+
+replace example.com/q v1.0.0 => ../q1_0_0
+replace example.com/q v1.1.0 => ../q1_1_0
+-- p/main.go --
+package main
+
+import "example.com/q"
+
+func main() {
+	q.PrintVersion()
+}
+-- q1_0_0/go.mod --
+module example.com/q
+
+go 1.18
+-- q1_0_0/q.go --
+package q
+
+import "fmt"
+
+func PrintVersion() {
+	fmt.Println("version 1.0.0")
+}
+-- q1_1_0/go.mod --
+module example.com/q
+
+go 1.18
+-- q1_1_0/q.go --
+package q
+
+import "fmt"
+
+func PrintVersion() {
+	fmt.Println("version 1.1.0")
+}
diff --git a/src/cmd/go/testdata/script/work_vendor_prune_all.txt b/src/cmd/go/testdata/script/work_vendor_prune_all.txt
new file mode 100644
index 0000000..a369d22
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_vendor_prune_all.txt
@@ -0,0 +1,201 @@
+# This test exercises that vendoring works properly using the workspace in
+# the work_prune test case.
+
+go work vendor
+cmp vendor/modules.txt modules.txt.want
+go list -f '{{with .Module}}{{.Path}}@{{.Version}}{{end}}' all
+cmp stdout want_versions
+
+go list -f '{{.Dir}}' example.com/q
+stdout $GOPATH[\\/]src[\\/]vendor[\\/]example.com[\\/]q
+go list -f '{{.Dir}}' example.com/b
+stdout $GOPATH[\\/]src[\\/]vendor[\\/]example.com[\\/]b
+go list -f '{{.Dir}}' example.com/w
+stdout $GOPATH[\\/]src[\\/]vendor[\\/]example.com[\\/]w
+go list -f '{{.Dir}}' example.com/z
+stdout $GOPATH[\\/]src[\\/]vendor[\\/]example.com[\\/]z
+
+cmp $GOPATH/src/vendor/example.com/q/q.go q1_1_0/q.go
+
+-- modules.txt.want --
+## workspace
+# example.com/b v1.0.0 => ./b
+## explicit; go 1.18
+example.com/b
+# example.com/q v1.0.0 => ./q1_0_0
+## explicit; go 1.18
+# example.com/q v1.1.0 => ./q1_1_0
+## go 1.18
+example.com/q
+# example.com/w v1.0.0 => ./w
+## go 1.18
+example.com/w
+# example.com/z v1.0.0 => ./z1_0_0
+## explicit; go 1.18
+# example.com/z v1.1.0 => ./z1_1_0
+## go 1.18
+example.com/z
+# example.com/q v1.0.5 => ./q1_0_5
+# example.com/r v1.0.0 => ./r
+# example.com/x v1.0.0 => ./x
+# example.com/y v1.0.0 => ./y
+-- want_versions --
+example.com/a@
+example.com/b@v1.0.0
+example.com/p@
+example.com/q@v1.1.0
+example.com/w@v1.0.0
+example.com/z@v1.1.0
+-- go.work --
+go 1.18
+
+use (
+	./a
+	./p
+)
+
+replace example.com/b v1.0.0 => ./b
+replace example.com/q v1.0.0 => ./q1_0_0
+replace example.com/q v1.0.5 => ./q1_0_5
+replace example.com/q v1.1.0 => ./q1_1_0
+replace example.com/r v1.0.0 => ./r
+replace example.com/w v1.0.0 => ./w
+replace example.com/x v1.0.0 => ./x
+replace example.com/y v1.0.0 => ./y
+replace example.com/z v1.0.0 => ./z1_0_0
+replace example.com/z v1.1.0 => ./z1_1_0
+
+-- a/go.mod --
+module example.com/a
+
+go 1.18
+
+require example.com/b v1.0.0
+require example.com/z v1.0.0
+-- a/foo.go --
+package main
+
+import "example.com/b"
+
+func main() {
+	b.B()
+}
+-- b/go.mod --
+module example.com/b
+
+go 1.18
+
+require example.com/q v1.1.0
+-- b/b.go --
+package b
+
+func B() {
+}
+-- p/go.mod --
+module example.com/p
+
+go 1.18
+
+require example.com/q v1.0.0
+
+replace example.com/q v1.0.0 => ../q1_0_0
+replace example.com/q v1.1.0 => ../q1_1_0
+-- p/main.go --
+package main
+
+import "example.com/q"
+
+func main() {
+	q.PrintVersion()
+}
+-- q1_0_0/go.mod --
+module example.com/q
+
+go 1.18
+-- q1_0_0/q.go --
+package q
+
+import "fmt"
+
+func PrintVersion() {
+	fmt.Println("version 1.0.0")
+}
+-- q1_0_5/go.mod --
+module example.com/q
+
+go 1.18
+
+require example.com/r v1.0.0
+-- q1_0_5/q.go --
+package q
+
+import _ "example.com/r"
+-- q1_1_0/go.mod --
+module example.com/q
+
+require example.com/w v1.0.0
+require example.com/z v1.1.0
+
+go 1.18
+-- q1_1_0/q.go --
+package q
+
+import _ "example.com/w"
+import _ "example.com/z"
+
+import "fmt"
+
+func PrintVersion() {
+	fmt.Println("version 1.1.0")
+}
+-- r/go.mod --
+module example.com/r
+
+go 1.18
+
+require example.com/r v1.0.0
+-- r/r.go --
+package r
+-- w/go.mod --
+module example.com/w
+
+go 1.18
+
+require example.com/x v1.0.0
+-- w/w.go --
+package w
+-- w/w_test.go --
+package w
+
+import _ "example.com/x"
+-- x/go.mod --
+module example.com/x
+
+go 1.18
+-- x/x.go --
+package x
+-- x/x_test.go --
+package x
+import _ "example.com/y"
+-- y/go.mod --
+module example.com/y
+
+go 1.18
+-- y/y.go --
+package y
+-- z1_0_0/go.mod --
+module example.com/z
+
+go 1.18
+
+require example.com/q v1.0.5
+-- z1_0_0/z.go --
+package z
+
+import _ "example.com/q"
+-- z1_1_0/go.mod --
+module example.com/z
+
+go 1.18
+-- z1_1_0/z.go --
+package z
diff --git a/src/cmd/go/testdata/vcstest/bzr/hello.txt b/src/cmd/go/testdata/vcstest/bzr/hello.txt
index 7d06503..5931585 100644
--- a/src/cmd/go/testdata/vcstest/bzr/hello.txt
+++ b/src/cmd/go/testdata/vcstest/bzr/hello.txt
@@ -1,6 +1,7 @@
 handle bzr
 
 env BZR_EMAIL='Russ Cox <rsc@google.com>'
+env EMAIL='Russ Cox <rsc@google.com>'
 
 bzr init-repo .
 
diff --git a/src/cmd/go/testdata/vcstest/git/issue47650.txt b/src/cmd/go/testdata/vcstest/git/issue47650.txt
new file mode 100644
index 0000000..5204078
--- /dev/null
+++ b/src/cmd/go/testdata/vcstest/git/issue47650.txt
@@ -0,0 +1,42 @@
+handle git
+
+env GIT_AUTHOR_NAME='Bryan C. Mills'
+env GIT_AUTHOR_EMAIL='bcmills@google.com'
+env GIT_COMMITTER_NAME=$GIT_AUTHOR_NAME
+env GIT_COMMITTER_EMAIL=$GIT_AUTHOR_EMAIL
+
+git init
+
+at 2021-08-11T13:52:00-04:00
+git add cmd
+git commit -m 'add cmd/issue47650'
+git branch -m main
+git tag v0.1.0
+
+git add go.mod
+git commit -m 'add go.mod'
+
+git show-ref --tags --heads
+cmp stdout .git-refs
+
+git log --oneline --decorate=short
+cmp stdout .git-log
+
+-- .git-refs --
+21535ef346c3e79fd09edd75bd4725f06c828e43 refs/heads/main
+4d237df2dbfc8a443af2f5e84be774f08a2aed0c refs/tags/v0.1.0
+-- .git-log --
+21535ef (HEAD -> main) add go.mod
+4d237df (tag: v0.1.0) add cmd/issue47650
+-- go.mod --
+module vcs-test.golang.org/git/issue47650.git
+
+go 1.17
+-- cmd/issue47650/main.go --
+package main
+
+import "os"
+
+func main() {
+	os.Stdout.WriteString("Hello, world!")
+}
diff --git a/src/cmd/go/testdata/vcstest/git/issue61415.txt b/src/cmd/go/testdata/vcstest/git/issue61415.txt
new file mode 100644
index 0000000..5b8bca6
--- /dev/null
+++ b/src/cmd/go/testdata/vcstest/git/issue61415.txt
@@ -0,0 +1,42 @@
+handle git
+
+env GIT_AUTHOR_NAME='Bryan C. Mills'
+env GIT_AUTHOR_EMAIL='bcmills@google.com'
+env GIT_COMMITTER_NAME=$GIT_AUTHOR_NAME
+env GIT_COMMITTER_EMAIL=$GIT_AUTHOR_EMAIL
+
+at 2023-11-14T13:00:00-05:00
+
+git init
+
+git add go.mod nested
+git commit -m 'nested: add go.mod'
+git branch -m main
+
+git tag has-nested
+
+at 2023-11-14T13:00:01-05:00
+
+git rm -r nested
+git commit -m 'nested: delete subdirectory'
+
+git show-ref --tags --heads
+cmp stdout .git-refs
+
+git log --pretty=oneline
+cmp stdout .git-log
+
+-- .git-refs --
+f213069baa68ec26412fb373c7cf6669db1f8e69 refs/heads/main
+08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a refs/tags/has-nested
+-- .git-log --
+f213069baa68ec26412fb373c7cf6669db1f8e69 nested: delete subdirectory
+08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a nested: add go.mod
+-- go.mod --
+module vcs-test.golang.org/git/issue61415.git
+
+go 1.20
+-- nested/go.mod --
+module vcs-test.golang.org/git/issue61415.git/nested
+
+go 1.20
diff --git a/src/cmd/gofmt/long_test.go b/src/cmd/gofmt/long_test.go
index 8db348a..21a0119 100644
--- a/src/cmd/gofmt/long_test.go
+++ b/src/cmd/gofmt/long_test.go
@@ -126,7 +126,7 @@
 	if *files != "" {
 		for _, filename := range strings.Split(*files, ",") {
 			fi, err := os.Stat(filename)
-			handleFile(filename, &statDirEntry{fi}, err)
+			handleFile(filename, fs.FileInfoToDirEntry(fi), err)
 		}
 		return // ignore files under -root
 	}
@@ -170,16 +170,3 @@
 		fmt.Printf("processed %d files\n", nfiles)
 	}
 }
-
-type statDirEntry struct {
-	info fs.FileInfo
-}
-
-func (d *statDirEntry) Name() string               { return d.info.Name() }
-func (d *statDirEntry) IsDir() bool                { return d.info.IsDir() }
-func (d *statDirEntry) Type() fs.FileMode          { return d.info.Mode().Type() }
-func (d *statDirEntry) Info() (fs.FileInfo, error) { return d.info, nil }
-
-func (d *statDirEntry) String() string {
-	return fs.FormatDirEntry(d)
-}
diff --git a/src/cmd/gofmt/rewrite.go b/src/cmd/gofmt/rewrite.go
index 0b7e211..8ed0930 100644
--- a/src/cmd/gofmt/rewrite.go
+++ b/src/cmd/gofmt/rewrite.go
@@ -69,9 +69,7 @@
 			return reflect.Value{}
 		}
 		val = apply(rewriteVal, val)
-		for k := range m {
-			delete(m, k)
-		}
+		clear(m)
 		if match(m, pat, val) {
 			val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
 		}
diff --git a/src/cmd/internal/archive/archive.go b/src/cmd/internal/archive/archive.go
index 8ac50e2..393034d 100644
--- a/src/cmd/internal/archive/archive.go
+++ b/src/cmd/internal/archive/archive.go
@@ -70,6 +70,7 @@
 	EntryPkgDef EntryType = iota
 	EntryGoObj
 	EntryNativeObj
+	EntrySentinelNonObj
 )
 
 func (e *Entry) String() string {
@@ -357,6 +358,23 @@
 				Data:  Data{r.offset, size},
 			})
 			r.skip(size)
+		case "preferlinkext", "dynimportfail":
+			if size == 0 {
+				// These are not actual objects, but rather sentinel
+				// entries put into the archive by the Go command to
+				// be read by the linker. See #62036.
+				r.a.Entries = append(r.a.Entries, Entry{
+					Name:  name,
+					Type:  EntrySentinelNonObj,
+					Mtime: mtime,
+					Uid:   uid,
+					Gid:   gid,
+					Mode:  mode,
+					Data:  Data{r.offset, size},
+				})
+				break
+			}
+			fallthrough
 		default:
 			var typ EntryType
 			var o *GoObj
diff --git a/src/cmd/internal/bio/buf_mmap.go b/src/cmd/internal/bio/buf_mmap.go
index d089efa..65b245c 100644
--- a/src/cmd/internal/bio/buf_mmap.go
+++ b/src/cmd/internal/bio/buf_mmap.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || (solaris && go1.20)
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
 
 package bio
 
diff --git a/src/cmd/internal/bio/buf_nommap.go b/src/cmd/internal/bio/buf_nommap.go
index 5ebe906..674144e 100644
--- a/src/cmd/internal/bio/buf_nommap.go
+++ b/src/cmd/internal/bio/buf_nommap.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !(solaris && go1.20)
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
 
 package bio
 
diff --git a/src/cmd/internal/buildid/buildid_test.go b/src/cmd/internal/buildid/buildid_test.go
index 55835bf..8efa473 100644
--- a/src/cmd/internal/buildid/buildid_test.go
+++ b/src/cmd/internal/buildid/buildid_test.go
@@ -7,6 +7,8 @@
 import (
 	"bytes"
 	"crypto/sha256"
+	"debug/elf"
+	"encoding/binary"
 	"internal/obscuretestdata"
 	"os"
 	"reflect"
@@ -90,6 +92,69 @@
 		if id != newID || err != nil {
 			t.Errorf("ReadFile(%s after Rewrite) = %q, %v, want %q, nil", f, id, err, newID)
 		}
+
+		// Test an ELF PT_NOTE segment with an Align field of 0.
+		// Do this by rewriting the file data.
+		if strings.Contains(name, "elf") {
+			// We only expect a 64-bit ELF file.
+			if elf.Class(data[elf.EI_CLASS]) != elf.ELFCLASS64 {
+				continue
+			}
+
+			// We only expect a little-endian ELF file.
+			if elf.Data(data[elf.EI_DATA]) != elf.ELFDATA2LSB {
+				continue
+			}
+			order := binary.LittleEndian
+
+			var hdr elf.Header64
+			if err := binary.Read(bytes.NewReader(data), order, &hdr); err != nil {
+				t.Error(err)
+				continue
+			}
+
+			phoff := hdr.Phoff
+			phnum := int(hdr.Phnum)
+			phsize := uint64(hdr.Phentsize)
+
+			for i := 0; i < phnum; i++ {
+				var phdr elf.Prog64
+				if err := binary.Read(bytes.NewReader(data[phoff:]), order, &phdr); err != nil {
+					t.Error(err)
+					continue
+				}
+
+				if elf.ProgType(phdr.Type) == elf.PT_NOTE {
+					// Increase the size so we keep
+					// reading notes.
+					order.PutUint64(data[phoff+4*8:], phdr.Filesz+1)
+
+					// Clobber the Align field to zero.
+					order.PutUint64(data[phoff+6*8:], 0)
+
+					// Clobber the note type so we
+					// keep reading notes.
+					order.PutUint32(data[phdr.Off+12:], 0)
+				}
+
+				phoff += phsize
+			}
+
+			if err := os.WriteFile(tmp, data, 0666); err != nil {
+				t.Error(err)
+				continue
+			}
+
+			id, err := ReadFile(tmp)
+			// Because we clobbered the note type above,
+			// we don't expect to see a Go build ID.
+			// The issue we are testing for was a crash
+			// in ReadFile; see issue #62097.
+			if id != "" || err != nil {
+				t.Errorf("ReadFile with zero ELF Align = %q, %v, want %q, nil", id, err, "")
+				continue
+			}
+		}
 	}
 }
 
diff --git a/src/cmd/internal/buildid/note.go b/src/cmd/internal/buildid/note.go
index ab98701..e0e8683 100644
--- a/src/cmd/internal/buildid/note.go
+++ b/src/cmd/internal/buildid/note.go
@@ -153,9 +153,11 @@
 			}
 			off += notesz
 			align := p.Align
-			alignedOff := (off + align - 1) &^ (align - 1)
-			notesz += alignedOff - off
-			off = alignedOff
+			if align != 0 {
+				alignedOff := (off + align - 1) &^ (align - 1)
+				notesz += alignedOff - off
+				off = alignedOff
+			}
 			filesz -= notesz
 			note = note[notesz:]
 		}
diff --git a/src/cmd/internal/cov/covcmd/cmddefs.go b/src/cmd/internal/cov/covcmd/cmddefs.go
new file mode 100644
index 0000000..cb848d3
--- /dev/null
+++ b/src/cmd/internal/cov/covcmd/cmddefs.go
@@ -0,0 +1,97 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package covcmd
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"internal/coverage"
+)
+
+// CoverPkgConfig is a bundle of information passed from the Go
+// command to the cover command during "go build -cover" runs. The
+// Go command creates and fills in a struct as below, then passes a
+// file containing the encoded JSON for the struct to the "cover"
+// tool when instrumenting the source files in a Go package.
+type CoverPkgConfig struct {
+	// File into which cmd/cover should emit summary info
+	// when instrumentation is complete.
+	OutConfig string
+
+	// Import path for the package being instrumented.
+	PkgPath string
+
+	// Package name.
+	PkgName string
+
+	// Instrumentation granularity: one of "perfunc" or "perblock" (default)
+	Granularity string
+
+	// Module path for this package (empty if no go.mod in use)
+	ModulePath string
+
+	// Local mode indicates we're doing a coverage build or test of a
+	// package selected via local import path, e.g. "./..." or
+	// "./foo/bar" as opposed to a non-relative import path. See the
+	// corresponding field in cmd/go's PackageInternal struct for more
+	// info.
+	Local bool
+
+	// EmitMetaFile if non-empty is the path to which the cover tool should
+	// directly emit a coverage meta-data file for the package, if the
+	// package has any functions in it. The go command will pass in a value
+	// here if we've been asked to run "go test -cover" on a package that
+	// doesn't have any *_test.go files.
+	EmitMetaFile string
+}
+
+// CoverFixupConfig contains annotations/notes generated by the
+// cmd/cover tool (during instrumentation) to be passed on to the
+// compiler when the instrumented code is compiled. The cmd/cover tool
+// creates a struct of this type, JSON-encodes it, and emits the
+// result to a file, which the Go command then passes to the compiler
+// when the instrumented package is built.
+type CoverFixupConfig struct {
+	// Name of the variable (created by cmd/cover) containing the
+	// encoded meta-data for the package.
+	MetaVar string
+
+	// Length of the meta-data.
+	MetaLen int
+
+	// Hash computed by cmd/cover of the meta-data.
+	MetaHash string
+
+	// Instrumentation strategy. For now this is always set to
+	// "normal", but in the future we may add new values (for example,
+	// if panic paths are instrumented, or if the instrumenter
+	// eliminates redundant counters).
+	Strategy string
+
+	// Prefix assigned to the names of counter variables generated
+	// during instrumentation by cmd/cover.
+	CounterPrefix string
+
+	// Name chosen for the package ID variable generated during
+	// instrumentation.
+	PkgIdVar string
+
+	// Counter mode (e.g. set/count/atomic)
+	CounterMode string
+
+	// Counter granularity (perblock or perfunc).
+	CounterGranularity string
+}
+
+// MetaFileForPackage returns the expected name of the meta-data file
+// for the package whose import path is 'importPath' in cases where
+// we're using meta-data generated by the cover tool, as opposed to a
+// meta-data file created at runtime.
+func MetaFileForPackage(importPath string) string {
+	var r [32]byte
+	sum := sha256.Sum256([]byte(importPath))
+	copy(r[:], sum[:])
+	return coverage.MetaFilePref + fmt.Sprintf(".%x", r)
+}
diff --git a/src/cmd/internal/dwarf/dwarf.go b/src/cmd/internal/dwarf/dwarf.go
index d4a4e33..3e87e59 100644
--- a/src/cmd/internal/dwarf/dwarf.go
+++ b/src/cmd/internal/dwarf/dwarf.go
@@ -9,6 +9,7 @@
 
 import (
 	"bytes"
+	"cmd/internal/src"
 	"errors"
 	"fmt"
 	"internal/buildcfg"
@@ -16,8 +17,6 @@
 	"sort"
 	"strconv"
 	"strings"
-
-	"cmd/internal/objabi"
 )
 
 // InfoPrefix is the prefix for all the symbols containing DWARF info entries.
@@ -41,7 +40,6 @@
 
 // Sym represents a symbol.
 type Sym interface {
-	Length(dwarfContext interface{}) int64
 }
 
 // A Var represents a local variable or a function parameter.
@@ -86,15 +84,13 @@
 // creating the DWARF subprogram DIE(s) for a function.
 type FnState struct {
 	Name          string
-	Importpath    string
 	Info          Sym
-	Filesym       Sym
 	Loc           Sym
 	Ranges        Sym
 	Absfn         Sym
 	StartPC       Sym
+	StartPos      src.Pos
 	Size          int64
-	StartLine     int32
 	External      bool
 	Scopes        []Scope
 	InlCalls      InlCalls
@@ -169,11 +165,8 @@
 	// index into ctx.InlTree describing the call inlined here
 	InlIndex int
 
-	// Symbol of file containing inlined call site (really *obj.LSym).
-	CallFile Sym
-
-	// Line number of inlined call site.
-	CallLine uint32
+	// Position of the inlined call site.
+	CallPos src.Pos
 
 	// Dwarf abstract subroutine symbol (really *obj.LSym).
 	AbsFunSym Sym
@@ -195,6 +188,7 @@
 // A Context specifies how to add data to a Sym.
 type Context interface {
 	PtrSize() int
+	Size(s Sym) int64
 	AddInt(s Sym, size int, i int64)
 	AddBytes(s Sym, b []byte)
 	AddAddress(s Sym, t interface{}, ofs int64)
@@ -205,7 +199,6 @@
 	RecordDclReference(from Sym, to Sym, dclIdx int, inlIndex int)
 	RecordChildDieOffsets(s Sym, vars []*Var, offsets []int32)
 	AddString(s Sym, v string)
-	AddFileRef(s Sym, f interface{})
 	Logf(format string, args ...interface{})
 }
 
@@ -1233,7 +1226,6 @@
 // DIE (as a space-saving measure, so that name/type etc doesn't have
 // to be repeated for each inlined copy).
 func PutAbstractFunc(ctxt Context, s *FnState) error {
-
 	if logDwarf {
 		ctxt.Logf("PutAbstractFunc(%v)\n", s.Absfn)
 	}
@@ -1242,22 +1234,16 @@
 	Uleb128put(ctxt, s.Absfn, int64(abbrev))
 
 	fullname := s.Name
-	if strings.HasPrefix(s.Name, "\"\".") {
-		// Generate a fully qualified name for the function in the
-		// abstract case. This is so as to avoid the need for the
-		// linker to process the DIE with patchDWARFName(); we can't
-		// allow the name attribute of an abstract subprogram DIE to
-		// be rewritten, since it would change the offsets of the
-		// child DIEs (which we're relying on in order for abstract
-		// origin references to work).
-		fullname = objabi.PathToPrefix(s.Importpath) + "." + s.Name[3:]
+	if strings.HasPrefix(s.Name, `"".`) {
+		return fmt.Errorf("unqualified symbol name: %v", s.Name)
 	}
 	putattr(ctxt, s.Absfn, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(fullname)), fullname)
 
 	// DW_AT_inlined value
 	putattr(ctxt, s.Absfn, abbrev, DW_FORM_data1, DW_CLS_CONSTANT, int64(DW_INL_inlined), nil)
 
-	putattr(ctxt, s.Absfn, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(s.StartLine), nil)
+	// TODO(mdempsky): Shouldn't we write out StartPos.FileIndex() too?
+	putattr(ctxt, s.Absfn, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(s.StartPos.RelLine()), nil)
 
 	var ev int64
 	if s.External {
@@ -1336,7 +1322,7 @@
 	putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, callee)
 
 	if abbrev == DW_ABRV_INLINED_SUBROUTINE_RANGES {
-		putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, s.Ranges.Length(ctxt), s.Ranges)
+		putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, ctxt.Size(s.Ranges), s.Ranges)
 		s.PutRanges(ctxt, ic.Ranges)
 	} else {
 		st := ic.Ranges[0].Start
@@ -1346,9 +1332,9 @@
 	}
 
 	// Emit call file, line attrs.
-	ctxt.AddFileRef(s.Info, ic.CallFile)
+	putattr(ctxt, s.Info, abbrev, DW_FORM_data4, DW_CLS_CONSTANT, int64(1+ic.CallPos.FileIndex()), nil) // 1-based file table
 	form := int(expandPseudoForm(DW_FORM_udata_pseudo))
-	putattr(ctxt, s.Info, abbrev, form, DW_CLS_CONSTANT, int64(ic.CallLine), nil)
+	putattr(ctxt, s.Info, abbrev, form, DW_CLS_CONSTANT, int64(ic.CallPos.RelLine()), nil)
 
 	// Variables associated with this inlined routine instance.
 	vars := ic.InlVars
@@ -1437,10 +1423,9 @@
 	}
 	Uleb128put(ctxt, s.Info, int64(abbrev))
 
-	// Expand '"".' to import path.
 	name := s.Name
-	if s.Importpath != "" {
-		name = strings.Replace(name, "\"\".", objabi.PathToPrefix(s.Importpath)+".", -1)
+	if strings.HasPrefix(name, `"".`) {
+		return fmt.Errorf("unqualified symbol name: %v", name)
 	}
 
 	putattr(ctxt, s.Info, DW_ABRV_FUNCTION, DW_FORM_string, DW_CLS_STRING, int64(len(name)), name)
@@ -1450,8 +1435,8 @@
 	if isWrapper {
 		putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, int64(1), 0)
 	} else {
-		ctxt.AddFileRef(s.Info, s.Filesym)
-		putattr(ctxt, s.Info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(s.StartLine), nil)
+		putattr(ctxt, s.Info, abbrev, DW_FORM_data4, DW_CLS_CONSTANT, int64(1+s.StartPos.FileIndex()), nil) // 1-based file index
+		putattr(ctxt, s.Info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(s.StartPos.RelLine()), nil)
 
 		var ev int64
 		if s.External {
@@ -1550,7 +1535,7 @@
 			putattr(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_SIMPLE, DW_FORM_addr, DW_CLS_ADDRESS, scope.Ranges[0].End, s.StartPC)
 		} else {
 			Uleb128put(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_RANGES)
-			putattr(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_RANGES, DW_FORM_sec_offset, DW_CLS_PTR, s.Ranges.Length(ctxt), s.Ranges)
+			putattr(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_RANGES, DW_FORM_sec_offset, DW_CLS_PTR, ctxt.Size(s.Ranges), s.Ranges)
 
 			s.PutRanges(ctxt, scope.Ranges)
 		}
@@ -1699,7 +1684,7 @@
 	}
 
 	if abbrevUsesLoclist(abbrev) {
-		putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, s.Loc.Length(ctxt), s.Loc)
+		putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, ctxt.Size(s.Loc), s.Loc)
 		v.PutLocationList(s.Loc, s.StartPC)
 	} else {
 		loc := encbuf[:0]
diff --git a/src/cmd/internal/goobj/builtinlist.go b/src/cmd/internal/goobj/builtinlist.go
index 883e13d..fb729f5 100644
--- a/src/cmd/internal/goobj/builtinlist.go
+++ b/src/cmd/internal/goobj/builtinlist.go
@@ -65,7 +65,6 @@
 	{"runtime.slicecopy", 1},
 	{"runtime.decoderune", 1},
 	{"runtime.countrunes", 1},
-	{"runtime.convI2I", 1},
 	{"runtime.convT", 1},
 	{"runtime.convTnoptr", 1},
 	{"runtime.convT16", 1},
@@ -75,14 +74,16 @@
 	{"runtime.convTslice", 1},
 	{"runtime.assertE2I", 1},
 	{"runtime.assertE2I2", 1},
-	{"runtime.assertI2I", 1},
-	{"runtime.assertI2I2", 1},
 	{"runtime.panicdottypeE", 1},
 	{"runtime.panicdottypeI", 1},
 	{"runtime.panicnildottype", 1},
+	{"runtime.typeAssert", 1},
+	{"runtime.interfaceSwitch", 1},
 	{"runtime.ifaceeq", 1},
 	{"runtime.efaceeq", 1},
-	{"runtime.fastrand", 1},
+	{"runtime.panicrangeexit", 1},
+	{"runtime.deferrangefunc", 1},
+	{"runtime.rand32", 1},
 	{"runtime.makemap64", 1},
 	{"runtime.makemap", 1},
 	{"runtime.makemap_small", 1},
@@ -134,7 +135,6 @@
 	{"runtime.unsafestringcheckptr", 1},
 	{"runtime.panicunsafestringlen", 1},
 	{"runtime.panicunsafestringnilptr", 1},
-	{"runtime.mulUintptr", 1},
 	{"runtime.memmove", 1},
 	{"runtime.memclrNoHeapPointers", 1},
 	{"runtime.memclrHasPointers", 1},
@@ -210,6 +210,7 @@
 	{"runtime.x86HasFMA", 0},
 	{"runtime.armHasVFPv4", 0},
 	{"runtime.arm64HasATOMICS", 0},
+	{"runtime.asanregisterglobals", 1},
 	{"runtime.deferproc", 1},
 	{"runtime.deferprocStack", 1},
 	{"runtime.deferreturn", 1},
diff --git a/src/cmd/internal/goobj/mkbuiltin.go b/src/cmd/internal/goobj/mkbuiltin.go
index aefb19b..5ddf0e7 100644
--- a/src/cmd/internal/goobj/mkbuiltin.go
+++ b/src/cmd/internal/goobj/mkbuiltin.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ignore
-// +build ignore
 
 // Generate builtinlist.go from cmd/compile/internal/typecheck/builtin/runtime.go.
 
diff --git a/src/cmd/internal/goobj/objfile.go b/src/cmd/internal/goobj/objfile.go
index c9d7ca4..6c0f5e6 100644
--- a/src/cmd/internal/goobj/objfile.go
+++ b/src/cmd/internal/goobj/objfile.go
@@ -23,7 +23,6 @@
 	"encoding/binary"
 	"errors"
 	"fmt"
-	"internal/unsafeheader"
 	"unsafe"
 )
 
@@ -245,7 +244,7 @@
 }
 
 func (h *Header) Size() int {
-	return len(h.Magic) + 4 + 4*len(h.Offsets)
+	return len(h.Magic) + len(h.Fingerprint) + 4 + 4*len(h.Offsets)
 }
 
 // Autolib
@@ -662,13 +661,7 @@
 	if len(b) == 0 {
 		return ""
 	}
-
-	var s string
-	hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
-	hdr.Data = unsafe.Pointer(&b[0])
-	hdr.Len = len(b)
-
-	return s
+	return unsafe.String(&b[0], len(b))
 }
 
 func (r *Reader) StringRef(off uint32) string {
diff --git a/src/cmd/internal/metadata/main.go b/src/cmd/internal/metadata/main.go
index 7478eec..af46c89 100644
--- a/src/cmd/internal/metadata/main.go
+++ b/src/cmd/internal/metadata/main.go
@@ -22,7 +22,7 @@
 
 func main() {
 	fmt.Printf("# GOARCH: %s\n", runtime.GOARCH)
-	fmt.Printf("# CPU: %s\n", sysinfo.CPU.Name())
+	fmt.Printf("# CPU: %s\n", sysinfo.CPUName())
 
 	fmt.Printf("# GOOS: %s\n", runtime.GOOS)
 	ver, err := osinfo.Version()
diff --git a/src/cmd/internal/moddeps/moddeps_test.go b/src/cmd/internal/moddeps/moddeps_test.go
index ae890b6..3d4c99e 100644
--- a/src/cmd/internal/moddeps/moddeps_test.go
+++ b/src/cmd/internal/moddeps/moddeps_test.go
@@ -443,7 +443,14 @@
 	goBin := testenv.GoToolPath(t)
 
 	goroot.once.Do(func() {
-		goroot.err = filepath.WalkDir(testenv.GOROOT(t), func(path string, info fs.DirEntry, err error) error {
+		// If the root itself is a symlink to a directory,
+		// we want to follow it (see https://go.dev/issue/64375).
+		// Add a trailing separator to force that to happen.
+		root := testenv.GOROOT(t)
+		if !os.IsPathSeparator(root[len(root)-1]) {
+			root += string(filepath.Separator)
+		}
+		goroot.err = filepath.WalkDir(root, func(path string, info fs.DirEntry, err error) error {
 			if err != nil {
 				return err
 			}
diff --git a/src/cmd/internal/notsha256/sha256block_386.s b/src/cmd/internal/notsha256/sha256block_386.s
index f2ba7d7..0e27fa0 100644
--- a/src/cmd/internal/notsha256/sha256block_386.s
+++ b/src/cmd/internal/notsha256/sha256block_386.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !purego
-// +build !purego
 
 // SHA256 block routine. See sha256block.go for Go equivalent.
 //
diff --git a/src/cmd/internal/notsha256/sha256block_amd64.go b/src/cmd/internal/notsha256/sha256block_amd64.go
index 27b84a8..6a615e0 100644
--- a/src/cmd/internal/notsha256/sha256block_amd64.go
+++ b/src/cmd/internal/notsha256/sha256block_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !purego
-// +build !purego
 
 package notsha256
 
diff --git a/src/cmd/internal/notsha256/sha256block_amd64.s b/src/cmd/internal/notsha256/sha256block_amd64.s
index 36ea744..b25c979 100644
--- a/src/cmd/internal/notsha256/sha256block_amd64.s
+++ b/src/cmd/internal/notsha256/sha256block_amd64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !purego
-// +build !purego
 
 #include "textflag.h"
 
diff --git a/src/cmd/internal/notsha256/sha256block_decl.go b/src/cmd/internal/notsha256/sha256block_decl.go
index da66bdd..ab3f7d2 100644
--- a/src/cmd/internal/notsha256/sha256block_decl.go
+++ b/src/cmd/internal/notsha256/sha256block_decl.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !purego && (386 || amd64 || ppc64le || ppc64)
-// +build !purego
-// +build 386 amd64 ppc64le ppc64
 
 package notsha256
 
diff --git a/src/cmd/internal/notsha256/sha256block_generic.go b/src/cmd/internal/notsha256/sha256block_generic.go
index 2664722..76d5372 100644
--- a/src/cmd/internal/notsha256/sha256block_generic.go
+++ b/src/cmd/internal/notsha256/sha256block_generic.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build purego || (!amd64 && !386 && !ppc64le && !ppc64)
-// +build purego !amd64,!386,!ppc64le,!ppc64
 
 package notsha256
 
diff --git a/src/cmd/internal/notsha256/sha256block_ppc64x.s b/src/cmd/internal/notsha256/sha256block_ppc64x.s
index ea4417d..93e8550 100644
--- a/src/cmd/internal/notsha256/sha256block_ppc64x.s
+++ b/src/cmd/internal/notsha256/sha256block_ppc64x.s
@@ -9,8 +9,6 @@
 //
 
 //go:build !purego && (ppc64 || ppc64le)
-// +build !purego
-// +build ppc64 ppc64le
 
 // Based on CRYPTOGAMS code with the following comment:
 // # ====================================================================
diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go
index 177ffd9..4e6eff9 100644
--- a/src/cmd/internal/obj/arm/asm5.go
+++ b/src/cmd/internal/obj/arm/asm5.go
@@ -979,7 +979,7 @@
 			if immrot(^uint32(c.instoffset)) != 0 {
 				return C_NCON
 			}
-			if uint32(c.instoffset) <= 0xffff && buildcfg.GOARM == 7 {
+			if uint32(c.instoffset) <= 0xffff && buildcfg.GOARM.Version == 7 {
 				return C_SCON
 			}
 			if x, y := immrot2a(uint32(c.instoffset)); x != 0 && y != 0 {
@@ -1099,6 +1099,32 @@
 		fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
 	}
 
+	if (p.As == ASRL || p.As == ASRA) && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
+		// Right shifts are weird - a shift that looks like "shift by constant 0" actually
+		// means "shift by constant 32". Use left shift in this situation instead.
+		// See issue 64715.
+		// TODO: rotate by 0? Not currently supported, but if we ever do then include it here.
+		p.As = ASLL
+	}
+	if p.As != AMOVB && p.As != AMOVBS && p.As != AMOVBU && p.As != AMOVH && p.As != AMOVHS && p.As != AMOVHU && p.As != AXTAB && p.As != AXTABU && p.As != AXTAH && p.As != AXTAHU {
+		// Same here, but for shifts encoded in Addrs.
+		// Don't do it for the extension ops, which
+		// need to keep their RR shifts.
+		fixShift := func(a *obj.Addr) {
+			if a.Type == obj.TYPE_SHIFT {
+				typ := a.Offset & SHIFT_RR
+				isConst := a.Offset&(1<<4) == 0
+				amount := a.Offset >> 7 & 0x1f
+				if isConst && amount == 0 && (typ == SHIFT_LR || typ == SHIFT_AR || typ == SHIFT_RR) {
+					a.Offset -= typ
+					a.Offset += SHIFT_LL
+				}
+			}
+		}
+		fixShift(&p.From)
+		fixShift(&p.To)
+	}
+
 	ops := oprange[p.As&obj.AMask]
 	c1 := &xcmp[a1]
 	c3 := &xcmp[a3]
@@ -3044,16 +3070,16 @@
 }
 
 func (c *ctxt5) chipzero5(e float64) int {
-	// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
-	if buildcfg.GOARM < 7 || math.Float64bits(e) != 0 {
+	// We use GOARM.Version=7 and !GOARM.SoftFloat to gate the use of VFPv3 vmov (imm) instructions.
+	if buildcfg.GOARM.Version < 7 || buildcfg.GOARM.SoftFloat || math.Float64bits(e) != 0 {
 		return -1
 	}
 	return 0
 }
 
 func (c *ctxt5) chipfloat5(e float64) int {
-	// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
-	if buildcfg.GOARM < 7 {
+	// We use GOARM.Version=7 and !GOARM.SoftFloat to gate the use of VFPv3 vmov (imm) instructions.
+	if buildcfg.GOARM.Version < 7 || buildcfg.GOARM.SoftFloat {
 		return -1
 	}
 
diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go
index fb7c260..def4f52 100644
--- a/src/cmd/internal/obj/arm/obj5.go
+++ b/src/cmd/internal/obj/arm/obj5.go
@@ -66,7 +66,7 @@
 				ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line())
 			}
 
-			if buildcfg.GOARM < 7 {
+			if buildcfg.GOARM.Version < 7 {
 				// Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension.
 				if progedit_tlsfallback == nil {
 					progedit_tlsfallback = ctxt.Lookup("runtime.read_tls_fallback")
diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go
index fc170e7..39b9f16 100644
--- a/src/cmd/internal/obj/arm64/a.out.go
+++ b/src/cmd/internal/obj/arm64/a.out.go
@@ -1,5 +1,5 @@
 // cmd/7c/7.out.h  from Vita Nuova.
-// https://code.google.com/p/ken-cc/source/browse/src/cmd/7c/7.out.h
+// https://bitbucket.org/plan9-from-bell-labs/9-cc/src/master/src/cmd/7c/7.out.h
 //
 // 	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
 // 	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
@@ -341,21 +341,21 @@
 	// optab is sorted based on the order of these constants
 	// and the first match is chosen.
 	// The more specific class needs to come earlier.
-	C_NONE   = iota
-	C_REG    // R0..R30
-	C_ZREG   // R0..R30, ZR
-	C_RSP    // R0..R30, RSP
-	C_FREG   // F0..F31
-	C_VREG   // V0..V31
-	C_PAIR   // (Rn, Rm)
-	C_SHIFT  // Rn<<2
-	C_EXTREG // Rn.UXTB[<<3]
-	C_SPR    // REG_NZCV
-	C_COND   // condition code, EQ, NE, etc.
-	C_SPOP   // special operand, PLDL1KEEP, VMALLE1IS, etc.
-	C_ARNG   // Vn.<T>
-	C_ELEM   // Vn.<T>[index]
-	C_LIST   // [V1, V2, V3]
+	C_NONE   = iota + 1 // starting from 1, leave unclassified Addr's class as 0
+	C_REG               // R0..R30
+	C_ZREG              // R0..R30, ZR
+	C_RSP               // R0..R30, RSP
+	C_FREG              // F0..F31
+	C_VREG              // V0..V31
+	C_PAIR              // (Rn, Rm)
+	C_SHIFT             // Rn<<2
+	C_EXTREG            // Rn.UXTB[<<3]
+	C_SPR               // REG_NZCV
+	C_COND              // condition code, EQ, NE, etc.
+	C_SPOP              // special operand, PLDL1KEEP, VMALLE1IS, etc.
+	C_ARNG              // Vn.<T>
+	C_ELEM              // Vn.<T>[index]
+	C_LIST              // [V1, V2, V3]
 
 	C_ZCON     // $0
 	C_ABCON0   // could be C_ADDCON0 or C_BITCON
@@ -414,7 +414,8 @@
 	C_UAUTO32K_16 // 0 to 32760, 0 mod 16 + C_PSAUTO
 	C_UAUTO32K    // 0 to 32760, 0 mod 8 + C_PSAUTO
 	C_UAUTO64K    // 0 to 65520, 0 mod 16 + C_PSAUTO
-	C_LAUTO       // any other 32-bit constant
+	C_LAUTOPOOL   // any other constant up to 64 bits (needs pool literal)
+	C_LAUTO       // any other constant up to 64 bits
 
 	C_SEXT1  // 0 to 4095, direct
 	C_SEXT2  // 0 to 8190
@@ -454,6 +455,7 @@
 	C_UOREG32K_16
 	C_UOREG32K
 	C_UOREG64K
+	C_LOREGPOOL
 	C_LOREG
 
 	C_ADDR // TODO(aram): explain difference from C_VCONADDR
diff --git a/src/cmd/internal/obj/arm64/anames7.go b/src/cmd/internal/obj/arm64/anames7.go
index 2f20dfe..5f2e3a6 100644
--- a/src/cmd/internal/obj/arm64/anames7.go
+++ b/src/cmd/internal/obj/arm64/anames7.go
@@ -6,6 +6,7 @@
 
 // This order should be strictly consistent to that in a.out.go
 var cnames7 = []string{
+	"", // C_NONE starts from 1
 	"NONE",
 	"REG",
 	"ZREG",
@@ -74,6 +75,7 @@
 	"UAUTO32K_8",
 	"UAUTO32K",
 	"UAUTO64K",
+	"LAUTOPOOL",
 	"LAUTO",
 	"SEXT1",
 	"SEXT2",
@@ -112,6 +114,7 @@
 	"UOREG32K_16",
 	"UOREG32K",
 	"UOREG64K",
+	"LOREGPOOL",
 	"LOREG",
 	"ADDR",
 	"GOTADDR",
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index ff8daad..03f0fb0 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -1,5 +1,5 @@
 // cmd/7l/asm.c, cmd/7l/asmout.c, cmd/7l/optab.c, cmd/7l/span.c, cmd/ld/sub.c, cmd/ld/mod.c, from Vita Nuova.
-// https://code.google.com/p/ken-cc/source/browse/
+// https://bitbucket.org/plan9-from-bell-labs/9-cc/src/master/
 //
 // 	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
 // 	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
@@ -69,15 +69,16 @@
 
 type Optab struct {
 	as    obj.As
-	a1    uint8
-	a2    uint8
-	a3    uint8
-	a4    uint8
+	a1    uint8 // Prog.From
+	a2    uint8 // 2nd source operand, Prog.Reg or Prog.RestArgs[XXX]
+	a3    uint8 // 3rd source operand, Prog.RestArgs[XXX]
+	a4    uint8 // Prog.To
+	a5    uint8 // 2nd destination operand, Prog.RegTo2 or Prog.RestArgs[XXX]
 	type_ int8
 	size_ int8 // the value of this field is not static, use the size() method to return the value
 	param int16
 	flag  int8
-	scond uint16
+	scond uint8
 }
 
 func IsAtomicInstruction(as obj.As) bool {
@@ -281,7 +282,6 @@
 const (
 	// Optab.flag
 	LFROM        = 1 << iota // p.From uses constant pool
-	LFROM128                 // p.From3<<64+p.From forms a 128-bit constant in literal pool
 	LTO                      // p.To uses constant pool
 	NOTUSETMP                // p expands to multiple instructions, but does NOT use REGTMP
 	BRANCH14BITS             // branch instruction encodes 14 bits
@@ -290,571 +290,608 @@
 
 var optab = []Optab{
 	/* struct Optab:
-	OPCODE, from, prog->reg, from3, to, type,size,param,flag,scond */
-	{obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0},
+	OPCODE, from, prog->reg, from3, to, to2, type,size,param,flag,scond */
+	{obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, C_NONE, 0, 0, 0, 0, 0},
 
 	/* arithmetic operations */
-	{AADD, C_ZREG, C_ZREG, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{AADD, C_ZREG, C_NONE, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{AADC, C_ZREG, C_ZREG, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{AADC, C_ZREG, C_NONE, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{ANEG, C_ZREG, C_NONE, C_NONE, C_ZREG, 25, 4, 0, 0, 0},
-	{ANEG, C_NONE, C_NONE, C_NONE, C_ZREG, 25, 4, 0, 0, 0},
-	{ANGC, C_ZREG, C_NONE, C_NONE, C_ZREG, 17, 4, 0, 0, 0},
-	{ACMP, C_ZREG, C_ZREG, C_NONE, C_NONE, 1, 4, 0, 0, 0},
-	{AADD, C_ADDCON, C_RSP, C_NONE, C_RSP, 2, 4, 0, 0, 0},
-	{AADD, C_ADDCON, C_NONE, C_NONE, C_RSP, 2, 4, 0, 0, 0},
-	{ACMP, C_ADDCON, C_RSP, C_NONE, C_NONE, 2, 4, 0, 0, 0},
-	{AADD, C_MOVCON, C_RSP, C_NONE, C_RSP, 62, 8, 0, 0, 0},
-	{AADD, C_MOVCON, C_NONE, C_NONE, C_RSP, 62, 8, 0, 0, 0},
-	{ACMP, C_MOVCON, C_RSP, C_NONE, C_NONE, 62, 8, 0, 0, 0},
-	{AADD, C_BITCON, C_RSP, C_NONE, C_RSP, 62, 8, 0, 0, 0},
-	{AADD, C_BITCON, C_NONE, C_NONE, C_RSP, 62, 8, 0, 0, 0},
-	{ACMP, C_BITCON, C_RSP, C_NONE, C_NONE, 62, 8, 0, 0, 0},
-	{AADD, C_ADDCON2, C_RSP, C_NONE, C_RSP, 48, 8, 0, NOTUSETMP, 0},
-	{AADD, C_ADDCON2, C_NONE, C_NONE, C_RSP, 48, 8, 0, NOTUSETMP, 0},
-	{AADD, C_MOVCON2, C_RSP, C_NONE, C_RSP, 13, 12, 0, 0, 0},
-	{AADD, C_MOVCON2, C_NONE, C_NONE, C_RSP, 13, 12, 0, 0, 0},
-	{AADD, C_MOVCON3, C_RSP, C_NONE, C_RSP, 13, 16, 0, 0, 0},
-	{AADD, C_MOVCON3, C_NONE, C_NONE, C_RSP, 13, 16, 0, 0, 0},
-	{AADD, C_VCON, C_RSP, C_NONE, C_RSP, 13, 20, 0, 0, 0},
-	{AADD, C_VCON, C_NONE, C_NONE, C_RSP, 13, 20, 0, 0, 0},
-	{ACMP, C_MOVCON2, C_ZREG, C_NONE, C_NONE, 13, 12, 0, 0, 0},
-	{ACMP, C_MOVCON3, C_ZREG, C_NONE, C_NONE, 13, 16, 0, 0, 0},
-	{ACMP, C_VCON, C_ZREG, C_NONE, C_NONE, 13, 20, 0, 0, 0},
-	{AADD, C_SHIFT, C_ZREG, C_NONE, C_ZREG, 3, 4, 0, 0, 0},
-	{AADD, C_SHIFT, C_NONE, C_NONE, C_ZREG, 3, 4, 0, 0, 0},
-	{AMVN, C_SHIFT, C_NONE, C_NONE, C_ZREG, 3, 4, 0, 0, 0},
-	{ACMP, C_SHIFT, C_ZREG, C_NONE, C_NONE, 3, 4, 0, 0, 0},
-	{ANEG, C_SHIFT, C_NONE, C_NONE, C_ZREG, 3, 4, 0, 0, 0},
-	{AADD, C_ZREG, C_RSP, C_NONE, C_RSP, 27, 4, 0, 0, 0},
-	{AADD, C_ZREG, C_NONE, C_NONE, C_RSP, 27, 4, 0, 0, 0},
-	{ACMP, C_ZREG, C_RSP, C_NONE, C_NONE, 27, 4, 0, 0, 0},
-	{AADD, C_EXTREG, C_RSP, C_NONE, C_RSP, 27, 4, 0, 0, 0},
-	{AADD, C_EXTREG, C_NONE, C_NONE, C_RSP, 27, 4, 0, 0, 0},
-	{ACMP, C_EXTREG, C_RSP, C_NONE, C_NONE, 27, 4, 0, 0, 0},
-	{AADD, C_ZREG, C_ZREG, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{AADD, C_ZREG, C_NONE, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{AMUL, C_ZREG, C_ZREG, C_NONE, C_ZREG, 15, 4, 0, 0, 0},
-	{AMUL, C_ZREG, C_NONE, C_NONE, C_ZREG, 15, 4, 0, 0, 0},
-	{AMADD, C_ZREG, C_ZREG, C_ZREG, C_ZREG, 15, 4, 0, 0, 0},
-	{AREM, C_ZREG, C_ZREG, C_NONE, C_ZREG, 16, 8, 0, 0, 0},
-	{AREM, C_ZREG, C_NONE, C_NONE, C_ZREG, 16, 8, 0, 0, 0},
-	{ASDIV, C_ZREG, C_NONE, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{ASDIV, C_ZREG, C_ZREG, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
+	{AADD, C_ZREG, C_ZREG, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{AADD, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{AADC, C_ZREG, C_ZREG, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{AADC, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{ANEG, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 25, 4, 0, 0, 0},
+	{ANEG, C_NONE, C_NONE, C_NONE, C_ZREG, C_NONE, 25, 4, 0, 0, 0},
+	{ANGC, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 17, 4, 0, 0, 0},
+	{ACMP, C_ZREG, C_ZREG, C_NONE, C_NONE, C_NONE, 1, 4, 0, 0, 0},
+	{AADD, C_ADDCON, C_RSP, C_NONE, C_RSP, C_NONE, 2, 4, 0, 0, 0},
+	{AADD, C_ADDCON, C_NONE, C_NONE, C_RSP, C_NONE, 2, 4, 0, 0, 0},
+	{ACMP, C_ADDCON, C_RSP, C_NONE, C_NONE, C_NONE, 2, 4, 0, 0, 0},
+	{AADD, C_MOVCON, C_RSP, C_NONE, C_RSP, C_NONE, 62, 8, 0, 0, 0},
+	{AADD, C_MOVCON, C_NONE, C_NONE, C_RSP, C_NONE, 62, 8, 0, 0, 0},
+	{ACMP, C_MOVCON, C_RSP, C_NONE, C_NONE, C_NONE, 62, 8, 0, 0, 0},
+	{AADD, C_BITCON, C_RSP, C_NONE, C_RSP, C_NONE, 62, 8, 0, 0, 0},
+	{AADD, C_BITCON, C_NONE, C_NONE, C_RSP, C_NONE, 62, 8, 0, 0, 0},
+	{ACMP, C_BITCON, C_RSP, C_NONE, C_NONE, C_NONE, 62, 8, 0, 0, 0},
+	{AADD, C_ADDCON2, C_RSP, C_NONE, C_RSP, C_NONE, 48, 8, 0, NOTUSETMP, 0},
+	{AADD, C_ADDCON2, C_NONE, C_NONE, C_RSP, C_NONE, 48, 8, 0, NOTUSETMP, 0},
+	{AADD, C_MOVCON2, C_RSP, C_NONE, C_RSP, C_NONE, 13, 12, 0, 0, 0},
+	{AADD, C_MOVCON2, C_NONE, C_NONE, C_RSP, C_NONE, 13, 12, 0, 0, 0},
+	{AADD, C_MOVCON3, C_RSP, C_NONE, C_RSP, C_NONE, 13, 16, 0, 0, 0},
+	{AADD, C_MOVCON3, C_NONE, C_NONE, C_RSP, C_NONE, 13, 16, 0, 0, 0},
+	{AADD, C_VCON, C_RSP, C_NONE, C_RSP, C_NONE, 13, 20, 0, 0, 0},
+	{AADD, C_VCON, C_NONE, C_NONE, C_RSP, C_NONE, 13, 20, 0, 0, 0},
+	{ACMP, C_MOVCON2, C_ZREG, C_NONE, C_NONE, C_NONE, 13, 12, 0, 0, 0},
+	{ACMP, C_MOVCON3, C_ZREG, C_NONE, C_NONE, C_NONE, 13, 16, 0, 0, 0},
+	{ACMP, C_VCON, C_ZREG, C_NONE, C_NONE, C_NONE, 13, 20, 0, 0, 0},
+	{AADD, C_SHIFT, C_ZREG, C_NONE, C_ZREG, C_NONE, 3, 4, 0, 0, 0},
+	{AADD, C_SHIFT, C_NONE, C_NONE, C_ZREG, C_NONE, 3, 4, 0, 0, 0},
+	{AMVN, C_SHIFT, C_NONE, C_NONE, C_ZREG, C_NONE, 3, 4, 0, 0, 0},
+	{ACMP, C_SHIFT, C_ZREG, C_NONE, C_NONE, C_NONE, 3, 4, 0, 0, 0},
+	{ANEG, C_SHIFT, C_NONE, C_NONE, C_ZREG, C_NONE, 3, 4, 0, 0, 0},
+	{AADD, C_ZREG, C_RSP, C_NONE, C_RSP, C_NONE, 27, 4, 0, 0, 0},
+	{AADD, C_ZREG, C_NONE, C_NONE, C_RSP, C_NONE, 27, 4, 0, 0, 0},
+	{ACMP, C_ZREG, C_RSP, C_NONE, C_NONE, C_NONE, 27, 4, 0, 0, 0},
+	{AADD, C_EXTREG, C_RSP, C_NONE, C_RSP, C_NONE, 27, 4, 0, 0, 0},
+	{AADD, C_EXTREG, C_NONE, C_NONE, C_RSP, C_NONE, 27, 4, 0, 0, 0},
+	{ACMP, C_EXTREG, C_RSP, C_NONE, C_NONE, C_NONE, 27, 4, 0, 0, 0},
+	{AADD, C_ZREG, C_ZREG, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{AADD, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{AMUL, C_ZREG, C_ZREG, C_NONE, C_ZREG, C_NONE, 15, 4, 0, 0, 0},
+	{AMUL, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 15, 4, 0, 0, 0},
+	{AMADD, C_ZREG, C_ZREG, C_ZREG, C_ZREG, C_NONE, 15, 4, 0, 0, 0},
+	{AREM, C_ZREG, C_ZREG, C_NONE, C_ZREG, C_NONE, 16, 8, 0, 0, 0},
+	{AREM, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 16, 8, 0, 0, 0},
+	{ASDIV, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{ASDIV, C_ZREG, C_ZREG, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
 
-	{AFADDS, C_FREG, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AFADDS, C_FREG, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AFMSUBD, C_FREG, C_FREG, C_FREG, C_FREG, 15, 4, 0, 0, 0},
-	{AFCMPS, C_FREG, C_FREG, C_NONE, C_NONE, 56, 4, 0, 0, 0},
-	{AFCMPS, C_FCON, C_FREG, C_NONE, C_NONE, 56, 4, 0, 0, 0},
-	{AVADDP, C_ARNG, C_ARNG, C_NONE, C_ARNG, 72, 4, 0, 0, 0},
-	{AVADD, C_ARNG, C_ARNG, C_NONE, C_ARNG, 72, 4, 0, 0, 0},
-	{AVADD, C_VREG, C_VREG, C_NONE, C_VREG, 89, 4, 0, 0, 0},
-	{AVADD, C_VREG, C_NONE, C_NONE, C_VREG, 89, 4, 0, 0, 0},
-	{AVADDV, C_ARNG, C_NONE, C_NONE, C_VREG, 85, 4, 0, 0, 0},
+	{AFADDS, C_FREG, C_NONE, C_NONE, C_FREG, C_NONE, 54, 4, 0, 0, 0},
+	{AFADDS, C_FREG, C_FREG, C_NONE, C_FREG, C_NONE, 54, 4, 0, 0, 0},
+	{AFMSUBD, C_FREG, C_FREG, C_FREG, C_FREG, C_NONE, 15, 4, 0, 0, 0},
+	{AFCMPS, C_FREG, C_FREG, C_NONE, C_NONE, C_NONE, 56, 4, 0, 0, 0},
+	{AFCMPS, C_FCON, C_FREG, C_NONE, C_NONE, C_NONE, 56, 4, 0, 0, 0},
+	{AVADDP, C_ARNG, C_ARNG, C_NONE, C_ARNG, C_NONE, 72, 4, 0, 0, 0},
+	{AVADD, C_ARNG, C_ARNG, C_NONE, C_ARNG, C_NONE, 72, 4, 0, 0, 0},
+	{AVADD, C_VREG, C_VREG, C_NONE, C_VREG, C_NONE, 89, 4, 0, 0, 0},
+	{AVADD, C_VREG, C_NONE, C_NONE, C_VREG, C_NONE, 89, 4, 0, 0, 0},
+	{AVADDV, C_ARNG, C_NONE, C_NONE, C_VREG, C_NONE, 85, 4, 0, 0, 0},
 
 	/* logical operations */
-	{AAND, C_ZREG, C_ZREG, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{AAND, C_ZREG, C_NONE, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{AANDS, C_ZREG, C_ZREG, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{AANDS, C_ZREG, C_NONE, C_NONE, C_ZREG, 1, 4, 0, 0, 0},
-	{ATST, C_ZREG, C_ZREG, C_NONE, C_NONE, 1, 4, 0, 0, 0},
-	{AAND, C_MBCON, C_ZREG, C_NONE, C_RSP, 53, 4, 0, 0, 0},
-	{AAND, C_MBCON, C_NONE, C_NONE, C_RSP, 53, 4, 0, 0, 0},
-	{AANDS, C_MBCON, C_ZREG, C_NONE, C_ZREG, 53, 4, 0, 0, 0},
-	{AANDS, C_MBCON, C_NONE, C_NONE, C_ZREG, 53, 4, 0, 0, 0},
-	{ATST, C_MBCON, C_ZREG, C_NONE, C_NONE, 53, 4, 0, 0, 0},
-	{AAND, C_BITCON, C_ZREG, C_NONE, C_RSP, 53, 4, 0, 0, 0},
-	{AAND, C_BITCON, C_NONE, C_NONE, C_RSP, 53, 4, 0, 0, 0},
-	{AANDS, C_BITCON, C_ZREG, C_NONE, C_ZREG, 53, 4, 0, 0, 0},
-	{AANDS, C_BITCON, C_NONE, C_NONE, C_ZREG, 53, 4, 0, 0, 0},
-	{ATST, C_BITCON, C_ZREG, C_NONE, C_NONE, 53, 4, 0, 0, 0},
-	{AAND, C_MOVCON, C_ZREG, C_NONE, C_ZREG, 62, 8, 0, 0, 0},
-	{AAND, C_MOVCON, C_NONE, C_NONE, C_ZREG, 62, 8, 0, 0, 0},
-	{AANDS, C_MOVCON, C_ZREG, C_NONE, C_ZREG, 62, 8, 0, 0, 0},
-	{AANDS, C_MOVCON, C_NONE, C_NONE, C_ZREG, 62, 8, 0, 0, 0},
-	{ATST, C_MOVCON, C_ZREG, C_NONE, C_NONE, 62, 8, 0, 0, 0},
-	{AAND, C_MOVCON2, C_ZREG, C_NONE, C_ZREG, 28, 12, 0, 0, 0},
-	{AAND, C_MOVCON2, C_NONE, C_NONE, C_ZREG, 28, 12, 0, 0, 0},
-	{AAND, C_MOVCON3, C_ZREG, C_NONE, C_ZREG, 28, 16, 0, 0, 0},
-	{AAND, C_MOVCON3, C_NONE, C_NONE, C_ZREG, 28, 16, 0, 0, 0},
-	{AAND, C_VCON, C_ZREG, C_NONE, C_ZREG, 28, 20, 0, 0, 0},
-	{AAND, C_VCON, C_NONE, C_NONE, C_ZREG, 28, 20, 0, 0, 0},
-	{AANDS, C_MOVCON2, C_ZREG, C_NONE, C_ZREG, 28, 12, 0, 0, 0},
-	{AANDS, C_MOVCON2, C_NONE, C_NONE, C_ZREG, 28, 12, 0, 0, 0},
-	{AANDS, C_MOVCON3, C_ZREG, C_NONE, C_ZREG, 28, 16, 0, 0, 0},
-	{AANDS, C_MOVCON3, C_NONE, C_NONE, C_ZREG, 28, 16, 0, 0, 0},
-	{AANDS, C_VCON, C_ZREG, C_NONE, C_ZREG, 28, 20, 0, 0, 0},
-	{AANDS, C_VCON, C_NONE, C_NONE, C_ZREG, 28, 20, 0, 0, 0},
-	{ATST, C_MOVCON2, C_ZREG, C_NONE, C_NONE, 28, 12, 0, 0, 0},
-	{ATST, C_MOVCON3, C_ZREG, C_NONE, C_NONE, 28, 16, 0, 0, 0},
-	{ATST, C_VCON, C_ZREG, C_NONE, C_NONE, 28, 20, 0, 0, 0},
-	{AAND, C_SHIFT, C_ZREG, C_NONE, C_ZREG, 3, 4, 0, 0, 0},
-	{AAND, C_SHIFT, C_NONE, C_NONE, C_ZREG, 3, 4, 0, 0, 0},
-	{AANDS, C_SHIFT, C_ZREG, C_NONE, C_ZREG, 3, 4, 0, 0, 0},
-	{AANDS, C_SHIFT, C_NONE, C_NONE, C_ZREG, 3, 4, 0, 0, 0},
-	{ATST, C_SHIFT, C_ZREG, C_NONE, C_NONE, 3, 4, 0, 0, 0},
-	{AMOVD, C_RSP, C_NONE, C_NONE, C_RSP, 24, 4, 0, 0, 0},
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_ZREG, 24, 4, 0, 0, 0},
-	{AMVN, C_ZREG, C_NONE, C_NONE, C_ZREG, 24, 4, 0, 0, 0},
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_ZREG, 45, 4, 0, 0, 0}, /* also MOVBU */
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_ZREG, 45, 4, 0, 0, 0}, /* also MOVHU */
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_ZREG, 45, 4, 0, 0, 0}, /* also MOVWU */
+	{AAND, C_ZREG, C_ZREG, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{AAND, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{AANDS, C_ZREG, C_ZREG, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{AANDS, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 1, 4, 0, 0, 0},
+	{ATST, C_ZREG, C_ZREG, C_NONE, C_NONE, C_NONE, 1, 4, 0, 0, 0},
+	{AAND, C_MBCON, C_ZREG, C_NONE, C_RSP, C_NONE, 53, 4, 0, 0, 0},
+	{AAND, C_MBCON, C_NONE, C_NONE, C_RSP, C_NONE, 53, 4, 0, 0, 0},
+	{AANDS, C_MBCON, C_ZREG, C_NONE, C_ZREG, C_NONE, 53, 4, 0, 0, 0},
+	{AANDS, C_MBCON, C_NONE, C_NONE, C_ZREG, C_NONE, 53, 4, 0, 0, 0},
+	{ATST, C_MBCON, C_ZREG, C_NONE, C_NONE, C_NONE, 53, 4, 0, 0, 0},
+	{AAND, C_BITCON, C_ZREG, C_NONE, C_RSP, C_NONE, 53, 4, 0, 0, 0},
+	{AAND, C_BITCON, C_NONE, C_NONE, C_RSP, C_NONE, 53, 4, 0, 0, 0},
+	{AANDS, C_BITCON, C_ZREG, C_NONE, C_ZREG, C_NONE, 53, 4, 0, 0, 0},
+	{AANDS, C_BITCON, C_NONE, C_NONE, C_ZREG, C_NONE, 53, 4, 0, 0, 0},
+	{ATST, C_BITCON, C_ZREG, C_NONE, C_NONE, C_NONE, 53, 4, 0, 0, 0},
+	{AAND, C_MOVCON, C_ZREG, C_NONE, C_ZREG, C_NONE, 62, 8, 0, 0, 0},
+	{AAND, C_MOVCON, C_NONE, C_NONE, C_ZREG, C_NONE, 62, 8, 0, 0, 0},
+	{AANDS, C_MOVCON, C_ZREG, C_NONE, C_ZREG, C_NONE, 62, 8, 0, 0, 0},
+	{AANDS, C_MOVCON, C_NONE, C_NONE, C_ZREG, C_NONE, 62, 8, 0, 0, 0},
+	{ATST, C_MOVCON, C_ZREG, C_NONE, C_NONE, C_NONE, 62, 8, 0, 0, 0},
+	{AAND, C_MOVCON2, C_ZREG, C_NONE, C_ZREG, C_NONE, 28, 12, 0, 0, 0},
+	{AAND, C_MOVCON2, C_NONE, C_NONE, C_ZREG, C_NONE, 28, 12, 0, 0, 0},
+	{AAND, C_MOVCON3, C_ZREG, C_NONE, C_ZREG, C_NONE, 28, 16, 0, 0, 0},
+	{AAND, C_MOVCON3, C_NONE, C_NONE, C_ZREG, C_NONE, 28, 16, 0, 0, 0},
+	{AAND, C_VCON, C_ZREG, C_NONE, C_ZREG, C_NONE, 28, 20, 0, 0, 0},
+	{AAND, C_VCON, C_NONE, C_NONE, C_ZREG, C_NONE, 28, 20, 0, 0, 0},
+	{AANDS, C_MOVCON2, C_ZREG, C_NONE, C_ZREG, C_NONE, 28, 12, 0, 0, 0},
+	{AANDS, C_MOVCON2, C_NONE, C_NONE, C_ZREG, C_NONE, 28, 12, 0, 0, 0},
+	{AANDS, C_MOVCON3, C_ZREG, C_NONE, C_ZREG, C_NONE, 28, 16, 0, 0, 0},
+	{AANDS, C_MOVCON3, C_NONE, C_NONE, C_ZREG, C_NONE, 28, 16, 0, 0, 0},
+	{AANDS, C_VCON, C_ZREG, C_NONE, C_ZREG, C_NONE, 28, 20, 0, 0, 0},
+	{AANDS, C_VCON, C_NONE, C_NONE, C_ZREG, C_NONE, 28, 20, 0, 0, 0},
+	{ATST, C_MOVCON2, C_ZREG, C_NONE, C_NONE, C_NONE, 28, 12, 0, 0, 0},
+	{ATST, C_MOVCON3, C_ZREG, C_NONE, C_NONE, C_NONE, 28, 16, 0, 0, 0},
+	{ATST, C_VCON, C_ZREG, C_NONE, C_NONE, C_NONE, 28, 20, 0, 0, 0},
+	{AAND, C_SHIFT, C_ZREG, C_NONE, C_ZREG, C_NONE, 3, 4, 0, 0, 0},
+	{AAND, C_SHIFT, C_NONE, C_NONE, C_ZREG, C_NONE, 3, 4, 0, 0, 0},
+	{AANDS, C_SHIFT, C_ZREG, C_NONE, C_ZREG, C_NONE, 3, 4, 0, 0, 0},
+	{AANDS, C_SHIFT, C_NONE, C_NONE, C_ZREG, C_NONE, 3, 4, 0, 0, 0},
+	{ATST, C_SHIFT, C_ZREG, C_NONE, C_NONE, C_NONE, 3, 4, 0, 0, 0},
+	{AMOVD, C_RSP, C_NONE, C_NONE, C_RSP, C_NONE, 24, 4, 0, 0, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 24, 4, 0, 0, 0},
+	{AMVN, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 24, 4, 0, 0, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 45, 4, 0, 0, 0}, /* also MOVBU */
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 45, 4, 0, 0, 0}, /* also MOVHU */
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 45, 4, 0, 0, 0}, /* also MOVWU */
 	/* TODO: MVN C_SHIFT */
 
 	/* MOVs that become MOVK/MOVN/MOVZ/ADD/SUB/OR */
-	{AMOVW, C_MBCON, C_NONE, C_NONE, C_ZREG, 32, 4, 0, 0, 0},
-	{AMOVD, C_MBCON, C_NONE, C_NONE, C_ZREG, 32, 4, 0, 0, 0},
-	{AMOVW, C_MOVCON, C_NONE, C_NONE, C_ZREG, 32, 4, 0, 0, 0},
-	{AMOVD, C_MOVCON, C_NONE, C_NONE, C_ZREG, 32, 4, 0, 0, 0},
-	{AMOVW, C_BITCON, C_NONE, C_NONE, C_RSP, 32, 4, 0, 0, 0},
-	{AMOVD, C_BITCON, C_NONE, C_NONE, C_RSP, 32, 4, 0, 0, 0},
-	{AMOVW, C_MOVCON2, C_NONE, C_NONE, C_ZREG, 12, 8, 0, NOTUSETMP, 0},
-	{AMOVD, C_MOVCON2, C_NONE, C_NONE, C_ZREG, 12, 8, 0, NOTUSETMP, 0},
-	{AMOVD, C_MOVCON3, C_NONE, C_NONE, C_ZREG, 12, 12, 0, NOTUSETMP, 0},
-	{AMOVD, C_VCON, C_NONE, C_NONE, C_ZREG, 12, 16, 0, NOTUSETMP, 0},
+	{AMOVW, C_MBCON, C_NONE, C_NONE, C_ZREG, C_NONE, 32, 4, 0, 0, 0},
+	{AMOVD, C_MBCON, C_NONE, C_NONE, C_ZREG, C_NONE, 32, 4, 0, 0, 0},
+	{AMOVW, C_MOVCON, C_NONE, C_NONE, C_ZREG, C_NONE, 32, 4, 0, 0, 0},
+	{AMOVD, C_MOVCON, C_NONE, C_NONE, C_ZREG, C_NONE, 32, 4, 0, 0, 0},
+	{AMOVW, C_BITCON, C_NONE, C_NONE, C_RSP, C_NONE, 32, 4, 0, 0, 0},
+	{AMOVD, C_BITCON, C_NONE, C_NONE, C_RSP, C_NONE, 32, 4, 0, 0, 0},
+	{AMOVW, C_MOVCON2, C_NONE, C_NONE, C_ZREG, C_NONE, 12, 8, 0, NOTUSETMP, 0},
+	{AMOVD, C_MOVCON2, C_NONE, C_NONE, C_ZREG, C_NONE, 12, 8, 0, NOTUSETMP, 0},
+	{AMOVD, C_MOVCON3, C_NONE, C_NONE, C_ZREG, C_NONE, 12, 12, 0, NOTUSETMP, 0},
+	{AMOVD, C_VCON, C_NONE, C_NONE, C_ZREG, C_NONE, 12, 16, 0, NOTUSETMP, 0},
 
-	{AMOVK, C_VCON, C_NONE, C_NONE, C_ZREG, 33, 4, 0, 0, 0},
-	{AMOVD, C_AACON, C_NONE, C_NONE, C_RSP, 4, 4, REGFROM, 0, 0},
-	{AMOVD, C_AACON2, C_NONE, C_NONE, C_RSP, 4, 8, REGFROM, NOTUSETMP, 0},
+	{AMOVK, C_VCON, C_NONE, C_NONE, C_ZREG, C_NONE, 33, 4, 0, 0, 0},
+	{AMOVD, C_AACON, C_NONE, C_NONE, C_RSP, C_NONE, 4, 4, REGFROM, 0, 0},
+	{AMOVD, C_AACON2, C_NONE, C_NONE, C_RSP, C_NONE, 4, 8, REGFROM, NOTUSETMP, 0},
 
 	/* load long effective stack address (load int32 offset and add) */
-	{AMOVD, C_LACON, C_NONE, C_NONE, C_RSP, 34, 8, REGSP, LFROM, 0},
+	{AMOVD, C_LACON, C_NONE, C_NONE, C_RSP, C_NONE, 34, 8, REGSP, LFROM, 0},
 
-	// Move a large constant to a vector register.
-	{AVMOVQ, C_VCON, C_NONE, C_VCON, C_VREG, 101, 4, 0, LFROM128, 0},
-	{AVMOVD, C_VCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0},
-	{AVMOVS, C_LCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0},
+	// Load a large constant into a vector register.
+	{AVMOVS, C_ADDR, C_NONE, C_NONE, C_VREG, C_NONE, 65, 12, 0, 0, 0},
+	{AVMOVD, C_ADDR, C_NONE, C_NONE, C_VREG, C_NONE, 65, 12, 0, 0, 0},
+	{AVMOVQ, C_ADDR, C_NONE, C_NONE, C_VREG, C_NONE, 65, 12, 0, 0, 0},
 
 	/* jump operations */
-	{AB, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
-	{ABL, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
-	{AB, C_NONE, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
-	{ABL, C_NONE, C_NONE, C_NONE, C_ZREG, 6, 4, 0, 0, 0},
-	{ABL, C_NONE, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
-	{obj.ARET, C_NONE, C_NONE, C_NONE, C_ZREG, 6, 4, 0, 0, 0},
-	{obj.ARET, C_NONE, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
-	{ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 7, 4, 0, BRANCH19BITS, 0},
-	{ACBZ, C_ZREG, C_NONE, C_NONE, C_SBRA, 39, 4, 0, BRANCH19BITS, 0},
-	{ATBZ, C_VCON, C_ZREG, C_NONE, C_SBRA, 40, 4, 0, BRANCH14BITS, 0},
-	{AERET, C_NONE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0},
+	{AB, C_NONE, C_NONE, C_NONE, C_SBRA, C_NONE, 5, 4, 0, 0, 0},
+	{ABL, C_NONE, C_NONE, C_NONE, C_SBRA, C_NONE, 5, 4, 0, 0, 0},
+	{AB, C_NONE, C_NONE, C_NONE, C_ZOREG, C_NONE, 6, 4, 0, 0, 0},
+	{ABL, C_NONE, C_NONE, C_NONE, C_ZREG, C_NONE, 6, 4, 0, 0, 0},
+	{ABL, C_NONE, C_NONE, C_NONE, C_ZOREG, C_NONE, 6, 4, 0, 0, 0},
+	{obj.ARET, C_NONE, C_NONE, C_NONE, C_ZREG, C_NONE, 6, 4, 0, 0, 0},
+	{obj.ARET, C_NONE, C_NONE, C_NONE, C_ZOREG, C_NONE, 6, 4, 0, 0, 0},
+	{ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, C_NONE, 7, 4, 0, BRANCH19BITS, 0},
+	{ACBZ, C_ZREG, C_NONE, C_NONE, C_SBRA, C_NONE, 39, 4, 0, BRANCH19BITS, 0},
+	{ATBZ, C_VCON, C_ZREG, C_NONE, C_SBRA, C_NONE, 40, 4, 0, BRANCH14BITS, 0},
+	{AERET, C_NONE, C_NONE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0},
 
 	// get a PC-relative address
-	{AADRP, C_SBRA, C_NONE, C_NONE, C_ZREG, 60, 4, 0, 0, 0},
-	{AADR, C_SBRA, C_NONE, C_NONE, C_ZREG, 61, 4, 0, 0, 0},
+	{AADRP, C_SBRA, C_NONE, C_NONE, C_ZREG, C_NONE, 60, 4, 0, 0, 0},
+	{AADR, C_SBRA, C_NONE, C_NONE, C_ZREG, C_NONE, 61, 4, 0, 0, 0},
 
-	{ACLREX, C_NONE, C_NONE, C_NONE, C_VCON, 38, 4, 0, 0, 0},
-	{ACLREX, C_NONE, C_NONE, C_NONE, C_NONE, 38, 4, 0, 0, 0},
-	{ABFM, C_VCON, C_ZREG, C_VCON, C_ZREG, 42, 4, 0, 0, 0},
-	{ABFI, C_VCON, C_ZREG, C_VCON, C_ZREG, 43, 4, 0, 0, 0},
-	{AEXTR, C_VCON, C_ZREG, C_ZREG, C_ZREG, 44, 4, 0, 0, 0},
-	{ASXTB, C_ZREG, C_NONE, C_NONE, C_ZREG, 45, 4, 0, 0, 0},
-	{ACLS, C_ZREG, C_NONE, C_NONE, C_ZREG, 46, 4, 0, 0, 0},
-	{ALSL, C_VCON, C_ZREG, C_NONE, C_ZREG, 8, 4, 0, 0, 0},
-	{ALSL, C_VCON, C_NONE, C_NONE, C_ZREG, 8, 4, 0, 0, 0},
-	{ALSL, C_ZREG, C_NONE, C_NONE, C_ZREG, 9, 4, 0, 0, 0},
-	{ALSL, C_ZREG, C_ZREG, C_NONE, C_ZREG, 9, 4, 0, 0, 0},
-	{ASVC, C_VCON, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0},
-	{ASVC, C_NONE, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0},
-	{ADWORD, C_NONE, C_NONE, C_NONE, C_VCON, 11, 8, 0, NOTUSETMP, 0},
-	{ADWORD, C_NONE, C_NONE, C_NONE, C_LEXT, 11, 8, 0, NOTUSETMP, 0},
-	{ADWORD, C_NONE, C_NONE, C_NONE, C_ADDR, 11, 8, 0, NOTUSETMP, 0},
-	{ADWORD, C_NONE, C_NONE, C_NONE, C_LACON, 11, 8, 0, NOTUSETMP, 0},
-	{AWORD, C_NONE, C_NONE, C_NONE, C_LCON, 14, 4, 0, 0, 0},
-	{AWORD, C_NONE, C_NONE, C_NONE, C_LEXT, 14, 4, 0, 0, 0},
-	{AWORD, C_NONE, C_NONE, C_NONE, C_ADDR, 14, 4, 0, 0, 0},
-	{AMOVW, C_VCONADDR, C_NONE, C_NONE, C_ZREG, 68, 8, 0, NOTUSETMP, 0},
-	{AMOVD, C_VCONADDR, C_NONE, C_NONE, C_ZREG, 68, 8, 0, NOTUSETMP, 0},
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AMOVB, C_ADDR, C_NONE, C_NONE, C_ZREG, 65, 12, 0, 0, 0},
-	{AMOVH, C_ADDR, C_NONE, C_NONE, C_ZREG, 65, 12, 0, 0, 0},
-	{AMOVW, C_ADDR, C_NONE, C_NONE, C_ZREG, 65, 12, 0, 0, 0},
-	{AMOVD, C_ADDR, C_NONE, C_NONE, C_ZREG, 65, 12, 0, 0, 0},
-	{AMOVD, C_GOTADDR, C_NONE, C_NONE, C_ZREG, 71, 8, 0, 0, 0},
-	{AMOVD, C_TLS_LE, C_NONE, C_NONE, C_ZREG, 69, 4, 0, 0, 0},
-	{AMOVD, C_TLS_IE, C_NONE, C_NONE, C_ZREG, 70, 8, 0, 0, 0},
+	{ACLREX, C_NONE, C_NONE, C_NONE, C_VCON, C_NONE, 38, 4, 0, 0, 0},
+	{ACLREX, C_NONE, C_NONE, C_NONE, C_NONE, C_NONE, 38, 4, 0, 0, 0},
+	{ABFM, C_VCON, C_ZREG, C_VCON, C_ZREG, C_NONE, 42, 4, 0, 0, 0},
+	{ABFI, C_VCON, C_ZREG, C_VCON, C_ZREG, C_NONE, 43, 4, 0, 0, 0},
+	{AEXTR, C_VCON, C_ZREG, C_ZREG, C_ZREG, C_NONE, 44, 4, 0, 0, 0},
+	{ASXTB, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 45, 4, 0, 0, 0},
+	{ACLS, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 46, 4, 0, 0, 0},
+	{ALSL, C_VCON, C_ZREG, C_NONE, C_ZREG, C_NONE, 8, 4, 0, 0, 0},
+	{ALSL, C_VCON, C_NONE, C_NONE, C_ZREG, C_NONE, 8, 4, 0, 0, 0},
+	{ALSL, C_ZREG, C_NONE, C_NONE, C_ZREG, C_NONE, 9, 4, 0, 0, 0},
+	{ALSL, C_ZREG, C_ZREG, C_NONE, C_ZREG, C_NONE, 9, 4, 0, 0, 0},
+	{ASVC, C_VCON, C_NONE, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0},
+	{ASVC, C_NONE, C_NONE, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0},
+	{ADWORD, C_NONE, C_NONE, C_NONE, C_VCON, C_NONE, 11, 8, 0, NOTUSETMP, 0},
+	{ADWORD, C_NONE, C_NONE, C_NONE, C_LEXT, C_NONE, 11, 8, 0, NOTUSETMP, 0},
+	{ADWORD, C_NONE, C_NONE, C_NONE, C_ADDR, C_NONE, 11, 8, 0, NOTUSETMP, 0},
+	{ADWORD, C_NONE, C_NONE, C_NONE, C_LACON, C_NONE, 11, 8, 0, NOTUSETMP, 0},
+	{AWORD, C_NONE, C_NONE, C_NONE, C_LCON, C_NONE, 14, 4, 0, 0, 0},
+	{AWORD, C_NONE, C_NONE, C_NONE, C_LEXT, C_NONE, 14, 4, 0, 0, 0},
+	{AWORD, C_NONE, C_NONE, C_NONE, C_ADDR, C_NONE, 14, 4, 0, 0, 0},
+	{AMOVW, C_VCONADDR, C_NONE, C_NONE, C_ZREG, C_NONE, 68, 8, 0, NOTUSETMP, 0},
+	{AMOVD, C_VCONADDR, C_NONE, C_NONE, C_ZREG, C_NONE, 68, 8, 0, NOTUSETMP, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_ADDR, C_NONE, 64, 12, 0, 0, 0},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_ADDR, C_NONE, 64, 12, 0, 0, 0},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_ADDR, C_NONE, 64, 12, 0, 0, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_ADDR, C_NONE, 64, 12, 0, 0, 0},
+	{AMOVB, C_ADDR, C_NONE, C_NONE, C_ZREG, C_NONE, 65, 12, 0, 0, 0},
+	{AMOVH, C_ADDR, C_NONE, C_NONE, C_ZREG, C_NONE, 65, 12, 0, 0, 0},
+	{AMOVW, C_ADDR, C_NONE, C_NONE, C_ZREG, C_NONE, 65, 12, 0, 0, 0},
+	{AMOVD, C_ADDR, C_NONE, C_NONE, C_ZREG, C_NONE, 65, 12, 0, 0, 0},
+	{AMOVD, C_GOTADDR, C_NONE, C_NONE, C_ZREG, C_NONE, 71, 8, 0, 0, 0},
+	{AMOVD, C_TLS_LE, C_NONE, C_NONE, C_ZREG, C_NONE, 69, 4, 0, 0, 0},
+	{AMOVD, C_TLS_IE, C_NONE, C_NONE, C_ZREG, C_NONE, 70, 8, 0, 0, 0},
 
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AFMOVS, C_ADDR, C_NONE, C_NONE, C_FREG, 65, 12, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 65, 12, 0, 0, 0},
-	{AFMOVS, C_FCON, C_NONE, C_NONE, C_FREG, 55, 4, 0, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AFMOVD, C_FCON, C_NONE, C_NONE, C_FREG, 55, 4, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AFMOVS, C_ZREG, C_NONE, C_NONE, C_FREG, 29, 4, 0, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_ZREG, 29, 4, 0, 0, 0},
-	{AFMOVD, C_ZREG, C_NONE, C_NONE, C_FREG, 29, 4, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_ZREG, 29, 4, 0, 0, 0},
-	{AFCVTZSD, C_FREG, C_NONE, C_NONE, C_ZREG, 29, 4, 0, 0, 0},
-	{ASCVTFD, C_ZREG, C_NONE, C_NONE, C_FREG, 29, 4, 0, 0, 0},
-	{AFCVTSD, C_FREG, C_NONE, C_NONE, C_FREG, 29, 4, 0, 0, 0},
-	{AVMOV, C_ELEM, C_NONE, C_NONE, C_ZREG, 73, 4, 0, 0, 0},
-	{AVMOV, C_ELEM, C_NONE, C_NONE, C_ELEM, 92, 4, 0, 0, 0},
-	{AVMOV, C_ELEM, C_NONE, C_NONE, C_VREG, 80, 4, 0, 0, 0},
-	{AVMOV, C_ZREG, C_NONE, C_NONE, C_ARNG, 82, 4, 0, 0, 0},
-	{AVMOV, C_ZREG, C_NONE, C_NONE, C_ELEM, 78, 4, 0, 0, 0},
-	{AVMOV, C_ARNG, C_NONE, C_NONE, C_ARNG, 83, 4, 0, 0, 0},
-	{AVDUP, C_ELEM, C_NONE, C_NONE, C_ARNG, 79, 4, 0, 0, 0},
-	{AVDUP, C_ELEM, C_NONE, C_NONE, C_VREG, 80, 4, 0, 0, 0},
-	{AVDUP, C_ZREG, C_NONE, C_NONE, C_ARNG, 82, 4, 0, 0, 0},
-	{AVMOVI, C_ADDCON, C_NONE, C_NONE, C_ARNG, 86, 4, 0, 0, 0},
-	{AVFMLA, C_ARNG, C_ARNG, C_NONE, C_ARNG, 72, 4, 0, 0, 0},
-	{AVEXT, C_VCON, C_ARNG, C_ARNG, C_ARNG, 94, 4, 0, 0, 0},
-	{AVTBL, C_ARNG, C_NONE, C_LIST, C_ARNG, 100, 4, 0, 0, 0},
-	{AVUSHR, C_VCON, C_ARNG, C_NONE, C_ARNG, 95, 4, 0, 0, 0},
-	{AVZIP1, C_ARNG, C_ARNG, C_NONE, C_ARNG, 72, 4, 0, 0, 0},
-	{AVUSHLL, C_VCON, C_ARNG, C_NONE, C_ARNG, 102, 4, 0, 0, 0},
-	{AVUXTL, C_ARNG, C_NONE, C_NONE, C_ARNG, 102, 4, 0, 0, 0},
-	{AVUADDW, C_ARNG, C_ARNG, C_NONE, C_ARNG, 105, 4, 0, 0, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_ADDR, C_NONE, 64, 12, 0, 0, 0},
+	{AFMOVS, C_ADDR, C_NONE, C_NONE, C_FREG, C_NONE, 65, 12, 0, 0, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, C_NONE, 64, 12, 0, 0, 0},
+	{AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, C_NONE, 65, 12, 0, 0, 0},
+	{AFMOVS, C_FCON, C_NONE, C_NONE, C_FREG, C_NONE, 55, 4, 0, 0, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_FREG, C_NONE, 54, 4, 0, 0, 0},
+	{AFMOVD, C_FCON, C_NONE, C_NONE, C_FREG, C_NONE, 55, 4, 0, 0, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, C_NONE, 54, 4, 0, 0, 0},
+	{AFMOVS, C_ZREG, C_NONE, C_NONE, C_FREG, C_NONE, 29, 4, 0, 0, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_ZREG, C_NONE, 29, 4, 0, 0, 0},
+	{AFMOVD, C_ZREG, C_NONE, C_NONE, C_FREG, C_NONE, 29, 4, 0, 0, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_ZREG, C_NONE, 29, 4, 0, 0, 0},
+	{AFCVTZSD, C_FREG, C_NONE, C_NONE, C_ZREG, C_NONE, 29, 4, 0, 0, 0},
+	{ASCVTFD, C_ZREG, C_NONE, C_NONE, C_FREG, C_NONE, 29, 4, 0, 0, 0},
+	{AFCVTSD, C_FREG, C_NONE, C_NONE, C_FREG, C_NONE, 29, 4, 0, 0, 0},
+	{AVMOV, C_ELEM, C_NONE, C_NONE, C_ZREG, C_NONE, 73, 4, 0, 0, 0},
+	{AVMOV, C_ELEM, C_NONE, C_NONE, C_ELEM, C_NONE, 92, 4, 0, 0, 0},
+	{AVMOV, C_ELEM, C_NONE, C_NONE, C_VREG, C_NONE, 80, 4, 0, 0, 0},
+	{AVMOV, C_ZREG, C_NONE, C_NONE, C_ARNG, C_NONE, 82, 4, 0, 0, 0},
+	{AVMOV, C_ZREG, C_NONE, C_NONE, C_ELEM, C_NONE, 78, 4, 0, 0, 0},
+	{AVMOV, C_ARNG, C_NONE, C_NONE, C_ARNG, C_NONE, 83, 4, 0, 0, 0},
+	{AVDUP, C_ELEM, C_NONE, C_NONE, C_ARNG, C_NONE, 79, 4, 0, 0, 0},
+	{AVDUP, C_ELEM, C_NONE, C_NONE, C_VREG, C_NONE, 80, 4, 0, 0, 0},
+	{AVDUP, C_ZREG, C_NONE, C_NONE, C_ARNG, C_NONE, 82, 4, 0, 0, 0},
+	{AVMOVI, C_ADDCON, C_NONE, C_NONE, C_ARNG, C_NONE, 86, 4, 0, 0, 0},
+	{AVFMLA, C_ARNG, C_ARNG, C_NONE, C_ARNG, C_NONE, 72, 4, 0, 0, 0},
+	{AVEXT, C_VCON, C_ARNG, C_ARNG, C_ARNG, C_NONE, 94, 4, 0, 0, 0},
+	{AVTBL, C_ARNG, C_NONE, C_LIST, C_ARNG, C_NONE, 100, 4, 0, 0, 0},
+	{AVUSHR, C_VCON, C_ARNG, C_NONE, C_ARNG, C_NONE, 95, 4, 0, 0, 0},
+	{AVZIP1, C_ARNG, C_ARNG, C_NONE, C_ARNG, C_NONE, 72, 4, 0, 0, 0},
+	{AVUSHLL, C_VCON, C_ARNG, C_NONE, C_ARNG, C_NONE, 102, 4, 0, 0, 0},
+	{AVUXTL, C_ARNG, C_NONE, C_NONE, C_ARNG, C_NONE, 102, 4, 0, 0, 0},
+	{AVUADDW, C_ARNG, C_ARNG, C_NONE, C_ARNG, C_NONE, 105, 4, 0, 0, 0},
 
 	/* conditional operations */
-	{ACSEL, C_COND, C_ZREG, C_ZREG, C_ZREG, 18, 4, 0, 0, 0},
-	{ACINC, C_COND, C_ZREG, C_NONE, C_ZREG, 18, 4, 0, 0, 0},
-	{ACSET, C_COND, C_NONE, C_NONE, C_ZREG, 18, 4, 0, 0, 0},
-	{AFCSELD, C_COND, C_FREG, C_FREG, C_FREG, 18, 4, 0, 0, 0},
-	{ACCMN, C_COND, C_ZREG, C_ZREG, C_VCON, 19, 4, 0, 0, 0},
-	{ACCMN, C_COND, C_ZREG, C_VCON, C_VCON, 19, 4, 0, 0, 0},
-	{AFCCMPS, C_COND, C_FREG, C_FREG, C_VCON, 57, 4, 0, 0, 0},
+	{ACSEL, C_COND, C_ZREG, C_ZREG, C_ZREG, C_NONE, 18, 4, 0, 0, 0},
+	{ACINC, C_COND, C_ZREG, C_NONE, C_ZREG, C_NONE, 18, 4, 0, 0, 0},
+	{ACSET, C_COND, C_NONE, C_NONE, C_ZREG, C_NONE, 18, 4, 0, 0, 0},
+	{AFCSELD, C_COND, C_FREG, C_FREG, C_FREG, C_NONE, 18, 4, 0, 0, 0},
+	{ACCMN, C_COND, C_ZREG, C_ZREG, C_VCON, C_NONE, 19, 4, 0, 0, 0},
+	{ACCMN, C_COND, C_ZREG, C_VCON, C_VCON, C_NONE, 19, 4, 0, 0, 0},
+	{AFCCMPS, C_COND, C_FREG, C_FREG, C_VCON, C_NONE, 57, 4, 0, 0, 0},
 
 	/* scaled 12-bit unsigned displacement store */
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_UAUTO4K, 20, 4, REGSP, 0, 0},
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_UOREG4K, 20, 4, 0, 0, 0},
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_UAUTO8K, 20, 4, REGSP, 0, 0},
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_UOREG8K, 20, 4, 0, 0, 0},
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_UAUTO16K, 20, 4, REGSP, 0, 0},
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_UOREG16K, 20, 4, 0, 0, 0},
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_UAUTO32K, 20, 4, REGSP, 0, 0},
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_UOREG32K, 20, 4, 0, 0, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_UAUTO4K, C_NONE, 20, 4, REGSP, 0, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_UOREG4K, C_NONE, 20, 4, 0, 0, 0},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_UAUTO8K, C_NONE, 20, 4, REGSP, 0, 0},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_UOREG8K, C_NONE, 20, 4, 0, 0, 0},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_UAUTO16K, C_NONE, 20, 4, REGSP, 0, 0},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_UOREG16K, C_NONE, 20, 4, 0, 0, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_UAUTO32K, C_NONE, 20, 4, REGSP, 0, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_UOREG32K, C_NONE, 20, 4, 0, 0, 0},
 
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_UAUTO16K, 20, 4, REGSP, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_UOREG16K, 20, 4, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_UAUTO32K, 20, 4, REGSP, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_UOREG32K, 20, 4, 0, 0, 0},
-	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_UAUTO64K, 20, 4, REGSP, 0, 0},
-	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_UOREG64K, 20, 4, 0, 0, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_UAUTO16K, C_NONE, 20, 4, REGSP, 0, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_UOREG16K, C_NONE, 20, 4, 0, 0, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_UAUTO32K, C_NONE, 20, 4, REGSP, 0, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_UOREG32K, C_NONE, 20, 4, 0, 0, 0},
+	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_UAUTO64K, C_NONE, 20, 4, REGSP, 0, 0},
+	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_UOREG64K, C_NONE, 20, 4, 0, 0, 0},
 
 	/* unscaled 9-bit signed displacement store */
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_NSAUTO, C_NONE, 20, 4, REGSP, 0, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_NSOREG, C_NONE, 20, 4, 0, 0, 0},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_NSAUTO, C_NONE, 20, 4, REGSP, 0, 0},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_NSOREG, C_NONE, 20, 4, 0, 0, 0},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_NSAUTO, C_NONE, 20, 4, REGSP, 0, 0},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_NSOREG, C_NONE, 20, 4, 0, 0, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_NSAUTO, C_NONE, 20, 4, REGSP, 0, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_NSOREG, C_NONE, 20, 4, 0, 0, 0},
 
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_NSAUTO, C_NONE, 20, 4, REGSP, 0, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_NSOREG, C_NONE, 20, 4, 0, 0, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_NSAUTO, C_NONE, 20, 4, REGSP, 0, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_NSOREG, C_NONE, 20, 4, 0, 0, 0},
+	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_NSAUTO, C_NONE, 20, 4, REGSP, 0, 0},
+	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_NSOREG, C_NONE, 20, 4, 0, 0, 0},
 
 	/* scaled 12-bit unsigned displacement load */
-	{AMOVB, C_UAUTO4K, C_NONE, C_NONE, C_ZREG, 21, 4, REGSP, 0, 0},
-	{AMOVB, C_UOREG4K, C_NONE, C_NONE, C_ZREG, 21, 4, 0, 0, 0},
-	{AMOVH, C_UAUTO8K, C_NONE, C_NONE, C_ZREG, 21, 4, REGSP, 0, 0},
-	{AMOVH, C_UOREG8K, C_NONE, C_NONE, C_ZREG, 21, 4, 0, 0, 0},
-	{AMOVW, C_UAUTO16K, C_NONE, C_NONE, C_ZREG, 21, 4, REGSP, 0, 0},
-	{AMOVW, C_UOREG16K, C_NONE, C_NONE, C_ZREG, 21, 4, 0, 0, 0},
-	{AMOVD, C_UAUTO32K, C_NONE, C_NONE, C_ZREG, 21, 4, REGSP, 0, 0},
-	{AMOVD, C_UOREG32K, C_NONE, C_NONE, C_ZREG, 21, 4, 0, 0, 0},
+	{AMOVB, C_UAUTO4K, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AMOVB, C_UOREG4K, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, 0, 0, 0},
+	{AMOVH, C_UAUTO8K, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AMOVH, C_UOREG8K, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, 0, 0, 0},
+	{AMOVW, C_UAUTO16K, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AMOVW, C_UOREG16K, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, 0, 0, 0},
+	{AMOVD, C_UAUTO32K, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AMOVD, C_UOREG32K, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, 0, 0, 0},
 
-	{AFMOVS, C_UAUTO16K, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0},
-	{AFMOVS, C_UOREG16K, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0},
-	{AFMOVD, C_UAUTO32K, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0},
-	{AFMOVD, C_UOREG32K, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0},
-	{AFMOVQ, C_UAUTO64K, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0},
-	{AFMOVQ, C_UOREG64K, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0},
+	{AFMOVS, C_UAUTO16K, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AFMOVS, C_UOREG16K, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, 0, 0, 0},
+	{AFMOVD, C_UAUTO32K, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AFMOVD, C_UOREG32K, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, 0, 0, 0},
+	{AFMOVQ, C_UAUTO64K, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AFMOVQ, C_UOREG64K, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, 0, 0, 0},
 
 	/* unscaled 9-bit signed displacement load */
-	{AMOVB, C_NSAUTO, C_NONE, C_NONE, C_ZREG, 21, 4, REGSP, 0, 0},
-	{AMOVB, C_NSOREG, C_NONE, C_NONE, C_ZREG, 21, 4, 0, 0, 0},
-	{AMOVH, C_NSAUTO, C_NONE, C_NONE, C_ZREG, 21, 4, REGSP, 0, 0},
-	{AMOVH, C_NSOREG, C_NONE, C_NONE, C_ZREG, 21, 4, 0, 0, 0},
-	{AMOVW, C_NSAUTO, C_NONE, C_NONE, C_ZREG, 21, 4, REGSP, 0, 0},
-	{AMOVW, C_NSOREG, C_NONE, C_NONE, C_ZREG, 21, 4, 0, 0, 0},
-	{AMOVD, C_NSAUTO, C_NONE, C_NONE, C_ZREG, 21, 4, REGSP, 0, 0},
-	{AMOVD, C_NSOREG, C_NONE, C_NONE, C_ZREG, 21, 4, 0, 0, 0},
+	{AMOVB, C_NSAUTO, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AMOVB, C_NSOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, 0, 0, 0},
+	{AMOVH, C_NSAUTO, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AMOVH, C_NSOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, 0, 0, 0},
+	{AMOVW, C_NSAUTO, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AMOVW, C_NSOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, 0, 0, 0},
+	{AMOVD, C_NSAUTO, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AMOVD, C_NSOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 21, 4, 0, 0, 0},
 
-	{AFMOVS, C_NSAUTO, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0},
-	{AFMOVS, C_NSOREG, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0},
-	{AFMOVD, C_NSAUTO, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0},
-	{AFMOVD, C_NSOREG, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0},
-	{AFMOVQ, C_NSAUTO, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0},
-	{AFMOVQ, C_NSOREG, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0},
+	{AFMOVS, C_NSAUTO, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AFMOVS, C_NSOREG, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, 0, 0, 0},
+	{AFMOVD, C_NSAUTO, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AFMOVD, C_NSOREG, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, 0, 0, 0},
+	{AFMOVQ, C_NSAUTO, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, REGSP, 0, 0},
+	{AFMOVQ, C_NSOREG, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, 0, 0, 0},
 
 	/* long displacement store */
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_LAUTO, C_NONE, 30, 8, REGSP, 0, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_LAUTOPOOL, C_NONE, 30, 8, REGSP, LTO, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 30, 8, 0, 0, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_LOREGPOOL, C_NONE, 30, 8, 0, LTO, 0},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_LAUTO, C_NONE, 30, 8, REGSP, 0, 0},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_LAUTOPOOL, C_NONE, 30, 8, REGSP, LTO, 0},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 30, 8, 0, 0, 0},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_LOREGPOOL, C_NONE, 30, 8, 0, LTO, 0},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_LAUTO, C_NONE, 30, 8, REGSP, 0, 0},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_LAUTOPOOL, C_NONE, 30, 8, REGSP, LTO, 0},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 30, 8, 0, 0, 0},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_LOREGPOOL, C_NONE, 30, 8, 0, LTO, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_LAUTO, C_NONE, 30, 8, REGSP, 0, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_LAUTOPOOL, C_NONE, 30, 8, REGSP, LTO, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 30, 8, 0, 0, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_LOREGPOOL, C_NONE, 30, 8, 0, LTO, 0},
 
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_LAUTO, C_NONE, 30, 8, REGSP, 0, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_LAUTOPOOL, C_NONE, 30, 8, REGSP, LTO, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 30, 8, 0, 0, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_LOREGPOOL, C_NONE, 30, 8, 0, LTO, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, C_NONE, 30, 8, REGSP, 0, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTOPOOL, C_NONE, 30, 8, REGSP, LTO, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 30, 8, 0, 0, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREGPOOL, C_NONE, 30, 8, 0, LTO, 0},
+	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_LAUTO, C_NONE, 30, 8, REGSP, 0, 0},
+	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_LAUTOPOOL, C_NONE, 30, 8, REGSP, LTO, 0},
+	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 30, 8, 0, 0, 0},
+	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_LOREGPOOL, C_NONE, 30, 8, 0, LTO, 0},
 
 	/* long displacement load */
-	{AMOVB, C_LAUTO, C_NONE, C_NONE, C_ZREG, 31, 8, REGSP, LFROM, 0},
-	{AMOVB, C_LOREG, C_NONE, C_NONE, C_ZREG, 31, 8, 0, LFROM, 0},
-	{AMOVH, C_LAUTO, C_NONE, C_NONE, C_ZREG, 31, 8, REGSP, LFROM, 0},
-	{AMOVH, C_LOREG, C_NONE, C_NONE, C_ZREG, 31, 8, 0, LFROM, 0},
-	{AMOVW, C_LAUTO, C_NONE, C_NONE, C_ZREG, 31, 8, REGSP, LFROM, 0},
-	{AMOVW, C_LOREG, C_NONE, C_NONE, C_ZREG, 31, 8, 0, LFROM, 0},
-	{AMOVD, C_LAUTO, C_NONE, C_NONE, C_ZREG, 31, 8, REGSP, LFROM, 0},
-	{AMOVD, C_LOREG, C_NONE, C_NONE, C_ZREG, 31, 8, 0, LFROM, 0},
+	{AMOVB, C_LAUTO, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, REGSP, 0, 0},
+	{AMOVB, C_LAUTOPOOL, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, REGSP, LFROM, 0},
+	{AMOVB, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, 0, 0, 0},
+	{AMOVB, C_LOREGPOOL, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, 0, LFROM, 0},
+	{AMOVH, C_LAUTO, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, REGSP, 0, 0},
+	{AMOVH, C_LAUTOPOOL, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, REGSP, LFROM, 0},
+	{AMOVH, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, 0, 0, 0},
+	{AMOVH, C_LOREGPOOL, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, 0, LFROM, 0},
+	{AMOVW, C_LAUTO, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, REGSP, 0, 0},
+	{AMOVW, C_LAUTOPOOL, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, REGSP, LFROM, 0},
+	{AMOVW, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, 0, 0, 0},
+	{AMOVW, C_LOREGPOOL, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, 0, LFROM, 0},
+	{AMOVD, C_LAUTO, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, REGSP, 0, 0},
+	{AMOVD, C_LAUTOPOOL, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, REGSP, LFROM, 0},
+	{AMOVD, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, 0, 0, 0},
+	{AMOVD, C_LOREGPOOL, C_NONE, C_NONE, C_ZREG, C_NONE, 31, 8, 0, LFROM, 0},
 
-	{AFMOVS, C_LAUTO, C_NONE, C_NONE, C_FREG, 31, 8, REGSP, LFROM, 0},
-	{AFMOVS, C_LOREG, C_NONE, C_NONE, C_FREG, 31, 8, 0, LFROM, 0},
-	{AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 31, 8, REGSP, LFROM, 0},
-	{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 31, 8, 0, LFROM, 0},
-	{AFMOVQ, C_LAUTO, C_NONE, C_NONE, C_FREG, 31, 8, REGSP, LFROM, 0},
-	{AFMOVQ, C_LOREG, C_NONE, C_NONE, C_FREG, 31, 8, 0, LFROM, 0},
+	{AFMOVS, C_LAUTO, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, REGSP, 0, 0},
+	{AFMOVS, C_LAUTOPOOL, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, REGSP, LFROM, 0},
+	{AFMOVS, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, 0, 0, 0},
+	{AFMOVS, C_LOREGPOOL, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, 0, LFROM, 0},
+	{AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, REGSP, 0, 0},
+	{AFMOVD, C_LAUTOPOOL, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, REGSP, LFROM, 0},
+	{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, 0, 0, 0},
+	{AFMOVD, C_LOREGPOOL, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, 0, LFROM, 0},
+	{AFMOVQ, C_LAUTO, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, REGSP, 0, 0},
+	{AFMOVQ, C_LAUTOPOOL, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, REGSP, LFROM, 0},
+	{AFMOVQ, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, 0, 0, 0},
+	{AFMOVQ, C_LOREGPOOL, C_NONE, C_NONE, C_FREG, C_NONE, 31, 8, 0, LFROM, 0},
 
 	/* pre/post-indexed load (unscaled, signed 9-bit offset) */
-	{AMOVD, C_LOREG, C_NONE, C_NONE, C_ZREG, 22, 4, 0, 0, C_XPOST},
-	{AMOVW, C_LOREG, C_NONE, C_NONE, C_ZREG, 22, 4, 0, 0, C_XPOST},
-	{AMOVH, C_LOREG, C_NONE, C_NONE, C_ZREG, 22, 4, 0, 0, C_XPOST},
-	{AMOVB, C_LOREG, C_NONE, C_NONE, C_ZREG, 22, 4, 0, 0, C_XPOST},
-	{AFMOVS, C_LOREG, C_NONE, C_NONE, C_FREG, 22, 4, 0, 0, C_XPOST},
-	{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 22, 4, 0, 0, C_XPOST},
-	{AFMOVQ, C_LOREG, C_NONE, C_NONE, C_FREG, 22, 4, 0, 0, C_XPOST},
+	{AMOVD, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 22, 4, 0, 0, C_XPOST},
+	{AMOVW, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 22, 4, 0, 0, C_XPOST},
+	{AMOVH, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 22, 4, 0, 0, C_XPOST},
+	{AMOVB, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 22, 4, 0, 0, C_XPOST},
+	{AFMOVS, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 22, 4, 0, 0, C_XPOST},
+	{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 22, 4, 0, 0, C_XPOST},
+	{AFMOVQ, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 22, 4, 0, 0, C_XPOST},
 
-	{AMOVD, C_LOREG, C_NONE, C_NONE, C_ZREG, 22, 4, 0, 0, C_XPRE},
-	{AMOVW, C_LOREG, C_NONE, C_NONE, C_ZREG, 22, 4, 0, 0, C_XPRE},
-	{AMOVH, C_LOREG, C_NONE, C_NONE, C_ZREG, 22, 4, 0, 0, C_XPRE},
-	{AMOVB, C_LOREG, C_NONE, C_NONE, C_ZREG, 22, 4, 0, 0, C_XPRE},
-	{AFMOVS, C_LOREG, C_NONE, C_NONE, C_FREG, 22, 4, 0, 0, C_XPRE},
-	{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 22, 4, 0, 0, C_XPRE},
-	{AFMOVQ, C_LOREG, C_NONE, C_NONE, C_FREG, 22, 4, 0, 0, C_XPRE},
+	{AMOVD, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 22, 4, 0, 0, C_XPRE},
+	{AMOVW, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 22, 4, 0, 0, C_XPRE},
+	{AMOVH, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 22, 4, 0, 0, C_XPRE},
+	{AMOVB, C_LOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 22, 4, 0, 0, C_XPRE},
+	{AFMOVS, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 22, 4, 0, 0, C_XPRE},
+	{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 22, 4, 0, 0, C_XPRE},
+	{AFMOVQ, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 22, 4, 0, 0, C_XPRE},
 
 	/* pre/post-indexed store (unscaled, signed 9-bit offset) */
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPOST},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPOST},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPOST},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPOST},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPOST},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPOST},
+	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPOST},
 
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPRE},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPRE},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPRE},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPRE},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPRE},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPRE},
+	{AFMOVQ, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 23, 4, 0, 0, C_XPRE},
 
 	/* load with shifted or extended register offset */
-	{AMOVD, C_ROFF, C_NONE, C_NONE, C_ZREG, 98, 4, 0, 0, 0},
-	{AMOVW, C_ROFF, C_NONE, C_NONE, C_ZREG, 98, 4, 0, 0, 0},
-	{AMOVH, C_ROFF, C_NONE, C_NONE, C_ZREG, 98, 4, 0, 0, 0},
-	{AMOVB, C_ROFF, C_NONE, C_NONE, C_ZREG, 98, 4, 0, 0, 0},
-	{AFMOVS, C_ROFF, C_NONE, C_NONE, C_FREG, 98, 4, 0, 0, 0},
-	{AFMOVD, C_ROFF, C_NONE, C_NONE, C_FREG, 98, 4, 0, 0, 0},
+	{AMOVD, C_ROFF, C_NONE, C_NONE, C_ZREG, C_NONE, 98, 4, 0, 0, 0},
+	{AMOVW, C_ROFF, C_NONE, C_NONE, C_ZREG, C_NONE, 98, 4, 0, 0, 0},
+	{AMOVH, C_ROFF, C_NONE, C_NONE, C_ZREG, C_NONE, 98, 4, 0, 0, 0},
+	{AMOVB, C_ROFF, C_NONE, C_NONE, C_ZREG, C_NONE, 98, 4, 0, 0, 0},
+	{AFMOVS, C_ROFF, C_NONE, C_NONE, C_FREG, C_NONE, 98, 4, 0, 0, 0},
+	{AFMOVD, C_ROFF, C_NONE, C_NONE, C_FREG, C_NONE, 98, 4, 0, 0, 0},
 
 	/* store with extended register offset */
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0},
-	{AMOVW, C_ZREG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0},
-	{AMOVH, C_ZREG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0},
-	{AMOVB, C_ZREG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_ROFF, C_NONE, 99, 4, 0, 0, 0},
+	{AMOVW, C_ZREG, C_NONE, C_NONE, C_ROFF, C_NONE, 99, 4, 0, 0, 0},
+	{AMOVH, C_ZREG, C_NONE, C_NONE, C_ROFF, C_NONE, 99, 4, 0, 0, 0},
+	{AMOVB, C_ZREG, C_NONE, C_NONE, C_ROFF, C_NONE, 99, 4, 0, 0, 0},
+	{AFMOVS, C_FREG, C_NONE, C_NONE, C_ROFF, C_NONE, 99, 4, 0, 0, 0},
+	{AFMOVD, C_FREG, C_NONE, C_NONE, C_ROFF, C_NONE, 99, 4, 0, 0, 0},
 
 	/* pre/post-indexed/signed-offset load/store register pair
 	   (unscaled, signed 10-bit quad-aligned and long offset).
 	The pre/post-indexed format only supports OREG cases because
 	the RSP and pseudo registers are not allowed to be modified
 	in this way. */
-	{AFLDPQ, C_NQAUTO_16, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0},
-	{AFLDPQ, C_PQAUTO_16, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0},
-	{AFLDPQ, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0},
-	{AFLDPQ, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0},
-	{AFLDPQ, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, 0},
-	{AFLDPQ, C_NQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0},
-	{AFLDPQ, C_NQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE},
-	{AFLDPQ, C_NQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST},
-	{AFLDPQ, C_PQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0},
-	{AFLDPQ, C_PQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE},
-	{AFLDPQ, C_PQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST},
-	{AFLDPQ, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0},
-	{AFLDPQ, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0},
-	{AFLDPQ, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, 0},
-	{AFLDPQ, C_ADDR, C_NONE, C_NONE, C_PAIR, 88, 12, 0, 0, 0},
+	{AFLDPQ, C_NQAUTO_16, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, REGSP, 0, 0},
+	{AFLDPQ, C_PQAUTO_16, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, REGSP, 0, 0},
+	{AFLDPQ, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, REGSP, 0, 0},
+	{AFLDPQ, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, REGSP, 0, 0},
+	{AFLDPQ, C_LAUTO, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, REGSP, 0, 0},
+	{AFLDPQ, C_LAUTOPOOL, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, REGSP, LFROM, 0},
+	{AFLDPQ, C_NQOREG_16, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, 0},
+	{AFLDPQ, C_NQOREG_16, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPRE},
+	{AFLDPQ, C_NQOREG_16, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPOST},
+	{AFLDPQ, C_PQOREG_16, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, 0},
+	{AFLDPQ, C_PQOREG_16, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPRE},
+	{AFLDPQ, C_PQOREG_16, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPOST},
+	{AFLDPQ, C_UOREG4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, 0, 0, 0},
+	{AFLDPQ, C_NOREG4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, 0, 0, 0},
+	{AFLDPQ, C_LOREG, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, 0, 0, 0},
+	{AFLDPQ, C_LOREGPOOL, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, 0, LFROM, 0},
+	{AFLDPQ, C_ADDR, C_NONE, C_NONE, C_PAIR, C_NONE, 88, 12, 0, 0, 0},
 
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQAUTO_16, 67, 4, REGSP, 0, 0},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQAUTO_16, 67, 4, REGSP, 0, 0},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, 0},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 8, REGSP, 0, 0},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, 0},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQOREG_16, 67, 4, 0, 0, 0},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQOREG_16, 67, 4, 0, 0, C_XPRE},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQOREG_16, 67, 4, 0, 0, C_XPOST},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQOREG_16, 67, 4, 0, 0, 0},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQOREG_16, 67, 4, 0, 0, C_XPRE},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQOREG_16, 67, 4, 0, 0, C_XPOST},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, 0},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, 0},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, 0},
-	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_ADDR, 87, 12, 0, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQAUTO_16, C_NONE, 67, 4, REGSP, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQAUTO_16, C_NONE, 67, 4, REGSP, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, C_NONE, 76, 8, REGSP, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, C_NONE, 76, 8, REGSP, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_LAUTO, C_NONE, 77, 12, REGSP, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_LAUTOPOOL, C_NONE, 77, 12, REGSP, LTO, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQOREG_16, C_NONE, 67, 4, 0, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQOREG_16, C_NONE, 67, 4, 0, 0, C_XPRE},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQOREG_16, C_NONE, 67, 4, 0, 0, C_XPOST},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQOREG_16, C_NONE, 67, 4, 0, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQOREG_16, C_NONE, 67, 4, 0, 0, C_XPRE},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQOREG_16, C_NONE, 67, 4, 0, 0, C_XPOST},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_UOREG4K, C_NONE, 76, 8, 0, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NOREG4K, C_NONE, 76, 8, 0, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_LOREG, C_NONE, 77, 12, 0, 0, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_LOREGPOOL, C_NONE, 77, 12, 0, LTO, 0},
+	{AFSTPQ, C_PAIR, C_NONE, C_NONE, C_ADDR, C_NONE, 87, 12, 0, 0, 0},
 
-	{ALDP, C_NPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0},
-	{ALDP, C_PPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0},
-	{ALDP, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0},
-	{ALDP, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0},
-	{ALDP, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, 0},
-	{ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0},
-	{ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE},
-	{ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST},
-	{ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0},
-	{ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE},
-	{ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST},
-	{ALDP, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0},
-	{ALDP, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0},
-	{ALDP, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, 0},
-	{ALDP, C_ADDR, C_NONE, C_NONE, C_PAIR, 88, 12, 0, 0, 0},
+	{ALDP, C_NPAUTO, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, REGSP, 0, 0},
+	{ALDP, C_PPAUTO, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, REGSP, 0, 0},
+	{ALDP, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, REGSP, 0, 0},
+	{ALDP, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, REGSP, 0, 0},
+	{ALDP, C_LAUTO, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, REGSP, 0, 0},
+	{ALDP, C_LAUTOPOOL, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, REGSP, LFROM, 0},
+	{ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, 0},
+	{ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPRE},
+	{ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPOST},
+	{ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, 0},
+	{ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPRE},
+	{ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPOST},
+	{ALDP, C_UOREG4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, 0, 0, 0},
+	{ALDP, C_NOREG4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, 0, 0, 0},
+	{ALDP, C_LOREG, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, 0, 0, 0},
+	{ALDP, C_LOREGPOOL, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, 0, LFROM, 0},
+	{ALDP, C_ADDR, C_NONE, C_NONE, C_PAIR, C_NONE, 88, 12, 0, 0, 0},
 
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_NPAUTO, 67, 4, REGSP, 0, 0},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_PPAUTO, 67, 4, REGSP, 0, 0},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, 0},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 8, REGSP, 0, 0},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, 0},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, 67, 4, 0, 0, 0},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, 67, 4, 0, 0, C_XPRE},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, 67, 4, 0, 0, C_XPOST},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, 67, 4, 0, 0, 0},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, 67, 4, 0, 0, C_XPRE},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, 67, 4, 0, 0, C_XPOST},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, 0},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, 0},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, 0},
-	{ASTP, C_PAIR, C_NONE, C_NONE, C_ADDR, 87, 12, 0, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_NPAUTO, C_NONE, 67, 4, REGSP, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_PPAUTO, C_NONE, 67, 4, REGSP, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, C_NONE, 76, 8, REGSP, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, C_NONE, 76, 8, REGSP, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_LAUTO, C_NONE, 77, 12, REGSP, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_LAUTOPOOL, C_NONE, 77, 12, REGSP, LTO, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, C_NONE, 67, 4, 0, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, C_NONE, 67, 4, 0, 0, C_XPRE},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, C_NONE, 67, 4, 0, 0, C_XPOST},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, C_NONE, 67, 4, 0, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, C_NONE, 67, 4, 0, 0, C_XPRE},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, C_NONE, 67, 4, 0, 0, C_XPOST},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_UOREG4K, C_NONE, 76, 8, 0, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_NOREG4K, C_NONE, 76, 8, 0, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_LOREG, C_NONE, 77, 12, 0, 0, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_LOREGPOOL, C_NONE, 77, 12, 0, LTO, 0},
+	{ASTP, C_PAIR, C_NONE, C_NONE, C_ADDR, C_NONE, 87, 12, 0, 0, 0},
 
 	// differ from LDP/STP for C_NSAUTO_4/C_PSAUTO_4/C_NSOREG_4/C_PSOREG_4
-	{ALDPW, C_NSAUTO_4, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0},
-	{ALDPW, C_PSAUTO_4, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0},
-	{ALDPW, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0},
-	{ALDPW, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0},
-	{ALDPW, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, 0},
-	{ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0},
-	{ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE},
-	{ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST},
-	{ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0},
-	{ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE},
-	{ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST},
-	{ALDPW, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0},
-	{ALDPW, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0},
-	{ALDPW, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, 0},
-	{ALDPW, C_ADDR, C_NONE, C_NONE, C_PAIR, 88, 12, 0, 0, 0},
+	{ALDPW, C_NSAUTO_4, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, REGSP, 0, 0},
+	{ALDPW, C_PSAUTO_4, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, REGSP, 0, 0},
+	{ALDPW, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, REGSP, 0, 0},
+	{ALDPW, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, REGSP, 0, 0},
+	{ALDPW, C_LAUTO, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, REGSP, 0, 0},
+	{ALDPW, C_LAUTOPOOL, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, REGSP, LFROM, 0},
+	{ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, 0},
+	{ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPRE},
+	{ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPOST},
+	{ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, 0},
+	{ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPRE},
+	{ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, C_NONE, 66, 4, 0, 0, C_XPOST},
+	{ALDPW, C_UOREG4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, 0, 0, 0},
+	{ALDPW, C_NOREG4K, C_NONE, C_NONE, C_PAIR, C_NONE, 74, 8, 0, 0, 0},
+	{ALDPW, C_LOREG, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, 0, 0, 0},
+	{ALDPW, C_LOREGPOOL, C_NONE, C_NONE, C_PAIR, C_NONE, 75, 12, 0, LFROM, 0},
+	{ALDPW, C_ADDR, C_NONE, C_NONE, C_PAIR, C_NONE, 88, 12, 0, 0, 0},
 
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NSAUTO_4, 67, 4, REGSP, 0, 0},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_PSAUTO_4, 67, 4, REGSP, 0, 0},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, 0},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 8, REGSP, 0, 0},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, 0},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, 67, 4, 0, 0, 0},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, 67, 4, 0, 0, C_XPRE},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, 67, 4, 0, 0, C_XPOST},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, 67, 4, 0, 0, 0},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, 67, 4, 0, 0, C_XPRE},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, 67, 4, 0, 0, C_XPOST},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, 0},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, 0},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, 0},
-	{ASTPW, C_PAIR, C_NONE, C_NONE, C_ADDR, 87, 12, 0, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NSAUTO_4, C_NONE, 67, 4, REGSP, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_PSAUTO_4, C_NONE, 67, 4, REGSP, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, C_NONE, 76, 8, REGSP, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, C_NONE, 76, 8, REGSP, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_LAUTO, C_NONE, 77, 12, REGSP, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_LAUTOPOOL, C_NONE, 77, 12, REGSP, LTO, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, C_NONE, 67, 4, 0, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, C_NONE, 67, 4, 0, 0, C_XPRE},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, C_NONE, 67, 4, 0, 0, C_XPOST},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, C_NONE, 67, 4, 0, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, C_NONE, 67, 4, 0, 0, C_XPRE},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, C_NONE, 67, 4, 0, 0, C_XPOST},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_UOREG4K, C_NONE, 76, 8, 0, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_NOREG4K, C_NONE, 76, 8, 0, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_LOREG, C_NONE, 77, 12, 0, 0, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_LOREGPOOL, C_NONE, 77, 12, 0, LTO, 0},
+	{ASTPW, C_PAIR, C_NONE, C_NONE, C_ADDR, C_NONE, 87, 12, 0, 0, 0},
 
-	{ASWPD, C_ZREG, C_NONE, C_NONE, C_ZOREG, 47, 4, 0, 0, 0},       // RegTo2=C_REG
-	{ASWPD, C_ZREG, C_NONE, C_NONE, C_ZAUTO, 47, 4, REGSP, 0, 0},   // RegTo2=C_REG
-	{ACASPD, C_PAIR, C_NONE, C_NONE, C_ZOREG, 106, 4, 0, 0, 0},     // RegTo2=C_REGREG
-	{ACASPD, C_PAIR, C_NONE, C_NONE, C_ZAUTO, 106, 4, REGSP, 0, 0}, // RegTo2=C_REGREG
-	{ALDAR, C_ZOREG, C_NONE, C_NONE, C_ZREG, 58, 4, 0, 0, 0},
-	{ALDXR, C_ZOREG, C_NONE, C_NONE, C_ZREG, 58, 4, 0, 0, 0},
-	{ALDAXR, C_ZOREG, C_NONE, C_NONE, C_ZREG, 58, 4, 0, 0, 0},
-	{ALDXP, C_ZOREG, C_NONE, C_NONE, C_PAIR, 58, 4, 0, 0, 0},
-	{ASTLR, C_ZREG, C_NONE, C_NONE, C_ZOREG, 59, 4, 0, 0, 0},  // RegTo2=C_NONE
-	{ASTXR, C_ZREG, C_NONE, C_NONE, C_ZOREG, 59, 4, 0, 0, 0},  // RegTo2=C_REG
-	{ASTLXR, C_ZREG, C_NONE, C_NONE, C_ZOREG, 59, 4, 0, 0, 0}, // RegTo2=C_REG
-	{ASTXP, C_PAIR, C_NONE, C_NONE, C_ZOREG, 59, 4, 0, 0, 0},
+	{ASWPD, C_ZREG, C_NONE, C_NONE, C_ZOREG, C_ZREG, 47, 4, 0, 0, 0},
+	{ASWPD, C_ZREG, C_NONE, C_NONE, C_ZAUTO, C_ZREG, 47, 4, REGSP, 0, 0},
+	{ACASPD, C_PAIR, C_NONE, C_NONE, C_ZOREG, C_PAIR, 106, 4, 0, 0, 0},
+	{ACASPD, C_PAIR, C_NONE, C_NONE, C_ZAUTO, C_PAIR, 106, 4, REGSP, 0, 0},
+	{ALDAR, C_ZOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 58, 4, 0, 0, 0},
+	{ALDXR, C_ZOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 58, 4, 0, 0, 0},
+	{ALDAXR, C_ZOREG, C_NONE, C_NONE, C_ZREG, C_NONE, 58, 4, 0, 0, 0},
+	{ALDXP, C_ZOREG, C_NONE, C_NONE, C_PAIR, C_NONE, 58, 4, 0, 0, 0},
+	{ASTLR, C_ZREG, C_NONE, C_NONE, C_ZOREG, C_NONE, 59, 4, 0, 0, 0},
+	{ASTXR, C_ZREG, C_NONE, C_NONE, C_ZOREG, C_ZREG, 59, 4, 0, 0, 0},
+	{ASTLXR, C_ZREG, C_NONE, C_NONE, C_ZOREG, C_ZREG, 59, 4, 0, 0, 0},
+	{ASTXP, C_PAIR, C_NONE, C_NONE, C_ZOREG, C_ZREG, 59, 4, 0, 0, 0},
 
 	/* VLD[1-4]/VST[1-4] */
-	{AVLD1, C_ZOREG, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, 0},
-	{AVLD1, C_LOREG, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, C_XPOST},
-	{AVLD1, C_ROFF, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, C_XPOST},
-	{AVLD1R, C_ZOREG, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, 0},
-	{AVLD1R, C_LOREG, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, C_XPOST},
-	{AVLD1R, C_ROFF, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, C_XPOST},
-	{AVLD1, C_LOREG, C_NONE, C_NONE, C_ELEM, 97, 4, 0, 0, C_XPOST},
-	{AVLD1, C_ROFF, C_NONE, C_NONE, C_ELEM, 97, 4, 0, 0, C_XPOST},
-	{AVLD1, C_LOREG, C_NONE, C_NONE, C_ELEM, 97, 4, 0, 0, 0},
-	{AVST1, C_LIST, C_NONE, C_NONE, C_ZOREG, 84, 4, 0, 0, 0},
-	{AVST1, C_LIST, C_NONE, C_NONE, C_LOREG, 84, 4, 0, 0, C_XPOST},
-	{AVST1, C_LIST, C_NONE, C_NONE, C_ROFF, 84, 4, 0, 0, C_XPOST},
-	{AVST2, C_LIST, C_NONE, C_NONE, C_ZOREG, 84, 4, 0, 0, 0},
-	{AVST2, C_LIST, C_NONE, C_NONE, C_LOREG, 84, 4, 0, 0, C_XPOST},
-	{AVST2, C_LIST, C_NONE, C_NONE, C_ROFF, 84, 4, 0, 0, C_XPOST},
-	{AVST3, C_LIST, C_NONE, C_NONE, C_ZOREG, 84, 4, 0, 0, 0},
-	{AVST3, C_LIST, C_NONE, C_NONE, C_LOREG, 84, 4, 0, 0, C_XPOST},
-	{AVST3, C_LIST, C_NONE, C_NONE, C_ROFF, 84, 4, 0, 0, C_XPOST},
-	{AVST4, C_LIST, C_NONE, C_NONE, C_ZOREG, 84, 4, 0, 0, 0},
-	{AVST4, C_LIST, C_NONE, C_NONE, C_LOREG, 84, 4, 0, 0, C_XPOST},
-	{AVST4, C_LIST, C_NONE, C_NONE, C_ROFF, 84, 4, 0, 0, C_XPOST},
-	{AVST1, C_ELEM, C_NONE, C_NONE, C_LOREG, 96, 4, 0, 0, C_XPOST},
-	{AVST1, C_ELEM, C_NONE, C_NONE, C_ROFF, 96, 4, 0, 0, C_XPOST},
-	{AVST1, C_ELEM, C_NONE, C_NONE, C_LOREG, 96, 4, 0, 0, 0},
+	{AVLD1, C_ZOREG, C_NONE, C_NONE, C_LIST, C_NONE, 81, 4, 0, 0, 0},
+	{AVLD1, C_LOREG, C_NONE, C_NONE, C_LIST, C_NONE, 81, 4, 0, 0, C_XPOST},
+	{AVLD1, C_ROFF, C_NONE, C_NONE, C_LIST, C_NONE, 81, 4, 0, 0, C_XPOST},
+	{AVLD1R, C_ZOREG, C_NONE, C_NONE, C_LIST, C_NONE, 81, 4, 0, 0, 0},
+	{AVLD1R, C_LOREG, C_NONE, C_NONE, C_LIST, C_NONE, 81, 4, 0, 0, C_XPOST},
+	{AVLD1R, C_ROFF, C_NONE, C_NONE, C_LIST, C_NONE, 81, 4, 0, 0, C_XPOST},
+	{AVLD1, C_LOREG, C_NONE, C_NONE, C_ELEM, C_NONE, 97, 4, 0, 0, C_XPOST},
+	{AVLD1, C_ROFF, C_NONE, C_NONE, C_ELEM, C_NONE, 97, 4, 0, 0, C_XPOST},
+	{AVLD1, C_LOREG, C_NONE, C_NONE, C_ELEM, C_NONE, 97, 4, 0, 0, 0},
+	{AVST1, C_LIST, C_NONE, C_NONE, C_ZOREG, C_NONE, 84, 4, 0, 0, 0},
+	{AVST1, C_LIST, C_NONE, C_NONE, C_LOREG, C_NONE, 84, 4, 0, 0, C_XPOST},
+	{AVST1, C_LIST, C_NONE, C_NONE, C_ROFF, C_NONE, 84, 4, 0, 0, C_XPOST},
+	{AVST2, C_LIST, C_NONE, C_NONE, C_ZOREG, C_NONE, 84, 4, 0, 0, 0},
+	{AVST2, C_LIST, C_NONE, C_NONE, C_LOREG, C_NONE, 84, 4, 0, 0, C_XPOST},
+	{AVST2, C_LIST, C_NONE, C_NONE, C_ROFF, C_NONE, 84, 4, 0, 0, C_XPOST},
+	{AVST3, C_LIST, C_NONE, C_NONE, C_ZOREG, C_NONE, 84, 4, 0, 0, 0},
+	{AVST3, C_LIST, C_NONE, C_NONE, C_LOREG, C_NONE, 84, 4, 0, 0, C_XPOST},
+	{AVST3, C_LIST, C_NONE, C_NONE, C_ROFF, C_NONE, 84, 4, 0, 0, C_XPOST},
+	{AVST4, C_LIST, C_NONE, C_NONE, C_ZOREG, C_NONE, 84, 4, 0, 0, 0},
+	{AVST4, C_LIST, C_NONE, C_NONE, C_LOREG, C_NONE, 84, 4, 0, 0, C_XPOST},
+	{AVST4, C_LIST, C_NONE, C_NONE, C_ROFF, C_NONE, 84, 4, 0, 0, C_XPOST},
+	{AVST1, C_ELEM, C_NONE, C_NONE, C_LOREG, C_NONE, 96, 4, 0, 0, C_XPOST},
+	{AVST1, C_ELEM, C_NONE, C_NONE, C_ROFF, C_NONE, 96, 4, 0, 0, C_XPOST},
+	{AVST1, C_ELEM, C_NONE, C_NONE, C_LOREG, C_NONE, 96, 4, 0, 0, 0},
 
 	/* special */
-	{AMOVD, C_SPR, C_NONE, C_NONE, C_ZREG, 35, 4, 0, 0, 0},
-	{AMRS, C_SPR, C_NONE, C_NONE, C_ZREG, 35, 4, 0, 0, 0},
-	{AMOVD, C_ZREG, C_NONE, C_NONE, C_SPR, 36, 4, 0, 0, 0},
-	{AMSR, C_ZREG, C_NONE, C_NONE, C_SPR, 36, 4, 0, 0, 0},
-	{AMOVD, C_VCON, C_NONE, C_NONE, C_SPR, 37, 4, 0, 0, 0},
-	{AMSR, C_VCON, C_NONE, C_NONE, C_SPR, 37, 4, 0, 0, 0},
-	{AMSR, C_VCON, C_NONE, C_NONE, C_SPOP, 37, 4, 0, 0, 0},
-	{APRFM, C_UOREG32K, C_NONE, C_NONE, C_SPOP, 91, 4, 0, 0, 0},
-	{APRFM, C_UOREG32K, C_NONE, C_NONE, C_LCON, 91, 4, 0, 0, 0},
-	{ADMB, C_VCON, C_NONE, C_NONE, C_NONE, 51, 4, 0, 0, 0},
-	{AHINT, C_VCON, C_NONE, C_NONE, C_NONE, 52, 4, 0, 0, 0},
-	{ASYS, C_VCON, C_NONE, C_NONE, C_NONE, 50, 4, 0, 0, 0},
-	{ASYS, C_VCON, C_NONE, C_NONE, C_ZREG, 50, 4, 0, 0, 0},
-	{ASYSL, C_VCON, C_NONE, C_NONE, C_ZREG, 50, 4, 0, 0, 0},
-	{ATLBI, C_SPOP, C_NONE, C_NONE, C_NONE, 107, 4, 0, 0, 0},
-	{ATLBI, C_SPOP, C_NONE, C_NONE, C_ZREG, 107, 4, 0, 0, 0},
+	{AMOVD, C_SPR, C_NONE, C_NONE, C_ZREG, C_NONE, 35, 4, 0, 0, 0},
+	{AMRS, C_SPR, C_NONE, C_NONE, C_ZREG, C_NONE, 35, 4, 0, 0, 0},
+	{AMOVD, C_ZREG, C_NONE, C_NONE, C_SPR, C_NONE, 36, 4, 0, 0, 0},
+	{AMSR, C_ZREG, C_NONE, C_NONE, C_SPR, C_NONE, 36, 4, 0, 0, 0},
+	{AMOVD, C_VCON, C_NONE, C_NONE, C_SPR, C_NONE, 37, 4, 0, 0, 0},
+	{AMSR, C_VCON, C_NONE, C_NONE, C_SPR, C_NONE, 37, 4, 0, 0, 0},
+	{AMSR, C_VCON, C_NONE, C_NONE, C_SPOP, C_NONE, 37, 4, 0, 0, 0},
+	{APRFM, C_UOREG32K, C_NONE, C_NONE, C_SPOP, C_NONE, 91, 4, 0, 0, 0},
+	{APRFM, C_UOREG32K, C_NONE, C_NONE, C_LCON, C_NONE, 91, 4, 0, 0, 0},
+	{ADMB, C_VCON, C_NONE, C_NONE, C_NONE, C_NONE, 51, 4, 0, 0, 0},
+	{AHINT, C_VCON, C_NONE, C_NONE, C_NONE, C_NONE, 52, 4, 0, 0, 0},
+	{ASYS, C_VCON, C_NONE, C_NONE, C_NONE, C_NONE, 50, 4, 0, 0, 0},
+	{ASYS, C_VCON, C_NONE, C_NONE, C_ZREG, C_NONE, 50, 4, 0, 0, 0},
+	{ASYSL, C_VCON, C_NONE, C_NONE, C_ZREG, C_NONE, 50, 4, 0, 0, 0},
+	{ATLBI, C_SPOP, C_NONE, C_NONE, C_NONE, C_NONE, 107, 4, 0, 0, 0},
+	{ATLBI, C_SPOP, C_NONE, C_NONE, C_ZREG, C_NONE, 107, 4, 0, 0, 0},
 
 	/* encryption instructions */
-	{AAESD, C_VREG, C_NONE, C_NONE, C_VREG, 26, 4, 0, 0, 0}, // for compatibility with old code
-	{AAESD, C_ARNG, C_NONE, C_NONE, C_ARNG, 26, 4, 0, 0, 0}, // recommend using the new one for better readability
-	{ASHA1C, C_VREG, C_VREG, C_NONE, C_VREG, 49, 4, 0, 0, 0},
-	{ASHA1C, C_ARNG, C_VREG, C_NONE, C_VREG, 49, 4, 0, 0, 0},
-	{ASHA1SU0, C_ARNG, C_ARNG, C_NONE, C_ARNG, 63, 4, 0, 0, 0},
-	{AVREV32, C_ARNG, C_NONE, C_NONE, C_ARNG, 83, 4, 0, 0, 0},
-	{AVPMULL, C_ARNG, C_ARNG, C_NONE, C_ARNG, 93, 4, 0, 0, 0},
-	{AVEOR3, C_ARNG, C_ARNG, C_ARNG, C_ARNG, 103, 4, 0, 0, 0},
-	{AVXAR, C_VCON, C_ARNG, C_ARNG, C_ARNG, 104, 4, 0, 0, 0},
-
-	{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 90, 4, 0, 0, 0},
-	{obj.APCDATA, C_VCON, C_NONE, C_NONE, C_VCON, 0, 0, 0, 0, 0},
-	{obj.AFUNCDATA, C_VCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0, 0, 0},
-	{obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
-	{obj.ANOP, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // nop variants, see #40689
-	{obj.ANOP, C_ZREG, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
-	{obj.ANOP, C_VREG, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
-	{obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as AB/ABL
-	{obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as AB/ABL
-	{obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},  // align code
-
-	{obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0},
+	{AAESD, C_VREG, C_NONE, C_NONE, C_VREG, C_NONE, 26, 4, 0, 0, 0}, // for compatibility with old code
+	{AAESD, C_ARNG, C_NONE, C_NONE, C_ARNG, C_NONE, 26, 4, 0, 0, 0}, // recommend using the new one for better readability
+	{ASHA1C, C_VREG, C_VREG, C_NONE, C_VREG, C_NONE, 49, 4, 0, 0, 0},
+	{ASHA1C, C_ARNG, C_VREG, C_NONE, C_VREG, C_NONE, 49, 4, 0, 0, 0},
+	{ASHA1SU0, C_ARNG, C_ARNG, C_NONE, C_ARNG, C_NONE, 63, 4, 0, 0, 0},
+	{AVREV32, C_ARNG, C_NONE, C_NONE, C_ARNG, C_NONE, 83, 4, 0, 0, 0},
+	{AVPMULL, C_ARNG, C_ARNG, C_NONE, C_ARNG, C_NONE, 93, 4, 0, 0, 0},
+	{AVEOR3, C_ARNG, C_ARNG, C_ARNG, C_ARNG, C_NONE, 103, 4, 0, 0, 0},
+	{AVXAR, C_VCON, C_ARNG, C_ARNG, C_ARNG, C_NONE, 104, 4, 0, 0, 0},
+	{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, C_NONE, 90, 4, 0, 0, 0},
+	{obj.APCDATA, C_VCON, C_NONE, C_NONE, C_VCON, C_NONE, 0, 0, 0, 0, 0},
+	{obj.AFUNCDATA, C_VCON, C_NONE, C_NONE, C_ADDR, C_NONE, 0, 0, 0, 0, 0},
+	{obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
+	{obj.ANOP, C_LCON, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // nop variants, see #40689
+	{obj.ANOP, C_ZREG, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
+	{obj.ANOP, C_VREG, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
+	{obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_SBRA, C_NONE, 5, 4, 0, 0, 0}, // same as AB/ABL
+	{obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_SBRA, C_NONE, 5, 4, 0, 0, 0}, // same as AB/ABL
+	{obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},  // align code
 }
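The widened rows above add a sixth operand-class column. For readers tracing the table: the columns after the opcode are the classes of From, Reg, the third operand, To, and (new here) the second destination taken from RegTo2 or GetTo2, followed by the asmout case number, encoded size, implicit base-register parameter, flags such as LFROM/LTO or NOTUSETMP, and the pre/post-index suffix. The toy program below only restates one row from the table with those column meanings spelled out; the struct and its field names are stand-ins I picked, not the real Optab definition.

// Toy annotation of one widened optab row (the ASWPD entry above). The row
// struct and field names are placeholders, not the real Optab type.
package main

import "fmt"

type row struct {
	a1, a2, a3, a4 string // classes of From, Reg, third operand, To
	a5             string // new column: second destination (RegTo2 / GetTo2)
	asmoutCase     int    // which asmout case encodes the instruction
	size           int    // encoded size in bytes
	param          string // implicit base register such as REGSP, or 0
	flag           string // LFROM/LTO (literal pool) or NOTUSETMP, or 0
	scond          string // addressing-mode suffix: 0, C_XPRE or C_XPOST
}

func main() {
	// {ASWPD, C_ZREG, C_NONE, C_NONE, C_ZOREG, C_ZREG, 47, 4, 0, 0, 0}
	// previously needed a trailing "// RegTo2=C_REG" comment; the second
	// destination register class now lives in the a5 column and is matched
	// directly by oplook.
	swpd := row{"C_ZREG", "C_NONE", "C_NONE", "C_ZOREG", "C_ZREG", 47, 4, "0", "0", "0"}
	fmt.Printf("ASWPD row: %+v\n", swpd)
}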
 
 // Valid pstate field values, and value to use in instruction.
@@ -1091,9 +1128,6 @@
 		if o.flag&LFROM != 0 {
 			c.addpool(p, &p.From)
 		}
-		if o.flag&LFROM128 != 0 {
-			c.addpool128(p, &p.From, p.GetFrom3())
-		}
 		if o.flag&LTO != 0 {
 			c.addpool(p, &p.To)
 		}
@@ -1295,34 +1329,6 @@
 	c.pool.start = 0
 }
 
-// addpool128 adds a 128-bit constant to literal pool by two consecutive DWORD
-// instructions, the 128-bit constant is formed by ah.Offset<<64+al.Offset.
-func (c *ctxt7) addpool128(p *obj.Prog, al, ah *obj.Addr) {
-	q := c.newprog()
-	q.As = ADWORD
-	q.To.Type = obj.TYPE_CONST
-	q.To.Offset = al.Offset // q.Pc is lower than t.Pc, so al.Offset is stored in q.
-
-	t := c.newprog()
-	t.As = ADWORD
-	t.To.Type = obj.TYPE_CONST
-	t.To.Offset = ah.Offset
-
-	q.Link = t
-
-	if c.blitrl == nil {
-		c.blitrl = q
-		c.pool.start = uint32(p.Pc)
-	} else {
-		c.elitrl.Link = q
-	}
-
-	c.elitrl = t
-	c.pool.size = roundUp(c.pool.size, 16)
-	c.pool.size += 16
-	p.Pool = q
-}
-
 /*
  * MOVD foo(SB), R is actually
  *   MOVD addr, REGTMP
@@ -1339,8 +1345,8 @@
 	sz := 4
 
 	if a.Type == obj.TYPE_CONST {
-		if (lit != int64(int32(lit)) && uint64(lit) != uint64(uint32(lit))) || p.As == AVMOVQ || p.As == AVMOVD {
-			// out of range -0x80000000 ~ 0xffffffff or VMOVQ or VMOVD operand, must store 64-bit.
+		if lit != int64(int32(lit)) && uint64(lit) != uint64(uint32(lit)) {
+			// out of range -0x80000000 ~ 0xffffffff, must store 64-bit.
 			t.As = ADWORD
 			sz = 8
 		} // else store 32-bit
@@ -1386,6 +1392,32 @@
 	return (x + to - 1) &^ (to - 1)
 }
 
+// splitImm24uScaled splits an immediate into a scaled 12 bit unsigned lo value
+// and an unscaled shifted 12 bit unsigned hi value. These are typically used
+// by adding or subtracting the hi value and using the lo value as the offset
+// for a load or store.
+func splitImm24uScaled(v int32, shift int) (int32, int32, error) {
+	if v < 0 {
+		return 0, 0, fmt.Errorf("%d is not a 24 bit unsigned immediate", v)
+	}
+	if v > 0xfff000+0xfff<<shift {
+		return 0, 0, fmt.Errorf("%d is too large for a scaled 24 bit unsigned immediate", v)
+	}
+	if v&((1<<shift)-1) != 0 {
+		return 0, 0, fmt.Errorf("%d is not a multiple of %d", v, 1<<shift)
+	}
+	lo := (v >> shift) & 0xfff
+	hi := v - (lo << shift)
+	if hi > 0xfff000 {
+		hi = 0xfff000
+		lo = (v - hi) >> shift
+	}
+	if hi & ^0xfff000 != 0 {
+		panic(fmt.Sprintf("bad split for %x with shift %v (%x, %x)", v, shift, hi, lo))
+	}
+	return hi, lo, nil
+}
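As a concrete check of the helper above (the offset is just an example I picked): a byte offset of 0x123458 on an 8-byte-wide access (shift 3) splits into hi = 0x120000, which the caller can add into a temporary register, and lo = 0x68b, a 12-bit value that the load/store scales back up by 8 (0x68b<<3 = 0x3458). The standalone sketch below mirrors the arithmetic of splitImm24uScaled with the error checks omitted.

// Standalone sketch mirroring splitImm24uScaled's arithmetic (error checks
// omitted); the example offset is illustrative only.
package main

import "fmt"

func split(v int32, shift int) (hi, lo int32) {
	lo = (v >> shift) & 0xfff // scaled 12-bit low part
	hi = v - lo<<shift        // remainder, added/subtracted separately
	if hi > 0xfff000 {        // keep hi encodable as a shifted 12-bit value
		hi = 0xfff000
		lo = (v - hi) >> shift
	}
	return hi, lo
}

func main() {
	hi, lo := split(0x123458, 3)
	fmt.Printf("hi=%#x lo=%#x\n", hi, lo) // hi=0x120000 lo=0x68b
}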
+
 func (c *ctxt7) regoff(a *obj.Addr) int32 {
 	c.instoffset = 0
 	c.aclass(a)
@@ -1459,6 +1491,22 @@
 	return false
 }
 
+func isLoadStorePairOp(op obj.As) bool {
+	switch op {
+	case AFLDPQ, AFSTPQ, ALDP, ASTP, ALDPW, ASTPW:
+		return true
+	}
+	return false
+}
+
+func isMOVop(op obj.As) bool {
+	switch op {
+	case AMOVB, AMOVBU, AMOVH, AMOVHU, AMOVW, AMOVWU, AMOVD, AFMOVS, AFMOVD, AFMOVQ:
+		return true
+	}
+	return false
+}
+
 func isRegShiftOrExt(a *obj.Addr) bool {
 	return (a.Index-obj.RBaseARM64)&REG_EXT != 0 || (a.Index-obj.RBaseARM64)&REG_LSL != 0
 }
@@ -1895,6 +1943,94 @@
 	}
 }
 
+// loadStoreClass reclassifies a load or store operation based on its offset.
+func (c *ctxt7) loadStoreClass(p *obj.Prog, lsc int, v int64) int {
+	// Avoid reclassification of pre/post-indexed loads and stores.
+	if p.Scond == C_XPRE || p.Scond == C_XPOST {
+		return lsc
+	}
+	if cmp(C_NSAUTO, lsc) || cmp(C_NSOREG, lsc) {
+		return lsc
+	}
+
+	needsPool := true
+	if v >= -4095 && v <= 4095 {
+		needsPool = false
+	}
+
+	switch p.As {
+	case AMOVB, AMOVBU:
+		if cmp(C_UAUTO4K, lsc) || cmp(C_UOREG4K, lsc) {
+			return lsc
+		}
+		if v >= 0 && v <= 0xffffff {
+			needsPool = false
+		}
+	case AMOVH, AMOVHU:
+		if cmp(C_UAUTO8K, lsc) || cmp(C_UOREG8K, lsc) {
+			return lsc
+		}
+		if v >= 0 && v <= 0xfff000+0xfff<<1 && v&1 == 0 {
+			needsPool = false
+		}
+	case AMOVW, AMOVWU, AFMOVS:
+		if cmp(C_UAUTO16K, lsc) || cmp(C_UOREG16K, lsc) {
+			return lsc
+		}
+		if v >= 0 && v <= 0xfff000+0xfff<<2 && v&3 == 0 {
+			needsPool = false
+		}
+	case AMOVD, AFMOVD:
+		if cmp(C_UAUTO32K, lsc) || cmp(C_UOREG32K, lsc) {
+			return lsc
+		}
+		if v >= 0 && v <= 0xfff000+0xfff<<3 && v&7 == 0 {
+			needsPool = false
+		}
+	case AFMOVQ:
+		if cmp(C_UAUTO64K, lsc) || cmp(C_UOREG64K, lsc) {
+			return lsc
+		}
+		if v >= 0 && v <= 0xfff000+0xfff<<4 && v&15 == 0 {
+			needsPool = false
+		}
+	}
+	if needsPool && cmp(C_LAUTO, lsc) {
+		return C_LAUTOPOOL
+	}
+	if needsPool && cmp(C_LOREG, lsc) {
+		return C_LOREGPOOL
+	}
+	return lsc
+}
+
+// loadStorePairClass reclassifies a load or store pair operation based on its offset.
+func (c *ctxt7) loadStorePairClass(p *obj.Prog, lsc int, v int64) int {
+	// Avoid reclassification of pre/post-indexed loads and stores.
+	if p.Scond == C_XPRE || p.Scond == C_XPOST {
+		return lsc
+	}
+
+	if cmp(C_NAUTO4K, lsc) || cmp(C_NOREG4K, lsc) {
+		return lsc
+	}
+	if cmp(C_UAUTO4K, lsc) || cmp(C_UOREG4K, lsc) {
+		return lsc
+	}
+
+	needsPool := true
+	if v >= 0 && v <= 0xffffff {
+		needsPool = false
+	}
+	if needsPool && cmp(C_LAUTO, lsc) {
+		return C_LAUTOPOOL
+	}
+	if needsPool && cmp(C_LOREG, lsc) {
+		return C_LOREGPOOL
+	}
+	return lsc
+}
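Rough illustration of what the reclassification above buys (offset values are examples I chose): offsets that the add-plus-scaled-offset path can reach keep their plain C_LAUTO/C_LOREG class and avoid the literal pool, while larger or unaligned ones become C_LAUTOPOOL/C_LOREGPOOL, and in the table above only those rows keep the LFROM/LTO flags that route the operand through addpool. The sketch mirrors just the AMOVD/AFMOVD range test from loadStoreClass.

// Sketch of the AMOVD/AFMOVD branch of loadStoreClass above; example offsets
// only, and the small-offset and C_UAUTO32K early returns are omitted.
package main

import "fmt"

func movdNeedsPool(v int64) bool {
	// Reachable without the pool: 0 .. 0xfff000 + 0xfff<<3, 8-byte aligned
	// (an ADD of the high part plus a scaled 12-bit unsigned offset).
	return !(v >= 0 && v <= 0xfff000+0xfff<<3 && v&7 == 0)
}

func main() {
	fmt.Println(movdNeedsPool(0x100ff8))  // false: stays C_LAUTO/C_LOREG, split into ADD + offset
	fmt.Println(movdNeedsPool(0x2000000)) // true: reclassified to C_LAUTOPOOL/C_LOREGPOOL (LFROM/LTO)
}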
+
 func (c *ctxt7) aclass(a *obj.Addr) int {
 	switch a.Type {
 	case obj.TYPE_NONE:
@@ -2091,10 +2227,6 @@
 	return C_GOK
 }
 
-func oclass(a *obj.Addr) int {
-	return int(a.Class) - 1
-}
-
 func (c *ctxt7) oplook(p *obj.Prog) *Optab {
 	a1 := int(p.Optab)
 	if a1 != 0 {
@@ -2102,58 +2234,82 @@
 	}
 	a1 = int(p.From.Class)
 	if a1 == 0 {
-		a0 := c.aclass(&p.From)
+		a1 = c.aclass(&p.From)
 		// do not break C_ADDCON2 when S bit is set
-		if (p.As == AADDS || p.As == AADDSW || p.As == ASUBS || p.As == ASUBSW) && a0 == C_ADDCON2 {
-			a0 = C_LCON
+		if (p.As == AADDS || p.As == AADDSW || p.As == ASUBS || p.As == ASUBSW) && a1 == C_ADDCON2 {
+			a1 = C_LCON
 		}
-		a1 = a0 + 1
-		p.From.Class = int8(a1)
 		if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE {
 			if p.As == AMOVW || isADDWop(p.As) || isANDWop(p.As) {
 				// For 32-bit instruction with constant, we need to
 				// treat its offset value as 32 bits to classify it.
-				ra0 := c.con32class(&p.From)
+				a1 = c.con32class(&p.From)
 				// do not break C_ADDCON2 when S bit is set
-				if (p.As == AADDSW || p.As == ASUBSW) && ra0 == C_ADDCON2 {
-					ra0 = C_LCON
+				if (p.As == AADDSW || p.As == ASUBSW) && a1 == C_ADDCON2 {
+					a1 = C_LCON
 				}
-				a1 = ra0 + 1
-				p.From.Class = int8(a1)
 			}
-			if ((p.As == AMOVD) || isANDop(p.As) || isADDop(p.As)) && (a0 == C_LCON || a0 == C_VCON) {
+			if ((p.As == AMOVD) || isANDop(p.As) || isADDop(p.As)) && (a1 == C_LCON || a1 == C_VCON) {
 				// more specific classification of 64-bit integers
-				a1 = c.con64class(&p.From) + 1
-				p.From.Class = int8(a1)
+				a1 = c.con64class(&p.From)
 			}
 		}
-	}
-
-	a1--
-	a3 := C_NONE + 1
-	if p.GetFrom3() != nil && p.RestArgs[0].Pos == 0 {
-		a3 = int(p.GetFrom3().Class)
-		if a3 == 0 {
-			a3 = c.aclass(p.GetFrom3()) + 1
-			p.GetFrom3().Class = int8(a3)
+		if p.From.Type == obj.TYPE_MEM {
+			if isMOVop(p.As) && (cmp(C_LAUTO, a1) || cmp(C_LOREG, a1)) {
+				// More specific classification of large offset loads and stores.
+				a1 = c.loadStoreClass(p, a1, c.instoffset)
+			}
+			if isLoadStorePairOp(p.As) && (cmp(C_LAUTO, a1) || cmp(C_LOREG, a1)) {
+				// More specific classification of large offset loads and stores.
+				a1 = c.loadStorePairClass(p, a1, c.instoffset)
+			}
 		}
+		p.From.Class = int8(a1)
 	}
 
-	a3--
-	a4 := int(p.To.Class)
-	if a4 == 0 {
-		a4 = c.aclass(&p.To) + 1
-		p.To.Class = int8(a4)
-	}
-
-	a4--
 	a2 := C_NONE
 	if p.Reg != 0 {
 		a2 = rclass(p.Reg)
 	}
 
+	a3 := C_NONE
+	if p.GetFrom3() != nil {
+		a3 = int(p.GetFrom3().Class)
+		if a3 == 0 {
+			a3 = c.aclass(p.GetFrom3())
+			p.GetFrom3().Class = int8(a3)
+		}
+	}
+
+	a4 := int(p.To.Class)
+	if a4 == 0 {
+		a4 = c.aclass(&p.To)
+		if p.To.Type == obj.TYPE_MEM {
+			if isMOVop(p.As) && (cmp(C_LAUTO, a4) || cmp(C_LOREG, a4)) {
+				// More specific classification of large offset loads and stores.
+				a4 = c.loadStoreClass(p, a4, c.instoffset)
+			}
+			if isLoadStorePairOp(p.As) && (cmp(C_LAUTO, a4) || cmp(C_LOREG, a4)) {
+				// More specific classification of large offset loads and stores.
+				a4 = c.loadStorePairClass(p, a4, c.instoffset)
+			}
+		}
+		p.To.Class = int8(a4)
+	}
+
+	a5 := C_NONE
+	if p.RegTo2 != 0 {
+		a5 = rclass(p.RegTo2)
+	} else if p.GetTo2() != nil {
+		a5 = int(p.GetTo2().Class)
+		if a5 == 0 {
+			a5 = c.aclass(p.GetTo2())
+			p.GetTo2().Class = int8(a5)
+		}
+	}
+
 	if false {
-		fmt.Printf("oplook %v %d %d %d %d\n", p.As, a1, a2, a3, a4)
+		fmt.Printf("oplook %v %d %d %d %d %d\n", p.As, a1, a2, a3, a4, a5)
 		fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
 	}
 
@@ -2162,18 +2318,18 @@
 	c2 := &xcmp[a2]
 	c3 := &xcmp[a3]
 	c4 := &xcmp[a4]
-	c5 := &xcmp[p.Scond>>5]
+	c5 := &xcmp[a5]
 	for i := range ops {
 		op := &ops[i]
-		if (int(op.a2) == a2 || c2[op.a2]) && c5[op.scond>>5] && c1[op.a1] && c3[op.a3] && c4[op.a4] {
+		if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && p.Scond == op.scond {
 			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
 			return op
 		}
 	}
 
-	c.ctxt.Diag("illegal combination: %v %v %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), p.From.Type, p.To.Type)
+	c.ctxt.Diag("illegal combination: %v %v %v %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), p.From.Type, p.To.Type)
 	// Turn illegal instruction into an UNDEF, avoid crashing in asmout
-	return &Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 90, 4, 0, 0, 0}
+	return &Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, C_NONE, 90, 4, 0, 0, 0}
 }
 
 func cmp(a int, b int) bool {
@@ -2596,29 +2752,27 @@
 		return
 	}
 
-	var n int
 	for i := 0; i < C_GOK; i++ {
-		for n = 0; n < C_GOK; n++ {
-			if cmp(n, i) {
-				xcmp[i][n] = true
+		for j := 0; j < C_GOK; j++ {
+			if cmp(j, i) {
+				xcmp[i][j] = true
 			}
 		}
 	}
-	for n = 0; optab[n].as != obj.AXXX; n++ {
-	}
-	sort.Sort(ocmp(optab[:n]))
-	for i := 0; i < n; i++ {
-		r := optab[i].as
-		start := i
-		for optab[i].as == r {
-			i++
+
+	sort.Sort(ocmp(optab))
+	for i := 0; i < len(optab); i++ {
+		as, start := optab[i].as, i
+		for ; i < len(optab)-1; i++ {
+			if optab[i+1].as != as {
+				break
+			}
 		}
-		t := optab[start:i]
-		i--
-		oprangeset(r, t)
-		switch r {
+		t := optab[start : i+1]
+		oprangeset(as, t)
+		switch as {
 		default:
-			ctxt.Diag("unknown op in build: %v", r)
+			ctxt.Diag("unknown op in build: %v", as)
 			ctxt.DiagFlush()
 			log.Fatalf("bad code")
 
@@ -3541,7 +3695,7 @@
 		}
 		o := uint32(0)
 		num := uint8(0)
-		cls := oclass(&p.From)
+		cls := int(p.From.Class)
 		if isADDWop(p.As) {
 			if !cmp(C_LCON, cls) {
 				c.ctxt.Diag("illegal combination: %v", p)
@@ -3553,26 +3707,24 @@
 		if num == 0 {
 			c.ctxt.Diag("invalid constant: %v", p)
 		}
-		rt := int(p.To.Reg)
+
+		rt, r, rf := p.To.Reg, p.Reg, int16(REGTMP)
 		if p.To.Type == obj.TYPE_NONE {
 			rt = REGZERO
 		}
-		r := int(p.Reg)
 		if r == obj.REG_NONE {
 			r = rt
 		}
-		if p.To.Type != obj.TYPE_NONE && (p.To.Reg == REGSP || r == REGSP) {
-			o = c.opxrrr(p, p.As, false)
-			o |= REGTMP & 31 << 16
+		if p.To.Type != obj.TYPE_NONE && (rt == REGSP || r == REGSP) {
+			o = c.opxrrr(p, p.As, rt, r, rf, false)
 			o |= LSL0_64
 		} else {
 			o = c.oprrr(p, p.As)
-			o |= REGTMP & 31 << 16 /* shift is 0 */
+			o |= uint32(rf&31) << 16 /* shift is 0 */
+			o |= uint32(r&31) << 5
+			o |= uint32(rt & 31)
 		}
 
-		o |= uint32(r&31) << 5
-		o |= uint32(rt & 31)
-
 		os[num] = o
 		o1 = os[0]
 		o2 = os[1]
@@ -3816,27 +3968,24 @@
 		if p.To.Reg == REG_RSP && isADDSop(p.As) {
 			c.ctxt.Diag("illegal destination register: %v\n", p)
 		}
+		rt, r, rf := p.To.Reg, p.Reg, p.From.Reg
+		if p.To.Type == obj.TYPE_NONE {
+			rt = REGZERO
+		}
+		if r == obj.REG_NONE {
+			r = rt
+		}
 		if (p.From.Reg-obj.RBaseARM64)&REG_EXT != 0 ||
 			(p.From.Reg >= REG_LSL && p.From.Reg < REG_ARNG) {
 			amount := (p.From.Reg >> 5) & 7
 			if amount > 4 {
 				c.ctxt.Diag("shift amount out of range 0 to 4: %v", p)
 			}
-			o1 = c.opxrrr(p, p.As, true)
+			o1 = c.opxrrr(p, p.As, rt, r, obj.REG_NONE, true)
 			o1 |= c.encRegShiftOrExt(p, &p.From, p.From.Reg) /* includes reg, op, etc */
 		} else {
-			o1 = c.opxrrr(p, p.As, false)
-			o1 |= uint32(p.From.Reg&31) << 16
+			o1 = c.opxrrr(p, p.As, rt, r, rf, false)
 		}
-		rt := int(p.To.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			rt = REGZERO
-		}
-		r := int(p.Reg)
-		if r == obj.REG_NONE {
-			r = rt
-		}
-		o1 |= (uint32(r&31) << 5) | uint32(rt&31)
 
 	case 28: /* logop $vcon, [R], R (64 bit literal) */
 		if p.Reg == REGTMP {
@@ -3844,7 +3993,7 @@
 		}
 		o := uint32(0)
 		num := uint8(0)
-		cls := oclass(&p.From)
+		cls := int(p.From.Class)
 		if isANDWop(p.As) {
 			if !cmp(C_LCON, cls) {
 				c.ctxt.Diag("illegal combination: %v", p)
@@ -3895,10 +4044,13 @@
 		o1 |= uint32(p.From.Reg&31)<<5 | uint32(p.To.Reg&31)
 
 	case 30: /* movT R,L(R) -> strT */
-		// if offset L can be split into hi+lo, and both fit into instructions, do
+		// If offset L fits in a 12 bit unsigned immediate:
+		//	add $L, R, Rtmp  or  sub $L, R, Rtmp
+		//	str R, (Rtmp)
+		// Otherwise, if offset L can be split into hi+lo, and both fit into instructions:
 		//	add $hi, R, Rtmp
 		//	str R, lo(Rtmp)
-		// otherwise, use constant pool
+		// Otherwise, use constant pool:
 		//	mov $L, Rtmp (from constant pool)
 		//	str R, (R+Rtmp)
 		s := movesize(o.as)
@@ -3912,26 +4064,35 @@
 		}
 
 		v := c.regoff(&p.To)
-		var hi int32
-		if v < 0 || (v&((1<<uint(s))-1)) != 0 {
-			// negative or unaligned offset, use constant pool
-			goto storeusepool
+		if v >= -256 && v <= 256 {
+			c.ctxt.Diag("%v: bad type for offset %d (should be 9 bit signed immediate store)", p, v)
+		}
+		if v >= 0 && v <= 4095 && v&((1<<int32(s))-1) == 0 {
+			c.ctxt.Diag("%v: bad type for offset %d (should be 12 bit unsigned immediate store)", p, v)
 		}
 
-		hi = v - (v & (0xFFF << uint(s)))
-		if hi&0xFFF != 0 {
-			c.ctxt.Diag("internal: miscalculated offset %d [%d]\n%v", v, s, p)
-		}
-		if hi&^0xFFF000 != 0 {
-			// hi doesn't fit into an ADD instruction
-			goto storeusepool
+		// Handle smaller unaligned and negative offsets via addition or subtraction.
+		if v >= -4095 && v <= 4095 {
+			o1 = c.oaddi12(p, v, REGTMP, int16(r))
+			o2 = c.olsr12u(p, c.opstr(p, p.As), 0, REGTMP, p.From.Reg)
+			break
 		}
 
+		hi, lo, err := splitImm24uScaled(v, s)
+		if err != nil {
+			goto storeusepool
+		}
+		if p.Pool != nil {
+			c.ctxt.Diag("%v: unused constant in pool (%v)\n", p, v)
+		}
 		o1 = c.oaddi(p, AADD, hi, REGTMP, r)
-		o2 = c.olsr12u(p, c.opstr(p, p.As), ((v-hi)>>uint(s))&0xFFF, REGTMP, p.From.Reg)
+		o2 = c.olsr12u(p, c.opstr(p, p.As), lo, REGTMP, p.From.Reg)
 		break
 
 	storeusepool:
+		if p.Pool == nil {
+			c.ctxt.Diag("%v: constant is not in pool", p)
+		}
 		if r == REGTMP || p.From.Reg == REGTMP {
 			c.ctxt.Diag("REGTMP used in large offset store: %v", p)
 		}
@@ -3939,10 +4100,13 @@
 		o2 = c.olsxrr(p, int32(c.opstrr(p, p.As, false)), int(p.From.Reg), int(r), REGTMP)
 
 	case 31: /* movT L(R), R -> ldrT */
-		// if offset L can be split into hi+lo, and both fit into instructions, do
+		// If offset L fits in a 12 bit unsigned immediate:
+		//	add $L, R, Rtmp  or  sub $L, R, Rtmp
+		//	ldr R, (Rtmp)
+		// Otherwise, if offset L can be split into hi+lo, and both fit into instructions:
 		//	add $hi, R, Rtmp
 		//	ldr lo(Rtmp), R
-		// otherwise, use constant pool
+		// Otherwise, use constant pool:
 		//	mov $L, Rtmp (from constant pool)
 		//	ldr (R+Rtmp), R
 		s := movesize(o.as)
@@ -3956,26 +4120,35 @@
 		}
 
 		v := c.regoff(&p.From)
-		var hi int32
-		if v < 0 || (v&((1<<uint(s))-1)) != 0 {
-			// negative or unaligned offset, use constant pool
-			goto loadusepool
+		if v >= -256 && v <= 256 {
+			c.ctxt.Diag("%v: bad type for offset %d (should be 9 bit signed immediate load)", p, v)
+		}
+		if v >= 0 && v <= 4095 && v&((1<<int32(s))-1) == 0 {
+			c.ctxt.Diag("%v: bad type for offset %d (should be 12 bit unsigned immediate load)", p, v)
 		}
 
-		hi = v - (v & (0xFFF << uint(s)))
-		if (hi & 0xFFF) != 0 {
-			c.ctxt.Diag("internal: miscalculated offset %d [%d]\n%v", v, s, p)
-		}
-		if hi&^0xFFF000 != 0 {
-			// hi doesn't fit into an ADD instruction
-			goto loadusepool
+		// Handle smaller unaligned and negative offsets via addition or subtraction.
+		if v >= -4095 && v <= 4095 {
+			o1 = c.oaddi12(p, v, REGTMP, int16(r))
+			o2 = c.olsr12u(p, c.opldr(p, p.As), 0, REGTMP, p.To.Reg)
+			break
 		}
 
+		hi, lo, err := splitImm24uScaled(v, s)
+		if err != nil {
+			goto loadusepool
+		}
+		if p.Pool != nil {
+			c.ctxt.Diag("%v: unused constant in pool (%v)\n", p, v)
+		}
 		o1 = c.oaddi(p, AADD, hi, REGTMP, r)
-		o2 = c.olsr12u(p, c.opldr(p, p.As), ((v-hi)>>uint(s))&0xFFF, REGTMP, p.To.Reg)
+		o2 = c.olsr12u(p, c.opldr(p, p.As), lo, REGTMP, p.To.Reg)
 		break
 
 	loadusepool:
+		if p.Pool == nil {
+			c.ctxt.Diag("%v: constant is not in pool", p)
+		}
 		if r == REGTMP || p.From.Reg == REGTMP {
 			c.ctxt.Diag("REGTMP used in large offset load: %v", p)
 		}
@@ -4008,15 +4181,12 @@
 
 	case 34: /* mov $lacon,R */
 		o1 = c.omovlit(AMOVD, p, &p.From, REGTMP)
-		o2 = c.opxrrr(p, AADD, false)
-		o2 |= REGTMP & 31 << 16
-		o2 |= LSL0_64
-		r := int(p.From.Reg)
+		rt, r, rf := p.To.Reg, p.From.Reg, int16(REGTMP)
 		if r == obj.REG_NONE {
-			r = int(o.param)
+			r = o.param
 		}
-		o2 |= uint32(r&31) << 5
-		o2 |= uint32(p.To.Reg & 31)
+		o2 = c.opxrrr(p, AADD, rt, r, rf, false)
+		o2 |= LSL0_64
 
 	case 35: /* mov SPR,R -> mrs */
 		o1 = c.oprrr(p, AMRS)
@@ -4453,24 +4623,22 @@
 			o1 = c.omovconst(AMOVD, p, &p.From, REGTMP)
 		}
 
-		rt := int(p.To.Reg)
+		rt, r, rf := p.To.Reg, p.Reg, int16(REGTMP)
 		if p.To.Type == obj.TYPE_NONE {
 			rt = REGZERO
 		}
-		r := int(p.Reg)
 		if r == obj.REG_NONE {
 			r = rt
 		}
-		if p.To.Reg == REGSP || r == REGSP {
-			o2 = c.opxrrr(p, p.As, false)
-			o2 |= REGTMP & 31 << 16
+		if rt == REGSP || r == REGSP {
+			o2 = c.opxrrr(p, p.As, rt, r, rf, false)
 			o2 |= uint32(lsl0)
 		} else {
 			o2 = c.oprrr(p, p.As)
-			o2 |= REGTMP & 31 << 16 /* shift is 0 */
+			o2 |= uint32(rf&31) << 16 /* shift is 0 */
+			o2 |= uint32(r&31) << 5
+			o2 |= uint32(rt & 31)
 		}
-		o2 |= uint32(r&31) << 5
-		o2 |= uint32(rt & 31)
 
 	case 63: /* op Vm.<t>, Vn.<T>, Vd.<T> */
 		o1 |= c.oprrr(p, p.As)
@@ -4718,24 +4886,51 @@
 		o2 = c.opldpstp(p, o, 0, REGTMP, rt1, rt2, 1)
 
 	case 75:
+		// If offset L fits in a 24 bit unsigned immediate:
+		//	add $lo, R, Rtmp
+		//	add $hi, Rtmp, Rtmp
+		//	ldp (Rtmp), (R1, R2)
+		// Otherwise, use constant pool:
 		//	mov $L, Rtmp (from constant pool)
 		//	add Rtmp, R, Rtmp
 		//	ldp (Rtmp), (R1, R2)
-		rf, rt1, rt2 := int(p.From.Reg), p.To.Reg, int16(p.To.Offset)
+		rf, rt1, rt2 := p.From.Reg, p.To.Reg, int16(p.To.Offset)
 		if rf == REGTMP {
 			c.ctxt.Diag("REGTMP used in large offset load: %v", p)
 		}
 		if rf == obj.REG_NONE {
-			rf = int(o.param)
+			rf = o.param
 		}
 		if rf == obj.REG_NONE {
 			c.ctxt.Diag("invalid ldp source: %v", p)
 		}
+
+		v := c.regoff(&p.From)
+		if v >= -4095 && v <= 4095 {
+			c.ctxt.Diag("%v: bad type for offset %d (should be add/sub+ldp)", p, v)
+		}
+
+		hi, lo, err := splitImm24uScaled(v, 0)
+		if err != nil {
+			goto loadpairusepool
+		}
+		if p.Pool != nil {
+			c.ctxt.Diag("%v: unused constant in pool (%v)\n", p, v)
+		}
+		o1 = c.oaddi(p, AADD, lo, REGTMP, int16(rf))
+		o2 = c.oaddi(p, AADD, hi, REGTMP, REGTMP)
+		o3 = c.opldpstp(p, o, 0, REGTMP, rt1, rt2, 1)
+		break
+
+	loadpairusepool:
+		if p.Pool == nil {
+			c.ctxt.Diag("%v: constant is not in pool", p)
+		}
+		if rf == REGTMP || p.From.Reg == REGTMP {
+			c.ctxt.Diag("REGTMP used in large offset load: %v", p)
+		}
 		o1 = c.omovlit(AMOVD, p, &p.From, REGTMP)
-		o2 = c.opxrrr(p, AADD, false)
-		o2 |= (REGTMP & 31) << 16
-		o2 |= uint32(rf&31) << 5
-		o2 |= uint32(REGTMP & 31)
+		o2 = c.opxrrr(p, AADD, REGTMP, rf, REGTMP, false)
 		o3 = c.opldpstp(p, o, 0, REGTMP, rt1, rt2, 1)
 
 	case 76:
@@ -4756,24 +4951,51 @@
 		o2 = c.opldpstp(p, o, 0, REGTMP, rf1, rf2, 0)
 
 	case 77:
+		// If offset L fits in a 24 bit unsigned immediate:
+		//	add $lo, R, Rtmp
+		//	add $hi, Rtmp, Rtmp
+		//	stp (R1, R2), (Rtmp)
+		// Otherwise, use constant pool:
 		//	mov $L, Rtmp (from constant pool)
 		//	add Rtmp, R, Rtmp
 		//	stp (R1, R2), (Rtmp)
-		rt, rf1, rf2 := int(p.To.Reg), p.From.Reg, int16(p.From.Offset)
+		rt, rf1, rf2 := p.To.Reg, p.From.Reg, int16(p.From.Offset)
 		if rt == REGTMP || rf1 == REGTMP || rf2 == REGTMP {
 			c.ctxt.Diag("REGTMP used in large offset store: %v", p)
 		}
 		if rt == obj.REG_NONE {
-			rt = int(o.param)
+			rt = o.param
 		}
 		if rt == obj.REG_NONE {
 			c.ctxt.Diag("invalid stp destination: %v", p)
 		}
+
+		v := c.regoff(&p.To)
+		if v >= -4095 && v <= 4095 {
+			c.ctxt.Diag("%v: bad type for offset %d (should be add/sub+stp)", p, v)
+		}
+
+		hi, lo, err := splitImm24uScaled(v, 0)
+		if err != nil {
+			goto storepairusepool
+		}
+		if p.Pool != nil {
+			c.ctxt.Diag("%v: unused constant in pool (%v)\n", p, v)
+		}
+		o1 = c.oaddi(p, AADD, lo, REGTMP, int16(rt))
+		o2 = c.oaddi(p, AADD, hi, REGTMP, REGTMP)
+		o3 = c.opldpstp(p, o, 0, REGTMP, rf1, rf2, 0)
+		break
+
+	storepairusepool:
+		if p.Pool == nil {
+			c.ctxt.Diag("%v: constant is not in pool", p)
+		}
+		if rt == REGTMP || p.From.Reg == REGTMP {
+			c.ctxt.Diag("REGTMP used in large offset store: %v", p)
+		}
 		o1 = c.omovlit(AMOVD, p, &p.To, REGTMP)
-		o2 = c.opxrrr(p, AADD, false)
-		o2 |= REGTMP & 31 << 16
-		o2 |= uint32(rt&31) << 5
-		o2 |= uint32(REGTMP & 31)
+		o2 = c.opxrrr(p, AADD, REGTMP, rt, REGTMP, false)
 		o3 = c.opldpstp(p, o, 0, REGTMP, rf1, rf2, 0)
 
 	case 78: /* vmov R, V.<T>[index] */
@@ -5521,9 +5743,6 @@
 		o1 = q<<30 | 0xe<<24 | len<<13 | op<<12
 		o1 |= (uint32(rf&31) << 16) | uint32(offset&31)<<5 | uint32(rt&31)
 
-	case 101: // VMOVQ $vcon1, $vcon2, Vd or VMOVD|VMOVS $vcon, Vd -> FMOVQ/FMOVD/FMOVS pool(PC), Vd: load from constant pool.
-		o1 = c.omovlit(p.As, p, &p.From, int(p.To.Reg))
-
 	case 102: /* vushll, vushll2, vuxtl, vuxtl2 */
 		o1 = c.opirr(p, p.As)
 		rf := p.Reg
@@ -6640,7 +6859,7 @@
 /*
  * add/subtract sign or zero-extended register
  */
-func (c *ctxt7) opxrrr(p *obj.Prog, a obj.As, extend bool) uint32 {
+func (c *ctxt7) opxrrr(p *obj.Prog, a obj.As, rd, rn, rm int16, extend bool) uint32 {
 	extension := uint32(0)
 	if !extend {
 		if isADDop(a) {
@@ -6651,34 +6870,41 @@
 		}
 	}
 
+	var op uint32
+
 	switch a {
 	case AADD:
-		return S64 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
+		op = S64 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
 
 	case AADDW:
-		return S32 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
+		op = S32 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
 
 	case ACMN, AADDS:
-		return S64 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
+		op = S64 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
 
 	case ACMNW, AADDSW:
-		return S32 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
+		op = S32 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
 
 	case ASUB:
-		return S64 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
+		op = S64 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
 
 	case ASUBW:
-		return S32 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
+		op = S32 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
 
 	case ACMP, ASUBS:
-		return S64 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
+		op = S64 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
 
 	case ACMPW, ASUBSW:
-		return S32 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
+		op = S32 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension
+
+	default:
+		c.ctxt.Diag("bad opxrrr %v\n%v", a, p)
+		return 0
 	}
 
-	c.ctxt.Diag("bad opxrrr %v\n%v", a, p)
-	return 0
+	op |= uint32(rm&0x1f)<<16 | uint32(rn&0x1f)<<5 | uint32(rd&0x1f)
+
+	return op
 }
 
 func (c *ctxt7) opimm(p *obj.Prog, a obj.As) uint32 {
@@ -7041,13 +7267,13 @@
 	case AMOVBU:
 		return LDSTR(0, 0, 1)
 
-	case AFMOVS:
+	case AFMOVS, AVMOVS:
 		return LDSTR(2, 1, 1)
 
-	case AFMOVD:
+	case AFMOVD, AVMOVD:
 		return LDSTR(3, 1, 1)
 
-	case AFMOVQ:
+	case AFMOVQ, AVMOVQ:
 		return LDSTR(0, 1, 3)
 	}
 
@@ -7217,7 +7443,7 @@
 
 // load a constant (MOVCON or BITCON) in a into rt
 func (c *ctxt7) omovconst(as obj.As, p *obj.Prog, a *obj.Addr, rt int) (o1 uint32) {
-	if cls := oclass(a); (cls == C_BITCON || cls == C_ABCON || cls == C_ABCON0) && rt != REGZERO {
+	if cls := int(a.Class); (cls == C_BITCON || cls == C_ABCON || cls == C_ABCON0) && rt != REGZERO {
 		// or $bitcon, REGZERO, rt. rt can't be ZR.
 		mode := 64
 		var as1 obj.As
@@ -7515,7 +7741,7 @@
 			c.ctxt.Diag("invalid register pair %v\n", p)
 		}
 	case ALDP, ALDPW, ALDPSW:
-		if rl < REG_R0 || REG_R30 < rl || rh < REG_R0 || REG_R30 < rh {
+		if rl < REG_R0 || REG_R31 < rl || rh < REG_R0 || REG_R31 < rh {
 			c.ctxt.Diag("invalid register pair %v\n", p)
 		}
 	case ASTP, ASTPW:
@@ -7601,6 +7827,11 @@
 	case REG_UXTW <= r && r < REG_UXTX:
 		if a.Type == obj.TYPE_MEM {
 			if num == 0 {
+				// According to the arm64 specification, for instructions MOVB, MOVBU and FMOVB,
+				// the extension amount must be 0, encoded in "S" as 0 if omitted, or as 1 if present.
+				// But in Go, we don't distinguish between Rn.UXTW and Rn.UXTW<<0, so we encode it
+				// as if the extension amount were omitted. This makes no difference to the behavior
+				// of the instruction. The same applies to the LSL, SXTW and SXTX extensions.
 				return roff(rm, 2, 2)
 			} else {
 				return roff(rm, 2, 6)
@@ -7636,7 +7867,11 @@
 		}
 	case REG_LSL <= r && r < REG_ARNG:
 		if a.Type == obj.TYPE_MEM { // (R1)(R2<<1)
-			return roff(rm, 3, 6)
+			if num == 0 {
+				return roff(rm, 3, 2)
+			} else {
+				return roff(rm, 3, 6)
+			}
 		} else if isADDWop(p.As) {
 			return roff(rm, 2, num)
 		}
diff --git a/src/cmd/internal/obj/arm64/asm_arm64_test.go b/src/cmd/internal/obj/arm64/asm_arm64_test.go
index c52717d..0680394 100644
--- a/src/cmd/internal/obj/arm64/asm_arm64_test.go
+++ b/src/cmd/internal/obj/arm64/asm_arm64_test.go
@@ -14,6 +14,158 @@
 	"testing"
 )
 
+func TestSplitImm24uScaled(t *testing.T) {
+	tests := []struct {
+		v       int32
+		shift   int
+		wantErr bool
+		wantHi  int32
+		wantLo  int32
+	}{
+		{
+			v:      0,
+			shift:  0,
+			wantHi: 0,
+			wantLo: 0,
+		},
+		{
+			v:      0x1001,
+			shift:  0,
+			wantHi: 0x1000,
+			wantLo: 0x1,
+		},
+		{
+			v:      0xffffff,
+			shift:  0,
+			wantHi: 0xfff000,
+			wantLo: 0xfff,
+		},
+		{
+			v:       0xffffff,
+			shift:   1,
+			wantErr: true,
+		},
+		{
+			v:      0xfe,
+			shift:  1,
+			wantHi: 0x0,
+			wantLo: 0x7f,
+		},
+		{
+			v:      0x10fe,
+			shift:  1,
+			wantHi: 0x0,
+			wantLo: 0x87f,
+		},
+		{
+			v:      0x2002,
+			shift:  1,
+			wantHi: 0x2000,
+			wantLo: 0x1,
+		},
+		{
+			v:      0xfffffe,
+			shift:  1,
+			wantHi: 0xffe000,
+			wantLo: 0xfff,
+		},
+		{
+			v:      0x1000ffe,
+			shift:  1,
+			wantHi: 0xfff000,
+			wantLo: 0xfff,
+		},
+		{
+			v:       0x1001000,
+			shift:   1,
+			wantErr: true,
+		},
+		{
+			v:       0xfffffe,
+			shift:   2,
+			wantErr: true,
+		},
+		{
+			v:      0x4004,
+			shift:  2,
+			wantHi: 0x4000,
+			wantLo: 0x1,
+		},
+		{
+			v:      0xfffffc,
+			shift:  2,
+			wantHi: 0xffc000,
+			wantLo: 0xfff,
+		},
+		{
+			v:      0x1002ffc,
+			shift:  2,
+			wantHi: 0xfff000,
+			wantLo: 0xfff,
+		},
+		{
+			v:       0x1003000,
+			shift:   2,
+			wantErr: true,
+		},
+		{
+			v:       0xfffffe,
+			shift:   3,
+			wantErr: true,
+		},
+		{
+			v:      0x8008,
+			shift:  3,
+			wantHi: 0x8000,
+			wantLo: 0x1,
+		},
+		{
+			v:      0xfffff8,
+			shift:  3,
+			wantHi: 0xff8000,
+			wantLo: 0xfff,
+		},
+		{
+			v:      0x1006ff8,
+			shift:  3,
+			wantHi: 0xfff000,
+			wantLo: 0xfff,
+		},
+		{
+			v:       0x1007000,
+			shift:   3,
+			wantErr: true,
+		},
+	}
+	for _, test := range tests {
+		hi, lo, err := splitImm24uScaled(test.v, test.shift)
+		switch {
+		case err == nil && test.wantErr:
+			t.Errorf("splitImm24uScaled(%v, %v) succeeded, want error", test.v, test.shift)
+		case err != nil && !test.wantErr:
+			t.Errorf("splitImm24uScaled(%v, %v) failed: %v", test.v, test.shift, err)
+		case !test.wantErr:
+			if got, want := hi, test.wantHi; got != want {
+				t.Errorf("splitImm24uScaled(%x, %x) - got hi %x, want %x", test.v, test.shift, got, want)
+			}
+			if got, want := lo, test.wantLo; got != want {
+				t.Errorf("splitImm24uScaled(%x, %x) - got lo %x, want %x", test.v, test.shift, got, want)
+			}
+		}
+	}
+	for shift := 0; shift <= 3; shift++ {
+		for v := int32(0); v < 0xfff000+0xfff<<shift; v = v + 1<<shift {
+			hi, lo, err := splitImm24uScaled(v, shift)
+			if err != nil {
+				t.Fatalf("splitImm24uScaled(%x, %x) failed: %v", v, shift, err)
+			}
+			if hi+lo<<shift != v {
+				t.Fatalf("splitImm24uScaled(%x, %x) = (%x, %x) is incorrect", v, shift, hi, lo)
+			}
+		}
+	}
+}
+
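Cases 30, 31, 75 and 77 above lean on the new splitImm24uScaled helper, whose body is not visible in this hunk: it breaks a large offset into a hi part suitable for an ADD immediate (a 12 bit value shifted left by 12) and a scaled 12 bit lo part for the load/store immediate. The sketch below is reconstructed from the test table above; it is a plausible implementation, not necessarily the assembler's verbatim code, and its error messages and the register names printed by main are purely illustrative. For example, a case 75 load pair at offset 0x12345 (scale shift 0) splits into lo = 0x345 and hi = 0x12000, giving ADD $0x345, R, Rtmp; ADD $0x12000, Rtmp, Rtmp; LDP (Rtmp), (R1, R2).

	package main

	import "fmt"

	// splitImm24uScaled splits v into hi and lo such that hi + lo<<shift == v,
	// where hi is a multiple of 0x1000 no larger than 0xfff000 (so it fits the
	// 12-bit-shifted-by-12 immediate of an ADD) and lo fits in 12 bits. It fails
	// for negative values, values that are not multiples of 1<<shift, and values
	// beyond the combined 24 bit range.
	func splitImm24uScaled(v int32, shift int) (int32, int32, error) {
		if v < 0 {
			return 0, 0, fmt.Errorf("%d is negative", v)
		}
		if v > 0xfff000+0xfff<<shift {
			return 0, 0, fmt.Errorf("%d is too large for a scaled 24 bit unsigned immediate", v)
		}
		if v&((1<<shift)-1) != 0 {
			return 0, 0, fmt.Errorf("%d is not a multiple of %d", v, 1<<shift)
		}
		lo := (v >> shift) & 0xfff
		hi := v - lo<<shift
		if hi > 0xfff000 {
			// Move part of the high component into lo so that hi still fits
			// in a 12 bit immediate shifted left by 12.
			hi = 0xfff000
			lo = (v - hi) >> shift
		}
		return hi, lo, nil
	}

	func main() {
		// Hypothetical large-offset load pair handled by case 75, scale shift 0.
		hi, lo, err := splitImm24uScaled(0x12345, 0)
		if err != nil {
			panic(err)
		}
		fmt.Printf("ADD $%#x, R0, Rtmp; ADD $%#x, Rtmp, Rtmp; LDP (Rtmp), (R1, R2)\n", lo, hi)
	}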
 // TestLarge generates a very large file to verify that large
 // program builds successfully, in particular, too-far
 // conditional branches are fixed, and also verify that the
@@ -149,13 +301,25 @@
 	}
 }
 
+func testvmovs() (r1, r2 uint64)
+func testvmovd() (r1, r2 uint64)
 func testvmovq() (r1, r2 uint64)
 
-// TestVMOVQ checks if the arm64 VMOVQ instruction is working properly.
-func TestVMOVQ(t *testing.T) {
-	a, b := testvmovq()
-	if a != 0x7040201008040201 || b != 0x3040201008040201 {
-		t.Errorf("TestVMOVQ got: a=0x%x, b=0x%x, want: a=0x7040201008040201, b=0x3040201008040201", a, b)
+func TestVMOV(t *testing.T) {
+	tests := []struct {
+		op           string
+		vmovFunc     func() (uint64, uint64)
+		wantA, wantB uint64
+	}{
+		{"VMOVS", testvmovs, 0x80402010, 0},
+		{"VMOVD", testvmovd, 0x7040201008040201, 0},
+		{"VMOVQ", testvmovq, 0x7040201008040201, 0x3040201008040201},
+	}
+	for _, test := range tests {
+		gotA, gotB := test.vmovFunc()
+		if gotA != test.wantA || gotB != test.wantB {
+			t.Errorf("%v: got: a=0x%x, b=0x%x, want: a=0x%x, b=0x%x", test.op, gotA, gotB, test.wantA, test.wantB)
+		}
 	}
 }
 
@@ -166,6 +330,6 @@
 	x := testmovk()
 	want := uint64(40000 << 48)
 	if x != want {
-		t.Errorf("TestMOVK got %x want %x\n", x, want)
+		t.Errorf("Got %x want %x\n", x, want)
 	}
 }
diff --git a/src/cmd/internal/obj/arm64/asm_arm64_test.s b/src/cmd/internal/obj/arm64/asm_arm64_test.s
index f85433c..e3fda57 100644
--- a/src/cmd/internal/obj/arm64/asm_arm64_test.s
+++ b/src/cmd/internal/obj/arm64/asm_arm64_test.s
@@ -4,6 +4,24 @@
 
 #include "textflag.h"
 
+// testvmovs() (r1, r2 uint64)
+TEXT ·testvmovs(SB), NOSPLIT, $0-16
+	VMOVS   $0x80402010, V1
+	VMOV    V1.D[0], R0
+	VMOV    V1.D[1], R1
+	MOVD    R0, r1+0(FP)
+	MOVD    R1, r2+8(FP)
+	RET
+
+// testvmovd() (r1, r2 uint64)
+TEXT ·testvmovd(SB), NOSPLIT, $0-16
+	VMOVD   $0x7040201008040201, V1
+	VMOV    V1.D[0], R0
+	VMOV    V1.D[1], R1
+	MOVD    R0, r1+0(FP)
+	MOVD    R1, r2+8(FP)
+	RET
+
 // testvmovq() (r1, r2 uint64)
 TEXT ·testvmovq(SB), NOSPLIT, $0-16
 	VMOVQ   $0x7040201008040201, $0x3040201008040201, V1
diff --git a/src/cmd/internal/obj/arm64/doc.go b/src/cmd/internal/obj/arm64/doc.go
index 4606e98..f2c3331 100644
--- a/src/cmd/internal/obj/arm64/doc.go
+++ b/src/cmd/internal/obj/arm64/doc.go
@@ -78,7 +78,7 @@
 	MOVD $8, R2
 	RET
 
-On arm64, functions in Go are aligned to 16 bytes by default, we can also use PCALGIN to set the
+On arm64, functions in Go are aligned to 16 bytes by default, but we can also use PCALIGN to set the
 function alignment. The functions that need to be aligned are preferably using NOFRAME and NOSPLIT
 to avoid the impact of the prologues inserted by the assembler, so that the function address will
 have the same alignment as the first hand-written instruction.
@@ -96,7 +96,7 @@
 7. Move large constants to vector registers.
 
 Go asm uses VMOVQ/VMOVD/VMOVS to move 128-bit, 64-bit and 32-bit constants into vector registers, respectively.
-And for a 128-bit interger, it take two 64-bit operands, for the low and high parts separately.
+And for a 128-bit integer, it takes two 64-bit operands, for the low and high parts separately.
 
 Examples:
 
diff --git a/src/cmd/internal/obj/arm64/list7.go b/src/cmd/internal/obj/arm64/list7.go
index 0187ad3..0654139 100644
--- a/src/cmd/internal/obj/arm64/list7.go
+++ b/src/cmd/internal/obj/arm64/list7.go
@@ -1,5 +1,5 @@
 // cmd/7l/list.c and cmd/7l/sub.c from Vita Nuova.
-// https://code.google.com/p/ken-cc/source/browse/
+// https://bitbucket.org/plan9-from-bell-labs/9-cc/src/master/
 //
 // 	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
 // 	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go
index f963f62..0ab5939 100644
--- a/src/cmd/internal/obj/arm64/obj7.go
+++ b/src/cmd/internal/obj/arm64/obj7.go
@@ -1,5 +1,5 @@
 // cmd/7l/noop.c, cmd/7l/obj.c, cmd/ld/pass.c from Vita Nuova.
-// https://code.google.com/p/ken-cc/source/browse/
+// https://bitbucket.org/plan9-from-bell-labs/9-cc/src/master/
 //
 // 	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
 // 	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
@@ -289,11 +289,12 @@
 	}
 	call.To.Sym = c.ctxt.Lookup(morestack)
 
-	unspill := c.cursym.Func().UnspillRegisterArgs(call, c.newprog)
-	pcdata = c.ctxt.EndUnsafePoint(unspill, c.newprog, -1)
+	// The instructions which unspill regs should be preemptible.
+	pcdata = c.ctxt.EndUnsafePoint(call, c.newprog, -1)
+	unspill := c.cursym.Func().UnspillRegisterArgs(pcdata, c.newprog)
 
 	// B	start
-	jmp := obj.Appendp(pcdata, c.newprog)
+	jmp := obj.Appendp(unspill, c.newprog)
 	jmp.As = AB
 	jmp.To.Type = obj.TYPE_BRANCH
 	jmp.To.SetTarget(startPred.Link)
@@ -329,8 +330,33 @@
 		break
 	}
 
-	// Rewrite float constants to values stored in memory.
+	// Rewrite float and vector constants to values stored in memory.
 	switch p.As {
+	case AVMOVS:
+		if p.From.Type == obj.TYPE_CONST {
+			p.From.Type = obj.TYPE_MEM
+			p.From.Sym = c.ctxt.Int32Sym(p.From.Offset)
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Offset = 0
+		}
+
+	case AVMOVD:
+		if p.From.Type == obj.TYPE_CONST {
+			p.From.Type = obj.TYPE_MEM
+			p.From.Sym = c.ctxt.Int64Sym(p.From.Offset)
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Offset = 0
+		}
+
+	case AVMOVQ:
+		if p.From.Type == obj.TYPE_CONST {
+			p.From.Type = obj.TYPE_MEM
+			p.From.Sym = c.ctxt.Int128Sym(p.GetFrom3().Offset, p.From.Offset)
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Offset = 0
+			p.RestArgs = nil
+		}
+
 	case AFMOVS:
 		if p.From.Type == obj.TYPE_FCONST {
 			f64 := p.From.Val.(float64)
@@ -365,8 +391,6 @@
 			p.From.Name = obj.NAME_EXTERN
 			p.From.Offset = 0
 		}
-
-		break
 	}
 
 	if c.ctxt.Flag_dynlink {
@@ -826,21 +850,24 @@
 			p.To = obj.Addr{}
 			if c.cursym.Func().Text.Mark&LEAF != 0 {
 				if c.autosize != 0 {
+					// Restore frame pointer.
+					// ADD $framesize-8, RSP, R29
+					p.As = AADD
+					p.From.Type = obj.TYPE_CONST
+					p.From.Offset = int64(c.autosize) - 8
+					p.Reg = REGSP
+					p.To.Type = obj.TYPE_REG
+					p.To.Reg = REGFP
+
+					// Pop stack frame.
+					// ADD $framesize, RSP, RSP
+					p = obj.Appendp(p, c.newprog)
 					p.As = AADD
 					p.From.Type = obj.TYPE_CONST
 					p.From.Offset = int64(c.autosize)
 					p.To.Type = obj.TYPE_REG
 					p.To.Reg = REGSP
 					p.Spadj = -c.autosize
-
-					// Frame pointer.
-					p = obj.Appendp(p, c.newprog)
-					p.As = ASUB
-					p.From.Type = obj.TYPE_CONST
-					p.From.Offset = 8
-					p.Reg = REGSP
-					p.To.Type = obj.TYPE_REG
-					p.To.Reg = REGFP
 				}
 			} else {
 				aoffset := c.autosize
diff --git a/src/cmd/internal/obj/dwarf.go b/src/cmd/internal/obj/dwarf.go
index f1330c9..4788272 100644
--- a/src/cmd/internal/obj/dwarf.go
+++ b/src/cmd/internal/obj/dwarf.go
@@ -48,7 +48,7 @@
 	line := int64(1)
 	pc := s.Func().Text.Pc
 	var lastpc int64 // last PC written to line table, not last PC in func
-	name := ""
+	fileIndex := 1
 	prologue, wrotePrologue := false, false
 	// Walk the progs, generating the DWARF table.
 	for p := s.Func().Text; p != nil; p = p.Link {
@@ -58,15 +58,15 @@
 			continue
 		}
 		newStmt := p.Pos.IsStmt() != src.PosNotStmt
-		newName, newLine := ctxt.getFileSymbolAndLine(p.Pos)
+		newFileIndex, newLine := ctxt.getFileIndexAndLine(p.Pos)
+		newFileIndex++ // 1 indexing for the table
 
 		// Output debug info.
 		wrote := false
-		if name != newName {
-			newFile := ctxt.PosTable.FileIndex(newName) + 1 // 1 indexing for the table.
+		if newFileIndex != fileIndex {
 			dctxt.AddUint8(lines, dwarf.DW_LNS_set_file)
-			dwarf.Uleb128put(dctxt, lines, int64(newFile))
-			name = newName
+			dwarf.Uleb128put(dctxt, lines, int64(newFileIndex))
+			fileIndex = newFileIndex
 			wrote = true
 		}
 		if prologue && !wrotePrologue {
@@ -207,6 +207,9 @@
 func (c dwCtxt) PtrSize() int {
 	return c.Arch.PtrSize
 }
+func (c dwCtxt) Size(s dwarf.Sym) int64 {
+	return s.(*LSym).Size
+}
 func (c dwCtxt) AddInt(s dwarf.Sym, size int, i int64) {
 	ls := s.(*LSym)
 	ls.WriteInt(c.Link, ls.Size, size, i)
@@ -258,16 +261,6 @@
 	r.Type = objabi.R_DWARFSECREF
 }
 
-func (c dwCtxt) AddFileRef(s dwarf.Sym, f interface{}) {
-	ls := s.(*LSym)
-	rsym := f.(*LSym)
-	fidx := c.Link.PosTable.FileIndex(rsym.Name)
-	// Note the +1 here -- the value we're writing is going to be an
-	// index into the DWARF line table file section, whose entries
-	// are numbered starting at 1, not 0.
-	ls.WriteInt(c.Link, ls.Size, 4, int64(fidx+1))
-}
-
 func (c dwCtxt) CurrentOffset(s dwarf.Sym) int64 {
 	ls := s.(*LSym)
 	return ls.Size
@@ -325,27 +318,24 @@
 	return fn.dwarfInfoSym, fn.dwarfLocSym, fn.dwarfRangesSym, fn.dwarfAbsFnSym, fn.dwarfDebugLinesSym
 }
 
-func (s *LSym) Length(dwarfContext interface{}) int64 {
-	return s.Size
-}
-
-// fileSymbol returns a symbol corresponding to the source file of the
-// first instruction (prog) of the specified function. This will
-// presumably be the file in which the function is defined.
-func (ctxt *Link) fileSymbol(fn *LSym) *LSym {
-	p := fn.Func().Text
-	if p != nil {
-		f, _ := ctxt.getFileSymbolAndLine(p.Pos)
-		fsym := ctxt.Lookup(f)
-		return fsym
+// textPos returns the source position of the first instruction (prog)
+// of the specified function.
+func textPos(fn *LSym) src.XPos {
+	if p := fn.Func().Text; p != nil {
+		return p.Pos
 	}
-	return nil
+	return src.NoXPos
 }
 
 // populateDWARF fills in the DWARF Debugging Information Entries for
 // TEXT symbol 's'. The various DWARF symbols must already have been
 // initialized in InitTextSym.
-func (ctxt *Link) populateDWARF(curfn interface{}, s *LSym, myimportpath string) {
+func (ctxt *Link) populateDWARF(curfn Func, s *LSym) {
+	myimportpath := ctxt.Pkgpath
+	if myimportpath == "" {
+		return
+	}
+
 	info, loc, ranges, absfunc, lines := ctxt.dwarfSym(s)
 	if info.Size != 0 {
 		ctxt.Diag("makeFuncDebugEntry double process %v", s)
@@ -353,24 +343,23 @@
 	var scopes []dwarf.Scope
 	var inlcalls dwarf.InlCalls
 	if ctxt.DebugInfo != nil {
-		// Don't need startPos because s.Func().StartLine is populated,
-		// as s is in this package.
-		scopes, inlcalls, _ = ctxt.DebugInfo(s, info, curfn)
+		scopes, inlcalls = ctxt.DebugInfo(s, info, curfn)
 	}
 	var err error
 	dwctxt := dwCtxt{ctxt}
-	filesym := ctxt.fileSymbol(s)
+	startPos := ctxt.InnermostPos(textPos(s))
+	if !startPos.IsKnown() || startPos.RelLine() != uint(s.Func().StartLine) {
+		panic("bad startPos")
+	}
 	fnstate := &dwarf.FnState{
 		Name:          s.Name,
-		Importpath:    myimportpath,
 		Info:          info,
-		Filesym:       filesym,
 		Loc:           loc,
 		Ranges:        ranges,
 		Absfn:         absfunc,
 		StartPC:       s,
 		Size:          s.Size,
-		StartLine:     s.Func().StartLine,
+		StartPos:      startPos,
 		External:      !s.Static(),
 		Scopes:        scopes,
 		InlCalls:      inlcalls,
@@ -394,7 +383,8 @@
 
 // DwarfIntConst creates a link symbol for an integer constant with the
 // given name, type and value.
-func (ctxt *Link) DwarfIntConst(myimportpath, name, typename string, val int64) {
+func (ctxt *Link) DwarfIntConst(name, typename string, val int64) {
+	myimportpath := ctxt.Pkgpath
 	if myimportpath == "" {
 		return
 	}
@@ -407,7 +397,8 @@
 
 // DwarfGlobal creates a link symbol containing a DWARF entry for
 // a global variable.
-func (ctxt *Link) DwarfGlobal(myimportpath, typename string, varSym *LSym) {
+func (ctxt *Link) DwarfGlobal(typename string, varSym *LSym) {
+	myimportpath := ctxt.Pkgpath
 	if myimportpath == "" || varSym.Local() {
 		return
 	}
@@ -421,7 +412,7 @@
 	dwarf.PutGlobal(dwCtxt{ctxt}, dieSym, typeSym, varSym, varname)
 }
 
-func (ctxt *Link) DwarfAbstractFunc(curfn interface{}, s *LSym, myimportpath string) {
+func (ctxt *Link) DwarfAbstractFunc(curfn Func, s *LSym) {
 	absfn := ctxt.DwFixups.AbsFuncDwarfSym(s)
 	if absfn.Size != 0 {
 		ctxt.Diag("internal error: DwarfAbstractFunc double process %v", s)
@@ -429,15 +420,13 @@
 	if s.Func() == nil {
 		s.NewFuncInfo()
 	}
-	scopes, _, startPos := ctxt.DebugInfo(s, absfn, curfn)
-	_, startLine := ctxt.getFileSymbolAndLine(startPos)
+	scopes, _ := ctxt.DebugInfo(s, absfn, curfn)
 	dwctxt := dwCtxt{ctxt}
 	fnstate := dwarf.FnState{
 		Name:          s.Name,
-		Importpath:    myimportpath,
 		Info:          absfn,
 		Absfn:         absfn,
-		StartLine:     startLine,
+		StartPos:      ctxt.InnermostPos(curfn.Pos()),
 		External:      !s.Static(),
 		Scopes:        scopes,
 		UseBASEntries: ctxt.UseBASEntries,
@@ -510,8 +499,8 @@
 }
 
 type fnState struct {
-	// precursor function (really *gc.Node)
-	precursor interface{}
+	// precursor function
+	precursor Func
 	// abstract function symbol
 	absfn *LSym
 }
@@ -524,14 +513,14 @@
 	}
 }
 
-func (ft *DwarfFixupTable) GetPrecursorFunc(s *LSym) interface{} {
+func (ft *DwarfFixupTable) GetPrecursorFunc(s *LSym) Func {
 	if fnstate, found := ft.precursor[s]; found {
 		return fnstate.precursor
 	}
 	return nil
 }
 
-func (ft *DwarfFixupTable) SetPrecursorFunc(s *LSym, fn interface{}) {
+func (ft *DwarfFixupTable) SetPrecursorFunc(s *LSym, fn Func) {
 	if _, found := ft.precursor[s]; found {
 		ft.ctxt.Diag("internal error: DwarfFixupTable.SetPrecursorFunc double call on %v", s)
 	}
diff --git a/src/cmd/internal/obj/line.go b/src/cmd/internal/obj/line.go
index 20f03d9..988640f 100644
--- a/src/cmd/internal/obj/line.go
+++ b/src/cmd/internal/obj/line.go
@@ -14,22 +14,14 @@
 	ctxt.Imports = append(ctxt.Imports, goobj.ImportedPkg{Pkg: pkg, Fingerprint: fingerprint})
 }
 
-// getFileSymbolAndLine returns the relative file symbol and relative line
-// number for a position (i.e., as adjusted by a //line directive). This is the
-// file/line visible in the final binary (pcfile, pcln, etc).
-func (ctxt *Link) getFileSymbolAndLine(xpos src.XPos) (f string, l int32) {
-	pos := ctxt.InnermostPos(xpos)
-	if !pos.IsKnown() {
-		pos = src.Pos{}
-	}
-	return pos.SymFilename(), int32(pos.RelLine())
-}
-
 // getFileIndexAndLine returns the relative file index (local to the CU), and
 // the relative line number for a position (i.e., as adjusted by a //line
 // directive). This is the file/line visible in the final binary (pcfile, pcln,
 // etc).
 func (ctxt *Link) getFileIndexAndLine(xpos src.XPos) (int, int32) {
-	f, l := ctxt.getFileSymbolAndLine(xpos)
-	return ctxt.PosTable.FileIndex(f), l
+	pos := ctxt.InnermostPos(xpos)
+	if !pos.IsKnown() {
+		pos = src.Pos{}
+	}
+	return pos.FileIndex(), int32(pos.RelLine())
 }
diff --git a/src/cmd/internal/obj/line_test.go b/src/cmd/internal/obj/line_test.go
index d3bb4e2..de7ef1a 100644
--- a/src/cmd/internal/obj/line_test.go
+++ b/src/cmd/internal/obj/line_test.go
@@ -31,9 +31,15 @@
 	}
 
 	for _, test := range tests {
-		f, l := ctxt.getFileSymbolAndLine(ctxt.PosTable.XPos(test.pos))
-		got := fmt.Sprintf("%s:%d", f, l)
-		if got != src.FileSymPrefix+test.want {
+		fileIndex, line := ctxt.getFileIndexAndLine(ctxt.PosTable.XPos(test.pos))
+
+		file := "??"
+		if fileIndex >= 0 {
+			file = ctxt.PosTable.FileTable()[fileIndex]
+		}
+		got := fmt.Sprintf("%s:%d", file, line)
+
+		if got != test.want {
 			t.Errorf("ctxt.getFileSymbolAndLine(%v) = %q, want %q", test.pos, got, test.want)
 		}
 	}
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index f13f9b4..0b78786 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -1001,6 +1001,11 @@
 	Spill, Unspill As
 }
 
+// A Func represents a Go function. If non-nil, it must be a *ir.Func.
+type Func interface {
+	Pos() src.XPos
+}
+
 // Link holds the context for writing object code from a compiler
 // to be linker input or for reading that input into the linker.
 type Link struct {
@@ -1030,7 +1035,7 @@
 	Imports            []goobj.ImportedPkg
 	DiagFunc           func(string, ...interface{})
 	DiagFlush          func()
-	DebugInfo          func(fn *LSym, info *LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls, src.XPos) // if non-nil, curfn is a *ir.Func
+	DebugInfo          func(fn *LSym, info *LSym, curfn Func) ([]dwarf.Scope, dwarf.InlCalls)
 	GenAbstractFunc    func(fn *LSym)
 	Errors             int
 
diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go
index 99a7da3..d944fcf 100644
--- a/src/cmd/internal/obj/loong64/a.out.go
+++ b/src/cmd/internal/obj/loong64/a.out.go
@@ -157,14 +157,14 @@
 	REGZERO = REG_R0 // set to zero
 	REGLINK = REG_R1
 	REGSP   = REG_R3
-	REGRET  = REG_R19
+	REGRET  = REG_R20 // not used
 	REGARG  = -1      // -1 disables passing the first argument in register
-	REGRT1  = REG_R19 // reserved for runtime, duffzero and duffcopy
-	REGRT2  = REG_R20 // reserved for runtime, duffcopy
+	REGRT1  = REG_R20 // reserved for runtime, duffzero and duffcopy
+	REGRT2  = REG_R21 // reserved for runtime, duffcopy
 	REGCTXT = REG_R29 // context for closures
 	REGG    = REG_R22 // G in loong64
 	REGTMP  = REG_R30 // used by the assembler
-	FREGRET = REG_F0
+	FREGRET = REG_F0  // not used
 )
 
 var LOONG64DWARFRegisters = map[int16]int16{}
@@ -227,6 +227,7 @@
 	C_ADDR
 	C_TLS_LE
 	C_TLS_IE
+	C_GOTADDR
 	C_TEXTSIZE
 
 	C_NCLASS // must be the last
diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go
index 0ab0caa..64c9226 100644
--- a/src/cmd/internal/obj/loong64/asm.go
+++ b/src/cmd/internal/obj/loong64/asm.go
@@ -349,6 +349,8 @@
 	{AWORD, C_LCON, C_NONE, C_NONE, C_NONE, C_NONE, 40, 4, 0, 0},
 	{AWORD, C_DCON, C_NONE, C_NONE, C_NONE, C_NONE, 61, 4, 0, 0},
 
+	{AMOVV, C_GOTADDR, C_NONE, C_NONE, C_REG, C_NONE, 65, 8, 0, 0},
+
 	{ATEQ, C_SCON, C_REG, C_NONE, C_REG, C_NONE, 15, 8, 0, 0},
 	{ATEQ, C_SCON, C_NONE, C_NONE, C_REG, C_NONE, 15, 8, 0, 0},
 
@@ -676,6 +678,9 @@
 				return C_SOREG
 			}
 			return C_LOREG
+
+		case obj.NAME_GOTREF:
+			return C_GOTADDR
 		}
 
 		return C_GOK
@@ -753,7 +758,7 @@
 			if c.instoffset <= 0xfff {
 				return C_ANDCON
 			}
-			if c.instoffset&0xfff == 0 && isuint32(uint64(c.instoffset)) { // && (instoffset & (1<<31)) == 0)
+			if c.instoffset&0xfff == 0 && isuint32(uint64(c.instoffset)) { // && ((instoffset & (1<<31)) == 0)
 				return C_UCON
 			}
 			if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
@@ -1776,6 +1781,22 @@
 	case 64: // movv c_reg, c_fcc0 ==> movgr2cf cd, rj
 		a := OP_TEN(8, 1334)
 		o1 = OP_RR(a, uint32(p.From.Reg), uint32(p.To.Reg))
+
+	case 65: // mov sym@GOT, r ==> pcalau12i + ld.d
+		o1 = OP_IR(c.opir(APCALAU12I), uint32(0), uint32(p.To.Reg))
+		rel := obj.Addrel(c.cursym)
+		rel.Off = int32(c.pc)
+		rel.Siz = 4
+		rel.Sym = p.From.Sym
+		rel.Type = objabi.R_LOONG64_GOT_HI
+		rel.Add = 0x0
+		o2 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
+		rel2 := obj.Addrel(c.cursym)
+		rel2.Off = int32(c.pc + 4)
+		rel2.Siz = 4
+		rel2.Sym = p.From.Sym
+		rel2.Type = objabi.R_LOONG64_GOT_LO
+		rel2.Add = 0x0
 	}
 
 	out[0] = o1
diff --git a/src/cmd/internal/obj/loong64/cnames.go b/src/cmd/internal/obj/loong64/cnames.go
index 8b8af6b..94b1b54 100644
--- a/src/cmd/internal/obj/loong64/cnames.go
+++ b/src/cmd/internal/obj/loong64/cnames.go
@@ -39,6 +39,7 @@
 	"ADDR",
 	"TLS_LE",
 	"TLS_IE",
+	"GOTADDR",
 	"TEXTSIZE",
 	"NCLASS",
 }
diff --git a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go
index 1eedd46..5fa67f3 100644
--- a/src/cmd/internal/obj/loong64/obj.go
+++ b/src/cmd/internal/obj/loong64/obj.go
@@ -6,6 +6,7 @@
 
 import (
 	"cmd/internal/obj"
+	"cmd/internal/objabi"
 	"cmd/internal/sys"
 	"internal/abi"
 	"log"
@@ -84,6 +85,122 @@
 			p.As = AADDVU
 		}
 	}
+
+	if ctxt.Flag_dynlink {
+		rewriteToUseGot(ctxt, p, newprog)
+	}
+}
+
+func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
+	//     ADUFFxxx $offset
+	// becomes
+	//     MOVV runtime.duffxxx@GOT, REGTMP
+	//     ADD $offset, REGTMP
+	//     JAL REGTMP
+	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
+		var sym *obj.LSym
+		if p.As == obj.ADUFFZERO {
+			sym = ctxt.Lookup("runtime.duffzero")
+		} else {
+			sym = ctxt.Lookup("runtime.duffcopy")
+		}
+		offset := p.To.Offset
+		p.As = AMOVV
+		p.From.Type = obj.TYPE_MEM
+		p.From.Sym = sym
+		p.From.Name = obj.NAME_GOTREF
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REGTMP
+		p.To.Name = obj.NAME_NONE
+		p.To.Offset = 0
+		p.To.Sym = nil
+		p1 := obj.Appendp(p, newprog)
+		p1.As = AADDV
+		p1.From.Type = obj.TYPE_CONST
+		p1.From.Offset = offset
+		p1.To.Type = obj.TYPE_REG
+		p1.To.Reg = REGTMP
+		p2 := obj.Appendp(p1, newprog)
+		p2.As = AJAL
+		p2.To.Type = obj.TYPE_MEM
+		p2.To.Reg = REGTMP
+	}
+
+	// We only care about global data: NAME_EXTERN means a global
+	// symbol in the Go sense, and p.Sym.Local is true for a few
+	// internally defined symbols.
+	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
+		// MOVV $sym, Rx becomes MOVV sym@GOT, Rx
+		// MOVV $sym+<off>, Rx becomes MOVV sym@GOT, Rx; ADD <off>, Rx
+		if p.As != AMOVV {
+			ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -shared", p)
+		}
+		if p.To.Type != obj.TYPE_REG {
+			ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -shared", p)
+		}
+		p.From.Type = obj.TYPE_MEM
+		p.From.Name = obj.NAME_GOTREF
+		if p.From.Offset != 0 {
+			q := obj.Appendp(p, newprog)
+			q.As = AADDV
+			q.From.Type = obj.TYPE_CONST
+			q.From.Offset = p.From.Offset
+			q.To = p.To
+			p.From.Offset = 0
+		}
+	}
+	if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN {
+		ctxt.Diag("don't know how to handle %v with -shared", p)
+	}
+
+	var source *obj.Addr
+	// MOVx sym, Ry becomes MOVV sym@GOT, REGTMP; MOVx (REGTMP), Ry
+	// MOVx Ry, sym becomes MOVV sym@GOT, REGTMP; MOVx Ry, (REGTMP)
+	// An addition may be inserted between the two MOVs if there is an offset.
+	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
+		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
+			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -shared", p)
+		}
+		source = &p.From
+	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
+		source = &p.To
+	} else {
+		return
+	}
+	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
+		return
+	}
+	if source.Sym.Type == objabi.STLSBSS {
+		return
+	}
+	if source.Type != obj.TYPE_MEM {
+		ctxt.Diag("don't know how to handle %v with -shared", p)
+	}
+	p1 := obj.Appendp(p, newprog)
+	p2 := obj.Appendp(p1, newprog)
+	p1.As = AMOVV
+	p1.From.Type = obj.TYPE_MEM
+	p1.From.Sym = source.Sym
+	p1.From.Name = obj.NAME_GOTREF
+	p1.To.Type = obj.TYPE_REG
+	p1.To.Reg = REGTMP
+
+	p2.As = p.As
+	p2.From = p.From
+	p2.To = p.To
+	if p.From.Name == obj.NAME_EXTERN {
+		p2.From.Reg = REGTMP
+		p2.From.Name = obj.NAME_NONE
+		p2.From.Sym = nil
+	} else if p.To.Name == obj.NAME_EXTERN {
+		p2.To.Reg = REGTMP
+		p2.To.Name = obj.NAME_NONE
+		p2.To.Sym = nil
+	} else {
+		return
+	}
+
+	obj.Nopout(p)
 }
 
 func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
@@ -279,18 +396,18 @@
 			if c.cursym.Func().Text.From.Sym.Wrapper() && c.cursym.Func().Text.Mark&LEAF == 0 {
 				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
 				//
-				//	MOV	g_panic(g), R1
-				//	BEQ	R1, end
-				//	MOV	panic_argp(R1), R2
-				//	ADD	$(autosize+FIXED_FRAME), R29, R3
-				//	BNE	R2, R3, end
-				//	ADD	$FIXED_FRAME, R29, R2
-				//	MOV	R2, panic_argp(R1)
+				//	MOV	g_panic(g), R20
+				//	BEQ	R20, end
+				//	MOV	panic_argp(R20), R24
+				//	ADD	$(autosize+FIXED_FRAME), R3, R30
+				//	BNE	R24, R30, end
+				//	ADD	$FIXED_FRAME, R3, R24
+				//	MOV	R24, panic_argp(R20)
 				// end:
 				//	NOP
 				//
 				// The NOP is needed to give the jumps somewhere to land.
-				// It is a liblink NOP, not an hardware NOP: it encodes to 0 instruction bytes.
+				// It is a liblink NOP, not a hardware NOP: it encodes to 0 instruction bytes.
 				//
 				// We don't generate this for leafs because that means the wrapped
 				// function was inlined into the wrapper.
@@ -302,12 +419,12 @@
 				q.From.Reg = REGG
 				q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
 				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R19
+				q.To.Reg = REG_R20
 
 				q = obj.Appendp(q, newprog)
 				q.As = ABEQ
 				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R19
+				q.From.Reg = REG_R20
 				q.To.Type = obj.TYPE_BRANCH
 				q.Mark |= BRANCH
 				p1 = q
@@ -315,10 +432,10 @@
 				q = obj.Appendp(q, newprog)
 				q.As = mov
 				q.From.Type = obj.TYPE_MEM
-				q.From.Reg = REG_R19
+				q.From.Reg = REG_R20
 				q.From.Offset = 0 // Panic.argp
 				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R4
+				q.To.Reg = REG_R24
 
 				q = obj.Appendp(q, newprog)
 				q.As = add
@@ -326,13 +443,13 @@
 				q.From.Offset = int64(autosize) + ctxt.Arch.FixedFrameSize
 				q.Reg = REGSP
 				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R5
+				q.To.Reg = REG_R30
 
 				q = obj.Appendp(q, newprog)
 				q.As = ABNE
 				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R4
-				q.Reg = REG_R5
+				q.From.Reg = REG_R24
+				q.Reg = REG_R30
 				q.To.Type = obj.TYPE_BRANCH
 				q.Mark |= BRANCH
 				p2 = q
@@ -343,14 +460,14 @@
 				q.From.Offset = ctxt.Arch.FixedFrameSize
 				q.Reg = REGSP
 				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R4
+				q.To.Reg = REG_R24
 
 				q = obj.Appendp(q, newprog)
 				q.As = mov
 				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R4
+				q.From.Reg = REG_R24
 				q.To.Type = obj.TYPE_MEM
-				q.To.Reg = REG_R19
+				q.To.Reg = REG_R20
 				q.To.Offset = 0 // Panic.argp
 
 				q = obj.Appendp(q, newprog)
@@ -503,6 +620,10 @@
 
 		p = c.ctxt.StartUnsafePoint(p, c.newprog)
 
+		// Spill Arguments. This has to happen before we open
+		// any more frame space.
+		p = c.cursym.Func().SpillRegisterArgs(p, c.newprog)
+
 		// MOV	REGLINK, -8/-16(SP)
 		p = obj.Appendp(p, c.newprog)
 		p.As = mov
@@ -567,13 +688,15 @@
 		p.To.Reg = REGSP
 		p.Spadj = int32(-frameSize)
 
+		// Unspill arguments
+		p = c.cursym.Func().UnspillRegisterArgs(p, c.newprog)
 		p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
 	}
 
 	// Jump back to here after morestack returns.
 	startPred := p
 
-	// MOV	g_stackguard(g), R19
+	// MOV	g_stackguard(g), R20
 	p = obj.Appendp(p, c.newprog)
 
 	p.As = mov
@@ -584,7 +707,7 @@
 		p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
 	}
 	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_R19
+	p.To.Reg = REG_R20
 
 	// Mark the stack bound check and morestack call async nonpreemptible.
 	// If we get preempted here, when resumed the preemption request is
@@ -595,15 +718,15 @@
 	var q *obj.Prog
 	if framesize <= abi.StackSmall {
 		// small stack: SP < stackguard
-		//	AGTU	SP, stackguard, R19
+		//	AGTU	SP, stackguard, R20
 		p = obj.Appendp(p, c.newprog)
 
 		p.As = ASGTU
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = REGSP
-		p.Reg = REG_R19
+		p.Reg = REG_R20
 		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R19
+		p.To.Reg = REG_R20
 	} else {
 		// large stack: SP-framesize < stackguard-StackSmall
 		offset := int64(framesize) - abi.StackSmall
@@ -615,8 +738,8 @@
 			// stack guard to incorrectly succeed. We explicitly
 			// guard against underflow.
 			//
-			//      SGTU    $(framesize-StackSmall), SP, R4
-			//      BNE     R4, label-of-call-to-morestack
+			//      SGTU    $(framesize-StackSmall), SP, R24
+			//      BNE     R24, label-of-call-to-morestack
 
 			p = obj.Appendp(p, c.newprog)
 			p.As = ASGTU
@@ -624,13 +747,13 @@
 			p.From.Offset = offset
 			p.Reg = REGSP
 			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REG_R4
+			p.To.Reg = REG_R24
 
 			p = obj.Appendp(p, c.newprog)
 			q = p
 			p.As = ABNE
 			p.From.Type = obj.TYPE_REG
-			p.From.Reg = REG_R4
+			p.From.Reg = REG_R24
 			p.To.Type = obj.TYPE_BRANCH
 			p.Mark |= BRANCH
 		}
@@ -642,35 +765,35 @@
 		p.From.Offset = -offset
 		p.Reg = REGSP
 		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R4
+		p.To.Reg = REG_R24
 
 		p = obj.Appendp(p, c.newprog)
 		p.As = ASGTU
 		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R4
-		p.Reg = REG_R19
+		p.From.Reg = REG_R24
+		p.Reg = REG_R20
 		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R19
+		p.To.Reg = REG_R20
 	}
 
-	// q1: BNE	R19, done
+	// q1: BNE	R20, done
 	p = obj.Appendp(p, c.newprog)
 	q1 := p
 
 	p.As = ABNE
 	p.From.Type = obj.TYPE_REG
-	p.From.Reg = REG_R19
+	p.From.Reg = REG_R20
 	p.To.Type = obj.TYPE_BRANCH
 	p.Mark |= BRANCH
 
-	// MOV	LINK, R5
+	// MOV	LINK, R31
 	p = obj.Appendp(p, c.newprog)
 
 	p.As = mov
 	p.From.Type = obj.TYPE_REG
 	p.From.Reg = REGLINK
 	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_R5
+	p.To.Reg = REG_R31
 	if q != nil {
 		q.To.SetTarget(p)
 		p.Mark |= LABEL
@@ -678,6 +801,10 @@
 
 	p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog)
 
+	// Spill the register args that could be clobbered by the
+	// morestack code
+	p = c.cursym.Func().SpillRegisterArgs(p, c.newprog)
+
 	// JAL	runtime.morestack(SB)
 	p = obj.Appendp(p, c.newprog)
 
@@ -692,6 +819,7 @@
 	}
 	p.Mark |= BRANCH
 
+	p = c.cursym.Func().UnspillRegisterArgs(p, c.newprog)
 	p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
 
 	// JMP	start
diff --git a/src/cmd/internal/obj/mips/a.out.go b/src/cmd/internal/obj/mips/a.out.go
index c6ce53a..cd61313 100644
--- a/src/cmd/internal/obj/mips/a.out.go
+++ b/src/cmd/internal/obj/mips/a.out.go
@@ -394,6 +394,8 @@
 	AROTRV
 	ASC
 	ASCV
+	ASEB
+	ASEH
 	ASGT
 	ASGTU
 	ASLL
@@ -415,6 +417,7 @@
 	ATLBWR
 	ATNE
 	AWORD
+	AWSBH
 	AXOR
 
 	/* 64-bit */
@@ -434,6 +437,8 @@
 	AADDVU
 	ASUBV
 	ASUBVU
+	ADSBH
+	ADSHD
 
 	/* 64-bit FP */
 	ATRUNCFV
diff --git a/src/cmd/internal/obj/mips/anames.go b/src/cmd/internal/obj/mips/anames.go
index ca2ad5a..d86e37f 100644
--- a/src/cmd/internal/obj/mips/anames.go
+++ b/src/cmd/internal/obj/mips/anames.go
@@ -82,6 +82,8 @@
 	"ROTRV",
 	"SC",
 	"SCV",
+	"SEB",
+	"SEH",
 	"SGT",
 	"SGTU",
 	"SLL",
@@ -103,6 +105,7 @@
 	"TLBWR",
 	"TNE",
 	"WORD",
+	"WSBH",
 	"XOR",
 	"MOVV",
 	"MOVVL",
@@ -120,6 +123,8 @@
 	"ADDVU",
 	"SUBV",
 	"SUBVU",
+	"DSBH",
+	"DSHD",
 	"TRUNCFV",
 	"TRUNCDV",
 	"TRUNCFW",
diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go
index ab8d37b..2804073 100644
--- a/src/cmd/internal/obj/mips/asm0.go
+++ b/src/cmd/internal/obj/mips/asm0.go
@@ -382,6 +382,9 @@
 	{AVMOVB, C_SOREG, C_NONE, C_WREG, 57, 4, 0, sys.MIPS64, 0},
 	{AVMOVB, C_WREG, C_NONE, C_SOREG, 58, 4, 0, sys.MIPS64, 0},
 
+	{AWSBH, C_REG, C_NONE, C_REG, 59, 4, 0, 0, 0},
+	{ADSBH, C_REG, C_NONE, C_REG, 59, 4, 0, sys.MIPS64, 0},
+
 	{ABREAK, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, /* really CACHE instruction */
 	{ABREAK, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0},
 	{ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0},
@@ -654,7 +657,7 @@
 		switch a.Name {
 		case obj.NAME_NONE:
 			c.instoffset = a.Offset
-			if a.Reg != 0 {
+			if a.Reg != obj.REG_NONE {
 				if -BIG <= c.instoffset && c.instoffset <= BIG {
 					return C_SACON
 				}
@@ -715,7 +718,7 @@
 			if c.instoffset <= 0xffff {
 				return C_ANDCON
 			}
-			if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
+			if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && ((instoffset & (1<<31)) == 0) */
 				return C_UCON
 			}
 			if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
@@ -770,7 +773,7 @@
 
 	a3--
 	a2 := C_NONE
-	if p.Reg != 0 {
+	if p.Reg != obj.REG_NONE {
 		a2 = C_REG
 	}
 
@@ -1101,6 +1104,13 @@
 
 		case ATEQ:
 			opset(ATNE, r0)
+
+		case AWSBH:
+			opset(ASEB, r0)
+			opset(ASEH, r0)
+
+		case ADSBH:
+			opset(ADSHD, r0)
 		}
 	}
 }
@@ -1137,20 +1147,20 @@
 	return SP(2, 1) | 21<<21 | x<<3 | y<<0
 }
 
-func OP_RRR(op uint32, r1 uint32, r2 uint32, r3 uint32) uint32 {
-	return op | (r1&31)<<16 | (r2&31)<<21 | (r3&31)<<11
+func OP_RRR(op uint32, r1 int16, r2 int16, r3 int16) uint32 {
+	return op | uint32(r1&31)<<16 | uint32(r2&31)<<21 | uint32(r3&31)<<11
 }
 
-func OP_IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
-	return op | i&0xFFFF | (r2&31)<<21 | (r3&31)<<16
+func OP_IRR(op uint32, i uint32, r2 int16, r3 int16) uint32 {
+	return op | i&0xFFFF | uint32(r2&31)<<21 | uint32(r3&31)<<16
 }
 
-func OP_SRR(op uint32, s uint32, r2 uint32, r3 uint32) uint32 {
-	return op | (s&31)<<6 | (r2&31)<<16 | (r3&31)<<11
+func OP_SRR(op uint32, s uint32, r2 int16, r3 int16) uint32 {
+	return op | (s&31)<<6 | uint32(r2&31)<<16 | uint32(r3&31)<<11
 }
 
-func OP_FRRR(op uint32, r1 uint32, r2 uint32, r3 uint32) uint32 {
-	return op | (r1&31)<<16 | (r2&31)<<11 | (r3&31)<<6
+func OP_FRRR(op uint32, r1 int16, r2 int16, r3 int16) uint32 {
+	return op | uint32(r1&31)<<16 | uint32(r2&31)<<11 | uint32(r3&31)<<6
 }
 
 func OP_JMP(op uint32, i uint32) uint32 {
@@ -1191,41 +1201,37 @@
 			// but SLL is special that the result is always sign-extended to 64-bit.
 			a = ASLL
 		}
-		o1 = OP_RRR(c.oprrr(a), uint32(p.From.Reg), uint32(REGZERO), uint32(p.To.Reg))
+		o1 = OP_RRR(c.oprrr(a), p.From.Reg, REGZERO, p.To.Reg)
 
 	case 2: /* add/sub r1,[r2],r3 */
-		r := int(p.Reg)
+		r := p.Reg
 		if p.As == ANEGW || p.As == ANEGV {
 			r = REGZERO
 		}
-		if r == 0 {
-			r = int(p.To.Reg)
+		if r == obj.REG_NONE {
+			r = p.To.Reg
 		}
-		o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+		o1 = OP_RRR(c.oprrr(p.As), p.From.Reg, r, p.To.Reg)
 
 	case 3: /* mov $soreg, r ==> or/add $i,o,r */
-		v := c.regoff(&p.From)
-
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
 		a := add
 		if o.a1 == C_ANDCON {
 			a = AOR
 		}
-
-		o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg))
+		r := p.From.Reg
+		if r == obj.REG_NONE {
+			r = o.param
+		}
+		v := c.regoff(&p.From)
+		o1 = OP_IRR(c.opirr(a), uint32(v), r, p.To.Reg)
 
 	case 4: /* add $scon,[r1],r2 */
-		v := c.regoff(&p.From)
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
+		r := p.Reg
+		if r == obj.REG_NONE {
+			r = p.To.Reg
 		}
-
-		o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+		v := c.regoff(&p.From)
+		o1 = OP_IRR(c.opirr(p.As), uint32(v), r, p.To.Reg)
 
 	case 5: /* syscall */
 		o1 = c.oprrr(p.As)
@@ -1240,34 +1246,33 @@
 		if (v<<16)>>16 != v {
 			c.ctxt.Diag("short branch too far\n%v", p)
 		}
-		o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
+		o1 = OP_IRR(c.opirr(p.As), uint32(v), p.From.Reg, p.Reg)
 		// for ABFPT and ABFPF only: always fill delay slot with 0
 		// see comments in func preprocess for details.
 		o2 = 0
 
 	case 7: /* mov r, soreg ==> sw o(r) */
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
+		r := p.To.Reg
+		if r == obj.REG_NONE {
+			r = o.param
 		}
 		v := c.regoff(&p.To)
-		o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.From.Reg))
+		o1 = OP_IRR(c.opirr(p.As), uint32(v), r, p.From.Reg)
 
 	case 8: /* mov soreg, r ==> lw o(r) */
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
+		r := p.From.Reg
+		if r == obj.REG_NONE {
+			r = o.param
 		}
 		v := c.regoff(&p.From)
-		o1 = OP_IRR(c.opirr(-p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+		o1 = OP_IRR(c.opirr(-p.As), uint32(v), r, p.To.Reg)
 
 	case 9: /* sll r1,[r2],r3 */
-		r := int(p.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
+		r := p.Reg
+		if r == obj.REG_NONE {
+			r = p.To.Reg
 		}
-		o1 = OP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
+		o1 = OP_RRR(c.oprrr(p.As), r, p.From.Reg, p.To.Reg)
 
 	case 10: /* add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 */
 		v := c.regoff(&p.From)
@@ -1275,12 +1280,12 @@
 		if v < 0 {
 			a = AADDU
 		}
-		o1 = OP_IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP))
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
+		o1 = OP_IRR(c.opirr(a), uint32(v), obj.REG_NONE, REGTMP)
+		r := p.Reg
+		if r == obj.REG_NONE {
+			r = p.To.Reg
 		}
-		o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+		o2 = OP_RRR(c.oprrr(p.As), REGTMP, r, p.To.Reg)
 
 	case 11: /* jmp lbra */
 		v := int32(0)
@@ -1293,7 +1298,7 @@
 				v = int32(p.To.Target().Pc-p.Pc-4) >> 2
 			}
 			if (v<<16)>>16 == v {
-				o1 = OP_IRR(c.opirr(ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO))
+				o1 = OP_IRR(c.opirr(ABEQ), uint32(v), REGZERO, REGZERO)
 				break
 			}
 		}
@@ -1325,54 +1330,54 @@
 		if p.As == AMOVB {
 			v = 24
 		}
-		o1 = OP_SRR(c.opirr(ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg))
-		o2 = OP_SRR(c.opirr(ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
+		o1 = OP_SRR(c.opirr(ASLL), uint32(v), p.From.Reg, p.To.Reg)
+		o2 = OP_SRR(c.opirr(ASRA), uint32(v), p.To.Reg, p.To.Reg)
 
 	case 13: /* movbu r,r */
 		if p.As == AMOVBU {
-			o1 = OP_IRR(c.opirr(AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg))
+			o1 = OP_IRR(c.opirr(AAND), uint32(0xff), p.From.Reg, p.To.Reg)
 		} else {
-			o1 = OP_IRR(c.opirr(AAND), uint32(0xffff), uint32(p.From.Reg), uint32(p.To.Reg))
+			o1 = OP_IRR(c.opirr(AAND), uint32(0xffff), p.From.Reg, p.To.Reg)
 		}
 
 	case 14: /* movwu r,r */
 		// NOTE: this case does not use REGTMP. If it ever does,
 		// remove the NOTUSETMP flag in optab.
-		o1 = OP_SRR(c.opirr(-ASLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
-		o2 = OP_SRR(c.opirr(-ASRLV), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
+		o1 = OP_SRR(c.opirr(-ASLLV), 0, p.From.Reg, p.To.Reg)
+		o2 = OP_SRR(c.opirr(-ASRLV), 0, p.To.Reg, p.To.Reg)
 
 	case 15: /* teq $c r,r */
-		v := c.regoff(&p.From)
-		r := int(p.Reg)
-		if r == 0 {
+		r := p.Reg
+		if r == obj.REG_NONE {
 			r = REGZERO
 		}
+		v := c.regoff(&p.From)
 		/* only use 10 bits of trap code */
-		o1 = OP_IRR(c.opirr(p.As), (uint32(v)&0x3FF)<<6, uint32(r), uint32(p.To.Reg))
+		o1 = OP_IRR(c.opirr(p.As), (uint32(v)&0x3FF)<<6, r, p.To.Reg)
 
 	case 16: /* sll $c,[r1],r2 */
-		v := c.regoff(&p.From)
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
+		r := p.Reg
+		if r == obj.REG_NONE {
+			r = p.To.Reg
 		}
+		v := c.regoff(&p.From)
 
 		/* OP_SRR will use only the low 5 bits of the shift value */
 		if v >= 32 && vshift(p.As) {
-			o1 = OP_SRR(c.opirr(-p.As), uint32(v-32), uint32(r), uint32(p.To.Reg))
+			o1 = OP_SRR(c.opirr(-p.As), uint32(v-32), r, p.To.Reg)
 		} else {
-			o1 = OP_SRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+			o1 = OP_SRR(c.opirr(p.As), uint32(v), r, p.To.Reg)
 		}
 
 	case 17:
-		o1 = OP_RRR(c.oprrr(p.As), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
+		o1 = OP_RRR(c.oprrr(p.As), REGZERO, p.From.Reg, p.To.Reg)
 
 	case 18: /* jmp [r1],0(r2) */
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(o.param)
+		r := p.Reg
+		if r == obj.REG_NONE {
+			r = o.param
 		}
-		o1 = OP_RRR(c.oprrr(p.As), uint32(0), uint32(p.To.Reg), uint32(r))
+		o1 = OP_RRR(c.oprrr(p.As), obj.REG_NONE, p.To.Reg, r)
 		if p.As == obj.ACALL {
 			rel := obj.Addrel(c.cursym)
 			rel.Off = int32(c.pc)
@@ -1384,199 +1389,199 @@
 		// NOTE: this case does not use REGTMP. If it ever does,
 		// remove the NOTUSETMP flag in optab.
 		v := c.regoff(&p.From)
-		o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
-		o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
+		o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), REGZERO, p.To.Reg)
+		o2 = OP_IRR(c.opirr(AOR), uint32(v), p.To.Reg, p.To.Reg)
 
 	case 20: /* mov lo/hi,r */
 		a := OP(2, 0) /* mfhi */
 		if p.From.Reg == REG_LO {
 			a = OP(2, 2) /* mflo */
 		}
-		o1 = OP_RRR(a, uint32(REGZERO), uint32(REGZERO), uint32(p.To.Reg))
+		o1 = OP_RRR(a, REGZERO, REGZERO, p.To.Reg)
 
 	case 21: /* mov r,lo/hi */
 		a := OP(2, 1) /* mthi */
 		if p.To.Reg == REG_LO {
 			a = OP(2, 3) /* mtlo */
 		}
-		o1 = OP_RRR(a, uint32(REGZERO), uint32(p.From.Reg), uint32(REGZERO))
+		o1 = OP_RRR(a, REGZERO, p.From.Reg, REGZERO)
 
 	case 22: /* mul r1,r2 [r3]*/
-		if p.To.Reg != 0 {
-			r := int(p.Reg)
-			if r == 0 {
-				r = int(p.To.Reg)
+		if p.To.Reg != obj.REG_NONE {
+			r := p.Reg
+			if r == obj.REG_NONE {
+				r = p.To.Reg
 			}
 			a := SP(3, 4) | 2 /* mul */
-			o1 = OP_RRR(a, uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+			o1 = OP_RRR(a, p.From.Reg, r, p.To.Reg)
 		} else {
-			o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
+			o1 = OP_RRR(c.oprrr(p.As), p.From.Reg, p.Reg, REGZERO)
 		}
 
 	case 23: /* add $lcon,r1,r2 ==> lu+or+add */
 		v := c.regoff(&p.From)
-		o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
-		o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
+		o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), REGZERO, REGTMP)
+		o2 = OP_IRR(c.opirr(AOR), uint32(v), REGTMP, REGTMP)
+		r := p.Reg
+		if r == obj.REG_NONE {
+			r = p.To.Reg
 		}
-		o3 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+		o3 = OP_RRR(c.oprrr(p.As), REGTMP, r, p.To.Reg)
 
 	case 24: /* mov $ucon,r ==> lu r */
 		v := c.regoff(&p.From)
-		o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
+		o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), REGZERO, p.To.Reg)
 
 	case 25: /* add/and $ucon,[r1],r2 ==> lu $con,t; add t,[r1],r2 */
 		v := c.regoff(&p.From)
-		o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
+		o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), REGZERO, REGTMP)
+		r := p.Reg
+		if r == obj.REG_NONE {
+			r = p.To.Reg
 		}
-		o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+		o2 = OP_RRR(c.oprrr(p.As), REGTMP, r, p.To.Reg)
 
 	case 26: /* mov $lsext/auto/oreg,r ==> lu+or+add */
 		v := c.regoff(&p.From)
-		o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
-		o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
+		o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), REGZERO, REGTMP)
+		o2 = OP_IRR(c.opirr(AOR), uint32(v), REGTMP, REGTMP)
+		r := p.From.Reg
+		if r == obj.REG_NONE {
+			r = o.param
 		}
-		o3 = OP_RRR(c.oprrr(add), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+		o3 = OP_RRR(c.oprrr(add), REGTMP, r, p.To.Reg)
 
 	case 27: /* mov [sl]ext/auto/oreg,fr ==> lwc1 o(r) */
-		v := c.regoff(&p.From)
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
 		a := -AMOVF
 		if p.As == AMOVD {
 			a = -AMOVD
 		}
+		r := p.From.Reg
+		if r == obj.REG_NONE {
+			r = o.param
+		}
+		v := c.regoff(&p.From)
 		switch o.size {
 		case 12:
-			o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
-			o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
-			o3 = OP_IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
+			o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), REGZERO, REGTMP)
+			o2 = OP_RRR(c.oprrr(add), r, REGTMP, REGTMP)
+			o3 = OP_IRR(c.opirr(a), uint32(v), REGTMP, p.To.Reg)
 
 		case 4:
-			o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg))
+			o1 = OP_IRR(c.opirr(a), uint32(v), r, p.To.Reg)
 		}
 
 	case 28: /* mov fr,[sl]ext/auto/oreg ==> swc1 o(r) */
-		v := c.regoff(&p.To)
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
 		a := AMOVF
 		if p.As == AMOVD {
 			a = AMOVD
 		}
+		r := p.To.Reg
+		if r == obj.REG_NONE {
+			r = o.param
+		}
+		v := c.regoff(&p.To)
 		switch o.size {
 		case 12:
-			o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
-			o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
-			o3 = OP_IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
+			o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), REGZERO, REGTMP)
+			o2 = OP_RRR(c.oprrr(add), r, REGTMP, REGTMP)
+			o3 = OP_IRR(c.opirr(a), uint32(v), REGTMP, p.From.Reg)
 
 		case 4:
-			o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.From.Reg))
+			o1 = OP_IRR(c.opirr(a), uint32(v), r, p.From.Reg)
 		}
 
 	case 30: /* movw r,fr */
 		a := SP(2, 1) | (4 << 21) /* mtc1 */
-		o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg))
+		o1 = OP_RRR(a, p.From.Reg, obj.REG_NONE, p.To.Reg)
 
 	case 31: /* movw fr,r */
 		a := SP(2, 1) | (0 << 21) /* mtc1 */
-		o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
+		o1 = OP_RRR(a, p.To.Reg, obj.REG_NONE, p.From.Reg)
 
 	case 32: /* fadd fr1,[fr2],fr3 */
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
+		r := p.Reg
+		if r == obj.REG_NONE {
+			r = p.To.Reg
 		}
-		o1 = OP_FRRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+		o1 = OP_FRRR(c.oprrr(p.As), p.From.Reg, r, p.To.Reg)
 
 	case 33: /* fabs fr1, fr3 */
-		o1 = OP_FRRR(c.oprrr(p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
+		o1 = OP_FRRR(c.oprrr(p.As), obj.REG_NONE, p.From.Reg, p.To.Reg)
 
 	case 34: /* mov $con,fr ==> or/add $i,t; mov t,fr */
-		v := c.regoff(&p.From)
 		a := AADDU
 		if o.a1 == C_ANDCON {
 			a = AOR
 		}
-		o1 = OP_IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP))
-		o2 = OP_RRR(SP(2, 1)|(4<<21), uint32(REGTMP), uint32(0), uint32(p.To.Reg)) /* mtc1 */
+		v := c.regoff(&p.From)
+		o1 = OP_IRR(c.opirr(a), uint32(v), obj.REG_NONE, REGTMP)
+		o2 = OP_RRR(SP(2, 1)|(4<<21), REGTMP, obj.REG_NONE, p.To.Reg) /* mtc1 */
 
 	case 35: /* mov r,lext/auto/oreg ==> sw o(REGTMP) */
-		v := c.regoff(&p.To)
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
+		r := p.To.Reg
+		if r == obj.REG_NONE {
+			r = o.param
 		}
-		o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
-		o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
-		o3 = OP_IRR(c.opirr(p.As), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
+		v := c.regoff(&p.To)
+		o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), REGZERO, REGTMP)
+		o2 = OP_RRR(c.oprrr(add), r, REGTMP, REGTMP)
+		o3 = OP_IRR(c.opirr(p.As), uint32(v), REGTMP, p.From.Reg)
 
 	case 36: /* mov lext/auto/oreg,r ==> lw o(REGTMP) */
-		v := c.regoff(&p.From)
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
+		r := p.From.Reg
+		if r == obj.REG_NONE {
+			r = o.param
 		}
-		o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
-		o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
-		o3 = OP_IRR(c.opirr(-p.As), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
+		v := c.regoff(&p.From)
+		o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), REGZERO, REGTMP)
+		o2 = OP_RRR(c.oprrr(add), r, REGTMP, REGTMP)
+		o3 = OP_IRR(c.opirr(-p.As), uint32(v), REGTMP, p.To.Reg)
 
 	case 37: /* movw r,mr */
 		a := SP(2, 0) | (4 << 21) /* mtc0 */
 		if p.As == AMOVV {
 			a = SP(2, 0) | (5 << 21) /* dmtc0 */
 		}
-		o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg))
+		o1 = OP_RRR(a, p.From.Reg, obj.REG_NONE, p.To.Reg)
 
 	case 38: /* movw mr,r */
 		a := SP(2, 0) | (0 << 21) /* mfc0 */
 		if p.As == AMOVV {
 			a = SP(2, 0) | (1 << 21) /* dmfc0 */
 		}
-		o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
+		o1 = OP_RRR(a, p.To.Reg, obj.REG_NONE, p.From.Reg)
 
 	case 40: /* word */
 		o1 = uint32(c.regoff(&p.From))
 
 	case 41: /* movw f,fcr */
-		o1 = OP_RRR(SP(2, 1)|(6<<21), uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) /* mtcc1 */
+		o1 = OP_RRR(SP(2, 1)|(6<<21), p.From.Reg, obj.REG_NONE, p.To.Reg) /* mtcc1 */
 
 	case 42: /* movw fcr,r */
-		o1 = OP_RRR(SP(2, 1)|(2<<21), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) /* mfcc1 */
+		o1 = OP_RRR(SP(2, 1)|(2<<21), p.To.Reg, obj.REG_NONE, p.From.Reg) /* mfcc1 */
 
 	case 47: /* movv r,fr */
 		a := SP(2, 1) | (5 << 21) /* dmtc1 */
-		o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg))
+		o1 = OP_RRR(a, p.From.Reg, obj.REG_NONE, p.To.Reg)
 
 	case 48: /* movv fr,r */
 		a := SP(2, 1) | (1 << 21) /* dmtc1 */
-		o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
+		o1 = OP_RRR(a, p.To.Reg, obj.REG_NONE, p.From.Reg)
 
 	case 49: /* undef */
 		o1 = 52 /* trap -- teq r0, r0 */
 
 	/* relocation operations */
 	case 50: /* mov r,addr ==> lu + add REGSB, REGTMP + sw o(REGTMP) */
-		o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
+		o1 = OP_IRR(c.opirr(ALUI), 0, REGZERO, REGTMP)
 		rel := obj.Addrel(c.cursym)
 		rel.Off = int32(c.pc)
 		rel.Siz = 4
 		rel.Sym = p.To.Sym
 		rel.Add = p.To.Offset
 		rel.Type = objabi.R_ADDRMIPSU
-		o2 = OP_IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
+		o2 = OP_IRR(c.opirr(p.As), 0, REGTMP, p.From.Reg)
 		rel2 := obj.Addrel(c.cursym)
 		rel2.Off = int32(c.pc + 4)
 		rel2.Siz = 4
@@ -1586,19 +1591,19 @@
 
 		if o.size == 12 {
 			o3 = o2
-			o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
+			o2 = OP_RRR(c.oprrr(AADDVU), REGSB, REGTMP, REGTMP)
 			rel2.Off += 4
 		}
 
 	case 51: /* mov addr,r ==> lu + add REGSB, REGTMP + lw o(REGTMP) */
-		o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
+		o1 = OP_IRR(c.opirr(ALUI), 0, REGZERO, REGTMP)
 		rel := obj.Addrel(c.cursym)
 		rel.Off = int32(c.pc)
 		rel.Siz = 4
 		rel.Sym = p.From.Sym
 		rel.Add = p.From.Offset
 		rel.Type = objabi.R_ADDRMIPSU
-		o2 = OP_IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
+		o2 = OP_IRR(c.opirr(-p.As), 0, REGTMP, p.To.Reg)
 		rel2 := obj.Addrel(c.cursym)
 		rel2.Off = int32(c.pc + 4)
 		rel2.Siz = 4
@@ -1608,21 +1613,21 @@
 
 		if o.size == 12 {
 			o3 = o2
-			o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
+			o2 = OP_RRR(c.oprrr(AADDVU), REGSB, REGTMP, REGTMP)
 			rel2.Off += 4
 		}
 
 	case 52: /* mov $lext, r ==> lu + add REGSB, r + add */
 		// NOTE: this case does not use REGTMP. If it ever does,
 		// remove the NOTUSETMP flag in optab.
-		o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(p.To.Reg))
+		o1 = OP_IRR(c.opirr(ALUI), 0, REGZERO, p.To.Reg)
 		rel := obj.Addrel(c.cursym)
 		rel.Off = int32(c.pc)
 		rel.Siz = 4
 		rel.Sym = p.From.Sym
 		rel.Add = p.From.Offset
 		rel.Type = objabi.R_ADDRMIPSU
-		o2 = OP_IRR(c.opirr(add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
+		o2 = OP_IRR(c.opirr(add), 0, p.To.Reg, p.To.Reg)
 		rel2 := obj.Addrel(c.cursym)
 		rel2.Off = int32(c.pc + 4)
 		rel2.Siz = 4
@@ -1632,7 +1637,7 @@
 
 		if o.size == 12 {
 			o3 = o2
-			o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(p.To.Reg), uint32(p.To.Reg))
+			o2 = OP_RRR(c.oprrr(AADDVU), REGSB, p.To.Reg, p.To.Reg)
 			rel2.Off += 4
 		}
 
@@ -1642,7 +1647,7 @@
 		// NOTE: this case does not use REGTMP. If it ever does,
 		// remove the NOTUSETMP flag in optab.
 		o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
-		o2 = OP_IRR(c.opirr(p.As), uint32(0), uint32(REG_R3), uint32(p.From.Reg))
+		o2 = OP_IRR(c.opirr(p.As), 0, REG_R3, p.From.Reg)
 		rel := obj.Addrel(c.cursym)
 		rel.Off = int32(c.pc + 4)
 		rel.Siz = 4
@@ -1655,7 +1660,7 @@
 		// NOTE: this case does not use REGTMP. If it ever does,
 		// remove the NOTUSETMP flag in optab.
 		o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
-		o2 = OP_IRR(c.opirr(-p.As), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
+		o2 = OP_IRR(c.opirr(-p.As), 0, REG_R3, p.To.Reg)
 		rel := obj.Addrel(c.cursym)
 		rel.Off = int32(c.pc + 4)
 		rel.Siz = 4
@@ -1668,7 +1673,7 @@
 		// NOTE: this case does not use REGTMP. If it ever does,
 		// remove the NOTUSETMP flag in optab.
 		o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
-		o2 = OP_IRR(c.opirr(add), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
+		o2 = OP_IRR(c.opirr(add), 0, REG_R3, p.To.Reg)
 		rel := obj.Addrel(c.cursym)
 		rel.Off = int32(c.pc + 4)
 		rel.Siz = 4
@@ -1688,6 +1693,9 @@
 	case 58: /* vst wr, $soreg */
 		v := c.lsoffset(p.As, c.regoff(&p.To))
 		o1 = OP_VMI10(v, uint32(p.To.Reg), uint32(p.From.Reg), 9, c.twobitdf(p.As))
+
+	case 59:
+		o1 = OP_RRR(c.oprrr(p.As), p.From.Reg, REGZERO, p.To.Reg)
 	}
 
 	out[0] = o1
@@ -1888,6 +1896,16 @@
 		return SP(3, 4) | OP(0, 0)
 	case AMSUB:
 		return SP(3, 4) | OP(0, 4)
+	case AWSBH:
+		return SP(3, 7) | OP(20, 0)
+	case ADSBH:
+		return SP(3, 7) | OP(20, 4)
+	case ADSHD:
+		return SP(3, 7) | OP(44, 4)
+	case ASEB:
+		return SP(3, 7) | OP(132, 0)
+	case ASEH:
+		return SP(3, 7) | OP(196, 0)
 	}
 
 	if a < 0 {
diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go
index aa99855..189c1ae 100644
--- a/src/cmd/internal/obj/objfile.go
+++ b/src/cmd/internal/obj/objfile.go
@@ -292,8 +292,8 @@
 			// Don't include them if Flag_noRefName
 			return
 		}
-		if w.pkgpath != "" {
-			s.Name = strings.Replace(s.Name, "\"\".", w.pkgpath+".", -1)
+		if strings.HasPrefix(s.Name, `"".`) {
+			w.ctxt.Diag("unqualified symbol name: %v", s.Name)
 		}
 		w.AddString(s.Name)
 	})
@@ -423,7 +423,7 @@
 // contentHashSection returns a mnemonic for s's section.
 // The goal is to prevent content-addressability from moving symbols between sections.
 // contentHashSection only distinguishes between sets of sections for which this matters.
-// Allowing flexibility increases the effectiveness of content-addressibility.
+// Allowing flexibility increases the effectiveness of content-addressability.
 // But in some cases, such as doing addressing based on a base symbol,
 // we need to ensure that a symbol is always in a particular section.
 // Some of these conditions are duplicated in cmd/link/internal/ld.(*Link).symtab.
@@ -914,9 +914,9 @@
 			name = "TLS"
 		}
 		if ctxt.Arch.InFamily(sys.ARM, sys.PPC64) {
-			fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s%s+%x\n", int(r.Off), r.Siz, r.Type, name, ver, uint64(r.Add))
+			fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%v %s%s+%x\n", int(r.Off), r.Siz, r.Type, name, ver, uint64(r.Add))
 		} else {
-			fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s%s+%d\n", int(r.Off), r.Siz, r.Type, name, ver, r.Add)
+			fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%v %s%s+%d\n", int(r.Off), r.Siz, r.Type, name, ver, r.Add)
 		}
 	}
 }
diff --git a/src/cmd/internal/obj/plist.go b/src/cmd/internal/obj/plist.go
index 921dfee..9cf6a20 100644
--- a/src/cmd/internal/obj/plist.go
+++ b/src/cmd/internal/obj/plist.go
@@ -14,14 +14,18 @@
 
 type Plist struct {
 	Firstpc *Prog
-	Curfn   interface{} // holds a *gc.Node, if non-nil
+	Curfn   Func
 }
 
 // ProgAlloc is a function that allocates Progs.
 // It is used to provide access to cached/bulk-allocated Progs to the assemblers.
 type ProgAlloc func() *Prog
 
-func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc, myimportpath string) {
+func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc) {
+	if ctxt.Pkgpath == "" {
+		panic("Flushplist called without Pkgpath")
+	}
+
 	// Build list of symbols, and assign instructions to lists.
 	var curtext *LSym
 	var etext *Prog
@@ -96,8 +100,9 @@
 
 	// Add reference to Go arguments for assembly functions without them.
 	if ctxt.IsAsm {
+		pkgPrefix := objabi.PathToPrefix(ctxt.Pkgpath) + "."
 		for _, s := range text {
-			if !strings.HasPrefix(s.Name, "\"\".") {
+			if !strings.HasPrefix(s.Name, pkgPrefix) {
 				continue
 			}
 			// The current args_stackmap generation in the compiler assumes
@@ -107,6 +112,16 @@
 			if s.ABI() != ABI0 {
 				continue
 			}
+			// runtime.addmoduledata is a host ABI function, so it doesn't
+			// need FUNCDATA anyway. Moreover, cmd/link has special logic
+			// for linking it in eccentric build modes, which breaks if it
+			// has FUNCDATA references (e.g., cmd/cgo/internal/testplugin).
+			//
+			// TODO(cherryyz): Fix cmd/link's handling of plugins (see
+			// discussion on CL 523355).
+			if s.Name == "runtime.addmoduledata" {
+				continue
+			}
 			foundArgMap, foundArgInfo := false, false
 			for p := s.Func().Text; p != nil; p = p.Link {
 				if p.As == AFUNCDATA && p.From.Type == TYPE_CONST {
@@ -155,9 +170,7 @@
 			continue
 		}
 		linkpcln(ctxt, s)
-		if myimportpath != "" {
-			ctxt.populateDWARF(plist.Curfn, s, myimportpath)
-		}
+		ctxt.populateDWARF(plist.Curfn, s)
 		if ctxt.Headtype == objabi.Hwindows && ctxt.Arch.SEH != nil {
 			s.Func().sehUnwindInfoSym = ctxt.Arch.SEH(ctxt, s)
 		}
@@ -178,15 +191,16 @@
 		ctxt.Diag("%s: symbol %s redeclared", ctxt.PosTable.Pos(start), s.Name)
 		return
 	}
+	if strings.HasPrefix(s.Name, `"".`) {
+		ctxt.Diag("%s: unqualified symbol name: %s", ctxt.PosTable.Pos(start), s.Name)
+	}
 
 	// startLine should be the same line number that would be displayed via
 	// pcln, etc for the declaration (i.e., relative line number, as
 	// adjusted by //line).
-	_, startLine := ctxt.getFileSymbolAndLine(start)
+	_, startLine := ctxt.getFileIndexAndLine(start)
 
-	// TODO(mdempsky): Remove once cmd/asm stops writing "" symbols.
-	name := strings.Replace(s.Name, "\"\"", ctxt.Pkgpath, -1)
-	s.Func().FuncID = objabi.GetFuncID(name, flag&WRAPPER != 0 || flag&ABIWRAPPER != 0)
+	s.Func().FuncID = objabi.GetFuncID(s.Name, flag&WRAPPER != 0 || flag&ABIWRAPPER != 0)
 	s.Func().FuncFlag = ctxt.toFuncFlag(flag)
 	s.Func().StartLine = startLine
 	s.Set(AttrOnList, true)
diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go
index efc10ea..13143f5 100644
--- a/src/cmd/internal/obj/ppc64/a.out.go
+++ b/src/cmd/internal/obj/ppc64/a.out.go
@@ -422,7 +422,6 @@
 	C_U15CON   /* 15 bit unsigned constant */
 	C_S16CON   /* 16 bit signed constant */
 	C_U16CON   /* 16 bit unsigned constant */
-	C_32S16CON /* Any 32 bit constant of the form 0x....0000, signed or unsigned */
 	C_32CON    /* Any constant which fits into 32 bits. Can be signed or unsigned */
 	C_S34CON   /* 34 bit signed constant */
 	C_64CON    /* Any constant which fits into 64 bits. Can be signed or unsigned */
@@ -437,7 +436,6 @@
 	C_LOREG    /* An $n+reg memory arg where n is a 32 bit signed offset */
 	C_XOREG    /* An reg+reg memory arg */
 	C_FPSCR    /* The fpscr register */
-	C_XER      /* The xer, holds the carry bit */
 	C_LR       /* The link register */
 	C_CTR      /* The count register */
 	C_ANY      /* Any argument */
@@ -451,16 +449,13 @@
 
 	/* Aliased names which should be cleaned up, or integrated. */
 	C_SCON   = C_U15CON
-	C_UCON   = C_32S16CON
 	C_ADDCON = C_S16CON
 	C_ANDCON = C_U16CON
 	C_LCON   = C_32CON
 
 	/* Aliased names which may be generated by ppc64map for the optab. */
-	C_S3216CON = C_32S16CON // TODO: these should be treated differently (e.g xoris vs addis)
-	C_U3216CON = C_32S16CON
-	C_S32CON   = C_32CON
-	C_U32CON   = C_32CON
+	C_S32CON = C_32CON
+	C_U32CON = C_32CON
 )
 
 const (
diff --git a/src/cmd/internal/obj/ppc64/anames9.go b/src/cmd/internal/obj/ppc64/anames9.go
index ad6776a..72d1f49 100644
--- a/src/cmd/internal/obj/ppc64/anames9.go
+++ b/src/cmd/internal/obj/ppc64/anames9.go
@@ -27,7 +27,6 @@
 	"U15CON",
 	"S16CON",
 	"U16CON",
-	"32S16CON",
 	"32CON",
 	"S34CON",
 	"64CON",
@@ -42,7 +41,6 @@
 	"LOREG",
 	"XOREG",
 	"FPSCR",
-	"XER",
 	"LR",
 	"CTR",
 	"ANY",
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index 4559eed..0f01dfa 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -65,6 +65,11 @@
 	PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
 )
 
+const (
+	// The preferred hardware nop instruction.
+	NOP = 0x60000000
+)
+
 type Optab struct {
 	as    obj.As // Opcode
 	a1    uint8  // p.From argument (obj.Addr). p is of type obj.Prog.
@@ -99,7 +104,11 @@
 //
 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
 // to arrange entries to minimize text size of each opcode.
-var optab = []Optab{
+//
+// optab is the sorted result of combining optabBase, optabGen, and prefixableOptab.
+var optab []Optab
+
+var optabBase = []Optab{
 	{as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
 	{as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
 	{as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
@@ -111,8 +120,6 @@
 	{as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
 	{as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
 	{as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
-	{as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
-	{as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
 	{as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
 	{as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
 	{as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
@@ -129,14 +136,12 @@
 	{as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
 	{as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
 	{as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
-	{as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
-	{as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
 	{as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
 	{as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
 	{as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
 	{as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
-	{as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
-	{as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
+	{as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
+	{as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
 	{as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
 	{as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
 	{as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
@@ -153,14 +158,12 @@
 	{as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
 	{as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
 	{as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
-	{as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
-	{as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
 	{as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
 	{as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
 	{as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
 	{as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
-	{as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
-	{as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
+	{as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
+	{as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
 	{as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
 	{as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
 	{as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
@@ -183,13 +186,14 @@
 	{as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
 	{as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
 	{as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
-	{as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
-	{as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
-	{as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
-	{as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
+	{as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
+	{as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
+	{as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
+	{as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
 	{as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
 	{as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
 	{as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
+	{as: ARLDC, a1: C_REG, a3: C_U8CON, a4: C_U8CON, a6: C_REG, type_: 9, size: 4},
 	{as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
 	{as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
 	{as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
@@ -230,7 +234,6 @@
 
 	{as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
 	{as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
-	{as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
 	{as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
 	{as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
 	{as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
@@ -244,7 +247,6 @@
 
 	{as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
 	{as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
-	{as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
 	{as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
 	{as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
 	{as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
@@ -541,7 +543,7 @@
 //
 // This requires an ISA 3.1 compatible cpu (e.g Power10), and when linking externally an ELFv2 1.5 compliant.
 var prefixableOptab = []PrefixableOptab{
-	{Optab: Optab{as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
+	{Optab: Optab{as: AMOVD, a1: C_S34CON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
 	{Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
 	{Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
 	{Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
@@ -574,6 +576,8 @@
 
 	{Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
 	{Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
+	{Optab: Optab{as: AADD, a1: C_S34CON, a2: C_REG, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
+	{Optab: Optab{as: AADD, a1: C_S34CON, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
 }
 
 var oprange [ALAST & obj.AMask][]Optab
@@ -603,32 +607,6 @@
 	return 0
 }
 
-// Get the implied register of an operand which doesn't specify one.  These show up
-// in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
-// or "MOVD R5, foo+10(SP) or pseudo-register is used.  The other common case is when
-// generating constants in register like "MOVD $constant, Rx".
-func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
-	class := oclass(a)
-	if class >= C_ZCON && class <= C_64CON {
-		return REGZERO
-	}
-	switch class {
-	case C_SACON, C_LACON:
-		return REGSP
-	case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
-		switch a.Name {
-		case obj.NAME_EXTERN, obj.NAME_STATIC:
-			return REGSB
-		case obj.NAME_AUTO, obj.NAME_PARAM:
-			return REGSP
-		case obj.NAME_NONE:
-			return REGZERO
-		}
-	}
-	c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
-	return 0
-}
-
 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 	p := cursym.Func().Text
 	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
@@ -824,7 +802,6 @@
 	// lay out the code, emitting code and data relocations.
 
 	bp := c.cursym.P
-	nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
 	var i int32
 	for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
 		c.pc = p.Pc
@@ -839,13 +816,13 @@
 			if v > 0 {
 				// Same padding instruction for all
 				for i = 0; i < int32(v/4); i++ {
-					c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
+					c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP)
 					bp = bp[4:]
 				}
 			}
 		} else {
 			if p.Mark&PFX_X64B != 0 {
-				c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
+				c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP)
 				bp = bp[4:]
 			}
 			o.asmout(&c, p, o, &out)
@@ -889,9 +866,6 @@
 		case REG_LR:
 			return C_LR
 
-		case REG_XER:
-			return C_XER
-
 		case REG_CTR:
 			return C_CTR
 		}
@@ -944,14 +918,15 @@
 			}
 
 		case obj.NAME_AUTO:
+			a.Reg = REGSP
 			c.instoffset = int64(c.autosize) + a.Offset
-
 			if c.instoffset >= -BIG && c.instoffset < BIG {
 				return C_SOREG
 			}
 			return C_LOREG
 
 		case obj.NAME_PARAM:
+			a.Reg = REGSP
 			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
 			if c.instoffset >= -BIG && c.instoffset < BIG {
 				return C_SOREG
@@ -1011,6 +986,7 @@
 			return C_LACON
 
 		case obj.NAME_AUTO:
+			a.Reg = REGSP
 			c.instoffset = int64(c.autosize) + a.Offset
 			if c.instoffset >= -BIG && c.instoffset < BIG {
 				return C_SACON
@@ -1018,6 +994,7 @@
 			return C_LACON
 
 		case obj.NAME_PARAM:
+			a.Reg = REGSP
 			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
 			if c.instoffset >= -BIG && c.instoffset < BIG {
 				return C_SACON
@@ -1040,10 +1017,6 @@
 			case sbits <= 16:
 				return C_U16CON
 			case sbits <= 31:
-				// Special case, a positive int32 value which is a multiple of 2^16
-				if c.instoffset&0xFFFF == 0 {
-					return C_U3216CON
-				}
 				return C_U32CON
 			case sbits <= 32:
 				return C_U32CON
@@ -1058,10 +1031,6 @@
 			case sbits <= 15:
 				return C_S16CON
 			case sbits <= 31:
-				// Special case, a negative int32 value which is a multiple of 2^16
-				if c.instoffset&0xFFFF == 0 {
-					return C_S3216CON
-				}
 				return C_S32CON
 			case sbits <= 33:
 				return C_S34CON
@@ -1158,7 +1127,7 @@
 	switch a {
 
 	case C_SPR:
-		if b == C_LR || b == C_XER || b == C_CTR {
+		if b == C_LR || b == C_CTR {
 			return true
 		}
 
@@ -1182,15 +1151,12 @@
 	case C_S16CON:
 		return cmp(C_U15CON, b)
 	case C_32CON:
-		return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
+		return cmp(C_S16CON, b) || cmp(C_U16CON, b)
 	case C_S34CON:
 		return cmp(C_32CON, b)
 	case C_64CON:
 		return cmp(C_S34CON, b)
 
-	case C_32S16CON:
-		return cmp(C_ZCON, b)
-
 	case C_LACON:
 		return cmp(C_SACON, b)
 
@@ -1303,10 +1269,6 @@
 			entry.ispfx = true
 			entry.size = entry.pfxsize
 		}
-		// Use the legacy assembler function if none provided.
-		if entry.asmout == nil {
-			entry.asmout = asmout
-		}
 		prefixOptab = append(prefixOptab, entry.Optab)
 
 	}
@@ -1318,16 +1280,20 @@
 			}
 		}
 	}
+
+	// Append the generated entries, sort, and fill out oprange.
+	optab = make([]Optab, 0, len(optabBase)+len(optabGen)+len(prefixOptab))
+	optab = append(optab, optabBase...)
+	optab = append(optab, optabGen...)
+	optab = append(optab, prefixOptab...)
+	sort.Slice(optab, optabLess)
+
 	for i := range optab {
 		// Use the legacy assembler function if none provided.
 		if optab[i].asmout == nil {
 			optab[i].asmout = asmout
 		}
 	}
-	// Append the generated entries, sort, and fill out oprange.
-	optab = append(optab, optabGen...)
-	optab = append(optab, prefixOptab...)
-	sort.Slice(optab, optabLess)
 
 	for i := 0; i < len(optab); {
 		r := optab[i].as
@@ -1993,10 +1959,10 @@
 			opset(APTESYNC, r0)
 			opset(ATLBSYNC, r0)
 
-		case ARLWMI:
-			opset(ARLWMICC, r0)
-			opset(ARLWNM, r0)
+		case ARLWNM:
 			opset(ARLWNMCC, r0)
+			opset(ARLWMI, r0)
+			opset(ARLWMICC, r0)
 
 		case ARLDMI:
 			opset(ARLDMICC, r0)
@@ -2269,10 +2235,6 @@
 	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
 }
 
-func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
-	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
-}
-
 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
 	return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
 }
@@ -2281,6 +2243,16 @@
 	return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
 }
 
+/* MD-form 2-register, 2 6-bit immediate operands */
+func AOP_MD(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
+	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
+}
+
+/* MDS-form 3-register, one 6-bit immediate operand. The rsh argument is a register. */
+func AOP_MDS(op, to, from, rsh, m uint32) uint32 {
+	return AOP_MD(op, to, from, rsh&31, m)
+}
+
 func AOP_PFX_00_8LS(r, ie uint32) uint32 {
 	return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
 }
@@ -2477,87 +2449,59 @@
 	return
 }
 
-/*
- * 32-bit masks
- */
-func getmask(m []byte, v uint32) bool {
-	m[1] = 0
-	m[0] = m[1]
-	if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
-		if getmask(m, ^v) {
-			i := int(m[0])
-			m[0] = m[1] + 1
-			m[1] = byte(i - 1)
-			return true
-		}
-
-		return false
+// Determine the mask begin (mb) and mask end (me) values
+// for a valid word rotate mask. A valid 32 bit mask is of
+// the form 1+0*1+ or 0*1+0*.
+//
+// Note, me is inclusive.
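+//
+// For example, decodeMask32(0x00FFFF00) yields mb=8, me=23, valid=true,
+// and the wrapping mask 0xFF0000FF yields mb=24, me=7, valid=true.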
+func decodeMask32(mask uint32) (mb, me uint32, valid bool) {
+	mb = uint32(bits.LeadingZeros32(mask))
+	me = uint32(32 - bits.TrailingZeros32(mask))
+	mbn := uint32(bits.LeadingZeros32(^mask))
+	men := uint32(32 - bits.TrailingZeros32(^mask))
+	// Check for a wrapping mask (e.g. bits set at 0 and 31)
+	if mb == 0 && me == 32 {
+		// swap the inverted values
+		mb, me = men, mbn
 	}
 
-	for i := 0; i < 32; i++ {
-		if v&(1<<uint(31-i)) != 0 {
-			m[0] = byte(i)
-			for {
-				m[1] = byte(i)
-				i++
-				if i >= 32 || v&(1<<uint(31-i)) == 0 {
-					break
-				}
-			}
-
-			for ; i < 32; i++ {
-				if v&(1<<uint(31-i)) != 0 {
-					return false
-				}
-			}
-			return true
-		}
-	}
-
-	return false
+	// Validate mask is of the binary form 1+0*1+ or 0*1+0*
+	// Isolate rightmost 1 (if none 0) and add.
+	v := mask
+	vp := (v & -v) + v
+	// Likewise, check for the wrapping (inverted) case.
+	vn := ^v
+	vpn := (vn & -vn) + vn
+	return mb, (me - 1) & 31, (v&vp == 0 || vn&vpn == 0) && v != 0
 }
 
-func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
-	if !getmask(m, v) {
-		c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
-	}
+// Decompose a mask of contiguous bits into a begin (mb) and
+// end (me) value.
+//
+// 64b mask values cannot wrap on any valid PPC64 instruction.
+// Only masks of the form 0*1+0* are valid.
+//
+// Note, me is inclusive.
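+//
+// For example, decodeMask64(0x00000000FFFF0000) yields mb=32, me=47,
+// valid=true, while a wrapping value like 0xFF000000000000FF yields
+// valid=false.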
+func decodeMask64(mask int64) (mb, me uint32, valid bool) {
+	m := uint64(mask)
+	mb = uint32(bits.LeadingZeros64(m))
+	me = uint32(64 - bits.TrailingZeros64(m))
+	valid = ((m&-m)+m)&m == 0 && m != 0
+	return mb, (me - 1) & 63, valid
 }
 
-/*
- * 64-bit masks (rldic etc)
- */
-func getmask64(m []byte, v uint64) bool {
-	m[1] = 0
-	m[0] = m[1]
-	for i := 0; i < 64; i++ {
-		if v&(uint64(1)<<uint(63-i)) != 0 {
-			m[0] = byte(i)
-			for {
-				m[1] = byte(i)
-				i++
-				if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
-					break
-				}
-			}
-
-			for ; i < 64; i++ {
-				if v&(uint64(1)<<uint(63-i)) != 0 {
-					return false
-				}
-			}
-			return true
-		}
+// Load the lower 16 bits of a constant into register r.
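+//
+// For example, loadl16(5, 0x1234) encodes ori r5,r5,0x1234, while
+// loadl16(5, 0x12340000) returns NOP because the low halfword is zero.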
+func loadl16(r int, d int64) uint32 {
+	v := uint16(d)
+	if v == 0 {
+		// Avoid generating "ori r,r,0", r != 0. Instead, generate the architecturally preferred nop.
+		// For example, "ori r31,r31,0" is a special execution serializing nop on Power10 called "exser".
+		return NOP
 	}
-
-	return false
+	return LOP_IRR(OP_ORI, uint32(r), uint32(r), uint32(v))
 }
 
-func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
-	if !getmask64(m, v) {
-		c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
-	}
-}
-
+// Load the upper 16 bits of a 32b constant into register r.
 func loadu32(r int, d int64) uint32 {
 	v := int32(d >> 16)
 	if isuint32(uint64(d)) {
@@ -2602,31 +2546,18 @@
 
 		v := int32(d)
 		r := int(p.From.Reg)
-		if r == 0 {
-			r = c.getimpliedreg(&p.From, p)
-		}
+		// p.From may be a constant value or an offset(reg) type argument.
+		isZeroOrR0 := r&0x1f == 0
+
 		if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
 			c.ctxt.Diag("literal operation on R0\n%v", p)
 		}
 		a := OP_ADDI
-		if o.a1 == C_UCON {
-			if d&0xffff != 0 {
-				log.Fatalf("invalid handling of %v", p)
-			}
-			// For UCON operands the value is right shifted 16, using ADDIS if the
-			// value should be signed, ORIS if unsigned.
-			v >>= 16
-			if r == REGZERO && isuint32(uint64(d)) {
-				o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
-				break
-			}
-
-			a = OP_ADDIS
-		} else if int64(int16(d)) != d {
+		if int64(int16(d)) != d {
 			// Operand is 16 bit value with sign bit set
 			if o.a1 == C_ANDCON {
 				// Needs unsigned 16 bit so use ORI
-				if r == 0 || r == REGZERO {
+				if isZeroOrR0 {
 					o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
 					break
 				}
@@ -2665,7 +2596,7 @@
 		// AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
 		switch p.As {
 		case AROTL:
-			o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
+			o1 = AOP_MD(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
 		case AROTLW:
 			o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
 		default:
@@ -2680,10 +2611,6 @@
 
 	case 7: /* mov r, soreg ==> stw o(r) */
 		r := int(p.To.Reg)
-
-		if r == 0 {
-			r = c.getimpliedreg(&p.To, p)
-		}
 		v := c.regoff(&p.To)
 		if int32(int16(v)) != v {
 			log.Fatalf("mishandled instruction %v", p)
@@ -2697,10 +2624,6 @@
 
 	case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
 		r := int(p.From.Reg)
-
-		if r == 0 {
-			r = c.getimpliedreg(&p.From, p)
-		}
 		v := c.regoff(&p.From)
 		if int32(int16(v)) != v {
 			log.Fatalf("mishandled instruction %v", p)
@@ -2715,6 +2638,14 @@
 		// Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
 		o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
 
+	case 9: /* RLDC Ra, $sh, $mb, Rb */
+		sh := uint32(p.RestArgs[0].Addr.Offset) & 0x3F
+		mb := uint32(p.RestArgs[1].Addr.Offset) & 0x3F
+		o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (uint32(sh) & 0x1F))
+		o1 |= (sh & 0x20) >> 4 // sh[5] is placed in bit 1.
+		o1 |= (mb & 0x1F) << 6 // mb[0:4] is placed in bits 6-10.
+		o1 |= (mb & 0x20)      // mb[5] is placed in bit 5
+
 	case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
 		r := int(p.Reg)
 
@@ -2753,7 +2684,7 @@
 			rel.Add = int64(v)
 			rel.Type = objabi.R_CALLPOWER
 		}
-		o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
+		o2 = NOP // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
 
 	case 13: /* mov[bhwd]{z,} r,r */
 		// This needs to handle "MOV* $0, Rx".  This shows up because $0 also
@@ -2787,62 +2718,47 @@
 		}
 
 	case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
-		r := int(p.Reg)
+		r := uint32(p.Reg)
 
 		if r == 0 {
-			r = int(p.To.Reg)
+			r = uint32(p.To.Reg)
 		}
 		d := c.vregoff(p.GetFrom3())
-		var a int
 		switch p.As {
 
 		// These opcodes expect a mask operand that has to be converted into the
 		// appropriate operand.  The way these were defined, not all valid masks are possible.
 		// Left here for compatibility in case they were used or generated.
 		case ARLDCL, ARLDCLCC:
-			var mask [2]uint8
-			c.maskgen64(p, mask[:], uint64(d))
-
-			a = int(mask[0]) /* MB */
-			if mask[1] != 63 {
+			mb, me, valid := decodeMask64(d)
+			if me != 63 || !valid {
 				c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
 			}
-			o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
-			o1 |= (uint32(a) & 31) << 6
-			if a&0x20 != 0 {
-				o1 |= 1 << 5 /* mb[5] is top bit */
-			}
+			o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), mb)
 
 		case ARLDCR, ARLDCRCC:
-			var mask [2]uint8
-			c.maskgen64(p, mask[:], uint64(d))
-
-			a = int(mask[1]) /* ME */
-			if mask[0] != 0 {
-				c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
+			mb, me, valid := decodeMask64(d)
+			if mb != 0 || !valid {
+				c.ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p)
 			}
-			o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
-			o1 |= (uint32(a) & 31) << 6
-			if a&0x20 != 0 {
-				o1 |= 1 << 5 /* mb[5] is top bit */
-			}
+			o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), me)
 
 		// These opcodes use a shift count like the ppc64 asm, no mask conversion done
 		case ARLDICR, ARLDICRCC:
-			me := int(d)
+			me := uint32(d)
 			sh := c.regoff(&p.From)
 			if me < 0 || me > 63 || sh > 63 {
 				c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
 			}
-			o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
+			o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), me)
 
 		case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
-			mb := int(d)
+			mb := uint32(d)
 			sh := c.regoff(&p.From)
 			if mb < 0 || mb > 63 || sh > 63 {
 				c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
 			}
-			o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
+			o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), mb)
 
 		case ACLRLSLDI:
 			// This is an extended mnemonic defined in the ISA section C.8.1
@@ -2854,11 +2770,10 @@
 			if n > b || b > 63 {
 				c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
 			}
-			o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
+			o1 = AOP_MD(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
 
 		default:
 			c.ctxt.Diag("unexpected op in rldc case\n%v", p)
-			a = 0
 		}
 
 	case 17, /* bc bo,bi,lbra (same for now) */
@@ -2963,16 +2878,9 @@
 		if r == 0 {
 			r = int(p.To.Reg)
 		}
-		if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
-			c.ctxt.Diag("literal operation on R0\n%v", p)
-		}
-		if p.As == AADDIS {
-			o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
-		} else {
-			o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
-		}
+		o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
 
-	case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
+	case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add, add $s34con,r1 ==> addis+ori+slw+ori+add */
 		if p.To.Reg == REGTMP || p.Reg == REGTMP {
 			c.ctxt.Diag("can't synthesize large constant\n%v", p)
 		}
@@ -2984,19 +2892,23 @@
 		if p.From.Sym != nil {
 			c.ctxt.Diag("%v is not supported", p)
 		}
-		// If operand is ANDCON, generate 2 instructions using
-		// ORI for unsigned value; with LCON 3 instructions.
-		if o.size == 8 {
-			o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
-			o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
-		} else {
-			o1 = loadu32(REGTMP, d)
-			o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
-			o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
-		}
-
 		if o.ispfx {
 			o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
+		} else if o.size == 8 {
+			o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))          // tmp = uint16(d)
+			o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = tmp + from
+		} else if o.size == 12 {
+			// Note, o1 is ADDIS if d is negative, ORIS otherwise.
+			o1 = loadu32(REGTMP, d)                                          // tmp = d & 0xFFFF0000
+			o2 = loadl16(REGTMP, d)                                          // tmp |= d & 0xFFFF
+			o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = from + tmp
+		} else {
+			// For backwards compatibility with GOPPC64 < 10, generate 34b constants in register.
+			o1 = LOP_IRR(OP_ADDIS, REGZERO, REGTMP, uint32(d>>32)) // tmp = sign_extend((d>>32)&0xFFFF0000)
+			o2 = loadl16(REGTMP, int64(d>>16))                     // tmp |= (d>>16)&0xFFFF
+			o3 = AOP_MD(OP_RLDICR, REGTMP, REGTMP, 16, 63-16)      // tmp <<= 16
+			o4 = loadl16(REGTMP, int64(uint16(d)))                 // tmp |= d&0xFFFF
+			o5 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
 		}
 
 	case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
@@ -3016,7 +2928,7 @@
 			o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
 		} else {
 			o1 = loadu32(REGTMP, d)
-			o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
+			o2 = loadl16(REGTMP, d)
 			o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
 		}
 		if p.From.Sym != nil {
@@ -3069,7 +2981,7 @@
 			o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
 
 		} else {
-			o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
+			o1 = AOP_MD(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
 		}
 		if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
 			o1 |= 1 // Set the condition code bit
@@ -3085,9 +2997,6 @@
 			// Load a 32 bit constant, or relocation depending on if a symbol is attached
 			o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
 		default:
-			if r == 0 {
-				r = c.getimpliedreg(&p.From, p)
-			}
 			// Add a 32 bit offset to a register.
 			o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
 			o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
@@ -3112,87 +3021,60 @@
 		if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
 			c.ctxt.Diag("can't synthesize large constant\n%v", p)
 		}
-		v := c.regoff(p.GetFrom3())
+		v := c.vregoff(p.GetFrom3())
 		o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
-		o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
+		o2 = loadl16(REGTMP, v)
 		o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
 		if p.From.Sym != nil {
 			c.ctxt.Diag("%v is not supported", p)
 		}
 
 	case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
-		v := c.regoff(&p.From)
-
+		sh := uint32(c.regoff(&p.From))
 		d := c.vregoff(p.GetFrom3())
-		var mask [2]uint8
-		c.maskgen64(p, mask[:], uint64(d))
-		var a int
+		mb, me, valid := decodeMask64(d)
+		var a uint32
 		switch p.As {
 		case ARLDC, ARLDCCC:
-			a = int(mask[0]) /* MB */
-			if int32(mask[1]) != (63 - v) {
-				c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
+			a = mb
+			if me != (63-sh) || !valid {
+				c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p)
 			}
 
 		case ARLDCL, ARLDCLCC:
-			a = int(mask[0]) /* MB */
-			if mask[1] != 63 {
-				c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
+			a = mb
+			if mb != 63 || !valid {
+				c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p)
 			}
 
 		case ARLDCR, ARLDCRCC:
-			a = int(mask[1]) /* ME */
-			if mask[0] != 0 {
-				c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
+			a = me
+			if mb != 0 || !valid {
+				c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p)
 			}
 
 		default:
 			c.ctxt.Diag("unexpected op in rldic case\n%v", p)
-			a = 0
 		}
-
-		o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
-		o1 |= (uint32(a) & 31) << 6
-		if v&0x20 != 0 {
-			o1 |= 1 << 1
-		}
-		if a&0x20 != 0 {
-			o1 |= 1 << 5 /* mb[5] is top bit */
-		}
+		o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, a)
 
 	case 30: /* rldimi $sh,s,$mask,a */
-		v := c.regoff(&p.From)
-
+		sh := uint32(c.regoff(&p.From))
 		d := c.vregoff(p.GetFrom3())
 
 		// Original opcodes had mask operands which had to be converted to a shift count as expected by
 		// the ppc64 asm.
 		switch p.As {
 		case ARLDMI, ARLDMICC:
-			var mask [2]uint8
-			c.maskgen64(p, mask[:], uint64(d))
-			if int32(mask[1]) != (63 - v) {
-				c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
+			mb, me, valid := decodeMask64(d)
+			if me != (63-sh) || !valid {
+				c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), me, sh, p)
 			}
-			o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
-			o1 |= (uint32(mask[0]) & 31) << 6
-			if v&0x20 != 0 {
-				o1 |= 1 << 1
-			}
-			if mask[0]&0x20 != 0 {
-				o1 |= 1 << 5 /* mb[5] is top bit */
-			}
+			o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb)
 
 		// Opcodes with shift count operands.
 		case ARLDIMI, ARLDIMICC:
-			o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
-			o1 |= (uint32(d) & 31) << 6
-			if d&0x20 != 0 {
-				o1 |= 1 << 5
-			}
-			if v&0x20 != 0 {
-				o1 |= 1 << 1
-			}
+			o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, uint32(d))
 		}
 
 	case 31: /* dword */
@@ -3238,11 +3120,7 @@
 
 	case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
 		v := c.regoff(&p.To)
-
 		r := int(p.To.Reg)
-		if r == 0 {
-			r = c.getimpliedreg(&p.To, p)
-		}
 		// Offsets in DS form stores must be a multiple of 4
 		if o.ispfx {
 			o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
@@ -3259,11 +3137,7 @@
 
 	case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
 		v := c.regoff(&p.From)
-
 		r := int(p.From.Reg)
-		if r == 0 {
-			r = c.getimpliedreg(&p.From, p)
-		}
 
 		if o.ispfx {
 			o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
@@ -3467,24 +3341,6 @@
 		}
 		o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
 
-	case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
-		v := c.regoff(&p.From)
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		switch p.As {
-		case AOR:
-			o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
-		case AXOR:
-			o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
-		case AANDCC:
-			o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
-		default:
-			o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
-		}
-
 	case 60: /* tw to,a,b */
 		r := int(c.regoff(&p.From) & 31)
 
@@ -3496,31 +3352,35 @@
 		v := c.regoff(&p.To)
 		o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
 
-	case 62: /* rlwmi $sh,s,$mask,a */
+	case 62: /* clrlslwi $sh,s,$mask,a */
 		v := c.regoff(&p.From)
-		switch p.As {
-		case ACLRLSLWI:
-			n := c.regoff(p.GetFrom3())
-			// This is an extended mnemonic described in the ISA C.8.2
-			// clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
-			// It maps onto rlwinm which is directly generated here.
-			if n > v || v >= 32 {
-				c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
-			}
-
-			o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
-		default:
-			var mask [2]uint8
-			c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
-			o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
-			o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
+		n := c.regoff(p.GetFrom3())
+		// This is an extended mnemonic described in the ISA C.8.2
+		// clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
+		// It maps onto rlwinm which is directly generated here.
+		if n > v || v >= 32 {
+			c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
 		}
 
-	case 63: /* rlwmi b,s,$mask,a */
-		var mask [2]uint8
-		c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
-		o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
-		o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
+		o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
+
+	case 63: /* rlwimi/rlwnm/rlwinm [$sh,b],s,[$mask or mb,me],a */
+		var mb, me uint32
+		if len(p.RestArgs) == 1 { // Mask needs to be decomposed into mb and me.
+			var valid bool
+			// Note, optab rules ensure $mask is a 32b constant.
+			mb, me, valid = decodeMask32(uint32(p.RestArgs[0].Addr.Offset))
+			if !valid {
+				c.ctxt.Diag("cannot generate mask #%x\n%v", uint64(p.RestArgs[0].Addr.Offset), p)
+			}
+		} else { // Otherwise, mask is already passed as mb and me in RestArgs.
+			mb, me = uint32(p.RestArgs[0].Addr.Offset), uint32(p.RestArgs[1].Addr.Offset)
+		}
+		if p.From.Type == obj.TYPE_CONST {
+			o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Offset), mb, me)
+		} else {
+			o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
+		}
 
 	case 64: /* mtfsf fr[, $m] {,fpcsr} */
 		var v int32
@@ -3921,17 +3781,6 @@
 	case 101:
 		o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
 
-	case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
-		mb := uint32(c.regoff(&p.RestArgs[0].Addr))
-		me := uint32(c.regoff(&p.RestArgs[1].Addr))
-		sh := uint32(c.regoff(&p.From))
-		o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
-
-	case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
-		mb := uint32(c.regoff(&p.RestArgs[0].Addr))
-		me := uint32(c.regoff(&p.RestArgs[1].Addr))
-		o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
-
 	case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
 		o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
 
@@ -4446,10 +4295,6 @@
 	case AHRFID:
 		return OPVCC(19, 274, 0, 0)
 
-	case ARLWMI:
-		return OPVCC(20, 0, 0, 0)
-	case ARLWMICC:
-		return OPVCC(20, 0, 0, 1)
 	case ARLWNM:
 		return OPVCC(23, 0, 0, 0)
 	case ARLWNMCC:
diff --git a/src/cmd/internal/obj/ppc64/asm_test.go b/src/cmd/internal/obj/ppc64/asm_test.go
index b8995dc..7167a6a 100644
--- a/src/cmd/internal/obj/ppc64/asm_test.go
+++ b/src/cmd/internal/obj/ppc64/asm_test.go
@@ -7,6 +7,7 @@
 import (
 	"bytes"
 	"fmt"
+	"internal/buildcfg"
 	"internal/testenv"
 	"math"
 	"os"
@@ -198,11 +199,11 @@
 			t.Errorf("Failed to compile %v: %v\n", pgm, err)
 		}
 		if !strings.Contains(string(out), pgm.align) {
-			t.Errorf(fmt.Sprintf("Fatal, misaligned text with prefixed instructions:\n%s\n", string(out)))
+			t.Errorf("Fatal, misaligned text with prefixed instructions:\n%s", out)
 		}
 		hasNop := strings.Contains(string(out), "00 00 00 60")
 		if hasNop != pgm.hasNop {
-			t.Errorf(fmt.Sprintf("Fatal, prefixed instruction is missing nop padding:\n%s\n", string(out)))
+			t.Errorf("Fatal, prefixed instruction is missing nop padding:\n%s", out)
 		}
 	}
 }
@@ -464,7 +465,6 @@
 		{obj.Addr{Type: obj.TYPE_REG, Reg: REG_CR1}, C_CREG},
 		{obj.Addr{Type: obj.TYPE_REG, Reg: REG_CR1SO}, C_CRBIT},
 		{obj.Addr{Type: obj.TYPE_REG, Reg: REG_SPR0}, C_SPR},
-		{obj.Addr{Type: obj.TYPE_REG, Reg: REG_SPR0 + 1}, C_XER},
 		{obj.Addr{Type: obj.TYPE_REG, Reg: REG_SPR0 + 8}, C_LR},
 		{obj.Addr{Type: obj.TYPE_REG, Reg: REG_SPR0 + 9}, C_CTR},
 		{obj.Addr{Type: obj.TYPE_REG, Reg: REG_FPSCR}, C_FPSCR},
@@ -516,12 +516,10 @@
 		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 32}, C_U8CON},
 		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 14}, C_U15CON},
 		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 15}, C_U16CON},
-		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 16}, C_U3216CON},
 		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 + 1<<16}, C_U32CON},
 		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 32}, C_S34CON},
 		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 33}, C_64CON},
 		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -1}, C_S16CON},
-		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -0x10000}, C_S3216CON},
 		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -0x10001}, C_S32CON},
 		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -(1 << 33)}, C_S34CON},
 		{obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -(1 << 34)}, C_64CON},
@@ -553,3 +551,18 @@
 		}
 	}
 }
+
+// The optab size should remain constant when reinitializing the PPC64 assembler backend.
+func TestOptabReinit(t *testing.T) {
+	buildcfg.GOOS = "linux"
+	buildcfg.GOARCH = "ppc64le"
+	buildcfg.GOPPC64 = 8
+	buildop(nil)
+	optabLen := len(optab)
+	buildcfg.GOPPC64 = 9
+	buildop(nil)
+	reinitOptabLen := len(optab)
+	if reinitOptabLen != optabLen {
+		t.Errorf("rerunning buildop changes optab size from %d to %d", optabLen, reinitOptabLen)
+	}
+}
diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go
index 02831b8..a3d392d 100644
--- a/src/cmd/internal/obj/ppc64/obj9.go
+++ b/src/cmd/internal/obj/ppc64/obj9.go
@@ -36,8 +36,42 @@
 	"cmd/internal/sys"
 	"internal/abi"
 	"log"
+	"math/bits"
 )
 
+// Test if this value can be encoded as a mask for
+// li -1, rx; rlic rx,rx,sh,mb.
+// Masks can also extend from the msb and wrap around to
+// the lsb. That is, the valid masks are 64 bit strings
+// of the form: 0..01..10..0 or 1..10..01..1 or 1...1
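+// For example, 0x00FFFF0000000000 (a single run of ones) and the wrapping
+// mask 0xFF000000000000FF are accepted, while 0x0F0F000000000000 is not.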
+func isPPC64DoublewordRotateMask(v64 int64) bool {
+	// Isolate rightmost 1 (if none 0) and add.
+	v := uint64(v64)
+	vp := (v & -v) + v
+	// Likewise, for the wrapping case.
+	vn := ^v
+	vpn := (vn & -vn) + vn
+	return (v&vp == 0 || vn&vpn == 0) && v != 0
+}
+
+// Encode a doubleword rotate mask into mb (mask begin) and
+// me (mask end, inclusive). Note, POWER ISA labels bits in
+// big endian order.
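+// For example, the mask 0x00FFFF0000000000 encodes as (mb, me) = (8, 23),
+// and the wrapping mask 0xFF000000000000FF encodes as (56, 7).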
+func encodePPC64RLDCMask(mask int64) (mb, me int) {
+	// Determine boundaries and then decode them
+	mb = bits.LeadingZeros64(uint64(mask))
+	me = 64 - bits.TrailingZeros64(uint64(mask))
+	mbn := bits.LeadingZeros64(^uint64(mask))
+	men := 64 - bits.TrailingZeros64(^uint64(mask))
+	// Check for a wrapping mask (e.g. bits at 0 and 63)
+	if mb == 0 && me == 64 {
+		// swap the inverted values
+		mb, me = men, mbn
+	}
+	// Note, me is inclusive.
+	return mb, me - 1
+}
+
 func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 	p.From.Class = 0
 	p.To.Class = 0
@@ -79,16 +113,78 @@
 			}
 		}
 
+	case AMOVW, AMOVWZ:
+		// Note, for backwards compatibility, MOVW $const, Rx and MOVWZ $const, Rx are identical.
+		if p.From.Type == obj.TYPE_CONST && p.From.Offset != 0 && p.From.Offset&0xFFFF == 0 {
+			// This is a constant shifted 16 bits to the left; convert it to ADDIS/ORIS $const,...
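+			// For example, MOVW $0x00210000, R5 is rewritten as ADDIS $0x21, R0, R5.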
+			p.As = AADDIS
+			// Use ORIS for large constants which should not be sign extended.
+			if p.From.Offset >= 0x80000000 {
+				p.As = AORIS
+			}
+			p.Reg = REG_R0
+			p.From.Offset >>= 16
+		}
+
 	case AMOVD:
-		// 32b constants (signed and unsigned) can be generated via 1 or 2 instructions.
-		// All others must be placed in memory and loaded.
+		// Skip this opcode if it is not a constant load.
+		if p.From.Type != obj.TYPE_CONST || p.From.Name != obj.NAME_NONE || p.From.Reg != 0 {
+			break
+		}
+
+		// 32b constants (signed and unsigned) can be generated via 1 or 2 instructions. They can be assembled directly.
 		isS32 := int64(int32(p.From.Offset)) == p.From.Offset
 		isU32 := uint64(uint32(p.From.Offset)) == uint64(p.From.Offset)
-		if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && !isS32 && !isU32 {
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = ctxt.Int64Sym(p.From.Offset)
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
+		// If prefixed instructions are supported, a 34b signed constant can be generated by one pli instruction.
+		isS34 := pfxEnabled && (p.From.Offset<<30)>>30 == p.From.Offset
+
+		// Try converting MOVD $const,Rx into ADDIS/ORIS $s32>>16,R0,Rx
+		switch {
+		case isS32 && p.From.Offset&0xFFFF == 0 && p.From.Offset != 0:
+			p.As = AADDIS
+			p.From.Offset >>= 16
+			p.Reg = REG_R0
+
+		case isU32 && p.From.Offset&0xFFFF == 0 && p.From.Offset != 0:
+			p.As = AORIS
+			p.From.Offset >>= 16
+			p.Reg = REG_R0
+
+		case isS32 || isU32 || isS34:
+			// The assembler can generate this constant in 1 (on Power10) or 2 machine instructions.
+
+		// Otherwise, see if the large constant can be generated with 2 instructions. If not, load it from memory.
+		default:
+			// Is this a shifted 16b constant? If so, rewrite it to avoid creating and loading a constant.
+			val := p.From.Offset
+			shift := bits.TrailingZeros64(uint64(val))
+			mask := 0xFFFF << shift
+			if val&int64(mask) == val || (val>>(shift+16) == -1 && (val>>shift)<<shift == val) {
+				// Rewrite this value into MOVD $const>>shift, Rto; SLD $shift, Rto
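+				// For example, MOVD $0xFFFF00000, R5 becomes MOVD $0xFFFF, R5; SLD $20, R5.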
+				q := obj.Appendp(p, c.newprog)
+				q.As = ASLD
+				q.From.SetConst(int64(shift))
+				q.To = p.To
+				p.From.Offset >>= shift
+				p = q
+			} else if isPPC64DoublewordRotateMask(val) {
+				// This constant is a mask value; generate MOVD $-1, Rto; RLDIC Rto, ^me, mb, Rto
+				mb, me := encodePPC64RLDCMask(val)
+				q := obj.Appendp(p, c.newprog)
+				q.As = ARLDC
+				q.AddRestSourceConst((^int64(me)) & 0x3F)
+				q.AddRestSourceConst(int64(mb))
+				q.From = p.To
+				q.To = p.To
+				p.From.Offset = -1
+				p = q
+			} else {
+				// Load the constant from memory.
+				p.From.Type = obj.TYPE_MEM
+				p.From.Sym = ctxt.Int64Sym(p.From.Offset)
+				p.From.Name = obj.NAME_EXTERN
+				p.From.Offset = 0
+			}
 		}
 	}
 
@@ -112,6 +208,29 @@
 			p.As = AADD
 		}
 
+	// Rewrite ADD/OR/XOR/ANDCC $const,... forms into ADDIS/ORIS/XORIS/ANDISCC
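+	// For example, ADD $0x44440000, R5 becomes ADDIS $0x4444, R5.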
+	case AADD:
+		// AADD can encode signed 34b values; ensure the value is also a valid signed 32b integer.
+		if p.From.Type == obj.TYPE_CONST && p.From.Offset&0xFFFF == 0 && int64(int32(p.From.Offset)) == p.From.Offset && p.From.Offset != 0 {
+			p.As = AADDIS
+			p.From.Offset >>= 16
+		}
+	case AOR:
+		if p.From.Type == obj.TYPE_CONST && uint64(p.From.Offset)&0xFFFFFFFF0000FFFF == 0 && p.From.Offset != 0 {
+			p.As = AORIS
+			p.From.Offset >>= 16
+		}
+	case AXOR:
+		if p.From.Type == obj.TYPE_CONST && uint64(p.From.Offset)&0xFFFFFFFF0000FFFF == 0 && p.From.Offset != 0 {
+			p.As = AXORIS
+			p.From.Offset >>= 16
+		}
+	case AANDCC:
+		if p.From.Type == obj.TYPE_CONST && uint64(p.From.Offset)&0xFFFFFFFF0000FFFF == 0 && p.From.Offset != 0 {
+			p.As = AANDISCC
+			p.From.Offset >>= 16
+		}
+
 	// To maintain backwards compatibility, we accept some 4 argument usage of
 	// several opcodes which was likely not intended, but did work. These are not
 	// added to optab to avoid the chance this behavior might be used with newer
@@ -1363,11 +1482,12 @@
 		p.To.Reg = REG_R2
 	}
 
+	// The instructions which unspill regs should be preemptible.
+	p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
 	unspill := c.cursym.Func().UnspillRegisterArgs(p, c.newprog)
-	p = c.ctxt.EndUnsafePoint(unspill, c.newprog, -1)
 
 	// BR	start
-	p = obj.Appendp(p, c.newprog)
+	p = obj.Appendp(unspill, c.newprog)
 	p.As = ABR
 	p.To.Type = obj.TYPE_BRANCH
 	p.To.SetTarget(startPred.Link)
diff --git a/src/cmd/internal/obj/riscv/asm_test.go b/src/cmd/internal/obj/riscv/asm_test.go
index c22428c..96ea230 100644
--- a/src/cmd/internal/obj/riscv/asm_test.go
+++ b/src/cmd/internal/obj/riscv/asm_test.go
@@ -9,8 +9,10 @@
 	"fmt"
 	"internal/testenv"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"runtime"
+	"strings"
 	"testing"
 )
 
@@ -126,6 +128,72 @@
 	fmt.Fprintln(buf, "RET")
 }
 
+// TestLargeJump generates a function with more than 1MB of text and a JMP to
+// the end of the function, in order to ensure that the large jump assembles correctly.
+func TestLargeJump(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping test in short mode")
+	}
+	if runtime.GOARCH != "riscv64" {
+		t.Skip("Require riscv64 to run")
+	}
+	testenv.MustHaveGoBuild(t)
+
+	dir := t.TempDir()
+
+	if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module largejump"), 0644); err != nil {
+		t.Fatalf("Failed to write file: %v\n", err)
+	}
+	main := `package main
+
+import "fmt"
+
+func main() {
+        fmt.Print(x())
+}
+
+func x() uint64
+`
+	if err := os.WriteFile(filepath.Join(dir, "x.go"), []byte(main), 0644); err != nil {
+		t.Fatalf("failed to write main: %v\n", err)
+	}
+
+	// Generate a very large jump instruction.
+	buf := bytes.NewBuffer(make([]byte, 0, 7000000))
+	genLargeJump(buf)
+
+	if err := os.WriteFile(filepath.Join(dir, "x.s"), buf.Bytes(), 0644); err != nil {
+		t.Fatalf("Failed to write file: %v\n", err)
+	}
+
+	// Build generated files.
+	cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", "x.exe")
+	cmd.Dir = dir
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Errorf("Build failed: %v, output: %s", err, out)
+	}
+
+	cmd = testenv.Command(t, filepath.Join(dir, "x.exe"))
+	out, err = cmd.CombinedOutput()
+	if string(out) != "1" {
+		t.Errorf(`Got test output %q, want "1"`, string(out))
+	}
+}
+
+func genLargeJump(buf *bytes.Buffer) {
+	fmt.Fprintln(buf, "TEXT ·x(SB),0,$0-8")
+	fmt.Fprintln(buf, "MOV  X0, X10")
+	fmt.Fprintln(buf, "JMP end")
+	for i := 0; i < 1<<18; i++ {
+		fmt.Fprintln(buf, "ADD $1, X10, X10")
+	}
+	fmt.Fprintln(buf, "end:")
+	fmt.Fprintln(buf, "ADD $1, X10, X10")
+	fmt.Fprintln(buf, "MOV X10, r+0(FP)")
+	fmt.Fprintln(buf, "RET")
+}
+
 // Issue 20348.
 func TestNoRet(t *testing.T) {
 	dir, err := os.MkdirTemp("", "testnoret")
@@ -211,3 +279,33 @@
 		t.Errorf("Branch test failed: %v\n%s", err, out)
 	}
 }
+
+func TestPCAlign(t *testing.T) {
+	dir := t.TempDir()
+	tmpfile := filepath.Join(dir, "x.s")
+	asm := `
+TEXT _stub(SB),$0-0
+	FENCE
+	PCALIGN	$8
+	FENCE
+	RET
+`
+	if err := os.WriteFile(tmpfile, []byte(asm), 0644); err != nil {
+		t.Fatal(err)
+	}
+	cmd := exec.Command(testenv.GoToolPath(t), "tool", "asm", "-o", filepath.Join(dir, "x.o"), "-S", tmpfile)
+	cmd.Env = append(os.Environ(), "GOARCH=riscv64", "GOOS=linux")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Errorf("Failed to assemble: %v\n%s", err, out)
+	}
+	// The expected instruction sequence after alignment:
+	//	FENCE
+	//	NOP
+	//	FENCE
+	//	RET
+	want := "0f 00 f0 0f 13 00 00 00 0f 00 f0 0f 67 80 00 00"
+	if !strings.Contains(string(out), want) {
+		t.Errorf("PCALIGN test failed - got %s\nwant %s", out, want)
+	}
+}
diff --git a/src/cmd/internal/obj/riscv/cpu.go b/src/cmd/internal/obj/riscv/cpu.go
index dde1231..edd1ac8 100644
--- a/src/cmd/internal/obj/riscv/cpu.go
+++ b/src/cmd/internal/obj/riscv/cpu.go
@@ -260,8 +260,13 @@
 	// corresponding *obj.Prog uses the temporary register.
 	USES_REG_TMP = 1 << iota
 
-	// NEED_CALL_RELOC is set on JAL instructions to indicate that a
-	// R_RISCV_CALL relocation is needed.
+	// NEED_JAL_RELOC is set on JAL instructions to indicate that a
+	// R_RISCV_JAL relocation is needed.
+	NEED_JAL_RELOC
+
+	// NEED_CALL_RELOC is set on an AUIPC instruction to indicate that it
+	// is the first instruction in an AUIPC + JAL pair that needs a
+	// R_RISCV_CALL relocation.
 	NEED_CALL_RELOC
 
 	// NEED_PCREL_ITYPE_RELOC is set on AUIPC instructions to indicate that
@@ -619,14 +624,26 @@
 
 // Instruction encoding masks.
 const (
-	// JTypeImmMask is a mask including only the immediate portion of
-	// J-type instructions.
-	JTypeImmMask = 0xfffff000
+	// BTypeImmMask is a mask including only the immediate portion of
+	// B-type instructions.
+	BTypeImmMask = 0xfe000f80
+
+	// CBTypeImmMask is a mask including only the immediate portion of
+	// CB-type instructions.
+	CBTypeImmMask = 0x1c7c
+
+	// CJTypeImmMask is a mask including only the immediate portion of
+	// CJ-type instructions.
+	CJTypeImmMask = 0x1f7c
 
 	// ITypeImmMask is a mask including only the immediate portion of
 	// I-type instructions.
 	ITypeImmMask = 0xfff00000
 
+	// JTypeImmMask is a mask including only the immediate portion of
+	// J-type instructions.
+	JTypeImmMask = 0xfffff000
+
 	// STypeImmMask is a mask including only the immediate portion of
 	// S-type instructions.
 	STypeImmMask = 0xfe000f80
diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index 43fa735..11d6c20 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -41,7 +41,7 @@
 	}
 
 	p.As = AJAL
-	p.Mark |= NEED_CALL_RELOC
+	p.Mark |= NEED_JAL_RELOC
 	p.From.Type = obj.TYPE_REG
 	p.From.Reg = lr
 	p.Reg = obj.REG_NONE
@@ -69,6 +69,8 @@
 		switch p.As {
 		case AADD:
 			p.As = AADDI
+		case ASUB:
+			p.As, p.From.Offset = AADDI, -p.From.Offset
 		case ASLT:
 			p.As = ASLTI
 		case ASLTU:
@@ -87,6 +89,8 @@
 			p.As = ASRAI
 		case AADDW:
 			p.As = AADDIW
+		case ASUBW:
+			p.As, p.From.Offset = AADDIW, -p.From.Offset
 		case ASLLW:
 			p.As = ASLLIW
 		case ASRLW:
@@ -304,6 +308,12 @@
 		for _, ins := range instructionsForProg(p) {
 			pc += int64(ins.length())
 		}
+
+		if p.As == obj.APCALIGN {
+			alignedValue := p.From.Offset
+			v := pcAlignPadLength(pc, alignedValue)
+			pc += int64(v)
+		}
 	}
 	return pc
 }
@@ -610,7 +620,7 @@
 	var callCount int
 	for p := cursym.Func().Text; p != nil; p = p.Link {
 		markRelocs(p)
-		if p.Mark&NEED_CALL_RELOC == NEED_CALL_RELOC {
+		if p.Mark&NEED_JAL_RELOC == NEED_JAL_RELOC {
 			callCount++
 		}
 	}
@@ -664,7 +674,7 @@
 					jmp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP}
 
 					p.As = AAUIPC
-					p.Mark = (p.Mark &^ NEED_CALL_RELOC) | NEED_PCREL_ITYPE_RELOC
+					p.Mark = (p.Mark &^ NEED_JAL_RELOC) | NEED_CALL_RELOC
 					p.AddRestSource(obj.Addr{Type: obj.TYPE_CONST, Offset: p.To.Offset, Sym: p.To.Sym})
 					p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 0}
 					p.Reg = obj.REG_NONE
@@ -727,7 +737,17 @@
 					ctxt.Diag("%v: jump displacement %d too large", p, p.To.Target().Pc-p.Pc)
 				}
 				p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high, Sym: cursym}
-				p.Link.From.Offset = low
+				p.Link.To.Offset = low
+			}
+
+		case obj.APCALIGN:
+			alignedValue := p.From.Offset
+			if (alignedValue&(alignedValue-1) != 0) || 4 > alignedValue || alignedValue > 2048 {
+				ctxt.Diag("alignment value of an instruction must be a power of two and in the range [4, 2048], got %d\n", alignedValue)
+			}
+			// Update the current text symbol alignment value.
+			if int32(alignedValue) > cursym.Func().Align {
+				cursym.Func().Align = int32(alignedValue)
 			}
 		}
 	}
@@ -740,6 +760,10 @@
 	}
 }
 
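+// pcAlignPadLength returns the number of padding bytes needed to align pc
+// to a multiple of alignedValue, which must be a power of two.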
+func pcAlignPadLength(pc int64, alignedValue int64) int {
+	return int(-pc & (alignedValue - 1))
+}
+
 func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgAlloc, framesize int64) *obj.Prog {
 	// Leaf function with no frame is effectively NOSPLIT.
 	if framesize == 0 {
@@ -913,8 +937,9 @@
 	}
 	jalToSym(ctxt, p, REG_X5)
 
-	p = cursym.Func().UnspillRegisterArgs(p, newprog)
+	// The instructions which unspill regs should be preemptible.
 	p = ctxt.EndUnsafePoint(p, newprog, -1)
+	p = cursym.Func().UnspillRegisterArgs(p, newprog)
 
 	// JMP start
 	p = obj.Appendp(p, newprog)
@@ -941,12 +966,12 @@
 // result. For example, high may be used in LUI and low in a following ADDI to
 // generate a full 32-bit constant.
 func Split32BitImmediate(imm int64) (low, high int64, err error) {
-	if !immIFits(imm, 32) {
-		return 0, 0, fmt.Errorf("immediate does not fit in 32 bits: %d", imm)
+	if err := immIFits(imm, 32); err != nil {
+		return 0, 0, err
 	}
 
 	// Nothing special needs to be done if the immediate fits in 12 bits.
-	if immIFits(imm, 12) {
+	if err := immIFits(imm, 12); err == nil {
 		return imm, 0, nil
 	}
 
@@ -973,7 +998,7 @@
 
 func regVal(r, min, max uint32) uint32 {
 	if r < min || r > max {
-		panic(fmt.Sprintf("register out of range, want %d < %d < %d", min, r, max))
+		panic(fmt.Sprintf("register out of range, want %d <= %d <= %d", min, r, max))
 	}
 	return r - min
 }
@@ -1006,171 +1031,186 @@
 	return regAddr(a, REG_F0, REG_F31)
 }
 
-// immIFits reports whether immediate value x fits in nbits bits
-// as a signed integer.
-func immIFits(x int64, nbits uint) bool {
+// immEven checks that the immediate is a multiple of two. If it
+// is not, an error is returned.
+func immEven(x int64) error {
+	if x&1 != 0 {
+		return fmt.Errorf("immediate %#x is not a multiple of two", x)
+	}
+	return nil
+}
+
+// immIFits checks whether the immediate value x fits in nbits bits
+// as a signed integer. If it does not, an error is returned.
+func immIFits(x int64, nbits uint) error {
 	nbits--
-	var min int64 = -1 << nbits
-	var max int64 = 1<<nbits - 1
-	return min <= x && x <= max
+	min := int64(-1) << nbits
+	max := int64(1)<<nbits - 1
+	if x < min || x > max {
+		if nbits <= 16 {
+			return fmt.Errorf("signed immediate %d must be in range [%d, %d] (%d bits)", x, min, max, nbits)
+		}
+		return fmt.Errorf("signed immediate %#x must be in range [%#x, %#x] (%d bits)", x, min, max, nbits)
+	}
+	return nil
 }
 
 // immI extracts the signed integer of the specified size from an immediate.
 func immI(as obj.As, imm int64, nbits uint) uint32 {
-	if !immIFits(imm, nbits) {
-		panic(fmt.Sprintf("%v: signed immediate %d cannot fit in %d bits", as, imm, nbits))
+	if err := immIFits(imm, nbits); err != nil {
+		panic(fmt.Sprintf("%v: %v", as, err))
 	}
 	return uint32(imm)
 }
 
-func wantImmI(ctxt *obj.Link, as obj.As, imm int64, nbits uint) {
-	if !immIFits(imm, nbits) {
-		ctxt.Diag("%v: signed immediate %d cannot be larger than %d bits", as, imm, nbits)
+func wantImmI(ctxt *obj.Link, ins *instruction, imm int64, nbits uint) {
+	if err := immIFits(imm, nbits); err != nil {
+		ctxt.Diag("%v: %v", ins, err)
 	}
 }
 
-func wantReg(ctxt *obj.Link, as obj.As, pos string, descr string, r, min, max uint32) {
+func wantReg(ctxt *obj.Link, ins *instruction, pos string, descr string, r, min, max uint32) {
 	if r < min || r > max {
 		var suffix string
 		if r != obj.REG_NONE {
 			suffix = fmt.Sprintf(" but got non-%s register %s", descr, RegName(int(r)))
 		}
-		ctxt.Diag("%v: expected %s register in %s position%s", as, descr, pos, suffix)
+		ctxt.Diag("%v: expected %s register in %s position%s", ins, descr, pos, suffix)
 	}
 }
 
-func wantNoneReg(ctxt *obj.Link, as obj.As, pos string, r uint32) {
+func wantNoneReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
 	if r != obj.REG_NONE {
-		ctxt.Diag("%v: expected no register in %s but got register %s", as, pos, RegName(int(r)))
+		ctxt.Diag("%v: expected no register in %s but got register %s", ins, pos, RegName(int(r)))
 	}
 }
 
 // wantIntReg checks that r is an integer register.
-func wantIntReg(ctxt *obj.Link, as obj.As, pos string, r uint32) {
-	wantReg(ctxt, as, pos, "integer", r, REG_X0, REG_X31)
+func wantIntReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
+	wantReg(ctxt, ins, pos, "integer", r, REG_X0, REG_X31)
 }
 
 // wantFloatReg checks that r is a floating-point register.
-func wantFloatReg(ctxt *obj.Link, as obj.As, pos string, r uint32) {
-	wantReg(ctxt, as, pos, "float", r, REG_F0, REG_F31)
+func wantFloatReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
+	wantReg(ctxt, ins, pos, "float", r, REG_F0, REG_F31)
 }
 
 // wantEvenOffset checks that the offset is a multiple of two.
-func wantEvenOffset(ctxt *obj.Link, as obj.As, offset int64) {
-	if offset%1 != 0 {
-		ctxt.Diag("%v: jump offset %d must be a multiple of two", as, offset)
+func wantEvenOffset(ctxt *obj.Link, ins *instruction, offset int64) {
+	if err := immEven(offset); err != nil {
+		ctxt.Diag("%v: %v", ins, err)
 	}
 }
 
 func validateRIII(ctxt *obj.Link, ins *instruction) {
-	wantIntReg(ctxt, ins.as, "rd", ins.rd)
-	wantIntReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantIntReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantIntReg(ctxt, ins, "rd", ins.rd)
+	wantIntReg(ctxt, ins, "rs1", ins.rs1)
+	wantIntReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateRFFF(ctxt *obj.Link, ins *instruction) {
-	wantFloatReg(ctxt, ins.as, "rd", ins.rd)
-	wantFloatReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantFloatReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantFloatReg(ctxt, ins, "rd", ins.rd)
+	wantFloatReg(ctxt, ins, "rs1", ins.rs1)
+	wantFloatReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateRFFFF(ctxt *obj.Link, ins *instruction) {
-	wantFloatReg(ctxt, ins.as, "rd", ins.rd)
-	wantFloatReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantFloatReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantFloatReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantFloatReg(ctxt, ins, "rd", ins.rd)
+	wantFloatReg(ctxt, ins, "rs1", ins.rs1)
+	wantFloatReg(ctxt, ins, "rs2", ins.rs2)
+	wantFloatReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateRFFI(ctxt *obj.Link, ins *instruction) {
-	wantIntReg(ctxt, ins.as, "rd", ins.rd)
-	wantFloatReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantFloatReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantIntReg(ctxt, ins, "rd", ins.rd)
+	wantFloatReg(ctxt, ins, "rs1", ins.rs1)
+	wantFloatReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateRFI(ctxt *obj.Link, ins *instruction) {
-	wantIntReg(ctxt, ins.as, "rd", ins.rd)
-	wantNoneReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantFloatReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantIntReg(ctxt, ins, "rd", ins.rd)
+	wantNoneReg(ctxt, ins, "rs1", ins.rs1)
+	wantFloatReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateRIF(ctxt *obj.Link, ins *instruction) {
-	wantFloatReg(ctxt, ins.as, "rd", ins.rd)
-	wantNoneReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantIntReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantFloatReg(ctxt, ins, "rd", ins.rd)
+	wantNoneReg(ctxt, ins, "rs1", ins.rs1)
+	wantIntReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateRFF(ctxt *obj.Link, ins *instruction) {
-	wantFloatReg(ctxt, ins.as, "rd", ins.rd)
-	wantNoneReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantFloatReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantFloatReg(ctxt, ins, "rd", ins.rd)
+	wantNoneReg(ctxt, ins, "rs1", ins.rs1)
+	wantFloatReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateII(ctxt *obj.Link, ins *instruction) {
-	wantImmI(ctxt, ins.as, ins.imm, 12)
-	wantIntReg(ctxt, ins.as, "rd", ins.rd)
-	wantIntReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantNoneReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantImmI(ctxt, ins, ins.imm, 12)
+	wantIntReg(ctxt, ins, "rd", ins.rd)
+	wantIntReg(ctxt, ins, "rs1", ins.rs1)
+	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateIF(ctxt *obj.Link, ins *instruction) {
-	wantImmI(ctxt, ins.as, ins.imm, 12)
-	wantFloatReg(ctxt, ins.as, "rd", ins.rd)
-	wantIntReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantNoneReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantImmI(ctxt, ins, ins.imm, 12)
+	wantFloatReg(ctxt, ins, "rd", ins.rd)
+	wantIntReg(ctxt, ins, "rs1", ins.rs1)
+	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateSI(ctxt *obj.Link, ins *instruction) {
-	wantImmI(ctxt, ins.as, ins.imm, 12)
-	wantIntReg(ctxt, ins.as, "rd", ins.rd)
-	wantIntReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantNoneReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantImmI(ctxt, ins, ins.imm, 12)
+	wantIntReg(ctxt, ins, "rd", ins.rd)
+	wantIntReg(ctxt, ins, "rs1", ins.rs1)
+	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateSF(ctxt *obj.Link, ins *instruction) {
-	wantImmI(ctxt, ins.as, ins.imm, 12)
-	wantIntReg(ctxt, ins.as, "rd", ins.rd)
-	wantFloatReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantNoneReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantImmI(ctxt, ins, ins.imm, 12)
+	wantIntReg(ctxt, ins, "rd", ins.rd)
+	wantFloatReg(ctxt, ins, "rs1", ins.rs1)
+	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateB(ctxt *obj.Link, ins *instruction) {
 	// Offsets are multiples of two, so accept 13 bit immediates for the
 	// 12 bit slot. We implicitly drop the least significant bit in encodeB.
-	wantEvenOffset(ctxt, ins.as, ins.imm)
-	wantImmI(ctxt, ins.as, ins.imm, 13)
-	wantNoneReg(ctxt, ins.as, "rd", ins.rd)
-	wantIntReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantIntReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantEvenOffset(ctxt, ins, ins.imm)
+	wantImmI(ctxt, ins, ins.imm, 13)
+	wantNoneReg(ctxt, ins, "rd", ins.rd)
+	wantIntReg(ctxt, ins, "rs1", ins.rs1)
+	wantIntReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateU(ctxt *obj.Link, ins *instruction) {
-	wantImmI(ctxt, ins.as, ins.imm, 20)
-	wantIntReg(ctxt, ins.as, "rd", ins.rd)
-	wantNoneReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantNoneReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantImmI(ctxt, ins, ins.imm, 20)
+	wantIntReg(ctxt, ins, "rd", ins.rd)
+	wantNoneReg(ctxt, ins, "rs1", ins.rs1)
+	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateJ(ctxt *obj.Link, ins *instruction) {
 	// Offsets are multiples of two, so accept 21 bit immediates for the
 	// 20 bit slot. We implicitly drop the least significant bit in encodeJ.
-	wantEvenOffset(ctxt, ins.as, ins.imm)
-	wantImmI(ctxt, ins.as, ins.imm, 21)
-	wantIntReg(ctxt, ins.as, "rd", ins.rd)
-	wantNoneReg(ctxt, ins.as, "rs1", ins.rs1)
-	wantNoneReg(ctxt, ins.as, "rs2", ins.rs2)
-	wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
+	wantEvenOffset(ctxt, ins, ins.imm)
+	wantImmI(ctxt, ins, ins.imm, 21)
+	wantIntReg(ctxt, ins, "rd", ins.rd)
+	wantNoneReg(ctxt, ins, "rs1", ins.rs1)
+	wantNoneReg(ctxt, ins, "rs2", ins.rs2)
+	wantNoneReg(ctxt, ins, "rs3", ins.rs3)
 }
 
 func validateRaw(ctxt *obj.Link, ins *instruction) {
@@ -1181,6 +1221,12 @@
 	}
 }
 
+// extractBitAndShift extracts the specified bit from the given immediate,
+// before shifting it to the requested position and returning it.
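+// For example, extractBitAndShift(0b1000, 3, 6) returns 0b1000000.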
+func extractBitAndShift(imm uint32, bit, pos int) uint32 {
+	return ((imm >> bit) & 1) << pos
+}
+
 // encodeR encodes an R-type RISC-V instruction.
 func encodeR(as obj.As, rs1, rs2, rd, funct3, funct7 uint32) uint32 {
 	enc := encode(as)
@@ -1272,6 +1318,11 @@
 	return encodeS(ins.as, regI(ins.rd), regF(ins.rs1), uint32(ins.imm))
 }
 
+// encodeBImmediate encodes an immediate for a B-type RISC-V instruction.
+func encodeBImmediate(imm uint32) uint32 {
+	return (imm>>12)<<31 | ((imm>>5)&0x3f)<<25 | ((imm>>1)&0xf)<<8 | ((imm>>11)&0x1)<<7
+}
+
 // encodeB encodes a B-type RISC-V instruction.
 func encodeB(ins *instruction) uint32 {
 	imm := immI(ins.as, ins.imm, 13)
@@ -1281,7 +1332,7 @@
 	if enc == nil {
 		panic("encodeB: could not encode instruction")
 	}
-	return (imm>>12)<<31 | ((imm>>5)&0x3f)<<25 | rs2<<20 | rs1<<15 | enc.funct3<<12 | ((imm>>1)&0xf)<<8 | ((imm>>11)&0x1)<<7 | enc.opcode
+	return encodeBImmediate(imm) | rs2<<20 | rs1<<15 | enc.funct3<<12 | enc.opcode
 }
 
 // encodeU encodes a U-type RISC-V instruction.
@@ -1315,6 +1366,37 @@
 	return encodeJImmediate(imm) | rd<<7 | enc.opcode
 }
 
+// encodeCBImmediate encodes an immediate for a CB-type RISC-V instruction.
+func encodeCBImmediate(imm uint32) uint32 {
+	// Bit order - [8|4:3|7:6|2:1|5]
+	bits := extractBitAndShift(imm, 8, 7)
+	bits |= extractBitAndShift(imm, 4, 6)
+	bits |= extractBitAndShift(imm, 3, 5)
+	bits |= extractBitAndShift(imm, 7, 4)
+	bits |= extractBitAndShift(imm, 6, 3)
+	bits |= extractBitAndShift(imm, 2, 2)
+	bits |= extractBitAndShift(imm, 1, 1)
+	bits |= extractBitAndShift(imm, 5, 0)
+	return (bits>>5)<<10 | (bits&0x1f)<<2
+}
+
+// encodeCJImmediate encodes an immediate for a CJ-type RISC-V instruction.
+func encodeCJImmediate(imm uint32) uint32 {
+	// Bit order - [11|4|9:8|10|6|7|3:1|5]
+	bits := extractBitAndShift(imm, 11, 10)
+	bits |= extractBitAndShift(imm, 4, 9)
+	bits |= extractBitAndShift(imm, 9, 8)
+	bits |= extractBitAndShift(imm, 8, 7)
+	bits |= extractBitAndShift(imm, 10, 6)
+	bits |= extractBitAndShift(imm, 6, 5)
+	bits |= extractBitAndShift(imm, 7, 4)
+	bits |= extractBitAndShift(imm, 3, 3)
+	bits |= extractBitAndShift(imm, 2, 2)
+	bits |= extractBitAndShift(imm, 1, 1)
+	bits |= extractBitAndShift(imm, 5, 0)
+	return bits << 2
+}
+
 func encodeRawIns(ins *instruction) uint32 {
 	// Treat the raw value specially as a 32-bit unsigned integer.
 	// Nobody wants to enter negative machine code.
@@ -1324,33 +1406,63 @@
 	return uint32(ins.imm)
 }
 
-func EncodeJImmediate(imm int64) (int64, error) {
-	if !immIFits(imm, 21) {
-		return 0, fmt.Errorf("immediate %#x does not fit in 21 bits", imm)
+func EncodeBImmediate(imm int64) (int64, error) {
+	if err := immIFits(imm, 13); err != nil {
+		return 0, err
 	}
-	if imm&1 != 0 {
-		return 0, fmt.Errorf("immediate %#x is not a multiple of two", imm)
+	if err := immEven(imm); err != nil {
+		return 0, err
 	}
-	return int64(encodeJImmediate(uint32(imm))), nil
+	return int64(encodeBImmediate(uint32(imm))), nil
+}
+
+func EncodeCBImmediate(imm int64) (int64, error) {
+	if err := immIFits(imm, 9); err != nil {
+		return 0, err
+	}
+	if err := immEven(imm); err != nil {
+		return 0, err
+	}
+	return int64(encodeCBImmediate(uint32(imm))), nil
+}
+
+func EncodeCJImmediate(imm int64) (int64, error) {
+	if err := immIFits(imm, 12); err != nil {
+		return 0, err
+	}
+	if err := immEven(imm); err != nil {
+		return 0, err
+	}
+	return int64(encodeCJImmediate(uint32(imm))), nil
 }
 
 func EncodeIImmediate(imm int64) (int64, error) {
-	if !immIFits(imm, 12) {
-		return 0, fmt.Errorf("immediate %#x does not fit in 12 bits", imm)
+	if err := immIFits(imm, 12); err != nil {
+		return 0, err
 	}
 	return imm << 20, nil
 }
 
+func EncodeJImmediate(imm int64) (int64, error) {
+	if err := immIFits(imm, 21); err != nil {
+		return 0, err
+	}
+	if err := immEven(imm); err != nil {
+		return 0, err
+	}
+	return int64(encodeJImmediate(uint32(imm))), nil
+}
+
 func EncodeSImmediate(imm int64) (int64, error) {
-	if !immIFits(imm, 12) {
-		return 0, fmt.Errorf("immediate %#x does not fit in 12 bits", imm)
+	if err := immIFits(imm, 12); err != nil {
+		return 0, err
 	}
 	return ((imm >> 5) << 25) | ((imm & 0x1f) << 7), nil
 }
 
 func EncodeUImmediate(imm int64) (int64, error) {
-	if !immIFits(imm, 20) {
-		return 0, fmt.Errorf("immediate %#x does not fit in 20 bits", imm)
+	if err := immIFits(imm, 20); err != nil {
+		return 0, err
 	}
 	return imm << 12, nil
 }
@@ -1616,6 +1728,7 @@
 	obj.ANOP:      pseudoOpEncoding,
 	obj.ADUFFZERO: pseudoOpEncoding,
 	obj.ADUFFCOPY: pseudoOpEncoding,
+	obj.APCALIGN:  pseudoOpEncoding,
 }
 
 // encodingForAs returns the encoding for an obj.As.
@@ -1635,14 +1748,26 @@
 }
 
 type instruction struct {
-	as     obj.As // Assembler opcode
-	rd     uint32 // Destination register
-	rs1    uint32 // Source register 1
-	rs2    uint32 // Source register 2
-	rs3    uint32 // Source register 3
-	imm    int64  // Immediate
-	funct3 uint32 // Function 3
-	funct7 uint32 // Function 7 (or Function 2)
+	p      *obj.Prog // Prog that instruction is for
+	as     obj.As    // Assembler opcode
+	rd     uint32    // Destination register
+	rs1    uint32    // Source register 1
+	rs2    uint32    // Source register 2
+	rs3    uint32    // Source register 3
+	imm    int64     // Immediate
+	funct3 uint32    // Function 3
+	funct7 uint32    // Function 7 (or Function 2)
+}
+
+func (ins *instruction) String() string {
+	if ins.p == nil {
+		return ins.as.String()
+	}
+	var suffix string
+	if ins.p.As != ins.as {
+		suffix = fmt.Sprintf(" (%v)", ins.as)
+	}
+	return fmt.Sprintf("%v%v", ins.p, suffix)
 }
 
 func (ins *instruction) encode() (uint32, error) {
@@ -1650,10 +1775,10 @@
 	if err != nil {
 		return 0, err
 	}
-	if enc.length > 0 {
-		return enc.encode(ins), nil
+	if enc.length <= 0 {
+		return 0, fmt.Errorf("%v: encoding called for a pseudo instruction", ins.as)
 	}
-	return 0, fmt.Errorf("fixme")
+	return enc.encode(ins), nil
 }
 
 func (ins *instruction) length() int {
@@ -1827,6 +1952,53 @@
 	return []*instruction{insLUI, insADD, ins}
 }
 
+func instructionsForTLS(p *obj.Prog, ins *instruction) []*instruction {
+	insAddTP := &instruction{as: AADD, rd: REG_TMP, rs1: REG_TMP, rs2: REG_TP}
+
+	var inss []*instruction
+	if p.Ctxt.Flag_shared {
+		// TLS initial-exec mode - load TLS offset from GOT, add the thread pointer
+		// register, then load from or store to the resulting memory location.
+		insAUIPC := &instruction{as: AAUIPC, rd: REG_TMP}
+		insLoadTLSOffset := &instruction{as: ALD, rd: REG_TMP, rs1: REG_TMP}
+		inss = []*instruction{insAUIPC, insLoadTLSOffset, insAddTP, ins}
+	} else {
+		// TLS local-exec mode - load upper TLS offset, add the lower TLS offset,
+		// add the thread pointer register, then load from or store to the resulting
+		// memory location. Note that this differs from the suggested three
+		// instruction sequence, as the Go linker does not currently have an
+		// easy way to handle relocation across 12 bytes of machine code.
+		insLUI := &instruction{as: ALUI, rd: REG_TMP}
+		insADDIW := &instruction{as: AADDIW, rd: REG_TMP, rs1: REG_TMP}
+		inss = []*instruction{insLUI, insADDIW, insAddTP, ins}
+	}
+	return inss
+}
+
+func instructionsForTLSLoad(p *obj.Prog) []*instruction {
+	if p.From.Sym.Type != objabi.STLSBSS {
+		p.Ctxt.Diag("%v: %v is not a TLS symbol", p, p.From.Sym)
+		return nil
+	}
+
+	ins := instructionForProg(p)
+	ins.as, ins.rs1, ins.rs2, ins.imm = movToLoad(p.As), REG_TMP, obj.REG_NONE, 0
+
+	return instructionsForTLS(p, ins)
+}
+
+func instructionsForTLSStore(p *obj.Prog) []*instruction {
+	if p.To.Sym.Type != objabi.STLSBSS {
+		p.Ctxt.Diag("%v: %v is not a TLS symbol", p, p.To.Sym)
+		return nil
+	}
+
+	ins := instructionForProg(p)
+	ins.as, ins.rd, ins.rs1, ins.rs2, ins.imm = movToStore(p.As), REG_TMP, uint32(p.From.Reg), obj.REG_NONE, 0
+
+	return instructionsForTLS(p, ins)
+}
+
 // instructionsForMOV returns the machine instructions for an *obj.Prog that
 // uses a MOV pseudo-instruction.
 func instructionsForMOV(p *obj.Prog) []*instruction {
@@ -1855,9 +2027,9 @@
 		// 	MOV $1, X10
 		// 	SLLI $63, X10, X10
 		var insSLLI *instruction
-		if !immIFits(ins.imm, 32) {
+		if err := immIFits(ins.imm, 32); err != nil {
 			ctz := bits.TrailingZeros64(uint64(ins.imm))
-			if immIFits(ins.imm>>ctz, 32) {
+			if err := immIFits(ins.imm>>ctz, 32); err == nil {
 				ins.imm = ins.imm >> ctz
 				insSLLI = &instruction{as: ASLLI, rd: ins.rd, rs1: ins.rd, imm: int64(ctz)}
 			}
@@ -1873,20 +2045,15 @@
 		ins.as, ins.rs1, ins.rs2, ins.imm = AADDI, REG_ZERO, obj.REG_NONE, low
 
 		// LUI is only necessary if the constant does not fit in 12 bits.
-		if high == 0 {
-			if insSLLI != nil {
-				inss = append(inss, insSLLI)
+		if high != 0 {
+			// LUI top20bits(c), R
+			// ADD bottom12bits(c), R, R
+			insLUI := &instruction{as: ALUI, rd: ins.rd, imm: high}
+			inss = []*instruction{insLUI}
+			if low != 0 {
+				ins.as, ins.rs1 = AADDIW, ins.rd
+				inss = append(inss, ins)
 			}
-			break
-		}
-
-		// LUI top20bits(c), R
-		// ADD bottom12bits(c), R, R
-		insLUI := &instruction{as: ALUI, rd: ins.rd, imm: high}
-		inss = []*instruction{insLUI}
-		if low != 0 {
-			ins.as, ins.rs1 = AADDIW, ins.rd
-			inss = append(inss, ins)
 		}
 		if insSLLI != nil {
 			inss = append(inss, insSLLI)
@@ -1939,6 +2106,10 @@
 			inss = instructionsForLoad(p, movToLoad(p.As), addrToReg(p.From))
 
 		case obj.NAME_EXTERN, obj.NAME_STATIC:
+			if p.From.Sym.Type == objabi.STLSBSS {
+				return instructionsForTLSLoad(p)
+			}
+
 			// Note that the values for $off_hi and $off_lo are currently
 			// zero and will be assigned during relocation.
 			//
@@ -1966,6 +2137,10 @@
 			inss = instructionsForStore(p, movToStore(p.As), addrToReg(p.To))
 
 		case obj.NAME_EXTERN, obj.NAME_STATIC:
+			if p.To.Sym.Type == objabi.STLSBSS {
+				return instructionsForTLSStore(p)
+			}
+
 			// Note that the values for $off_hi and $off_lo are currently
 			// zero and will be assigned during relocation.
 			//
@@ -2058,13 +2233,13 @@
 		ins.imm = p.To.Offset
 
 	case AMOV, AMOVB, AMOVH, AMOVW, AMOVBU, AMOVHU, AMOVWU, AMOVF, AMOVD:
-		return instructionsForMOV(p)
+		inss = instructionsForMOV(p)
 
 	case ALW, ALWU, ALH, ALHU, ALB, ALBU, ALD, AFLW, AFLD:
-		return instructionsForLoad(p, ins.as, p.From.Reg)
+		inss = instructionsForLoad(p, ins.as, p.From.Reg)
 
 	case ASW, ASH, ASB, ASD, AFSW, AFSD:
-		return instructionsForStore(p, ins.as, p.To.Reg)
+		inss = instructionsForStore(p, ins.as, p.To.Reg)
 
 	case ALRW, ALRD:
 		// Set aq to use acquire access ordering
@@ -2104,7 +2279,7 @@
 	case AFNES, AFNED:
 		// Replace FNE[SD] with FEQ[SD] and NOT.
 		if p.To.Type != obj.TYPE_REG {
-			p.Ctxt.Diag("%v needs an integer register output", ins.as)
+			p.Ctxt.Diag("%v needs an integer register output", p)
 			return nil
 		}
 		if ins.as == AFNES {
@@ -2193,6 +2368,11 @@
 			p.Ctxt.Diag("%v: shift amount out of range 0 to 31", p)
 		}
 	}
+
+	for _, ins := range inss {
+		ins.p = p
+	}
+
 	return inss
 }
 
@@ -2204,16 +2384,22 @@
 		ctxt.Retpoline = false // don't keep printing
 	}
 
+	// If errors were encountered during preprocess/validation, proceeding
+	// and attempting to encode said instructions will only lead to panics.
+	if ctxt.Errors > 0 {
+		return
+	}
+
 	for p := cursym.Func().Text; p != nil; p = p.Link {
 		switch p.As {
 		case AJAL:
-			if p.Mark&NEED_CALL_RELOC == NEED_CALL_RELOC {
+			if p.Mark&NEED_JAL_RELOC == NEED_JAL_RELOC {
 				rel := obj.Addrel(cursym)
 				rel.Off = int32(p.Pc)
 				rel.Siz = 4
 				rel.Sym = p.To.Sym
 				rel.Add = p.To.Offset
-				rel.Type = objabi.R_RISCV_CALL
+				rel.Type = objabi.R_RISCV_JAL
 			}
 		case AJALR:
 			if p.To.Sym != nil {
@@ -2223,7 +2409,10 @@
 		case AAUIPC, AMOV, AMOVB, AMOVH, AMOVW, AMOVBU, AMOVHU, AMOVWU, AMOVF, AMOVD:
 			var addr *obj.Addr
 			var rt objabi.RelocType
-			if p.Mark&NEED_PCREL_ITYPE_RELOC == NEED_PCREL_ITYPE_RELOC {
+			if p.Mark&NEED_CALL_RELOC == NEED_CALL_RELOC {
+				rt = objabi.R_RISCV_CALL
+				addr = &p.From
+			} else if p.Mark&NEED_PCREL_ITYPE_RELOC == NEED_PCREL_ITYPE_RELOC {
 				rt = objabi.R_RISCV_PCREL_ITYPE
 				addr = &p.From
 			} else if p.Mark&NEED_PCREL_STYPE_RELOC == NEED_PCREL_STYPE_RELOC {
@@ -2244,10 +2433,10 @@
 				break
 			}
 			if addr.Sym.Type == objabi.STLSBSS {
-				if rt == objabi.R_RISCV_PCREL_ITYPE {
-					rt = objabi.R_RISCV_TLS_IE_ITYPE
-				} else if rt == objabi.R_RISCV_PCREL_STYPE {
-					rt = objabi.R_RISCV_TLS_IE_STYPE
+				if ctxt.Flag_shared {
+					rt = objabi.R_RISCV_TLS_IE
+				} else {
+					rt = objabi.R_RISCV_TLS_LE
 				}
 			}
 
@@ -2257,6 +2446,17 @@
 			rel.Sym = addr.Sym
 			rel.Add = addr.Offset
 			rel.Type = rt
+
+		case obj.APCALIGN:
+			alignedValue := p.From.Offset
+			v := pcAlignPadLength(p.Pc, alignedValue)
+			offset := p.Pc
+			for ; v >= 4; v -= 4 {
+				// NOP
+				cursym.WriteBytes(ctxt, offset, []byte{0x13, 0, 0, 0})
+				offset += 4
+			}
+			continue
 		}
 
 		offset := p.Pc
diff --git a/src/cmd/internal/obj/s390x/a.out.go b/src/cmd/internal/obj/s390x/a.out.go
index cdfb6dd..1c86fe1 100644
--- a/src/cmd/internal/obj/s390x/a.out.go
+++ b/src/cmd/internal/obj/s390x/a.out.go
@@ -480,6 +480,15 @@
 	// macros
 	ACLEAR
 
+	// crypto
+	AKM
+	AKMC
+	AKLMD
+	AKIMD
+	AKDSA
+	AKMA
+	AKMCTR
+
 	// vector
 	AVA
 	AVAB
diff --git a/src/cmd/internal/obj/s390x/anames.go b/src/cmd/internal/obj/s390x/anames.go
index 3af15a5..fa23984 100644
--- a/src/cmd/internal/obj/s390x/anames.go
+++ b/src/cmd/internal/obj/s390x/anames.go
@@ -207,6 +207,13 @@
 	"STCKE",
 	"STCKF",
 	"CLEAR",
+	"KM",
+	"KMC",
+	"KLMD",
+	"KIMD",
+	"KDSA",
+	"KMA",
+	"KMCTR",
 	"VA",
 	"VAB",
 	"VAH",
diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go
index d9f7606..7b560e0 100644
--- a/src/cmd/internal/obj/s390x/asmz.go
+++ b/src/cmd/internal/obj/s390x/asmz.go
@@ -339,6 +339,17 @@
 	// 2 byte no-operation
 	{i: 66, as: ANOPH},
 
+	// crypto instructions
+
+	// KM
+	{i: 124, as: AKM, a1: C_REG, a6: C_REG},
+
+	// KDSA
+	{i: 125, as: AKDSA, a1: C_REG, a6: C_REG},
+
+	// KMA
+	{i: 126, as: AKMA, a1: C_REG, a2: C_REG, a6: C_REG},
+
 	// vector instructions
 
 	// VRX store
@@ -680,7 +691,7 @@
 			if c.instoffset <= 0xffff {
 				return C_ANDCON
 			}
-			if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
+			if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && ((instoffset & (1<<31)) == 0) */
 				return C_UCON
 			}
 			if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
@@ -1480,6 +1491,12 @@
 			opset(AVFMSDB, r)
 			opset(AWFMSDB, r)
 			opset(AVPERM, r)
+		case AKM:
+			opset(AKMC, r)
+			opset(AKLMD, r)
+			opset(AKIMD, r)
+		case AKMA:
+			opset(AKMCTR, r)
 		}
 	}
 }
@@ -1884,6 +1901,7 @@
 	op_KM      uint32 = 0xB92E // FORMAT_RRE        CIPHER MESSAGE
 	op_KMAC    uint32 = 0xB91E // FORMAT_RRE        COMPUTE MESSAGE AUTHENTICATION CODE
 	op_KMC     uint32 = 0xB92F // FORMAT_RRE        CIPHER MESSAGE WITH CHAINING
+	op_KMA     uint32 = 0xB929 // FORMAT_RRF2       CIPHER MESSAGE WITH AUTHENTICATION
 	op_KMCTR   uint32 = 0xB92D // FORMAT_RRF2       CIPHER MESSAGE WITH COUNTER
 	op_KMF     uint32 = 0xB92A // FORMAT_RRE        CIPHER MESSAGE WITH CFB
 	op_KMO     uint32 = 0xB92B // FORMAT_RRE        CIPHER MESSAGE WITH OFB
@@ -2629,6 +2647,10 @@
 	op_VUPLL  uint32 = 0xE7D4 // 	VRR-a	VECTOR UNPACK LOGICAL LOW
 	op_VUPL   uint32 = 0xE7D6 // 	VRR-a	VECTOR UNPACK LOW
 	op_VMSL   uint32 = 0xE7B8 // 	VRR-d	VECTOR MULTIPLY SUM LOGICAL
+
+	// added in z15
+	op_KDSA uint32 = 0xB93A // FORMAT_RRE        COMPUTE DIGITAL SIGNATURE AUTHENTICATION (KDSA)
+
 )
 
 func oclass(a *obj.Addr) int {
@@ -4366,6 +4388,83 @@
 		op, _, _ := vop(p.As)
 		m4 := c.regoff(&p.From)
 		zVRRc(op, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), 0, 0, uint32(m4), asm)
+
+	case 124:
+		var opcode uint32
+		switch p.As {
+		default:
+			c.ctxt.Diag("unexpected opcode %v", p.As)
+		case AKM, AKMC, AKLMD:
+			if p.From.Reg == REG_R0 {
+				c.ctxt.Diag("input must not be R0 in %v", p)
+			}
+			if p.From.Reg&1 != 0 {
+				c.ctxt.Diag("input must be even register in %v", p)
+			}
+			if p.To.Reg == REG_R0 {
+				c.ctxt.Diag("second argument must not be R0 in %v", p)
+			}
+			if p.To.Reg&1 != 0 {
+				c.ctxt.Diag("second argument must be even register in %v", p)
+			}
+			if p.As == AKM {
+				opcode = op_KM
+			} else if p.As == AKMC {
+				opcode = op_KMC
+			} else {
+				opcode = op_KLMD
+			}
+		case AKIMD:
+			if p.To.Reg == REG_R0 {
+				c.ctxt.Diag("second argument must not be R0 in %v", p)
+			}
+			if p.To.Reg&1 != 0 {
+				c.ctxt.Diag("second argument must be even register in %v", p)
+			}
+			opcode = op_KIMD
+		}
+		zRRE(opcode, uint32(p.From.Reg), uint32(p.To.Reg), asm)
+
+	case 125: // KDSA sign and verify
+		if p.To.Reg == REG_R0 {
+			c.ctxt.Diag("second argument must not be R0 in %v", p)
+		}
+		if p.To.Reg&1 != 0 {
+			c.ctxt.Diag("second argument must be an even register in %v", p)
+		}
+		zRRE(op_KDSA, uint32(p.From.Reg), uint32(p.To.Reg), asm)
+
+	case 126: // KMA and KMCTR - CIPHER MESSAGE WITH AUTHENTICATION; CIPHER MESSAGE WITH COUNTER
+		var opcode uint32
+		switch p.As {
+		default:
+			c.ctxt.Diag("unexpected opcode %v", p.As)
+		case AKMA, AKMCTR:
+			if p.From.Reg == REG_R0 {
+				c.ctxt.Diag("input argument must not be R0 in %v", p)
+			}
+			if p.From.Reg&1 != 0 {
+				c.ctxt.Diag("input argument must be even register in %v", p)
+			}
+			if p.To.Reg == REG_R0 {
+				c.ctxt.Diag("output argument must not be R0 in %v", p)
+			}
+			if p.To.Reg&1 != 0 {
+				c.ctxt.Diag("output argument must be an even register in %v", p)
+			}
+			if p.Reg == REG_R0 {
+				c.ctxt.Diag("third argument must not be R0 in %v", p)
+			}
+			if p.Reg&1 != 0 {
+				c.ctxt.Diag("third argument must be even register in %v", p)
+			}
+			if p.As == AKMA {
+				opcode = op_KMA
+			} else if p.As == AKMCTR {
+				opcode = op_KMCTR
+			}
+		}
+		zRRF(opcode, uint32(p.Reg), 0, uint32(p.From.Reg), uint32(p.To.Reg), asm)
 	}
 }
 
diff --git a/src/cmd/internal/obj/stringer.go b/src/cmd/internal/obj/stringer.go
index a4d507d..b2b0df8 100644
--- a/src/cmd/internal/obj/stringer.go
+++ b/src/cmd/internal/obj/stringer.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ignore
-// +build ignore
 
 // This is a mini version of the stringer tool customized for the Anames table
 // in the architecture support for obj.
diff --git a/src/cmd/internal/obj/sym.go b/src/cmd/internal/obj/sym.go
index 2b885f6..f27d4ef 100644
--- a/src/cmd/internal/obj/sym.go
+++ b/src/cmd/internal/obj/sym.go
@@ -36,6 +36,7 @@
 	"cmd/internal/notsha256"
 	"cmd/internal/objabi"
 	"encoding/base64"
+	"encoding/binary"
 	"fmt"
 	"internal/buildcfg"
 	"log"
@@ -162,6 +163,18 @@
 	})
 }
 
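+// Int32Sym returns a read-only, content-addressable symbol containing the
+// 4-byte constant i.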
+func (ctxt *Link) Int32Sym(i int64) *LSym {
+	name := fmt.Sprintf("$i32.%08x", uint64(i))
+	return ctxt.LookupInit(name, func(s *LSym) {
+		s.Size = 4
+		s.WriteInt(ctxt, 0, 4, i)
+		s.Type = objabi.SRODATA
+		s.Set(AttrLocal, true)
+		s.Set(AttrContentAddressable, true)
+		ctxt.constSyms = append(ctxt.constSyms, s)
+	})
+}
+
 func (ctxt *Link) Int64Sym(i int64) *LSym {
 	name := fmt.Sprintf("$i64.%016x", uint64(i))
 	return ctxt.LookupInit(name, func(s *LSym) {
@@ -174,6 +187,24 @@
 	})
 }
 
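+// Int128Sym returns a read-only, content-addressable symbol containing the
+// 16-byte constant formed from the halves hi and lo, stored in the target
+// byte order.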
+func (ctxt *Link) Int128Sym(hi, lo int64) *LSym {
+	name := fmt.Sprintf("$i128.%016x%016x", uint64(hi), uint64(lo))
+	return ctxt.LookupInit(name, func(s *LSym) {
+		s.Size = 16
+		if ctxt.Arch.ByteOrder == binary.LittleEndian {
+			s.WriteInt(ctxt, 0, 8, lo)
+			s.WriteInt(ctxt, 8, 8, hi)
+		} else {
+			s.WriteInt(ctxt, 0, 8, hi)
+			s.WriteInt(ctxt, 8, 8, lo)
+		}
+		s.Type = objabi.SRODATA
+		s.Set(AttrLocal, true)
+		s.Set(AttrContentAddressable, true)
+		ctxt.constSyms = append(ctxt.constSyms, s)
+	})
+}
+
 // GCLocalsSym generates a content-addressable sym containing data.
 func (ctxt *Link) GCLocalsSym(data []byte) *LSym {
 	sum := notsha256.Sum256(data)
@@ -188,6 +219,10 @@
 // asm is set to true if this is called by the assembler (i.e. not the compiler),
 // in which case all the symbols are non-package (for now).
 func (ctxt *Link) NumberSyms() {
+	if ctxt.Pkgpath == "" {
+		panic("NumberSyms called without package path")
+	}
+
 	if ctxt.Headtype == objabi.Haix {
 		// Data must be in a reliable order for reproducible builds.
 		// The original entries are in a reliable order, but the TOC symbols
@@ -218,9 +253,7 @@
 
 	var idx, hashedidx, hashed64idx, nonpkgidx int32
 	ctxt.traverseSyms(traverseDefs|traversePcdata, func(s *LSym) {
-		// if Pkgpath is unknown, cannot hash symbols with relocations, as it
-		// may reference named symbols whose names are not fully expanded.
-		if s.ContentAddressable() && (ctxt.Pkgpath != "" || len(s.R) == 0) {
+		if s.ContentAddressable() {
 			if s.Size <= 8 && len(s.R) == 0 && contentHashSection(s) == 0 {
 				// We can use short hash only for symbols without relocations.
 				// Don't use short hash for symbols that belong in a particular section
@@ -416,10 +449,6 @@
 		if call.Func != nil {
 			fn(fsym, call.Func)
 		}
-		f, _ := ctxt.getFileSymbolAndLine(call.Pos)
-		if filesym := ctxt.Lookup(f); filesym != nil {
-			fn(fsym, filesym)
-		}
 	}
 
 	auxsyms := []*LSym{fninfo.dwarfRangesSym, fninfo.dwarfLocSym, fninfo.dwarfDebugLinesSym, fninfo.dwarfInfoSym, fninfo.WasmImportSym, fninfo.sehUnwindInfoSym}
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index 718da6a..bdd75b4 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -301,7 +301,7 @@
 	Py   = 0x80 // defaults to 64-bit mode
 	Py1  = 0x81 // symbolic; exact value doesn't matter
 	Py3  = 0x83 // symbolic; exact value doesn't matter
-	Pavx = 0x84 // symbolic: exact value doesn't matter
+	Pavx = 0x84 // symbolic; exact value doesn't matter
 
 	RxrEvex = 1 << 4 // AVX512 extension to REX.R/VEX.R
 	Rxw     = 1 << 3 // =1, 64-bit operand size
@@ -1978,7 +1978,7 @@
 type padJumpsCtx int32
 
 func makePjcCtx(ctxt *obj.Link) padJumpsCtx {
-	// Disable jump padding on 32 bit builds by settting
+	// Disable jump padding on 32 bit builds by setting
 	// padJumps to 0.
 	if ctxt.Arch.Family == sys.I386 {
 		return padJumpsCtx(0)
@@ -2036,6 +2036,31 @@
 	n int32     // Size of the pad
 }
 
+// addpad returns the number of padding bytes needed to align code as requested.
+// Alignment is restricted to powers of 2 between 8 and 2048 inclusive.
+//
+// pc: current offset in function, in bytes
+// a: requested alignment, in bytes
+// cursym: current function being assembled
+// returns number of bytes of padding needed
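+//
+// For example, addpad(9, 16, ctxt, cursym) returns 7.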
+func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
+	if !((a&(a-1) == 0) && 8 <= a && a <= 2048) {
+		ctxt.Diag("alignment value of an instruction must be a power of two and in the range [8, 2048], got %d\n", a)
+		return 0
+	}
+
+	// By default function alignment is 32 bytes for amd64
+	if cursym.Func().Align < int32(a) {
+		cursym.Func().Align = int32(a)
+	}
+
+	if pc&(a-1) != 0 {
+		return int(a - (pc & (a - 1)))
+	}
+
+	return 0
+}
+
 func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
 	if ctxt.Retpoline && ctxt.Arch.Family == sys.I386 {
 		ctxt.Diag("-spectre=ret not supported on 386")
@@ -2119,6 +2144,19 @@
 			c0 := c
 			c = pjc.padJump(ctxt, s, p, c)
 
+			if p.As == obj.APCALIGN {
+				aln := p.From.Offset
+				v := addpad(int64(c), aln, ctxt, s)
+				if v > 0 {
+					s.Grow(int64(c) + int64(v))
+					fillnop(s.P[c:], int(v))
+				}
+
+				c += int32(v)
+				pPrev = p
+				continue
+			}
+
 			if maxLoopPad > 0 && p.Back&branchLoopHead != 0 && c&(loopAlign-1) != 0 {
 				// pad with NOPs
 				v := -c & (loopAlign - 1)
diff --git a/src/cmd/internal/obj/x86/asm_test.go b/src/cmd/internal/obj/x86/asm_test.go
index 36c8fce..458a912 100644
--- a/src/cmd/internal/obj/x86/asm_test.go
+++ b/src/cmd/internal/obj/x86/asm_test.go
@@ -7,6 +7,10 @@
 import (
 	"cmd/internal/obj"
 	"cmd/internal/objabi"
+	"internal/testenv"
+	"os"
+	"path/filepath"
+	"regexp"
 	"testing"
 )
 
@@ -289,3 +293,50 @@
 		}
 	}
 }
+
+// TestPCALIGN verifies the correctness of the PCALIGN directive by checking
+// that the code is aligned to the requested alignment value.
+func TestPCALIGN(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+	dir := t.TempDir()
+	tmpfile := filepath.Join(dir, "test.s")
+	tmpout := filepath.Join(dir, "test.o")
+
+	var testCases = []struct {
+		name string
+		code string
+		out  string
+	}{
+		{
+			name: "8-byte alignment",
+			code: "TEXT ·foo(SB),$0-0\nMOVQ $0, AX\nPCALIGN $8\nMOVQ $1, BX\nRET\n",
+			out:  `0x0008\s00008\s\(.*\)\tMOVQ\t\$1,\sBX`,
+		},
+		{
+			name: "16-byte alignment",
+			code: "TEXT ·foo(SB),$0-0\nMOVQ $0, AX\nPCALIGN $16\nMOVQ $2, CX\nRET\n",
+			out:  `0x0010\s00016\s\(.*\)\tMOVQ\t\$2,\sCX`,
+		},
+	}
+
+	for _, test := range testCases {
+		if err := os.WriteFile(tmpfile, []byte(test.code), 0644); err != nil {
+			t.Fatal(err)
+		}
+		cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "asm", "-S", "-o", tmpout, tmpfile)
+		cmd.Env = append(os.Environ(), "GOARCH=amd64", "GOOS=linux")
+		out, err := cmd.CombinedOutput()
+		if err != nil {
+			t.Errorf("The %s build failed: %v, output: %s", test.name, err, out)
+			continue
+		}
+
+		matched, err := regexp.MatchString(test.out, string(out))
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !matched {
+			t.Errorf("The %s testing failed!\ninput: %s\noutput: %s\n", test.name, test.code, out)
+		}
+	}
+}
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index fc4007e..e6ea898 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -1226,10 +1226,11 @@
 		progedit(ctxt, callend.Link, newprog)
 	}
 
-	pcdata = cursym.Func().UnspillRegisterArgs(callend, newprog)
-	pcdata = ctxt.EndUnsafePoint(pcdata, newprog, -1)
+	// The instructions which unspill regs should be preemptible.
+	pcdata = ctxt.EndUnsafePoint(callend, newprog, -1)
+	unspill := cursym.Func().UnspillRegisterArgs(pcdata, newprog)
 
-	jmp := obj.Appendp(pcdata, newprog)
+	jmp := obj.Appendp(unspill, newprog)
 	jmp.As = obj.AJMP
 	jmp.To.Type = obj.TYPE_BRANCH
 	jmp.To.SetTarget(startPred.Link)
diff --git a/src/cmd/internal/obj/x86/seh.go b/src/cmd/internal/obj/x86/seh.go
index e7d3d57..71cdd36 100644
--- a/src/cmd/internal/obj/x86/seh.go
+++ b/src/cmd/internal/obj/x86/seh.go
@@ -97,17 +97,32 @@
 	// https://learn.microsoft.com/en-us/cpp/build/exception-handling-x64#struct-unwind_info
 
 	const (
-		UWOP_PUSH_NONVOL = 0
-		UWOP_SET_FPREG   = 3
-		SEH_REG_BP       = 5
+		UWOP_PUSH_NONVOL  = 0
+		UWOP_SET_FPREG    = 3
+		SEH_REG_BP        = 5
+		UNW_FLAG_EHANDLER = 1 << 3
 	)
 
+	var exceptionHandler *obj.LSym
+	var flags uint8
+	if s.Name == "runtime.asmcgocall_landingpad" {
+		// Most cgo calls go through runtime.asmcgocall_landingpad,
+		// so we can use it to catch exceptions from C code.
+		// TODO: use a more generic approach to identify which calls need an exception handler.
+		exceptionHandler = ctxt.Lookup("runtime.sehtramp")
+		if exceptionHandler == nil {
+			ctxt.Diag("missing runtime.sehtramp\n")
+			return
+		}
+		flags = UNW_FLAG_EHANDLER
+	}
+
 	// For now we only support operations which are encoded
 	// using a single 2-byte node, so the number of nodes
 	// is the number of operations.
 	nodes := uint8(2)
 	buf := newsehbuf(ctxt, nodes)
-	buf.write8(1)                    // Flags + version
+	buf.write8(flags | 1)            // Flags + version
 	buf.write8(uint8(movbp.Link.Pc)) // Size of prolog
 	buf.write8(nodes)                // Count of nodes
 	buf.write8(SEH_REG_BP)           // FP register
@@ -119,8 +134,10 @@
 	buf.write8(uint8(pushbp.Link.Pc))
 	buf.writecode(UWOP_PUSH_NONVOL, SEH_REG_BP)
 
-	// The following 4 bytes reference the RVA of the exception handler,
-	// in case the function has one. We don't use it for now.
+	// The following 4 bytes reference the RVA of the exception handler.
+	// The value is set to 0 for now; if an exception handler is needed,
+	// it will be updated later with an R_PEIMAGEOFF relocation to the
+	// exception handler.
 	buf.write32(0)
 
 	// The list of unwind infos in a PE binary have very low cardinality
@@ -134,6 +151,13 @@
 		s.Type = objabi.SSEHUNWINDINFO
 		s.Set(obj.AttrDuplicateOK, true)
 		s.Set(obj.AttrLocal, true)
+		if exceptionHandler != nil {
+			r := obj.Addrel(s)
+			r.Off = int32(len(buf.data) - 4)
+			r.Siz = 4
+			r.Sym = exceptionHandler
+			r.Type = objabi.R_PEIMAGEOFF
+		}
 		// Note: AttrContentAddressable cannot be set here,
 		// because the content-addressable-handling code
 		// does not know about aux symbols.
diff --git a/src/cmd/internal/objabi/flag.go b/src/cmd/internal/objabi/flag.go
index 847ed48..ee7d2fe 100644
--- a/src/cmd/internal/objabi/flag.go
+++ b/src/cmd/internal/objabi/flag.go
@@ -7,6 +7,7 @@
 import (
 	"flag"
 	"fmt"
+	"internal/bisect"
 	"internal/buildcfg"
 	"io"
 	"log"
@@ -262,8 +263,8 @@
 
 		switch ptr.(type) {
 		default:
-			panic(fmt.Sprintf("debug.%s has invalid type %v (must be int or string)", f.Name, f.Type))
-		case *int, *string:
+			panic(fmt.Sprintf("debug.%s has invalid type %v (must be int, string, or *bisect.Matcher)", f.Name, f.Type))
+		case *int, *string, **bisect.Matcher:
 			// ok
 		}
 		flag.tab[name] = debugField{name, help, concurrent == "ok", ptr}
@@ -328,6 +329,12 @@
 					log.Fatalf("invalid debug value %v", name)
 				}
 				*vp = val
+			case **bisect.Matcher:
+				var err error
+				*vp, err = bisect.New(valstring)
+				if err != nil {
+					log.Fatalf("debug flag %v: %v", name, err)
+				}
 			default:
 				panic("bad debugtab type")
 			}
diff --git a/src/cmd/internal/objabi/funcid.go b/src/cmd/internal/objabi/funcid.go
index afe9deb..d9b47f1 100644
--- a/src/cmd/internal/objabi/funcid.go
+++ b/src/cmd/internal/objabi/funcid.go
@@ -14,6 +14,7 @@
 	"asmcgocall":         abi.FuncID_asmcgocall,
 	"asyncPreempt":       abi.FuncID_asyncPreempt,
 	"cgocallback":        abi.FuncID_cgocallback,
+	"corostart":          abi.FuncID_corostart,
 	"debugCallV2":        abi.FuncID_debugCallV2,
 	"gcBgMarkWorker":     abi.FuncID_gcBgMarkWorker,
 	"rt0_go":             abi.FuncID_rt0_go,
@@ -32,9 +33,7 @@
 	"systemstack":        abi.FuncID_systemstack,
 
 	// Don't show in call stack but otherwise not special.
-	"deferreturn":       abi.FuncIDWrapper,
-	"runOpenDeferFrame": abi.FuncIDWrapper,
-	"deferCallSave":     abi.FuncIDWrapper,
+	"deferreturn": abi.FuncIDWrapper,
 }
 
 // Get the function ID for the named function in the named file.
diff --git a/src/cmd/internal/objabi/path.go b/src/cmd/internal/objabi/path.go
index aacab9a..30301b1 100644
--- a/src/cmd/internal/objabi/path.go
+++ b/src/cmd/internal/objabi/path.go
@@ -4,7 +4,11 @@
 
 package objabi
 
-import "strings"
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
 
 // PathToPrefix converts raw string to the prefix that will be used in the
 // symbol table. All control characters, space, '%' and '"', as well as
@@ -40,28 +44,35 @@
 	return string(p)
 }
 
-// IsRuntimePackagePath examines 'pkgpath' and returns TRUE if it
-// belongs to the collection of "runtime-related" packages, including
-// "runtime" itself, "reflect", "syscall", and the
-// "runtime/internal/*" packages. The compiler and/or assembler in
-// some cases need to be aware of when they are building such a
-// package, for example to enable features such as ABI selectors in
-// assembly sources.
-//
-// Keep in sync with cmd/dist/build.go:IsRuntimePackagePath.
-func IsRuntimePackagePath(pkgpath string) bool {
-	rval := false
-	switch pkgpath {
-	case "runtime":
-		rval = true
-	case "reflect":
-		rval = true
-	case "syscall":
-		rval = true
-	case "internal/bytealg":
-		rval = true
-	default:
-		rval = strings.HasPrefix(pkgpath, "runtime/internal")
+// PrefixToPath is the inverse of PathToPrefix, replacing escape sequences with
+// the original character.
+func PrefixToPath(s string) (string, error) {
+	percent := strings.IndexByte(s, '%')
+	if percent == -1 {
+		return s, nil
 	}
-	return rval
+
+	p := make([]byte, 0, len(s))
+	for i := 0; i < len(s); {
+		if s[i] != '%' {
+			p = append(p, s[i])
+			i++
+			continue
+		}
+		if i+2 >= len(s) {
+			// Not enough characters remaining to be a valid escape
+			// sequence.
+			return "", fmt.Errorf("malformed prefix %q: escape sequence must contain two hex digits", s)
+		}
+
+		b, err := strconv.ParseUint(s[i+1:i+3], 16, 8)
+		if err != nil {
+			// Not a valid escape sequence.
+			return "", fmt.Errorf("malformed prefix %q: escape sequence %q must contain two hex digits", s, s[i:i+3])
+		}
+
+		p = append(p, byte(b))
+		i += 3
+	}
+	return string(p), nil
 }
diff --git a/src/cmd/internal/objabi/path_test.go b/src/cmd/internal/objabi/path_test.go
index 05d7fb4..934db3d 100644
--- a/src/cmd/internal/objabi/path_test.go
+++ b/src/cmd/internal/objabi/path_test.go
@@ -4,13 +4,18 @@
 
 package objabi
 
-import "testing"
+import (
+	"internal/testenv"
+	"os/exec"
+	"strings"
+	"testing"
+)
 
-func TestPathToPrefix(t *testing.T) {
-	tests := []struct {
-		Path     string
-		Expected string
-	}{{"foo/bar/v1", "foo/bar/v1"},
+var escapeTests = []struct {
+		Path    string
+		Escaped string
+	}{
+		{"foo/bar/v1", "foo/bar/v1"},
 		{"foo/bar/v.1", "foo/bar/v%2e1"},
 		{"f.o.o/b.a.r/v1", "f.o.o/b.a.r/v1"},
 		{"f.o.o/b.a.r/v.1", "f.o.o/b.a.r/v%2e1"},
@@ -25,9 +30,63 @@
 		{"%foo%bar", "%25foo%25bar"},
 		{"\x01\x00\x7F☺", "%01%00%7f%e2%98%ba"},
 	}
+
+func TestPathToPrefix(t *testing.T) {
+	for _, tc := range escapeTests {
+		if got := PathToPrefix(tc.Path); got != tc.Escaped {
+			t.Errorf("expected PathToPrefix(%s) = %s, got %s", tc.Path, tc.Escaped, got)
+		}
+	}
+}
+
+func TestPrefixToPath(t *testing.T) {
+	for _, tc := range escapeTests {
+		got, err := PrefixToPath(tc.Escaped)
+		if err != nil {
+			t.Errorf("expected PrefixToPath(%s) err = nil, got %v", tc.Escaped, err)
+		}
+		if got != tc.Path {
+			t.Errorf("expected PrefixToPath(%s) = %s, got %s", tc.Escaped, tc.Path, got)
+		}
+	}
+}
+
+func TestPrefixToPathError(t *testing.T) {
+	tests := []string{
+		"foo%",
+		"foo%1",
+		"foo%%12",
+		"foo%1g",
+	}
 	for _, tc := range tests {
-		if got := PathToPrefix(tc.Path); got != tc.Expected {
-			t.Errorf("expected PathToPrefix(%s) = %s, got %s", tc.Path, tc.Expected, got)
+		_, err := PrefixToPath(tc)
+		if err == nil {
+			t.Errorf("expected PrefixToPath(%s) err != nil, got nil", tc)
+		}
+	}
+}
+
+func TestRuntimePackageList(t *testing.T) {
+	// Test that all packages imported by the runtime are marked as runtime
+	// packages.
+	testenv.MustHaveGoBuild(t)
+	goCmd, err := testenv.GoTool()
+	if err != nil {
+		t.Fatal(err)
+	}
+	pkgList, err := exec.Command(goCmd, "list", "-deps", "runtime").Output()
+	if err != nil {
+		if err, ok := err.(*exec.ExitError); ok {
+			t.Log(string(err.Stderr))
+		}
+		t.Fatal(err)
+	}
+	for _, pkg := range strings.Split(strings.TrimRight(string(pkgList), "\n"), "\n") {
+		if pkg == "unsafe" {
+			continue
+		}
+		if !LookupPkgSpecial(pkg).Runtime {
+			t.Errorf("package %s is imported by runtime, but not marked Runtime", pkg)
 		}
 	}
 }
diff --git a/src/cmd/internal/objabi/pkgspecial.go b/src/cmd/internal/objabi/pkgspecial.go
new file mode 100644
index 0000000..6df95f3
--- /dev/null
+++ b/src/cmd/internal/objabi/pkgspecial.go
@@ -0,0 +1,123 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package objabi
+
+import "sync"
+
+// PkgSpecial indicates special build properties of a given runtime-related
+// package.
+type PkgSpecial struct {
+	// Runtime indicates that this package is "runtime" or imported by
+	// "runtime". This has several effects (which maybe should be split out):
+	//
+	// - Implicit allocation is disallowed.
+	//
+	// - Various runtime pragmas are enabled.
+	//
+	// - Optimizations are always enabled.
+	//
+	// This should be set for runtime and all packages it imports, and may be
+	// set for additional packages.
+	Runtime bool
+
+	// NoInstrument indicates this package should not receive sanitizer
+	// instrumentation. In many of these, instrumentation could cause infinite
+	// recursion. This is all runtime packages, plus those that support the
+	// sanitizers.
+	NoInstrument bool
+
+	// NoRaceFunc indicates functions in this package should not get
+	// racefuncenter/racefuncexit instrumentation. Memory accesses in these
+	// packages are either uninteresting or will cause false positives.
+	NoRaceFunc bool
+
+	// AllowAsmABI indicates that assembly in this package is allowed to use ABI
+	// selectors in symbol names. Generally this is needed for packages that
+	// interact closely with the runtime package or have performance-critical
+	// assembly.
+	AllowAsmABI bool
+}
+
+var runtimePkgs = []string{
+	"runtime",
+
+	"runtime/internal/atomic",
+	"runtime/internal/math",
+	"runtime/internal/sys",
+	"runtime/internal/syscall",
+
+	"internal/abi",
+	"internal/bytealg",
+	"internal/chacha8rand",
+	"internal/coverage/rtcov",
+	"internal/cpu",
+	"internal/goarch",
+	"internal/godebugs",
+	"internal/goexperiment",
+	"internal/goos",
+}
+
+// extraNoInstrumentPkgs is the set of packages in addition to runtimePkgs that
+// should have NoInstrument set.
+var extraNoInstrumentPkgs = []string{
+	"runtime/race",
+	"runtime/msan",
+	"runtime/asan",
+	// We omit bytealg even though it's imported by runtime because it also
+	// backs a lot of package bytes. Currently we don't have a way to omit race
+	// instrumentation when used from the runtime while keeping race
+	// instrumentation when used from user code. Somehow this doesn't seem to
+	// cause problems, though we may be skating on thin ice. See #61204.
+	"-internal/bytealg",
+}
+
+var noRaceFuncPkgs = []string{"sync", "sync/atomic"}
+
+var allowAsmABIPkgs = []string{
+	"runtime",
+	"reflect",
+	"syscall",
+	"internal/bytealg",
+	"internal/chacha8rand",
+	"runtime/internal/syscall",
+	"runtime/internal/startlinetest",
+}
+
+var (
+	pkgSpecials     map[string]PkgSpecial
+	pkgSpecialsOnce sync.Once
+)
+
+// LookupPkgSpecial returns special build properties for the given package path.
+func LookupPkgSpecial(pkgPath string) PkgSpecial {
+	pkgSpecialsOnce.Do(func() {
+		// Construct pkgSpecials from various package lists. This lets us use
+		// more flexible logic, while keeping the final map simple, and avoids
+		// the init-time cost of a map.
+		pkgSpecials = make(map[string]PkgSpecial)
+		set := func(elt string, f func(*PkgSpecial)) {
+			s := pkgSpecials[elt]
+			f(&s)
+			pkgSpecials[elt] = s
+		}
+		for _, pkg := range runtimePkgs {
+			set(pkg, func(ps *PkgSpecial) { ps.Runtime = true; ps.NoInstrument = true })
+		}
+		for _, pkg := range extraNoInstrumentPkgs {
+			if pkg[0] == '-' {
+				set(pkg[1:], func(ps *PkgSpecial) { ps.NoInstrument = false })
+			} else {
+				set(pkg, func(ps *PkgSpecial) { ps.NoInstrument = true })
+			}
+		}
+		for _, pkg := range noRaceFuncPkgs {
+			set(pkg, func(ps *PkgSpecial) { ps.NoRaceFunc = true })
+		}
+		for _, pkg := range allowAsmABIPkgs {
+			set(pkg, func(ps *PkgSpecial) { ps.AllowAsmABI = true })
+		}
+	})
+	return pkgSpecials[pkgPath]
+}
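
A minimal sketch of how the lookup combines the package lists above (again only importable from inside the toolchain):

package main

import (
	"fmt"

	"cmd/internal/objabi" // toolchain-internal; illustrative only
)

func main() {
	rt := objabi.LookupPkgSpecial("runtime")
	fmt.Println(rt.Runtime, rt.NoInstrument, rt.AllowAsmABI) // true true true

	// internal/bytealg is a runtime package, but the "-internal/bytealg"
	// entry above re-enables instrumentation for it.
	ba := objabi.LookupPkgSpecial("internal/bytealg")
	fmt.Println(ba.Runtime, ba.NoInstrument, ba.AllowAsmABI) // true false true

	// Packages not in any list get the zero value.
	fmt.Println(objabi.LookupPkgSpecial("example.com/app")) // {false false false false}
}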
diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go
index 996c300..5442984 100644
--- a/src/cmd/internal/objabi/reloctype.go
+++ b/src/cmd/internal/objabi/reloctype.go
@@ -94,11 +94,10 @@
 	// This is a marker relocation (0-sized), for the linker's reachability
 	// analysis.
 	R_USEIFACEMETHOD
-	// Similar to R_USEIFACEMETHOD, except instead of indicating a type +
-	// method offset with Sym+Add, Sym points to a symbol containing the name
-	// of the method being called. See the description in
-	// cmd/compile/internal/reflectdata/reflect.go:MarkUsedIfaceMethod for details.
-	R_USEGENERICIFACEMETHOD
+	// R_USENAMEDMETHOD marks that methods with a specific name must not be eliminated.
+	// The target is a symbol containing the name of a method called via a generic
+	// interface or looked up via MethodByName("F").
+	R_USENAMEDMETHOD
 	// R_METHODOFF resolves to a 32-bit offset from the beginning of the section
 	// holding the data being relocated to the referenced symbol.
 	// It is a variant of R_ADDROFF used when linking from the uncommonType of a
@@ -260,30 +259,60 @@
 
 	// RISC-V.
 
-	// R_RISCV_CALL relocates a J-type instruction with a 21 bit PC-relative
-	// address.
-	R_RISCV_CALL
+	// R_RISCV_JAL resolves a 20 bit offset for a J-type instruction.
+	R_RISCV_JAL
 
-	// R_RISCV_CALL_TRAMP is the same as R_RISCV_CALL but denotes the use of a
+	// R_RISCV_JAL_TRAMP is the same as R_RISCV_JAL but denotes the use of a
 	// trampoline, which we may be able to avoid during relocation. These are
 	// only used by the linker and are not emitted by the compiler or assembler.
-	R_RISCV_CALL_TRAMP
+	R_RISCV_JAL_TRAMP
 
-	// R_RISCV_PCREL_ITYPE resolves a 32-bit PC-relative address using an
+	// R_RISCV_CALL resolves a 32 bit PC-relative address for an AUIPC + JALR
+	// instruction pair.
+	R_RISCV_CALL
+
+	// R_RISCV_PCREL_ITYPE resolves a 32 bit PC-relative address for an
 	// AUIPC + I-type instruction pair.
 	R_RISCV_PCREL_ITYPE
 
-	// R_RISCV_PCREL_STYPE resolves a 32-bit PC-relative address using an
+	// R_RISCV_PCREL_STYPE resolves a 32 bit PC-relative address for an
 	// AUIPC + S-type instruction pair.
 	R_RISCV_PCREL_STYPE
 
-	// R_RISCV_TLS_IE_ITYPE resolves a 32-bit TLS initial-exec TOC offset
-	// address using an AUIPC + I-type instruction pair.
-	R_RISCV_TLS_IE_ITYPE
+	// R_RISCV_TLS_IE resolves a 32 bit TLS initial-exec address for an
+	// AUIPC + I-type instruction pair.
+	R_RISCV_TLS_IE
 
-	// R_RISCV_TLS_IE_STYPE resolves a 32-bit TLS initial-exec TOC offset
-	// address using an AUIPC + S-type instruction pair.
-	R_RISCV_TLS_IE_STYPE
+	// R_RISCV_TLS_LE resolves a 32 bit TLS local-exec address for a
+	// LUI + I-type instruction sequence.
+	R_RISCV_TLS_LE
+
+	// R_RISCV_GOT_HI20 resolves the high 20 bits of a 32-bit PC-relative GOT
+	// address.
+	R_RISCV_GOT_HI20
+
+	// R_RISCV_PCREL_HI20 resolves the high 20 bits of a 32-bit PC-relative
+	// address.
+	R_RISCV_PCREL_HI20
+
+	// R_RISCV_PCREL_LO12_I resolves the low 12 bits of a 32-bit PC-relative
+	// address using an I-type instruction.
+	R_RISCV_PCREL_LO12_I
+
+	// R_RISCV_PCREL_LO12_S resolves the low 12 bits of a 32-bit PC-relative
+	// address using an S-type instruction.
+	R_RISCV_PCREL_LO12_S
+
+	// R_RISCV_BRANCH resolves a 12-bit PC-relative branch offset.
+	R_RISCV_BRANCH
+
+	// R_RISCV_RVC_BRANCH resolves an 8-bit PC-relative offset for a CB-type
+	// instruction.
+	R_RISCV_RVC_BRANCH
+
+	// R_RISCV_RVC_JUMP resolves an 11-bit PC-relative offset for a CJ-type
+	// instruction.
+	R_RISCV_RVC_JUMP
 
 	// R_PCRELDBL relocates s390x 2-byte aligned PC-relative addresses.
 	// TODO(mundaym): remove once variants can be serialized - see issue 14218.
@@ -316,6 +345,11 @@
 	R_LOONG64_TLS_IE_PCREL_HI
 	R_LOONG64_TLS_IE_LO
 
+	// R_LOONG64_GOT_HI and R_LOONG64_GOT_LO resolve a GOT-relative instruction sequence,
+	// usually a pcalau12i followed by an ld or addi instruction.
+	R_LOONG64_GOT_HI
+	R_LOONG64_GOT_LO
+
 	// R_JMPLOONG64 resolves to non-PC-relative target address of a JMP instruction,
 	// by encoding the address into the instruction.
 	R_JMPLOONG64
@@ -361,12 +395,13 @@
 
 // IsDirectCall reports whether r is a relocation for a direct call.
 // A direct call is a CALL instruction that takes the target address
-// as an immediate. The address is embedded into the instruction, possibly
+// as an immediate. The address is embedded into the instruction(s), possibly
 // with limited width. An indirect call is a CALL instruction that takes
 // the target address in register or memory.
 func (r RelocType) IsDirectCall() bool {
 	switch r {
-	case R_CALL, R_CALLARM, R_CALLARM64, R_CALLLOONG64, R_CALLMIPS, R_CALLPOWER, R_RISCV_CALL, R_RISCV_CALL_TRAMP:
+	case R_CALL, R_CALLARM, R_CALLARM64, R_CALLLOONG64, R_CALLMIPS, R_CALLPOWER,
+		R_RISCV_CALL, R_RISCV_JAL, R_RISCV_JAL_TRAMP:
 		return true
 	}
 	return false
diff --git a/src/cmd/internal/objabi/reloctype_string.go b/src/cmd/internal/objabi/reloctype_string.go
index c7441ef..c8923c0 100644
--- a/src/cmd/internal/objabi/reloctype_string.go
+++ b/src/cmd/internal/objabi/reloctype_string.go
@@ -32,7 +32,7 @@
 	_ = x[R_USETYPE-22]
 	_ = x[R_USEIFACE-23]
 	_ = x[R_USEIFACEMETHOD-24]
-	_ = x[R_USEGENERICIFACEMETHOD-25]
+	_ = x[R_USENAMEDMETHOD-25]
 	_ = x[R_METHODOFF-26]
 	_ = x[R_KEEP-27]
 	_ = x[R_POWER_TOC-28]
@@ -67,33 +67,43 @@
 	_ = x[R_ADDRPOWER_TOCREL_DS-57]
 	_ = x[R_ADDRPOWER_D34-58]
 	_ = x[R_ADDRPOWER_PCREL34-59]
-	_ = x[R_RISCV_CALL-60]
-	_ = x[R_RISCV_CALL_TRAMP-61]
-	_ = x[R_RISCV_PCREL_ITYPE-62]
-	_ = x[R_RISCV_PCREL_STYPE-63]
-	_ = x[R_RISCV_TLS_IE_ITYPE-64]
-	_ = x[R_RISCV_TLS_IE_STYPE-65]
-	_ = x[R_PCRELDBL-66]
-	_ = x[R_ADDRLOONG64-67]
-	_ = x[R_ADDRLOONG64U-68]
-	_ = x[R_ADDRLOONG64TLS-69]
-	_ = x[R_ADDRLOONG64TLSU-70]
-	_ = x[R_CALLLOONG64-71]
-	_ = x[R_LOONG64_TLS_IE_PCREL_HI-72]
-	_ = x[R_LOONG64_TLS_IE_LO-73]
-	_ = x[R_JMPLOONG64-74]
-	_ = x[R_ADDRMIPSU-75]
-	_ = x[R_ADDRMIPSTLS-76]
-	_ = x[R_ADDRCUOFF-77]
-	_ = x[R_WASMIMPORT-78]
-	_ = x[R_XCOFFREF-79]
-	_ = x[R_PEIMAGEOFF-80]
-	_ = x[R_INITORDER-81]
+	_ = x[R_RISCV_JAL-60]
+	_ = x[R_RISCV_JAL_TRAMP-61]
+	_ = x[R_RISCV_CALL-62]
+	_ = x[R_RISCV_PCREL_ITYPE-63]
+	_ = x[R_RISCV_PCREL_STYPE-64]
+	_ = x[R_RISCV_TLS_IE-65]
+	_ = x[R_RISCV_TLS_LE-66]
+	_ = x[R_RISCV_GOT_HI20-67]
+	_ = x[R_RISCV_PCREL_HI20-68]
+	_ = x[R_RISCV_PCREL_LO12_I-69]
+	_ = x[R_RISCV_PCREL_LO12_S-70]
+	_ = x[R_RISCV_BRANCH-71]
+	_ = x[R_RISCV_RVC_BRANCH-72]
+	_ = x[R_RISCV_RVC_JUMP-73]
+	_ = x[R_PCRELDBL-74]
+	_ = x[R_ADDRLOONG64-75]
+	_ = x[R_ADDRLOONG64U-76]
+	_ = x[R_ADDRLOONG64TLS-77]
+	_ = x[R_ADDRLOONG64TLSU-78]
+	_ = x[R_CALLLOONG64-79]
+	_ = x[R_LOONG64_TLS_IE_PCREL_HI-80]
+	_ = x[R_LOONG64_TLS_IE_LO-81]
+	_ = x[R_LOONG64_GOT_HI-82]
+	_ = x[R_LOONG64_GOT_LO-83]
+	_ = x[R_JMPLOONG64-84]
+	_ = x[R_ADDRMIPSU-85]
+	_ = x[R_ADDRMIPSTLS-86]
+	_ = x[R_ADDRCUOFF-87]
+	_ = x[R_WASMIMPORT-88]
+	_ = x[R_XCOFFREF-89]
+	_ = x[R_PEIMAGEOFF-90]
+	_ = x[R_INITORDER-91]
 }
 
-const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_USEGENERICIFACEMETHODR_METHODOFFR_KEEPR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_PCREL_LDST8R_ARM64_PCREL_LDST16R_ARM64_PCREL_LDST32R_ARM64_PCREL_LDST64R_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_POWER_TLS_IE_PCREL34R_POWER_TLS_LE_TPREL34R_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_GOT_PCREL34R_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_ADDRPOWER_D34R_ADDRPOWER_PCREL34R_RISCV_CALLR_RISCV_CALL_TRAMPR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IE_ITYPER_RISCV_TLS_IE_STYPER_PCRELDBLR_ADDRLOONG64R_ADDRLOONG64UR_ADDRLOONG64TLSR_ADDRLOONG64TLSUR_CALLLOONG64R_LOONG64_TLS_IE_PCREL_HIR_LOONG64_TLS_IE_LOR_JMPLOONG64R_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREFR_PEIMAGEOFFR_INITORDER"
+const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_USENAMEDMETHODR_METHODOFFR_KEEPR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_PCREL_LDST8R_ARM64_PCREL_LDST16R_ARM64_PCREL_LDST32R_ARM64_PCREL_LDST64R_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_POWER_TLS_IE_PCREL34R_POWER_TLS_LE_TPREL34R_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_GOT_PCREL34R_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_ADDRPOWER_D34R_ADDRPOWER_PCREL34R_RISCV_JALR_RISCV_JAL_TRAMPR_RISCV_CALLR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IER_RISCV_TLS_LER_RISCV_GOT_HI20R_RISCV_PCREL_HI20R_RISCV_PCREL_LO12_IR_RISCV_PCREL_LO12_SR_RISCV_BRANCHR_RISCV_RVC_BRANCHR_RISCV_RVC_JUMPR_PCRELDBLR_ADDRLOONG64R_ADDRLOONG64UR_ADDRLOONG64TLSR_ADDRLOONG64TLSUR_CALLLOONG64R_LOONG64_TLS_IE_PCREL_HIR_LOONG64_TLS_IE_LOR_LOONG64_GOT_HIR_LOONG64_GOT_LOR_JMPLOONG64R_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREFR_PEIMAGEOFFR_INITORDER"
 
-var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 53, 59, 68, 79, 88, 99, 109, 116, 123, 131, 139, 147, 153, 159, 165, 175, 184, 194, 210, 233, 244, 250, 261, 271, 280, 293, 307, 321, 335, 351, 362, 375, 394, 414, 434, 454, 467, 481, 495, 509, 524, 538, 552, 563, 585, 607, 621, 636, 659, 676, 694, 715, 730, 749, 761, 779, 798, 817, 837, 857, 867, 880, 894, 910, 927, 940, 965, 984, 996, 1007, 1020, 1031, 1043, 1053, 1065, 1076}
+var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 53, 59, 68, 79, 88, 99, 109, 116, 123, 131, 139, 147, 153, 159, 165, 175, 184, 194, 210, 226, 237, 243, 254, 264, 273, 286, 300, 314, 328, 344, 355, 368, 387, 407, 427, 447, 460, 474, 488, 502, 517, 531, 545, 556, 578, 600, 614, 629, 652, 669, 687, 708, 723, 742, 753, 770, 782, 801, 820, 834, 848, 864, 882, 902, 922, 936, 954, 970, 980, 993, 1007, 1023, 1040, 1053, 1078, 1097, 1113, 1129, 1141, 1152, 1165, 1176, 1188, 1198, 1210, 1221}
 
 func (i RelocType) String() string {
 	i -= 1
diff --git a/src/cmd/internal/objfile/goobj.go b/src/cmd/internal/objfile/goobj.go
index 24d2d0b..a0a2a17 100644
--- a/src/cmd/internal/objfile/goobj.go
+++ b/src/cmd/internal/objfile/goobj.go
@@ -35,7 +35,7 @@
 L:
 	for _, e := range a.Entries {
 		switch e.Type {
-		case archive.EntryPkgDef:
+		case archive.EntryPkgDef, archive.EntrySentinelNonObj:
 			continue
 		case archive.EntryGoObj:
 			o := e.Obj
diff --git a/src/cmd/internal/objfile/macho.go b/src/cmd/internal/objfile/macho.go
index 1d6963f..c924975 100644
--- a/src/cmd/internal/objfile/macho.go
+++ b/src/cmd/internal/objfile/macho.go
@@ -128,6 +128,9 @@
 func (x uint64s) Less(i, j int) bool { return x[i] < x[j] }
 
 func (f *machoFile) loadAddress() (uint64, error) {
+	if seg := f.macho.Segment("__TEXT"); seg != nil {
+		return seg.Addr, nil
+	}
 	return 0, fmt.Errorf("unknown load address")
 }
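
The new lookup relies on the standard debug/macho package; a standalone sketch of the same logic (the binary path is hypothetical):

package main

import (
	"debug/macho"
	"fmt"
	"log"
)

func main() {
	f, err := macho.Open("a.out") // hypothetical Mach-O binary
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Mirror machoFile.loadAddress above: the load address is the
	// virtual address of the __TEXT segment, if present.
	if seg := f.Segment("__TEXT"); seg != nil {
		fmt.Printf("load address: %#x\n", seg.Addr)
		return
	}
	fmt.Println("unknown load address")
}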
 
diff --git a/src/cmd/internal/src/pos.go b/src/cmd/internal/src/pos.go
index 6f1c7dd..4d71c81 100644
--- a/src/cmd/internal/src/pos.go
+++ b/src/cmd/internal/src/pos.go
@@ -116,9 +116,9 @@
 // AbsFilename() returns the absolute filename recorded with the position's base.
 func (p Pos) AbsFilename() string { return p.base.AbsFilename() }
 
-// SymFilename() returns the absolute filename recorded with the position's base,
-// prefixed by FileSymPrefix to make it appropriate for use as a linker symbol.
-func (p Pos) SymFilename() string { return p.base.SymFilename() }
+// FileIndex returns the file index of the position's base's absolute
+// filename within the PosTable in which it was registered.
+func (p Pos) FileIndex() int { return p.base.FileIndex() }
 
 func (p Pos) String() string {
 	return p.Format(true, true)
@@ -193,9 +193,9 @@
 	pos         Pos    // position at which the relative position is (line, col)
 	filename    string // file name used to open source file, for error messages
 	absFilename string // absolute file name, for PC-Line tables
-	symFilename string // cached symbol file name, to avoid repeated string concatenation
 	line, col   uint   // relative line, column number at pos
 	inl         int    // inlining index (see cmd/internal/obj/inl.go)
+	fileIndex   int    // index of absFilename within PosTable.FileTable
 }
 
 // NewFileBase returns a new *PosBase for a file with the given (relative and
@@ -204,10 +204,10 @@
 	base := &PosBase{
 		filename:    filename,
 		absFilename: absFilename,
-		symFilename: FileSymPrefix + absFilename,
 		line:        1,
 		col:         1,
 		inl:         -1,
+		fileIndex:   -1,
 	}
 	base.pos = MakePos(base, 1, 1)
 	return base
@@ -220,24 +220,22 @@
 //
 // at position pos.
 func NewLinePragmaBase(pos Pos, filename, absFilename string, line, col uint) *PosBase {
-	return &PosBase{pos, filename, absFilename, FileSymPrefix + absFilename, line, col, -1}
+	return &PosBase{pos, filename, absFilename, line, col, -1, -1}
 }
 
-// NewInliningBase returns a copy of the old PosBase with the given inlining
-// index. If old == nil, the resulting PosBase has no filename.
-func NewInliningBase(old *PosBase, inlTreeIndex int) *PosBase {
-	if old == nil {
-		base := &PosBase{line: 1, col: 1, inl: inlTreeIndex}
-		base.pos = MakePos(base, 1, 1)
-		return base
+// NewInliningBase returns a copy of the orig PosBase with the given inlining
+// index. If orig == nil, NewInliningBase panics.
+func NewInliningBase(orig *PosBase, inlTreeIndex int) *PosBase {
+	if orig == nil {
+		panic("no old PosBase")
 	}
-	copy := *old
-	base := &copy
+	base := *orig
 	base.inl = inlTreeIndex
-	if old == old.pos.base {
-		base.pos.base = base
+	base.fileIndex = -1
+	if orig == orig.pos.base {
+		base.pos.base = &base
 	}
-	return base
+	return &base
 }
 
 var noPos Pos
@@ -269,16 +267,21 @@
 	return ""
 }
 
+// FileSymPrefix is the linker symbol prefix that was previously used for
+// linker pseudo-symbols representing file names.
 const FileSymPrefix = "gofile.."
 
-// SymFilename returns the absolute filename recorded with the base,
-// prefixed by FileSymPrefix to make it appropriate for use as a linker symbol.
-// If b is nil, SymFilename returns FileSymPrefix + "??".
-func (b *PosBase) SymFilename() string {
+// FileIndex returns the index of the base's absolute filename within
+// its PosTable's FileTable. It panics if it hasn't been registered
+// with a PosTable. If b == nil, the result is -1.
+func (b *PosBase) FileIndex() int {
 	if b != nil {
-		return b.symFilename
+		if b.fileIndex < 0 {
+			panic("PosBase has no file index")
+		}
+		return b.fileIndex
 	}
-	return FileSymPrefix + "??"
+	return -1
 }
 
 // Line returns the line number recorded with the base.
diff --git a/src/cmd/internal/src/xpos.go b/src/cmd/internal/src/xpos.go
index 867d0ab..a745059 100644
--- a/src/cmd/internal/src/xpos.go
+++ b/src/cmd/internal/src/xpos.go
@@ -124,25 +124,40 @@
 // XPos returns the corresponding XPos for the given pos,
 // adding pos to t if necessary.
 func (t *PosTable) XPos(pos Pos) XPos {
-	m := t.indexMap
-	if m == nil {
-		// Create new list and map and populate with nil
-		// base so that NoPos always gets index 0.
+	return XPos{t.baseIndex(pos.base), pos.lico}
+}
+
+func (t *PosTable) baseIndex(base *PosBase) int32 {
+	if base == nil {
+		return 0
+	}
+
+	if i, ok := t.indexMap[base]; ok {
+		return int32(i)
+	}
+
+	if base.fileIndex >= 0 {
+		panic("PosBase already registered with a PosTable")
+	}
+
+	if t.indexMap == nil {
 		t.baseList = append(t.baseList, nil)
-		m = map[*PosBase]int{nil: 0}
-		t.indexMap = m
+		t.indexMap = make(map[*PosBase]int)
 		t.nameMap = make(map[string]int)
 	}
-	i, ok := m[pos.base]
+
+	i := len(t.baseList)
+	t.indexMap[base] = i
+	t.baseList = append(t.baseList, base)
+
+	fileIndex, ok := t.nameMap[base.absFilename]
 	if !ok {
-		i = len(t.baseList)
-		t.baseList = append(t.baseList, pos.base)
-		t.indexMap[pos.base] = i
-		if _, ok := t.nameMap[pos.base.symFilename]; !ok {
-			t.nameMap[pos.base.symFilename] = len(t.nameMap)
-		}
+		fileIndex = len(t.nameMap)
+		t.nameMap[base.absFilename] = fileIndex
 	}
-	return XPos{int32(i), pos.lico}
+	base.fileIndex = fileIndex
+
+	return int32(i)
 }
 
 // Pos returns the corresponding Pos for the given p.
@@ -155,14 +170,6 @@
 	return Pos{base, p.lico}
 }
 
-// FileIndex returns the index of the given filename(symbol) in the PosTable, or -1 if not found.
-func (t *PosTable) FileIndex(filename string) int {
-	if v, ok := t.nameMap[filename]; ok {
-		return v
-	}
-	return -1
-}
-
 // FileTable returns a slice of all files used to build this package.
 func (t *PosTable) FileTable() []string {
 	// Create a LUT of the global package level file indices. This table is what
diff --git a/src/cmd/internal/src/xpos_test.go b/src/cmd/internal/src/xpos_test.go
index a17ba63..f76de9d 100644
--- a/src/cmd/internal/src/xpos_test.go
+++ b/src/cmd/internal/src/xpos_test.go
@@ -62,8 +62,8 @@
 		}
 	}
 
-	if len(tab.baseList) != len(tab.indexMap) {
-		t.Errorf("table length discrepancy: %d != %d", len(tab.baseList), len(tab.indexMap))
+	if len(tab.baseList) != 1+len(tab.indexMap) { // indexMap omits nil
+		t.Errorf("table length discrepancy: %d != 1+%d", len(tab.baseList), len(tab.indexMap))
 	}
 
 	const wantLen = 4
diff --git a/src/cmd/internal/testdir/testdir_test.go b/src/cmd/internal/testdir/testdir_test.go
index bd77859..0fb56e6 100644
--- a/src/cmd/internal/testdir/testdir_test.go
+++ b/src/cmd/internal/testdir/testdir_test.go
@@ -24,6 +24,7 @@
 	"path/filepath"
 	"regexp"
 	"runtime"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -417,13 +418,12 @@
 		}
 	}
 
+	if slices.Contains(build.Default.ReleaseTags, name) {
+		return true
+	}
+
 	if strings.HasPrefix(name, "goexperiment.") {
-		for _, tag := range build.Default.ToolTags {
-			if tag == name {
-				return true
-			}
-		}
-		return false
+		return slices.Contains(build.Default.ToolTags, name)
 	}
 
 	if name == "cgo" && ctxt.cgoEnabled {
@@ -477,16 +477,20 @@
 	}
 	src := string(srcBytes)
 
-	// Execution recipe stops at first blank line.
-	action, _, ok := strings.Cut(src, "\n\n")
-	if !ok {
-		t.Fatalf("double newline ending execution recipe not found in GOROOT/test/%s", t.goFileName())
+	// The execution recipe is contained in a comment on
+	// the first non-empty line that is not a build constraint.
+	var action string
+	for actionSrc := src; action == "" && actionSrc != ""; {
+		var line string
+		line, actionSrc, _ = strings.Cut(actionSrc, "\n")
+		if constraint.IsGoBuild(line) || constraint.IsPlusBuild(line) {
+			continue
+		}
+		action = strings.TrimSpace(strings.TrimPrefix(line, "//"))
 	}
-	if firstLine, rest, ok := strings.Cut(action, "\n"); ok && strings.Contains(firstLine, "+build") {
-		// skip first line
-		action = rest
+	if action == "" {
+		t.Fatalf("execution recipe not found in GOROOT/test/%s", t.goFileName())
 	}
-	action = strings.TrimPrefix(action, "//")
 
 	// Check for build constraints only up to the actual code.
 	header, _, ok := strings.Cut(src, "\npackage")
@@ -1454,7 +1458,7 @@
 	archVariants = map[string][]string{
 		"386":     {"GO386", "sse2", "softfloat"},
 		"amd64":   {"GOAMD64", "v1", "v2", "v3", "v4"},
-		"arm":     {"GOARM", "5", "6", "7"},
+		"arm":     {"GOARM", "5", "6", "7", "7,softfloat"},
 		"arm64":   {},
 		"loong64": {},
 		"mips":    {"GOMIPS", "hardfloat", "softfloat"},
@@ -1747,6 +1751,9 @@
 
 	// Test that (!a OR !b) matches anything.
 	assert(shouldTest("// +build !windows !plan9", "windows", "amd64"))
+
+	// Test that //go:build tags match.
+	assert(shouldTest("//go:build go1.4", "linux", "amd64"))
 }
 
 // overlayDir makes a minimal-overhead copy of srcRoot in which new files may be added.
diff --git a/src/cmd/internal/traceviewer/format.go b/src/cmd/internal/traceviewer/format.go
deleted file mode 100644
index 3636c10..0000000
--- a/src/cmd/internal/traceviewer/format.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package traceviewer provides definitions of the JSON data structures
-// used by the Chrome trace viewer.
-//
-// The official description of the format is in this file:
-// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview
-package traceviewer
-
-type Data struct {
-	Events   []*Event         `json:"traceEvents"`
-	Frames   map[string]Frame `json:"stackFrames"`
-	TimeUnit string           `json:"displayTimeUnit"`
-}
-
-type Event struct {
-	Name      string  `json:"name,omitempty"`
-	Phase     string  `json:"ph"`
-	Scope     string  `json:"s,omitempty"`
-	Time      float64 `json:"ts"`
-	Dur       float64 `json:"dur,omitempty"`
-	PID       uint64  `json:"pid"`
-	TID       uint64  `json:"tid"`
-	ID        uint64  `json:"id,omitempty"`
-	BindPoint string  `json:"bp,omitempty"`
-	Stack     int     `json:"sf,omitempty"`
-	EndStack  int     `json:"esf,omitempty"`
-	Arg       any     `json:"args,omitempty"`
-	Cname     string  `json:"cname,omitempty"`
-	Category  string  `json:"cat,omitempty"`
-}
-
-type Frame struct {
-	Name   string `json:"name"`
-	Parent int    `json:"parent,omitempty"`
-}
diff --git a/src/cmd/link/doc.go b/src/cmd/link/doc.go
index ce0166f..b0f2700 100644
--- a/src/cmd/link/doc.go
+++ b/src/cmd/link/doc.go
@@ -18,6 +18,8 @@
 	-B note
 		Add an ELF_NT_GNU_BUILD_ID note when using ELF.
 		The value should start with 0x and be an even number of hex digits.
+		Alternatively, you can pass "gobuildid" in order to derive the
+		GNU build ID from the Go build ID.
 	-E entry
 		Set entry symbol name.
 	-H type
@@ -41,10 +43,10 @@
 		or initialized to a constant string expression. -X will not work if the initializer makes
 		a function call or refers to other variables.
 		Note that before Go 1.5 this option took two separate arguments.
-	-a
-		Disassemble output.
 	-asan
 		Link with C/C++ address sanitizer support.
+	-aslr
+		Enable ASLR for buildmode=c-shared on windows (default true).
 	-buildid id
 		Record id as Go toolchain build id.
 	-buildmode mode
@@ -62,8 +64,6 @@
 		The dynamic header is on by default, even without any
 		references to dynamic libraries, because many common
 		system tools now assume the presence of the header.
-	-debugtramp int
-		Debug trampolines.
 	-dumpdep
 		Dump symbol dependency graph.
 	-extar ar
@@ -102,8 +102,6 @@
 		Set runtime.MemProfileRate to rate.
 	-msan
 		Link with C/C++ memory sanitizer support.
-	-n
-		Dump symbol table.
 	-o file
 		Write output to file (default a.out, or a.out.exe on Windows).
 	-pluginpath path
@@ -114,13 +112,9 @@
 		Link with race detection libraries.
 	-s
 		Omit the symbol table and debug information.
-	-shared
-		Generated shared object (implies -linkmode external; experimental).
 	-tmpdir dir
 		Write temporary files to dir.
 		Temporary files are only used in external linking mode.
-	-u
-		Reject unsafe packages.
 	-v
 		Print trace of linker operations.
 	-w
diff --git a/src/cmd/link/elf_test.go b/src/cmd/link/elf_test.go
index d662145..5dcef1c 100644
--- a/src/cmd/link/elf_test.go
+++ b/src/cmd/link/elf_test.go
@@ -7,6 +7,10 @@
 package main
 
 import (
+	"bytes"
+	"cmd/internal/buildid"
+	"cmd/internal/notsha256"
+	"cmd/link/internal/ld"
 	"debug/elf"
 	"fmt"
 	"internal/platform"
@@ -199,6 +203,39 @@
 	}
 }
 
+func TestGNUBuildIDDerivedFromGoBuildID(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+
+	t.Parallel()
+
+	goFile := filepath.Join(t.TempDir(), "notes.go")
+	if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil {
+		t.Fatal(err)
+	}
+	outFile := filepath.Join(t.TempDir(), "notes.exe")
+	goTool := testenv.GoToolPath(t)
+
+	cmd := testenv.Command(t, goTool, "build", "-o", outFile, "-ldflags", "-buildid 0x1234 -B gobuildid", goFile)
+	cmd.Dir = t.TempDir()
+
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Logf("%s", out)
+		t.Fatal(err)
+	}
+
+	expectedGoBuildID := notsha256.Sum256([]byte("0x1234"))
+
+	gnuBuildID, err := buildid.ReadELFNote(outFile, string(ld.ELF_NOTE_BUILDINFO_NAME), ld.ELF_NOTE_BUILDINFO_TAG)
+	if err != nil || gnuBuildID == nil {
+		t.Fatalf("can't read GNU build ID")
+	}
+
+	if !bytes.Equal(gnuBuildID, expectedGoBuildID[:20]) {
+		t.Fatalf("build id not matching")
+	}
+}
+
 func TestMergeNoteSections(t *testing.T) {
 	testenv.MustHaveGoBuild(t)
 	expected := 1
@@ -498,3 +535,28 @@
 		}
 	}
 }
+
+func TestFlagR(t *testing.T) {
+	// Test that using the -R flag to specify a (large) alignment generates
+	// a working binary.
+	// (Test only on ELF for now. The alignment allowed differs from platform
+	// to platform.)
+	testenv.MustHaveGoBuild(t)
+	t.Parallel()
+	tmpdir := t.TempDir()
+	src := filepath.Join(tmpdir, "x.go")
+	if err := os.WriteFile(src, []byte(goSource), 0444); err != nil {
+		t.Fatal(err)
+	}
+	exe := filepath.Join(tmpdir, "x.exe")
+
+	cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-R=0x100000", "-o", exe, src)
+	if out, err := cmd.CombinedOutput(); err != nil {
+		t.Fatalf("build failed: %v, output:\n%s", err, out)
+	}
+
+	cmd = testenv.Command(t, exe)
+	if out, err := cmd.CombinedOutput(); err != nil {
+		t.Errorf("executable failed to run: %v\n%s", err, out)
+	}
+}
diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go
index 4d95f61..f86d224 100644
--- a/src/cmd/link/internal/amd64/asm.go
+++ b/src/cmd/link/internal/amd64/asm.go
@@ -245,8 +245,7 @@
 	r = relocs.At(rIdx)
 
 	switch r.Type() {
-	case objabi.R_CALL,
-		objabi.R_PCREL:
+	case objabi.R_CALL:
 		if targType != sym.SDYNIMPORT {
 			// nothing to do, the relocation will be laid out in reloc
 			return true
@@ -263,6 +262,30 @@
 		su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ)))
 		return true
 
+	case objabi.R_PCREL:
+		if targType == sym.SDYNIMPORT && ldr.SymType(s) == sym.STEXT && target.IsDarwin() {
+			// Loading the address of a dynamic symbol. Rewrite to use GOT.
+			// turn LEAQ symbol address to MOVQ of GOT entry
+			if r.Add() != 0 {
+				ldr.Errorf(s, "unexpected nonzero addend for dynamic symbol %s", ldr.SymName(targ))
+				return false
+			}
+			su := ldr.MakeSymbolUpdater(s)
+			if r.Off() >= 2 && su.Data()[r.Off()-2] == 0x8d {
+				su.MakeWritable()
+				su.Data()[r.Off()-2] = 0x8b
+				if target.IsInternal() {
+					ld.AddGotSym(target, ldr, syms, targ, 0)
+					su.SetRelocSym(rIdx, syms.GOT)
+					su.SetRelocAdd(rIdx, int64(ldr.SymGot(targ)))
+				} else {
+					su.SetRelocType(rIdx, objabi.R_GOTPCREL)
+				}
+				return true
+			}
+			ldr.Errorf(s, "unexpected R_PCREL reloc for dynamic symbol %s: not preceded by LEAQ instruction", ldr.SymName(targ))
+		}
+
 	case objabi.R_ADDR:
 		if ldr.SymType(s) == sym.STEXT && target.IsElf() {
 			su := ldr.MakeSymbolUpdater(s)
@@ -446,7 +469,7 @@
 	rs := r.Xsym
 	rt := r.Type
 
-	if rt == objabi.R_PCREL || rt == objabi.R_GOTPCREL || rt == objabi.R_CALL || ldr.SymType(rs) == sym.SHOSTOBJ || ldr.SymType(s) == sym.SINITARR {
+	if !ldr.SymType(s).IsDWARF() {
 		if ldr.SymDynid(rs) < 0 {
 			ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs))
 			return false
@@ -554,7 +577,7 @@
 	return -1
 }
 
-func elfsetupplt(ctxt *ld.Link, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
+func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
 	if plt.Size() == 0 {
 		// pushq got+8(IP)
 		plt.AddUint8(0xff)
diff --git a/src/cmd/link/internal/amd64/obj.go b/src/cmd/link/internal/amd64/obj.go
index b99cdbc..3a6141b 100644
--- a/src/cmd/link/internal/amd64/obj.go
+++ b/src/cmd/link/internal/amd64/obj.go
@@ -86,13 +86,12 @@
 
 	case objabi.Hplan9: /* plan 9 */
 		ld.HEADR = 32 + 8
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x200000 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 0x200000
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x200000, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
 	case objabi.Hdarwin: /* apple MACH */
 		ld.HEADR = ld.INITIAL_MACHO_HEADR
@@ -100,7 +99,7 @@
 			*ld.FlagRound = 4096
 		}
 		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x1000000 + int64(ld.HEADR)
+			*ld.FlagTextAddr = ld.Rnd(0x1000000, *ld.FlagRound) + int64(ld.HEADR)
 		}
 
 	case objabi.Hlinux, /* elf64 executable */
@@ -112,12 +111,12 @@
 		ld.Elfinit(ctxt)
 
 		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = (1 << 22) + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 4096
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(1<<22, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
 	case objabi.Hwindows: /* PE executable */
 		// ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit
diff --git a/src/cmd/link/internal/arm/asm.go b/src/cmd/link/internal/arm/asm.go
index b432da8..0443e49 100644
--- a/src/cmd/link/internal/arm/asm.go
+++ b/src/cmd/link/internal/arm/asm.go
@@ -304,7 +304,7 @@
 	return true
 }
 
-func elfsetupplt(ctxt *ld.Link, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
+func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
 	if plt.Size() == 0 {
 		// str lr, [sp, #-4]!
 		plt.AddUint32(ctxt.Arch, 0xe52de004)
diff --git a/src/cmd/link/internal/arm/obj.go b/src/cmd/link/internal/arm/obj.go
index 52fd5b6..3a1830c 100644
--- a/src/cmd/link/internal/arm/obj.go
+++ b/src/cmd/link/internal/arm/obj.go
@@ -84,13 +84,12 @@
 
 	case objabi.Hplan9: /* plan 9 */
 		ld.HEADR = 32
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4128
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 4096
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(4096, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
 	case objabi.Hlinux, /* arm elf */
 		objabi.Hfreebsd,
@@ -100,12 +99,12 @@
 		// with dynamic linking
 		ld.Elfinit(ctxt)
 		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 0x10000
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
 	case objabi.Hwindows: /* PE executable */
 		// ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit
diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go
index 69d430b..6645795 100644
--- a/src/cmd/link/internal/arm64/asm.go
+++ b/src/cmd/link/internal/arm64/asm.go
@@ -284,9 +284,7 @@
 	r = relocs.At(rIdx)
 
 	switch r.Type() {
-	case objabi.R_CALL,
-		objabi.R_PCREL,
-		objabi.R_CALLARM64:
+	case objabi.R_CALLARM64:
 		if targType != sym.SDYNIMPORT {
 			// nothing to do, the relocation will be laid out in reloc
 			return true
@@ -306,6 +304,40 @@
 		su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ)))
 		return true
 
+	case objabi.R_ADDRARM64:
+		if targType == sym.SDYNIMPORT && ldr.SymType(s) == sym.STEXT && target.IsDarwin() {
+			// Loading the address of a dynamic symbol. Rewrite to use GOT.
+			// turn MOVD $sym (adrp+add) into MOVD sym@GOT (adrp+ldr)
+			if r.Add() != 0 {
+				ldr.Errorf(s, "unexpected nonzero addend for dynamic symbol %s", ldr.SymName(targ))
+				return false
+			}
+			su := ldr.MakeSymbolUpdater(s)
+			data := ldr.Data(s)
+			off := r.Off()
+			if int(off+8) > len(data) {
+				ldr.Errorf(s, "unexpected R_ADDRARM64 reloc for dynamic symbol %s", ldr.SymName(targ))
+				return false
+			}
+			o := target.Arch.ByteOrder.Uint32(data[off+4:])
+			if o>>24 == 0x91 { // add
+				// rewrite to ldr
+				o = (0xf9 << 24) | 1<<22 | (o & (1<<22 - 1))
+				su.MakeWritable()
+				su.SetUint32(target.Arch, int64(off+4), o)
+				if target.IsInternal() {
+					ld.AddGotSym(target, ldr, syms, targ, 0)
+					su.SetRelocSym(rIdx, syms.GOT)
+					su.SetRelocAdd(rIdx, int64(ldr.SymGot(targ)))
+					su.SetRelocType(rIdx, objabi.R_ARM64_PCREL_LDST64)
+				} else {
+					su.SetRelocType(rIdx, objabi.R_ARM64_GOTPCREL)
+				}
+				return true
+			}
+			ldr.Errorf(s, "unexpected R_ADDRARM64 reloc for dynamic symbol %s", ldr.SymName(targ))
+		}
+
 	case objabi.R_ADDR:
 		if ldr.SymType(s) == sym.STEXT && target.IsElf() {
 			// The code is asking for the address of an external
@@ -525,9 +557,11 @@
 	siz := r.Size
 	xadd := r.Xadd
 
-	if xadd != signext24(xadd) {
+	if xadd != signext24(xadd) && rt != objabi.R_ADDR {
 		// If the relocation target would overflow the addend, then target
 		// a linker-manufactured label symbol with a smaller addend instead.
+		// R_ADDR has full-width addend encoded in data content, so it doesn't
+		// use a label symbol.
 		label := ldr.Lookup(offsetLabelName(ldr, rs, xadd/machoRelocLimit*machoRelocLimit), ldr.SymVersion(rs))
 		if label != 0 {
 			xadd = ldr.SymValue(rs) + xadd - ldr.SymValue(label)
@@ -545,11 +579,7 @@
 		}
 	}
 
-	if rt == objabi.R_CALLARM64 ||
-		rt == objabi.R_ARM64_PCREL_LDST8 || rt == objabi.R_ARM64_PCREL_LDST16 ||
-		rt == objabi.R_ARM64_PCREL_LDST32 || rt == objabi.R_ARM64_PCREL_LDST64 ||
-		rt == objabi.R_ADDRARM64 || rt == objabi.R_ARM64_GOTPCREL ||
-		ldr.SymType(rs) == sym.SHOSTOBJ || ldr.SymType(s) == sym.SINITARR {
+	if !ldr.SymType(s).IsDWARF() {
 		if ldr.SymDynid(rs) < 0 {
 			ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs))
 			return false
@@ -1061,7 +1091,7 @@
 	return loader.ExtReloc{}, false
 }
 
-func elfsetupplt(ctxt *ld.Link, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
+func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
 	if plt.Size() == 0 {
 		// stp     x16, x30, [sp, #-16]!
 		// identifying information
diff --git a/src/cmd/link/internal/arm64/obj.go b/src/cmd/link/internal/arm64/obj.go
index 34b693e..3d35815 100644
--- a/src/cmd/link/internal/arm64/obj.go
+++ b/src/cmd/link/internal/arm64/obj.go
@@ -86,13 +86,12 @@
 
 	case objabi.Hplan9: /* plan 9 */
 		ld.HEADR = 32
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4096 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 4096
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(4096, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
 	case objabi.Hlinux, /* arm64 elf */
 		objabi.Hfreebsd,
@@ -100,21 +99,21 @@
 		objabi.Hopenbsd:
 		ld.Elfinit(ctxt)
 		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 0x10000
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
 	case objabi.Hdarwin: /* apple MACH */
 		ld.HEADR = ld.INITIAL_MACHO_HEADR
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 1<<32 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 16384 // 16K page alignment
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(1<<32, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
 	case objabi.Hwindows: /* PE executable */
 		// ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit
diff --git a/src/cmd/link/internal/dwtest/dwtest.go b/src/cmd/link/internal/dwtest/dwtest.go
index c68edf4..3fb02ee 100644
--- a/src/cmd/link/internal/dwtest/dwtest.go
+++ b/src/cmd/link/internal/dwtest/dwtest.go
@@ -90,7 +90,7 @@
 	fmt.Printf("0x%x: %v\n", idx, entry.Tag)
 	for _, f := range entry.Field {
 		indent(ilevel)
-		fmt.Printf("at=%v val=0x%x\n", f.Attr, f.Val)
+		fmt.Printf("at=%v val=%v\n", f.Attr, f.Val)
 	}
 	if dumpKids {
 		ksl := ex.kids[idx]
diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go
index c0484d6..3a186b4 100644
--- a/src/cmd/link/internal/ld/config.go
+++ b/src/cmd/link/internal/ld/config.go
@@ -58,8 +58,8 @@
 	return nil
 }
 
-func (mode *BuildMode) String() string {
-	switch *mode {
+func (mode BuildMode) String() string {
+	switch mode {
 	case BuildModeUnset:
 		return "" // avoid showing a default in usage message
 	case BuildModeExe:
@@ -75,7 +75,7 @@
 	case BuildModePlugin:
 		return "plugin"
 	}
-	return fmt.Sprintf("BuildMode(%d)", uint8(*mode))
+	return fmt.Sprintf("BuildMode(%d)", uint8(mode))
 }
 
 // LinkMode indicates whether an external linker is used for the final link.
@@ -217,7 +217,7 @@
 		}
 	case LinkExternal:
 		switch {
-		case buildcfg.GOARCH == "ppc64" && buildcfg.GOOS != "aix":
+		case buildcfg.GOARCH == "ppc64" && buildcfg.GOOS == "linux":
 			Exitf("external linking not supported for %s/ppc64", buildcfg.GOOS)
 		}
 	}
diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go
index 0550f07..f4ea840 100644
--- a/src/cmd/link/internal/ld/data.go
+++ b/src/cmd/link/internal/ld/data.go
@@ -56,10 +56,11 @@
 func isRuntimeDepPkg(pkg string) bool {
 	switch pkg {
 	case "runtime",
-		"sync/atomic",      // runtime may call to sync/atomic, due to go:linkname
-		"internal/abi",     // used by reflectcall (and maybe more)
-		"internal/bytealg", // for IndexByte
-		"internal/cpu":     // for cpu features
+		"sync/atomic",          // runtime may call to sync/atomic, due to go:linkname
+		"internal/abi",         // used by reflectcall (and maybe more)
+		"internal/bytealg",     // for IndexByte
+		"internal/chacha8rand", // for rand
+		"internal/cpu":         // for cpu features
 		return true
 	}
 	return strings.HasPrefix(pkg, "runtime/internal/") && !strings.HasSuffix(pkg, "_test")
@@ -84,14 +85,15 @@
 		}
 	}
 
-	if ctxt.IsARM() {
+	switch {
+	case ctxt.IsARM():
 		return n * 20 // Trampolines in ARM range from 3 to 5 instructions.
-	}
-	if ctxt.IsPPC64() {
-		return n * 16 // Trampolines in PPC64 are 4 instructions.
-	}
-	if ctxt.IsARM64() {
+	case ctxt.IsARM64():
 		return n * 12 // Trampolines in ARM64 are 3 instructions.
+	case ctxt.IsPPC64():
+		return n * 16 // Trampolines in PPC64 are 4 instructions.
+	case ctxt.IsRISCV64():
+		return n * 8 // Trampolines in RISCV64 are 2 instructions.
 	}
 	panic("unreachable")
 }
@@ -118,18 +120,21 @@
 			continue // something is wrong. skip it here and we'll emit a better error later
 		}
 
-		// RISC-V is only able to reach +/-1MiB via a JAL instruction,
-		// which we can readily exceed in the same package. As such, we
-		// need to generate trampolines when the address is unknown.
-		if ldr.SymValue(rs) == 0 && !ctxt.Target.IsRISCV64() && ldr.SymType(rs) != sym.SDYNIMPORT && ldr.SymType(rs) != sym.SUNDEFEXT {
+		if ldr.SymValue(rs) == 0 && ldr.SymType(rs) != sym.SDYNIMPORT && ldr.SymType(rs) != sym.SUNDEFEXT {
+			// Symbols in the same package are laid out together.
+			// Except that if SymPkg(s) == "", it is a host object symbol
+			// which may call an external symbol via PLT.
 			if ldr.SymPkg(s) != "" && ldr.SymPkg(rs) == ldr.SymPkg(s) {
-				// Symbols in the same package are laid out together.
-				// Except that if SymPkg(s) == "", it is a host object symbol
-				// which may call an external symbol via PLT.
-				continue
+				// RISC-V is only able to reach +/-1MiB via a JAL instruction.
+				// We need to generate a trampoline when an address is
+				// currently unknown.
+				if !ctxt.Target.IsRISCV64() {
+					continue
+				}
 			}
+			// Runtime packages are laid out together.
 			if isRuntimeDepPkg(ldr.SymPkg(s)) && isRuntimeDepPkg(ldr.SymPkg(rs)) {
-				continue // runtime packages are laid out together
+				continue
 			}
 		}
 		thearch.Trampoline(ctxt, ldr, ri, rs, s)
@@ -368,9 +373,11 @@
 						o = 0
 					}
 				} else if target.IsDarwin() {
-					if ldr.SymType(rs) != sym.SHOSTOBJ && ldr.SymType(s) != sym.SINITARR {
-						// ld-prime drops the offset in data for SINITARR. We need to use
-						// symbol-targeted relocation. See also machoreloc1.
+					if ldr.SymType(s).IsDWARF() {
+						// We generally use symbol-targeted relocations.
+						// DWARF tools seem to only handle section-targeted relocations,
+						// so generate section-targeted relocations in DWARF sections.
+						// See also machoreloc1.
 						o += ldr.SymValue(rs)
 					}
 				} else if target.IsWindows() {
@@ -580,19 +587,17 @@
 		case 1:
 			P[off] = byte(int8(o))
 		case 2:
-			if o != int64(int16(o)) {
-				st.err.Errorf(s, "relocation address for %s is too big: %#x", ldr.SymName(rs), o)
+			if (rt == objabi.R_PCREL || rt == objabi.R_CALL) && o != int64(int16(o)) {
+				st.err.Errorf(s, "pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), o)
+			} else if o != int64(int16(o)) && o != int64(uint16(o)) {
+				st.err.Errorf(s, "non-pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), uint64(o))
 			}
 			target.Arch.ByteOrder.PutUint16(P[off:], uint16(o))
 		case 4:
-			if rt == objabi.R_PCREL || rt == objabi.R_CALL {
-				if o != int64(int32(o)) {
-					st.err.Errorf(s, "pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), o)
-				}
-			} else {
-				if o != int64(int32(o)) && o != int64(uint32(o)) {
-					st.err.Errorf(s, "non-pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), uint64(o))
-				}
+			if (rt == objabi.R_PCREL || rt == objabi.R_CALL) && o != int64(int32(o)) {
+				st.err.Errorf(s, "pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), o)
+			} else if o != int64(int32(o)) && o != int64(uint32(o)) {
+				st.err.Errorf(s, "non-pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), uint64(o))
 			}
 			target.Arch.ByteOrder.PutUint32(P[off:], uint32(o))
 		case 8:
@@ -1073,7 +1078,7 @@
 			break
 		}
 		if val < addr {
-			ldr.Errorf(s, "phase error: addr=%#x but sym=%#x type=%v sect=%v", addr, val, ldr.SymType(s), ldr.SymSect(s).Name)
+			ldr.Errorf(s, "phase error: addr=%#x but val=%#x sym=%s type=%v sect=%v sect.addr=%#x", addr, val, ldr.SymName(s), ldr.SymType(s), ldr.SymSect(s).Name, ldr.SymSect(s).Vaddr)
 			errorexit()
 		}
 		if addr < val {
@@ -1157,11 +1162,11 @@
 }
 
 func pdatablk(ctxt *Link, out *OutBuf, addr int64, size int64) {
-	writeBlocks(ctxt, out, ctxt.outSem, ctxt.loader, []loader.Sym{sehp.pdata}, addr, size, zeros[:])
+	writeBlocks(ctxt, out, ctxt.outSem, ctxt.loader, sehp.pdata, addr, size, zeros[:])
 }
 
 func xdatablk(ctxt *Link, out *OutBuf, addr int64, size int64) {
-	writeBlocks(ctxt, out, ctxt.outSem, ctxt.loader, []loader.Sym{sehp.xdata}, addr, size, zeros[:])
+	writeBlocks(ctxt, out, ctxt.outSem, ctxt.loader, sehp.xdata, addr, size, zeros[:])
 }
 
 var covCounterDataStartOff, covCounterDataLen uint64
@@ -1424,6 +1429,20 @@
 		edata.SetType(sym.SXCOFFTOC)
 	}
 
+	noptrbss := ldr.CreateSymForUpdate("runtime.noptrbss", 0)
+	noptrbss.SetSize(8)
+	ldr.SetAttrSpecial(noptrbss.Sym(), false)
+
+	enoptrbss := ldr.CreateSymForUpdate("runtime.enoptrbss", 0)
+	ldr.SetAttrSpecial(enoptrbss.Sym(), false)
+
+	noptrdata := ldr.CreateSymForUpdate("runtime.noptrdata", 0)
+	noptrdata.SetSize(8)
+	ldr.SetAttrSpecial(noptrdata.Sym(), false)
+
+	enoptrdata := ldr.CreateSymForUpdate("runtime.enoptrdata", 0)
+	ldr.SetAttrSpecial(enoptrdata.Sym(), false)
+
 	types := ldr.CreateSymForUpdate("runtime.types", 0)
 	types.SetType(sym.STYPE)
 	types.SetSize(8)
@@ -2181,14 +2200,14 @@
 // allocateSEHSections allocates sym.Section objects for SEH
 // symbols, and assigns symbols to sections.
 func (state *dodataState) allocateSEHSections(ctxt *Link) {
-	if sehp.pdata > 0 {
-		sect := state.allocateDataSectionForSym(&Segpdata, sehp.pdata, 04)
-		state.assignDsymsToSection(sect, []loader.Sym{sehp.pdata}, sym.SRODATA, aligndatsize)
+	if len(sehp.pdata) > 0 {
+		sect := state.allocateNamedDataSection(&Segpdata, ".pdata", []sym.SymKind{}, 04)
+		state.assignDsymsToSection(sect, sehp.pdata, sym.SRODATA, aligndatsize)
 		state.checkdatsize(sym.SSEHSECT)
 	}
-	if sehp.xdata > 0 {
+	if len(sehp.xdata) > 0 {
 		sect := state.allocateNamedDataSection(&Segxdata, ".xdata", []sym.SymKind{}, 04)
-		state.assignDsymsToSection(sect, []loader.Sym{sehp.xdata}, sym.SRODATA, aligndatsize)
+		state.assignDsymsToSection(sect, sehp.xdata, sym.SRODATA, aligndatsize)
 		state.checkdatsize(sym.SSEHSECT)
 	}
 }
@@ -2232,10 +2251,12 @@
 		// end of their section.
 		if (ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin) || (ctxt.HeadType == objabi.Haix && ctxt.LinkMode == LinkExternal) {
 			switch ldr.SymName(s) {
-			case "runtime.text", "runtime.bss", "runtime.data", "runtime.types", "runtime.rodata":
+			case "runtime.text", "runtime.bss", "runtime.data", "runtime.types", "runtime.rodata",
+				"runtime.noptrdata", "runtime.noptrbss":
 				head = s
 				continue
-			case "runtime.etext", "runtime.ebss", "runtime.edata", "runtime.etypes", "runtime.erodata":
+			case "runtime.etext", "runtime.ebss", "runtime.edata", "runtime.etypes", "runtime.erodata",
+				"runtime.enoptrdata", "runtime.enoptrbss":
 				tail = s
 				continue
 			}
@@ -2419,8 +2440,8 @@
 		limit = 1
 	}
 
-	// First pass: assign addresses assuming the program is small and
-	// don't generate trampolines.
+	// First pass: assign addresses assuming the program is small and will
+	// not require trampoline generation.
 	big := false
 	for _, s := range ctxt.Textp {
 		sect, n, va = assignAddress(ctxt, sect, n, s, va, false, big)
@@ -2435,21 +2456,45 @@
 	if big {
 		// reset addresses
 		for _, s := range ctxt.Textp {
-			if ldr.OuterSym(s) != 0 || s == text {
-				continue
-			}
-			oldv := ldr.SymValue(s)
-			for sub := s; sub != 0; sub = ldr.SubSym(sub) {
-				ldr.SetSymValue(sub, ldr.SymValue(sub)-oldv)
+			if s != text {
+				resetAddress(ctxt, s)
 			}
 		}
 		va = start
 
 		ntramps := 0
-		for _, s := range ctxt.Textp {
+		var curPkg string
+		for i, s := range ctxt.Textp {
+			// When we find the first symbol in a package, perform a
+			// single iteration that assigns temporary addresses to all
+			// of the text in the same package, using the maximum possible
+			// number of trampolines. This allows for better decisions to
+			// be made regarding reachability and the need for trampolines.
+			if symPkg := ldr.SymPkg(s); symPkg != "" && curPkg != symPkg {
+				curPkg = symPkg
+				vaTmp := va
+				for j := i; j < len(ctxt.Textp); j++ {
+					curSym := ctxt.Textp[j]
+					if symPkg := ldr.SymPkg(curSym); symPkg == "" || curPkg != symPkg {
+						break
+					}
+					// We do not pass big to assignAddress here, as this
+					// can result in side effects such as section splitting.
+					sect, n, vaTmp = assignAddress(ctxt, sect, n, curSym, vaTmp, false, false)
+					vaTmp += maxSizeTrampolines(ctxt, ldr, curSym, false)
+				}
+			}
+
+			// Reset address for current symbol.
+			if s != text {
+				resetAddress(ctxt, s)
+			}
+
+			// Assign actual address for current symbol.
 			sect, n, va = assignAddress(ctxt, sect, n, s, va, false, big)
 
-			trampoline(ctxt, s) // resolve jumps, may add trampolines if jump too far
+			// Resolve jumps, adding trampolines if they are needed.
+			trampoline(ctxt, s)
 
 			// lay down trampolines after each function
 			for ; ntramps < len(ctxt.tramps); ntramps++ {
@@ -2597,6 +2642,17 @@
 	return sect, n, va
 }
 
+func resetAddress(ctxt *Link, s loader.Sym) {
+	ldr := ctxt.loader
+	if ldr.OuterSym(s) != 0 {
+		return
+	}
+	oldv := ldr.SymValue(s)
+	for sub := s; sub != 0; sub = ldr.SubSym(sub) {
+		ldr.SetSymValue(sub, ldr.SymValue(sub)-oldv)
+	}
+}
+
 // Return whether we may need to split text sections.
 //
 // On PPC64x, when external linking, a text section should not be
@@ -2658,7 +2714,7 @@
 		//
 		// Ideally the last page of the text segment would not be
 		// writable even for this short period.
-		va = uint64(Rnd(int64(va), int64(*FlagRound)))
+		va = uint64(Rnd(int64(va), *FlagRound))
 
 		order = append(order, &Segrodata)
 		Segrodata.Rwx = 04
@@ -2674,7 +2730,7 @@
 	if len(Segrelrodata.Sections) > 0 {
 		// align to page boundary so as not to mix
 		// rodata, rel-ro data, and executable text.
-		va = uint64(Rnd(int64(va), int64(*FlagRound)))
+		va = uint64(Rnd(int64(va), *FlagRound))
 		if ctxt.HeadType == objabi.Haix {
 			// Relro data are inside data segment on AIX.
 			va += uint64(XCOFFDATABASE) - uint64(XCOFFTEXTBASE)
@@ -2692,7 +2748,7 @@
 		Segrelrodata.Length = va - Segrelrodata.Vaddr
 	}
 
-	va = uint64(Rnd(int64(va), int64(*FlagRound)))
+	va = uint64(Rnd(int64(va), *FlagRound))
 	if ctxt.HeadType == objabi.Haix && len(Segrelrodata.Sections) == 0 {
 		// Data sections are moved to an unreachable segment
 		// to ensure that they are position-independent.
@@ -2737,7 +2793,7 @@
 	Segdata.Filelen = bss.Vaddr - Segdata.Vaddr
 
 	if len(Segpdata.Sections) > 0 {
-		va = uint64(Rnd(int64(va), int64(*FlagRound)))
+		va = uint64(Rnd(int64(va), *FlagRound))
 		order = append(order, &Segpdata)
 		Segpdata.Rwx = 04
 		Segpdata.Vaddr = va
@@ -2752,7 +2808,7 @@
 	}
 
 	if len(Segxdata.Sections) > 0 {
-		va = uint64(Rnd(int64(va), int64(*FlagRound)))
+		va = uint64(Rnd(int64(va), *FlagRound))
 		order = append(order, &Segxdata)
 		Segxdata.Rwx = 04
 		Segxdata.Vaddr = va
@@ -2766,7 +2822,7 @@
 		Segxdata.Length = va - Segxdata.Vaddr
 	}
 
-	va = uint64(Rnd(int64(va), int64(*FlagRound)))
+	va = uint64(Rnd(int64(va), *FlagRound))
 	order = append(order, &Segdwarf)
 	Segdwarf.Rwx = 06
 	Segdwarf.Vaddr = va
@@ -2817,7 +2873,12 @@
 		}
 	}
 
-	for _, s := range []loader.Sym{sehp.pdata, sehp.xdata} {
+	for _, s := range sehp.pdata {
+		if sect := ldr.SymSect(s); sect != nil {
+			ldr.AddToSymValue(s, int64(sect.Vaddr))
+		}
+	}
+	for _, s := range sehp.xdata {
 		if sect := ldr.SymSect(s); sect != nil {
 			ldr.AddToSymValue(s, int64(sect.Vaddr))
 		}
@@ -2943,7 +3004,7 @@
 				// aligned, the following rounding
 				// should ensure that this segment's
 				// VA ≡ Fileoff mod FlagRound.
-				seg.Fileoff = uint64(Rnd(int64(prev.Fileoff+prev.Filelen), int64(*FlagRound)))
+				seg.Fileoff = uint64(Rnd(int64(prev.Fileoff+prev.Filelen), *FlagRound))
 				if seg.Vaddr%uint64(*FlagRound) != seg.Fileoff%uint64(*FlagRound) {
 					Exitf("bad segment rounding (Vaddr=%#x Fileoff=%#x FlagRound=%#x)", seg.Vaddr, seg.Fileoff, *FlagRound)
 				}
diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go
index d9e0d62..70b4a7c 100644
--- a/src/cmd/link/internal/ld/deadcode.go
+++ b/src/cmd/link/internal/ld/deadcode.go
@@ -51,6 +51,7 @@
 			s := loader.Sym(i)
 			d.mark(s, 0)
 		}
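+		// Also explicitly mark the synthesized inittask list root; it is
+		// created by the linker rather than loaded from an object file.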
+		d.mark(d.ctxt.mainInittasks, 0)
 		return
 	}
 
@@ -123,6 +124,8 @@
 	for !d.wq.empty() {
 		symIdx := d.wq.pop()
 
+		// Methods may be called via reflection. Give up on static analysis,
+		// and mark all exported methods of all reachable types as reachable.
 		d.reflectSeen = d.reflectSeen || d.ldr.IsReflectMethod(symIdx)
 
 		isgotype := d.ldr.IsGoType(symIdx)
@@ -228,7 +231,7 @@
 				}
 				d.ifaceMethod[m] = true
 				continue
-			case objabi.R_USEGENERICIFACEMETHOD:
+			case objabi.R_USENAMEDMETHOD:
 				name := d.decodeGenericIfaceMethod(d.ldr, r.Sym())
 				if d.ctxt.Debugvlog > 1 {
 					d.ctxt.Logf("reached generic iface method: %s\n", name)
@@ -407,13 +410,20 @@
 // against the interface method signatures, if it matches it is marked
 // as reachable. This is extremely conservative, but easy and correct.
 //
-// The third case is handled by looking to see if any of:
-//   - reflect.Value.Method or MethodByName is reachable
-//   - reflect.Type.Method or MethodByName is called (through the
-//     REFLECTMETHOD attribute marked by the compiler).
+// The third case is handled by looking for functions that the compiler flagged
+// as REFLECTMETHOD. REFLECTMETHOD on a function F means that F does a method
+// lookup with reflection, but the compiler was not able to statically determine
+// the method name.
 //
-// If any of these happen, all bets are off and all exported methods
-// of reachable types are marked reachable.
+// All functions that call reflect.Value.Method or reflect.Type.Method are REFLECTMETHODs.
+// Functions that call reflect.Value.MethodByName or reflect.Type.MethodByName with
+// a non-constant argument are REFLECTMETHODs, too. If we find a REFLECTMETHOD,
+// we give up on static analysis, and mark all exported methods of all reachable
+// types as reachable.
+//
+// If the argument to MethodByName is a compile-time constant, the compiler
+// emits a relocation with the method name. Matching methods are kept in all
+// reachable types.
 //
 // Any unreached text symbols are removed from ctxt.Textp.
 func deadcode(ctxt *Link) {
@@ -422,9 +432,6 @@
 	d.init()
 	d.flood()
 
-	methSym := ldr.Lookup("reflect.Value.Method", abiInternalVer)
-	methByNameSym := ldr.Lookup("reflect.Value.MethodByName", abiInternalVer)
-
 	if ctxt.DynlinkingGo() {
 		// Exported methods may satisfy interfaces we don't know
 		// about yet when dynamically linking.
@@ -432,11 +439,6 @@
 	}
 
 	for {
-		// Methods might be called via reflection. Give up on
-		// static analysis, mark all exported methods of
-		// all reachable types as reachable.
-		d.reflectSeen = d.reflectSeen || (methSym != 0 && ldr.AttrReachable(methSym)) || (methByNameSym != 0 && ldr.AttrReachable(methByNameSym))
-
 		// Mark all methods that could satisfy a discovered
 		// interface as reachable. We recheck old marked interfaces
 		// as new types (with new methods) may have been discovered
diff --git a/src/cmd/link/internal/ld/deadcode_test.go b/src/cmd/link/internal/ld/deadcode_test.go
index 633a0d0..6962a4e 100644
--- a/src/cmd/link/internal/ld/deadcode_test.go
+++ b/src/cmd/link/internal/ld/deadcode_test.go
@@ -27,6 +27,9 @@
 		{"ifacemethod2", []string{"main.T.M"}, nil},
 		{"ifacemethod3", []string{"main.S.M"}, nil},
 		{"ifacemethod4", nil, []string{"main.T.M"}},
+		{"ifacemethod5", []string{"main.S.M"}, nil},
+		{"ifacemethod6", []string{"main.S.M"}, []string{"main.S.N"}},
+		{"structof_funcof", []string{"main.S.M"}, []string{"main.S.N"}},
 		{"globalmap", []string{"main.small", "main.effect"},
 			[]string{"main.large"}},
 	}
diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go
index a601fe5..17f2803 100644
--- a/src/cmd/link/internal/ld/dwarf.go
+++ b/src/cmd/link/internal/ld/dwarf.go
@@ -73,15 +73,14 @@
 // DwAttr objects contain references to symbols via this type.
 type dwSym loader.Sym
 
-func (s dwSym) Length(dwarfContext interface{}) int64 {
-	l := dwarfContext.(dwctxt).ldr
-	return int64(len(l.Data(loader.Sym(s))))
-}
-
 func (c dwctxt) PtrSize() int {
 	return c.arch.PtrSize
 }
 
+func (c dwctxt) Size(s dwarf.Sym) int64 {
+	return int64(len(c.ldr.Data(loader.Sym(s.(dwSym)))))
+}
+
 func (c dwctxt) AddInt(s dwarf.Sym, size int, i int64) {
 	ds := loader.Sym(s.(dwSym))
 	dsu := c.ldr.MakeSymbolUpdater(ds)
@@ -154,10 +153,6 @@
 
 // At the moment these interfaces are only used in the compiler.
 
-func (c dwctxt) AddFileRef(s dwarf.Sym, f interface{}) {
-	panic("should be used only in the compiler")
-}
-
 func (c dwctxt) CurrentOffset(s dwarf.Sym) int64 {
 	panic("should be used only in the compiler")
 }
@@ -753,6 +748,7 @@
 	// pointers of slices. Link to the ones we can find.
 	gts := d.ldr.Lookup("type:"+ptrname, 0)
 	if gts != 0 && d.ldr.AttrReachable(gts) {
+		newattr(pdie, dwarf.DW_AT_go_kind, dwarf.DW_CLS_CONSTANT, int64(objabi.KindPtr), 0)
 		newattr(pdie, dwarf.DW_AT_go_runtime_type, dwarf.DW_CLS_GO_TYPEREF, 0, dwSym(gts))
 	}
 
@@ -1145,9 +1141,7 @@
 }
 
 func expandFile(fname string) string {
-	if strings.HasPrefix(fname, src.FileSymPrefix) {
-		fname = fname[len(src.FileSymPrefix):]
-	}
+	fname = strings.TrimPrefix(fname, src.FileSymPrefix)
 	return expandGoroot(fname)
 }
 
@@ -1517,16 +1511,6 @@
 	COMPUNITHEADERSIZE = 4 + 2 + 4 + 1
 )
 
-// appendSyms appends the syms from 'src' into 'syms' and returns the
-// result. This can go away once we do away with sym.LoaderSym
-// entirely.
-func appendSyms(syms []loader.Sym, src []sym.LoaderSym) []loader.Sym {
-	for _, s := range src {
-		syms = append(syms, loader.Sym(s))
-	}
-	return syms
-}
-
 func (d *dwctxt) writeUnitInfo(u *sym.CompilationUnit, abbrevsym loader.Sym, infoEpilog loader.Sym) []loader.Sym {
 	syms := []loader.Sym{}
 	if len(u.Textp) == 0 && u.DWInfo.Child == nil && len(u.VarDIEs) == 0 {
@@ -1555,12 +1539,12 @@
 	// This is an under-estimate; more will be needed for type DIEs.
 	cu := make([]loader.Sym, 0, len(u.AbsFnDIEs)+len(u.FuncDIEs))
 	cu = append(cu, s)
-	cu = appendSyms(cu, u.AbsFnDIEs)
-	cu = appendSyms(cu, u.FuncDIEs)
+	cu = append(cu, u.AbsFnDIEs...)
+	cu = append(cu, u.FuncDIEs...)
 	if u.Consts != 0 {
 		cu = append(cu, loader.Sym(u.Consts))
 	}
-	cu = appendSyms(cu, u.VarDIEs)
+	cu = append(cu, u.VarDIEs...)
 	var cusize int64
 	for _, child := range cu {
 		cusize += int64(len(d.ldr.Data(child)))
@@ -1633,9 +1617,6 @@
 	if *FlagW { // disable dwarf
 		return false
 	}
-	if *FlagS && ctxt.HeadType != objabi.Hdarwin {
-		return false
-	}
 	if ctxt.HeadType == objabi.Hplan9 || ctxt.HeadType == objabi.Hjs || ctxt.HeadType == objabi.Hwasip1 {
 		return false
 	}
diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go
index 6ca2a84..e431427 100644
--- a/src/cmd/link/internal/ld/dwarf_test.go
+++ b/src/cmd/link/internal/ld/dwarf_test.go
@@ -1985,17 +1985,7 @@
 	}
 }
 
-func TestZeroSizedVariable(t *testing.T) {
-	testenv.MustHaveGoBuild(t)
-
-	mustHaveDWARF(t)
-	t.Parallel()
-
-	// This test verifies that the compiler emits DIEs for zero sized variables
-	// (for example variables of type 'struct {}').
-	// See go.dev/issues/54615.
-
-	const prog = `
+const zeroSizedVarProg = `
 package main
 
 import (
@@ -2008,10 +1998,24 @@
 }
 `
 
+func TestZeroSizedVariable(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+
+	mustHaveDWARF(t)
+	t.Parallel()
+
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	// This test verifies that the compiler emits DIEs for zero sized variables
+	// (for example variables of type 'struct {}').
+	// See go.dev/issues/54615.
+
 	for _, opt := range []string{NoOpt, DefaultOpt} {
 		opt := opt
 		t.Run(opt, func(t *testing.T) {
-			_, ex := gobuildAndExamine(t, prog, opt)
+			_, ex := gobuildAndExamine(t, zeroSizedVarProg, opt)
 
 			// Locate the main.zeroSizedVariable DIE
 			abcs := ex.Named("zeroSizedVariable")
@@ -2024,3 +2028,46 @@
 		})
 	}
 }
+
+func TestConsistentGoKindAndRuntimeType(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+
+	mustHaveDWARF(t)
+	t.Parallel()
+
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	// Ensure that if we emit a "go runtime type" attr on a type DIE,
+	// we also include the "go kind" attribute. See issue #64231.
+	_, ex := gobuildAndExamine(t, zeroSizedVarProg, DefaultOpt)
+
+	// Walk all dies.
+	typesChecked := 0
+	failures := 0
+	for _, die := range ex.DIEs() {
+		// For any type DIE with DW_AT_go_runtime_type set...
+		rtt, hasRT := die.Val(intdwarf.DW_AT_go_runtime_type).(uint64)
+		if !hasRT || rtt == 0 {
+			continue
+		}
+		typesChecked++
+		// ... we want to see a meaningful DW_AT_go_kind value.
+		if val, ok := die.Val(intdwarf.DW_AT_go_kind).(int64); !ok || val == 0 {
+			failures++
+			// dump DIEs for first 10 failures.
+			if failures <= 10 {
+				idx := ex.IdxFromOffset(die.Offset)
+				t.Logf("type DIE has DW_AT_go_runtime_type but invalid DW_AT_go_kind:\n")
+				ex.DumpEntry(idx, false, 0)
+			}
+			t.Errorf("bad type DIE at offset %d\n", die.Offset)
+		}
+	}
+	if typesChecked == 0 {
+		t.Fatalf("something went wrong, 0 types checked")
+	} else {
+		t.Logf("%d types checked\n", typesChecked)
+	}
+}
diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go
index 713f773..be9e229 100644
--- a/src/cmd/link/internal/ld/elf.go
+++ b/src/cmd/link/internal/ld/elf.go
@@ -155,7 +155,7 @@
  * marshal a 32-bit representation from the 64-bit structure.
  */
 
-var Elfstrdat []byte
+var elfstrdat, elfshstrdat []byte
 
 /*
  * Total amount of space to reserve at the start of the file
@@ -208,7 +208,7 @@
 
 	Reloc1    func(*Link, *OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int, int64) bool
 	RelocSize uint32 // size of an ELF relocation record, must match Reloc1.
-	SetupPLT  func(ctxt *Link, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym)
+	SetupPLT  func(ctxt *Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym)
 
 	// DynamicReadOnly can be set to true to make the .dynamic
 	// section read-only. By default it is writable.
@@ -243,7 +243,7 @@
 	switch ctxt.Arch.Family {
 	// 64-bit architectures
 	case sys.PPC64, sys.S390X:
-		if ctxt.Arch.ByteOrder == binary.BigEndian {
+		if ctxt.Arch.ByteOrder == binary.BigEndian && ctxt.HeadType != objabi.Hopenbsd {
 			ehdr.Flags = 1 /* Version 1 ABI */
 		} else {
 			ehdr.Flags = 2 /* Version 2 ABI */
@@ -806,6 +806,18 @@
 }
 
 func addbuildinfo(val string) {
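+	// A val of "gobuildid" derives the GNU build ID note from the Go build
+	// ID: hash it and use the first 20 bytes as the note payload.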
+	if val == "gobuildid" {
+		buildID := *flagBuildid
+		if buildID == "" {
+			Exitf("-B gobuildid requires a Go build ID supplied via -buildid")
+		}
+
+		hashedBuildID := notsha256.Sum256([]byte(buildID))
+		buildinfo = hashedBuildID[:20]
+
+		return
+	}
+
 	if !strings.HasPrefix(val, "0x") {
 		Exitf("-B argument must start with 0x: %s", val)
 	}
@@ -1386,12 +1398,16 @@
 	ldr := ctxt.loader
 
 	/* predefine strings we need for section headers */
-	shstrtab := ldr.CreateSymForUpdate(".shstrtab", 0)
 
-	shstrtab.SetType(sym.SELFROSECT)
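+	// Accumulate section header strings in the elfshstrdat byte slice rather
+	// than in a loaded symbol, so .shstrtab does not end up inside any
+	// loadable segment (see issue 62600).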
+	addshstr := func(s string) int {
+		off := len(elfshstrdat)
+		elfshstrdat = append(elfshstrdat, s...)
+		elfshstrdat = append(elfshstrdat, 0)
+		return off
+	}
 
 	shstrtabAddstring := func(s string) {
-		off := shstrtab.Addstring(s)
+		off := addshstr(s)
 		elfsetstring(ctxt, 0, s, int(off))
 	}
 
@@ -1491,6 +1507,8 @@
 	if !*FlagS {
 		shstrtabAddstring(".symtab")
 		shstrtabAddstring(".strtab")
+	}
+	if !*FlagW {
 		dwarfaddshstrings(ctxt, shstrtabAddstring)
 	}
 
@@ -1583,7 +1601,7 @@
 			// S390X uses .got instead of .got.plt
 			gotplt = got
 		}
-		thearch.ELF.SetupPLT(ctxt, plt, gotplt, dynamic.Sym())
+		thearch.ELF.SetupPLT(ctxt, ctxt.loader, plt, gotplt, dynamic.Sym())
 
 		/*
 		 * .dynamic table
@@ -1744,12 +1762,16 @@
 
 func asmbElf(ctxt *Link) {
 	var symo int64
-	if !*FlagS {
-		symo = int64(Segdwarf.Fileoff + Segdwarf.Filelen)
-		symo = Rnd(symo, int64(ctxt.Arch.PtrSize))
+	symo = int64(Segdwarf.Fileoff + Segdwarf.Filelen)
+	symo = Rnd(symo, int64(ctxt.Arch.PtrSize))
+	ctxt.Out.SeekSet(symo)
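+	// Even with -s, the section header string table is written; -s drops
+	// only .symtab and .strtab here.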
+	if *FlagS {
+		ctxt.Out.Write(elfshstrdat)
+	} else {
 		ctxt.Out.SeekSet(symo)
 		asmElfSym(ctxt)
-		ctxt.Out.Write(Elfstrdat)
+		ctxt.Out.Write(elfstrdat)
+		ctxt.Out.Write(elfshstrdat)
 		if ctxt.IsExternal() {
 			elfEmitReloc(ctxt)
 		}
@@ -2153,9 +2175,6 @@
 
 elfobj:
 	sh := elfshname(".shstrtab")
-	sh.Type = uint32(elf.SHT_STRTAB)
-	sh.Addralign = 1
-	shsym(sh, ldr, ldr.Lookup(".shstrtab", 0))
 	eh.Shstrndx = uint16(sh.shnum)
 
 	if ctxt.IsMIPS() {
@@ -2182,6 +2201,7 @@
 		elfshname(".symtab")
 		elfshname(".strtab")
 	}
+	elfshname(".shstrtab")
 
 	for _, sect := range Segtext.Sections {
 		elfshbits(ctxt.LinkMode, sect)
@@ -2224,6 +2244,7 @@
 		sh.Flags = 0
 	}
 
+	var shstroff uint64
 	if !*FlagS {
 		sh := elfshname(".symtab")
 		sh.Type = uint32(elf.SHT_SYMTAB)
@@ -2237,10 +2258,19 @@
 		sh = elfshname(".strtab")
 		sh.Type = uint32(elf.SHT_STRTAB)
 		sh.Off = uint64(symo) + uint64(symSize)
-		sh.Size = uint64(len(Elfstrdat))
+		sh.Size = uint64(len(elfstrdat))
 		sh.Addralign = 1
+		shstroff = sh.Off + sh.Size
+	} else {
+		shstroff = uint64(symo)
 	}
 
+	sh = elfshname(".shstrtab")
+	sh.Type = uint32(elf.SHT_STRTAB)
+	sh.Off = shstroff
+	sh.Size = uint64(len(elfshstrdat))
+	sh.Addralign = 1
+
 	/* Main header */
 	copy(eh.Ident[:], elf.ELFMAG)
 
diff --git a/src/cmd/link/internal/ld/elf_test.go b/src/cmd/link/internal/ld/elf_test.go
index 8af0ca1..e535af6 100644
--- a/src/cmd/link/internal/ld/elf_test.go
+++ b/src/cmd/link/internal/ld/elf_test.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build cgo
-// +build cgo
 
 package ld
 
@@ -125,3 +124,61 @@
 		t.Errorf("Got %d entries for `libc.so`, want %d", got, want)
 	}
 }
+
+func TestShStrTabAttributesIssue62600(t *testing.T) {
+	t.Parallel()
+	testenv.MustHaveGoBuild(t)
+	dir := t.TempDir()
+
+	const prog = `
+package main
+
+func main() {
+	println("whee")
+}
+`
+	src := filepath.Join(dir, "issue62600.go")
+	if err := os.WriteFile(src, []byte(prog), 0666); err != nil {
+		t.Fatal(err)
+	}
+
+	binFile := filepath.Join(dir, "issue62600")
+	cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", binFile, src)
+	if out, err := cmd.CombinedOutput(); err != nil {
+		t.Fatalf("%v: %v:\n%s", cmd.Args, err, out)
+	}
+
+	fi, err := os.Open(binFile)
+	if err != nil {
+		t.Fatalf("failed to open built file: %v", err)
+	}
+	defer fi.Close()
+
+	elfFile, err := elf.NewFile(fi)
+	if err != nil {
+		t.Skip("the system may not support ELF; skipping")
+	}
+
+	section := elfFile.Section(".shstrtab")
+	if section == nil {
+		t.Fatal("no .shstrtab")
+	}
+
+	// The .shstrtab section should have a zero address, non-zero
+	// size, no ALLOC flag, and the offset should not fall into any of
+	// the segments defined by the program headers.
+	if section.Addr != 0 {
+		t.Fatalf("expected Addr == 0 for .shstrtab got %x", section.Addr)
+	}
+	if section.Size == 0 {
+		t.Fatal("expected nonzero Size for .shstrtab got 0")
+	}
+	if section.Flags&elf.SHF_ALLOC != 0 {
+		t.Fatal("expected zero alloc flag got nonzero for .shstrtab")
+	}
+	for idx, p := range elfFile.Progs {
+		if section.Offset >= p.Off && section.Offset < p.Off+p.Filesz {
+			t.Fatalf("badly formed .shstrtab, is contained in segment %d", idx)
+		}
+	}
+}
diff --git a/src/cmd/link/internal/ld/execarchive.go b/src/cmd/link/internal/ld/execarchive.go
index a9376e9..7a19567 100644
--- a/src/cmd/link/internal/ld/execarchive.go
+++ b/src/cmd/link/internal/ld/execarchive.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !wasm && !windows
-// +build !wasm,!windows
 
 package ld
 
diff --git a/src/cmd/link/internal/ld/execarchive_noexec.go b/src/cmd/link/internal/ld/execarchive_noexec.go
index 5e1f266..ada3e9f 100644
--- a/src/cmd/link/internal/ld/execarchive_noexec.go
+++ b/src/cmd/link/internal/ld/execarchive_noexec.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build wasm || windows
-// +build wasm windows
 
 package ld
 
diff --git a/src/cmd/link/internal/ld/go.go b/src/cmd/link/internal/ld/go.go
index ef73924..a2db0bf 100644
--- a/src/cmd/link/internal/ld/go.go
+++ b/src/cmd/link/internal/ld/go.go
@@ -50,11 +50,7 @@
 	// process header lines
 	for data != "" {
 		var line string
-		if i := strings.Index(data, "\n"); i >= 0 {
-			line, data = data[:i], data[i+1:]
-		} else {
-			line, data = data, ""
-		}
+		line, data, _ = strings.Cut(data, "\n")
 		if line == "main" {
 			lib.Main = true
 		}
@@ -141,8 +137,8 @@
 			}
 
 			q := ""
-			if i := strings.Index(remote, "#"); i >= 0 {
-				remote, q = remote[:i], remote[i+1:]
+			if before, after, found := strings.Cut(remote, "#"); found {
+				remote, q = before, after
 			}
 			s := l.LookupOrCreateSym(local, 0)
 			st := l.SymType(s)
diff --git a/src/cmd/link/internal/ld/inittask.go b/src/cmd/link/internal/ld/inittask.go
index 0699107..c4c5beb 100644
--- a/src/cmd/link/internal/ld/inittask.go
+++ b/src/cmd/link/internal/ld/inittask.go
@@ -41,15 +41,21 @@
 	switch ctxt.BuildMode {
 	case BuildModeExe, BuildModePIE, BuildModeCArchive, BuildModeCShared:
 		// Normally the inittask list will be run on program startup.
-		ctxt.mainInittasks = ctxt.inittaskSym("main..inittask", "go:main.inittasks")
+		ctxt.mainInittasks = ctxt.inittaskSym([]string{"main..inittask"}, "go:main.inittasks")
 	case BuildModePlugin:
 		// For plugins, the list will be run on plugin load.
-		ctxt.mainInittasks = ctxt.inittaskSym(fmt.Sprintf("%s..inittask", objabi.PathToPrefix(*flagPluginPath)), "go:plugin.inittasks")
+		ctxt.mainInittasks = ctxt.inittaskSym([]string{fmt.Sprintf("%s..inittask", objabi.PathToPrefix(*flagPluginPath))}, "go:plugin.inittasks")
 		// Make symbol local so multiple plugins don't clobber each other's inittask list.
 		ctxt.loader.SetAttrLocal(ctxt.mainInittasks, true)
 	case BuildModeShared:
-		// Nothing to do. The inittask list will be built by
-		// the final build (with the -linkshared option).
+		// For a shared library, all packages are roots.
+		var roots []string
+		for _, lib := range ctxt.Library {
+			roots = append(roots, fmt.Sprintf("%s..inittask", objabi.PathToPrefix(lib.Pkg)))
+		}
+		ctxt.mainInittasks = ctxt.inittaskSym(roots, "go:shlib.inittasks")
+		// Make symbol local so multiple shared libraries don't clobber each other's inittask list.
+		ctxt.loader.SetAttrLocal(ctxt.mainInittasks, true)
 	default:
 		Exitf("unhandled build mode %d", ctxt.BuildMode)
 	}
@@ -58,7 +64,7 @@
 	// initialize the runtime_inittasks variable.
 	ldr := ctxt.loader
 	if ldr.Lookup("runtime.runtime_inittasks", 0) != 0 {
-		t := ctxt.inittaskSym("runtime..inittask", "go:runtime.inittasks")
+		t := ctxt.inittaskSym([]string{"runtime..inittask"}, "go:runtime.inittasks")
 
 		// This slice header is already defined in runtime/proc.go, so we update it here with new contents.
 		sh := ldr.Lookup("runtime.runtime_inittasks", 0)
@@ -72,11 +78,17 @@
 }
 
 // inittaskSym builds a symbol containing pointers to all the inittasks
-// that need to be run, given the root inittask symbol.
-func (ctxt *Link) inittaskSym(rootName, symName string) loader.Sym {
+// that need to be run, given a list of root inittask symbols.
+func (ctxt *Link) inittaskSym(rootNames []string, symName string) loader.Sym {
 	ldr := ctxt.loader
-	root := ldr.Lookup(rootName, 0)
-	if root == 0 {
+	var roots []loader.Sym
+	for _, n := range rootNames {
+		p := ldr.Lookup(n, 0)
+		if p != 0 {
+			roots = append(roots, p)
+		}
+	}
+	if len(roots) == 0 {
 		// Nothing to do
 		return 0
 	}
@@ -98,13 +110,15 @@
 	// p's direct imports that have not yet been scheduled.
 	m := map[loader.Sym]int{}
 
-	// Find all reachable inittask records from the root.
+	// Find all reachable inittask records from the roots.
 	// Keep track of the dependency edges between them in edges.
 	// Keep track of how many imports each package has in m.
 	// q is the list of found but not yet explored packages.
 	var q []loader.Sym
-	m[root] = 0
-	q = append(q, root)
+	for _, p := range roots {
+		m[p] = 0
+		q = append(q, p)
+	}
 	for len(q) > 0 {
 		x := q[len(q)-1]
 		q = q[:len(q)-1]
diff --git a/src/cmd/link/internal/ld/ld.go b/src/cmd/link/internal/ld/ld.go
index d416571..77fde0b 100644
--- a/src/cmd/link/internal/ld/ld.go
+++ b/src/cmd/link/internal/ld/ld.go
@@ -62,15 +62,13 @@
 			continue
 		}
 
-		var verb, args string
-		if i := strings.Index(line, " "); i < 0 {
-			verb = line
-		} else {
-			verb, args = line[:i], strings.TrimSpace(line[i+1:])
+		verb, args, found := strings.Cut(line, " ")
+		if found {
+			args = strings.TrimSpace(args)
 		}
-		var before, after string
-		if i := strings.Index(args, "="); i >= 0 {
-			before, after = args[:i], args[i+1:]
+		before, after, exist := strings.Cut(args, "=")
+		if !exist {
+			before = ""
 		}
 		switch verb {
 		default:
diff --git a/src/cmd/link/internal/ld/ld_test.go b/src/cmd/link/internal/ld/ld_test.go
index a7a6082..1767667 100644
--- a/src/cmd/link/internal/ld/ld_test.go
+++ b/src/cmd/link/internal/ld/ld_test.go
@@ -344,3 +344,71 @@
 		})
 	}
 }
+
+func TestRISCVTrampolines(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+	t.Parallel()
+
+	tmpDir := t.TempDir()
+	tmpFile := filepath.Join(tmpDir, "x.s")
+
+	// Calling b from a or c should not use trampolines; however,
+	// calling from d to a will require one.
+	buf := new(bytes.Buffer)
+	fmt.Fprintf(buf, "TEXT a(SB),$0-0\n")
+	for i := 0; i < 1<<17; i++ {
+		fmt.Fprintf(buf, "\tADD $0, X0, X0\n")
+	}
+	fmt.Fprintf(buf, "\tCALL b(SB)\n")
+	fmt.Fprintf(buf, "\tRET\n")
+	fmt.Fprintf(buf, "TEXT b(SB),$0-0\n")
+	fmt.Fprintf(buf, "\tRET\n")
+	fmt.Fprintf(buf, "TEXT c(SB),$0-0\n")
+	fmt.Fprintf(buf, "\tCALL b(SB)\n")
+	fmt.Fprintf(buf, "\tRET\n")
+	fmt.Fprintf(buf, "TEXT ·d(SB),0,$0-0\n")
+	for i := 0; i < 1<<17; i++ {
+		fmt.Fprintf(buf, "\tADD $0, X0, X0\n")
+	}
+	fmt.Fprintf(buf, "\tCALL a(SB)\n")
+	fmt.Fprintf(buf, "\tCALL c(SB)\n")
+	fmt.Fprintf(buf, "\tRET\n")
+	if err := os.WriteFile(tmpFile, buf.Bytes(), 0644); err != nil {
+		t.Fatalf("Failed to write assembly file: %v", err)
+	}
+
+	if err := os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module riscvtramp"), 0644); err != nil {
+		t.Fatalf("Failed to write file: %v\n", err)
+	}
+	main := `package main
+func main() {
+	d()
+}
+
+func d()
+`
+	if err := os.WriteFile(filepath.Join(tmpDir, "x.go"), []byte(main), 0644); err != nil {
+		t.Fatalf("failed to write main: %v\n", err)
+	}
+	cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-linkmode=internal")
+	cmd.Dir = tmpDir
+	cmd.Env = append(os.Environ(), "GOARCH=riscv64", "GOOS=linux")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("Build failed: %v, output: %s", err, out)
+	}
+
+	// Check what trampolines exist.
+	cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", filepath.Join(tmpDir, "riscvtramp"))
+	cmd.Env = append(os.Environ(), "GOARCH=riscv64", "GOOS=linux")
+	out, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("nm failure: %s\n%s\n", err, string(out))
+	}
+	if !bytes.Contains(out, []byte(" T a-tramp0")) {
+		t.Errorf("Trampoline a-tramp0 is missing")
+	}
+	if bytes.Contains(out, []byte(" T b-tramp0")) {
+		t.Errorf("Trampoline b-tramp0 exists unnecessarily")
+	}
+}
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index 5e5d255..eab74dc 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -44,6 +44,7 @@
 	"os/exec"
 	"path/filepath"
 	"runtime"
+	"sort"
 	"strings"
 	"sync"
 
@@ -877,7 +878,17 @@
 			sb := ctxt.loader.MakeSymbolUpdater(goarm)
 			sb.SetType(sym.SDATA)
 			sb.SetSize(0)
-			sb.AddUint8(uint8(buildcfg.GOARM))
+			sb.AddUint8(uint8(buildcfg.GOARM.Version))
+
+			goarmsoftfp := ctxt.loader.LookupOrCreateSym("runtime.goarmsoftfp", 0)
+			sb2 := ctxt.loader.MakeSymbolUpdater(goarmsoftfp)
+			sb2.SetType(sym.SDATA)
+			sb2.SetSize(0)
+			if buildcfg.GOARM.SoftFloat {
+				sb2.AddUint8(1)
+			} else {
+				sb2.AddUint8(0)
+			}
 		}
 
 		// Set runtime.disableMemoryProfiling bool if
@@ -1394,7 +1405,7 @@
 		if ctxt.HeadType == objabi.Hdarwin {
 			// Recent versions of macOS print
 			//	ld: warning: option -s is obsolete and being ignored
-			// so do not pass any arguments.
+			// so do not pass any arguments (but we strip symbols below).
 		} else {
 			argv = append(argv, "-s")
 		}
@@ -1402,7 +1413,7 @@
 
 	// On darwin, whether to combine DWARF into executable.
 	// Only macOS supports unmapped segments such as our __DWARF segment.
-	combineDwarf := ctxt.IsDarwin() && !*FlagS && !*FlagW && !debug_s && machoPlatform == PLATFORM_MACOS
+	combineDwarf := ctxt.IsDarwin() && !*FlagW && machoPlatform == PLATFORM_MACOS
 
 	switch ctxt.HeadType {
 	case objabi.Hdarwin:
@@ -1418,13 +1429,15 @@
 			// resolving a lazy binding. See issue 38824.
 			// Force eager resolution to work around.
 			argv = append(argv, "-Wl,-flat_namespace", "-Wl,-bind_at_load")
-			if linkerFlagSupported(ctxt.Arch, argv[0], "", "-Wl,-ld_classic") {
-				// Force old linker to work around a bug in Apple's new linker.
-				argv = append(argv, "-Wl,-ld_classic")
-			}
 		}
 		if !combineDwarf {
 			argv = append(argv, "-Wl,-S") // suppress STAB (symbolic debugging) symbols
+			if debug_s {
+				// We are generating a binary with the symbol table suppressed.
+				// Suppress local symbols. We need to keep dynamically exported
+				// and referenced symbols so the dynamic linker can resolve them.
+				argv = append(argv, "-Wl,-x")
+			}
 		}
 	case objabi.Hopenbsd:
 		argv = append(argv, "-Wl,-nopie")
@@ -1679,9 +1692,12 @@
 		if ctxt.DynlinkingGo() || ctxt.BuildMode == BuildModeCShared || !linkerFlagSupported(ctxt.Arch, argv[0], altLinker, "-Wl,--export-dynamic-symbol=main") {
 			argv = append(argv, "-rdynamic")
 		} else {
+			var exports []string
 			ctxt.loader.ForAllCgoExportDynamic(func(s loader.Sym) {
-				argv = append(argv, "-Wl,--export-dynamic-symbol="+ctxt.loader.SymExtname(s))
+				exports = append(exports, "-Wl,--export-dynamic-symbol="+ctxt.loader.SymExtname(s))
 			})
+			sort.Strings(exports)
+			argv = append(argv, exports...)
 		}
 	}
 	if ctxt.HeadType == objabi.Haix {
@@ -1939,12 +1955,34 @@
 		// dsymutil may not clean up its temp directory at exit.
 		// Set DSYMUTIL_REPRODUCER_PATH to work around. see issue 59026.
 		cmd.Env = append(os.Environ(), "DSYMUTIL_REPRODUCER_PATH="+*flagTmpdir)
+		if ctxt.Debugvlog != 0 {
+			ctxt.Logf("host link dsymutil:")
+			for _, v := range cmd.Args {
+				ctxt.Logf(" %q", v)
+			}
+			ctxt.Logf("\n")
+		}
 		if out, err := cmd.CombinedOutput(); err != nil {
 			Exitf("%s: running dsymutil failed: %v\n%s", os.Args[0], err, out)
 		}
 		// Remove STAB (symbolic debugging) symbols after we are done with them (by dsymutil).
 		// They contain temporary file paths and make the build not reproducible.
-		if out, err := exec.Command(stripCmd, "-S", *flagOutfile).CombinedOutput(); err != nil {
+		var stripArgs = []string{"-S"}
+		if debug_s {
+			// We are generating a binary with the symbol table suppressed.
+			// Suppress local symbols. We need to keep dynamically exported
+			// and referenced symbols so the dynamic linker can resolve them.
+			stripArgs = append(stripArgs, "-x")
+		}
+		stripArgs = append(stripArgs, *flagOutfile)
+		if ctxt.Debugvlog != 0 {
+			ctxt.Logf("host link strip: %q", stripCmd)
+			for _, v := range stripArgs {
+				ctxt.Logf(" %q", v)
+			}
+			ctxt.Logf("\n")
+		}
+		if out, err := exec.Command(stripCmd, stripArgs...).CombinedOutput(); err != nil {
 			Exitf("%s: running strip failed: %v\n%s", os.Args[0], err, out)
 		}
 		// Skip combining if `dsymutil` didn't generate a file. See #11994.
@@ -2192,15 +2230,21 @@
 		0xc401, // arm
 		0x64aa: // arm64
 		ldpe := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) {
-			textp, rsrc, err := loadpe.Load(ctxt.loader, ctxt.Arch, ctxt.IncVersion(), f, pkg, length, pn)
+			ls, err := loadpe.Load(ctxt.loader, ctxt.Arch, ctxt.IncVersion(), f, pkg, length, pn)
 			if err != nil {
 				Errorf(nil, "%v", err)
 				return
 			}
-			if len(rsrc) != 0 {
-				setpersrc(ctxt, rsrc)
+			if len(ls.Resources) != 0 {
+				setpersrc(ctxt, ls.Resources)
 			}
-			ctxt.Textp = append(ctxt.Textp, textp...)
+			if ls.PData != 0 {
+				sehp.pdata = append(sehp.pdata, ls.PData)
+			}
+			if ls.XData != 0 {
+				sehp.xdata = append(sehp.xdata, ls.XData)
+			}
+			ctxt.Textp = append(ctxt.Textp, ls.Textp...)
 		}
 		return ldhostobj(ldpe, ctxt.HeadType, f, pkg, length, pn, file)
 	}
diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go
index 88ff787..fc38b0d 100644
--- a/src/cmd/link/internal/ld/macho.go
+++ b/src/cmd/link/internal/ld/macho.go
@@ -665,8 +665,8 @@
 
 func asmbMacho(ctxt *Link) {
 	machlink := doMachoLink(ctxt)
-	if !*FlagS && ctxt.IsExternal() {
-		symo := int64(Segdwarf.Fileoff + uint64(Rnd(int64(Segdwarf.Filelen), int64(*FlagRound))) + uint64(machlink))
+	if ctxt.IsExternal() {
+		symo := int64(Segdwarf.Fileoff + uint64(Rnd(int64(Segdwarf.Filelen), *FlagRound)) + uint64(machlink))
 		ctxt.Out.SeekSet(symo)
 		machoEmitReloc(ctxt)
 	}
@@ -708,7 +708,7 @@
 	}
 
 	/* text */
-	v := Rnd(int64(uint64(HEADR)+Segtext.Length), int64(*FlagRound))
+	v := Rnd(int64(uint64(HEADR)+Segtext.Length), *FlagRound)
 
 	var mstext *MachoSeg
 	if ctxt.LinkMode != LinkExternal {
@@ -803,7 +803,7 @@
 
 		if ctxt.LinkMode != LinkExternal {
 			ms := newMachoSeg("__LINKEDIT", 0)
-			ms.vaddr = uint64(Rnd(int64(Segdata.Vaddr+Segdata.Length), int64(*FlagRound)))
+			ms.vaddr = uint64(Rnd(int64(Segdata.Vaddr+Segdata.Length), *FlagRound))
 			ms.vsize = uint64(s1 + s2 + s3 + s4 + s5 + s6 + s7)
 			ms.fileoffset = uint64(linkoff)
 			ms.filesize = ms.vsize
@@ -877,7 +877,7 @@
 }
 
 func symkind(ldr *loader.Loader, s loader.Sym) int {
-	if ldr.SymType(s) == sym.SDYNIMPORT {
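+	// Host object and undefined-external symbols are also emitted as
+	// undefined entries in the Mach-O symbol table.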
+	if t := ldr.SymType(s); t == sym.SDYNIMPORT || t == sym.SHOSTOBJ || t == sym.SUNDEFEXT {
 		return SymKindUndef
 	}
 	if ldr.AttrCgoExport(s) {
@@ -894,30 +894,39 @@
 		nkind[symkind(ldr, s)]++
 	}
 
-	// Add special runtime.text and runtime.etext symbols.
+	// On Mach-O, even with -s, we still need to keep dynamically exported and
+	// referenced symbols. We can strip defined local text and data symbols.
+	// So *FlagS is applied based on symbol type.
+
+	// Add special runtime.text and runtime.etext symbols (which are local).
 	// We've already included this symbol in Textp on darwin if ctxt.DynlinkingGo().
 	// See data.go:/textaddress
-	if !ctxt.DynlinkingGo() {
-		s := ldr.Lookup("runtime.text", 0)
-		if ldr.SymType(s) == sym.STEXT {
-			addsym(s)
-		}
-		for n := range Segtext.Sections[1:] {
-			s := ldr.Lookup(fmt.Sprintf("runtime.text.%d", n+1), 0)
-			if s != 0 {
+	if !*FlagS {
+		if !ctxt.DynlinkingGo() {
+			s := ldr.Lookup("runtime.text", 0)
+			if ldr.SymType(s) == sym.STEXT {
 				addsym(s)
-			} else {
-				break
 			}
-		}
-		s = ldr.Lookup("runtime.etext", 0)
-		if ldr.SymType(s) == sym.STEXT {
-			addsym(s)
+			for n := range Segtext.Sections[1:] {
+				s := ldr.Lookup(fmt.Sprintf("runtime.text.%d", n+1), 0)
+				if s != 0 {
+					addsym(s)
+				} else {
+					break
+				}
+			}
+			s = ldr.Lookup("runtime.etext", 0)
+			if ldr.SymType(s) == sym.STEXT {
+				addsym(s)
+			}
 		}
 	}
 
 	// Add text symbols.
 	for _, s := range ctxt.Textp {
+		if *FlagS && !ldr.AttrCgoExportDynamic(s) {
+			continue
+		}
 		addsym(s)
 	}
 
@@ -946,11 +955,16 @@
 			if !shouldBeInSymbolTable(s) {
 				continue
 			}
+			if *FlagS && !ldr.AttrCgoExportDynamic(s) {
+				continue
+			}
 			addsym(s)
+			continue
 		}
 
 		switch t {
 		case sym.SDYNIMPORT, sym.SHOSTOBJ, sym.SUNDEFEXT:
+			// Keep dynamic symbol references even if *FlagS.
 			addsym(s)
 		}
 
@@ -1171,7 +1185,7 @@
 	}
 
 	if size > 0 {
-		linkoff = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(*FlagRound)) + Rnd(int64(Segrelrodata.Filelen), int64(*FlagRound)) + Rnd(int64(Segdata.Filelen), int64(*FlagRound)) + Rnd(int64(Segdwarf.Filelen), int64(*FlagRound))
+		linkoff = Rnd(int64(uint64(HEADR)+Segtext.Length), *FlagRound) + Rnd(int64(Segrelrodata.Filelen), *FlagRound) + Rnd(int64(Segdata.Filelen), *FlagRound) + Rnd(int64(Segdwarf.Filelen), *FlagRound)
 		ctxt.Out.SeekSet(linkoff)
 
 		ctxt.Out.Write(ldr.Data(s1))
@@ -1186,7 +1200,7 @@
 		size += ldr.SymSize(s7)
 	}
 
-	return Rnd(size, int64(*FlagRound))
+	return Rnd(size, *FlagRound)
 }
 
 func machorelocsect(ctxt *Link, out *OutBuf, sect *sym.Section, syms []loader.Sym) {
diff --git a/src/cmd/link/internal/ld/macho_combine_dwarf.go b/src/cmd/link/internal/ld/macho_combine_dwarf.go
index 2ab7da9..2e8bfcd 100644
--- a/src/cmd/link/internal/ld/macho_combine_dwarf.go
+++ b/src/cmd/link/internal/ld/macho_combine_dwarf.go
@@ -134,7 +134,7 @@
 	// Now copy the dwarf data into the output.
 	// Kernel requires all loaded segments to be page-aligned in the file,
 	// even though we mark this one as being 0 bytes of virtual address space.
-	dwarfstart := Rnd(int64(linkseg.Offset), int64(*FlagRound))
+	dwarfstart := Rnd(int64(linkseg.Offset), *FlagRound)
 	if _, err := outf.Seek(dwarfstart, 0); err != nil {
 		return err
 	}
@@ -162,7 +162,7 @@
 	if _, err := exef.Seek(int64(linkseg.Offset), 0); err != nil {
 		return err
 	}
-	linkstart := Rnd(dwarfstart+int64(dwarfsize), int64(*FlagRound))
+	linkstart := Rnd(dwarfstart+int64(dwarfsize), *FlagRound)
 	if _, err := outf.Seek(linkstart, 0); err != nil {
 		return err
 	}
diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go
index bedc15a..feb4ba5 100644
--- a/src/cmd/link/internal/ld/main.go
+++ b/src/cmd/link/internal/ld/main.go
@@ -43,6 +43,7 @@
 	"os"
 	"runtime"
 	"runtime/pprof"
+	"strconv"
 	"strings"
 )
 
@@ -56,6 +57,7 @@
 	flag.Var(&rpath, "r", "set the ELF dynamic linker search `path` to dir1:dir2:...")
 	flag.Var(&flagExtld, "extld", "use `linker` when linking in external mode")
 	flag.Var(&flagExtldflags, "extldflags", "pass `flags` to external linker")
+	flag.Var(&flagW, "w", "disable DWARF generation")
 }
 
 // Flags used by the linker. The exported flags are used by the architecture-specific packages.
@@ -88,16 +90,15 @@
 	flagF             = flag.Bool("f", false, "ignore version mismatch")
 	flagG             = flag.Bool("g", false, "disable go package data checks")
 	flagH             = flag.Bool("h", false, "halt on error")
-	flagN             = flag.Bool("n", false, "dump symbol table")
+	flagN             = flag.Bool("n", false, "no-op (deprecated)")
 	FlagS             = flag.Bool("s", false, "disable symbol table")
-	FlagW             = flag.Bool("w", false, "disable DWARF generation")
 	flag8             bool // use 64-bit addresses in symbol table
 	flagInterpreter   = flag.String("I", "", "use `linker` as ELF dynamic linker")
 	FlagDebugTramp    = flag.Int("debugtramp", 0, "debug trampolines")
 	FlagDebugTextSize = flag.Int("debugtextsize", 0, "debug text section max size")
 	flagDebugNosplit  = flag.Bool("debugnosplit", false, "dump nosplit call graph")
 	FlagStrictDups    = flag.Int("strictdups", 0, "sanity check duplicate symbol contents during object file reading (1=warn 2=err).")
-	FlagRound         = flag.Int("R", -1, "set address rounding `quantum`")
+	FlagRound         = flag.Int64("R", -1, "set address rounding `quantum`")
 	FlagTextAddr      = flag.Int64("T", -1, "set the start address of text symbols")
 	flagEntrySymbol   = flag.String("E", "", "set `entry` symbol name")
 	flagPruneWeakMap  = flag.Bool("pruneweakmap", true, "prune weak mapinit refs")
@@ -106,8 +107,48 @@
 	memprofilerate    = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
 	benchmarkFlag     = flag.String("benchmark", "", "set to 'mem' or 'cpu' to enable phase benchmarking")
 	benchmarkFileFlag = flag.String("benchmarkprofile", "", "emit phase profiles to `base`_phase.{cpu,mem}prof")
+
+	flagW ternaryFlag
+	FlagW = new(bool) // the -w flag, computed in main from flagW
 )
 
+// ternaryFlag is like a boolean flag, but has a default value that is
+// neither true nor false, allowing it to be set from context (e.g. from another
+// flag).
+// *ternaryFlag implements flag.Value.
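+// For -w this means the flag stays unset unless given explicitly, letting
+// Main derive the default from -s and the build mode.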
+type ternaryFlag int
+
+const (
+	ternaryFlagUnset ternaryFlag = iota
+	ternaryFlagFalse
+	ternaryFlagTrue
+)
+
+func (t *ternaryFlag) Set(s string) error {
+	v, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+	if v {
+		*t = ternaryFlagTrue
+	} else {
+		*t = ternaryFlagFalse
+	}
+	return nil
+}
+
+func (t *ternaryFlag) String() string {
+	switch *t {
+	case ternaryFlagFalse:
+		return "false"
+	case ternaryFlagTrue:
+		return "true"
+	}
+	return "unset"
+}
+
+func (t *ternaryFlag) IsBoolFlag() bool { return true } // parse like a boolean flag
+
 // Main is the main entry point for the linker code.
 func Main(arch *sys.Arch, theArch Arch) {
 	log.SetPrefix("link: ")
@@ -149,7 +190,7 @@
 	flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`")
 	flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`")
 	flag.BoolVar(&ctxt.compressDWARF, "compressdwarf", true, "compress DWARF if possible")
-	objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF", addbuildinfo)
+	objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF; use \"gobuildid\" to generate it from the Go build ID", addbuildinfo)
 	objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) })
 	objabi.AddVersionFlag() // -V
 	objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) })
@@ -195,10 +236,25 @@
 		Exitf("dynamic linking required on %s; -d flag cannot be used", buildcfg.GOOS)
 	}
 
+	isPowerOfTwo := func(n int64) bool {
+		return n > 0 && n&(n-1) == 0
+	}
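+	// When set explicitly, -R must be a power of two and at least 4096.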
+	if *FlagRound != -1 && (*FlagRound < 4096 || !isPowerOfTwo(*FlagRound)) {
+		Exitf("invalid -R value 0x%x", *FlagRound)
+	}
+
 	checkStrictDups = *FlagStrictDups
 
-	if ctxt.IsDarwin() && ctxt.BuildMode == BuildModeCShared {
-		*FlagW = true // default to -w in c-shared mode on darwin, see #61229
+	switch flagW {
+	case ternaryFlagFalse:
+		*FlagW = false
+	case ternaryFlagTrue:
+		*FlagW = true
+	case ternaryFlagUnset:
+		*FlagW = *FlagS // -s implies -w if not explicitly set
+		if ctxt.IsDarwin() && ctxt.BuildMode == BuildModeCShared {
+			*FlagW = true // default to -w in c-shared mode on darwin, see #61229
+		}
 	}
 
 	if !buildcfg.Experiment.RegabiWrappers {
@@ -256,6 +312,13 @@
 	}
 
 	if ctxt.Debugvlog != 0 {
+		onOff := func(b bool) string {
+			if b {
+				return "on"
+			}
+			return "off"
+		}
+		ctxt.Logf("build mode: %s, symbol table: %s, DWARF: %s\n", ctxt.BuildMode, onOff(!*FlagS), onOff(dwarfEnabled(ctxt)))
 		ctxt.Logf("HEADER = -H%d -T0x%x -R0x%x\n", ctxt.HeadType, uint64(*FlagTextAddr), uint32(*FlagRound))
 	}
 
diff --git a/src/cmd/link/internal/ld/msync_darwin_libc.go b/src/cmd/link/internal/ld/msync_darwin_libc.go
index eb2a526..6627eca 100644
--- a/src/cmd/link/internal/ld/msync_darwin_libc.go
+++ b/src/cmd/link/internal/ld/msync_darwin_libc.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build darwin && go1.20
+//go:build darwin
 
 package ld
 
diff --git a/src/cmd/link/internal/ld/msync_darwin_syscall.go b/src/cmd/link/internal/ld/msync_darwin_syscall.go
deleted file mode 100644
index 270d9f3..0000000
--- a/src/cmd/link/internal/ld/msync_darwin_syscall.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin && !go1.20
-
-package ld
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-func msync(b []byte, flags int) (err error) {
-	var p unsafe.Pointer
-	if len(b) > 0 {
-		p = unsafe.Pointer(&b[0])
-	}
-	_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(p), uintptr(len(b)), uintptr(flags))
-	if errno != 0 {
-		return errno
-	}
-	return nil
-}
diff --git a/src/cmd/link/internal/ld/outbuf_mmap.go b/src/cmd/link/internal/ld/outbuf_mmap.go
index 7bb728a..2972d8a 100644
--- a/src/cmd/link/internal/ld/outbuf_mmap.go
+++ b/src/cmd/link/internal/ld/outbuf_mmap.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || (solaris && go1.20)
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
 
 package ld
 
diff --git a/src/cmd/link/internal/ld/outbuf_nommap.go b/src/cmd/link/internal/ld/outbuf_nommap.go
index b1d3d27..6a40b97 100644
--- a/src/cmd/link/internal/ld/outbuf_nommap.go
+++ b/src/cmd/link/internal/ld/outbuf_nommap.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !(solaris && go1.20) && !windows
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
 
 package ld
 
diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go
index aaf8dde..5734b92 100644
--- a/src/cmd/link/internal/ld/pcln.go
+++ b/src/cmd/link/internal/ld/pcln.go
@@ -15,6 +15,7 @@
 	"internal/buildcfg"
 	"os"
 	"path/filepath"
+	"strings"
 )
 
 const funcSize = 11 * 4 // funcSize is the size of the _func object in runtime/runtime2.go
@@ -99,6 +100,19 @@
 }
 
 func emitPcln(ctxt *Link, s loader.Sym, container loader.Bitmap) bool {
+	if ctxt.Target.IsRISCV64() {
+		// Avoid adding local symbols to the pcln table - RISC-V
+		// linking generates a very large number of these, particularly
+		// for HI20 symbols (which we need to load in order to be able
+		// to resolve relocations). Unnecessarily including all of
+		// these symbols quickly blows out the size of the pcln table
+		// and overflows hash buckets.
+		symName := ctxt.loader.SymName(s)
+		if symName == "" || strings.HasPrefix(symName, ".L") {
+			return false
+		}
+	}
+
 	// We want to generate func table entries only for the "lowest
 	// level" symbols, not containers of subsymbols.
 	return !container.Has(s)
@@ -713,6 +727,17 @@
 		for j := range funcdata {
 			dataoff := off + int64(4*j)
 			fdsym := funcdata[j]
+
+			// cmd/internal/obj optimistically populates ArgsPointerMaps and
+			// ArgInfo for assembly functions, hoping that the compiler will
+			// emit appropriate symbols from their Go stub declarations. If
+			// it didn't though, just ignore it.
+			//
+			// TODO(cherryyz): Fix arg map generation (see discussion on CL 523335).
+			if fdsym != 0 && (j == abi.FUNCDATA_ArgsPointerMaps || j == abi.FUNCDATA_ArgInfo) && ldr.IsFromAssembly(s) && ldr.Data(fdsym) == nil {
+				fdsym = 0
+			}
+
 			if fdsym == 0 {
 				sb.SetUint32(ctxt.Arch, dataoff, ^uint32(0)) // ^0 is a sentinel for "no value"
 				continue
diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go
index 1502b3e..8cfecaf 100644
--- a/src/cmd/link/internal/ld/pe.go
+++ b/src/cmd/link/internal/ld/pe.go
@@ -635,11 +635,11 @@
 		{f.rdataSect, &Segrodata, ctxt.datap},
 		{f.dataSect, &Segdata, ctxt.datap},
 	}
-	if sehp.pdata != 0 {
-		sects = append(sects, relsect{f.pdataSect, &Segpdata, []loader.Sym{sehp.pdata}})
+	if len(sehp.pdata) != 0 {
+		sects = append(sects, relsect{f.pdataSect, &Segpdata, sehp.pdata})
 	}
-	if sehp.xdata != 0 {
-		sects = append(sects, relsect{f.xdataSect, &Segxdata, []loader.Sym{sehp.xdata}})
+	if len(sehp.xdata) != 0 {
+		sects = append(sects, relsect{f.xdataSect, &Segxdata, sehp.xdata})
 	}
 	for _, s := range sects {
 		s.peSect.emitRelocations(ctxt.Out, func() int {
@@ -777,7 +777,7 @@
 				// so the external linker see them as Forwarder RVA exports. See:
 				//
 				//  - https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#export-address-table
-				//  - https://sourceware.org/git/?p=binutils-gdb.git;a=blob;f=ld/pe-dll.c;h=e7b82ba6ffadf74dc1b9ee71dc13d48336941e51;hb=HEAD#l972)
+				//  - https://sourceware.org/git/?p=binutils-gdb.git;a=blob;f=ld/pe-dll.c;h=e7b82ba6ffadf74dc1b9ee71dc13d48336941e51;hb=HEAD#l972
 				//
 				// CL 317917 changes "." to ":" in symbols name, so these symbols can not be
 				// found by external linker anymore. So a hacky way is adding the
@@ -1149,11 +1149,11 @@
 	}
 
 	HEADR = PEFILEHEADR
-	if *FlagTextAddr == -1 {
-		*FlagTextAddr = PEBASE + int64(PESECTHEADR)
-	}
 	if *FlagRound == -1 {
-		*FlagRound = int(PESECTALIGN)
+		*FlagRound = PESECTALIGN
+	}
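+	// The default text address is computed after -R so that it honors a
+	// user-specified rounding quantum.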
+	if *FlagTextAddr == -1 {
+		*FlagTextAddr = Rnd(PEBASE, *FlagRound) + int64(PESECTHEADR)
 	}
 }
 
diff --git a/src/cmd/link/internal/ld/seh.go b/src/cmd/link/internal/ld/seh.go
index 5379528..9f0f747 100644
--- a/src/cmd/link/internal/ld/seh.go
+++ b/src/cmd/link/internal/ld/seh.go
@@ -11,8 +11,8 @@
 )
 
 var sehp struct {
-	pdata loader.Sym
-	xdata loader.Sym
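+	// Slices rather than single symbols: SEH data can come both from the
+	// Go linker and from host objects loaded from PE files.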
+	pdata []sym.LoaderSym
+	xdata []sym.LoaderSym
 }
 
 func writeSEH(ctxt *Link) {
@@ -40,7 +40,7 @@
 	// to deduplicate .xdata entries.
 	uwcache := make(map[string]int64) // aux symbol name --> .xdata offset
 	for _, s := range ctxt.Textp {
-		if fi := ldr.FuncInfo(s); !fi.Valid() || fi.TopFrame() {
+		if fi := ldr.FuncInfo(s); !fi.Valid() {
 			continue
 		}
 		uw := ldr.SEHUnwindSym(s)
@@ -53,6 +53,17 @@
 			off = xdata.Size()
 			uwcache[name] = off
 			xdata.AddBytes(ldr.Data(uw))
+			// The SEH unwind data can contain relocations;
+			// make sure those are copied over.
+			rels := ldr.Relocs(uw)
+			for i := 0; i < rels.Count(); i++ {
+				r := rels.At(i)
+				rel, _ := xdata.AddRel(r.Type())
+				rel.SetOff(int32(off) + r.Off())
+				rel.SetSiz(r.Siz())
+				rel.SetSym(r.Sym())
+				rel.SetAdd(r.Add())
+			}
 		}
 
 		// Reference:
@@ -61,6 +72,6 @@
 		pdata.AddPEImageRelativeAddrPlus(ctxt.Arch, s, ldr.SymSize(s))
 		pdata.AddPEImageRelativeAddrPlus(ctxt.Arch, xdata.Sym(), off)
 	}
-	sehp.pdata = pdata.Sym()
-	sehp.xdata = xdata.Sym()
+	sehp.pdata = append(sehp.pdata, pdata.Sym())
+	sehp.xdata = append(sehp.xdata, xdata.Sym())
 }
diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go
index b039e7d..01f9780 100644
--- a/src/cmd/link/internal/ld/symtab.go
+++ b/src/cmd/link/internal/ld/symtab.go
@@ -45,14 +45,14 @@
 // Symbol table.
 
 func putelfstr(s string) int {
-	if len(Elfstrdat) == 0 && s != "" {
+	if len(elfstrdat) == 0 && s != "" {
 		// first entry must be empty string
 		putelfstr("")
 	}
 
-	off := len(Elfstrdat)
-	Elfstrdat = append(Elfstrdat, s...)
-	Elfstrdat = append(Elfstrdat, 0)
+	off := len(elfstrdat)
+	elfstrdat = append(elfstrdat, s...)
+	elfstrdat = append(elfstrdat, 0)
 	return off
 }
 
diff --git a/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod5.go b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod5.go
new file mode 100644
index 0000000..2f0bdcc
--- /dev/null
+++ b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod5.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Like ifacemethod2.go, this tests that a method *is* live
+// if the type is "indirectly" converted to an interface
+// using reflection with a method descriptor as an intermediate.
+// However, it uses MethodByName() with a constant name of
+// a method to look up. This does not disable the DCE like
+// Method(0) does.
+
+package main
+
+import "reflect"
+
+type S int
+
+func (s S) M() { println("S.M") }
+
+type I interface{ M() }
+
+type T float64
+
+func (t T) F(s S) {}
+
+func main() {
+	var t T
+	meth, _ := reflect.TypeOf(t).MethodByName("F")
+	ft := meth.Type
+	at := ft.In(1)
+	v := reflect.New(at).Elem()
+	v.Interface().(I).M()
+}
diff --git a/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod6.go b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod6.go
new file mode 100644
index 0000000..7eb9419
--- /dev/null
+++ b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod6.go
@@ -0,0 +1,31 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test only uses MethodByName() with constant names
+// of methods to look up. These methods need to be kept,
+// but other methods must be eliminated.
+
+package main
+
+import "reflect"
+
+type S int
+
+func (s S) M() { println("S.M") }
+
+func (s S) N() { println("S.N") }
+
+type T float64
+
+func (t T) F(s S) {}
+
+func main() {
+	var t T
+	meth, _ := reflect.TypeOf(t).MethodByName("F")
+	ft := meth.Type
+	at := ft.In(1)
+	v := reflect.New(at).Elem()
+	methV := v.MethodByName("M")
+	methV.Call([]reflect.Value{v})
+}
diff --git a/src/cmd/link/internal/ld/testdata/deadcode/structof_funcof.go b/src/cmd/link/internal/ld/testdata/deadcode/structof_funcof.go
new file mode 100644
index 0000000..bec5f25
--- /dev/null
+++ b/src/cmd/link/internal/ld/testdata/deadcode/structof_funcof.go
@@ -0,0 +1,52 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Methods of reflect.rtype use StructOf and FuncOf which in turn depend on
+// reflect.Value.Method. StructOf and FuncOf must not disable the DCE.
+
+package main
+
+import "reflect"
+
+type S int
+
+func (s S) M() { println("S.M") }
+
+func (s S) N() { println("S.N") }
+
+type T float64
+
+func (t T) F(s S) {}
+
+func useStructOf() {
+	t := reflect.StructOf([]reflect.StructField{
+		{
+			Name: "X",
+			Type: reflect.TypeOf(int(0)),
+		},
+	})
+	println(t.Name())
+}
+
+func useFuncOf() {
+	t := reflect.FuncOf(
+		[]reflect.Type{reflect.TypeOf(int(0))},
+		[]reflect.Type{reflect.TypeOf(int(0))},
+		false,
+	)
+	println(t.Name())
+}
+
+func main() {
+	useStructOf()
+	useFuncOf()
+
+	var t T
+	meth, _ := reflect.TypeOf(t).MethodByName("F")
+	ft := meth.Type
+	at := ft.In(1)
+	v := reflect.New(at).Elem()
+	methV := v.MethodByName("M")
+	methV.Call([]reflect.Value{v})
+}
diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go
index 158a86f..d915ab3 100644
--- a/src/cmd/link/internal/ld/xcoff.go
+++ b/src/cmd/link/internal/ld/xcoff.go
@@ -544,15 +544,14 @@
 	xfile.dynLibraries = make(map[string]int)
 
 	HEADR = int32(Rnd(XCOFFHDRRESERVE, XCOFFSECTALIGN))
-	if *FlagTextAddr != -1 {
-		Errorf(nil, "-T not available on AIX")
-	}
-	*FlagTextAddr = XCOFFTEXTBASE + int64(HEADR)
 	if *FlagRound != -1 {
 		Errorf(nil, "-R not available on AIX")
 	}
-	*FlagRound = int(XCOFFSECTALIGN)
-
+	*FlagRound = XCOFFSECTALIGN
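+	// -R is handled first because the text address below depends on the
+	// rounding quantum.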
+	if *FlagTextAddr != -1 {
+		Errorf(nil, "-T not available on AIX")
+	}
+	*FlagTextAddr = Rnd(XCOFFTEXTBASE, *FlagRound) + int64(HEADR)
 }
 
 // SYMBOL TABLE
@@ -1141,7 +1140,7 @@
 		putaixsym(ctxt, s, TextSym)
 	}
 
-	if ctxt.Debugvlog != 0 || *flagN {
+	if ctxt.Debugvlog != 0 {
 		ctxt.Logf("symsize = %d\n", uint32(symSize))
 	}
 	xfile.updatePreviousFile(ctxt, true)
@@ -1581,7 +1580,7 @@
 func asmbXcoff(ctxt *Link) {
 	ctxt.Out.SeekSet(0)
 	fileoff := int64(Segdwarf.Fileoff + Segdwarf.Filelen)
-	fileoff = int64(Rnd(int64(fileoff), int64(*FlagRound)))
+	fileoff = int64(Rnd(int64(fileoff), *FlagRound))
 
 	xfile.sectNameToScnum = make(map[string]int16)
 
diff --git a/src/cmd/link/internal/loadelf/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go
index 942d54c..82e7dc3 100644
--- a/src/cmd/link/internal/loadelf/ldelf.go
+++ b/src/cmd/link/internal/loadelf/ldelf.go
@@ -584,27 +584,41 @@
 		}
 		sect = &elfobj.sect[elfsym.shndx]
 		if sect.sym == 0 {
-			if strings.HasPrefix(elfsym.name, ".Linfo_string") { // clang does this
+			if elfsym.type_ == 0 {
+				if strings.HasPrefix(sect.name, ".debug_") && elfsym.name == "" {
+					// clang on arm and riscv64.
+					// This reportedly happens with clang 3.7 on ARM.
+					// See issue 13139.
+					continue
+				}
+				if strings.HasPrefix(elfsym.name, ".Ldebug_") || elfsym.name == ".L0 " {
+					// gcc on riscv64.
+					continue
+				}
+				if elfsym.name == ".Lline_table_start0" {
+					// clang on riscv64.
+					continue
+				}
+
+				if strings.HasPrefix(elfsym.name, "$d") && sect.name == ".debug_frame" {
+					// "$d" is a marker, not a real symbol.
+					// This happens with gcc on ARM64.
+					// See https://sourceware.org/bugzilla/show_bug.cgi?id=21809
+					continue
+				}
+			}
+
+			if strings.HasPrefix(elfsym.name, ".Linfo_string") {
+				// clang does this
 				continue
 			}
 
-			if elfsym.name == "" && elfsym.type_ == 0 && sect.name == ".debug_str" {
-				// This reportedly happens with clang 3.7 on ARM.
-				// See issue 13139.
+			if strings.HasPrefix(elfsym.name, ".LASF") || strings.HasPrefix(elfsym.name, ".LLRL") || strings.HasPrefix(elfsym.name, ".LLST") {
+				// gcc on s390x and riscv64 does this.
 				continue
 			}
 
-			if strings.HasPrefix(elfsym.name, "$d") && elfsym.type_ == 0 && sect.name == ".debug_frame" {
-				// "$d" is a marker, not a real symbol.
-				// This happens with gcc on ARM64.
-				// See https://sourceware.org/bugzilla/show_bug.cgi?id=21809
-				continue
-			}
-
-			if strings.HasPrefix(elfsym.name, ".LASF") { // gcc on s390x does this
-				continue
-			}
-			return errorf("%v: sym#%d (%s): ignoring symbol in section %d (type %d)", elfsym.sym, i, elfsym.name, elfsym.shndx, elfsym.type_)
+			return errorf("%v: sym#%d (%q): ignoring symbol in section %d (%q) (type %d)", elfsym.sym, i, elfsym.name, elfsym.shndx, sect.name, elfsym.type_)
 		}
 
 		s := elfsym.sym
@@ -1004,19 +1018,38 @@
 		MIPS64 | uint32(elf.R_MIPS_CALL16)<<16,
 		MIPS64 | uint32(elf.R_MIPS_GPREL32)<<16,
 		MIPS64 | uint32(elf.R_MIPS_64)<<16,
-		MIPS64 | uint32(elf.R_MIPS_GOT_DISP)<<16:
+		MIPS64 | uint32(elf.R_MIPS_GOT_DISP)<<16,
+		MIPS64 | uint32(elf.R_MIPS_PC32)<<16:
 		return 4, 4, nil
 
+	case LOONG64 | uint32(elf.R_LARCH_ADD8)<<16,
+		LOONG64 | uint32(elf.R_LARCH_SUB8)<<16:
+		return 1, 1, nil
+
+	case LOONG64 | uint32(elf.R_LARCH_ADD16)<<16,
+		LOONG64 | uint32(elf.R_LARCH_SUB16)<<16:
+		return 2, 2, nil
+
 	case LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_PCREL)<<16,
 		LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_GPREL)<<16,
 		LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_ABSOLUTE)<<16,
 		LOONG64 | uint32(elf.R_LARCH_MARK_LA)<<16,
 		LOONG64 | uint32(elf.R_LARCH_SOP_POP_32_S_0_10_10_16_S2)<<16,
-		LOONG64 | uint32(elf.R_LARCH_64)<<16,
 		LOONG64 | uint32(elf.R_LARCH_MARK_PCREL)<<16,
+		LOONG64 | uint32(elf.R_LARCH_ADD24)<<16,
+		LOONG64 | uint32(elf.R_LARCH_ADD32)<<16,
+		LOONG64 | uint32(elf.R_LARCH_SUB24)<<16,
+		LOONG64 | uint32(elf.R_LARCH_SUB32)<<16,
+		LOONG64 | uint32(elf.R_LARCH_B26)<<16,
 		LOONG64 | uint32(elf.R_LARCH_32_PCREL)<<16:
 		return 4, 4, nil
 
+	case LOONG64 | uint32(elf.R_LARCH_64)<<16,
+		LOONG64 | uint32(elf.R_LARCH_ADD64)<<16,
+		LOONG64 | uint32(elf.R_LARCH_SUB64)<<16,
+		LOONG64 | uint32(elf.R_LARCH_64_PCREL)<<16:
+		return 8, 8, nil
+
 	case S390X | uint32(elf.R_390_8)<<16:
 		return 1, 1, nil
 
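
For reference, the size/alignment switch above keys each case on an architecture-family tag combined with the ELF relocation type shifted into the upper 16 bits. A minimal standalone sketch of that key encoding (loong64Family is an illustrative stand-in, not the package's LOONG64 constant):

	package main

	import (
		"debug/elf"
		"fmt"
	)

	// loong64Family stands in for the loader's LOONG64 architecture tag;
	// its concrete value does not matter for the key layout shown here.
	const loong64Family uint32 = 19

	// relKey mirrors the shape of the case expressions: family in the low
	// 16 bits, ELF relocation type in the high 16 bits.
	func relKey(family, elfRelocType uint32) uint32 {
		return family | elfRelocType<<16
	}

	func main() {
		fmt.Printf("R_LARCH_ADD8 key:  %#x\n", relKey(loong64Family, uint32(elf.R_LARCH_ADD8)))
		fmt.Printf("R_LARCH_ADD64 key: %#x\n", relKey(loong64Family, uint32(elf.R_LARCH_ADD64)))
	}
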
diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go
index 4d0b497..3edb5e2 100644
--- a/src/cmd/link/internal/loader/loader.go
+++ b/src/cmd/link/internal/loader/loader.go
@@ -27,7 +27,7 @@
 
 // Sym encapsulates a global symbol index, used to identify a specific
 // Go symbol. The 0-valued Sym corresponds to an invalid symbol.
-type Sym uint32
+type Sym = sym.LoaderSym
 
 // Relocs encapsulates the set of relocations on a given symbol; an
 // instance of this type is returned by the Loader Relocs() method.
@@ -988,7 +988,7 @@
 	return l.attrExternal.Has(l.extIndex(i))
 }
 
-// SetAttrExternal sets the "external" property for an host object
+// SetAttrExternal sets the "external" property for a host object
 // symbol (see AttrExternal).
 func (l *Loader) SetAttrExternal(i Sym, v bool) {
 	if !l.IsExternal(i) {
@@ -1316,14 +1316,6 @@
 	l.symSects[i] = sect.Index
 }
 
-// growSects grows the slice used to store symbol sections.
-func (l *Loader) growSects(reqLen int) {
-	curLen := len(l.symSects)
-	if reqLen > curLen {
-		l.symSects = append(l.symSects, make([]uint16, reqLen+1-curLen)...)
-	}
-}
-
 // NewSection creates a new (output) section.
 func (l *Loader) NewSection() *sym.Section {
 	sect := new(sym.Section)
diff --git a/src/cmd/link/internal/loader/symbolbuilder.go b/src/cmd/link/internal/loader/symbolbuilder.go
index 5a3e88b..b9eaca7 100644
--- a/src/cmd/link/internal/loader/symbolbuilder.go
+++ b/src/cmd/link/internal/loader/symbolbuilder.go
@@ -176,10 +176,6 @@
 	sb.l.SetAttrReachable(sb.symIdx, v)
 }
 
-func (sb *SymbolBuilder) setReachable() {
-	sb.SetReachable(true)
-}
-
 func (sb *SymbolBuilder) ReadOnly() bool {
 	return sb.l.AttrReadOnly(sb.symIdx)
 }
diff --git a/src/cmd/link/internal/loadpe/ldpe.go b/src/cmd/link/internal/loadpe/ldpe.go
index 81c2841..1ba6deb 100644
--- a/src/cmd/link/internal/loadpe/ldpe.go
+++ b/src/cmd/link/internal/loadpe/ldpe.go
@@ -182,9 +182,6 @@
 	// Text and non-text sections read in by the host object loader.
 	secSyms []loader.Sym
 
-	// SDYNIMPORT symbols encountered along the way
-	dynimports map[loader.Sym]struct{}
-
 	// Loader and arch, for use in postprocessing.
 	l    *loader.Loader
 	arch *sys.Arch
@@ -197,9 +194,8 @@
 		return
 	}
 	importSymsState = &peImportSymsState{
-		dynimports: make(map[loader.Sym]struct{}),
-		l:          l,
-		arch:       arch,
+		l:    l,
+		arch: arch,
 	}
 }
 
@@ -219,14 +215,19 @@
 // comdatDefinitions records the names of symbols for which we've
 // previously seen a definition in COMDAT. Key is symbol name, value
 // is symbol size (or -1 if we're using the "any" strategy).
-var comdatDefinitions = make(map[string]int64)
+var comdatDefinitions map[string]int64
+
+// Symbols contains the symbols that can be loaded from a PE file.
+type Symbols struct {
+	Textp     []loader.Sym // text symbols
+	Resources []loader.Sym // .rsrc section or set of .rsrc$xx sections
+	PData     loader.Sym
+	XData     loader.Sym
+}
 
 // Load loads the PE file pn from input.
-// Symbols from the object file are created via the loader 'l',
-// and a slice of the text symbols is returned.
-// If an .rsrc section or set of .rsrc$xx sections is found, its symbols are
-// returned as rsrc.
-func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Reader, pkg string, length int64, pn string) (textp []loader.Sym, rsrc []loader.Sym, err error) {
+// Symbols from the object file are created via the loader 'l'.
+func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Reader, pkg string, length int64, pn string) (*Symbols, error) {
 	state := &peLoaderState{
 		l:               l,
 		arch:            arch,
@@ -236,6 +237,9 @@
 		pn:              pn,
 	}
 	createImportSymsState(state.l, state.arch)
+	if comdatDefinitions == nil {
+		comdatDefinitions = make(map[string]int64)
+	}
 
 	// Some input files are archives containing multiple
 	// object files, and pe.NewFile seeks to the start of
@@ -246,11 +250,13 @@
 	// TODO: replace pe.NewFile with pe.Load (grep for "add Load function" in debug/pe for details)
 	f, err := pe.NewFile(sr)
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 	defer f.Close()
 	state.f = f
 
+	var ls Symbols
+
 	// TODO return error if found .cormeta
 
 	// create symbols for mapped sections
@@ -271,7 +277,12 @@
 
 		switch sect.Characteristics & (pe.IMAGE_SCN_CNT_UNINITIALIZED_DATA | pe.IMAGE_SCN_CNT_INITIALIZED_DATA | pe.IMAGE_SCN_MEM_READ | pe.IMAGE_SCN_MEM_WRITE | pe.IMAGE_SCN_CNT_CODE | pe.IMAGE_SCN_MEM_EXECUTE) {
 		case pe.IMAGE_SCN_CNT_INITIALIZED_DATA | pe.IMAGE_SCN_MEM_READ: //.rdata
-			bld.SetType(sym.SRODATA)
+			if issehsect(arch, sect) {
+				bld.SetType(sym.SSEHSECT)
+				bld.SetAlign(4)
+			} else {
+				bld.SetType(sym.SRODATA)
+			}
 
 		case pe.IMAGE_SCN_CNT_UNINITIALIZED_DATA | pe.IMAGE_SCN_MEM_READ | pe.IMAGE_SCN_MEM_WRITE: //.bss
 			bld.SetType(sym.SNOPTRBSS)
@@ -283,13 +294,13 @@
 			bld.SetType(sym.STEXT)
 
 		default:
-			return nil, nil, fmt.Errorf("unexpected flags %#06x for PE section %s", sect.Characteristics, sect.Name)
+			return nil, fmt.Errorf("unexpected flags %#06x for PE section %s", sect.Characteristics, sect.Name)
 		}
 
 		if bld.Type() != sym.SNOPTRBSS {
 			data, err := sect.Data()
 			if err != nil {
-				return nil, nil, err
+				return nil, err
 			}
 			state.sectdata[sect] = data
 			bld.SetData(data)
@@ -297,13 +308,19 @@
 		bld.SetSize(int64(sect.Size))
 		state.sectsyms[sect] = s
 		if sect.Name == ".rsrc" || strings.HasPrefix(sect.Name, ".rsrc$") {
-			rsrc = append(rsrc, s)
+			ls.Resources = append(ls.Resources, s)
+		} else if bld.Type() == sym.SSEHSECT {
+			if sect.Name == ".pdata" {
+				ls.PData = s
+			} else if sect.Name == ".xdata" {
+				ls.XData = s
+			}
 		}
 	}
 
 	// Make a prepass over the symbols to collect info about COMDAT symbols.
 	if err := state.preprocessSymbols(); err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
 	// load relocations
@@ -324,22 +341,23 @@
 		}
 
 		splitResources := strings.HasPrefix(rsect.Name, ".rsrc$")
+		issehsect := issehsect(arch, rsect)
 		sb := l.MakeSymbolUpdater(state.sectsyms[rsect])
 		for j, r := range rsect.Relocs {
 			if int(r.SymbolTableIndex) >= len(f.COFFSymbols) {
-				return nil, nil, fmt.Errorf("relocation number %d symbol index idx=%d cannot be large then number of symbols %d", j, r.SymbolTableIndex, len(f.COFFSymbols))
+				return nil, fmt.Errorf("relocation number %d symbol index idx=%d cannot be larger than the number of symbols %d", j, r.SymbolTableIndex, len(f.COFFSymbols))
 			}
 			pesym := &f.COFFSymbols[r.SymbolTableIndex]
 			_, gosym, err := state.readpesym(pesym)
 			if err != nil {
-				return nil, nil, err
+				return nil, err
 			}
 			if gosym == 0 {
 				name, err := pesym.FullName(f.StringTable)
 				if err != nil {
 					name = string(pesym.Name[:])
 				}
-				return nil, nil, fmt.Errorf("reloc of invalid sym %s idx=%d type=%d", name, r.SymbolTableIndex, pesym.Type)
+				return nil, fmt.Errorf("reloc of invalid sym %s idx=%d type=%d", name, r.SymbolTableIndex, pesym.Type)
 			}
 
 			rSym := gosym
@@ -349,21 +367,29 @@
 			var rType objabi.RelocType
 			switch arch.Family {
 			default:
-				return nil, nil, fmt.Errorf("%s: unsupported arch %v", pn, arch.Family)
+				return nil, fmt.Errorf("%s: unsupported arch %v", pn, arch.Family)
 			case sys.I386, sys.AMD64:
 				switch r.Type {
 				default:
-					return nil, nil, fmt.Errorf("%s: %v: unknown relocation type %v", pn, state.sectsyms[rsect], r.Type)
+					return nil, fmt.Errorf("%s: %v: unknown relocation type %v", pn, state.sectsyms[rsect], r.Type)
 
 				case IMAGE_REL_I386_REL32, IMAGE_REL_AMD64_REL32,
 					IMAGE_REL_AMD64_ADDR32, // R_X86_64_PC32
 					IMAGE_REL_AMD64_ADDR32NB:
-					rType = objabi.R_PCREL
+					if r.Type == IMAGE_REL_AMD64_ADDR32NB {
+						rType = objabi.R_PEIMAGEOFF
+					} else {
+						rType = objabi.R_PCREL
+					}
 
 					rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
 
 				case IMAGE_REL_I386_DIR32NB, IMAGE_REL_I386_DIR32:
-					rType = objabi.R_ADDR
+					if r.Type == IMAGE_REL_I386_DIR32NB {
+						rType = objabi.R_PEIMAGEOFF
+					} else {
+						rType = objabi.R_ADDR
+					}
 
 					// load addend from image
 					rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
@@ -380,7 +406,7 @@
 			case sys.ARM:
 				switch r.Type {
 				default:
-					return nil, nil, fmt.Errorf("%s: %v: unknown ARM relocation type %v", pn, state.sectsyms[rsect], r.Type)
+					return nil, fmt.Errorf("%s: %v: unknown ARM relocation type %v", pn, state.sectsyms[rsect], r.Type)
 
 				case IMAGE_REL_ARM_SECREL:
 					rType = objabi.R_PCREL
@@ -388,7 +414,11 @@
 					rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
 
 				case IMAGE_REL_ARM_ADDR32, IMAGE_REL_ARM_ADDR32NB:
-					rType = objabi.R_ADDR
+					if r.Type == IMAGE_REL_ARM_ADDR32NB {
+						rType = objabi.R_PEIMAGEOFF
+					} else {
+						rType = objabi.R_ADDR
+					}
 
 					rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
 
@@ -401,10 +431,14 @@
 			case sys.ARM64:
 				switch r.Type {
 				default:
-					return nil, nil, fmt.Errorf("%s: %v: unknown ARM64 relocation type %v", pn, state.sectsyms[rsect], r.Type)
+					return nil, fmt.Errorf("%s: %v: unknown ARM64 relocation type %v", pn, state.sectsyms[rsect], r.Type)
 
 				case IMAGE_REL_ARM64_ADDR32, IMAGE_REL_ARM64_ADDR32NB:
-					rType = objabi.R_ADDR
+					if r.Type == IMAGE_REL_ARM64_ADDR32NB {
+						rType = objabi.R_PEIMAGEOFF
+					} else {
+						rType = objabi.R_ADDR
+					}
 
 					rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
 				}
@@ -417,12 +451,20 @@
 			if issect(pesym) || splitResources {
 				rAdd += int64(pesym.Value)
 			}
+			if issehsect {
+				// .pdata and .xdata sections can contain records
+				// associated to functions that won't be used in
+				// the final binary, in which case the relocation
+				// target symbol won't be reachable.
+				rType |= objabi.R_WEAK
+			}
 
 			rel, _ := sb.AddRel(rType)
 			rel.SetOff(rOff)
 			rel.SetSiz(rSize)
 			rel.SetSym(rSym)
 			rel.SetAdd(rAdd)
+
 		}
 
 		sb.SortRelocs()
@@ -436,7 +478,7 @@
 
 		name, err := pesym.FullName(f.StringTable)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 		if name == "" {
 			continue
@@ -451,17 +493,10 @@
 			continue
 		}
 		if pesym.SectionNumber == IMAGE_SYM_ABSOLUTE && bytes.Equal(pesym.Name[:], []byte("@feat.00")) {
-			// Microsoft's linker looks at whether all input objects have an empty
-			// section called @feat.00. If all of them do, then it enables SEH;
-			// otherwise it doesn't enable that feature. So, since around the Windows
-			// XP SP2 era, most tools that make PE objects just tack on that section,
-			// so that it won't gimp Microsoft's linker logic. Go doesn't support SEH,
-			// so in theory, none of this really matters to us. But actually, if the
-			// linker tries to ingest an object with @feat.00 -- which are produced by
-			// LLVM's resource compiler, for example -- it chokes because of the
-			// IMAGE_SYM_ABSOLUTE section that it doesn't know how to deal with. Since
-			// @feat.00 is just a marking anyway, skip IMAGE_SYM_ABSOLUTE sections that
-			// are called @feat.00.
+			// The PE documentation says that, on x86 platforms, the absolute symbol named @feat.00
+			// is used to indicate that the COFF object supports SEH.
+			// Go doesn't support SEH on windows/386, so we can ignore this symbol.
+			// See https://learn.microsoft.com/en-us/windows/win32/debug/pe-format#the-sxdata-section
 			continue
 		}
 		var sect *pe.Section
@@ -474,7 +509,7 @@
 
 		bld, s, err := state.readpesym(pesym)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 
 		if pesym.SectionNumber == 0 { // extern
@@ -488,14 +523,14 @@
 		} else if pesym.SectionNumber > 0 && int(pesym.SectionNumber) <= len(f.Sections) {
 			sect = f.Sections[pesym.SectionNumber-1]
 			if _, found := state.sectsyms[sect]; !found {
-				return nil, nil, fmt.Errorf("%s: %v: missing sect.sym", pn, s)
+				return nil, fmt.Errorf("%s: %v: missing sect.sym", pn, s)
 			}
 		} else {
-			return nil, nil, fmt.Errorf("%s: %v: sectnum < 0!", pn, s)
+			return nil, fmt.Errorf("%s: %v: sectnum < 0!", pn, s)
 		}
 
 		if sect == nil {
-			return nil, nil, nil
+			return nil, nil
 		}
 
 		// Check for COMDAT symbol.
@@ -514,7 +549,7 @@
 			}
 			outerName := l.SymName(l.OuterSym(s))
 			sectName := l.SymName(state.sectsyms[sect])
-			return nil, nil, fmt.Errorf("%s: duplicate symbol reference: %s in both %s and %s", pn, l.SymName(s), outerName, sectName)
+			return nil, fmt.Errorf("%s: duplicate symbol reference: %s in both %s and %s", pn, l.SymName(s), outerName, sectName)
 		}
 
 		bld = makeUpdater(l, bld, s)
@@ -525,7 +560,7 @@
 		bld.SetSize(4)
 		if l.SymType(sectsym) == sym.STEXT {
 			if bld.External() && !bld.DuplicateOK() {
-				return nil, nil, fmt.Errorf("%s: duplicate symbol definition", l.SymName(s))
+				return nil, fmt.Errorf("%s: duplicate symbol definition", l.SymName(s))
 			}
 			bld.SetExternal(true)
 		}
@@ -533,7 +568,7 @@
 			// This is a COMDAT definition. Record that we're picking
 			// this instance so that we can ignore future defs.
 			if _, ok := comdatDefinitions[l.SymName(s)]; ok {
-				return nil, nil, fmt.Errorf("internal error: preexisting COMDAT definition for %q", name)
+				return nil, fmt.Errorf("internal error: preexisting COMDAT definition for %q", name)
 			}
 			comdatDefinitions[l.SymName(s)] = sz
 		}
@@ -551,15 +586,19 @@
 		if l.SymType(s) == sym.STEXT {
 			for ; s != 0; s = l.SubSym(s) {
 				if l.AttrOnList(s) {
-					return nil, nil, fmt.Errorf("symbol %s listed multiple times", l.SymName(s))
+					return nil, fmt.Errorf("symbol %s listed multiple times", l.SymName(s))
 				}
 				l.SetAttrOnList(s, true)
-				textp = append(textp, s)
+				ls.Textp = append(ls.Textp, s)
 			}
 		}
 	}
 
-	return textp, rsrc, nil
+	if ls.PData != 0 {
+		processSEH(l, arch, ls.PData, ls.XData)
+	}
+
+	return &ls, nil
 }
 
 // PostProcessImports works to resolve inconsistencies with DLL import
@@ -640,6 +679,10 @@
 	return nil
 }
 
+func issehsect(arch *sys.Arch, s *pe.Section) bool {
+	return arch.Family == sys.AMD64 && (s.Name == ".pdata" || s.Name == ".xdata")
+}
+
 func issect(s *pe.COFFSymbol) bool {
 	return s.StorageClass == IMAGE_SYM_CLASS_STATIC && s.Type == 0 && s.Name[0] == '.'
 }
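
The IMAGE_REL_*_ADDR32NB cases above now map to the image-base-relative relocation kind (R_PEIMAGEOFF) instead of R_ADDR. A small standalone sketch of the arithmetic this distinction ultimately implies (values and helper names are illustrative, not the linker's own code):

	package main

	import "fmt"

	// resolveAddr32NB models an image-relative ("no base") fixup: the field
	// receives the target's RVA, i.e. its address minus the image base.
	func resolveAddr32NB(targetVA, imageBase uint64) uint32 {
		return uint32(targetVA - imageBase)
	}

	// resolveAddr32 models a plain absolute fixup: the field receives the
	// target's virtual address directly.
	func resolveAddr32(targetVA uint64) uint32 {
		return uint32(targetVA)
	}

	func main() {
		const imageBase = 0x1_4000_0000 // typical default base for 64-bit PE images
		const target = imageBase + 0x12340
		fmt.Printf("ADDR32NB (RVA):    %#x\n", resolveAddr32NB(target, imageBase))
		fmt.Printf("ADDR32 (absolute): %#x\n", resolveAddr32(target))
	}
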
diff --git a/src/cmd/link/internal/loadpe/seh.go b/src/cmd/link/internal/loadpe/seh.go
new file mode 100644
index 0000000..545958f
--- /dev/null
+++ b/src/cmd/link/internal/loadpe/seh.go
@@ -0,0 +1,111 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loadpe
+
+import (
+	"cmd/internal/objabi"
+	"cmd/internal/sys"
+	"cmd/link/internal/loader"
+	"cmd/link/internal/sym"
+	"fmt"
+	"sort"
+)
+
+const (
+	UNW_FLAG_EHANDLER  = 1 << 3
+	UNW_FLAG_UHANDLER  = 2 << 3
+	UNW_FLAG_CHAININFO = 4 << 3
+	unwStaticDataSize  = 4 // Bytes of unwind data before the variable length part.
+	unwCodeSize        = 2 // Bytes per unwind code.
+)
+
+// processSEH walks all pdata relocations looking for exception handler function symbols.
+// We want to mark these as reachable if the function that they protect is reachable
+// in the final binary.
+func processSEH(ldr *loader.Loader, arch *sys.Arch, pdata sym.LoaderSym, xdata sym.LoaderSym) error {
+	switch arch.Family {
+	case sys.AMD64:
+		ldr.SetAttrReachable(pdata, true)
+		if xdata != 0 {
+			ldr.SetAttrReachable(xdata, true)
+		}
+		return processSEHAMD64(ldr, pdata)
+	default:
+		// TODO: support SEH on other architectures.
+		return fmt.Errorf("unsupported architecture for SEH: %v", arch.Family)
+	}
+}
+
+func processSEHAMD64(ldr *loader.Loader, pdata sym.LoaderSym) error {
+	// The following loop traverses a list of pdata entries,
+	// each entry being 3 relocations long. The first relocation
+	// is a pointer to the function symbol to which the pdata entry
+	// corresponds. The third relocation is a pointer to the
+	// corresponding .xdata entry.
+	// Reference:
+	// https://learn.microsoft.com/en-us/cpp/build/exception-handling-x64#struct-runtime_function
+	rels := ldr.Relocs(pdata)
+	if rels.Count()%3 != 0 {
+		return fmt.Errorf(".pdata symbol %q has invalid relocation count", ldr.SymName(pdata))
+	}
+	for i := 0; i < rels.Count(); i += 3 {
+		xrel := rels.At(i + 2)
+		handler := findHandlerInXDataAMD64(ldr, xrel.Sym(), xrel.Add())
+		if handler != 0 {
+			sb := ldr.MakeSymbolUpdater(rels.At(i).Sym())
+			r, _ := sb.AddRel(objabi.R_KEEP)
+			r.SetSym(handler)
+		}
+	}
+	return nil
+}
+
+// findHandlerInXDataAMD64 finds the symbol in the .xdata section that
+// corresponds to the exception handler.
+// Reference:
+// https://learn.microsoft.com/en-us/cpp/build/exception-handling-x64#struct-unwind_info
+func findHandlerInXDataAMD64(ldr *loader.Loader, xsym sym.LoaderSym, add int64) loader.Sym {
+	data := ldr.Data(xsym)
+	if add < 0 || add+unwStaticDataSize > int64(len(data)) {
+		return 0
+	}
+	data = data[add:]
+	var isChained bool
+	switch flag := data[0]; {
+	case flag&UNW_FLAG_EHANDLER != 0 || flag&UNW_FLAG_UHANDLER != 0:
+		// Exception handler.
+	case flag&UNW_FLAG_CHAININFO != 0:
+		isChained = true
+	default:
+		// Nothing to do.
+		return 0
+	}
+	codes := data[2]
+	if codes%2 != 0 {
+		// There are always an even number of unwind codes, even if the last one is unused.
+		codes += 1
+	}
+	// The exception handler relocation is the first relocation after the unwind codes,
+	// unless it is chained, but we will handle this case later.
+	targetOff := add + unwStaticDataSize + unwCodeSize*int64(codes)
+	xrels := ldr.Relocs(xsym)
+	xrelsCount := xrels.Count()
+	idx := sort.Search(xrelsCount, func(i int) bool {
+		return int64(xrels.At(i).Off()) >= targetOff
+	})
+	if idx == xrelsCount {
+		return 0
+	}
+	if isChained {
+		// The third relocation references the next .xdata entry in the chain; recurse.
+		idx += 2
+		if idx >= xrelsCount {
+			return 0
+		}
+		r := xrels.At(idx)
+		return findHandlerInXDataAMD64(ldr, r.Sym(), r.Add())
+	}
+	return xrels.At(idx).Sym()
+}
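
The handler lookup above relies on the fixed layout of UNWIND_INFO: four static header bytes, then an even-padded array of two-byte unwind codes, then the exception-handler RVA. A standalone sketch of that offset computation (the byte values below are fabricated for illustration):

	package main

	import "fmt"

	const (
		unwStaticDataSize = 4 // bytes before the variable-length part
		unwCodeSize       = 2 // bytes per unwind code
	)

	// handlerFieldOffset returns the byte offset of the exception-handler
	// field within an UNWIND_INFO record, mirroring the computation above.
	func handlerFieldOffset(unwindInfo []byte) int {
		codes := int(unwindInfo[2]) // CountOfUnwindCodes
		if codes%2 != 0 {
			codes++ // the unwind-code array is padded to an even count
		}
		return unwStaticDataSize + unwCodeSize*codes
	}

	func main() {
		// Version/flags, prolog size, 3 unwind codes, frame register/offset,
		// followed by the (padded) codes themselves.
		ui := []byte{0x09, 0x0a, 0x03, 0x00, 0x0a, 0x32, 0x06, 0x72, 0x02, 0x50, 0x00, 0x00}
		fmt.Println("handler field at byte offset", handlerFieldOffset(ui))
	}
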
diff --git a/src/cmd/link/internal/loadxcoff/ldxcoff.go b/src/cmd/link/internal/loadxcoff/ldxcoff.go
index 920e1c8..29d1625 100644
--- a/src/cmd/link/internal/loadxcoff/ldxcoff.go
+++ b/src/cmd/link/internal/loadxcoff/ldxcoff.go
@@ -155,7 +155,6 @@
 		}
 	}
 	return textp, nil
-
 }
 
 // Convert symbol xcoff type to sym.SymKind
diff --git a/src/cmd/link/internal/loong64/asm.go b/src/cmd/link/internal/loong64/asm.go
index 8f06068..6607e5d 100644
--- a/src/cmd/link/internal/loong64/asm.go
+++ b/src/cmd/link/internal/loong64/asm.go
@@ -14,7 +14,47 @@
 	"log"
 )
 
-func gentext(ctxt *ld.Link, ldr *loader.Loader) {}
+func gentext(ctxt *ld.Link, ldr *loader.Loader) {
+	initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt)
+	if initfunc == nil {
+		return
+	}
+
+	o := func(op uint32) {
+		initfunc.AddUint32(ctxt.Arch, op)
+	}
+
+	// Emit the following function:
+	//
+	//	local.dso_init:
+	//		la.pcrel $a0, local.moduledata
+	//		b runtime.addmoduledata
+
+	//	0000000000000000 <local.dso_init>:
+	//	0:	1a000004	pcalau12i	$a0, 0
+	//				0: R_LARCH_PCALA_HI20	local.moduledata
+	o(0x1a000004)
+	rel, _ := initfunc.AddRel(objabi.R_ADDRLOONG64U)
+	rel.SetOff(0)
+	rel.SetSiz(4)
+	rel.SetSym(ctxt.Moduledata)
+
+	//	4:	02c00084	addi.d	$a0, $a0, 0
+	//				4: R_LARCH_PCALA_LO12	local.moduledata
+	o(0x02c00084)
+	rel2, _ := initfunc.AddRel(objabi.R_ADDRLOONG64)
+	rel2.SetOff(4)
+	rel2.SetSiz(4)
+	rel2.SetSym(ctxt.Moduledata)
+
+	//	8:	50000000	b	0
+	//				8: R_LARCH_B26	runtime.addmoduledata
+	o(0x50000000)
+	rel3, _ := initfunc.AddRel(objabi.R_CALLLOONG64)
+	rel3.SetOff(8)
+	rel3.SetSiz(4)
+	rel3.SetSym(addmoduledata)
+}
 
 func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool {
 	log.Fatalf("adddynrel not implemented")
@@ -78,12 +118,22 @@
 		out.Write64(uint64(sectoff))
 		out.Write64(uint64(elf.R_LARCH_PCALA_HI20) | uint64(elfsym)<<32)
 		out.Write64(uint64(r.Xadd))
+
+	case objabi.R_LOONG64_GOT_HI:
+		out.Write64(uint64(sectoff))
+		out.Write64(uint64(elf.R_LARCH_GOT_PC_HI20) | uint64(elfsym)<<32)
+		out.Write64(uint64(0x0))
+
+	case objabi.R_LOONG64_GOT_LO:
+		out.Write64(uint64(sectoff))
+		out.Write64(uint64(elf.R_LARCH_GOT_PC_LO12) | uint64(elfsym)<<32)
+		out.Write64(uint64(0x0))
 	}
 
 	return true
 }
 
-func elfsetupplt(ctxt *ld.Link, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
+func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
 	return
 }
 
@@ -111,7 +161,9 @@
 			objabi.R_CALLLOONG64,
 			objabi.R_JMPLOONG64,
 			objabi.R_LOONG64_TLS_IE_PCREL_HI,
-			objabi.R_LOONG64_TLS_IE_LO:
+			objabi.R_LOONG64_TLS_IE_LO,
+			objabi.R_LOONG64_GOT_HI,
+			objabi.R_LOONG64_GOT_LO:
 			return val, 1, true
 		}
 	}
@@ -156,7 +208,10 @@
 func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) {
 	switch r.Type() {
 	case objabi.R_ADDRLOONG64,
-		objabi.R_ADDRLOONG64U:
+		objabi.R_ADDRLOONG64U,
+		objabi.R_LOONG64_GOT_HI,
+		objabi.R_LOONG64_GOT_LO:
+
 		return ld.ExtrelocViaOuterSym(ldr, r, s), true
 
 	case objabi.R_ADDRLOONG64TLS,
diff --git a/src/cmd/link/internal/loong64/obj.go b/src/cmd/link/internal/loong64/obj.go
index c3f6ed9..b68ed49 100644
--- a/src/cmd/link/internal/loong64/obj.go
+++ b/src/cmd/link/internal/loong64/obj.go
@@ -29,7 +29,7 @@
 		Gentext:          gentext,
 
 		ELF: ld.ELFArch{
-			Linuxdynld:     "/lib64/ld.so.1",
+			Linuxdynld:     "/lib64/ld-linux-loongarch-lp64d.so.1",
 			LinuxdynldMusl: "/lib64/ld-musl-loongarch.so.1",
 			Freebsddynld:   "XXX",
 			Openbsddynld:   "XXX",
@@ -53,11 +53,11 @@
 	case objabi.Hlinux: /* loong64 elf */
 		ld.Elfinit(ctxt)
 		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 0x10000
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
+		}
 	}
 }
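
This reordering (resolve -R/FlagRound first, then derive the default text address from it) recurs for mips, mips64, ppc64, riscv64, s390x and x86 below. A standalone sketch of the effect, with a local round-up helper standing in for ld.Rnd and an illustrative header size:

	package main

	import "fmt"

	// rnd rounds v up to a multiple of r (r > 0), standing in for ld.Rnd.
	func rnd(v, r int64) int64 {
		v += r - 1
		return v - v%r
	}

	func main() {
		const headr = 3072 // stand-in for ld.HEADR (ELFRESERVE); illustrative
		for _, round := range []int64{0x10000, 0x200000} {
			textAddr := rnd(0x10000, round) + headr
			fmt.Printf("round=%#x -> default text address %#x\n", round, textAddr)
		}
	}

With a larger -R value the base is bumped to the next multiple of the round instead of staying at 0x10000, which is why the round must be settled before the text address.
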
diff --git a/src/cmd/link/internal/mips/asm.go b/src/cmd/link/internal/mips/asm.go
index 5891d35..5d7e5c7 100644
--- a/src/cmd/link/internal/mips/asm.go
+++ b/src/cmd/link/internal/mips/asm.go
@@ -68,7 +68,7 @@
 	return true
 }
 
-func elfsetupplt(ctxt *ld.Link, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
+func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
 	return
 }
 
diff --git a/src/cmd/link/internal/mips/obj.go b/src/cmd/link/internal/mips/obj.go
index 1caddac..c76e267 100644
--- a/src/cmd/link/internal/mips/obj.go
+++ b/src/cmd/link/internal/mips/obj.go
@@ -91,17 +91,16 @@
 	case objabi.Hlinux: /* mips elf */
 		ld.Elfinit(ctxt)
 		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 0x10000
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
+		}
 	}
 }
 
 func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool {
 	ld.Exitf("adddynrel currently unimplemented for MIPS")
 	return false
-
 }
diff --git a/src/cmd/link/internal/mips64/asm.go b/src/cmd/link/internal/mips64/asm.go
index bd0e019..e82d986 100644
--- a/src/cmd/link/internal/mips64/asm.go
+++ b/src/cmd/link/internal/mips64/asm.go
@@ -184,7 +184,7 @@
 	return true
 }
 
-func elfsetupplt(ctxt *ld.Link, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
+func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
 	if plt.Size() != 0 {
 		return
 	}
diff --git a/src/cmd/link/internal/mips64/obj.go b/src/cmd/link/internal/mips64/obj.go
index 7fb19e9..193ad1f 100644
--- a/src/cmd/link/internal/mips64/obj.go
+++ b/src/cmd/link/internal/mips64/obj.go
@@ -88,24 +88,23 @@
 
 	case objabi.Hplan9: /* plan 9 */
 		ld.HEADR = 32
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 16*1024 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 16 * 1024
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(16*1024, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
 	case objabi.Hlinux, /* mips64 elf */
 		objabi.Hopenbsd:
 		ld.Elfinit(ctxt)
 		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 0x10000
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
+		}
 	}
 
 	dynSymCount = 0
diff --git a/src/cmd/link/internal/ppc64/asm.go b/src/cmd/link/internal/ppc64/asm.go
index d537bc5..09647d8 100644
--- a/src/cmd/link/internal/ppc64/asm.go
+++ b/src/cmd/link/internal/ppc64/asm.go
@@ -270,9 +270,8 @@
 	for _, s := range ctxt.Textp {
 		relocs := ldr.Relocs(s)
 		for i := 0; i < relocs.Count(); i++ {
-			r := relocs.At(i)
-			switch r.Type() {
-			case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL24):
+			switch r := relocs.At(i); r.Type() {
+			case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL24), objabi.R_CALLPOWER:
 				switch ldr.SymType(r.Sym()) {
 				case sym.SDYNIMPORT:
 					// This call goes through the PLT, generate and call through a PLT stub.
@@ -633,7 +632,7 @@
 		su.SetRelocAdd(rIdx, r.Add()+localEoffset)
 
 		if targType == sym.SDYNIMPORT {
-			// Should have been handled in elfsetupplt
+			// Should have been handled in genstubs
 			ldr.Errorf(s, "unexpected R_PPC64_REL24 for dyn import")
 		}
 
@@ -918,7 +917,6 @@
 		emitReloc(ld.XCOFF_R_REF|0x3F<<8, 0)
 	}
 	return true
-
 }
 
 func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool {
@@ -1015,7 +1013,7 @@
 	return true
 }
 
-func elfsetupplt(ctxt *ld.Link, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
+func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
 	if plt.Size() == 0 {
 		// The dynamic linker stores the address of the
 		// dynamic resolver and the DSO identifier in the two
@@ -1575,7 +1573,6 @@
 			var o1 uint32
 			if target.IsBigEndian() {
 				o1 = binary.BigEndian.Uint32(p[r.Off()-2:])
-
 			} else {
 				o1 = binary.LittleEndian.Uint32(p[r.Off():])
 			}
diff --git a/src/cmd/link/internal/ppc64/obj.go b/src/cmd/link/internal/ppc64/obj.go
index 3fc6622..703c8ec 100644
--- a/src/cmd/link/internal/ppc64/obj.go
+++ b/src/cmd/link/internal/ppc64/obj.go
@@ -71,7 +71,7 @@
 			LinuxdynldMusl: musl,
 
 			Freebsddynld:   "XXX",
-			Openbsddynld:   "XXX",
+			Openbsddynld:   "/usr/libexec/ld.so",
 			Netbsddynld:    "XXX",
 			Dragonflydynld: "XXX",
 			Solarisdynld:   "XXX",
@@ -92,23 +92,23 @@
 
 	case objabi.Hplan9: /* plan 9 */
 		ld.HEADR = 32
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4128
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 4096
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(4096, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
-	case objabi.Hlinux: /* ppc64 elf */
+	case objabi.Hlinux, /* ppc64 elf */
+		objabi.Hopenbsd:
 		ld.Elfinit(ctxt)
 		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 0x10000
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
 	case objabi.Haix:
 		ld.Xcoffinit(ctxt)
diff --git a/src/cmd/link/internal/riscv64/asm.go b/src/cmd/link/internal/riscv64/asm.go
index 6b5c0cb..d95de6c 100644
--- a/src/cmd/link/internal/riscv64/asm.go
+++ b/src/cmd/link/internal/riscv64/asm.go
@@ -20,7 +20,139 @@
 // fakeLabelName matches the RISCV_FAKE_LABEL_NAME from binutils.
 const fakeLabelName = ".L0 "
 
-func gentext(ctxt *ld.Link, ldr *loader.Loader) {
+func gentext(ctxt *ld.Link, ldr *loader.Loader) {}
+
+func findHI20Reloc(ldr *loader.Loader, s loader.Sym, val int64) *loader.Reloc {
+	outer := ldr.OuterSym(s)
+	if outer == 0 {
+		return nil
+	}
+	relocs := ldr.Relocs(outer)
+	start := sort.Search(relocs.Count(), func(i int) bool { return ldr.SymValue(outer)+int64(relocs.At(i).Off()) >= val })
+	for idx := start; idx < relocs.Count(); idx++ {
+		r := relocs.At(idx)
+		if ldr.SymValue(outer)+int64(r.Off()) != val {
+			break
+		}
+		if r.Type() == objabi.R_RISCV_GOT_HI20 || r.Type() == objabi.R_RISCV_PCREL_HI20 {
+			return &r
+		}
+	}
+	return nil
+}
+
+func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool {
+	targ := r.Sym()
+
+	var targType sym.SymKind
+	if targ != 0 {
+		targType = ldr.SymType(targ)
+	}
+
+	switch r.Type() {
+	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_CALL),
+		objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_CALL_PLT):
+
+		if targType == sym.SDYNIMPORT {
+			addpltsym(target, ldr, syms, targ)
+			su := ldr.MakeSymbolUpdater(s)
+			su.SetRelocSym(rIdx, syms.PLT)
+			su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymPlt(targ)))
+		}
+		if targType == 0 || targType == sym.SXREF {
+			ldr.Errorf(s, "unknown symbol %s in RISCV call", ldr.SymName(targ))
+		}
+		su := ldr.MakeSymbolUpdater(s)
+		su.SetRelocType(rIdx, objabi.R_RISCV_CALL)
+		return true
+
+	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_GOT_HI20):
+		if targType != sym.SDYNIMPORT {
+			// TODO(jsing): Could convert to non-GOT reference.
+		}
+
+		ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_RISCV_64))
+		su := ldr.MakeSymbolUpdater(s)
+		su.SetRelocType(rIdx, objabi.R_RISCV_GOT_HI20)
+		su.SetRelocSym(rIdx, syms.GOT)
+		su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ)))
+		return true
+
+	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_PCREL_HI20):
+		su := ldr.MakeSymbolUpdater(s)
+		su.SetRelocType(rIdx, objabi.R_RISCV_PCREL_HI20)
+		return true
+
+	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_PCREL_LO12_I):
+		if r.Add() != 0 {
+			ldr.Errorf(s, "R_RISCV_PCREL_LO12_I with non-zero addend")
+		}
+		su := ldr.MakeSymbolUpdater(s)
+		su.SetRelocType(rIdx, objabi.R_RISCV_PCREL_LO12_I)
+		return true
+
+	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_PCREL_LO12_S):
+		if r.Add() != 0 {
+			ldr.Errorf(s, "R_RISCV_PCREL_LO12_S with non-zero addend")
+		}
+		su := ldr.MakeSymbolUpdater(s)
+		su.SetRelocType(rIdx, objabi.R_RISCV_PCREL_LO12_S)
+		return true
+
+	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_RVC_BRANCH):
+		su := ldr.MakeSymbolUpdater(s)
+		su.SetRelocType(rIdx, objabi.R_RISCV_RVC_BRANCH)
+		return true
+
+	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_RVC_JUMP):
+		su := ldr.MakeSymbolUpdater(s)
+		su.SetRelocType(rIdx, objabi.R_RISCV_RVC_JUMP)
+		return true
+
+	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_BRANCH):
+		su := ldr.MakeSymbolUpdater(s)
+		su.SetRelocType(rIdx, objabi.R_RISCV_BRANCH)
+		return true
+
+	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_RISCV_RELAX):
+		// Ignore relaxations, at least for now.
+		return true
+
+	default:
+		if r.Type() >= objabi.ElfRelocOffset {
+			ldr.Errorf(s, "unexpected relocation type %d (%s)", r.Type(), sym.RelocName(target.Arch, r.Type()))
+			return false
+		}
+	}
+
+	// Reread the reloc to incorporate any changes in type above.
+	relocs := ldr.Relocs(s)
+	r = relocs.At(rIdx)
+
+	switch r.Type() {
+	case objabi.R_RISCV_CALL:
+		if targType != sym.SDYNIMPORT {
+			// nothing to do, the relocation will be laid out in reloc
+			return true
+		}
+		if target.IsExternal() {
+			// External linker will do this relocation.
+			return true
+		}
+		// Internal linking.
+		if r.Add() != 0 {
+			ldr.Errorf(s, "PLT reference with non-zero addend (%v)", r.Add())
+		}
+		// Build a PLT entry and change the relocation target to that entry.
+		addpltsym(target, ldr, syms, targ)
+		su := ldr.MakeSymbolUpdater(s)
+		su.SetRelocSym(rIdx, syms.PLT)
+		su.SetRelocAdd(rIdx, int64(ldr.SymPlt(targ)))
+
+		return true
+	}
+
+	return false
 }
 
 func genSymsLate(ctxt *ld.Link, ldr *loader.Loader) {
@@ -39,7 +171,7 @@
 		for ri := 0; ri < relocs.Count(); ri++ {
 			r := relocs.At(ri)
 			if r.Type() != objabi.R_RISCV_PCREL_ITYPE && r.Type() != objabi.R_RISCV_PCREL_STYPE &&
-				r.Type() != objabi.R_RISCV_TLS_IE_ITYPE && r.Type() != objabi.R_RISCV_TLS_IE_STYPE {
+				r.Type() != objabi.R_RISCV_TLS_IE {
 				continue
 			}
 			if r.Off() == 0 && ldr.SymType(s) == sym.STEXT {
@@ -96,12 +228,12 @@
 		}
 		out.Write64(uint64(r.Xadd))
 
-	case objabi.R_RISCV_CALL, objabi.R_RISCV_CALL_TRAMP:
+	case objabi.R_RISCV_JAL, objabi.R_RISCV_JAL_TRAMP:
 		out.Write64(uint64(sectoff))
 		out.Write64(uint64(elf.R_RISCV_JAL) | uint64(elfsym)<<32)
 		out.Write64(uint64(r.Xadd))
 
-	case objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE, objabi.R_RISCV_TLS_IE_ITYPE, objabi.R_RISCV_TLS_IE_STYPE:
+	case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE, objabi.R_RISCV_TLS_IE:
 		// Find the text symbol for the AUIPC instruction targeted
 		// by this relocation.
 		relocs := ldr.Relocs(s)
@@ -117,20 +249,19 @@
 		// corresponding R_RISCV_PCREL_LO12_I or R_RISCV_PCREL_LO12_S relocation.
 		// Note that the LO12 relocation must point to a target that has a valid
 		// HI20 PC-relative relocation text symbol, which in turn points to the
-		// given symbol. For further details see the ELF specification for RISC-V:
+		// given symbol. For further details see section 8.4.9 of the RISC-V ABIs
+		// Specification:
 		//
-		//   https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-elf.adoc#pc-relative-symbol-addresses
+		//  https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases/download/v1.0/riscv-abi.pdf
 		//
 		var hiRel, loRel elf.R_RISCV
 		switch r.Type {
-		case objabi.R_RISCV_PCREL_ITYPE:
+		case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE:
 			hiRel, loRel = elf.R_RISCV_PCREL_HI20, elf.R_RISCV_PCREL_LO12_I
 		case objabi.R_RISCV_PCREL_STYPE:
 			hiRel, loRel = elf.R_RISCV_PCREL_HI20, elf.R_RISCV_PCREL_LO12_S
-		case objabi.R_RISCV_TLS_IE_ITYPE:
+		case objabi.R_RISCV_TLS_IE:
 			hiRel, loRel = elf.R_RISCV_TLS_GOT_HI20, elf.R_RISCV_PCREL_LO12_I
-		case objabi.R_RISCV_TLS_IE_STYPE:
-			hiRel, loRel = elf.R_RISCV_TLS_GOT_HI20, elf.R_RISCV_PCREL_LO12_S
 		}
 		out.Write64(uint64(sectoff))
 		out.Write64(uint64(hiRel) | uint64(elfsym)<<32)
@@ -139,6 +270,14 @@
 		out.Write64(uint64(loRel) | uint64(hi20ElfSym)<<32)
 		out.Write64(uint64(0))
 
+	case objabi.R_RISCV_TLS_LE:
+		out.Write64(uint64(sectoff))
+		out.Write64(uint64(elf.R_RISCV_TPREL_HI20) | uint64(elfsym)<<32)
+		out.Write64(uint64(r.Xadd))
+		out.Write64(uint64(sectoff + 4))
+		out.Write64(uint64(elf.R_RISCV_TPREL_LO12_I) | uint64(elfsym)<<32)
+		out.Write64(uint64(r.Xadd))
+
 	default:
 		return false
 	}
@@ -146,8 +285,106 @@
 	return true
 }
 
-func elfsetupplt(ctxt *ld.Link, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
-	log.Fatalf("elfsetupplt")
+func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
+	if plt.Size() != 0 {
+		return
+	}
+	if gotplt.Size() != 0 {
+		ctxt.Errorf(gotplt.Sym(), "got.plt is not empty")
+	}
+
+	// See section 8.4.6 of the RISC-V ABIs Specification:
+	//
+	//  https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases/download/v1.0/riscv-abi.pdf
+	//
+	// 1:   auipc  t2, %pcrel_hi(.got.plt)
+	//      sub    t1, t1, t3               # shifted .got.plt offset + hdr size + 12
+	//      l[w|d] t3, %pcrel_lo(1b)(t2)    # _dl_runtime_resolve
+	//      addi   t1, t1, -(hdr size + 12) # shifted .got.plt offset
+	//      addi   t0, t2, %pcrel_lo(1b)    # &.got.plt
+	//      srli   t1, t1, log2(16/PTRSIZE) # .got.plt offset
+	//      l[w|d] t0, PTRSIZE(t0)          # link map
+	//      jr     t3
+
+	plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 0, objabi.R_RISCV_PCREL_HI20, 4)
+	plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x00000397) // auipc   t2,0x0
+
+	sb := ldr.MakeSymbolBuilder(fakeLabelName)
+	sb.SetType(sym.STEXT)
+	sb.SetValue(ldr.SymValue(plt.Sym()) + plt.Size() - 4)
+	sb.SetLocal(true)
+	sb.SetReachable(true)
+	sb.SetVisibilityHidden(true)
+	plt.AddInteriorSym(sb.Sym())
+
+	plt.AddUint32(ctxt.Arch, 0x41c30333) // sub     t1,t1,t3
+
+	plt.AddSymRef(ctxt.Arch, sb.Sym(), 0, objabi.R_RISCV_PCREL_LO12_I, 4)
+	plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x0003be03) // ld      t3,0(t2)
+
+	plt.AddUint32(ctxt.Arch, 0xfd430313) // addi    t1,t1,-44
+
+	plt.AddSymRef(ctxt.Arch, sb.Sym(), 0, objabi.R_RISCV_PCREL_LO12_I, 4)
+	plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x00038293) // addi    t0,t2,0
+
+	plt.AddUint32(ctxt.Arch, 0x00135313) // srli    t1,t1,0x1
+	plt.AddUint32(ctxt.Arch, 0x0082b283) // ld      t0,8(t0)
+	plt.AddUint32(ctxt.Arch, 0x00008e02) // jr      t3
+
+	gotplt.AddAddrPlus(ctxt.Arch, dynamic, 0) // got.plt[0] = _dl_runtime_resolve
+	gotplt.AddUint64(ctxt.Arch, 0)            // got.plt[1] = link map
+}
+
+func addpltsym(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) {
+	if ldr.SymPlt(s) >= 0 {
+		return
+	}
+
+	ld.Adddynsym(ldr, target, syms, s)
+
+	plt := ldr.MakeSymbolUpdater(syms.PLT)
+	gotplt := ldr.MakeSymbolUpdater(syms.GOTPLT)
+	rela := ldr.MakeSymbolUpdater(syms.RelaPLT)
+	if plt.Size() == 0 {
+		panic("plt is not set up")
+	}
+
+	// See section 8.4.6 of the RISC-V ABIs Specification:
+	//
+	//  https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases/download/v1.0/riscv-abi.pdf
+	//
+	// 1:  auipc   t3, %pcrel_hi(function@.got.plt)
+	//     l[w|d]  t3, %pcrel_lo(1b)(t3)
+	//     jalr    t1, t3
+	//     nop
+
+	plt.AddSymRef(target.Arch, gotplt.Sym(), gotplt.Size(), objabi.R_RISCV_PCREL_HI20, 4)
+	plt.SetUint32(target.Arch, plt.Size()-4, 0x00000e17) // auipc   t3,0x0
+
+	sb := ldr.MakeSymbolBuilder(fakeLabelName)
+	sb.SetType(sym.STEXT)
+	sb.SetValue(ldr.SymValue(plt.Sym()) + plt.Size() - 4)
+	sb.SetLocal(true)
+	sb.SetReachable(true)
+	sb.SetVisibilityHidden(true)
+	plt.AddInteriorSym(sb.Sym())
+
+	plt.AddSymRef(target.Arch, sb.Sym(), 0, objabi.R_RISCV_PCREL_LO12_I, 4)
+	plt.SetUint32(target.Arch, plt.Size()-4, 0x000e3e03) // ld      t3,0(t3)
+	plt.AddUint32(target.Arch, 0x000e0367)               // jalr    t1,t3
+	plt.AddUint32(target.Arch, 0x00000001)               // nop
+
+	ldr.SetPlt(s, int32(plt.Size()-16))
+
+	// add to got.plt: pointer to plt[0]
+	gotplt.AddAddrPlus(target.Arch, plt.Sym(), 0)
+
+	// rela
+	rela.AddAddrPlus(target.Arch, gotplt.Sym(), gotplt.Size()-8)
+	sDynid := ldr.SymDynid(s)
+
+	rela.AddUint64(target.Arch, elf.R_INFO(uint32(sDynid), uint32(elf.R_RISCV_JUMP_SLOT)))
+	rela.AddUint64(target.Arch, 0)
 }
 
 func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool {
@@ -162,20 +399,20 @@
 	// If the call points to a trampoline, see if we can reach the symbol
 	// directly. This situation can occur when the relocation symbol is
 	// not assigned an address until after the trampolines are generated.
-	if r.Type() == objabi.R_RISCV_CALL_TRAMP {
+	if r.Type() == objabi.R_RISCV_JAL_TRAMP {
 		relocs := ldr.Relocs(rs)
 		if relocs.Count() != 1 {
 			ldr.Errorf(s, "trampoline %v has %d relocations", ldr.SymName(rs), relocs.Count())
 		}
 		tr := relocs.At(0)
-		if tr.Type() != objabi.R_RISCV_PCREL_ITYPE {
+		if tr.Type() != objabi.R_RISCV_CALL {
 			ldr.Errorf(s, "trampoline %v has unexpected relocation %v", ldr.SymName(rs), tr.Type())
 		}
 		trs := tr.Sym()
 		if ldr.SymValue(trs) != 0 && ldr.SymType(trs) != sym.SDYNIMPORT && ldr.SymType(trs) != sym.SUNDEFEXT {
 			trsOff := ldr.SymValue(trs) + tr.Add() - pc
 			if trsOff >= -(1<<20) && trsOff < (1<<20) {
-				r.SetType(objabi.R_RISCV_CALL)
+				r.SetType(objabi.R_RISCV_JAL)
 				r.SetSym(trs)
 				r.SetAdd(tr.Add())
 				rs = trs
@@ -186,10 +423,10 @@
 
 	if target.IsExternal() {
 		switch r.Type() {
-		case objabi.R_RISCV_CALL, objabi.R_RISCV_CALL_TRAMP:
+		case objabi.R_RISCV_JAL, objabi.R_RISCV_JAL_TRAMP:
 			return val, 1, true
 
-		case objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE, objabi.R_RISCV_TLS_IE_ITYPE, objabi.R_RISCV_TLS_IE_STYPE:
+		case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE, objabi.R_RISCV_TLS_IE, objabi.R_RISCV_TLS_LE:
 			return val, 2, true
 		}
 
@@ -199,11 +436,11 @@
 	off := ldr.SymValue(rs) + r.Add() - pc
 
 	switch r.Type() {
-	case objabi.R_RISCV_CALL, objabi.R_RISCV_CALL_TRAMP:
+	case objabi.R_RISCV_JAL, objabi.R_RISCV_JAL_TRAMP:
 		// Generate instruction immediates.
 		imm, err := riscv.EncodeJImmediate(off)
 		if err != nil {
-			ldr.Errorf(s, "cannot encode R_RISCV_CALL relocation offset for %s: %v", ldr.SymName(rs), err)
+			ldr.Errorf(s, "cannot encode J-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
 		}
 		immMask := int64(riscv.JTypeImmMask)
 
@@ -211,21 +448,84 @@
 
 		return val, 0, true
 
-	case objabi.R_RISCV_TLS_IE_ITYPE, objabi.R_RISCV_TLS_IE_STYPE:
-		// TLS relocations are not currently handled for internal linking.
-		// For now, TLS is only used when cgo is in use and cgo currently
-		// requires external linking. However, we need to accept these
-		// relocations so that code containing TLS variables will link,
-		// even when they're not being used. For now, replace these
-		// instructions with EBREAK to detect accidental use.
-		const ebreakIns = 0x00100073
-		return ebreakIns<<32 | ebreakIns, 0, true
+	case objabi.R_RISCV_TLS_IE:
+		log.Fatalf("cannot handle R_RISCV_TLS_IE (sym %s) when linking internally", ldr.SymName(s))
+		return val, 0, false
 
-	case objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE:
-		// Generate AUIPC and second instruction immediates.
+	case objabi.R_RISCV_TLS_LE:
+		// Generate LUI and ADDIW instruction immediates.
+		off := r.Add()
+
 		low, high, err := riscv.Split32BitImmediate(off)
 		if err != nil {
-			ldr.Errorf(s, "R_RISCV_PCREL_ relocation does not fit in 32 bits: %d", off)
+			ldr.Errorf(s, "relocation does not fit in 32-bits: %d", off)
+		}
+
+		luiImm, err := riscv.EncodeUImmediate(high)
+		if err != nil {
+			ldr.Errorf(s, "cannot encode R_RISCV_TLS_LE LUI relocation offset for %s: %v", ldr.SymName(rs), err)
+		}
+
+		addiwImm, err := riscv.EncodeIImmediate(low)
+		if err != nil {
+			ldr.Errorf(s, "cannot encode R_RISCV_TLS_LE I-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
+		}
+
+		lui := int64(uint32(val))
+		addiw := int64(uint32(val >> 32))
+
+		lui = (lui &^ riscv.UTypeImmMask) | int64(uint32(luiImm))
+		addiw = (addiw &^ riscv.ITypeImmMask) | int64(uint32(addiwImm))
+
+		return addiw<<32 | lui, 0, true
+
+	case objabi.R_RISCV_BRANCH:
+		pc := ldr.SymValue(s) + int64(r.Off())
+		off := ldr.SymValue(rs) + r.Add() - pc
+
+		imm, err := riscv.EncodeBImmediate(off)
+		if err != nil {
+			ldr.Errorf(s, "cannot encode B-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
+		}
+		ins := (int64(uint32(val)) &^ riscv.BTypeImmMask) | int64(uint32(imm))
+
+		return ins, 0, true
+
+	case objabi.R_RISCV_RVC_BRANCH, objabi.R_RISCV_RVC_JUMP:
+		pc := ldr.SymValue(s) + int64(r.Off())
+		off := ldr.SymValue(rs) + r.Add() - pc
+
+		var err error
+		var imm, immMask int64
+		switch r.Type() {
+		case objabi.R_RISCV_RVC_BRANCH:
+			immMask = riscv.CBTypeImmMask
+			imm, err = riscv.EncodeCBImmediate(off)
+			if err != nil {
+				ldr.Errorf(s, "cannot encode CB-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
+			}
+		case objabi.R_RISCV_RVC_JUMP:
+			immMask = riscv.CJTypeImmMask
+			imm, err = riscv.EncodeCJImmediate(off)
+			if err != nil {
+				ldr.Errorf(s, "cannot encode CJ-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
+			}
+		default:
+			panic(fmt.Sprintf("unknown relocation type: %v", r.Type()))
+		}
+
+		ins := (int64(uint16(val)) &^ immMask) | int64(uint16(imm))
+
+		return ins, 0, true
+
+	case objabi.R_RISCV_GOT_HI20, objabi.R_RISCV_PCREL_HI20:
+		pc := ldr.SymValue(s) + int64(r.Off())
+		off := ldr.SymValue(rs) + r.Add() - pc
+
+		// Generate AUIPC immediates.
+		_, high, err := riscv.Split32BitImmediate(off)
+		if err != nil {
+			ldr.Errorf(s, "relocation does not fit in 32-bits: %d", off)
 		}
 
 		auipcImm, err := riscv.EncodeUImmediate(high)
@@ -233,22 +533,75 @@
 			ldr.Errorf(s, "cannot encode R_RISCV_PCREL_ AUIPC relocation offset for %s: %v", ldr.SymName(rs), err)
 		}
 
+		auipc := int64(uint32(val))
+		auipc = (auipc &^ riscv.UTypeImmMask) | int64(uint32(auipcImm))
+
+		return auipc, 0, true
+
+	case objabi.R_RISCV_PCREL_LO12_I, objabi.R_RISCV_PCREL_LO12_S:
+		hi20Reloc := findHI20Reloc(ldr, rs, ldr.SymValue(rs))
+		if hi20Reloc == nil {
+			ldr.Errorf(s, "missing HI20 relocation for LO12 relocation with %s (%d)", ldr.SymName(rs), rs)
+		}
+
+		pc := ldr.SymValue(s) + int64(hi20Reloc.Off())
+		off := ldr.SymValue(hi20Reloc.Sym()) + hi20Reloc.Add() - pc
+
+		low, _, err := riscv.Split32BitImmediate(off)
+		if err != nil {
+			ldr.Errorf(s, "relocation does not fit in 32-bits: %d", off)
+		}
+
+		var imm, immMask int64
+		switch r.Type() {
+		case objabi.R_RISCV_PCREL_LO12_I:
+			immMask = riscv.ITypeImmMask
+			imm, err = riscv.EncodeIImmediate(low)
+			if err != nil {
+				ldr.Errorf(s, "cannot encode objabi.R_RISCV_PCREL_LO12_I I-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
+			}
+		case objabi.R_RISCV_PCREL_LO12_S:
+			immMask = riscv.STypeImmMask
+			imm, err = riscv.EncodeSImmediate(low)
+			if err != nil {
+				ldr.Errorf(s, "cannot encode R_RISCV_PCREL_LO12_S S-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
+			}
+		default:
+			panic(fmt.Sprintf("unknown relocation type: %v", r.Type()))
+		}
+
+		ins := int64(uint32(val))
+		ins = (ins &^ immMask) | int64(uint32(imm))
+		return ins, 0, true
+
+	case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE:
+		// Generate AUIPC and second instruction immediates.
+		low, high, err := riscv.Split32BitImmediate(off)
+		if err != nil {
+			ldr.Errorf(s, "pc-relative relocation does not fit in 32 bits: %d", off)
+		}
+
+		auipcImm, err := riscv.EncodeUImmediate(high)
+		if err != nil {
+			ldr.Errorf(s, "cannot encode AUIPC relocation offset for %s: %v", ldr.SymName(rs), err)
+		}
+
 		var secondImm, secondImmMask int64
 		switch r.Type() {
-		case objabi.R_RISCV_PCREL_ITYPE:
+		case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE:
 			secondImmMask = riscv.ITypeImmMask
 			secondImm, err = riscv.EncodeIImmediate(low)
 			if err != nil {
-				ldr.Errorf(s, "cannot encode R_RISCV_PCREL_ITYPE I-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
+				ldr.Errorf(s, "cannot encode I-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
 			}
 		case objabi.R_RISCV_PCREL_STYPE:
 			secondImmMask = riscv.STypeImmMask
 			secondImm, err = riscv.EncodeSImmediate(low)
 			if err != nil {
-				ldr.Errorf(s, "cannot encode R_RISCV_PCREL_STYPE S-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
+				ldr.Errorf(s, "cannot encode S-type instruction relocation offset for %s: %v", ldr.SymName(rs), err)
 			}
 		default:
-			panic(fmt.Sprintf("Unknown relocation type: %v", r.Type()))
+			panic(fmt.Sprintf("unknown relocation type: %v", r.Type()))
 		}
 
 		auipc := int64(uint32(val))
@@ -270,10 +623,10 @@
 
 func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) {
 	switch r.Type() {
-	case objabi.R_RISCV_CALL, objabi.R_RISCV_CALL_TRAMP:
+	case objabi.R_RISCV_JAL, objabi.R_RISCV_JAL_TRAMP:
 		return ld.ExtrelocSimple(ldr, r), true
 
-	case objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE, objabi.R_RISCV_TLS_IE_ITYPE, objabi.R_RISCV_TLS_IE_STYPE:
+	case objabi.R_RISCV_CALL, objabi.R_RISCV_PCREL_ITYPE, objabi.R_RISCV_PCREL_STYPE, objabi.R_RISCV_TLS_IE, objabi.R_RISCV_TLS_LE:
 		return ld.ExtrelocViaOuterSym(ldr, r, s), true
 	}
 	return loader.ExtReloc{}, false
@@ -284,7 +637,7 @@
 	r := relocs.At(ri)
 
 	switch r.Type() {
-	case objabi.R_RISCV_CALL:
+	case objabi.R_RISCV_JAL:
 		pc := ldr.SymValue(s) + int64(r.Off())
 		off := ldr.SymValue(rs) + r.Add() - pc
 
@@ -338,13 +691,16 @@
 			// address, so we have to assume a trampoline is required. Mark
 			// this as a call via a trampoline so that we can potentially
 			// switch to a direct call during relocation.
-			sb.SetRelocType(ri, objabi.R_RISCV_CALL_TRAMP)
+			sb.SetRelocType(ri, objabi.R_RISCV_JAL_TRAMP)
 		}
 		relocs := sb.Relocs()
 		r := relocs.At(ri)
 		r.SetSym(tramp)
 		r.SetAdd(0)
 
+	case objabi.R_RISCV_CALL:
+		// Nothing to do, already using AUIPC+JALR.
+
 	default:
 		ctxt.Errorf(s, "trampoline called with non-jump reloc: %d (%s)", r.Type(), sym.RelocName(ctxt.Arch, r.Type()))
 	}
@@ -352,9 +708,9 @@
 
 func genCallTramp(arch *sys.Arch, linkmode ld.LinkMode, ldr *loader.Loader, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) {
 	tramp.AddUint32(arch, 0x00000f97) // AUIPC	$0, X31
-	tramp.AddUint32(arch, 0x000f8067) // JALR		X0, (X31)
+	tramp.AddUint32(arch, 0x000f8067) // JALR	X0, (X31)
 
-	r, _ := tramp.AddRel(objabi.R_RISCV_PCREL_ITYPE)
+	r, _ := tramp.AddRel(objabi.R_RISCV_CALL)
 	r.SetSiz(8)
 	r.SetSym(target)
 	r.SetAdd(offset)
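
Several of the relocation cases above pair an AUIPC with an I- or S-type instruction and split a 32-bit pc-relative offset between them. A standalone sketch of that split in the spirit of riscv.Split32BitImmediate (simplified: the high part here is returned already shifted into place, and no range checking is done):

	package main

	import "fmt"

	// split32 divides a 32-bit offset into the value AUIPC contributes (high,
	// rounded so it absorbs the sign of the low half) and the signed 12-bit
	// remainder carried by the paired I- or S-type instruction.
	func split32(off int64) (low, high int64) {
		high = (off + 0x800) &^ 0xfff
		low = off - high
		return low, high
	}

	func main() {
		for _, off := range []int64{0x12345, -0x12345, 0x7ff, 0x800} {
			low, high := split32(off)
			fmt.Printf("off=%#x -> high=%#x low=%d (sum=%#x)\n", off, high, low, high+low)
		}
	}
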
diff --git a/src/cmd/link/internal/riscv64/obj.go b/src/cmd/link/internal/riscv64/obj.go
index 6230bd6..940a8d6 100644
--- a/src/cmd/link/internal/riscv64/obj.go
+++ b/src/cmd/link/internal/riscv64/obj.go
@@ -20,6 +20,7 @@
 		Dwarfregsp: dwarfRegSP,
 		Dwarfreglr: dwarfRegLR,
 
+		Adddynrel:        adddynrel,
 		Archinit:         archinit,
 		Archreloc:        archreloc,
 		Archrelocvariant: archrelocvariant,
@@ -41,7 +42,7 @@
 
 			Freebsddynld:   "/usr/libexec/ld-elf.so.1",
 			Netbsddynld:    "XXX",
-			Openbsddynld:   "XXX",
+			Openbsddynld:   "/usr/libexec/ld.so",
 			Dragonflydynld: "XXX",
 			Solarisdynld:   "XXX",
 
@@ -56,15 +57,15 @@
 
 func archinit(ctxt *ld.Link) {
 	switch ctxt.HeadType {
-	case objabi.Hlinux, objabi.Hfreebsd:
+	case objabi.Hlinux, objabi.Hfreebsd, objabi.Hopenbsd:
 		ld.Elfinit(ctxt)
 		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 0x10000
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
+		}
 	default:
 		ld.Exitf("unknown -H option: %v", ctxt.HeadType)
 	}
diff --git a/src/cmd/link/internal/s390x/asm.go b/src/cmd/link/internal/s390x/asm.go
index 2d9f750..dee0348 100644
--- a/src/cmd/link/internal/s390x/asm.go
+++ b/src/cmd/link/internal/s390x/asm.go
@@ -309,7 +309,7 @@
 	return true
 }
 
-func elfsetupplt(ctxt *ld.Link, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
+func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
 	if plt.Size() == 0 {
 		// stg     %r1,56(%r15)
 		plt.AddUint8(0xe3)
diff --git a/src/cmd/link/internal/s390x/obj.go b/src/cmd/link/internal/s390x/obj.go
index c2386fb..76aa962 100644
--- a/src/cmd/link/internal/s390x/obj.go
+++ b/src/cmd/link/internal/s390x/obj.go
@@ -81,11 +81,11 @@
 	case objabi.Hlinux: // s390x ELF
 		ld.Elfinit(ctxt)
 		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 0x10000
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
+		}
 	}
 }
diff --git a/src/cmd/link/internal/sym/compilation_unit.go b/src/cmd/link/internal/sym/compilation_unit.go
index 3bad5bf..3d6cc3c 100644
--- a/src/cmd/link/internal/sym/compilation_unit.go
+++ b/src/cmd/link/internal/sym/compilation_unit.go
@@ -8,7 +8,7 @@
 
 // LoaderSym holds a loader.Sym value. We can't refer to this
 // type from the sym package since loader imports sym.
-type LoaderSym int
+type LoaderSym uint32
 
 // A CompilationUnit represents a set of source files that are compiled
 // together. Since all Go sources in a Go package are compiled together,
diff --git a/src/cmd/link/internal/sym/reloc.go b/src/cmd/link/internal/sym/reloc.go
index a44dcdd..e614caa 100644
--- a/src/cmd/link/internal/sym/reloc.go
+++ b/src/cmd/link/internal/sym/reloc.go
@@ -8,6 +8,7 @@
 	"cmd/internal/objabi"
 	"cmd/internal/sys"
 	"debug/elf"
+	"debug/macho"
 )
 
 // RelocVariant is a linker-internal variation on a relocation.
@@ -30,24 +31,17 @@
 )
 
 func RelocName(arch *sys.Arch, r objabi.RelocType) string {
-	// We didn't have some relocation types at Go1.4.
-	// Uncomment code when we include those in bootstrap code.
-
 	switch {
 	case r >= objabi.MachoRelocOffset: // Mach-O
-		// nr := (r - objabi.MachoRelocOffset)>>1
-		// switch ctxt.Arch.Family {
-		// case sys.AMD64:
-		// 	return macho.RelocTypeX86_64(nr).String()
-		// case sys.ARM:
-		// 	return macho.RelocTypeARM(nr).String()
-		// case sys.ARM64:
-		// 	return macho.RelocTypeARM64(nr).String()
-		// case sys.I386:
-		// 	return macho.RelocTypeGeneric(nr).String()
-		// default:
-		// 	panic("unreachable")
-		// }
+		nr := (r - objabi.MachoRelocOffset) >> 1
+		switch arch.Family {
+		case sys.AMD64:
+			return macho.RelocTypeX86_64(nr).String()
+		case sys.ARM64:
+			return macho.RelocTypeARM64(nr).String()
+		default:
+			panic("unreachable")
+		}
 	case r >= objabi.ElfRelocOffset: // ELF
 		nr := r - objabi.ElfRelocOffset
 		switch arch.Family {
@@ -67,6 +61,8 @@
 			return elf.R_PPC64(nr).String()
 		case sys.S390X:
 			return elf.R_390(nr).String()
+		case sys.RISCV64:
+			return elf.R_RISCV(nr).String()
 		default:
 			panic("unreachable")
 		}
diff --git a/src/cmd/link/internal/sym/symkind.go b/src/cmd/link/internal/sym/symkind.go
index 77dbf75..08cafb2 100644
--- a/src/cmd/link/internal/sym/symkind.go
+++ b/src/cmd/link/internal/sym/symkind.go
@@ -184,3 +184,7 @@
 func (t SymKind) IsData() bool {
 	return t == SDATA || t == SNOPTRDATA || t == SBSS || t == SNOPTRBSS
 }
+
+func (t SymKind) IsDWARF() bool {
+	return t >= SDWARFSECT && t <= SDWARFLINES
+}
diff --git a/src/cmd/link/internal/x86/asm.go b/src/cmd/link/internal/x86/asm.go
index fa5ad67..876dbd9 100644
--- a/src/cmd/link/internal/x86/asm.go
+++ b/src/cmd/link/internal/x86/asm.go
@@ -421,7 +421,7 @@
 	return -1
 }
 
-func elfsetupplt(ctxt *ld.Link, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
+func elfsetupplt(ctxt *ld.Link, ldr *loader.Loader, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
 	if plt.Size() == 0 {
 		// pushl got+4
 		plt.AddUint8(0xff)
diff --git a/src/cmd/link/internal/x86/obj.go b/src/cmd/link/internal/x86/obj.go
index 6ccb8e0..4336f01 100644
--- a/src/cmd/link/internal/x86/obj.go
+++ b/src/cmd/link/internal/x86/obj.go
@@ -82,21 +82,11 @@
 
 	case objabi.Hplan9: /* plan 9 */
 		ld.HEADR = 32
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4096 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 4096
 		}
-
-	case objabi.Hdarwin: /* apple MACH */
-		ld.HEADR = ld.INITIAL_MACHO_HEADR
 		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4096 + int64(ld.HEADR)
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
+			*ld.FlagTextAddr = ld.Rnd(4096, *ld.FlagRound) + int64(ld.HEADR)
 		}
 
 	case objabi.Hlinux, /* elf32 executable */
@@ -106,12 +96,12 @@
 		ld.Elfinit(ctxt)
 
 		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x08048000 + int64(ld.HEADR)
-		}
 		if *ld.FlagRound == -1 {
 			*ld.FlagRound = 4096
 		}
+		if *ld.FlagTextAddr == -1 {
+			*ld.FlagTextAddr = ld.Rnd(0x08048000, *ld.FlagRound) + int64(ld.HEADR)
+		}
 
 	case objabi.Hwindows: /* PE executable */
 		// ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit
diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go
index c37d6e5..897607c 100644
--- a/src/cmd/link/link_test.go
+++ b/src/cmd/link/link_test.go
@@ -8,9 +8,11 @@
 	"bufio"
 	"bytes"
 	"debug/macho"
+	"errors"
 	"internal/platform"
 	"internal/testenv"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"regexp"
 	"runtime"
@@ -265,9 +267,9 @@
 	testenv.MustHaveCGO(t)
 	testenv.MustHaveGoBuild(t)
 
-	// Only run this on darwin/amd64, where we can cross build for tvOS.
-	if runtime.GOARCH != "amd64" || runtime.GOOS != "darwin" {
-		t.Skip("skipping on non-darwin/amd64 platform")
+	// Only run this on darwin, where we can cross build for tvOS.
+	if runtime.GOOS != "darwin" {
+		t.Skip("skipping on non-darwin platform")
 	}
 	if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
 		t.Skip("skipping in -short mode with $GO_BUILDER_NAME empty")
@@ -296,14 +298,16 @@
 
 	ar := filepath.Join(tmpDir, "lib.a")
 	cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-buildmode=c-archive", "-o", ar, lib)
-	cmd.Env = append(os.Environ(),
+	env := []string{
 		"CGO_ENABLED=1",
 		"GOOS=ios",
 		"GOARCH=arm64",
-		"CC="+strings.Join(CC, " "),
+		"CC=" + strings.Join(CC, " "),
 		"CGO_CFLAGS=", // ensure CGO_CFLAGS does not contain any flags. Issue #35459
-		"CGO_LDFLAGS="+strings.Join(CGO_LDFLAGS, " "),
-	)
+		"CGO_LDFLAGS=" + strings.Join(CGO_LDFLAGS, " "),
+	}
+	cmd.Env = append(os.Environ(), env...)
+	t.Logf("%q %v", env, cmd)
 	if out, err := cmd.CombinedOutput(); err != nil {
 		t.Fatalf("%v: %v:\n%s", cmd.Args, err, out)
 	}
@@ -312,6 +316,7 @@
 	link.Args = append(link.Args, CGO_LDFLAGS...)
 	link.Args = append(link.Args, "-o", filepath.Join(tmpDir, "a.out")) // Avoid writing to package directory.
 	link.Args = append(link.Args, ar, filepath.Join("testdata", "testBuildFortvOS", "main.m"))
+	t.Log(link)
 	if out, err := link.CombinedOutput(); err != nil {
 		t.Fatalf("%v: %v:\n%s", link.Args, err, out)
 	}
@@ -1167,6 +1172,86 @@
 	}
 }
 
+func TestExtLinkCmdlineDeterminism(t *testing.T) {
+	// Test that we pass flags in deterministic order to the external linker
+	testenv.MustHaveGoBuild(t)
+	testenv.MustHaveCGO(t) // this test requires -linkmode=external
+	t.Parallel()
+
+	// test source code, with some cgo exports
+	testSrc := `
+package main
+import "C"
+//export F1
+func F1() {}
+//export F2
+func F2() {}
+//export F3
+func F3() {}
+func main() {}
+`
+
+	tmpdir := t.TempDir()
+	src := filepath.Join(tmpdir, "x.go")
+	if err := os.WriteFile(src, []byte(testSrc), 0666); err != nil {
+		t.Fatal(err)
+	}
+	exe := filepath.Join(tmpdir, "x.exe")
+
+	// Use a deterministic tmp directory so the temporary file paths are
+	// deterministic.
+	linktmp := filepath.Join(tmpdir, "linktmp")
+	if err := os.Mkdir(linktmp, 0777); err != nil {
+		t.Fatal(err)
+	}
+
+	// Link with -v -linkmode=external to see the flags we pass to the
+	// external linker.
+	ldflags := "-ldflags=-v -linkmode=external -tmpdir=" + linktmp
+	var out0 []byte
+	for i := 0; i < 5; i++ {
+		cmd := testenv.Command(t, testenv.GoToolPath(t), "build", ldflags, "-o", exe, src)
+		out, err := cmd.CombinedOutput()
+		if err != nil {
+			t.Fatalf("build failed: %v, output:\n%s", err, out)
+		}
+		if err := os.Remove(exe); err != nil {
+			t.Fatal(err)
+		}
+
+		// extract the "host link" invocaton
+		j := bytes.Index(out, []byte("\nhost link:"))
+		if j == -1 {
+			t.Fatalf("host link step not found, output:\n%s", out)
+		}
+		out = out[j+1:]
+		k := bytes.Index(out, []byte("\n"))
+		if k == -1 {
+			t.Fatalf("no newline after host link, output:\n%s", out)
+		}
+		out = out[:k]
+
+		// filter out the output file name, which is passed by the go
+		// command and is nondeterministic.
+		fs := bytes.Fields(out)
+		for i, f := range fs {
+			if bytes.Equal(f, []byte(`"-o"`)) && i+1 < len(fs) {
+				fs[i+1] = []byte("a.out")
+				break
+			}
+		}
+		out = bytes.Join(fs, []byte{' '})
+
+		if i == 0 {
+			out0 = out
+			continue
+		}
+		if !bytes.Equal(out0, out) {
+			t.Fatalf("output differ:\n%s\n==========\n%s", out0, out)
+		}
+	}
+}
+
 // TestResponseFile tests that creating a response file to pass to the
 // external linker works correctly.
 func TestResponseFile(t *testing.T) {
@@ -1209,3 +1294,84 @@
 		t.Error(err)
 	}
 }
+
+func TestDynimportVar(t *testing.T) {
+	// Test that we can access dynamically imported variables.
+	// Currently darwin only.
+	if runtime.GOOS != "darwin" {
+		t.Skip("skip on non-darwin platform")
+	}
+
+	testenv.MustHaveGoBuild(t)
+	testenv.MustHaveCGO(t)
+
+	t.Parallel()
+
+	tmpdir := t.TempDir()
+	exe := filepath.Join(tmpdir, "a.exe")
+	src := filepath.Join("testdata", "dynimportvar", "main.go")
+
+	for _, mode := range []string{"internal", "external"} {
+		cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-linkmode="+mode, "-o", exe, src)
+		out, err := cmd.CombinedOutput()
+		if err != nil {
+			t.Fatalf("build (linkmode=%s) failed: %v\n%s", mode, err, out)
+		}
+		cmd = testenv.Command(t, exe)
+		out, err = cmd.CombinedOutput()
+		if err != nil {
+			t.Errorf("executable failed to run (%s): %v\n%s", mode, err, out)
+		}
+	}
+}
+
+const helloSrc = `
+package main
+var X = 42
+var Y int
+func main() { println("hello", X, Y) }
+`
+
+func TestFlagS(t *testing.T) {
+	// Test that the -s flag strips the symbol table.
+	testenv.MustHaveGoBuild(t)
+
+	t.Parallel()
+
+	tmpdir := t.TempDir()
+	exe := filepath.Join(tmpdir, "a.exe")
+	src := filepath.Join(tmpdir, "a.go")
+	err := os.WriteFile(src, []byte(helloSrc), 0666)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	modes := []string{"auto"}
+	if testenv.HasCGO() {
+		modes = append(modes, "external")
+	}
+
+	// check a text symbol, a data symbol, and a BSS symbol
+	syms := []string{"main.main", "main.X", "main.Y"}
+
+	for _, mode := range modes {
+		cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-s -linkmode="+mode, "-o", exe, src)
+		out, err := cmd.CombinedOutput()
+		if err != nil {
+			t.Fatalf("build (linkmode=%s) failed: %v\n%s", mode, err, out)
+		}
+		cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", exe)
+		out, err = cmd.CombinedOutput()
+		if err != nil && !errors.As(err, new(*exec.ExitError)) {
+			// Error exit is fine as it may have no symbols.
+			// On darwin we need to emit dynamic symbol references so it
+			// actually has some symbols, and nm succeeds.
+			t.Errorf("(mode=%s) go tool nm failed: %v\n%s", mode, err, out)
+		}
+		for _, s := range syms {
+			if bytes.Contains(out, []byte(s)) {
+				t.Errorf("(mode=%s): unexpected symbol %s", mode, s)
+			}
+		}
+	}
+}
diff --git a/src/cmd/link/testdata/dynimportvar/asm/a.go b/src/cmd/link/testdata/dynimportvar/asm/a.go
new file mode 100644
index 0000000..8d92999
--- /dev/null
+++ b/src/cmd/link/testdata/dynimportvar/asm/a.go
@@ -0,0 +1,15 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a separate package because we cannot have Go
+// assembly code and cgo code in the same package.
+
+//go:build darwin
+
+package asm
+
+//go:cgo_import_dynamic libc_mach_task_self_ mach_task_self_ "/usr/lib/libSystem.B.dylib"
+
+// load mach_task_self_ from assembly code
+func Mach_task_self() uint32
diff --git a/src/cmd/link/testdata/dynimportvar/asm/a_amd64.s b/src/cmd/link/testdata/dynimportvar/asm/a_amd64.s
new file mode 100644
index 0000000..93547e3
--- /dev/null
+++ b/src/cmd/link/testdata/dynimportvar/asm/a_amd64.s
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin
+
+TEXT ·Mach_task_self(SB),0,$0-4
+	MOVQ	$libc_mach_task_self_(SB), AX
+	MOVQ	(AX), AX
+	MOVL	AX, ret+0(FP)
+	RET
diff --git a/src/cmd/link/testdata/dynimportvar/asm/a_arm64.s b/src/cmd/link/testdata/dynimportvar/asm/a_arm64.s
new file mode 100644
index 0000000..bd3c9d7
--- /dev/null
+++ b/src/cmd/link/testdata/dynimportvar/asm/a_arm64.s
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin
+
+TEXT ·Mach_task_self(SB),0,$0-4
+	MOVD	$libc_mach_task_self_(SB), R0
+	MOVD	(R0), R0
+	MOVW	R0, ret+0(FP)
+	RET
diff --git a/src/cmd/link/testdata/dynimportvar/main.go b/src/cmd/link/testdata/dynimportvar/main.go
new file mode 100644
index 0000000..658d340
--- /dev/null
+++ b/src/cmd/link/testdata/dynimportvar/main.go
@@ -0,0 +1,32 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that we can access dynamically imported variables.
+// We use mach_task_self_ from darwin's system library.
+// Check that loading the variable from C and Go gets the
+// same result.
+
+//go:build darwin
+
+package main
+
+/*
+#include <mach/mach_init.h>
+
+unsigned int Mach_task_self(void) {
+	return mach_task_self();
+}
+*/
+import "C"
+
+import "cmd/link/testdata/dynimportvar/asm"
+
+func main() {
+	c := uint32(C.Mach_task_self())
+	a := asm.Mach_task_self()
+	if a != c {
+		println("got", a, "want", c)
+		panic("FAIL")
+	}
+}
diff --git a/src/cmd/pprof/pprof_test.go b/src/cmd/pprof/pprof_test.go
index 2a651dd..494cd8f 100644
--- a/src/cmd/pprof/pprof_test.go
+++ b/src/cmd/pprof/pprof_test.go
@@ -76,8 +76,7 @@
 
 	// pprof can only disassemble PIE on some platforms.
 	// Skip the ones it can't handle yet.
-	if (runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") ||
-		(runtime.GOOS == "android" && runtime.GOARCH == "arm") {
+	if runtime.GOOS == "android" && runtime.GOARCH == "arm" {
 		t.Skipf("skipping on %s/%s, issue 46639", runtime.GOOS, runtime.GOARCH)
 	}
 }
diff --git a/src/cmd/trace/annotations.go b/src/cmd/trace/annotations.go
index 0addc24..df194a7 100644
--- a/src/cmd/trace/annotations.go
+++ b/src/cmd/trace/annotations.go
@@ -9,8 +9,8 @@
 	"fmt"
 	"html/template"
 	"internal/trace"
+	"internal/trace/traceviewer"
 	"log"
-	"math"
 	"net/http"
 	"net/url"
 	"reflect"
@@ -808,122 +808,9 @@
 	}, nil
 }
 
-type durationHistogram struct {
-	Count                int
-	Buckets              []int
-	MinBucket, MaxBucket int
-}
-
-// Five buckets for every power of 10.
-var logDiv = math.Log(math.Pow(10, 1.0/5))
-
-func (h *durationHistogram) add(d time.Duration) {
-	var bucket int
-	if d > 0 {
-		bucket = int(math.Log(float64(d)) / logDiv)
-	}
-	if len(h.Buckets) <= bucket {
-		h.Buckets = append(h.Buckets, make([]int, bucket-len(h.Buckets)+1)...)
-		h.Buckets = h.Buckets[:cap(h.Buckets)]
-	}
-	h.Buckets[bucket]++
-	if bucket < h.MinBucket || h.MaxBucket == 0 {
-		h.MinBucket = bucket
-	}
-	if bucket > h.MaxBucket {
-		h.MaxBucket = bucket
-	}
-	h.Count++
-}
-
-func (h *durationHistogram) BucketMin(bucket int) time.Duration {
-	return time.Duration(math.Exp(float64(bucket) * logDiv))
-}
-
-func niceDuration(d time.Duration) string {
-	var rnd time.Duration
-	var unit string
-	switch {
-	case d < 10*time.Microsecond:
-		rnd, unit = time.Nanosecond, "ns"
-	case d < 10*time.Millisecond:
-		rnd, unit = time.Microsecond, "µs"
-	case d < 10*time.Second:
-		rnd, unit = time.Millisecond, "ms"
-	default:
-		rnd, unit = time.Second, "s "
-	}
-	return fmt.Sprintf("%d%s", d/rnd, unit)
-}
-
-func (h *durationHistogram) ToHTML(urlmaker func(min, max time.Duration) string) template.HTML {
-	if h == nil || h.Count == 0 {
-		return template.HTML("")
-	}
-
-	const barWidth = 400
-
-	maxCount := 0
-	for _, count := range h.Buckets {
-		if count > maxCount {
-			maxCount = count
-		}
-	}
-
-	w := new(strings.Builder)
-	fmt.Fprintf(w, `<table>`)
-	for i := h.MinBucket; i <= h.MaxBucket; i++ {
-		// Tick label.
-		if h.Buckets[i] > 0 {
-			fmt.Fprintf(w, `<tr><td class="histoTime" align="right"><a href=%s>%s</a></td>`, urlmaker(h.BucketMin(i), h.BucketMin(i+1)), niceDuration(h.BucketMin(i)))
-		} else {
-			fmt.Fprintf(w, `<tr><td class="histoTime" align="right">%s</td>`, niceDuration(h.BucketMin(i)))
-		}
-		// Bucket bar.
-		width := h.Buckets[i] * barWidth / maxCount
-		fmt.Fprintf(w, `<td><div style="width:%dpx;background:blue;position:relative">&nbsp;</div></td>`, width)
-		// Bucket count.
-		fmt.Fprintf(w, `<td align="right"><div style="position:relative">%d</div></td>`, h.Buckets[i])
-		fmt.Fprintf(w, "</tr>\n")
-
-	}
-	// Final tick label.
-	fmt.Fprintf(w, `<tr><td align="right">%s</td></tr>`, niceDuration(h.BucketMin(h.MaxBucket+1)))
-	fmt.Fprintf(w, `</table>`)
-	return template.HTML(w.String())
-}
-
-func (h *durationHistogram) String() string {
-	const barWidth = 40
-
-	labels := []string{}
-	maxLabel := 0
-	maxCount := 0
-	for i := h.MinBucket; i <= h.MaxBucket; i++ {
-		// TODO: This formatting is pretty awful.
-		label := fmt.Sprintf("[%-12s%-11s)", h.BucketMin(i).String()+",", h.BucketMin(i+1))
-		labels = append(labels, label)
-		if len(label) > maxLabel {
-			maxLabel = len(label)
-		}
-		count := h.Buckets[i]
-		if count > maxCount {
-			maxCount = count
-		}
-	}
-
-	w := new(strings.Builder)
-	for i := h.MinBucket; i <= h.MaxBucket; i++ {
-		count := h.Buckets[i]
-		bar := count * barWidth / maxCount
-		fmt.Fprintf(w, "%*s %-*s %d\n", maxLabel, labels[i-h.MinBucket], barWidth, strings.Repeat("█", bar), count)
-	}
-	return w.String()
-}
-
 type regionStats struct {
 	regionTypeID
-	Histogram durationHistogram
+	Histogram traceviewer.TimeHistogram
 }
 
 func (s *regionStats) UserRegionURL() func(min, max time.Duration) string {
@@ -933,7 +820,7 @@
 }
 
 func (s *regionStats) add(region regionDesc) {
-	s.Histogram.add(region.duration())
+	s.Histogram.Add(region.duration())
 }
 
 var templUserRegionTypes = template.Must(template.New("").Parse(`
@@ -966,8 +853,8 @@
 
 type taskStats struct {
 	Type      string
-	Count     int               // Complete + incomplete tasks
-	Histogram durationHistogram // Complete tasks only
+	Count     int                       // Complete + incomplete tasks
+	Histogram traceviewer.TimeHistogram // Complete tasks only
 }
 
 func (s *taskStats) UserTaskURL(complete bool) func(min, max time.Duration) string {
@@ -979,7 +866,7 @@
 func (s *taskStats) add(task *taskDesc) {
 	s.Count++
 	if task.complete() {
-		s.Histogram.add(task.duration())
+		s.Histogram.Add(task.duration())
 	}
 }
 
@@ -1169,7 +1056,7 @@
 var templUserRegionType = template.Must(template.New("").Funcs(template.FuncMap{
 	"prettyDuration": func(nsec int64) template.HTML {
 		d := time.Duration(nsec) * time.Nanosecond
-		return template.HTML(niceDuration(d))
+		return template.HTML(d.String())
 	},
 	"percent": func(dividend, divisor int64) template.HTML {
 		if divisor == 0 {
diff --git a/src/cmd/trace/annotations_test.go b/src/cmd/trace/annotations_test.go
index ca14d31..36d3ec9 100644
--- a/src/cmd/trace/annotations_test.go
+++ b/src/cmd/trace/annotations_test.go
@@ -11,6 +11,7 @@
 	"context"
 	"flag"
 	"fmt"
+	"internal/goexperiment"
 	traceparser "internal/trace"
 	"os"
 	"reflect"
@@ -330,6 +331,9 @@
 // If savetraces flag is set, the captured trace will be saved in the named file.
 func traceProgram(t *testing.T, f func(), name string) error {
 	t.Helper()
+	if goexperiment.ExecTracer2 {
+		t.Skip("skipping because test programs are covered elsewhere for the new tracer")
+	}
 	buf := new(bytes.Buffer)
 	if err := trace.Start(buf); err != nil {
 		return err
diff --git a/src/cmd/trace/goroutines.go b/src/cmd/trace/goroutines.go
index 7850fc0..28eace8 100644
--- a/src/cmd/trace/goroutines.go
+++ b/src/cmd/trace/goroutines.go
@@ -169,7 +169,7 @@
 var templGoroutine = template.Must(template.New("").Funcs(template.FuncMap{
 	"prettyDuration": func(nsec int64) template.HTML {
 		d := time.Duration(nsec) * time.Nanosecond
-		return template.HTML(niceDuration(d))
+		return template.HTML(d.String())
 	},
 	"percent": func(dividend, divisor int64) template.HTML {
 		if divisor == 0 {
diff --git a/src/cmd/trace/main.go b/src/cmd/trace/main.go
index 9e9e7f3..5f0d6f6 100644
--- a/src/cmd/trace/main.go
+++ b/src/cmd/trace/main.go
@@ -7,11 +7,11 @@
 import (
 	"bufio"
 	"cmd/internal/browser"
+	cmdv2 "cmd/trace/v2"
 	"flag"
 	"fmt"
-	"html/template"
 	"internal/trace"
-	"io"
+	"internal/trace/traceviewer"
 	"log"
 	"net"
 	"net/http"
@@ -46,7 +46,7 @@
 Flags:
 	-http=addr: HTTP service address (e.g., ':6060')
 	-pprof=type: print a pprof-like profile instead
-	-d: print debug info such as parsed events
+	-d=int: print debug info such as parsed events (1 for high-level, 2 for low-level)
 
 Note that while the various profiles available when launching
 'go tool trace' work on every browser, the trace viewer itself
@@ -57,7 +57,7 @@
 var (
 	httpFlag  = flag.String("http", "localhost:0", "HTTP service address (e.g., ':6060')")
 	pprofFlag = flag.String("pprof", "", "print a pprof-like profile instead")
-	debugFlag = flag.Bool("d", false, "print debug information such as parsed events list")
+	debugFlag = flag.Int("d", 0, "print debug information (1 for basic debug info, 2 for lower-level info)")
 
 	// The binary file name, left here for serveSVGProfile.
 	programBinary string
@@ -83,7 +83,14 @@
 		flag.Usage()
 	}
 
-	var pprofFunc func(io.Writer, *http.Request) error
+	if isTraceV2(traceFile) {
+		if err := cmdv2.Main(traceFile, *httpFlag, *pprofFlag, *debugFlag); err != nil {
+			dief("%s\n", err)
+		}
+		return
+	}
+
+	var pprofFunc traceviewer.ProfileFunc
 	switch *pprofFlag {
 	case "net":
 		pprofFunc = pprofByGoroutine(computePprofIO)
@@ -95,7 +102,11 @@
 		pprofFunc = pprofByGoroutine(computePprofSched)
 	}
 	if pprofFunc != nil {
-		if err := pprofFunc(os.Stdout, &http.Request{}); err != nil {
+		records, err := pprofFunc(&http.Request{})
+		if err != nil {
+			dief("failed to generate pprof: %v\n", err)
+		}
+		if err := traceviewer.BuildProfile(records).Write(os.Stdout); err != nil {
 			dief("failed to generate pprof: %v\n", err)
 		}
 		os.Exit(0)
@@ -115,7 +126,7 @@
 		dief("%v\n", err)
 	}
 
-	if *debugFlag {
+	if *debugFlag != 0 {
 		trace.Print(res.Events)
 		os.Exit(0)
 	}
@@ -131,13 +142,35 @@
 	log.Printf("Opening browser. Trace viewer is listening on %s", addr)
 	browser.Open(addr)
 
+	// Install MMU handler.
+	http.HandleFunc("/mmu", traceviewer.MMUHandlerFunc(ranges, mutatorUtil))
+
+	// Install main handler.
+	http.Handle("/", traceviewer.MainHandler([]traceviewer.View{
+		{Type: traceviewer.ViewProc, Ranges: ranges},
+	}))
+
 	// Start http server.
-	http.HandleFunc("/", httpMain)
 	err = http.Serve(ln, nil)
 	dief("failed to start http server: %v\n", err)
 }
 
-var ranges []Range
+// isTraceV2 returns true if filename holds a v2 trace.
+func isTraceV2(filename string) bool {
+	file, err := os.Open(filename)
+	if err != nil {
+		return false
+	}
+	defer file.Close()
+
+	ver, _, err := trace.ReadVersion(file)
+	if err != nil {
+		return false
+	}
+	return ver >= 1022
+}
+
+var ranges []traceviewer.Range
 
 var loader struct {
 	once sync.Once
@@ -175,209 +208,6 @@
 	return loader.res, loader.err
 }
 
-// httpMain serves the starting page.
-func httpMain(w http.ResponseWriter, r *http.Request) {
-	if err := templMain.Execute(w, ranges); err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-}
-
-var templMain = template.Must(template.New("").Parse(`
-<html>
-<style>
-/* See https://github.com/golang/pkgsite/blob/master/static/shared/typography/typography.css */
-body {
-  font-family:	-apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji';
-  font-size:	1rem;
-  line-height:	normal;
-  max-width:	9in;
-  margin:	1em;
-}
-h1 { font-size: 1.5rem; }
-h2 { font-size: 1.375rem; }
-h1,h2 {
-  font-weight: 600;
-  line-height: 1.25em;
-  word-break: break-word;
-}
-p  { color: grey85; font-size:85%; }
-</style>
-<body>
-<h1>cmd/trace: the Go trace event viewer</h1>
-<p>
-  This web server provides various visualizations of an event log gathered during
-  the execution of a Go program that uses the <a href='https://pkg.go.dev/runtime/trace'>runtime/trace</a> package.
-</p>
-
-<h2>Event timelines for running goroutines</h2>
-{{if $}}
-<p>
-  Large traces are split into multiple sections of equal data size
-  (not duration) to avoid overwhelming the visualizer.
-</p>
-<ul>
-	{{range $e := $}}
-		<li><a href="{{$e.URL}}">View trace ({{$e.Name}})</a></li>
-	{{end}}
-</ul>
-{{else}}
-<ul>
-	<li><a href="/trace">View trace</a></li>
-</ul>
-{{end}}
-<p>
-  This view displays a timeline for each of the GOMAXPROCS logical
-  processors, showing which goroutine (if any) was running on that
-  logical processor at each moment.
-
-  Each goroutine has an identifying number (e.g. G123), main function,
-  and color.
-
-  A colored bar represents an uninterrupted span of execution.
-
-  Execution of a goroutine may migrate from one logical processor to another,
-  causing a single colored bar to be horizontally continuous but
-  vertically displaced.
-</p>
-<p>
-  Clicking on a span reveals information about it, such as its
-  duration, its causal predecessors and successors, and the stack trace
-  at the final moment when it yielded the logical processor, for example
-  because it made a system call or tried to acquire a mutex.
-
-  Directly underneath each bar, a smaller bar or more commonly a fine
-  vertical line indicates an event occurring during its execution.
-  Some of these are related to garbage collection; most indicate that
-  a goroutine yielded its logical processor but then immediately resumed execution
-  on the same logical processor. Clicking on the event displays the stack trace
-  at the moment it occurred.
-</p>
-<p>
-  The causal relationships between spans of goroutine execution
-  can be displayed by clicking the Flow Events button at the top.
-</p>
-<p>
-  At the top ("STATS"), there are three additional timelines that
-  display statistical information.
-
-  "Goroutines" is a time series of the count of existing goroutines;
-  clicking on it displays their breakdown by state at that moment:
-  running, runnable, or waiting.
-
-  "Heap" is a time series of the amount of heap memory allocated (in orange)
-  and (in green) the allocation limit at which the next GC cycle will begin.
-
-  "Threads" shows the number of kernel threads in existence: there is
-  always one kernel thread per logical processor, and additional threads
-  are created for calls to non-Go code such as a system call or a
-  function written in C.
-</p>
-<p>
-  Above the event trace for the first logical processor are
-  traces for various runtime-internal events.
-
-  The "GC" bar shows when the garbage collector is running, and in which stage.
-  Garbage collection may temporarily affect all the logical processors
-  and the other metrics.
-
-  The "Network", "Timers", and "Syscalls" traces indicate events in
-  the runtime that cause goroutines to wake up.
-</p>
-<p>
-  The visualization allows you to navigate events at scales ranging from several
-  seconds to a handful of nanoseconds.
-
-  Consult the documentation for the Chromium <a href='https://www.chromium.org/developers/how-tos/trace-event-profiling-tool/'>Trace Event Profiling Tool<a/>
-  for help navigating the view.
-</p>
-
-<ul>
-<li><a href="/goroutines">Goroutine analysis</a></li>
-</ul>
-<p>
-  This view displays information about each set of goroutines that
-  shares the same main function.
-
-  Clicking on a main function shows links to the four types of
-  blocking profile (see below) applied to that subset of goroutines.
-
-  It also shows a table of specific goroutine instances, with various
-  execution statistics and a link to the event timeline for each one.
-
-  The timeline displays only the selected goroutine and any others it
-  interacts with via block/unblock events. (The timeline is
-  goroutine-oriented rather than logical processor-oriented.)
-</p>
-
-<h2>Profiles</h2>
-<p>
-  Each link below displays a global profile in zoomable graph form as
-  produced by <a href='https://go.dev/blog/pprof'>pprof</a>'s "web" command.
-
-  In addition there is a link to download the profile for offline
-  analysis with pprof.
-
-  All four profiles represent causes of delay that prevent a goroutine
-  from running on a logical processor: because it was waiting for the network,
-  for a synchronization operation on a mutex or channel, for a system call,
-  or for a logical processor to become available.
-</p>
-<ul>
-<li><a href="/io">Network blocking profile</a> (<a href="/io?raw=1" download="io.profile">⬇</a>)</li>
-<li><a href="/block">Synchronization blocking profile</a> (<a href="/block?raw=1" download="block.profile">⬇</a>)</li>
-<li><a href="/syscall">Syscall blocking profile</a> (<a href="/syscall?raw=1" download="syscall.profile">⬇</a>)</li>
-<li><a href="/sched">Scheduler latency profile</a> (<a href="/sche?raw=1" download="sched.profile">⬇</a>)</li>
-</ul>
-
-<h2>User-defined tasks and regions</h2>
-<p>
-  The trace API allows a target program to annotate a <a
-  href='https://pkg.go.dev/runtime/trace#Region'>region</a> of code
-  within a goroutine, such as a key function, so that its performance
-  can be analyzed.
-
-  <a href='https://pkg.go.dev/runtime/trace#Log'>Log events</a> may be
-  associated with a region to record progress and relevant values.
-
-  The API also allows annotation of higher-level
-  <a href='https://pkg.go.dev/runtime/trace#Task'>tasks</a>,
-  which may involve work across many goroutines.
-</p>
-<p>
-  The links below display, for each region and task, a histogram of its execution times.
-
-  Each histogram bucket contains a sample trace that records the
-  sequence of events such as goroutine creations, log events, and
-  subregion start/end times.
-
-  For each task, you can click through to a logical-processor or
-  goroutine-oriented view showing the tasks and regions on the
-  timeline.
-
-  Such information may help uncover which steps in a region are
-  unexpectedly slow, or reveal relationships between the data values
-  logged in a request and its running time.
-</p>
-<ul>
-<li><a href="/usertasks">User-defined tasks</a></li>
-<li><a href="/userregions">User-defined regions</a></li>
-</ul>
-
-<h2>Garbage collection metrics</h2>
-<ul>
-<li><a href="/mmu">Minimum mutator utilization</a></li>
-</ul>
-<p>
-  This chart indicates the maximum GC pause time (the largest x value
-  for which y is zero), and more generally, the fraction of time that
-  the processors are available to application goroutines ("mutators"),
-  for any time window of a specified size, in the worst case.
-</p>
-</body>
-</html>
-`))
-
 func dief(msg string, args ...any) {
 	fmt.Fprintf(os.Stderr, msg, args...)
 	os.Exit(1)
@@ -408,3 +238,11 @@
 	fmt.Printf("Enter to continue...")
 	fmt.Scanf("%s", &dummy)
 }
+
+func mutatorUtil(flags trace.UtilFlags) ([][]trace.MutatorUtil, error) {
+	events, err := parseEvents()
+	if err != nil {
+		return nil, err
+	}
+	return trace.MutatorUtilization(events, flags), nil
+}
diff --git a/src/cmd/trace/mmu.go b/src/cmd/trace/mmu.go
deleted file mode 100644
index b71dcd6..0000000
--- a/src/cmd/trace/mmu.go
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Minimum mutator utilization (MMU) graphing.
-
-// TODO:
-//
-// In worst window list, show break-down of GC utilization sources
-// (STW, assist, etc). Probably requires a different MutatorUtil
-// representation.
-//
-// When a window size is selected, show a second plot of the mutator
-// utilization distribution for that window size.
-//
-// Render plot progressively so rough outline is visible quickly even
-// for very complex MUTs. Start by computing just a few window sizes
-// and then add more window sizes.
-//
-// Consider using sampling to compute an approximate MUT. This would
-// work by sampling the mutator utilization at randomly selected
-// points in time in the trace to build an empirical distribution. We
-// could potentially put confidence intervals on these estimates and
-// render this progressively as we refine the distributions.
-
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"internal/trace"
-	"log"
-	"math"
-	"net/http"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-)
-
-func init() {
-	http.HandleFunc("/mmu", httpMMU)
-	http.HandleFunc("/mmuPlot", httpMMUPlot)
-	http.HandleFunc("/mmuDetails", httpMMUDetails)
-}
-
-var utilFlagNames = map[string]trace.UtilFlags{
-	"perProc":    trace.UtilPerProc,
-	"stw":        trace.UtilSTW,
-	"background": trace.UtilBackground,
-	"assist":     trace.UtilAssist,
-	"sweep":      trace.UtilSweep,
-}
-
-type mmuCacheEntry struct {
-	init     sync.Once
-	util     [][]trace.MutatorUtil
-	mmuCurve *trace.MMUCurve
-	err      error
-}
-
-var mmuCache struct {
-	m    map[trace.UtilFlags]*mmuCacheEntry
-	lock sync.Mutex
-}
-
-func init() {
-	mmuCache.m = make(map[trace.UtilFlags]*mmuCacheEntry)
-}
-
-func getMMUCurve(r *http.Request) ([][]trace.MutatorUtil, *trace.MMUCurve, error) {
-	var flags trace.UtilFlags
-	for _, flagStr := range strings.Split(r.FormValue("flags"), "|") {
-		flags |= utilFlagNames[flagStr]
-	}
-
-	mmuCache.lock.Lock()
-	c := mmuCache.m[flags]
-	if c == nil {
-		c = new(mmuCacheEntry)
-		mmuCache.m[flags] = c
-	}
-	mmuCache.lock.Unlock()
-
-	c.init.Do(func() {
-		events, err := parseEvents()
-		if err != nil {
-			c.err = err
-		} else {
-			c.util = trace.MutatorUtilization(events, flags)
-			c.mmuCurve = trace.NewMMUCurve(c.util)
-		}
-	})
-	return c.util, c.mmuCurve, c.err
-}
-
-// httpMMU serves the MMU plot page.
-func httpMMU(w http.ResponseWriter, r *http.Request) {
-	http.ServeContent(w, r, "", time.Time{}, strings.NewReader(templMMU))
-}
-
-// httpMMUPlot serves the JSON data for the MMU plot.
-func httpMMUPlot(w http.ResponseWriter, r *http.Request) {
-	mu, mmuCurve, err := getMMUCurve(r)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("failed to parse events: %v", err), http.StatusInternalServerError)
-		return
-	}
-
-	var quantiles []float64
-	for _, flagStr := range strings.Split(r.FormValue("flags"), "|") {
-		if flagStr == "mut" {
-			quantiles = []float64{0, 1 - .999, 1 - .99, 1 - .95}
-			break
-		}
-	}
-
-	// Find a nice starting point for the plot.
-	xMin := time.Second
-	for xMin > 1 {
-		if mmu := mmuCurve.MMU(xMin); mmu < 0.0001 {
-			break
-		}
-		xMin /= 1000
-	}
-	// Cover six orders of magnitude.
-	xMax := xMin * 1e6
-	// But no more than the length of the trace.
-	minEvent, maxEvent := mu[0][0].Time, mu[0][len(mu[0])-1].Time
-	for _, mu1 := range mu[1:] {
-		if mu1[0].Time < minEvent {
-			minEvent = mu1[0].Time
-		}
-		if mu1[len(mu1)-1].Time > maxEvent {
-			maxEvent = mu1[len(mu1)-1].Time
-		}
-	}
-	if maxMax := time.Duration(maxEvent - minEvent); xMax > maxMax {
-		xMax = maxMax
-	}
-	// Compute MMU curve.
-	logMin, logMax := math.Log(float64(xMin)), math.Log(float64(xMax))
-	const samples = 100
-	plot := make([][]float64, samples)
-	for i := 0; i < samples; i++ {
-		window := time.Duration(math.Exp(float64(i)/(samples-1)*(logMax-logMin) + logMin))
-		if quantiles == nil {
-			plot[i] = make([]float64, 2)
-			plot[i][1] = mmuCurve.MMU(window)
-		} else {
-			plot[i] = make([]float64, 1+len(quantiles))
-			copy(plot[i][1:], mmuCurve.MUD(window, quantiles))
-		}
-		plot[i][0] = float64(window)
-	}
-
-	// Create JSON response.
-	err = json.NewEncoder(w).Encode(map[string]any{"xMin": int64(xMin), "xMax": int64(xMax), "quantiles": quantiles, "curve": plot})
-	if err != nil {
-		log.Printf("failed to serialize response: %v", err)
-		return
-	}
-}
-
-var templMMU = `<!doctype html>
-<html>
-  <head>
-    <meta charset="utf-8">
-    <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
-    <script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
-    <script type="text/javascript">
-      google.charts.load('current', {'packages':['corechart']});
-      var chartsReady = false;
-      google.charts.setOnLoadCallback(function() { chartsReady = true; refreshChart(); });
-
-      var chart;
-      var curve;
-
-      function niceDuration(ns) {
-          if (ns < 1e3) { return ns + 'ns'; }
-          else if (ns < 1e6) { return ns / 1e3 + 'µs'; }
-          else if (ns < 1e9) { return ns / 1e6 + 'ms'; }
-          else { return ns / 1e9 + 's'; }
-      }
-
-      function niceQuantile(q) {
-        return 'p' + q*100;
-      }
-
-      function mmuFlags() {
-        var flags = "";
-        $("#options input").each(function(i, elt) {
-          if (elt.checked)
-            flags += "|" + elt.id;
-        });
-        return flags.substr(1);
-      }
-
-      function refreshChart() {
-        if (!chartsReady) return;
-        var container = $('#mmu_chart');
-        container.css('opacity', '.5');
-        refreshChart.count++;
-        var seq = refreshChart.count;
-        $.getJSON('/mmuPlot?flags=' + mmuFlags())
-         .fail(function(xhr, status, error) {
-           alert('failed to load plot: ' + status);
-         })
-         .done(function(result) {
-           if (refreshChart.count === seq)
-             drawChart(result);
-         });
-      }
-      refreshChart.count = 0;
-
-      function drawChart(plotData) {
-        curve = plotData.curve;
-        var data = new google.visualization.DataTable();
-        data.addColumn('number', 'Window duration');
-        data.addColumn('number', 'Minimum mutator utilization');
-        if (plotData.quantiles) {
-          for (var i = 1; i < plotData.quantiles.length; i++) {
-            data.addColumn('number', niceQuantile(1 - plotData.quantiles[i]) + ' MU');
-          }
-        }
-        data.addRows(curve);
-        for (var i = 0; i < curve.length; i++) {
-          data.setFormattedValue(i, 0, niceDuration(curve[i][0]));
-        }
-
-        var options = {
-          chart: {
-            title: 'Minimum mutator utilization',
-          },
-          hAxis: {
-            title: 'Window duration',
-            scaleType: 'log',
-            ticks: [],
-          },
-          vAxis: {
-            title: 'Minimum mutator utilization',
-            minValue: 0.0,
-            maxValue: 1.0,
-          },
-          legend: { position: 'none' },
-          focusTarget: 'category',
-          width: 900,
-          height: 500,
-          chartArea: { width: '80%', height: '80%' },
-        };
-        for (var v = plotData.xMin; v <= plotData.xMax; v *= 10) {
-          options.hAxis.ticks.push({v:v, f:niceDuration(v)});
-        }
-        if (plotData.quantiles) {
-          options.vAxis.title = 'Mutator utilization';
-          options.legend.position = 'in';
-        }
-
-        var container = $('#mmu_chart');
-        container.empty();
-        container.css('opacity', '');
-        chart = new google.visualization.LineChart(container[0]);
-        chart = new google.visualization.LineChart(document.getElementById('mmu_chart'));
-        chart.draw(data, options);
-
-        google.visualization.events.addListener(chart, 'select', selectHandler);
-        $('#details').empty();
-      }
-
-      function selectHandler() {
-        var items = chart.getSelection();
-        if (items.length === 0) {
-          return;
-        }
-        var details = $('#details');
-        details.empty();
-        var windowNS = curve[items[0].row][0];
-        var url = '/mmuDetails?window=' + windowNS + '&flags=' + mmuFlags();
-        $.getJSON(url)
-         .fail(function(xhr, status, error) {
-            details.text(status + ': ' + url + ' could not be loaded');
-         })
-         .done(function(worst) {
-            details.text('Lowest mutator utilization in ' + niceDuration(windowNS) + ' windows:');
-            for (var i = 0; i < worst.length; i++) {
-              details.append($('<br>'));
-              var text = worst[i].MutatorUtil.toFixed(3) + ' at time ' + niceDuration(worst[i].Time);
-              details.append($('<a/>').text(text).attr('href', worst[i].URL));
-            }
-         });
-      }
-
-      $.when($.ready).then(function() {
-        $("#options input").click(refreshChart);
-      });
-    </script>
-    <style>
-      .help {
-        display: inline-block;
-        position: relative;
-        width: 1em;
-        height: 1em;
-        border-radius: 50%;
-        color: #fff;
-        background: #555;
-        text-align: center;
-        cursor: help;
-      }
-      .help > span {
-        display: none;
-      }
-      .help:hover > span {
-        display: block;
-        position: absolute;
-        left: 1.1em;
-        top: 1.1em;
-        background: #555;
-        text-align: left;
-        width: 20em;
-        padding: 0.5em;
-        border-radius: 0.5em;
-        z-index: 5;
-      }
-    </style>
-  </head>
-  <body>
-    <div style="position: relative">
-      <div id="mmu_chart" style="width: 900px; height: 500px; display: inline-block; vertical-align: top">Loading plot...</div>
-      <div id="options" style="display: inline-block; vertical-align: top">
-        <p>
-          <b>View</b><br>
-          <input type="radio" name="view" id="system" checked><label for="system">System</label>
-          <span class="help">?<span>Consider whole system utilization. For example, if one of four procs is available to the mutator, mutator utilization will be 0.25. This is the standard definition of an MMU.</span></span><br>
-          <input type="radio" name="view" id="perProc"><label for="perProc">Per-goroutine</label>
-          <span class="help">?<span>Consider per-goroutine utilization. When even one goroutine is interrupted by GC, mutator utilization is 0.</span></span><br>
-        </p>
-        <p>
-          <b>Include</b><br>
-          <input type="checkbox" id="stw" checked><label for="stw">STW</label>
-          <span class="help">?<span>Stop-the-world stops all goroutines simultaneously.</span></span><br>
-          <input type="checkbox" id="background" checked><label for="background">Background workers</label>
-          <span class="help">?<span>Background workers are GC-specific goroutines. 25% of the CPU is dedicated to background workers during GC.</span></span><br>
-          <input type="checkbox" id="assist" checked><label for="assist">Mark assist</label>
-          <span class="help">?<span>Mark assists are performed by allocation to prevent the mutator from outpacing GC.</span></span><br>
-          <input type="checkbox" id="sweep"><label for="sweep">Sweep</label>
-          <span class="help">?<span>Sweep reclaims unused memory between GCs. (Enabling this may be very slow.).</span></span><br>
-        </p>
-        <p>
-          <b>Display</b><br>
-          <input type="checkbox" id="mut"><label for="mut">Show percentiles</label>
-          <span class="help">?<span>Display percentile mutator utilization in addition to minimum. E.g., p99 MU drops the worst 1% of windows.</span></span><br>
-        </p>
-      </div>
-    </div>
-    <div id="details">Select a point for details.</div>
-  </body>
-</html>
-`
-
-// httpMMUDetails serves details of an MMU graph at a particular window.
-func httpMMUDetails(w http.ResponseWriter, r *http.Request) {
-	_, mmuCurve, err := getMMUCurve(r)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("failed to parse events: %v", err), http.StatusInternalServerError)
-		return
-	}
-
-	windowStr := r.FormValue("window")
-	window, err := strconv.ParseUint(windowStr, 10, 64)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("failed to parse window parameter %q: %v", windowStr, err), http.StatusBadRequest)
-		return
-	}
-	worst := mmuCurve.Examples(time.Duration(window), 10)
-
-	// Construct a link for each window.
-	var links []linkedUtilWindow
-	for _, ui := range worst {
-		links = append(links, newLinkedUtilWindow(ui, time.Duration(window)))
-	}
-
-	err = json.NewEncoder(w).Encode(links)
-	if err != nil {
-		log.Printf("failed to serialize trace: %v", err)
-		return
-	}
-}
-
-type linkedUtilWindow struct {
-	trace.UtilWindow
-	URL string
-}
-
-func newLinkedUtilWindow(ui trace.UtilWindow, window time.Duration) linkedUtilWindow {
-	// Find the range containing this window.
-	var r Range
-	for _, r = range ranges {
-		if r.EndTime > ui.Time {
-			break
-		}
-	}
-	return linkedUtilWindow{ui, fmt.Sprintf("%s#%v:%v", r.URL(), float64(ui.Time)/1e6, float64(ui.Time+int64(window))/1e6)}
-}
diff --git a/src/cmd/trace/pprof.go b/src/cmd/trace/pprof.go
index a73ff53..3722b37 100644
--- a/src/cmd/trace/pprof.go
+++ b/src/cmd/trace/pprof.go
@@ -7,51 +7,25 @@
 package main
 
 import (
-	"bufio"
 	"fmt"
 	"internal/trace"
-	"io"
+	"internal/trace/traceviewer"
 	"net/http"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"runtime"
 	"sort"
 	"strconv"
 	"time"
-
-	"github.com/google/pprof/profile"
 )
 
-func goCmd() string {
-	var exeSuffix string
-	if runtime.GOOS == "windows" {
-		exeSuffix = ".exe"
-	}
-	path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix)
-	if _, err := os.Stat(path); err == nil {
-		return path
-	}
-	return "go"
-}
-
 func init() {
-	http.HandleFunc("/io", serveSVGProfile(pprofByGoroutine(computePprofIO)))
-	http.HandleFunc("/block", serveSVGProfile(pprofByGoroutine(computePprofBlock)))
-	http.HandleFunc("/syscall", serveSVGProfile(pprofByGoroutine(computePprofSyscall)))
-	http.HandleFunc("/sched", serveSVGProfile(pprofByGoroutine(computePprofSched)))
+	http.HandleFunc("/io", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofIO)))
+	http.HandleFunc("/block", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofBlock)))
+	http.HandleFunc("/syscall", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSyscall)))
+	http.HandleFunc("/sched", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSched)))
 
-	http.HandleFunc("/regionio", serveSVGProfile(pprofByRegion(computePprofIO)))
-	http.HandleFunc("/regionblock", serveSVGProfile(pprofByRegion(computePprofBlock)))
-	http.HandleFunc("/regionsyscall", serveSVGProfile(pprofByRegion(computePprofSyscall)))
-	http.HandleFunc("/regionsched", serveSVGProfile(pprofByRegion(computePprofSched)))
-}
-
-// Record represents one entry in pprof-like profiles.
-type Record struct {
-	stk  []*trace.Frame
-	n    uint64
-	time int64
+	http.HandleFunc("/regionio", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofIO)))
+	http.HandleFunc("/regionblock", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofBlock)))
+	http.HandleFunc("/regionsyscall", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSyscall)))
+	http.HandleFunc("/regionsched", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSched)))
 }
 
 // interval represents a time interval in the trace.
@@ -59,34 +33,34 @@
 	begin, end int64 // nanoseconds.
 }
 
-func pprofByGoroutine(compute func(io.Writer, map[uint64][]interval, []*trace.Event) error) func(w io.Writer, r *http.Request) error {
-	return func(w io.Writer, r *http.Request) error {
+func pprofByGoroutine(compute computePprofFunc) traceviewer.ProfileFunc {
+	return func(r *http.Request) ([]traceviewer.ProfileRecord, error) {
 		id := r.FormValue("id")
 		events, err := parseEvents()
 		if err != nil {
-			return err
+			return nil, err
 		}
 		gToIntervals, err := pprofMatchingGoroutines(id, events)
 		if err != nil {
-			return err
+			return nil, err
 		}
-		return compute(w, gToIntervals, events)
+		return compute(gToIntervals, events)
 	}
 }
 
-func pprofByRegion(compute func(io.Writer, map[uint64][]interval, []*trace.Event) error) func(w io.Writer, r *http.Request) error {
-	return func(w io.Writer, r *http.Request) error {
+func pprofByRegion(compute computePprofFunc) traceviewer.ProfileFunc {
+	return func(r *http.Request) ([]traceviewer.ProfileRecord, error) {
 		filter, err := newRegionFilter(r)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		gToIntervals, err := pprofMatchingRegions(filter)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		events, _ := parseEvents()
 
-		return compute(w, gToIntervals, events)
+		return compute(gToIntervals, events)
 	}
 }
 
@@ -170,9 +144,11 @@
 	return gToIntervals, nil
 }
 
+type computePprofFunc func(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error)
+
 // computePprofIO generates IO pprof-like profile (time spent in IO wait, currently only network blocking event).
-func computePprofIO(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
-	prof := make(map[uint64]Record)
+func computePprofIO(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) {
+	prof := make(map[uint64]traceviewer.ProfileRecord)
 	for _, ev := range events {
 		if ev.Type != trace.EvGoBlockNet || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
 			continue
@@ -180,18 +156,18 @@
 		overlapping := pprofOverlappingDuration(gToIntervals, ev)
 		if overlapping > 0 {
 			rec := prof[ev.StkID]
-			rec.stk = ev.Stk
-			rec.n++
-			rec.time += overlapping.Nanoseconds()
+			rec.Stack = ev.Stk
+			rec.Count++
+			rec.Time += overlapping
 			prof[ev.StkID] = rec
 		}
 	}
-	return buildProfile(prof).Write(w)
+	return recordsOf(prof), nil
 }
 
 // computePprofBlock generates blocking pprof-like profile (time spent blocked on synchronization primitives).
-func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
-	prof := make(map[uint64]Record)
+func computePprofBlock(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) {
+	prof := make(map[uint64]traceviewer.ProfileRecord)
 	for _, ev := range events {
 		switch ev.Type {
 		case trace.EvGoBlockSend, trace.EvGoBlockRecv, trace.EvGoBlockSelect,
@@ -208,18 +184,18 @@
 		overlapping := pprofOverlappingDuration(gToIntervals, ev)
 		if overlapping > 0 {
 			rec := prof[ev.StkID]
-			rec.stk = ev.Stk
-			rec.n++
-			rec.time += overlapping.Nanoseconds()
+			rec.Stack = ev.Stk
+			rec.Count++
+			rec.Time += overlapping
 			prof[ev.StkID] = rec
 		}
 	}
-	return buildProfile(prof).Write(w)
+	return recordsOf(prof), nil
 }
 
 // computePprofSyscall generates syscall pprof-like profile (time spent blocked in syscalls).
-func computePprofSyscall(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
-	prof := make(map[uint64]Record)
+func computePprofSyscall(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) {
+	prof := make(map[uint64]traceviewer.ProfileRecord)
 	for _, ev := range events {
 		if ev.Type != trace.EvGoSysCall || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
 			continue
@@ -227,19 +203,19 @@
 		overlapping := pprofOverlappingDuration(gToIntervals, ev)
 		if overlapping > 0 {
 			rec := prof[ev.StkID]
-			rec.stk = ev.Stk
-			rec.n++
-			rec.time += overlapping.Nanoseconds()
+			rec.Stack = ev.Stk
+			rec.Count++
+			rec.Time += overlapping
 			prof[ev.StkID] = rec
 		}
 	}
-	return buildProfile(prof).Write(w)
+	return recordsOf(prof), nil
 }
 
 // computePprofSched generates scheduler latency pprof-like profile
 // (time between when a goroutine becomes runnable and when it is actually scheduled for execution).
-func computePprofSched(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
-	prof := make(map[uint64]Record)
+func computePprofSched(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) {
+	prof := make(map[uint64]traceviewer.ProfileRecord)
 	for _, ev := range events {
 		if (ev.Type != trace.EvGoUnblock && ev.Type != trace.EvGoCreate) ||
 			ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
@@ -248,13 +224,13 @@
 		overlapping := pprofOverlappingDuration(gToIntervals, ev)
 		if overlapping > 0 {
 			rec := prof[ev.StkID]
-			rec.stk = ev.Stk
-			rec.n++
-			rec.time += overlapping.Nanoseconds()
+			rec.Stack = ev.Stk
+			rec.Count++
+			rec.Time += overlapping
 			prof[ev.StkID] = rec
 		}
 	}
-	return buildProfile(prof).Write(w)
+	return recordsOf(prof), nil
 }
 
 // pprofOverlappingDuration returns the overlapping duration between
@@ -278,100 +254,10 @@
 	return overlapping
 }
 
-// serveSVGProfile serves pprof-like profile generated by prof as svg.
-func serveSVGProfile(prof func(w io.Writer, r *http.Request) error) http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
-
-		if r.FormValue("raw") != "" {
-			w.Header().Set("Content-Type", "application/octet-stream")
-			if err := prof(w, r); err != nil {
-				w.Header().Set("Content-Type", "text/plain; charset=utf-8")
-				w.Header().Set("X-Go-Pprof", "1")
-				http.Error(w, fmt.Sprintf("failed to get profile: %v", err), http.StatusInternalServerError)
-				return
-			}
-			return
-		}
-
-		blockf, err := os.CreateTemp("", "block")
-		if err != nil {
-			http.Error(w, fmt.Sprintf("failed to create temp file: %v", err), http.StatusInternalServerError)
-			return
-		}
-		defer func() {
-			blockf.Close()
-			os.Remove(blockf.Name())
-		}()
-		blockb := bufio.NewWriter(blockf)
-		if err := prof(blockb, r); err != nil {
-			http.Error(w, fmt.Sprintf("failed to generate profile: %v", err), http.StatusInternalServerError)
-			return
-		}
-		if err := blockb.Flush(); err != nil {
-			http.Error(w, fmt.Sprintf("failed to flush temp file: %v", err), http.StatusInternalServerError)
-			return
-		}
-		if err := blockf.Close(); err != nil {
-			http.Error(w, fmt.Sprintf("failed to close temp file: %v", err), http.StatusInternalServerError)
-			return
-		}
-		svgFilename := blockf.Name() + ".svg"
-		if output, err := exec.Command(goCmd(), "tool", "pprof", "-svg", "-output", svgFilename, blockf.Name()).CombinedOutput(); err != nil {
-			http.Error(w, fmt.Sprintf("failed to execute go tool pprof: %v\n%s", err, output), http.StatusInternalServerError)
-			return
-		}
-		defer os.Remove(svgFilename)
-		w.Header().Set("Content-Type", "image/svg+xml")
-		http.ServeFile(w, r, svgFilename)
+func recordsOf(records map[uint64]traceviewer.ProfileRecord) []traceviewer.ProfileRecord {
+	result := make([]traceviewer.ProfileRecord, 0, len(records))
+	for _, record := range records {
+		result = append(result, record)
 	}
-}
-
-func buildProfile(prof map[uint64]Record) *profile.Profile {
-	p := &profile.Profile{
-		PeriodType: &profile.ValueType{Type: "trace", Unit: "count"},
-		Period:     1,
-		SampleType: []*profile.ValueType{
-			{Type: "contentions", Unit: "count"},
-			{Type: "delay", Unit: "nanoseconds"},
-		},
-	}
-	locs := make(map[uint64]*profile.Location)
-	funcs := make(map[string]*profile.Function)
-	for _, rec := range prof {
-		var sloc []*profile.Location
-		for _, frame := range rec.stk {
-			loc := locs[frame.PC]
-			if loc == nil {
-				fn := funcs[frame.File+frame.Fn]
-				if fn == nil {
-					fn = &profile.Function{
-						ID:         uint64(len(p.Function) + 1),
-						Name:       frame.Fn,
-						SystemName: frame.Fn,
-						Filename:   frame.File,
-					}
-					p.Function = append(p.Function, fn)
-					funcs[frame.File+frame.Fn] = fn
-				}
-				loc = &profile.Location{
-					ID:      uint64(len(p.Location) + 1),
-					Address: frame.PC,
-					Line: []profile.Line{
-						{
-							Function: fn,
-							Line:     int64(frame.Line),
-						},
-					},
-				}
-				p.Location = append(p.Location, loc)
-				locs[frame.PC] = loc
-			}
-			sloc = append(sloc, loc)
-		}
-		p.Sample = append(p.Sample, &profile.Sample{
-			Value:    []int64{int64(rec.n), rec.time},
-			Location: sloc,
-		})
-	}
-	return p
+	return result
 }
diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go
index 618df42..438b8dd 100644
--- a/src/cmd/trace/trace.go
+++ b/src/cmd/trace/trace.go
@@ -5,29 +5,24 @@
 package main
 
 import (
-	"cmd/internal/traceviewer"
-	"embed"
-	"encoding/json"
 	"fmt"
 	"internal/trace"
-	"io"
+	"internal/trace/traceviewer"
 	"log"
 	"math"
 	"net/http"
 	"runtime/debug"
 	"sort"
 	"strconv"
-	"strings"
 	"time"
-)
 
-//go:embed static/trace_viewer_full.html static/webcomponents.min.js
-var staticContent embed.FS
+	"internal/trace/traceviewer/format"
+)
 
 func init() {
 	http.HandleFunc("/trace", httpTrace)
 	http.HandleFunc("/jsontrace", httpJsonTrace)
-	http.Handle("/static/", http.FileServer(http.FS(staticContent)))
+	http.Handle("/static/", traceviewer.StaticHandler())
 }
 
 // httpTrace serves either whole trace (goid==0) or trace for goid goroutine.
@@ -37,143 +32,9 @@
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
-	if err := r.ParseForm(); err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	html := strings.ReplaceAll(templTrace, "{{PARAMS}}", r.Form.Encode())
-	w.Write([]byte(html))
-
+	traceviewer.TraceHandler().ServeHTTP(w, r)
 }
 
-// https://chromium.googlesource.com/catapult/+/9508452e18f130c98499cb4c4f1e1efaedee8962/tracing/docs/embedding-trace-viewer.md
-// This is almost verbatim copy of https://chromium-review.googlesource.com/c/catapult/+/2062938/2/tracing/bin/index.html
-var templTrace = `
-<html>
-<head>
-<script src="/static/webcomponents.min.js"></script>
-<script>
-'use strict';
-
-function onTraceViewerImportFail() {
-  document.addEventListener('DOMContentLoaded', function() {
-    document.body.textContent =
-    '/static/trace_viewer_full.html is missing. File a bug in https://golang.org/issue';
-  });
-}
-</script>
-
-<link rel="import" href="/static/trace_viewer_full.html"
-      onerror="onTraceViewerImportFail(event)">
-
-<style type="text/css">
-  html, body {
-    box-sizing: border-box;
-    overflow: hidden;
-    margin: 0px;
-    padding: 0;
-    width: 100%;
-    height: 100%;
-  }
-  #trace-viewer {
-    width: 100%;
-    height: 100%;
-  }
-  #trace-viewer:focus {
-    outline: none;
-  }
-</style>
-<script>
-'use strict';
-(function() {
-  var viewer;
-  var url;
-  var model;
-
-  function load() {
-    var req = new XMLHttpRequest();
-    var isBinary = /[.]gz$/.test(url) || /[.]zip$/.test(url);
-    req.overrideMimeType('text/plain; charset=x-user-defined');
-    req.open('GET', url, true);
-    if (isBinary)
-      req.responseType = 'arraybuffer';
-
-    req.onreadystatechange = function(event) {
-      if (req.readyState !== 4)
-        return;
-
-      window.setTimeout(function() {
-        if (req.status === 200)
-          onResult(isBinary ? req.response : req.responseText);
-        else
-          onResultFail(req.status);
-      }, 0);
-    };
-    req.send(null);
-  }
-
-  function onResultFail(err) {
-    var overlay = new tr.ui.b.Overlay();
-    overlay.textContent = err + ': ' + url + ' could not be loaded';
-    overlay.title = 'Failed to fetch data';
-    overlay.visible = true;
-  }
-
-  function onResult(result) {
-    model = new tr.Model();
-    var opts = new tr.importer.ImportOptions();
-    opts.shiftWorldToZero = false;
-    var i = new tr.importer.Import(model, opts);
-    var p = i.importTracesWithProgressDialog([result]);
-    p.then(onModelLoaded, onImportFail);
-  }
-
-  function onModelLoaded() {
-    viewer.model = model;
-    viewer.viewTitle = "trace";
-
-    if (!model || model.bounds.isEmpty)
-      return;
-    var sel = window.location.hash.substr(1);
-    if (sel === '')
-      return;
-    var parts = sel.split(':');
-    var range = new (tr.b.Range || tr.b.math.Range)();
-    range.addValue(parseFloat(parts[0]));
-    range.addValue(parseFloat(parts[1]));
-    viewer.trackView.viewport.interestRange.set(range);
-  }
-
-  function onImportFail(err) {
-    var overlay = new tr.ui.b.Overlay();
-    overlay.textContent = tr.b.normalizeException(err).message;
-    overlay.title = 'Import error';
-    overlay.visible = true;
-  }
-
-  document.addEventListener('WebComponentsReady', function() {
-    var container = document.createElement('track-view-container');
-    container.id = 'track_view_container';
-
-    viewer = document.createElement('tr-ui-timeline-view');
-    viewer.track_view_container = container;
-    Polymer.dom(viewer).appendChild(container);
-
-    viewer.id = 'trace-viewer';
-    viewer.globalMode = true;
-    Polymer.dom(document.body).appendChild(viewer);
-
-    url = '/jsontrace?{{PARAMS}}';
-    load();
-  });
-}());
-</script>
-</head>
-<body>
-</body>
-</html>
-`
-
 // httpJsonTrace serves json trace, requested from within templTrace HTML.
 func httpJsonTrace(w http.ResponseWriter, r *http.Request) {
 	defer debug.FreeOSMemory()
@@ -203,7 +64,7 @@
 			log.Printf("failed to find goroutine %d", goid)
 			return
 		}
-		params.mode = modeGoroutineOriented
+		params.mode = traceviewer.ModeGoroutineOriented
 		params.startTime = g.StartTime
 		if g.EndTime != 0 {
 			params.endTime = g.EndTime
@@ -225,7 +86,7 @@
 			return
 		}
 		goid := task.events[0].G
-		params.mode = modeGoroutineOriented | modeTaskOriented
+		params.mode = traceviewer.ModeGoroutineOriented | traceviewer.ModeTaskOriented
 		params.startTime = task.firstTimestamp() - 1
 		params.endTime = task.lastTimestamp() + 1
 		params.maing = goid
@@ -250,7 +111,7 @@
 			log.Printf("failed to find task with id %d", taskid)
 			return
 		}
-		params.mode = modeTaskOriented
+		params.mode = traceviewer.ModeTaskOriented
 		params.startTime = task.firstTimestamp() - 1
 		params.endTime = task.lastTimestamp() + 1
 		params.tasks = task.descendants()
@@ -272,247 +133,31 @@
 		}
 	}
 
-	c := viewerDataTraceConsumer(w, start, end)
+	c := traceviewer.ViewerDataTraceConsumer(w, start, end)
 	if err := generateTrace(params, c); err != nil {
 		log.Printf("failed to generate trace: %v", err)
 		return
 	}
 }
 
-type Range struct {
-	Name      string
-	Start     int
-	End       int
-	StartTime int64
-	EndTime   int64
-}
-
-func (r Range) URL() string {
-	return fmt.Sprintf("/trace?start=%d&end=%d", r.Start, r.End)
-}
-
 // splitTrace splits the trace into a number of ranges,
 // each resulting in approx 100MB of json output
 // (trace viewer can hardly handle more).
-func splitTrace(res trace.ParseResult) []Range {
+func splitTrace(res trace.ParseResult) []traceviewer.Range {
 	params := &traceParams{
 		parsed:  res,
 		endTime: math.MaxInt64,
 	}
-	s, c := splittingTraceConsumer(100 << 20) // 100M
+	s, c := traceviewer.SplittingTraceConsumer(100 << 20) // 100M
 	if err := generateTrace(params, c); err != nil {
 		dief("%v\n", err)
 	}
 	return s.Ranges
 }
 
-type splitter struct {
-	Ranges []Range
-}
-
-// walkStackFrames calls fn for id and all of its parent frames from allFrames.
-func walkStackFrames(allFrames map[string]traceviewer.Frame, id int, fn func(id int)) {
-	for id != 0 {
-		f, ok := allFrames[strconv.Itoa(id)]
-		if !ok {
-			break
-		}
-		fn(id)
-		id = f.Parent
-	}
-}
-
-func stackFrameEncodedSize(id uint, f traceviewer.Frame) int {
-	// We want to know the marginal size of traceviewer.Data.Frames for
-	// each event. Running full JSON encoding of the map for each event is
-	// far too slow.
-	//
-	// Since the format is fixed, we can easily compute the size without
-	// encoding.
-	//
-	// A single entry looks like one of the following:
-	//
-	//   "1":{"name":"main.main:30"},
-	//   "10":{"name":"pkg.NewSession:173","parent":9},
-	//
-	// The parent is omitted if 0. The trailing comma is omitted from the
-	// last entry, but we don't need that much precision.
-	const (
-		baseSize = len(`"`) + len(`":{"name":"`) + len(`"},`)
-
-		// Don't count the trailing quote on the name, as that is
-		// counted in baseSize.
-		parentBaseSize = len(`,"parent":`)
-	)
-
-	size := baseSize
-
-	size += len(f.Name)
-
-	// Bytes for id (always positive).
-	for id > 0 {
-		size += 1
-		id /= 10
-	}
-
-	if f.Parent > 0 {
-		size += parentBaseSize
-		// Bytes for parent (always positive).
-		for f.Parent > 0 {
-			size += 1
-			f.Parent /= 10
-		}
-	}
-
-	return size
-}
-
-func splittingTraceConsumer(max int) (*splitter, traceConsumer) {
-	type eventSz struct {
-		Time   float64
-		Sz     int
-		Frames []int
-	}
-
-	var (
-		// data.Frames contains only the frames for required events.
-		data = traceviewer.Data{Frames: make(map[string]traceviewer.Frame)}
-
-		allFrames = make(map[string]traceviewer.Frame)
-
-		sizes []eventSz
-		cw    countingWriter
-	)
-
-	s := new(splitter)
-
-	return s, traceConsumer{
-		consumeTimeUnit: func(unit string) {
-			data.TimeUnit = unit
-		},
-		consumeViewerEvent: func(v *traceviewer.Event, required bool) {
-			if required {
-				// Store required events inside data so flush
-				// can include them in the required part of the
-				// trace.
-				data.Events = append(data.Events, v)
-				walkStackFrames(allFrames, v.Stack, func(id int) {
-					s := strconv.Itoa(id)
-					data.Frames[s] = allFrames[s]
-				})
-				walkStackFrames(allFrames, v.EndStack, func(id int) {
-					s := strconv.Itoa(id)
-					data.Frames[s] = allFrames[s]
-				})
-				return
-			}
-			enc := json.NewEncoder(&cw)
-			enc.Encode(v)
-			size := eventSz{Time: v.Time, Sz: cw.size + 1} // +1 for ",".
-			// Add referenced stack frames. Their size is computed
-			// in flush, where we can dedup across events.
-			walkStackFrames(allFrames, v.Stack, func(id int) {
-				size.Frames = append(size.Frames, id)
-			})
-			walkStackFrames(allFrames, v.EndStack, func(id int) {
-				size.Frames = append(size.Frames, id) // This may add duplicates. We'll dedup later.
-			})
-			sizes = append(sizes, size)
-			cw.size = 0
-		},
-		consumeViewerFrame: func(k string, v traceviewer.Frame) {
-			allFrames[k] = v
-		},
-		flush: func() {
-			// Calculate size of the mandatory part of the trace.
-			// This includes thread names and stack frames for
-			// required events.
-			cw.size = 0
-			enc := json.NewEncoder(&cw)
-			enc.Encode(data)
-			requiredSize := cw.size
-
-			// Then calculate size of each individual event and
-			// their stack frames, grouping them into ranges. We
-			// only include stack frames relevant to the events in
-			// the range to reduce overhead.
-
-			var (
-				start = 0
-
-				eventsSize = 0
-
-				frames     = make(map[string]traceviewer.Frame)
-				framesSize = 0
-			)
-			for i, ev := range sizes {
-				eventsSize += ev.Sz
-
-				// Add required stack frames. Note that they
-				// may already be in the map.
-				for _, id := range ev.Frames {
-					s := strconv.Itoa(id)
-					_, ok := frames[s]
-					if ok {
-						continue
-					}
-					f := allFrames[s]
-					frames[s] = f
-					framesSize += stackFrameEncodedSize(uint(id), f)
-				}
-
-				total := requiredSize + framesSize + eventsSize
-				if total < max {
-					continue
-				}
-
-				// Reached max size, commit this range and
-				// start a new range.
-				startTime := time.Duration(sizes[start].Time * 1000)
-				endTime := time.Duration(ev.Time * 1000)
-				ranges = append(ranges, Range{
-					Name:      fmt.Sprintf("%v-%v", startTime, endTime),
-					Start:     start,
-					End:       i + 1,
-					StartTime: int64(startTime),
-					EndTime:   int64(endTime),
-				})
-				start = i + 1
-				frames = make(map[string]traceviewer.Frame)
-				framesSize = 0
-				eventsSize = 0
-			}
-			if len(ranges) <= 1 {
-				s.Ranges = nil
-				return
-			}
-
-			if end := len(sizes) - 1; start < end {
-				ranges = append(ranges, Range{
-					Name:      fmt.Sprintf("%v-%v", time.Duration(sizes[start].Time*1000), time.Duration(sizes[end].Time*1000)),
-					Start:     start,
-					End:       end,
-					StartTime: int64(sizes[start].Time * 1000),
-					EndTime:   int64(sizes[end].Time * 1000),
-				})
-			}
-			s.Ranges = ranges
-		},
-	}
-}
-
-type countingWriter struct {
-	size int
-}
-
-func (cw *countingWriter) Write(data []byte) (int, error) {
-	cw.size += len(data)
-	return len(data), nil
-}
-
 type traceParams struct {
 	parsed    trace.ParseResult
-	mode      traceviewMode
+	mode      traceviewer.Mode
 	startTime int64
 	endTime   int64
 	maing     uint64          // for goroutine-oriented view, place this goroutine on the top row
@@ -520,59 +165,18 @@
 	tasks     []*taskDesc     // Tasks to be displayed. tasks[0] is the top-most task
 }
 
-type traceviewMode uint
-
-const (
-	modeGoroutineOriented traceviewMode = 1 << iota
-	modeTaskOriented
-)
-
 type traceContext struct {
 	*traceParams
-	consumer  traceConsumer
-	frameTree frameNode
-	frameSeq  int
-	arrowSeq  uint64
-	gcount    uint64
-
-	heapStats, prevHeapStats     heapStats
-	threadStats, prevThreadStats threadStats
-	gstates, prevGstates         [gStateCount]int64
-
+	consumer traceviewer.TraceConsumer
+	emitter  *traceviewer.Emitter
+	arrowSeq uint64
+	gcount   uint64
 	regionID int // last emitted region id. incremented in each emitRegion call.
 }
 
-type heapStats struct {
-	heapAlloc uint64
-	nextGC    uint64
-}
-
-type threadStats struct {
-	insyscallRuntime int64 // system goroutine in syscall
-	insyscall        int64 // user goroutine in syscall
-	prunning         int64 // thread running P
-}
-
-type frameNode struct {
-	id       int
-	children map[uint64]frameNode
-}
-
-type gState int
-
-const (
-	gDead gState = iota
-	gRunnable
-	gRunning
-	gWaiting
-	gWaitingGC
-
-	gStateCount
-)
-
 type gInfo struct {
-	state      gState // current state
-	name       string // name chosen for this goroutine at first EvGoStart
+	state      traceviewer.GState // current state
+	name       string             // name chosen for this goroutine at first EvGoStart
 	isSystemG  bool
 	start      *trace.Event // most recent EvGoStart
 	markAssist *trace.Event // if non-nil, the mark assist currently running.
@@ -596,19 +200,6 @@
 	Index int `json:"sort_index"`
 }
 
-type traceConsumer struct {
-	consumeTimeUnit    func(unit string)
-	consumeViewerEvent func(v *traceviewer.Event, required bool)
-	consumeViewerFrame func(key string, f traceviewer.Frame)
-	flush              func()
-}
-
-const (
-	procsSection = 0 // where Goroutines or per-P timelines are presented.
-	statsSection = 1 // where counters are presented.
-	tasksSection = 2 // where Task hierarchy & timeline is presented.
-)
-
 // generateTrace generates a JSON trace for trace-viewer:
 // https://github.com/google/trace-viewer
 // Trace format is described at:
@@ -616,14 +207,22 @@
 // If mode has ModeGoroutineOriented set, generate the trace for goroutine goid; otherwise generate the whole trace.
 // startTime, endTime determine part of the trace that we are interested in.
 // gset restricts goroutines that are included in the resulting trace.
-func generateTrace(params *traceParams, consumer traceConsumer) error {
-	defer consumer.flush()
+func generateTrace(params *traceParams, consumer traceviewer.TraceConsumer) error {
+	emitter := traceviewer.NewEmitter(
+		consumer,
+		time.Duration(params.startTime),
+		time.Duration(params.endTime),
+	)
+	if params.mode&traceviewer.ModeGoroutineOriented != 0 {
+		emitter.SetResourceType("G")
+	} else {
+		emitter.SetResourceType("PROCS")
+	}
+	defer emitter.Flush()
 
-	ctx := &traceContext{traceParams: params}
-	ctx.frameTree.children = make(map[uint64]frameNode)
+	ctx := &traceContext{traceParams: params, emitter: emitter}
 	ctx.consumer = consumer
 
-	ctx.consumer.consumeTimeUnit("ns")
 	maxProc := 0
 	ginfos := make(map[uint64]*gInfo)
 	stacks := params.parsed.Stacks
@@ -640,17 +239,17 @@
 	// Since we make many calls to setGState, we record a sticky
 	// error in setGStateErr and check it after every event.
 	var setGStateErr error
-	setGState := func(ev *trace.Event, g uint64, oldState, newState gState) {
+	setGState := func(ev *trace.Event, g uint64, oldState, newState traceviewer.GState) {
 		info := getGInfo(g)
-		if oldState == gWaiting && info.state == gWaitingGC {
-			// For checking, gWaiting counts as any gWaiting*.
+		if oldState == traceviewer.GWaiting && info.state == traceviewer.GWaitingGC {
+			// For checking, traceviewer.GWaiting counts as any traceviewer.GWaiting*.
 			oldState = info.state
 		}
 		if info.state != oldState && setGStateErr == nil {
 			setGStateErr = fmt.Errorf("expected G %d to be in state %d, but got state %d", g, oldState, info.state)
 		}
-		ctx.gstates[info.state]--
-		ctx.gstates[newState]++
+
+		emitter.GoroutineTransition(time.Duration(ev.Ts), info.state, newState)
 		info.state = newState
 	}
 
@@ -658,13 +257,13 @@
 		// Handle state transitions before we filter out events.
 		switch ev.Type {
 		case trace.EvGoStart, trace.EvGoStartLabel:
-			setGState(ev, ev.G, gRunnable, gRunning)
+			setGState(ev, ev.G, traceviewer.GRunnable, traceviewer.GRunning)
 			info := getGInfo(ev.G)
 			info.start = ev
 		case trace.EvProcStart:
-			ctx.threadStats.prunning++
+			emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateRunning, 1)
 		case trace.EvProcStop:
-			ctx.threadStats.prunning--
+			emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateRunning, -1)
 		case trace.EvGoCreate:
 			newG := ev.Args[0]
 			info := getGInfo(newG)
@@ -682,58 +281,59 @@
 			info.isSystemG = trace.IsSystemGoroutine(fname)
 
 			ctx.gcount++
-			setGState(ev, newG, gDead, gRunnable)
+			setGState(ev, newG, traceviewer.GDead, traceviewer.GRunnable)
 		case trace.EvGoEnd:
 			ctx.gcount--
-			setGState(ev, ev.G, gRunning, gDead)
+			setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GDead)
 		case trace.EvGoUnblock:
-			setGState(ev, ev.Args[0], gWaiting, gRunnable)
+			setGState(ev, ev.Args[0], traceviewer.GWaiting, traceviewer.GRunnable)
 		case trace.EvGoSysExit:
-			setGState(ev, ev.G, gWaiting, gRunnable)
+			setGState(ev, ev.G, traceviewer.GWaiting, traceviewer.GRunnable)
 			if getGInfo(ev.G).isSystemG {
-				ctx.threadStats.insyscallRuntime--
+				emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscallRuntime, -1)
 			} else {
-				ctx.threadStats.insyscall--
+				emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscall, -1)
 			}
 		case trace.EvGoSysBlock:
-			setGState(ev, ev.G, gRunning, gWaiting)
+			setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GWaiting)
 			if getGInfo(ev.G).isSystemG {
-				ctx.threadStats.insyscallRuntime++
+				emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscallRuntime, 1)
 			} else {
-				ctx.threadStats.insyscall++
+				emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscall, 1)
 			}
 		case trace.EvGoSched, trace.EvGoPreempt:
-			setGState(ev, ev.G, gRunning, gRunnable)
+			setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GRunnable)
 		case trace.EvGoStop,
 			trace.EvGoSleep, trace.EvGoBlock, trace.EvGoBlockSend, trace.EvGoBlockRecv,
 			trace.EvGoBlockSelect, trace.EvGoBlockSync, trace.EvGoBlockCond, trace.EvGoBlockNet:
-			setGState(ev, ev.G, gRunning, gWaiting)
+			setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GWaiting)
 		case trace.EvGoBlockGC:
-			setGState(ev, ev.G, gRunning, gWaitingGC)
+			setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GWaitingGC)
 		case trace.EvGCMarkAssistStart:
 			getGInfo(ev.G).markAssist = ev
 		case trace.EvGCMarkAssistDone:
 			getGInfo(ev.G).markAssist = nil
 		case trace.EvGoWaiting:
-			setGState(ev, ev.G, gRunnable, gWaiting)
+			setGState(ev, ev.G, traceviewer.GRunnable, traceviewer.GWaiting)
 		case trace.EvGoInSyscall:
 			// Cancel out the effect of EvGoCreate at the beginning.
-			setGState(ev, ev.G, gRunnable, gWaiting)
+			setGState(ev, ev.G, traceviewer.GRunnable, traceviewer.GWaiting)
 			if getGInfo(ev.G).isSystemG {
-				ctx.threadStats.insyscallRuntime++
+				emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscallRuntime, 1)
 			} else {
-				ctx.threadStats.insyscall++
+				emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscall, 1)
 			}
 		case trace.EvHeapAlloc:
-			ctx.heapStats.heapAlloc = ev.Args[0]
+			emitter.HeapAlloc(time.Duration(ev.Ts), ev.Args[0])
 		case trace.EvHeapGoal:
-			ctx.heapStats.nextGC = ev.Args[0]
+			emitter.HeapGoal(time.Duration(ev.Ts), ev.Args[0])
 		}
 		if setGStateErr != nil {
 			return setGStateErr
 		}
-		if ctx.gstates[gRunnable] < 0 || ctx.gstates[gRunning] < 0 || ctx.threadStats.insyscall < 0 || ctx.threadStats.insyscallRuntime < 0 {
-			return fmt.Errorf("invalid state after processing %v: runnable=%d running=%d insyscall=%d insyscallRuntime=%d", ev, ctx.gstates[gRunnable], ctx.gstates[gRunning], ctx.threadStats.insyscall, ctx.threadStats.insyscallRuntime)
+
+		if err := emitter.Err(); err != nil {
+			return fmt.Errorf("invalid state after processing %v: %s", ev, err)
 		}
 
 		// Ignore events that are from uninteresting goroutines
@@ -752,12 +352,12 @@
 		// Emit trace objects.
 		switch ev.Type {
 		case trace.EvProcStart:
-			if ctx.mode&modeGoroutineOriented != 0 {
+			if ctx.mode&traceviewer.ModeGoroutineOriented != 0 {
 				continue
 			}
 			ctx.emitInstant(ev, "proc start", "")
 		case trace.EvProcStop:
-			if ctx.mode&modeGoroutineOriented != 0 {
+			if ctx.mode&traceviewer.ModeGoroutineOriented != 0 {
 				continue
 			}
 			ctx.emitInstant(ev, "proc stop", "")
@@ -765,7 +365,7 @@
 			ctx.emitSlice(ev, "GC")
 		case trace.EvGCDone:
 		case trace.EvSTWStart:
-			if ctx.mode&modeGoroutineOriented != 0 {
+			if ctx.mode&traceviewer.ModeGoroutineOriented != 0 {
 				continue
 			}
 			ctx.emitSlice(ev, fmt.Sprintf("STW (%s)", ev.SArgs[0]))
@@ -832,46 +432,10 @@
 				ctx.emitInstant(ev, "CPU profile sample", "")
 			}
 		}
-		// Emit any counter updates.
-		ctx.emitThreadCounters(ev)
-		ctx.emitHeapCounters(ev)
-		ctx.emitGoroutineCounters(ev)
-	}
-
-	ctx.emitSectionFooter(statsSection, "STATS", 0)
-
-	if ctx.mode&modeTaskOriented != 0 {
-		ctx.emitSectionFooter(tasksSection, "TASKS", 1)
-	}
-
-	if ctx.mode&modeGoroutineOriented != 0 {
-		ctx.emitSectionFooter(procsSection, "G", 2)
-	} else {
-		ctx.emitSectionFooter(procsSection, "PROCS", 2)
-	}
-
-	ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: trace.GCP, Arg: &NameArg{"GC"}})
-	ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: trace.GCP, Arg: &SortIndexArg{-6}})
-
-	ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: trace.NetpollP, Arg: &NameArg{"Network"}})
-	ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: trace.NetpollP, Arg: &SortIndexArg{-5}})
-
-	ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: trace.TimerP, Arg: &NameArg{"Timers"}})
-	ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: trace.TimerP, Arg: &SortIndexArg{-4}})
-
-	ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: trace.SyscallP, Arg: &NameArg{"Syscalls"}})
-	ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: trace.SyscallP, Arg: &SortIndexArg{-3}})
-
-	// Display rows for Ps if we are in the default trace view mode (not goroutine-oriented presentation)
-	if ctx.mode&modeGoroutineOriented == 0 {
-		for i := 0; i <= maxProc; i++ {
-			ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: uint64(i), Arg: &NameArg{fmt.Sprintf("Proc %v", i)}})
-			ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: uint64(i), Arg: &SortIndexArg{i}})
-		}
 	}
 
 	// Display task and its regions if we are in task-oriented presentation mode.
-	if ctx.mode&modeTaskOriented != 0 {
+	if ctx.mode&traceviewer.ModeTaskOriented != 0 {
 		// sort tasks based on the task start time.
 		sortedTask := make([]*taskDesc, len(ctx.tasks))
 		copy(sortedTask, ctx.tasks)
@@ -888,7 +452,7 @@
 
 			// If we are in goroutine-oriented mode, we draw regions.
 			// TODO(hyangah): add this for task/P-oriented mode (i.e., focustask view) too.
-			if ctx.mode&modeGoroutineOriented != 0 {
+			if ctx.mode&traceviewer.ModeGoroutineOriented != 0 {
 				for _, s := range task.regions {
 					ctx.emitRegion(s)
 				}
@@ -897,34 +461,34 @@
 	}
 
 	// Display goroutine rows if we are in goroutine-oriented mode.
-	if ctx.mode&modeGoroutineOriented != 0 {
+	if ctx.mode&traceviewer.ModeGoroutineOriented != 0 {
 		for k, v := range ginfos {
 			if !ctx.gs[k] {
 				continue
 			}
-			ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: k, Arg: &NameArg{v.name}})
+			emitter.Resource(k, v.name)
 		}
-		// Row for the main goroutine (maing)
-		ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: ctx.maing, Arg: &SortIndexArg{-2}})
+		emitter.Focus(ctx.maing)
+
 		// Row for GC or global state (specified with G=0)
-		ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: 0, Arg: &SortIndexArg{-1}})
+		ctx.emitFooter(&format.Event{Name: "thread_sort_index", Phase: "M", PID: format.ProcsSection, TID: 0, Arg: &SortIndexArg{-1}})
+	} else {
+		// Display rows for Ps if we are in the default trace view mode.
+		for i := 0; i <= maxProc; i++ {
+			emitter.Resource(uint64(i), fmt.Sprintf("Proc %v", i))
+		}
 	}
 
 	return nil
 }
 
-func (ctx *traceContext) emit(e *traceviewer.Event) {
-	ctx.consumer.consumeViewerEvent(e, false)
+func (ctx *traceContext) emit(e *format.Event) {
+	ctx.consumer.ConsumeViewerEvent(e, false)
 }
 
-func (ctx *traceContext) emitFooter(e *traceviewer.Event) {
-	ctx.consumer.consumeViewerEvent(e, true)
+func (ctx *traceContext) emitFooter(e *format.Event) {
+	ctx.consumer.ConsumeViewerEvent(e, true)
 }
-func (ctx *traceContext) emitSectionFooter(sectionID uint64, name string, priority int) {
-	ctx.emitFooter(&traceviewer.Event{Name: "process_name", Phase: "M", PID: sectionID, Arg: &NameArg{name}})
-	ctx.emitFooter(&traceviewer.Event{Name: "process_sort_index", Phase: "M", PID: sectionID, Arg: &SortIndexArg{priority}})
-}
-
 func (ctx *traceContext) time(ev *trace.Event) float64 {
 	// Trace viewer wants timestamps in microseconds.
 	return float64(ev.Ts) / 1000
@@ -942,7 +506,7 @@
 }
 
 func (ctx *traceContext) proc(ev *trace.Event) uint64 {
-	if ctx.mode&modeGoroutineOriented != 0 && ev.P < trace.FakeP {
+	if ctx.mode&traceviewer.ModeGoroutineOriented != 0 && ev.P < trace.FakeP {
 		return ev.G
 	} else {
 		return uint64(ev.P)
@@ -953,7 +517,7 @@
 	ctx.emit(ctx.makeSlice(ev, name))
 }
 
-func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *traceviewer.Event {
+func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *format.Event {
 	// If ViewerEvent.Dur is not a positive value,
 	// trace viewer handles it as a non-terminating time interval.
 	// Avoid it by setting the field with a small value.
@@ -961,18 +525,18 @@
 	if ev.Link.Ts-ev.Ts <= 0 {
 		durationUsec = 0.0001 // 0.1 nanoseconds
 	}
-	sl := &traceviewer.Event{
+	sl := &format.Event{
 		Name:     name,
 		Phase:    "X",
 		Time:     ctx.time(ev),
 		Dur:      durationUsec,
 		TID:      ctx.proc(ev),
-		Stack:    ctx.stack(ev.Stk),
-		EndStack: ctx.stack(ev.Link.Stk),
+		Stack:    ctx.emitter.Stack(ev.Stk),
+		EndStack: ctx.emitter.Stack(ev.Link.Stk),
 	}
 
 	// grey out non-overlapping events if the event is not a global event (ev.G == 0)
-	if ctx.mode&modeTaskOriented != 0 && ev.G != 0 {
+	if ctx.mode&traceviewer.ModeTaskOriented != 0 && ev.G != 0 {
 		// include P information.
 		if t := ev.Type; t == trace.EvGoStart || t == trace.EvGoStartLabel {
 			type Arg struct {
@@ -1000,25 +564,24 @@
 	taskName := task.name
 	durationUsec := float64(task.lastTimestamp()-task.firstTimestamp()) / 1e3
 
-	ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: tasksSection, TID: taskRow, Arg: &NameArg{fmt.Sprintf("T%d %s", task.id, taskName)}})
-	ctx.emit(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: tasksSection, TID: taskRow, Arg: &SortIndexArg{sortIndex}})
+	ctx.emitter.Task(taskRow, taskName, sortIndex)
 	ts := float64(task.firstTimestamp()) / 1e3
-	sl := &traceviewer.Event{
+	sl := &format.Event{
 		Name:  taskName,
 		Phase: "X",
 		Time:  ts,
 		Dur:   durationUsec,
-		PID:   tasksSection,
+		PID:   format.TasksSection,
 		TID:   taskRow,
 		Cname: pickTaskColor(task.id),
 	}
 	targ := TaskArg{ID: task.id}
 	if task.create != nil {
-		sl.Stack = ctx.stack(task.create.Stk)
+		sl.Stack = ctx.emitter.Stack(task.create.Stk)
 		targ.StartG = task.create.G
 	}
 	if task.end != nil {
-		sl.EndStack = ctx.stack(task.end.Stk)
+		sl.EndStack = ctx.emitter.Stack(task.end.Stk)
 		targ.EndG = task.end.G
 	}
 	sl.Arg = targ
@@ -1026,8 +589,8 @@
 
 	if task.create != nil && task.create.Type == trace.EvUserTaskCreate && task.create.Args[1] != 0 {
 		ctx.arrowSeq++
-		ctx.emit(&traceviewer.Event{Name: "newTask", Phase: "s", TID: task.create.Args[1], ID: ctx.arrowSeq, Time: ts, PID: tasksSection})
-		ctx.emit(&traceviewer.Event{Name: "newTask", Phase: "t", TID: taskRow, ID: ctx.arrowSeq, Time: ts, PID: tasksSection})
+		ctx.emit(&format.Event{Name: "newTask", Phase: "s", TID: task.create.Args[1], ID: ctx.arrowSeq, Time: ts, PID: format.TasksSection})
+		ctx.emit(&format.Event{Name: "newTask", Phase: "t", TID: taskRow, ID: ctx.arrowSeq, Time: ts, PID: format.TasksSection})
 	}
 }
 
@@ -1048,7 +611,7 @@
 	scopeID := fmt.Sprintf("%x", id)
 	name := s.Name
 
-	sl0 := &traceviewer.Event{
+	sl0 := &format.Event{
 		Category: "Region",
 		Name:     name,
 		Phase:    "b",
@@ -1059,11 +622,11 @@
 		Cname:    pickTaskColor(s.TaskID),
 	}
 	if s.Start != nil {
-		sl0.Stack = ctx.stack(s.Start.Stk)
+		sl0.Stack = ctx.emitter.Stack(s.Start.Stk)
 	}
 	ctx.emit(sl0)
 
-	sl1 := &traceviewer.Event{
+	sl1 := &format.Event{
 		Category: "Region",
 		Name:     name,
 		Phase:    "e",
@@ -1075,70 +638,18 @@
 		Arg:      RegionArg{TaskID: s.TaskID},
 	}
 	if s.End != nil {
-		sl1.Stack = ctx.stack(s.End.Stk)
+		sl1.Stack = ctx.emitter.Stack(s.End.Stk)
 	}
 	ctx.emit(sl1)
 }
 
-type heapCountersArg struct {
-	Allocated uint64
-	NextGC    uint64
-}
-
-func (ctx *traceContext) emitHeapCounters(ev *trace.Event) {
-	if ctx.prevHeapStats == ctx.heapStats {
-		return
-	}
-	diff := uint64(0)
-	if ctx.heapStats.nextGC > ctx.heapStats.heapAlloc {
-		diff = ctx.heapStats.nextGC - ctx.heapStats.heapAlloc
-	}
-	if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) {
-		ctx.emit(&traceviewer.Event{Name: "Heap", Phase: "C", Time: ctx.time(ev), PID: 1, Arg: &heapCountersArg{ctx.heapStats.heapAlloc, diff}})
-	}
-	ctx.prevHeapStats = ctx.heapStats
-}
-
-type goroutineCountersArg struct {
-	Running   uint64
-	Runnable  uint64
-	GCWaiting uint64
-}
-
-func (ctx *traceContext) emitGoroutineCounters(ev *trace.Event) {
-	if ctx.prevGstates == ctx.gstates {
-		return
-	}
-	if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) {
-		ctx.emit(&traceviewer.Event{Name: "Goroutines", Phase: "C", Time: ctx.time(ev), PID: 1, Arg: &goroutineCountersArg{uint64(ctx.gstates[gRunning]), uint64(ctx.gstates[gRunnable]), uint64(ctx.gstates[gWaitingGC])}})
-	}
-	ctx.prevGstates = ctx.gstates
-}
-
-type threadCountersArg struct {
-	Running   int64
-	InSyscall int64
-}
-
-func (ctx *traceContext) emitThreadCounters(ev *trace.Event) {
-	if ctx.prevThreadStats == ctx.threadStats {
-		return
-	}
-	if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) {
-		ctx.emit(&traceviewer.Event{Name: "Threads", Phase: "C", Time: ctx.time(ev), PID: 1, Arg: &threadCountersArg{
-			Running:   ctx.threadStats.prunning,
-			InSyscall: ctx.threadStats.insyscall}})
-	}
-	ctx.prevThreadStats = ctx.threadStats
-}
-
 func (ctx *traceContext) emitInstant(ev *trace.Event, name, category string) {
 	if !tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) {
 		return
 	}
 
 	cname := ""
-	if ctx.mode&modeTaskOriented != 0 {
+	if ctx.mode&traceviewer.ModeTaskOriented != 0 {
 		taskID, isUserAnnotation := isUserAnnotationEvent(ev)
 
 		show := false
@@ -1163,14 +674,14 @@
 		}
 		arg = &Arg{ev.Args[0]}
 	}
-	ctx.emit(&traceviewer.Event{
+	ctx.emit(&format.Event{
 		Name:     name,
 		Category: category,
 		Phase:    "I",
 		Scope:    "t",
 		Time:     ctx.time(ev),
 		TID:      ctx.proc(ev),
-		Stack:    ctx.stack(ev.Stk),
+		Stack:    ctx.emitter.Stack(ev.Stk),
 		Cname:    cname,
 		Arg:      arg})
 }
@@ -1181,7 +692,7 @@
 		// For example, a goroutine was unblocked but was not scheduled before trace stop.
 		return
 	}
-	if ctx.mode&modeGoroutineOriented != 0 && (!ctx.gs[ev.Link.G] || ev.Link.Ts < ctx.startTime || ev.Link.Ts > ctx.endTime) {
+	if ctx.mode&traceviewer.ModeGoroutineOriented != 0 && (!ctx.gs[ev.Link.G] || ev.Link.Ts < ctx.startTime || ev.Link.Ts > ctx.endTime) {
 		return
 	}
 
@@ -1192,7 +703,7 @@
 	}
 
 	color := ""
-	if ctx.mode&modeTaskOriented != 0 {
+	if ctx.mode&traceviewer.ModeTaskOriented != 0 {
 		overlapping := false
 		// skip non-overlapping arrows.
 		for _, task := range ctx.tasks {
@@ -1207,32 +718,8 @@
 	}
 
 	ctx.arrowSeq++
-	ctx.emit(&traceviewer.Event{Name: name, Phase: "s", TID: ctx.proc(ev), ID: ctx.arrowSeq, Time: ctx.time(ev), Stack: ctx.stack(ev.Stk), Cname: color})
-	ctx.emit(&traceviewer.Event{Name: name, Phase: "t", TID: ctx.proc(ev.Link), ID: ctx.arrowSeq, Time: ctx.time(ev.Link), Cname: color})
-}
-
-func (ctx *traceContext) stack(stk []*trace.Frame) int {
-	return ctx.buildBranch(ctx.frameTree, stk)
-}
-
-// buildBranch builds one branch in the prefix tree rooted at ctx.frameTree.
-func (ctx *traceContext) buildBranch(parent frameNode, stk []*trace.Frame) int {
-	if len(stk) == 0 {
-		return parent.id
-	}
-	last := len(stk) - 1
-	frame := stk[last]
-	stk = stk[:last]
-
-	node, ok := parent.children[frame.PC]
-	if !ok {
-		ctx.frameSeq++
-		node.id = ctx.frameSeq
-		node.children = make(map[uint64]frameNode)
-		parent.children[frame.PC] = node
-		ctx.consumer.consumeViewerFrame(strconv.Itoa(node.id), traceviewer.Frame{Name: fmt.Sprintf("%v:%v", frame.Fn, frame.Line), Parent: parent.id})
-	}
-	return ctx.buildBranch(node, stk)
+	ctx.emit(&format.Event{Name: name, Phase: "s", TID: ctx.proc(ev), ID: ctx.arrowSeq, Time: ctx.time(ev), Stack: ctx.emitter.Stack(ev.Stk), Cname: color})
+	ctx.emit(&format.Event{Name: name, Phase: "t", TID: ctx.proc(ev.Link), ID: ctx.arrowSeq, Time: ctx.time(ev.Link), Cname: color})
 }
 
 // firstTimestamp returns the timestamp of the first event record.
@@ -1253,61 +740,6 @@
 	return 0
 }
 
-type jsonWriter struct {
-	w   io.Writer
-	enc *json.Encoder
-}
-
-func viewerDataTraceConsumer(w io.Writer, start, end int64) traceConsumer {
-	allFrames := make(map[string]traceviewer.Frame)
-	requiredFrames := make(map[string]traceviewer.Frame)
-	enc := json.NewEncoder(w)
-	written := 0
-	index := int64(-1)
-
-	io.WriteString(w, "{")
-	return traceConsumer{
-		consumeTimeUnit: func(unit string) {
-			io.WriteString(w, `"displayTimeUnit":`)
-			enc.Encode(unit)
-			io.WriteString(w, ",")
-		},
-		consumeViewerEvent: func(v *traceviewer.Event, required bool) {
-			index++
-			if !required && (index < start || index > end) {
-				// not in the range. Skip!
-				return
-			}
-			walkStackFrames(allFrames, v.Stack, func(id int) {
-				s := strconv.Itoa(id)
-				requiredFrames[s] = allFrames[s]
-			})
-			walkStackFrames(allFrames, v.EndStack, func(id int) {
-				s := strconv.Itoa(id)
-				requiredFrames[s] = allFrames[s]
-			})
-			if written == 0 {
-				io.WriteString(w, `"traceEvents": [`)
-			}
-			if written > 0 {
-				io.WriteString(w, ",")
-			}
-			enc.Encode(v)
-			// TODO: get rid of the extra \n inserted by enc.Encode.
-			// Same should be applied to splittingTraceConsumer.
-			written++
-		},
-		consumeViewerFrame: func(k string, v traceviewer.Frame) {
-			allFrames[k] = v
-		},
-		flush: func() {
-			io.WriteString(w, `], "stackFrames":`)
-			enc.Encode(requiredFrames)
-			io.WriteString(w, `}`)
-		},
-	}
-}
-
 // Mapping from more reasonable color names to the reserved color names in
 // https://github.com/catapult-project/catapult/blob/master/tracing/tracing/base/color_scheme.html#L50
 // The chrome trace viewer allows only those as cname values.
diff --git a/src/cmd/trace/trace_test.go b/src/cmd/trace/trace_test.go
index 87fd3a3..d315fad 100644
--- a/src/cmd/trace/trace_test.go
+++ b/src/cmd/trace/trace_test.go
@@ -7,9 +7,10 @@
 package main
 
 import (
-	"cmd/internal/traceviewer"
 	"context"
 	"internal/trace"
+	"internal/trace/traceviewer"
+	"internal/trace/traceviewer/format"
 	"io"
 	rtrace "runtime/trace"
 	"strings"
@@ -78,10 +79,10 @@
 
 	// Use the default ViewerDataTraceConsumer but replace
 	// ConsumeViewerEvent to intercept the viewer events for testing.
-	c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
-	c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) {
+	c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
+	c.ConsumeViewerEvent = func(ev *format.Event, _ bool) {
 		if ev.Name == "Goroutines" {
-			cnt := ev.Arg.(*goroutineCountersArg)
+			cnt := ev.Arg.(*format.GoroutineCountersArg)
 			if cnt.Runnable+cnt.Running > 2 {
 				t.Errorf("goroutine count=%+v; want no more than 2 goroutines in runnable/running state", cnt)
 			}
@@ -131,7 +132,7 @@
 		gs:      map[uint64]bool{10: true},
 	}
 
-	c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
+	c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
 	if err := generateTrace(params, c); err != nil {
 		t.Fatalf("generateTrace failed: %v", err)
 	}
@@ -163,10 +164,10 @@
 		endTime: int64(1<<63 - 1),
 	}
 
-	c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
+	c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
 
 	marks := 0
-	c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) {
+	c.ConsumeViewerEvent = func(ev *format.Event, _ bool) {
 		if strings.Contains(ev.Name, "MARK ASSIST") {
 			marks++
 		}
@@ -208,16 +209,16 @@
 
 	params := &traceParams{
 		parsed:    res,
-		mode:      modeTaskOriented,
+		mode:      traceviewer.ModeTaskOriented,
 		startTime: task.firstTimestamp() - 1,
 		endTime:   task.lastTimestamp() + 1,
 		tasks:     []*taskDesc{task},
 	}
 
-	c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
+	c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
 
 	var logBeforeTaskEnd, logAfterTaskEnd bool
-	c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) {
+	c.ConsumeViewerEvent = func(ev *format.Event, _ bool) {
 		if ev.Name == "log before task ends" {
 			logBeforeTaskEnd = true
 		}
diff --git a/src/cmd/trace/trace_unix_test.go b/src/cmd/trace/trace_unix_test.go
index f35061e..e634635 100644
--- a/src/cmd/trace/trace_unix_test.go
+++ b/src/cmd/trace/trace_unix_test.go
@@ -8,8 +8,10 @@
 
 import (
 	"bytes"
-	"cmd/internal/traceviewer"
+	"internal/goexperiment"
 	traceparser "internal/trace"
+	"internal/trace/traceviewer"
+	"internal/trace/traceviewer/format"
 	"io"
 	"runtime"
 	"runtime/trace"
@@ -23,6 +25,9 @@
 // that preexisted when the tracing started were not counted
 // as threads in syscall. See golang.org/issues/22574.
 func TestGoroutineInSyscall(t *testing.T) {
+	if goexperiment.ExecTracer2 {
+		t.Skip("skipping because this test is obsolete and incompatible with the new tracer")
+	}
 	// Start one goroutine blocked in syscall.
 	//
 	// TODO: syscall.Pipe used to cause the goroutine to
@@ -83,10 +88,10 @@
 
 	// Check only one thread for the pipe read goroutine is
 	// considered in-syscall.
-	c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
-	c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) {
+	c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1)
+	c.ConsumeViewerEvent = func(ev *format.Event, _ bool) {
 		if ev.Name == "Threads" {
-			arg := ev.Arg.(*threadCountersArg)
+			arg := ev.Arg.(*format.ThreadCountersArg)
 			if arg.InSyscall > 1 {
 				t.Errorf("%d threads in syscall at time %v; want less than 1 thread in syscall", arg.InSyscall, ev.Time)
 			}
diff --git a/src/cmd/trace/v2/gen.go b/src/cmd/trace/v2/gen.go
new file mode 100644
index 0000000..f6a4bb6
--- /dev/null
+++ b/src/cmd/trace/v2/gen.go
@@ -0,0 +1,394 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"fmt"
+	"internal/trace"
+	"internal/trace/traceviewer"
+	tracev2 "internal/trace/v2"
+	"strings"
+)
+
+// generator is an interface for generating a JSON trace for the trace viewer
+// from a trace. Each method in this interface is a handler for a kind of event
+// that is interesting to render in the UI via the JSON trace.
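+//
+// Concrete generators (for example, goroutineGenerator in goroutinegen.go)
+// typically satisfy this interface by embedding the reusable building blocks
+// defined later in this file, roughly like:
+//
+//	type goroutineGenerator struct {
+//		globalRangeGenerator
+//		globalMetricGenerator
+//		stackSampleGenerator[tracev2.GoID]
+//		logEventGenerator[tracev2.GoID]
+//		// generator-specific state ...
+//	}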
+type generator interface {
+	// Global parts.
+	Sync() // Notifies the generator of an EventSync event.
+	StackSample(ctx *traceContext, ev *tracev2.Event)
+	GlobalRange(ctx *traceContext, ev *tracev2.Event)
+	GlobalMetric(ctx *traceContext, ev *tracev2.Event)
+
+	// Goroutine parts.
+	GoroutineLabel(ctx *traceContext, ev *tracev2.Event)
+	GoroutineRange(ctx *traceContext, ev *tracev2.Event)
+	GoroutineTransition(ctx *traceContext, ev *tracev2.Event)
+
+	// Proc parts.
+	ProcRange(ctx *traceContext, ev *tracev2.Event)
+	ProcTransition(ctx *traceContext, ev *tracev2.Event)
+
+	// User annotations.
+	Log(ctx *traceContext, ev *tracev2.Event)
+
+	// Finish indicates the end of the trace and finalizes generation.
+	Finish(ctx *traceContext)
+}
+
+// runGenerator produces a trace into ctx by running the generator over the parsed trace.
+func runGenerator(ctx *traceContext, g generator, parsed *parsedTrace, opts *genOpts) {
+	for i := range parsed.events {
+		ev := &parsed.events[i]
+
+		switch ev.Kind() {
+		case tracev2.EventSync:
+			g.Sync()
+		case tracev2.EventStackSample:
+			g.StackSample(ctx, ev)
+		case tracev2.EventRangeBegin, tracev2.EventRangeActive, tracev2.EventRangeEnd:
+			r := ev.Range()
+			switch r.Scope.Kind {
+			case tracev2.ResourceGoroutine:
+				g.GoroutineRange(ctx, ev)
+			case tracev2.ResourceProc:
+				g.ProcRange(ctx, ev)
+			case tracev2.ResourceNone:
+				g.GlobalRange(ctx, ev)
+			}
+		case tracev2.EventMetric:
+			g.GlobalMetric(ctx, ev)
+		case tracev2.EventLabel:
+			l := ev.Label()
+			if l.Resource.Kind == tracev2.ResourceGoroutine {
+				g.GoroutineLabel(ctx, ev)
+			}
+		case tracev2.EventStateTransition:
+			switch ev.StateTransition().Resource.Kind {
+			case tracev2.ResourceProc:
+				g.ProcTransition(ctx, ev)
+			case tracev2.ResourceGoroutine:
+				g.GoroutineTransition(ctx, ev)
+			}
+		case tracev2.EventLog:
+			g.Log(ctx, ev)
+		}
+	}
+	for i, task := range opts.tasks {
+		emitTask(ctx, task, i)
+		if opts.mode&traceviewer.ModeGoroutineOriented != 0 {
+			for _, region := range task.Regions {
+				emitRegion(ctx, region)
+			}
+		}
+	}
+	g.Finish(ctx)
+}
+
+// emitTask emits information about a task into the trace viewer's event stream.
+//
+// sortIndex sets the order in which this task will appear relative to other tasks,
+// lowest first.
+func emitTask(ctx *traceContext, task *trace.UserTaskSummary, sortIndex int) {
+	// Collect information about the task.
+	var startStack, endStack tracev2.Stack
+	var startG, endG tracev2.GoID
+	startTime, endTime := ctx.startTime, ctx.endTime
+	if task.Start != nil {
+		startStack = task.Start.Stack()
+		startG = task.Start.Goroutine()
+		startTime = task.Start.Time()
+	}
+	if task.End != nil {
+		endStack = task.End.Stack()
+		endG = task.End.Goroutine()
+		endTime = task.End.Time()
+	}
+	arg := struct {
+		ID     uint64 `json:"id"`
+		StartG uint64 `json:"start_g,omitempty"`
+		EndG   uint64 `json:"end_g,omitempty"`
+	}{
+		ID:     uint64(task.ID),
+		StartG: uint64(startG),
+		EndG:   uint64(endG),
+	}
+
+	// Emit the task slice and notify the emitter of the task.
+	ctx.Task(uint64(task.ID), fmt.Sprintf("T%d %s", task.ID, task.Name), sortIndex)
+	ctx.TaskSlice(traceviewer.SliceEvent{
+		Name:     task.Name,
+		Ts:       ctx.elapsed(startTime),
+		Dur:      endTime.Sub(startTime),
+		Resource: uint64(task.ID),
+		Stack:    ctx.Stack(viewerFrames(startStack)),
+		EndStack: ctx.Stack(viewerFrames(endStack)),
+		Arg:      arg,
+	})
+	// Emit an arrow from the parent to the child.
+	if task.Parent != nil && task.Start != nil && task.Start.Kind() == tracev2.EventTaskBegin {
+		ctx.TaskArrow(traceviewer.ArrowEvent{
+			Name:         "newTask",
+			Start:        ctx.elapsed(task.Start.Time()),
+			End:          ctx.elapsed(task.Start.Time()),
+			FromResource: uint64(task.Parent.ID),
+			ToResource:   uint64(task.ID),
+			FromStack:    ctx.Stack(viewerFrames(task.Start.Stack())),
+		})
+	}
+}
+
+// emitRegion emits goroutine-based slice events to the UI. The caller
+// must be emitting for a goroutine-oriented trace.
+//
+// TODO(mknyszek): Make regions part of the regular generator loop and
+// treat them like ranges so that we can emit regions in traces oriented
+// by proc or thread.
+func emitRegion(ctx *traceContext, region *trace.UserRegionSummary) {
+	if region.Name == "" {
+		return
+	}
+	// Collect information about the region.
+	var startStack, endStack tracev2.Stack
+	goroutine := tracev2.NoGoroutine
+	startTime, endTime := ctx.startTime, ctx.endTime
+	if region.Start != nil {
+		startStack = region.Start.Stack()
+		startTime = region.Start.Time()
+		goroutine = region.Start.Goroutine()
+	}
+	if region.End != nil {
+		endStack = region.End.Stack()
+		endTime = region.End.Time()
+		goroutine = region.End.Goroutine()
+	}
+	if goroutine == tracev2.NoGoroutine {
+		return
+	}
+	arg := struct {
+		TaskID uint64 `json:"taskid"`
+	}{
+		TaskID: uint64(region.TaskID),
+	}
+	ctx.AsyncSlice(traceviewer.AsyncSliceEvent{
+		SliceEvent: traceviewer.SliceEvent{
+			Name:     region.Name,
+			Ts:       ctx.elapsed(startTime),
+			Dur:      endTime.Sub(startTime),
+			Resource: uint64(goroutine),
+			Stack:    ctx.Stack(viewerFrames(startStack)),
+			EndStack: ctx.Stack(viewerFrames(endStack)),
+			Arg:      arg,
+		},
+		Category:       "Region",
+		Scope:          fmt.Sprintf("%x", region.TaskID),
+		TaskColorIndex: uint64(region.TaskID),
+	})
+}
+
+// Building blocks for generators.
+
+// stackSampleGenerator implements a generic handler for stack sample events.
+// The provided resource is the resource the stack sample should count against.
+type stackSampleGenerator[R resource] struct {
+	// getResource is a function to extract a resource ID from a stack sample event.
+	getResource func(*tracev2.Event) R
+}
+
+// StackSample implements a stack sample event handler. It expects ev to be one such event.
+func (g *stackSampleGenerator[R]) StackSample(ctx *traceContext, ev *tracev2.Event) {
+	id := g.getResource(ev)
+	if id == R(noResource) {
+		// We have nowhere to put this in the UI.
+		return
+	}
+	ctx.Instant(traceviewer.InstantEvent{
+		Name:     "CPU profile sample",
+		Ts:       ctx.elapsed(ev.Time()),
+		Resource: uint64(id),
+		Stack:    ctx.Stack(viewerFrames(ev.Stack())),
+	})
+}
+
+// globalRangeGenerator implements a generic handler for EventRange* events that pertain
+// to tracev2.ResourceNone (the global scope).
+type globalRangeGenerator struct {
+	ranges   map[string]activeRange
+	seenSync bool
+}
+
+// Sync notifies the generator of an EventSync event.
+func (g *globalRangeGenerator) Sync() {
+	g.seenSync = true
+}
+
+// GlobalRange implements a handler for EventRange* events whose Scope.Kind is ResourceNone.
+// It expects ev to be one such event.
+func (g *globalRangeGenerator) GlobalRange(ctx *traceContext, ev *tracev2.Event) {
+	if g.ranges == nil {
+		g.ranges = make(map[string]activeRange)
+	}
+	r := ev.Range()
+	switch ev.Kind() {
+	case tracev2.EventRangeBegin:
+		g.ranges[r.Name] = activeRange{ev.Time(), ev.Stack()}
+	case tracev2.EventRangeActive:
+		// If we've seen a Sync event, then Active events are always redundant.
+		if !g.seenSync {
+			// Otherwise, they extend back to the start of the trace.
+			g.ranges[r.Name] = activeRange{ctx.startTime, ev.Stack()}
+		}
+	case tracev2.EventRangeEnd:
+		// Only emit GC events, because we have nowhere to
+		// put other events.
+		ar := g.ranges[r.Name]
+		if strings.Contains(r.Name, "GC") {
+			ctx.Slice(traceviewer.SliceEvent{
+				Name:     r.Name,
+				Ts:       ctx.elapsed(ar.time),
+				Dur:      ev.Time().Sub(ar.time),
+				Resource: trace.GCP,
+				Stack:    ctx.Stack(viewerFrames(ar.stack)),
+				EndStack: ctx.Stack(viewerFrames(ev.Stack())),
+			})
+		}
+		delete(g.ranges, r.Name)
+	}
+}
+
+// Finish flushes any outstanding ranges at the end of the trace.
+func (g *globalRangeGenerator) Finish(ctx *traceContext) {
+	for name, ar := range g.ranges {
+		if !strings.Contains(name, "GC") {
+			continue
+		}
+		ctx.Slice(traceviewer.SliceEvent{
+			Name:     name,
+			Ts:       ctx.elapsed(ar.time),
+			Dur:      ctx.endTime.Sub(ar.time),
+			Resource: trace.GCP,
+			Stack:    ctx.Stack(viewerFrames(ar.stack)),
+		})
+	}
+}
+
+// globalMetricGenerator implements a generic handler for Metric events.
+type globalMetricGenerator struct{}
+
+// GlobalMetric implements an event handler for EventMetric events. ev must be one such event.
+func (g *globalMetricGenerator) GlobalMetric(ctx *traceContext, ev *tracev2.Event) {
+	m := ev.Metric()
+	switch m.Name {
+	case "/memory/classes/heap/objects:bytes":
+		ctx.HeapAlloc(ctx.elapsed(ev.Time()), m.Value.Uint64())
+	case "/gc/heap/goal:bytes":
+		ctx.HeapGoal(ctx.elapsed(ev.Time()), m.Value.Uint64())
+	case "/sched/gomaxprocs:threads":
+		ctx.Gomaxprocs(m.Value.Uint64())
+	}
+}
+
+// procRangeGenerator implements a generic handler for EventRange* events whose Scope.Kind is
+// ResourceProc.
+type procRangeGenerator struct {
+	ranges   map[tracev2.Range]activeRange
+	seenSync bool
+}
+
+// Sync notifies the generator of an EventSync event.
+func (g *procRangeGenerator) Sync() {
+	g.seenSync = true
+}
+
+// ProcRange implements a handler for EventRange* events whose Scope.Kind is ResourceProc.
+// It expects ev to be one such event.
+func (g *procRangeGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) {
+	if g.ranges == nil {
+		g.ranges = make(map[tracev2.Range]activeRange)
+	}
+	r := ev.Range()
+	switch ev.Kind() {
+	case tracev2.EventRangeBegin:
+		g.ranges[r] = activeRange{ev.Time(), ev.Stack()}
+	case tracev2.EventRangeActive:
+		// If we've seen a Sync event, then Active events are always redundant.
+		if !g.seenSync {
+			// Otherwise, they extend back to the start of the trace.
+			g.ranges[r] = activeRange{ctx.startTime, ev.Stack()}
+		}
+	case tracev2.EventRangeEnd:
+		// Emit proc-based ranges.
+		ar := g.ranges[r]
+		ctx.Slice(traceviewer.SliceEvent{
+			Name:     r.Name,
+			Ts:       ctx.elapsed(ar.time),
+			Dur:      ev.Time().Sub(ar.time),
+			Resource: uint64(r.Scope.Proc()),
+			Stack:    ctx.Stack(viewerFrames(ar.stack)),
+			EndStack: ctx.Stack(viewerFrames(ev.Stack())),
+		})
+		delete(g.ranges, r)
+	}
+}
+
+// Finish flushes any outstanding ranges at the end of the trace.
+func (g *procRangeGenerator) Finish(ctx *traceContext) {
+	for r, ar := range g.ranges {
+		ctx.Slice(traceviewer.SliceEvent{
+			Name:     r.Name,
+			Ts:       ctx.elapsed(ar.time),
+			Dur:      ctx.endTime.Sub(ar.time),
+			Resource: uint64(r.Scope.Proc()),
+			Stack:    ctx.Stack(viewerFrames(ar.stack)),
+		})
+	}
+}
+
+// activeRange represents an active EventRange* range.
+type activeRange struct {
+	time  tracev2.Time
+	stack tracev2.Stack
+}
+
+// completedRange represents a completed EventRange* range.
+type completedRange struct {
+	name       string
+	startTime  tracev2.Time
+	endTime    tracev2.Time
+	startStack tracev2.Stack
+	endStack   tracev2.Stack
+	arg        any
+}
+
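+// logEventGenerator implements a generic handler for Log events. Each log is
+// emitted as an instant event in the "user event" category, attributed to the
+// resource returned by getResource; logs with no such resource are skipped.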
+type logEventGenerator[R resource] struct {
+	// getResource is a function to extract a resource ID from a Log event.
+	getResource func(*tracev2.Event) R
+}
+
+// Log implements a log event handler. It expects ev to be one such event.
+func (g *logEventGenerator[R]) Log(ctx *traceContext, ev *tracev2.Event) {
+	id := g.getResource(ev)
+	if id == R(noResource) {
+		// We have nowhere to put this in the UI.
+		return
+	}
+
+	// Construct the name to present.
+	log := ev.Log()
+	name := log.Message
+	if log.Category != "" {
+		name = "[" + log.Category + "] " + name
+	}
+
+	// Emit an instant event.
+	ctx.Instant(traceviewer.InstantEvent{
+		Name:     name,
+		Ts:       ctx.elapsed(ev.Time()),
+		Category: "user event",
+		Resource: uint64(id),
+		Stack:    ctx.Stack(viewerFrames(ev.Stack())),
+	})
+}
diff --git a/src/cmd/trace/v2/goroutinegen.go b/src/cmd/trace/v2/goroutinegen.go
new file mode 100644
index 0000000..c76bd84
--- /dev/null
+++ b/src/cmd/trace/v2/goroutinegen.go
@@ -0,0 +1,167 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	tracev2 "internal/trace/v2"
+)
+
+var _ generator = &goroutineGenerator{}
+
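+// goroutineGenerator generates a goroutine-oriented view of the trace, in
+// which each goroutine (subject to the optional filter) appears as its own
+// resource row.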
+type goroutineGenerator struct {
+	globalRangeGenerator
+	globalMetricGenerator
+	stackSampleGenerator[tracev2.GoID]
+	logEventGenerator[tracev2.GoID]
+
+	gStates map[tracev2.GoID]*gState[tracev2.GoID]
+	focus   tracev2.GoID
+	filter  map[tracev2.GoID]struct{}
+}
+
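+// newGoroutineGenerator returns a goroutineGenerator that focuses on the
+// goroutine focus (if not tracev2.NoGoroutine) and, when filter is non-nil,
+// restricts the emitted resources to the goroutines in filter. A caller
+// might drive it roughly like:
+//
+//	g := newGoroutineGenerator(ctx, focus, filter)
+//	runGenerator(ctx, g, parsed, opts)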
+func newGoroutineGenerator(ctx *traceContext, focus tracev2.GoID, filter map[tracev2.GoID]struct{}) *goroutineGenerator {
+	gg := new(goroutineGenerator)
+	rg := func(ev *tracev2.Event) tracev2.GoID {
+		return ev.Goroutine()
+	}
+	gg.stackSampleGenerator.getResource = rg
+	gg.logEventGenerator.getResource = rg
+	gg.gStates = make(map[tracev2.GoID]*gState[tracev2.GoID])
+	gg.focus = focus
+	gg.filter = filter
+
+	// Enable a filter on the emitter.
+	if filter != nil {
+		ctx.SetResourceFilter(func(resource uint64) bool {
+			_, ok := filter[tracev2.GoID(resource)]
+			return ok
+		})
+	}
+	return gg
+}
+
+func (g *goroutineGenerator) Sync() {
+	g.globalRangeGenerator.Sync()
+}
+
+func (g *goroutineGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) {
+	l := ev.Label()
+	g.gStates[l.Resource.Goroutine()].setLabel(l.Label)
+}
+
+func (g *goroutineGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) {
+	r := ev.Range()
+	switch ev.Kind() {
+	case tracev2.EventRangeBegin:
+		g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack())
+	case tracev2.EventRangeActive:
+		g.gStates[r.Scope.Goroutine()].rangeActive(r.Name)
+	case tracev2.EventRangeEnd:
+		gs := g.gStates[r.Scope.Goroutine()]
+		gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx)
+	}
+}
+
+func (g *goroutineGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) {
+	st := ev.StateTransition()
+	goID := st.Resource.Goroutine()
+
+	// If we haven't seen this goroutine before, create a new
+	// gState for it.
+	gs, ok := g.gStates[goID]
+	if !ok {
+		gs = newGState[tracev2.GoID](goID)
+		g.gStates[goID] = gs
+	}
+
+	// Try to augment the name of the goroutine.
+	gs.augmentName(st.Stack)
+
+	// Handle the goroutine state transition.
+	from, to := st.Goroutine()
+	if from == to {
+		// Filter out no-op events.
+		return
+	}
+	if from.Executing() && !to.Executing() {
+		if to == tracev2.GoWaiting {
+			// Goroutine started blocking.
+			gs.block(ev.Time(), ev.Stack(), st.Reason, ctx)
+		} else {
+			gs.stop(ev.Time(), ev.Stack(), ctx)
+		}
+	}
+	if !from.Executing() && to.Executing() {
+		start := ev.Time()
+		if from == tracev2.GoUndetermined {
+			// Back-date the event to the start of the trace.
+			start = ctx.startTime
+		}
+		gs.start(start, goID, ctx)
+	}
+
+	if from == tracev2.GoWaiting {
+		// Goroutine unblocked.
+		gs.unblock(ev.Time(), ev.Stack(), ev.Goroutine(), ctx)
+	}
+	if from == tracev2.GoNotExist && to == tracev2.GoRunnable {
+		// Goroutine was created.
+		gs.created(ev.Time(), ev.Goroutine(), ev.Stack())
+	}
+	if from == tracev2.GoSyscall && to != tracev2.GoRunning {
+		// Exiting blocked syscall.
+		gs.syscallEnd(ev.Time(), true, ctx)
+		gs.blockedSyscallEnd(ev.Time(), ev.Stack(), ctx)
+	} else if from == tracev2.GoSyscall {
+		// Check if we're exiting a syscall in a non-blocking way.
+		gs.syscallEnd(ev.Time(), false, ctx)
+	}
+
+	// Handle syscalls.
+	if to == tracev2.GoSyscall {
+		start := ev.Time()
+		if from == tracev2.GoUndetermined {
+			// Back-date the event to the start of the trace.
+			start = ctx.startTime
+		}
+		// Write down that we've entered a syscall. Note: we might have no G or P here
+		// if we're in a cgo callback or this is a transition from GoUndetermined
+		// (i.e. the G has been blocked in a syscall).
+		gs.syscallBegin(start, goID, ev.Stack())
+	}
+
+	// Note down the goroutine transition.
+	_, inMarkAssist := gs.activeRanges["GC mark assist"]
+	ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist))
+}
+
+func (g *goroutineGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) {
+	// TODO(mknyszek): Extend procRangeGenerator to support rendering proc ranges
+	// that overlap with a goroutine's execution.
+}
+
+func (g *goroutineGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) {
+	// Not needed. All relevant information for goroutines can be derived from goroutine transitions.
+}
+
+func (g *goroutineGenerator) Finish(ctx *traceContext) {
+	ctx.SetResourceType("G")
+
+	// Finish off global ranges.
+	g.globalRangeGenerator.Finish(ctx)
+
+	// Finish off all the goroutine slices.
+	for id, gs := range g.gStates {
+		gs.finish(ctx)
+
+		// Tell the emitter about the goroutines we want to render.
+		ctx.Resource(uint64(id), gs.name())
+	}
+
+	// Set the goroutine to focus on.
+	if g.focus != tracev2.NoGoroutine {
+		ctx.Focus(uint64(g.focus))
+	}
+}
diff --git a/src/cmd/trace/v2/goroutines.go b/src/cmd/trace/v2/goroutines.go
new file mode 100644
index 0000000..3cf3666
--- /dev/null
+++ b/src/cmd/trace/v2/goroutines.go
@@ -0,0 +1,420 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Goroutine-related profiles.
+
+package trace
+
+import (
+	"cmp"
+	"fmt"
+	"html/template"
+	"internal/trace"
+	"internal/trace/traceviewer"
+	tracev2 "internal/trace/v2"
+	"log"
+	"net/http"
+	"slices"
+	"sort"
+	"strings"
+	"time"
+)
+
+// GoroutinesHandlerFunc returns a HandlerFunc that serves a list of goroutine groups.
+func GoroutinesHandlerFunc(summaries map[tracev2.GoID]*trace.GoroutineSummary) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		// goroutineGroup describes a group of goroutines grouped by name.
+		type goroutineGroup struct {
+			Name     string        // Start function.
+			N        int           // Total number of goroutines in this group.
+			ExecTime time.Duration // Total execution time of all goroutines in this group.
+		}
+		// Accumulate groups by Name.
+		groupsByName := make(map[string]goroutineGroup)
+		for _, summary := range summaries {
+			group := groupsByName[summary.Name]
+			group.Name = summary.Name
+			group.N++
+			group.ExecTime += summary.ExecTime
+			groupsByName[summary.Name] = group
+		}
+		var groups []goroutineGroup
+		for _, group := range groupsByName {
+			groups = append(groups, group)
+		}
+		slices.SortFunc(groups, func(a, b goroutineGroup) int {
+			return cmp.Compare(b.ExecTime, a.ExecTime)
+		})
+		w.Header().Set("Content-Type", "text/html;charset=utf-8")
+		if err := templGoroutines.Execute(w, groups); err != nil {
+			log.Printf("failed to execute template: %v", err)
+			return
+		}
+	}
+}
+
+var templGoroutines = template.Must(template.New("").Parse(`
+<html>
+<style>` + traceviewer.CommonStyle + `
+table {
+  border-collapse: collapse;
+}
+td,
+th {
+  border: 1px solid black;
+  padding-left: 8px;
+  padding-right: 8px;
+  padding-top: 4px;
+  padding-bottom: 4px;
+}
+</style>
+<body>
+<h1>Goroutines</h1>
+Below is a table of all goroutines in the trace grouped by start location and sorted by the total execution time of the group.<br>
+<br>
+Click a start location to view more details about that group.<br>
+<br>
+<table>
+  <tr>
+    <th>Start location</th>
+	<th>Count</th>
+	<th>Total execution time</th>
+  </tr>
+{{range $}}
+  <tr>
+    <td><code><a href="/goroutine?name={{.Name}}">{{or .Name "(Inactive, no stack trace sampled)"}}</a></code></td>
+	<td>{{.N}}</td>
+	<td>{{.ExecTime}}</td>
+  </tr>
+{{end}}
+</table>
+</body>
+</html>
+`))
+
+// GoroutineHandler creates a handler that serves information about
+// goroutines in a particular group.
+func GoroutineHandler(summaries map[tracev2.GoID]*trace.GoroutineSummary) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		goroutineName := r.FormValue("name")
+
+		type goroutine struct {
+			*trace.GoroutineSummary
+			NonOverlappingStats map[string]time.Duration
+			HasRangeTime        bool
+		}
+
+		// Collect all the goroutines in the group.
+		var (
+			goroutines              []goroutine
+			name                    string
+			totalExecTime, execTime time.Duration
+			maxTotalTime            time.Duration
+		)
+		validNonOverlappingStats := make(map[string]struct{})
+		validRangeStats := make(map[string]struct{})
+		for _, summary := range summaries {
+			totalExecTime += summary.ExecTime
+
+			if summary.Name != goroutineName {
+				continue
+			}
+			nonOverlappingStats := summary.NonOverlappingStats()
+			for name := range nonOverlappingStats {
+				validNonOverlappingStats[name] = struct{}{}
+			}
+			var totalRangeTime time.Duration
+			for name, dt := range summary.RangeTime {
+				validRangeStats[name] = struct{}{}
+				totalRangeTime += dt
+			}
+			goroutines = append(goroutines, goroutine{
+				GoroutineSummary:    summary,
+				NonOverlappingStats: nonOverlappingStats,
+				HasRangeTime:        totalRangeTime != 0,
+			})
+			name = summary.Name
+			execTime += summary.ExecTime
+			if maxTotalTime < summary.TotalTime {
+				maxTotalTime = summary.TotalTime
+			}
+		}
+
+		// Compute the percent of total execution time these goroutines represent.
+		execTimePercent := ""
+		if totalExecTime > 0 {
+			execTimePercent = fmt.Sprintf("%.2f%%", float64(execTime)/float64(totalExecTime)*100)
+		}
+
+		// Sort.
+		sortBy := r.FormValue("sortby")
+		if _, ok := validNonOverlappingStats[sortBy]; ok {
+			slices.SortFunc(goroutines, func(a, b goroutine) int {
+				return cmp.Compare(b.NonOverlappingStats[sortBy], a.NonOverlappingStats[sortBy])
+			})
+		} else {
+			// Sort by total time by default.
+			slices.SortFunc(goroutines, func(a, b goroutine) int {
+				return cmp.Compare(b.TotalTime, a.TotalTime)
+			})
+		}
+
+		// Write down all the non-overlapping stats and sort them.
+		allNonOverlappingStats := make([]string, 0, len(validNonOverlappingStats))
+		for name := range validNonOverlappingStats {
+			allNonOverlappingStats = append(allNonOverlappingStats, name)
+		}
+		slices.SortFunc(allNonOverlappingStats, func(a, b string) int {
+			if a == b {
+				return 0
+			}
+			if a == "Execution time" {
+				return -1
+			}
+			if b == "Execution time" {
+				return 1
+			}
+			return cmp.Compare(a, b)
+		})
+
+		// Write down all the range stats and sort them.
+		allRangeStats := make([]string, 0, len(validRangeStats))
+		for name := range validRangeStats {
+			allRangeStats = append(allRangeStats, name)
+		}
+		sort.Strings(allRangeStats)
+
+		err := templGoroutine.Execute(w, struct {
+			Name                string
+			N                   int
+			ExecTimePercent     string
+			MaxTotal            time.Duration
+			Goroutines          []goroutine
+			NonOverlappingStats []string
+			RangeStats          []string
+		}{
+			Name:                name,
+			N:                   len(goroutines),
+			ExecTimePercent:     execTimePercent,
+			MaxTotal:            maxTotalTime,
+			Goroutines:          goroutines,
+			NonOverlappingStats: allNonOverlappingStats,
+			RangeStats:          allRangeStats,
+		})
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError)
+			return
+		}
+	}
+}
+
+func stat2Color(statName string) string {
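+	// Default color for stats without a dedicated color below.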
+	color := "#636363"
+	if strings.HasPrefix(statName, "Block time") {
+		color = "#d01c8b"
+	}
+	switch statName {
+	case "Sched wait time":
+		color = "#2c7bb6"
+	case "Syscall execution time":
+		color = "#7b3294"
+	case "Execution time":
+		color = "#d7191c"
+	}
+	return color
+}
+
+var templGoroutine = template.Must(template.New("").Funcs(template.FuncMap{
+	"percent": func(dividend, divisor time.Duration) template.HTML {
+		if divisor == 0 {
+			return ""
+		}
+		return template.HTML(fmt.Sprintf("(%.1f%%)", float64(dividend)/float64(divisor)*100))
+	},
+	"headerStyle": func(statName string) template.HTMLAttr {
+		return template.HTMLAttr(fmt.Sprintf("style=\"background-color: %s;\"", stat2Color(statName)))
+	},
+	"barStyle": func(statName string, dividend, divisor time.Duration) template.HTMLAttr {
+		width := "0"
+		if divisor != 0 {
+			width = fmt.Sprintf("%.2f%%", float64(dividend)/float64(divisor)*100)
+		}
+		return template.HTMLAttr(fmt.Sprintf("style=\"width: %s; background-color: %s;\"", width, stat2Color(statName)))
+	},
+}).Parse(`
+<!DOCTYPE html>
+<title>Goroutines: {{.Name}}</title>
+<style>` + traceviewer.CommonStyle + `
+th {
+  background-color: #050505;
+  color: #fff;
+}
+th.link {
+  cursor: pointer;
+}
+table {
+  border-collapse: collapse;
+}
+td,
+th {
+  padding-left: 8px;
+  padding-right: 8px;
+  padding-top: 4px;
+  padding-bottom: 4px;
+}
+.details tr:hover {
+  background-color: #f2f2f2;
+}
+.details td {
+  text-align: right;
+  border: 1px solid black;
+}
+.details td.id {
+  text-align: left;
+}
+.stacked-bar-graph {
+  width: 300px;
+  height: 10px;
+  color: #414042;
+  white-space: nowrap;
+  font-size: 5px;
+}
+.stacked-bar-graph span {
+  display: inline-block;
+  width: 100%;
+  height: 100%;
+  box-sizing: border-box;
+  float: left;
+  padding: 0;
+}
+</style>
+
+<script>
+function reloadTable(key, value) {
+  let params = new URLSearchParams(window.location.search);
+  params.set(key, value);
+  window.location.search = params.toString();
+}
+</script>
+
+<h1>Goroutines</h1>
+
+Table of contents
+<ul>
+	<li><a href="#summary">Summary</a></li>
+	<li><a href="#breakdown">Breakdown</a></li>
+	<li><a href="#ranges">Special ranges</a></li>
+</ul>
+
+<h3 id="summary">Summary</h3>
+
+<table class="summary">
+	<tr>
+		<td>Goroutine start location:</td>
+		<td><code>{{.Name}}</code></td>
+	</tr>
+	<tr>
+		<td>Count:</td>
+		<td>{{.N}}</td>
+	</tr>
+	<tr>
+		<td>Execution Time:</td>
+		<td>{{.ExecTimePercent}} of total program execution time </td>
+	</tr>
+	<tr>
+		<td>Network wait profile:</td>
+		<td> <a href="/io?name={{.Name}}">graph</a> <a href="/io?name={{.Name}}&raw=1" download="io.profile">(download)</a></td>
+	</tr>
+	<tr>
+		<td>Sync block profile:</td>
+		<td> <a href="/block?name={{.Name}}">graph</a> <a href="/block?name={{.Name}}&raw=1" download="block.profile">(download)</a></td>
+	</tr>
+	<tr>
+		<td>Syscall profile:</td>
+		<td> <a href="/syscall?name={{.Name}}">graph</a> <a href="/syscall?name={{.Name}}&raw=1" download="syscall.profile">(download)</a></td>
+		</tr>
+	<tr>
+		<td>Scheduler wait profile:</td>
+		<td> <a href="/sched?name={{.Name}}">graph</a> <a href="/sched?name={{.Name}}&raw=1" download="sched.profile">(download)</a></td>
+	</tr>
+</table>
+
+<h3 id="breakdown">Breakdown</h3>
+
+The table below breaks down where each goroutine spent its time during the
+traced period.
+All of the columns except total time are non-overlapping.
+<br>
+<br>
+
+<table class="details">
+<tr>
+<th> Goroutine</th>
+<th class="link" onclick="reloadTable('sortby', 'Total time')"> Total</th>
+<th></th>
+{{range $.NonOverlappingStats}}
+<th class="link" onclick="reloadTable('sortby', '{{.}}')" {{headerStyle .}}> {{.}}</th>
+{{end}}
+</tr>
+{{range .Goroutines}}
+	<tr>
+		<td> <a href="/trace?goid={{.ID}}">{{.ID}}</a> </td>
+		<td> {{ .TotalTime.String }} </td>
+		<td>
+			<div class="stacked-bar-graph">
+			{{$Goroutine := .}}
+			{{range $.NonOverlappingStats}}
+				{{$Time := index $Goroutine.NonOverlappingStats .}}
+				{{if $Time}}
+					<span {{barStyle . $Time $.MaxTotal}}>&nbsp;</span>
+				{{end}}
+			{{end}}
+			</div>
+		</td>
+		{{$Goroutine := .}}
+		{{range $.NonOverlappingStats}}
+			{{$Time := index $Goroutine.NonOverlappingStats .}}
+			<td> {{$Time.String}}</td>
+		{{end}}
+	</tr>
+{{end}}
+</table>
+
+<h3 id="ranges">Special ranges</h3>
+
+The table below describes how much of the traced period each goroutine spent in
+certain special time ranges, for example, how much time it spent helping the GC.
+If a goroutine spent no time in any special time range, it is excluded from the
+table.
+Note that these times do overlap with the times from the first table.
+In general, the goroutine may not be executing during these special time ranges;
+for example, it may have been blocked while trying to help the GC.
+This must be taken into account when interpreting the data.
+<br>
+<br>
+
+<table class="details">
+<tr>
+<th> Goroutine</th>
+<th> Total</th>
+{{range $.RangeStats}}
+<th {{headerStyle .}}> {{.}}</th>
+{{end}}
+</tr>
+{{range .Goroutines}}
+	{{if .HasRangeTime}}
+		<tr>
+			<td> <a href="/trace?goid={{.ID}}">{{.ID}}</a> </td>
+			<td> {{ .TotalTime.String }} </td>
+			{{$Goroutine := .}}
+			{{range $.RangeStats}}
+				{{$Time := index $Goroutine.RangeTime .}}
+				<td> {{$Time.String}}</td>
+			{{end}}
+		</tr>
+	{{end}}
+{{end}}
+</table>
+`))
diff --git a/src/cmd/trace/v2/gstate.go b/src/cmd/trace/v2/gstate.go
new file mode 100644
index 0000000..aeba7ec
--- /dev/null
+++ b/src/cmd/trace/v2/gstate.go
@@ -0,0 +1,373 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"fmt"
+	"internal/trace"
+	"internal/trace/traceviewer"
+	"internal/trace/traceviewer/format"
+	tracev2 "internal/trace/v2"
+	"strings"
+)
+
+// resource is a generic constraint interface for resource IDs.
+type resource interface {
+	tracev2.GoID | tracev2.ProcID | tracev2.ThreadID
+}
+
+// noResource indicates the lack of a resource.
+const noResource = -1
+
+// gState represents the trace viewer state of a goroutine in a trace.
+//
+// The type parameter on this type is the resource used to construct
+// a timeline of events, e.g. R=ProcID for a proc-oriented view, R=GoID for
+// a goroutine-oriented view, etc.
+type gState[R resource] struct {
+	baseName  string
+	named     bool   // Whether baseName has been set.
+	label     string // EventLabel extension.
+	isSystemG bool
+
+	executing R // The resource this goroutine is executing on. (Could be itself.)
+
+	// lastStopStack is the stack trace at the point of the last
+	// call to the stop method. This tends to be a more reliable way
+	// of picking up stack traces, since the parser doesn't provide
+	// a stack for every state transition event.
+	lastStopStack tracev2.Stack
+
+	// activeRanges is the set of all active ranges on the goroutine.
+	activeRanges map[string]activeRange
+
+	// completedRanges is a list of ranges that completed since the goroutine
+	// last stopped executing. These are flushed on every stop or block.
+	completedRanges []completedRange
+
+	// startRunningTime is the time of the most recent event that caused this
+	// goroutine to transition to GoRunning.
+	startRunningTime tracev2.Time
+
+	// syscall tracks the most recent event that caused this goroutine to
+	// transition to GoSyscall.
+	syscall struct {
+		time   tracev2.Time
+		stack  tracev2.Stack
+		active bool
+	}
+
+	// startBlockReason is the StateTransition.Reason of the most recent
+	// event that caused a goroutine to transition to GoWaiting.
+	startBlockReason string
+
+	// startCause is the event that allowed this goroutine to start running.
+	// It's used to generate flow events. This is typically something like
+	// an unblock event or a goroutine creation event.
+	//
+	// startCause.resource is the resource on which startCause happened, but is
+	// listed separately because the cause may have happened on a resource that
+	// isn't R (or perhaps on some abstract nebulous resource, like trace.NetpollP).
+	startCause struct {
+		time     tracev2.Time
+		name     string
+		resource uint64
+		stack    tracev2.Stack
+	}
+}
+
+// newGState constructs a new goroutine state for the goroutine
+// identified by the provided ID.
+func newGState[R resource](goID tracev2.GoID) *gState[R] {
+	return &gState[R]{
+		baseName:     fmt.Sprintf("G%d", goID),
+		executing:    R(noResource),
+		activeRanges: make(map[string]activeRange),
+	}
+}
+
+// augmentName attempts to use stk to augment the name of the goroutine
+// with stack information. This stack must be related to the goroutine
+// in some way, but it doesn't really matter which stack.
+func (gs *gState[R]) augmentName(stk tracev2.Stack) {
+	if gs.named {
+		return
+	}
+	if stk == tracev2.NoStack {
+		return
+	}
+	name := lastFunc(stk)
+	gs.baseName += fmt.Sprintf(" %s", name)
+	gs.named = true
+	gs.isSystemG = trace.IsSystemGoroutine(name)
+}
+
+// setLabel adds an additional label to the goroutine's name.
+func (gs *gState[R]) setLabel(label string) {
+	gs.label = label
+}
+
+// name returns a name for the goroutine.
+func (gs *gState[R]) name() string {
+	name := gs.baseName
+	if gs.label != "" {
+		name += " (" + gs.label + ")"
+	}
+	return name
+}
+
+// setStartCause sets the reason a goroutine will be allowed to start soon.
+// For example, via unblocking or exiting a blocked syscall.
+func (gs *gState[R]) setStartCause(ts tracev2.Time, name string, resource uint64, stack tracev2.Stack) {
+	gs.startCause.time = ts
+	gs.startCause.name = name
+	gs.startCause.resource = resource
+	gs.startCause.stack = stack
+}
+
+// created indicates that this goroutine was just created by the provided creator.
+func (gs *gState[R]) created(ts tracev2.Time, creator R, stack tracev2.Stack) {
+	if creator == R(noResource) {
+		return
+	}
+	gs.setStartCause(ts, "go", uint64(creator), stack)
+}
+
+// start indicates that a goroutine has started running on a proc.
+func (gs *gState[R]) start(ts tracev2.Time, resource R, ctx *traceContext) {
+	// Set the time for all the active ranges.
+	for name := range gs.activeRanges {
+		gs.activeRanges[name] = activeRange{ts, tracev2.NoStack}
+	}
+
+	if gs.startCause.name != "" {
+		// It has a start cause. Emit a flow event.
+		ctx.Arrow(traceviewer.ArrowEvent{
+			Name:         gs.startCause.name,
+			Start:        ctx.elapsed(gs.startCause.time),
+			End:          ctx.elapsed(ts),
+			FromResource: uint64(gs.startCause.resource),
+			ToResource:   uint64(resource),
+			FromStack:    ctx.Stack(viewerFrames(gs.startCause.stack)),
+		})
+		gs.startCause.time = 0
+		gs.startCause.name = ""
+		gs.startCause.resource = 0
+		gs.startCause.stack = tracev2.NoStack
+	}
+	gs.executing = resource
+	gs.startRunningTime = ts
+}
+
+// syscallBegin indicates that the goroutine entered a syscall on a proc.
+func (gs *gState[R]) syscallBegin(ts tracev2.Time, resource R, stack tracev2.Stack) {
+	gs.syscall.time = ts
+	gs.syscall.stack = stack
+	gs.syscall.active = true
+	if gs.executing == R(noResource) {
+		gs.executing = resource
+		gs.startRunningTime = ts
+	}
+}
+
+// syscallEnd ends the syscall slice, regardless of the syscall's current state. This is orthogonal
+// to blockedSyscallEnd -- both must be called when a syscall ends and that syscall
+// blocked. They're kept separate because syscallEnd indicates the point at which the
+// goroutine is no longer executing on the resource (e.g. a proc) whereas blockedSyscallEnd
+// is the point at which the goroutine actually exited the syscall regardless of which
+// resource that happened on.
+func (gs *gState[R]) syscallEnd(ts tracev2.Time, blocked bool, ctx *traceContext) {
+	if !gs.syscall.active {
+		return
+	}
+	blockString := "no"
+	if blocked {
+		blockString = "yes"
+	}
+	gs.completedRanges = append(gs.completedRanges, completedRange{
+		name:       "syscall",
+		startTime:  gs.syscall.time,
+		endTime:    ts,
+		startStack: gs.syscall.stack,
+		arg:        format.BlockedArg{Blocked: blockString},
+	})
+	gs.syscall.active = false
+	gs.syscall.time = 0
+	gs.syscall.stack = tracev2.NoStack
+}
+
+// blockedSyscallEnd indicates the point at which the blocked syscall ended. This is distinct
+// and orthogonal to syscallEnd; both must be called if the syscall blocked. This sets up an instant
+// to emit a flow event from, indicating explicitly that this goroutine was unblocked by the system.
+func (gs *gState[R]) blockedSyscallEnd(ts tracev2.Time, stack tracev2.Stack, ctx *traceContext) {
+	name := "exit blocked syscall"
+	gs.setStartCause(ts, name, trace.SyscallP, stack)
+
+	// Emit a syscall exit instant event for the "Syscall" lane.
+	ctx.Instant(traceviewer.InstantEvent{
+		Name:     name,
+		Ts:       ctx.elapsed(ts),
+		Resource: trace.SyscallP,
+		Stack:    ctx.Stack(viewerFrames(stack)),
+	})
+}
+
+// unblock indicates that the goroutine that gs represents has been unblocked.
+func (gs *gState[R]) unblock(ts tracev2.Time, stack tracev2.Stack, resource R, ctx *traceContext) {
+	name := "unblock"
+	viewerResource := uint64(resource)
+	if gs.startBlockReason != "" {
+		name = fmt.Sprintf("%s (%s)", name, gs.startBlockReason)
+	}
+	if strings.Contains(gs.startBlockReason, "network") {
+		// Attribute the network instant to the nebulous "NetpollP" if
+		// resource isn't a thread, because there's a good chance that
+		// resource isn't going to be valid in this case.
+		//
+		// TODO(mknyszek): Handle this invalidness in a more general way.
+		if _, ok := any(resource).(tracev2.ThreadID); !ok {
+			// Emit an unblock instant event for the "Network" lane.
+			viewerResource = trace.NetpollP
+		}
+		ctx.Instant(traceviewer.InstantEvent{
+			Name:     name,
+			Ts:       ctx.elapsed(ts),
+			Resource: viewerResource,
+			Stack:    ctx.Stack(viewerFrames(stack)),
+		})
+	}
+	gs.startBlockReason = ""
+	if viewerResource != 0 {
+		gs.setStartCause(ts, name, viewerResource, stack)
+	}
+}
+
+// block indicates that the goroutine has stopped executing on a proc -- specifically,
+// it blocked for some reason.
+func (gs *gState[R]) block(ts tracev2.Time, stack tracev2.Stack, reason string, ctx *traceContext) {
+	gs.startBlockReason = reason
+	gs.stop(ts, stack, ctx)
+}
+
+// stop indicates that the goroutine has stopped executing on a proc.
+func (gs *gState[R]) stop(ts tracev2.Time, stack tracev2.Stack, ctx *traceContext) {
+	// Emit the execution time slice.
+	var stk int
+	if gs.lastStopStack != tracev2.NoStack {
+		stk = ctx.Stack(viewerFrames(gs.lastStopStack))
+	}
+	// Check invariants.
+	if gs.startRunningTime == 0 {
+		panic("silently broken trace or generator invariant (startRunningTime != 0) not held")
+	}
+	if gs.executing == R(noResource) {
+		panic("non-executing goroutine stopped")
+	}
+	ctx.Slice(traceviewer.SliceEvent{
+		Name:     gs.name(),
+		Ts:       ctx.elapsed(gs.startRunningTime),
+		Dur:      ts.Sub(gs.startRunningTime),
+		Resource: uint64(gs.executing),
+		Stack:    stk,
+	})
+
+	// Flush completed ranges.
+	for _, cr := range gs.completedRanges {
+		ctx.Slice(traceviewer.SliceEvent{
+			Name:     cr.name,
+			Ts:       ctx.elapsed(cr.startTime),
+			Dur:      cr.endTime.Sub(cr.startTime),
+			Resource: uint64(gs.executing),
+			Stack:    ctx.Stack(viewerFrames(cr.startStack)),
+			EndStack: ctx.Stack(viewerFrames(cr.endStack)),
+			Arg:      cr.arg,
+		})
+	}
+	gs.completedRanges = gs.completedRanges[:0]
+
+	// Continue in-progress ranges.
+	for name, r := range gs.activeRanges {
+		// Check invariant.
+		if r.time == 0 {
+			panic("silently broken trace or generator invariant (activeRanges time != 0) not held")
+		}
+		ctx.Slice(traceviewer.SliceEvent{
+			Name:     name,
+			Ts:       ctx.elapsed(r.time),
+			Dur:      ts.Sub(r.time),
+			Resource: uint64(gs.executing),
+			Stack:    ctx.Stack(viewerFrames(r.stack)),
+		})
+	}
+
+	// Clear the range info.
+	for name := range gs.activeRanges {
+		gs.activeRanges[name] = activeRange{0, tracev2.NoStack}
+	}
+
+	gs.startRunningTime = 0
+	gs.lastStopStack = stack
+	gs.executing = R(noResource)
+}
+
+// finish writes out any in-progress slices as if the goroutine stopped.
+// This must only be used once the trace has been fully processed and no
+// further events will be processed. This method may leave the gState in
+// an inconsistent state.
+func (gs *gState[R]) finish(ctx *traceContext) {
+	if gs.executing != R(noResource) {
+		gs.syscallEnd(ctx.endTime, false, ctx)
+		gs.stop(ctx.endTime, tracev2.NoStack, ctx)
+	}
+}
+
+// rangeBegin indicates the start of a special range of time.
+func (gs *gState[R]) rangeBegin(ts tracev2.Time, name string, stack tracev2.Stack) {
+	if gs.executing != R(noResource) {
+		// If we're executing, start the slice from here.
+		gs.activeRanges[name] = activeRange{ts, stack}
+	} else {
+		// If the goroutine isn't executing, there's no place for
+		// us to create a slice from. Wait until it starts executing.
+		gs.activeRanges[name] = activeRange{0, stack}
+	}
+}
+
+// rangeActive indicates that a special range of time has been in progress.
+func (gs *gState[R]) rangeActive(name string) {
+	if gs.executing != R(noResource) {
+		// If we're executing, and the range is active, then start
+		// from wherever the goroutine started running from.
+		gs.activeRanges[name] = activeRange{gs.startRunningTime, tracev2.NoStack}
+	} else {
+		// If the goroutine isn't executing, there's no place for
+		// us to create a slice from. Wait until it starts executing.
+		gs.activeRanges[name] = activeRange{0, tracev2.NoStack}
+	}
+}
+
+// rangeEnd indicates the end of a special range of time.
+func (gs *gState[R]) rangeEnd(ts tracev2.Time, name string, stack tracev2.Stack, ctx *traceContext) {
+	if gs.executing != R(noResource) {
+		r := gs.activeRanges[name]
+		gs.completedRanges = append(gs.completedRanges, completedRange{
+			name:       name,
+			startTime:  r.time,
+			endTime:    ts,
+			startStack: r.stack,
+			endStack:   stack,
+		})
+	}
+	delete(gs.activeRanges, name)
+}
+
+func lastFunc(s tracev2.Stack) string {
+	var last tracev2.StackFrame
+	s.Frames(func(f tracev2.StackFrame) bool {
+		last = f
+		return true
+	})
+	return last.Func
+}
diff --git a/src/cmd/trace/v2/jsontrace.go b/src/cmd/trace/v2/jsontrace.go
new file mode 100644
index 0000000..e4ca613
--- /dev/null
+++ b/src/cmd/trace/v2/jsontrace.go
@@ -0,0 +1,229 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"cmp"
+	"log"
+	"math"
+	"net/http"
+	"slices"
+	"strconv"
+	"time"
+
+	"internal/trace"
+	"internal/trace/traceviewer"
+	tracev2 "internal/trace/v2"
+)
+
+func JSONTraceHandler(parsed *parsedTrace) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		opts := defaultGenOpts()
+
+		switch r.FormValue("view") {
+		case "thread":
+			opts.mode = traceviewer.ModeThreadOriented
+		}
+		if goids := r.FormValue("goid"); goids != "" {
+			// Render trace focused on a particular goroutine.
+
+			id, err := strconv.ParseUint(goids, 10, 64)
+			if err != nil {
+				log.Printf("failed to parse goid parameter %q: %v", goids, err)
+				return
+			}
+			goid := tracev2.GoID(id)
+			g, ok := parsed.summary.Goroutines[goid]
+			if !ok {
+				log.Printf("failed to find goroutine %d", goid)
+				return
+			}
+			opts.mode = traceviewer.ModeGoroutineOriented
+			if g.StartTime != 0 {
+				opts.startTime = g.StartTime.Sub(parsed.startTime())
+			} else {
+				opts.startTime = 0
+			}
+			if g.EndTime != 0 {
+				opts.endTime = g.EndTime.Sub(parsed.startTime())
+			} else { // The goroutine didn't end.
+				opts.endTime = parsed.endTime().Sub(parsed.startTime())
+			}
+			opts.focusGoroutine = goid
+			opts.goroutines = trace.RelatedGoroutinesV2(parsed.events, goid)
+		} else if taskids := r.FormValue("focustask"); taskids != "" {
+			taskid, err := strconv.ParseUint(taskids, 10, 64)
+			if err != nil {
+				log.Printf("failed to parse focustask parameter %q: %v", taskids, err)
+				return
+			}
+			task, ok := parsed.summary.Tasks[tracev2.TaskID(taskid)]
+			if !ok || (task.Start == nil && task.End == nil) {
+				log.Printf("failed to find task with id %d", taskid)
+				return
+			}
+			opts.setTask(parsed, task)
+		} else if taskids := r.FormValue("taskid"); taskids != "" {
+			taskid, err := strconv.ParseUint(taskids, 10, 64)
+			if err != nil {
+				log.Printf("failed to parse taskid parameter %q: %v", taskids, err)
+				return
+			}
+			task, ok := parsed.summary.Tasks[tracev2.TaskID(taskid)]
+			if !ok {
+				log.Printf("failed to find task with id %d", taskid)
+				return
+			}
+			// This mode is goroutine-oriented.
+			opts.mode = traceviewer.ModeGoroutineOriented
+			opts.setTask(parsed, task)
+
+			// Pick the goroutine to orient ourselves around by picking the
+			// earliest event in the task that makes any sense. We always
+			// prefer the task's start event if it is present.
+			var firstEv *tracev2.Event
+			if task.Start != nil {
+				firstEv = task.Start
+			} else {
+				for _, logEv := range task.Logs {
+					if firstEv == nil || logEv.Time() < firstEv.Time() {
+						firstEv = logEv
+					}
+				}
+				if task.End != nil && (firstEv == nil || task.End.Time() < firstEv.Time()) {
+					firstEv = task.End
+				}
+			}
+			if firstEv == nil || firstEv.Goroutine() == tracev2.NoGoroutine {
+				log.Printf("failed to find task with id %d", taskid)
+				return
+			}
+
+			// Set the goroutine filtering options.
+			goid := firstEv.Goroutine()
+			opts.focusGoroutine = goid
+			goroutines := make(map[tracev2.GoID]struct{})
+			for _, task := range opts.tasks {
+				// Find only directly involved goroutines.
+				for id := range task.Goroutines {
+					goroutines[id] = struct{}{}
+				}
+			}
+			opts.goroutines = goroutines
+		}
+
+		// Parse start and end options. Both or none must be present.
+		start := int64(0)
+		end := int64(math.MaxInt64)
+		if startStr, endStr := r.FormValue("start"), r.FormValue("end"); startStr != "" && endStr != "" {
+			var err error
+			start, err = strconv.ParseInt(startStr, 10, 64)
+			if err != nil {
+				log.Printf("failed to parse start parameter %q: %v", startStr, err)
+				return
+			}
+
+			end, err = strconv.ParseInt(endStr, 10, 64)
+			if err != nil {
+				log.Printf("failed to parse end parameter %q: %v", endStr, err)
+				return
+			}
+		}
+
+		c := traceviewer.ViewerDataTraceConsumer(w, start, end)
+		if err := generateTrace(parsed, opts, c); err != nil {
+			log.Printf("failed to generate trace: %v", err)
+		}
+	})
+}
+
+// traceContext is a wrapper around a traceviewer.Emitter with some additional
+// information that's useful to most parts of trace viewer JSON emission.
+type traceContext struct {
+	*traceviewer.Emitter
+	startTime tracev2.Time
+	endTime   tracev2.Time
+}
+
+// elapsed returns the time elapsed from the start of the trace to the given
+// trace time.
+func (ctx *traceContext) elapsed(now tracev2.Time) time.Duration {
+	return now.Sub(ctx.startTime)
+}
+
+type genOpts struct {
+	mode      traceviewer.Mode
+	startTime time.Duration
+	endTime   time.Duration
+
+	// Used if mode != 0.
+	focusGoroutine tracev2.GoID
+	goroutines     map[tracev2.GoID]struct{} // Goroutines to be displayed for goroutine-oriented or task-oriented view. goroutines[0] is the main goroutine.
+	tasks          []*trace.UserTaskSummary
+}
+
+// setTask sets a task to focus on.
+func (opts *genOpts) setTask(parsed *parsedTrace, task *trace.UserTaskSummary) {
+	opts.mode |= traceviewer.ModeTaskOriented
+	if task.Start != nil {
+		opts.startTime = task.Start.Time().Sub(parsed.startTime())
+	} else { // The task started before the trace did.
+		opts.startTime = 0
+	}
+	if task.End != nil {
+		opts.endTime = task.End.Time().Sub(parsed.startTime())
+	} else { // The task didn't end.
+		opts.endTime = parsed.endTime().Sub(parsed.startTime())
+	}
+	opts.tasks = task.Descendents()
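+	// Sort the tasks by start time, breaking ties by end time, so they are
+	// shown in a stable order.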
+	slices.SortStableFunc(opts.tasks, func(a, b *trace.UserTaskSummary) int {
+		aStart, bStart := parsed.startTime(), parsed.startTime()
+		if a.Start != nil {
+			aStart = a.Start.Time()
+		}
+		if b.Start != nil {
+			bStart = b.Start.Time()
+		}
+		if a.Start != b.Start {
+			return cmp.Compare(aStart, bStart)
+		}
+		// Break ties with the end time.
+		aEnd, bEnd := parsed.endTime(), parsed.endTime()
+		if a.End != nil {
+			aEnd = a.End.Time()
+		}
+		if b.End != nil {
+			bEnd = b.End.Time()
+		}
+		return cmp.Compare(aEnd, bEnd)
+	})
+}
+
+func defaultGenOpts() *genOpts {
+	return &genOpts{
+		startTime: time.Duration(0),
+		endTime:   time.Duration(math.MaxInt64),
+	}
+}
+
+func generateTrace(parsed *parsedTrace, opts *genOpts, c traceviewer.TraceConsumer) error {
+	ctx := &traceContext{
+		Emitter:   traceviewer.NewEmitter(c, opts.startTime, opts.endTime),
+		startTime: parsed.events[0].Time(),
+		endTime:   parsed.events[len(parsed.events)-1].Time(),
+	}
+	defer ctx.Flush()
+
+	var g generator
+	if opts.mode&traceviewer.ModeGoroutineOriented != 0 {
+		g = newGoroutineGenerator(ctx, opts.focusGoroutine, opts.goroutines)
+	} else if opts.mode&traceviewer.ModeThreadOriented != 0 {
+		g = newThreadGenerator()
+	} else {
+		g = newProcGenerator()
+	}
+	runGenerator(ctx, g, parsed, opts)
+	return nil
+}
diff --git a/src/cmd/trace/v2/jsontrace_test.go b/src/cmd/trace/v2/jsontrace_test.go
new file mode 100644
index 0000000..65ce041
--- /dev/null
+++ b/src/cmd/trace/v2/jsontrace_test.go
@@ -0,0 +1,291 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"bytes"
+	"encoding/json"
+	tracev1 "internal/trace"
+	"io"
+	"net/http/httptest"
+	"os"
+	"path/filepath"
+	"slices"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"internal/trace/traceviewer/format"
+	"internal/trace/v2/raw"
+)
+
+func TestJSONTraceHandler(t *testing.T) {
+	testPaths, err := filepath.Glob("./testdata/*.test")
+	if err != nil {
+		t.Fatalf("discovering tests: %v", err)
+	}
+	for _, testPath := range testPaths {
+		t.Run(filepath.Base(testPath), func(t *testing.T) {
+			parsed := getTestTrace(t, testPath)
+			data := recordJSONTraceHandlerResponse(t, parsed)
+			// TODO(mknyszek): Check that there's at most one goroutine per proc at any given time.
+			checkExecutionTimes(t, data)
+			checkPlausibleHeapMetrics(t, data)
+			// TODO(mknyszek): Check for plausible thread and goroutine metrics.
+			checkMetaNamesEmitted(t, data, "process_name", []string{"STATS", "PROCS"})
+			checkMetaNamesEmitted(t, data, "thread_name", []string{"GC", "Network", "Timers", "Syscalls", "Proc 0"})
+			checkProcStartStop(t, data)
+			checkSyscalls(t, data)
+			checkNetworkUnblock(t, data)
+			// TODO(mknyszek): Check for flow events.
+		})
+	}
+}
+
+func checkSyscalls(t *testing.T, data format.Data) {
+	data = filterViewerTrace(data,
+		filterEventName("syscall"),
+		filterStackRootFunc("main.blockingSyscall"))
+	if len(data.Events) <= 1 {
+		t.Errorf("got %d events, want > 1", len(data.Events))
+	}
+	data = filterViewerTrace(data, filterBlocked("yes"))
+	if len(data.Events) != 1 {
+		t.Errorf("got %d events, want 1", len(data.Events))
+	}
+}
+
+type eventFilterFn func(*format.Event, *format.Data) bool
+
+func filterEventName(name string) eventFilterFn {
+	return func(e *format.Event, _ *format.Data) bool {
+		return e.Name == name
+	}
+}
+
+// filterGoRoutineName returns an event filter that returns true if the event's
+// goroutine name is equal to name.
+func filterGoRoutineName(name string) eventFilterFn {
+	return func(e *format.Event, _ *format.Data) bool {
+		return parseGoroutineName(e) == name
+	}
+}
+
+// parseGoroutineName returns the goroutine name from the event's name field.
+// E.g. if e.Name is "G42 main.cpu10", this returns "main.cpu10".
+func parseGoroutineName(e *format.Event) string {
+	parts := strings.SplitN(e.Name, " ", 2)
+	if len(parts) != 2 || !strings.HasPrefix(parts[0], "G") {
+		return ""
+	}
+	return parts[1]
+}
+
+// filterBlocked returns an event filter that returns true if the event's
+// "blocked" argument is equal to blocked.
+func filterBlocked(blocked string) eventFilterFn {
+	return func(e *format.Event, _ *format.Data) bool {
+		m, ok := e.Arg.(map[string]any)
+		if !ok {
+			return false
+		}
+		return m["blocked"] == blocked
+	}
+}
+
+// filterStackRootFunc returns an event filter that returns true if the function
+// at the root of the stack trace is named name.
+func filterStackRootFunc(name string) eventFilterFn {
+	return func(e *format.Event, data *format.Data) bool {
+		frames := stackFrames(data, e.Stack)
+		rootFrame := frames[len(frames)-1]
+		return strings.HasPrefix(rootFrame, name+":")
+	}
+}
+
+// filterViewerTrace returns a copy of data with only the events that pass all
+// of the given filters.
+func filterViewerTrace(data format.Data, fns ...eventFilterFn) (filtered format.Data) {
+	filtered = data
+	filtered.Events = nil
+	for _, e := range data.Events {
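+		// Keep the event only if every filter accepts it.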
+		keep := true
+		for _, fn := range fns {
+			keep = keep && fn(e, &filtered)
+		}
+		if keep {
+			filtered.Events = append(filtered.Events, e)
+		}
+	}
+	return
+}
+
+func stackFrames(data *format.Data, stackID int) (frames []string) {
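+	// Walk the parent chain from stackID, collecting frame names until no
+	// frame is found for the current ID.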
+	for {
+		frame, ok := data.Frames[strconv.Itoa(stackID)]
+		if !ok {
+			return
+		}
+		frames = append(frames, frame.Name)
+		stackID = frame.Parent
+	}
+}
+
+func checkProcStartStop(t *testing.T, data format.Data) {
+	procStarted := map[uint64]bool{}
+	for _, e := range data.Events {
+		if e.Name == "proc start" {
+			if procStarted[e.TID] == true {
+				t.Errorf("proc started twice: %d", e.TID)
+			}
+			procStarted[e.TID] = true
+		}
+		if e.Name == "proc stop" {
+			if procStarted[e.TID] == false {
+				t.Errorf("proc stopped twice: %d", e.TID)
+			}
+			procStarted[e.TID] = false
+		}
+	}
+	if got, want := len(procStarted), 8; got != want {
+		t.Errorf("wrong number of procs started/stopped got=%d want=%d", got, want)
+	}
+}
+
+func checkNetworkUnblock(t *testing.T, data format.Data) {
+	count := 0
+	var netBlockEv *format.Event
+	for _, e := range data.Events {
+		if e.TID == tracev1.NetpollP && e.Name == "unblock (network)" && e.Phase == "I" && e.Scope == "t" {
+			count++
+			netBlockEv = e
+		}
+	}
+	if netBlockEv == nil {
+		t.Error("failed to find a network unblock")
+	}
+	if count == 0 {
+		t.Errorf("found zero network block events, want at least one")
+	}
+	// TODO(mknyszek): Check for the flow of this event to some slice event of a goroutine running.
+}
+
+func checkExecutionTimes(t *testing.T, data format.Data) {
+	cpu10 := sumExecutionTime(filterViewerTrace(data, filterGoRoutineName("main.cpu10")))
+	cpu20 := sumExecutionTime(filterViewerTrace(data, filterGoRoutineName("main.cpu20")))
+	if cpu10 <= 0 || cpu20 <= 0 || cpu10 >= cpu20 {
+		t.Errorf("bad execution times: cpu10=%v, cpu20=%v", cpu10, cpu20)
+	}
+}
+
+func checkMetaNamesEmitted(t *testing.T, data format.Data, category string, want []string) {
+	t.Helper()
+	names := metaEventNameArgs(category, data)
+	for _, wantName := range want {
+		if !slices.Contains(names, wantName) {
+			t.Errorf("%s: names=%v, want %q", category, names, wantName)
+		}
+	}
+}
+
+func metaEventNameArgs(category string, data format.Data) (names []string) {
+	for _, e := range data.Events {
+		if e.Name == category && e.Phase == "M" {
+			names = append(names, e.Arg.(map[string]any)["name"].(string))
+		}
+	}
+	return
+}
+
+func checkPlausibleHeapMetrics(t *testing.T, data format.Data) {
+	hms := heapMetrics(data)
+	var nonZeroAllocated, nonZeroNextGC bool
+	for _, hm := range hms {
+		if hm.Allocated > 0 {
+			nonZeroAllocated = true
+		}
+		if hm.NextGC > 0 {
+			nonZeroNextGC = true
+		}
+	}
+
+	if !nonZeroAllocated {
+		t.Errorf("nonZeroAllocated=%v, want true", nonZeroAllocated)
+	}
+	if !nonZeroNextGC {
+		t.Errorf("nonZeroNextGC=%v, want true", nonZeroNextGC)
+	}
+}
+
+func heapMetrics(data format.Data) (metrics []format.HeapCountersArg) {
+	for _, e := range data.Events {
+		if e.Phase == "C" && e.Name == "Heap" {
+			j, _ := json.Marshal(e.Arg)
+			var metric format.HeapCountersArg
+			json.Unmarshal(j, &metric)
+			metrics = append(metrics, metric)
+		}
+	}
+	return
+}
+
+func recordJSONTraceHandlerResponse(t *testing.T, parsed *parsedTrace) format.Data {
+	h := JSONTraceHandler(parsed)
+	recorder := httptest.NewRecorder()
+	r := httptest.NewRequest("GET", "/jsontrace", nil)
+	h.ServeHTTP(recorder, r)
+
+	var data format.Data
+	if err := json.Unmarshal(recorder.Body.Bytes(), &data); err != nil {
+		t.Fatal(err)
+	}
+	return data
+}
+
+func sumExecutionTime(data format.Data) (sum time.Duration) {
+	for _, e := range data.Events {
+		sum += time.Duration(e.Dur) * time.Microsecond
+	}
+	return
+}
+
+func getTestTrace(t *testing.T, testPath string) *parsedTrace {
+	t.Helper()
+
+	// First read in the text trace and write it out as bytes.
+	f, err := os.Open(testPath)
+	if err != nil {
+		t.Fatalf("failed to open test %s: %v", testPath, err)
+	}
+	r, err := raw.NewTextReader(f)
+	if err != nil {
+		t.Fatalf("failed to read test %s: %v", testPath, err)
+	}
+	var trace bytes.Buffer
+	w, err := raw.NewWriter(&trace, r.Version())
+	if err != nil {
+		t.Fatalf("failed to write out test %s: %v", testPath, err)
+	}
+	for {
+		ev, err := r.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatalf("failed to read test %s: %v", testPath, err)
+		}
+		if err := w.WriteEvent(ev); err != nil {
+			t.Fatalf("failed to write out test %s: %v", testPath, err)
+		}
+	}
+
+	// Parse the test trace.
+	parsed, err := parseTrace(&trace)
+	if err != nil {
+		t.Fatalf("failed to parse trace: %v", err)
+	}
+	return parsed
+}
diff --git a/src/cmd/trace/v2/main.go b/src/cmd/trace/v2/main.go
new file mode 100644
index 0000000..0a60ef0
--- /dev/null
+++ b/src/cmd/trace/v2/main.go
@@ -0,0 +1,190 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"fmt"
+	"internal/trace"
+	"internal/trace/traceviewer"
+	tracev2 "internal/trace/v2"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"os"
+
+	"internal/trace/v2/raw"
+
+	"cmd/internal/browser"
+)
+
+// Main is the main function for cmd/trace v2.
+func Main(traceFile, httpAddr, pprof string, debug int) error {
+	tracef, err := os.Open(traceFile)
+	if err != nil {
+		return fmt.Errorf("failed to read trace file: %w", err)
+	}
+	defer tracef.Close()
+
+	// Debug flags.
+	switch debug {
+	case 1:
+		return debugProcessedEvents(tracef)
+	case 2:
+		return debugRawEvents(tracef)
+	}
+
+	ln, err := net.Listen("tcp", httpAddr)
+	if err != nil {
+		return fmt.Errorf("failed to create server socket: %w", err)
+	}
+	addr := "http://" + ln.Addr().String()
+
+	log.Print("Preparing trace for viewer...")
+	parsed, err := parseTrace(tracef)
+	if err != nil {
+		return err
+	}
+	// N.B. tracef not needed after this point.
+	// We might double-close, but that's fine; we ignore the error.
+	tracef.Close()
+
+	log.Print("Splitting trace for viewer...")
+	ranges, err := splitTrace(parsed)
+	if err != nil {
+		return err
+	}
+
+	log.Printf("Opening browser. Trace viewer is listening on %s", addr)
+	browser.Open(addr)
+
+	mutatorUtil := func(flags trace.UtilFlags) ([][]trace.MutatorUtil, error) {
+		return trace.MutatorUtilizationV2(parsed.events, flags), nil
+	}
+
+	mux := http.NewServeMux()
+
+	// Main endpoint.
+	mux.Handle("/", traceviewer.MainHandler([]traceviewer.View{
+		{Type: traceviewer.ViewProc, Ranges: ranges},
+		// N.B. Use the same ranges for threads. It takes a long time to compute
+		// the split a second time, but the makeup of the events is similar enough
+		// that this is still a good split.
+		{Type: traceviewer.ViewThread, Ranges: ranges},
+	}))
+
+	// Catapult handlers.
+	mux.Handle("/trace", traceviewer.TraceHandler())
+	mux.Handle("/jsontrace", JSONTraceHandler(parsed))
+	mux.Handle("/static/", traceviewer.StaticHandler())
+
+	// Goroutines handlers.
+	mux.HandleFunc("/goroutines", GoroutinesHandlerFunc(parsed.summary.Goroutines))
+	mux.HandleFunc("/goroutine", GoroutineHandler(parsed.summary.Goroutines))
+
+	// MMU handler.
+	mux.HandleFunc("/mmu", traceviewer.MMUHandlerFunc(ranges, mutatorUtil))
+
+	// Basic pprof endpoints.
+	mux.HandleFunc("/io", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofIO(), parsed)))
+	mux.HandleFunc("/block", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofBlock(), parsed)))
+	mux.HandleFunc("/syscall", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSyscall(), parsed)))
+	mux.HandleFunc("/sched", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSched(), parsed)))
+
+	// Region-based pprof endpoints.
+	mux.HandleFunc("/regionio", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofIO(), parsed)))
+	mux.HandleFunc("/regionblock", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofBlock(), parsed)))
+	mux.HandleFunc("/regionsyscall", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSyscall(), parsed)))
+	mux.HandleFunc("/regionsched", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSched(), parsed)))
+
+	// Region endpoints.
+	mux.HandleFunc("/userregions", UserRegionsHandlerFunc(parsed))
+	mux.HandleFunc("/userregion", UserRegionHandlerFunc(parsed))
+
+	// Task endpoints.
+	mux.HandleFunc("/usertasks", UserTasksHandlerFunc(parsed))
+	mux.HandleFunc("/usertask", UserTaskHandlerFunc(parsed))
+
+	err = http.Serve(ln, mux)
+	return fmt.Errorf("failed to start http server: %w", err)
+}
+
+type parsedTrace struct {
+	events  []tracev2.Event
+	summary *trace.Summary
+}
+
+func parseTrace(tr io.Reader) (*parsedTrace, error) {
+	r, err := tracev2.NewReader(tr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create trace reader: %w", err)
+	}
+	s := trace.NewSummarizer()
+	t := new(parsedTrace)
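+	// Read every event into memory, feeding each one to the summarizer as we go.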
+	for {
+		ev, err := r.ReadEvent()
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, fmt.Errorf("failed to read event: %w", err)
+		}
+		t.events = append(t.events, ev)
+		s.Event(&t.events[len(t.events)-1])
+	}
+	t.summary = s.Finalize()
+	return t, nil
+}
+
+func (t *parsedTrace) startTime() tracev2.Time {
+	return t.events[0].Time()
+}
+
+func (t *parsedTrace) endTime() tracev2.Time {
+	return t.events[len(t.events)-1].Time()
+}
+
+// splitTrace splits the trace into a number of ranges, each resulting in approximately
+// 100 MiB of JSON output (the trace viewer can hardly handle more).
+func splitTrace(parsed *parsedTrace) ([]traceviewer.Range, error) {
+	// TODO(mknyszek): Split traces by generation by doing a quick first pass over the
+	// trace to identify all the generation boundaries.
+	s, c := traceviewer.SplittingTraceConsumer(100 << 20) // 100 MiB
+	if err := generateTrace(parsed, defaultGenOpts(), c); err != nil {
+		return nil, err
+	}
+	return s.Ranges, nil
+}
+
+func debugProcessedEvents(trace io.Reader) error {
+	tr, err := tracev2.NewReader(trace)
+	if err != nil {
+		return err
+	}
+	for {
+		ev, err := tr.ReadEvent()
+		if err == io.EOF {
+			return nil
+		} else if err != nil {
+			return err
+		}
+		fmt.Println(ev.String())
+	}
+}
+
+func debugRawEvents(trace io.Reader) error {
+	rr, err := raw.NewReader(trace)
+	if err != nil {
+		return err
+	}
+	for {
+		ev, err := rr.ReadEvent()
+		if err == io.EOF {
+			return nil
+		} else if err != nil {
+			return err
+		}
+		fmt.Println(ev.String())
+	}
+}
diff --git a/src/cmd/trace/v2/pprof.go b/src/cmd/trace/v2/pprof.go
new file mode 100644
index 0000000..05895ed
--- /dev/null
+++ b/src/cmd/trace/v2/pprof.go
@@ -0,0 +1,336 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Serving of pprof-like profiles.
+
+package trace
+
+import (
+	"cmp"
+	"fmt"
+	"internal/trace"
+	"internal/trace/traceviewer"
+	tracev2 "internal/trace/v2"
+	"net/http"
+	"slices"
+	"strings"
+	"time"
+)
+
+func pprofByGoroutine(compute computePprofFunc, t *parsedTrace) traceviewer.ProfileFunc {
+	return func(r *http.Request) ([]traceviewer.ProfileRecord, error) {
+		name := r.FormValue("name")
+		gToIntervals, err := pprofMatchingGoroutines(name, t)
+		if err != nil {
+			return nil, err
+		}
+		return compute(gToIntervals, t.events)
+	}
+}
+
+func pprofByRegion(compute computePprofFunc, t *parsedTrace) traceviewer.ProfileFunc {
+	return func(r *http.Request) ([]traceviewer.ProfileRecord, error) {
+		filter, err := newRegionFilter(r)
+		if err != nil {
+			return nil, err
+		}
+		gToIntervals, err := pprofMatchingRegions(filter, t)
+		if err != nil {
+			return nil, err
+		}
+		return compute(gToIntervals, t.events)
+	}
+}
+
+// pprofMatchingGoroutines returns the IDs of goroutines with the given name and the
+// time interval each was live for. If no goroutines match, it returns an error.
+func pprofMatchingGoroutines(name string, t *parsedTrace) (map[tracev2.GoID][]interval, error) {
+	res := make(map[tracev2.GoID][]interval)
+	for _, g := range t.summary.Goroutines {
+		if g.Name != name {
+			continue
+		}
+		endTime := g.EndTime
+		if g.EndTime == 0 {
+			endTime = t.endTime() // Use the trace end time, since the goroutine is still live then.
+		}
+		res[g.ID] = []interval{{start: g.StartTime, end: endTime}}
+	}
+	if len(res) == 0 {
+		return nil, fmt.Errorf("failed to find matching goroutines for name: %s", name)
+	}
+	return res, nil
+}
+
+// pprofMatchingRegions returns the time intervals of matching regions
+// grouped by the goroutine id. If the filter is nil, returns nil without an error.
+func pprofMatchingRegions(filter *regionFilter, t *parsedTrace) (map[tracev2.GoID][]interval, error) {
+	if filter == nil {
+		return nil, nil
+	}
+
+	gToIntervals := make(map[tracev2.GoID][]interval)
+	for _, g := range t.summary.Goroutines {
+		for _, r := range g.Regions {
+			if !filter.match(t, r) {
+				continue
+			}
+			gToIntervals[g.ID] = append(gToIntervals[g.ID], regionInterval(t, r))
+		}
+	}
+
+	for g, intervals := range gToIntervals {
+		// In order to remove nested regions and
+		// consider only the outermost regions,
+		// first, we sort based on the start time
+		// and then scan through to select only the outermost regions.
+		slices.SortFunc(intervals, func(a, b interval) int {
+			if c := cmp.Compare(a.start, b.start); c != 0 {
+				return c
+			}
+			return cmp.Compare(a.end, b.end)
+		})
+		var lastTimestamp tracev2.Time
+		var n int
+		// Select only the outermost regions.
+		for _, i := range intervals {
+			if lastTimestamp <= i.start {
+				intervals[n] = i // new non-overlapping region starts.
+				lastTimestamp = i.end
+				n++
+			}
+			// Otherwise, skip because this region overlaps with a previous region.
+		}
+		gToIntervals[g] = intervals[:n]
+	}
+	return gToIntervals, nil
+}
+
+type computePprofFunc func(gToIntervals map[tracev2.GoID][]interval, events []tracev2.Event) ([]traceviewer.ProfileRecord, error)
+
+// computePprofIO returns a computePprofFunc that generates an IO pprof-like profile (time spent
+// in IO wait; currently only network blocking events).
+func computePprofIO() computePprofFunc {
+	return makeComputePprofFunc(tracev2.GoWaiting, func(reason string) bool {
+		return reason == "network"
+	})
+}
+
+// computePprofBlock returns a computePprofFunc that generates blocking pprof-like profile
+// (time spent blocked on synchronization primitives).
+func computePprofBlock() computePprofFunc {
+	return makeComputePprofFunc(tracev2.GoWaiting, func(reason string) bool {
+		return strings.Contains(reason, "chan") || strings.Contains(reason, "sync") || strings.Contains(reason, "select")
+	})
+}
+
+// computePprofSyscall returns a computePprofFunc that generates a syscall pprof-like
+// profile (time spent in syscalls).
+func computePprofSyscall() computePprofFunc {
+	return makeComputePprofFunc(tracev2.GoSyscall, func(_ string) bool {
+		return true
+	})
+}
+
+// computePprofSched returns a computePprofFunc that generates a scheduler latency pprof-like profile
+// (time between when a goroutine becomes runnable and when it is actually scheduled for execution).
+func computePprofSched() computePprofFunc {
+	return makeComputePprofFunc(tracev2.GoRunnable, func(_ string) bool {
+		return true
+	})
+}
+
+// makeComputePprofFunc returns a computePprofFunc that generates a profile of time goroutines spend
+// in a particular state for the specified reasons.
+func makeComputePprofFunc(state tracev2.GoState, trackReason func(string) bool) computePprofFunc {
+	return func(gToIntervals map[tracev2.GoID][]interval, events []tracev2.Event) ([]traceviewer.ProfileRecord, error) {
+		stacks := newStackMap()
+		tracking := make(map[tracev2.GoID]*tracev2.Event)
+		for i := range events {
+			ev := &events[i]
+
+			// Filter out any non-state-transitions and events without stacks.
+			if ev.Kind() != tracev2.EventStateTransition {
+				continue
+			}
+			stack := ev.Stack()
+			if stack == tracev2.NoStack {
+				continue
+			}
+
+			// The state transition has to apply to a goroutine.
+			st := ev.StateTransition()
+			if st.Resource.Kind != tracev2.ResourceGoroutine {
+				continue
+			}
+			id := st.Resource.Goroutine()
+			_, new := st.Goroutine()
+
+			// Check if we're tracking this goroutine.
+			startEv := tracking[id]
+			if startEv == nil {
+				// We're not. Start tracking if the new state
+				// matches what we want and the transition is
+				// for one of the reasons we care about.
+				if new == state && trackReason(st.Reason) {
+					tracking[id] = ev
+				}
+				continue
+			}
+			// We're tracking this goroutine.
+			if new == state {
+				// We're tracking this goroutine, but it's just transitioning
+				// to the same state (this is a no-op).
+				continue
+			}
+			// The goroutine has transitioned out of the state we care about,
+			// so remove it from tracking and record the stack.
+			delete(tracking, id)
+
+			overlapping := pprofOverlappingDuration(gToIntervals, id, interval{startEv.Time(), ev.Time()})
+			if overlapping > 0 {
+				rec := stacks.getOrAdd(startEv.Stack())
+				rec.Count++
+				rec.Time += overlapping
+			}
+		}
+		return stacks.profile(), nil
+	}
+}
+
+// pprofOverlappingDuration returns the overlapping duration between
+// the time intervals in gToIntervals and the sample interval.
+// If gToIntervals is nil, this simply returns the sample's duration.
+func pprofOverlappingDuration(gToIntervals map[tracev2.GoID][]interval, id tracev2.GoID, sample interval) time.Duration {
+	if gToIntervals == nil { // No filtering.
+		return sample.duration()
+	}
+	intervals := gToIntervals[id]
+	if len(intervals) == 0 {
+		return 0
+	}
+
+	var overlapping time.Duration
+	for _, i := range intervals {
+		if o := i.overlap(sample); o > 0 {
+			overlapping += o
+		}
+	}
+	return overlapping
+}
+
+// interval represents a time interval in the trace.
+type interval struct {
+	start, end tracev2.Time
+}
+
+func (i interval) duration() time.Duration {
+	return i.end.Sub(i.start)
+}
+
+func (i1 interval) overlap(i2 interval) time.Duration {
+	// Assume start1 <= end1 and start2 <= end2
+	if i1.end < i2.start || i2.end < i1.start {
+		return 0
+	}
+	if i1.start < i2.start { // choose the later one
+		i1.start = i2.start
+	}
+	if i1.end > i2.end { // choose the earlier one
+		i1.end = i2.end
+	}
+	return i1.duration()
+}
+
+// pprofMaxStack is the extent of the deduplication we're willing to do.
+//
+// Because slices aren't comparable and we want to leverage maps for deduplication,
+// we have to choose a fixed constant upper bound on the number of frames we want
+// to support. In practice this is fine because there's a maximum depth to these
+// stacks anyway.
+const pprofMaxStack = 128
+
+// stackMap is a deduplicating map from tracev2.Stack to a traceviewer.ProfileRecord.
+type stackMap struct {
+	// stacks contains the full list of stacks in the set, however
+	// it is insufficient for deduplication because tracev2.Stack
+	// equality is only optimistic. If two tracev2.Stacks are equal,
+	// then they are guaranteed to be equal in content. If they are
+	// not equal, then they might still be equal in content.
+	stacks map[tracev2.Stack]*traceviewer.ProfileRecord
+
+	// pcs is the source-of-truth for deduplication. It is a map of
+	// the actual PCs in the stack to a tracev2.Stack.
+	pcs map[[pprofMaxStack]uint64]tracev2.Stack
+}
+
+func newStackMap() *stackMap {
+	return &stackMap{
+		stacks: make(map[tracev2.Stack]*traceviewer.ProfileRecord),
+		pcs:    make(map[[pprofMaxStack]uint64]tracev2.Stack),
+	}
+}
+
+func (m *stackMap) getOrAdd(stack tracev2.Stack) *traceviewer.ProfileRecord {
+	// Fast path: check to see if this exact stack is already in the map.
+	if rec, ok := m.stacks[stack]; ok {
+		return rec
+	}
+	// Slow path: the stack may still be in the map.
+
+	// Grab the stack's PCs as the source-of-truth.
+	var pcs [pprofMaxStack]uint64
+	pcsForStack(stack, &pcs)
+
+	// Check the source-of-truth.
+	var rec *traceviewer.ProfileRecord
+	if existing, ok := m.pcs[pcs]; ok {
+		// In the map.
+		rec = m.stacks[existing]
+		delete(m.stacks, existing)
+	} else {
+		// Not in the map.
+		rec = new(traceviewer.ProfileRecord)
+	}
+	// Insert regardless of whether we have a match in m.pcs.
+	// Even if we have a match, we want to keep the newest version
+	// of that stack, since we're much more likely to see it again
+	// as we iterate through the trace linearly. Simultaneously, we
+	// are likely to never see the old stack again.
+	m.pcs[pcs] = stack
+	m.stacks[stack] = rec
+	return rec
+}
+
+func (m *stackMap) profile() []traceviewer.ProfileRecord {
+	prof := make([]traceviewer.ProfileRecord, 0, len(m.stacks))
+	for stack, record := range m.stacks {
+		rec := *record
+		i := 0
+		stack.Frames(func(frame tracev2.StackFrame) bool {
+			rec.Stack = append(rec.Stack, &trace.Frame{
+				PC:   frame.PC,
+				Fn:   frame.Func,
+				File: frame.File,
+				Line: int(frame.Line),
+			})
+			i++
+			// Cut this off at pprofMaxStack because that's as far
+			// as our deduplication goes.
+			return i < pprofMaxStack
+		})
+		prof = append(prof, rec)
+	}
+	return prof
+}
+
+// pcsForStack extracts the first pprofMaxStack PCs from stack into pcs.
+func pcsForStack(stack tracev2.Stack, pcs *[pprofMaxStack]uint64) {
+	i := 0
+	stack.Frames(func(frame tracev2.StackFrame) bool {
+		pcs[i] = frame.PC
+		i++
+		return i < len(pcs)
+	})
+}
diff --git a/src/cmd/trace/v2/procgen.go b/src/cmd/trace/v2/procgen.go
new file mode 100644
index 0000000..41e3795
--- /dev/null
+++ b/src/cmd/trace/v2/procgen.go
@@ -0,0 +1,212 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"fmt"
+	"internal/trace/traceviewer"
+	"internal/trace/traceviewer/format"
+	tracev2 "internal/trace/v2"
+)
+
+var _ generator = &procGenerator{}
+
+type procGenerator struct {
+	globalRangeGenerator
+	globalMetricGenerator
+	procRangeGenerator
+	stackSampleGenerator[tracev2.ProcID]
+	logEventGenerator[tracev2.ProcID]
+
+	gStates   map[tracev2.GoID]*gState[tracev2.ProcID]
+	inSyscall map[tracev2.ProcID]*gState[tracev2.ProcID]
+	maxProc   tracev2.ProcID
+}
+
+func newProcGenerator() *procGenerator {
+	pg := new(procGenerator)
+	rg := func(ev *tracev2.Event) tracev2.ProcID {
+		return ev.Proc()
+	}
+	pg.stackSampleGenerator.getResource = rg
+	pg.logEventGenerator.getResource = rg
+	pg.gStates = make(map[tracev2.GoID]*gState[tracev2.ProcID])
+	pg.inSyscall = make(map[tracev2.ProcID]*gState[tracev2.ProcID])
+	return pg
+}
+
+func (g *procGenerator) Sync() {
+	g.globalRangeGenerator.Sync()
+	g.procRangeGenerator.Sync()
+}
+
+func (g *procGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) {
+	l := ev.Label()
+	g.gStates[l.Resource.Goroutine()].setLabel(l.Label)
+}
+
+func (g *procGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) {
+	r := ev.Range()
+	switch ev.Kind() {
+	case tracev2.EventRangeBegin:
+		g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack())
+	case tracev2.EventRangeActive:
+		g.gStates[r.Scope.Goroutine()].rangeActive(r.Name)
+	case tracev2.EventRangeEnd:
+		gs := g.gStates[r.Scope.Goroutine()]
+		gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx)
+	}
+}
+
+func (g *procGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) {
+	st := ev.StateTransition()
+	goID := st.Resource.Goroutine()
+
+	// If we haven't seen this goroutine before, create a new
+	// gState for it.
+	gs, ok := g.gStates[goID]
+	if !ok {
+		gs = newGState[tracev2.ProcID](goID)
+		g.gStates[goID] = gs
+	}
+	// If we haven't already named this goroutine, try to name it.
+	gs.augmentName(st.Stack)
+
+	// Handle the goroutine state transition.
+	from, to := st.Goroutine()
+	if from == to {
+		// Filter out no-op events.
+		return
+	}
+	if from == tracev2.GoRunning && !to.Executing() {
+		if to == tracev2.GoWaiting {
+			// Goroutine started blocking.
+			gs.block(ev.Time(), ev.Stack(), st.Reason, ctx)
+		} else {
+			gs.stop(ev.Time(), ev.Stack(), ctx)
+		}
+	}
+	if !from.Executing() && to == tracev2.GoRunning {
+		start := ev.Time()
+		if from == tracev2.GoUndetermined {
+			// Back-date the event to the start of the trace.
+			start = ctx.startTime
+		}
+		gs.start(start, ev.Proc(), ctx)
+	}
+
+	if from == tracev2.GoWaiting {
+		// Goroutine was unblocked.
+		gs.unblock(ev.Time(), ev.Stack(), ev.Proc(), ctx)
+	}
+	if from == tracev2.GoNotExist && to == tracev2.GoRunnable {
+		// Goroutine was created.
+		gs.created(ev.Time(), ev.Proc(), ev.Stack())
+	}
+	if from == tracev2.GoSyscall && to != tracev2.GoRunning {
+		// Goroutine exited a blocked syscall.
+		gs.blockedSyscallEnd(ev.Time(), ev.Stack(), ctx)
+	}
+
+	// Handle syscalls.
+	if to == tracev2.GoSyscall && ev.Proc() != tracev2.NoProc {
+		start := ev.Time()
+		if from == tracev2.GoUndetermined {
+			// Back-date the event to the start of the trace.
+			start = ctx.startTime
+		}
+		// Write down that we've entered a syscall. Note: we might have no P here
+		// if we're in a cgo callback or this is a transition from GoUndetermined
+		// (i.e. the G has been blocked in a syscall).
+		gs.syscallBegin(start, ev.Proc(), ev.Stack())
+		g.inSyscall[ev.Proc()] = gs
+	}
+	// Check if we're exiting a non-blocking syscall.
+	_, didNotBlock := g.inSyscall[ev.Proc()]
+	if from == tracev2.GoSyscall && didNotBlock {
+		gs.syscallEnd(ev.Time(), false, ctx)
+		delete(g.inSyscall, ev.Proc())
+	}
+
+	// Note down the goroutine transition.
+	_, inMarkAssist := gs.activeRanges["GC mark assist"]
+	ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist))
+}
+
+func (g *procGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) {
+	st := ev.StateTransition()
+	proc := st.Resource.Proc()
+
+	g.maxProc = max(g.maxProc, proc)
+	viewerEv := traceviewer.InstantEvent{
+		Resource: uint64(proc),
+		Stack:    ctx.Stack(viewerFrames(ev.Stack())),
+	}
+
+	from, to := st.Proc()
+	if from == to {
+		// Filter out no-op events.
+		return
+	}
+	if to.Executing() {
+		start := ev.Time()
+		if from == tracev2.ProcUndetermined {
+			start = ctx.startTime
+		}
+		viewerEv.Name = "proc start"
+		viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())}
+		viewerEv.Ts = ctx.elapsed(start)
+		ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, 1)
+	}
+	if from.Executing() {
+		start := ev.Time()
+		viewerEv.Name = "proc stop"
+		viewerEv.Ts = ctx.elapsed(start)
+		ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, -1)
+
+		// Check if this proc was in a syscall before it stopped.
+		// This means the syscall blocked. We need to emit it to the
+		// viewer at this point because we only display the time the
+		// syscall occupied a P when the viewer is in per-P mode.
+		//
+		// TODO(mknyszek): We could do better in a per-M mode because
+		// all events have to happen on *some* thread, and in v2 traces
+		// we know what that thread is.
+		gs, ok := g.inSyscall[proc]
+		if ok {
+			// Emit syscall slice for blocked syscall.
+			gs.syscallEnd(start, true, ctx)
+			gs.stop(start, ev.Stack(), ctx)
+			delete(g.inSyscall, proc)
+		}
+	}
+	// TODO(mknyszek): Consider modeling procs differently and have them
+	// transition to and from NotExist when GOMAXPROCS changes. We can emit
+	// events for this to clearly delineate GOMAXPROCS changes.
+
+	if viewerEv.Name != "" {
+		ctx.Instant(viewerEv)
+	}
+}
+
+func (g *procGenerator) Finish(ctx *traceContext) {
+	ctx.SetResourceType("PROCS")
+
+	// Finish off ranges first. It doesn't really matter for the global ranges,
+	// but the proc ranges need to either be a subset of a goroutine slice or
+	// their own slice entirely. If the former, it needs to end first.
+	g.procRangeGenerator.Finish(ctx)
+	g.globalRangeGenerator.Finish(ctx)
+
+	// Finish off all the goroutine slices.
+	for _, gs := range g.gStates {
+		gs.finish(ctx)
+	}
+
+	// Register a name with the emitter for each proc.
+	for i := uint64(0); i <= uint64(g.maxProc); i++ {
+		ctx.Resource(i, fmt.Sprintf("Proc %v", i))
+	}
+}
diff --git a/src/cmd/trace/v2/regions.go b/src/cmd/trace/v2/regions.go
new file mode 100644
index 0000000..5d04fd2
--- /dev/null
+++ b/src/cmd/trace/v2/regions.go
@@ -0,0 +1,529 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"cmp"
+	"fmt"
+	"html/template"
+	"internal/trace"
+	"internal/trace/traceviewer"
+	tracev2 "internal/trace/v2"
+	"net/http"
+	"net/url"
+	"slices"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// UserRegionsHandlerFunc returns a HandlerFunc that reports all regions found in the trace.
+func UserRegionsHandlerFunc(t *parsedTrace) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		// Summarize all the regions.
+		summary := make(map[regionFingerprint]regionStats)
+		for _, g := range t.summary.Goroutines {
+			for _, r := range g.Regions {
+				id := fingerprintRegion(r)
+				stats, ok := summary[id]
+				if !ok {
+					stats.regionFingerprint = id
+				}
+				stats.add(t, r)
+				summary[id] = stats
+			}
+		}
+		// Sort regions by type and then by PC.
+		userRegions := make([]regionStats, 0, len(summary))
+		for _, stats := range summary {
+			userRegions = append(userRegions, stats)
+		}
+		slices.SortFunc(userRegions, func(a, b regionStats) int {
+			if c := cmp.Compare(a.Type, b.Type); c != 0 {
+				return c
+			}
+			return cmp.Compare(a.Frame.PC, b.Frame.PC)
+		})
+		// Emit table.
+		err := templUserRegionTypes.Execute(w, userRegions)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError)
+			return
+		}
+	}
+}
+
+// regionFingerprint is a way to categorize regions that goes just one step beyond the region's Type
+// by including the top stack frame.
+type regionFingerprint struct {
+	Frame tracev2.StackFrame
+	Type  string
+}
+
+func fingerprintRegion(r *trace.UserRegionSummary) regionFingerprint {
+	return regionFingerprint{
+		Frame: regionTopStackFrame(r),
+		Type:  r.Name,
+	}
+}
+
+func regionTopStackFrame(r *trace.UserRegionSummary) tracev2.StackFrame {
+	var frame tracev2.StackFrame
+	if r.Start != nil && r.Start.Stack() != tracev2.NoStack {
+		r.Start.Stack().Frames(func(f tracev2.StackFrame) bool {
+			frame = f
+			return false
+		})
+	}
+	return frame
+}
+
+type regionStats struct {
+	regionFingerprint
+	Histogram traceviewer.TimeHistogram
+}
+
+func (s *regionStats) UserRegionURL() func(min, max time.Duration) string {
+	return func(min, max time.Duration) string {
+		return fmt.Sprintf("/userregion?type=%s&pc=%x&latmin=%v&latmax=%v", template.URLQueryEscaper(s.Type), s.Frame.PC, template.URLQueryEscaper(min), template.URLQueryEscaper(max))
+	}
+}
+
+func (s *regionStats) add(t *parsedTrace, region *trace.UserRegionSummary) {
+	s.Histogram.Add(regionInterval(t, region).duration())
+}
+
+var templUserRegionTypes = template.Must(template.New("").Parse(`
+<!DOCTYPE html>
+<title>Regions</title>
+<style>` + traceviewer.CommonStyle + `
+.histoTime {
+  width: 20%;
+  white-space:nowrap;
+}
+th {
+  background-color: #050505;
+  color: #fff;
+}
+table {
+  border-collapse: collapse;
+}
+td,
+th {
+  padding-left: 8px;
+  padding-right: 8px;
+  padding-top: 4px;
+  padding-bottom: 4px;
+}
+</style>
+<body>
+<h1>Regions</h1>
+
+Below is a table containing a summary of all the user-defined regions in the trace.
+Regions are grouped by the region type and the point at which the region started.
+The rightmost column of the table contains a latency histogram for each region group.
+Note that this histogram only counts regions that began and ended within the traced
+period.
+However, the "Count" column includes all regions, including those that only started
+or ended during the traced period.
+Regions that were active through the trace period were not recorded, and so are not
+accounted for at all.
+Click on the links to explore a breakdown of time spent for each region by goroutine
+and user-defined task.
+<br>
+<br>
+
+<table border="1" sortable="1">
+<tr>
+<th>Region type</th>
+<th>Count</th>
+<th>Duration distribution (complete tasks)</th>
+</tr>
+{{range $}}
+  <tr>
+    <td><pre>{{printf "%q" .Type}}<br>{{.Frame.Func}} @ {{printf "0x%x" .Frame.PC}}<br>{{.Frame.File}}:{{.Frame.Line}}</pre></td>
+    <td><a href="/userregion?type={{.Type}}&pc={{.Frame.PC | printf "%x"}}">{{.Histogram.Count}}</a></td>
+    <td>{{.Histogram.ToHTML (.UserRegionURL)}}</td>
+  </tr>
+{{end}}
+</table>
+</body>
+</html>
+`))
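[Editor note] The template above summarizes user-defined regions recorded in the trace. For readers unfamiliar with where such regions come from, here is a minimal, illustrative sketch (not part of this change) of a program that produces them via the public runtime/trace API; the output file name, task type, and region type are made up.

package main

import (
	"context"
	"os"
	"runtime/trace"
)

func main() {
	// Hypothetical output file; view it later with `go tool trace trace.out`.
	f, err := os.Create("trace.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := trace.Start(f); err != nil {
		panic(err)
	}
	defer trace.Stop()

	// Tasks appear on the tasks page; the task type here is made up.
	ctx, task := trace.NewTask(context.Background(), "makeCoffee")
	defer task.End()

	// All regions with type "brew" started from this call site are grouped
	// into a single row of the regions table.
	trace.WithRegion(ctx, "brew", func() {
		// ... the work being measured ...
	})
}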
+
+// UserRegionHandlerFunc returns a HandlerFunc that presents the details of the selected regions.
+func UserRegionHandlerFunc(t *parsedTrace) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		// Construct the filter from the request.
+		filter, err := newRegionFilter(r)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		// Collect all the regions with their goroutines.
+		type region struct {
+			*trace.UserRegionSummary
+			Goroutine           tracev2.GoID
+			NonOverlappingStats map[string]time.Duration
+			HasRangeTime        bool
+		}
+		var regions []region
+		var maxTotal time.Duration
+		validNonOverlappingStats := make(map[string]struct{})
+		validRangeStats := make(map[string]struct{})
+		for _, g := range t.summary.Goroutines {
+			for _, r := range g.Regions {
+				if !filter.match(t, r) {
+					continue
+				}
+				nonOverlappingStats := r.NonOverlappingStats()
+				for name := range nonOverlappingStats {
+					validNonOverlappingStats[name] = struct{}{}
+				}
+				var totalRangeTime time.Duration
+				for name, dt := range r.RangeTime {
+					validRangeStats[name] = struct{}{}
+					totalRangeTime += dt
+				}
+				regions = append(regions, region{
+					UserRegionSummary:   r,
+					Goroutine:           g.ID,
+					NonOverlappingStats: nonOverlappingStats,
+					HasRangeTime:        totalRangeTime != 0,
+				})
+				if maxTotal < r.TotalTime {
+					maxTotal = r.TotalTime
+				}
+			}
+		}
+
+		// Sort.
+		sortBy := r.FormValue("sortby")
+		if _, ok := validNonOverlappingStats[sortBy]; ok {
+			slices.SortFunc(regions, func(a, b region) int {
+				return cmp.Compare(b.NonOverlappingStats[sortBy], a.NonOverlappingStats[sortBy])
+			})
+		} else {
+			// Sort by total time by default.
+			slices.SortFunc(regions, func(a, b region) int {
+				return cmp.Compare(b.TotalTime, a.TotalTime)
+			})
+		}
+
+		// Write down all the non-overlapping stats and sort them.
+		allNonOverlappingStats := make([]string, 0, len(validNonOverlappingStats))
+		for name := range validNonOverlappingStats {
+			allNonOverlappingStats = append(allNonOverlappingStats, name)
+		}
+		slices.SortFunc(allNonOverlappingStats, func(a, b string) int {
+			if a == b {
+				return 0
+			}
+			if a == "Execution time" {
+				return -1
+			}
+			if b == "Execution time" {
+				return 1
+			}
+			return cmp.Compare(a, b)
+		})
+
+		// Write down all the range stats and sort them.
+		allRangeStats := make([]string, 0, len(validRangeStats))
+		for name := range validRangeStats {
+			allRangeStats = append(allRangeStats, name)
+		}
+		sort.Strings(allRangeStats)
+
+		err = templUserRegionType.Execute(w, struct {
+			MaxTotal            time.Duration
+			Regions             []region
+			Name                string
+			Filter              *regionFilter
+			NonOverlappingStats []string
+			RangeStats          []string
+		}{
+			MaxTotal:            maxTotal,
+			Regions:             regions,
+			Name:                filter.name,
+			Filter:              filter,
+			NonOverlappingStats: allNonOverlappingStats,
+			RangeStats:          allRangeStats,
+		})
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError)
+			return
+		}
+	}
+}
+
+var templUserRegionType = template.Must(template.New("").Funcs(template.FuncMap{
+	"headerStyle": func(statName string) template.HTMLAttr {
+		return template.HTMLAttr(fmt.Sprintf("style=\"background-color: %s;\"", stat2Color(statName)))
+	},
+	"barStyle": func(statName string, dividend, divisor time.Duration) template.HTMLAttr {
+		width := "0"
+		if divisor != 0 {
+			width = fmt.Sprintf("%.2f%%", float64(dividend)/float64(divisor)*100)
+		}
+		return template.HTMLAttr(fmt.Sprintf("style=\"width: %s; background-color: %s;\"", width, stat2Color(statName)))
+	},
+	"filterParams": func(f *regionFilter) template.URL {
+		return template.URL(f.params.Encode())
+	},
+}).Parse(`
+<!DOCTYPE html>
+<title>Regions: {{.Name}}</title>
+<style>` + traceviewer.CommonStyle + `
+th {
+  background-color: #050505;
+  color: #fff;
+}
+th.link {
+  cursor: pointer;
+}
+table {
+  border-collapse: collapse;
+}
+td,
+th {
+  padding-left: 8px;
+  padding-right: 8px;
+  padding-top: 4px;
+  padding-bottom: 4px;
+}
+.details tr:hover {
+  background-color: #f2f2f2;
+}
+.details td {
+  text-align: right;
+  border: 1px solid #000;
+}
+.details td.id {
+  text-align: left;
+}
+.stacked-bar-graph {
+  width: 300px;
+  height: 10px;
+  color: #414042;
+  white-space: nowrap;
+  font-size: 5px;
+}
+.stacked-bar-graph span {
+  display: inline-block;
+  width: 100%;
+  height: 100%;
+  box-sizing: border-box;
+  float: left;
+  padding: 0;
+}
+</style>
+
+<script>
+function reloadTable(key, value) {
+  let params = new URLSearchParams(window.location.search);
+  params.set(key, value);
+  window.location.search = params.toString();
+}
+</script>
+
+<h1>Regions: {{.Name}}</h1>
+
+Table of contents
+<ul>
+	<li><a href="#summary">Summary</a></li>
+	<li><a href="#breakdown">Breakdown</a></li>
+	<li><a href="#ranges">Special ranges</a></li>
+</ul>
+
+<h3 id="summary">Summary</h3>
+
+{{ with $p := filterParams .Filter}}
+<table class="summary">
+	<tr>
+		<td>Network wait profile:</td>
+		<td> <a href="/regionio?{{$p}}">graph</a> <a href="/regionio?{{$p}}&raw=1" download="io.profile">(download)</a></td>
+	</tr>
+	<tr>
+		<td>Sync block profile:</td>
+		<td> <a href="/regionblock?{{$p}}">graph</a> <a href="/regionblock?{{$p}}&raw=1" download="block.profile">(download)</a></td>
+	</tr>
+	<tr>
+		<td>Syscall profile:</td>
+		<td> <a href="/regionsyscall?{{$p}}">graph</a> <a href="/regionsyscall?{{$p}}&raw=1" download="syscall.profile">(download)</a></td>
+	</tr>
+	<tr>
+		<td>Scheduler wait profile:</td>
+		<td> <a href="/regionsched?{{$p}}">graph</a> <a href="/regionsched?{{$p}}&raw=1" download="sched.profile">(download)</a></td>
+	</tr>
+</table>
+{{ end }}
+
+<h3 id="breakdown">Breakdown</h3>
+
+The table below breaks down where each goroutine spent its time during the
+traced period.
+All of the columns except total time are non-overlapping.
+<br>
+<br>
+
+<table class="details">
+<tr>
+<th> Goroutine </th>
+<th> Task </th>
+<th class="link" onclick="reloadTable('sortby', 'Total time')"> Total</th>
+<th></th>
+{{range $.NonOverlappingStats}}
+<th class="link" onclick="reloadTable('sortby', '{{.}}')" {{headerStyle .}}> {{.}}</th>
+{{end}}
+</tr>
+{{range .Regions}}
+	<tr>
+		<td> <a href="/trace?goid={{.Goroutine}}">{{.Goroutine}}</a> </td>
+		<td> {{if .TaskID}}<a href="/trace?focustask={{.TaskID}}">{{.TaskID}}</a>{{end}} </td>
+		<td> {{ .TotalTime.String }} </td>
+		<td>
+			<div class="stacked-bar-graph">
+			{{$Region := .}}
+			{{range $.NonOverlappingStats}}
+				{{$Time := index $Region.NonOverlappingStats .}}
+				{{if $Time}}
+					<span {{barStyle . $Time $.MaxTotal}}>&nbsp;</span>
+				{{end}}
+			{{end}}
+			</div>
+		</td>
+		{{$Region := .}}
+		{{range $.NonOverlappingStats}}
+			{{$Time := index $Region.NonOverlappingStats .}}
+			<td> {{$Time.String}}</td>
+		{{end}}
+	</tr>
+{{end}}
+</table>
+
+<h3 id="ranges">Special ranges</h3>
+
+The table below describes how much of the traced period each goroutine spent in
+certain special time ranges, for example, how much time it spent helping the GC.
+If a goroutine spent no time in any special time ranges, it is excluded from
+the table.
+Note that these times do overlap with the times from the first table.
+In general the goroutine may not have been executing during these special time
+ranges; for example, it may have blocked while trying to help the GC.
+This must be taken into account when interpreting the data.
+<br>
+<br>
+
+<table class="details">
+<tr>
+<th> Goroutine</th>
+<th> Task </th>
+<th> Total</th>
+{{range $.RangeStats}}
+<th {{headerStyle .}}> {{.}}</th>
+{{end}}
+</tr>
+{{range .Regions}}
+	{{if .HasRangeTime}}
+		<tr>
+			<td> <a href="/trace?goid={{.Goroutine}}">{{.Goroutine}}</a> </td>
+			<td> {{if .TaskID}}<a href="/trace?focustask={{.TaskID}}">{{.TaskID}}</a>{{end}} </td>
+			<td> {{ .TotalTime.String }} </td>
+			{{$Region := .}}
+			{{range $.RangeStats}}
+				{{$Time := index $Region.RangeTime .}}
+				<td> {{$Time.String}}</td>
+			{{end}}
+		</tr>
+	{{end}}
+{{end}}
+</table>
+`))
+
+// regionFilter represents a region filter specified by a user of cmd/trace.
+type regionFilter struct {
+	name   string
+	params url.Values
+	cond   []func(*parsedTrace, *trace.UserRegionSummary) bool
+}
+
+// match returns true if a region, described by its summary, matches
+// the filter.
+func (f *regionFilter) match(t *parsedTrace, s *trace.UserRegionSummary) bool {
+	for _, c := range f.cond {
+		if !c(t, s) {
+			return false
+		}
+	}
+	return true
+}
+
+// newRegionFilter creates a new region filter from URL query variables.
+func newRegionFilter(r *http.Request) (*regionFilter, error) {
+	if err := r.ParseForm(); err != nil {
+		return nil, err
+	}
+
+	var name []string
+	var conditions []func(*parsedTrace, *trace.UserRegionSummary) bool
+	filterParams := make(url.Values)
+
+	param := r.Form
+	if typ, ok := param["type"]; ok && len(typ) > 0 {
+		name = append(name, fmt.Sprintf("%q", typ[0]))
+		conditions = append(conditions, func(_ *parsedTrace, r *trace.UserRegionSummary) bool {
+			return r.Name == typ[0]
+		})
+		filterParams.Add("type", typ[0])
+	}
+	if pc, err := strconv.ParseUint(r.FormValue("pc"), 16, 64); err == nil {
+		encPC := fmt.Sprintf("0x%x", pc)
+		name = append(name, "@ "+encPC)
+		conditions = append(conditions, func(_ *parsedTrace, r *trace.UserRegionSummary) bool {
+			return regionTopStackFrame(r).PC == pc
+		})
+		filterParams.Add("pc", encPC)
+	}
+
+	if lat, err := time.ParseDuration(r.FormValue("latmin")); err == nil {
+		name = append(name, fmt.Sprintf("(latency >= %s)", lat))
+		conditions = append(conditions, func(t *parsedTrace, r *trace.UserRegionSummary) bool {
+			return regionInterval(t, r).duration() >= lat
+		})
+		filterParams.Add("latmin", lat.String())
+	}
+	if lat, err := time.ParseDuration(r.FormValue("latmax")); err == nil {
+		name = append(name, fmt.Sprintf("(latency <= %s)", lat))
+		conditions = append(conditions, func(t *parsedTrace, r *trace.UserRegionSummary) bool {
+			return regionInterval(t, r).duration() <= lat
+		})
+		filterParams.Add("latmax", lat.String())
+	}
+
+	return &regionFilter{
+		name:   strings.Join(name, " "),
+		cond:   conditions,
+		params: filterParams,
+	}, nil
+}
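[Editor note] As an illustration of how the query parameters above turn into filter conditions, here is a hypothetical same-package test sketch (not part of this change); the parameter values are made up and the expected name follows from the code above.

package trace

import (
	"net/http/httptest"
	"testing"
)

func TestRegionFilterSketch(t *testing.T) {
	// Build a request equivalent to visiting /userregion?type=brew&latmin=10ms.
	req := httptest.NewRequest("GET", "/userregion?type=brew&latmin=10ms", nil)
	f, err := newRegionFilter(req)
	if err != nil {
		t.Fatal(err)
	}
	// The human-readable name reflects both conditions.
	if got, want := f.name, `"brew" (latency >= 10ms)`; got != want {
		t.Errorf("name = %q, want %q", got, want)
	}
	// Two predicates were installed: one on region type, one on minimum latency.
	if len(f.cond) != 2 {
		t.Errorf("len(cond) = %d, want 2", len(f.cond))
	}
}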
+
+func regionInterval(t *parsedTrace, s *trace.UserRegionSummary) interval {
+	var i interval
+	if s.Start != nil {
+		i.start = s.Start.Time()
+	} else {
+		i.start = t.startTime()
+	}
+	if s.End != nil {
+		i.end = s.End.Time()
+	} else {
+		i.end = t.endTime()
+	}
+	return i
+}
diff --git a/src/cmd/trace/v2/tasks.go b/src/cmd/trace/v2/tasks.go
new file mode 100644
index 0000000..fb40811
--- /dev/null
+++ b/src/cmd/trace/v2/tasks.go
@@ -0,0 +1,477 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"bytes"
+	"cmp"
+	"fmt"
+	"html/template"
+	"internal/trace"
+	"internal/trace/traceviewer"
+	tracev2 "internal/trace/v2"
+	"log"
+	"net/http"
+	"slices"
+	"strings"
+	"time"
+)
+
+// UserTasksHandlerFunc returns a HandlerFunc that reports all tasks found in the trace.
+func UserTasksHandlerFunc(t *parsedTrace) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		tasks := t.summary.Tasks
+
+		// Summarize groups of tasks with the same name.
+		summary := make(map[string]taskStats)
+		for _, task := range tasks {
+			stats, ok := summary[task.Name]
+			if !ok {
+				stats.Type = task.Name
+			}
+			stats.add(task)
+			summary[task.Name] = stats
+		}
+
+		// Sort tasks by type.
+		userTasks := make([]taskStats, 0, len(summary))
+		for _, stats := range summary {
+			userTasks = append(userTasks, stats)
+		}
+		slices.SortFunc(userTasks, func(a, b taskStats) int {
+			return cmp.Compare(a.Type, b.Type)
+		})
+
+		// Emit table.
+		err := templUserTaskTypes.Execute(w, userTasks)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError)
+			return
+		}
+	}
+}
+
+type taskStats struct {
+	Type      string
+	Count     int                       // Complete + incomplete tasks
+	Histogram traceviewer.TimeHistogram // Complete tasks only
+}
+
+func (s *taskStats) UserTaskURL(complete bool) func(min, max time.Duration) string {
+	return func(min, max time.Duration) string {
+		return fmt.Sprintf("/usertask?type=%s&complete=%v&latmin=%v&latmax=%v", template.URLQueryEscaper(s.Type), template.URLQueryEscaper(complete), template.URLQueryEscaper(min), template.URLQueryEscaper(max))
+	}
+}
+
+func (s *taskStats) add(task *trace.UserTaskSummary) {
+	s.Count++
+	if task.Complete() {
+		s.Histogram.Add(task.End.Time().Sub(task.Start.Time()))
+	}
+}
+
+var templUserTaskTypes = template.Must(template.New("").Parse(`
+<!DOCTYPE html>
+<title>Tasks</title>
+<style>` + traceviewer.CommonStyle + `
+.histoTime {
+  width: 20%;
+  white-space:nowrap;
+}
+th {
+  background-color: #050505;
+  color: #fff;
+}
+table {
+  border-collapse: collapse;
+}
+td,
+th {
+  padding-left: 8px;
+  padding-right: 8px;
+  padding-top: 4px;
+  padding-bottom: 4px;
+}
+</style>
+<body>
+Search log text: <form action="/usertask"><input name="logtext" type="text"><input type="submit"></form><br>
+<table border="1" sortable="1">
+<tr>
+<th>Task type</th>
+<th>Count</th>
+<th>Duration distribution (complete tasks)</th>
+</tr>
+{{range $}}
+  <tr>
+    <td>{{.Type}}</td>
+    <td><a href="/usertask?type={{.Type}}">{{.Count}}</a></td>
+    <td>{{.Histogram.ToHTML (.UserTaskURL true)}}</td>
+  </tr>
+{{end}}
+</table>
+</body>
+</html>
+`))
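[Editor note] The "Search log text" form above matches task names, region names, and log categories and messages (see taskMatches further down). For context, a minimal illustrative fragment (not part of this change) showing how such log events are emitted with the public runtime/trace API; it assumes the standard "context" and "runtime/trace" imports and that tracing has been started, and all names are made up.

// Illustrative fragment only; task type, category, and message strings are made up.
func handleRequest(parent context.Context) {
	ctx, task := trace.NewTask(parent, "handleRequest")
	defer task.End()

	trace.Log(ctx, "db", "query started") // searchable by category ("db") or message text
	trace.Logf(ctx, "db", "rows=%d", 42)  // formatted variant of trace.Log
}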
+
+// UserTaskHandlerFunc returns a HandlerFunc that presents the details of the selected tasks.
+func UserTaskHandlerFunc(t *parsedTrace) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		filter, err := newTaskFilter(r)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		type event struct {
+			WhenString string
+			Elapsed    time.Duration
+			Goroutine  tracev2.GoID
+			What       string
+			// TODO: include stack trace of creation time
+		}
+		type task struct {
+			WhenString string
+			ID         tracev2.TaskID
+			Duration   time.Duration
+			Complete   bool
+			Events     []event
+			Start, End time.Duration // Time since the beginning of the trace
+			GCTime     time.Duration
+		}
+		var tasks []task
+		for _, summary := range t.summary.Tasks {
+			if !filter.match(t, summary) {
+				continue
+			}
+
+			// Collect all the events for the task.
+			var rawEvents []*tracev2.Event
+			if summary.Start != nil {
+				rawEvents = append(rawEvents, summary.Start)
+			}
+			if summary.End != nil {
+				rawEvents = append(rawEvents, summary.End)
+			}
+			rawEvents = append(rawEvents, summary.Logs...)
+			for _, r := range summary.Regions {
+				if r.Start != nil {
+					rawEvents = append(rawEvents, r.Start)
+				}
+				if r.End != nil {
+					rawEvents = append(rawEvents, r.End)
+				}
+			}
+
+			// Sort them.
+			slices.SortStableFunc(rawEvents, func(a, b *tracev2.Event) int {
+				return cmp.Compare(a.Time(), b.Time())
+			})
+
+			// Summarize them.
+			var events []event
+			last := t.startTime()
+			for _, ev := range rawEvents {
+				what := describeEvent(ev)
+				if what == "" {
+					continue
+				}
+				sinceStart := ev.Time().Sub(t.startTime())
+				events = append(events, event{
+					WhenString: fmt.Sprintf("%2.9f", sinceStart.Seconds()),
+					Elapsed:    ev.Time().Sub(last),
+					What:       what,
+					Goroutine:  primaryGoroutine(ev),
+				})
+				last = ev.Time()
+			}
+			taskSpan := taskInterval(t, summary)
+			taskStart := taskSpan.start.Sub(t.startTime())
+
+			// Produce the task summary.
+			tasks = append(tasks, task{
+				WhenString: fmt.Sprintf("%2.9fs", taskStart.Seconds()),
+				Duration:   taskSpan.duration(),
+				ID:         summary.ID,
+				Complete:   summary.Complete(),
+				Events:     events,
+				Start:      taskStart,
+				End:        taskStart + taskSpan.duration(),
+			})
+		}
+		// Sort the tasks by duration.
+		slices.SortFunc(tasks, func(a, b task) int {
+			return cmp.Compare(a.Duration, b.Duration)
+		})
+
+		// Emit table.
+		err = templUserTaskType.Execute(w, struct {
+			Name  string
+			Tasks []task
+		}{
+			Name:  filter.name,
+			Tasks: tasks,
+		})
+		if err != nil {
+			log.Printf("failed to execute template: %v", err)
+			http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError)
+			return
+		}
+	}
+}
+
+var templUserTaskType = template.Must(template.New("userTask").Funcs(template.FuncMap{
+	"elapsed":       elapsed,
+	"asMillisecond": asMillisecond,
+	"trimSpace":     strings.TrimSpace,
+}).Parse(`
+<!DOCTYPE html>
+<title>Tasks: {{.Name}}</title>
+<style>` + traceviewer.CommonStyle + `
+body {
+  font-family: sans-serif;
+}
+table#req-status td.family {
+  padding-right: 2em;
+}
+table#req-status td.active {
+  padding-right: 1em;
+}
+table#req-status td.empty {
+  color: #aaa;
+}
+table#reqs {
+  margin-top: 1em;
+  border-collapse: collapse;
+}
+table#reqs tr.first {
+  font-weight: bold;
+}
+table#reqs td {
+  font-family: monospace;
+}
+table#reqs td.when {
+  text-align: right;
+  white-space: nowrap;
+}
+table#reqs td.elapsed {
+  padding: 0 0.5em;
+  text-align: right;
+  white-space: pre;
+  width: 10em;
+}
+address {
+  font-size: smaller;
+  margin-top: 5em;
+}
+</style>
+<body>
+
+<h2>User Task: {{.Name}}</h2>
+
+Search log text: <form onsubmit="window.location.search+='&logtext='+window.logtextinput.value; return false">
+<input name="logtext" id="logtextinput" type="text"><input type="submit">
+</form><br>
+
+<table id="reqs">
+	<tr>
+		<th>When</th>
+		<th>Elapsed</th>
+		<th>Goroutine</th>
+		<th>Events</th>
+	</tr>
+	{{range $el := $.Tasks}}
+	<tr class="first">
+		<td class="when">{{$el.WhenString}}</td>
+		<td class="elapsed">{{$el.Duration}}</td>
+		<td></td>
+		<td>
+			<a href="/trace?focustask={{$el.ID}}#{{asMillisecond $el.Start}}:{{asMillisecond $el.End}}">Task {{$el.ID}}</a>
+			<a href="/trace?taskid={{$el.ID}}#{{asMillisecond $el.Start}}:{{asMillisecond $el.End}}">(goroutine view)</a>
+			({{if .Complete}}complete{{else}}incomplete{{end}})
+		</td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td class="goid">{{.Goroutine}}</td>
+		<td>{{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+</table>
+</body>
+</html>
+`))
+
+// taskFilter represents a task filter specified by a user of cmd/trace.
+type taskFilter struct {
+	name string
+	cond []func(*parsedTrace, *trace.UserTaskSummary) bool
+}
+
+// match returns true if a task, described by its summary, matches
+// the filter.
+func (f *taskFilter) match(t *parsedTrace, task *trace.UserTaskSummary) bool {
+	if t == nil {
+		return false
+	}
+	for _, c := range f.cond {
+		if !c(t, task) {
+			return false
+		}
+	}
+	return true
+}
+
+// newTaskFilter creates a new task filter from URL query variables.
+func newTaskFilter(r *http.Request) (*taskFilter, error) {
+	if err := r.ParseForm(); err != nil {
+		return nil, err
+	}
+
+	var name []string
+	var conditions []func(*parsedTrace, *trace.UserTaskSummary) bool
+
+	param := r.Form
+	if typ, ok := param["type"]; ok && len(typ) > 0 {
+		name = append(name, fmt.Sprintf("%q", typ[0]))
+		conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool {
+			return task.Name == typ[0]
+		})
+	}
+	if complete := r.FormValue("complete"); complete == "1" {
+		name = append(name, "complete")
+		conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool {
+			return task.Complete()
+		})
+	} else if complete == "0" {
+		name = append(name, "incomplete")
+		conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool {
+			return !task.Complete()
+		})
+	}
+	if lat, err := time.ParseDuration(r.FormValue("latmin")); err == nil {
+		name = append(name, fmt.Sprintf("latency >= %s", lat))
+		conditions = append(conditions, func(t *parsedTrace, task *trace.UserTaskSummary) bool {
+			return task.Complete() && taskInterval(t, task).duration() >= lat
+		})
+	}
+	if lat, err := time.ParseDuration(r.FormValue("latmax")); err == nil {
+		name = append(name, fmt.Sprintf("latency <= %s", lat))
+		conditions = append(conditions, func(t *parsedTrace, task *trace.UserTaskSummary) bool {
+			return task.Complete() && taskInterval(t, task).duration() <= lat
+		})
+	}
+	if text := r.FormValue("logtext"); text != "" {
+		name = append(name, fmt.Sprintf("log contains %q", text))
+		conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool {
+			return taskMatches(task, text)
+		})
+	}
+
+	return &taskFilter{name: strings.Join(name, ","), cond: conditions}, nil
+}
+
+func taskInterval(t *parsedTrace, s *trace.UserTaskSummary) interval {
+	var i interval
+	if s.Start != nil {
+		i.start = s.Start.Time()
+	} else {
+		i.start = t.startTime()
+	}
+	if s.End != nil {
+		i.end = s.End.Time()
+	} else {
+		i.end = t.endTime()
+	}
+	return i
+}
+
+func taskMatches(t *trace.UserTaskSummary, text string) bool {
+	matches := func(s string) bool {
+		return strings.Contains(s, text)
+	}
+	if matches(t.Name) {
+		return true
+	}
+	for _, r := range t.Regions {
+		if matches(r.Name) {
+			return true
+		}
+	}
+	for _, ev := range t.Logs {
+		log := ev.Log()
+		if matches(log.Category) {
+			return true
+		}
+		if matches(log.Message) {
+			return true
+		}
+	}
+	return false
+}
+
+func describeEvent(ev *tracev2.Event) string {
+	switch ev.Kind() {
+	case tracev2.EventStateTransition:
+		st := ev.StateTransition()
+		if st.Resource.Kind != tracev2.ResourceGoroutine {
+			return ""
+		}
+		old, new := st.Goroutine()
+		return fmt.Sprintf("%s -> %s", old, new)
+	case tracev2.EventRegionBegin:
+		return fmt.Sprintf("region %q begin", ev.Region().Type)
+	case tracev2.EventRegionEnd:
+		return fmt.Sprintf("region %q end", ev.Region().Type)
+	case tracev2.EventTaskBegin:
+		t := ev.Task()
+		return fmt.Sprintf("task %q (ID %d, parent %d) begin", t.Type, t.ID, t.Parent)
+	case tracev2.EventTaskEnd:
+		return "task end"
+	case tracev2.EventLog:
+		log := ev.Log()
+		if log.Category == "" {
+			return fmt.Sprintf("log %q", log.Message)
+		}
+		return fmt.Sprintf("log (category: %s): %q", log.Category, log.Message)
+	}
+	return ""
+}
+
+func primaryGoroutine(ev *tracev2.Event) tracev2.GoID {
+	if ev.Kind() != tracev2.EventStateTransition {
+		return ev.Goroutine()
+	}
+	st := ev.StateTransition()
+	if st.Resource.Kind != tracev2.ResourceGoroutine {
+		return tracev2.NoGoroutine
+	}
+	return st.Resource.Goroutine()
+}
+
+func elapsed(d time.Duration) string {
+	b := fmt.Appendf(nil, "%.9f", d.Seconds())
+
+	// For subsecond durations, blank all zeros before decimal point,
+	// and all zeros between the decimal point and the first non-zero digit.
+	if d < time.Second {
+		dot := bytes.IndexByte(b, '.')
+		for i := 0; i < dot; i++ {
+			b[i] = ' '
+		}
+		for i := dot + 1; i < len(b); i++ {
+			if b[i] == '0' {
+				b[i] = ' '
+			} else {
+				break
+			}
+		}
+	}
+	return string(b)
+}
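[Editor note] For illustration, a few input/output pairs for elapsed, worked out by hand from the logic above (illustrative annotations only, not part of the change; they assume standard fmt "%.9f" formatting):

// elapsed(2 * time.Second)         == "2.000000000"   (>= 1s: left unchanged)
// elapsed(123 * time.Millisecond)  == " .123000000"   (leading zero blanked)
// elapsed(1500 * time.Microsecond) == " .  1500000"   (zeros up to the first non-zero digit blanked)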
+
+func asMillisecond(d time.Duration) float64 {
+	return float64(d.Nanoseconds()) / float64(time.Millisecond)
+}
diff --git a/src/cmd/trace/v2/testdata/generate.go b/src/cmd/trace/v2/testdata/generate.go
new file mode 100644
index 0000000..c0658b2
--- /dev/null
+++ b/src/cmd/trace/v2/testdata/generate.go
@@ -0,0 +1,6 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mktests.go
+package testdata
diff --git a/src/cmd/trace/v2/testdata/go122.test b/src/cmd/trace/v2/testdata/go122.test
new file mode 100644
index 0000000..2ec9e88
--- /dev/null
+++ b/src/cmd/trace/v2/testdata/go122.test
@@ -0,0 +1,4639 @@
+Trace Go1.22
+EventBatch gen=1 m=18446744073709551615 time=7689672466239 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=1709048 time=7689670869319 size=423
+ProcStart dt=409 p=7 p_seq=1
+GoStart dt=31 g=34 g_seq=1
+GoStop dt=291990 reason_string=16 stack=50
+GoStart dt=21 g=34 g_seq=2
+GoStop dt=315853 reason_string=16 stack=50
+GoStart dt=30 g=34 g_seq=3
+GoUnblock dt=173432 g=1 g_seq=73 stack=52
+GoDestroy dt=96
+GoStart dt=22 g=1 g_seq=74
+HeapAlloc dt=79 heapalloc_value=26397576
+HeapAlloc dt=51 heapalloc_value=26405640
+GoCreate dt=62 new_g=50 new_stack=53 stack=54
+GoBlock dt=23 reason_string=12 stack=55
+GoStart dt=7 g=50 g_seq=1
+HeapAlloc dt=301 heapalloc_value=26413776
+HeapAlloc dt=30 heapalloc_value=26421680
+GoSyscallBegin dt=35 p_seq=2 stack=56
+GoSyscallEnd dt=39
+GoSyscallBegin dt=13 p_seq=3 stack=57
+GoSyscallEnd dt=16
+GoSyscallBegin dt=396 p_seq=4 stack=58
+GoSyscallEnd dt=16
+GoSyscallBegin dt=15 p_seq=5 stack=59
+GoSyscallEnd dt=14
+HeapAlloc dt=305 heapalloc_value=26429872
+HeapAlloc dt=34 heapalloc_value=26437248
+HeapAlloc dt=42 heapalloc_value=26445120
+GoSyscallBegin dt=42 p_seq=6 stack=60
+GoSyscallEnd dt=18
+GoSyscallBegin dt=10 p_seq=7 stack=61
+GoSyscallEnd dt=14
+GoSyscallBegin dt=23 p_seq=8 stack=62
+ProcStart dt=787251 p=7 p_seq=15
+GoSyscallEndBlocked dt=7
+GoStart dt=1 g=50 g_seq=2
+GoUnblock dt=48 g=1 g_seq=75 stack=65
+GoDestroy dt=143
+GoStart dt=30 g=1 g_seq=76
+HeapAlloc dt=621 heapalloc_value=26468232
+GoStop dt=656 reason_string=16 stack=66
+GoStart dt=103 g=1 g_seq=77
+HeapAlloc dt=42 heapalloc_value=26476424
+HeapAlloc dt=87 heapalloc_value=26484360
+GoSyscallBegin dt=18 p_seq=16 stack=67
+GoSyscallEnd dt=456
+GoSyscallBegin dt=41 p_seq=17 stack=68
+GoSyscallEnd dt=25
+GoSyscallBegin dt=16 p_seq=18 stack=69
+GoSyscallEnd dt=18
+HeapAlloc dt=193 heapalloc_value=26549896
+GoSyscallBegin dt=69 p_seq=19 stack=70
+GoSyscallEnd dt=227
+GoSyscallBegin dt=12 p_seq=20 stack=70
+GoSyscallEnd dt=105
+GoSyscallBegin dt=87 p_seq=21 stack=71
+GoSyscallEnd dt=48
+GoSyscallBegin dt=37 p_seq=22 stack=72
+GoSyscallEnd dt=51
+GoSyscallBegin dt=49 p_seq=23 stack=73
+GoSyscallEnd dt=158
+GoSyscallBegin dt=12 p_seq=24 stack=74
+GoSyscallEnd dt=67
+HeapAlloc dt=126 heapalloc_value=26558088
+HeapAlloc dt=30 heapalloc_value=26566160
+GoCreate dt=34 new_g=52 new_stack=75 stack=76
+HeapAlloc dt=205 heapalloc_value=26573872
+GoSyscallBegin dt=890 p_seq=25 stack=77
+GoSyscallEnd dt=1128
+GoBlock dt=96 reason_string=7 stack=80
+ProcStop dt=29
+ProcStart dt=384 p=6 p_seq=7
+GoStart dt=14 g=52 g_seq=4
+GoSyscallBegin dt=16 p_seq=8 stack=78
+ProcStart dt=160 p=5 p_seq=13
+GoSyscallEndBlocked dt=3
+GoStart dt=1 g=52 g_seq=5
+HeapAlloc dt=297 heapalloc_value=26581840
+HeapAlloc dt=31 heapalloc_value=26590032
+HeapAlloc dt=164 heapalloc_value=26598224
+GoSyscallBegin dt=34 p_seq=14 stack=88
+GoSyscallEnd dt=33
+GoSyscallBegin dt=14 p_seq=15 stack=89
+GoSyscallEnd dt=36
+GoSyscallBegin dt=12 p_seq=16 stack=90
+GoSyscallEnd dt=22
+GoSyscallBegin dt=15 p_seq=17 stack=91
+GoSyscallEnd dt=28
+HeapAlloc dt=18 heapalloc_value=26606416
+HeapAlloc dt=20 heapalloc_value=26614608
+GoBlock dt=16 reason_string=19 stack=92
+ProcStop dt=136
+ProcStart dt=17788 p=6 p_seq=12
+GoUnblock dt=41 g=1 g_seq=80 stack=0
+GoStart dt=136 g=1 g_seq=81
+GoSyscallBegin dt=14 p_seq=13 stack=86
+GoSyscallEnd dt=65
+GoSyscallBegin dt=72 p_seq=14 stack=95
+GoSyscallEnd dt=534
+HeapAlloc dt=284 heapalloc_value=26630992
+HeapAlloc dt=38 heapalloc_value=26639120
+EventBatch gen=1 m=1709047 time=7689670866279 size=202
+ProcStart dt=437 p=6 p_seq=2
+HeapAlloc dt=131 heapalloc_value=26373928
+HeapAlloc dt=368 heapalloc_value=26382120
+HeapAlloc dt=55 heapalloc_value=26390056
+GoStart dt=1030 g=36 g_seq=1
+GoStop dt=293329 reason_string=16 stack=50
+GoStart dt=25 g=36 g_seq=2
+GoStop dt=315834 reason_string=16 stack=50
+GoStart dt=24 g=36 g_seq=3
+GoDestroy dt=172079
+ProcStop dt=60
+ProcStart dt=1749 p=6 p_seq=3
+ProcStop dt=1621
+ProcStart dt=64901 p=5 p_seq=4
+ProcStop dt=24
+ProcStart dt=722061 p=5 p_seq=5
+ProcStop dt=31
+ProcStart dt=2847 p=5 p_seq=8
+ProcStop dt=20
+ProcStart dt=3166 p=7 p_seq=26
+GoUnblock dt=6 g=52 g_seq=3 stack=0
+GoUnblock dt=90 g=1 g_seq=78 stack=0
+GoStart dt=5 g=1 g_seq=79
+GoSyscallBegin dt=31 p_seq=27 stack=81
+GoSyscallEnd dt=35
+GoSyscallBegin dt=134 p_seq=28 stack=82
+GoSyscallEnd dt=29
+GoSyscallBegin dt=17 p_seq=29 stack=83
+GoSyscallEnd dt=30
+GoSyscallBegin dt=8 p_seq=30 stack=84
+GoSyscallEnd dt=19
+GoSyscallBegin dt=11 p_seq=31 stack=85
+GoSyscallEnd dt=24
+GoSyscallBegin dt=65 p_seq=32 stack=86
+GoSyscallEnd dt=57
+GoBlock dt=19 reason_string=7 stack=87
+ProcStop dt=38
+ProcStart dt=458 p=6 p_seq=11
+ProcStop dt=30
+ProcStart dt=377 p=5 p_seq=18
+ProcStop dt=23
+ProcStart dt=17141 p=5 p_seq=19
+GoUnblock dt=19 g=52 g_seq=6 stack=0
+GoStart dt=111 g=52 g_seq=7
+HeapAlloc dt=38 heapalloc_value=26622800
+GoSyscallBegin dt=36 p_seq=20 stack=93
+GoSyscallEnd dt=554
+GoSyscallBegin dt=83 p_seq=21 stack=94
+GoSyscallEnd dt=196
+GoDestroy dt=15
+ProcStop dt=37
+EventBatch gen=1 m=1709046 time=7689670697530 size=167
+ProcStart dt=236 p=5 p_seq=1
+ProcStop dt=281
+ProcStart dt=1683 p=2 p_seq=14
+ProcStop dt=33
+ProcStart dt=147800 p=2 p_seq=16
+ProcStop dt=29
+ProcStart dt=3880 p=1 p_seq=28
+ProcStop dt=30
+ProcStart dt=801175 p=5 p_seq=3
+ProcStop dt=19
+ProcStart dt=47961 p=6 p_seq=4
+ProcStop dt=15
+ProcStart dt=16716 p=6 p_seq=5
+GoUnblock dt=60 g=6 g_seq=2 stack=0
+GoStart dt=90 g=6 g_seq=3
+HeapAlloc dt=193 heapalloc_value=26453304
+GoBlock dt=29 reason_string=12 stack=15
+ProcStop dt=12
+ProcStart dt=704555 p=7 p_seq=10
+ProcStop dt=25
+ProcStart dt=16755 p=7 p_seq=11
+HeapAlloc dt=61 heapalloc_value=26461496
+GoCreate dt=72 new_g=51 new_stack=63 stack=0
+GoStart dt=98 g=51 g_seq=1
+GoSyscallBegin dt=45 p_seq=12 stack=64
+ProcStart dt=206 p=7 p_seq=14
+GoSyscallEndBlocked dt=3
+GoStart dt=1 g=51 g_seq=2
+GoDestroy dt=12
+ProcStop dt=18
+ProcStart dt=849 p=5 p_seq=6
+ProcStop dt=16
+ProcStart dt=1359 p=5 p_seq=7
+ProcStop dt=12
+ProcStart dt=2079 p=5 p_seq=9
+GoStart dt=1134 g=52 g_seq=1
+GoSyscallBegin dt=39 p_seq=10 stack=78
+ProcStart dt=232 p=5 p_seq=12
+GoSyscallEndBlocked dt=2
+GoStart dt=1 g=52 g_seq=2
+GoBlock dt=27 reason_string=7 stack=79
+ProcStop dt=20
+EventBatch gen=1 m=1709045 time=7689670544102 size=3297
+ProcStart dt=84 p=4 p_seq=5
+GoUnblock dt=91 g=1 g_seq=34 stack=0
+GoStart dt=157 g=1 g_seq=35
+HeapAlloc dt=117 heapalloc_value=8105520
+HeapAlloc dt=67 heapalloc_value=8113712
+HeapAlloc dt=36 heapalloc_value=8121904
+HeapAlloc dt=25 heapalloc_value=8130096
+HeapAlloc dt=25 heapalloc_value=8138288
+HeapAlloc dt=25 heapalloc_value=8146480
+HeapAlloc dt=21 heapalloc_value=8154672
+HeapAlloc dt=26 heapalloc_value=8162864
+HeapAlloc dt=18 heapalloc_value=8171056
+HeapAlloc dt=24 heapalloc_value=8179248
+HeapAlloc dt=15 heapalloc_value=8187440
+HeapAlloc dt=133 heapalloc_value=8195632
+HeapAlloc dt=105 heapalloc_value=8203824
+HeapAlloc dt=20 heapalloc_value=8212016
+HeapAlloc dt=18 heapalloc_value=8220208
+HeapAlloc dt=8 heapalloc_value=8228400
+HeapAlloc dt=8 heapalloc_value=8236592
+HeapAlloc dt=9 heapalloc_value=8244784
+GCMarkAssistBegin dt=27 stack=31
+HeapAlloc dt=69 heapalloc_value=8252784
+GoBlock dt=31 reason_string=10 stack=36
+ProcStop dt=156
+ProcStart dt=993 p=0 p_seq=11
+GoStart dt=192 g=1 g_seq=37
+GCMarkAssistEnd dt=12
+HeapAlloc dt=35 heapalloc_value=8746312
+GCSweepBegin dt=26 stack=42
+GCSweepEnd dt=777 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=22 heapalloc_value=8754504
+GCSweepBegin dt=47 stack=42
+GCSweepEnd dt=662 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=11 heapalloc_value=8762696
+GCSweepBegin dt=25 stack=42
+GCSweepEnd dt=712 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=39 heapalloc_value=8770888
+GCSweepBegin dt=27 stack=42
+GCSweepEnd dt=630 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=9 heapalloc_value=8779080
+GCSweepBegin dt=25 stack=42
+GCSweepEnd dt=1256 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=8 heapalloc_value=8787272
+GCSweepBegin dt=40 stack=42
+GCSweepEnd dt=529 swept_value=360448 reclaimed_value=0
+HeapAlloc dt=9 heapalloc_value=8795464
+HeapAlloc dt=24 heapalloc_value=8803656
+HeapAlloc dt=24 heapalloc_value=8811848
+HeapAlloc dt=25 heapalloc_value=8820040
+HeapAlloc dt=23 heapalloc_value=8828232
+HeapAlloc dt=18 heapalloc_value=8836424
+HeapAlloc dt=95 heapalloc_value=8844616
+HeapAlloc dt=25 heapalloc_value=8852808
+HeapAlloc dt=23 heapalloc_value=8861000
+HeapAlloc dt=19 heapalloc_value=8869192
+HeapAlloc dt=93 heapalloc_value=8877384
+HeapAlloc dt=23 heapalloc_value=8885576
+HeapAlloc dt=23 heapalloc_value=8893768
+HeapAlloc dt=23 heapalloc_value=8901960
+HeapAlloc dt=22 heapalloc_value=8910152
+HeapAlloc dt=18 heapalloc_value=8918344
+HeapAlloc dt=174 heapalloc_value=8926536
+HeapAlloc dt=31 heapalloc_value=8934728
+HeapAlloc dt=38 heapalloc_value=8942920
+HeapAlloc dt=31 heapalloc_value=8951112
+HeapAlloc dt=57 heapalloc_value=8959304
+HeapAlloc dt=58 heapalloc_value=8967496
+HeapAlloc dt=60 heapalloc_value=8975688
+HeapAlloc dt=44 heapalloc_value=8983880
+HeapAlloc dt=53 heapalloc_value=8992072
+HeapAlloc dt=57 heapalloc_value=9000264
+HeapAlloc dt=63 heapalloc_value=9008456
+HeapAlloc dt=55 heapalloc_value=9016648
+HeapAlloc dt=28 heapalloc_value=9024840
+HeapAlloc dt=12 heapalloc_value=9033032
+HeapAlloc dt=9 heapalloc_value=9041224
+HeapAlloc dt=8 heapalloc_value=9049416
+HeapAlloc dt=7 heapalloc_value=9057608
+HeapAlloc dt=8 heapalloc_value=9065800
+HeapAlloc dt=14 heapalloc_value=9073992
+HeapAlloc dt=8 heapalloc_value=9082184
+HeapAlloc dt=45 heapalloc_value=9090376
+HeapAlloc dt=10 heapalloc_value=9098568
+HeapAlloc dt=14 heapalloc_value=9106760
+HeapAlloc dt=8 heapalloc_value=9114952
+HeapAlloc dt=10 heapalloc_value=9123144
+HeapAlloc dt=15 heapalloc_value=9131336
+HeapAlloc dt=53 heapalloc_value=9139528
+HeapAlloc dt=27 heapalloc_value=9147720
+HeapAlloc dt=38 heapalloc_value=9155912
+HeapAlloc dt=33 heapalloc_value=9164104
+HeapAlloc dt=33 heapalloc_value=9172296
+HeapAlloc dt=34 heapalloc_value=9180488
+HeapAlloc dt=36 heapalloc_value=9188680
+HeapAlloc dt=39 heapalloc_value=9196872
+HeapAlloc dt=40 heapalloc_value=9205064
+HeapAlloc dt=59 heapalloc_value=9213256
+HeapAlloc dt=28 heapalloc_value=9221448
+HeapAlloc dt=22 heapalloc_value=9229640
+HeapAlloc dt=20 heapalloc_value=9237832
+HeapAlloc dt=25 heapalloc_value=9246024
+HeapAlloc dt=20 heapalloc_value=9254216
+HeapAlloc dt=16 heapalloc_value=9262408
+HeapAlloc dt=14 heapalloc_value=9270600
+HeapAlloc dt=18 heapalloc_value=9278792
+HeapAlloc dt=32 heapalloc_value=9286984
+HeapAlloc dt=21 heapalloc_value=9295176
+HeapAlloc dt=49 heapalloc_value=9303368
+HeapAlloc dt=23 heapalloc_value=9311560
+HeapAlloc dt=16 heapalloc_value=9319752
+HeapAlloc dt=15 heapalloc_value=9327944
+HeapAlloc dt=13 heapalloc_value=9336136
+HeapAlloc dt=15 heapalloc_value=9344328
+HeapAlloc dt=14 heapalloc_value=9352520
+HeapAlloc dt=16 heapalloc_value=9360712
+HeapAlloc dt=14 heapalloc_value=9368904
+HeapAlloc dt=19 heapalloc_value=9377096
+HeapAlloc dt=16 heapalloc_value=9385288
+HeapAlloc dt=15 heapalloc_value=9393480
+HeapAlloc dt=14 heapalloc_value=9401672
+HeapAlloc dt=16 heapalloc_value=9409864
+HeapAlloc dt=15 heapalloc_value=9418056
+HeapAlloc dt=15 heapalloc_value=9426248
+HeapAlloc dt=15 heapalloc_value=9434440
+HeapAlloc dt=18 heapalloc_value=9442632
+HeapAlloc dt=94 heapalloc_value=9450824
+HeapAlloc dt=17 heapalloc_value=9459016
+HeapAlloc dt=14 heapalloc_value=9467208
+HeapAlloc dt=16 heapalloc_value=9475400
+HeapAlloc dt=15 heapalloc_value=9483592
+HeapAlloc dt=15 heapalloc_value=9491784
+HeapAlloc dt=15 heapalloc_value=9499976
+HeapAlloc dt=49 heapalloc_value=9508168
+HeapAlloc dt=16 heapalloc_value=9516360
+HeapAlloc dt=14 heapalloc_value=9524552
+HeapAlloc dt=15 heapalloc_value=9532744
+HeapAlloc dt=15 heapalloc_value=9540936
+HeapAlloc dt=15 heapalloc_value=9549128
+HeapAlloc dt=17 heapalloc_value=9557320
+HeapAlloc dt=15 heapalloc_value=9565512
+HeapAlloc dt=21 heapalloc_value=9573704
+HeapAlloc dt=15 heapalloc_value=9581896
+HeapAlloc dt=16 heapalloc_value=9590088
+HeapAlloc dt=14 heapalloc_value=9598280
+HeapAlloc dt=16 heapalloc_value=9606472
+HeapAlloc dt=14 heapalloc_value=9614664
+HeapAlloc dt=16 heapalloc_value=9622856
+GoBlock dt=21 reason_string=19 stack=21
+ProcStop dt=157
+ProcStart dt=17320 p=2 p_seq=6
+ProcStop dt=15
+ProcStart dt=2411 p=0 p_seq=14
+ProcStop dt=8
+ProcStart dt=16766 p=0 p_seq=15
+GoUnblock dt=9 g=1 g_seq=40 stack=0
+GoStart dt=91 g=1 g_seq=41
+HeapAlloc dt=19 heapalloc_value=10859848
+HeapAlloc dt=9 heapalloc_value=10868040
+HeapAlloc dt=7 heapalloc_value=10876232
+HeapAlloc dt=6 heapalloc_value=10884424
+HeapAlloc dt=6 heapalloc_value=10892616
+HeapAlloc dt=6 heapalloc_value=10900808
+HeapAlloc dt=6 heapalloc_value=10909000
+HeapAlloc dt=6 heapalloc_value=10917192
+HeapAlloc dt=6 heapalloc_value=10925384
+HeapAlloc dt=6 heapalloc_value=10933576
+HeapAlloc dt=6 heapalloc_value=10941768
+HeapAlloc dt=6 heapalloc_value=10949960
+HeapAlloc dt=6 heapalloc_value=10958152
+HeapAlloc dt=5 heapalloc_value=10966344
+HeapAlloc dt=6 heapalloc_value=10974536
+HeapAlloc dt=6 heapalloc_value=10982728
+HeapAlloc dt=6 heapalloc_value=10990920
+HeapAlloc dt=6 heapalloc_value=10999112
+HeapAlloc dt=6 heapalloc_value=11007304
+HeapAlloc dt=5 heapalloc_value=11015496
+HeapAlloc dt=7 heapalloc_value=11023688
+HeapAlloc dt=6 heapalloc_value=11031880
+HeapAlloc dt=14 heapalloc_value=11040072
+HeapAlloc dt=7 heapalloc_value=11048264
+HeapAlloc dt=6 heapalloc_value=11056456
+HeapAlloc dt=6 heapalloc_value=11064648
+HeapAlloc dt=5 heapalloc_value=11072840
+HeapAlloc dt=6 heapalloc_value=11081032
+HeapAlloc dt=6 heapalloc_value=11089224
+HeapAlloc dt=6 heapalloc_value=11097416
+HeapAlloc dt=6 heapalloc_value=11105608
+HeapAlloc dt=6 heapalloc_value=11113800
+HeapAlloc dt=59 heapalloc_value=11121992
+HeapAlloc dt=9 heapalloc_value=11130184
+HeapAlloc dt=7 heapalloc_value=11138376
+HeapAlloc dt=6 heapalloc_value=11146568
+HeapAlloc dt=6 heapalloc_value=11154760
+HeapAlloc dt=5 heapalloc_value=11162952
+HeapAlloc dt=6 heapalloc_value=11171144
+HeapAlloc dt=6 heapalloc_value=11179336
+HeapAlloc dt=6 heapalloc_value=11187528
+HeapAlloc dt=5 heapalloc_value=11195720
+HeapAlloc dt=6 heapalloc_value=11203912
+HeapAlloc dt=6 heapalloc_value=11212104
+HeapAlloc dt=84 heapalloc_value=11220296
+HeapAlloc dt=7 heapalloc_value=11228488
+HeapAlloc dt=6 heapalloc_value=11236680
+HeapAlloc dt=6 heapalloc_value=11244872
+HeapAlloc dt=5 heapalloc_value=11253064
+HeapAlloc dt=6 heapalloc_value=11261256
+HeapAlloc dt=6 heapalloc_value=11269448
+HeapAlloc dt=6 heapalloc_value=11277640
+HeapAlloc dt=5 heapalloc_value=11285832
+HeapAlloc dt=6 heapalloc_value=11294024
+HeapAlloc dt=6 heapalloc_value=11302216
+HeapAlloc dt=5 heapalloc_value=11310408
+HeapAlloc dt=6 heapalloc_value=11318600
+HeapAlloc dt=38 heapalloc_value=11326792
+HeapAlloc dt=7 heapalloc_value=11334984
+HeapAlloc dt=6 heapalloc_value=11343176
+HeapAlloc dt=6 heapalloc_value=11351368
+HeapAlloc dt=5 heapalloc_value=11359560
+HeapAlloc dt=6 heapalloc_value=11367752
+HeapAlloc dt=6 heapalloc_value=11375944
+HeapAlloc dt=6 heapalloc_value=11384136
+HeapAlloc dt=6 heapalloc_value=11392328
+HeapAlloc dt=5 heapalloc_value=11400520
+HeapAlloc dt=6 heapalloc_value=11408712
+HeapAlloc dt=6 heapalloc_value=11416904
+HeapAlloc dt=5 heapalloc_value=11425096
+HeapAlloc dt=6 heapalloc_value=11433288
+HeapAlloc dt=6 heapalloc_value=11441480
+HeapAlloc dt=6 heapalloc_value=11449672
+HeapAlloc dt=5 heapalloc_value=11457864
+HeapAlloc dt=6 heapalloc_value=11466056
+HeapAlloc dt=79 heapalloc_value=11474248
+HeapAlloc dt=6 heapalloc_value=11482440
+HeapAlloc dt=5 heapalloc_value=11490632
+HeapAlloc dt=6 heapalloc_value=11498824
+HeapAlloc dt=6 heapalloc_value=11507016
+HeapAlloc dt=6 heapalloc_value=11515208
+HeapAlloc dt=5 heapalloc_value=11523400
+HeapAlloc dt=6 heapalloc_value=11531592
+HeapAlloc dt=5 heapalloc_value=11539784
+HeapAlloc dt=6 heapalloc_value=11547976
+HeapAlloc dt=6 heapalloc_value=11556168
+HeapAlloc dt=10 heapalloc_value=11564360
+HeapAlloc dt=6 heapalloc_value=11572552
+HeapAlloc dt=24 heapalloc_value=11580744
+HeapAlloc dt=7 heapalloc_value=11588936
+HeapAlloc dt=5 heapalloc_value=11597128
+HeapAlloc dt=6 heapalloc_value=11605320
+HeapAlloc dt=6 heapalloc_value=11613512
+HeapAlloc dt=6 heapalloc_value=11621704
+HeapAlloc dt=5 heapalloc_value=11629896
+HeapAlloc dt=6 heapalloc_value=11638088
+HeapAlloc dt=6 heapalloc_value=11646280
+HeapAlloc dt=5 heapalloc_value=11654472
+HeapAlloc dt=6 heapalloc_value=11662664
+HeapAlloc dt=6 heapalloc_value=11670856
+HeapAlloc dt=6 heapalloc_value=11679048
+HeapAlloc dt=5 heapalloc_value=11687240
+HeapAlloc dt=6 heapalloc_value=11695432
+HeapAlloc dt=6 heapalloc_value=11703624
+HeapAlloc dt=6 heapalloc_value=11711816
+HeapAlloc dt=5 heapalloc_value=11720008
+HeapAlloc dt=6 heapalloc_value=11728200
+HeapAlloc dt=6 heapalloc_value=11736392
+HeapAlloc dt=70 heapalloc_value=11744584
+HeapAlloc dt=8 heapalloc_value=11752776
+HeapAlloc dt=5 heapalloc_value=11760968
+HeapAlloc dt=6 heapalloc_value=11769160
+HeapAlloc dt=5 heapalloc_value=11777352
+HeapAlloc dt=6 heapalloc_value=11785544
+HeapAlloc dt=6 heapalloc_value=11793736
+HeapAlloc dt=6 heapalloc_value=11801928
+HeapAlloc dt=5 heapalloc_value=11810120
+HeapAlloc dt=6 heapalloc_value=11818312
+HeapAlloc dt=6 heapalloc_value=11826504
+HeapAlloc dt=6 heapalloc_value=11834696
+HeapAlloc dt=6 heapalloc_value=11842888
+HeapAlloc dt=5 heapalloc_value=11851080
+HeapAlloc dt=6 heapalloc_value=11859272
+HeapAlloc dt=5 heapalloc_value=11867464
+HeapAlloc dt=6 heapalloc_value=11875656
+GoBlock dt=9 reason_string=19 stack=21
+ProcStop dt=105
+ProcStart dt=17283 p=2 p_seq=8
+ProcStop dt=12
+ProcStart dt=4008 p=0 p_seq=18
+ProcStop dt=9
+ProcStart dt=16692 p=0 p_seq=19
+GoUnblock dt=9 g=1 g_seq=44 stack=0
+GoStart dt=76 g=1 g_seq=45
+HeapAlloc dt=16 heapalloc_value=13169992
+HeapAlloc dt=9 heapalloc_value=13178184
+HeapAlloc dt=7 heapalloc_value=13186376
+HeapAlloc dt=5 heapalloc_value=13194568
+HeapAlloc dt=6 heapalloc_value=13202760
+HeapAlloc dt=6 heapalloc_value=13210952
+HeapAlloc dt=5 heapalloc_value=13219144
+HeapAlloc dt=6 heapalloc_value=13227336
+HeapAlloc dt=6 heapalloc_value=13235528
+HeapAlloc dt=6 heapalloc_value=13243720
+HeapAlloc dt=6 heapalloc_value=13251912
+HeapAlloc dt=59 heapalloc_value=13260104
+HeapAlloc dt=8 heapalloc_value=13268296
+HeapAlloc dt=6 heapalloc_value=13276488
+HeapAlloc dt=5 heapalloc_value=13284680
+HeapAlloc dt=6 heapalloc_value=13292872
+HeapAlloc dt=5 heapalloc_value=13301064
+HeapAlloc dt=6 heapalloc_value=13309256
+HeapAlloc dt=5 heapalloc_value=13317448
+HeapAlloc dt=6 heapalloc_value=13325640
+HeapAlloc dt=6 heapalloc_value=13333832
+HeapAlloc dt=6 heapalloc_value=13342024
+HeapAlloc dt=5 heapalloc_value=13350216
+HeapAlloc dt=6 heapalloc_value=13358408
+HeapAlloc dt=6 heapalloc_value=13366600
+HeapAlloc dt=5 heapalloc_value=13374792
+HeapAlloc dt=6 heapalloc_value=13382984
+HeapAlloc dt=6 heapalloc_value=13391176
+HeapAlloc dt=6 heapalloc_value=13399368
+HeapAlloc dt=5 heapalloc_value=13407560
+HeapAlloc dt=8 heapalloc_value=13415752
+HeapAlloc dt=6 heapalloc_value=13423944
+HeapAlloc dt=7 heapalloc_value=13432136
+HeapAlloc dt=5 heapalloc_value=13440328
+HeapAlloc dt=6 heapalloc_value=13448520
+HeapAlloc dt=5 heapalloc_value=13456712
+HeapAlloc dt=6 heapalloc_value=13464904
+HeapAlloc dt=6 heapalloc_value=13473096
+HeapAlloc dt=6 heapalloc_value=13481288
+HeapAlloc dt=5 heapalloc_value=13489480
+HeapAlloc dt=5 heapalloc_value=13497672
+HeapAlloc dt=6 heapalloc_value=13505864
+HeapAlloc dt=5 heapalloc_value=13514056
+HeapAlloc dt=6 heapalloc_value=13522248
+HeapAlloc dt=5 heapalloc_value=13530440
+HeapAlloc dt=6 heapalloc_value=13538632
+HeapAlloc dt=5 heapalloc_value=13546824
+HeapAlloc dt=6 heapalloc_value=13555016
+HeapAlloc dt=6 heapalloc_value=13563208
+HeapAlloc dt=48 heapalloc_value=13571400
+HeapAlloc dt=7 heapalloc_value=13579592
+HeapAlloc dt=6 heapalloc_value=13587784
+HeapAlloc dt=5 heapalloc_value=13595976
+HeapAlloc dt=6 heapalloc_value=13604168
+HeapAlloc dt=5 heapalloc_value=13612360
+HeapAlloc dt=6 heapalloc_value=13620552
+HeapAlloc dt=5 heapalloc_value=13628744
+HeapAlloc dt=6 heapalloc_value=13636936
+HeapAlloc dt=5 heapalloc_value=13645128
+HeapAlloc dt=6 heapalloc_value=13653320
+HeapAlloc dt=14 heapalloc_value=13661512
+HeapAlloc dt=6 heapalloc_value=13669704
+HeapAlloc dt=6 heapalloc_value=13677896
+HeapAlloc dt=35 heapalloc_value=13686088
+HeapAlloc dt=7 heapalloc_value=13694280
+HeapAlloc dt=6 heapalloc_value=13702472
+HeapAlloc dt=6 heapalloc_value=13710664
+HeapAlloc dt=5 heapalloc_value=13718856
+HeapAlloc dt=6 heapalloc_value=13727048
+HeapAlloc dt=6 heapalloc_value=13735240
+HeapAlloc dt=5 heapalloc_value=13743432
+HeapAlloc dt=6 heapalloc_value=13751624
+HeapAlloc dt=5 heapalloc_value=13759816
+HeapAlloc dt=6 heapalloc_value=13768008
+HeapAlloc dt=5 heapalloc_value=13776200
+HeapAlloc dt=5 heapalloc_value=13784392
+HeapAlloc dt=6 heapalloc_value=13792584
+HeapAlloc dt=6 heapalloc_value=13800776
+HeapAlloc dt=5 heapalloc_value=13808968
+HeapAlloc dt=6 heapalloc_value=13817160
+HeapAlloc dt=5 heapalloc_value=13825352
+HeapAlloc dt=6 heapalloc_value=13833544
+HeapAlloc dt=5 heapalloc_value=13841736
+HeapAlloc dt=6 heapalloc_value=13849928
+HeapAlloc dt=5 heapalloc_value=13858120
+HeapAlloc dt=6 heapalloc_value=13866312
+HeapAlloc dt=5 heapalloc_value=13874504
+HeapAlloc dt=5 heapalloc_value=13882696
+HeapAlloc dt=6 heapalloc_value=13890888
+HeapAlloc dt=5 heapalloc_value=13899080
+HeapAlloc dt=6 heapalloc_value=13907272
+HeapAlloc dt=5 heapalloc_value=13915464
+HeapAlloc dt=6 heapalloc_value=13923656
+HeapAlloc dt=21 heapalloc_value=13931848
+HeapAlloc dt=6 heapalloc_value=13940040
+HeapAlloc dt=6 heapalloc_value=13948232
+HeapAlloc dt=6 heapalloc_value=13956424
+HeapAlloc dt=6 heapalloc_value=13964616
+HeapAlloc dt=5 heapalloc_value=13972808
+HeapAlloc dt=5 heapalloc_value=13981000
+HeapAlloc dt=6 heapalloc_value=13989192
+HeapAlloc dt=6 heapalloc_value=13997384
+HeapAlloc dt=5 heapalloc_value=14005576
+HeapAlloc dt=6 heapalloc_value=14013768
+HeapAlloc dt=5 heapalloc_value=14021960
+HeapAlloc dt=6 heapalloc_value=14030152
+HeapAlloc dt=6 heapalloc_value=14038344
+HeapAlloc dt=5 heapalloc_value=14046536
+HeapAlloc dt=6 heapalloc_value=14054728
+HeapAlloc dt=5 heapalloc_value=14062920
+HeapAlloc dt=6 heapalloc_value=14071112
+HeapAlloc dt=5 heapalloc_value=14079304
+HeapAlloc dt=5 heapalloc_value=14087496
+HeapAlloc dt=76 heapalloc_value=14095688
+HeapAlloc dt=35 heapalloc_value=14103880
+HeapAlloc dt=7 heapalloc_value=14112072
+HeapAlloc dt=5 heapalloc_value=14120264
+HeapAlloc dt=6 heapalloc_value=14128456
+HeapAlloc dt=7 heapalloc_value=14136648
+HeapAlloc dt=5 heapalloc_value=14144840
+HeapAlloc dt=5 heapalloc_value=14153032
+HeapAlloc dt=6 heapalloc_value=14161224
+HeapAlloc dt=5 heapalloc_value=14169416
+HeapAlloc dt=6 heapalloc_value=14177608
+HeapAlloc dt=10 heapalloc_value=14185800
+GoBlock dt=9 reason_string=19 stack=21
+ProcStop dt=108
+ProcStart dt=17296 p=2 p_seq=10
+ProcStop dt=12
+ProcStart dt=3626 p=0 p_seq=22
+ProcStop dt=8
+ProcStart dt=16715 p=0 p_seq=23
+GoUnblock dt=6 g=1 g_seq=48 stack=0
+GoStart dt=79 g=1 g_seq=49
+HeapAlloc dt=15 heapalloc_value=15553864
+HeapAlloc dt=13 heapalloc_value=15562056
+HeapAlloc dt=15 heapalloc_value=15570248
+HeapAlloc dt=7 heapalloc_value=15578440
+HeapAlloc dt=6 heapalloc_value=15586632
+HeapAlloc dt=6 heapalloc_value=15594824
+HeapAlloc dt=6 heapalloc_value=15603016
+HeapAlloc dt=6 heapalloc_value=15611208
+HeapAlloc dt=5 heapalloc_value=15619400
+HeapAlloc dt=6 heapalloc_value=15627592
+HeapAlloc dt=6 heapalloc_value=15635784
+HeapAlloc dt=5 heapalloc_value=15643976
+HeapAlloc dt=6 heapalloc_value=15652168
+HeapAlloc dt=5 heapalloc_value=15660360
+HeapAlloc dt=6 heapalloc_value=15668552
+HeapAlloc dt=6 heapalloc_value=15676744
+HeapAlloc dt=57 heapalloc_value=15684936
+HeapAlloc dt=7 heapalloc_value=15693128
+HeapAlloc dt=6 heapalloc_value=15701320
+HeapAlloc dt=6 heapalloc_value=15709512
+HeapAlloc dt=5 heapalloc_value=15717704
+HeapAlloc dt=6 heapalloc_value=15725896
+HeapAlloc dt=5 heapalloc_value=15734088
+HeapAlloc dt=6 heapalloc_value=15742280
+HeapAlloc dt=6 heapalloc_value=15750472
+HeapAlloc dt=10 heapalloc_value=15758664
+HeapAlloc dt=6 heapalloc_value=15766856
+HeapAlloc dt=6 heapalloc_value=15775048
+HeapAlloc dt=5 heapalloc_value=15783240
+HeapAlloc dt=6 heapalloc_value=15791432
+HeapAlloc dt=6 heapalloc_value=15799624
+HeapAlloc dt=6 heapalloc_value=15807816
+HeapAlloc dt=6 heapalloc_value=15816008
+HeapAlloc dt=7 heapalloc_value=15824200
+HeapAlloc dt=6 heapalloc_value=15832392
+HeapAlloc dt=6 heapalloc_value=15840584
+HeapAlloc dt=5 heapalloc_value=15848776
+HeapAlloc dt=6 heapalloc_value=15856968
+HeapAlloc dt=6 heapalloc_value=15865160
+HeapAlloc dt=6 heapalloc_value=15873352
+HeapAlloc dt=5 heapalloc_value=15881544
+HeapAlloc dt=6 heapalloc_value=15889736
+HeapAlloc dt=6 heapalloc_value=15897928
+HeapAlloc dt=5 heapalloc_value=15906120
+HeapAlloc dt=6 heapalloc_value=15914312
+HeapAlloc dt=5 heapalloc_value=15922504
+HeapAlloc dt=6 heapalloc_value=15930696
+HeapAlloc dt=5 heapalloc_value=15938888
+HeapAlloc dt=6 heapalloc_value=15947080
+HeapAlloc dt=5 heapalloc_value=15955272
+HeapAlloc dt=6 heapalloc_value=15963464
+HeapAlloc dt=6 heapalloc_value=15971656
+HeapAlloc dt=5 heapalloc_value=15979848
+HeapAlloc dt=6 heapalloc_value=15988040
+HeapAlloc dt=44 heapalloc_value=15996232
+HeapAlloc dt=8 heapalloc_value=16004424
+HeapAlloc dt=5 heapalloc_value=16012616
+HeapAlloc dt=6 heapalloc_value=16020808
+HeapAlloc dt=5 heapalloc_value=16029000
+HeapAlloc dt=6 heapalloc_value=16037192
+HeapAlloc dt=5 heapalloc_value=16045384
+HeapAlloc dt=6 heapalloc_value=16053576
+HeapAlloc dt=5 heapalloc_value=16061768
+HeapAlloc dt=6 heapalloc_value=16069960
+HeapAlloc dt=5 heapalloc_value=16078152
+HeapAlloc dt=6 heapalloc_value=16086344
+HeapAlloc dt=5 heapalloc_value=16094536
+HeapAlloc dt=6 heapalloc_value=16102728
+HeapAlloc dt=36 heapalloc_value=16110920
+HeapAlloc dt=8 heapalloc_value=16119112
+HeapAlloc dt=6 heapalloc_value=16127304
+HeapAlloc dt=5 heapalloc_value=16135496
+HeapAlloc dt=6 heapalloc_value=16143688
+HeapAlloc dt=5 heapalloc_value=16151880
+HeapAlloc dt=5 heapalloc_value=16160072
+HeapAlloc dt=5 heapalloc_value=16168264
+HeapAlloc dt=5 heapalloc_value=16176456
+HeapAlloc dt=5 heapalloc_value=16184648
+HeapAlloc dt=6 heapalloc_value=16192840
+HeapAlloc dt=5 heapalloc_value=16201032
+HeapAlloc dt=5 heapalloc_value=16209224
+HeapAlloc dt=5 heapalloc_value=16217416
+HeapAlloc dt=5 heapalloc_value=16225608
+HeapAlloc dt=6 heapalloc_value=16233800
+HeapAlloc dt=5 heapalloc_value=16241992
+HeapAlloc dt=73 heapalloc_value=16250184
+HeapAlloc dt=6 heapalloc_value=16258376
+HeapAlloc dt=5 heapalloc_value=16266568
+HeapAlloc dt=6 heapalloc_value=16274760
+HeapAlloc dt=371 heapalloc_value=16282952
+HeapAlloc dt=13 heapalloc_value=16291144
+HeapAlloc dt=7 heapalloc_value=16299336
+HeapAlloc dt=6 heapalloc_value=16307528
+HeapAlloc dt=6 heapalloc_value=16315720
+HeapAlloc dt=5 heapalloc_value=16323912
+HeapAlloc dt=6 heapalloc_value=16332104
+HeapAlloc dt=5 heapalloc_value=16340296
+HeapAlloc dt=5 heapalloc_value=16348488
+HeapAlloc dt=22 heapalloc_value=16356680
+HeapAlloc dt=6 heapalloc_value=16364872
+HeapAlloc dt=5 heapalloc_value=16373064
+HeapAlloc dt=6 heapalloc_value=16381256
+HeapAlloc dt=5 heapalloc_value=16389448
+HeapAlloc dt=5 heapalloc_value=16397640
+HeapAlloc dt=5 heapalloc_value=16405832
+HeapAlloc dt=5 heapalloc_value=16414024
+HeapAlloc dt=5 heapalloc_value=16422216
+HeapAlloc dt=6 heapalloc_value=16430408
+HeapAlloc dt=5 heapalloc_value=16438600
+HeapAlloc dt=6 heapalloc_value=16446792
+HeapAlloc dt=5 heapalloc_value=16454984
+HeapAlloc dt=5 heapalloc_value=16463176
+HeapAlloc dt=6 heapalloc_value=16471368
+HeapAlloc dt=5 heapalloc_value=16479560
+HeapAlloc dt=5 heapalloc_value=16487752
+HeapAlloc dt=5 heapalloc_value=16495944
+HeapAlloc dt=6 heapalloc_value=16504136
+HeapAlloc dt=5 heapalloc_value=16512328
+HeapAlloc dt=45 heapalloc_value=16520520
+HeapAlloc dt=38 heapalloc_value=16528712
+HeapAlloc dt=7 heapalloc_value=16536904
+HeapAlloc dt=5 heapalloc_value=16545096
+HeapAlloc dt=5 heapalloc_value=16553288
+HeapAlloc dt=6 heapalloc_value=16561480
+HeapAlloc dt=5 heapalloc_value=16569672
+GoBlock dt=11 reason_string=19 stack=21
+ProcStop dt=109
+ProcStart dt=18122 p=2 p_seq=12
+ProcStop dt=23
+ProcStart dt=803 p=1 p_seq=12
+GoUnblock dt=12 g=24 g_seq=10 stack=0
+GoStart dt=143 g=24 g_seq=11
+GoLabel dt=2 label_string=2
+GoBlock dt=3389 reason_string=15 stack=27
+ProcStop dt=2403
+ProcStart dt=161103 p=4 p_seq=8
+GoStart dt=172 g=38 g_seq=1
+GoStop dt=304901 reason_string=16 stack=50
+GoStart dt=21 g=38 g_seq=2
+GoStop dt=315468 reason_string=16 stack=50
+GoStart dt=20 g=38 g_seq=3
+GoDestroy dt=160861
+ProcStop dt=34
+EventBatch gen=1 m=1709044 time=7689670489757 size=2312
+ProcStart dt=310 p=3 p_seq=2
+ProcStop dt=39
+ProcStart dt=1386 p=3 p_seq=3
+ProcStop dt=138
+ProcStart dt=3920 p=0 p_seq=5
+GoStart dt=266 g=24 g_seq=7
+GoUnblock dt=50 g=1 g_seq=25 stack=41
+GoBlock dt=13 reason_string=15 stack=27
+GoStart dt=7 g=1 g_seq=26
+GCMarkAssistEnd dt=6
+HeapAlloc dt=29 heapalloc_value=3843824
+GCSweepBegin dt=57 stack=42
+GCSweepEnd dt=816 swept_value=827392 reclaimed_value=0
+GCSweepBegin dt=310 stack=43
+GCSweepEnd dt=63 swept_value=67108864 reclaimed_value=0
+HeapAlloc dt=23 heapalloc_value=3852016
+HeapAlloc dt=46 heapalloc_value=3860208
+HeapAlloc dt=27 heapalloc_value=3868400
+HeapAlloc dt=16 heapalloc_value=3876592
+HeapAlloc dt=109 heapalloc_value=3884784
+HeapAlloc dt=32 heapalloc_value=3892976
+HeapAlloc dt=33 heapalloc_value=3901168
+HeapAlloc dt=26 heapalloc_value=3909360
+HeapAlloc dt=35 heapalloc_value=3917552
+HeapAlloc dt=16 heapalloc_value=3925744
+HeapAlloc dt=16 heapalloc_value=3933936
+HeapAlloc dt=16 heapalloc_value=3942128
+HeapAlloc dt=68 heapalloc_value=3950320
+HeapAlloc dt=21 heapalloc_value=3958512
+HeapAlloc dt=20 heapalloc_value=3966704
+HeapAlloc dt=15 heapalloc_value=3974896
+HeapAlloc dt=24 heapalloc_value=3983088
+HeapAlloc dt=15 heapalloc_value=3991280
+HeapAlloc dt=16 heapalloc_value=3999472
+HeapAlloc dt=15 heapalloc_value=4007664
+HeapAlloc dt=18 heapalloc_value=4015856
+HeapAlloc dt=15 heapalloc_value=4024048
+HeapAlloc dt=21 heapalloc_value=4032240
+HeapAlloc dt=26 heapalloc_value=4040432
+HeapAlloc dt=28 heapalloc_value=4048624
+HeapAlloc dt=16 heapalloc_value=4056816
+HeapAlloc dt=16 heapalloc_value=4065008
+HeapAlloc dt=16 heapalloc_value=4073200
+HeapAlloc dt=17 heapalloc_value=4081392
+HeapAlloc dt=15 heapalloc_value=4089584
+HeapAlloc dt=19 heapalloc_value=4097776
+HeapAlloc dt=15 heapalloc_value=4105968
+HeapAlloc dt=20 heapalloc_value=4114160
+HeapAlloc dt=15 heapalloc_value=4122352
+HeapAlloc dt=16 heapalloc_value=4130544
+HeapAlloc dt=16 heapalloc_value=4138736
+HeapAlloc dt=17 heapalloc_value=4146928
+HeapAlloc dt=15 heapalloc_value=4155120
+HeapAlloc dt=20 heapalloc_value=4163312
+HeapAlloc dt=18 heapalloc_value=4171504
+HeapAlloc dt=23 heapalloc_value=4179696
+HeapAlloc dt=18 heapalloc_value=4187888
+HeapAlloc dt=20 heapalloc_value=4196080
+HeapAlloc dt=19 heapalloc_value=4204272
+HeapAlloc dt=19 heapalloc_value=4212464
+HeapAlloc dt=105 heapalloc_value=4220656
+HeapAlloc dt=45 heapalloc_value=4228848
+HeapAlloc dt=22 heapalloc_value=4237040
+HeapAlloc dt=23 heapalloc_value=4245232
+HeapAlloc dt=29 heapalloc_value=4253424
+HeapAlloc dt=21 heapalloc_value=4261616
+HeapAlloc dt=56 heapalloc_value=4269808
+HeapAlloc dt=21 heapalloc_value=4278000
+HeapAlloc dt=25 heapalloc_value=4286192
+HeapAlloc dt=15 heapalloc_value=4294384
+HeapAlloc dt=60 heapalloc_value=4302576
+HeapAlloc dt=40 heapalloc_value=4359920
+HeapAlloc dt=152 heapalloc_value=4368112
+HeapAlloc dt=30 heapalloc_value=4376304
+HeapAlloc dt=27 heapalloc_value=4384496
+HeapAlloc dt=20 heapalloc_value=4392688
+HeapAlloc dt=32 heapalloc_value=4400880
+HeapAlloc dt=25 heapalloc_value=4409072
+HeapAlloc dt=48 heapalloc_value=4417264
+HeapAlloc dt=58 heapalloc_value=4425456
+HeapAlloc dt=30 heapalloc_value=4433648
+HeapAlloc dt=23 heapalloc_value=4441840
+HeapAlloc dt=16 heapalloc_value=4450032
+HeapAlloc dt=17 heapalloc_value=4458224
+HeapAlloc dt=16 heapalloc_value=4466416
+HeapAlloc dt=19 heapalloc_value=4474608
+HeapAlloc dt=16 heapalloc_value=4482800
+HeapAlloc dt=15 heapalloc_value=4490992
+HeapAlloc dt=16 heapalloc_value=4499184
+HeapAlloc dt=16 heapalloc_value=4507376
+HeapAlloc dt=15 heapalloc_value=4515568
+HeapAlloc dt=16 heapalloc_value=4523760
+HeapAlloc dt=16 heapalloc_value=4531952
+HeapAlloc dt=21 heapalloc_value=4540144
+HeapAlloc dt=25 heapalloc_value=4548336
+HeapAlloc dt=22 heapalloc_value=4556528
+HeapAlloc dt=59 heapalloc_value=4564720
+HeapAlloc dt=21 heapalloc_value=4572912
+HeapAlloc dt=16 heapalloc_value=4581104
+HeapAlloc dt=16 heapalloc_value=4589296
+HeapAlloc dt=15 heapalloc_value=4597488
+HeapAlloc dt=24 heapalloc_value=4605680
+HeapAlloc dt=12 heapalloc_value=4613872
+HeapAlloc dt=8 heapalloc_value=4622064
+HeapAlloc dt=11 heapalloc_value=4630256
+HeapAlloc dt=7 heapalloc_value=4638448
+HeapAlloc dt=7 heapalloc_value=4646640
+HeapAlloc dt=7 heapalloc_value=4654832
+GoBlock dt=31 reason_string=19 stack=21
+ProcStop dt=34
+ProcStart dt=6196 p=4 p_seq=2
+ProcStop dt=26
+ProcStart dt=1578 p=0 p_seq=7
+ProcStop dt=12
+ProcStart dt=16743 p=0 p_seq=8
+GoUnblock dt=21 g=1 g_seq=29 stack=0
+GoStart dt=147 g=1 g_seq=30
+HeapAlloc dt=51 heapalloc_value=5768944
+HeapAlloc dt=22 heapalloc_value=5777136
+HeapAlloc dt=16 heapalloc_value=5785328
+HeapAlloc dt=15 heapalloc_value=5793520
+HeapAlloc dt=16 heapalloc_value=5801712
+HeapAlloc dt=18 heapalloc_value=5809904
+HeapAlloc dt=15 heapalloc_value=5818096
+HeapAlloc dt=15 heapalloc_value=5826288
+HeapAlloc dt=12 heapalloc_value=5834480
+HeapAlloc dt=12 heapalloc_value=5842672
+HeapAlloc dt=15 heapalloc_value=5850864
+HeapAlloc dt=16 heapalloc_value=5859056
+HeapAlloc dt=12 heapalloc_value=5867248
+HeapAlloc dt=12 heapalloc_value=5875440
+HeapAlloc dt=6 heapalloc_value=5883632
+HeapAlloc dt=8 heapalloc_value=5891824
+HeapAlloc dt=6 heapalloc_value=5900016
+HeapAlloc dt=6 heapalloc_value=5908208
+HeapAlloc dt=98 heapalloc_value=5916400
+HeapAlloc dt=21 heapalloc_value=5924592
+HeapAlloc dt=5 heapalloc_value=5932784
+HeapAlloc dt=7 heapalloc_value=5940976
+HeapAlloc dt=6 heapalloc_value=5949168
+HeapAlloc dt=9 heapalloc_value=5957360
+HeapAlloc dt=6 heapalloc_value=5965552
+HeapAlloc dt=5 heapalloc_value=5973744
+HeapAlloc dt=7 heapalloc_value=5981936
+HeapAlloc dt=5 heapalloc_value=5990128
+HeapAlloc dt=6 heapalloc_value=5998320
+HeapAlloc dt=5 heapalloc_value=6006512
+HeapAlloc dt=6 heapalloc_value=6014704
+HeapAlloc dt=9 heapalloc_value=6022896
+HeapAlloc dt=5 heapalloc_value=6031088
+HeapAlloc dt=6 heapalloc_value=6039280
+HeapAlloc dt=6 heapalloc_value=6047472
+HeapAlloc dt=40 heapalloc_value=6055664
+HeapAlloc dt=6 heapalloc_value=6063856
+HeapAlloc dt=35 heapalloc_value=6072048
+HeapAlloc dt=8 heapalloc_value=6080240
+HeapAlloc dt=9 heapalloc_value=6088432
+HeapAlloc dt=5 heapalloc_value=6096624
+HeapAlloc dt=6 heapalloc_value=6104816
+HeapAlloc dt=5 heapalloc_value=6113008
+HeapAlloc dt=6 heapalloc_value=6121200
+HeapAlloc dt=6 heapalloc_value=6129392
+HeapAlloc dt=6 heapalloc_value=6137584
+HeapAlloc dt=5 heapalloc_value=6145776
+HeapAlloc dt=9 heapalloc_value=6153968
+HeapAlloc dt=5 heapalloc_value=6162160
+HeapAlloc dt=6 heapalloc_value=6170352
+HeapAlloc dt=6 heapalloc_value=6178544
+HeapAlloc dt=8 heapalloc_value=6186736
+HeapAlloc dt=11 heapalloc_value=6301424
+HeapAlloc dt=2483 heapalloc_value=6309616
+HeapAlloc dt=9 heapalloc_value=6317808
+HeapAlloc dt=7 heapalloc_value=6326000
+HeapAlloc dt=11 heapalloc_value=6334192
+HeapAlloc dt=6 heapalloc_value=6342384
+HeapAlloc dt=6 heapalloc_value=6350576
+HeapAlloc dt=6 heapalloc_value=6358768
+HeapAlloc dt=7 heapalloc_value=6366960
+HeapAlloc dt=9 heapalloc_value=6375152
+HeapAlloc dt=5 heapalloc_value=6383344
+HeapAlloc dt=6 heapalloc_value=6391536
+HeapAlloc dt=6 heapalloc_value=6399728
+HeapAlloc dt=5 heapalloc_value=6407920
+HeapAlloc dt=5 heapalloc_value=6416112
+HeapAlloc dt=6 heapalloc_value=6424304
+HeapAlloc dt=9 heapalloc_value=6432496
+HeapAlloc dt=8 heapalloc_value=6440688
+HeapAlloc dt=9 heapalloc_value=6448880
+HeapAlloc dt=6 heapalloc_value=6457072
+HeapAlloc dt=13 heapalloc_value=6465264
+HeapAlloc dt=6 heapalloc_value=6473456
+HeapAlloc dt=5 heapalloc_value=6481648
+HeapAlloc dt=6 heapalloc_value=6489840
+HeapAlloc dt=5 heapalloc_value=6498032
+HeapAlloc dt=6 heapalloc_value=6506224
+HeapAlloc dt=8 heapalloc_value=6514416
+HeapAlloc dt=6 heapalloc_value=6522608
+HeapAlloc dt=6 heapalloc_value=6530800
+HeapAlloc dt=5 heapalloc_value=6538992
+HeapAlloc dt=81 heapalloc_value=6547184
+HeapAlloc dt=7 heapalloc_value=6555376
+HeapAlloc dt=6 heapalloc_value=6563568
+HeapAlloc dt=5 heapalloc_value=6571760
+HeapAlloc dt=20 heapalloc_value=6579952
+HeapAlloc dt=6 heapalloc_value=6588144
+HeapAlloc dt=56 heapalloc_value=6596336
+HeapAlloc dt=7 heapalloc_value=6604528
+HeapAlloc dt=7 heapalloc_value=6612720
+HeapAlloc dt=6 heapalloc_value=6620912
+HeapAlloc dt=5 heapalloc_value=6629104
+HeapAlloc dt=5 heapalloc_value=6637296
+HeapAlloc dt=6 heapalloc_value=6645488
+HeapAlloc dt=5 heapalloc_value=6653680
+HeapAlloc dt=5 heapalloc_value=6661872
+HeapAlloc dt=6 heapalloc_value=6670064
+HeapAlloc dt=5 heapalloc_value=6678256
+HeapAlloc dt=5 heapalloc_value=6686448
+HeapAlloc dt=6 heapalloc_value=6694640
+HeapAlloc dt=5 heapalloc_value=6702832
+HeapAlloc dt=5 heapalloc_value=6711024
+HeapAlloc dt=6 heapalloc_value=6719216
+HeapAlloc dt=9 heapalloc_value=6727408
+HeapAlloc dt=7 heapalloc_value=6735600
+HeapAlloc dt=5 heapalloc_value=6743792
+HeapAlloc dt=5 heapalloc_value=6751984
+HeapAlloc dt=6 heapalloc_value=6760176
+HeapAlloc dt=5 heapalloc_value=6768368
+HeapAlloc dt=5 heapalloc_value=6776560
+HeapAlloc dt=6 heapalloc_value=6784752
+HeapAlloc dt=5 heapalloc_value=6792944
+HeapAlloc dt=6 heapalloc_value=6801136
+HeapAlloc dt=36 heapalloc_value=6809328
+HeapAlloc dt=7 heapalloc_value=6817520
+HeapAlloc dt=5 heapalloc_value=6825712
+HeapAlloc dt=6 heapalloc_value=6833904
+HeapAlloc dt=6 heapalloc_value=6842096
+HeapAlloc dt=5 heapalloc_value=6850288
+HeapAlloc dt=6 heapalloc_value=6858480
+HeapAlloc dt=5 heapalloc_value=6866672
+HeapAlloc dt=5 heapalloc_value=6874864
+HeapAlloc dt=5 heapalloc_value=6883056
+HeapAlloc dt=5 heapalloc_value=6891248
+HeapAlloc dt=6 heapalloc_value=6899440
+GoBlock dt=14 reason_string=19 stack=21
+ProcStop dt=198
+ProcStart dt=2996 p=0 p_seq=10
+GoUnblock dt=12 g=1 g_seq=31 stack=0
+GoStart dt=135 g=1 g_seq=32
+HeapAlloc dt=25 heapalloc_value=6907632
+HeapAlloc dt=9 heapalloc_value=6915824
+HeapAlloc dt=6 heapalloc_value=6924016
+HeapAlloc dt=5 heapalloc_value=6932208
+HeapAlloc dt=6 heapalloc_value=6940400
+HeapAlloc dt=5 heapalloc_value=6948592
+HeapAlloc dt=5 heapalloc_value=6956784
+HeapAlloc dt=6 heapalloc_value=6964976
+HeapAlloc dt=5 heapalloc_value=6973168
+HeapAlloc dt=6 heapalloc_value=6981360
+HeapAlloc dt=5 heapalloc_value=6989552
+HeapAlloc dt=5 heapalloc_value=6997744
+HeapAlloc dt=5 heapalloc_value=7005936
+HeapAlloc dt=97 heapalloc_value=7014128
+HeapAlloc dt=7 heapalloc_value=7022320
+HeapAlloc dt=5 heapalloc_value=7030512
+HeapAlloc dt=6 heapalloc_value=7038704
+HeapAlloc dt=5 heapalloc_value=7046896
+HeapAlloc dt=5 heapalloc_value=7055088
+HeapAlloc dt=5 heapalloc_value=7063280
+HeapAlloc dt=50 heapalloc_value=7071472
+HeapAlloc dt=7 heapalloc_value=7079664
+HeapAlloc dt=6 heapalloc_value=7087856
+HeapAlloc dt=5 heapalloc_value=7096048
+HeapAlloc dt=20 heapalloc_value=7104240
+HeapAlloc dt=6 heapalloc_value=7112432
+HeapAlloc dt=8 heapalloc_value=7120624
+HeapAlloc dt=6 heapalloc_value=7128816
+HeapAlloc dt=5 heapalloc_value=7137008
+HeapAlloc dt=6 heapalloc_value=7145200
+HeapAlloc dt=8 heapalloc_value=7153392
+HeapAlloc dt=6 heapalloc_value=7161584
+HeapAlloc dt=5 heapalloc_value=7169776
+HeapAlloc dt=5 heapalloc_value=7177968
+HeapAlloc dt=6 heapalloc_value=7186160
+HeapAlloc dt=5 heapalloc_value=7194352
+HeapAlloc dt=5 heapalloc_value=7202544
+HeapAlloc dt=6 heapalloc_value=7210736
+HeapAlloc dt=5 heapalloc_value=7218928
+HeapAlloc dt=35 heapalloc_value=7227120
+HeapAlloc dt=10 heapalloc_value=7235312
+HeapAlloc dt=5 heapalloc_value=7243504
+HeapAlloc dt=5 heapalloc_value=7251696
+HeapAlloc dt=6 heapalloc_value=7259888
+HeapAlloc dt=5 heapalloc_value=7268080
+HeapAlloc dt=5 heapalloc_value=7276272
+HeapAlloc dt=5 heapalloc_value=7284464
+HeapAlloc dt=6 heapalloc_value=7292656
+HeapAlloc dt=6 heapalloc_value=7300848
+HeapAlloc dt=5 heapalloc_value=7309040
+HeapAlloc dt=13 heapalloc_value=7317232
+HeapAlloc dt=5 heapalloc_value=7325424
+HeapAlloc dt=6 heapalloc_value=7333616
+HeapAlloc dt=8 heapalloc_value=7341808
+HeapAlloc dt=5 heapalloc_value=7350000
+HeapAlloc dt=9 heapalloc_value=7358192
+HeapAlloc dt=5 heapalloc_value=7366384
+HeapAlloc dt=6 heapalloc_value=7374576
+HeapAlloc dt=5 heapalloc_value=7382768
+HeapAlloc dt=5 heapalloc_value=7390960
+HeapAlloc dt=5 heapalloc_value=7399152
+HeapAlloc dt=6 heapalloc_value=7407344
+HeapAlloc dt=5 heapalloc_value=7415536
+HeapAlloc dt=5 heapalloc_value=7423728
+HeapAlloc dt=6 heapalloc_value=7431920
+HeapAlloc dt=5 heapalloc_value=7440112
+HeapAlloc dt=5 heapalloc_value=7448304
+HeapAlloc dt=5 heapalloc_value=7456496
+HeapAlloc dt=6 heapalloc_value=7464688
+HeapAlloc dt=5 heapalloc_value=7472880
+HeapAlloc dt=5 heapalloc_value=7481072
+HeapAlloc dt=5 heapalloc_value=7489264
+HeapAlloc dt=6 heapalloc_value=7497456
+HeapAlloc dt=5 heapalloc_value=7505648
+HeapAlloc dt=5 heapalloc_value=7513840
+HeapAlloc dt=5 heapalloc_value=7522032
+HeapAlloc dt=5 heapalloc_value=7530224
+HeapAlloc dt=6 heapalloc_value=7538416
+HeapAlloc dt=5 heapalloc_value=7546608
+HeapAlloc dt=6 heapalloc_value=7554800
+HeapAlloc dt=5 heapalloc_value=7562992
+HeapAlloc dt=5 heapalloc_value=7571184
+HeapAlloc dt=6 heapalloc_value=7579376
+HeapAlloc dt=5 heapalloc_value=7587568
+HeapAlloc dt=45 heapalloc_value=7595760
+HeapAlloc dt=7 heapalloc_value=7603952
+HeapAlloc dt=5 heapalloc_value=7612144
+HeapAlloc dt=6 heapalloc_value=7620336
+HeapAlloc dt=376 heapalloc_value=7628528
+HeapAlloc dt=13 heapalloc_value=7636720
+HeapAlloc dt=7 heapalloc_value=7644912
+HeapAlloc dt=35 heapalloc_value=7653104
+GCBegin dt=23 gc_seq=3 stack=22
+STWBegin dt=73 kind_string=22 stack=28
+GoUnblock dt=258 g=4 g_seq=5 stack=29
+ProcsChange dt=80 procs_value=8 stack=30
+STWEnd dt=37
+GCMarkAssistBegin dt=96 stack=31
+GCMarkAssistEnd dt=4606
+HeapAlloc dt=187 heapalloc_value=7671600
+HeapAlloc dt=26 heapalloc_value=7679792
+HeapAlloc dt=17 heapalloc_value=7687984
+HeapAlloc dt=29 heapalloc_value=7696176
+HeapAlloc dt=16 heapalloc_value=7704368
+HeapAlloc dt=12 heapalloc_value=7712560
+HeapAlloc dt=48 heapalloc_value=7868208
+GoStop dt=4635 reason_string=16 stack=45
+GoStart dt=48 g=1 g_seq=33
+HeapAlloc dt=27 heapalloc_value=7884336
+HeapAlloc dt=11 heapalloc_value=7892528
+HeapAlloc dt=8 heapalloc_value=7900720
+HeapAlloc dt=12 heapalloc_value=7908912
+HeapAlloc dt=9 heapalloc_value=7917104
+HeapAlloc dt=9 heapalloc_value=7925296
+HeapAlloc dt=9 heapalloc_value=7933488
+HeapAlloc dt=8 heapalloc_value=7941680
+HeapAlloc dt=10 heapalloc_value=7949872
+HeapAlloc dt=8 heapalloc_value=7958064
+HeapAlloc dt=10 heapalloc_value=7966256
+HeapAlloc dt=12 heapalloc_value=7974448
+HeapAlloc dt=8 heapalloc_value=7982640
+HeapAlloc dt=8 heapalloc_value=7990832
+HeapAlloc dt=9 heapalloc_value=7999024
+HeapAlloc dt=8 heapalloc_value=8007216
+HeapAlloc dt=54 heapalloc_value=8015408
+HeapAlloc dt=10 heapalloc_value=8023600
+HeapAlloc dt=8 heapalloc_value=8031792
+HeapAlloc dt=9 heapalloc_value=8039984
+HeapAlloc dt=8 heapalloc_value=8048176
+HeapAlloc dt=9 heapalloc_value=8056368
+HeapAlloc dt=8 heapalloc_value=8064560
+HeapAlloc dt=9 heapalloc_value=8072752
+HeapAlloc dt=8 heapalloc_value=8080944
+HeapAlloc dt=9 heapalloc_value=8089136
+HeapAlloc dt=8 heapalloc_value=8097328
+GoBlock dt=20 reason_string=19 stack=21
+ProcStop dt=35
+ProcStart dt=147580 p=3 p_seq=6
+GoStart dt=144 g=4 g_seq=10
+GoBlock dt=38 reason_string=15 stack=32
+GoUnblock dt=41 g=25 g_seq=4 stack=0
+GoStart dt=6 g=25 g_seq=5
+GoLabel dt=1 label_string=4
+GoBlock dt=5825 reason_string=15 stack=27
+ProcStop dt=299
+ProcStart dt=158874 p=3 p_seq=7
+GoStart dt=231 g=35 g_seq=1
+GoStop dt=305629 reason_string=16 stack=51
+GoStart dt=79 g=35 g_seq=2
+GoStop dt=315206 reason_string=16 stack=50
+GoStart dt=36 g=35 g_seq=3
+GoDestroy dt=160337
+ProcStop dt=68
+EventBatch gen=1 m=1709042 time=7689670149213 size=4550
+ProcStart dt=287 p=2 p_seq=1
+GoStart dt=328 g=7 g_seq=1
+HeapAlloc dt=7006 heapalloc_value=2793472
+HeapAlloc dt=74 heapalloc_value=2801664
+GoBlock dt=275 reason_string=12 stack=18
+ProcStop dt=34
+ProcStart dt=327698 p=0 p_seq=3
+ProcStop dt=7
+ProcStart dt=2124 p=2 p_seq=3
+GoUnblock dt=32 g=24 g_seq=2 stack=0
+HeapAlloc dt=302 heapalloc_value=4038656
+HeapAlloc dt=104 heapalloc_value=4046848
+HeapAlloc dt=52 heapalloc_value=4055040
+GoStart dt=1147 g=24 g_seq=3
+GoLabel dt=5 label_string=2
+GoBlock dt=128 reason_string=15 stack=27
+GoUnblock dt=72 g=1 g_seq=21 stack=0
+GoStart dt=11 g=1 g_seq=22
+HeapAlloc dt=44 heapalloc_value=4063232
+HeapAlloc dt=43 heapalloc_value=4071424
+HeapAlloc dt=28 heapalloc_value=4079616
+HeapAlloc dt=24 heapalloc_value=4087808
+HeapAlloc dt=84 heapalloc_value=4096000
+HeapAlloc dt=25 heapalloc_value=4104192
+HeapAlloc dt=20 heapalloc_value=4112384
+HeapAlloc dt=24 heapalloc_value=4120576
+HeapAlloc dt=20 heapalloc_value=4128768
+HeapAlloc dt=19 heapalloc_value=4136960
+HeapAlloc dt=24 heapalloc_value=4145152
+HeapAlloc dt=20 heapalloc_value=4153344
+HeapAlloc dt=19 heapalloc_value=4161536
+HeapAlloc dt=20 heapalloc_value=4169728
+HeapAlloc dt=24 heapalloc_value=4177920
+HeapAlloc dt=33 heapalloc_value=4186112
+HeapAlloc dt=26 heapalloc_value=4194304
+HeapAlloc dt=31 heapalloc_value=4235264
+HeapAlloc dt=363 heapalloc_value=4243456
+HeapAlloc dt=61 heapalloc_value=4251648
+HeapAlloc dt=14 heapalloc_value=4259840
+HeapAlloc dt=12 heapalloc_value=4268032
+HeapAlloc dt=9 heapalloc_value=4276224
+HeapAlloc dt=9 heapalloc_value=4284416
+HeapAlloc dt=9 heapalloc_value=4292608
+HeapAlloc dt=8 heapalloc_value=4300800
+HeapAlloc dt=162 heapalloc_value=4308992
+HeapAlloc dt=14 heapalloc_value=4317184
+HeapAlloc dt=8 heapalloc_value=4325376
+HeapAlloc dt=53 heapalloc_value=4333568
+HeapAlloc dt=10 heapalloc_value=4341760
+HeapAlloc dt=16 heapalloc_value=4349952
+HeapAlloc dt=14 heapalloc_value=4358144
+GCMarkAssistBegin dt=27 stack=31
+GCMarkAssistEnd dt=18
+GCMarkAssistBegin dt=4 stack=31
+GoBlock dt=198 reason_string=13 stack=33
+ProcStop dt=19
+ProcStart dt=387 p=2 p_seq=4
+GoUnblock dt=265 g=24 g_seq=4 stack=0
+GoStart dt=69 g=24 g_seq=5
+GoLabel dt=1 label_string=2
+GoBlock dt=132 reason_string=10 stack=35
+GoStart dt=20 g=1 g_seq=24
+GCMarkAssistEnd dt=2
+HeapAlloc dt=13 heapalloc_value=4366336
+GCMarkAssistBegin dt=7 stack=31
+GoBlock dt=25 reason_string=10 stack=36
+ProcStop dt=24
+ProcStart dt=4689 p=1 p_seq=7
+ProcStop dt=23
+ProcStart dt=36183 p=1 p_seq=8
+ProcStop dt=24
+ProcStart dt=1076 p=1 p_seq=9
+GoUnblock dt=12 g=22 g_seq=4 stack=0
+GoStart dt=118 g=22 g_seq=5
+GoLabel dt=1 label_string=2
+GoBlock dt=7117 reason_string=15 stack=27
+ProcStop dt=41
+ProcStart dt=150567 p=4 p_seq=7
+GoUnblock dt=41 g=23 g_seq=4 stack=0
+HeapAlloc dt=108 heapalloc_value=17163592
+HeapAlloc dt=61 heapalloc_value=17166856
+HeapAlloc dt=2994 heapalloc_value=17608712
+GoStart dt=1008 g=23 g_seq=5
+GoLabel dt=4 label_string=4
+GoBlock dt=40 reason_string=15 stack=27
+GoUnblock dt=49 g=1 g_seq=52 stack=0
+GoStart dt=7 g=1 g_seq=53
+HeapAlloc dt=30 heapalloc_value=17616904
+HeapAlloc dt=52 heapalloc_value=17625096
+HeapAlloc dt=35 heapalloc_value=17633288
+HeapAlloc dt=27 heapalloc_value=17641480
+HeapAlloc dt=28 heapalloc_value=17649672
+HeapAlloc dt=87 heapalloc_value=17657864
+HeapAlloc dt=32 heapalloc_value=17666056
+HeapAlloc dt=24 heapalloc_value=17674248
+HeapAlloc dt=22 heapalloc_value=17682440
+HeapAlloc dt=16 heapalloc_value=17690632
+HeapAlloc dt=15 heapalloc_value=17698824
+HeapAlloc dt=20 heapalloc_value=17707016
+HeapAlloc dt=19 heapalloc_value=17715208
+HeapAlloc dt=15 heapalloc_value=17723400
+HeapAlloc dt=18 heapalloc_value=17731592
+HeapAlloc dt=20 heapalloc_value=17739784
+HeapAlloc dt=15 heapalloc_value=17747976
+HeapAlloc dt=17 heapalloc_value=17756168
+HeapAlloc dt=67 heapalloc_value=17764360
+HeapAlloc dt=28 heapalloc_value=17772552
+HeapAlloc dt=22 heapalloc_value=17780744
+HeapAlloc dt=19 heapalloc_value=17788936
+HeapAlloc dt=22 heapalloc_value=17797128
+HeapAlloc dt=19 heapalloc_value=17805320
+HeapAlloc dt=19 heapalloc_value=17813512
+HeapAlloc dt=19 heapalloc_value=17821704
+HeapAlloc dt=15 heapalloc_value=17829896
+HeapAlloc dt=21 heapalloc_value=17838088
+HeapAlloc dt=19 heapalloc_value=17846280
+HeapAlloc dt=16 heapalloc_value=17854472
+HeapAlloc dt=14 heapalloc_value=17862664
+HeapAlloc dt=18 heapalloc_value=17870856
+HeapAlloc dt=58 heapalloc_value=17879048
+HeapAlloc dt=19 heapalloc_value=17887240
+HeapAlloc dt=15 heapalloc_value=17895432
+HeapAlloc dt=19 heapalloc_value=17903624
+HeapAlloc dt=21 heapalloc_value=17911816
+HeapAlloc dt=17 heapalloc_value=17920008
+HeapAlloc dt=19 heapalloc_value=17928200
+HeapAlloc dt=19 heapalloc_value=17936392
+HeapAlloc dt=16 heapalloc_value=17944584
+HeapAlloc dt=15 heapalloc_value=17952776
+HeapAlloc dt=15 heapalloc_value=17960968
+HeapAlloc dt=19 heapalloc_value=17969160
+HeapAlloc dt=16 heapalloc_value=17977352
+HeapAlloc dt=16 heapalloc_value=17985544
+HeapAlloc dt=16 heapalloc_value=17993736
+HeapAlloc dt=19 heapalloc_value=18001928
+HeapAlloc dt=15 heapalloc_value=18010120
+HeapAlloc dt=16 heapalloc_value=18018312
+HeapAlloc dt=15 heapalloc_value=18026504
+HeapAlloc dt=19 heapalloc_value=18034696
+HeapAlloc dt=14 heapalloc_value=18042888
+HeapAlloc dt=17 heapalloc_value=18051080
+HeapAlloc dt=18 heapalloc_value=18059272
+HeapAlloc dt=20 heapalloc_value=18067464
+HeapAlloc dt=17 heapalloc_value=18075656
+HeapAlloc dt=125 heapalloc_value=18083848
+GoStop dt=20 reason_string=16 stack=46
+GoUnblock dt=288 g=25 g_seq=6 stack=0
+GoStart dt=7 g=25 g_seq=7
+GoLabel dt=1 label_string=2
+HeapAlloc dt=255 heapalloc_value=18091752
+GoBlock dt=30 reason_string=10 stack=35
+GoStart dt=5 g=1 g_seq=54
+HeapAlloc dt=25 heapalloc_value=18099944
+HeapAlloc dt=19 heapalloc_value=18108136
+HeapAlloc dt=45 heapalloc_value=18116328
+HeapAlloc dt=9 heapalloc_value=18124520
+HeapAlloc dt=80 heapalloc_value=18132712
+HeapAlloc dt=11 heapalloc_value=18140904
+HeapAlloc dt=6 heapalloc_value=18149096
+HeapAlloc dt=7 heapalloc_value=18157288
+HeapAlloc dt=7 heapalloc_value=18165480
+HeapAlloc dt=12 heapalloc_value=18173672
+HeapAlloc dt=11 heapalloc_value=18181864
+HeapAlloc dt=11 heapalloc_value=18190056
+HeapAlloc dt=7 heapalloc_value=18198248
+HeapAlloc dt=62 heapalloc_value=18206440
+HeapAlloc dt=8 heapalloc_value=18214632
+HeapAlloc dt=7 heapalloc_value=18222824
+HeapAlloc dt=6 heapalloc_value=18231016
+HeapAlloc dt=7 heapalloc_value=18239208
+HeapAlloc dt=11 heapalloc_value=18247400
+HeapAlloc dt=6 heapalloc_value=18255592
+HeapAlloc dt=7 heapalloc_value=18263784
+HeapAlloc dt=11 heapalloc_value=18271976
+HeapAlloc dt=6 heapalloc_value=18280168
+HeapAlloc dt=7 heapalloc_value=18288360
+HeapAlloc dt=7 heapalloc_value=18296552
+HeapAlloc dt=6 heapalloc_value=18304744
+HeapAlloc dt=10 heapalloc_value=18312936
+HeapAlloc dt=7 heapalloc_value=18321128
+HeapAlloc dt=7 heapalloc_value=18329320
+HeapAlloc dt=7 heapalloc_value=18337512
+HeapAlloc dt=31 heapalloc_value=18345704
+HeapAlloc dt=17 heapalloc_value=18353896
+HeapAlloc dt=7 heapalloc_value=18362088
+HeapAlloc dt=13 heapalloc_value=18370280
+HeapAlloc dt=6 heapalloc_value=18378472
+HeapAlloc dt=7 heapalloc_value=18386664
+HeapAlloc dt=7 heapalloc_value=18394856
+HeapAlloc dt=11 heapalloc_value=18403048
+HeapAlloc dt=6 heapalloc_value=18411240
+HeapAlloc dt=7 heapalloc_value=18419432
+HeapAlloc dt=7 heapalloc_value=18427624
+HeapAlloc dt=6 heapalloc_value=18435816
+HeapAlloc dt=7 heapalloc_value=18444008
+HeapAlloc dt=7 heapalloc_value=18452200
+GCMarkAssistBegin dt=13 stack=31
+GoBlock dt=35 reason_string=10 stack=36
+ProcStop dt=22
+ProcStart dt=936 p=1 p_seq=13
+GoStart dt=212 g=25 g_seq=9
+GoUnblock dt=31 g=1 g_seq=55 stack=41
+GoBlock dt=7 reason_string=15 stack=27
+GoStart dt=13 g=1 g_seq=56
+GCMarkAssistEnd dt=4
+HeapAlloc dt=30 heapalloc_value=16971400
+GCSweepBegin dt=41 stack=42
+GCSweepEnd dt=310 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=23 heapalloc_value=16979592
+GCSweepBegin dt=30 stack=42
+GCSweepEnd dt=934 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=80 heapalloc_value=16987784
+GCSweepBegin dt=43 stack=42
+GCSweepEnd dt=1671 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=6 heapalloc_value=16995976
+GCSweepBegin dt=41 stack=42
+GCSweepEnd dt=1680 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=13 heapalloc_value=17004168
+GCSweepBegin dt=44 stack=42
+GCSweepEnd dt=1555 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=12 heapalloc_value=17012360
+GCSweepBegin dt=46 stack=42
+GCSweepEnd dt=1914 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=16 heapalloc_value=17020552
+GCSweepBegin dt=47 stack=42
+GCSweepEnd dt=1545 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=10 heapalloc_value=17028744
+GCSweepBegin dt=37 stack=42
+GCSweepEnd dt=1763 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=9 heapalloc_value=17036936
+GCSweepBegin dt=37 stack=42
+GCSweepEnd dt=1712 swept_value=827392 reclaimed_value=0
+HeapAlloc dt=18 heapalloc_value=17045128
+GCSweepBegin dt=34 stack=42
+GCSweepEnd dt=1009 swept_value=466944 reclaimed_value=0
+HeapAlloc dt=9 heapalloc_value=17053320
+HeapAlloc dt=28 heapalloc_value=17061512
+HeapAlloc dt=25 heapalloc_value=17069704
+HeapAlloc dt=34 heapalloc_value=17077896
+HeapAlloc dt=39 heapalloc_value=17086088
+HeapAlloc dt=72 heapalloc_value=17094280
+HeapAlloc dt=32 heapalloc_value=17102472
+HeapAlloc dt=16 heapalloc_value=17110664
+HeapAlloc dt=15 heapalloc_value=17118856
+HeapAlloc dt=14 heapalloc_value=17127048
+HeapAlloc dt=16 heapalloc_value=17135240
+HeapAlloc dt=15 heapalloc_value=17143432
+HeapAlloc dt=19 heapalloc_value=17151624
+HeapAlloc dt=15 heapalloc_value=17159816
+HeapAlloc dt=54 heapalloc_value=17585800
+GoBlock dt=482 reason_string=19 stack=21
+ProcStop dt=210
+ProcStart dt=17621 p=0 p_seq=26
+ProcStop dt=24
+ProcStart dt=5194 p=1 p_seq=16
+ProcStop dt=17
+ProcStart dt=16724 p=1 p_seq=17
+GoUnblock dt=27 g=1 g_seq=59 stack=0
+GoStart dt=127 g=1 g_seq=60
+HeapAlloc dt=55 heapalloc_value=18617992
+HeapAlloc dt=64 heapalloc_value=18626184
+HeapAlloc dt=65 heapalloc_value=18634376
+HeapAlloc dt=61 heapalloc_value=18642568
+HeapAlloc dt=54 heapalloc_value=18650760
+HeapAlloc dt=66 heapalloc_value=18658952
+HeapAlloc dt=67 heapalloc_value=18667144
+HeapAlloc dt=54 heapalloc_value=18675336
+HeapAlloc dt=57 heapalloc_value=18683528
+HeapAlloc dt=45 heapalloc_value=18691720
+HeapAlloc dt=84 heapalloc_value=18699912
+HeapAlloc dt=26 heapalloc_value=18708104
+HeapAlloc dt=18 heapalloc_value=18716296
+HeapAlloc dt=15 heapalloc_value=18724488
+HeapAlloc dt=24 heapalloc_value=18732680
+HeapAlloc dt=26 heapalloc_value=18740872
+HeapAlloc dt=21 heapalloc_value=18749064
+HeapAlloc dt=15 heapalloc_value=18757256
+HeapAlloc dt=31 heapalloc_value=18765448
+HeapAlloc dt=7 heapalloc_value=18773640
+HeapAlloc dt=7 heapalloc_value=18781832
+HeapAlloc dt=113 heapalloc_value=18790024
+HeapAlloc dt=8 heapalloc_value=18798216
+HeapAlloc dt=6 heapalloc_value=18806408
+HeapAlloc dt=7 heapalloc_value=18814600
+HeapAlloc dt=6 heapalloc_value=18822792
+HeapAlloc dt=6 heapalloc_value=18830984
+HeapAlloc dt=7 heapalloc_value=18839176
+HeapAlloc dt=6 heapalloc_value=18847368
+HeapAlloc dt=6 heapalloc_value=18855560
+HeapAlloc dt=6 heapalloc_value=18863752
+HeapAlloc dt=6 heapalloc_value=18871944
+HeapAlloc dt=6 heapalloc_value=18880136
+HeapAlloc dt=6 heapalloc_value=18888328
+HeapAlloc dt=6 heapalloc_value=18896520
+HeapAlloc dt=7 heapalloc_value=18904712
+HeapAlloc dt=6 heapalloc_value=18912904
+HeapAlloc dt=38 heapalloc_value=18921096
+HeapAlloc dt=7 heapalloc_value=18929288
+HeapAlloc dt=6 heapalloc_value=18937480
+HeapAlloc dt=14 heapalloc_value=18945672
+HeapAlloc dt=6 heapalloc_value=18953864
+HeapAlloc dt=6 heapalloc_value=18962056
+HeapAlloc dt=6 heapalloc_value=18970248
+HeapAlloc dt=7 heapalloc_value=18978440
+HeapAlloc dt=6 heapalloc_value=18986632
+HeapAlloc dt=6 heapalloc_value=18994824
+HeapAlloc dt=13 heapalloc_value=19003016
+HeapAlloc dt=7 heapalloc_value=19011208
+HeapAlloc dt=6 heapalloc_value=19019400
+HeapAlloc dt=6 heapalloc_value=19027592
+HeapAlloc dt=6 heapalloc_value=19035784
+HeapAlloc dt=7 heapalloc_value=19043976
+HeapAlloc dt=6 heapalloc_value=19052168
+HeapAlloc dt=6 heapalloc_value=19060360
+HeapAlloc dt=6 heapalloc_value=19068552
+HeapAlloc dt=6 heapalloc_value=19076744
+HeapAlloc dt=7 heapalloc_value=19084936
+HeapAlloc dt=6 heapalloc_value=19093128
+HeapAlloc dt=6 heapalloc_value=19101320
+HeapAlloc dt=6 heapalloc_value=19109512
+HeapAlloc dt=7 heapalloc_value=19117704
+HeapAlloc dt=5 heapalloc_value=19125896
+HeapAlloc dt=7 heapalloc_value=19134088
+HeapAlloc dt=6 heapalloc_value=19142280
+HeapAlloc dt=6 heapalloc_value=19150472
+HeapAlloc dt=6 heapalloc_value=19158664
+HeapAlloc dt=6 heapalloc_value=19166856
+HeapAlloc dt=7 heapalloc_value=19175048
+HeapAlloc dt=6 heapalloc_value=19183240
+HeapAlloc dt=6 heapalloc_value=19191432
+HeapAlloc dt=6 heapalloc_value=19199624
+HeapAlloc dt=7 heapalloc_value=19207816
+HeapAlloc dt=6 heapalloc_value=19216008
+HeapAlloc dt=6 heapalloc_value=19224200
+HeapAlloc dt=6 heapalloc_value=19232392
+HeapAlloc dt=7 heapalloc_value=19240584
+HeapAlloc dt=6 heapalloc_value=19248776
+HeapAlloc dt=6 heapalloc_value=19256968
+HeapAlloc dt=6 heapalloc_value=19265160
+HeapAlloc dt=6 heapalloc_value=19273352
+HeapAlloc dt=6 heapalloc_value=19281544
+HeapAlloc dt=6 heapalloc_value=19289736
+HeapAlloc dt=7 heapalloc_value=19297928
+HeapAlloc dt=6 heapalloc_value=19306120
+HeapAlloc dt=62 heapalloc_value=19314312
+HeapAlloc dt=7 heapalloc_value=19322504
+HeapAlloc dt=6 heapalloc_value=19330696
+HeapAlloc dt=6 heapalloc_value=19338888
+HeapAlloc dt=35 heapalloc_value=19347080
+HeapAlloc dt=7 heapalloc_value=19355272
+HeapAlloc dt=6 heapalloc_value=19363464
+HeapAlloc dt=6 heapalloc_value=19371656
+HeapAlloc dt=6 heapalloc_value=19379848
+HeapAlloc dt=6 heapalloc_value=19388040
+HeapAlloc dt=6 heapalloc_value=19396232
+HeapAlloc dt=7 heapalloc_value=19404424
+HeapAlloc dt=6 heapalloc_value=19412616
+HeapAlloc dt=7 heapalloc_value=19420808
+HeapAlloc dt=6 heapalloc_value=19429000
+HeapAlloc dt=6 heapalloc_value=19437192
+HeapAlloc dt=6 heapalloc_value=19445384
+HeapAlloc dt=7 heapalloc_value=19453576
+HeapAlloc dt=6 heapalloc_value=19461768
+HeapAlloc dt=10 heapalloc_value=19469960
+HeapAlloc dt=6 heapalloc_value=19478152
+HeapAlloc dt=6 heapalloc_value=19486344
+HeapAlloc dt=6 heapalloc_value=19494536
+HeapAlloc dt=6 heapalloc_value=19502728
+HeapAlloc dt=7 heapalloc_value=19510920
+HeapAlloc dt=6 heapalloc_value=19519112
+HeapAlloc dt=6 heapalloc_value=19527304
+HeapAlloc dt=6 heapalloc_value=19535496
+HeapAlloc dt=6 heapalloc_value=19543688
+HeapAlloc dt=35 heapalloc_value=19551880
+HeapAlloc dt=7 heapalloc_value=19560072
+HeapAlloc dt=6 heapalloc_value=19568264
+HeapAlloc dt=6 heapalloc_value=19576456
+HeapAlloc dt=6 heapalloc_value=19584648
+HeapAlloc dt=7 heapalloc_value=19592840
+HeapAlloc dt=7 heapalloc_value=19601032
+HeapAlloc dt=6 heapalloc_value=19609224
+HeapAlloc dt=6 heapalloc_value=19617416
+HeapAlloc dt=6 heapalloc_value=19625608
+HeapAlloc dt=6 heapalloc_value=19633800
+GoBlock dt=12 reason_string=19 stack=21
+ProcStop dt=171
+ProcStart dt=17527 p=0 p_seq=28
+ProcStop dt=24
+ProcStart dt=1830 p=1 p_seq=20
+ProcStop dt=13
+ProcStart dt=16742 p=1 p_seq=21
+GoUnblock dt=20 g=1 g_seq=63 stack=0
+GoStart dt=121 g=1 g_seq=64
+HeapAlloc dt=62 heapalloc_value=20665992
+HeapAlloc dt=21 heapalloc_value=20674184
+HeapAlloc dt=25 heapalloc_value=20682376
+HeapAlloc dt=20 heapalloc_value=20690568
+HeapAlloc dt=12 heapalloc_value=20698760
+HeapAlloc dt=16 heapalloc_value=20706952
+HeapAlloc dt=15 heapalloc_value=20715144
+HeapAlloc dt=18 heapalloc_value=20723336
+HeapAlloc dt=12 heapalloc_value=20731528
+HeapAlloc dt=16 heapalloc_value=20739720
+HeapAlloc dt=12 heapalloc_value=20747912
+HeapAlloc dt=12 heapalloc_value=20756104
+HeapAlloc dt=12 heapalloc_value=20764296
+HeapAlloc dt=12 heapalloc_value=20772488
+HeapAlloc dt=9 heapalloc_value=20780680
+HeapAlloc dt=5 heapalloc_value=20788872
+HeapAlloc dt=6 heapalloc_value=20797064
+HeapAlloc dt=9 heapalloc_value=20805256
+HeapAlloc dt=5 heapalloc_value=20813448
+HeapAlloc dt=6 heapalloc_value=20821640
+HeapAlloc dt=5 heapalloc_value=20829832
+HeapAlloc dt=6 heapalloc_value=20838024
+HeapAlloc dt=15 heapalloc_value=20846216
+HeapAlloc dt=12 heapalloc_value=20854408
+HeapAlloc dt=11 heapalloc_value=20862600
+HeapAlloc dt=13 heapalloc_value=20870792
+HeapAlloc dt=5 heapalloc_value=20878984
+HeapAlloc dt=106 heapalloc_value=20887176
+HeapAlloc dt=8 heapalloc_value=20895368
+HeapAlloc dt=5 heapalloc_value=20903560
+HeapAlloc dt=6 heapalloc_value=20911752
+HeapAlloc dt=6 heapalloc_value=20919944
+HeapAlloc dt=5 heapalloc_value=20928136
+HeapAlloc dt=9 heapalloc_value=20936328
+HeapAlloc dt=6 heapalloc_value=20944520
+HeapAlloc dt=5 heapalloc_value=20952712
+HeapAlloc dt=6 heapalloc_value=20960904
+HeapAlloc dt=5 heapalloc_value=20969096
+HeapAlloc dt=6 heapalloc_value=20977288
+HeapAlloc dt=5 heapalloc_value=20985480
+HeapAlloc dt=5 heapalloc_value=20993672
+HeapAlloc dt=10 heapalloc_value=21001864
+HeapAlloc dt=6 heapalloc_value=21010056
+HeapAlloc dt=37 heapalloc_value=21018248
+HeapAlloc dt=7 heapalloc_value=21026440
+HeapAlloc dt=6 heapalloc_value=21034632
+HeapAlloc dt=34 heapalloc_value=21042824
+HeapAlloc dt=6 heapalloc_value=21051016
+HeapAlloc dt=6 heapalloc_value=21059208
+HeapAlloc dt=11 heapalloc_value=21067400
+HeapAlloc dt=6 heapalloc_value=21075592
+HeapAlloc dt=5 heapalloc_value=21083784
+HeapAlloc dt=6 heapalloc_value=21091976
+HeapAlloc dt=5 heapalloc_value=21100168
+HeapAlloc dt=9 heapalloc_value=21108360
+HeapAlloc dt=6 heapalloc_value=21116552
+HeapAlloc dt=6 heapalloc_value=21124744
+HeapAlloc dt=10 heapalloc_value=21132936
+HeapAlloc dt=5 heapalloc_value=21141128
+HeapAlloc dt=6 heapalloc_value=21149320
+HeapAlloc dt=5 heapalloc_value=21157512
+HeapAlloc dt=6 heapalloc_value=21165704
+HeapAlloc dt=5 heapalloc_value=21173896
+HeapAlloc dt=6 heapalloc_value=21182088
+HeapAlloc dt=5 heapalloc_value=21190280
+HeapAlloc dt=9 heapalloc_value=21198472
+HeapAlloc dt=6 heapalloc_value=21206664
+HeapAlloc dt=6 heapalloc_value=21214856
+HeapAlloc dt=6 heapalloc_value=21223048
+HeapAlloc dt=5 heapalloc_value=21231240
+HeapAlloc dt=6 heapalloc_value=21239432
+HeapAlloc dt=5 heapalloc_value=21247624
+HeapAlloc dt=6 heapalloc_value=21255816
+HeapAlloc dt=5 heapalloc_value=21264008
+HeapAlloc dt=6 heapalloc_value=21272200
+HeapAlloc dt=5 heapalloc_value=21280392
+HeapAlloc dt=6 heapalloc_value=21288584
+HeapAlloc dt=5 heapalloc_value=21296776
+HeapAlloc dt=6 heapalloc_value=21304968
+HeapAlloc dt=5 heapalloc_value=21313160
+HeapAlloc dt=6 heapalloc_value=21321352
+HeapAlloc dt=6 heapalloc_value=21329544
+HeapAlloc dt=6 heapalloc_value=21337736
+HeapAlloc dt=6 heapalloc_value=21345928
+HeapAlloc dt=6 heapalloc_value=21354120
+HeapAlloc dt=5 heapalloc_value=21362312
+HeapAlloc dt=6 heapalloc_value=21370504
+HeapAlloc dt=6 heapalloc_value=21378696
+HeapAlloc dt=6 heapalloc_value=21386888
+HeapAlloc dt=5 heapalloc_value=21395080
+HeapAlloc dt=6 heapalloc_value=21403272
+HeapAlloc dt=96 heapalloc_value=21411464
+HeapAlloc dt=7 heapalloc_value=21419656
+HeapAlloc dt=6 heapalloc_value=21427848
+HeapAlloc dt=21 heapalloc_value=21968520
+HeapAlloc dt=1835 heapalloc_value=21976712
+HeapAlloc dt=11 heapalloc_value=21984904
+HeapAlloc dt=8 heapalloc_value=21993096
+HeapAlloc dt=7 heapalloc_value=22001288
+HeapAlloc dt=8 heapalloc_value=22009480
+HeapAlloc dt=7 heapalloc_value=22017672
+HeapAlloc dt=8 heapalloc_value=22025864
+HeapAlloc dt=7 heapalloc_value=22034056
+HeapAlloc dt=8 heapalloc_value=22042248
+HeapAlloc dt=7 heapalloc_value=22050440
+HeapAlloc dt=7 heapalloc_value=22058632
+HeapAlloc dt=8 heapalloc_value=22066824
+HeapAlloc dt=7 heapalloc_value=22075016
+HeapAlloc dt=8 heapalloc_value=22083208
+HeapAlloc dt=7 heapalloc_value=22091400
+HeapAlloc dt=7 heapalloc_value=22099592
+HeapAlloc dt=14 heapalloc_value=22107784
+HeapAlloc dt=5 heapalloc_value=22115976
+HeapAlloc dt=6 heapalloc_value=22124168
+HeapAlloc dt=6 heapalloc_value=22132360
+HeapAlloc dt=5 heapalloc_value=22140552
+HeapAlloc dt=6 heapalloc_value=22148744
+HeapAlloc dt=5 heapalloc_value=22156936
+HeapAlloc dt=6 heapalloc_value=22165128
+HeapAlloc dt=6 heapalloc_value=22173320
+HeapAlloc dt=38 heapalloc_value=22181512
+HeapAlloc dt=7 heapalloc_value=22189704
+HeapAlloc dt=5 heapalloc_value=22197896
+HeapAlloc dt=6 heapalloc_value=22206088
+HeapAlloc dt=6 heapalloc_value=22214280
+HeapAlloc dt=5 heapalloc_value=22222472
+GoBlock dt=9 reason_string=19 stack=21
+ProcStop dt=163
+ProcStart dt=16841 p=0 p_seq=30
+ProcStop dt=23
+ProcStart dt=1498 p=1 p_seq=24
+ProcStop dt=11
+ProcStart dt=16726 p=1 p_seq=25
+GoUnblock dt=19 g=1 g_seq=67 stack=0
+GoStart dt=117 g=1 g_seq=68
+HeapAlloc dt=46 heapalloc_value=23254664
+HeapAlloc dt=19 heapalloc_value=23262856
+HeapAlloc dt=20 heapalloc_value=23271048
+HeapAlloc dt=16 heapalloc_value=23279240
+HeapAlloc dt=12 heapalloc_value=23287432
+HeapAlloc dt=12 heapalloc_value=23295624
+HeapAlloc dt=13 heapalloc_value=23303816
+HeapAlloc dt=15 heapalloc_value=23312008
+HeapAlloc dt=13 heapalloc_value=23320200
+HeapAlloc dt=13 heapalloc_value=23328392
+HeapAlloc dt=12 heapalloc_value=23336584
+HeapAlloc dt=12 heapalloc_value=23344776
+HeapAlloc dt=5 heapalloc_value=23352968
+HeapAlloc dt=100 heapalloc_value=23361160
+HeapAlloc dt=14 heapalloc_value=23369352
+HeapAlloc dt=16 heapalloc_value=23377544
+HeapAlloc dt=13 heapalloc_value=23385736
+HeapAlloc dt=5 heapalloc_value=23393928
+HeapAlloc dt=6 heapalloc_value=23402120
+HeapAlloc dt=9 heapalloc_value=23410312
+HeapAlloc dt=6 heapalloc_value=23418504
+HeapAlloc dt=6 heapalloc_value=23426696
+HeapAlloc dt=5 heapalloc_value=23434888
+HeapAlloc dt=6 heapalloc_value=23443080
+HeapAlloc dt=5 heapalloc_value=23451272
+HeapAlloc dt=6 heapalloc_value=23459464
+HeapAlloc dt=6 heapalloc_value=23467656
+HeapAlloc dt=6 heapalloc_value=23475848
+HeapAlloc dt=6 heapalloc_value=23484040
+HeapAlloc dt=5 heapalloc_value=23492232
+HeapAlloc dt=6 heapalloc_value=23500424
+HeapAlloc dt=5 heapalloc_value=23508616
+HeapAlloc dt=83 heapalloc_value=23516808
+HeapAlloc dt=8 heapalloc_value=23525000
+HeapAlloc dt=5 heapalloc_value=23533192
+HeapAlloc dt=6 heapalloc_value=23541384
+HeapAlloc dt=6 heapalloc_value=23549576
+HeapAlloc dt=5 heapalloc_value=23557768
+HeapAlloc dt=7 heapalloc_value=23565960
+HeapAlloc dt=7 heapalloc_value=23574152
+HeapAlloc dt=6 heapalloc_value=23582344
+HeapAlloc dt=5 heapalloc_value=23590536
+HeapAlloc dt=6 heapalloc_value=23598728
+HeapAlloc dt=6 heapalloc_value=23606920
+HeapAlloc dt=5 heapalloc_value=23615112
+HeapAlloc dt=6 heapalloc_value=23623304
+HeapAlloc dt=6 heapalloc_value=23631496
+HeapAlloc dt=5 heapalloc_value=23639688
+HeapAlloc dt=38 heapalloc_value=23647880
+HeapAlloc dt=8 heapalloc_value=23656072
+HeapAlloc dt=37 heapalloc_value=23664264
+HeapAlloc dt=6 heapalloc_value=23672456
+HeapAlloc dt=6 heapalloc_value=23680648
+HeapAlloc dt=6 heapalloc_value=23688840
+HeapAlloc dt=6 heapalloc_value=23697032
+HeapAlloc dt=6 heapalloc_value=23705224
+HeapAlloc dt=5 heapalloc_value=23713416
+HeapAlloc dt=6 heapalloc_value=23721608
+HeapAlloc dt=10 heapalloc_value=23729800
+HeapAlloc dt=5 heapalloc_value=23737992
+HeapAlloc dt=6 heapalloc_value=23746184
+HeapAlloc dt=6 heapalloc_value=23754376
+HeapAlloc dt=5 heapalloc_value=23762568
+HeapAlloc dt=6 heapalloc_value=23770760
+HeapAlloc dt=6 heapalloc_value=23778952
+HeapAlloc dt=5 heapalloc_value=23787144
+HeapAlloc dt=9 heapalloc_value=23795336
+HeapAlloc dt=6 heapalloc_value=23803528
+HeapAlloc dt=6 heapalloc_value=23811720
+HeapAlloc dt=5 heapalloc_value=23819912
+HeapAlloc dt=6 heapalloc_value=23828104
+HeapAlloc dt=6 heapalloc_value=23836296
+HeapAlloc dt=6 heapalloc_value=23844488
+HeapAlloc dt=5 heapalloc_value=23852680
+HeapAlloc dt=6 heapalloc_value=23860872
+HeapAlloc dt=6 heapalloc_value=23869064
+HeapAlloc dt=6 heapalloc_value=23877256
+HeapAlloc dt=6 heapalloc_value=23885448
+HeapAlloc dt=5 heapalloc_value=23893640
+HeapAlloc dt=6 heapalloc_value=23901832
+HeapAlloc dt=5 heapalloc_value=23910024
+HeapAlloc dt=6 heapalloc_value=23918216
+HeapAlloc dt=6 heapalloc_value=23926408
+HeapAlloc dt=6 heapalloc_value=23934600
+HeapAlloc dt=6 heapalloc_value=23942792
+HeapAlloc dt=5 heapalloc_value=23950984
+HeapAlloc dt=6 heapalloc_value=23959176
+HeapAlloc dt=5 heapalloc_value=23967368
+HeapAlloc dt=6 heapalloc_value=23975560
+HeapAlloc dt=7 heapalloc_value=23983752
+HeapAlloc dt=5 heapalloc_value=23991944
+HeapAlloc dt=6 heapalloc_value=24000136
+HeapAlloc dt=5 heapalloc_value=24008328
+HeapAlloc dt=6 heapalloc_value=24016520
+HeapAlloc dt=6 heapalloc_value=24024712
+HeapAlloc dt=5 heapalloc_value=24032904
+HeapAlloc dt=50 heapalloc_value=24041096
+HeapAlloc dt=7 heapalloc_value=24049288
+HeapAlloc dt=6 heapalloc_value=24057480
+HeapAlloc dt=5 heapalloc_value=24065672
+HeapAlloc dt=34 heapalloc_value=24073864
+HeapAlloc dt=7 heapalloc_value=24082056
+HeapAlloc dt=6 heapalloc_value=24090248
+HeapAlloc dt=6 heapalloc_value=24098440
+HeapAlloc dt=6 heapalloc_value=24106632
+HeapAlloc dt=5 heapalloc_value=24114824
+HeapAlloc dt=6 heapalloc_value=24123016
+HeapAlloc dt=6 heapalloc_value=24131208
+HeapAlloc dt=6 heapalloc_value=24139400
+HeapAlloc dt=6 heapalloc_value=24147592
+HeapAlloc dt=5 heapalloc_value=24155784
+HeapAlloc dt=6 heapalloc_value=24163976
+HeapAlloc dt=5 heapalloc_value=24172168
+HeapAlloc dt=6 heapalloc_value=24180360
+HeapAlloc dt=365 heapalloc_value=24188552
+HeapAlloc dt=13 heapalloc_value=24196744
+HeapAlloc dt=6 heapalloc_value=24204936
+HeapAlloc dt=6 heapalloc_value=24213128
+HeapAlloc dt=5 heapalloc_value=24221320
+HeapAlloc dt=6 heapalloc_value=24229512
+HeapAlloc dt=6 heapalloc_value=24237704
+HeapAlloc dt=6 heapalloc_value=24245896
+HeapAlloc dt=6 heapalloc_value=24254088
+HeapAlloc dt=6 heapalloc_value=24262280
+HeapAlloc dt=6 heapalloc_value=24270472
+GoBlock dt=10 reason_string=19 stack=21
+ProcStop dt=157
+ProcStart dt=12778 p=1 p_seq=27
+GoUnblock dt=12 g=1 g_seq=69 stack=0
+GoStart dt=143 g=1 g_seq=70
+HeapAlloc dt=61 heapalloc_value=24278664
+HeapAlloc dt=11 heapalloc_value=24286856
+HeapAlloc dt=5 heapalloc_value=24295048
+HeapAlloc dt=6 heapalloc_value=24303240
+HeapAlloc dt=5 heapalloc_value=24311432
+HeapAlloc dt=6 heapalloc_value=24319624
+HeapAlloc dt=6 heapalloc_value=24327816
+HeapAlloc dt=6 heapalloc_value=24336008
+HeapAlloc dt=7 heapalloc_value=24344200
+HeapAlloc dt=5 heapalloc_value=24352392
+HeapAlloc dt=7 heapalloc_value=24360584
+HeapAlloc dt=5 heapalloc_value=24368776
+HeapAlloc dt=6 heapalloc_value=24376968
+HeapAlloc dt=6 heapalloc_value=24385160
+HeapAlloc dt=5 heapalloc_value=24393352
+HeapAlloc dt=6 heapalloc_value=24401544
+HeapAlloc dt=6 heapalloc_value=24409736
+HeapAlloc dt=6 heapalloc_value=24417928
+HeapAlloc dt=6 heapalloc_value=24426120
+HeapAlloc dt=5 heapalloc_value=24434312
+HeapAlloc dt=6 heapalloc_value=24442504
+HeapAlloc dt=6 heapalloc_value=24450696
+HeapAlloc dt=6 heapalloc_value=24458888
+HeapAlloc dt=6 heapalloc_value=24467080
+HeapAlloc dt=5 heapalloc_value=24475272
+HeapAlloc dt=6 heapalloc_value=24483464
+HeapAlloc dt=5 heapalloc_value=24491656
+HeapAlloc dt=6 heapalloc_value=24499848
+HeapAlloc dt=6 heapalloc_value=24508040
+HeapAlloc dt=5 heapalloc_value=24516232
+HeapAlloc dt=6 heapalloc_value=24524424
+HeapAlloc dt=6 heapalloc_value=24532616
+HeapAlloc dt=5 heapalloc_value=24540808
+HeapAlloc dt=6 heapalloc_value=24549000
+HeapAlloc dt=5 heapalloc_value=24557192
+HeapAlloc dt=49 heapalloc_value=24565384
+HeapAlloc dt=7 heapalloc_value=24573576
+HeapAlloc dt=5 heapalloc_value=24581768
+HeapAlloc dt=6 heapalloc_value=24589960
+HeapAlloc dt=17 heapalloc_value=24598152
+HeapAlloc dt=12 heapalloc_value=24606344
+HeapAlloc dt=5 heapalloc_value=24614536
+HeapAlloc dt=6 heapalloc_value=24622728
+HeapAlloc dt=5 heapalloc_value=24630920
+HeapAlloc dt=6 heapalloc_value=24639112
+HeapAlloc dt=6 heapalloc_value=24647304
+HeapAlloc dt=5 heapalloc_value=24655496
+HeapAlloc dt=6 heapalloc_value=24663688
+HeapAlloc dt=37 heapalloc_value=24671880
+HeapAlloc dt=6 heapalloc_value=24680072
+HeapAlloc dt=6 heapalloc_value=24688264
+HeapAlloc dt=36 heapalloc_value=24696456
+HeapAlloc dt=7 heapalloc_value=24704648
+HeapAlloc dt=12 heapalloc_value=24712840
+HeapAlloc dt=6 heapalloc_value=24721032
+HeapAlloc dt=17 heapalloc_value=24729224
+HeapAlloc dt=5 heapalloc_value=24737416
+HeapAlloc dt=6 heapalloc_value=24745608
+HeapAlloc dt=19 heapalloc_value=24753800
+HeapAlloc dt=5 heapalloc_value=24761992
+HeapAlloc dt=6 heapalloc_value=24770184
+HeapAlloc dt=79 heapalloc_value=24778376
+HeapAlloc dt=7 heapalloc_value=24786568
+HeapAlloc dt=6 heapalloc_value=24794760
+HeapAlloc dt=5 heapalloc_value=24802952
+HeapAlloc dt=6 heapalloc_value=24811144
+HeapAlloc dt=6 heapalloc_value=24819336
+HeapAlloc dt=6 heapalloc_value=24827528
+HeapAlloc dt=5 heapalloc_value=24835720
+HeapAlloc dt=6 heapalloc_value=24843912
+HeapAlloc dt=6 heapalloc_value=24852104
+HeapAlloc dt=6 heapalloc_value=24860296
+HeapAlloc dt=6 heapalloc_value=24868488
+HeapAlloc dt=5 heapalloc_value=24876680
+HeapAlloc dt=6 heapalloc_value=24884872
+HeapAlloc dt=6 heapalloc_value=24893064
+HeapAlloc dt=5 heapalloc_value=24901256
+HeapAlloc dt=6 heapalloc_value=24909448
+HeapAlloc dt=6 heapalloc_value=24917640
+HeapAlloc dt=5 heapalloc_value=24925832
+HeapAlloc dt=6 heapalloc_value=24934024
+HeapAlloc dt=5 heapalloc_value=24942216
+HeapAlloc dt=6 heapalloc_value=24950408
+HeapAlloc dt=6 heapalloc_value=24958600
+HeapAlloc dt=6 heapalloc_value=24966792
+HeapAlloc dt=5 heapalloc_value=24974984
+HeapAlloc dt=6 heapalloc_value=24983176
+HeapAlloc dt=6 heapalloc_value=24991368
+HeapAlloc dt=6 heapalloc_value=24999560
+HeapAlloc dt=5 heapalloc_value=25007752
+HeapAlloc dt=6 heapalloc_value=25015944
+HeapAlloc dt=5 heapalloc_value=25024136
+HeapAlloc dt=6 heapalloc_value=25032328
+HeapAlloc dt=6 heapalloc_value=25040520
+HeapAlloc dt=6 heapalloc_value=25048712
+HeapAlloc dt=6 heapalloc_value=25056904
+HeapAlloc dt=5 heapalloc_value=25065096
+HeapAlloc dt=6 heapalloc_value=25073288
+HeapAlloc dt=6 heapalloc_value=25081480
+HeapAlloc dt=46 heapalloc_value=25089672
+HeapAlloc dt=7 heapalloc_value=25097864
+HeapAlloc dt=6 heapalloc_value=25106056
+HeapAlloc dt=5 heapalloc_value=25114248
+HeapAlloc dt=36 heapalloc_value=25122440
+HeapAlloc dt=7 heapalloc_value=25130632
+HeapAlloc dt=6 heapalloc_value=25138824
+HeapAlloc dt=6 heapalloc_value=25147016
+HeapAlloc dt=5 heapalloc_value=25155208
+HeapAlloc dt=6 heapalloc_value=25163400
+HeapAlloc dt=5 heapalloc_value=25171592
+HeapAlloc dt=6 heapalloc_value=25179784
+HeapAlloc dt=5 heapalloc_value=25187976
+HeapAlloc dt=6 heapalloc_value=25196168
+HeapAlloc dt=5 heapalloc_value=25204360
+HeapAlloc dt=6 heapalloc_value=25212552
+HeapAlloc dt=5 heapalloc_value=25220744
+HeapAlloc dt=6 heapalloc_value=25228936
+HeapAlloc dt=10 heapalloc_value=25237128
+HeapAlloc dt=5 heapalloc_value=25245320
+HeapAlloc dt=6 heapalloc_value=25253512
+HeapAlloc dt=5 heapalloc_value=25261704
+HeapAlloc dt=6 heapalloc_value=25269896
+HeapAlloc dt=6 heapalloc_value=25278088
+HeapAlloc dt=5 heapalloc_value=25286280
+HeapAlloc dt=6 heapalloc_value=25294472
+GoBlock dt=10 reason_string=19 stack=21
+ProcStop dt=14
+ProcStart dt=7152 p=1 p_seq=29
+GoStart dt=199 g=37 g_seq=1
+GoStop dt=306782 reason_string=16 stack=50
+GoStart dt=57 g=37 g_seq=2
+GoStop dt=315218 reason_string=16 stack=50
+GoStart dt=17 g=37 g_seq=3
+GoDestroy dt=159214
+ProcStop dt=60
+EventBatch gen=1 m=1709041 time=7689670150297 size=5255
+ProcStart dt=316 p=3 p_seq=1
+ProcStop dt=37
+ProcStart dt=311299 p=1 p_seq=5
+ProcStop dt=17
+ProcStart dt=16759 p=1 p_seq=6
+GoUnblock dt=47 g=1 g_seq=3 stack=0
+GoStart dt=137 g=1 g_seq=4
+HeapAlloc dt=56 heapalloc_value=2809856
+HeapAlloc dt=29 heapalloc_value=2818048
+HeapAlloc dt=19 heapalloc_value=2826240
+HeapAlloc dt=22 heapalloc_value=2834432
+HeapAlloc dt=91 heapalloc_value=2842624
+HeapAlloc dt=21 heapalloc_value=2850816
+HeapAlloc dt=24 heapalloc_value=2859008
+HeapAlloc dt=7 heapalloc_value=2867200
+HeapAlloc dt=15 heapalloc_value=2875392
+HeapAlloc dt=16 heapalloc_value=2883584
+HeapAlloc dt=12 heapalloc_value=2899968
+HeapAlloc dt=9 heapalloc_value=2908160
+HeapAlloc dt=16 heapalloc_value=2916352
+HeapAlloc dt=15 heapalloc_value=2924544
+HeapAlloc dt=12 heapalloc_value=2932736
+HeapAlloc dt=12 heapalloc_value=2940928
+HeapAlloc dt=7 heapalloc_value=2949120
+HeapAlloc dt=18 heapalloc_value=2957312
+HeapAlloc dt=14 heapalloc_value=2965504
+HeapAlloc dt=12 heapalloc_value=2973696
+HeapAlloc dt=13 heapalloc_value=2981888
+HeapAlloc dt=12 heapalloc_value=2990080
+HeapAlloc dt=11 heapalloc_value=2998272
+HeapAlloc dt=12 heapalloc_value=3006464
+HeapAlloc dt=13 heapalloc_value=3014656
+HeapAlloc dt=12 heapalloc_value=3022848
+HeapAlloc dt=11 heapalloc_value=3031040
+HeapAlloc dt=11 heapalloc_value=3039232
+HeapAlloc dt=13 heapalloc_value=3047424
+HeapAlloc dt=11 heapalloc_value=3055616
+HeapAlloc dt=20 heapalloc_value=3063808
+HeapAlloc dt=12 heapalloc_value=3072000
+HeapAlloc dt=12 heapalloc_value=3080192
+HeapAlloc dt=11 heapalloc_value=3088384
+HeapAlloc dt=12 heapalloc_value=3096576
+HeapAlloc dt=11 heapalloc_value=3104768
+HeapAlloc dt=11 heapalloc_value=3112960
+HeapAlloc dt=12 heapalloc_value=3121152
+HeapAlloc dt=11 heapalloc_value=3129344
+HeapAlloc dt=15 heapalloc_value=3137536
+HeapAlloc dt=15 heapalloc_value=3145728
+HeapAlloc dt=18 heapalloc_value=3153920
+HeapAlloc dt=13 heapalloc_value=3162112
+HeapAlloc dt=12 heapalloc_value=3170304
+HeapAlloc dt=16 heapalloc_value=3178496
+HeapAlloc dt=11 heapalloc_value=3186688
+HeapAlloc dt=12 heapalloc_value=3194880
+HeapAlloc dt=11 heapalloc_value=3203072
+HeapAlloc dt=13 heapalloc_value=3211264
+HeapAlloc dt=12 heapalloc_value=3219456
+HeapAlloc dt=11 heapalloc_value=3227648
+HeapAlloc dt=13 heapalloc_value=3244032
+HeapAlloc dt=734 heapalloc_value=3252224
+HeapAlloc dt=16 heapalloc_value=3260416
+HeapAlloc dt=8 heapalloc_value=3268608
+HeapAlloc dt=5 heapalloc_value=3276800
+HeapAlloc dt=8 heapalloc_value=3284992
+HeapAlloc dt=88 heapalloc_value=3293184
+HeapAlloc dt=7 heapalloc_value=3301376
+HeapAlloc dt=5 heapalloc_value=3309568
+HeapAlloc dt=6 heapalloc_value=3317760
+HeapAlloc dt=5 heapalloc_value=3325952
+HeapAlloc dt=5 heapalloc_value=3334144
+HeapAlloc dt=5 heapalloc_value=3342336
+HeapAlloc dt=5 heapalloc_value=3350528
+HeapAlloc dt=6 heapalloc_value=3358720
+HeapAlloc dt=5 heapalloc_value=3366912
+HeapAlloc dt=5 heapalloc_value=3375104
+HeapAlloc dt=7 heapalloc_value=3383296
+HeapAlloc dt=6 heapalloc_value=3391488
+HeapAlloc dt=5 heapalloc_value=3399680
+HeapAlloc dt=5 heapalloc_value=3407872
+HeapAlloc dt=5 heapalloc_value=3416064
+HeapAlloc dt=6 heapalloc_value=3424256
+HeapAlloc dt=5 heapalloc_value=3432448
+HeapAlloc dt=5 heapalloc_value=3440640
+HeapAlloc dt=5 heapalloc_value=3448832
+HeapAlloc dt=6 heapalloc_value=3457024
+HeapAlloc dt=5 heapalloc_value=3465216
+HeapAlloc dt=38 heapalloc_value=3473408
+HeapAlloc dt=6 heapalloc_value=3481600
+HeapAlloc dt=5 heapalloc_value=3489792
+HeapAlloc dt=6 heapalloc_value=3497984
+HeapAlloc dt=5 heapalloc_value=3506176
+HeapAlloc dt=6 heapalloc_value=3514368
+HeapAlloc dt=5 heapalloc_value=3522560
+HeapAlloc dt=5 heapalloc_value=3530752
+HeapAlloc dt=5 heapalloc_value=3538944
+HeapAlloc dt=5 heapalloc_value=3547136
+HeapAlloc dt=6 heapalloc_value=3555328
+HeapAlloc dt=5 heapalloc_value=3563520
+HeapAlloc dt=5 heapalloc_value=3571712
+HeapAlloc dt=5 heapalloc_value=3579904
+HeapAlloc dt=5 heapalloc_value=3588096
+HeapAlloc dt=6 heapalloc_value=3596288
+HeapAlloc dt=10 heapalloc_value=3678208
+HeapAlloc dt=2433 heapalloc_value=3686400
+HeapAlloc dt=6 heapalloc_value=3694592
+HeapAlloc dt=6 heapalloc_value=3702784
+HeapAlloc dt=6 heapalloc_value=3710976
+HeapAlloc dt=5 heapalloc_value=3719168
+HeapAlloc dt=6 heapalloc_value=3727360
+HeapAlloc dt=5 heapalloc_value=3735552
+HeapAlloc dt=5 heapalloc_value=3743744
+HeapAlloc dt=5 heapalloc_value=3751936
+HeapAlloc dt=6 heapalloc_value=3760128
+HeapAlloc dt=5 heapalloc_value=3768320
+HeapAlloc dt=11 heapalloc_value=3776512
+HeapAlloc dt=31 heapalloc_value=3784704
+HeapAlloc dt=7 heapalloc_value=3792896
+HeapAlloc dt=6 heapalloc_value=3801088
+HeapAlloc dt=5 heapalloc_value=3809280
+HeapAlloc dt=6 heapalloc_value=3817472
+HeapAlloc dt=5 heapalloc_value=3825664
+HeapAlloc dt=5 heapalloc_value=3833856
+HeapAlloc dt=6 heapalloc_value=3842048
+HeapAlloc dt=5 heapalloc_value=3850240
+HeapAlloc dt=5 heapalloc_value=3858432
+HeapAlloc dt=6 heapalloc_value=3866624
+HeapAlloc dt=5 heapalloc_value=3874816
+HeapAlloc dt=5 heapalloc_value=3883008
+HeapAlloc dt=78 heapalloc_value=3891200
+HeapAlloc dt=7 heapalloc_value=3899392
+HeapAlloc dt=6 heapalloc_value=3907584
+HeapAlloc dt=5 heapalloc_value=3915776
+HeapAlloc dt=5 heapalloc_value=3923968
+HeapAlloc dt=5 heapalloc_value=3932160
+HeapAlloc dt=6 heapalloc_value=3940352
+HeapAlloc dt=5 heapalloc_value=3948544
+HeapAlloc dt=5 heapalloc_value=3956736
+HeapAlloc dt=5 heapalloc_value=3964928
+HeapAlloc dt=5 heapalloc_value=3973120
+HeapAlloc dt=6 heapalloc_value=3981312
+HeapAlloc dt=5 heapalloc_value=3989504
+HeapAlloc dt=6 heapalloc_value=3997696
+GCBegin dt=38 gc_seq=1 stack=22
+HeapAlloc dt=42 heapalloc_value=4005888
+HeapAlloc dt=14 heapalloc_value=4014080
+GoCreate dt=73 new_g=18 new_stack=23 stack=24
+GoBlock dt=235 reason_string=12 stack=25
+GoStart dt=11 g=18 g_seq=1
+HeapAlloc dt=16 heapalloc_value=4022272
+GoUnblock dt=15 g=1 g_seq=5 stack=26
+GoBlock dt=9 reason_string=15 stack=27
+GoStart dt=12 g=1 g_seq=6
+GoCreate dt=44 new_g=19 new_stack=23 stack=24
+GoBlock dt=4 reason_string=12 stack=25
+GoStart dt=3 g=19 g_seq=1
+GoUnblock dt=5 g=1 g_seq=7 stack=26
+GoBlock dt=2 reason_string=15 stack=27
+GoStart dt=2 g=1 g_seq=8
+GoCreate dt=8 new_g=20 new_stack=23 stack=24
+GoBlock dt=3 reason_string=12 stack=25
+GoStart dt=2 g=20 g_seq=1
+GoUnblock dt=3 g=1 g_seq=9 stack=26
+GoBlock dt=1 reason_string=15 stack=27
+GoStart dt=2 g=1 g_seq=10
+GoCreate dt=6 new_g=21 new_stack=23 stack=24
+GoBlock dt=3 reason_string=12 stack=25
+GoStart dt=1 g=21 g_seq=1
+GoUnblock dt=6 g=1 g_seq=11 stack=26
+GoBlock dt=1 reason_string=15 stack=27
+GoStart dt=8 g=1 g_seq=12
+GoCreate dt=7 new_g=22 new_stack=23 stack=24
+GoBlock dt=2 reason_string=12 stack=25
+GoStart dt=2 g=22 g_seq=1
+GoUnblock dt=2 g=1 g_seq=13 stack=26
+GoBlock dt=6 reason_string=15 stack=27
+GoStart dt=4 g=1 g_seq=14
+GoCreate dt=15 new_g=23 new_stack=23 stack=24
+GoBlock dt=166 reason_string=12 stack=25
+GoStart dt=4 g=23 g_seq=1
+GoUnblock dt=3 g=1 g_seq=15 stack=26
+GoBlock dt=3 reason_string=15 stack=27
+GoStart dt=3 g=1 g_seq=16
+HeapAlloc dt=18 heapalloc_value=4030464
+GoCreate dt=11 new_g=24 new_stack=23 stack=24
+GoBlock dt=3 reason_string=12 stack=25
+GoStart dt=1 g=24 g_seq=1
+GoUnblock dt=3 g=1 g_seq=17 stack=26
+GoBlock dt=2 reason_string=15 stack=27
+GoStart dt=1 g=1 g_seq=18
+GoCreate dt=6 new_g=25 new_stack=23 stack=24
+GoBlock dt=3 reason_string=12 stack=25
+GoStart dt=1 g=25 g_seq=1
+GoUnblock dt=2 g=1 g_seq=19 stack=26
+GoBlock dt=2 reason_string=15 stack=27
+GoStart dt=1 g=1 g_seq=20
+STWBegin dt=118 kind_string=22 stack=28
+GoStatus dt=1398 g=4 m=18446744073709551615 gstatus=4
+GoUnblock dt=83 g=4 g_seq=1 stack=29
+ProcsChange dt=91 procs_value=8 stack=30
+STWEnd dt=31
+GCMarkAssistBegin dt=149 stack=31
+GCMarkAssistEnd dt=1458
+GoBlock dt=23 reason_string=19 stack=21
+GoStart dt=166 g=4 g_seq=2
+GoBlock dt=22 reason_string=15 stack=32
+GoUnblock dt=35 g=23 g_seq=2 stack=0
+GoStart dt=4 g=23 g_seq=3
+GoLabel dt=1 label_string=4
+GoBlock dt=441 reason_string=15 stack=27
+ProcStop dt=23
+ProcStart dt=16781 p=0 p_seq=6
+GoUnblock dt=28 g=1 g_seq=27 stack=0
+GoStart dt=162 g=1 g_seq=28
+HeapAlloc dt=69 heapalloc_value=4663024
+HeapAlloc dt=23 heapalloc_value=4671216
+HeapAlloc dt=15 heapalloc_value=4679408
+HeapAlloc dt=10 heapalloc_value=4687600
+HeapAlloc dt=12 heapalloc_value=4695792
+HeapAlloc dt=8 heapalloc_value=4703984
+HeapAlloc dt=6 heapalloc_value=4712176
+HeapAlloc dt=12 heapalloc_value=4720368
+HeapAlloc dt=12 heapalloc_value=4728560
+HeapAlloc dt=12 heapalloc_value=4736752
+HeapAlloc dt=15 heapalloc_value=4744944
+HeapAlloc dt=9 heapalloc_value=4753136
+HeapAlloc dt=9 heapalloc_value=4761328
+HeapAlloc dt=7 heapalloc_value=4769520
+HeapAlloc dt=8 heapalloc_value=4777712
+HeapAlloc dt=9 heapalloc_value=4785904
+HeapAlloc dt=112 heapalloc_value=4794096
+HeapAlloc dt=7 heapalloc_value=4802288
+HeapAlloc dt=9 heapalloc_value=4810480
+HeapAlloc dt=13 heapalloc_value=4818672
+HeapAlloc dt=14 heapalloc_value=4826864
+HeapAlloc dt=6 heapalloc_value=4835056
+HeapAlloc dt=5 heapalloc_value=4843248
+HeapAlloc dt=6 heapalloc_value=4851440
+HeapAlloc dt=14 heapalloc_value=4859632
+HeapAlloc dt=10 heapalloc_value=4867824
+HeapAlloc dt=10 heapalloc_value=4876016
+HeapAlloc dt=6 heapalloc_value=4884208
+HeapAlloc dt=9 heapalloc_value=4892400
+HeapAlloc dt=72 heapalloc_value=4900592
+HeapAlloc dt=6 heapalloc_value=4908784
+HeapAlloc dt=5 heapalloc_value=4916976
+HeapAlloc dt=6 heapalloc_value=4925168
+HeapAlloc dt=6 heapalloc_value=4933360
+HeapAlloc dt=9 heapalloc_value=4941552
+HeapAlloc dt=46 heapalloc_value=4949744
+HeapAlloc dt=10 heapalloc_value=4957936
+HeapAlloc dt=6 heapalloc_value=4966128
+HeapAlloc dt=6 heapalloc_value=4974320
+HeapAlloc dt=6 heapalloc_value=4982512
+HeapAlloc dt=5 heapalloc_value=4990704
+HeapAlloc dt=6 heapalloc_value=4998896
+HeapAlloc dt=45 heapalloc_value=5007088
+HeapAlloc dt=6 heapalloc_value=5015280
+HeapAlloc dt=9 heapalloc_value=5023472
+HeapAlloc dt=6 heapalloc_value=5031664
+HeapAlloc dt=5 heapalloc_value=5039856
+HeapAlloc dt=6 heapalloc_value=5048048
+HeapAlloc dt=6 heapalloc_value=5056240
+HeapAlloc dt=15 heapalloc_value=5138160
+HeapAlloc dt=81 heapalloc_value=5146352
+HeapAlloc dt=6 heapalloc_value=5154544
+HeapAlloc dt=6 heapalloc_value=5162736
+HeapAlloc dt=5 heapalloc_value=5170928
+HeapAlloc dt=6 heapalloc_value=5179120
+HeapAlloc dt=5 heapalloc_value=5187312
+HeapAlloc dt=6 heapalloc_value=5195504
+HeapAlloc dt=7 heapalloc_value=5203696
+HeapAlloc dt=5 heapalloc_value=5211888
+HeapAlloc dt=6 heapalloc_value=5220080
+HeapAlloc dt=6 heapalloc_value=5228272
+HeapAlloc dt=37 heapalloc_value=5236464
+HeapAlloc dt=7 heapalloc_value=5244656
+HeapAlloc dt=6 heapalloc_value=5252848
+HeapAlloc dt=5 heapalloc_value=5261040
+HeapAlloc dt=8 heapalloc_value=5269232
+HeapAlloc dt=6 heapalloc_value=5277424
+HeapAlloc dt=6 heapalloc_value=5285616
+HeapAlloc dt=5 heapalloc_value=5293808
+HeapAlloc dt=7 heapalloc_value=5302000
+HeapAlloc dt=5 heapalloc_value=5310192
+HeapAlloc dt=5 heapalloc_value=5318384
+HeapAlloc dt=6 heapalloc_value=5326576
+HeapAlloc dt=7 heapalloc_value=5334768
+HeapAlloc dt=6 heapalloc_value=5342960
+HeapAlloc dt=5 heapalloc_value=5351152
+HeapAlloc dt=6 heapalloc_value=5359344
+HeapAlloc dt=5 heapalloc_value=5367536
+HeapAlloc dt=13 heapalloc_value=5375728
+HeapAlloc dt=6 heapalloc_value=5383920
+HeapAlloc dt=100 heapalloc_value=5392112
+HeapAlloc dt=8 heapalloc_value=5400304
+HeapAlloc dt=6 heapalloc_value=5408496
+HeapAlloc dt=6 heapalloc_value=5416688
+HeapAlloc dt=5 heapalloc_value=5424880
+HeapAlloc dt=6 heapalloc_value=5433072
+HeapAlloc dt=33 heapalloc_value=5441264
+HeapAlloc dt=7 heapalloc_value=5449456
+HeapAlloc dt=5 heapalloc_value=5457648
+HeapAlloc dt=8 heapalloc_value=5465840
+HeapAlloc dt=6 heapalloc_value=5474032
+HeapAlloc dt=5 heapalloc_value=5482224
+HeapAlloc dt=6 heapalloc_value=5490416
+HeapAlloc dt=5 heapalloc_value=5498608
+HeapAlloc dt=6 heapalloc_value=5506800
+HeapAlloc dt=6 heapalloc_value=5514992
+HeapAlloc dt=5 heapalloc_value=5523184
+HeapAlloc dt=12 heapalloc_value=5531376
+HeapAlloc dt=6 heapalloc_value=5539568
+HeapAlloc dt=6 heapalloc_value=5547760
+HeapAlloc dt=5 heapalloc_value=5555952
+HeapAlloc dt=6 heapalloc_value=5564144
+HeapAlloc dt=5 heapalloc_value=5572336
+HeapAlloc dt=6 heapalloc_value=5580528
+HeapAlloc dt=5 heapalloc_value=5588720
+HeapAlloc dt=7 heapalloc_value=5596912
+HeapAlloc dt=6 heapalloc_value=5605104
+HeapAlloc dt=5 heapalloc_value=5613296
+HeapAlloc dt=6 heapalloc_value=5621488
+HeapAlloc dt=5 heapalloc_value=5629680
+HeapAlloc dt=6 heapalloc_value=5637872
+HeapAlloc dt=6 heapalloc_value=5646064
+HeapAlloc dt=37 heapalloc_value=5654256
+HeapAlloc dt=7 heapalloc_value=5662448
+HeapAlloc dt=6 heapalloc_value=5670640
+HeapAlloc dt=5 heapalloc_value=5678832
+HeapAlloc dt=6 heapalloc_value=5687024
+HeapAlloc dt=5 heapalloc_value=5695216
+HeapAlloc dt=6 heapalloc_value=5703408
+HeapAlloc dt=6 heapalloc_value=5711600
+HeapAlloc dt=5 heapalloc_value=5719792
+HeapAlloc dt=5 heapalloc_value=5727984
+HeapAlloc dt=6 heapalloc_value=5736176
+HeapAlloc dt=6 heapalloc_value=5744368
+HeapAlloc dt=5 heapalloc_value=5752560
+HeapAlloc dt=5 heapalloc_value=5760752
+GoBlock dt=15 reason_string=19 stack=21
+ProcStop dt=178
+ProcStart dt=17613 p=4 p_seq=3
+ProcStop dt=26
+ProcStart dt=3944 p=0 p_seq=9
+ProcStop dt=12
+ProcStart dt=16762 p=4 p_seq=6
+ProcStop dt=14
+ProcStart dt=9275 p=0 p_seq=12
+ProcStop dt=9
+ProcStart dt=16732 p=0 p_seq=13
+GoUnblock dt=9 g=1 g_seq=38 stack=0
+GoStart dt=84 g=1 g_seq=39
+HeapAlloc dt=23 heapalloc_value=9631048
+HeapAlloc dt=24 heapalloc_value=9639240
+HeapAlloc dt=15 heapalloc_value=9647432
+HeapAlloc dt=15 heapalloc_value=9655624
+HeapAlloc dt=15 heapalloc_value=9663816
+HeapAlloc dt=16 heapalloc_value=9672008
+HeapAlloc dt=14 heapalloc_value=9680200
+HeapAlloc dt=18 heapalloc_value=9688392
+HeapAlloc dt=14 heapalloc_value=9696584
+HeapAlloc dt=19 heapalloc_value=9704776
+HeapAlloc dt=15 heapalloc_value=9712968
+HeapAlloc dt=76 heapalloc_value=9721160
+HeapAlloc dt=18 heapalloc_value=9729352
+HeapAlloc dt=17 heapalloc_value=9737544
+HeapAlloc dt=14 heapalloc_value=9745736
+HeapAlloc dt=15 heapalloc_value=9753928
+HeapAlloc dt=16 heapalloc_value=9762120
+HeapAlloc dt=28 heapalloc_value=9770312
+HeapAlloc dt=23 heapalloc_value=9778504
+HeapAlloc dt=19 heapalloc_value=9786696
+HeapAlloc dt=14 heapalloc_value=9794888
+HeapAlloc dt=26 heapalloc_value=9803080
+HeapAlloc dt=18 heapalloc_value=9811272
+HeapAlloc dt=16 heapalloc_value=9819464
+HeapAlloc dt=15 heapalloc_value=9827656
+HeapAlloc dt=19 heapalloc_value=9835848
+HeapAlloc dt=16 heapalloc_value=9844040
+HeapAlloc dt=15 heapalloc_value=9852232
+HeapAlloc dt=15 heapalloc_value=9860424
+HeapAlloc dt=15 heapalloc_value=9868616
+HeapAlloc dt=15 heapalloc_value=9876808
+HeapAlloc dt=15 heapalloc_value=9885000
+HeapAlloc dt=15 heapalloc_value=9893192
+HeapAlloc dt=15 heapalloc_value=9901384
+HeapAlloc dt=16 heapalloc_value=9909576
+HeapAlloc dt=16 heapalloc_value=9917768
+HeapAlloc dt=15 heapalloc_value=9925960
+HeapAlloc dt=15 heapalloc_value=9934152
+HeapAlloc dt=15 heapalloc_value=9942344
+HeapAlloc dt=15 heapalloc_value=9950536
+HeapAlloc dt=16 heapalloc_value=9958728
+HeapAlloc dt=15 heapalloc_value=9966920
+HeapAlloc dt=63 heapalloc_value=9975112
+HeapAlloc dt=20 heapalloc_value=9983304
+HeapAlloc dt=14 heapalloc_value=9991496
+HeapAlloc dt=15 heapalloc_value=9999688
+HeapAlloc dt=14 heapalloc_value=10007880
+HeapAlloc dt=15 heapalloc_value=10016072
+HeapAlloc dt=16 heapalloc_value=10024264
+HeapAlloc dt=16 heapalloc_value=10032456
+HeapAlloc dt=16 heapalloc_value=10040648
+HeapAlloc dt=16 heapalloc_value=10048840
+HeapAlloc dt=15 heapalloc_value=10057032
+HeapAlloc dt=16 heapalloc_value=10065224
+HeapAlloc dt=14 heapalloc_value=10073416
+HeapAlloc dt=16 heapalloc_value=10081608
+HeapAlloc dt=15 heapalloc_value=10089800
+HeapAlloc dt=16 heapalloc_value=10097992
+HeapAlloc dt=16 heapalloc_value=10106184
+HeapAlloc dt=17 heapalloc_value=10114376
+HeapAlloc dt=15 heapalloc_value=10122568
+HeapAlloc dt=33 heapalloc_value=10327368
+HeapAlloc dt=367 heapalloc_value=10335560
+HeapAlloc dt=21 heapalloc_value=10343752
+HeapAlloc dt=16 heapalloc_value=10351944
+HeapAlloc dt=15 heapalloc_value=10360136
+HeapAlloc dt=16 heapalloc_value=10368328
+HeapAlloc dt=16 heapalloc_value=10376520
+HeapAlloc dt=16 heapalloc_value=10384712
+HeapAlloc dt=15 heapalloc_value=10392904
+HeapAlloc dt=15 heapalloc_value=10401096
+HeapAlloc dt=15 heapalloc_value=10409288
+HeapAlloc dt=15 heapalloc_value=10417480
+HeapAlloc dt=15 heapalloc_value=10425672
+HeapAlloc dt=17 heapalloc_value=10433864
+HeapAlloc dt=15 heapalloc_value=10442056
+HeapAlloc dt=15 heapalloc_value=10450248
+HeapAlloc dt=15 heapalloc_value=10458440
+HeapAlloc dt=15 heapalloc_value=10466632
+HeapAlloc dt=15 heapalloc_value=10474824
+HeapAlloc dt=15 heapalloc_value=10483016
+HeapAlloc dt=14 heapalloc_value=10491208
+HeapAlloc dt=22 heapalloc_value=10499400
+HeapAlloc dt=7 heapalloc_value=10507592
+HeapAlloc dt=9 heapalloc_value=10515784
+HeapAlloc dt=7 heapalloc_value=10523976
+HeapAlloc dt=6 heapalloc_value=10532168
+HeapAlloc dt=5 heapalloc_value=10540360
+HeapAlloc dt=6 heapalloc_value=10548552
+HeapAlloc dt=6 heapalloc_value=10556744
+HeapAlloc dt=5 heapalloc_value=10564936
+HeapAlloc dt=6 heapalloc_value=10573128
+HeapAlloc dt=6 heapalloc_value=10581320
+HeapAlloc dt=5 heapalloc_value=10589512
+HeapAlloc dt=6 heapalloc_value=10597704
+HeapAlloc dt=6 heapalloc_value=10605896
+HeapAlloc dt=5 heapalloc_value=10614088
+HeapAlloc dt=6 heapalloc_value=10622280
+HeapAlloc dt=5 heapalloc_value=10630472
+HeapAlloc dt=6 heapalloc_value=10638664
+HeapAlloc dt=6 heapalloc_value=10646856
+HeapAlloc dt=5 heapalloc_value=10655048
+HeapAlloc dt=6 heapalloc_value=10663240
+HeapAlloc dt=5 heapalloc_value=10671432
+HeapAlloc dt=6 heapalloc_value=10679624
+HeapAlloc dt=5 heapalloc_value=10687816
+HeapAlloc dt=221 heapalloc_value=10696008
+HeapAlloc dt=9 heapalloc_value=10704200
+HeapAlloc dt=6 heapalloc_value=10712392
+HeapAlloc dt=5 heapalloc_value=10720584
+HeapAlloc dt=6 heapalloc_value=10728776
+HeapAlloc dt=6 heapalloc_value=10736968
+HeapAlloc dt=5 heapalloc_value=10745160
+HeapAlloc dt=6 heapalloc_value=10753352
+HeapAlloc dt=5 heapalloc_value=10761544
+HeapAlloc dt=6 heapalloc_value=10769736
+HeapAlloc dt=5 heapalloc_value=10777928
+HeapAlloc dt=5 heapalloc_value=10786120
+HeapAlloc dt=6 heapalloc_value=10794312
+HeapAlloc dt=6 heapalloc_value=10802504
+HeapAlloc dt=5 heapalloc_value=10810696
+HeapAlloc dt=6 heapalloc_value=10818888
+HeapAlloc dt=5 heapalloc_value=10827080
+HeapAlloc dt=6 heapalloc_value=10835272
+HeapAlloc dt=5 heapalloc_value=10843464
+HeapAlloc dt=6 heapalloc_value=10851656
+GoBlock dt=11 reason_string=19 stack=21
+ProcStop dt=119
+ProcStart dt=17350 p=2 p_seq=7
+ProcStop dt=13
+ProcStart dt=1133 p=0 p_seq=16
+ProcStop dt=8
+ProcStart dt=16748 p=0 p_seq=17
+GoUnblock dt=7 g=1 g_seq=42 stack=0
+GoStart dt=84 g=1 g_seq=43
+HeapAlloc dt=15 heapalloc_value=11883848
+HeapAlloc dt=10 heapalloc_value=11892040
+HeapAlloc dt=6 heapalloc_value=11900232
+HeapAlloc dt=6 heapalloc_value=11908424
+HeapAlloc dt=6 heapalloc_value=11916616
+HeapAlloc dt=6 heapalloc_value=11924808
+HeapAlloc dt=8 heapalloc_value=11933000
+HeapAlloc dt=5 heapalloc_value=11941192
+HeapAlloc dt=6 heapalloc_value=11949384
+HeapAlloc dt=62 heapalloc_value=11957576
+HeapAlloc dt=7 heapalloc_value=11965768
+HeapAlloc dt=6 heapalloc_value=11973960
+HeapAlloc dt=6 heapalloc_value=11982152
+HeapAlloc dt=5 heapalloc_value=11990344
+HeapAlloc dt=6 heapalloc_value=11998536
+HeapAlloc dt=6 heapalloc_value=12006728
+HeapAlloc dt=5 heapalloc_value=12014920
+HeapAlloc dt=6 heapalloc_value=12023112
+HeapAlloc dt=5 heapalloc_value=12031304
+HeapAlloc dt=6 heapalloc_value=12039496
+HeapAlloc dt=5 heapalloc_value=12047688
+HeapAlloc dt=6 heapalloc_value=12055880
+HeapAlloc dt=6 heapalloc_value=12064072
+HeapAlloc dt=6 heapalloc_value=12072264
+HeapAlloc dt=5 heapalloc_value=12080456
+HeapAlloc dt=352 heapalloc_value=12088648
+HeapAlloc dt=14 heapalloc_value=12096840
+HeapAlloc dt=7 heapalloc_value=12105032
+HeapAlloc dt=5 heapalloc_value=12113224
+HeapAlloc dt=6 heapalloc_value=12121416
+HeapAlloc dt=41 heapalloc_value=12129608
+HeapAlloc dt=7 heapalloc_value=12137800
+HeapAlloc dt=5 heapalloc_value=12145992
+HeapAlloc dt=6 heapalloc_value=12154184
+HeapAlloc dt=6 heapalloc_value=12162376
+HeapAlloc dt=6 heapalloc_value=12170568
+HeapAlloc dt=5 heapalloc_value=12178760
+HeapAlloc dt=6 heapalloc_value=12186952
+HeapAlloc dt=5 heapalloc_value=12195144
+HeapAlloc dt=7 heapalloc_value=12203336
+HeapAlloc dt=5 heapalloc_value=12211528
+HeapAlloc dt=6 heapalloc_value=12219720
+HeapAlloc dt=5 heapalloc_value=12227912
+HeapAlloc dt=6 heapalloc_value=12236104
+HeapAlloc dt=6 heapalloc_value=12244296
+HeapAlloc dt=6 heapalloc_value=12252488
+HeapAlloc dt=5 heapalloc_value=12260680
+HeapAlloc dt=46 heapalloc_value=12268872
+HeapAlloc dt=6 heapalloc_value=12277064
+HeapAlloc dt=6 heapalloc_value=12285256
+HeapAlloc dt=6 heapalloc_value=12293448
+HeapAlloc dt=5 heapalloc_value=12301640
+HeapAlloc dt=6 heapalloc_value=12309832
+HeapAlloc dt=5 heapalloc_value=12318024
+HeapAlloc dt=6 heapalloc_value=12326216
+HeapAlloc dt=5 heapalloc_value=12334408
+HeapAlloc dt=6 heapalloc_value=12342600
+HeapAlloc dt=5 heapalloc_value=12350792
+HeapAlloc dt=6 heapalloc_value=12358984
+HeapAlloc dt=5 heapalloc_value=12367176
+HeapAlloc dt=6 heapalloc_value=12375368
+HeapAlloc dt=37 heapalloc_value=12383560
+HeapAlloc dt=7 heapalloc_value=12391752
+HeapAlloc dt=6 heapalloc_value=12399944
+HeapAlloc dt=5 heapalloc_value=12408136
+HeapAlloc dt=6 heapalloc_value=12416328
+HeapAlloc dt=6 heapalloc_value=12424520
+HeapAlloc dt=13 heapalloc_value=12686664
+HeapAlloc dt=2516 heapalloc_value=12694856
+HeapAlloc dt=9 heapalloc_value=12703048
+HeapAlloc dt=8 heapalloc_value=12711240
+HeapAlloc dt=7 heapalloc_value=12719432
+HeapAlloc dt=8 heapalloc_value=12727624
+HeapAlloc dt=7 heapalloc_value=12735816
+HeapAlloc dt=8 heapalloc_value=12744008
+HeapAlloc dt=7 heapalloc_value=12752200
+HeapAlloc dt=8 heapalloc_value=12760392
+HeapAlloc dt=7 heapalloc_value=12768584
+HeapAlloc dt=7 heapalloc_value=12776776
+HeapAlloc dt=8 heapalloc_value=12784968
+HeapAlloc dt=7 heapalloc_value=12793160
+HeapAlloc dt=8 heapalloc_value=12801352
+HeapAlloc dt=8 heapalloc_value=12809544
+HeapAlloc dt=7 heapalloc_value=12817736
+HeapAlloc dt=7 heapalloc_value=12825928
+HeapAlloc dt=8 heapalloc_value=12834120
+HeapAlloc dt=7 heapalloc_value=12842312
+HeapAlloc dt=8 heapalloc_value=12850504
+HeapAlloc dt=8 heapalloc_value=12858696
+HeapAlloc dt=7 heapalloc_value=12866888
+HeapAlloc dt=13 heapalloc_value=12875080
+HeapAlloc dt=8 heapalloc_value=12883272
+HeapAlloc dt=5 heapalloc_value=12891464
+HeapAlloc dt=6 heapalloc_value=12899656
+HeapAlloc dt=6 heapalloc_value=12907848
+HeapAlloc dt=5 heapalloc_value=12916040
+HeapAlloc dt=6 heapalloc_value=12924232
+HeapAlloc dt=6 heapalloc_value=12932424
+HeapAlloc dt=5 heapalloc_value=12940616
+HeapAlloc dt=6 heapalloc_value=12948808
+HeapAlloc dt=5 heapalloc_value=12957000
+HeapAlloc dt=6 heapalloc_value=12965192
+HeapAlloc dt=5 heapalloc_value=12973384
+HeapAlloc dt=6 heapalloc_value=12981576
+HeapAlloc dt=6 heapalloc_value=12989768
+HeapAlloc dt=5 heapalloc_value=12997960
+HeapAlloc dt=6 heapalloc_value=13006152
+HeapAlloc dt=6 heapalloc_value=13014344
+HeapAlloc dt=5 heapalloc_value=13022536
+HeapAlloc dt=6 heapalloc_value=13030728
+HeapAlloc dt=5 heapalloc_value=13038920
+HeapAlloc dt=62 heapalloc_value=13047112
+HeapAlloc dt=39 heapalloc_value=13055304
+HeapAlloc dt=7 heapalloc_value=13063496
+HeapAlloc dt=6 heapalloc_value=13071688
+HeapAlloc dt=6 heapalloc_value=13079880
+HeapAlloc dt=6 heapalloc_value=13088072
+HeapAlloc dt=5 heapalloc_value=13096264
+HeapAlloc dt=5 heapalloc_value=13104456
+HeapAlloc dt=6 heapalloc_value=13112648
+HeapAlloc dt=6 heapalloc_value=13120840
+HeapAlloc dt=5 heapalloc_value=13129032
+HeapAlloc dt=10 heapalloc_value=13137224
+HeapAlloc dt=6 heapalloc_value=13145416
+HeapAlloc dt=5 heapalloc_value=13153608
+HeapAlloc dt=6 heapalloc_value=13161800
+GoBlock dt=12 reason_string=19 stack=21
+ProcStop dt=124
+ProcStart dt=17212 p=2 p_seq=9
+ProcStop dt=13
+ProcStart dt=1068 p=0 p_seq=20
+ProcStop dt=8
+ProcStart dt=16756 p=0 p_seq=21
+GoUnblock dt=11 g=1 g_seq=46 stack=0
+GoStart dt=92 g=1 g_seq=47
+HeapAlloc dt=19 heapalloc_value=14193992
+HeapAlloc dt=10 heapalloc_value=14202184
+HeapAlloc dt=6 heapalloc_value=14210376
+HeapAlloc dt=6 heapalloc_value=14218568
+HeapAlloc dt=6 heapalloc_value=14226760
+HeapAlloc dt=6 heapalloc_value=14234952
+HeapAlloc dt=6 heapalloc_value=14243144
+HeapAlloc dt=6 heapalloc_value=14251336
+HeapAlloc dt=6 heapalloc_value=14259528
+HeapAlloc dt=6 heapalloc_value=14267720
+HeapAlloc dt=5 heapalloc_value=14275912
+HeapAlloc dt=6 heapalloc_value=14284104
+HeapAlloc dt=6 heapalloc_value=14292296
+HeapAlloc dt=6 heapalloc_value=14300488
+HeapAlloc dt=60 heapalloc_value=14308680
+HeapAlloc dt=8 heapalloc_value=14316872
+HeapAlloc dt=6 heapalloc_value=14325064
+HeapAlloc dt=6 heapalloc_value=14333256
+HeapAlloc dt=6 heapalloc_value=14341448
+HeapAlloc dt=5 heapalloc_value=14349640
+HeapAlloc dt=6 heapalloc_value=14357832
+HeapAlloc dt=6 heapalloc_value=14366024
+HeapAlloc dt=6 heapalloc_value=14374216
+HeapAlloc dt=6 heapalloc_value=14382408
+HeapAlloc dt=8 heapalloc_value=14390600
+HeapAlloc dt=6 heapalloc_value=14398792
+HeapAlloc dt=6 heapalloc_value=14406984
+HeapAlloc dt=6 heapalloc_value=14415176
+HeapAlloc dt=6 heapalloc_value=14423368
+HeapAlloc dt=5 heapalloc_value=14431560
+HeapAlloc dt=6 heapalloc_value=14439752
+HeapAlloc dt=7 heapalloc_value=14447944
+HeapAlloc dt=5 heapalloc_value=14456136
+HeapAlloc dt=6 heapalloc_value=14464328
+HeapAlloc dt=6 heapalloc_value=14472520
+HeapAlloc dt=5 heapalloc_value=14480712
+HeapAlloc dt=6 heapalloc_value=14488904
+HeapAlloc dt=6 heapalloc_value=14497096
+HeapAlloc dt=6 heapalloc_value=14505288
+HeapAlloc dt=6 heapalloc_value=14513480
+HeapAlloc dt=6 heapalloc_value=14521672
+HeapAlloc dt=6 heapalloc_value=14529864
+HeapAlloc dt=5 heapalloc_value=14538056
+HeapAlloc dt=6 heapalloc_value=14546248
+HeapAlloc dt=6 heapalloc_value=14554440
+HeapAlloc dt=5 heapalloc_value=14562632
+HeapAlloc dt=6 heapalloc_value=14570824
+HeapAlloc dt=6 heapalloc_value=14579016
+HeapAlloc dt=6 heapalloc_value=14587208
+HeapAlloc dt=6 heapalloc_value=14595400
+HeapAlloc dt=5 heapalloc_value=14603592
+HeapAlloc dt=6 heapalloc_value=14611784
+HeapAlloc dt=45 heapalloc_value=14619976
+HeapAlloc dt=7 heapalloc_value=14628168
+HeapAlloc dt=6 heapalloc_value=14636360
+HeapAlloc dt=7 heapalloc_value=14644552
+HeapAlloc dt=5 heapalloc_value=14652744
+HeapAlloc dt=6 heapalloc_value=14660936
+HeapAlloc dt=6 heapalloc_value=14669128
+HeapAlloc dt=5 heapalloc_value=14677320
+HeapAlloc dt=6 heapalloc_value=14685512
+HeapAlloc dt=6 heapalloc_value=14693704
+HeapAlloc dt=6 heapalloc_value=14701896
+HeapAlloc dt=15 heapalloc_value=14710088
+HeapAlloc dt=6 heapalloc_value=14718280
+HeapAlloc dt=5 heapalloc_value=14726472
+HeapAlloc dt=35 heapalloc_value=14734664
+HeapAlloc dt=7 heapalloc_value=14742856
+HeapAlloc dt=6 heapalloc_value=14751048
+HeapAlloc dt=6 heapalloc_value=14759240
+HeapAlloc dt=6 heapalloc_value=14767432
+HeapAlloc dt=6 heapalloc_value=14775624
+HeapAlloc dt=6 heapalloc_value=14783816
+HeapAlloc dt=6 heapalloc_value=14792008
+HeapAlloc dt=5 heapalloc_value=14800200
+HeapAlloc dt=6 heapalloc_value=14808392
+HeapAlloc dt=5 heapalloc_value=14816584
+HeapAlloc dt=6 heapalloc_value=14824776
+HeapAlloc dt=6 heapalloc_value=14832968
+HeapAlloc dt=6 heapalloc_value=14841160
+HeapAlloc dt=6 heapalloc_value=14849352
+HeapAlloc dt=45 heapalloc_value=14857544
+HeapAlloc dt=6 heapalloc_value=14865736
+HeapAlloc dt=5 heapalloc_value=14873928
+HeapAlloc dt=6 heapalloc_value=14882120
+HeapAlloc dt=6 heapalloc_value=14890312
+HeapAlloc dt=6 heapalloc_value=14898504
+HeapAlloc dt=6 heapalloc_value=14906696
+HeapAlloc dt=6 heapalloc_value=14914888
+HeapAlloc dt=5 heapalloc_value=14923080
+HeapAlloc dt=6 heapalloc_value=14931272
+HeapAlloc dt=6 heapalloc_value=14939464
+HeapAlloc dt=5 heapalloc_value=14947656
+HeapAlloc dt=6 heapalloc_value=14955848
+HeapAlloc dt=6 heapalloc_value=14964040
+HeapAlloc dt=6 heapalloc_value=14972232
+HeapAlloc dt=5 heapalloc_value=14980424
+HeapAlloc dt=6 heapalloc_value=14988616
+HeapAlloc dt=6 heapalloc_value=14996808
+HeapAlloc dt=5 heapalloc_value=15005000
+HeapAlloc dt=6 heapalloc_value=15013192
+HeapAlloc dt=6 heapalloc_value=15021384
+HeapAlloc dt=6 heapalloc_value=15029576
+HeapAlloc dt=6 heapalloc_value=15037768
+HeapAlloc dt=6 heapalloc_value=15045960
+HeapAlloc dt=5 heapalloc_value=15054152
+HeapAlloc dt=6 heapalloc_value=15062344
+HeapAlloc dt=6 heapalloc_value=15070536
+HeapAlloc dt=6 heapalloc_value=15078728
+HeapAlloc dt=5 heapalloc_value=15086920
+HeapAlloc dt=6 heapalloc_value=15095112
+HeapAlloc dt=6 heapalloc_value=15103304
+HeapAlloc dt=5 heapalloc_value=15111496
+HeapAlloc dt=6 heapalloc_value=15119688
+HeapAlloc dt=6 heapalloc_value=15127880
+HeapAlloc dt=5 heapalloc_value=15136072
+HeapAlloc dt=51 heapalloc_value=15471944
+HeapAlloc dt=2533 heapalloc_value=15480136
+HeapAlloc dt=11 heapalloc_value=15488328
+HeapAlloc dt=9 heapalloc_value=15496520
+HeapAlloc dt=7 heapalloc_value=15504712
+HeapAlloc dt=9 heapalloc_value=15512904
+HeapAlloc dt=9 heapalloc_value=15521096
+HeapAlloc dt=7 heapalloc_value=15529288
+HeapAlloc dt=8 heapalloc_value=15537480
+HeapAlloc dt=8 heapalloc_value=15545672
+GoBlock dt=13 reason_string=19 stack=21
+ProcStop dt=116
+ProcStart dt=17265 p=2 p_seq=11
+ProcStop dt=10
+ProcStart dt=1450 p=0 p_seq=24
+ProcStop dt=9
+ProcStart dt=17026 p=0 p_seq=25
+GoUnblock dt=12 g=1 g_seq=50 stack=0
+GoStart dt=148 g=1 g_seq=51
+HeapAlloc dt=20 heapalloc_value=16577864
+HeapAlloc dt=15 heapalloc_value=16586056
+HeapAlloc dt=10 heapalloc_value=16594248
+HeapAlloc dt=11 heapalloc_value=16602440
+HeapAlloc dt=9 heapalloc_value=16610632
+HeapAlloc dt=9 heapalloc_value=16618824
+HeapAlloc dt=10 heapalloc_value=16627016
+HeapAlloc dt=9 heapalloc_value=16635208
+HeapAlloc dt=11 heapalloc_value=16643400
+HeapAlloc dt=11 heapalloc_value=16651592
+HeapAlloc dt=9 heapalloc_value=16659784
+HeapAlloc dt=11 heapalloc_value=16667976
+HeapAlloc dt=9 heapalloc_value=16676168
+HeapAlloc dt=10 heapalloc_value=16684360
+HeapAlloc dt=10 heapalloc_value=16692552
+HeapAlloc dt=10 heapalloc_value=16700744
+HeapAlloc dt=11 heapalloc_value=16708936
+HeapAlloc dt=11 heapalloc_value=16717128
+HeapAlloc dt=9 heapalloc_value=16725320
+HeapAlloc dt=78 heapalloc_value=16733512
+HeapAlloc dt=14 heapalloc_value=16741704
+HeapAlloc dt=10 heapalloc_value=16749896
+HeapAlloc dt=11 heapalloc_value=16758088
+HeapAlloc dt=11 heapalloc_value=16766280
+HeapAlloc dt=10 heapalloc_value=16774472
+HeapAlloc dt=9 heapalloc_value=16782664
+HeapAlloc dt=10 heapalloc_value=16790856
+HeapAlloc dt=9 heapalloc_value=16799048
+HeapAlloc dt=21 heapalloc_value=16807240
+HeapAlloc dt=11 heapalloc_value=16815432
+HeapAlloc dt=9 heapalloc_value=16823624
+HeapAlloc dt=9 heapalloc_value=16831816
+HeapAlloc dt=9 heapalloc_value=16840008
+HeapAlloc dt=10 heapalloc_value=16848200
+HeapAlloc dt=11 heapalloc_value=16856392
+HeapAlloc dt=9 heapalloc_value=16864584
+HeapAlloc dt=6 heapalloc_value=16872776
+HeapAlloc dt=9 heapalloc_value=16880968
+HeapAlloc dt=6 heapalloc_value=16889160
+HeapAlloc dt=6 heapalloc_value=16897352
+HeapAlloc dt=5 heapalloc_value=16905544
+HeapAlloc dt=6 heapalloc_value=16913736
+HeapAlloc dt=6 heapalloc_value=16921928
+HeapAlloc dt=5 heapalloc_value=16930120
+HeapAlloc dt=6 heapalloc_value=16938312
+HeapAlloc dt=5 heapalloc_value=16946504
+HeapAlloc dt=6 heapalloc_value=16954696
+HeapAlloc dt=5 heapalloc_value=16962888
+HeapAlloc dt=5 heapalloc_value=16971080
+HeapAlloc dt=5 heapalloc_value=16979272
+HeapAlloc dt=6 heapalloc_value=16987464
+HeapAlloc dt=5 heapalloc_value=16995656
+HeapAlloc dt=5 heapalloc_value=17003848
+HeapAlloc dt=6 heapalloc_value=17012040
+HeapAlloc dt=5 heapalloc_value=17020232
+HeapAlloc dt=6 heapalloc_value=17028424
+HeapAlloc dt=5 heapalloc_value=17036616
+HeapAlloc dt=53 heapalloc_value=17044808
+HeapAlloc dt=7 heapalloc_value=17053000
+HeapAlloc dt=5 heapalloc_value=17061192
+HeapAlloc dt=6 heapalloc_value=17069384
+HeapAlloc dt=11 heapalloc_value=17077576
+HeapAlloc dt=10 heapalloc_value=17085768
+HeapAlloc dt=5 heapalloc_value=17093960
+HeapAlloc dt=5 heapalloc_value=17102152
+HeapAlloc dt=6 heapalloc_value=17110344
+HeapAlloc dt=5 heapalloc_value=17118536
+HeapAlloc dt=5 heapalloc_value=17126728
+HeapAlloc dt=6 heapalloc_value=17134920
+HeapAlloc dt=5 heapalloc_value=17143112
+HeapAlloc dt=6 heapalloc_value=17151304
+HeapAlloc dt=37 heapalloc_value=17159496
+GCBegin dt=15 gc_seq=5 stack=22
+STWBegin dt=37 kind_string=22 stack=28
+GoUnblock dt=288 g=4 g_seq=9 stack=29
+ProcsChange dt=56 procs_value=8 stack=30
+STWEnd dt=23
+GCMarkAssistBegin dt=90 stack=31
+GCMarkAssistEnd dt=3424
+HeapAlloc dt=523 heapalloc_value=17175048
+HeapAlloc dt=21 heapalloc_value=17183240
+HeapAlloc dt=46 heapalloc_value=17191432
+HeapAlloc dt=96 heapalloc_value=17199624
+HeapAlloc dt=12 heapalloc_value=17207816
+HeapAlloc dt=12 heapalloc_value=17216008
+HeapAlloc dt=13 heapalloc_value=17224200
+HeapAlloc dt=10 heapalloc_value=17232392
+HeapAlloc dt=12 heapalloc_value=17240584
+HeapAlloc dt=13 heapalloc_value=17248776
+HeapAlloc dt=12 heapalloc_value=17256968
+HeapAlloc dt=14 heapalloc_value=17265160
+HeapAlloc dt=12 heapalloc_value=17273352
+HeapAlloc dt=12 heapalloc_value=17281544
+HeapAlloc dt=11 heapalloc_value=17289736
+HeapAlloc dt=13 heapalloc_value=17297928
+HeapAlloc dt=36 heapalloc_value=17306120
+HeapAlloc dt=12 heapalloc_value=17314312
+HeapAlloc dt=10 heapalloc_value=17322504
+HeapAlloc dt=12 heapalloc_value=17330696
+HeapAlloc dt=10 heapalloc_value=17338888
+HeapAlloc dt=11 heapalloc_value=17347080
+HeapAlloc dt=10 heapalloc_value=17355272
+HeapAlloc dt=10 heapalloc_value=17363464
+HeapAlloc dt=10 heapalloc_value=17371656
+HeapAlloc dt=11 heapalloc_value=17379848
+HeapAlloc dt=8 heapalloc_value=17388040
+HeapAlloc dt=13 heapalloc_value=17396232
+HeapAlloc dt=10 heapalloc_value=17404424
+HeapAlloc dt=13 heapalloc_value=17412616
+HeapAlloc dt=13 heapalloc_value=17420808
+HeapAlloc dt=10 heapalloc_value=17429000
+HeapAlloc dt=31 heapalloc_value=17437192
+HeapAlloc dt=6 heapalloc_value=17445384
+HeapAlloc dt=7 heapalloc_value=17453576
+HeapAlloc dt=6 heapalloc_value=17461768
+HeapAlloc dt=7 heapalloc_value=17469960
+HeapAlloc dt=7 heapalloc_value=17478152
+HeapAlloc dt=7 heapalloc_value=17486344
+HeapAlloc dt=7 heapalloc_value=17494536
+HeapAlloc dt=12 heapalloc_value=17502728
+HeapAlloc dt=7 heapalloc_value=17510920
+HeapAlloc dt=12 heapalloc_value=17519112
+HeapAlloc dt=13 heapalloc_value=17527304
+HeapAlloc dt=20 heapalloc_value=17535496
+HeapAlloc dt=15 heapalloc_value=17543688
+HeapAlloc dt=6 heapalloc_value=17551880
+HeapAlloc dt=7 heapalloc_value=17560072
+HeapAlloc dt=72 heapalloc_value=17568264
+HeapAlloc dt=37 heapalloc_value=17576456
+HeapAlloc dt=7 heapalloc_value=17584648
+HeapAlloc dt=7 heapalloc_value=17592840
+HeapAlloc dt=6 heapalloc_value=17601032
+GoBlock dt=13 reason_string=19 stack=21
+GoUnblock dt=157 g=24 g_seq=12 stack=0
+GoStart dt=7 g=24 g_seq=13
+GoLabel dt=1 label_string=2
+STWBegin dt=4128 kind_string=23 stack=37
+GoUnblock dt=64 g=25 g_seq=8 stack=38
+HeapAlloc dt=25 heapalloc_value=16970376
+GoUnblock dt=24 g=3 g_seq=5 stack=39
+GCEnd dt=6 gc_seq=6
+HeapGoal dt=7 heapgoal_value=34360936
+ProcsChange dt=46 procs_value=8 stack=40
+STWEnd dt=49
+GoBlock dt=756 reason_string=15 stack=27
+GoStart dt=10 g=3 g_seq=6
+GoBlock dt=14862 reason_string=14 stack=44
+ProcStop dt=25
+ProcStart dt=132428 p=0 p_seq=32
+GoStart dt=162 g=4 g_seq=12
+GoBlock dt=19 reason_string=15 stack=32
+ProcStop dt=20
+ProcStart dt=8304 p=0 p_seq=33
+GoStart dt=191 g=39 g_seq=1
+GoStop dt=306173 reason_string=16 stack=50
+GoStart dt=17 g=39 g_seq=2
+GoStop dt=315175 reason_string=16 stack=50
+GoStart dt=7 g=39 g_seq=3
+GoDestroy dt=159902
+ProcStop dt=50
+EventBatch gen=1 m=1709040 time=7689670148204 size=3534
+ProcStart dt=256 p=1 p_seq=1
+GoStart dt=186 g=6 g_seq=1
+HeapAlloc dt=320 heapalloc_value=2768896
+HeapAlloc dt=22 heapalloc_value=2777088
+GoBlock dt=229 reason_string=12 stack=15
+GoStart dt=12 g=8 g_seq=1
+HeapAlloc dt=15 heapalloc_value=2785280
+GoSyscallBegin dt=16 p_seq=2 stack=16
+GoSyscallEnd dt=254
+GoBlock dt=293 reason_string=15 stack=17
+GoStart dt=19 g=9 g_seq=1
+GoDestroy dt=156265
+ProcStop dt=44
+ProcStart dt=67218 p=1 p_seq=3
+ProcStop dt=19
+ProcStart dt=88214 p=1 p_seq=4
+ProcStop dt=13
+ProcStart dt=17539 p=0 p_seq=1
+ProcStop dt=14
+ProcStart dt=9071 p=4 p_seq=1
+GoUnblock dt=33 g=22 g_seq=2 stack=0
+GoStart dt=6 g=22 g_seq=3
+GoLabel dt=1 label_string=4
+GoUnblock dt=2321 g=1 g_seq=23 stack=34
+STWBegin dt=1205 kind_string=23 stack=37
+GoUnblock dt=78 g=24 g_seq=6 stack=38
+HeapAlloc dt=26 heapalloc_value=3840752
+GoStatus dt=14 g=3 m=18446744073709551615 gstatus=4
+GoUnblock dt=7 g=3 g_seq=1 stack=39
+GCEnd dt=3 gc_seq=2
+HeapGoal dt=6 heapgoal_value=8101720
+ProcsChange dt=43 procs_value=8 stack=40
+STWEnd dt=31
+GoBlock dt=4030 reason_string=15 stack=27
+GoStart dt=12 g=3 g_seq=2
+GoBlock dt=1406 reason_string=14 stack=44
+ProcStop dt=24
+ProcStart dt=34332 p=4 p_seq=4
+GoStart dt=153 g=4 g_seq=4
+GoBlock dt=20 reason_string=15 stack=32
+ProcStop dt=19
+ProcStart dt=1832 p=2 p_seq=5
+GoUnblock dt=22 g=24 g_seq=8 stack=0
+GoStart dt=102 g=24 g_seq=9
+GoLabel dt=1 label_string=2
+STWBegin dt=11769 kind_string=23 stack=37
+GoUnblock dt=60 g=1 g_seq=36 stack=38
+HeapAlloc dt=23 heapalloc_value=8744264
+GoUnblock dt=17 g=3 g_seq=3 stack=39
+GCEnd dt=6 gc_seq=4
+HeapGoal dt=7 heapgoal_value=17908728
+ProcsChange dt=47 procs_value=8 stack=40
+STWEnd dt=28
+GoBlock dt=572 reason_string=15 stack=27
+GoStart dt=13 g=3 g_seq=4
+GoBlock dt=5707 reason_string=14 stack=44
+ProcStop dt=16
+ProcStart dt=136502 p=1 p_seq=11
+GoStart dt=17 g=4 g_seq=8
+GoBlock dt=12 reason_string=15 stack=32
+ProcStop dt=22
+ProcStart dt=5977 p=6 p_seq=1
+ProcStop dt=34
+ProcStart dt=16775 p=2 p_seq=15
+ProcStop dt=23
+ProcStart dt=3966 p=1 p_seq=14
+ProcStop dt=15
+ProcStart dt=16753 p=1 p_seq=15
+GoUnblock dt=35 g=1 g_seq=57 stack=0
+GoStart dt=139 g=1 g_seq=58
+HeapAlloc dt=71 heapalloc_value=17593992
+HeapAlloc dt=47 heapalloc_value=17602184
+HeapAlloc dt=24 heapalloc_value=17610376
+HeapAlloc dt=97 heapalloc_value=17618568
+HeapAlloc dt=23 heapalloc_value=17626760
+HeapAlloc dt=18 heapalloc_value=17634952
+HeapAlloc dt=15 heapalloc_value=17643144
+HeapAlloc dt=18 heapalloc_value=17651336
+HeapAlloc dt=21 heapalloc_value=17659528
+HeapAlloc dt=28 heapalloc_value=17667720
+HeapAlloc dt=26 heapalloc_value=17675912
+HeapAlloc dt=23 heapalloc_value=17684104
+HeapAlloc dt=12 heapalloc_value=17692296
+HeapAlloc dt=12 heapalloc_value=17700488
+HeapAlloc dt=11 heapalloc_value=17708680
+HeapAlloc dt=15 heapalloc_value=17716872
+HeapAlloc dt=18 heapalloc_value=17725064
+HeapAlloc dt=15 heapalloc_value=17733256
+HeapAlloc dt=165 heapalloc_value=17741448
+HeapAlloc dt=16 heapalloc_value=17749640
+HeapAlloc dt=12 heapalloc_value=17757832
+HeapAlloc dt=15 heapalloc_value=17766024
+HeapAlloc dt=12 heapalloc_value=17774216
+HeapAlloc dt=12 heapalloc_value=17782408
+HeapAlloc dt=15 heapalloc_value=17790600
+HeapAlloc dt=11 heapalloc_value=17798792
+HeapAlloc dt=11 heapalloc_value=17806984
+HeapAlloc dt=12 heapalloc_value=17815176
+HeapAlloc dt=12 heapalloc_value=17823368
+HeapAlloc dt=15 heapalloc_value=17831560
+HeapAlloc dt=11 heapalloc_value=17839752
+HeapAlloc dt=12 heapalloc_value=17847944
+HeapAlloc dt=15 heapalloc_value=17856136
+HeapAlloc dt=11 heapalloc_value=17864328
+HeapAlloc dt=12 heapalloc_value=17872520
+HeapAlloc dt=12 heapalloc_value=17880712
+HeapAlloc dt=14 heapalloc_value=17888904
+HeapAlloc dt=42 heapalloc_value=17897096
+HeapAlloc dt=54 heapalloc_value=17905288
+HeapAlloc dt=49 heapalloc_value=17913480
+HeapAlloc dt=54 heapalloc_value=17921672
+HeapAlloc dt=56 heapalloc_value=17929864
+HeapAlloc dt=45 heapalloc_value=17938056
+HeapAlloc dt=57 heapalloc_value=17946248
+HeapAlloc dt=63 heapalloc_value=17954440
+HeapAlloc dt=57 heapalloc_value=17962632
+HeapAlloc dt=56 heapalloc_value=17970824
+HeapAlloc dt=62 heapalloc_value=17979016
+HeapAlloc dt=109 heapalloc_value=17987208
+HeapAlloc dt=59 heapalloc_value=17995400
+HeapAlloc dt=45 heapalloc_value=18003592
+HeapAlloc dt=61 heapalloc_value=18011784
+HeapAlloc dt=35 heapalloc_value=18019976
+HeapAlloc dt=16 heapalloc_value=18028168
+HeapAlloc dt=15 heapalloc_value=18036360
+HeapAlloc dt=15 heapalloc_value=18044552
+HeapAlloc dt=21 heapalloc_value=18052744
+HeapAlloc dt=16 heapalloc_value=18060936
+HeapAlloc dt=16 heapalloc_value=18069128
+HeapAlloc dt=22 heapalloc_value=18077320
+HeapAlloc dt=43 heapalloc_value=18085512
+HeapAlloc dt=46 heapalloc_value=18093704
+HeapAlloc dt=43 heapalloc_value=18101896
+HeapAlloc dt=42 heapalloc_value=18110088
+HeapAlloc dt=44 heapalloc_value=18118280
+HeapAlloc dt=35 heapalloc_value=18126472
+HeapAlloc dt=39 heapalloc_value=18134664
+HeapAlloc dt=40 heapalloc_value=18142856
+HeapAlloc dt=43 heapalloc_value=18151048
+HeapAlloc dt=44 heapalloc_value=18159240
+HeapAlloc dt=38 heapalloc_value=18167432
+HeapAlloc dt=42 heapalloc_value=18175624
+HeapAlloc dt=40 heapalloc_value=18183816
+HeapAlloc dt=40 heapalloc_value=18192008
+HeapAlloc dt=36 heapalloc_value=18200200
+HeapAlloc dt=55 heapalloc_value=18208392
+HeapAlloc dt=54 heapalloc_value=18216584
+HeapAlloc dt=54 heapalloc_value=18224776
+HeapAlloc dt=41 heapalloc_value=18232968
+HeapAlloc dt=58 heapalloc_value=18241160
+HeapAlloc dt=61 heapalloc_value=18249352
+HeapAlloc dt=55 heapalloc_value=18257544
+HeapAlloc dt=141 heapalloc_value=18265736
+HeapAlloc dt=55 heapalloc_value=18273928
+HeapAlloc dt=54 heapalloc_value=18282120
+HeapAlloc dt=50 heapalloc_value=18290312
+HeapAlloc dt=82 heapalloc_value=18298504
+HeapAlloc dt=64 heapalloc_value=18306696
+HeapAlloc dt=55 heapalloc_value=18314888
+HeapAlloc dt=58 heapalloc_value=18323080
+HeapAlloc dt=54 heapalloc_value=18331272
+HeapAlloc dt=57 heapalloc_value=18339464
+HeapAlloc dt=46 heapalloc_value=18347656
+HeapAlloc dt=41 heapalloc_value=18355848
+HeapAlloc dt=56 heapalloc_value=18364040
+HeapAlloc dt=50 heapalloc_value=18372232
+HeapAlloc dt=54 heapalloc_value=18380424
+HeapAlloc dt=56 heapalloc_value=18388616
+HeapAlloc dt=57 heapalloc_value=18396808
+HeapAlloc dt=55 heapalloc_value=18405000
+HeapAlloc dt=55 heapalloc_value=18413192
+HeapAlloc dt=51 heapalloc_value=18421384
+HeapAlloc dt=52 heapalloc_value=18429576
+HeapAlloc dt=67 heapalloc_value=18437768
+HeapAlloc dt=36 heapalloc_value=18445960
+HeapAlloc dt=28 heapalloc_value=18454152
+HeapAlloc dt=30 heapalloc_value=18462344
+HeapAlloc dt=40 heapalloc_value=18470536
+HeapAlloc dt=29 heapalloc_value=18478728
+HeapAlloc dt=37 heapalloc_value=18486920
+HeapAlloc dt=34 heapalloc_value=18495112
+HeapAlloc dt=73 heapalloc_value=18503304
+HeapAlloc dt=37 heapalloc_value=18511496
+HeapAlloc dt=38 heapalloc_value=18519688
+HeapAlloc dt=29 heapalloc_value=18527880
+HeapAlloc dt=35 heapalloc_value=18536072
+HeapAlloc dt=33 heapalloc_value=18544264
+HeapAlloc dt=40 heapalloc_value=18552456
+HeapAlloc dt=32 heapalloc_value=18560648
+HeapAlloc dt=42 heapalloc_value=18568840
+HeapAlloc dt=34 heapalloc_value=18577032
+HeapAlloc dt=37 heapalloc_value=18585224
+HeapAlloc dt=35 heapalloc_value=18593416
+HeapAlloc dt=39 heapalloc_value=18601608
+HeapAlloc dt=35 heapalloc_value=18609800
+GoBlock dt=51 reason_string=19 stack=21
+ProcStop dt=192
+ProcStart dt=17579 p=0 p_seq=27
+ProcStop dt=18
+ProcStart dt=1930 p=1 p_seq=18
+ProcStop dt=15
+ProcStart dt=16696 p=1 p_seq=19
+GoUnblock dt=22 g=1 g_seq=61 stack=0
+GoStart dt=125 g=1 g_seq=62
+HeapAlloc dt=53 heapalloc_value=19641992
+HeapAlloc dt=19 heapalloc_value=19650184
+HeapAlloc dt=20 heapalloc_value=19658376
+HeapAlloc dt=23 heapalloc_value=19666568
+HeapAlloc dt=16 heapalloc_value=19674760
+HeapAlloc dt=16 heapalloc_value=19682952
+HeapAlloc dt=19 heapalloc_value=19691144
+HeapAlloc dt=15 heapalloc_value=19699336
+HeapAlloc dt=12 heapalloc_value=19707528
+HeapAlloc dt=12 heapalloc_value=19715720
+HeapAlloc dt=13 heapalloc_value=19723912
+HeapAlloc dt=18 heapalloc_value=19732104
+HeapAlloc dt=12 heapalloc_value=19740296
+HeapAlloc dt=12 heapalloc_value=19748488
+HeapAlloc dt=9 heapalloc_value=19756680
+HeapAlloc dt=6 heapalloc_value=19764872
+HeapAlloc dt=5 heapalloc_value=19773064
+HeapAlloc dt=6 heapalloc_value=19781256
+HeapAlloc dt=5 heapalloc_value=19789448
+HeapAlloc dt=10 heapalloc_value=19797640
+HeapAlloc dt=5 heapalloc_value=19805832
+HeapAlloc dt=6 heapalloc_value=19814024
+HeapAlloc dt=9 heapalloc_value=19822216
+HeapAlloc dt=6 heapalloc_value=19830408
+HeapAlloc dt=117 heapalloc_value=19838600
+HeapAlloc dt=17 heapalloc_value=19846792
+HeapAlloc dt=5 heapalloc_value=19854984
+HeapAlloc dt=10 heapalloc_value=19863176
+HeapAlloc dt=6 heapalloc_value=19871368
+HeapAlloc dt=6 heapalloc_value=19879560
+HeapAlloc dt=9 heapalloc_value=19887752
+HeapAlloc dt=6 heapalloc_value=19895944
+HeapAlloc dt=6 heapalloc_value=19904136
+HeapAlloc dt=5 heapalloc_value=19912328
+HeapAlloc dt=6 heapalloc_value=19920520
+HeapAlloc dt=10 heapalloc_value=19928712
+HeapAlloc dt=5 heapalloc_value=19936904
+HeapAlloc dt=6 heapalloc_value=19945096
+HeapAlloc dt=9 heapalloc_value=19953288
+HeapAlloc dt=6 heapalloc_value=19961480
+HeapAlloc dt=35 heapalloc_value=19969672
+HeapAlloc dt=7 heapalloc_value=19977864
+HeapAlloc dt=5 heapalloc_value=19986056
+HeapAlloc dt=468 heapalloc_value=19994248
+HeapAlloc dt=14 heapalloc_value=20002440
+HeapAlloc dt=6 heapalloc_value=20010632
+HeapAlloc dt=10 heapalloc_value=20018824
+HeapAlloc dt=5 heapalloc_value=20027016
+HeapAlloc dt=6 heapalloc_value=20035208
+HeapAlloc dt=11 heapalloc_value=20043400
+HeapAlloc dt=6 heapalloc_value=20051592
+HeapAlloc dt=5 heapalloc_value=20059784
+HeapAlloc dt=6 heapalloc_value=20067976
+HeapAlloc dt=5 heapalloc_value=20076168
+HeapAlloc dt=7 heapalloc_value=20084360
+HeapAlloc dt=6 heapalloc_value=20092552
+HeapAlloc dt=5 heapalloc_value=20100744
+HeapAlloc dt=6 heapalloc_value=20108936
+HeapAlloc dt=6 heapalloc_value=20117128
+HeapAlloc dt=5 heapalloc_value=20125320
+HeapAlloc dt=6 heapalloc_value=20133512
+HeapAlloc dt=6 heapalloc_value=20141704
+HeapAlloc dt=7 heapalloc_value=20149896
+HeapAlloc dt=5 heapalloc_value=20158088
+HeapAlloc dt=6 heapalloc_value=20166280
+HeapAlloc dt=5 heapalloc_value=20174472
+HeapAlloc dt=6 heapalloc_value=20182664
+HeapAlloc dt=6 heapalloc_value=20190856
+HeapAlloc dt=5 heapalloc_value=20199048
+HeapAlloc dt=5 heapalloc_value=20207240
+HeapAlloc dt=6 heapalloc_value=20215432
+HeapAlloc dt=6 heapalloc_value=20223624
+HeapAlloc dt=5 heapalloc_value=20231816
+HeapAlloc dt=6 heapalloc_value=20240008
+HeapAlloc dt=5 heapalloc_value=20248200
+HeapAlloc dt=5 heapalloc_value=20256392
+HeapAlloc dt=6 heapalloc_value=20264584
+HeapAlloc dt=5 heapalloc_value=20272776
+HeapAlloc dt=6 heapalloc_value=20280968
+HeapAlloc dt=5 heapalloc_value=20289160
+HeapAlloc dt=6 heapalloc_value=20297352
+HeapAlloc dt=5 heapalloc_value=20305544
+HeapAlloc dt=6 heapalloc_value=20313736
+HeapAlloc dt=5 heapalloc_value=20321928
+HeapAlloc dt=6 heapalloc_value=20330120
+HeapAlloc dt=5 heapalloc_value=20338312
+HeapAlloc dt=6 heapalloc_value=20346504
+HeapAlloc dt=6 heapalloc_value=20354696
+HeapAlloc dt=62 heapalloc_value=20362888
+HeapAlloc dt=7 heapalloc_value=20371080
+HeapAlloc dt=5 heapalloc_value=20379272
+HeapAlloc dt=6 heapalloc_value=20387464
+HeapAlloc dt=37 heapalloc_value=20395656
+HeapAlloc dt=7 heapalloc_value=20403848
+HeapAlloc dt=6 heapalloc_value=20412040
+HeapAlloc dt=5 heapalloc_value=20420232
+HeapAlloc dt=6 heapalloc_value=20428424
+HeapAlloc dt=5 heapalloc_value=20436616
+HeapAlloc dt=6 heapalloc_value=20444808
+HeapAlloc dt=5 heapalloc_value=20453000
+HeapAlloc dt=6 heapalloc_value=20461192
+HeapAlloc dt=5 heapalloc_value=20469384
+HeapAlloc dt=6 heapalloc_value=20477576
+HeapAlloc dt=5 heapalloc_value=20485768
+HeapAlloc dt=6 heapalloc_value=20493960
+HeapAlloc dt=5 heapalloc_value=20502152
+HeapAlloc dt=6 heapalloc_value=20510344
+HeapAlloc dt=9 heapalloc_value=20518536
+HeapAlloc dt=6 heapalloc_value=20526728
+HeapAlloc dt=5 heapalloc_value=20534920
+HeapAlloc dt=6 heapalloc_value=20543112
+HeapAlloc dt=5 heapalloc_value=20551304
+HeapAlloc dt=6 heapalloc_value=20559496
+HeapAlloc dt=5 heapalloc_value=20567688
+HeapAlloc dt=6 heapalloc_value=20575880
+HeapAlloc dt=5 heapalloc_value=20584072
+HeapAlloc dt=6 heapalloc_value=20592264
+HeapAlloc dt=38 heapalloc_value=20600456
+HeapAlloc dt=7 heapalloc_value=20608648
+HeapAlloc dt=5 heapalloc_value=20616840
+HeapAlloc dt=6 heapalloc_value=20625032
+HeapAlloc dt=5 heapalloc_value=20633224
+HeapAlloc dt=6 heapalloc_value=20641416
+HeapAlloc dt=5 heapalloc_value=20649608
+HeapAlloc dt=6 heapalloc_value=20657800
+GoBlock dt=12 reason_string=19 stack=21
+ProcStop dt=167
+ProcStart dt=17576 p=0 p_seq=29
+ProcStop dt=20
+ProcStart dt=3256 p=1 p_seq=22
+ProcStop dt=17
+ProcStart dt=16071 p=1 p_seq=23
+GoUnblock dt=21 g=1 g_seq=65 stack=0
+GoStart dt=124 g=1 g_seq=66
+HeapAlloc dt=51 heapalloc_value=22230664
+HeapAlloc dt=26 heapalloc_value=22238856
+HeapAlloc dt=16 heapalloc_value=22247048
+HeapAlloc dt=19 heapalloc_value=22255240
+HeapAlloc dt=19 heapalloc_value=22263432
+HeapAlloc dt=16 heapalloc_value=22271624
+HeapAlloc dt=16 heapalloc_value=22279816
+HeapAlloc dt=19 heapalloc_value=22288008
+HeapAlloc dt=18 heapalloc_value=22296200
+HeapAlloc dt=16 heapalloc_value=22304392
+HeapAlloc dt=12 heapalloc_value=22312584
+HeapAlloc dt=13 heapalloc_value=22320776
+HeapAlloc dt=15 heapalloc_value=22328968
+HeapAlloc dt=12 heapalloc_value=22337160
+HeapAlloc dt=6 heapalloc_value=22345352
+HeapAlloc dt=8 heapalloc_value=22353544
+HeapAlloc dt=6 heapalloc_value=22361736
+HeapAlloc dt=5 heapalloc_value=22369928
+HeapAlloc dt=25 heapalloc_value=22378120
+HeapAlloc dt=23 heapalloc_value=22386312
+HeapAlloc dt=9 heapalloc_value=22394504
+HeapAlloc dt=6 heapalloc_value=22402696
+HeapAlloc dt=5 heapalloc_value=22410888
+HeapAlloc dt=10 heapalloc_value=22419080
+HeapAlloc dt=5 heapalloc_value=22427272
+HeapAlloc dt=6 heapalloc_value=22435464
+HeapAlloc dt=5 heapalloc_value=22443656
+HeapAlloc dt=6 heapalloc_value=22451848
+HeapAlloc dt=8 heapalloc_value=22460040
+HeapAlloc dt=135 heapalloc_value=22468232
+HeapAlloc dt=8 heapalloc_value=22476424
+HeapAlloc dt=9 heapalloc_value=22484616
+HeapAlloc dt=6 heapalloc_value=22492808
+HeapAlloc dt=6 heapalloc_value=22501000
+HeapAlloc dt=6 heapalloc_value=22509192
+HeapAlloc dt=5 heapalloc_value=22517384
+HeapAlloc dt=9 heapalloc_value=22525576
+HeapAlloc dt=6 heapalloc_value=22533768
+HeapAlloc dt=6 heapalloc_value=22541960
+HeapAlloc dt=5 heapalloc_value=22550152
+HeapAlloc dt=6 heapalloc_value=22558344
+HeapAlloc dt=5 heapalloc_value=22566536
+HeapAlloc dt=6 heapalloc_value=22574728
+HeapAlloc dt=5 heapalloc_value=22582920
+HeapAlloc dt=9 heapalloc_value=22591112
+HeapAlloc dt=44 heapalloc_value=22599304
+HeapAlloc dt=7 heapalloc_value=22607496
+HeapAlloc dt=38 heapalloc_value=22615688
+HeapAlloc dt=6 heapalloc_value=22623880
+HeapAlloc dt=6 heapalloc_value=22632072
+HeapAlloc dt=6 heapalloc_value=22640264
+HeapAlloc dt=6 heapalloc_value=22648456
+HeapAlloc dt=6 heapalloc_value=22656648
+HeapAlloc dt=5 heapalloc_value=22664840
+HeapAlloc dt=6 heapalloc_value=22673032
+HeapAlloc dt=5 heapalloc_value=22681224
+HeapAlloc dt=6 heapalloc_value=22689416
+HeapAlloc dt=5 heapalloc_value=22697608
+HeapAlloc dt=6 heapalloc_value=22705800
+HeapAlloc dt=6 heapalloc_value=22713992
+HeapAlloc dt=5 heapalloc_value=22722184
+HeapAlloc dt=5 heapalloc_value=22730376
+HeapAlloc dt=6 heapalloc_value=22738568
+HeapAlloc dt=6 heapalloc_value=22746760
+HeapAlloc dt=5 heapalloc_value=22754952
+HeapAlloc dt=6 heapalloc_value=22763144
+HeapAlloc dt=6 heapalloc_value=22771336
+HeapAlloc dt=6 heapalloc_value=22779528
+HeapAlloc dt=5 heapalloc_value=22787720
+HeapAlloc dt=5 heapalloc_value=22795912
+HeapAlloc dt=6 heapalloc_value=22804104
+HeapAlloc dt=75 heapalloc_value=22812296
+HeapAlloc dt=7 heapalloc_value=22820488
+HeapAlloc dt=5 heapalloc_value=22828680
+HeapAlloc dt=6 heapalloc_value=22836872
+HeapAlloc dt=5 heapalloc_value=22845064
+HeapAlloc dt=6 heapalloc_value=22853256
+HeapAlloc dt=6 heapalloc_value=22861448
+HeapAlloc dt=5 heapalloc_value=22869640
+HeapAlloc dt=6 heapalloc_value=22877832
+HeapAlloc dt=5 heapalloc_value=22886024
+HeapAlloc dt=5 heapalloc_value=22894216
+HeapAlloc dt=6 heapalloc_value=22902408
+HeapAlloc dt=7 heapalloc_value=22910600
+HeapAlloc dt=6 heapalloc_value=22918792
+HeapAlloc dt=5 heapalloc_value=22926984
+HeapAlloc dt=6 heapalloc_value=22935176
+HeapAlloc dt=6 heapalloc_value=22943368
+HeapAlloc dt=6 heapalloc_value=22951560
+HeapAlloc dt=5 heapalloc_value=22959752
+HeapAlloc dt=6 heapalloc_value=22967944
+HeapAlloc dt=7 heapalloc_value=22976136
+HeapAlloc dt=5 heapalloc_value=22984328
+HeapAlloc dt=43 heapalloc_value=22992520
+HeapAlloc dt=7 heapalloc_value=23000712
+HeapAlloc dt=5 heapalloc_value=23008904
+HeapAlloc dt=6 heapalloc_value=23017096
+HeapAlloc dt=35 heapalloc_value=23025288
+HeapAlloc dt=7 heapalloc_value=23033480
+HeapAlloc dt=5 heapalloc_value=23041672
+HeapAlloc dt=5 heapalloc_value=23049864
+HeapAlloc dt=6 heapalloc_value=23058056
+HeapAlloc dt=5 heapalloc_value=23066248
+HeapAlloc dt=6 heapalloc_value=23074440
+HeapAlloc dt=5 heapalloc_value=23082632
+HeapAlloc dt=6 heapalloc_value=23090824
+HeapAlloc dt=5 heapalloc_value=23099016
+HeapAlloc dt=6 heapalloc_value=23107208
+HeapAlloc dt=5 heapalloc_value=23115400
+HeapAlloc dt=6 heapalloc_value=23123592
+HeapAlloc dt=5 heapalloc_value=23131784
+HeapAlloc dt=12 heapalloc_value=23139976
+HeapAlloc dt=5 heapalloc_value=23148168
+HeapAlloc dt=6 heapalloc_value=23156360
+HeapAlloc dt=5 heapalloc_value=23164552
+HeapAlloc dt=6 heapalloc_value=23172744
+HeapAlloc dt=5 heapalloc_value=23180936
+HeapAlloc dt=6 heapalloc_value=23189128
+HeapAlloc dt=5 heapalloc_value=23197320
+HeapAlloc dt=7 heapalloc_value=23205512
+HeapAlloc dt=5 heapalloc_value=23213704
+HeapAlloc dt=6 heapalloc_value=23221896
+HeapAlloc dt=38 heapalloc_value=23230088
+HeapAlloc dt=7 heapalloc_value=23238280
+HeapAlloc dt=5 heapalloc_value=23246472
+GoBlock dt=9 reason_string=19 stack=21
+ProcStop dt=164
+ProcStart dt=17494 p=0 p_seq=31
+ProcStop dt=25
+ProcStart dt=1701 p=1 p_seq=26
+ProcStop dt=16
+ProcStart dt=16748 p=2 p_seq=17
+GoUnblock dt=36 g=1 g_seq=71 stack=0
+GoStart dt=149 g=1 g_seq=72
+HeapAlloc dt=67 heapalloc_value=25302664
+HeapAlloc dt=38 heapalloc_value=25310856
+HeapAlloc dt=23 heapalloc_value=25319048
+HeapAlloc dt=17 heapalloc_value=25327240
+HeapAlloc dt=21 heapalloc_value=25335432
+HeapAlloc dt=17 heapalloc_value=25343624
+HeapAlloc dt=17 heapalloc_value=25351816
+HeapAlloc dt=16 heapalloc_value=25360008
+HeapAlloc dt=19 heapalloc_value=25368200
+HeapAlloc dt=16 heapalloc_value=25376392
+HeapAlloc dt=16 heapalloc_value=25384584
+HeapAlloc dt=16 heapalloc_value=25392776
+HeapAlloc dt=17 heapalloc_value=25400968
+HeapAlloc dt=16 heapalloc_value=25409160
+HeapAlloc dt=9 heapalloc_value=25417352
+HeapAlloc dt=9 heapalloc_value=25425544
+HeapAlloc dt=9 heapalloc_value=25433736
+HeapAlloc dt=10 heapalloc_value=25441928
+HeapAlloc dt=9 heapalloc_value=25450120
+HeapAlloc dt=10 heapalloc_value=25458312
+HeapAlloc dt=9 heapalloc_value=25466504
+HeapAlloc dt=6 heapalloc_value=25474696
+HeapAlloc dt=5 heapalloc_value=25482888
+HeapAlloc dt=6 heapalloc_value=25491080
+HeapAlloc dt=9 heapalloc_value=25499272
+HeapAlloc dt=6 heapalloc_value=25507464
+HeapAlloc dt=8 heapalloc_value=25515656
+HeapAlloc dt=7 heapalloc_value=25523848
+HeapAlloc dt=10 heapalloc_value=25532040
+HeapAlloc dt=9 heapalloc_value=25540232
+HeapAlloc dt=102 heapalloc_value=25548424
+HeapAlloc dt=7 heapalloc_value=25556616
+HeapAlloc dt=10 heapalloc_value=25564808
+HeapAlloc dt=5 heapalloc_value=25573000
+HeapAlloc dt=5 heapalloc_value=25581192
+HeapAlloc dt=36 heapalloc_value=25589384
+HeapAlloc dt=8 heapalloc_value=25597576
+HeapAlloc dt=5 heapalloc_value=25605768
+HeapAlloc dt=43 heapalloc_value=25613960
+HeapAlloc dt=7 heapalloc_value=25622152
+HeapAlloc dt=10 heapalloc_value=25630344
+HeapAlloc dt=6 heapalloc_value=25638536
+HeapAlloc dt=6 heapalloc_value=25646728
+HeapAlloc dt=6 heapalloc_value=25654920
+HeapAlloc dt=7 heapalloc_value=25663112
+HeapAlloc dt=5 heapalloc_value=25671304
+HeapAlloc dt=6 heapalloc_value=25679496
+HeapAlloc dt=41 heapalloc_value=25687688
+HeapAlloc dt=13 heapalloc_value=25695880
+HeapAlloc dt=5 heapalloc_value=25704072
+HeapAlloc dt=6 heapalloc_value=25712264
+HeapAlloc dt=13 heapalloc_value=25720456
+HeapAlloc dt=13 heapalloc_value=25728648
+HeapAlloc dt=5 heapalloc_value=25736840
+HeapAlloc dt=6 heapalloc_value=25745032
+HeapAlloc dt=6 heapalloc_value=25753224
+HeapAlloc dt=9 heapalloc_value=25761416
+HeapAlloc dt=6 heapalloc_value=25769608
+HeapAlloc dt=5 heapalloc_value=25777800
+HeapAlloc dt=6 heapalloc_value=25785992
+HeapAlloc dt=5 heapalloc_value=25794184
+HeapAlloc dt=6 heapalloc_value=25802376
+HeapAlloc dt=5 heapalloc_value=25810568
+HeapAlloc dt=6 heapalloc_value=25818760
+HeapAlloc dt=10 heapalloc_value=25826952
+HeapAlloc dt=6 heapalloc_value=25835144
+HeapAlloc dt=6 heapalloc_value=25843336
+HeapAlloc dt=5 heapalloc_value=25851528
+HeapAlloc dt=6 heapalloc_value=25859720
+HeapAlloc dt=5 heapalloc_value=25867912
+HeapAlloc dt=6 heapalloc_value=25876104
+HeapAlloc dt=6 heapalloc_value=25884296
+HeapAlloc dt=7 heapalloc_value=25892488
+HeapAlloc dt=6 heapalloc_value=25900680
+HeapAlloc dt=5 heapalloc_value=25908872
+HeapAlloc dt=6 heapalloc_value=25917064
+HeapAlloc dt=6 heapalloc_value=25925256
+HeapAlloc dt=5 heapalloc_value=25933448
+HeapAlloc dt=6 heapalloc_value=25941640
+HeapAlloc dt=6 heapalloc_value=25949832
+HeapAlloc dt=6 heapalloc_value=25958024
+HeapAlloc dt=5 heapalloc_value=25966216
+HeapAlloc dt=6 heapalloc_value=25974408
+HeapAlloc dt=5 heapalloc_value=25982600
+HeapAlloc dt=6 heapalloc_value=25990792
+HeapAlloc dt=6 heapalloc_value=25998984
+HeapAlloc dt=5 heapalloc_value=26007176
+HeapAlloc dt=6 heapalloc_value=26015368
+HeapAlloc dt=6 heapalloc_value=26023560
+HeapAlloc dt=6 heapalloc_value=26031752
+HeapAlloc dt=5 heapalloc_value=26039944
+HeapAlloc dt=6 heapalloc_value=26048136
+HeapAlloc dt=5 heapalloc_value=26056328
+HeapAlloc dt=6 heapalloc_value=26064520
+HeapAlloc dt=94 heapalloc_value=26072712
+HeapAlloc dt=7 heapalloc_value=26080904
+HeapAlloc dt=5 heapalloc_value=26089096
+HeapAlloc dt=6 heapalloc_value=26097288
+HeapAlloc dt=6 heapalloc_value=26105480
+HeapAlloc dt=5 heapalloc_value=26113672
+HeapAlloc dt=6 heapalloc_value=26121864
+HeapAlloc dt=6 heapalloc_value=26130056
+HeapAlloc dt=5 heapalloc_value=26138248
+HeapAlloc dt=6 heapalloc_value=26146440
+HeapAlloc dt=6 heapalloc_value=26154632
+HeapAlloc dt=5 heapalloc_value=26162824
+HeapAlloc dt=1696 heapalloc_value=26171016
+HeapAlloc dt=7 heapalloc_value=26179208
+HeapAlloc dt=6 heapalloc_value=26187400
+HeapAlloc dt=5 heapalloc_value=26195592
+HeapAlloc dt=6 heapalloc_value=26203784
+HeapAlloc dt=5 heapalloc_value=26211976
+HeapAlloc dt=47 heapalloc_value=26220168
+HeapAlloc dt=8 heapalloc_value=26228360
+HeapAlloc dt=5 heapalloc_value=26236552
+HeapAlloc dt=6 heapalloc_value=26244744
+HeapAlloc dt=6 heapalloc_value=26252936
+HeapAlloc dt=5 heapalloc_value=26261128
+HeapAlloc dt=6 heapalloc_value=26269320
+HeapAlloc dt=5 heapalloc_value=26277512
+HeapAlloc dt=6 heapalloc_value=26285704
+HeapAlloc dt=6 heapalloc_value=26293896
+HeapAlloc dt=5 heapalloc_value=26302088
+HeapAlloc dt=6 heapalloc_value=26310280
+HeapAlloc dt=6 heapalloc_value=26318472
+HeapAlloc dt=30 heapalloc_value=26326360
+HeapAlloc dt=30 heapalloc_value=26334536
+HeapAlloc dt=24 heapalloc_value=26336904
+GoCreate dt=72 new_g=34 new_stack=47 stack=48
+GoCreate dt=183 new_g=35 new_stack=47 stack=48
+GoCreate dt=15 new_g=36 new_stack=47 stack=48
+GoCreate dt=12 new_g=37 new_stack=47 stack=48
+GoCreate dt=14 new_g=38 new_stack=47 stack=48
+HeapAlloc dt=25 heapalloc_value=26344200
+GoCreate dt=9 new_g=39 new_stack=47 stack=48
+GoCreate dt=13 new_g=40 new_stack=47 stack=48
+GoCreate dt=4 new_g=41 new_stack=47 stack=48
+HeapAlloc dt=17 heapalloc_value=26351912
+GoBlock dt=15 reason_string=10 stack=49
+GoStart dt=5 g=41 g_seq=1
+GoStop dt=307427 reason_string=16 stack=51
+GoStart dt=34 g=41 g_seq=2
+GoStop dt=315328 reason_string=16 stack=50
+GoStart dt=10 g=41 g_seq=3
+GoDestroy dt=158464
+ProcStop dt=40
+EventBatch gen=1 m=1709039 time=7689670530705 size=53
+GoUnblock dt=117 g=4 g_seq=3 stack=0
+GoUnblock dt=157408 g=4 g_seq=7 stack=0
+GoUnblock dt=157553 g=4 g_seq=11 stack=0
+ProcSteal dt=947714 p=7 p_seq=9 m=1709048
+ProcSteal dt=646055 p=7 p_seq=13 m=1709046
+ProcSteal dt=5677 p=5 p_seq=11 m=1709046
+ProcSteal dt=1312 p=6 p_seq=9 m=1709048
+EventBatch gen=1 m=1709038 time=7689670147327 size=336
+ProcStatus dt=56 p=0 pstatus=1
+GoStatus dt=4 g=1 m=1709038 gstatus=2
+ProcsChange dt=184 procs_value=8 stack=1
+STWBegin dt=81 kind_string=21 stack=2
+HeapGoal dt=5 heapgoal_value=4194304
+ProcStatus dt=2 p=1 pstatus=2
+ProcStatus dt=1 p=2 pstatus=2
+ProcStatus dt=1 p=3 pstatus=2
+ProcStatus dt=1 p=4 pstatus=2
+ProcStatus dt=1 p=5 pstatus=2
+ProcStatus dt=1 p=6 pstatus=2
+ProcStatus dt=1 p=7 pstatus=2
+ProcsChange dt=51 procs_value=8 stack=3
+STWEnd dt=74
+GoCreate dt=216 new_g=6 new_stack=4 stack=5
+HeapAlloc dt=174 heapalloc_value=2752512
+GoCreate dt=140 new_g=7 new_stack=6 stack=7
+HeapAlloc dt=16 heapalloc_value=2760704
+GoCreate dt=11 new_g=8 new_stack=8 stack=9
+GoCreate dt=197 new_g=9 new_stack=10 stack=11
+GoCreate dt=18 new_g=10 new_stack=12 stack=13
+GoBlock dt=159 reason_string=10 stack=14
+GoStart dt=10 g=10 g_seq=1
+GoStop dt=224159 reason_string=16 stack=19
+GoStart dt=105 g=10 g_seq=2
+GoUnblock dt=88262 g=1 g_seq=1 stack=20
+GoDestroy dt=111
+GoStart dt=10 g=1 g_seq=2
+GoBlock dt=18 reason_string=19 stack=21
+ProcStop dt=177
+ProcStart dt=22598 p=0 p_seq=2
+ProcStop dt=20
+ProcStart dt=30 p=2 p_seq=2
+ProcStop dt=1158
+ProcStart dt=1116 p=0 p_seq=4
+GoUnblock dt=19 g=25 g_seq=2 stack=0
+GoStart dt=130 g=25 g_seq=3
+GoLabel dt=1 label_string=2
+GoBlock dt=1809 reason_string=15 stack=27
+ProcStop dt=35
+ProcStart dt=45680 p=3 p_seq=4
+HeapAlloc dt=46 heapalloc_value=7659248
+HeapAlloc dt=48 heapalloc_value=7663408
+HeapAlloc dt=6065 heapalloc_value=7876144
+GoStart dt=2865 g=4 g_seq=6
+GoBlock dt=31 reason_string=15 stack=32
+ProcStop dt=49
+ProcStart dt=1490 p=3 p_seq=5
+ProcStop dt=29
+ProcStart dt=2071 p=1 p_seq=10
+ProcStop dt=21
+ProcStart dt=143297 p=2 p_seq=13
+GoUnblock dt=21 g=22 g_seq=6 stack=0
+GoStart dt=177 g=22 g_seq=7
+GoLabel dt=2 label_string=2
+GoBlock dt=2058 reason_string=15 stack=27
+ProcStop dt=2352
+ProcStart dt=162401 p=5 p_seq=2
+HeapAlloc dt=51 heapalloc_value=26353960
+HeapAlloc dt=42 heapalloc_value=26360360
+HeapAlloc dt=6510 heapalloc_value=26367784
+GoStart dt=1039 g=40 g_seq=1
+GoStop dt=297000 reason_string=16 stack=50
+GoStart dt=15 g=40 g_seq=2
+GoStop dt=315522 reason_string=16 stack=50
+GoStart dt=7 g=40 g_seq=3
+GoDestroy dt=168735
+ProcStop dt=43
+ProcStart dt=799345 p=6 p_seq=6
+ProcStop dt=33
+ProcStart dt=1506 p=6 p_seq=10
+ProcStop dt=26
+ProcStart dt=18634 p=7 p_seq=33
+ProcStop dt=34
+EventBatch gen=1 m=18446744073709551615 time=7689672466616 size=28
+GoStatus dt=61 g=2 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=5 m=18446744073709551615 gstatus=4
+EventBatch gen=1 m=18446744073709551615 time=7689672467258 size=4540
+Stacks
+Stack id=86 nframes=7
+	pc=4754167 func=24 file=25 line=736
+	pc=4814861 func=26 file=27 line=181
+	pc=4814837 func=28 file=29 line=736
+	pc=4814480 func=30 file=29 line=160
+	pc=4996132 func=31 file=32 line=55
+	pc=5032836 func=33 file=34 line=179
+	pc=5078635 func=35 file=36 line=73
+Stack id=77 nframes=16
+	pc=4756520 func=37 file=25 line=1442
+	pc=4751813 func=38 file=27 line=298
+	pc=4996815 func=39 file=40 line=59
+	pc=5049499 func=41 file=42 line=124
+	pc=5048282 func=43 file=42 line=70
+	pc=5021687 func=44 file=45 line=154
+	pc=5057739 func=46 file=47 line=85
+	pc=5057380 func=48 file=47 line=75
+	pc=5057381 func=49 file=47 line=71
+	pc=4965884 func=50 file=51 line=651
+	pc=4964173 func=52 file=51 line=616
+	pc=4961811 func=53 file=51 line=517
+	pc=4960409 func=54 file=51 line=508
+	pc=4958646 func=55 file=51 line=434
+	pc=4958647 func=56 file=51 line=401
+	pc=5078500 func=35 file=36 line=68
+Stack id=13 nframes=1
+	pc=5077820 func=35 file=36 line=28
+Stack id=65 nframes=2
+	pc=4224086 func=57 file=58 line=145
+	pc=5080123 func=59 file=36 line=94
+Stack id=21 nframes=3
+	pc=4640852 func=60 file=61 line=195
+	pc=5081128 func=62 file=36 line=125
+	pc=5077843 func=35 file=36 line=32
+Stack id=11 nframes=1
+	pc=5077754 func=35 file=36 line=27
+Stack id=10 nframes=1
+	pc=5080288 func=63 file=36 line=97
+Stack id=44 nframes=2
+	pc=4354430 func=64 file=65 line=408
+	pc=4354396 func=66 file=67 line=318
+Stack id=51 nframes=3
+	pc=4658586 func=68 file=69 line=53
+	pc=5080816 func=70 file=36 line=110
+	pc=5079149 func=71 file=36 line=40
+Stack id=36 nframes=7
+	pc=4310007 func=72 file=73 line=806
+	pc=4326610 func=74 file=75 line=562
+	pc=4258131 func=76 file=77 line=1353
+	pc=4255947 func=78 file=77 line=1025
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=57 nframes=5
+	pc=4753924 func=81 file=25 line=432
+	pc=4744496 func=82 file=83 line=118
+	pc=4823012 func=84 file=85 line=218
+	pc=4824373 func=86 file=87 line=21
+	pc=5079543 func=59 file=36 line=82
+Stack id=16 nframes=7
+	pc=4754618 func=88 file=25 line=964
+	pc=4816103 func=89 file=27 line=209
+	pc=4816095 func=28 file=29 line=736
+	pc=4815648 func=90 file=29 line=380
+	pc=4821008 func=91 file=92 line=46
+	pc=4821000 func=93 file=94 line=189
+	pc=5077114 func=95 file=96 line=134
+Stack id=63 nframes=1
+	pc=5080224 func=97 file=36 line=89
+Stack id=2 nframes=3
+	pc=4567556 func=98 file=99 line=239
+	pc=5076805 func=100 file=96 line=125
+	pc=5077595 func=35 file=36 line=20
+Stack id=80 nframes=15
+	pc=4998478 func=101 file=29 line=683
+	pc=4998507 func=39 file=40 line=141
+	pc=5049499 func=41 file=42 line=124
+	pc=5048282 func=43 file=42 line=70
+	pc=5021687 func=44 file=45 line=154
+	pc=5057739 func=46 file=47 line=85
+	pc=5057380 func=48 file=47 line=75
+	pc=5057381 func=49 file=47 line=71
+	pc=4965884 func=50 file=51 line=651
+	pc=4964173 func=52 file=51 line=616
+	pc=4961811 func=53 file=51 line=517
+	pc=4960409 func=54 file=51 line=508
+	pc=4958646 func=55 file=51 line=434
+	pc=4958647 func=56 file=51 line=401
+	pc=5078500 func=35 file=36 line=68
+Stack id=47 nframes=1
+	pc=5079072 func=71 file=36 line=38
+Stack id=55 nframes=2
+	pc=4227441 func=102 file=58 line=442
+	pc=5078106 func=35 file=36 line=48
+Stack id=5 nframes=4
+	pc=4576789 func=103 file=104 line=44
+	pc=4567832 func=98 file=99 line=258
+	pc=5076805 func=100 file=96 line=125
+	pc=5077595 func=35 file=36 line=20
+Stack id=46 nframes=3
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=8 nframes=1
+	pc=5077056 func=95 file=96 line=128
+Stack id=24 nframes=6
+	pc=4315620 func=105 file=73 line=1249
+	pc=4308860 func=106 file=73 line=662
+	pc=4257811 func=78 file=77 line=1308
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=37 nframes=1
+	pc=4316644 func=107 file=73 line=1469
+Stack id=79 nframes=5
+	pc=4817209 func=108 file=29 line=611
+	pc=5000296 func=109 file=40 line=172
+	pc=5058941 func=110 file=47 line=159
+	pc=5055951 func=111 file=112 line=327
+	pc=5078747 func=113 file=36 line=59
+Stack id=17 nframes=1
+	pc=5077124 func=95 file=96 line=130
+Stack id=41 nframes=2
+	pc=4310763 func=72 file=73 line=816
+	pc=4316644 func=107 file=73 line=1469
+Stack id=33 nframes=7
+	pc=4328420 func=114 file=75 line=747
+	pc=4326674 func=74 file=75 line=587
+	pc=4258131 func=76 file=77 line=1353
+	pc=4255947 func=78 file=77 line=1025
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=29 nframes=6
+	pc=4644903 func=115 file=116 line=474
+	pc=4309092 func=106 file=73 line=683
+	pc=4257811 func=78 file=77 line=1308
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=73 nframes=10
+	pc=4756296 func=117 file=25 line=1432
+	pc=4751685 func=118 file=27 line=290
+	pc=5051812 func=119 file=42 line=167
+	pc=5048051 func=43 file=42 line=57
+	pc=5021687 func=44 file=45 line=154
+	pc=5059172 func=120 file=47 line=189
+	pc=4967876 func=121 file=47 line=179
+	pc=4967838 func=122 file=51 line=734
+	pc=4968614 func=123 file=51 line=808
+	pc=5078215 func=35 file=36 line=53
+Stack id=92 nframes=2
+	pc=4640852 func=60 file=61 line=195
+	pc=5078782 func=113 file=36 line=63
+Stack id=32 nframes=2
+	pc=4344589 func=124 file=125 line=425
+	pc=4346072 func=126 file=125 line=658
+Stack id=45 nframes=1
+	pc=5077843 func=35 file=36 line=32
+Stack id=62 nframes=3
+	pc=4754167 func=24 file=25 line=736
+	pc=5079848 func=26 file=27 line=181
+	pc=5079785 func=59 file=36 line=90
+Stack id=15 nframes=3
+	pc=4227441 func=102 file=58 line=442
+	pc=4574090 func=127 file=99 line=937
+	pc=4576964 func=128 file=104 line=56
+Stack id=28 nframes=4
+	pc=4257811 func=78 file=77 line=1308
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=64 nframes=7
+	pc=4754618 func=88 file=25 line=964
+	pc=4816103 func=89 file=27 line=209
+	pc=4816095 func=28 file=29 line=736
+	pc=4815648 func=90 file=29 line=380
+	pc=4821008 func=91 file=92 line=46
+	pc=4821000 func=93 file=94 line=189
+	pc=5080260 func=97 file=36 line=89
+Stack id=91 nframes=8
+	pc=4757394 func=129 file=25 line=1488
+	pc=4819063 func=130 file=27 line=462
+	pc=4819041 func=131 file=132 line=17
+	pc=5060022 func=133 file=134 line=21
+	pc=5055784 func=135 file=112 line=257
+	pc=5058972 func=110 file=47 line=163
+	pc=5055951 func=111 file=112 line=327
+	pc=5078747 func=113 file=36 line=59
+Stack id=95 nframes=8
+	pc=4753732 func=136 file=25 line=335
+	pc=4813424 func=137 file=138 line=24
+	pc=4813394 func=139 file=29 line=81
+	pc=4811154 func=140 file=141 line=213
+	pc=4813572 func=142 file=29 line=104
+	pc=4996049 func=143 file=32 line=37
+	pc=5033653 func=144 file=34 line=203
+	pc=5078651 func=35 file=36 line=74
+Stack id=22 nframes=4
+	pc=4257811 func=78 file=77 line=1308
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=56 nframes=5
+	pc=4753924 func=81 file=25 line=432
+	pc=4744422 func=82 file=83 line=106
+	pc=4823012 func=84 file=85 line=218
+	pc=4824373 func=86 file=87 line=21
+	pc=5079543 func=59 file=36 line=82
+Stack id=60 nframes=5
+	pc=4753924 func=81 file=25 line=432
+	pc=4744422 func=82 file=83 line=106
+	pc=4813961 func=145 file=29 line=129
+	pc=5079772 func=146 file=85 line=90
+	pc=5079785 func=59 file=36 line=90
+Stack id=38 nframes=2
+	pc=4310679 func=72 file=73 line=914
+	pc=4316644 func=107 file=73 line=1469
+Stack id=52 nframes=3
+	pc=4708004 func=147 file=148 line=81
+	pc=5079238 func=149 file=148 line=87
+	pc=5079164 func=71 file=36 line=41
+Stack id=20 nframes=3
+	pc=4708004 func=147 file=148 line=81
+	pc=5080678 func=149 file=148 line=87
+	pc=5080600 func=150 file=36 line=105
+Stack id=67 nframes=19
+	pc=4752943 func=151 file=25 line=98
+	pc=4822218 func=152 file=153 line=280
+	pc=4822195 func=154 file=155 line=15
+	pc=4823409 func=156 file=85 line=272
+	pc=4821405 func=157 file=94 line=374
+	pc=5042404 func=158 file=94 line=354
+	pc=5042391 func=159 file=160 line=76
+	pc=5047095 func=161 file=162 line=35
+	pc=5068462 func=163 file=34 line=373
+	pc=4703265 func=164 file=165 line=74
+	pc=5034315 func=166 file=165 line=65
+	pc=5034286 func=167 file=34 line=373
+	pc=5047998 func=43 file=42 line=57
+	pc=5021687 func=44 file=45 line=154
+	pc=5059172 func=120 file=47 line=189
+	pc=4967876 func=121 file=47 line=179
+	pc=4967838 func=122 file=51 line=734
+	pc=4968614 func=123 file=51 line=808
+	pc=5078215 func=35 file=36 line=53
+Stack id=84 nframes=15
+	pc=4757394 func=129 file=25 line=1488
+	pc=4819063 func=130 file=27 line=462
+	pc=4819041 func=131 file=132 line=17
+	pc=5059867 func=133 file=134 line=18
+	pc=5055784 func=135 file=112 line=257
+	pc=5058352 func=46 file=47 line=121
+	pc=5057380 func=48 file=47 line=75
+	pc=5057381 func=49 file=47 line=71
+	pc=4965884 func=50 file=51 line=651
+	pc=4964173 func=52 file=51 line=616
+	pc=4961811 func=53 file=51 line=517
+	pc=4960409 func=54 file=51 line=508
+	pc=4958646 func=55 file=51 line=434
+	pc=4958647 func=56 file=51 line=401
+	pc=5078500 func=35 file=36 line=68
+Stack id=74 nframes=9
+	pc=4755428 func=168 file=25 line=1213
+	pc=5051952 func=119 file=42 line=170
+	pc=5048051 func=43 file=42 line=57
+	pc=5021687 func=44 file=45 line=154
+	pc=5059172 func=120 file=47 line=189
+	pc=4967876 func=121 file=47 line=179
+	pc=4967838 func=122 file=51 line=734
+	pc=4968614 func=123 file=51 line=808
+	pc=5078215 func=35 file=36 line=53
+Stack id=50 nframes=1
+	pc=5079149 func=71 file=36 line=40
+Stack id=14 nframes=2
+	pc=4708263 func=169 file=148 line=116
+	pc=5077833 func=35 file=36 line=29
+Stack id=27 nframes=2
+	pc=4437613 func=170 file=65 line=402
+	pc=4316040 func=107 file=73 line=1333
+Stack id=30 nframes=5
+	pc=4309402 func=106 file=73 line=745
+	pc=4257811 func=78 file=77 line=1308
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=75 nframes=1
+	pc=5078720 func=113 file=36 line=58
+Stack id=88 nframes=8
+	pc=4757394 func=129 file=25 line=1488
+	pc=4819063 func=130 file=27 line=462
+	pc=4819041 func=131 file=132 line=17
+	pc=5059594 func=171 file=172 line=15
+	pc=5055722 func=135 file=112 line=251
+	pc=5058972 func=110 file=47 line=163
+	pc=5055951 func=111 file=112 line=327
+	pc=5078747 func=113 file=36 line=59
+Stack id=70 nframes=21
+	pc=4754167 func=24 file=25 line=736
+	pc=4814861 func=26 file=27 line=181
+	pc=4814837 func=28 file=29 line=736
+	pc=4814480 func=30 file=29 line=160
+	pc=4820817 func=173 file=92 line=29
+	pc=4820809 func=174 file=94 line=118
+	pc=4742703 func=175 file=176 line=335
+	pc=5041967 func=177 file=176 line=354
+	pc=5041927 func=178 file=160 line=55
+	pc=5047143 func=161 file=162 line=40
+	pc=5068462 func=163 file=34 line=373
+	pc=4703265 func=164 file=165 line=74
+	pc=5034315 func=166 file=165 line=65
+	pc=5034286 func=167 file=34 line=373
+	pc=5047998 func=43 file=42 line=57
+	pc=5021687 func=44 file=45 line=154
+	pc=5059172 func=120 file=47 line=189
+	pc=4967876 func=121 file=47 line=179
+	pc=4967838 func=122 file=51 line=734
+	pc=4968614 func=123 file=51 line=808
+	pc=5078215 func=35 file=36 line=53
+Stack id=25 nframes=7
+	pc=4227441 func=102 file=58 line=442
+	pc=4315507 func=105 file=73 line=1259
+	pc=4308860 func=106 file=73 line=662
+	pc=4257811 func=78 file=77 line=1308
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=58 nframes=5
+	pc=4753924 func=81 file=25 line=432
+	pc=4744422 func=82 file=83 line=106
+	pc=4823012 func=84 file=85 line=218
+	pc=4824408 func=86 file=87 line=21
+	pc=5079543 func=59 file=36 line=82
+Stack id=69 nframes=19
+	pc=4753924 func=81 file=25 line=432
+	pc=4744496 func=82 file=83 line=118
+	pc=4823012 func=84 file=85 line=218
+	pc=4823631 func=156 file=85 line=301
+	pc=4821405 func=157 file=94 line=374
+	pc=5042404 func=158 file=94 line=354
+	pc=5042391 func=159 file=160 line=76
+	pc=5047095 func=161 file=162 line=35
+	pc=5068462 func=163 file=34 line=373
+	pc=4703265 func=164 file=165 line=74
+	pc=5034315 func=166 file=165 line=65
+	pc=5034286 func=167 file=34 line=373
+	pc=5047998 func=43 file=42 line=57
+	pc=5021687 func=44 file=45 line=154
+	pc=5059172 func=120 file=47 line=189
+	pc=4967876 func=121 file=47 line=179
+	pc=4967838 func=122 file=51 line=734
+	pc=4968614 func=123 file=51 line=808
+	pc=5078215 func=35 file=36 line=53
+Stack id=83 nframes=15
+	pc=4757394 func=129 file=25 line=1488
+	pc=4819063 func=130 file=27 line=462
+	pc=4819041 func=131 file=132 line=17
+	pc=5054762 func=179 file=180 line=88
+	pc=5055769 func=135 file=112 line=256
+	pc=5058352 func=46 file=47 line=121
+	pc=5057380 func=48 file=47 line=75
+	pc=5057381 func=49 file=47 line=71
+	pc=4965884 func=50 file=51 line=651
+	pc=4964173 func=52 file=51 line=616
+	pc=4961811 func=53 file=51 line=517
+	pc=4960409 func=54 file=51 line=508
+	pc=4958646 func=55 file=51 line=434
+	pc=4958647 func=56 file=51 line=401
+	pc=5078500 func=35 file=36 line=68
+Stack id=43 nframes=9
+	pc=4368154 func=181 file=182 line=958
+	pc=4293585 func=183 file=184 line=254
+	pc=4293175 func=185 file=184 line=170
+	pc=4290674 func=186 file=187 line=182
+	pc=4255364 func=188 file=77 line=948
+	pc=4256932 func=78 file=77 line=1149
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=78 nframes=8
+	pc=4756062 func=189 file=25 line=1421
+	pc=4750293 func=190 file=153 line=684
+	pc=4818215 func=191 file=192 line=17
+	pc=4816989 func=108 file=29 line=602
+	pc=5000296 func=109 file=40 line=172
+	pc=5058941 func=110 file=47 line=159
+	pc=5055951 func=111 file=112 line=327
+	pc=5078747 func=113 file=36 line=59
+Stack id=71 nframes=20
+	pc=4753732 func=136 file=25 line=335
+	pc=4813424 func=137 file=138 line=24
+	pc=4813394 func=139 file=29 line=81
+	pc=4811154 func=140 file=141 line=213
+	pc=4813572 func=142 file=29 line=104
+	pc=4823895 func=193 file=85 line=315
+	pc=5047564 func=194 file=92 line=23
+	pc=5047547 func=195 file=160 line=23
+	pc=5047406 func=161 file=162 line=53
+	pc=5068462 func=163 file=34 line=373
+	pc=4703265 func=164 file=165 line=74
+	pc=5034315 func=166 file=165 line=65
+	pc=5034286 func=167 file=34 line=373
+	pc=5047998 func=43 file=42 line=57
+	pc=5021687 func=44 file=45 line=154
+	pc=5059172 func=120 file=47 line=189
+	pc=4967876 func=121 file=47 line=179
+	pc=4967838 func=122 file=51 line=734
+	pc=4968614 func=123 file=51 line=808
+	pc=5078215 func=35 file=36 line=53
+Stack id=3 nframes=4
+	pc=4446827 func=196 file=65 line=1369
+	pc=4567827 func=98 file=99 line=256
+	pc=5076805 func=100 file=96 line=125
+	pc=5077595 func=35 file=36 line=20
+Stack id=35 nframes=2
+	pc=4310007 func=72 file=73 line=806
+	pc=4316644 func=107 file=73 line=1469
+Stack id=6 nframes=1
+	pc=4573664 func=197 file=99 line=877
+Stack id=19 nframes=1
+	pc=5080585 func=150 file=36 line=104
+Stack id=54 nframes=1
+	pc=5078085 func=35 file=36 line=47
+Stack id=82 nframes=15
+	pc=4757394 func=129 file=25 line=1488
+	pc=4819063 func=130 file=27 line=462
+	pc=4819041 func=131 file=132 line=17
+	pc=5059594 func=171 file=172 line=15
+	pc=5055722 func=135 file=112 line=251
+	pc=5058352 func=46 file=47 line=121
+	pc=5057380 func=48 file=47 line=75
+	pc=5057381 func=49 file=47 line=71
+	pc=4965884 func=50 file=51 line=651
+	pc=4964173 func=52 file=51 line=616
+	pc=4961811 func=53 file=51 line=517
+	pc=4960409 func=54 file=51 line=508
+	pc=4958646 func=55 file=51 line=434
+	pc=4958647 func=56 file=51 line=401
+	pc=5078500 func=35 file=36 line=68
+Stack id=90 nframes=8
+	pc=4757394 func=129 file=25 line=1488
+	pc=4819063 func=130 file=27 line=462
+	pc=4819041 func=131 file=132 line=17
+	pc=5059867 func=133 file=134 line=18
+	pc=5055784 func=135 file=112 line=257
+	pc=5058972 func=110 file=47 line=163
+	pc=5055951 func=111 file=112 line=327
+	pc=5078747 func=113 file=36 line=59
+Stack id=61 nframes=5
+	pc=4753924 func=81 file=25 line=432
+	pc=4744496 func=82 file=83 line=118
+	pc=4813961 func=145 file=29 line=129
+	pc=5079772 func=146 file=85 line=90
+	pc=5079785 func=59 file=36 line=90
+Stack id=23 nframes=1
+	pc=4315808 func=107 file=73 line=1298
+Stack id=12 nframes=1
+	pc=5080512 func=150 file=36 line=102
+Stack id=68 nframes=19
+	pc=4753924 func=81 file=25 line=432
+	pc=4744422 func=82 file=83 line=106
+	pc=4823012 func=84 file=85 line=218
+	pc=4823631 func=156 file=85 line=301
+	pc=4821405 func=157 file=94 line=374
+	pc=5042404 func=158 file=94 line=354
+	pc=5042391 func=159 file=160 line=76
+	pc=5047095 func=161 file=162 line=35
+	pc=5068462 func=163 file=34 line=373
+	pc=4703265 func=164 file=165 line=74
+	pc=5034315 func=166 file=165 line=65
+	pc=5034286 func=167 file=34 line=373
+	pc=5047998 func=43 file=42 line=57
+	pc=5021687 func=44 file=45 line=154
+	pc=5059172 func=120 file=47 line=189
+	pc=4967876 func=121 file=47 line=179
+	pc=4967838 func=122 file=51 line=734
+	pc=4968614 func=123 file=51 line=808
+	pc=5078215 func=35 file=36 line=53
+Stack id=4 nframes=1
+	pc=4576896 func=128 file=104 line=44
+Stack id=66 nframes=6
+	pc=5021687 func=44 file=45 line=154
+	pc=5059172 func=120 file=47 line=189
+	pc=4967876 func=121 file=47 line=179
+	pc=4967838 func=122 file=51 line=734
+	pc=4968614 func=123 file=51 line=808
+	pc=5078215 func=35 file=36 line=53
+Stack id=81 nframes=16
+	pc=4757147 func=198 file=25 line=1478
+	pc=4752076 func=199 file=27 line=313
+	pc=4998549 func=39 file=40 line=149
+	pc=5049499 func=41 file=42 line=124
+	pc=5048282 func=43 file=42 line=70
+	pc=5021687 func=44 file=45 line=154
+	pc=5057739 func=46 file=47 line=85
+	pc=5057380 func=48 file=47 line=75
+	pc=5057381 func=49 file=47 line=71
+	pc=4965884 func=50 file=51 line=651
+	pc=4964173 func=52 file=51 line=616
+	pc=4961811 func=53 file=51 line=517
+	pc=4960409 func=54 file=51 line=508
+	pc=4958646 func=55 file=51 line=434
+	pc=4958647 func=56 file=51 line=401
+	pc=5078500 func=35 file=36 line=68
+Stack id=87 nframes=4
+	pc=4814791 func=30 file=29 line=164
+	pc=4996132 func=31 file=32 line=55
+	pc=5032836 func=33 file=34 line=179
+	pc=5078635 func=35 file=36 line=73
+Stack id=85 nframes=15
+	pc=4757394 func=129 file=25 line=1488
+	pc=4819063 func=130 file=27 line=462
+	pc=4819041 func=131 file=132 line=17
+	pc=5060022 func=133 file=134 line=21
+	pc=5055784 func=135 file=112 line=257
+	pc=5058352 func=46 file=47 line=121
+	pc=5057380 func=48 file=47 line=75
+	pc=5057381 func=49 file=47 line=71
+	pc=4965884 func=50 file=51 line=651
+	pc=4964173 func=52 file=51 line=616
+	pc=4961811 func=53 file=51 line=517
+	pc=4960409 func=54 file=51 line=508
+	pc=4958646 func=55 file=51 line=434
+	pc=4958647 func=56 file=51 line=401
+	pc=5078500 func=35 file=36 line=68
+Stack id=39 nframes=4
+	pc=4644903 func=115 file=116 line=474
+	pc=4311677 func=200 file=73 line=964
+	pc=4310756 func=72 file=73 line=926
+	pc=4316644 func=107 file=73 line=1469
+Stack id=31 nframes=7
+	pc=4585153 func=201 file=202 line=383
+	pc=4326396 func=74 file=75 line=534
+	pc=4258131 func=76 file=77 line=1353
+	pc=4255947 func=78 file=77 line=1025
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=89 nframes=8
+	pc=4757394 func=129 file=25 line=1488
+	pc=4819063 func=130 file=27 line=462
+	pc=4819041 func=131 file=132 line=17
+	pc=5054762 func=179 file=180 line=88
+	pc=5055769 func=135 file=112 line=256
+	pc=5058972 func=110 file=47 line=163
+	pc=5055951 func=111 file=112 line=327
+	pc=5078747 func=113 file=36 line=59
+Stack id=53 nframes=1
+	pc=5079488 func=59 file=36 line=81
+Stack id=18 nframes=3
+	pc=4227441 func=102 file=58 line=442
+	pc=4574090 func=127 file=99 line=937
+	pc=4573703 func=197 file=99 line=880
+Stack id=48 nframes=1
+	pc=5077881 func=35 file=36 line=38
+Stack id=94 nframes=8
+	pc=4753732 func=136 file=25 line=335
+	pc=4813424 func=137 file=138 line=24
+	pc=4813394 func=139 file=29 line=81
+	pc=4811154 func=140 file=141 line=213
+	pc=4813572 func=142 file=29 line=104
+	pc=4996049 func=143 file=32 line=37
+	pc=5033653 func=144 file=34 line=203
+	pc=5078837 func=113 file=36 line=66
+Stack id=42 nframes=9
+	pc=4584693 func=203 file=202 line=357
+	pc=4355940 func=204 file=67 line=522
+	pc=4292956 func=185 file=184 line=147
+	pc=4290674 func=186 file=187 line=182
+	pc=4255364 func=188 file=77 line=948
+	pc=4256932 func=78 file=77 line=1149
+	pc=4528840 func=79 file=80 line=107
+	pc=5081148 func=62 file=36 line=127
+	pc=5077843 func=35 file=36 line=32
+Stack id=93 nframes=7
+	pc=4754618 func=88 file=25 line=964
+	pc=4816103 func=89 file=27 line=209
+	pc=4816095 func=28 file=29 line=736
+	pc=4815648 func=90 file=29 line=380
+	pc=4996388 func=205 file=32 line=96
+	pc=5033284 func=206 file=34 line=191
+	pc=5078821 func=113 file=36 line=65
+Stack id=34 nframes=2
+	pc=4644903 func=115 file=116 line=474
+	pc=4316309 func=107 file=73 line=1393
+Stack id=49 nframes=2
+	pc=4708263 func=169 file=148 line=116
+	pc=5078001 func=35 file=36 line=43
+Stack id=7 nframes=4
+	pc=4573636 func=207 file=99 line=877
+	pc=4567844 func=98 file=99 line=259
+	pc=5076805 func=100 file=96 line=125
+	pc=5077595 func=35 file=36 line=20
+Stack id=76 nframes=1
+	pc=5078444 func=35 file=36 line=58
+Stack id=1 nframes=4
+	pc=4583115 func=208 file=202 line=260
+	pc=4567535 func=98 file=99 line=238
+	pc=5076805 func=100 file=96 line=125
+	pc=5077595 func=35 file=36 line=20
+Stack id=26 nframes=2
+	pc=4224086 func=57 file=58 line=145
+	pc=4316011 func=107 file=73 line=1312
+Stack id=40 nframes=3
+	pc=4312646 func=200 file=73 line=1086
+	pc=4310756 func=72 file=73 line=926
+	pc=4316644 func=107 file=73 line=1469
+Stack id=72 nframes=11
+	pc=4757394 func=129 file=25 line=1488
+	pc=5054386 func=130 file=27 line=462
+	pc=5054396 func=209 file=210 line=28
+	pc=5051349 func=119 file=42 line=152
+	pc=5048051 func=43 file=42 line=57
+	pc=5021687 func=44 file=45 line=154
+	pc=5059172 func=120 file=47 line=189
+	pc=4967876 func=121 file=47 line=179
+	pc=4967838 func=122 file=51 line=734
+	pc=4968614 func=123 file=51 line=808
+	pc=5078215 func=35 file=36 line=53
+Stack id=59 nframes=5
+	pc=4753924 func=81 file=25 line=432
+	pc=4744496 func=82 file=83 line=118
+	pc=4823012 func=84 file=85 line=218
+	pc=4824408 func=86 file=87 line=21
+	pc=5079543 func=59 file=36 line=82
+Stack id=9 nframes=2
+	pc=5076879 func=100 file=96 line=128
+	pc=5077595 func=35 file=36 line=20
+EventBatch gen=1 m=18446744073709551615 time=7689670146021 size=6980
+Strings
+String id=1
+	data="Not worker"
+String id=2
+	data="GC (dedicated)"
+String id=3
+	data="GC (fractional)"
+String id=4
+	data="GC (idle)"
+String id=5
+	data="unspecified"
+String id=6
+	data="forever"
+String id=7
+	data="network"
+String id=8
+	data="select"
+String id=9
+	data="sync.(*Cond).Wait"
+String id=10
+	data="sync"
+String id=11
+	data="chan send"
+String id=12
+	data="chan receive"
+String id=13
+	data="GC mark assist wait for work"
+String id=14
+	data="GC background sweeper wait"
+String id=15
+	data="system goroutine wait"
+String id=16
+	data="preempted"
+String id=17
+	data="wait for debug call"
+String id=18
+	data="wait until GC ends"
+String id=19
+	data="sleep"
+String id=20
+	data="runtime.Gosched"
+String id=21
+	data="start trace"
+String id=22
+	data="GC sweep termination"
+String id=23
+	data="GC mark termination"
+String id=24
+	data="syscall.read"
+String id=25
+	data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/zsyscall_linux_amd64.go"
+String id=26
+	data="syscall.Read"
+String id=27
+	data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/syscall_unix.go"
+String id=28
+	data="internal/poll.ignoringEINTRIO"
+String id=29
+	data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_unix.go"
+String id=30
+	data="internal/poll.(*FD).Read"
+String id=31
+	data="net.(*netFD).Read"
+String id=32
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/fd_posix.go"
+String id=33
+	data="net.(*conn).Read"
+String id=34
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/net.go"
+String id=35
+	data="main.main"
+String id=36
+	data="/usr/local/google/home/mknyszek/work/go-1/src/cmd/trace/v2/testdata/testprog/main.go"
+String id=37
+	data="syscall.connect"
+String id=38
+	data="syscall.Connect"
+String id=39
+	data="net.(*netFD).connect"
+String id=40
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/fd_unix.go"
+String id=41
+	data="net.(*netFD).dial"
+String id=42
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/sock_posix.go"
+String id=43
+	data="net.socket"
+String id=44
+	data="net.internetSocket"
+String id=45
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/ipsock_posix.go"
+String id=46
+	data="net.(*sysDialer).doDialTCPProto"
+String id=47
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsock_posix.go"
+String id=48
+	data="net.(*sysDialer).doDialTCP"
+String id=49
+	data="net.(*sysDialer).dialTCP"
+String id=50
+	data="net.(*sysDialer).dialSingle"
+String id=51
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/dial.go"
+String id=52
+	data="net.(*sysDialer).dialSerial"
+String id=53
+	data="net.(*sysDialer).dialParallel"
+String id=54
+	data="net.(*Dialer).DialContext"
+String id=55
+	data="net.(*Dialer).Dial"
+String id=56
+	data="net.Dial"
+String id=57
+	data="runtime.chansend1"
+String id=58
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/chan.go"
+String id=59
+	data="main.blockingSyscall"
+String id=60
+	data="time.Sleep"
+String id=61
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/time.go"
+String id=62
+	data="main.allocHog"
+String id=63
+	data="main.cpu10"
+String id=64
+	data="runtime.goparkunlock"
+String id=65
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/proc.go"
+String id=66
+	data="runtime.bgsweep"
+String id=67
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcsweep.go"
+String id=68
+	data="runtime.asyncPreempt"
+String id=69
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/preempt_amd64.s"
+String id=70
+	data="main.cpuHog"
+String id=71
+	data="main.main.func1"
+String id=72
+	data="runtime.gcMarkDone"
+String id=73
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgc.go"
+String id=74
+	data="runtime.gcAssistAlloc"
+String id=75
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcmark.go"
+String id=76
+	data="runtime.deductAssistCredit"
+String id=77
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/malloc.go"
+String id=78
+	data="runtime.mallocgc"
+String id=79
+	data="runtime.makeslice"
+String id=80
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/slice.go"
+String id=81
+	data="syscall.fcntl"
+String id=82
+	data="syscall.SetNonblock"
+String id=83
+	data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/exec_unix.go"
+String id=84
+	data="os.newFile"
+String id=85
+	data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_unix.go"
+String id=86
+	data="os.Pipe"
+String id=87
+	data="/usr/local/google/home/mknyszek/work/go-1/src/os/pipe2_unix.go"
+String id=88
+	data="syscall.write"
+String id=89
+	data="syscall.Write"
+String id=90
+	data="internal/poll.(*FD).Write"
+String id=91
+	data="os.(*File).write"
+String id=92
+	data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_posix.go"
+String id=93
+	data="os.(*File).Write"
+String id=94
+	data="/usr/local/google/home/mknyszek/work/go-1/src/os/file.go"
+String id=95
+	data="runtime/trace.Start.func1"
+String id=96
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace/trace.go"
+String id=97
+	data="main.blockingSyscall.func1"
+String id=98
+	data="runtime.StartTrace"
+String id=99
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2.go"
+String id=100
+	data="runtime/trace.Start"
+String id=101
+	data="internal/poll.(*FD).WaitWrite"
+String id=102
+	data="runtime.chanrecv1"
+String id=103
+	data="runtime.traceStartReadCPU"
+String id=104
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2cpu.go"
+String id=105
+	data="runtime.gcBgMarkStartWorkers"
+String id=106
+	data="runtime.gcStart"
+String id=107
+	data="runtime.gcBgMarkWorker"
+String id=108
+	data="internal/poll.(*FD).Accept"
+String id=109
+	data="net.(*netFD).accept"
+String id=110
+	data="net.(*TCPListener).accept"
+String id=111
+	data="net.(*TCPListener).Accept"
+String id=112
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsock.go"
+String id=113
+	data="main.main.func2"
+String id=114
+	data="runtime.gcParkAssist"
+String id=115
+	data="runtime.systemstack_switch"
+String id=116
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/asm_amd64.s"
+String id=117
+	data="syscall.bind"
+String id=118
+	data="syscall.Bind"
+String id=119
+	data="net.(*netFD).listenStream"
+String id=120
+	data="net.(*sysListener).listenTCPProto"
+String id=121
+	data="net.(*sysListener).listenTCP"
+String id=122
+	data="net.(*ListenConfig).Listen"
+String id=123
+	data="net.Listen"
+String id=124
+	data="runtime.(*scavengerState).park"
+String id=125
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcscavenge.go"
+String id=126
+	data="runtime.bgscavenge"
+String id=127
+	data="runtime.(*wakeableSleep).sleep"
+String id=128
+	data="runtime.traceStartReadCPU.func1"
+String id=129
+	data="syscall.setsockopt"
+String id=130
+	data="syscall.SetsockoptInt"
+String id=131
+	data="internal/poll.(*FD).SetsockoptInt"
+String id=132
+	data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/sockopt.go"
+String id=133
+	data="net.setKeepAlivePeriod"
+String id=134
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsockopt_unix.go"
+String id=135
+	data="net.newTCPConn"
+String id=136
+	data="syscall.Close"
+String id=137
+	data="internal/poll.(*SysFile).destroy"
+String id=138
+	data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_unixjs.go"
+String id=139
+	data="internal/poll.(*FD).destroy"
+String id=140
+	data="internal/poll.(*FD).decref"
+String id=141
+	data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_mutex.go"
+String id=142
+	data="internal/poll.(*FD).Close"
+String id=143
+	data="net.(*netFD).Close"
+String id=144
+	data="net.(*conn).Close"
+String id=145
+	data="internal/poll.(*FD).SetBlocking"
+String id=146
+	data="os.(*File).Fd"
+String id=147
+	data="sync.(*WaitGroup).Add"
+String id=148
+	data="/usr/local/google/home/mknyszek/work/go-1/src/sync/waitgroup.go"
+String id=149
+	data="sync.(*WaitGroup).Done"
+String id=150
+	data="main.cpu20"
+String id=151
+	data="syscall.openat"
+String id=152
+	data="syscall.Open"
+String id=153
+	data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/syscall_linux.go"
+String id=154
+	data="os.open"
+String id=155
+	data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_open_unix.go"
+String id=156
+	data="os.openFileNolog"
+String id=157
+	data="os.OpenFile"
+String id=158
+	data="os.Open"
+String id=159
+	data="net.open"
+String id=160
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/parse.go"
+String id=161
+	data="net.maxListenerBacklog"
+String id=162
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/sock_linux.go"
+String id=163
+	data="net.listenerBacklog.func1"
+String id=164
+	data="sync.(*Once).doSlow"
+String id=165
+	data="/usr/local/google/home/mknyszek/work/go-1/src/sync/once.go"
+String id=166
+	data="sync.(*Once).Do"
+String id=167
+	data="net.listenerBacklog"
+String id=168
+	data="syscall.Listen"
+String id=169
+	data="sync.(*WaitGroup).Wait"
+String id=170
+	data="runtime.gopark"
+String id=171
+	data="net.setNoDelay"
+String id=172
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsockopt_posix.go"
+String id=173
+	data="os.(*File).read"
+String id=174
+	data="os.(*File).Read"
+String id=175
+	data="io.ReadAtLeast"
+String id=176
+	data="/usr/local/google/home/mknyszek/work/go-1/src/io/io.go"
+String id=177
+	data="io.ReadFull"
+String id=178
+	data="net.(*file).readLine"
+String id=179
+	data="net.setKeepAlive"
+String id=180
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/sockopt_posix.go"
+String id=181
+	data="runtime.(*mheap).alloc"
+String id=182
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mheap.go"
+String id=183
+	data="runtime.(*mcentral).grow"
+String id=184
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mcentral.go"
+String id=185
+	data="runtime.(*mcentral).cacheSpan"
+String id=186
+	data="runtime.(*mcache).refill"
+String id=187
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mcache.go"
+String id=188
+	data="runtime.(*mcache).nextFree"
+String id=189
+	data="syscall.accept4"
+String id=190
+	data="syscall.Accept4"
+String id=191
+	data="internal/poll.accept"
+String id=192
+	data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/sock_cloexec.go"
+String id=193
+	data="os.(*file).close"
+String id=194
+	data="os.(*File).Close"
+String id=195
+	data="net.(*file).close"
+String id=196
+	data="runtime.startTheWorld"
+String id=197
+	data="runtime.(*traceAdvancerState).start.func1"
+String id=198
+	data="syscall.getsockopt"
+String id=199
+	data="syscall.GetsockoptInt"
+String id=200
+	data="runtime.gcMarkTermination"
+String id=201
+	data="runtime.traceLocker.GCMarkAssistStart"
+String id=202
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2runtime.go"
+String id=203
+	data="runtime.traceLocker.GCSweepSpan"
+String id=204
+	data="runtime.(*sweepLocked).sweep"
+String id=205
+	data="net.(*netFD).Write"
+String id=206
+	data="net.(*conn).Write"
+String id=207
+	data="runtime.(*traceAdvancerState).start"
+String id=208
+	data="runtime.traceLocker.Gomaxprocs"
+String id=209
+	data="net.setDefaultListenerSockopts"
+String id=210
+	data="/usr/local/google/home/mknyszek/work/go-1/src/net/sockopt_linux.go"
diff --git a/src/cmd/trace/v2/testdata/mktests.go b/src/cmd/trace/v2/testdata/mktests.go
new file mode 100644
index 0000000..143e8ec
--- /dev/null
+++ b/src/cmd/trace/v2/testdata/mktests.go
@@ -0,0 +1,63 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"internal/trace/v2/raw"
+	"internal/trace/v2/version"
+	"io"
+	"log"
+	"os"
+	"os/exec"
+)
+
+func main() {
+	// Create command.
+	var trace, stderr bytes.Buffer
+	cmd := exec.Command("go", "run", "./testprog/main.go")
+	// TODO(mknyszek): Remove if goexperiment.Exectracer2 becomes the default.
+	cmd.Env = append(os.Environ(), "GOEXPERIMENT=exectracer2")
+	cmd.Stdout = &trace
+	cmd.Stderr = &stderr
+
+	// Run trace program; the trace will appear in stdout.
+	fmt.Fprintln(os.Stderr, "running trace program...")
+	if err := cmd.Run(); err != nil {
+		log.Fatalf("running trace program: %v:\n%s", err, stderr.String())
+	}
+
+	// Create file.
+	f, err := os.Create(fmt.Sprintf("./go1%d.test", version.Current))
+	if err != nil {
+		log.Fatalf("creating output file: %v", err)
+	}
+	defer f.Close()
+
+	// Write out the trace.
+	r, err := raw.NewReader(&trace)
+	if err != nil {
+		log.Fatalf("reading trace: %v", err)
+	}
+	w, err := raw.NewTextWriter(f, version.Current)
+	if err != nil {
+		log.Fatalf("creating trace writer: %v", err)
+	}
+	for {
+		ev, err := r.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			log.Fatalf("reading trace: %v", err)
+		}
+		if err := w.WriteEvent(ev); err != nil {
+			log.Fatalf("writing trace: %v", err)
+		}
+	}
+}
diff --git a/src/cmd/trace/v2/testdata/testprog/main.go b/src/cmd/trace/v2/testdata/testprog/main.go
new file mode 100644
index 0000000..fcf4dc1
--- /dev/null
+++ b/src/cmd/trace/v2/testdata/testprog/main.go
@@ -0,0 +1,129 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"runtime"
+	"runtime/trace"
+	"sync"
+	"syscall"
+	"time"
+)
+
+func main() {
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatal(err)
+	}
+
+	// checkExecutionTimes relies on this.
+	var wg sync.WaitGroup
+	wg.Add(2)
+	go cpu10(&wg)
+	go cpu20(&wg)
+	wg.Wait()
+
+	// checkHeapMetrics relies on this.
+	allocHog(25 * time.Millisecond)
+
+	// checkProcStartStop relies on this.
+	var wg2 sync.WaitGroup
+	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+		wg2.Add(1)
+		go func() {
+			defer wg2.Done()
+			cpuHog(50 * time.Millisecond)
+		}()
+	}
+	wg2.Wait()
+
+	// checkSyscalls relies on this.
+	done := make(chan error)
+	go blockingSyscall(50*time.Millisecond, done)
+	if err := <-done; err != nil {
+		log.Fatal(err)
+	}
+
+	// checkNetworkUnblock relies on this.
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		log.Fatalf("listen failed: %v", err)
+	}
+	defer ln.Close()
+	go func() {
+		c, err := ln.Accept()
+		if err != nil {
+			return
+		}
+		time.Sleep(time.Millisecond)
+		var buf [1]byte
+		c.Write(buf[:])
+		c.Close()
+	}()
+	c, err := net.Dial("tcp", ln.Addr().String())
+	if err != nil {
+		log.Fatalf("dial failed: %v", err)
+	}
+	var tmp [1]byte
+	c.Read(tmp[:])
+	c.Close()
+
+	trace.Stop()
+}
+
+// blockingSyscall blocks the current goroutine in a syscall for at least d,
+// then sends the result on done: nil on success, or an error otherwise.
+func blockingSyscall(d time.Duration, done chan<- error) {
+	r, w, err := os.Pipe()
+	if err != nil {
+		done <- err
+		return
+	}
+	start := time.Now()
+	msg := []byte("hello")
+	time.AfterFunc(d, func() { w.Write(msg) })
+	_, err = syscall.Read(int(r.Fd()), make([]byte, len(msg)))
+	if err == nil && time.Since(start) < d {
+		err = fmt.Errorf("syscall returned too early: want=%s got=%s", d, time.Since(start))
+	}
+	done <- err
+}
+
+func cpu10(wg *sync.WaitGroup) {
+	defer wg.Done()
+	cpuHog(10 * time.Millisecond)
+}
+
+func cpu20(wg *sync.WaitGroup) {
+	defer wg.Done()
+	cpuHog(20 * time.Millisecond)
+}
+
+func cpuHog(dt time.Duration) {
+	start := time.Now()
+	for i := 0; ; i++ {
+		if i%1000 == 0 && time.Since(start) > dt {
+			return
+		}
+	}
+}
+
+func allocHog(dt time.Duration) {
+	start := time.Now()
+	var s [][]byte
+	for i := 0; ; i++ {
+		if i%1000 == 0 {
+			if time.Since(start) > dt {
+				return
+			}
+			// Take a break... this will generate a ton of events otherwise.
+			time.Sleep(50 * time.Microsecond)
+		}
+		s = append(s, make([]byte, 1024))
+	}
+}
diff --git a/src/cmd/trace/v2/threadgen.go b/src/cmd/trace/v2/threadgen.go
new file mode 100644
index 0000000..e1cae2b
--- /dev/null
+++ b/src/cmd/trace/v2/threadgen.go
@@ -0,0 +1,204 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"fmt"
+	"internal/trace/traceviewer"
+	"internal/trace/traceviewer/format"
+	tracev2 "internal/trace/v2"
+)
+
+var _ generator = &threadGenerator{}
+
+type threadGenerator struct {
+	globalRangeGenerator
+	globalMetricGenerator
+	stackSampleGenerator[tracev2.ThreadID]
+	logEventGenerator[tracev2.ThreadID]
+
+	gStates map[tracev2.GoID]*gState[tracev2.ThreadID]
+	threads map[tracev2.ThreadID]struct{}
+}
+
+func newThreadGenerator() *threadGenerator {
+	tg := new(threadGenerator)
+	rg := func(ev *tracev2.Event) tracev2.ThreadID {
+		return ev.Thread()
+	}
+	tg.stackSampleGenerator.getResource = rg
+	tg.logEventGenerator.getResource = rg
+	tg.gStates = make(map[tracev2.GoID]*gState[tracev2.ThreadID])
+	tg.threads = make(map[tracev2.ThreadID]struct{})
+	return tg
+}
+
+func (g *threadGenerator) Sync() {
+	g.globalRangeGenerator.Sync()
+}
+
+func (g *threadGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) {
+	l := ev.Label()
+	g.gStates[l.Resource.Goroutine()].setLabel(l.Label)
+}
+
+func (g *threadGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) {
+	r := ev.Range()
+	switch ev.Kind() {
+	case tracev2.EventRangeBegin:
+		g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack())
+	case tracev2.EventRangeActive:
+		g.gStates[r.Scope.Goroutine()].rangeActive(r.Name)
+	case tracev2.EventRangeEnd:
+		gs := g.gStates[r.Scope.Goroutine()]
+		gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx)
+	}
+}
+
+func (g *threadGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) {
+	if ev.Thread() != tracev2.NoThread {
+		if _, ok := g.threads[ev.Thread()]; !ok {
+			g.threads[ev.Thread()] = struct{}{}
+		}
+	}
+
+	st := ev.StateTransition()
+	goID := st.Resource.Goroutine()
+
+	// If we haven't seen this goroutine before, create a new
+	// gState for it.
+	gs, ok := g.gStates[goID]
+	if !ok {
+		gs = newGState[tracev2.ThreadID](goID)
+		g.gStates[goID] = gs
+	}
+	// If we haven't already named this goroutine, try to name it.
+	gs.augmentName(st.Stack)
+
+	// Handle the goroutine state transition.
+	from, to := st.Goroutine()
+	if from == to {
+		// Filter out no-op events.
+		return
+	}
+	if from.Executing() && !to.Executing() {
+		if to == tracev2.GoWaiting {
+			// Goroutine started blocking.
+			gs.block(ev.Time(), ev.Stack(), st.Reason, ctx)
+		} else {
+			gs.stop(ev.Time(), ev.Stack(), ctx)
+		}
+	}
+	if !from.Executing() && to.Executing() {
+		start := ev.Time()
+		if from == tracev2.GoUndetermined {
+			// Back-date the event to the start of the trace.
+			start = ctx.startTime
+		}
+		gs.start(start, ev.Thread(), ctx)
+	}
+
+	if from == tracev2.GoWaiting {
+		// Goroutine was unblocked.
+		gs.unblock(ev.Time(), ev.Stack(), ev.Thread(), ctx)
+	}
+	if from == tracev2.GoNotExist && to == tracev2.GoRunnable {
+		// Goroutine was created.
+		gs.created(ev.Time(), ev.Thread(), ev.Stack())
+	}
+	if from == tracev2.GoSyscall {
+		// Exiting syscall.
+		gs.syscallEnd(ev.Time(), to != tracev2.GoRunning, ctx)
+	}
+
+	// Handle syscalls.
+	if to == tracev2.GoSyscall {
+		start := ev.Time()
+		if from == tracev2.GoUndetermined {
+			// Back-date the event to the start of the trace.
+			start = ctx.startTime
+		}
+		// Write down that we've entered a syscall. Note: we might have no P here
+		// if we're in a cgo callback or this is a transition from GoUndetermined
+		// (i.e. the G has been blocked in a syscall).
+		gs.syscallBegin(start, ev.Thread(), ev.Stack())
+	}
+
+	// Note down the goroutine transition.
+	_, inMarkAssist := gs.activeRanges["GC mark assist"]
+	ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist))
+}
+
+func (g *threadGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) {
+	if ev.Thread() != tracev2.NoThread {
+		if _, ok := g.threads[ev.Thread()]; !ok {
+			g.threads[ev.Thread()] = struct{}{}
+		}
+	}
+
+	type procArg struct {
+		Proc uint64 `json:"proc,omitempty"`
+	}
+	st := ev.StateTransition()
+	viewerEv := traceviewer.InstantEvent{
+		Resource: uint64(ev.Thread()),
+		Stack:    ctx.Stack(viewerFrames(ev.Stack())),
+		Arg:      procArg{Proc: uint64(st.Resource.Proc())},
+	}
+
+	from, to := st.Proc()
+	if from == to {
+		// Filter out no-op events.
+		return
+	}
+	if to.Executing() {
+		start := ev.Time()
+		if from == tracev2.ProcUndetermined {
+			start = ctx.startTime
+		}
+		viewerEv.Name = "proc start"
+		viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())}
+		viewerEv.Ts = ctx.elapsed(start)
+		// TODO(mknyszek): We don't have a state machine for threads, so approximate
+		// running threads with running Ps.
+		ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, 1)
+	}
+	if from.Executing() {
+		start := ev.Time()
+		viewerEv.Name = "proc stop"
+		viewerEv.Ts = ctx.elapsed(start)
+		// TODO(mknyszek): We don't have a state machine for threads, so approximate
+		// running threads with running Ps.
+		ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, -1)
+	}
+	// TODO(mknyszek): Consider modeling procs differently and have them be
+	// transition to and from NotExist when GOMAXPROCS changes. We can emit
+	// events for this to clearly delineate GOMAXPROCS changes.
+
+	if viewerEv.Name != "" {
+		ctx.Instant(viewerEv)
+	}
+}
+
+func (g *threadGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) {
+	// TODO(mknyszek): Extend procRangeGenerator to support rendering proc ranges on threads.
+}
+
+func (g *threadGenerator) Finish(ctx *traceContext) {
+	ctx.SetResourceType("OS THREADS")
+
+	// Finish off global ranges.
+	g.globalRangeGenerator.Finish(ctx)
+
+	// Finish off all the goroutine slices.
+	for _, gs := range g.gStates {
+		gs.finish(ctx)
+	}
+
+	// Name all the threads to the emitter.
+	for id := range g.threads {
+		ctx.Resource(uint64(id), fmt.Sprintf("Thread %d", id))
+	}
+}
diff --git a/src/cmd/trace/v2/viewer.go b/src/cmd/trace/v2/viewer.go
new file mode 100644
index 0000000..de67fc4
--- /dev/null
+++ b/src/cmd/trace/v2/viewer.go
@@ -0,0 +1,56 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"fmt"
+	"internal/trace"
+	"internal/trace/traceviewer"
+	tracev2 "internal/trace/v2"
+	"time"
+)
+
+// viewerFrames converts the frames of the stack stk into a slice of
+// *trace.Frame values for use by the trace viewer.
+func viewerFrames(stk tracev2.Stack) []*trace.Frame {
+	var frames []*trace.Frame
+	stk.Frames(func(f tracev2.StackFrame) bool {
+		frames = append(frames, &trace.Frame{
+			PC:   f.PC,
+			Fn:   f.Func,
+			File: f.File,
+			Line: int(f.Line),
+		})
+		return true
+	})
+	return frames
+}
+
+func viewerGState(state tracev2.GoState, inMarkAssist bool) traceviewer.GState {
+	switch state {
+	case tracev2.GoUndetermined:
+		return traceviewer.GDead
+	case tracev2.GoNotExist:
+		return traceviewer.GDead
+	case tracev2.GoRunnable:
+		return traceviewer.GRunnable
+	case tracev2.GoRunning:
+		return traceviewer.GRunning
+	case tracev2.GoWaiting:
+		if inMarkAssist {
+			return traceviewer.GWaitingGC
+		}
+		return traceviewer.GWaiting
+	case tracev2.GoSyscall:
+		// N.B. A goroutine in a syscall is considered "executing" (state.Executing() == true).
+		return traceviewer.GRunning
+	default:
+		panic(fmt.Sprintf("unknown GoState: %s", state.String()))
+	}
+}
+
+func viewerTime(t time.Duration) float64 {
+	return float64(t) / float64(time.Microsecond)
+}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go
index 0c70239..c2e45c6 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go
@@ -83,7 +83,7 @@
 	a.cmd.Wait()
 }
 
-// newAddr2liner starts the given addr2liner command reporting
+// newAddr2Liner starts the given addr2liner command reporting
 // information about the given executable file. If file is a shared
 // library, base should be the address at which it was mapped in the
 // program under consideration.
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go
index 844c7a4..491422f 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go
@@ -66,7 +66,7 @@
 	a.cmd.Wait()
 }
 
-// newLlvmSymbolizer starts the given llvmSymbolizer command reporting
+// newLLVMSymbolizer starts the given llvmSymbolizer command reporting
 // information about the given executable file. If file is a shared
 // library, base should be the address at which it was mapped in the
 // program under consideration.
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go
index e64adf5..2709ef8 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go
@@ -95,8 +95,8 @@
 		// Match all possible demangled versions of the name.
 		for _, o := range [][]demangle.Option{
 			{demangle.NoClones},
-			{demangle.NoParams},
-			{demangle.NoParams, demangle.NoTemplateParams},
+			{demangle.NoParams, demangle.NoEnclosingParams},
+			{demangle.NoParams, demangle.NoEnclosingParams, demangle.NoTemplateParams},
 		} {
 			if demangled, err := demangle.ToString(name, o...); err == nil && r.MatchString(demangled) {
 				return []string{demangled}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go
index a9cae92..b97ef85 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go
@@ -18,7 +18,6 @@
 	"errors"
 	"fmt"
 	"os"
-	"strings"
 
 	"github.com/google/pprof/internal/binutils"
 	"github.com/google/pprof/internal/plugin"
@@ -67,7 +66,7 @@
 	flagTools := flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames")
 
 	flagHTTP := flag.String("http", "", "Present interactive web UI at the specified http host:port")
-	flagNoBrowser := flag.Bool("no_browser", false, "Skip opening a browswer for the interactive web UI")
+	flagNoBrowser := flag.Bool("no_browser", false, "Skip opening a browser for the interactive web UI")
 
 	// Flags that set configuration properties.
 	cfg := currentConfig()
@@ -102,9 +101,6 @@
 			file.Close()
 			execName = arg0
 			args = args[1:]
-		} else if *flagBuildID == "" && isBuildID(arg0) {
-			*flagBuildID = arg0
-			args = args[1:]
 		}
 	}
 
@@ -265,12 +261,6 @@
 	}
 }
 
-// isBuildID determines if the profile may contain a build ID, by
-// checking that it is a string of hex digits.
-func isBuildID(id string) bool {
-	return strings.Trim(id, "0123456789abcdefABCDEF") == ""
-}
-
 func sampleIndex(flag *bool, si string, sampleType, option string, ui plugin.UI) string {
 	if *flag {
 		if si == "" {
@@ -364,5 +354,7 @@
 	"   PPROF_BINARY_PATH  Search path for local binary files\n" +
 	"                      default: $HOME/pprof/binaries\n" +
 	"                      searches $buildid/$name, $buildid/*, $path/$buildid,\n" +
-	"                      ${buildid:0:2}/${buildid:2}.debug, $name, $path\n" +
+	"                      ${buildid:0:2}/${buildid:2}.debug, $name, $path,\n" +
+	"                      ${name}.debug, $dir/.debug/${name}.debug,\n" +
+	"                      usr/lib/debug/$dir/${name}.debug\n" +
 	"   * On Windows, %USERPROFILE% is used instead of $HOME"
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go
index 5ddee33..584c5d8 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go
@@ -389,7 +389,7 @@
 // set to the remote source URL by collectMappingSources back to empty string.
 func unsourceMappings(p *profile.Profile) {
 	for _, m := range p.Mapping {
-		if m.BuildID == "" {
+		if m.BuildID == "" && filepath.VolumeName(m.File) == "" {
 			if u, err := url.Parse(m.File); err == nil && u.IsAbs() {
 				m.File = ""
 			}
@@ -408,9 +408,13 @@
 	}
 mapping:
 	for _, m := range p.Mapping {
+		var noVolumeFile string
 		var baseName string
+		var dirName string
 		if m.File != "" {
+			noVolumeFile = strings.TrimPrefix(m.File, filepath.VolumeName(m.File))
 			baseName = filepath.Base(m.File)
+			dirName = filepath.Dir(noVolumeFile)
 		}
 
 		for _, path := range filepath.SplitList(searchPath) {
@@ -420,7 +424,7 @@
 				if matches, err := filepath.Glob(filepath.Join(path, m.BuildID, "*")); err == nil {
 					fileNames = append(fileNames, matches...)
 				}
-				fileNames = append(fileNames, filepath.Join(path, m.File, m.BuildID)) // perf path format
+				fileNames = append(fileNames, filepath.Join(path, noVolumeFile, m.BuildID)) // perf path format
 				// Llvm buildid protocol: the first two characters of the build id
 				// are used as directory, and the remaining part is in the filename.
 				// e.g. `/ab/cdef0123456.debug`
@@ -429,10 +433,13 @@
 			if m.File != "" {
 				// Try both the basename and the full path, to support the same directory
 				// structure as the perf symfs option.
-				if baseName != "" {
-					fileNames = append(fileNames, filepath.Join(path, baseName))
-				}
-				fileNames = append(fileNames, filepath.Join(path, m.File))
+				fileNames = append(fileNames, filepath.Join(path, baseName))
+				fileNames = append(fileNames, filepath.Join(path, noVolumeFile))
+				// Other locations: use the same search paths as GDB, according to
+				// https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html
+				fileNames = append(fileNames, filepath.Join(path, noVolumeFile+".debug"))
+				fileNames = append(fileNames, filepath.Join(path, dirName, ".debug", baseName+".debug"))
+				fileNames = append(fileNames, filepath.Join(path, "usr", "lib", "debug", dirName, baseName+".debug"))
 			}
 			for _, name := range fileNames {
 				if f, err := obj.Open(name, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol); err == nil {
@@ -461,8 +468,8 @@
 			l.Mapping = m
 		}
 	}
-	// Replace executable filename/buildID with the overrides from source.
-	// Assumes the executable is the first Mapping entry.
+	// If configured, apply executable filename override and (maybe, see below)
+	// build ID override from source. Assume the executable is the first mapping.
 	if execName, buildID := s.ExecName, s.BuildID; execName != "" || buildID != "" {
 		m := p.Mapping[0]
 		if execName != "" {
@@ -470,7 +477,10 @@
 			// the source override is most likely missing it.
 			m.File = execName
 		}
-		if buildID != "" {
+		// Only apply the build ID override if the build ID in the main mapping is
+		// missing. Overwriting a build ID that is already present is very likely
+		// wrong, so we refuse to do that.
+		if buildID != "" && m.BuildID == "" {
 			m.BuildID = buildID
 		}
 	}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js
index 5282c1b..ff980f6 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js
@@ -563,11 +563,11 @@
     return str.replace(/([\\\.?+*\[\](){}|^$])/g, '\\$1');
   }
 
-  function setSampleIndexLink(id) {
-    const elem = document.getElementById(id);
+  function setSampleIndexLink(si) {
+    const elem = document.getElementById('sampletype-' + si);
     if (elem != null) {
       setHrefParams(elem, function (params) {
-        params.set("si", id);
+        params.set("si", si);
       });
     }
   }
@@ -682,8 +682,10 @@
     toptable.addEventListener('touchstart', handleTopClick);
   }
 
-  const ids = ['topbtn', 'graphbtn', 'flamegraph', 'flamegraph2', 'peek', 'list',
-	       'disasm', 'focus', 'ignore', 'hide', 'show', 'show-from'];
+  const ids = ['topbtn', 'graphbtn',
+               'flamegraph', 'flamegraph2', 'flamegraphold',
+               'peek', 'list',
+               'disasm', 'focus', 'ignore', 'hide', 'show', 'show-from'];
   ids.forEach(makeSearchLinkDynamic);
 
   const sampleIDs = [{{range .SampleTypes}}'{{.}}', {{end}}];
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html
index 39cb55a..42cb796 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html
@@ -12,7 +12,7 @@
       <a title="{{.Help.top}}"  href="./top" id="topbtn">Top</a>
       <a title="{{.Help.graph}}" href="./" id="graphbtn">Graph</a>
       <a title="{{.Help.flamegraph}}" href="./flamegraph" id="flamegraph">Flame Graph</a>
-      <a title="{{.Help.flamegraph2}}" href="./flamegraph2" id="flamegraph2">Flame Graph (new)</a>
+      <a title="{{.Help.flamegraphold}}" href="./flamegraphold" id="flamegraphold">Flame Graph (old)</a>
       <a title="{{.Help.peek}}" href="./peek" id="peek">Peek</a>
       <a title="{{.Help.list}}" href="./source" id="list">Source</a>
       <a title="{{.Help.disasm}}" href="./disasm" id="disasm">Disassemble</a>
@@ -28,7 +28,7 @@
     </div>
     <div class="submenu">
       {{range .SampleTypes}}
-      <a href="?si={{.}}" id="{{.}}">{{.}}</a>
+      <a href="?si={{.}}" id="sampletype-{{.}}">{{.}}</a>
       {{end}}
     </div>
   </div>
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.css b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.css
index d142aa7..f5aeb98 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.css
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.css
@@ -28,7 +28,10 @@
   position: absolute;
   overflow: hidden;
   box-sizing: border-box;
+  background: #d8d8d8;
 }
+.positive { position: absolute; background: #caa; }
+.negative { position: absolute; background: #aca; }
 /* Not-inlined frames are visually separated from their caller. */
 .not-inlined {
   border-top: 1px solid black;
@@ -47,11 +50,6 @@
 /* Box highlighting via shadows to avoid size changes */
 .hilite { box-shadow: 0px 0px 0px 2px #000; z-index: 1; }
 .hilite2 { box-shadow: 0px 0px 0px 2px #000; z-index: 1; }
-/* Self-cost region inside a box */
-.self {
-  position: absolute;
-  background: rgba(0,0,0,0.25); /* Darker hue */
-}
 /* Gap left between callers and callees */
 .separator {
   position: absolute;
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js
index 64229a0..be78edd 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js
@@ -31,13 +31,20 @@
       ['hrs', 60*60]]]]);
 
   // Fields
-  let shownTotal = 0;       // Total value of all stacks
   let pivots = [];          // Indices of currently selected data.Sources entries.
   let matches = new Set();  // Indices of sources that match search
   let elems = new Map();    // Mapping from source index to display elements
   let displayList = [];     // List of boxes to display.
   let actionMenuOn = false; // Is action menu visible?
   let actionTarget = null;  // Box on which action menu is operating.
+  let diff = false;         // Are we displaying a diff?
+
+  for (const stack of stacks.Stacks) {
+    if (stack.Value < 0) {
+      diff = true;
+      break;
+    }
+  }
 
   // Setup to allow measuring text width.
   const textSizer = document.createElement('canvas');
@@ -177,9 +184,8 @@
   function handleEnter(box, div) {
     if (actionMenuOn) return;
     const src = stacks.Sources[box.src];
-    const d = details(box);
-    div.title = d + ' ' + src.FullName + (src.Inlined ? "\n(inlined)" : "");
-    detailBox.innerText = d;
+    div.title = details(box) + ' │ ' + src.FullName + (src.Inlined ? "\n(inlined)" : "");
+    detailBox.innerText = summary(box.sumpos, box.sumneg);
     // Highlight all boxes that have the same source as box.
     toggleClass(box.src, 'hilite2', true);
   }
@@ -228,16 +234,16 @@
     const width = chart.clientWidth;
     elems.clear();
     actionTarget = null;
-    const total = totalValue(places);
+    const [pos, neg] = totalValue(places);
+    const total = pos + neg;
     const xscale = (width-2*PADDING) / total; // Converts from profile value to X pixels
     const x = PADDING;
     const y = 0;
-    shownTotal = total;
 
     displayList.length = 0;
     renderStacks(0, xscale, x, y, places, +1);  // Callees
     renderStacks(0, xscale, x, y-ROW, places, -1);  // Callers (ROW left for separator)
-    display(displayList);
+    display(xscale, pos, neg, displayList);
   }
 
   // renderStacks creates boxes with top-left at x,y with children drawn as
@@ -256,29 +262,59 @@
     const groups = partitionPlaces(places);
     for (const g of groups) {
       renderGroup(depth, xscale, x, y, g, direction);
-      x += xscale*g.sum;
+      x += groupWidth(xscale, g);
     }
   }
 
+  // Some of the types used below:
+  //
+  // // Group represents a displayed (sub)tree.
+  // interface Group {
+  //   name: string;     // Full name of source
+  //   src: number;	 // Index in stacks.Sources
+  //   self: number;     // Contribution as leaf (may be < 0 for diffs)
+  //   sumpos: number;	 // Sum of |self| of positive nodes in tree (>= 0)
+  //   sumneg: number;	 // Sum of |self| of negative nodes in tree (>= 0)
+  //   places: Place[];  // Stack slots that contributed to this group
+  // }
+  //
+  // // Box is a rendered item.
+  // interface Box {
+  //   x: number;	   // X coordinate of top-left
+  //   y: number;	   // Y coordinate of top-left
+  //   width: number;	   // Width of box to display
+  //   src: number;	   // Index in stacks.Sources
+  //   sumpos: number;	   // From corresponding Group
+  //   sumneg: number;	   // From corresponding Group
+  //   self: number;	   // From corresponding Group
+  // };
+
+  function groupWidth(xscale, g) {
+    return xscale * (g.sumpos + g.sumneg);
+  }
+
   function renderGroup(depth, xscale, x, y, g, direction) {
     // Skip if not wide enough.
-    const width = xscale * g.sum;
+    const width = groupWidth(xscale, g);
     if (width < MIN_WIDTH) return;
 
     // Draw the box for g.src (except for selected element in upwards direction
     // since that duplicates the box we added in downwards direction).
     if (depth != 0 || direction > 0) {
       const box = {
-        x:         x,
-        y:         y,
-        src:       g.src,
-        sum:       g.sum,
-        selfValue: g.self,
-        width:     xscale*g.sum,
-        selfWidth: (direction > 0) ? xscale*g.self : 0,
+        x:      x,
+        y:      y,
+        width:  width,
+        src:    g.src,
+        sumpos: g.sumpos,
+        sumneg: g.sumneg,
+        self:   g.self,
       };
       displayList.push(box);
-      x += box.selfWidth;
+      if (direction > 0) {
+        // Leave a gap on the left-hand side to indicate the self contribution.
+        x += xscale*Math.abs(g.self);
+      }
     }
     y += direction * ROW;
 
@@ -322,11 +358,15 @@
       let group = groupMap.get(src);
       if (!group) {
         const name = stacks.Sources[src].FullName;
-        group = {name: name, src: src, sum: 0, self: 0, places: []};
+        group = {name: name, src: src, sumpos: 0, sumneg: 0, self: 0, places: []};
         groupMap.set(src, group);
         groups.push(group);
       }
-      group.sum += stack.Value;
+      if (stack.Value < 0) {
+        group.sumneg += -stack.Value;
+      } else {
+        group.sumpos += stack.Value;
+      }
       group.self += (place.Pos == stack.Sources.length-1) ? stack.Value : 0;
       group.places.push(place);
     }
@@ -334,12 +374,14 @@
     // Order by decreasing cost (makes it easier to spot heavy functions).
     // Though alphabetical ordering is a potential alternative that will make
     // profile comparisons easier.
-    groups.sort(function(a, b) { return b.sum - a.sum; });
+    groups.sort(function(a, b) {
+      return (b.sumpos + b.sumneg) - (a.sumpos + a.sumneg);
+    });
 
     return groups;
   }
 
-  function display(list) {
+  function display(xscale, posTotal, negTotal, list) {
     // Sort boxes so that text selection follows a predictable order.
     list.sort(function(a, b) {
       if (a.y != b.y) return a.y - b.y;
@@ -353,40 +395,46 @@
     const divs = [];
     for (const box of list) {
       box.y -= adjust;
-      divs.push(drawBox(box));
+      divs.push(drawBox(xscale, box));
     }
-    divs.push(drawSep(-adjust));
+    divs.push(drawSep(-adjust, posTotal, negTotal));
 
     const h = (list.length > 0 ?  list[list.length-1].y : 0) + 4*ROW;
     chart.style.height = h+'px';
     chart.replaceChildren(...divs);
   }
 
-  function drawBox(box) {
+  function drawBox(xscale, box) {
     const srcIndex = box.src;
     const src = stacks.Sources[srcIndex];
 
+    function makeRect(cl, x, y, w, h) {
+      const r = document.createElement('div');
+      r.style.left = x+'px';
+      r.style.top = y+'px';
+      r.style.width = w+'px';
+      r.style.height = h+'px';
+      r.classList.add(cl);
+      return r;
+    }
+
     // Background
     const w = box.width - 1; // Leave 1px gap
-    const r = document.createElement('div');
-    r.style.left = box.x + 'px';
-    r.style.top = box.y + 'px';
-    r.style.width = w + 'px';
-    r.style.height = ROW + 'px';
-    r.classList.add('boxbg');
-    r.style.background = makeColor(src.Color);
+    const r = makeRect('boxbg', box.x, box.y, w, ROW);
+    if (!diff) r.style.background = makeColor(src.Color);
     addElem(srcIndex, r);
     if (!src.Inlined) {
       r.classList.add('not-inlined');
     }
 
-    // Box that shows time spent in self
-    if (box.selfWidth >= MIN_WIDTH) {
-      const s = document.createElement('div');
-      s.style.width = Math.min(box.selfWidth, w)+'px';
-      s.style.height = (ROW-1)+'px';
-      s.classList.add('self');
-      r.appendChild(s);
+    // Positive/negative indicator for diff mode.
+    if (diff) {
+      const delta = box.sumpos - box.sumneg;
+      const partWidth = xscale * Math.abs(delta);
+      if (partWidth >= MIN_WIDTH) {
+        r.appendChild(makeRect((delta < 0 ? 'negative' : 'positive'),
+                               0, 0, partWidth, ROW-1));
+      }
     }
 
     // Label
@@ -404,11 +452,9 @@
     return r;
   }
 
-  function drawSep(y) {
+  function drawSep(y, posTotal, negTotal) {
     const m = document.createElement('div');
-    m.innerText = percent(shownTotal, stacks.Total) +
-	'\xa0\xa0\xa0\xa0' +  // Some non-breaking spaces
-	valueString(shownTotal);
+    m.innerText = summary(posTotal, negTotal);
     m.style.top = (y-ROW) + 'px';
     m.style.left = PADDING + 'px';
     m.style.width = (chart.clientWidth - PADDING*2) + 'px';
@@ -458,36 +504,66 @@
     t.innerText = text;
   }
 
-  // totalValue returns the combined sum of the stacks listed in places.
+  // totalValue returns the positive and negative sums of the Values of stacks
+  // listed in places.
   function totalValue(places) {
     const seen = new Set();
-    let result = 0;
+    let pos = 0;
+    let neg = 0;
     for (const place of places) {
       if (seen.has(place.Stack)) continue; // Do not double-count stacks
       seen.add(place.Stack);
       const stack = stacks.Stacks[place.Stack];
-      result += stack.Value;
+      if (stack.Value < 0) {
+        neg += -stack.Value;
+      } else {
+        pos += stack.Value;
+      }
     }
-    return result;
+    return [pos, neg];
+  }
+
+  function summary(pos, neg) {
+    // Examples:
+    //    6s (10%)
+    //    12s (20%) 🠆 18s (30%)
+    return diff ? diffText(neg, pos) : percentText(pos);
   }
 
   function details(box) {
-    // E.g., 10% 7s
-    // or    10% 7s (3s self
-    let result = percent(box.sum, stacks.Total) + ' ' + valueString(box.sum);
-    if (box.selfValue > 0) {
-      result += ` (${valueString(box.selfValue)} self)`;
+    // Examples:
+    //    6s (10%)
+    //    6s (10%) │ self 3s
+    //    6s (10%) │ 12s (20%) 🠆 18s (30%)
+    let result = percentText(box.sumpos - box.sumneg);
+    if (box.self != 0) {
+      result += " │ self " + unitText(box.self);
+    }
+    if (diff && box.sumpos > 0 && box.sumneg > 0) {
+      result += " │ " + diffText(box.sumneg, box.sumpos);
     }
     return result;
   }
 
-  function percent(v, total) {
-    return Number(((100.0 * v) / total).toFixed(1)) + '%';
+  // diffText returns text that displays from and to alongside their percentages.
+  // E.g., 9s (45%) 🠆 10s (50%)
+  function diffText(from, to) {
+    return percentText(from) + " 🠆 " + percentText(to);
   }
 
-  // valueString returns a formatted string to display for value.
-  function valueString(value) {
-    let v = value * stacks.Scale;
+  // percentText returns text that displays v in appropriate units alongside its
+  // percentage.
+  function percentText(v) {
+    function percent(v, total) {
+      return Number(((100.0 * v) / total).toFixed(1)) + '%';
+    }
+    return unitText(v) + " (" + percent(v, stacks.Total) + ")";
+  }
+
+  // unitText returns a formatted string to display for value.
+  function unitText(value) {
+    const sign = (value < 0) ? "-" : "";
+    let v = Math.abs(value) * stacks.Scale;
     // Rescale to appropriate display unit.
     let unit = stacks.Unit;
     const list = UNITS.get(unit);
@@ -501,7 +577,7 @@
         }
       }
     }
-    return Number(v.toFixed(2)) + unit;
+    return sign + Number(v.toFixed(2)) + unit;
   }
 
   function find(name) {
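The JavaScript above keeps positive and negative stack values in separate sums so that diff profiles render sensibly; a rough Go transcription of that accounting (purely illustrative, simplified to skip units and percentages) is:

package main

import "fmt"

// splitTotals mirrors totalValue() above: values from the diff base come in
// negative, and both signs are accumulated separately as magnitudes.
func splitTotals(values []int64) (pos, neg int64) {
	for _, v := range values {
		if v < 0 {
			neg += -v
		} else {
			pos += v
		}
	}
	return pos, neg
}

// summary mirrors summary()/diffText() above: a plain profile shows a single
// figure, a diff shows "before 🠆 after".
func summary(pos, neg int64, diff bool) string {
	if diff {
		return fmt.Sprintf("%d 🠆 %d", neg, pos)
	}
	return fmt.Sprintf("%d", pos)
}

func main() {
	pos, neg := splitTotals([]int64{40, -10, 25, -5})
	fmt.Println(summary(pos, neg, true)) // 15 🠆 65
}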
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go
index 8881e39..41b3002 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go
@@ -112,7 +112,7 @@
 	ui.help["details"] = "Show information about the profile and this view"
 	ui.help["graph"] = "Display profile as a directed graph"
 	ui.help["flamegraph"] = "Display profile as a flame graph"
-	ui.help["flamegraph2"] = "Display profile as a flame graph (experimental version that can display caller info on selection)"
+	ui.help["flamegraphold"] = "Display profile as a flame graph (old version; slated for removal)"
 	ui.help["reset"] = "Show the entire profile"
 	ui.help["save_config"] = "Save current settings"
 
@@ -125,15 +125,16 @@
 		Host:     host,
 		Port:     port,
 		Handlers: map[string]http.Handler{
-			"/":             http.HandlerFunc(ui.dot),
-			"/top":          http.HandlerFunc(ui.top),
-			"/disasm":       http.HandlerFunc(ui.disasm),
-			"/source":       http.HandlerFunc(ui.source),
-			"/peek":         http.HandlerFunc(ui.peek),
-			"/flamegraph":   http.HandlerFunc(ui.flamegraph),
-			"/flamegraph2":  http.HandlerFunc(ui.stackView), // Experimental
-			"/saveconfig":   http.HandlerFunc(ui.saveConfig),
-			"/deleteconfig": http.HandlerFunc(ui.deleteConfig),
+			"/":              http.HandlerFunc(ui.dot),
+			"/top":           http.HandlerFunc(ui.top),
+			"/disasm":        http.HandlerFunc(ui.disasm),
+			"/source":        http.HandlerFunc(ui.source),
+			"/peek":          http.HandlerFunc(ui.peek),
+			"/flamegraphold": http.HandlerFunc(ui.flamegraph),
+			"/flamegraph":    http.HandlerFunc(ui.stackView),
+			"/flamegraph2":   http.HandlerFunc(ui.stackView), // Support older URL
+			"/saveconfig":    http.HandlerFunc(ui.saveConfig),
+			"/deleteconfig":  http.HandlerFunc(ui.deleteConfig),
 			"/download": http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
 				w.Header().Set("Content-Type", "application/vnd.google.protobuf+gzip")
 				w.Header().Set("Content-Disposition", "attachment;filename=profile.pb.gz")
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go
index 74b904c..b64ef27 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go
@@ -33,7 +33,7 @@
 	javaRegExp = regexp.MustCompile(`^(?:[a-z]\w*\.)*([A-Z][\w\$]*\.(?:<init>|[a-z][\w\$]*(?:\$\d+)?))(?:(?:\()|$)`)
 	// Removes package name and method arguments for Go function names.
 	// See tests for examples.
-	goRegExp = regexp.MustCompile(`^(?:[\w\-\.]+\/)+(.+)`)
+	goRegExp = regexp.MustCompile(`^(?:[\w\-\.]+\/)+([^.]+\..+)`)
 	// Removes potential module versions in a package path.
 	goVerRegExp = regexp.MustCompile(`^(.*?)/v(?:[2-9]|[1-9][0-9]+)([./].*)$`)
 	// Strips C++ namespace prefix from a C++ function / method name.
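A self-contained check (sample symbol invented for illustration) of what the tightened goRegExp above captures — the group must now look like a package-qualified name, i.e. a non-dot segment followed by a dot:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Copy of the updated expression above.
	goRegExp := regexp.MustCompile(`^(?:[\w\-\.]+\/)+([^.]+\..+)`)

	name := "github.com/google/pprof/internal/report.New"
	if m := goRegExp.FindStringSubmatch(name); m != nil {
		fmt.Println(m[1]) // report.New
	}
}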
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go
index b5fcfbc..d9644f9 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go
@@ -121,7 +121,7 @@
 	return false
 }
 
-// Scale a measurement from an unit to a different unit and returns
+// Scale a measurement from a unit to a different unit and returns
 // the scaled value and the target unit. The returned target unit
 // will be empty if uninteresting (could be skipped).
 func Scale(value int64, fromUnit, toUnit string) (float64, string) {
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/package.go b/src/cmd/vendor/github.com/google/pprof/internal/report/package.go
index 6d53859..0f6dcf5 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/report/package.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/report/package.go
@@ -4,7 +4,7 @@
 
 // pkgRE extracts package name, It looks for the first "." or "::" that occurs
 // after the last "/". (Searching after the last / allows us to correctly handle
-// names that look like "some.url.com/foo.bar".
+// names that look like "some.url.com/foo.bar".)
 var pkgRE = regexp.MustCompile(`^((.*/)?[\w\d_]+)(\.|::)([^/]*)$`)
 
 // packageName returns the package name of the named symbol, or "" if not found.
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go
index 36ddf2e..f73e49a 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go
@@ -433,7 +433,16 @@
 	}
 
 	if len(syms) == 0 {
-		return fmt.Errorf("no matches found for regexp: %s", o.Symbol)
+		// The symbol regexp case
+		if address == nil {
+			return fmt.Errorf("no matches found for regexp %s", o.Symbol)
+		}
+
+		// The address case
+		if len(symbols) == 0 {
+			return fmt.Errorf("no matches found for address 0x%x", *address)
+		}
+		return fmt.Errorf("address 0x%x found in binary, but the corresponding symbols do not have samples in the profile", *address)
 	}
 
 	// Correlate the symbols from the binary with the profile samples.
@@ -505,22 +514,26 @@
 	return nil
 }
 
-// symbolsFromBinaries examines the binaries listed on the profile
-// that have associated samples, and identifies symbols matching rx.
+// symbolsFromBinaries examines the binaries listed on the profile that have
+// associated samples, and returns the identified symbols matching rx.
 func symbolsFromBinaries(prof *profile.Profile, g *graph.Graph, rx *regexp.Regexp, address *uint64, obj plugin.ObjTool) []*objSymbol {
-	hasSamples := make(map[string]bool)
-	// Only examine mappings that have samples that match the
-	// regexp. This is an optimization to speed up pprof.
+	// fileHasSamplesAndMatched is an optimization to speed up pprof: when we later
+	// walk through the profile mappings, we only examine the ones that have
+	// samples and match the regexp.
+	fileHasSamplesAndMatched := make(map[string]bool)
 	for _, n := range g.Nodes {
 		if name := n.Info.PrintableName(); rx.MatchString(name) && n.Info.Objfile != "" {
-			hasSamples[n.Info.Objfile] = true
+			fileHasSamplesAndMatched[n.Info.Objfile] = true
 		}
 	}
 
 	// Walk all mappings looking for matching functions with samples.
 	var objSyms []*objSymbol
 	for _, m := range prof.Mapping {
-		if !hasSamples[m.File] {
+		// Skip the mapping if its file has no samples or does not match the regexp
+		// (unless an address was requested and the mapping's range covers that
+		// address).
+		if !fileHasSamplesAndMatched[m.File] {
 			if address == nil || !(m.Start <= *address && *address <= m.Limit) {
 				continue
 			}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go
index 87f202b..c3f6cc6 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go
@@ -214,9 +214,9 @@
 func demanglerModeToOptions(demanglerMode string) []demangle.Option {
 	switch demanglerMode {
 	case "": // demangled, simplified: no parameters, no templates, no return type
-		return []demangle.Option{demangle.NoParams, demangle.NoTemplateParams}
+		return []demangle.Option{demangle.NoParams, demangle.NoEnclosingParams, demangle.NoTemplateParams}
 	case "templates": // demangled, simplified: no parameters, no return type
-		return []demangle.Option{demangle.NoParams}
+		return []demangle.Option{demangle.NoParams, demangle.NoEnclosingParams}
 	case "full":
 		return []demangle.Option{demangle.NoClones}
 	case "none": // no demangling
@@ -371,7 +371,7 @@
 	segments map[*profile.Mapping]plugin.ObjFile
 }
 
-// Close releases any external processes being used for the mapping.
+// close releases any external processes being used for the mapping.
 func (mt *mappingTable) close() {
 	for _, segment := range mt.segments {
 		segment.Close()
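A hedged usage sketch of the option sets returned by demanglerModeToOptions above, passed to demangle.Filter (the mangled name is a made-up example):

package main

import (
	"fmt"

	"github.com/ianlancetaylor/demangle"
)

func main() {
	sym := "_ZN3foo3barEi" // hypothetical mangled C++ symbol

	// Default mode (""): no parameters, no enclosing parameters, no templates.
	fmt.Println(demangle.Filter(sym,
		demangle.NoParams, demangle.NoEnclosingParams, demangle.NoTemplateParams)) // foo::bar

	// "full" mode keeps everything except clone suffixes.
	fmt.Println(demangle.Filter(sym, demangle.NoClones)) // foo::bar(int)
}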
diff --git a/src/cmd/vendor/github.com/google/pprof/profile/encode.go b/src/cmd/vendor/github.com/google/pprof/profile/encode.go
index c8a1beb..182c926 100644
--- a/src/cmd/vendor/github.com/google/pprof/profile/encode.go
+++ b/src/cmd/vendor/github.com/google/pprof/profile/encode.go
@@ -258,10 +258,10 @@
 		// If this a main linux kernel mapping with a relocation symbol suffix
 		// ("[kernel.kallsyms]_text"), extract said suffix.
 		// It is fairly hacky to handle at this level, but the alternatives appear even worse.
-		if strings.HasPrefix(m.File, "[kernel.kallsyms]") {
-			m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "")
+		const prefix = "[kernel.kallsyms]"
+		if strings.HasPrefix(m.File, prefix) {
+			m.KernelRelocationSymbol = m.File[len(prefix):]
 		}
-
 	}
 
 	functions := make(map[uint64]*Function, len(p.Function))
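A small worked example (illustrative only) of the prefix handling above:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const prefix = "[kernel.kallsyms]"
	file := "[kernel.kallsyms]_text" // typical main kernel mapping name
	if strings.HasPrefix(file, prefix) {
		// Slicing off the prefix yields the relocation symbol directly.
		fmt.Println(file[len(prefix):]) // _text
	}
}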
diff --git a/src/cmd/vendor/github.com/google/pprof/profile/profile.go b/src/cmd/vendor/github.com/google/pprof/profile/profile.go
index 4ec00fe..60ef7e9 100644
--- a/src/cmd/vendor/github.com/google/pprof/profile/profile.go
+++ b/src/cmd/vendor/github.com/google/pprof/profile/profile.go
@@ -72,9 +72,23 @@
 type Sample struct {
 	Location []*Location
 	Value    []int64
-	Label    map[string][]string
+	// Label is a per-label-key map to values for string labels.
+	//
+	// In general, having multiple values for a given label key is strongly
+	// discouraged - see the docs for the sample label field in profile.proto.
+	// This unlikely state is tracked here mainly so that the decoding->encoding
+	// roundtrip is not lossy; we expect the value slices in this map to always
+	// have length 1.
+	Label map[string][]string
+	// NumLabel is a per-label-key map to values for numeric labels. See a note
+	// above on handling multiple values for a label.
 	NumLabel map[string][]int64
-	NumUnit  map[string][]string
+	// NumUnit is a per-label-key map to the unit names of corresponding numeric
+	// label values. The unit info may be missing even if the label is in
+	// NumLabel; see the docs in profile.proto for details. When the value slice
+	// is present and not nil, its length must be equal to the length of the
+	// corresponding value slice in NumLabel.
+	NumUnit map[string][]string
 
 	locationIDX []uint64
 	labelX      []label
@@ -715,6 +729,35 @@
 	return false
 }
 
+// SetNumLabel sets the specified key to the specified value for all samples in the
+// profile. "unit" is a slice that describes the units that each corresponding member
+// of "value" is measured in (e.g. bytes or seconds). If there is no relevant
+// unit for a given value, that member of "unit" should be the empty string.
+// "unit" must either have the same length as "value", or be nil.
+func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
+	for _, sample := range p.Sample {
+		if sample.NumLabel == nil {
+			sample.NumLabel = map[string][]int64{key: value}
+		} else {
+			sample.NumLabel[key] = value
+		}
+		if sample.NumUnit == nil {
+			sample.NumUnit = map[string][]string{key: unit}
+		} else {
+			sample.NumUnit[key] = unit
+		}
+	}
+}
+
+// RemoveNumLabel removes all numerical labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveNumLabel(key string) {
+	for _, sample := range p.Sample {
+		delete(sample.NumLabel, key)
+		delete(sample.NumUnit, key)
+	}
+}
+
 // DiffBaseSample returns true if a sample belongs to the diff base and false
 // otherwise.
 func (s *Sample) DiffBaseSample() bool {
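A short usage sketch for the new helpers above; the profile contents and label key are made up for illustration:

package main

import (
	"fmt"

	"github.com/google/pprof/profile"
)

func main() {
	p := &profile.Profile{
		Sample: []*profile.Sample{{Value: []int64{1}}},
	}

	// Attach a numeric label (with a unit) to every sample.
	p.SetNumLabel("allocation_size", []int64{4096}, []string{"bytes"})
	fmt.Println(p.Sample[0].NumLabel["allocation_size"]) // [4096]
	fmt.Println(p.Sample[0].NumUnit["allocation_size"])  // [bytes]

	// And strip it from every sample again.
	p.RemoveNumLabel("allocation_size")
	fmt.Println(len(p.Sample[0].NumLabel)) // 0
}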
diff --git a/src/cmd/vendor/github.com/ianlancetaylor/demangle/SECURITY.md b/src/cmd/vendor/github.com/ianlancetaylor/demangle/SECURITY.md
new file mode 100644
index 0000000..f4edf9e
--- /dev/null
+++ b/src/cmd/vendor/github.com/ianlancetaylor/demangle/SECURITY.md
@@ -0,0 +1,13 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/ianlancetaylor/demangle/security/advisories/new).
+
+This project is maintained by volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure.
diff --git a/src/cmd/vendor/github.com/ianlancetaylor/demangle/ast.go b/src/cmd/vendor/github.com/ianlancetaylor/demangle/ast.go
index 20d8a99..cdc98c3 100644
--- a/src/cmd/vendor/github.com/ianlancetaylor/demangle/ast.go
+++ b/src/cmd/vendor/github.com/ianlancetaylor/demangle/ast.go
@@ -38,25 +38,42 @@
 // ASTToString returns the demangled name of the AST.
 func ASTToString(a AST, options ...Option) string {
 	tparams := true
+	enclosingParams := true
 	llvmStyle := false
+	max := 0
 	for _, o := range options {
-		switch o {
-		case NoTemplateParams:
+		switch {
+		case o == NoTemplateParams:
 			tparams = false
-		case LLVMStyle:
+		case o == NoEnclosingParams:
+			enclosingParams = false
+		case o == LLVMStyle:
 			llvmStyle = true
+		case isMaxLength(o):
+			max = maxLength(o)
 		}
 	}
 
-	ps := printState{tparams: tparams, llvmStyle: llvmStyle}
+	ps := printState{
+		tparams:         tparams,
+		enclosingParams: enclosingParams,
+		llvmStyle:       llvmStyle,
+		max:             max,
+	}
 	a.print(&ps)
-	return ps.buf.String()
+	s := ps.buf.String()
+	if max > 0 && len(s) > max {
+		s = s[:max]
+	}
+	return s
 }
 
 // The printState type holds information needed to print an AST.
 type printState struct {
-	tparams   bool // whether to print template parameters
-	llvmStyle bool
+	tparams         bool // whether to print template parameters
+	enclosingParams bool // whether to print enclosing parameters
+	llvmStyle       bool
+	max             int // maximum output length
 
 	buf  strings.Builder
 	last byte // Last byte written to buffer.
@@ -88,6 +105,10 @@
 
 // Print an AST.
 func (ps *printState) print(a AST) {
+	if ps.max > 0 && ps.buf.Len() > ps.max {
+		return
+	}
+
 	c := 0
 	for _, v := range ps.printing {
 		if v == a {
@@ -1144,7 +1165,7 @@
 
 func (ft *FunctionType) print(ps *printState) {
 	retType := ft.Return
-	if ft.ForLocalName && !ps.llvmStyle {
+	if ft.ForLocalName && (!ps.enclosingParams || !ps.llvmStyle) {
 		retType = nil
 	}
 	if retType != nil {
@@ -1201,16 +1222,18 @@
 	}
 
 	ps.writeByte('(')
-	first := true
-	for _, a := range ft.Args {
-		if ps.isEmpty(a) {
-			continue
+	if !ft.ForLocalName || ps.enclosingParams {
+		first := true
+		for _, a := range ft.Args {
+			if ps.isEmpty(a) {
+				continue
+			}
+			if !first {
+				ps.writeString(", ")
+			}
+			ps.print(a)
+			first = false
 		}
-		if !first {
-			ps.writeString(", ")
-		}
-		ps.print(a)
-		first = false
 	}
 	ps.writeByte(')')
 
diff --git a/src/cmd/vendor/github.com/ianlancetaylor/demangle/demangle.go b/src/cmd/vendor/github.com/ianlancetaylor/demangle/demangle.go
index 66ac7dd..14e77a6 100644
--- a/src/cmd/vendor/github.com/ianlancetaylor/demangle/demangle.go
+++ b/src/cmd/vendor/github.com/ianlancetaylor/demangle/demangle.go
@@ -27,11 +27,21 @@
 
 const (
 	// The NoParams option disables demangling of function parameters.
+	// It only omits the parameters of the function name being demangled,
+	// not the parameter types of other functions that may be mentioned.
+	// Using the option will speed up the demangler and cause it to
+	// use less memory.
 	NoParams Option = iota
 
 	// The NoTemplateParams option disables demangling of template parameters.
+	// This applies to both C++ and Rust.
 	NoTemplateParams
 
+	// The NoEnclosingParams option disables demangling of the function
+	// parameter types of the enclosing function when demangling a
+	// local name defined within a function.
+	NoEnclosingParams
+
 	// The NoClones option disables inclusion of clone suffixes.
 	// NoParams implies NoClones.
 	NoClones
@@ -51,6 +61,34 @@
 	LLVMStyle
 )
 
+// maxLengthShift is how we shift the MaxLength value.
+const maxLengthShift = 16
+
+// maxLengthMask is a mask for the maxLength value.
+const maxLengthMask = 0x1f << maxLengthShift
+
+// MaxLength returns an Option that limits the maximum length of a
+// demangled string. The maximum length is expressed as a power of 2,
+// so a value of 1 limits the returned string to 2 characters, and
+// a value of 16 limits the returned string to 65,536 characters.
+// The value must be between 1 and 30.
+func MaxLength(pow int) Option {
+	if pow <= 0 || pow > 30 {
+		panic("demangle: invalid MaxLength value")
+	}
+	return Option(pow << maxLengthShift)
+}
+
+// isMaxLength reports whether an Option holds a maximum length.
+func isMaxLength(opt Option) bool {
+	return opt&maxLengthMask != 0
+}
+
+// maxLength returns the maximum length stored in an Option.
+func maxLength(opt Option) int {
+	return 1 << ((opt & maxLengthMask) >> maxLengthShift)
+}
+
 // Filter demangles a C++ or Rust symbol name,
 // returning the human-readable C++ or Rust name.
 // If any error occurs during demangling, the input string is returned.
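A quick worked example of the MaxLength encoding above: the power of two is packed into the masked bits of the Option and decoded back by maxLength (the symbol below is a made-up example):

package main

import (
	"fmt"

	"github.com/ianlancetaylor/demangle"
)

func main() {
	// MaxLength(16) stores 16 in the option's length bits; the demangler
	// decodes it as a cap of 1<<16 = 65536 output characters.
	fmt.Println(int(demangle.MaxLength(16))) // 1048576, i.e. 16 << 16

	// Typical use: cap pathological demanglings at 2^10 = 1024 characters.
	fmt.Println(demangle.Filter("_ZN3foo3barEi", demangle.MaxLength(10))) // foo::bar(int)
}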
@@ -216,17 +254,19 @@
 	clones := true
 	verbose := false
 	for _, o := range options {
-		switch o {
-		case NoParams:
+		switch {
+		case o == NoParams:
 			params = false
 			clones = false
-		case NoClones:
+		case o == NoClones:
 			clones = false
-		case Verbose:
+		case o == Verbose:
 			verbose = true
-		case NoTemplateParams, LLVMStyle:
+		case o == NoTemplateParams || o == NoEnclosingParams || o == LLVMStyle || isMaxLength(o):
 			// These are valid options but only affect
 			// printing of the AST.
+		case o == NoRust:
+			// Unimportant here.
 		default:
 			return nil, fmt.Errorf("unrecognized demangler option %v", o)
 		}
@@ -660,7 +700,7 @@
 		}
 	}
 
-	isCast := false
+	var cast *Cast
 	for {
 		if len(st.str) == 0 {
 			st.fail("expected prefix")
@@ -672,7 +712,10 @@
 			un, isUnCast := st.unqualifiedName()
 			next = un
 			if isUnCast {
-				isCast = true
+				if tn, ok := un.(*TaggedName); ok {
+					un = tn.Name
+				}
+				cast = un.(*Cast)
 			}
 		} else {
 			switch st.str[0] {
@@ -726,10 +769,10 @@
 				var args []AST
 				args = st.templateArgs()
 				tmpl := &Template{Name: a, Args: args}
-				if isCast {
-					st.setTemplate(a, tmpl)
+				if cast != nil {
+					st.setTemplate(cast, tmpl)
 					st.clearTemplateArgs(args)
-					isCast = false
+					cast = nil
 				}
 				a = nil
 				next = tmpl
@@ -739,8 +782,12 @@
 				if a == nil {
 					st.fail("expected prefix")
 				}
-				if isCast {
-					st.setTemplate(a, nil)
+				if cast != nil {
+					var toTmpl *Template
+					if castTempl, ok := cast.To.(*Template); ok {
+						toTmpl = castTempl
+					}
+					st.setTemplate(cast, toTmpl)
 				}
 				return a
 			case 'M':
@@ -770,10 +817,10 @@
 				}
 				st.advance(1)
 				tmpl := &Template{Name: a, Args: args}
-				if isCast {
-					st.setTemplate(a, tmpl)
+				if cast != nil {
+					st.setTemplate(cast, tmpl)
 					st.clearTemplateArgs(args)
-					isCast = false
+					cast = nil
 				}
 				a = nil
 				next = tmpl
@@ -1715,7 +1762,7 @@
 	return tp
 }
 
-// mergeQualifiers merges two qualifer lists into one.
+// mergeQualifiers merges two qualifier lists into one.
 func mergeQualifiers(q1AST, q2AST AST) AST {
 	if q1AST == nil {
 		return q2AST
diff --git a/src/cmd/vendor/github.com/ianlancetaylor/demangle/rust.go b/src/cmd/vendor/github.com/ianlancetaylor/demangle/rust.go
index 3979218..f3d2d33 100644
--- a/src/cmd/vendor/github.com/ianlancetaylor/demangle/rust.go
+++ b/src/cmd/vendor/github.com/ianlancetaylor/demangle/rust.go
@@ -40,6 +40,15 @@
 
 	name = name[2:]
 	rst := &rustState{orig: name, str: name}
+
+	for _, o := range options {
+		if o == NoTemplateParams {
+			rst.noGenericArgs = true
+		} else if isMaxLength(o) {
+			rst.max = maxLength(o)
+		}
+	}
+
 	rst.symbolName()
 
 	if len(rst.str) > 0 {
@@ -62,18 +71,24 @@
 		}
 	}
 
-	return rst.buf.String(), nil
+	s := rst.buf.String()
+	if rst.max > 0 && len(s) > rst.max {
+		s = s[:rst.max]
+	}
+	return s, nil
 }
 
 // A rustState holds the current state of demangling a Rust string.
 type rustState struct {
-	orig      string          // the original string being demangled
-	str       string          // remainder of string to demangle
-	off       int             // offset of str within original string
-	buf       strings.Builder // demangled string being built
-	skip      bool            // don't print, just skip
-	lifetimes int64           // number of bound lifetimes
-	last      byte            // last byte written to buffer
+	orig          string          // the original string being demangled
+	str           string          // remainder of string to demangle
+	off           int             // offset of str within original string
+	buf           strings.Builder // demangled string being built
+	skip          bool            // don't print, just skip
+	lifetimes     int64           // number of bound lifetimes
+	last          byte            // last byte written to buffer
+	noGenericArgs bool            // don't demangle generic arguments
+	max           int             // maximum output length
 }
 
 // fail panics with demangleErr, to be caught in rustToString.
@@ -104,6 +119,10 @@
 	if rst.skip {
 		return
 	}
+	if rst.max > 0 && rst.buf.Len() > rst.max {
+		rst.skip = true
+		return
+	}
 	rst.last = c
 	rst.buf.WriteByte(c)
 }
@@ -113,6 +132,10 @@
 	if rst.skip {
 		return
 	}
+	if rst.max > 0 && rst.buf.Len() > rst.max {
+		rst.skip = true
+		return
+	}
 	if len(s) > 0 {
 		rst.last = s[len(s)-1]
 		rst.buf.WriteString(s)
@@ -232,15 +255,7 @@
 			rst.writeString("::")
 		}
 		rst.writeByte('<')
-		first := true
-		for len(rst.str) > 0 && rst.str[0] != 'E' {
-			if first {
-				first = false
-			} else {
-				rst.writeString(", ")
-			}
-			rst.genericArg()
-		}
+		rst.genericArgs()
 		rst.writeByte('>')
 		rst.checkChar('E')
 	case 'B':
@@ -436,6 +451,27 @@
 	return string(output)
 }
 
+// genericArgs prints a list of generic arguments, without angle brackets.
+func (rst *rustState) genericArgs() {
+	if rst.noGenericArgs {
+		hold := rst.skip
+		rst.skip = true
+		defer func() {
+			rst.skip = hold
+		}()
+	}
+
+	first := true
+	for len(rst.str) > 0 && rst.str[0] != 'E' {
+		if first {
+			first = false
+		} else {
+			rst.writeString(", ")
+		}
+		rst.genericArg()
+	}
+}
+
 // genericArg parses:
 //
 //	<generic-arg> = <lifetime>
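The noGenericArgs handling in genericArgs above uses a save/restore-with-defer idiom to temporarily suppress output; a stripped-down illustration (toy types, not the real demangler state):

package main

import (
	"fmt"
	"strings"
)

// writer is a toy stand-in for rustState: writes are dropped while skip is set.
type writer struct {
	buf  strings.Builder
	skip bool
}

func (w *writer) writeString(s string) {
	if w.skip {
		return
	}
	w.buf.WriteString(s)
}

// suppressed runs f with output disabled and lets defer restore the previous
// skip state, the same shape as genericArgs when noGenericArgs is set.
func (w *writer) suppressed(f func()) {
	hold := w.skip
	w.skip = true
	defer func() { w.skip = hold }()
	f()
}

func main() {
	w := &writer{}
	w.writeString("Vec")
	w.suppressed(func() { w.writeString("<u8>") })
	fmt.Println(w.buf.String()) // Vec
}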
@@ -724,15 +760,7 @@
 		rst.advance(1)
 		rst.path(false)
 		rst.writeByte('<')
-		first := true
-		for len(rst.str) > 0 && rst.str[0] != 'E' {
-			if first {
-				first = false
-			} else {
-				rst.writeString(", ")
-			}
-			rst.genericArg()
-		}
+		rst.genericArgs()
 		rst.checkChar('E')
 		return true
 	case 'B':
@@ -944,6 +972,9 @@
 	if rst.skip {
 		return
 	}
+	if rst.max > 0 && rst.buf.Len() > rst.max {
+		return
+	}
 
 	idx := int(idx64)
 	if int64(idx) != idx64 {
@@ -986,6 +1017,13 @@
 // oldRustToString demangles a Rust symbol using the old demangling.
 // The second result reports whether this is a valid Rust mangled name.
 func oldRustToString(name string, options []Option) (string, bool) {
+	max := 0
+	for _, o := range options {
+		if isMaxLength(o) {
+			max = maxLength(o)
+		}
+	}
+
 	// We know that the string starts with _ZN.
 	name = name[3:]
 
@@ -1019,6 +1057,10 @@
 	// The name is a sequence of length-preceded identifiers.
 	var sb strings.Builder
 	for len(name) > 0 {
+		if max > 0 && sb.Len() > max {
+			break
+		}
+
 		if !isDigit(name[0]) {
 			return "", false
 		}
@@ -1115,5 +1157,9 @@
 		}
 	}
 
-	return sb.String(), true
+	s := sb.String()
+	if max > 0 && len(s) > max {
+		s = s[:max]
+	}
+	return s, true
 }
diff --git a/src/cmd/vendor/golang.org/x/arch/arm/armasm/plan9x.go b/src/cmd/vendor/golang.org/x/arch/arm/armasm/plan9x.go
index a143d2e..842ab98 100644
--- a/src/cmd/vendor/golang.org/x/arch/arm/armasm/plan9x.go
+++ b/src/cmd/vendor/golang.org/x/arch/arm/armasm/plan9x.go
@@ -253,10 +253,13 @@
 // [r2, r0, ror #1] -> (R2)(R0@>1)
 // inst [r2, -r0, ror #1] -> INST.U (R2)(R0@>1)
 // input:
-//   a memory operand
+//
+//	a memory operand
+//
 // return values:
-//   corresponding memory operand in Plan 9 syntax
-//   .W/.P/.U suffix
+//
+//	corresponding memory operand in Plan 9 syntax
+//	.W/.P/.U suffix
 func memOpTrans(mem Mem) (string, string) {
 	suffix := ""
 	switch mem.Mode {
diff --git a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.go b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.go
index 8c633fe..866e399 100644
--- a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.go
+++ b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.go
@@ -934,8 +934,10 @@
 	return result
 }
 
-// Register with arrangement and index: <Vm>.<Ts>[<index>],
-//   { <Vt>.B, <Vt2>.B }[<index>].
+// Register with arrangement and index:
+//
+//	<Vm>.<Ts>[<index>],
+//	{ <Vt>.B, <Vt2>.B }[<index>].
 type RegisterWithArrangementAndIndex struct {
 	r     Reg
 	a     Arrangement
diff --git a/src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/gnu.go b/src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/gnu.go
index b4c9bf8..367acdd 100644
--- a/src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/gnu.go
+++ b/src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/gnu.go
@@ -359,7 +359,7 @@
 
 // gnuArg formats arg (which is the argIndex's arg in inst) according to GNU rules.
 // NOTE: because GNUSyntax is the only caller of this func, and it receives a copy
-//       of inst, it's ok to modify inst.Args here.
+// of inst, it's ok to modify inst.Args here.
 func gnuArg(inst *Inst, argIndex int, arg Arg, pc uint64) string {
 	// special cases for load/store instructions
 	if _, ok := arg.(Offset); ok {
diff --git a/src/cmd/vendor/golang.org/x/arch/x86/x86asm/gnu.go b/src/cmd/vendor/golang.org/x/arch/x86/x86asm/gnu.go
index 75cff72..8eba1fd 100644
--- a/src/cmd/vendor/golang.org/x/arch/x86/x86asm/gnu.go
+++ b/src/cmd/vendor/golang.org/x/arch/x86/x86asm/gnu.go
@@ -10,7 +10,7 @@
 )
 
 // GNUSyntax returns the GNU assembler syntax for the instruction, as defined by GNU binutils.
-// This general form is often called ``AT&T syntax'' as a reference to AT&T System V Unix.
+// This general form is often called “AT&T syntax” as a reference to AT&T System V Unix.
 func GNUSyntax(inst Inst, pc uint64, symname SymLookup) string {
 	// Rewrite instruction to mimic GNU peculiarities.
 	// Note that inst has been passed by value and contains
diff --git a/src/cmd/vendor/golang.org/x/arch/x86/x86asm/inst.go b/src/cmd/vendor/golang.org/x/arch/x86/x86asm/inst.go
index 4632b50..e98f1a8 100644
--- a/src/cmd/vendor/golang.org/x/arch/x86/x86asm/inst.go
+++ b/src/cmd/vendor/golang.org/x/arch/x86/x86asm/inst.go
@@ -144,7 +144,7 @@
 // the interface value instead of requiring an allocation.
 
 // A Reg is a single register.
-// The zero Reg value has no name but indicates ``no register.''
+// The zero Reg value has no name but indicates “no register.”
 type Reg uint8
 
 const (
diff --git a/src/cmd/vendor/golang.org/x/mod/modfile/rule.go b/src/cmd/vendor/golang.org/x/mod/modfile/rule.go
index 930b6c5..35fd1f5 100644
--- a/src/cmd/vendor/golang.org/x/mod/modfile/rule.go
+++ b/src/cmd/vendor/golang.org/x/mod/modfile/rule.go
@@ -367,7 +367,7 @@
 				}
 			}
 			if !fixed {
-				errorf("invalid go version '%s': must match format 1.23", args[0])
+				errorf("invalid go version '%s': must match format 1.23.0", args[0])
 				return
 			}
 		}
@@ -384,7 +384,7 @@
 			errorf("toolchain directive expects exactly one argument")
 			return
 		} else if strict && !ToolchainRE.MatchString(args[0]) {
-			errorf("invalid toolchain version '%s': must match format go1.23 or local", args[0])
+			errorf("invalid toolchain version '%s': must match format go1.23.0 or local", args[0])
 			return
 		}
 		f.Toolchain = &Toolchain{Syntax: line}
@@ -542,7 +542,7 @@
 			if strings.Contains(ns, "@") {
 				return nil, errorf("replacement module must match format 'path version', not 'path@version'")
 			}
-			return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)")
+			return nil, errorf("replacement module without version must be directory path (rooted or starting with . or ..)")
 		}
 		if filepath.Separator == '/' && strings.Contains(ns, `\`) {
 			return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)")
@@ -555,7 +555,6 @@
 		}
 		if IsDirectoryPath(ns) {
 			return nil, errorf("replacement module directory path %q cannot have version", ns)
-
 		}
 	}
 	return &Replace{
@@ -679,14 +678,15 @@
 	}
 }
 
-// IsDirectoryPath reports whether the given path should be interpreted
-// as a directory path. Just like on the go command line, relative paths
+// IsDirectoryPath reports whether the given path should be interpreted as a directory path.
+// Just like on the go command line, relative paths starting with a '.' or '..' path component
 // and rooted paths are directory paths; the rest are module paths.
 func IsDirectoryPath(ns string) bool {
 	// Because go.mod files can move from one system to another,
 	// we check all known path syntaxes, both Unix and Windows.
-	return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") ||
-		strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) ||
+	return ns == "." || strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, `.\`) ||
+		ns == ".." || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, `..\`) ||
+		strings.HasPrefix(ns, "/") || strings.HasPrefix(ns, `\`) ||
 		len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':'
 }
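A brief sketch (inputs invented for illustration) of what the widened IsDirectoryPath above accepts: "." and ".." on their own now count as directory paths, while bare module paths still do not:

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	for _, p := range []string{
		".", "..", "./replace", `..\replace`, "/abs/path", `C:\work\mod`,
		"example.com/other/mod", // a module path, not a directory
	} {
		fmt.Printf("%-25q %v\n", p, modfile.IsDirectoryPath(p))
	}
}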
 
diff --git a/src/cmd/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/src/cmd/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
deleted file mode 100644
index e07899b..0000000
--- a/src/cmd/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package unsafeheader contains header declarations for the Go runtime's
-// slice and string implementations.
-//
-// This package allows x/sys to use types equivalent to
-// reflect.SliceHeader and reflect.StringHeader without introducing
-// a dependency on the (relatively heavy) "reflect" package.
-package unsafeheader
-
-import (
-	"unsafe"
-)
-
-// Slice is the runtime representation of a slice.
-// It cannot be used safely or portably and its representation may change in a later release.
-type Slice struct {
-	Data unsafe.Pointer
-	Len  int
-	Cap  int
-}
-
-// String is the runtime representation of a string.
-// It cannot be used safely or portably and its representation may change in a later release.
-type String struct {
-	Data unsafe.Pointer
-	Len  int
-}
diff --git a/src/cmd/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/src/cmd/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
index c9b6993..73687de 100644
--- a/src/cmd/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
+++ b/src/cmd/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build go1.5
-// +build go1.5
 
 package plan9
 
diff --git a/src/cmd/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/src/cmd/vendor/golang.org/x/sys/plan9/pwd_plan9.go
index 98bf56b..fb94582 100644
--- a/src/cmd/vendor/golang.org/x/sys/plan9/pwd_plan9.go
+++ b/src/cmd/vendor/golang.org/x/sys/plan9/pwd_plan9.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !go1.5
-// +build !go1.5
 
 package plan9
 
diff --git a/src/cmd/vendor/golang.org/x/sys/plan9/race.go b/src/cmd/vendor/golang.org/x/sys/plan9/race.go
index 62377d2..c02d9ed 100644
--- a/src/cmd/vendor/golang.org/x/sys/plan9/race.go
+++ b/src/cmd/vendor/golang.org/x/sys/plan9/race.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build plan9 && race
-// +build plan9,race
 
 package plan9
 
diff --git a/src/cmd/vendor/golang.org/x/sys/plan9/race0.go b/src/cmd/vendor/golang.org/x/sys/plan9/race0.go
index f8da308..7b15e15 100644
--- a/src/cmd/vendor/golang.org/x/sys/plan9/race0.go
+++ b/src/cmd/vendor/golang.org/x/sys/plan9/race0.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build plan9 && !race
-// +build plan9,!race
 
 package plan9
 
diff --git a/src/cmd/vendor/golang.org/x/sys/plan9/str.go b/src/cmd/vendor/golang.org/x/sys/plan9/str.go
index 55fa8d0..ba3e8ff 100644
--- a/src/cmd/vendor/golang.org/x/sys/plan9/str.go
+++ b/src/cmd/vendor/golang.org/x/sys/plan9/str.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build plan9
-// +build plan9
 
 package plan9
 
diff --git a/src/cmd/vendor/golang.org/x/sys/plan9/syscall.go b/src/cmd/vendor/golang.org/x/sys/plan9/syscall.go
index 67e5b01..d631fd6 100644
--- a/src/cmd/vendor/golang.org/x/sys/plan9/syscall.go
+++ b/src/cmd/vendor/golang.org/x/sys/plan9/syscall.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build plan9
-// +build plan9
 
 // Package plan9 contains an interface to the low-level operating system
 // primitives. OS details vary depending on the underlying system, and
diff --git a/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go
index 3f40b9b..f780d5c 100644
--- a/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build plan9 && 386
-// +build plan9,386
 
 package plan9
 
diff --git a/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go
index 0e6a96a..7de6106 100644
--- a/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build plan9 && amd64
-// +build plan9,amd64
 
 package plan9
 
diff --git a/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
index 244c501..ea85780 100644
--- a/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build plan9 && arm
-// +build plan9,arm
 
 package plan9
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/aliases.go b/src/cmd/vendor/golang.org/x/sys/unix/aliases.go
index abc89c1..e7d3df4 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/aliases.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/aliases.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
-// +build go1.9
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
index db9171c..269e173 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_386.s
index e0fcd9b..a4fcef0 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_386.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_386.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (freebsd || netbsd || openbsd) && gc
-// +build freebsd netbsd openbsd
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
index 2b99c34..1e63615 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc
-// +build darwin dragonfly freebsd netbsd openbsd
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
index d702d4a..6496c31 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (freebsd || netbsd || openbsd) && gc
-// +build freebsd netbsd openbsd
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
index fe36a73..4fd1f54 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (darwin || freebsd || netbsd || openbsd) && gc
-// +build darwin freebsd netbsd openbsd
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
index e5b9a84..42f7eb9 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (darwin || freebsd || netbsd || openbsd) && gc
-// +build darwin freebsd netbsd openbsd
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
index d560019..f890266 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (darwin || freebsd || netbsd || openbsd) && gc
-// +build darwin freebsd netbsd openbsd
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_386.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_386.s
index 8fd101d..3b47348 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_386.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_386.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
index 7ed38e4..67e29f3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm.s
index 8ef1d51..d6ae269 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
index 98ae027..01e5e25 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
@@ -3,9 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && arm64 && gc
-// +build linux
-// +build arm64
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
index 5653572..2abf12f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
@@ -3,9 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && loong64 && gc
-// +build linux
-// +build loong64
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
index 21231d2..f84bae7 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
@@ -3,9 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && (mips64 || mips64le) && gc
-// +build linux
-// +build mips64 mips64le
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
index 6783b26..f08f628 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
@@ -3,9 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && (mips || mipsle) && gc
-// +build linux
-// +build mips mipsle
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
index 19d4989..bdfc024 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
@@ -3,9 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && (ppc64 || ppc64le) && gc
-// +build linux
-// +build ppc64 ppc64le
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
index e42eb81..2e8c996 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build riscv64 && gc
-// +build riscv64
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
index c46aab3..2c394b1 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
@@ -3,9 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && s390x && gc
-// +build linux
-// +build s390x
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
index 5e7a116..fab586a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
index f8c5394..f949ec5 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
index 3b54e18..2f67ba8 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
@@ -3,9 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build zos && s390x && gc
-// +build zos
-// +build s390x
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/cap_freebsd.go b/src/cmd/vendor/golang.org/x/sys/unix/cap_freebsd.go
index 0b7c6ad..a086578 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/cap_freebsd.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/cap_freebsd.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build freebsd
-// +build freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/constants.go b/src/cmd/vendor/golang.org/x/sys/unix/constants.go
index 394a396..6fb7cb7 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/constants.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/constants.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
index 65a9985..d785134 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix && ppc
-// +build aix,ppc
 
 // Functions to access/create device major and minor numbers matching the
 // encoding used by AIX.
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
index 8fc08ad..623a5e6 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix && ppc64
-// +build aix,ppc64
 
 // Functions to access/create device major and minor numbers matching the
 // encoding used by AIX.
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/dev_zos.go b/src/cmd/vendor/golang.org/x/sys/unix/dev_zos.go
index a388e59..bb6a64f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/dev_zos.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/dev_zos.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build zos && s390x
-// +build zos,s390x
 
 // Functions to access/create device major and minor numbers matching the
 // encoding used by z/OS.
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/dirent.go b/src/cmd/vendor/golang.org/x/sys/unix/dirent.go
index 2499f97..1ebf117 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/dirent.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/dirent.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/endian_big.go b/src/cmd/vendor/golang.org/x/sys/unix/endian_big.go
index a520265..1095fd3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/endian_big.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/endian_big.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //
 //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
-// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go b/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go
index b0f2bc4..b9f0e27 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //
 //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
-// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/env_unix.go b/src/cmd/vendor/golang.org/x/sys/unix/env_unix.go
index 29ccc4d..a96da71 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/env_unix.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/env_unix.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 // Unix environment variables.
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/epoll_zos.go b/src/cmd/vendor/golang.org/x/sys/unix/epoll_zos.go
index cedaf7e..7753fdd 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/epoll_zos.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/epoll_zos.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build zos && s390x
-// +build zos,s390x
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/fcntl.go b/src/cmd/vendor/golang.org/x/sys/unix/fcntl.go
index e9b9912..6200876 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/fcntl.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/fcntl.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build dragonfly || freebsd || linux || netbsd || openbsd
-// +build dragonfly freebsd linux netbsd openbsd
+//go:build dragonfly || freebsd || linux || netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/src/cmd/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
index 29d4480..13b4acd 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc)
-// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/fdset.go b/src/cmd/vendor/golang.org/x/sys/unix/fdset.go
index a8068f9..9e83d18 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/fdset.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/fdset.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/src/cmd/vendor/golang.org/x/sys/unix/fstatfs_zos.go
index e377cc9..c8bde60 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/fstatfs_zos.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/fstatfs_zos.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build zos && s390x
-// +build zos,s390x
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/gccgo.go b/src/cmd/vendor/golang.org/x/sys/unix/gccgo.go
index b06f52d..aca5721 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/gccgo.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/gccgo.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gccgo && !aix && !hurd
-// +build gccgo,!aix,!hurd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/gccgo_c.c b/src/cmd/vendor/golang.org/x/sys/unix/gccgo_c.c
index f98a1c5..d468b7b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/gccgo_c.c
+++ b/src/cmd/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gccgo && !aix && !hurd
-// +build gccgo,!aix,!hurd
 
 #include <errno.h>
 #include <stdint.h>
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
index e60e49a..972d61b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gccgo && linux && amd64
-// +build gccgo,linux,amd64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go
index 15721a5..848840a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux
-// +build linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ioctl_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/ioctl_linux.go
index 0d12c08..dbe680e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ioctl_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -231,3 +231,8 @@
 func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error {
 	return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value))
 }
+
+// IoctlLoopConfigure configures all loop device parameters in a single step
+func IoctlLoopConfigure(fd int, value *LoopConfig) error {
+	return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value))
+}
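
A usage sketch for the new wrapper (not part of this patch): LOOP_CONFIGURE lets a caller attach and configure a loop device in one ioctl instead of the older LOOP_SET_FD + LOOP_SET_STATUS64 pair. The device and backing paths are placeholders, and the LoopConfig field name Fd is an assumption based on the kernel's struct loop_config; check the generated ztypes_linux.go for the exact Go names.

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

// attachLoop attaches backingPath to /dev/loop0 with a single LOOP_CONFIGURE ioctl.
func attachLoop(backingPath string) error {
	loopFd, err := unix.Open("/dev/loop0", unix.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer unix.Close(loopFd)

	backFd, err := unix.Open(backingPath, unix.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer unix.Close(backFd)

	// Kernels before 5.8 do not know LOOP_CONFIGURE and return an error;
	// callers can fall back to LOOP_SET_FD + LOOP_SET_STATUS64 there.
	cfg := unix.LoopConfig{Fd: uint32(backFd)} // Fd field name assumed
	return unix.IoctlLoopConfigure(loopFd, &cfg)
}

func main() {
	if err := attachLoop("/tmp/disk.img"); err != nil {
		log.Fatal(err)
	}
}
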
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ioctl_signed.go b/src/cmd/vendor/golang.org/x/sys/unix/ioctl_signed.go
index 7def958..5b0759b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ioctl_signed.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ioctl_signed.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || solaris
-// +build aix solaris
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/src/cmd/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
index 649913d..20f470b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd
-// +build darwin dragonfly freebsd hurd linux netbsd openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ioctl_zos.go b/src/cmd/vendor/golang.org/x/sys/unix/ioctl_zos.go
index cdc21bf..c8b2a75 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ioctl_zos.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ioctl_zos.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build zos && s390x
-// +build zos,s390x
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh b/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh
index 0c4d149..6202638 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -519,6 +519,7 @@
 		$2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
 		$2 ~ /^LO_(KEY|NAME)_SIZE$/ ||
 		$2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ ||
+		$2 == "LOOP_CONFIGURE" ||
 		$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ ||
 		$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
 		$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
@@ -560,7 +561,7 @@
 		$2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ ||
 		$2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
 		$2 ~ /^CLONE_[A-Z_]+/ ||
-		$2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ &&
+		$2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ &&
 		$2 ~ /^(BPF|DLT)_/ ||
 		$2 ~ /^AUDIT_/ ||
 		$2 ~ /^(CLOCK|TIMER)_/ ||
@@ -583,6 +584,7 @@
 		$2 ~ /^PERF_/ ||
 		$2 ~ /^SECCOMP_MODE_/ ||
 		$2 ~ /^SEEK_/ ||
+		$2 ~ /^SCHED_/ ||
 		$2 ~ /^SPLICE_/ ||
 		$2 ~ /^SYNC_FILE_RANGE_/ ||
 		$2 !~ /IOC_MAGIC/ &&
@@ -624,7 +626,7 @@
 		$2 ~ /^MEM/ ||
 		$2 ~ /^WG/ ||
 		$2 ~ /^FIB_RULE_/ ||
-		$2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)}
+		$2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE|IOMIN$|IOOPT$|ALIGNOFF$|DISCARD|ROTATIONAL$|ZEROOUT$|GETDISKSEQ$)/ {printf("\t%s = C.%s\n", $2, $2)}
 		$2 ~ /^__WCOREFLAG$/ {next}
 		$2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
 
@@ -662,7 +664,6 @@
 echo '// Code generated by the command above; see README.md. DO NOT EDIT.'
 echo
 echo "//go:build ${GOARCH} && ${GOOS}"
-echo "// +build ${GOARCH},${GOOS}"
 echo
 go tool cgo -godefs -- "$@" _const.go >_error.out
 cat _error.out | grep -vf _error.grep | grep -vf _signal.grep
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/src/cmd/vendor/golang.org/x/sys/unix/mmap_nomremap.go
new file mode 100644
index 0000000..4b68e59
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/sys/unix/mmap_nomremap.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris
+
+package unix
+
+var mapper = &mmapper{
+	active: make(map[*byte][]byte),
+	mmap:   mmap,
+	munmap: munmap,
+}
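
This new file supplies the package-level mapper on platforms without mremap, replacing the per-OS copies removed elsewhere in this patch; the exported Mmap/Munmap wrappers are presumably centralized in another file not shown here. From a caller's point of view nothing changes, as in this sketch:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Anonymous, private 4 KiB mapping; with MAP_ANON no file descriptor is needed.
	b, err := unix.Mmap(-1, 0, 4096, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		log.Fatal(err)
	}
	copy(b, "mapped")
	fmt.Println(string(b[:6]))
	if err := unix.Munmap(b); err != nil {
		log.Fatal(err)
	}
}
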
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/mremap.go b/src/cmd/vendor/golang.org/x/sys/unix/mremap.go
index 86213c0..fd45fe5 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/mremap.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/mremap.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build linux
-// +build linux
+//go:build linux || netbsd
 
 package unix
 
@@ -14,8 +13,17 @@
 	mremap func(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error)
 }
 
+var mapper = &mremapMmapper{
+	mmapper: mmapper{
+		active: make(map[*byte][]byte),
+		mmap:   mmap,
+		munmap: munmap,
+	},
+	mremap: mremap,
+}
+
 func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
-	if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&MREMAP_FIXED != 0 {
+	if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&mremapFixed != 0 {
 		return nil, EINVAL
 	}
 
@@ -32,9 +40,13 @@
 	}
 	bNew := unsafe.Slice((*byte)(unsafe.Pointer(newAddr)), newLength)
 	pNew := &bNew[cap(bNew)-1]
-	if flags&MREMAP_DONTUNMAP == 0 {
+	if flags&mremapDontunmap == 0 {
 		delete(m.active, pOld)
 	}
 	m.active[pNew] = bNew
 	return bNew, nil
 }
+
+func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
+	return mapper.Mremap(oldData, newLength, flags)
+}
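
With the constraint widened to linux || netbsd and the exported Mremap wrapper moved into this file, a mapping can be resized directly through the package. A sketch assuming Linux (the flag name MREMAP_MAYMOVE is Linux-specific; NetBSD spells its flags differently):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Map one page, then double it; MREMAP_MAYMOVE lets the kernel relocate the mapping.
	b, err := unix.Mmap(-1, 0, 4096, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		log.Fatal(err)
	}
	b, err = unix.Mremap(b, 2*4096, unix.MREMAP_MAYMOVE)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Munmap(b)
	log.Printf("remapped to %d bytes", len(b))
}
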
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/pagesize_unix.go b/src/cmd/vendor/golang.org/x/sys/unix/pagesize_unix.go
index 53f1b4c..4d0a343 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/pagesize_unix.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/pagesize_unix.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
 
 // For Unix, get the pagesize from the runtime.
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/src/cmd/vendor/golang.org/x/sys/unix/pledge_openbsd.go
index eb48294..6a09af5 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/pledge_openbsd.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/pledge_openbsd.go
@@ -8,54 +8,31 @@
 	"errors"
 	"fmt"
 	"strconv"
-	"syscall"
-	"unsafe"
 )
 
 // Pledge implements the pledge syscall.
 //
-// The pledge syscall does not accept execpromises on OpenBSD releases
-// before 6.3.
-//
-// execpromises must be empty when Pledge is called on OpenBSD
-// releases predating 6.3, otherwise an error will be returned.
+// This changes both the promises and execpromises; use PledgePromises or
+// PledgeExecpromises to only change the promises or execpromises
+// respectively.
 //
 // For more information see pledge(2).
 func Pledge(promises, execpromises string) error {
-	maj, min, err := majmin()
+	if err := pledgeAvailable(); err != nil {
+		return err
+	}
+
+	pptr, err := BytePtrFromString(promises)
 	if err != nil {
 		return err
 	}
 
-	err = pledgeAvailable(maj, min, execpromises)
+	exptr, err := BytePtrFromString(execpromises)
 	if err != nil {
 		return err
 	}
 
-	pptr, err := syscall.BytePtrFromString(promises)
-	if err != nil {
-		return err
-	}
-
-	// This variable will hold either a nil unsafe.Pointer or
-	// an unsafe.Pointer to a string (execpromises).
-	var expr unsafe.Pointer
-
-	// If we're running on OpenBSD > 6.2, pass execpromises to the syscall.
-	if maj > 6 || (maj == 6 && min > 2) {
-		exptr, err := syscall.BytePtrFromString(execpromises)
-		if err != nil {
-			return err
-		}
-		expr = unsafe.Pointer(exptr)
-	}
-
-	_, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0)
-	if e != 0 {
-		return e
-	}
-
-	return nil
+	return pledge(pptr, exptr)
 }
 
 // PledgePromises implements the pledge syscall.
@@ -64,30 +41,16 @@
 //
 // For more information see pledge(2).
 func PledgePromises(promises string) error {
-	maj, min, err := majmin()
+	if err := pledgeAvailable(); err != nil {
+		return err
+	}
+
+	pptr, err := BytePtrFromString(promises)
 	if err != nil {
 		return err
 	}
 
-	err = pledgeAvailable(maj, min, "")
-	if err != nil {
-		return err
-	}
-
-	// This variable holds the execpromises and is always nil.
-	var expr unsafe.Pointer
-
-	pptr, err := syscall.BytePtrFromString(promises)
-	if err != nil {
-		return err
-	}
-
-	_, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0)
-	if e != 0 {
-		return e
-	}
-
-	return nil
+	return pledge(pptr, nil)
 }
 
 // PledgeExecpromises implements the pledge syscall.
@@ -96,30 +59,16 @@
 //
 // For more information see pledge(2).
 func PledgeExecpromises(execpromises string) error {
-	maj, min, err := majmin()
+	if err := pledgeAvailable(); err != nil {
+		return err
+	}
+
+	exptr, err := BytePtrFromString(execpromises)
 	if err != nil {
 		return err
 	}
 
-	err = pledgeAvailable(maj, min, execpromises)
-	if err != nil {
-		return err
-	}
-
-	// This variable holds the promises and is always nil.
-	var pptr unsafe.Pointer
-
-	exptr, err := syscall.BytePtrFromString(execpromises)
-	if err != nil {
-		return err
-	}
-
-	_, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0)
-	if e != 0 {
-		return e
-	}
-
-	return nil
+	return pledge(nil, exptr)
 }
 
 // majmin returns major and minor version number for an OpenBSD system.
@@ -147,16 +96,15 @@
 
 // pledgeAvailable checks for availability of the pledge(2) syscall
 // based on the running OpenBSD version.
-func pledgeAvailable(maj, min int, execpromises string) error {
-	// If OpenBSD <= 5.9, pledge is not available.
-	if (maj == 5 && min != 9) || maj < 5 {
-		return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min)
+func pledgeAvailable() error {
+	maj, min, err := majmin()
+	if err != nil {
+		return err
 	}
 
-	// If OpenBSD <= 6.2 and execpromises is not empty,
-	// return an error - execpromises is not available before 6.3
-	if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" {
-		return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min)
+	// Require OpenBSD 6.4 as a minimum.
+	if maj < 6 || (maj == 6 && min <= 3) {
+		return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min)
 	}
 
 	return nil
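
After this rewrite the pledge wrappers require OpenBSD 6.4 or newer and route through a shared pledge helper, so each exported function only has to build its byte pointers. A usage sketch (OpenBSD only; the promise strings are examples):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Restrict this process only, leaving execpromises untouched.
	if err := unix.PledgePromises("stdio rpath"); err != nil {
		log.Fatal(err)
	}
	// Or set both at once: the second string applies after execve(2).
	if err := unix.Pledge("stdio rpath proc exec", "stdio"); err != nil {
		log.Fatal(err)
	}
}
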
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/src/cmd/vendor/golang.org/x/sys/unix/ptrace_darwin.go
index 39dba6c..3f0975f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ptrace_darwin.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ptrace_darwin.go
@@ -3,16 +3,9 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin && !ios
-// +build darwin,!ios
 
 package unix
 
-import "unsafe"
-
 func ptrace(request int, pid int, addr uintptr, data uintptr) error {
 	return ptrace1(request, pid, addr, data)
 }
-
-func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error {
-	return ptrace1Ptr(request, pid, addr, data)
-}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ptrace_ios.go b/src/cmd/vendor/golang.org/x/sys/unix/ptrace_ios.go
index 9ea6633..a4d35db 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ptrace_ios.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ptrace_ios.go
@@ -3,16 +3,9 @@
 // license that can be found in the LICENSE file.
 
 //go:build ios
-// +build ios
 
 package unix
 
-import "unsafe"
-
 func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
 	return ENOTSUP
 }
-
-func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
-	return ENOTSUP
-}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/race.go b/src/cmd/vendor/golang.org/x/sys/unix/race.go
index 6f6c5fe..714d2aa 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/race.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/race.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (darwin && race) || (linux && race) || (freebsd && race)
-// +build darwin,race linux,race freebsd,race
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/race0.go b/src/cmd/vendor/golang.org/x/sys/unix/race0.go
index 706e132..4a9f663 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/race0.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/race0.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos
-// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/src/cmd/vendor/golang.org/x/sys/unix/readdirent_getdents.go
index 4d62575..dbd2b6c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/readdirent_getdents.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/readdirent_getdents.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd
-// +build aix dragonfly freebsd linux netbsd openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/src/cmd/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go
index 2a4ba47..130398b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin
-// +build darwin
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
index 3865943..c3a62db 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 // Socket control messages
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go
index 0840fe4..4a1eab3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin freebsd linux netbsd openbsd solaris zos
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall.go
index 63e8c83..5ea74da 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 // Package unix contains an interface to the low-level operating system
 // primitives. OS details vary depending on the underlying system, and
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix.go
index c406ae0..67ce6ce 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix
-// +build aix
 
 // Aix system calls.
 // This file is compiled as ordinary Go code,
@@ -107,7 +106,8 @@
 	if n > 0 {
 		sl += _Socklen(n) + 1
 	}
-	if sa.raw.Path[0] == '@' {
+	if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) {
+		// Check sl > 3 so we don't change unnamed socket behavior.
 		sa.raw.Path[0] = 0
 		// Don't count trailing NUL for abstract address.
 		sl--
@@ -487,8 +487,6 @@
 //sys	Unlinkat(dirfd int, path string, flags int) (err error)
 //sys	Ustat(dev int, ubuf *Ustat_t) (err error)
 //sys	write(fd int, p []byte) (n int, err error)
-//sys	readlen(fd int, p *byte, np int) (n int, err error) = read
-//sys	writelen(fd int, p *byte, np int) (n int, err error) = write
 
 //sys	Dup2(oldfd int, newfd int) (err error)
 //sys	Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64
@@ -535,21 +533,6 @@
 //sys	sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg
 
 //sys	munmap(addr uintptr, length uintptr) (err error)
-
-var mapper = &mmapper{
-	active: make(map[*byte][]byte),
-	mmap:   mmap,
-	munmap: munmap,
-}
-
-func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
-	return mapper.Mmap(fd, offset, length, prot, flags)
-}
-
-func Munmap(b []byte) (err error) {
-	return mapper.Munmap(b)
-}
-
 //sys	Madvise(b []byte, advice int) (err error)
 //sys	Mprotect(b []byte, prot int) (err error)
 //sys	Mlock(b []byte) (err error)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
index f2871fa..1fdaa47 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix && ppc
-// +build aix,ppc
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
index 75718ec..c87f9a9 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix && ppc64
-// +build aix,ppc64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_bsd.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_bsd.go
index 7705c32..a00c3e5 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || netbsd || openbsd
-// +build darwin dragonfly freebsd netbsd openbsd
 
 // BSD system call wrappers shared by *BSD based systems
 // including OS X (Darwin) and FreeBSD.  Like the other
@@ -317,7 +316,7 @@
 	if err != nil {
 		return "", err
 	}
-	return string(buf[:vallen-1]), nil
+	return ByteSliceToString(buf[:vallen]), nil
 }
 
 //sys	recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
@@ -601,20 +600,6 @@
 //	Gethostuuid(uuid *byte, timeout *Timespec) (err error)
 //	Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error)
 
-var mapper = &mmapper{
-	active: make(map[*byte][]byte),
-	mmap:   mmap,
-	munmap: munmap,
-}
-
-func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
-	return mapper.Mmap(fd, offset, length, prot, flags)
-}
-
-func Munmap(b []byte) (err error) {
-	return mapper.Munmap(b)
-}
-
 //sys	Madvise(b []byte, behav int) (err error)
 //sys	Mlock(b []byte) (err error)
 //sys	Mlockall(flags int) (err error)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 2069215..59542a8 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -510,30 +510,36 @@
 		return nil, err
 	}
 
-	// Find size.
-	n := uintptr(0)
-	if err := sysctl(mib, nil, &n, nil, 0); err != nil {
-		return nil, err
-	}
-	if n == 0 {
-		return nil, nil
-	}
-	if n%SizeofKinfoProc != 0 {
-		return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
-	}
+	for {
+		// Find size.
+		n := uintptr(0)
+		if err := sysctl(mib, nil, &n, nil, 0); err != nil {
+			return nil, err
+		}
+		if n == 0 {
+			return nil, nil
+		}
+		if n%SizeofKinfoProc != 0 {
+			return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
+		}
 
-	// Read into buffer of that size.
-	buf := make([]KinfoProc, n/SizeofKinfoProc)
-	if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil {
-		return nil, err
-	}
-	if n%SizeofKinfoProc != 0 {
-		return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
-	}
+		// Read into buffer of that size.
+		buf := make([]KinfoProc, n/SizeofKinfoProc)
+		if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil {
+			if err == ENOMEM {
+				// Process table grew. Try again.
+				continue
+			}
+			return nil, err
+		}
+		if n%SizeofKinfoProc != 0 {
+			return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
+		}
 
-	// The actual call may return less than the original reported required
-	// size so ensure we deal with that.
-	return buf[:n/SizeofKinfoProc], nil
+		// The actual call may return less than the original reported required
+		// size so ensure we deal with that.
+		return buf[:n/SizeofKinfoProc], nil
+	}
 }
 
 //sys	sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
@@ -638,189 +644,3 @@
 //sys	write(fd int, p []byte) (n int, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys	munmap(addr uintptr, length uintptr) (err error)
-//sys	readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
-
-/*
- * Unimplemented
- */
-// Profil
-// Sigaction
-// Sigprocmask
-// Getlogin
-// Sigpending
-// Sigaltstack
-// Ioctl
-// Reboot
-// Execve
-// Vfork
-// Sbrk
-// Sstk
-// Ovadvise
-// Mincore
-// Setitimer
-// Swapon
-// Select
-// Sigsuspend
-// Readv
-// Writev
-// Nfssvc
-// Getfh
-// Quotactl
-// Csops
-// Waitid
-// Add_profil
-// Kdebug_trace
-// Sigreturn
-// Atsocket
-// Kqueue_from_portset_np
-// Kqueue_portset
-// Getattrlist
-// Getdirentriesattr
-// Searchfs
-// Delete
-// Copyfile
-// Watchevent
-// Waitevent
-// Modwatch
-// Fsctl
-// Initgroups
-// Posix_spawn
-// Nfsclnt
-// Fhopen
-// Minherit
-// Semsys
-// Msgsys
-// Shmsys
-// Semctl
-// Semget
-// Semop
-// Msgctl
-// Msgget
-// Msgsnd
-// Msgrcv
-// Shm_open
-// Shm_unlink
-// Sem_open
-// Sem_close
-// Sem_unlink
-// Sem_wait
-// Sem_trywait
-// Sem_post
-// Sem_getvalue
-// Sem_init
-// Sem_destroy
-// Open_extended
-// Umask_extended
-// Stat_extended
-// Lstat_extended
-// Fstat_extended
-// Chmod_extended
-// Fchmod_extended
-// Access_extended
-// Settid
-// Gettid
-// Setsgroups
-// Getsgroups
-// Setwgroups
-// Getwgroups
-// Mkfifo_extended
-// Mkdir_extended
-// Identitysvc
-// Shared_region_check_np
-// Shared_region_map_np
-// __pthread_mutex_destroy
-// __pthread_mutex_init
-// __pthread_mutex_lock
-// __pthread_mutex_trylock
-// __pthread_mutex_unlock
-// __pthread_cond_init
-// __pthread_cond_destroy
-// __pthread_cond_broadcast
-// __pthread_cond_signal
-// Setsid_with_pid
-// __pthread_cond_timedwait
-// Aio_fsync
-// Aio_return
-// Aio_suspend
-// Aio_cancel
-// Aio_error
-// Aio_read
-// Aio_write
-// Lio_listio
-// __pthread_cond_wait
-// Iopolicysys
-// __pthread_kill
-// __pthread_sigmask
-// __sigwait
-// __disable_threadsignal
-// __pthread_markcancel
-// __pthread_canceled
-// __semwait_signal
-// Proc_info
-// sendfile
-// Stat64_extended
-// Lstat64_extended
-// Fstat64_extended
-// __pthread_chdir
-// __pthread_fchdir
-// Audit
-// Auditon
-// Getauid
-// Setauid
-// Getaudit
-// Setaudit
-// Getaudit_addr
-// Setaudit_addr
-// Auditctl
-// Bsdthread_create
-// Bsdthread_terminate
-// Stack_snapshot
-// Bsdthread_register
-// Workq_open
-// Workq_ops
-// __mac_execve
-// __mac_syscall
-// __mac_get_file
-// __mac_set_file
-// __mac_get_link
-// __mac_set_link
-// __mac_get_proc
-// __mac_set_proc
-// __mac_get_fd
-// __mac_set_fd
-// __mac_get_pid
-// __mac_get_lcid
-// __mac_get_lctx
-// __mac_set_lctx
-// Setlcid
-// Read_nocancel
-// Write_nocancel
-// Open_nocancel
-// Close_nocancel
-// Wait4_nocancel
-// Recvmsg_nocancel
-// Sendmsg_nocancel
-// Recvfrom_nocancel
-// Accept_nocancel
-// Fcntl_nocancel
-// Select_nocancel
-// Fsync_nocancel
-// Connect_nocancel
-// Sigsuspend_nocancel
-// Readv_nocancel
-// Writev_nocancel
-// Sendto_nocancel
-// Pread_nocancel
-// Pwrite_nocancel
-// Waitid_nocancel
-// Poll_nocancel
-// Msgsnd_nocancel
-// Msgrcv_nocancel
-// Sem_wait_nocancel
-// Aio_suspend_nocancel
-// __sigwait_nocancel
-// __semwait_signal_nocancel
-// __mac_mount
-// __mac_get_mount
-// __mac_getfsstat
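
The loop added at the top of this file retries the read when sysctl fails with ENOMEM because the process table grew between the size probe and the copy, so the call now succeeds under churn instead of surfacing a spurious error. A caller-side sketch, assuming the hunk belongs to SysctlKinfoProcSlice (the enclosing function name is not visible in this hunk):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Darwin only: list every process known to the kernel.
	procs, err := unix.SysctlKinfoProcSlice("kern.proc.all")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d processes\n", len(procs))
}
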
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
index 9fa8798..0eaecf5 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build amd64 && darwin
-// +build amd64,darwin
 
 package unix
 
@@ -47,6 +46,5 @@
 //sys	getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64
 //sys	Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
 //sys	ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
-//sys	ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
 //sys	Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
 //sys	Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
index f17b8c5..f36c670 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build arm64 && darwin
-// +build arm64,darwin
 
 package unix
 
@@ -47,6 +46,5 @@
 //sys	getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT
 //sys	Lstat(path string, stat *Stat_t) (err error)
 //sys	ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
-//sys	ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
 //sys	Stat(path string, stat *Stat_t) (err error)
 //sys	Statfs(path string, stat *Statfs_t) (err error)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go
index 53c9664..16dc699 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin && go1.12
-// +build darwin,go1.12
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index d4ce988..97cb916 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -343,203 +343,5 @@
 //sys	write(fd int, p []byte) (n int, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys	munmap(addr uintptr, length uintptr) (err error)
-//sys	readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
 //sys	accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error)
 //sys	utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
-
-/*
- * Unimplemented
- * TODO(jsing): Update this list for DragonFly.
- */
-// Profil
-// Sigaction
-// Sigprocmask
-// Getlogin
-// Sigpending
-// Sigaltstack
-// Reboot
-// Execve
-// Vfork
-// Sbrk
-// Sstk
-// Ovadvise
-// Mincore
-// Setitimer
-// Swapon
-// Select
-// Sigsuspend
-// Readv
-// Writev
-// Nfssvc
-// Getfh
-// Quotactl
-// Mount
-// Csops
-// Waitid
-// Add_profil
-// Kdebug_trace
-// Sigreturn
-// Atsocket
-// Kqueue_from_portset_np
-// Kqueue_portset
-// Getattrlist
-// Setattrlist
-// Getdirentriesattr
-// Searchfs
-// Delete
-// Copyfile
-// Watchevent
-// Waitevent
-// Modwatch
-// Getxattr
-// Fgetxattr
-// Setxattr
-// Fsetxattr
-// Removexattr
-// Fremovexattr
-// Listxattr
-// Flistxattr
-// Fsctl
-// Initgroups
-// Posix_spawn
-// Nfsclnt
-// Fhopen
-// Minherit
-// Semsys
-// Msgsys
-// Shmsys
-// Semctl
-// Semget
-// Semop
-// Msgctl
-// Msgget
-// Msgsnd
-// Msgrcv
-// Shmat
-// Shmctl
-// Shmdt
-// Shmget
-// Shm_open
-// Shm_unlink
-// Sem_open
-// Sem_close
-// Sem_unlink
-// Sem_wait
-// Sem_trywait
-// Sem_post
-// Sem_getvalue
-// Sem_init
-// Sem_destroy
-// Open_extended
-// Umask_extended
-// Stat_extended
-// Lstat_extended
-// Fstat_extended
-// Chmod_extended
-// Fchmod_extended
-// Access_extended
-// Settid
-// Gettid
-// Setsgroups
-// Getsgroups
-// Setwgroups
-// Getwgroups
-// Mkfifo_extended
-// Mkdir_extended
-// Identitysvc
-// Shared_region_check_np
-// Shared_region_map_np
-// __pthread_mutex_destroy
-// __pthread_mutex_init
-// __pthread_mutex_lock
-// __pthread_mutex_trylock
-// __pthread_mutex_unlock
-// __pthread_cond_init
-// __pthread_cond_destroy
-// __pthread_cond_broadcast
-// __pthread_cond_signal
-// Setsid_with_pid
-// __pthread_cond_timedwait
-// Aio_fsync
-// Aio_return
-// Aio_suspend
-// Aio_cancel
-// Aio_error
-// Aio_read
-// Aio_write
-// Lio_listio
-// __pthread_cond_wait
-// Iopolicysys
-// __pthread_kill
-// __pthread_sigmask
-// __sigwait
-// __disable_threadsignal
-// __pthread_markcancel
-// __pthread_canceled
-// __semwait_signal
-// Proc_info
-// Stat64_extended
-// Lstat64_extended
-// Fstat64_extended
-// __pthread_chdir
-// __pthread_fchdir
-// Audit
-// Auditon
-// Getauid
-// Setauid
-// Getaudit
-// Setaudit
-// Getaudit_addr
-// Setaudit_addr
-// Auditctl
-// Bsdthread_create
-// Bsdthread_terminate
-// Stack_snapshot
-// Bsdthread_register
-// Workq_open
-// Workq_ops
-// __mac_execve
-// __mac_syscall
-// __mac_get_file
-// __mac_set_file
-// __mac_get_link
-// __mac_set_link
-// __mac_get_proc
-// __mac_set_proc
-// __mac_get_fd
-// __mac_set_fd
-// __mac_get_pid
-// __mac_get_lcid
-// __mac_get_lctx
-// __mac_set_lctx
-// Setlcid
-// Read_nocancel
-// Write_nocancel
-// Open_nocancel
-// Close_nocancel
-// Wait4_nocancel
-// Recvmsg_nocancel
-// Sendmsg_nocancel
-// Recvfrom_nocancel
-// Accept_nocancel
-// Fcntl_nocancel
-// Select_nocancel
-// Fsync_nocancel
-// Connect_nocancel
-// Sigsuspend_nocancel
-// Readv_nocancel
-// Writev_nocancel
-// Sendto_nocancel
-// Pread_nocancel
-// Pwrite_nocancel
-// Waitid_nocancel
-// Msgsnd_nocancel
-// Msgrcv_nocancel
-// Sem_wait_nocancel
-// Aio_suspend_nocancel
-// __sigwait_nocancel
-// __semwait_signal_nocancel
-// __mac_mount
-// __mac_get_mount
-// __mac_getfsstat
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
index 4e2d321..14bab6b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build amd64 && dragonfly
-// +build amd64,dragonfly
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd.go
index afb1010..64d1bb4 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -449,197 +449,5 @@
 //sys	write(fd int, p []byte) (n int, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys	munmap(addr uintptr, length uintptr) (err error)
-//sys	readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
 //sys	accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error)
 //sys	utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
-
-/*
- * Unimplemented
- */
-// Profil
-// Sigaction
-// Sigprocmask
-// Getlogin
-// Sigpending
-// Sigaltstack
-// Ioctl
-// Reboot
-// Execve
-// Vfork
-// Sbrk
-// Sstk
-// Ovadvise
-// Mincore
-// Setitimer
-// Swapon
-// Select
-// Sigsuspend
-// Readv
-// Writev
-// Nfssvc
-// Getfh
-// Quotactl
-// Mount
-// Csops
-// Waitid
-// Add_profil
-// Kdebug_trace
-// Sigreturn
-// Atsocket
-// Kqueue_from_portset_np
-// Kqueue_portset
-// Getattrlist
-// Setattrlist
-// Getdents
-// Getdirentriesattr
-// Searchfs
-// Delete
-// Copyfile
-// Watchevent
-// Waitevent
-// Modwatch
-// Fsctl
-// Initgroups
-// Posix_spawn
-// Nfsclnt
-// Fhopen
-// Minherit
-// Semsys
-// Msgsys
-// Shmsys
-// Semctl
-// Semget
-// Semop
-// Msgctl
-// Msgget
-// Msgsnd
-// Msgrcv
-// Shmat
-// Shmctl
-// Shmdt
-// Shmget
-// Shm_open
-// Shm_unlink
-// Sem_open
-// Sem_close
-// Sem_unlink
-// Sem_wait
-// Sem_trywait
-// Sem_post
-// Sem_getvalue
-// Sem_init
-// Sem_destroy
-// Open_extended
-// Umask_extended
-// Stat_extended
-// Lstat_extended
-// Fstat_extended
-// Chmod_extended
-// Fchmod_extended
-// Access_extended
-// Settid
-// Gettid
-// Setsgroups
-// Getsgroups
-// Setwgroups
-// Getwgroups
-// Mkfifo_extended
-// Mkdir_extended
-// Identitysvc
-// Shared_region_check_np
-// Shared_region_map_np
-// __pthread_mutex_destroy
-// __pthread_mutex_init
-// __pthread_mutex_lock
-// __pthread_mutex_trylock
-// __pthread_mutex_unlock
-// __pthread_cond_init
-// __pthread_cond_destroy
-// __pthread_cond_broadcast
-// __pthread_cond_signal
-// Setsid_with_pid
-// __pthread_cond_timedwait
-// Aio_fsync
-// Aio_return
-// Aio_suspend
-// Aio_cancel
-// Aio_error
-// Aio_read
-// Aio_write
-// Lio_listio
-// __pthread_cond_wait
-// Iopolicysys
-// __pthread_kill
-// __pthread_sigmask
-// __sigwait
-// __disable_threadsignal
-// __pthread_markcancel
-// __pthread_canceled
-// __semwait_signal
-// Proc_info
-// Stat64_extended
-// Lstat64_extended
-// Fstat64_extended
-// __pthread_chdir
-// __pthread_fchdir
-// Audit
-// Auditon
-// Getauid
-// Setauid
-// Getaudit
-// Setaudit
-// Getaudit_addr
-// Setaudit_addr
-// Auditctl
-// Bsdthread_create
-// Bsdthread_terminate
-// Stack_snapshot
-// Bsdthread_register
-// Workq_open
-// Workq_ops
-// __mac_execve
-// __mac_syscall
-// __mac_get_file
-// __mac_set_file
-// __mac_get_link
-// __mac_set_link
-// __mac_get_proc
-// __mac_set_proc
-// __mac_get_fd
-// __mac_set_fd
-// __mac_get_pid
-// __mac_get_lcid
-// __mac_get_lctx
-// __mac_set_lctx
-// Setlcid
-// Read_nocancel
-// Write_nocancel
-// Open_nocancel
-// Close_nocancel
-// Wait4_nocancel
-// Recvmsg_nocancel
-// Sendmsg_nocancel
-// Recvfrom_nocancel
-// Accept_nocancel
-// Fcntl_nocancel
-// Select_nocancel
-// Fsync_nocancel
-// Connect_nocancel
-// Sigsuspend_nocancel
-// Readv_nocancel
-// Writev_nocancel
-// Sendto_nocancel
-// Pread_nocancel
-// Pwrite_nocancel
-// Waitid_nocancel
-// Poll_nocancel
-// Msgsnd_nocancel
-// Msgrcv_nocancel
-// Sem_wait_nocancel
-// Aio_suspend_nocancel
-// __sigwait_nocancel
-// __semwait_signal_nocancel
-// __mac_mount
-// __mac_get_mount
-// __mac_getfsstat
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
index b8da510..3967bca 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build 386 && freebsd
-// +build 386,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
index 47155c4..eff19ad 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build amd64 && freebsd
-// +build amd64,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
index 0893209..4f24b51 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build arm && freebsd
-// +build arm,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
index d151a0d..ac30759 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build arm64 && freebsd
-// +build arm64,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
index d5cd64b..aab725c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build riscv64 && freebsd
-// +build riscv64,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_hurd.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_hurd.go
index 381fd46..ba46651 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_hurd.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_hurd.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build hurd
-// +build hurd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_hurd_386.go
index 7cf54a3..df89f9e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_hurd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_hurd_386.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build 386 && hurd
-// +build 386,hurd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_illumos.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_illumos.go
index 87db5a6..a863f70 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_illumos.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_illumos.go
@@ -5,7 +5,6 @@
 // illumos system calls not present on Solaris.
 
 //go:build amd64 && illumos
-// +build amd64,illumos
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go
index 39de5f1..0f85e29 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -61,15 +61,23 @@
 }
 
 //sys	fchmodat(dirfd int, path string, mode uint32) (err error)
+//sys	fchmodat2(dirfd int, path string, mode uint32, flags int) (err error)
 
-func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
-	// Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior
-	// and check the flags. Otherwise the mode would be applied to the symlink
-	// destination which is not what the user expects.
-	if flags&^AT_SYMLINK_NOFOLLOW != 0 {
-		return EINVAL
-	} else if flags&AT_SYMLINK_NOFOLLOW != 0 {
-		return EOPNOTSUPP
+func Fchmodat(dirfd int, path string, mode uint32, flags int) error {
+	// Linux fchmodat doesn't support the flags parameter, but fchmodat2 does.
+	// Try fchmodat2 if flags are specified.
+	if flags != 0 {
+		err := fchmodat2(dirfd, path, mode, flags)
+		if err == ENOSYS {
+			// fchmodat2 isn't available. If the flags are known to be valid,
+			// return EOPNOTSUPP to indicate that fchmodat doesn't support them.
+			if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 {
+				return EINVAL
+			} else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 {
+				return EOPNOTSUPP
+			}
+		}
+		return err
 	}
 	return fchmodat(dirfd, path, mode)
 }
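
Fchmodat now forwards non-zero flags to the fchmodat2(2) syscall (Linux 6.6+), and only falls back to the old EINVAL/EOPNOTSUPP answers when the kernel lacks it. A sketch of changing the mode of a symlink itself rather than its target (the path is a placeholder):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	err := unix.Fchmodat(unix.AT_FDCWD, "/tmp/some-symlink", 0o600, unix.AT_SYMLINK_NOFOLLOW)
	if err == unix.EOPNOTSUPP {
		// Kernel without fchmodat2: the flag cannot be honoured.
		log.Print("fchmodat2 unavailable; cannot chmod the link itself")
	} else if err != nil {
		log.Fatal(err)
	}
}
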
@@ -417,7 +425,8 @@
 	if n > 0 {
 		sl += _Socklen(n) + 1
 	}
-	if sa.raw.Path[0] == '@' {
+	if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) {
+		// Check sl > 3 so we don't change unnamed socket behavior.
 		sa.raw.Path[0] = 0
 		// Don't count trailing NUL for abstract address.
 		sl--
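
The hunk above now treats a leading NUL, in addition to the documented leading '@', as marking an abstract Unix-domain address (the sl > 3 guard keeps unnamed sockets working). A sketch of binding an abstract socket, which creates no filesystem entry (the socket name is a placeholder):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// The leading '@' is translated to the NUL byte that the kernel expects.
	if err := unix.Bind(fd, &unix.SockaddrUnix{Name: "@demo-abstract"}); err != nil {
		log.Fatal(err)
	}
}
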
@@ -693,10 +702,10 @@
 
 func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	// Leave room for NUL byte terminator.
-	if len(sa.Type) > 13 {
+	if len(sa.Type) > len(sa.raw.Type)-1 {
 		return nil, 0, EINVAL
 	}
-	if len(sa.Name) > 63 {
+	if len(sa.Name) > len(sa.raw.Name)-1 {
 		return nil, 0, EINVAL
 	}
 
@@ -704,17 +713,8 @@
 	sa.raw.Feat = sa.Feature
 	sa.raw.Mask = sa.Mask
 
-	typ, err := ByteSliceFromString(sa.Type)
-	if err != nil {
-		return nil, 0, err
-	}
-	name, err := ByteSliceFromString(sa.Name)
-	if err != nil {
-		return nil, 0, err
-	}
-
-	copy(sa.raw.Type[:], typ)
-	copy(sa.raw.Name[:], name)
+	copy(sa.raw.Type[:], sa.Type)
+	copy(sa.raw.Name[:], sa.Name)
 
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil
 }
@@ -1310,7 +1310,7 @@
 			return "", err
 		}
 	}
-	return string(buf[:vallen-1]), nil
+	return ByteSliceToString(buf[:vallen]), nil
 }
 
 func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) {
@@ -1885,7 +1885,7 @@
 //sys	PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error)
 //sys	PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
 //sys	Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
-//sys	Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6
+//sys	pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error)
 //sys	read(fd int, p []byte) (n int, err error)
 //sys	Removexattr(path string, attr string) (err error)
 //sys	Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)
@@ -1988,8 +1988,6 @@
 //sys	Unshare(flags int) (err error)
 //sys	write(fd int, p []byte) (n int, err error)
 //sys	exitThread(code int) (err error) = SYS_EXIT
-//sys	readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE
 //sys	readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV
 //sys	writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV
 //sys	preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV
@@ -2125,28 +2123,6 @@
 // mmap varies by architecture; see syscall_linux_*.go.
 //sys	munmap(addr uintptr, length uintptr) (err error)
 //sys	mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error)
-
-var mapper = &mremapMmapper{
-	mmapper: mmapper{
-		active: make(map[*byte][]byte),
-		mmap:   mmap,
-		munmap: munmap,
-	},
-	mremap: mremap,
-}
-
-func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
-	return mapper.Mmap(fd, offset, length, prot, flags)
-}
-
-func Munmap(b []byte) (err error) {
-	return mapper.Munmap(b)
-}
-
-func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
-	return mapper.Mremap(oldData, newLength, flags)
-}
-
 //sys	Madvise(b []byte, advice int) (err error)
 //sys	Mprotect(b []byte, prot int) (err error)
 //sys	Mlock(b []byte) (err error)
@@ -2155,6 +2131,12 @@
 //sys	Munlock(b []byte) (err error)
 //sys	Munlockall() (err error)
 
+const (
+	mremapFixed     = MREMAP_FIXED
+	mremapDontunmap = MREMAP_DONTUNMAP
+	mremapMaymove   = MREMAP_MAYMOVE
+)
+
 // Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd,
 // using the specified flags.
 func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) {
@@ -2454,98 +2436,60 @@
 	return int(r), int(e), int(s)
 }
 
-/*
- * Unimplemented
- */
-// AfsSyscall
-// ArchPrctl
-// Brk
-// ClockNanosleep
-// ClockSettime
-// Clone
-// EpollCtlOld
-// EpollPwait
-// EpollWaitOld
-// Execve
-// Fork
-// Futex
-// GetKernelSyms
-// GetMempolicy
-// GetRobustList
-// GetThreadArea
-// Getpmsg
-// IoCancel
-// IoDestroy
-// IoGetevents
-// IoSetup
-// IoSubmit
-// IoprioGet
-// IoprioSet
-// KexecLoad
-// LookupDcookie
-// Mbind
-// MigratePages
-// Mincore
-// ModifyLdt
-// Mount
-// MovePages
-// MqGetsetattr
-// MqNotify
-// MqOpen
-// MqTimedreceive
-// MqTimedsend
-// MqUnlink
-// Msgctl
-// Msgget
-// Msgrcv
-// Msgsnd
-// Nfsservctl
-// Personality
-// Pselect6
-// Ptrace
-// Putpmsg
-// Quotactl
-// Readahead
-// Readv
-// RemapFilePages
-// RestartSyscall
-// RtSigaction
-// RtSigpending
-// RtSigqueueinfo
-// RtSigreturn
-// RtSigsuspend
-// RtSigtimedwait
-// SchedGetPriorityMax
-// SchedGetPriorityMin
-// SchedGetparam
-// SchedGetscheduler
-// SchedRrGetInterval
-// SchedSetparam
-// SchedYield
-// Security
-// Semctl
-// Semget
-// Semop
-// Semtimedop
-// SetMempolicy
-// SetRobustList
-// SetThreadArea
-// SetTidAddress
-// Sigaltstack
-// Swapoff
-// Swapon
-// Sysfs
-// TimerCreate
-// TimerDelete
-// TimerGetoverrun
-// TimerGettime
-// TimerSettime
-// Tkill (obsolete)
-// Tuxcall
-// Umount2
-// Uselib
-// Utimensat
-// Vfork
-// Vhangup
-// Vserver
-// _Sysctl
+// Pselect is a wrapper around the Linux pselect6 system call.
+// This version does not modify the timeout argument.
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	// Per https://man7.org/linux/man-pages/man2/select.2.html#NOTES,
+	// The Linux pselect6() system call modifies its timeout argument.
+	// [Not modifying the argument] is the behavior required by POSIX.1-2001.
+	var mutableTimeout *Timespec
+	if timeout != nil {
+		mutableTimeout = new(Timespec)
+		*mutableTimeout = *timeout
+	}
+
+	// The final argument of the pselect6() system call is not a
+	// sigset_t * pointer, but is instead a structure
+	var kernelMask *sigset_argpack
+	if sigmask != nil {
+		wordBits := 32 << (^uintptr(0) >> 63) // see math.intSize
+
+		// A sigset stores one bit per signal,
+		// offset by 1 (because signal 0 does not exist).
+		// So the number of words needed is ⌈(_C__NSIG - 1) / wordBits⌉.
+		sigsetWords := (_C__NSIG - 1 + wordBits - 1) / (wordBits)
+
+		sigsetBytes := uintptr(sigsetWords * (wordBits / 8))
+		kernelMask = &sigset_argpack{
+			ss:    sigmask,
+			ssLen: sigsetBytes,
+		}
+	}
+
+	return pselect6(nfd, r, w, e, mutableTimeout, kernelMask)
+}
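Because the wrapper copies the timeout into a private Timespec before calling pselect6, the caller's value survives the call unchanged. A minimal usage sketch, polling stdin with a 500 ms timeout and no signal mask:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	const stdin = 0
	var readSet unix.FdSet
	readSet.Set(stdin)

	// 500 ms timeout; Pselect leaves this value untouched.
	timeout := unix.NsecToTimespec(500 * 1000 * 1000)
	n, err := unix.Pselect(stdin+1, &readSet, nil, nil, &timeout, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("descriptors ready:", n)
}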
+
+//sys	schedSetattr(pid int, attr *SchedAttr, flags uint) (err error)
+//sys	schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error)
+
+// SchedSetAttr is a wrapper for the sched_setattr(2) syscall.
+// https://man7.org/linux/man-pages/man2/sched_setattr.2.html
+func SchedSetAttr(pid int, attr *SchedAttr, flags uint) error {
+	if attr == nil {
+		return EINVAL
+	}
+	attr.Size = SizeofSchedAttr
+	return schedSetattr(pid, attr, flags)
+}
+
+// SchedGetAttr is a wrapper for the sched_getattr(2) syscall.
+// https://man7.org/linux/man-pages/man2/sched_getattr.2.html
+func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
+	attr := &SchedAttr{}
+	if err := schedGetattr(pid, attr, SizeofSchedAttr, flags); err != nil {
+		return nil, err
+	}
+	return attr, nil
+}
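A minimal sketch of reading and adjusting the calling process's scheduling attributes with these wrappers, assuming the SchedAttr field names from ztypes_linux.go and the SCHED_BATCH constant added in zerrors_linux.go:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	pid := os.Getpid()

	// Read the current attributes; the wrapper passes SizeofSchedAttr for us.
	attr, err := unix.SchedGetAttr(pid, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println("current policy:", attr.Policy)

	// Switch this process to the batch policy; SchedSetAttr fills in attr.Size.
	attr.Policy = unix.SCHED_BATCH
	if err := unix.SchedSetAttr(pid, attr, 0); err != nil {
		panic(err)
	}
}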
+
+//sys	Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_386.go
index c7d9945..506dafa 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_386.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build 386 && linux
-// +build 386,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go
index 08086ac..38d5564 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && (386 || amd64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || ppc || s390x || sparc64)
-// +build linux
-// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
index 5b21fcf..d557cf8 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build amd64 && linux
-// +build amd64,linux
 
 package unix
 
@@ -40,7 +39,7 @@
 	if timeout != nil {
 		ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
 	}
-	return Pselect(nfd, r, w, e, ts, nil)
+	return pselect6(nfd, r, w, e, ts, nil)
 }
 
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go
index 8b0f0f3..facdb83 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build amd64 && linux && gc
-// +build amd64,linux,gc
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
index da29864..cd2dd79 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build arm && linux
-// +build arm,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index a81f574..cf2ee6c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build arm64 && linux
-// +build arm64,linux
 
 package unix
 
@@ -33,7 +32,7 @@
 	if timeout != nil {
 		ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
 	}
-	return Pselect(nfd, r, w, e, ts, nil)
+	return pselect6(nfd, r, w, e, ts, nil)
 }
 
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc.go
index 2b1168d..ffc4c2b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && gc
-// +build linux,gc
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go
index 9843fb4..9ebfdcf 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && gc && 386
-// +build linux,gc,386
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go
index a6008fc..5f2b57c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build arm && gc && linux
-// +build arm,gc,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go
index 7740af2..d1a3ad8 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && gccgo && 386
-// +build linux,gccgo,386
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go
index e16a122..f2f6742 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && gccgo && arm
-// +build linux,gccgo,arm
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
index 69d2d7c..3d0e984 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build loong64 && linux
-// +build loong64,linux
 
 package unix
 
@@ -28,7 +27,7 @@
 	if timeout != nil {
 		ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
 	}
-	return Pselect(nfd, r, w, e, ts, nil)
+	return pselect6(nfd, r, w, e, ts, nil)
 }
 
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
index 76d5640..70963a9 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && (mips64 || mips64le)
-// +build linux
-// +build mips64 mips64le
 
 package unix
 
@@ -31,7 +29,7 @@
 	if timeout != nil {
 		ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
 	}
-	return Pselect(nfd, r, w, e, ts, nil)
+	return pselect6(nfd, r, w, e, ts, nil)
 }
 
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
index aae7f0f..c218ebd 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && (mips || mipsle)
-// +build linux
-// +build mips mipsle
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go
index 66eff19..e6c4850 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && ppc
-// +build linux,ppc
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
index 806aa25..7286a9a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && (ppc64 || ppc64le)
-// +build linux
-// +build ppc64 ppc64le
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 35851ef..6f5a288 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build riscv64 && linux
-// +build riscv64,linux
 
 package unix
 
@@ -32,7 +31,7 @@
 	if timeout != nil {
 		ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
 	}
-	return Pselect(nfd, r, w, e, ts, nil)
+	return pselect6(nfd, r, w, e, ts, nil)
 }
 
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
@@ -177,3 +176,14 @@
 	}
 	return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
 }
+
+//sys	riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error)
+
+func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error) {
+	var setSize uintptr
+
+	if set != nil {
+		setSize = uintptr(unsafe.Sizeof(*set))
+	}
+	return riscvHWProbe(pairs, setSize, set, flags)
+}
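A minimal sketch of probing for the compressed-instruction ("C") extension on linux/riscv64, assuming the RISCV_HWPROBE_* key and bit constants from zerrors_linux_riscv64.go:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pairs := []unix.RISCVHWProbePairs{
		{Key: unix.RISCV_HWPROBE_KEY_IMA_EXT_0},
	}
	// A nil CPUSet asks about every online CPU.
	if err := unix.RISCVHWProbe(pairs, nil, 0); err != nil {
		panic(err)
	}
	hasC := pairs[0].Value&unix.RISCV_HWPROBE_IMA_C != 0
	fmt.Println("compressed instructions supported:", hasC)
}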
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
index 2f89e8f..66f3121 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build s390x && linux
-// +build s390x,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
index 7ca064a..11d1f16 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build sparc64 && linux
-// +build sparc64,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 018d7d4..8816209 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -356,266 +356,16 @@
 //sys	write(fd int, p []byte) (n int, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys	munmap(addr uintptr, length uintptr) (err error)
-//sys	readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
 //sys	utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
 
-/*
- * Unimplemented
- */
-// ____semctl13
-// __clone
-// __fhopen40
-// __fhstat40
-// __fhstatvfs140
-// __fstat30
-// __getcwd
-// __getfh30
-// __getlogin
-// __lstat30
-// __mount50
-// __msgctl13
-// __msync13
-// __ntp_gettime30
-// __posix_chown
-// __posix_fchown
-// __posix_lchown
-// __posix_rename
-// __setlogin
-// __shmctl13
-// __sigaction_sigtramp
-// __sigaltstack14
-// __sigpending14
-// __sigprocmask14
-// __sigsuspend14
-// __sigtimedwait
-// __stat30
-// __syscall
-// __vfork14
-// _ksem_close
-// _ksem_destroy
-// _ksem_getvalue
-// _ksem_init
-// _ksem_open
-// _ksem_post
-// _ksem_trywait
-// _ksem_unlink
-// _ksem_wait
-// _lwp_continue
-// _lwp_create
-// _lwp_ctl
-// _lwp_detach
-// _lwp_exit
-// _lwp_getname
-// _lwp_getprivate
-// _lwp_kill
-// _lwp_park
-// _lwp_self
-// _lwp_setname
-// _lwp_setprivate
-// _lwp_suspend
-// _lwp_unpark
-// _lwp_unpark_all
-// _lwp_wait
-// _lwp_wakeup
-// _pset_bind
-// _sched_getaffinity
-// _sched_getparam
-// _sched_setaffinity
-// _sched_setparam
-// acct
-// aio_cancel
-// aio_error
-// aio_fsync
-// aio_read
-// aio_return
-// aio_suspend
-// aio_write
-// break
-// clock_getres
-// clock_gettime
-// clock_settime
-// compat_09_ogetdomainname
-// compat_09_osetdomainname
-// compat_09_ouname
-// compat_10_omsgsys
-// compat_10_osemsys
-// compat_10_oshmsys
-// compat_12_fstat12
-// compat_12_getdirentries
-// compat_12_lstat12
-// compat_12_msync
-// compat_12_oreboot
-// compat_12_oswapon
-// compat_12_stat12
-// compat_13_sigaction13
-// compat_13_sigaltstack13
-// compat_13_sigpending13
-// compat_13_sigprocmask13
-// compat_13_sigreturn13
-// compat_13_sigsuspend13
-// compat_14___semctl
-// compat_14_msgctl
-// compat_14_shmctl
-// compat_16___sigaction14
-// compat_16___sigreturn14
-// compat_20_fhstatfs
-// compat_20_fstatfs
-// compat_20_getfsstat
-// compat_20_statfs
-// compat_30___fhstat30
-// compat_30___fstat13
-// compat_30___lstat13
-// compat_30___stat13
-// compat_30_fhopen
-// compat_30_fhstat
-// compat_30_fhstatvfs1
-// compat_30_getdents
-// compat_30_getfh
-// compat_30_ntp_gettime
-// compat_30_socket
-// compat_40_mount
-// compat_43_fstat43
-// compat_43_lstat43
-// compat_43_oaccept
-// compat_43_ocreat
-// compat_43_oftruncate
-// compat_43_ogetdirentries
-// compat_43_ogetdtablesize
-// compat_43_ogethostid
-// compat_43_ogethostname
-// compat_43_ogetkerninfo
-// compat_43_ogetpagesize
-// compat_43_ogetpeername
-// compat_43_ogetrlimit
-// compat_43_ogetsockname
-// compat_43_okillpg
-// compat_43_olseek
-// compat_43_ommap
-// compat_43_oquota
-// compat_43_orecv
-// compat_43_orecvfrom
-// compat_43_orecvmsg
-// compat_43_osend
-// compat_43_osendmsg
-// compat_43_osethostid
-// compat_43_osethostname
-// compat_43_osigblock
-// compat_43_osigsetmask
-// compat_43_osigstack
-// compat_43_osigvec
-// compat_43_otruncate
-// compat_43_owait
-// compat_43_stat43
-// execve
-// extattr_delete_fd
-// extattr_delete_file
-// extattr_delete_link
-// extattr_get_fd
-// extattr_get_file
-// extattr_get_link
-// extattr_list_fd
-// extattr_list_file
-// extattr_list_link
-// extattr_set_fd
-// extattr_set_file
-// extattr_set_link
-// extattrctl
-// fchroot
-// fdatasync
-// fgetxattr
-// fktrace
-// flistxattr
-// fork
-// fremovexattr
-// fsetxattr
-// fstatvfs1
-// fsync_range
-// getcontext
-// getitimer
-// getvfsstat
-// getxattr
-// ktrace
-// lchflags
-// lchmod
-// lfs_bmapv
-// lfs_markv
-// lfs_segclean
-// lfs_segwait
-// lgetxattr
-// lio_listio
-// listxattr
-// llistxattr
-// lremovexattr
-// lseek
-// lsetxattr
-// lutimes
-// madvise
-// mincore
-// minherit
-// modctl
-// mq_close
-// mq_getattr
-// mq_notify
-// mq_open
-// mq_receive
-// mq_send
-// mq_setattr
-// mq_timedreceive
-// mq_timedsend
-// mq_unlink
-// mremap
-// msgget
-// msgrcv
-// msgsnd
-// nfssvc
-// ntp_adjtime
-// pmc_control
-// pmc_get_info
-// pollts
-// preadv
-// profil
-// pselect
-// pset_assign
-// pset_create
-// pset_destroy
-// ptrace
-// pwritev
-// quotactl
-// rasctl
-// readv
-// reboot
-// removexattr
-// sa_enable
-// sa_preempt
-// sa_register
-// sa_setconcurrency
-// sa_stacks
-// sa_yield
-// sbrk
-// sched_yield
-// semconfig
-// semget
-// semop
-// setcontext
-// setitimer
-// setxattr
-// shmat
-// shmdt
-// shmget
-// sstk
-// statvfs1
-// swapctl
-// sysarch
-// syscall
-// timer_create
-// timer_delete
-// timer_getoverrun
-// timer_gettime
-// timer_settime
-// undelete
-// utrace
-// uuidgen
-// vadvise
-// vfork
-// writev
+const (
+	mremapFixed     = MAP_FIXED
+	mremapDontunmap = 0
+	mremapMaymove   = 0
+)
+
+//sys	mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) = SYS_MREMAP
+
+func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) {
+	return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags)
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
index 5199d28..7a5eb57 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build 386 && netbsd
-// +build 386,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
index 70a9c52..62d8957 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build amd64 && netbsd
-// +build amd64,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
index 3eb5942..ce6a068 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build arm && netbsd
-// +build arm,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
index fc6ccfd..d46d689 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build arm64 && netbsd
-// +build arm64,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index c5f166a..b25343c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -137,18 +137,13 @@
 }
 
 func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
-	var _p0 unsafe.Pointer
+	var bufptr *Statfs_t
 	var bufsize uintptr
 	if len(buf) > 0 {
-		_p0 = unsafe.Pointer(&buf[0])
+		bufptr = &buf[0]
 		bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
 	}
-	r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
-	n = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
+	return getfsstat(bufptr, bufsize, flags)
 }
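The rewrite routes Getfsstat through the generated libc getfsstat stub instead of a raw Syscall; the exported behavior is unchanged. A minimal usage sketch, assuming the MNT_NOWAIT constant from the generated OpenBSD errors file:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// A nil buffer just reports how many filesystems are mounted.
	n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT)
	if err != nil {
		panic(err)
	}
	buf := make([]unix.Statfs_t, n)
	n, err = unix.Getfsstat(buf, unix.MNT_NOWAIT)
	if err != nil {
		panic(err)
	}
	fmt.Println("mounted filesystems:", n)
}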
 
 //sysnb	getresuid(ruid *_C_int, euid *_C_int, suid *_C_int)
@@ -171,6 +166,20 @@
 
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
 
+//sys	fcntl(fd int, cmd int, arg int) (n int, err error)
+//sys	fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL
+
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+	return fcntl(int(fd), cmd, arg)
+}
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+	_, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk))
+	return err
+}
+
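A minimal sketch of taking and releasing a whole-file write lock with the new FcntlFlock wrapper; a zero Whence/Start/Len locks from the start of the file to EOF:

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile("/tmp/example.lock", os.O_CREATE|os.O_RDWR, 0o600)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Zero Whence/Start/Len covers the whole file; F_SETLKW blocks until the
	// write lock can be taken.
	lk := unix.Flock_t{Type: unix.F_WRLCK}
	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &lk); err != nil {
		panic(err)
	}

	// ... do the work that needs exclusive access ...

	lk.Type = unix.F_UNLCK
	_ = unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk)
}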
 //sys	ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
 
 func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
@@ -326,78 +335,7 @@
 //sys	write(fd int, p []byte) (n int, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys	munmap(addr uintptr, length uintptr) (err error)
-//sys	readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+//sys	getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error)
 //sys	utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
-
-/*
- * Unimplemented
- */
-// __getcwd
-// __semctl
-// __syscall
-// __sysctl
-// adjfreq
-// break
-// clock_getres
-// clock_gettime
-// clock_settime
-// closefrom
-// execve
-// fhopen
-// fhstat
-// fhstatfs
-// fork
-// futimens
-// getfh
-// getgid
-// getitimer
-// getlogin
-// getthrid
-// ktrace
-// lfs_bmapv
-// lfs_markv
-// lfs_segclean
-// lfs_segwait
-// mincore
-// minherit
-// mount
-// mquery
-// msgctl
-// msgget
-// msgrcv
-// msgsnd
-// nfssvc
-// nnpfspioctl
-// preadv
-// profil
-// pwritev
-// quotactl
-// readv
-// reboot
-// renameat
-// rfork
-// sched_yield
-// semget
-// semop
-// setgroups
-// setitimer
-// setsockopt
-// shmat
-// shmctl
-// shmdt
-// shmget
-// sigaction
-// sigaltstack
-// sigpending
-// sigprocmask
-// sigreturn
-// sigsuspend
-// sysarch
-// syscall
-// threxit
-// thrsigdivert
-// thrsleep
-// thrwakeup
-// vfork
-// writev
+//sys	pledge(promises *byte, execpromises *byte) (err error)
+//sys	unveil(path *byte, flags *byte) (err error)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
index 6baabcd..9ddc89f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build 386 && openbsd
-// +build 386,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
index bab2536..70a3c96 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build amd64 && openbsd
-// +build amd64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
index 8eed3c4..265caa8 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build arm && openbsd
-// +build arm,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
index 483dde9..ac4fda1 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build arm64 && openbsd
-// +build arm64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
index 04aa43f..0a451e6 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build openbsd
-// +build openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go
index c279613..30a308c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ppc64 && openbsd
-// +build ppc64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go
index 23199a7..ea95433 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build riscv64 && openbsd
-// +build riscv64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_solaris.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_solaris.go
index b600a28..21974af 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -128,7 +128,8 @@
 	if n > 0 {
 		sl += _Socklen(n) + 1
 	}
-	if sa.raw.Path[0] == '@' {
+	if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) {
+		// Check sl > 3 so we don't change unnamed socket behavior.
 		sa.raw.Path[0] = 0
 		// Don't count trailing NUL for abstract address.
 		sl--
@@ -157,7 +158,7 @@
 	if err != nil {
 		return "", err
 	}
-	return string(buf[:vallen-1]), nil
+	return ByteSliceToString(buf[:vallen]), nil
 }
 
 const ImplementsGetwd = true
@@ -698,38 +699,6 @@
 //sys	setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt
 //sys	recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
-var mapper = &mmapper{
-	active: make(map[*byte][]byte),
-	mmap:   mmap,
-	munmap: munmap,
-}
-
-func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
-	return mapper.Mmap(fd, offset, length, prot, flags)
-}
-
-func Munmap(b []byte) (err error) {
-	return mapper.Munmap(b)
-}
-
 // Event Ports
 
 type fileObjCookie struct {
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
index 0bd25ef..e02d8ce 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build amd64 && solaris
-// +build amd64,solaris
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix.go
index 8e48c29..77081de 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
 
 package unix
 
@@ -147,6 +146,14 @@
 	return nil
 }
 
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+	return mapper.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+	return mapper.Munmap(b)
+}
+
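With Mmap and Munmap hoisted into this portable file, every mapper-backed platform shares a single definition. A minimal usage sketch that maps a file read-only:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		panic(err)
	}

	data, err := unix.Mmap(int(f.Fd()), 0, int(st.Size()),
		unix.PROT_READ, unix.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(data)

	fmt.Printf("mapped %d bytes of %s\n", len(data), f.Name())
}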
 func Read(fd int, p []byte) (n int, err error) {
 	n, err = read(fd, p)
 	if raceenabled {
@@ -541,6 +548,9 @@
 	if err != nil {
 		return err
 	}
+	if (flag&O_NONBLOCK != 0) == nonblocking {
+		return nil
+	}
 	if nonblocking {
 		flag |= O_NONBLOCK
 	} else {
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix_gc.go
index b6919ca..05c95bc 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix_gc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix_gc.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc
-// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris
-// +build gc
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go
index f6f707a..23f39b7 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go
@@ -3,9 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && (ppc64le || ppc64) && gc
-// +build linux
-// +build ppc64le ppc64
-// +build gc
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
index d3d49ec..b473038 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build zos && s390x
-// +build zos,s390x
 
 package unix
 
@@ -192,7 +191,6 @@
 
 //sys   fcntl(fd int, cmd int, arg int) (val int, err error)
 //sys	read(fd int, p []byte) (n int, err error)
-//sys   readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
 //sys	write(fd int, p []byte) (n int, err error)
 
 //sys	accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A
@@ -285,25 +283,11 @@
 	return
 }
 
-var mapper = &mmapper{
-	active: make(map[*byte][]byte),
-	mmap:   mmap,
-	munmap: munmap,
-}
-
 // Dummy function: there are no semantics for Madvise on z/OS
 func Madvise(b []byte, advice int) (err error) {
 	return
 }
 
-func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
-	return mapper.Mmap(fd, offset, length, prot, flags)
-}
-
-func Munmap(b []byte) (err error) {
-	return mapper.Munmap(b)
-}
-
 //sys   Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
 //sysnb	Getegid() (egid int)
 //sysnb	Geteuid() (uid int)
@@ -1120,7 +1104,7 @@
 		return "", err
 	}
 
-	return string(buf[:vallen-1]), nil
+	return ByteSliceToString(buf[:vallen]), nil
 }
 
 func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_linux.go
index 2c3a443..4fcd38d 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_linux.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux
-// +build linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_unix.go
index 5bb41d1..79a84f1 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_unix.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_unix.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (darwin && !ios) || linux
-// +build darwin,!ios linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go
index 71bddef..9eb0db6 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin && !ios
-// +build darwin,!ios
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/timestruct.go b/src/cmd/vendor/golang.org/x/sys/unix/timestruct.go
index 616b1b2..7997b19 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/timestruct.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/timestruct.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/src/cmd/vendor/golang.org/x/sys/unix/unveil_openbsd.go
index 168d5ae..cb7e598 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/unveil_openbsd.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/unveil_openbsd.go
@@ -4,39 +4,48 @@
 
 package unix
 
-import (
-	"syscall"
-	"unsafe"
-)
+import "fmt"
 
 // Unveil implements the unveil syscall.
 // For more information see unveil(2).
 // Note that the special case of blocking further
 // unveil calls is handled by UnveilBlock.
 func Unveil(path string, flags string) error {
-	pathPtr, err := syscall.BytePtrFromString(path)
+	if err := supportsUnveil(); err != nil {
+		return err
+	}
+	pathPtr, err := BytePtrFromString(path)
 	if err != nil {
 		return err
 	}
-	flagsPtr, err := syscall.BytePtrFromString(flags)
+	flagsPtr, err := BytePtrFromString(flags)
 	if err != nil {
 		return err
 	}
-	_, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0)
-	if e != 0 {
-		return e
-	}
-	return nil
+	return unveil(pathPtr, flagsPtr)
 }
 
 // UnveilBlock blocks future unveil calls.
 // For more information see unveil(2).
 func UnveilBlock() error {
-	// Both pointers must be nil.
-	var pathUnsafe, flagsUnsafe unsafe.Pointer
-	_, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0)
-	if e != 0 {
-		return e
+	if err := supportsUnveil(); err != nil {
+		return err
 	}
+	return unveil(nil, nil)
+}
+
+// supportsUnveil checks for availability of the unveil(2) system call based
+// on the running OpenBSD version.
+func supportsUnveil() error {
+	maj, min, err := majmin()
+	if err != nil {
+		return err
+	}
+
+	// unveil is not available before 6.4
+	if maj < 6 || (maj == 6 && min <= 3) {
+		return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min)
+	}
+
 	return nil
 }
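A minimal sketch of the usual unveil preamble; with the version check above, the call returns a descriptive error on OpenBSD releases older than 6.4, where unveil(2) does not exist:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Expose /etc read-only ...
	if err := unix.Unveil("/etc", "r"); err != nil {
		fmt.Fprintln(os.Stderr, "unveil:", err)
		os.Exit(1)
	}
	// ... then block any further unveil calls.
	if err := unix.UnveilBlock(); err != nil {
		fmt.Fprintln(os.Stderr, "unveil block:", err)
		os.Exit(1)
	}

	data, err := os.ReadFile("/etc/hosts")
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes through the unveiled path\n", len(data))
}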
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/xattr_bsd.go b/src/cmd/vendor/golang.org/x/sys/unix/xattr_bsd.go
index f5f8e9f..e168793 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/xattr_bsd.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/xattr_bsd.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build freebsd || netbsd
-// +build freebsd netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
index ca9799b..2fb219d 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc && aix
-// +build ppc,aix
 
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs -- -maix32 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
index 200c8c2..b0e6f5c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64 && aix
-// +build ppc64,aix
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -maix64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
index 1430076..e40fa85 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && darwin
-// +build amd64,darwin
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
index ab044a7..bb02aa6 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && darwin
-// +build arm64,darwin
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
index 17bba0e..c0e0f86 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && dragonfly
-// +build amd64,dragonfly
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
index f8c2c51..6c69239 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && freebsd
-// +build 386,freebsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m32 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
index 96310c3..dd9163f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && freebsd
-// +build amd64,freebsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
index 777b69d..493a2a7 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && freebsd
-// +build arm,freebsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
index c557ac2..8b437b3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && freebsd
-// +build arm64,freebsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go
index 341b4d9..67c02dd 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build riscv64 && freebsd
-// +build riscv64,freebsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 3784f40..c73cfe2 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -1,7 +1,6 @@
 // Code generated by mkmerge; DO NOT EDIT.
 
 //go:build linux
-// +build linux
 
 package unix
 
@@ -481,10 +480,13 @@
 	BPF_FROM_BE                                 = 0x8
 	BPF_FROM_LE                                 = 0x0
 	BPF_FS_MAGIC                                = 0xcafe4a11
+	BPF_F_AFTER                                 = 0x10
 	BPF_F_ALLOW_MULTI                           = 0x2
 	BPF_F_ALLOW_OVERRIDE                        = 0x1
 	BPF_F_ANY_ALIGNMENT                         = 0x2
-	BPF_F_KPROBE_MULTI_RETURN                   = 0x1
+	BPF_F_BEFORE                                = 0x8
+	BPF_F_ID                                    = 0x20
+	BPF_F_NETFILTER_IP_DEFRAG                   = 0x1
 	BPF_F_QUERY_EFFECTIVE                       = 0x1
 	BPF_F_REPLACE                               = 0x4
 	BPF_F_SLEEPABLE                             = 0x10
@@ -521,6 +523,7 @@
 	BPF_MAJOR_VERSION                           = 0x1
 	BPF_MAXINSNS                                = 0x1000
 	BPF_MEM                                     = 0x60
+	BPF_MEMSX                                   = 0x80
 	BPF_MEMWORDS                                = 0x10
 	BPF_MINOR_VERSION                           = 0x1
 	BPF_MISC                                    = 0x7
@@ -776,6 +779,8 @@
 	DEVLINK_GENL_MCGRP_CONFIG_NAME              = "config"
 	DEVLINK_GENL_NAME                           = "devlink"
 	DEVLINK_GENL_VERSION                        = 0x1
+	DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO            = 0x4
+	DEVLINK_PORT_FN_CAP_IPSEC_PACKET            = 0x8
 	DEVLINK_PORT_FN_CAP_MIGRATABLE              = 0x2
 	DEVLINK_PORT_FN_CAP_ROCE                    = 0x1
 	DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX           = 0x14
@@ -1698,6 +1703,7 @@
 	KEXEC_ON_CRASH                              = 0x1
 	KEXEC_PRESERVE_CONTEXT                      = 0x2
 	KEXEC_SEGMENT_MAX                           = 0x10
+	KEXEC_UPDATE_ELFCOREHDR                     = 0x4
 	KEYCTL_ASSUME_AUTHORITY                     = 0x10
 	KEYCTL_CAPABILITIES                         = 0x1f
 	KEYCTL_CAPS0_BIG_KEY                        = 0x10
@@ -1795,6 +1801,7 @@
 	LOCK_SH                                     = 0x1
 	LOCK_UN                                     = 0x8
 	LOOP_CLR_FD                                 = 0x4c01
+	LOOP_CONFIGURE                              = 0x4c0a
 	LOOP_CTL_ADD                                = 0x4c80
 	LOOP_CTL_GET_FREE                           = 0x4c82
 	LOOP_CTL_REMOVE                             = 0x4c81
@@ -2275,6 +2282,7 @@
 	PERF_MEM_LVLNUM_PMEM                        = 0xe
 	PERF_MEM_LVLNUM_RAM                         = 0xd
 	PERF_MEM_LVLNUM_SHIFT                       = 0x21
+	PERF_MEM_LVLNUM_UNC                         = 0x8
 	PERF_MEM_LVL_HIT                            = 0x2
 	PERF_MEM_LVL_IO                             = 0x1000
 	PERF_MEM_LVL_L1                             = 0x8
@@ -2421,6 +2429,15 @@
 	PR_PAC_GET_ENABLED_KEYS                     = 0x3d
 	PR_PAC_RESET_KEYS                           = 0x36
 	PR_PAC_SET_ENABLED_KEYS                     = 0x3c
+	PR_RISCV_V_GET_CONTROL                      = 0x46
+	PR_RISCV_V_SET_CONTROL                      = 0x45
+	PR_RISCV_V_VSTATE_CTRL_CUR_MASK             = 0x3
+	PR_RISCV_V_VSTATE_CTRL_DEFAULT              = 0x0
+	PR_RISCV_V_VSTATE_CTRL_INHERIT              = 0x10
+	PR_RISCV_V_VSTATE_CTRL_MASK                 = 0x1f
+	PR_RISCV_V_VSTATE_CTRL_NEXT_MASK            = 0xc
+	PR_RISCV_V_VSTATE_CTRL_OFF                  = 0x1
+	PR_RISCV_V_VSTATE_CTRL_ON                   = 0x2
 	PR_SCHED_CORE                               = 0x3e
 	PR_SCHED_CORE_CREATE                        = 0x1
 	PR_SCHED_CORE_GET                           = 0x0
@@ -2821,6 +2838,23 @@
 	RWF_SUPPORTED                               = 0x1f
 	RWF_SYNC                                    = 0x4
 	RWF_WRITE_LIFE_NOT_SET                      = 0x0
+	SCHED_BATCH                                 = 0x3
+	SCHED_DEADLINE                              = 0x6
+	SCHED_FIFO                                  = 0x1
+	SCHED_FLAG_ALL                              = 0x7f
+	SCHED_FLAG_DL_OVERRUN                       = 0x4
+	SCHED_FLAG_KEEP_ALL                         = 0x18
+	SCHED_FLAG_KEEP_PARAMS                      = 0x10
+	SCHED_FLAG_KEEP_POLICY                      = 0x8
+	SCHED_FLAG_RECLAIM                          = 0x2
+	SCHED_FLAG_RESET_ON_FORK                    = 0x1
+	SCHED_FLAG_UTIL_CLAMP                       = 0x60
+	SCHED_FLAG_UTIL_CLAMP_MAX                   = 0x40
+	SCHED_FLAG_UTIL_CLAMP_MIN                   = 0x20
+	SCHED_IDLE                                  = 0x5
+	SCHED_NORMAL                                = 0x0
+	SCHED_RESET_ON_FORK                         = 0x40000000
+	SCHED_RR                                    = 0x2
 	SCM_CREDENTIALS                             = 0x2
 	SCM_RIGHTS                                  = 0x1
 	SCM_TIMESTAMP                               = 0x1d
@@ -3435,6 +3469,7 @@
 	XDP_PACKET_HEADROOM                         = 0x100
 	XDP_PGOFF_RX_RING                           = 0x0
 	XDP_PGOFF_TX_RING                           = 0x80000000
+	XDP_PKT_CONTD                               = 0x1
 	XDP_RING_NEED_WAKEUP                        = 0x1
 	XDP_RX_RING                                 = 0x2
 	XDP_SHARED_UMEM                             = 0x1
@@ -3447,6 +3482,7 @@
 	XDP_UMEM_REG                                = 0x4
 	XDP_UMEM_UNALIGNED_CHUNK_FLAG               = 0x1
 	XDP_USE_NEED_WAKEUP                         = 0x8
+	XDP_USE_SG                                  = 0x10
 	XDP_ZEROCOPY                                = 0x4
 	XENFS_SUPER_MAGIC                           = 0xabba1974
 	XFS_SUPER_MAGIC                             = 0x58465342
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index a46df0f..4920821 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && linux
-// +build 386,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x127a
 	BLKBSZGET                        = 0x80041270
 	BLKBSZSET                        = 0x40041271
+	BLKDISCARD                       = 0x1277
+	BLKDISCARDZEROES                 = 0x127c
 	BLKFLSBUF                        = 0x1261
 	BLKFRAGET                        = 0x1265
 	BLKFRASET                        = 0x1264
+	BLKGETDISKSEQ                    = 0x80081280
 	BLKGETSIZE                       = 0x1260
 	BLKGETSIZE64                     = 0x80041272
+	BLKIOMIN                         = 0x1278
+	BLKIOOPT                         = 0x1279
 	BLKPBSZGET                       = 0x127b
 	BLKRAGET                         = 0x1263
 	BLKRASET                         = 0x1262
 	BLKROGET                         = 0x125e
 	BLKROSET                         = 0x125d
+	BLKROTATIONAL                    = 0x127e
 	BLKRRPART                        = 0x125f
+	BLKSECDISCARD                    = 0x127d
 	BLKSECTGET                       = 0x1267
 	BLKSECTSET                       = 0x1266
 	BLKSSZGET                        = 0x1268
+	BLKZEROOUT                       = 0x127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -317,10 +325,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1f
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x26
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 6cd4a3e..a0c1e41 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && linux
-// +build amd64,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x127a
 	BLKBSZGET                        = 0x80081270
 	BLKBSZSET                        = 0x40081271
+	BLKDISCARD                       = 0x1277
+	BLKDISCARDZEROES                 = 0x127c
 	BLKFLSBUF                        = 0x1261
 	BLKFRAGET                        = 0x1265
 	BLKFRASET                        = 0x1264
+	BLKGETDISKSEQ                    = 0x80081280
 	BLKGETSIZE                       = 0x1260
 	BLKGETSIZE64                     = 0x80081272
+	BLKIOMIN                         = 0x1278
+	BLKIOOPT                         = 0x1279
 	BLKPBSZGET                       = 0x127b
 	BLKRAGET                         = 0x1263
 	BLKRASET                         = 0x1262
 	BLKROGET                         = 0x125e
 	BLKROSET                         = 0x125d
+	BLKROTATIONAL                    = 0x127e
 	BLKRRPART                        = 0x125f
+	BLKSECDISCARD                    = 0x127d
 	BLKSECTGET                       = 0x1267
 	BLKSECTSET                       = 0x1266
 	BLKSSZGET                        = 0x1268
+	BLKZEROOUT                       = 0x127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -318,10 +326,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1f
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x26
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index c7ebee2..c639855 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && linux
-// +build arm,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x127a
 	BLKBSZGET                        = 0x80041270
 	BLKBSZSET                        = 0x40041271
+	BLKDISCARD                       = 0x1277
+	BLKDISCARDZEROES                 = 0x127c
 	BLKFLSBUF                        = 0x1261
 	BLKFRAGET                        = 0x1265
 	BLKFRASET                        = 0x1264
+	BLKGETDISKSEQ                    = 0x80081280
 	BLKGETSIZE                       = 0x1260
 	BLKGETSIZE64                     = 0x80041272
+	BLKIOMIN                         = 0x1278
+	BLKIOOPT                         = 0x1279
 	BLKPBSZGET                       = 0x127b
 	BLKRAGET                         = 0x1263
 	BLKRASET                         = 0x1262
 	BLKROGET                         = 0x125e
 	BLKROSET                         = 0x125d
+	BLKROTATIONAL                    = 0x127e
 	BLKRRPART                        = 0x125f
+	BLKSECDISCARD                    = 0x127d
 	BLKSECTGET                       = 0x1267
 	BLKSECTSET                       = 0x1266
 	BLKSSZGET                        = 0x1268
+	BLKZEROOUT                       = 0x127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -324,10 +332,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1f
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x26
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 12a9a13..47cc62e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && linux
-// +build arm64,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x127a
 	BLKBSZGET                        = 0x80081270
 	BLKBSZSET                        = 0x40081271
+	BLKDISCARD                       = 0x1277
+	BLKDISCARDZEROES                 = 0x127c
 	BLKFLSBUF                        = 0x1261
 	BLKFRAGET                        = 0x1265
 	BLKFRASET                        = 0x1264
+	BLKGETDISKSEQ                    = 0x80081280
 	BLKGETSIZE                       = 0x1260
 	BLKGETSIZE64                     = 0x80081272
+	BLKIOMIN                         = 0x1278
+	BLKIOOPT                         = 0x1279
 	BLKPBSZGET                       = 0x127b
 	BLKRAGET                         = 0x1263
 	BLKRASET                         = 0x1262
 	BLKROGET                         = 0x125e
 	BLKROSET                         = 0x125d
+	BLKROTATIONAL                    = 0x127e
 	BLKRRPART                        = 0x125f
+	BLKSECDISCARD                    = 0x127d
 	BLKSECTGET                       = 0x1267
 	BLKSECTSET                       = 0x1266
 	BLKSSZGET                        = 0x1268
+	BLKZEROOUT                       = 0x127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -314,10 +322,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1f
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x26
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index f26a164..27ac4a0 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build loong64 && linux
-// +build loong64,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x127a
 	BLKBSZGET                        = 0x80081270
 	BLKBSZSET                        = 0x40081271
+	BLKDISCARD                       = 0x1277
+	BLKDISCARDZEROES                 = 0x127c
 	BLKFLSBUF                        = 0x1261
 	BLKFRAGET                        = 0x1265
 	BLKFRASET                        = 0x1264
+	BLKGETDISKSEQ                    = 0x80081280
 	BLKGETSIZE                       = 0x1260
 	BLKGETSIZE64                     = 0x80081272
+	BLKIOMIN                         = 0x1278
+	BLKIOOPT                         = 0x1279
 	BLKPBSZGET                       = 0x127b
 	BLKRAGET                         = 0x1263
 	BLKRASET                         = 0x1262
 	BLKROGET                         = 0x125e
 	BLKROSET                         = 0x125d
+	BLKROTATIONAL                    = 0x127e
 	BLKRRPART                        = 0x125f
+	BLKSECDISCARD                    = 0x127d
 	BLKSECTGET                       = 0x1267
 	BLKSECTSET                       = 0x1266
 	BLKSSZGET                        = 0x1268
+	BLKZEROOUT                       = 0x127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -109,6 +117,9 @@
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
 	IXON                             = 0x400
+	LASX_CTX_MAGIC                   = 0x41535801
+	LBT_CTX_MAGIC                    = 0x42540001
+	LSX_CTX_MAGIC                    = 0x53580001
 	MAP_ANON                         = 0x20
 	MAP_ANONYMOUS                    = 0x20
 	MAP_DENYWRITE                    = 0x800
@@ -308,10 +319,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1f
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x26
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 890bc3c..5469464 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips && linux
-// +build mips,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x2000127a
 	BLKBSZGET                        = 0x40041270
 	BLKBSZSET                        = 0x80041271
+	BLKDISCARD                       = 0x20001277
+	BLKDISCARDZEROES                 = 0x2000127c
 	BLKFLSBUF                        = 0x20001261
 	BLKFRAGET                        = 0x20001265
 	BLKFRASET                        = 0x20001264
+	BLKGETDISKSEQ                    = 0x40081280
 	BLKGETSIZE                       = 0x20001260
 	BLKGETSIZE64                     = 0x40041272
+	BLKIOMIN                         = 0x20001278
+	BLKIOOPT                         = 0x20001279
 	BLKPBSZGET                       = 0x2000127b
 	BLKRAGET                         = 0x20001263
 	BLKRASET                         = 0x20001262
 	BLKROGET                         = 0x2000125e
 	BLKROSET                         = 0x2000125d
+	BLKROTATIONAL                    = 0x2000127e
 	BLKRRPART                        = 0x2000125f
+	BLKSECDISCARD                    = 0x2000127d
 	BLKSECTGET                       = 0x20001267
 	BLKSECTSET                       = 0x20001266
 	BLKSSZGET                        = 0x20001268
+	BLKZEROOUT                       = 0x2000127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -317,10 +325,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0x100
 	SO_PASSCRED                      = 0x11
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x12
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1e
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x1028
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 549f26a..3adb81d 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips64 && linux
-// +build mips64,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x2000127a
 	BLKBSZGET                        = 0x40081270
 	BLKBSZSET                        = 0x80081271
+	BLKDISCARD                       = 0x20001277
+	BLKDISCARDZEROES                 = 0x2000127c
 	BLKFLSBUF                        = 0x20001261
 	BLKFRAGET                        = 0x20001265
 	BLKFRASET                        = 0x20001264
+	BLKGETDISKSEQ                    = 0x40081280
 	BLKGETSIZE                       = 0x20001260
 	BLKGETSIZE64                     = 0x40081272
+	BLKIOMIN                         = 0x20001278
+	BLKIOOPT                         = 0x20001279
 	BLKPBSZGET                       = 0x2000127b
 	BLKRAGET                         = 0x20001263
 	BLKRASET                         = 0x20001262
 	BLKROGET                         = 0x2000125e
 	BLKROSET                         = 0x2000125d
+	BLKROTATIONAL                    = 0x2000127e
 	BLKRRPART                        = 0x2000125f
+	BLKSECDISCARD                    = 0x2000127d
 	BLKSECTGET                       = 0x20001267
 	BLKSECTSET                       = 0x20001266
 	BLKSSZGET                        = 0x20001268
+	BLKZEROOUT                       = 0x2000127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -317,10 +325,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0x100
 	SO_PASSCRED                      = 0x11
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x12
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1e
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x1028
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index e0365e3..2dfe98f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips64le && linux
-// +build mips64le,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x2000127a
 	BLKBSZGET                        = 0x40081270
 	BLKBSZSET                        = 0x80081271
+	BLKDISCARD                       = 0x20001277
+	BLKDISCARDZEROES                 = 0x2000127c
 	BLKFLSBUF                        = 0x20001261
 	BLKFRAGET                        = 0x20001265
 	BLKFRASET                        = 0x20001264
+	BLKGETDISKSEQ                    = 0x40081280
 	BLKGETSIZE                       = 0x20001260
 	BLKGETSIZE64                     = 0x40081272
+	BLKIOMIN                         = 0x20001278
+	BLKIOOPT                         = 0x20001279
 	BLKPBSZGET                       = 0x2000127b
 	BLKRAGET                         = 0x20001263
 	BLKRASET                         = 0x20001262
 	BLKROGET                         = 0x2000125e
 	BLKROSET                         = 0x2000125d
+	BLKROTATIONAL                    = 0x2000127e
 	BLKRRPART                        = 0x2000125f
+	BLKSECDISCARD                    = 0x2000127d
 	BLKSECTGET                       = 0x20001267
 	BLKSECTSET                       = 0x20001266
 	BLKSSZGET                        = 0x20001268
+	BLKZEROOUT                       = 0x2000127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -317,10 +325,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0x100
 	SO_PASSCRED                      = 0x11
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x12
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1e
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x1028
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index fdccce1..f5398f8 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mipsle && linux
-// +build mipsle,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x2000127a
 	BLKBSZGET                        = 0x40041270
 	BLKBSZSET                        = 0x80041271
+	BLKDISCARD                       = 0x20001277
+	BLKDISCARDZEROES                 = 0x2000127c
 	BLKFLSBUF                        = 0x20001261
 	BLKFRAGET                        = 0x20001265
 	BLKFRASET                        = 0x20001264
+	BLKGETDISKSEQ                    = 0x40081280
 	BLKGETSIZE                       = 0x20001260
 	BLKGETSIZE64                     = 0x40041272
+	BLKIOMIN                         = 0x20001278
+	BLKIOOPT                         = 0x20001279
 	BLKPBSZGET                       = 0x2000127b
 	BLKRAGET                         = 0x20001263
 	BLKRASET                         = 0x20001262
 	BLKROGET                         = 0x2000125e
 	BLKROSET                         = 0x2000125d
+	BLKROTATIONAL                    = 0x2000127e
 	BLKRRPART                        = 0x2000125f
+	BLKSECDISCARD                    = 0x2000127d
 	BLKSECTGET                       = 0x20001267
 	BLKSECTSET                       = 0x20001266
 	BLKSSZGET                        = 0x20001268
+	BLKZEROOUT                       = 0x2000127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -317,10 +325,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0x100
 	SO_PASSCRED                      = 0x11
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x12
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1e
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x1028
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index b2205c8..c54f152 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc && linux
-// +build ppc,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x10
 	B576000                          = 0x15
 	B921600                          = 0x16
+	BLKALIGNOFF                      = 0x2000127a
 	BLKBSZGET                        = 0x40041270
 	BLKBSZSET                        = 0x80041271
+	BLKDISCARD                       = 0x20001277
+	BLKDISCARDZEROES                 = 0x2000127c
 	BLKFLSBUF                        = 0x20001261
 	BLKFRAGET                        = 0x20001265
 	BLKFRASET                        = 0x20001264
+	BLKGETDISKSEQ                    = 0x40081280
 	BLKGETSIZE                       = 0x20001260
 	BLKGETSIZE64                     = 0x40041272
+	BLKIOMIN                         = 0x20001278
+	BLKIOOPT                         = 0x20001279
 	BLKPBSZGET                       = 0x2000127b
 	BLKRAGET                         = 0x20001263
 	BLKRASET                         = 0x20001262
 	BLKROGET                         = 0x2000125e
 	BLKROSET                         = 0x2000125d
+	BLKROTATIONAL                    = 0x2000127e
 	BLKRRPART                        = 0x2000125f
+	BLKSECDISCARD                    = 0x2000127d
 	BLKSECTGET                       = 0x20001267
 	BLKSECTSET                       = 0x20001266
 	BLKSSZGET                        = 0x20001268
+	BLKZEROOUT                       = 0x2000127f
 	BOTHER                           = 0x1f
 	BS1                              = 0x8000
 	BSDLY                            = 0x8000
@@ -372,10 +380,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x14
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x15
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1f
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x26
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 81aa5ad..76057dc 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64 && linux
-// +build ppc64,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x10
 	B576000                          = 0x15
 	B921600                          = 0x16
+	BLKALIGNOFF                      = 0x2000127a
 	BLKBSZGET                        = 0x40081270
 	BLKBSZSET                        = 0x80081271
+	BLKDISCARD                       = 0x20001277
+	BLKDISCARDZEROES                 = 0x2000127c
 	BLKFLSBUF                        = 0x20001261
 	BLKFRAGET                        = 0x20001265
 	BLKFRASET                        = 0x20001264
+	BLKGETDISKSEQ                    = 0x40081280
 	BLKGETSIZE                       = 0x20001260
 	BLKGETSIZE64                     = 0x40081272
+	BLKIOMIN                         = 0x20001278
+	BLKIOOPT                         = 0x20001279
 	BLKPBSZGET                       = 0x2000127b
 	BLKRAGET                         = 0x20001263
 	BLKRASET                         = 0x20001262
 	BLKROGET                         = 0x2000125e
 	BLKROSET                         = 0x2000125d
+	BLKROTATIONAL                    = 0x2000127e
 	BLKRRPART                        = 0x2000125f
+	BLKSECDISCARD                    = 0x2000127d
 	BLKSECTGET                       = 0x20001267
 	BLKSECTSET                       = 0x20001266
 	BLKSSZGET                        = 0x20001268
+	BLKZEROOUT                       = 0x2000127f
 	BOTHER                           = 0x1f
 	BS1                              = 0x8000
 	BSDLY                            = 0x8000
@@ -376,10 +384,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x14
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x15
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1f
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x26
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 76807a1..e0c3725 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64le && linux
-// +build ppc64le,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x10
 	B576000                          = 0x15
 	B921600                          = 0x16
+	BLKALIGNOFF                      = 0x2000127a
 	BLKBSZGET                        = 0x40081270
 	BLKBSZSET                        = 0x80081271
+	BLKDISCARD                       = 0x20001277
+	BLKDISCARDZEROES                 = 0x2000127c
 	BLKFLSBUF                        = 0x20001261
 	BLKFRAGET                        = 0x20001265
 	BLKFRASET                        = 0x20001264
+	BLKGETDISKSEQ                    = 0x40081280
 	BLKGETSIZE                       = 0x20001260
 	BLKGETSIZE64                     = 0x40081272
+	BLKIOMIN                         = 0x20001278
+	BLKIOOPT                         = 0x20001279
 	BLKPBSZGET                       = 0x2000127b
 	BLKRAGET                         = 0x20001263
 	BLKRASET                         = 0x20001262
 	BLKROGET                         = 0x2000125e
 	BLKROSET                         = 0x2000125d
+	BLKROTATIONAL                    = 0x2000127e
 	BLKRRPART                        = 0x2000125f
+	BLKSECDISCARD                    = 0x2000127d
 	BLKSECTGET                       = 0x20001267
 	BLKSECTSET                       = 0x20001266
 	BLKSSZGET                        = 0x20001268
+	BLKZEROOUT                       = 0x2000127f
 	BOTHER                           = 0x1f
 	BS1                              = 0x8000
 	BSDLY                            = 0x8000
@@ -376,10 +384,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x14
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x15
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1f
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x26
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index d4a5ab9..18f2813 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build riscv64 && linux
-// +build riscv64,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x127a
 	BLKBSZGET                        = 0x80081270
 	BLKBSZSET                        = 0x40081271
+	BLKDISCARD                       = 0x1277
+	BLKDISCARDZEROES                 = 0x127c
 	BLKFLSBUF                        = 0x1261
 	BLKFRAGET                        = 0x1265
 	BLKFRASET                        = 0x1264
+	BLKGETDISKSEQ                    = 0x80081280
 	BLKGETSIZE                       = 0x1260
 	BLKGETSIZE64                     = 0x80081272
+	BLKIOMIN                         = 0x1278
+	BLKIOOPT                         = 0x1279
 	BLKPBSZGET                       = 0x127b
 	BLKRAGET                         = 0x1263
 	BLKRASET                         = 0x1262
 	BLKROGET                         = 0x125e
 	BLKROSET                         = 0x125d
+	BLKROTATIONAL                    = 0x127e
 	BLKRRPART                        = 0x125f
+	BLKSECDISCARD                    = 0x127d
 	BLKSECTGET                       = 0x1267
 	BLKSECTSET                       = 0x1266
 	BLKSSZGET                        = 0x1268
+	BLKZEROOUT                       = 0x127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -219,6 +227,9 @@
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTRACE_GETFDPIC                  = 0x21
+	PTRACE_GETFDPIC_EXEC             = 0x0
+	PTRACE_GETFDPIC_INTERP           = 0x1
 	RLIMIT_AS                        = 0x9
 	RLIMIT_MEMLOCK                   = 0x8
 	RLIMIT_NOFILE                    = 0x7
@@ -305,10 +316,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1f
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x26
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 66e65db..11619d4 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build s390x && linux
-// +build s390x,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go
@@ -27,22 +26,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x127a
 	BLKBSZGET                        = 0x80081270
 	BLKBSZSET                        = 0x40081271
+	BLKDISCARD                       = 0x1277
+	BLKDISCARDZEROES                 = 0x127c
 	BLKFLSBUF                        = 0x1261
 	BLKFRAGET                        = 0x1265
 	BLKFRASET                        = 0x1264
+	BLKGETDISKSEQ                    = 0x80081280
 	BLKGETSIZE                       = 0x1260
 	BLKGETSIZE64                     = 0x80081272
+	BLKIOMIN                         = 0x1278
+	BLKIOOPT                         = 0x1279
 	BLKPBSZGET                       = 0x127b
 	BLKRAGET                         = 0x1263
 	BLKRASET                         = 0x1262
 	BLKROGET                         = 0x125e
 	BLKROSET                         = 0x125d
+	BLKROTATIONAL                    = 0x127e
 	BLKRRPART                        = 0x125f
+	BLKSECDISCARD                    = 0x127d
 	BLKSECTGET                       = 0x1267
 	BLKSECTSET                       = 0x1266
 	BLKSSZGET                        = 0x1268
+	BLKZEROOUT                       = 0x127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -380,10 +388,12 @@
 	SO_NOFCS                         = 0x2b
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
+	SO_PASSPIDFD                     = 0x4c
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
 	SO_PEERGROUPS                    = 0x3b
+	SO_PEERPIDFD                     = 0x4d
 	SO_PEERSEC                       = 0x1f
 	SO_PREFER_BUSY_POLL              = 0x45
 	SO_PROTOCOL                      = 0x26
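
The block-device ioctl numbers added in the hunks above (BLKDISCARD, BLKGETDISKSEQ, BLKZEROOUT, and friends) are plain request constants; nothing else in the package changes. A minimal, Linux-only sketch of using the newly generated BLKGETDISKSEQ constant — an illustrative addition, not part of the vendored change; diskSeq is a hypothetical helper and assumes a kernel that reports disk sequence numbers:

package blkseq

import (
	"unsafe"

	"golang.org/x/sys/unix"
)

// diskSeq reads the disk sequence number of an open block device.
// BLKGETDISKSEQ fills in a __u64 supplied by the caller.
func diskSeq(fd int) (uint64, error) {
	var seq uint64
	_, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd),
		uintptr(unix.BLKGETDISKSEQ), uintptr(unsafe.Pointer(&seq)))
	if errno != 0 {
		return 0, errno
	}
	return seq, nil
}
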
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 4898420..396d994 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build sparc64 && linux
-// +build sparc64,linux
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go
@@ -30,22 +29,31 @@
 	B57600                           = 0x1001
 	B576000                          = 0x1006
 	B921600                          = 0x1007
+	BLKALIGNOFF                      = 0x2000127a
 	BLKBSZGET                        = 0x40081270
 	BLKBSZSET                        = 0x80081271
+	BLKDISCARD                       = 0x20001277
+	BLKDISCARDZEROES                 = 0x2000127c
 	BLKFLSBUF                        = 0x20001261
 	BLKFRAGET                        = 0x20001265
 	BLKFRASET                        = 0x20001264
+	BLKGETDISKSEQ                    = 0x40081280
 	BLKGETSIZE                       = 0x20001260
 	BLKGETSIZE64                     = 0x40081272
+	BLKIOMIN                         = 0x20001278
+	BLKIOOPT                         = 0x20001279
 	BLKPBSZGET                       = 0x2000127b
 	BLKRAGET                         = 0x20001263
 	BLKRASET                         = 0x20001262
 	BLKROGET                         = 0x2000125e
 	BLKROSET                         = 0x2000125d
+	BLKROTATIONAL                    = 0x2000127e
 	BLKRRPART                        = 0x2000125f
+	BLKSECDISCARD                    = 0x2000127d
 	BLKSECTGET                       = 0x20001267
 	BLKSECTSET                       = 0x20001266
 	BLKSSZGET                        = 0x20001268
+	BLKZEROOUT                       = 0x2000127f
 	BOTHER                           = 0x1000
 	BS1                              = 0x2000
 	BSDLY                            = 0x2000
@@ -419,10 +427,12 @@
 	SO_NOFCS                         = 0x27
 	SO_OOBINLINE                     = 0x100
 	SO_PASSCRED                      = 0x2
+	SO_PASSPIDFD                     = 0x55
 	SO_PASSSEC                       = 0x1f
 	SO_PEEK_OFF                      = 0x26
 	SO_PEERCRED                      = 0x40
 	SO_PEERGROUPS                    = 0x3d
+	SO_PEERPIDFD                     = 0x56
 	SO_PEERSEC                       = 0x1e
 	SO_PREFER_BUSY_POLL              = 0x48
 	SO_PROTOCOL                      = 0x1028
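
SO_PASSPIDFD and SO_PEERPIDFD, added across the zerrors_linux_*.go files above, are ordinary SOL_SOCKET options. A minimal sketch of querying the peer's pidfd on a connected AF_UNIX socket — illustrative only; peerPidfd is a hypothetical helper, and the option requires Linux 6.5 or newer:

package sockpeer

import "golang.org/x/sys/unix"

// peerPidfd returns a pidfd referring to the process on the other end of
// a connected unix-domain socket. The caller owns the returned descriptor
// and should close it with unix.Close when finished.
func peerPidfd(fd int) (int, error) {
	return unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_PEERPIDFD)
}
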
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
index 72f7420..130085d 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && netbsd
-// +build 386,netbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m32 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
index 8d4eb0c..84769a1 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && netbsd
-// +build amd64,netbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
index 9eef974..602ded0 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && netbsd
-// +build arm,netbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -marm _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
index 3b62ba1..efc0406 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && netbsd
-// +build arm64,netbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
index af20e47..5a6500f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && openbsd
-// +build 386,openbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m32 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
index 6015fcb..a5aeeb9 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && openbsd
-// +build amd64,openbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
index 8d44955..0e9748a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && openbsd
-// +build arm,openbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
index ae16fe7..4f4449a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && openbsd
-// +build arm64,openbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
index 03d90fe..76a363f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips64 && openbsd
-// +build mips64,openbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go
index 8e2c51b..43ca0cd 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64 && openbsd
-// +build ppc64,openbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go
index 13d4030..b1b8bb2 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build riscv64 && openbsd
-// +build riscv64,openbsd
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
index 1afee6a..d2ddd31 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && solaris
-// +build amd64,solaris
 
 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
 // cgo -godefs -- -m64 _const.go
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
index fc7d050..4dfd2e0 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build zos && s390x
-// +build zos,s390x
 
 // Hand edited based on zerrors_linux_s390x.go
 // TODO: auto-generate.
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go
index 97f20ca..586317c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go
@@ -1,8 +1,6 @@
 // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT.
 
 //go:build linux && (arm || arm64)
-// +build linux
-// +build arm arm64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
index 0b5f794..d7c881b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
@@ -1,8 +1,6 @@
 // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT.
 
 //go:build linux && (mips || mips64)
-// +build linux
-// +build mips mips64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
index 2807f7e..2d2de5d 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
@@ -1,8 +1,6 @@
 // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT.
 
 //go:build linux && (mipsle || mips64le)
-// +build linux
-// +build mipsle mips64le
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
index 281ea64..5adc79f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
@@ -1,8 +1,6 @@
 // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT.
 
 //go:build linux && (386 || amd64)
-// +build linux
-// +build 386 amd64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
index 9a25721..6ea64a3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build aix && ppc
-// +build aix,ppc
 
 package unix
 
@@ -817,28 +816,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, p *byte, np int) (n int, err error) {
-	r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np))
-	n = int(r0)
-	if r0 == -1 && er != nil {
-		err = er
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np))
-	n = int(r0)
-	if r0 == -1 && er != nil {
-		err = er
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Dup2(oldfd int, newfd int) (err error) {
 	r0, er := C.dup2(C.int(oldfd), C.int(newfd))
 	if r0 == -1 && er != nil {
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
index 6de80c2..99ee439 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build aix && ppc64
-// +build aix,ppc64
 
 package unix
 
@@ -762,28 +761,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, p *byte, np int) (n int, err error) {
-	r0, e1 := callread(fd, uintptr(unsafe.Pointer(p)), np)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, e1 := callwrite(fd, uintptr(unsafe.Pointer(p)), np)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Dup2(oldfd int, newfd int) (err error) {
 	_, e1 := calldup2(oldfd, newfd)
 	if e1 != 0 {
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
index c4d50ae..b68a783 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build aix && ppc64 && gc
-// +build aix,ppc64,gc
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
index 6903d3b..0a87450 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build aix && ppc64 && gccgo
-// +build aix,ppc64,gccgo
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 4037ccf..ccb02f2 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build darwin && amd64
-// +build darwin,amd64
 
 package unix
 
@@ -725,6 +724,12 @@
 	return
 }
 
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
 	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
@@ -733,10 +738,6 @@
 	return
 }
 
-var libc_ioctl_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
-
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -2410,28 +2411,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Fstat(fd int, stat *Stat_t) (err error) {
 	_, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
@@ -2521,14 +2500,6 @@
 	return
 }
 
-func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
-	_, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
 var libc_ptrace_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
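
The readlen/writelen wrappers deleted in the hunks above were unexported helpers; the slice-based unix.Read and unix.Write remain the supported entry points for raw descriptor I/O. A minimal sketch — illustrative only; copyOnce is a hypothetical helper:

package rw

import "golang.org/x/sys/unix"

// copyOnce reads up to len(buf) bytes from src and writes whatever was
// read to dst, using the exported slice-based wrappers.
func copyOnce(src, dst int, buf []byte) (int, error) {
	n, err := unix.Read(src, buf)
	if err != nil || n == 0 {
		return 0, err
	}
	return unix.Write(dst, buf[:n])
}
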
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index 4baaed0..8b8bb28 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -5,703 +5,586 @@
 
 TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fdopendir(SB)
-
 GLOBL	·libc_fdopendir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
 
 TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getgroups(SB)
-
 GLOBL	·libc_getgroups_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB)
 
 TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setgroups(SB)
-
 GLOBL	·libc_setgroups_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB)
 
 TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_wait4(SB)
-
 GLOBL	·libc_wait4_trampoline_addr(SB), RODATA, $8
 DATA	·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB)
 
 TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_accept(SB)
-
 GLOBL	·libc_accept_trampoline_addr(SB), RODATA, $8
 DATA	·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB)
 
 TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_bind(SB)
-
 GLOBL	·libc_bind_trampoline_addr(SB), RODATA, $8
 DATA	·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB)
 
 TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_connect(SB)
-
 GLOBL	·libc_connect_trampoline_addr(SB), RODATA, $8
 DATA	·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB)
 
 TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_socket(SB)
-
 GLOBL	·libc_socket_trampoline_addr(SB), RODATA, $8
 DATA	·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB)
 
 TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockopt(SB)
-
 GLOBL	·libc_getsockopt_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB)
 
 TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setsockopt(SB)
-
 GLOBL	·libc_setsockopt_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB)
 
 TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getpeername(SB)
-
 GLOBL	·libc_getpeername_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB)
 
 TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockname(SB)
-
 GLOBL	·libc_getsockname_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB)
 
 TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_shutdown(SB)
-
 GLOBL	·libc_shutdown_trampoline_addr(SB), RODATA, $8
 DATA	·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB)
 
 TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_socketpair(SB)
-
 GLOBL	·libc_socketpair_trampoline_addr(SB), RODATA, $8
 DATA	·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB)
 
 TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_recvfrom(SB)
-
 GLOBL	·libc_recvfrom_trampoline_addr(SB), RODATA, $8
 DATA	·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB)
 
 TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_sendto(SB)
-
 GLOBL	·libc_sendto_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB)
 
 TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_recvmsg(SB)
-
 GLOBL	·libc_recvmsg_trampoline_addr(SB), RODATA, $8
 DATA	·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB)
 
 TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_sendmsg(SB)
-
 GLOBL	·libc_sendmsg_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB)
 
 TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_kevent(SB)
-
 GLOBL	·libc_kevent_trampoline_addr(SB), RODATA, $8
 DATA	·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB)
 
 TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimes(SB)
-
 GLOBL	·libc_utimes_trampoline_addr(SB), RODATA, $8
 DATA	·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB)
 
 TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_futimes(SB)
-
 GLOBL	·libc_futimes_trampoline_addr(SB), RODATA, $8
 DATA	·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB)
 
 TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_poll(SB)
-
 GLOBL	·libc_poll_trampoline_addr(SB), RODATA, $8
 DATA	·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB)
 
 TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_madvise(SB)
-
 GLOBL	·libc_madvise_trampoline_addr(SB), RODATA, $8
 DATA	·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB)
 
 TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mlock(SB)
-
 GLOBL	·libc_mlock_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB)
 
 TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mlockall(SB)
-
 GLOBL	·libc_mlockall_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB)
 
 TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mprotect(SB)
-
 GLOBL	·libc_mprotect_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB)
 
 TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_msync(SB)
-
 GLOBL	·libc_msync_trampoline_addr(SB), RODATA, $8
 DATA	·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB)
 
 TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_munlock(SB)
-
 GLOBL	·libc_munlock_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB)
 
 TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_munlockall(SB)
-
 GLOBL	·libc_munlockall_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
 
 TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_closedir(SB)
-
 GLOBL	·libc_closedir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
 
 TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_readdir_r(SB)
-
 GLOBL	·libc_readdir_r_trampoline_addr(SB), RODATA, $8
 DATA	·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
 
 TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_pipe(SB)
-
 GLOBL	·libc_pipe_trampoline_addr(SB), RODATA, $8
 DATA	·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB)
 
 TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getxattr(SB)
-
 GLOBL	·libc_getxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB)
 
 TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fgetxattr(SB)
-
 GLOBL	·libc_fgetxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB)
 
 TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setxattr(SB)
-
 GLOBL	·libc_setxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB)
 
 TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fsetxattr(SB)
-
 GLOBL	·libc_fsetxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB)
 
 TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_removexattr(SB)
-
 GLOBL	·libc_removexattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB)
 
 TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fremovexattr(SB)
-
 GLOBL	·libc_fremovexattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB)
 
 TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_listxattr(SB)
-
 GLOBL	·libc_listxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB)
 
 TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_flistxattr(SB)
-
 GLOBL	·libc_flistxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB)
 
 TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
-
 GLOBL	·libc_utimensat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
 
 TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
-
 GLOBL	·libc_fcntl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
 
 TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_kill(SB)
-
 GLOBL	·libc_kill_trampoline_addr(SB), RODATA, $8
 DATA	·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB)
 
 TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ioctl(SB)
-
 GLOBL	·libc_ioctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB)
 
 TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_sysctl(SB)
-
 GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
 
 TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_sendfile(SB)
-
 GLOBL	·libc_sendfile_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB)
 
 TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_shmat(SB)
-
 GLOBL	·libc_shmat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB)
 
 TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_shmctl(SB)
-
 GLOBL	·libc_shmctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB)
 
 TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_shmdt(SB)
-
 GLOBL	·libc_shmdt_trampoline_addr(SB), RODATA, $8
 DATA	·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB)
 
 TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_shmget(SB)
-
 GLOBL	·libc_shmget_trampoline_addr(SB), RODATA, $8
 DATA	·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB)
 
 TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_access(SB)
-
 GLOBL	·libc_access_trampoline_addr(SB), RODATA, $8
 DATA	·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB)
 
 TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_adjtime(SB)
-
 GLOBL	·libc_adjtime_trampoline_addr(SB), RODATA, $8
 DATA	·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB)
 
 TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_chdir(SB)
-
 GLOBL	·libc_chdir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB)
 
 TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_chflags(SB)
-
 GLOBL	·libc_chflags_trampoline_addr(SB), RODATA, $8
 DATA	·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB)
 
 TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_chmod(SB)
-
 GLOBL	·libc_chmod_trampoline_addr(SB), RODATA, $8
 DATA	·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB)
 
 TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_chown(SB)
-
 GLOBL	·libc_chown_trampoline_addr(SB), RODATA, $8
 DATA	·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB)
 
 TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_chroot(SB)
-
 GLOBL	·libc_chroot_trampoline_addr(SB), RODATA, $8
 DATA	·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB)
 
 TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_clock_gettime(SB)
-
 GLOBL	·libc_clock_gettime_trampoline_addr(SB), RODATA, $8
 DATA	·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB)
 
 TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_close(SB)
-
 GLOBL	·libc_close_trampoline_addr(SB), RODATA, $8
 DATA	·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB)
 
 TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_clonefile(SB)
-
 GLOBL	·libc_clonefile_trampoline_addr(SB), RODATA, $8
 DATA	·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB)
 
 TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_clonefileat(SB)
-
 GLOBL	·libc_clonefileat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB)
 
 TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_dup(SB)
-
 GLOBL	·libc_dup_trampoline_addr(SB), RODATA, $8
 DATA	·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB)
 
 TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_dup2(SB)
-
 GLOBL	·libc_dup2_trampoline_addr(SB), RODATA, $8
 DATA	·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB)
 
 TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_exchangedata(SB)
-
 GLOBL	·libc_exchangedata_trampoline_addr(SB), RODATA, $8
 DATA	·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB)
 
 TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_exit(SB)
-
 GLOBL	·libc_exit_trampoline_addr(SB), RODATA, $8
 DATA	·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB)
 
 TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_faccessat(SB)
-
 GLOBL	·libc_faccessat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB)
 
 TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchdir(SB)
-
 GLOBL	·libc_fchdir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB)
 
 TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchflags(SB)
-
 GLOBL	·libc_fchflags_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB)
 
 TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchmod(SB)
-
 GLOBL	·libc_fchmod_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB)
 
 TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchmodat(SB)
-
 GLOBL	·libc_fchmodat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB)
 
 TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchown(SB)
-
 GLOBL	·libc_fchown_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB)
 
 TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchownat(SB)
-
 GLOBL	·libc_fchownat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB)
 
 TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fclonefileat(SB)
-
 GLOBL	·libc_fclonefileat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB)
 
 TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_flock(SB)
-
 GLOBL	·libc_flock_trampoline_addr(SB), RODATA, $8
 DATA	·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB)
 
 TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fpathconf(SB)
-
 GLOBL	·libc_fpathconf_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB)
 
 TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fsync(SB)
-
 GLOBL	·libc_fsync_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB)
 
 TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ftruncate(SB)
-
 GLOBL	·libc_ftruncate_trampoline_addr(SB), RODATA, $8
 DATA	·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB)
 
 TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getcwd(SB)
-
 GLOBL	·libc_getcwd_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
 
 TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getdtablesize(SB)
-
 GLOBL	·libc_getdtablesize_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB)
 
 TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getegid(SB)
-
 GLOBL	·libc_getegid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB)
 
 TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_geteuid(SB)
-
 GLOBL	·libc_geteuid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB)
 
 TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getgid(SB)
-
 GLOBL	·libc_getgid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB)
 
 TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgid(SB)
-
 GLOBL	·libc_getpgid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB)
 
 TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgrp(SB)
-
 GLOBL	·libc_getpgrp_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB)
 
 TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getpid(SB)
-
 GLOBL	·libc_getpid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB)
 
 TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getppid(SB)
-
 GLOBL	·libc_getppid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB)
 
 TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getpriority(SB)
-
 GLOBL	·libc_getpriority_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB)
 
 TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getrlimit(SB)
-
 GLOBL	·libc_getrlimit_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB)
 
 TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getrusage(SB)
-
 GLOBL	·libc_getrusage_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB)
 
 TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getsid(SB)
-
 GLOBL	·libc_getsid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB)
 
 TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_gettimeofday(SB)
-
 GLOBL	·libc_gettimeofday_trampoline_addr(SB), RODATA, $8
 DATA	·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB)
 
 TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getuid(SB)
-
 GLOBL	·libc_getuid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB)
 
 TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_issetugid(SB)
-
 GLOBL	·libc_issetugid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB)
 
 TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_kqueue(SB)
-
 GLOBL	·libc_kqueue_trampoline_addr(SB), RODATA, $8
 DATA	·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB)
 
 TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_lchown(SB)
-
 GLOBL	·libc_lchown_trampoline_addr(SB), RODATA, $8
 DATA	·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB)
 
 TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_link(SB)
-
 GLOBL	·libc_link_trampoline_addr(SB), RODATA, $8
 DATA	·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB)
 
 TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_linkat(SB)
-
 GLOBL	·libc_linkat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB)
 
 TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_listen(SB)
-
 GLOBL	·libc_listen_trampoline_addr(SB), RODATA, $8
 DATA	·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB)
 
 TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mkdir(SB)
-
 GLOBL	·libc_mkdir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB)
 
 TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mkdirat(SB)
-
 GLOBL	·libc_mkdirat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB)
 
 TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mkfifo(SB)
-
 GLOBL	·libc_mkfifo_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB)
 
 TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mknod(SB)
-
 GLOBL	·libc_mknod_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
 
 TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mount(SB)
-
 GLOBL	·libc_mount_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
 
 TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_open(SB)
-
 GLOBL	·libc_open_trampoline_addr(SB), RODATA, $8
 DATA	·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB)
 
 TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_openat(SB)
-
 GLOBL	·libc_openat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB)
 
 TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_pathconf(SB)
-
 GLOBL	·libc_pathconf_trampoline_addr(SB), RODATA, $8
 DATA	·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB)
 
 TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_pread(SB)
-
 GLOBL	·libc_pread_trampoline_addr(SB), RODATA, $8
 DATA	·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB)
 
 TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_pwrite(SB)
-
 GLOBL	·libc_pwrite_trampoline_addr(SB), RODATA, $8
 DATA	·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB)
 
 TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_read(SB)
-
 GLOBL	·libc_read_trampoline_addr(SB), RODATA, $8
 DATA	·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB)
 
 TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_readlink(SB)
-
 GLOBL	·libc_readlink_trampoline_addr(SB), RODATA, $8
 DATA	·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB)
 
 TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_readlinkat(SB)
-
 GLOBL	·libc_readlinkat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB)
 
 TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_rename(SB)
-
 GLOBL	·libc_rename_trampoline_addr(SB), RODATA, $8
 DATA	·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB)
 
 TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_renameat(SB)
-
 GLOBL	·libc_renameat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB)
 
 TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_revoke(SB)
-
 GLOBL	·libc_revoke_trampoline_addr(SB), RODATA, $8
 DATA	·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB)
 
 TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_rmdir(SB)
-
 GLOBL	·libc_rmdir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB)
 
 TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_lseek(SB)
-
 GLOBL	·libc_lseek_trampoline_addr(SB), RODATA, $8
 DATA	·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB)
 
 TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_select(SB)
-
 GLOBL	·libc_select_trampoline_addr(SB), RODATA, $8
 DATA	·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB)
 
@@ -712,192 +595,160 @@
 
 TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setegid(SB)
-
 GLOBL	·libc_setegid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB)
 
 TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_seteuid(SB)
-
 GLOBL	·libc_seteuid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB)
 
 TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setgid(SB)
-
 GLOBL	·libc_setgid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB)
 
 TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setlogin(SB)
-
 GLOBL	·libc_setlogin_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB)
 
 TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setpgid(SB)
-
 GLOBL	·libc_setpgid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB)
 
 TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setpriority(SB)
-
 GLOBL	·libc_setpriority_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB)
 
 TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setprivexec(SB)
-
 GLOBL	·libc_setprivexec_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB)
 
 TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setregid(SB)
-
 GLOBL	·libc_setregid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB)
 
 TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setreuid(SB)
-
 GLOBL	·libc_setreuid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB)
 
 TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setsid(SB)
-
 GLOBL	·libc_setsid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB)
 
 TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_settimeofday(SB)
-
 GLOBL	·libc_settimeofday_trampoline_addr(SB), RODATA, $8
 DATA	·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB)
 
 TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setuid(SB)
-
 GLOBL	·libc_setuid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB)
 
 TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_symlink(SB)
-
 GLOBL	·libc_symlink_trampoline_addr(SB), RODATA, $8
 DATA	·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB)
 
 TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_symlinkat(SB)
-
 GLOBL	·libc_symlinkat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB)
 
 TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_sync(SB)
-
 GLOBL	·libc_sync_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB)
 
 TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_truncate(SB)
-
 GLOBL	·libc_truncate_trampoline_addr(SB), RODATA, $8
 DATA	·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB)
 
 TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_umask(SB)
-
 GLOBL	·libc_umask_trampoline_addr(SB), RODATA, $8
 DATA	·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB)
 
 TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_undelete(SB)
-
 GLOBL	·libc_undelete_trampoline_addr(SB), RODATA, $8
 DATA	·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB)
 
 TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_unlink(SB)
-
 GLOBL	·libc_unlink_trampoline_addr(SB), RODATA, $8
 DATA	·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB)
 
 TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_unlinkat(SB)
-
 GLOBL	·libc_unlinkat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB)
 
 TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_unmount(SB)
-
 GLOBL	·libc_unmount_trampoline_addr(SB), RODATA, $8
 DATA	·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB)
 
 TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_write(SB)
-
 GLOBL	·libc_write_trampoline_addr(SB), RODATA, $8
 DATA	·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB)
 
 TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mmap(SB)
-
 GLOBL	·libc_mmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB)
 
 TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
-
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
 
 TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fstat64(SB)
-
 GLOBL	·libc_fstat64_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fstat64_trampoline_addr(SB)/8, $libc_fstat64_trampoline<>(SB)
 
 TEXT libc_fstatat64_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatat64(SB)
-
 GLOBL	·libc_fstatat64_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fstatat64_trampoline_addr(SB)/8, $libc_fstatat64_trampoline<>(SB)
 
 TEXT libc_fstatfs64_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatfs64(SB)
-
 GLOBL	·libc_fstatfs64_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fstatfs64_trampoline_addr(SB)/8, $libc_fstatfs64_trampoline<>(SB)
 
 TEXT libc_getfsstat64_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getfsstat64(SB)
-
 GLOBL	·libc_getfsstat64_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getfsstat64_trampoline_addr(SB)/8, $libc_getfsstat64_trampoline<>(SB)
 
 TEXT libc_lstat64_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_lstat64(SB)
-
 GLOBL	·libc_lstat64_trampoline_addr(SB), RODATA, $8
 DATA	·libc_lstat64_trampoline_addr(SB)/8, $libc_lstat64_trampoline<>(SB)
 
 TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ptrace(SB)
-
 GLOBL	·libc_ptrace_trampoline_addr(SB), RODATA, $8
 DATA	·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB)
 
 TEXT libc_stat64_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_stat64(SB)
-
 GLOBL	·libc_stat64_trampoline_addr(SB), RODATA, $8
 DATA	·libc_stat64_trampoline_addr(SB)/8, $libc_stat64_trampoline<>(SB)
 
 TEXT libc_statfs64_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_statfs64(SB)
-
 GLOBL	·libc_statfs64_trampoline_addr(SB), RODATA, $8
 DATA	·libc_statfs64_trampoline_addr(SB)/8, $libc_statfs64_trampoline<>(SB)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 51d6f3f..1b40b99 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build darwin && arm64
-// +build darwin,arm64
 
 package unix
 
@@ -725,6 +724,12 @@
 	return
 }
 
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
 	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
@@ -733,10 +738,6 @@
 	return
 }
 
-var libc_ioctl_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
-
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -2410,28 +2411,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Fstat(fd int, stat *Stat_t) (err error) {
 	_, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
@@ -2521,14 +2500,6 @@
 	return
 }
 
-func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
-	_, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
 var libc_ptrace_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index c3b82c0..08362c1 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -5,703 +5,586 @@
 
 TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fdopendir(SB)
-
 GLOBL	·libc_fdopendir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
 
 TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getgroups(SB)
-
 GLOBL	·libc_getgroups_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB)
 
 TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setgroups(SB)
-
 GLOBL	·libc_setgroups_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB)
 
 TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_wait4(SB)
-
 GLOBL	·libc_wait4_trampoline_addr(SB), RODATA, $8
 DATA	·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB)
 
 TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_accept(SB)
-
 GLOBL	·libc_accept_trampoline_addr(SB), RODATA, $8
 DATA	·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB)
 
 TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_bind(SB)
-
 GLOBL	·libc_bind_trampoline_addr(SB), RODATA, $8
 DATA	·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB)
 
 TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_connect(SB)
-
 GLOBL	·libc_connect_trampoline_addr(SB), RODATA, $8
 DATA	·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB)
 
 TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_socket(SB)
-
 GLOBL	·libc_socket_trampoline_addr(SB), RODATA, $8
 DATA	·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB)
 
 TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockopt(SB)
-
 GLOBL	·libc_getsockopt_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB)
 
 TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setsockopt(SB)
-
 GLOBL	·libc_setsockopt_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB)
 
 TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getpeername(SB)
-
 GLOBL	·libc_getpeername_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB)
 
 TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockname(SB)
-
 GLOBL	·libc_getsockname_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB)
 
 TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_shutdown(SB)
-
 GLOBL	·libc_shutdown_trampoline_addr(SB), RODATA, $8
 DATA	·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB)
 
 TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_socketpair(SB)
-
 GLOBL	·libc_socketpair_trampoline_addr(SB), RODATA, $8
 DATA	·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB)
 
 TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_recvfrom(SB)
-
 GLOBL	·libc_recvfrom_trampoline_addr(SB), RODATA, $8
 DATA	·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB)
 
 TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_sendto(SB)
-
 GLOBL	·libc_sendto_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB)
 
 TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_recvmsg(SB)
-
 GLOBL	·libc_recvmsg_trampoline_addr(SB), RODATA, $8
 DATA	·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB)
 
 TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_sendmsg(SB)
-
 GLOBL	·libc_sendmsg_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB)
 
 TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_kevent(SB)
-
 GLOBL	·libc_kevent_trampoline_addr(SB), RODATA, $8
 DATA	·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB)
 
 TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimes(SB)
-
 GLOBL	·libc_utimes_trampoline_addr(SB), RODATA, $8
 DATA	·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB)
 
 TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_futimes(SB)
-
 GLOBL	·libc_futimes_trampoline_addr(SB), RODATA, $8
 DATA	·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB)
 
 TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_poll(SB)
-
 GLOBL	·libc_poll_trampoline_addr(SB), RODATA, $8
 DATA	·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB)
 
 TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_madvise(SB)
-
 GLOBL	·libc_madvise_trampoline_addr(SB), RODATA, $8
 DATA	·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB)
 
 TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mlock(SB)
-
 GLOBL	·libc_mlock_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB)
 
 TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mlockall(SB)
-
 GLOBL	·libc_mlockall_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB)
 
 TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mprotect(SB)
-
 GLOBL	·libc_mprotect_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB)
 
 TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_msync(SB)
-
 GLOBL	·libc_msync_trampoline_addr(SB), RODATA, $8
 DATA	·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB)
 
 TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_munlock(SB)
-
 GLOBL	·libc_munlock_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB)
 
 TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_munlockall(SB)
-
 GLOBL	·libc_munlockall_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
 
 TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_closedir(SB)
-
 GLOBL	·libc_closedir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
 
 TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_readdir_r(SB)
-
 GLOBL	·libc_readdir_r_trampoline_addr(SB), RODATA, $8
 DATA	·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
 
 TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_pipe(SB)
-
 GLOBL	·libc_pipe_trampoline_addr(SB), RODATA, $8
 DATA	·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB)
 
 TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getxattr(SB)
-
 GLOBL	·libc_getxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB)
 
 TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fgetxattr(SB)
-
 GLOBL	·libc_fgetxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB)
 
 TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setxattr(SB)
-
 GLOBL	·libc_setxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB)
 
 TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fsetxattr(SB)
-
 GLOBL	·libc_fsetxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB)
 
 TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_removexattr(SB)
-
 GLOBL	·libc_removexattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB)
 
 TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fremovexattr(SB)
-
 GLOBL	·libc_fremovexattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB)
 
 TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_listxattr(SB)
-
 GLOBL	·libc_listxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB)
 
 TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_flistxattr(SB)
-
 GLOBL	·libc_flistxattr_trampoline_addr(SB), RODATA, $8
 DATA	·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB)
 
 TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
-
 GLOBL	·libc_utimensat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
 
 TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
-
 GLOBL	·libc_fcntl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
 
 TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_kill(SB)
-
 GLOBL	·libc_kill_trampoline_addr(SB), RODATA, $8
 DATA	·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB)
 
 TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ioctl(SB)
-
 GLOBL	·libc_ioctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB)
 
 TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_sysctl(SB)
-
 GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
 
 TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_sendfile(SB)
-
 GLOBL	·libc_sendfile_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB)
 
 TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_shmat(SB)
-
 GLOBL	·libc_shmat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB)
 
 TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_shmctl(SB)
-
 GLOBL	·libc_shmctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB)
 
 TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_shmdt(SB)
-
 GLOBL	·libc_shmdt_trampoline_addr(SB), RODATA, $8
 DATA	·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB)
 
 TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_shmget(SB)
-
 GLOBL	·libc_shmget_trampoline_addr(SB), RODATA, $8
 DATA	·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB)
 
 TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_access(SB)
-
 GLOBL	·libc_access_trampoline_addr(SB), RODATA, $8
 DATA	·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB)
 
 TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_adjtime(SB)
-
 GLOBL	·libc_adjtime_trampoline_addr(SB), RODATA, $8
 DATA	·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB)
 
 TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_chdir(SB)
-
 GLOBL	·libc_chdir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB)
 
 TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_chflags(SB)
-
 GLOBL	·libc_chflags_trampoline_addr(SB), RODATA, $8
 DATA	·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB)
 
 TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_chmod(SB)
-
 GLOBL	·libc_chmod_trampoline_addr(SB), RODATA, $8
 DATA	·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB)
 
 TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_chown(SB)
-
 GLOBL	·libc_chown_trampoline_addr(SB), RODATA, $8
 DATA	·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB)
 
 TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_chroot(SB)
-
 GLOBL	·libc_chroot_trampoline_addr(SB), RODATA, $8
 DATA	·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB)
 
 TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_clock_gettime(SB)
-
 GLOBL	·libc_clock_gettime_trampoline_addr(SB), RODATA, $8
 DATA	·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB)
 
 TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_close(SB)
-
 GLOBL	·libc_close_trampoline_addr(SB), RODATA, $8
 DATA	·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB)
 
 TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_clonefile(SB)
-
 GLOBL	·libc_clonefile_trampoline_addr(SB), RODATA, $8
 DATA	·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB)
 
 TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_clonefileat(SB)
-
 GLOBL	·libc_clonefileat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB)
 
 TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_dup(SB)
-
 GLOBL	·libc_dup_trampoline_addr(SB), RODATA, $8
 DATA	·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB)
 
 TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_dup2(SB)
-
 GLOBL	·libc_dup2_trampoline_addr(SB), RODATA, $8
 DATA	·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB)
 
 TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_exchangedata(SB)
-
 GLOBL	·libc_exchangedata_trampoline_addr(SB), RODATA, $8
 DATA	·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB)
 
 TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_exit(SB)
-
 GLOBL	·libc_exit_trampoline_addr(SB), RODATA, $8
 DATA	·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB)
 
 TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_faccessat(SB)
-
 GLOBL	·libc_faccessat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB)
 
 TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchdir(SB)
-
 GLOBL	·libc_fchdir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB)
 
 TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchflags(SB)
-
 GLOBL	·libc_fchflags_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB)
 
 TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchmod(SB)
-
 GLOBL	·libc_fchmod_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB)
 
 TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchmodat(SB)
-
 GLOBL	·libc_fchmodat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB)
 
 TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchown(SB)
-
 GLOBL	·libc_fchown_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB)
 
 TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fchownat(SB)
-
 GLOBL	·libc_fchownat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB)
 
 TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fclonefileat(SB)
-
 GLOBL	·libc_fclonefileat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB)
 
 TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_flock(SB)
-
 GLOBL	·libc_flock_trampoline_addr(SB), RODATA, $8
 DATA	·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB)
 
 TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fpathconf(SB)
-
 GLOBL	·libc_fpathconf_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB)
 
 TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fsync(SB)
-
 GLOBL	·libc_fsync_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB)
 
 TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ftruncate(SB)
-
 GLOBL	·libc_ftruncate_trampoline_addr(SB), RODATA, $8
 DATA	·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB)
 
 TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getcwd(SB)
-
 GLOBL	·libc_getcwd_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
 
 TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getdtablesize(SB)
-
 GLOBL	·libc_getdtablesize_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB)
 
 TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getegid(SB)
-
 GLOBL	·libc_getegid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB)
 
 TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_geteuid(SB)
-
 GLOBL	·libc_geteuid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB)
 
 TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getgid(SB)
-
 GLOBL	·libc_getgid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB)
 
 TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgid(SB)
-
 GLOBL	·libc_getpgid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB)
 
 TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgrp(SB)
-
 GLOBL	·libc_getpgrp_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB)
 
 TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getpid(SB)
-
 GLOBL	·libc_getpid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB)
 
 TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getppid(SB)
-
 GLOBL	·libc_getppid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB)
 
 TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getpriority(SB)
-
 GLOBL	·libc_getpriority_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB)
 
 TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getrlimit(SB)
-
 GLOBL	·libc_getrlimit_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB)
 
 TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getrusage(SB)
-
 GLOBL	·libc_getrusage_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB)
 
 TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getsid(SB)
-
 GLOBL	·libc_getsid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB)
 
 TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_gettimeofday(SB)
-
 GLOBL	·libc_gettimeofday_trampoline_addr(SB), RODATA, $8
 DATA	·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB)
 
 TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getuid(SB)
-
 GLOBL	·libc_getuid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB)
 
 TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_issetugid(SB)
-
 GLOBL	·libc_issetugid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB)
 
 TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_kqueue(SB)
-
 GLOBL	·libc_kqueue_trampoline_addr(SB), RODATA, $8
 DATA	·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB)
 
 TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_lchown(SB)
-
 GLOBL	·libc_lchown_trampoline_addr(SB), RODATA, $8
 DATA	·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB)
 
 TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_link(SB)
-
 GLOBL	·libc_link_trampoline_addr(SB), RODATA, $8
 DATA	·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB)
 
 TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_linkat(SB)
-
 GLOBL	·libc_linkat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB)
 
 TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_listen(SB)
-
 GLOBL	·libc_listen_trampoline_addr(SB), RODATA, $8
 DATA	·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB)
 
 TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mkdir(SB)
-
 GLOBL	·libc_mkdir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB)
 
 TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mkdirat(SB)
-
 GLOBL	·libc_mkdirat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB)
 
 TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mkfifo(SB)
-
 GLOBL	·libc_mkfifo_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB)
 
 TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mknod(SB)
-
 GLOBL	·libc_mknod_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
 
 TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mount(SB)
-
 GLOBL	·libc_mount_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
 
 TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_open(SB)
-
 GLOBL	·libc_open_trampoline_addr(SB), RODATA, $8
 DATA	·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB)
 
 TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_openat(SB)
-
 GLOBL	·libc_openat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB)
 
 TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_pathconf(SB)
-
 GLOBL	·libc_pathconf_trampoline_addr(SB), RODATA, $8
 DATA	·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB)
 
 TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_pread(SB)
-
 GLOBL	·libc_pread_trampoline_addr(SB), RODATA, $8
 DATA	·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB)
 
 TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_pwrite(SB)
-
 GLOBL	·libc_pwrite_trampoline_addr(SB), RODATA, $8
 DATA	·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB)
 
 TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_read(SB)
-
 GLOBL	·libc_read_trampoline_addr(SB), RODATA, $8
 DATA	·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB)
 
 TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_readlink(SB)
-
 GLOBL	·libc_readlink_trampoline_addr(SB), RODATA, $8
 DATA	·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB)
 
 TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_readlinkat(SB)
-
 GLOBL	·libc_readlinkat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB)
 
 TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_rename(SB)
-
 GLOBL	·libc_rename_trampoline_addr(SB), RODATA, $8
 DATA	·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB)
 
 TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_renameat(SB)
-
 GLOBL	·libc_renameat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB)
 
 TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_revoke(SB)
-
 GLOBL	·libc_revoke_trampoline_addr(SB), RODATA, $8
 DATA	·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB)
 
 TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_rmdir(SB)
-
 GLOBL	·libc_rmdir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB)
 
 TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_lseek(SB)
-
 GLOBL	·libc_lseek_trampoline_addr(SB), RODATA, $8
 DATA	·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB)
 
 TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_select(SB)
-
 GLOBL	·libc_select_trampoline_addr(SB), RODATA, $8
 DATA	·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB)
 
@@ -712,192 +595,160 @@
 
 TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setegid(SB)
-
 GLOBL	·libc_setegid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB)
 
 TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_seteuid(SB)
-
 GLOBL	·libc_seteuid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB)
 
 TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setgid(SB)
-
 GLOBL	·libc_setgid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB)
 
 TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setlogin(SB)
-
 GLOBL	·libc_setlogin_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB)
 
 TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setpgid(SB)
-
 GLOBL	·libc_setpgid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB)
 
 TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setpriority(SB)
-
 GLOBL	·libc_setpriority_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB)
 
 TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setprivexec(SB)
-
 GLOBL	·libc_setprivexec_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB)
 
 TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setregid(SB)
-
 GLOBL	·libc_setregid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB)
 
 TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setreuid(SB)
-
 GLOBL	·libc_setreuid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB)
 
 TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setsid(SB)
-
 GLOBL	·libc_setsid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB)
 
 TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_settimeofday(SB)
-
 GLOBL	·libc_settimeofday_trampoline_addr(SB), RODATA, $8
 DATA	·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB)
 
 TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setuid(SB)
-
 GLOBL	·libc_setuid_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB)
 
 TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_symlink(SB)
-
 GLOBL	·libc_symlink_trampoline_addr(SB), RODATA, $8
 DATA	·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB)
 
 TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_symlinkat(SB)
-
 GLOBL	·libc_symlinkat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB)
 
 TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_sync(SB)
-
 GLOBL	·libc_sync_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB)
 
 TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_truncate(SB)
-
 GLOBL	·libc_truncate_trampoline_addr(SB), RODATA, $8
 DATA	·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB)
 
 TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_umask(SB)
-
 GLOBL	·libc_umask_trampoline_addr(SB), RODATA, $8
 DATA	·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB)
 
 TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_undelete(SB)
-
 GLOBL	·libc_undelete_trampoline_addr(SB), RODATA, $8
 DATA	·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB)
 
 TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_unlink(SB)
-
 GLOBL	·libc_unlink_trampoline_addr(SB), RODATA, $8
 DATA	·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB)
 
 TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_unlinkat(SB)
-
 GLOBL	·libc_unlinkat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB)
 
 TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_unmount(SB)
-
 GLOBL	·libc_unmount_trampoline_addr(SB), RODATA, $8
 DATA	·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB)
 
 TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_write(SB)
-
 GLOBL	·libc_write_trampoline_addr(SB), RODATA, $8
 DATA	·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB)
 
 TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mmap(SB)
-
 GLOBL	·libc_mmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB)
 
 TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
-
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
 
 TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fstat(SB)
-
 GLOBL	·libc_fstat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB)
 
 TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatat(SB)
-
 GLOBL	·libc_fstatat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB)
 
 TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatfs(SB)
-
 GLOBL	·libc_fstatfs_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB)
 
 TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getfsstat(SB)
-
 GLOBL	·libc_getfsstat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB)
 
 TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_lstat(SB)
-
 GLOBL	·libc_lstat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB)
 
 TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ptrace(SB)
-
 GLOBL	·libc_ptrace_trampoline_addr(SB), RODATA, $8
 DATA	·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB)
 
 TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_stat(SB)
-
 GLOBL	·libc_stat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB)
 
 TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_statfs(SB)
-
 GLOBL	·libc_statfs_trampoline_addr(SB), RODATA, $8
 DATA	·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
index 0eabac7..aad65fc 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build dragonfly && amd64
-// +build dragonfly,amd64
 
 package unix
 
@@ -1642,28 +1641,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
index ee313eb..c009639 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build freebsd && 386
-// +build freebsd,386
 
 package unix
 
@@ -1862,28 +1861,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
index 4c986e4..7664df7 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build freebsd && amd64
-// +build freebsd,amd64
 
 package unix
 
@@ -1862,28 +1861,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
index 5552169..ae09918 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build freebsd && arm
-// +build freebsd,arm
 
 package unix
 
@@ -1862,28 +1861,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
index 67a226f..11fd5d4 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build freebsd && arm64
-// +build freebsd,arm64
 
 package unix
 
@@ -1862,28 +1861,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
index f0b9dda..c3d2d65 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build freebsd && riscv64
-// +build freebsd,riscv64
 
 package unix
 
@@ -1862,28 +1861,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
index b57c705..c698cbc 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build illumos && amd64
-// +build illumos,amd64
 
 package unix
 
@@ -40,7 +39,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procreadv)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -55,7 +54,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpreadv)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -70,7 +69,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwritev)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -85,7 +84,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwritev)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -96,7 +95,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept4)), 4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	fd = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index 7ceec23..1488d27 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -1,7 +1,6 @@
 // Code generated by mkmerge; DO NOT EDIT.
 
 //go:build linux
-// +build linux
 
 package unix
 
@@ -38,6 +37,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ioctl(fd int, req uint, arg uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
@@ -1356,7 +1370,7 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+func pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) {
 	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
 	n = int(r0)
 	if e1 != 0 {
@@ -1734,28 +1748,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func readv(fd int, iovs []Iovec) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(iovs) > 0 {
@@ -2197,3 +2189,33 @@
 	RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid)))
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) {
+	_, _, e1 := Syscall(SYS_SCHED_SETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(flags))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) {
+	_, _, e1 := Syscall6(SYS_SCHED_GETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(size), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) {
+	_, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index 07b549c..4def3e9 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && 386
-// +build linux,386
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index 5f481bf..fef2bc8 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && amd64
-// +build linux,amd64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index 824cd52..a9fd76a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && arm
-// +build linux,arm
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index e77aecf..4600650 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && arm64
-// +build linux,arm64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
index 806ffd1..c8987d2 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && loong64
-// +build linux,loong64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
index 961a3af..921f430 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && mips
-// +build linux,mips
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
index ed05005..44f0678 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && mips64
-// +build linux,mips64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
index d365b71..e7fa0ab 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && mips64le
-// +build linux,mips64le
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
index c3f1b8b..8c51256 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && mipsle
-// +build linux,mipsle
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go
index a6574cf..7392fd4 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && ppc
-// +build linux,ppc
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index f409902..4118043 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && ppc64
-// +build linux,ppc64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index 9dfcc29..40c6ce7 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && ppc64le
-// +build linux,ppc64le
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
index 0b29239..2cfe34a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && riscv64
-// +build linux,riscv64
 
 package unix
 
@@ -531,3 +530,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) {
+	var _p0 unsafe.Pointer
+	if len(pairs) > 0 {
+		_p0 = unsafe.Pointer(&pairs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS_RISCV_HWPROBE, uintptr(_p0), uintptr(len(pairs)), uintptr(cpuCount), uintptr(unsafe.Pointer(cpus)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
index 6cde322..61e6f07 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && s390x
-// +build linux,s390x
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
index 5253d65..834b842 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build linux && sparc64
-// +build linux,sparc64
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
index cdb2af5..e91ebc1 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build netbsd && 386
-// +build netbsd,386
 
 package unix
 
@@ -1824,28 +1823,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1858,3 +1835,14 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) {
+	r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0)
+	xaddr = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
index 9d25f76..be28bab 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build netbsd && amd64
-// +build netbsd,amd64
 
 package unix
 
@@ -1824,28 +1823,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1858,3 +1835,14 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) {
+	r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0)
+	xaddr = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
index d3f8035..fb587e8 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build netbsd && arm
-// +build netbsd,arm
 
 package unix
 
@@ -1824,28 +1823,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1858,3 +1835,14 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) {
+	r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0)
+	xaddr = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
index 887188a..d576438 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build netbsd && arm64
-// +build netbsd,arm64
 
 package unix
 
@@ -1824,28 +1823,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1858,3 +1835,14 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) {
+	r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0)
+	xaddr = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index 9ab9abf..a1d0615 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build openbsd && 386
-// +build openbsd,386
 
 package unix
 
@@ -549,6 +548,12 @@
 	return
 }
 
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
 	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
@@ -557,10 +562,6 @@
 	return
 }
 
-var libc_ioctl_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -583,6 +584,32 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntl(fd int, cmd int, arg int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_fcntl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
 	r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
 	n = int(r0)
@@ -2211,8 +2238,8 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2220,16 +2247,9 @@
 	return
 }
 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+var libc_getfsstat_trampoline_addr uintptr
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2249,3 +2269,33 @@
 var libc_utimensat_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pledge(promises *byte, execpromises *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_pledge_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pledge pledge "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unveil(path *byte, flags *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_unveil_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
index 3dcacd3..41b5617 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
@@ -178,6 +178,11 @@
 GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $4
 DATA	·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB)
 
+TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_fcntl(SB)
+GLOBL	·libc_fcntl_trampoline_addr(SB), RODATA, $4
+DATA	·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB)
+
 TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ppoll(SB)
 GLOBL	·libc_ppoll_trampoline_addr(SB), RODATA, $4
@@ -668,7 +673,22 @@
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $4
 DATA	·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB)
 
+TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
+GLOBL	·libc_getfsstat_trampoline_addr(SB), RODATA, $4
+DATA	·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB)
+
 TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
 GLOBL	·libc_utimensat_trampoline_addr(SB), RODATA, $4
 DATA	·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB)
+
+TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_pledge(SB)
+GLOBL	·libc_pledge_trampoline_addr(SB), RODATA, $4
+DATA	·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB)
+
+TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_unveil(SB)
+GLOBL	·libc_unveil_trampoline_addr(SB), RODATA, $4
+DATA	·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index 915761e..5b2a740 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build openbsd && amd64
-// +build openbsd,amd64
 
 package unix
 
@@ -585,6 +584,32 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntl(fd int, cmd int, arg int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_fcntl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
 	r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
 	n = int(r0)
@@ -2213,8 +2238,8 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2222,16 +2247,9 @@
 	return
 }
 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+var libc_getfsstat_trampoline_addr uintptr
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2251,3 +2269,33 @@
 var libc_utimensat_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pledge(promises *byte, execpromises *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_pledge_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pledge pledge "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unveil(path *byte, flags *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_unveil_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
index 2763620..4019a65 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
@@ -178,6 +178,11 @@
 GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
 
+TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_fcntl(SB)
+GLOBL	·libc_fcntl_trampoline_addr(SB), RODATA, $8
+DATA	·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
+
 TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ppoll(SB)
 GLOBL	·libc_ppoll_trampoline_addr(SB), RODATA, $8
@@ -668,7 +673,22 @@
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
 
+TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
+GLOBL	·libc_getfsstat_trampoline_addr(SB), RODATA, $8
+DATA	·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB)
+
 TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
 GLOBL	·libc_utimensat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
+
+TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_pledge(SB)
+GLOBL	·libc_pledge_trampoline_addr(SB), RODATA, $8
+DATA	·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB)
+
+TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_unveil(SB)
+GLOBL	·libc_unveil_trampoline_addr(SB), RODATA, $8
+DATA	·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index 8e87fdf..f6eda13 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build openbsd && arm
-// +build openbsd,arm
 
 package unix
 
@@ -549,6 +548,12 @@
 	return
 }
 
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
 	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
@@ -557,10 +562,6 @@
 	return
 }
 
-var libc_ioctl_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -583,6 +584,32 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntl(fd int, cmd int, arg int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_fcntl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
 	r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
 	n = int(r0)
@@ -2211,8 +2238,8 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2220,16 +2247,9 @@
 	return
 }
 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+var libc_getfsstat_trampoline_addr uintptr
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2249,3 +2269,33 @@
 var libc_utimensat_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pledge(promises *byte, execpromises *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_pledge_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pledge pledge "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unveil(path *byte, flags *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_unveil_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
index c922314..ac4af24 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
@@ -178,6 +178,11 @@
 GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $4
 DATA	·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB)
 
+TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_fcntl(SB)
+GLOBL	·libc_fcntl_trampoline_addr(SB), RODATA, $4
+DATA	·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB)
+
 TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ppoll(SB)
 GLOBL	·libc_ppoll_trampoline_addr(SB), RODATA, $4
@@ -668,7 +673,22 @@
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $4
 DATA	·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB)
 
+TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
+GLOBL	·libc_getfsstat_trampoline_addr(SB), RODATA, $4
+DATA	·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB)
+
 TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
 GLOBL	·libc_utimensat_trampoline_addr(SB), RODATA, $4
 DATA	·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB)
+
+TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_pledge(SB)
+GLOBL	·libc_pledge_trampoline_addr(SB), RODATA, $4
+DATA	·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB)
+
+TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_unveil(SB)
+GLOBL	·libc_unveil_trampoline_addr(SB), RODATA, $4
+DATA	·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
index 12a7a21..55df20a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build openbsd && arm64
-// +build openbsd,arm64
 
 package unix
 
@@ -549,6 +548,12 @@
 	return
 }
 
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
 	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
@@ -557,10 +562,6 @@
 	return
 }
 
-var libc_ioctl_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -583,6 +584,32 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntl(fd int, cmd int, arg int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_fcntl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
 	r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
 	n = int(r0)
@@ -2211,8 +2238,8 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2220,16 +2247,9 @@
 	return
 }
 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+var libc_getfsstat_trampoline_addr uintptr
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2249,3 +2269,33 @@
 var libc_utimensat_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pledge(promises *byte, execpromises *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_pledge_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pledge pledge "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unveil(path *byte, flags *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_unveil_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
index a6bc32c..f77d532 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
@@ -178,6 +178,11 @@
 GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
 
+TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_fcntl(SB)
+GLOBL	·libc_fcntl_trampoline_addr(SB), RODATA, $8
+DATA	·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
+
 TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ppoll(SB)
 GLOBL	·libc_ppoll_trampoline_addr(SB), RODATA, $8
@@ -668,7 +673,22 @@
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
 
+TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
+GLOBL	·libc_getfsstat_trampoline_addr(SB), RODATA, $8
+DATA	·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB)
+
 TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
 GLOBL	·libc_utimensat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
+
+TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_pledge(SB)
+GLOBL	·libc_pledge_trampoline_addr(SB), RODATA, $8
+DATA	·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB)
+
+TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_unveil(SB)
+GLOBL	·libc_unveil_trampoline_addr(SB), RODATA, $8
+DATA	·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
index b19e8aa..8c1155c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build openbsd && mips64
-// +build openbsd,mips64
 
 package unix
 
@@ -549,6 +548,12 @@
 	return
 }
 
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
 	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
@@ -557,10 +562,6 @@
 	return
 }
 
-var libc_ioctl_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -583,6 +584,32 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntl(fd int, cmd int, arg int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_fcntl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
 	r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
 	n = int(r0)
@@ -2211,8 +2238,8 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2220,16 +2247,9 @@
 	return
 }
 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+var libc_getfsstat_trampoline_addr uintptr
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2249,3 +2269,33 @@
 var libc_utimensat_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pledge(promises *byte, execpromises *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_pledge_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pledge pledge "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unveil(path *byte, flags *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_unveil_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
index b4e7bce..fae140b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
@@ -178,6 +178,11 @@
 GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
 
+TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_fcntl(SB)
+GLOBL	·libc_fcntl_trampoline_addr(SB), RODATA, $8
+DATA	·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
+
 TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ppoll(SB)
 GLOBL	·libc_ppoll_trampoline_addr(SB), RODATA, $8
@@ -668,7 +673,22 @@
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
 
+TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
+GLOBL	·libc_getfsstat_trampoline_addr(SB), RODATA, $8
+DATA	·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB)
+
 TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
 GLOBL	·libc_utimensat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
+
+TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_pledge(SB)
+GLOBL	·libc_pledge_trampoline_addr(SB), RODATA, $8
+DATA	·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB)
+
+TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_unveil(SB)
+GLOBL	·libc_unveil_trampoline_addr(SB), RODATA, $8
+DATA	·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
index fb99594..7cc80c5 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build openbsd && ppc64
-// +build openbsd,ppc64
 
 package unix
 
@@ -549,6 +548,12 @@
 	return
 }
 
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
 	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
@@ -557,10 +562,6 @@
 	return
 }
 
-var libc_ioctl_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -583,6 +584,32 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntl(fd int, cmd int, arg int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_fcntl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
 	r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
 	n = int(r0)
@@ -2211,8 +2238,8 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2220,16 +2247,9 @@
 	return
 }
 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+var libc_getfsstat_trampoline_addr uintptr
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2249,3 +2269,33 @@
 var libc_utimensat_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pledge(promises *byte, execpromises *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_pledge_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pledge pledge "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unveil(path *byte, flags *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_unveil_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
index ca3f766..9d1e0ff 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
@@ -213,6 +213,12 @@
 GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
 
+TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
+	CALL	libc_fcntl(SB)
+	RET
+GLOBL	·libc_fcntl_trampoline_addr(SB), RODATA, $8
+DATA	·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
+
 TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
 	CALL	libc_ppoll(SB)
 	RET
@@ -801,8 +807,26 @@
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
 
+TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0
+	CALL	libc_getfsstat(SB)
+	RET
+GLOBL	·libc_getfsstat_trampoline_addr(SB), RODATA, $8
+DATA	·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB)
+
 TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
 	CALL	libc_utimensat(SB)
 	RET
 GLOBL	·libc_utimensat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
+
+TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0
+	CALL	libc_pledge(SB)
+	RET
+GLOBL	·libc_pledge_trampoline_addr(SB), RODATA, $8
+DATA	·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB)
+
+TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0
+	CALL	libc_unveil(SB)
+	RET
+GLOBL	·libc_unveil_trampoline_addr(SB), RODATA, $8
+DATA	·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
index 32cbbbc..0688737 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build openbsd && riscv64
-// +build openbsd,riscv64
 
 package unix
 
@@ -549,6 +548,12 @@
 	return
 }
 
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
 	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
@@ -557,10 +562,6 @@
 	return
 }
 
-var libc_ioctl_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -583,6 +584,32 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntl(fd int, cmd int, arg int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_fcntl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
 	r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
 	n = int(r0)
@@ -2211,8 +2238,8 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2220,16 +2247,9 @@
 	return
 }
 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+var libc_getfsstat_trampoline_addr uintptr
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2249,3 +2269,33 @@
 var libc_utimensat_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pledge(promises *byte, execpromises *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_pledge_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pledge pledge "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unveil(path *byte, flags *byte) (err error) {
+	_, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_unveil_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
index 477a7d5..da115f9 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
@@ -178,6 +178,11 @@
 GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $8
 DATA	·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
 
+TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_fcntl(SB)
+GLOBL	·libc_fcntl_trampoline_addr(SB), RODATA, $8
+DATA	·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
+
 TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_ppoll(SB)
 GLOBL	·libc_ppoll_trampoline_addr(SB), RODATA, $8
@@ -668,7 +673,22 @@
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
 
+TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
+GLOBL	·libc_getfsstat_trampoline_addr(SB), RODATA, $8
+DATA	·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB)
+
 TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
 GLOBL	·libc_utimensat_trampoline_addr(SB), RODATA, $8
 DATA	·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
+
+TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_pledge(SB)
+GLOBL	·libc_pledge_trampoline_addr(SB), RODATA, $8
+DATA	·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB)
+
+TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_unveil(SB)
+GLOBL	·libc_unveil_trampoline_addr(SB), RODATA, $8
+DATA	·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 609d1c5..829b87f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build solaris && amd64
-// +build solaris,amd64
 
 package unix
 
@@ -436,7 +435,7 @@
 	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -446,7 +445,7 @@
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe2)), 2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -456,7 +455,7 @@
 func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -471,7 +470,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetcwd)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -482,7 +481,7 @@
 	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -492,7 +491,7 @@
 func setgroups(ngid int, gid *_Gid_t) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -503,7 +502,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(statusp)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
 	wpid = int32(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -518,7 +517,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -533,7 +532,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -548,7 +547,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimensat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -559,7 +558,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0)
 	val = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -569,7 +568,7 @@
 func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -580,7 +579,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
 	fd = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -591,7 +590,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -602,7 +601,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -612,7 +611,7 @@
 func acct(path *byte) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -647,7 +646,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0)
 	ret = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -658,7 +657,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0)
 	ret = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -669,7 +668,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -684,7 +683,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAccess)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -694,7 +693,7 @@
 func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAdjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -709,7 +708,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -724,7 +723,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChmod)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -739,7 +738,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -754,7 +753,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChroot)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -764,7 +763,7 @@
 func ClockGettime(clockid int32, time *Timespec) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -774,7 +773,7 @@
 func Close(fd int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -790,7 +789,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procCreat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
 	fd = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -801,7 +800,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0)
 	nfd = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -811,7 +810,7 @@
 func Dup2(oldfd int, newfd int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -833,7 +832,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFaccessat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -843,7 +842,7 @@
 func Fchdir(fd int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -853,7 +852,7 @@
 func Fchmod(fd int, mode uint32) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -868,7 +867,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -878,7 +877,7 @@
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -893,7 +892,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -903,7 +902,7 @@
 func Fdatasync(fd int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -913,7 +912,7 @@
 func Flock(fd int, how int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFlock)), 2, uintptr(fd), uintptr(how), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -924,7 +923,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0)
 	val = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -934,7 +933,7 @@
 func Fstat(fd int, stat *Stat_t) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -949,7 +948,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -959,7 +958,7 @@
 func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -974,7 +973,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetdents)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1001,7 +1000,7 @@
 	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0)
 	pgid = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1012,7 +1011,7 @@
 	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0)
 	pgid = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1047,7 +1046,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1057,7 +1056,7 @@
 func Getrlimit(which int, lim *Rlimit) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1067,7 +1066,7 @@
 func Getrusage(who int, rusage *Rusage) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1078,7 +1077,7 @@
 	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0)
 	sid = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1088,7 +1087,7 @@
 func Gettimeofday(tv *Timeval) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1106,7 +1105,7 @@
 func Kill(pid int, signum syscall.Signal) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procKill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1121,7 +1120,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLchown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1141,7 +1140,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1151,7 +1150,7 @@
 func Listen(s int, backlog int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1166,7 +1165,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLstat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1180,7 +1179,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMadvise)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(advice), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1195,7 +1194,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdir)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1210,7 +1209,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1225,7 +1224,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifo)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1240,7 +1239,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifoat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1255,7 +1254,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknod)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1270,7 +1269,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1284,7 +1283,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1294,7 +1293,7 @@
 func Mlockall(flags int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1308,7 +1307,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMprotect)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(prot), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1322,7 +1321,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1336,7 +1335,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1346,7 +1345,7 @@
 func Munlockall() (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1356,7 +1355,7 @@
 func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1372,7 +1371,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpen)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0, 0)
 	fd = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1388,7 +1387,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
 	fd = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1404,7 +1403,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPathconf)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0, 0, 0, 0)
 	val = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1414,7 +1413,7 @@
 func Pause() (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1429,7 +1428,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1444,7 +1443,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1459,7 +1458,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1479,7 +1478,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procReadlink)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(len(buf)), 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1499,7 +1498,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRename)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1519,7 +1518,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRenameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1534,7 +1533,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRmdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1545,7 +1544,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0)
 	newoffset = int64(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1556,7 +1555,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1566,7 +1565,7 @@
 func Setegid(egid int) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1576,7 +1575,7 @@
 func Seteuid(euid int) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSeteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1586,7 +1585,7 @@
 func Setgid(gid int) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetgid)), 1, uintptr(gid), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1600,7 +1599,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1610,7 +1609,7 @@
 func Setpgid(pid int, pgid int) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1620,7 +1619,7 @@
 func Setpriority(which int, who int, prio int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSetpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1630,7 +1629,7 @@
 func Setregid(rgid int, egid int) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1640,7 +1639,7 @@
 func Setreuid(ruid int, euid int) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1651,7 +1650,7 @@
 	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0)
 	pid = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1661,7 +1660,7 @@
 func Setuid(uid int) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetuid)), 1, uintptr(uid), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1671,7 +1670,7 @@
 func Shutdown(s int, how int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procshutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1686,7 +1685,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1701,7 +1700,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStatvfs)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1721,7 +1720,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSymlink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1731,7 +1730,7 @@
 func Sync() (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSync)), 0, 0, 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1742,7 +1741,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSysconf)), 1, uintptr(which), 0, 0, 0, 0, 0)
 	n = int64(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1753,7 +1752,7 @@
 	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0)
 	ticks = uintptr(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1768,7 +1767,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procTruncate)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1778,7 +1777,7 @@
 func Fsync(fd int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFsync)), 1, uintptr(fd), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1788,7 +1787,7 @@
 func Ftruncate(fd int, length int64) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFtruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1806,7 +1805,7 @@
 func Uname(buf *Utsname) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1821,7 +1820,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procumount)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1836,7 +1835,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlink)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1851,7 +1850,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1861,7 +1860,7 @@
 func Ustat(dev int, ubuf *Ustat_t) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1876,7 +1875,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtime)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1886,7 +1885,7 @@
 func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1896,7 +1895,7 @@
 func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1907,7 +1906,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos))
 	ret = uintptr(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1917,7 +1916,7 @@
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmunmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1928,7 +1927,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
 	written = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1942,7 +1941,7 @@
 	}
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendto)), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1953,7 +1952,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0)
 	fd = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1963,7 +1962,7 @@
 func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1978,7 +1977,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1988,7 +1987,7 @@
 func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -1998,7 +1997,7 @@
 func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -2008,7 +2007,7 @@
 func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -2023,7 +2022,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvfrom)), 6, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -2034,7 +2033,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -2045,7 +2044,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_associate)), 5, uintptr(port), uintptr(source), uintptr(object), uintptr(events), uintptr(unsafe.Pointer(user)), 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -2056,7 +2055,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_dissociate)), 3, uintptr(port), uintptr(source), uintptr(object), 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -2067,7 +2066,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_get)), 3, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(unsafe.Pointer(timeout)), 0, 0, 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -2078,7 +2077,7 @@
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_getn)), 5, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout)), 0)
 	n = int(r0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -2088,7 +2087,7 @@
 func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
@@ -2098,7 +2097,7 @@
 func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0)
 	if e1 != 0 {
-		err = e1
+		err = errnoErr(e1)
 	}
 	return
 }
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go
index c316817..94f0112 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build zos && s390x
-// +build zos,s390x
 
 package unix
 
@@ -40,17 +39,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func write(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
index 55e0484..3a58ae8 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; DO NOT EDIT.
 
 //go:build 386 && openbsd
-// +build 386,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
index d2243cf..dcb7a0e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; DO NOT EDIT.
 
 //go:build amd64 && openbsd
-// +build amd64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
index 82dc51b..db5a7bf 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; DO NOT EDIT.
 
 //go:build arm && openbsd
-// +build arm,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
index cbdda1a..7be575a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; DO NOT EDIT.
 
 //go:build arm64 && openbsd
-// +build arm64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
index f55eae1..d6e3174 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; DO NOT EDIT.
 
 //go:build mips64 && openbsd
-// +build mips64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go
index e440544..ee97157 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; DO NOT EDIT.
 
 //go:build ppc64 && openbsd
-// +build ppc64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go
index a0db82f..35c3b91 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; DO NOT EDIT.
 
 //go:build riscv64 && openbsd
-// +build riscv64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
index f8298ff..5edda76 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && darwin
-// +build amd64,darwin
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
index 5eb433b..0dc9e8b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && darwin
-// +build arm64,darwin
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
index 703675c..308ddf3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && dragonfly
-// +build amd64,dragonfly
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
index 4e0d961..418664e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && freebsd
-// +build 386,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
index 01636b8..34d0b86 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && freebsd
-// +build amd64,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
index ad99bc1..b71cf45 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && freebsd
-// +build arm,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
index 89dcc42..e32df1c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && freebsd
-// +build arm64,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go
index ee37aaa..15ad611 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build riscv64 && freebsd
-// +build riscv64,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index c9c4ad0..fcf3ecb 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && linux
-// +build 386,linux
 
 package unix
 
@@ -447,4 +446,6 @@
 	SYS_PROCESS_MRELEASE             = 448
 	SYS_FUTEX_WAITV                  = 449
 	SYS_SET_MEMPOLICY_HOME_NODE      = 450
+	SYS_CACHESTAT                    = 451
+	SYS_FCHMODAT2                    = 452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 12ff341..f56dc25 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && linux
-// +build amd64,linux
 
 package unix
 
@@ -369,4 +368,7 @@
 	SYS_PROCESS_MRELEASE        = 448
 	SYS_FUTEX_WAITV             = 449
 	SYS_SET_MEMPOLICY_HOME_NODE = 450
+	SYS_CACHESTAT               = 451
+	SYS_FCHMODAT2               = 452
+	SYS_MAP_SHADOW_STACK        = 453
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index c3fb5e7..974bf24 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && linux
-// +build arm,linux
 
 package unix
 
@@ -411,4 +410,6 @@
 	SYS_PROCESS_MRELEASE             = 448
 	SYS_FUTEX_WAITV                  = 449
 	SYS_SET_MEMPOLICY_HOME_NODE      = 450
+	SYS_CACHESTAT                    = 451
+	SYS_FCHMODAT2                    = 452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 358c847..39a2739 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && linux
-// +build arm64,linux
 
 package unix
 
@@ -314,4 +313,6 @@
 	SYS_PROCESS_MRELEASE        = 448
 	SYS_FUTEX_WAITV             = 449
 	SYS_SET_MEMPOLICY_HOME_NODE = 450
+	SYS_CACHESTAT               = 451
+	SYS_FCHMODAT2               = 452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 81c4849..cf9c9d7 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build loong64 && linux
-// +build loong64,linux
 
 package unix
 
@@ -308,4 +307,6 @@
 	SYS_PROCESS_MRELEASE        = 448
 	SYS_FUTEX_WAITV             = 449
 	SYS_SET_MEMPOLICY_HOME_NODE = 450
+	SYS_CACHESTAT               = 451
+	SYS_FCHMODAT2               = 452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 202a57e..10b7362 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips && linux
-// +build mips,linux
 
 package unix
 
@@ -431,4 +430,6 @@
 	SYS_PROCESS_MRELEASE             = 4448
 	SYS_FUTEX_WAITV                  = 4449
 	SYS_SET_MEMPOLICY_HOME_NODE      = 4450
+	SYS_CACHESTAT                    = 4451
+	SYS_FCHMODAT2                    = 4452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 1fbceb5..cd4d8b4 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips64 && linux
-// +build mips64,linux
 
 package unix
 
@@ -361,4 +360,6 @@
 	SYS_PROCESS_MRELEASE        = 5448
 	SYS_FUTEX_WAITV             = 5449
 	SYS_SET_MEMPOLICY_HOME_NODE = 5450
+	SYS_CACHESTAT               = 5451
+	SYS_FCHMODAT2               = 5452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index b4ffb7a..2c0efca 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips64le && linux
-// +build mips64le,linux
 
 package unix
 
@@ -361,4 +360,6 @@
 	SYS_PROCESS_MRELEASE        = 5448
 	SYS_FUTEX_WAITV             = 5449
 	SYS_SET_MEMPOLICY_HOME_NODE = 5450
+	SYS_CACHESTAT               = 5451
+	SYS_FCHMODAT2               = 5452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 867985f..a72e31d 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mipsle && linux
-// +build mipsle,linux
 
 package unix
 
@@ -431,4 +430,6 @@
 	SYS_PROCESS_MRELEASE             = 4448
 	SYS_FUTEX_WAITV                  = 4449
 	SYS_SET_MEMPOLICY_HOME_NODE      = 4450
+	SYS_CACHESTAT                    = 4451
+	SYS_FCHMODAT2                    = 4452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index a8cce69..c7d1e37 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc && linux
-// +build ppc,linux
 
 package unix
 
@@ -438,4 +437,6 @@
 	SYS_PROCESS_MRELEASE             = 448
 	SYS_FUTEX_WAITV                  = 449
 	SYS_SET_MEMPOLICY_HOME_NODE      = 450
+	SYS_CACHESTAT                    = 451
+	SYS_FCHMODAT2                    = 452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index d44c5b3..f4d4838 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64 && linux
-// +build ppc64,linux
 
 package unix
 
@@ -410,4 +409,6 @@
 	SYS_PROCESS_MRELEASE        = 448
 	SYS_FUTEX_WAITV             = 449
 	SYS_SET_MEMPOLICY_HOME_NODE = 450
+	SYS_CACHESTAT               = 451
+	SYS_FCHMODAT2               = 452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 4214dd9..b64f0e5 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64le && linux
-// +build ppc64le,linux
 
 package unix
 
@@ -410,4 +409,6 @@
 	SYS_PROCESS_MRELEASE        = 448
 	SYS_FUTEX_WAITV             = 449
 	SYS_SET_MEMPOLICY_HOME_NODE = 450
+	SYS_CACHESTAT               = 451
+	SYS_FCHMODAT2               = 452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 3e594a8..9571119 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build riscv64 && linux
-// +build riscv64,linux
 
 package unix
 
@@ -251,6 +250,8 @@
 	SYS_ACCEPT4                 = 242
 	SYS_RECVMMSG                = 243
 	SYS_ARCH_SPECIFIC_SYSCALL   = 244
+	SYS_RISCV_HWPROBE           = 258
+	SYS_RISCV_FLUSH_ICACHE      = 259
 	SYS_WAIT4                   = 260
 	SYS_PRLIMIT64               = 261
 	SYS_FANOTIFY_INIT           = 262
@@ -313,4 +314,6 @@
 	SYS_PROCESS_MRELEASE        = 448
 	SYS_FUTEX_WAITV             = 449
 	SYS_SET_MEMPOLICY_HOME_NODE = 450
+	SYS_CACHESTAT               = 451
+	SYS_FCHMODAT2               = 452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index e6ed7d6..f94e943 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build s390x && linux
-// +build s390x,linux
 
 package unix
 
@@ -376,4 +375,6 @@
 	SYS_PROCESS_MRELEASE        = 448
 	SYS_FUTEX_WAITV             = 449
 	SYS_SET_MEMPOLICY_HOME_NODE = 450
+	SYS_CACHESTAT               = 451
+	SYS_FCHMODAT2               = 452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 92f628e..ba0c2bc 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build sparc64 && linux
-// +build sparc64,linux
 
 package unix
 
@@ -389,4 +388,6 @@
 	SYS_PROCESS_MRELEASE        = 448
 	SYS_FUTEX_WAITV             = 449
 	SYS_SET_MEMPOLICY_HOME_NODE = 450
+	SYS_CACHESTAT               = 451
+	SYS_FCHMODAT2               = 452
 )
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go
index 3a6699e..b2aa8cd 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && netbsd
-// +build 386,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go
index 5677cd4..524a1b1 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && netbsd
-// +build amd64,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
index e784cb6..d59b943 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && netbsd
-// +build arm,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go
index bd4952e..31e771d 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; DO NOT EDIT.
 
 //go:build arm64 && netbsd
-// +build arm64,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
index 5977338..9fd77c6 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && openbsd
-// +build 386,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
index 16af291..af10af2 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && openbsd
-// +build amd64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
index f59b18a..cc2028a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && openbsd
-// +build arm,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go
index 721ef59..c06dd44 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && openbsd
-// +build arm64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
index 01c43a0..9ddbf3e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips64 && openbsd
-// +build mips64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go
index f258cfa..19a6ee4 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64 && openbsd
-// +build ppc64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go
index 07919e0..05192a7 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build riscv64 && openbsd
-// +build riscv64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go
index 073daad..b2e3085 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build zos && s390x
-// +build zos,s390x
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
index 7a8161c..3e6d57c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc && aix
-// +build ppc,aix
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
index 07ed733..3a219bd 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64 && aix
-// +build ppc64,aix
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
index 690cefc..091d107 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && darwin
-// +build amd64,darwin
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
index 5bffc10..28ff4ef 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && darwin
-// +build arm64,darwin
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
index d0ba8e9..30e405b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && dragonfly
-// +build amd64,dragonfly
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
index 29dc483..6cbd094 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && freebsd
-// +build 386,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
index 0a89b28..7c03b6e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && freebsd
-// +build amd64,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
index c8666bb..422107e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && freebsd
-// +build arm,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
index 88fb48a..505a12a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && freebsd
-// +build arm64,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
index 698dc97..cc986c7 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build riscv64 && freebsd
-// +build riscv64,freebsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 02e2462..bbf8399 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -1,7 +1,6 @@
 // Code generated by mkmerge; DO NOT EDIT.
 
 //go:build linux
-// +build linux
 
 package unix
 
@@ -866,6 +865,11 @@
 	POLLNVAL = 0x20
 )
 
+type sigset_argpack struct {
+	ss    *Sigset_t
+	ssLen uintptr
+}
+
 type SignalfdSiginfo struct {
 	Signo     uint32
 	Errno     int32
@@ -1972,7 +1976,7 @@
 	NFT_MSG_GETFLOWTABLE              = 0x17
 	NFT_MSG_DELFLOWTABLE              = 0x18
 	NFT_MSG_GETRULE_RESET             = 0x19
-	NFT_MSG_MAX                       = 0x21
+	NFT_MSG_MAX                       = 0x22
 	NFTA_LIST_UNSPEC                  = 0x0
 	NFTA_LIST_ELEM                    = 0x1
 	NFTA_HOOK_UNSPEC                  = 0x0
@@ -2667,6 +2671,7 @@
 	BPF_PROG_TYPE_LSM                          = 0x1d
 	BPF_PROG_TYPE_SK_LOOKUP                    = 0x1e
 	BPF_PROG_TYPE_SYSCALL                      = 0x1f
+	BPF_PROG_TYPE_NETFILTER                    = 0x20
 	BPF_CGROUP_INET_INGRESS                    = 0x0
 	BPF_CGROUP_INET_EGRESS                     = 0x1
 	BPF_CGROUP_INET_SOCK_CREATE                = 0x2
@@ -2711,6 +2716,11 @@
 	BPF_PERF_EVENT                             = 0x29
 	BPF_TRACE_KPROBE_MULTI                     = 0x2a
 	BPF_LSM_CGROUP                             = 0x2b
+	BPF_STRUCT_OPS                             = 0x2c
+	BPF_NETFILTER                              = 0x2d
+	BPF_TCX_INGRESS                            = 0x2e
+	BPF_TCX_EGRESS                             = 0x2f
+	BPF_TRACE_UPROBE_MULTI                     = 0x30
 	BPF_LINK_TYPE_UNSPEC                       = 0x0
 	BPF_LINK_TYPE_RAW_TRACEPOINT               = 0x1
 	BPF_LINK_TYPE_TRACING                      = 0x2
@@ -2721,6 +2731,18 @@
 	BPF_LINK_TYPE_PERF_EVENT                   = 0x7
 	BPF_LINK_TYPE_KPROBE_MULTI                 = 0x8
 	BPF_LINK_TYPE_STRUCT_OPS                   = 0x9
+	BPF_LINK_TYPE_NETFILTER                    = 0xa
+	BPF_LINK_TYPE_TCX                          = 0xb
+	BPF_LINK_TYPE_UPROBE_MULTI                 = 0xc
+	BPF_PERF_EVENT_UNSPEC                      = 0x0
+	BPF_PERF_EVENT_UPROBE                      = 0x1
+	BPF_PERF_EVENT_URETPROBE                   = 0x2
+	BPF_PERF_EVENT_KPROBE                      = 0x3
+	BPF_PERF_EVENT_KRETPROBE                   = 0x4
+	BPF_PERF_EVENT_TRACEPOINT                  = 0x5
+	BPF_PERF_EVENT_EVENT                       = 0x6
+	BPF_F_KPROBE_MULTI_RETURN                  = 0x1
+	BPF_F_UPROBE_MULTI_RETURN                  = 0x1
 	BPF_ANY                                    = 0x0
 	BPF_NOEXIST                                = 0x1
 	BPF_EXIST                                  = 0x2
@@ -2738,6 +2760,8 @@
 	BPF_F_MMAPABLE                             = 0x400
 	BPF_F_PRESERVE_ELEMS                       = 0x800
 	BPF_F_INNER_MAP                            = 0x1000
+	BPF_F_LINK                                 = 0x2000
+	BPF_F_PATH_FD                              = 0x4000
 	BPF_STATS_RUN_TIME                         = 0x0
 	BPF_STACK_BUILD_ID_EMPTY                   = 0x0
 	BPF_STACK_BUILD_ID_VALID                   = 0x1
@@ -2758,6 +2782,7 @@
 	BPF_F_ZERO_CSUM_TX                         = 0x2
 	BPF_F_DONT_FRAGMENT                        = 0x4
 	BPF_F_SEQ_NUMBER                           = 0x8
+	BPF_F_NO_TUNNEL_KEY                        = 0x10
 	BPF_F_TUNINFO_FLAGS                        = 0x10
 	BPF_F_INDEX_MASK                           = 0xffffffff
 	BPF_F_CURRENT_CPU                          = 0xffffffff
@@ -2774,6 +2799,8 @@
 	BPF_F_ADJ_ROOM_ENCAP_L4_UDP                = 0x10
 	BPF_F_ADJ_ROOM_NO_CSUM_RESET               = 0x20
 	BPF_F_ADJ_ROOM_ENCAP_L2_ETH                = 0x40
+	BPF_F_ADJ_ROOM_DECAP_L3_IPV4               = 0x80
+	BPF_F_ADJ_ROOM_DECAP_L3_IPV6               = 0x100
 	BPF_ADJ_ROOM_ENCAP_L2_MASK                 = 0xff
 	BPF_ADJ_ROOM_ENCAP_L2_SHIFT                = 0x38
 	BPF_F_SYSCTL_BASE_NAME                     = 0x1
@@ -2862,6 +2889,8 @@
 	BPF_DEVCG_DEV_CHAR                         = 0x2
 	BPF_FIB_LOOKUP_DIRECT                      = 0x1
 	BPF_FIB_LOOKUP_OUTPUT                      = 0x2
+	BPF_FIB_LOOKUP_SKIP_NEIGH                  = 0x4
+	BPF_FIB_LOOKUP_TBID                        = 0x8
 	BPF_FIB_LKUP_RET_SUCCESS                   = 0x0
 	BPF_FIB_LKUP_RET_BLACKHOLE                 = 0x1
 	BPF_FIB_LKUP_RET_UNREACHABLE               = 0x2
@@ -2897,6 +2926,7 @@
 	BPF_CORE_ENUMVAL_EXISTS                    = 0xa
 	BPF_CORE_ENUMVAL_VALUE                     = 0xb
 	BPF_CORE_TYPE_MATCHES                      = 0xc
+	BPF_F_TIMER_ABS                            = 0x1
 )
 
 const (
@@ -2975,6 +3005,12 @@
 	Encrypt_key      [32]uint8
 	Init             [2]uint64
 }
+type LoopConfig struct {
+	Fd   uint32
+	Size uint32
+	Info LoopInfo64
+	_    [8]uint64
+}
 
 type TIPCSocketAddr struct {
 	Ref  uint32
@@ -4494,7 +4530,7 @@
 	NL80211_ATTR_MAC_HINT                                   = 0xc8
 	NL80211_ATTR_MAC_MASK                                   = 0xd7
 	NL80211_ATTR_MAX_AP_ASSOC_STA                           = 0xca
-	NL80211_ATTR_MAX                                        = 0x145
+	NL80211_ATTR_MAX                                        = 0x146
 	NL80211_ATTR_MAX_CRIT_PROT_DURATION                     = 0xb4
 	NL80211_ATTR_MAX_CSA_COUNTERS                           = 0xce
 	NL80211_ATTR_MAX_MATCH_SETS                             = 0x85
@@ -4864,7 +4900,7 @@
 	NL80211_CMD_LEAVE_IBSS                                  = 0x2c
 	NL80211_CMD_LEAVE_MESH                                  = 0x45
 	NL80211_CMD_LEAVE_OCB                                   = 0x6d
-	NL80211_CMD_MAX                                         = 0x99
+	NL80211_CMD_MAX                                         = 0x9a
 	NL80211_CMD_MICHAEL_MIC_FAILURE                         = 0x29
 	NL80211_CMD_MODIFY_LINK_STA                             = 0x97
 	NL80211_CMD_NAN_MATCH                                   = 0x78
@@ -5498,7 +5534,7 @@
 	NL80211_RATE_INFO_HE_RU_ALLOC_52                        = 0x1
 	NL80211_RATE_INFO_HE_RU_ALLOC_996                       = 0x5
 	NL80211_RATE_INFO_HE_RU_ALLOC                           = 0x11
-	NL80211_RATE_INFO_MAX                                   = 0x16
+	NL80211_RATE_INFO_MAX                                   = 0x1d
 	NL80211_RATE_INFO_MCS                                   = 0x2
 	NL80211_RATE_INFO_SHORT_GI                              = 0x4
 	NL80211_RATE_INFO_VHT_MCS                               = 0x6
@@ -5863,3 +5899,30 @@
 	VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5
 	VIRTIO_NET_HDR_GSO_ECN    = 0x80
 )
+
+type SchedAttr struct {
+	Size     uint32
+	Policy   uint32
+	Flags    uint64
+	Nice     int32
+	Priority uint32
+	Runtime  uint64
+	Deadline uint64
+	Period   uint64
+	Util_min uint32
+	Util_max uint32
+}
+
+const SizeofSchedAttr = 0x38
+
+type Cachestat_t struct {
+	Cache            uint64
+	Dirty            uint64
+	Writeback        uint64
+	Evicted          uint64
+	Recently_evicted uint64
+}
+type CachestatRange struct {
+	Off uint64
+	Len uint64
+}
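The Cachestat_t and CachestatRange types added above back the Linux cachestat(2) interface, whose syscall number SYS_CACHESTAT = 451 is also wired up elsewhere in this update. Below is a minimal sketch of driving it through a raw syscall; the argument order (fd, range, stat, flags) and the meaning of Len == 0 are assumptions taken from the kernel documentation, not something this diff establishes.

// Sketch: query page-cache residency of a file via SYS_CACHESTAT using the
// Cachestat_t/CachestatRange types added above. Argument order is assumed
// from cachestat(2).
package main

import (
	"fmt"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()

	var (
		rng  = unix.CachestatRange{Off: 0, Len: 0} // Len 0 is assumed to mean "to end of file".
		stat unix.Cachestat_t
	)
	_, _, errno := unix.Syscall6(unix.SYS_CACHESTAT,
		f.Fd(),
		uintptr(unsafe.Pointer(&rng)),
		uintptr(unsafe.Pointer(&stat)),
		0, 0, 0)
	if errno != 0 {
		fmt.Fprintln(os.Stderr, "cachestat:", errno) // ENOSYS on kernels before 6.5.
		return
	}
	fmt.Printf("cached=%d dirty=%d writeback=%d\n", stat.Cache, stat.Dirty, stat.Writeback)
}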
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 6d8acbc..438a30a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && linux
-// +build 386,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index 59293c6..adceca3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && linux
-// +build amd64,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 40cfa38..eeaa00a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && linux
-// +build arm,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index 055bc42..6739aa9 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && linux
-// +build arm64,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index f28affb..9920ef6 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build loong64 && linux
-// +build loong64,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 9d71e7c..2923b79 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips && linux
-// +build mips,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index fd5ccd3..ce2750e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips64 && linux
-// +build mips64,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 7704de7..3038811 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips64le && linux
-// +build mips64le,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index df00b87..efc6fed 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mipsle && linux
-// +build mipsle,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index 0942840..9a654b7 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc && linux
-// +build ppc,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 0348743..40d358e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64 && linux
-// +build ppc64,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index bad0670..148c6ce 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64le && linux
-// +build ppc64le,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index 9ea54b7..72ba815 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build riscv64 && linux
-// +build riscv64,linux
 
 package unix
 
@@ -718,3 +717,30 @@
 	_      uint64
 	_      uint64
 }
+
+type RISCVHWProbePairs struct {
+	Key   int64
+	Value uint64
+}
+
+const (
+	RISCV_HWPROBE_KEY_MVENDORID          = 0x0
+	RISCV_HWPROBE_KEY_MARCHID            = 0x1
+	RISCV_HWPROBE_KEY_MIMPID             = 0x2
+	RISCV_HWPROBE_KEY_BASE_BEHAVIOR      = 0x3
+	RISCV_HWPROBE_BASE_BEHAVIOR_IMA      = 0x1
+	RISCV_HWPROBE_KEY_IMA_EXT_0          = 0x4
+	RISCV_HWPROBE_IMA_FD                 = 0x1
+	RISCV_HWPROBE_IMA_C                  = 0x2
+	RISCV_HWPROBE_IMA_V                  = 0x4
+	RISCV_HWPROBE_EXT_ZBA                = 0x8
+	RISCV_HWPROBE_EXT_ZBB                = 0x10
+	RISCV_HWPROBE_EXT_ZBS                = 0x20
+	RISCV_HWPROBE_KEY_CPUPERF_0          = 0x5
+	RISCV_HWPROBE_MISALIGNED_UNKNOWN     = 0x0
+	RISCV_HWPROBE_MISALIGNED_EMULATED    = 0x1
+	RISCV_HWPROBE_MISALIGNED_SLOW        = 0x2
+	RISCV_HWPROBE_MISALIGNED_FAST        = 0x3
+	RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4
+	RISCV_HWPROBE_MISALIGNED_MASK        = 0x7
+)
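RISCVHWProbePairs and the RISCV_HWPROBE_* constants describe key/value pairs reported by the RISC-V hardware-probing interface. The hedged sketch below only decodes such a pair using the constants defined here; how the pair gets populated (the riscv_hwprobe syscall itself) is out of scope, and the sample pair in main is hypothetical.

// Sketch: interpret a RISCVHWProbePairs entry with the constants added in
// this file. Builds only for riscv64/linux, where these symbols exist.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func describe(p unix.RISCVHWProbePairs) {
	switch p.Key {
	case unix.RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		fmt.Println("IMA base behavior:", p.Value&unix.RISCV_HWPROBE_BASE_BEHAVIOR_IMA != 0)
	case unix.RISCV_HWPROBE_KEY_IMA_EXT_0:
		fmt.Println("F/D extensions:", p.Value&unix.RISCV_HWPROBE_IMA_FD != 0)
		fmt.Println("C extension:", p.Value&unix.RISCV_HWPROBE_IMA_C != 0)
		fmt.Println("V extension:", p.Value&unix.RISCV_HWPROBE_IMA_V != 0)
	case unix.RISCV_HWPROBE_KEY_CPUPERF_0:
		fmt.Println("misaligned access class:", p.Value&unix.RISCV_HWPROBE_MISALIGNED_MASK)
	}
}

func main() {
	// Hypothetical pair, as the kernel might return it.
	describe(unix.RISCVHWProbePairs{
		Key:   unix.RISCV_HWPROBE_KEY_IMA_EXT_0,
		Value: unix.RISCV_HWPROBE_IMA_FD | unix.RISCV_HWPROBE_IMA_C,
	})
}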
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index aa268d0..71e7655 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build s390x && linux
-// +build s390x,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 444045b..4abbdb9 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build sparc64 && linux
-// +build sparc64,linux
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
index 9bc4c8f..f22e794 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && netbsd
-// +build 386,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
index bb05f65..066a7d8 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && netbsd
-// +build amd64,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
index db40e3a..439548e 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && netbsd
-// +build arm,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
index 1112115..16085d3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && netbsd
-// +build arm64,netbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
index 26eba23..afd13a3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build 386 && openbsd
-// +build 386,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
index 5a54798..5d97f1f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && openbsd
-// +build amd64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
index be58c4e..34871cd 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm && openbsd
-// +build arm,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
index 5233826..5911bce 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build arm64 && openbsd
-// +build arm64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
index 605cfdb..e4f24f3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build mips64 && openbsd
-// +build mips64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go
index d6724c0..ca50a79 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build ppc64 && openbsd
-// +build ppc64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go
index ddfd27a..d7d7f79 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build riscv64 && openbsd
-// +build riscv64,openbsd
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
index 0400747..1416057 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
@@ -2,7 +2,6 @@
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build amd64 && solaris
-// +build amd64,solaris
 
 package unix
 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
index aec1efc..54f31be 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build zos && s390x
-// +build zos,s390x
 
 // Hand edited based on ztypes_linux_s390x.go
 // TODO: auto-generate.
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/aliases.go b/src/cmd/vendor/golang.org/x/sys/windows/aliases.go
index a20ebea..ce2d713 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/aliases.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/aliases.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build windows && go1.9
-// +build windows,go1.9
 
 package windows
 
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/empty.s b/src/cmd/vendor/golang.org/x/sys/windows/empty.s
index fdbbbcd..ba64cac 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/empty.s
+++ b/src/cmd/vendor/golang.org/x/sys/windows/empty.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !go1.12
-// +build !go1.12
 
 // This file is here to allow bodyless functions with go:linkname for Go 1.11
 // and earlier (see https://golang.org/issue/23311).
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/eventlog.go b/src/cmd/vendor/golang.org/x/sys/windows/eventlog.go
index 2cd6064..6c36695 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/eventlog.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/eventlog.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build windows
-// +build windows
 
 package windows
 
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/exec_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/exec_windows.go
index a52e033..9cabbb6 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/exec_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/exec_windows.go
@@ -22,7 +22,7 @@
 //     but only if there is space or tab inside s.
 func EscapeArg(s string) string {
 	if len(s) == 0 {
-		return "\"\""
+		return `""`
 	}
 	n := len(s)
 	hasSpace := false
@@ -35,7 +35,7 @@
 		}
 	}
 	if hasSpace {
-		n += 2
+		n += 2 // Reserve space for quotes.
 	}
 	if n == len(s) {
 		return s
@@ -82,20 +82,68 @@
 // in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument,
 // or any program that uses CommandLineToArgv.
 func ComposeCommandLine(args []string) string {
-	var commandLine string
-	for i := range args {
-		if i > 0 {
-			commandLine += " "
-		}
-		commandLine += EscapeArg(args[i])
+	if len(args) == 0 {
+		return ""
 	}
-	return commandLine
+
+	// Per https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-commandlinetoargvw:
+	// “This function accepts command lines that contain a program name; the
+	// program name can be enclosed in quotation marks or not.”
+	//
+	// Unfortunately, it provides no means of escaping interior quotation marks
+	// within that program name, and we have no way to report them here.
+	prog := args[0]
+	mustQuote := len(prog) == 0
+	for i := 0; i < len(prog); i++ {
+		c := prog[i]
+		if c <= ' ' || (c == '"' && i == 0) {
+			// Force quotes for not only the ASCII space and tab as described in the
+			// MSDN article, but also ASCII control characters.
+			// The documentation for CommandLineToArgvW doesn't say what happens when
+			// the first argument is not a valid program name, but it empirically
+			// seems to drop unquoted control characters.
+			mustQuote = true
+			break
+		}
+	}
+	var commandLine []byte
+	if mustQuote {
+		commandLine = make([]byte, 0, len(prog)+2)
+		commandLine = append(commandLine, '"')
+		for i := 0; i < len(prog); i++ {
+			c := prog[i]
+			if c == '"' {
+				// This quote would interfere with our surrounding quotes.
+				// We have no way to report an error, so just strip out
+				// the offending character instead.
+				continue
+			}
+			commandLine = append(commandLine, c)
+		}
+		commandLine = append(commandLine, '"')
+	} else {
+		if len(args) == 1 {
+			// args[0] is a valid command line representing itself.
+			// No need to allocate a new slice or string for it.
+			return prog
+		}
+		commandLine = []byte(prog)
+	}
+
+	for _, arg := range args[1:] {
+		commandLine = append(commandLine, ' ')
+		// TODO(bcmills): since we're already appending to a slice, it would be nice
+		// to avoid the intermediate allocations of EscapeArg.
+		// Perhaps we can factor out an appendEscapedArg function.
+		commandLine = append(commandLine, EscapeArg(arg)...)
+	}
+	return string(commandLine)
 }
 
 // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv,
 // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that
 // command lines are passed around.
-// DecomposeCommandLine returns error if commandLine contains NUL.
+// DecomposeCommandLine returns an error if commandLine contains NUL.
 func DecomposeCommandLine(commandLine string) ([]string, error) {
 	if len(commandLine) == 0 {
 		return []string{}, nil
@@ -105,18 +153,35 @@
 		return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine")
 	}
 	var argc int32
-	argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc)
+	argv, err := commandLineToArgv(&utf16CommandLine[0], &argc)
 	if err != nil {
 		return nil, err
 	}
 	defer LocalFree(Handle(unsafe.Pointer(argv)))
+
 	var args []string
-	for _, v := range (*argv)[:argc] {
-		args = append(args, UTF16ToString((*v)[:]))
+	for _, p := range unsafe.Slice(argv, argc) {
+		args = append(args, UTF16PtrToString(p))
 	}
 	return args, nil
 }
 
+// CommandLineToArgv parses a Unicode command line string and sets
+// argc to the number of parsed arguments.
+//
+// The returned memory should be freed using a single call to LocalFree.
+//
+// Note that although the return type of CommandLineToArgv indicates 8192
+// entries of up to 8192 characters each, the actual count of parsed arguments
+// may exceed 8192, and the documentation for CommandLineToArgvW does not mention
+// any bound on the lengths of the individual argument strings.
+// (See https://go.dev/issue/63236.)
+func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) {
+	argp, err := commandLineToArgv(cmd, argc)
+	argv = (*[8192]*[8192]uint16)(unsafe.Pointer(argp))
+	return argv, err
+}
+
 func CloseOnExec(fd Handle) {
 	SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0)
 }
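The rewritten ComposeCommandLine treats args[0] specially because CommandLineToArgvW parses the program name under different rules from the remaining arguments, and interior quotes in the program name cannot be escaped at all. A hedged round-trip sketch using the exported ComposeCommandLine and DecomposeCommandLine pair (the sample arguments are arbitrary):

// Sketch: round-trip an argument vector through the Windows command-line
// quoting rules discussed above. Builds only on windows.
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	args := []string{`C:\Program Files\demo\app.exe`, `plain`, `has space`, `quote"inside`}

	// ComposeCommandLine quotes args[0] only when it has to and escapes the rest.
	cmdline := windows.ComposeCommandLine(args)
	fmt.Println(cmdline)

	// DecomposeCommandLine hands the string to CommandLineToArgvW and returns
	// the unescaped parts; quotes stripped from args[0] do not come back.
	parts, err := windows.DecomposeCommandLine(cmdline)
	if err != nil {
		fmt.Println("decompose:", err)
		return
	}
	for i, p := range parts {
		fmt.Printf("argv[%d] = %q\n", i, p)
	}
}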
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/mksyscall.go b/src/cmd/vendor/golang.org/x/sys/windows/mksyscall.go
index 8563f79..dbcdb09 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/mksyscall.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/mksyscall.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build generate
-// +build generate
 
 package windows
 
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/race.go b/src/cmd/vendor/golang.org/x/sys/windows/race.go
index 9196b08..0f1bdc3 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/race.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/race.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build windows && race
-// +build windows,race
 
 package windows
 
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/race0.go b/src/cmd/vendor/golang.org/x/sys/windows/race0.go
index 7bae481..0c78da7 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/race0.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/race0.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build windows && !race
-// +build windows,!race
 
 package windows
 
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/security_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/security_windows.go
index d414ef1..26be94a 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/security_windows.go
@@ -7,8 +7,6 @@
 import (
 	"syscall"
 	"unsafe"
-
-	"golang.org/x/sys/internal/unsafeheader"
 )
 
 const (
@@ -1341,21 +1339,14 @@
 		sdLen = min
 	}
 
-	var src []byte
-	h := (*unsafeheader.Slice)(unsafe.Pointer(&src))
-	h.Data = unsafe.Pointer(selfRelativeSD)
-	h.Len = sdLen
-	h.Cap = sdLen
-
+	src := unsafe.Slice((*byte)(unsafe.Pointer(selfRelativeSD)), sdLen)
+	// SECURITY_DESCRIPTOR has pointers in it, which means checkptr expects it to

+	// be aligned properly. When we're copying a Windows-allocated struct to a
+	// Go-allocated one, make sure that the Go allocation is aligned to the
+	// pointer size.
 	const psize = int(unsafe.Sizeof(uintptr(0)))
-
-	var dst []byte
-	h = (*unsafeheader.Slice)(unsafe.Pointer(&dst))
 	alloc := make([]uintptr, (sdLen+psize-1)/psize)
-	h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data
-	h.Len = sdLen
-	h.Cap = sdLen
-
+	dst := unsafe.Slice((*byte)(unsafe.Pointer(&alloc[0])), sdLen)
 	copy(dst, src)
 	return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0]))
 }
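The change above swaps the hand-built unsafeheader.Slice headers for unsafe.Slice, the supported way since Go 1.17 to view foreign-allocated memory as a Go slice. A stand-alone sketch of the same pattern follows; bufPtr and bufLen are placeholders for a pointer and length handed back by a foreign allocator, not a real API.

// Sketch: the unsafe.Slice pattern the vendored code now uses, shown on a
// stand-alone buffer instead of a SECURITY_DESCRIPTOR.
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	backing := []byte("windows-allocated bytes")
	bufPtr := unsafe.Pointer(&backing[0]) // pretend this came from LocalAlloc or similar
	bufLen := len(backing)

	// One call replaces the old Data/Len/Cap header surgery.
	view := unsafe.Slice((*byte)(bufPtr), bufLen)
	fmt.Println(string(view))
}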
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/service.go b/src/cmd/vendor/golang.org/x/sys/windows/service.go
index c44a1b9..a9dc630 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/service.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/service.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build windows
-// +build windows
 
 package windows
 
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/str.go b/src/cmd/vendor/golang.org/x/sys/windows/str.go
index 4fc0143..6a4f9ce 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/str.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/str.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build windows
-// +build windows
 
 package windows
 
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/syscall.go b/src/cmd/vendor/golang.org/x/sys/windows/syscall.go
index 8732cdb..e85ed6b 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/syscall.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/syscall.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build windows
-// +build windows
 
 // Package windows contains an interface to the low-level operating system
 // primitives. OS details vary depending on the underlying system, and
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go
index 9645900..47dc579 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -15,8 +15,6 @@
 	"time"
 	"unicode/utf16"
 	"unsafe"
-
-	"golang.org/x/sys/internal/unsafeheader"
 )
 
 type Handle uintptr
@@ -135,14 +133,14 @@
 
 // NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
 // This is useful when interoperating with Windows code requiring callbacks.
-// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
 func NewCallback(fn interface{}) uintptr {
 	return syscall.NewCallback(fn)
 }
 
 // NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention.
 // This is useful when interoperating with Windows code requiring callbacks.
-// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
 func NewCallbackCDecl(fn interface{}) uintptr {
 	return syscall.NewCallbackCDecl(fn)
 }
@@ -157,6 +155,8 @@
 //sys	GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW
 //sys	GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW
 //sys	SetDefaultDllDirectories(directoryFlags uint32) (err error)
+//sys	AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory
+//sys	RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory
 //sys	SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW
 //sys	GetVersion() (ver uint32, err error)
 //sys	FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW
@@ -216,7 +216,7 @@
 //sys	shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath
 //sys	TerminateProcess(handle Handle, exitcode uint32) (err error)
 //sys	GetExitCodeProcess(handle Handle, exitcode *uint32) (err error)
-//sys	GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW
+//sys	getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW
 //sys	GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error)
 //sys	DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error)
 //sys	WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff]
@@ -235,12 +235,13 @@
 //sys	CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock
 //sys	DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock
 //sys	getTickCount64() (ms uint64) = kernel32.GetTickCount64
+//sys   GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error)
 //sys	SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error)
 //sys	GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW
 //sys	SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW
 //sys	GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW
 //sys	GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW
-//sys	CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW
+//sys	commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW
 //sys	LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0]
 //sys	LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error)
 //sys	SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error)
@@ -299,12 +300,15 @@
 //sys	RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue
 //sys	GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId
 //sys	ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId
+//sys	ClosePseudoConsole(console Handle) = kernel32.ClosePseudoConsole
+//sys	createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) = kernel32.CreatePseudoConsole
 //sys	GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode
 //sys	SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode
 //sys	GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo
 //sys	setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition
 //sys	WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
 //sys	ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
+//sys	resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
 //sys	CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
 //sys	Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
 //sys	Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW
@@ -437,6 +441,10 @@
 //sys	DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute
 //sys	DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute
 
+// Windows Multimedia API
+//sys TimeBeginPeriod (period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod
+//sys TimeEndPeriod (period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod
+
 // syscall interface implementation for other packages
 
 // GetCurrentProcess returns the handle for the current process.
@@ -964,7 +972,8 @@
 	if n > 0 {
 		sl += int32(n) + 1
 	}
-	if sa.raw.Path[0] == '@' {
+	if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) {
+		// Check sl > 3 so we don't change unnamed socket behavior.
 		sa.raw.Path[0] = 0
 		// Don't count trailing NUL for abstract address.
 		sl--
@@ -1624,6 +1633,11 @@
 	return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position))))
 }
 
+func GetStartupInfo(startupInfo *StartupInfo) error {
+	getStartupInfo(startupInfo)
+	return nil
+}
+
 func (s NTStatus) Errno() syscall.Errno {
 	return rtlNtStatusToDosErrorNoTeb(s)
 }
@@ -1658,12 +1672,8 @@
 
 // Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
 func (s *NTUnicodeString) Slice() []uint16 {
-	var slice []uint16
-	hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice))
-	hdr.Data = unsafe.Pointer(s.Buffer)
-	hdr.Len = int(s.Length)
-	hdr.Cap = int(s.MaximumLength)
-	return slice
+	slice := unsafe.Slice(s.Buffer, s.MaximumLength)
+	return slice[:s.Length]
 }
 
 func (s *NTUnicodeString) String() string {
@@ -1686,12 +1696,8 @@
 
 // Slice returns a byte slice that aliases the data in the NTString.
 func (s *NTString) Slice() []byte {
-	var slice []byte
-	hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice))
-	hdr.Data = unsafe.Pointer(s.Buffer)
-	hdr.Len = int(s.Length)
-	hdr.Cap = int(s.MaximumLength)
-	return slice
+	slice := unsafe.Slice(s.Buffer, s.MaximumLength)
+	return slice[:s.Length]
 }
 
 func (s *NTString) String() string {
@@ -1743,10 +1749,7 @@
 	if err != nil {
 		return
 	}
-	h := (*unsafeheader.Slice)(unsafe.Pointer(&data))
-	h.Data = unsafe.Pointer(ptr)
-	h.Len = int(size)
-	h.Cap = int(size)
+	data = unsafe.Slice((*byte)(unsafe.Pointer(ptr)), size)
 	return
 }
 
@@ -1817,3 +1820,17 @@
 	// A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress.
 	VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK
 }
+
+// CreatePseudoConsole creates a windows pseudo console.
+func CreatePseudoConsole(size Coord, in Handle, out Handle, flags uint32, pconsole *Handle) error {
+	// We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only
+	// accept arguments that can be casted to uintptr, and Coord can't.
+	return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), in, out, flags, pconsole)
+}
+
+// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
+func ResizePseudoConsole(pconsole Handle, size Coord) error {
+	// We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only
+	// accept arguments that can be casted to uintptr, and Coord can't.
+	return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size))))
+}
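CreatePseudoConsole and ResizePseudoConsole get hand-written wrappers because the generated stubs only accept uintptr-sized arguments, so Coord is bit-cast to uint32 on the way in. Below is a hedged sketch of creating, resizing, and closing a pseudo console with handles from CreatePipe; plumbing the console into a child process via PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE is deliberately not shown.

// Sketch: create an 80x25 pseudo console from two anonymous pipes, resize it,
// and close it. Builds only on windows.
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	var ptyIn, inWrite, outRead, ptyOut windows.Handle
	if err := windows.CreatePipe(&ptyIn, &inWrite, nil, 0); err != nil {
		fmt.Println("pipe:", err)
		return
	}
	if err := windows.CreatePipe(&outRead, &ptyOut, nil, 0); err != nil {
		fmt.Println("pipe:", err)
		return
	}

	var pc windows.Handle
	size := windows.Coord{X: 80, Y: 25}
	if err := windows.CreatePseudoConsole(size, ptyIn, ptyOut, 0, &pc); err != nil {
		fmt.Println("CreatePseudoConsole:", err)
		return
	}
	defer windows.ClosePseudoConsole(pc)

	if err := windows.ResizePseudoConsole(pc, windows.Coord{X: 120, Y: 40}); err != nil {
		fmt.Println("ResizePseudoConsole:", err)
	}
}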
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go
index 88e62a6..359780f 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go
@@ -247,6 +247,7 @@
 	PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007
 	PROC_THREAD_ATTRIBUTE_UMS_THREAD        = 0x00030006
 	PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL  = 0x0002000b
+	PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE     = 0x00020016
 )
 
 const (
@@ -1093,7 +1094,33 @@
 
 	SOMAXCONN = 0x7fffffff
 
-	TCP_NODELAY = 1
+	TCP_NODELAY                    = 1
+	TCP_EXPEDITED_1122             = 2
+	TCP_KEEPALIVE                  = 3
+	TCP_MAXSEG                     = 4
+	TCP_MAXRT                      = 5
+	TCP_STDURG                     = 6
+	TCP_NOURG                      = 7
+	TCP_ATMARK                     = 8
+	TCP_NOSYNRETRIES               = 9
+	TCP_TIMESTAMPS                 = 10
+	TCP_OFFLOAD_PREFERENCE         = 11
+	TCP_CONGESTION_ALGORITHM       = 12
+	TCP_DELAY_FIN_ACK              = 13
+	TCP_MAXRTMS                    = 14
+	TCP_FASTOPEN                   = 15
+	TCP_KEEPCNT                    = 16
+	TCP_KEEPIDLE                   = TCP_KEEPALIVE
+	TCP_KEEPINTVL                  = 17
+	TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18
+	TCP_ICMP_ERROR_INFO            = 19
+
+	UDP_NOCHECKSUM              = 1
+	UDP_SEND_MSG_SIZE           = 2
+	UDP_RECV_MAX_COALESCED_SIZE = 3
+	UDP_CHECKSUM_COVERAGE       = 20
+
+	UDP_COALESCED_INFO = 3
 
 	SHUT_RD   = 0
 	SHUT_WR   = 1
@@ -2139,6 +2166,12 @@
 	ENABLE_LVB_GRID_WORLDWIDE          = 0x10
 )
 
+// Pseudo console related constants used for the flags parameter to
+// CreatePseudoConsole. See: https://learn.microsoft.com/en-us/windows/console/createpseudoconsole
+const (
+	PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
+)
+
 type Coord struct {
 	X int16
 	Y int16
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 566dd3e..146a1f0 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -55,6 +55,7 @@
 	moduser32   = NewLazySystemDLL("user32.dll")
 	moduserenv  = NewLazySystemDLL("userenv.dll")
 	modversion  = NewLazySystemDLL("version.dll")
+	modwinmm    = NewLazySystemDLL("winmm.dll")
 	modwintrust = NewLazySystemDLL("wintrust.dll")
 	modws2_32   = NewLazySystemDLL("ws2_32.dll")
 	modwtsapi32 = NewLazySystemDLL("wtsapi32.dll")
@@ -183,10 +184,12 @@
 	procGetAdaptersInfo                                      = modiphlpapi.NewProc("GetAdaptersInfo")
 	procGetBestInterfaceEx                                   = modiphlpapi.NewProc("GetBestInterfaceEx")
 	procGetIfEntry                                           = modiphlpapi.NewProc("GetIfEntry")
+	procAddDllDirectory                                      = modkernel32.NewProc("AddDllDirectory")
 	procAssignProcessToJobObject                             = modkernel32.NewProc("AssignProcessToJobObject")
 	procCancelIo                                             = modkernel32.NewProc("CancelIo")
 	procCancelIoEx                                           = modkernel32.NewProc("CancelIoEx")
 	procCloseHandle                                          = modkernel32.NewProc("CloseHandle")
+	procClosePseudoConsole                                   = modkernel32.NewProc("ClosePseudoConsole")
 	procConnectNamedPipe                                     = modkernel32.NewProc("ConnectNamedPipe")
 	procCreateDirectoryW                                     = modkernel32.NewProc("CreateDirectoryW")
 	procCreateEventExW                                       = modkernel32.NewProc("CreateEventExW")
@@ -201,6 +204,7 @@
 	procCreateNamedPipeW                                     = modkernel32.NewProc("CreateNamedPipeW")
 	procCreatePipe                                           = modkernel32.NewProc("CreatePipe")
 	procCreateProcessW                                       = modkernel32.NewProc("CreateProcessW")
+	procCreatePseudoConsole                                  = modkernel32.NewProc("CreatePseudoConsole")
 	procCreateSymbolicLinkW                                  = modkernel32.NewProc("CreateSymbolicLinkW")
 	procCreateToolhelp32Snapshot                             = modkernel32.NewProc("CreateToolhelp32Snapshot")
 	procDefineDosDeviceW                                     = modkernel32.NewProc("DefineDosDeviceW")
@@ -250,6 +254,7 @@
 	procGetFileAttributesW                                   = modkernel32.NewProc("GetFileAttributesW")
 	procGetFileInformationByHandle                           = modkernel32.NewProc("GetFileInformationByHandle")
 	procGetFileInformationByHandleEx                         = modkernel32.NewProc("GetFileInformationByHandleEx")
+	procGetFileTime                                          = modkernel32.NewProc("GetFileTime")
 	procGetFileType                                          = modkernel32.NewProc("GetFileType")
 	procGetFinalPathNameByHandleW                            = modkernel32.NewProc("GetFinalPathNameByHandleW")
 	procGetFullPathNameW                                     = modkernel32.NewProc("GetFullPathNameW")
@@ -326,7 +331,9 @@
 	procReadProcessMemory                                    = modkernel32.NewProc("ReadProcessMemory")
 	procReleaseMutex                                         = modkernel32.NewProc("ReleaseMutex")
 	procRemoveDirectoryW                                     = modkernel32.NewProc("RemoveDirectoryW")
+	procRemoveDllDirectory                                   = modkernel32.NewProc("RemoveDllDirectory")
 	procResetEvent                                           = modkernel32.NewProc("ResetEvent")
+	procResizePseudoConsole                                  = modkernel32.NewProc("ResizePseudoConsole")
 	procResumeThread                                         = modkernel32.NewProc("ResumeThread")
 	procSetCommTimeouts                                      = modkernel32.NewProc("SetCommTimeouts")
 	procSetConsoleCursorPosition                             = modkernel32.NewProc("SetConsoleCursorPosition")
@@ -468,6 +475,8 @@
 	procGetFileVersionInfoSizeW                              = modversion.NewProc("GetFileVersionInfoSizeW")
 	procGetFileVersionInfoW                                  = modversion.NewProc("GetFileVersionInfoW")
 	procVerQueryValueW                                       = modversion.NewProc("VerQueryValueW")
+	proctimeBeginPeriod                                      = modwinmm.NewProc("timeBeginPeriod")
+	proctimeEndPeriod                                        = modwinmm.NewProc("timeEndPeriod")
 	procWinVerifyTrustEx                                     = modwintrust.NewProc("WinVerifyTrustEx")
 	procFreeAddrInfoW                                        = modws2_32.NewProc("FreeAddrInfoW")
 	procGetAddrInfoW                                         = modws2_32.NewProc("GetAddrInfoW")
@@ -1598,6 +1607,15 @@
 	return
 }
 
+func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
+	r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+	cookie = uintptr(r0)
+	if cookie == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func AssignProcessToJobObject(job Handle, process Handle) (err error) {
 	r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0)
 	if r1 == 0 {
@@ -1630,6 +1648,11 @@
 	return
 }
 
+func ClosePseudoConsole(console Handle) {
+	syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0)
+	return
+}
+
 func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
 	r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0)
 	if r1 == 0 {
@@ -1759,6 +1782,14 @@
 	return
 }
 
+func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) {
+	r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0)
+	if r0 != 0 {
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
 func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) {
 	r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
 	if r1&0xff == 0 {
@@ -2166,6 +2197,14 @@
 	return
 }
 
+func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
+	r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func GetFileType(filehandle Handle) (n uint32, err error) {
 	r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0)
 	n = uint32(r0)
@@ -2367,11 +2406,8 @@
 	return
 }
 
-func GetStartupInfo(startupInfo *StartupInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
-	if r1 == 0 {
-		err = errnoErr(e1)
-	}
+func getStartupInfo(startupInfo *StartupInfo) {
+	syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
 	return
 }
 
@@ -2854,6 +2890,14 @@
 	return
 }
 
+func RemoveDllDirectory(cookie uintptr) (err error) {
+	r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func ResetEvent(event Handle) (err error) {
 	r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0)
 	if r1 == 0 {
@@ -2862,6 +2906,14 @@
 	return
 }
 
+func resizePseudoConsole(pconsole Handle, size uint32) (hr error) {
+	r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0)
+	if r0 != 0 {
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
 func ResumeThread(thread Handle) (ret uint32, err error) {
 	r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0)
 	ret = uint32(r0)
@@ -3820,9 +3872,9 @@
 	return
 }
 
-func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) {
+func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
 	r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0)
-	argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0))
+	argv = (**uint16)(unsafe.Pointer(r0))
 	if argv == nil {
 		err = errnoErr(e1)
 	}
@@ -4017,6 +4069,22 @@
 	return
 }
 
+func TimeBeginPeriod(period uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func TimeEndPeriod(period uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0)
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
 	r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
 	if r0 != 0 {
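The AddDllDirectory/RemoveDllDirectory wrappers added above are cookie-based: the cookie returned by the first call is what the second one takes. A minimal sketch of using the exported pair through golang.org/x/sys/windows (the plugin path is illustrative; this only builds on Windows):

	package main

	import (
		"log"

		"golang.org/x/sys/windows"
	)

	func main() {
		// Illustrative directory; any absolute path is accepted.
		dir, err := windows.UTF16PtrFromString(`C:\MyApp\plugins`)
		if err != nil {
			log.Fatal(err)
		}
		// AddDllDirectory returns an opaque cookie identifying the entry.
		cookie, err := windows.AddDllDirectory(dir)
		if err != nil {
			log.Fatal(err)
		}
		// RemoveDllDirectory takes the same cookie to undo the change.
		defer windows.RemoveDllDirectory(cookie)
	}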
diff --git a/src/cmd/vendor/golang.org/x/term/term_unix.go b/src/cmd/vendor/golang.org/x/term/term_unix.go
index 62c2b3f..1ad0ddf 100644
--- a/src/cmd/vendor/golang.org/x/term/term_unix.go
+++ b/src/cmd/vendor/golang.org/x/term/term_unix.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 package term
 
diff --git a/src/cmd/vendor/golang.org/x/term/term_unix_bsd.go b/src/cmd/vendor/golang.org/x/term/term_unix_bsd.go
index 853b3d6..9dbf546 100644
--- a/src/cmd/vendor/golang.org/x/term/term_unix_bsd.go
+++ b/src/cmd/vendor/golang.org/x/term/term_unix_bsd.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || netbsd || openbsd
-// +build darwin dragonfly freebsd netbsd openbsd
 
 package term
 
diff --git a/src/cmd/vendor/golang.org/x/term/term_unix_other.go b/src/cmd/vendor/golang.org/x/term/term_unix_other.go
index 1e8955c..1b36de7 100644
--- a/src/cmd/vendor/golang.org/x/term/term_unix_other.go
+++ b/src/cmd/vendor/golang.org/x/term/term_unix_other.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || linux || solaris || zos
-// +build aix linux solaris zos
 
 package term
 
diff --git a/src/cmd/vendor/golang.org/x/term/term_unsupported.go b/src/cmd/vendor/golang.org/x/term/term_unsupported.go
index f1df850..3c409e5 100644
--- a/src/cmd/vendor/golang.org/x/term/term_unsupported.go
+++ b/src/cmd/vendor/golang.org/x/term/term_unsupported.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !zos && !windows && !solaris && !plan9
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9
 
 package term
 
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/analysis.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/analysis.go
index e51e58b..5da33c7 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/analysis.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/analysis.go
@@ -139,28 +139,24 @@
 	// See comments for ExportObjectFact.
 	ExportPackageFact func(fact Fact)
 
-	// AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes
-	// in unspecified order.
-	// WARNING: This is an experimental API and may change in the future.
+	// AllPackageFacts returns a new slice containing all package
+	// facts of the analysis's FactTypes in unspecified order.
 	AllPackageFacts func() []PackageFact
 
-	// AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes
-	// in unspecified order.
-	// WARNING: This is an experimental API and may change in the future.
+	// AllObjectFacts returns a new slice containing all object
+	// facts of the analysis's FactTypes in unspecified order.
 	AllObjectFacts func() []ObjectFact
 
 	/* Further fields may be added in future. */
 }
 
 // PackageFact is a package together with an associated fact.
-// WARNING: This is an experimental API and may change in the future.
 type PackageFact struct {
 	Package *types.Package
 	Fact    Fact
 }
 
 // ObjectFact is an object together with an associated fact.
-// WARNING: This is an experimental API and may change in the future.
 type ObjectFact struct {
 	Object types.Object
 	Fact   Fact
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/diagnostic.go
index 7646ad0..f67c972 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/diagnostic.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/diagnostic.go
@@ -31,14 +31,13 @@
 	// see https://pkg.go.dev/net/url#URL.ResolveReference.
 	URL string
 
-	// SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform
-	// edits to a file that address the diagnostic.
-	// TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic?
+	// SuggestedFixes contains suggested fixes for a diagnostic
+	// which can be used to perform edits to a file that address
+	// the diagnostic.
+	//
 	// Diagnostics should not contain SuggestedFixes that overlap.
-	// Experimental: This API is experimental and may change in the future.
 	SuggestedFixes []SuggestedFix // optional
 
-	// Experimental: This API is experimental and may change in the future.
 	Related []RelatedInformation // optional
 }
 
@@ -52,12 +51,12 @@
 	Message string
 }
 
-// A SuggestedFix is a code change associated with a Diagnostic that a user can choose
-// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged
-// by the diagnostic.
-// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix
-// should not contain edits for other packages.
-// Experimental: This API is experimental and may change in the future.
+// A SuggestedFix is a code change associated with a Diagnostic that a
+// user can choose to apply to their code. Usually the SuggestedFix is
+// meant to fix the issue flagged by the diagnostic.
+//
+// TextEdits for a SuggestedFix should not overlap,
+// nor contain edits for other packages.
 type SuggestedFix struct {
 	// A description for this suggested fix to be shown to a user deciding
 	// whether to accept it.
@@ -67,7 +66,6 @@
 
 // A TextEdit represents the replacement of the code between Pos and End with the new text.
 // Each TextEdit should apply to a single file. End should not be earlier in the file than Pos.
-// Experimental: This API is experimental and may change in the future.
 type TextEdit struct {
 	// For a pure insertion, End can either be set to Pos or token.NoPos.
 	Pos     token.Pos
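A pass attaches these fields through analysis.Diagnostic; a minimal sketch of reporting a diagnostic with a single deleting fix, assuming `call` is an *ast.CallExpr already in hand inside some run function:

	pass.Report(analysis.Diagnostic{
		Pos:     call.Pos(),
		End:     call.End(),
		Message: "redundant call",
		SuggestedFixes: []analysis.SuggestedFix{{
			Message: "Remove the call",
			TextEdits: []analysis.TextEdit{{
				Pos:     call.Pos(),
				End:     call.End(),
				NewText: nil, // a pure deletion: replace the range with nothing
			}},
		}},
	})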
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/doc.go
index c5429c9..44867d5 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/doc.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/doc.go
@@ -191,7 +191,7 @@
 files such as assembly. To report a diagnostic against a line of a
 raw text file, use the following sequence:
 
-	content, err := ioutil.ReadFile(filename)
+	content, err := os.ReadFile(filename)
 	if err != nil { ... }
 	tf := fset.AddFile(filename, -1, len(content))
 	tf.SetLinesForContent(content)
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go
index e127a42..9e3fde7 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go
@@ -14,7 +14,6 @@
 	"fmt"
 	"go/token"
 	"io"
-	"io/ioutil"
 	"log"
 	"os"
 	"strconv"
@@ -331,7 +330,7 @@
 		if !end.IsValid() {
 			end = posn
 		}
-		data, _ := ioutil.ReadFile(posn.Filename)
+		data, _ := os.ReadFile(posn.Filename)
 		lines := strings.Split(string(data), "\n")
 		for i := posn.Line - Context; i <= end.Line+Context; i++ {
 			if 1 <= i && i <= len(lines) {
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go
new file mode 100644
index 0000000..6976f0d
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package appends defines an Analyzer that detects
+// if there is only one variable in append.
+package appends
+
+import (
+	_ "embed"
+	"go/ast"
+	"go/types"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+	"golang.org/x/tools/go/ast/inspector"
+	"golang.org/x/tools/go/types/typeutil"
+)
+
+//go:embed doc.go
+var doc string
+
+var Analyzer = &analysis.Analyzer{
+	Name:     "appends",
+	Doc:      analysisutil.MustExtractDoc(doc, "appends"),
+	URL:      "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/appends",
+	Requires: []*analysis.Analyzer{inspect.Analyzer},
+	Run:      run,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+
+	nodeFilter := []ast.Node{
+		(*ast.CallExpr)(nil),
+	}
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		call := n.(*ast.CallExpr)
+		b, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Builtin)
+		if ok && b.Name() == "append" && len(call.Args) == 1 {
+			pass.ReportRangef(call, "append with no values")
+		}
+	})
+
+	return nil, nil
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/doc.go
new file mode 100644
index 0000000..2e6a2e0
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/doc.go
@@ -0,0 +1,20 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package appends defines an Analyzer that detects
+// if there is only one variable in append.
+//
+// # Analyzer appends
+//
+// appends: check for missing values after append
+//
+// This checker reports calls to append that pass
+// no values to be appended to the slice.
+//
+//	s := []string{"a", "b", "c"}
+//	_ = append(s)
+//
+// Such calls are always no-ops and often indicate an
+// underlying mistake.
+package appends
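One minimal way to run the new analyzer on its own is through the standard singlechecker driver (a sketch; the module wiring around it is assumed):

	package main

	import (
		"golang.org/x/tools/go/analysis/passes/appends"
		"golang.org/x/tools/go/analysis/singlechecker"
	)

	func main() { singlechecker.Main(appends.Analyzer) }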
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go
index 10489be..3bfd501 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go
@@ -18,6 +18,7 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/inspect"
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/go/ast/inspector"
 )
 
@@ -77,7 +78,7 @@
 
 // isMapIndex returns true if e is a map index expression.
 func isMapIndex(info *types.Info, e ast.Expr) bool {
-	if idx, ok := analysisutil.Unparen(e).(*ast.IndexExpr); ok {
+	if idx, ok := astutil.Unparen(e).(*ast.IndexExpr); ok {
 		if typ := info.Types[idx.X].Type; typ != nil {
 			_, ok := typ.Underlying().(*types.Map)
 			return ok
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go
index b40e081..931f9ca 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go
@@ -8,12 +8,12 @@
 	_ "embed"
 	"go/ast"
 	"go/token"
-	"go/types"
 
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/inspect"
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
 	"golang.org/x/tools/go/ast/inspector"
+	"golang.org/x/tools/go/types/typeutil"
 )
 
 //go:embed doc.go
@@ -52,18 +52,8 @@
 			if !ok {
 				continue
 			}
-			sel, ok := call.Fun.(*ast.SelectorExpr)
-			if !ok {
-				continue
-			}
-			pkgIdent, _ := sel.X.(*ast.Ident)
-			pkgName, ok := pass.TypesInfo.Uses[pkgIdent].(*types.PkgName)
-			if !ok || pkgName.Imported().Path() != "sync/atomic" {
-				continue
-			}
-
-			switch sel.Sel.Name {
-			case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr":
+			fn := typeutil.StaticCallee(pass.TypesInfo, call)
+			if analysisutil.IsFunctionNamed(fn, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") {
 				checkAtomicAddAssignment(pass, n.Lhs[i], call)
 			}
 		}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go
index 4219f08..5643297 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go
@@ -14,6 +14,7 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/inspect"
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/go/ast/inspector"
 )
 
@@ -83,7 +84,7 @@
 	i := 0
 	var sets [][]ast.Expr
 	for j := 0; j <= len(exprs); j++ {
-		if j == len(exprs) || hasSideEffects(info, exprs[j]) {
+		if j == len(exprs) || analysisutil.HasSideEffects(info, exprs[j]) {
 			if i < j {
 				sets = append(sets, exprs[i:j])
 			}
@@ -162,46 +163,13 @@
 	}
 }
 
-// hasSideEffects reports whether evaluation of e has side effects.
-func hasSideEffects(info *types.Info, e ast.Expr) bool {
-	safe := true
-	ast.Inspect(e, func(node ast.Node) bool {
-		switch n := node.(type) {
-		case *ast.CallExpr:
-			typVal := info.Types[n.Fun]
-			switch {
-			case typVal.IsType():
-				// Type conversion, which is safe.
-			case typVal.IsBuiltin():
-				// Builtin func, conservatively assumed to not
-				// be safe for now.
-				safe = false
-				return false
-			default:
-				// A non-builtin func or method call.
-				// Conservatively assume that all of them have
-				// side effects for now.
-				safe = false
-				return false
-			}
-		case *ast.UnaryExpr:
-			if n.Op == token.ARROW {
-				safe = false
-				return false
-			}
-		}
-		return true
-	})
-	return !safe
-}
-
 // split returns a slice of all subexpressions in e that are connected by op.
 // For example, given 'a || (b || c) || d' with the or op,
 // split returns []{d, c, b, a}.
 // seen[e] is already true; any newly processed exprs are added to seen.
 func (op boolOp) split(e ast.Expr, seen map[*ast.BinaryExpr]bool) (exprs []ast.Expr) {
 	for {
-		e = unparen(e)
+		e = astutil.Unparen(e)
 		if b, ok := e.(*ast.BinaryExpr); ok && b.Op == op.tok {
 			seen[b] = true
 			exprs = append(exprs, op.split(b.Y, seen)...)
@@ -213,14 +181,3 @@
 	}
 	return
 }
-
-// unparen returns e with any enclosing parentheses stripped.
-func unparen(e ast.Expr) ast.Expr {
-	for {
-		p, ok := e.(*ast.ParenExpr)
-		if !ok {
-			return e
-		}
-		e = p.X
-	}
-}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go
index a2a4a89..55bdad7 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go
@@ -40,7 +40,7 @@
 	}
 	for _, name := range pass.IgnoredFiles {
 		if strings.HasSuffix(name, ".go") {
-			f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments)
+			f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments|parser.SkipObjectResolution)
 			if err != nil {
 				// Not valid Go source code - not our job to diagnose, so ignore.
 				return nil, nil
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go
index c18b84b..4e86439 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go
@@ -19,6 +19,7 @@
 
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+	"golang.org/x/tools/go/ast/astutil"
 )
 
 const debug = false
@@ -64,7 +65,7 @@
 
 		// Is this a C.f() call?
 		var name string
-		if sel, ok := analysisutil.Unparen(call.Fun).(*ast.SelectorExpr); ok {
+		if sel, ok := astutil.Unparen(call.Fun).(*ast.SelectorExpr); ok {
 			if id, ok := sel.X.(*ast.Ident); ok && id.Name == "C" {
 				name = sel.Sel.Name
 			}
@@ -180,7 +181,7 @@
 		// If f is a cgo-generated file, Position reports
 		// the original file, honoring //line directives.
 		filename := fset.Position(raw.Pos()).Filename
-		f, err := parser.ParseFile(fset, filename, nil, parser.Mode(0))
+		f, err := parser.ParseFile(fset, filename, nil, parser.SkipObjectResolution)
 		if err != nil {
 			return nil, nil, fmt.Errorf("can't parse raw cgo file: %v", err)
 		}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go
index 20fb708..847063b 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go
@@ -37,7 +37,7 @@
 var Analyzer = &analysis.Analyzer{
 	Name:             "composites",
 	Doc:              Doc,
-	URL:              "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/composites",
+	URL:              "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/composite",
 	Requires:         []*analysis.Analyzer{inspect.Analyzer},
 	RunDespiteErrors: true,
 	Run:              run,
@@ -72,7 +72,7 @@
 		}
 		var structuralTypes []types.Type
 		switch typ := typ.(type) {
-		case *typeparams.TypeParam:
+		case *types.TypeParam:
 			terms, err := typeparams.StructuralTerms(typ)
 			if err != nil {
 				return // invalid type
@@ -163,7 +163,7 @@
 	case *types.Named:
 		// names in package foo are local to foo_test too
 		return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test")
-	case *typeparams.TypeParam:
+	case *types.TypeParam:
 		return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test")
 	}
 	return false
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
index b3ca8ad..6cbbc7e 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
@@ -16,6 +16,7 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/inspect"
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/go/ast/inspector"
 	"golang.org/x/tools/internal/typeparams"
 )
@@ -223,6 +224,8 @@
 }
 
 func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath {
+	x = astutil.Unparen(x) // ignore parens on rhs
+
 	if _, ok := x.(*ast.CompositeLit); ok {
 		return nil
 	}
@@ -231,7 +234,7 @@
 		return nil
 	}
 	if star, ok := x.(*ast.StarExpr); ok {
-		if _, ok := star.X.(*ast.CallExpr); ok {
+		if _, ok := astutil.Unparen(star.X).(*ast.CallExpr); ok {
 			// A call may return a pointer to a zero value.
 			return nil
 		}
@@ -242,29 +245,23 @@
 // lockPath returns a typePath describing the location of a lock value
 // contained in typ. If there is no contained lock, it returns nil.
 //
-// The seenTParams map is used to short-circuit infinite recursion via type
-// parameters.
-func lockPath(tpkg *types.Package, typ types.Type, seenTParams map[*typeparams.TypeParam]bool) typePath {
-	if typ == nil {
+// The seen map is used to short-circuit infinite recursion due to type cycles.
+func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typePath {
+	if typ == nil || seen[typ] {
 		return nil
 	}
+	if seen == nil {
+		seen = make(map[types.Type]bool)
+	}
+	seen[typ] = true
 
-	if tpar, ok := typ.(*typeparams.TypeParam); ok {
-		if seenTParams == nil {
-			// Lazily allocate seenTParams, since the common case will not involve
-			// any type parameters.
-			seenTParams = make(map[*typeparams.TypeParam]bool)
-		}
-		if seenTParams[tpar] {
-			return nil
-		}
-		seenTParams[tpar] = true
+	if tpar, ok := typ.(*types.TypeParam); ok {
 		terms, err := typeparams.StructuralTerms(tpar)
 		if err != nil {
 			return nil // invalid type
 		}
 		for _, term := range terms {
-			subpath := lockPath(tpkg, term.Type(), seenTParams)
+			subpath := lockPath(tpkg, term.Type(), seen)
 			if len(subpath) > 0 {
 				if term.Tilde() {
 					// Prepend a tilde to our lock path entry to clarify the resulting
@@ -298,7 +295,7 @@
 	ttyp, ok := typ.Underlying().(*types.Tuple)
 	if ok {
 		for i := 0; i < ttyp.Len(); i++ {
-			subpath := lockPath(tpkg, ttyp.At(i).Type(), seenTParams)
+			subpath := lockPath(tpkg, ttyp.At(i).Type(), seen)
 			if subpath != nil {
 				return append(subpath, typ.String())
 			}
@@ -323,16 +320,14 @@
 	// In go1.10, sync.noCopy did not implement Locker.
 	// (The Unlock method was added only in CL 121876.)
 	// TODO(adonovan): remove workaround when we drop go1.10.
-	if named, ok := typ.(*types.Named); ok &&
-		named.Obj().Name() == "noCopy" &&
-		named.Obj().Pkg().Path() == "sync" {
+	if analysisutil.IsNamedType(typ, "sync", "noCopy") {
 		return []string{typ.String()}
 	}
 
 	nfields := styp.NumFields()
 	for i := 0; i < nfields; i++ {
 		ftyp := styp.Field(i).Type()
-		subpath := lockPath(tpkg, ftyp, seenTParams)
+		subpath := lockPath(tpkg, ftyp, seen)
 		if subpath != nil {
 			return append(subpath, typ.String())
 		}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
new file mode 100644
index 0000000..5e8e80a
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
@@ -0,0 +1,59 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package defers
+
+import (
+	_ "embed"
+	"go/ast"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+	"golang.org/x/tools/go/ast/inspector"
+	"golang.org/x/tools/go/types/typeutil"
+)
+
+//go:embed doc.go
+var doc string
+
+// Analyzer is the defers analyzer.
+var Analyzer = &analysis.Analyzer{
+	Name:     "defers",
+	Requires: []*analysis.Analyzer{inspect.Analyzer},
+	URL:      "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/defers",
+	Doc:      analysisutil.MustExtractDoc(doc, "defers"),
+	Run:      run,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	if !analysisutil.Imports(pass.Pkg, "time") {
+		return nil, nil
+	}
+
+	checkDeferCall := func(node ast.Node) bool {
+		switch v := node.(type) {
+		case *ast.CallExpr:
+			if analysisutil.IsFunctionNamed(typeutil.StaticCallee(pass.TypesInfo, v), "time", "Since") {
+				pass.Reportf(v.Pos(), "call to time.Since is not deferred")
+			}
+		case *ast.FuncLit:
+			return false // prune
+		}
+		return true
+	}
+
+	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+
+	nodeFilter := []ast.Node{
+		(*ast.DeferStmt)(nil),
+	}
+
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		d := n.(*ast.DeferStmt)
+		ast.Inspect(d.Call, checkDeferCall)
+	})
+
+	return nil, nil
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/doc.go
new file mode 100644
index 0000000..bdb1351
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/doc.go
@@ -0,0 +1,25 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package defers defines an Analyzer that checks for common mistakes in defer
+// statements.
+//
+// # Analyzer defers
+//
+// defers: report common mistakes in defer statements
+//
+// The defers analyzer reports a diagnostic when a defer statement would
+// result in a non-deferred call to time.Since, as experience has shown
+// that this is nearly always a mistake.
+//
+// For example:
+//
+//	start := time.Now()
+//	...
+//	defer recordLatency(time.Since(start)) // error: call to time.Since is not deferred
+//
+// The correct code is:
+//
+//	defer func() { recordLatency(time.Since(start)) }()
+package defers
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
index 1146d7b..2691f18 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
@@ -124,7 +124,7 @@
 	for text != "" {
 		offset := len(fullText) - len(text)
 		var line string
-		line, text, _ = stringsCut(text, "\n")
+		line, text, _ = strings.Cut(text, "\n")
 
 		if !inStar && strings.HasPrefix(line, "//") {
 			check.comment(pos+token.Pos(offset), line)
@@ -137,7 +137,7 @@
 			line = strings.TrimSpace(line)
 			if inStar {
 				var ok bool
-				_, line, ok = stringsCut(line, "*/")
+				_, line, ok = strings.Cut(line, "*/")
 				if !ok {
 					break
 				}
@@ -200,14 +200,6 @@
 	}
 }
 
-// Go 1.18 strings.Cut.
-func stringsCut(s, sep string) (before, after string, found bool) {
-	if i := strings.Index(s, sep); i >= 0 {
-		return s[:i], s[i+len(sep):], true
-	}
-	return s, "", false
-}
-
 // Go 1.20 strings.CutPrefix.
 func stringsCutPrefix(s, prefix string) (after string, found bool) {
 	if !strings.HasPrefix(s, prefix) {
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go
index 2fcbdfa..7f62ad4 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go
@@ -51,15 +51,12 @@
 	inspect.Preorder(nodeFilter, func(n ast.Node) {
 		call := n.(*ast.CallExpr)
 		fn := typeutil.StaticCallee(pass.TypesInfo, call)
-		if fn == nil {
-			return // not a static call
+		if !analysisutil.IsFunctionNamed(fn, "errors", "As") {
+			return
 		}
 		if len(call.Args) < 2 {
 			return // not enough arguments, e.g. called with return values of another function
 		}
-		if fn.FullName() != "errors.As" {
-			return
-		}
 		if err := checkAsTarget(pass, call.Args[1]); err != nil {
 			pass.ReportRangef(call, "%v", err)
 		}
@@ -69,9 +66,6 @@
 
 var errorType = types.Universe.Lookup("error").Type()
 
-// pointerToInterfaceOrError reports whether the type of e is a pointer to an interface or a type implementing error,
-// or is the empty interface.
-
 // checkAsTarget reports an error if the second argument to errors.As is invalid.
 func checkAsTarget(pass *analysis.Pass, e ast.Expr) error {
 	t := pass.TypesInfo.Types[e].Type
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go
index 61c3b76..c6b6c81 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go
@@ -116,7 +116,7 @@
 	if res.Len() != 2 {
 		return false // the function called does not return two values.
 	}
-	if ptr, ok := res.At(0).Type().(*types.Pointer); !ok || !isNamedType(ptr.Elem(), "net/http", "Response") {
+	if ptr, ok := res.At(0).Type().(*types.Pointer); !ok || !analysisutil.IsNamedType(ptr.Elem(), "net/http", "Response") {
 		return false // the first return type is not *http.Response.
 	}
 
@@ -131,11 +131,11 @@
 		return ok && id.Name == "http" // function in net/http package.
 	}
 
-	if isNamedType(typ, "net/http", "Client") {
+	if analysisutil.IsNamedType(typ, "net/http", "Client") {
 		return true // method on http.Client.
 	}
 	ptr, ok := typ.(*types.Pointer)
-	return ok && isNamedType(ptr.Elem(), "net/http", "Client") // method on *http.Client.
+	return ok && analysisutil.IsNamedType(ptr.Elem(), "net/http", "Client") // method on *http.Client.
 }
 
 // restOfBlock, given a traversal stack, finds the innermost containing
@@ -171,13 +171,3 @@
 		return nil
 	}
 }
-
-// isNamedType reports whether t is the named type path.name.
-func isNamedType(t types.Type, path, name string) bool {
-	n, ok := t.(*types.Named)
-	if !ok {
-		return false
-	}
-	obj := n.Obj()
-	return obj.Name() == name && obj.Pkg() != nil && obj.Pkg().Path() == path
-}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go
index b84577f..12507f9 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go
@@ -95,14 +95,14 @@
 		return w.isParameterized(t.Elem())
 
 	case *types.Named:
-		list := typeparams.NamedTypeArgs(t)
+		list := t.TypeArgs()
 		for i, n := 0, list.Len(); i < n; i++ {
 			if w.isParameterized(list.At(i)) {
 				return true
 			}
 		}
 
-	case *typeparams.TypeParam:
+	case *types.TypeParam:
 		return true
 
 	default:
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/extractdoc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/extractdoc.go
deleted file mode 100644
index 0e175ca..0000000
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/extractdoc.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package analysisutil
-
-import (
-	"fmt"
-	"go/parser"
-	"go/token"
-	"strings"
-)
-
-// MustExtractDoc is like [ExtractDoc] but it panics on error.
-//
-// To use, define a doc.go file such as:
-//
-//	// Package halting defines an analyzer of program termination.
-//	//
-//	// # Analyzer halting
-//	//
-//	// halting: reports whether execution will halt.
-//	//
-//	// The halting analyzer reports a diagnostic for functions
-//	// that run forever. To suppress the diagnostics, try inserting
-//	// a 'break' statement into each loop.
-//	package halting
-//
-//	import _ "embed"
-//
-//	//go:embed doc.go
-//	var doc string
-//
-// And declare your analyzer as:
-//
-//	var Analyzer = &analysis.Analyzer{
-//		Name:             "halting",
-//		Doc:              analysisutil.MustExtractDoc(doc, "halting"),
-//		...
-//	}
-func MustExtractDoc(content, name string) string {
-	doc, err := ExtractDoc(content, name)
-	if err != nil {
-		panic(err)
-	}
-	return doc
-}
-
-// ExtractDoc extracts a section of a package doc comment from the
-// provided contents of an analyzer package's doc.go file.
-//
-// A section is a portion of the comment between one heading and
-// the next, using this form:
-//
-//	# Analyzer NAME
-//
-//	NAME: SUMMARY
-//
-//	Full description...
-//
-// where NAME matches the name argument, and SUMMARY is a brief
-// verb-phrase that describes the analyzer. The following lines, up
-// until the next heading or the end of the comment, contain the full
-// description. ExtractDoc returns the portion following the colon,
-// which is the form expected by Analyzer.Doc.
-//
-// Example:
-//
-//	# Analyzer printf
-//
-//	printf: checks consistency of calls to printf
-//
-//	The printf analyzer checks consistency of calls to printf.
-//	Here is the complete description...
-//
-// This notation allows a single doc comment to provide documentation
-// for multiple analyzers, each in its own section.
-// The HTML anchors generated for each heading are predictable.
-//
-// It returns an error if the content was not a valid Go source file
-// containing a package doc comment with a heading of the required
-// form.
-//
-// This machinery enables the package documentation (typically
-// accessible via the web at https://pkg.go.dev/) and the command
-// documentation (typically printed to a terminal) to be derived from
-// the same source and formatted appropriately.
-func ExtractDoc(content, name string) (string, error) {
-	if content == "" {
-		return "", fmt.Errorf("empty Go source file")
-	}
-	fset := token.NewFileSet()
-	f, err := parser.ParseFile(fset, "", content, parser.ParseComments|parser.PackageClauseOnly)
-	if err != nil {
-		return "", fmt.Errorf("not a Go source file")
-	}
-	if f.Doc == nil {
-		return "", fmt.Errorf("Go source file has no package doc comment")
-	}
-	for _, section := range strings.Split(f.Doc.Text(), "\n# ") {
-		if body := strings.TrimPrefix(section, "Analyzer "+name); body != section &&
-			body != "" &&
-			body[0] == '\r' || body[0] == '\n' {
-			body = strings.TrimSpace(body)
-			rest := strings.TrimPrefix(body, name+":")
-			if rest == body {
-				return "", fmt.Errorf("'Analyzer %s' heading not followed by '%s: summary...' line", name, name)
-			}
-			return strings.TrimSpace(rest), nil
-		}
-	}
-	return "", fmt.Errorf("package doc comment contains no 'Analyzer %s' heading", name)
-}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go
index ac37e47..3f01b3b 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go
@@ -12,7 +12,9 @@
 	"go/printer"
 	"go/token"
 	"go/types"
-	"io/ioutil"
+	"os"
+
+	"golang.org/x/tools/internal/analysisinternal"
 )
 
 // Format returns a string representation of the expression.
@@ -55,21 +57,10 @@
 	return !safe
 }
 
-// Unparen returns e with any enclosing parentheses stripped.
-func Unparen(e ast.Expr) ast.Expr {
-	for {
-		p, ok := e.(*ast.ParenExpr)
-		if !ok {
-			return e
-		}
-		e = p.X
-	}
-}
-
 // ReadFile reads a file and adds it to the FileSet
 // so that we can report errors against it using lineStart.
 func ReadFile(fset *token.FileSet, filename string) ([]byte, *token.File, error) {
-	content, err := ioutil.ReadFile(filename)
+	content, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -118,3 +109,48 @@
 	}
 	return false
 }
+
+// IsNamedType reports whether t is the named type with the given package path
+// and one of the given names.
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsNamedType(t types.Type, pkgPath string, names ...string) bool {
+	n, ok := t.(*types.Named)
+	if !ok {
+		return false
+	}
+	obj := n.Obj()
+	if obj == nil || obj.Pkg() == nil || obj.Pkg().Path() != pkgPath {
+		return false
+	}
+	name := obj.Name()
+	for _, n := range names {
+		if name == n {
+			return true
+		}
+	}
+	return false
+}
+
+// IsFunctionNamed reports whether f is a top-level function defined in the
+// given package and has one of the given names.
+// It returns false if f is nil or a method.
+func IsFunctionNamed(f *types.Func, pkgPath string, names ...string) bool {
+	if f == nil {
+		return false
+	}
+	if f.Pkg() == nil || f.Pkg().Path() != pkgPath {
+		return false
+	}
+	if f.Type().(*types.Signature).Recv() != nil {
+		return false
+	}
+	for _, n := range names {
+		if f.Name() == n {
+			return true
+		}
+	}
+	return false
+}
+
+var MustExtractDoc = analysisinternal.MustExtractDoc
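These helpers are intended to pair with typeutil callee lookups, as the pass changes elsewhere in this patch do; a small sketch of the pattern inside a run function (the "os" functions checked here are illustrative only):

	if fn := typeutil.StaticCallee(pass.TypesInfo, call); analysisutil.IsFunctionNamed(fn, "os", "ReadFile", "WriteFile") {
		// IsFunctionNamed tolerates a nil fn, so no separate nil check is needed.
		pass.ReportRangef(call, "found a call to os.%s", fn.Name())
	}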
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/doc.go
index dc544df..c95b1c1 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/doc.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/doc.go
@@ -14,8 +14,12 @@
 // in such a way (e.g. with go or defer) that it may outlive the loop
 // iteration and possibly observe the wrong value of the variable.
 //
+// Note: An iteration variable can only outlive a loop iteration in Go versions <=1.21.
+// In Go 1.22 and later, the loop variable lifetimes changed to create a new
+// iteration variable per loop iteration. (See go.dev/issue/60078.)
+//
 // In this example, all the deferred functions run after the loop has
-// completed, so all observe the final value of v.
+// completed, so all observe the final value of v [<go1.22].
 //
 //	for _, v := range list {
 //	    defer func() {
@@ -32,7 +36,10 @@
 //	    }()
 //	}
 //
-// The next example uses a go statement and has a similar problem.
+// After Go version 1.22, the previous two for loops are equivalent
+// and both are correct.
+//
+// The next example uses a go statement and has a similar problem [<go1.22].
 // In addition, it has a data race because the loop updates v
 // concurrent with the goroutines accessing it.
 //
@@ -56,7 +63,7 @@
 //	}
 //
 // The t.Parallel() call causes the rest of the function to execute
-// concurrent with the loop.
+// concurrent with the loop [<go1.22].
 //
 // The analyzer reports references only in the last statement,
 // as it is not deep enough to understand the effects of subsequent
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
index 5620c35..4724c9f 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
@@ -14,6 +14,7 @@
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
 	"golang.org/x/tools/go/ast/inspector"
 	"golang.org/x/tools/go/types/typeutil"
+	"golang.org/x/tools/internal/versions"
 )
 
 //go:embed doc.go
@@ -31,10 +32,15 @@
 	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
 
 	nodeFilter := []ast.Node{
+		(*ast.File)(nil),
 		(*ast.RangeStmt)(nil),
 		(*ast.ForStmt)(nil),
 	}
-	inspect.Preorder(nodeFilter, func(n ast.Node) {
+	inspect.Nodes(nodeFilter, func(n ast.Node, push bool) bool {
+		if !push {
+			// inspect.Nodes is slightly suboptimal as we only use push=true.
+			return true
+		}
 		// Find the variables updated by the loop statement.
 		var vars []types.Object
 		addVar := func(expr ast.Expr) {
@@ -46,6 +52,11 @@
 		}
 		var body *ast.BlockStmt
 		switch n := n.(type) {
+		case *ast.File:
+			// Only traverse the file if its goversion is strictly before go1.22.
+			goversion := versions.Lang(versions.FileVersions(pass.TypesInfo, n))
+			// goversion is empty for older go versions (or the version is invalid).
+			return goversion == "" || versions.Compare(goversion, "go1.22") < 0
 		case *ast.RangeStmt:
 			body = n.Body
 			addVar(n.Key)
@@ -64,7 +75,7 @@
 			}
 		}
 		if vars == nil {
-			return
+			return true
 		}
 
 		// Inspect statements to find function literals that may be run outside of
@@ -113,6 +124,7 @@
 				}
 			}
 		}
+		return true
 	})
 	return nil, nil
 }
@@ -359,20 +371,5 @@
 	if ptr, ok := recv.Type().(*types.Pointer); ok {
 		rtype = ptr.Elem()
 	}
-	named, ok := rtype.(*types.Named)
-	if !ok {
-		return false
-	}
-	if named.Obj().Name() != typeName {
-		return false
-	}
-	pkg := f.Pkg()
-	if pkg == nil {
-		return false
-	}
-	if pkg.Path() != pkgPath {
-		return false
-	}
-
-	return true
+	return analysisutil.IsNamedType(rtype, pkgPath, typeName)
 }
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go
index 6df1343..778f7f1 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go
@@ -62,7 +62,7 @@
 			obj = pass.TypesInfo.Uses[v]
 		case *ast.SelectorExpr:
 			obj = pass.TypesInfo.Uses[v.Sel]
-		case *ast.IndexExpr, *typeparams.IndexListExpr:
+		case *ast.IndexExpr, *ast.IndexListExpr:
 			// Check generic functions such as "f[T1,T2]".
 			x, _, _, _ := typeparams.UnpackIndexExpr(v)
 			if id, ok := x.(*ast.Ident); ok {
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
index b2b8c67..070654f 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
@@ -511,15 +511,10 @@
 	sig := fn.Type().(*types.Signature)
 	return sig.Params().Len() == 2 &&
 		sig.Results().Len() == 0 &&
-		isNamed(sig.Params().At(0).Type(), "fmt", "State") &&
+		analysisutil.IsNamedType(sig.Params().At(0).Type(), "fmt", "State") &&
 		types.Identical(sig.Params().At(1).Type(), types.Typ[types.Rune])
 }
 
-func isNamed(T types.Type, pkgpath, name string) bool {
-	named, ok := T.(*types.Named)
-	return ok && named.Obj().Pkg().Path() == pkgpath && named.Obj().Name() == name
-}
-
 // formatState holds the parsed representation of a printf directive such as "%3.*[4]d".
 // It is constructed by parsePrintfVerb.
 type formatState struct {
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go
index 7cbb0bd..ab98e56 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go
@@ -72,7 +72,7 @@
 		return true
 	}
 
-	if typ, _ := typ.(*typeparams.TypeParam); typ != nil {
+	if typ, _ := typ.(*types.TypeParam); typ != nil {
 		// Avoid infinite recursion through type parameters.
 		if m.seen[typ] {
 			return true
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go
index bafb911..e272df7 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go
@@ -99,7 +99,7 @@
 	}
 	var structuralTypes []types.Type
 	switch t := t.(type) {
-	case *typeparams.TypeParam:
+	case *types.TypeParam:
 		terms, err := typeparams.StructuralTerms(t)
 		if err != nil {
 			return // invalid type
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go
index 92c1da8..a1323c3 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go
@@ -139,7 +139,7 @@
 }
 
 func isAttr(t types.Type) bool {
-	return isNamed(t, "log/slog", "Attr")
+	return analysisutil.IsNamedType(t, "log/slog", "Attr")
 }
 
 // shortName returns a name for the function that is shorter than FullName.
@@ -232,12 +232,3 @@
 	sel := info.Selections[s]
 	return sel != nil && sel.Kind() == types.MethodExpr
 }
-
-// isNamed reports whether t is exactly a named type in a package with a given path.
-func isNamed(t types.Type, path, name string) bool {
-	if n, ok := t.(*types.Named); ok {
-		obj := n.Obj()
-		return obj.Pkg() != nil && obj.Pkg().Path() == path && obj.Name() == name
-	}
-	return false
-}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
index bb04dae..b2591cc 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
@@ -195,7 +195,7 @@
 func structuralTypes(t types.Type) ([]types.Type, error) {
 	var structuralTypes []types.Type
 	switch t := t.(type) {
-	case *typeparams.TypeParam:
+	case *types.TypeParam:
 		terms, err := typeparams.StructuralTerms(t)
 		if err != nil {
 			return nil, err
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/doc.go
index a68adb1..4cd5b71 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/doc.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/doc.go
@@ -7,7 +7,7 @@
 //
 // # Analyzer testinggoroutine
 //
-// testinggoroutine: report calls to (*testing.T).Fatal from goroutines started by a test.
+// testinggoroutine: report calls to (*testing.T).Fatal from goroutines started by a test
 //
 // Functions that abruptly terminate a test, such as the Fatal, Fatalf, FailNow, and
 // Skip{,f,Now} methods of *testing.T, must be called from the test goroutine itself.
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
index 907b715..dc5307a 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
@@ -6,18 +6,28 @@
 
 import (
 	_ "embed"
+	"fmt"
 	"go/ast"
+	"go/token"
+	"go/types"
 
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/inspect"
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/go/ast/inspector"
-	"golang.org/x/tools/internal/typeparams"
+	"golang.org/x/tools/go/types/typeutil"
 )
 
 //go:embed doc.go
 var doc string
 
+var reportSubtest bool
+
+func init() {
+	Analyzer.Flags.BoolVar(&reportSubtest, "subtest", false, "whether to check if t.Run subtest is terminated correctly; experimental")
+}
+
 var Analyzer = &analysis.Analyzer{
 	Name:     "testinggoroutine",
 	Doc:      analysisutil.MustExtractDoc(doc, "testinggoroutine"),
@@ -26,15 +36,6 @@
 	Run:      run,
 }
 
-var forbidden = map[string]bool{
-	"FailNow": true,
-	"Fatal":   true,
-	"Fatalf":  true,
-	"Skip":    true,
-	"Skipf":   true,
-	"SkipNow": true,
-}
-
 func run(pass *analysis.Pass) (interface{}, error) {
 	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
 
@@ -42,38 +43,90 @@
 		return nil, nil
 	}
 
-	// Filter out anything that isn't a function declaration.
-	onlyFuncs := []ast.Node{
-		(*ast.FuncDecl)(nil),
+	toDecl := localFunctionDecls(pass.TypesInfo, pass.Files)
+
+	// asyncs maps nodes whose statements will be executed concurrently
+	// with respect to some test function, to the call sites where they
+	// are invoked asynchronously. There may be multiple such call sites
+	// for e.g. test helpers.
+	asyncs := make(map[ast.Node][]*asyncCall)
+	var regions []ast.Node
+	addCall := func(c *asyncCall) {
+		if c != nil {
+			r := c.region
+			if asyncs[r] == nil {
+				regions = append(regions, r)
+			}
+			asyncs[r] = append(asyncs[r], c)
+		}
 	}
 
-	inspect.Nodes(onlyFuncs, func(node ast.Node, push bool) bool {
-		fnDecl, ok := node.(*ast.FuncDecl)
-		if !ok {
+	// Collect all of the go callee() and t.Run(name, callee) extents.
+	inspect.Nodes([]ast.Node{
+		(*ast.FuncDecl)(nil),
+		(*ast.GoStmt)(nil),
+		(*ast.CallExpr)(nil),
+	}, func(node ast.Node, push bool) bool {
+		if !push {
 			return false
 		}
+		switch node := node.(type) {
+		case *ast.FuncDecl:
+			return hasBenchmarkOrTestParams(node)
 
-		if !hasBenchmarkOrTestParams(fnDecl) {
-			return false
+		case *ast.GoStmt:
+			c := goAsyncCall(pass.TypesInfo, node, toDecl)
+			addCall(c)
+
+		case *ast.CallExpr:
+			c := tRunAsyncCall(pass.TypesInfo, node)
+			addCall(c)
 		}
+		return true
+	})
 
-		// Now traverse the benchmark/test's body and check that none of the
-		// forbidden methods are invoked in the goroutines within the body.
-		ast.Inspect(fnDecl, func(n ast.Node) bool {
-			goStmt, ok := n.(*ast.GoStmt)
+	// Check for t.Forbidden() calls within each region r that is a
+	// callee in some go r() or a t.Run("name", r).
+	//
+	// Also considers a special case when r is a go t.Forbidden() call.
+	for _, region := range regions {
+		ast.Inspect(region, func(n ast.Node) bool {
+			if n == region {
+				return true // always descend into the region itself.
+			} else if asyncs[n] != nil {
+				return false // will be visited by another region.
+			}
+
+			call, ok := n.(*ast.CallExpr)
 			if !ok {
 				return true
 			}
+			x, sel, fn := forbiddenMethod(pass.TypesInfo, call)
+			if x == nil {
+				return true
+			}
 
-			checkGoStmt(pass, goStmt)
+			for _, e := range asyncs[region] {
+				if !withinScope(e.scope, x) {
+					forbidden := formatMethod(sel, fn) // e.g. "(*testing.T).Forbidden
 
-			// No need to further traverse the GoStmt since right
-			// above we manually traversed it in the ast.Inspect(goStmt, ...)
-			return false
+					var context string
+					var where analysis.Range = e.async // Put the report at the go fun() or t.Run(name, fun).
+					if _, local := e.fun.(*ast.FuncLit); local {
+						where = call // Put the report at the t.Forbidden() call.
+					} else if id, ok := e.fun.(*ast.Ident); ok {
+						context = fmt.Sprintf(" (%s calls %s)", id.Name, forbidden)
+					}
+					if _, ok := e.async.(*ast.GoStmt); ok {
+						pass.ReportRangef(where, "call to %s from a non-test goroutine%s", forbidden, context)
+					} else if reportSubtest {
+						pass.ReportRangef(where, "call to %s on %s defined outside of the subtest%s", forbidden, x.Name(), context)
+					}
+				}
+			}
+			return true
 		})
-
-		return false
-	})
+	}
 
 	return nil, nil
 }
@@ -100,7 +153,6 @@
 	if !ok {
 		return "", false
 	}
-
 	varPkg := selExpr.X.(*ast.Ident)
 	if varPkg.Name != "testing" {
 		return "", false
@@ -111,73 +163,116 @@
 	return varTypeName, ok
 }
 
-// goStmtFunc returns the ast.Node of a call expression
-// that was invoked as a go statement. Currently, only
-// function literals declared in the same function, and
-// static calls within the same package are supported.
-func goStmtFun(goStmt *ast.GoStmt) ast.Node {
-	switch fun := goStmt.Call.Fun.(type) {
-	case *ast.IndexExpr, *typeparams.IndexListExpr:
-		x, _, _, _ := typeparams.UnpackIndexExpr(fun)
-		id, _ := x.(*ast.Ident)
-		if id == nil {
-			break
-		}
-		if id.Obj == nil {
-			break
-		}
-		if funDecl, ok := id.Obj.Decl.(ast.Node); ok {
-			return funDecl
-		}
-	case *ast.Ident:
-		// TODO(cuonglm): improve this once golang/go#48141 resolved.
-		if fun.Obj == nil {
-			break
-		}
-		if funDecl, ok := fun.Obj.Decl.(ast.Node); ok {
-			return funDecl
-		}
-	case *ast.FuncLit:
-		return goStmt.Call.Fun
-	}
-	return goStmt.Call
+// asyncCall describes a region of code that needs to be checked for
+// t.Forbidden() calls as it is started asynchronously from an async
+// node go fun() or t.Run(name, fun).
+type asyncCall struct {
+	region ast.Node // region of code to check for t.Forbidden() calls.
+	async  ast.Node // *ast.GoStmt or *ast.CallExpr (for t.Run)
+	scope  ast.Node // Report t.Forbidden() if t is not declared within scope.
+	fun    ast.Expr // fun in go fun() or t.Run(name, fun)
 }
 
-// checkGoStmt traverses the goroutine and checks for the
-// use of the forbidden *testing.(B, T) methods.
-func checkGoStmt(pass *analysis.Pass, goStmt *ast.GoStmt) {
-	fn := goStmtFun(goStmt)
-	// Otherwise examine the goroutine to check for the forbidden methods.
-	ast.Inspect(fn, func(n ast.Node) bool {
-		selExpr, ok := n.(*ast.SelectorExpr)
-		if !ok {
-			return true
-		}
+// withinScope returns true if x.Pos() is in [scope.Pos(), scope.End()].
+func withinScope(scope ast.Node, x *types.Var) bool {
+	if scope != nil {
+		return x.Pos() != token.NoPos && scope.Pos() <= x.Pos() && x.Pos() <= scope.End()
+	}
+	return false
+}
 
-		_, bad := forbidden[selExpr.Sel.Name]
-		if !bad {
-			return true
-		}
+// goAsyncCall returns the extent of a call from a go fun() statement.
+func goAsyncCall(info *types.Info, goStmt *ast.GoStmt, toDecl func(*types.Func) *ast.FuncDecl) *asyncCall {
+	call := goStmt.Call
 
-		// Now filter out false positives by the import-path/type.
-		ident, ok := selExpr.X.(*ast.Ident)
-		if !ok {
-			return true
+	fun := astutil.Unparen(call.Fun)
+	if id := funcIdent(fun); id != nil {
+		if lit := funcLitInScope(id); lit != nil {
+			return &asyncCall{region: lit, async: goStmt, scope: nil, fun: fun}
 		}
-		if ident.Obj == nil || ident.Obj.Decl == nil {
-			return true
+	}
+
+	if fn := typeutil.StaticCallee(info, call); fn != nil { // static call or method in the package?
+		if decl := toDecl(fn); decl != nil {
+			return &asyncCall{region: decl, async: goStmt, scope: nil, fun: fun}
 		}
-		field, ok := ident.Obj.Decl.(*ast.Field)
-		if !ok {
-			return true
+	}
+
+	// Check go statement for go t.Forbidden() or go func(){t.Forbidden()}().
+	return &asyncCall{region: goStmt, async: goStmt, scope: nil, fun: fun}
+}
+
+// tRunAsyncCall returns the extent of a call from a t.Run("name", fun) expression.
+func tRunAsyncCall(info *types.Info, call *ast.CallExpr) *asyncCall {
+	if len(call.Args) != 2 {
+		return nil
+	}
+	run := typeutil.Callee(info, call)
+	if run, ok := run.(*types.Func); !ok || !isMethodNamed(run, "testing", "Run") {
+		return nil
+	}
+
+	fun := astutil.Unparen(call.Args[1])
+	if lit, ok := fun.(*ast.FuncLit); ok { // function lit?
+		return &asyncCall{region: lit, async: call, scope: lit, fun: fun}
+	}
+
+	if id := funcIdent(fun); id != nil {
+		if lit := funcLitInScope(id); lit != nil { // function lit in variable?
+			return &asyncCall{region: lit, async: call, scope: lit, fun: fun}
 		}
-		if typeName, ok := typeIsTestingDotTOrB(field.Type); ok {
-			var fnRange analysis.Range = goStmt
-			if _, ok := fn.(*ast.FuncLit); ok {
-				fnRange = selExpr
-			}
-			pass.ReportRangef(fnRange, "call to (*%s).%s from a non-test goroutine", typeName, selExpr.Sel)
-		}
-		return true
-	})
+	}
+
+	// Check within t.Run(name, fun) for calls to t.Forbidden,
+	// e.g. t.Run(name, func(t *testing.T){ t.Forbidden() })
+	return &asyncCall{region: call, async: call, scope: fun, fun: fun}
+}
+
+var forbidden = []string{
+	"FailNow",
+	"Fatal",
+	"Fatalf",
+	"Skip",
+	"Skipf",
+	"SkipNow",
+}
+
+// forbiddenMethod decomposes a call x.m() into (x, x.m, m) where
+// x is a variable, x.m is a selection, and m is the static callee m.
+// Returns (nil, nil, nil) if call is not of this form.
+func forbiddenMethod(info *types.Info, call *ast.CallExpr) (*types.Var, *types.Selection, *types.Func) {
+	// Compare to typeutil.StaticCallee.
+	fun := astutil.Unparen(call.Fun)
+	selExpr, ok := fun.(*ast.SelectorExpr)
+	if !ok {
+		return nil, nil, nil
+	}
+	sel := info.Selections[selExpr]
+	if sel == nil {
+		return nil, nil, nil
+	}
+
+	var x *types.Var
+	if id, ok := astutil.Unparen(selExpr.X).(*ast.Ident); ok {
+		x, _ = info.Uses[id].(*types.Var)
+	}
+	if x == nil {
+		return nil, nil, nil
+	}
+
+	fn, _ := sel.Obj().(*types.Func)
+	if fn == nil || !isMethodNamed(fn, "testing", forbidden...) {
+		return nil, nil, nil
+	}
+	return x, sel, fn
+}
+
+func formatMethod(sel *types.Selection, fn *types.Func) string {
+	var ptr string
+	rtype := sel.Recv()
+	if p, ok := rtype.(*types.Pointer); ok {
+		ptr = "*"
+		rtype = p.Elem()
+	}
+	return fmt.Sprintf("(%s%s).%s", ptr, rtype.String(), fn.Name())
 }
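
For orientation, a minimal sketch (hypothetical test, not from this change) of the patterns the reworked testinggoroutine pass is intended to flag: calling a forbidden *testing.T method from a goroutine started by the test, and calling the parent test's method from inside a t.Run subtest function.

	package example

	import "testing"

	func TestGoroutine(t *testing.T) {
		go func() {
			t.Fatal("boom") // Fatal called from a non-test goroutine
		}()
	}

	func TestSubtest(t *testing.T) {
		t.Run("sub", func(t2 *testing.T) {
			t.FailNow() // t is declared outside the subtest's scope
		})
	}
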
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go
new file mode 100644
index 0000000..d156851
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go
@@ -0,0 +1,96 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testinggoroutine
+
+import (
+	"go/ast"
+	"go/types"
+
+	"golang.org/x/tools/go/ast/astutil"
+	"golang.org/x/tools/internal/typeparams"
+)
+
+// AST and types utilities that are not specific to testinggoroutines.
+
+// localFunctionDecls returns a mapping from *types.Func to *ast.FuncDecl in files.
+func localFunctionDecls(info *types.Info, files []*ast.File) func(*types.Func) *ast.FuncDecl {
+	var fnDecls map[*types.Func]*ast.FuncDecl // computed lazily
+	return func(f *types.Func) *ast.FuncDecl {
+		if f != nil && fnDecls == nil {
+			fnDecls = make(map[*types.Func]*ast.FuncDecl)
+			for _, file := range files {
+				for _, decl := range file.Decls {
+					if fnDecl, ok := decl.(*ast.FuncDecl); ok {
+						if fn, ok := info.Defs[fnDecl.Name].(*types.Func); ok {
+							fnDecls[fn] = fnDecl
+						}
+					}
+				}
+			}
+		}
+		// TODO: once we only support go1.19+, set f = f.Origin() here.
+		return fnDecls[f]
+	}
+}
+
+// isMethodNamed returns true if f is a method defined
+// in package with the path pkgPath with a name in names.
+func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool {
+	if f == nil {
+		return false
+	}
+	if f.Pkg() == nil || f.Pkg().Path() != pkgPath {
+		return false
+	}
+	if f.Type().(*types.Signature).Recv() == nil {
+		return false
+	}
+	for _, n := range names {
+		if f.Name() == n {
+			return true
+		}
+	}
+	return false
+}
+
+func funcIdent(fun ast.Expr) *ast.Ident {
+	switch fun := astutil.Unparen(fun).(type) {
+	case *ast.IndexExpr, *ast.IndexListExpr:
+		x, _, _, _ := typeparams.UnpackIndexExpr(fun) // necessary?
+		id, _ := x.(*ast.Ident)
+		return id
+	case *ast.Ident:
+		return fun
+	default:
+		return nil
+	}
+}
+
+// funcLitInScope returns a FuncLit that id is at least initially assigned to.
+//
+// TODO: This is closely tied to id.Obj which is deprecated.
+func funcLitInScope(id *ast.Ident) *ast.FuncLit {
+	// Compare to (*ast.Object).Pos().
+	if id.Obj == nil {
+		return nil
+	}
+	var rhs ast.Expr
+	switch d := id.Obj.Decl.(type) {
+	case *ast.AssignStmt:
+		for i, x := range d.Lhs {
+			if ident, isIdent := x.(*ast.Ident); isIdent && ident.Name == id.Name && i < len(d.Rhs) {
+				rhs = d.Rhs[i]
+			}
+		}
+	case *ast.ValueSpec:
+		for i, n := range d.Names {
+			if n.Name == id.Name && i < len(d.Values) {
+				rhs = d.Values[i]
+			}
+		}
+	}
+	lit, _ := rhs.(*ast.FuncLit)
+	return lit
+}
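
A sketch (hypothetical input, not from this change) of the indirection funcLitInScope resolves: the identifier f is traced back to the function literal it was assigned, so that literal's body becomes the region the caller checks.

	package example

	import "testing"

	func TestIndirect(t *testing.T) {
		f := func() {
			t.Skip("skipped") // checked because f's literal is the async region
		}
		go f() // goAsyncCall resolves f via funcLitInScope
	}
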
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go
index 9589a46..6db12f3 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go
@@ -17,7 +17,6 @@
 
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
-	"golang.org/x/tools/internal/typeparams"
 )
 
 //go:embed doc.go
@@ -257,13 +256,7 @@
 	if !ok {
 		return false
 	}
-	named, ok := ptr.Elem().(*types.Named)
-	if !ok {
-		return false
-	}
-	obj := named.Obj()
-	// obj.Pkg is nil for the error type.
-	return obj != nil && obj.Pkg() != nil && obj.Pkg().Path() == "testing" && obj.Name() == testingType
+	return analysisutil.IsNamedType(ptr.Elem(), "testing", testingType)
 }
 
 // Validate that fuzz target function's arguments are of accepted types.
@@ -397,7 +390,7 @@
 	if results := fn.Type.Results; results != nil && len(results.List) != 0 {
 		pass.Reportf(fn.Pos(), "%s should return nothing", fnName)
 	}
-	if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 {
+	if tparams := fn.Type.TypeParams; tparams != nil && len(tparams.List) > 0 {
 		pass.Reportf(fn.Pos(), "%s should not have type params", fnName)
 	}
 
@@ -466,7 +459,7 @@
 		return
 	}
 
-	if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 {
+	if tparams := fn.Type.TypeParams; tparams != nil && len(tparams.List) > 0 {
 		// Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters.
 		// We have currently decided to also warn before compilation/package loading. This can help users in IDEs.
 		// TODO(adonovan): use ReportRangef(tparams).
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go
index c45b9fa..eb84502 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go
@@ -88,29 +88,16 @@
 }
 
 func isTimeDotFormat(f *types.Func) bool {
-	if f.Name() != "Format" || f.Pkg().Path() != "time" {
-		return false
-	}
-	sig, ok := f.Type().(*types.Signature)
-	if !ok {
+	if f.Name() != "Format" || f.Pkg() == nil || f.Pkg().Path() != "time" {
 		return false
 	}
 	// Verify that the receiver is time.Time.
-	recv := sig.Recv()
-	if recv == nil {
-		return false
-	}
-	named, ok := recv.Type().(*types.Named)
-	return ok && named.Obj().Name() == "Time"
+	recv := f.Type().(*types.Signature).Recv()
+	return recv != nil && analysisutil.IsNamedType(recv.Type(), "time", "Time")
 }
 
 func isTimeDotParse(f *types.Func) bool {
-	if f.Name() != "Parse" || f.Pkg().Path() != "time" {
-		return false
-	}
-	// Verify that there is no receiver.
-	sig, ok := f.Type().(*types.Signature)
-	return ok && sig.Recv() == nil
+	return analysisutil.IsFunctionNamed(f, "time", "Parse")
 }
 
 // badFormatAt return the start of a bad format in e or -1 if no bad format is found.
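
For context, a sketch of the call timeformat reports; the refactoring above changes only how the receiver type is matched, not the diagnostic.

	package example

	import "time"

	func stamp(t time.Time) string {
		return t.Format("2006-02-01") // timeformat: yyyy-dd-mm; suggests 2006-01-02
	}
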
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
index 7043baa..f4e7352 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
@@ -14,7 +14,6 @@
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
 	"golang.org/x/tools/go/ast/inspector"
 	"golang.org/x/tools/go/types/typeutil"
-	"golang.org/x/tools/internal/typeparams"
 )
 
 //go:embed doc.go
@@ -92,7 +91,7 @@
 
 		t := pass.TypesInfo.Types[call.Args[argidx]].Type
 		switch t.Underlying().(type) {
-		case *types.Pointer, *types.Interface, *typeparams.TypeParam:
+		case *types.Pointer, *types.Interface, *types.TypeParam:
 			return
 		}
 
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go
index e43ac20..32e71ef 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go
@@ -15,6 +15,7 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/inspect"
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/go/ast/inspector"
 )
 
@@ -68,7 +69,7 @@
 	// Check unsafe.Pointer safety rules according to
 	// https://golang.org/pkg/unsafe/#Pointer.
 
-	switch x := analysisutil.Unparen(x).(type) {
+	switch x := astutil.Unparen(x).(type) {
 	case *ast.SelectorExpr:
 		// "(6) Conversion of a reflect.SliceHeader or
 		// reflect.StringHeader Data field to or from Pointer."
@@ -104,8 +105,7 @@
 		}
 		switch sel.Sel.Name {
 		case "Pointer", "UnsafeAddr":
-			t, ok := info.Types[sel.X].Type.(*types.Named)
-			if ok && t.Obj().Pkg().Path() == "reflect" && t.Obj().Name() == "Value" {
+			if analysisutil.IsNamedType(info.Types[sel.X].Type, "reflect", "Value") {
 				return true
 			}
 		}
@@ -118,7 +118,7 @@
 // isSafeArith reports whether x is a pointer arithmetic expression that is safe
 // to convert to unsafe.Pointer.
 func isSafeArith(info *types.Info, x ast.Expr) bool {
-	switch x := analysisutil.Unparen(x).(type) {
+	switch x := astutil.Unparen(x).(type) {
 	case *ast.CallExpr:
 		// Base case: initial conversion from unsafe.Pointer to uintptr.
 		return len(x.Args) == 1 &&
@@ -153,13 +153,5 @@
 
 // isReflectHeader reports whether t is reflect.SliceHeader or reflect.StringHeader.
 func isReflectHeader(t types.Type) bool {
-	if named, ok := t.(*types.Named); ok {
-		if obj := named.Obj(); obj.Pkg() != nil && obj.Pkg().Path() == "reflect" {
-			switch obj.Name() {
-			case "SliceHeader", "StringHeader":
-				return true
-			}
-		}
-	}
-	return false
+	return analysisutil.IsNamedType(t, "reflect", "SliceHeader", "StringHeader")
 }
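
Several passes above switch to analysisutil.IsNamedType. As a hedged sketch of the semantics being relied on (the vendored helper may differ in detail), an equivalent check looks roughly like this:

	package example

	import "go/types"

	// isNamedType reports whether t is a named type declared in the package
	// with path pkgPath whose name is one of names.
	func isNamedType(t types.Type, pkgPath string, names ...string) bool {
		n, ok := t.(*types.Named)
		if !ok {
			return false
		}
		obj := n.Obj()
		if obj == nil || obj.Pkg() == nil || obj.Pkg().Path() != pkgPath {
			return false
		}
		for _, name := range names {
			if obj.Name() == name {
				return true
			}
		}
		return false
	}
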
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
index cb487a2..7f79b4a 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
@@ -24,6 +24,7 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/inspect"
 	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/go/ast/inspector"
 	"golang.org/x/tools/go/types/typeutil"
 )
@@ -82,7 +83,7 @@
 		(*ast.ExprStmt)(nil),
 	}
 	inspect.Preorder(nodeFilter, func(n ast.Node) {
-		call, ok := analysisutil.Unparen(n.(*ast.ExprStmt).X).(*ast.CallExpr)
+		call, ok := astutil.Unparen(n.(*ast.ExprStmt).X).(*ast.CallExpr)
 		if !ok {
 			return // not a call statement
 		}
@@ -92,7 +93,6 @@
 		if !ok {
 			return // e.g. var or builtin
 		}
-
 		if sig := fn.Type().(*types.Signature); sig.Recv() != nil {
 			// method (e.g. foo.String())
 			if types.Identical(sig, sigNoArgsStringResult) {
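
For context, the kind of statement unusedresult reports; the hunk above only swaps the Unparen helper.

	package example

	import "fmt"

	func f() {
		fmt.Sprintf("discarded") // unusedresult: result of fmt.Sprintf call not used
		s := fmt.Sprintf("kept") // fine: the result is used
		_ = s
	}
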
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
index 10c76bc..1fa0d1f 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
@@ -38,7 +38,6 @@
 	"go/token"
 	"go/types"
 	"io"
-	"io/ioutil"
 	"log"
 	"os"
 	"path/filepath"
@@ -51,7 +50,7 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/internal/analysisflags"
 	"golang.org/x/tools/internal/facts"
-	"golang.org/x/tools/internal/typeparams"
+	"golang.org/x/tools/internal/versions"
 )
 
 // A Config describes a compilation unit to be analyzed.
@@ -59,19 +58,19 @@
 // whose name ends with ".cfg".
 type Config struct {
 	ID                        string // e.g. "fmt [fmt.test]"
-	Compiler                  string
-	Dir                       string
-	ImportPath                string
+	Compiler                  string // gc or gccgo, provided to MakeImporter
+	Dir                       string // (unused)
+	ImportPath                string // package path
 	GoVersion                 string // minimum required Go version, such as "go1.21.0"
 	GoFiles                   []string
 	NonGoFiles                []string
 	IgnoredFiles              []string
-	ImportMap                 map[string]string
-	PackageFile               map[string]string
-	Standard                  map[string]bool
-	PackageVetx               map[string]string
-	VetxOnly                  bool
-	VetxOutput                string
+	ImportMap                 map[string]string // maps import path to package path
+	PackageFile               map[string]string // maps package path to file of type information
+	Standard                  map[string]bool   // package belongs to standard library
+	PackageVetx               map[string]string // maps package path to file of fact information
+	VetxOnly                  bool              // run analysis only for facts, not diagnostics
+	VetxOutput                string            // where to write file of fact information
 	SucceedOnTypecheckFailure bool
 }
 
@@ -167,7 +166,7 @@
 }
 
 func readConfig(filename string) (*Config, error) {
-	data, err := ioutil.ReadFile(filename)
+	data, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, err
 	}
@@ -184,6 +183,56 @@
 	return cfg, nil
 }
 
+type factImporter = func(pkgPath string) ([]byte, error)
+
+// These four hook variables are a proof of concept of a future
+// parameterization of a unitchecker API that allows the client to
+// determine how and where facts and types are produced and consumed.
+// (Note that the eventual API will likely be quite different.)
+//
+// The defaults honor a Config in a manner compatible with 'go vet'.
+var (
+	makeTypesImporter = func(cfg *Config, fset *token.FileSet) types.Importer {
+		compilerImporter := importer.ForCompiler(fset, cfg.Compiler, func(path string) (io.ReadCloser, error) {
+			// path is a resolved package path, not an import path.
+			file, ok := cfg.PackageFile[path]
+			if !ok {
+				if cfg.Compiler == "gccgo" && cfg.Standard[path] {
+					return nil, nil // fall back to default gccgo lookup
+				}
+				return nil, fmt.Errorf("no package file for %q", path)
+			}
+			return os.Open(file)
+		})
+		return importerFunc(func(importPath string) (*types.Package, error) {
+			path, ok := cfg.ImportMap[importPath] // resolve vendoring, etc
+			if !ok {
+				return nil, fmt.Errorf("can't resolve import %q", importPath)
+			}
+			return compilerImporter.Import(path)
+		})
+	}
+
+	exportTypes = func(*Config, *token.FileSet, *types.Package) error {
+		// By default this is a no-op, because "go vet"
+		// makes the compiler produce type information.
+		return nil
+	}
+
+	makeFactImporter = func(cfg *Config) factImporter {
+		return func(pkgPath string) ([]byte, error) {
+			if vetx, ok := cfg.PackageVetx[pkgPath]; ok {
+				return os.ReadFile(vetx)
+			}
+			return nil, nil // no .vetx file, no facts
+		}
+	}
+
+	exportFacts = func(cfg *Config, data []byte) error {
+		return os.WriteFile(cfg.VetxOutput, data, 0666)
+	}
+)
+
 func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]result, error) {
 	// Load, parse, typecheck.
 	var files []*ast.File
@@ -199,27 +248,9 @@
 		}
 		files = append(files, f)
 	}
-	compilerImporter := importer.ForCompiler(fset, cfg.Compiler, func(path string) (io.ReadCloser, error) {
-		// path is a resolved package path, not an import path.
-		file, ok := cfg.PackageFile[path]
-		if !ok {
-			if cfg.Compiler == "gccgo" && cfg.Standard[path] {
-				return nil, nil // fall back to default gccgo lookup
-			}
-			return nil, fmt.Errorf("no package file for %q", path)
-		}
-		return os.Open(file)
-	})
-	importer := importerFunc(func(importPath string) (*types.Package, error) {
-		path, ok := cfg.ImportMap[importPath] // resolve vendoring, etc
-		if !ok {
-			return nil, fmt.Errorf("can't resolve import %q", path)
-		}
-		return compilerImporter.Import(path)
-	})
 	tc := &types.Config{
-		Importer:  importer,
-		Sizes:     types.SizesFor("gc", build.Default.GOARCH), // assume gccgo ≡ gc?
+		Importer:  makeTypesImporter(cfg, fset),
+		Sizes:     types.SizesFor("gc", build.Default.GOARCH), // TODO(adonovan): use cfg.Compiler
 		GoVersion: cfg.GoVersion,
 	}
 	info := &types.Info{
@@ -227,10 +258,11 @@
 		Defs:       make(map[*ast.Ident]types.Object),
 		Uses:       make(map[*ast.Ident]types.Object),
 		Implicits:  make(map[ast.Node]types.Object),
+		Instances:  make(map[*ast.Ident]types.Instance),
 		Scopes:     make(map[ast.Node]*types.Scope),
 		Selections: make(map[*ast.SelectorExpr]*types.Selection),
 	}
-	typeparams.InitInstanceInfo(info)
+	versions.InitFileVersions(info)
 
 	pkg, err := tc.Check(cfg.ImportPath, fset, files, info)
 	if err != nil {
@@ -288,13 +320,7 @@
 	analyzers = filtered
 
 	// Read facts from imported packages.
-	read := func(imp *types.Package) ([]byte, error) {
-		if vetx, ok := cfg.PackageVetx[imp.Path()]; ok {
-			return ioutil.ReadFile(vetx)
-		}
-		return nil, nil // no .vetx file, no facts
-	}
-	facts, err := facts.NewDecoder(pkg).Decode(read)
+	facts, err := facts.NewDecoder(pkg).Decode(makeFactImporter(cfg))
 	if err != nil {
 		return nil, err
 	}
@@ -394,8 +420,11 @@
 	}
 
 	data := facts.Encode()
-	if err := ioutil.WriteFile(cfg.VetxOutput, data, 0666); err != nil {
-		return nil, fmt.Errorf("failed to write analysis facts: %v", err)
+	if err := exportFacts(cfg, data); err != nil {
+		return nil, fmt.Errorf("failed to export analysis facts: %v", err)
+	}
+	if err := exportTypes(cfg, fset, pkg); err != nil {
+		return nil, fmt.Errorf("failed to export type information: %v", err)
 	}
 
 	return results, nil
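
The new hook variables are unexported proof-of-concept seams; external tools continue to drive unitchecker through Main. A minimal vet tool sketch (hypothetical module, invoked via go vet -vettool=...):

	package main

	import (
		"golang.org/x/tools/go/analysis/passes/unusedresult"
		"golang.org/x/tools/go/analysis/unitchecker"
	)

	func main() {
		unitchecker.Main(unusedresult.Analyzer)
	}
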
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/validate.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/validate.go
index 9da5692..4f2c404 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/validate.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/validate.go
@@ -19,6 +19,8 @@
 // that the Requires graph is acyclic;
 // that analyzer fact types are unique;
 // that each fact type is a pointer.
+//
+// Analyzer names need not be unique, though this may be confusing.
 func Validate(analyzers []*Analyzer) error {
 	// Map each fact type to its sole generating analyzer.
 	factTypes := make(map[reflect.Type]*Analyzer)
diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
index 9fa5aa1..2c4c4e2 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
@@ -11,8 +11,6 @@
 	"go/ast"
 	"go/token"
 	"sort"
-
-	"golang.org/x/tools/internal/typeparams"
 )
 
 // PathEnclosingInterval returns the node that encloses the source
@@ -322,7 +320,7 @@
 			children = append(children, n.Recv)
 		}
 		children = append(children, n.Name)
-		if tparams := typeparams.ForFuncType(n.Type); tparams != nil {
+		if tparams := n.Type.TypeParams; tparams != nil {
 			children = append(children, tparams)
 		}
 		if n.Type.Params != nil {
@@ -377,7 +375,7 @@
 			tok(n.Lbrack, len("[")),
 			tok(n.Rbrack, len("]")))
 
-	case *typeparams.IndexListExpr:
+	case *ast.IndexListExpr:
 		children = append(children,
 			tok(n.Lbrack, len("[")),
 			tok(n.Rbrack, len("]")))
@@ -588,7 +586,7 @@
 		return "decrement statement"
 	case *ast.IndexExpr:
 		return "index expression"
-	case *typeparams.IndexListExpr:
+	case *ast.IndexListExpr:
 		return "index list expression"
 	case *ast.InterfaceType:
 		return "interface type"
diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
index f430b21..58934f7 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
@@ -9,8 +9,6 @@
 	"go/ast"
 	"reflect"
 	"sort"
-
-	"golang.org/x/tools/internal/typeparams"
 )
 
 // An ApplyFunc is invoked by Apply for each node n, even if n is nil,
@@ -252,7 +250,7 @@
 		a.apply(n, "X", nil, n.X)
 		a.apply(n, "Index", nil, n.Index)
 
-	case *typeparams.IndexListExpr:
+	case *ast.IndexListExpr:
 		a.apply(n, "X", nil, n.X)
 		a.applyList(n, "Indices")
 
@@ -293,7 +291,7 @@
 		a.apply(n, "Fields", nil, n.Fields)
 
 	case *ast.FuncType:
-		if tparams := typeparams.ForFuncType(n); tparams != nil {
+		if tparams := n.TypeParams; tparams != nil {
 			a.apply(n, "TypeParams", nil, tparams)
 		}
 		a.apply(n, "Params", nil, n.Params)
@@ -408,7 +406,7 @@
 	case *ast.TypeSpec:
 		a.apply(n, "Doc", nil, n.Doc)
 		a.apply(n, "Name", nil, n.Name)
-		if tparams := typeparams.ForTypeSpec(n); tparams != nil {
+		if tparams := n.TypeParams; tparams != nil {
 			a.apply(n, "TypeParams", nil, tparams)
 		}
 		a.apply(n, "Type", nil, n.Type)
diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
index 703c813..2a872f8 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -12,8 +12,6 @@
 import (
 	"go/ast"
 	"math"
-
-	"golang.org/x/tools/internal/typeparams"
 )
 
 const (
@@ -171,7 +169,7 @@
 		return 1 << nIncDecStmt
 	case *ast.IndexExpr:
 		return 1 << nIndexExpr
-	case *typeparams.IndexListExpr:
+	case *ast.IndexListExpr:
 		return 1 << nIndexListExpr
 	case *ast.InterfaceType:
 		return 1 << nInterfaceType
diff --git a/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
index f2ce77f..11d5c8c 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -26,7 +26,6 @@
 import (
 	"fmt"
 	"go/types"
-	"sort"
 	"strconv"
 	"strings"
 
@@ -121,8 +120,7 @@
 // An Encoder amortizes the cost of encoding the paths of multiple objects.
 // The zero value of an Encoder is ready to use.
 type Encoder struct {
-	scopeMemo        map[*types.Scope][]types.Object // memoization of scopeObjects
-	namedMethodsMemo map[*types.Named][]*types.Func  // memoization of namedMethods()
+	scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects
 }
 
 // For returns the path to an object relative to its package,
@@ -225,7 +223,7 @@
 	//    Reject obviously non-viable cases.
 	switch obj := obj.(type) {
 	case *types.TypeName:
-		if _, ok := obj.Type().(*typeparams.TypeParam); !ok {
+		if _, ok := obj.Type().(*types.TypeParam); !ok {
 			// With the exception of type parameters, only package-level type names
 			// have a path.
 			return "", fmt.Errorf("no path for %v", obj)
@@ -285,7 +283,7 @@
 			}
 		} else {
 			if named, _ := T.(*types.Named); named != nil {
-				if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil {
+				if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil {
 					// generic named type
 					return Path(r), nil
 				}
@@ -314,10 +312,12 @@
 		// Inspect declared methods of defined types.
 		if T, ok := o.Type().(*types.Named); ok {
 			path = append(path, opType)
-			// Note that method index here is always with respect
-			// to canonical ordering of methods, regardless of how
-			// they appear in the underlying type.
-			for i, m := range enc.namedMethods(T) {
+			// The method index here is always with respect
+			// to the underlying go/types data structures,
+			// which ultimately derives from source order
+			// and must be preserved by export data.
+			for i := 0; i < T.NumMethods(); i++ {
+				m := T.Method(i)
 				path2 := appendOpArg(path, opMethod, i)
 				if m == obj {
 					return Path(path2), nil // found declared method
@@ -418,8 +418,12 @@
 	path := make([]byte, 0, len(name)+8)
 	path = append(path, name...)
 	path = append(path, opType)
-	for i, m := range enc.namedMethods(named) {
-		if m == meth {
+
+	// Method indices are w.r.t. the go/types data structures,
+	// ultimately deriving from source order,
+	// which is preserved by export data.
+	for i := 0; i < named.NumMethods(); i++ {
+		if named.Method(i) == meth {
 			path = appendOpArg(path, opMethod, i)
 			return Path(path), true
 		}
@@ -458,7 +462,7 @@
 		}
 		return find(obj, T.Elem(), append(path, opElem), seen)
 	case *types.Signature:
-		if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil {
+		if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil {
 			return r
 		}
 		if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
@@ -501,7 +505,7 @@
 			}
 		}
 		return nil
-	case *typeparams.TypeParam:
+	case *types.TypeParam:
 		name := T.Obj()
 		if name == obj {
 			return append(path, opObj)
@@ -521,7 +525,7 @@
 	panic(T)
 }
 
-func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
+func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
 	for i := 0; i < list.Len(); i++ {
 		tparam := list.At(i)
 		path2 := appendOpArg(path, opTypeParam, i)
@@ -534,11 +538,11 @@
 
 // Object returns the object denoted by path p within the package pkg.
 func Object(pkg *types.Package, p Path) (types.Object, error) {
-	if p == "" {
+	pathstr := string(p)
+	if pathstr == "" {
 		return nil, fmt.Errorf("empty path")
 	}
 
-	pathstr := string(p)
 	var pkgobj, suffix string
 	if dot := strings.IndexByte(pathstr, opType); dot < 0 {
 		pkgobj = pathstr
@@ -558,7 +562,7 @@
 	}
 	// abstraction of *types.{Named,Signature}
 	type hasTypeParams interface {
-		TypeParams() *typeparams.TypeParamList
+		TypeParams() *types.TypeParamList
 	}
 	// abstraction of *types.{Named,TypeParam}
 	type hasObj interface {
@@ -660,7 +664,7 @@
 			t = tparams.At(index)
 
 		case opConstraint:
-			tparam, ok := t.(*typeparams.TypeParam)
+			tparam, ok := t.(*types.TypeParam)
 			if !ok {
 				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
 			}
@@ -697,11 +701,10 @@
 				obj = t.Method(index) // Id-ordered
 
 			case *types.Named:
-				methods := namedMethods(t) // (unmemoized)
-				if index >= len(methods) {
-					return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods))
+				if index >= t.NumMethods() {
+					return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
 				}
-				obj = methods[index] // Id-ordered
+				obj = t.Method(index)
 
 			default:
 				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
@@ -728,33 +731,6 @@
 	return obj, nil // success
 }
 
-// namedMethods returns the methods of a Named type in ascending Id order.
-func namedMethods(named *types.Named) []*types.Func {
-	methods := make([]*types.Func, named.NumMethods())
-	for i := range methods {
-		methods[i] = named.Method(i)
-	}
-	sort.Slice(methods, func(i, j int) bool {
-		return methods[i].Id() < methods[j].Id()
-	})
-	return methods
-}
-
-// namedMethods is a memoization of the namedMethods function. Callers must not modify the result.
-func (enc *Encoder) namedMethods(named *types.Named) []*types.Func {
-	m := enc.namedMethodsMemo
-	if m == nil {
-		m = make(map[*types.Named][]*types.Func)
-		enc.namedMethodsMemo = m
-	}
-	methods, ok := m[named]
-	if !ok {
-		methods = namedMethods(named) // allocates and sorts
-		m[named] = methods
-	}
-	return methods
-}
-
 // scopeObjects is a memoization of scope objects.
 // Callers must not modify the result.
 func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object {
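
A sketch of the round trip the index change must preserve: both the encoder and the decoder now take method i to be the i'th method in go/types (source) order, so For followed by Object yields the original object. The helper below is hypothetical.

	package example

	import (
		"go/types"

		"golang.org/x/tools/go/types/objectpath"
	)

	// roundTrip encodes obj's path within pkg and decodes it again.
	func roundTrip(pkg *types.Package, obj types.Object) (types.Object, error) {
		var enc objectpath.Encoder
		p, err := enc.For(obj)
		if err != nil {
			return nil, err // obj has no path (e.g. a local variable)
		}
		return objectpath.Object(pkg, p)
	}
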
diff --git a/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/callee.go
index 90b3ab0..90dc541 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/callee.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/callee.go
@@ -22,7 +22,7 @@
 	// Look through type instantiation if necessary.
 	isInstance := false
 	switch fun.(type) {
-	case *ast.IndexExpr, *typeparams.IndexListExpr:
+	case *ast.IndexExpr, *ast.IndexListExpr:
 		// When extracting the callee from an *IndexExpr, we need to check that
 		// it is a *types.Func and not a *types.Var.
 		// Example: Don't match a slice m within the expression `m[0]()`.
diff --git a/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/map.go b/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/map.go
index 7bd2fdb..544246d 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/map.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -219,7 +219,7 @@
 	// generic types or functions, and instantiated signatures do not have type
 	// parameter lists, we should never encounter a second non-empty type
 	// parameter list when hashing a generic signature.
-	sigTParams *typeparams.TypeParamList
+	sigTParams *types.TypeParamList
 }
 
 // MakeHasher returns a new Hasher instance.
@@ -297,7 +297,7 @@
 		// We should never encounter a generic signature while hashing another
 		// generic signature, but defensively set sigTParams only if h.mask is
 		// unset.
-		tparams := typeparams.ForSignature(t)
+		tparams := t.TypeParams()
 		if h.sigTParams == nil && tparams.Len() != 0 {
 			h = Hasher{
 				// There may be something more efficient than discarding the existing
@@ -318,7 +318,7 @@
 
 		return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
 
-	case *typeparams.Union:
+	case *types.Union:
 		return h.hashUnion(t)
 
 	case *types.Interface:
@@ -354,14 +354,14 @@
 
 	case *types.Named:
 		hash := h.hashPtr(t.Obj())
-		targs := typeparams.NamedTypeArgs(t)
+		targs := t.TypeArgs()
 		for i := 0; i < targs.Len(); i++ {
 			targ := targs.At(i)
 			hash += 2 * h.Hash(targ)
 		}
 		return hash
 
-	case *typeparams.TypeParam:
+	case *types.TypeParam:
 		return h.hashTypeParam(t)
 
 	case *types.Tuple:
@@ -381,7 +381,7 @@
 	return hash
 }
 
-func (h Hasher) hashUnion(t *typeparams.Union) uint32 {
+func (h Hasher) hashUnion(t *types.Union) uint32 {
 	// Hash type restrictions.
 	terms, err := typeparams.UnionTermSet(t)
 	// if err != nil t has invalid type restrictions. Fall back on a non-zero
@@ -392,7 +392,7 @@
 	return h.hashTermSet(terms)
 }
 
-func (h Hasher) hashTermSet(terms []*typeparams.Term) uint32 {
+func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
 	hash := 9157 + 2*uint32(len(terms))
 	for _, term := range terms {
 		// term order is not significant.
@@ -416,7 +416,7 @@
 // are not identical.
 //
 // Otherwise the hash of t depends only on t's pointer identity.
-func (h Hasher) hashTypeParam(t *typeparams.TypeParam) uint32 {
+func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 {
 	if h.sigTParams != nil {
 		i := t.Index()
 		if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
@@ -489,7 +489,7 @@
 	case *types.Pointer:
 		return 4393139
 
-	case *typeparams.Union:
+	case *types.Union:
 		return 562448657
 
 	case *types.Interface:
@@ -504,7 +504,7 @@
 	case *types.Named:
 		return h.hashPtr(t.Obj())
 
-	case *typeparams.TypeParam:
+	case *types.TypeParam:
 		return h.hashPtr(t.Obj())
 	}
 	panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
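
For context, the Hasher above backs typeutil.Map; a minimal, unaffected use as a sketch:

	package example

	import (
		"go/types"

		"golang.org/x/tools/go/types/typeutil"
	)

	func demo() interface{} {
		var m typeutil.Map // zero value is ready to use
		m.Set(types.Typ[types.Int], "an int")
		return m.At(types.Typ[types.Int]) // "an int"
	}
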
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
new file mode 100644
index 0000000..2b29168
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
@@ -0,0 +1,386 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package analysisinternal provides gopls' internal analyses with a
+// number of helper functions that operate on typed syntax trees.
+package analysisinternal
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"strconv"
+)
+
+func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos {
+	// Get the end position for the type error.
+	offset, end := fset.PositionFor(start, false).Offset, start
+	if offset >= len(src) {
+		return end
+	}
+	if width := bytes.IndexAny(src[offset:], " \n,():;[]+-*"); width > 0 {
+		end = start + token.Pos(width)
+	}
+	return end
+}
+
+func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+	under := typ
+	if n, ok := typ.(*types.Named); ok {
+		under = n.Underlying()
+	}
+	switch u := under.(type) {
+	case *types.Basic:
+		switch {
+		case u.Info()&types.IsNumeric != 0:
+			return &ast.BasicLit{Kind: token.INT, Value: "0"}
+		case u.Info()&types.IsBoolean != 0:
+			return &ast.Ident{Name: "false"}
+		case u.Info()&types.IsString != 0:
+			return &ast.BasicLit{Kind: token.STRING, Value: `""`}
+		default:
+			panic(fmt.Sprintf("unknown basic type %v", u))
+		}
+	case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array:
+		return ast.NewIdent("nil")
+	case *types.Struct:
+		texpr := TypeExpr(f, pkg, typ) // typ because we want the name here.
+		if texpr == nil {
+			return nil
+		}
+		return &ast.CompositeLit{
+			Type: texpr,
+		}
+	}
+	return nil
+}
+
+// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of
+// analysisinternal.ZeroValue)
+func IsZeroValue(expr ast.Expr) bool {
+	switch e := expr.(type) {
+	case *ast.BasicLit:
+		return e.Value == "0" || e.Value == `""`
+	case *ast.Ident:
+		return e.Name == "nil" || e.Name == "false"
+	default:
+		return false
+	}
+}
+
+// TypeExpr returns syntax for the specified type. References to
+// named types from packages other than pkg are qualified by an appropriate
+// package name, as defined by the import environment of file.
+func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+	switch t := typ.(type) {
+	case *types.Basic:
+		switch t.Kind() {
+		case types.UnsafePointer:
+			return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
+		default:
+			return ast.NewIdent(t.Name())
+		}
+	case *types.Pointer:
+		x := TypeExpr(f, pkg, t.Elem())
+		if x == nil {
+			return nil
+		}
+		return &ast.UnaryExpr{
+			Op: token.MUL,
+			X:  x,
+		}
+	case *types.Array:
+		elt := TypeExpr(f, pkg, t.Elem())
+		if elt == nil {
+			return nil
+		}
+		return &ast.ArrayType{
+			Len: &ast.BasicLit{
+				Kind:  token.INT,
+				Value: fmt.Sprintf("%d", t.Len()),
+			},
+			Elt: elt,
+		}
+	case *types.Slice:
+		elt := TypeExpr(f, pkg, t.Elem())
+		if elt == nil {
+			return nil
+		}
+		return &ast.ArrayType{
+			Elt: elt,
+		}
+	case *types.Map:
+		key := TypeExpr(f, pkg, t.Key())
+		value := TypeExpr(f, pkg, t.Elem())
+		if key == nil || value == nil {
+			return nil
+		}
+		return &ast.MapType{
+			Key:   key,
+			Value: value,
+		}
+	case *types.Chan:
+		dir := ast.ChanDir(t.Dir())
+		if t.Dir() == types.SendRecv {
+			dir = ast.SEND | ast.RECV
+		}
+		value := TypeExpr(f, pkg, t.Elem())
+		if value == nil {
+			return nil
+		}
+		return &ast.ChanType{
+			Dir:   dir,
+			Value: value,
+		}
+	case *types.Signature:
+		var params []*ast.Field
+		for i := 0; i < t.Params().Len(); i++ {
+			p := TypeExpr(f, pkg, t.Params().At(i).Type())
+			if p == nil {
+				return nil
+			}
+			params = append(params, &ast.Field{
+				Type: p,
+				Names: []*ast.Ident{
+					{
+						Name: t.Params().At(i).Name(),
+					},
+				},
+			})
+		}
+		var returns []*ast.Field
+		for i := 0; i < t.Results().Len(); i++ {
+			r := TypeExpr(f, pkg, t.Results().At(i).Type())
+			if r == nil {
+				return nil
+			}
+			returns = append(returns, &ast.Field{
+				Type: r,
+			})
+		}
+		return &ast.FuncType{
+			Params: &ast.FieldList{
+				List: params,
+			},
+			Results: &ast.FieldList{
+				List: returns,
+			},
+		}
+	case *types.Named:
+		if t.Obj().Pkg() == nil {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		if t.Obj().Pkg() == pkg {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		pkgName := t.Obj().Pkg().Name()
+
+		// If the file already imports the package under another name, use that.
+		for _, cand := range f.Imports {
+			if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
+				if cand.Name != nil && cand.Name.Name != "" {
+					pkgName = cand.Name.Name
+				}
+			}
+		}
+		if pkgName == "." {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		return &ast.SelectorExpr{
+			X:   ast.NewIdent(pkgName),
+			Sel: ast.NewIdent(t.Obj().Name()),
+		}
+	case *types.Struct:
+		return ast.NewIdent(t.String())
+	case *types.Interface:
+		return ast.NewIdent(t.String())
+	default:
+		return nil
+	}
+}
+
+// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable.
+// Some examples:
+//
+// Basic Example:
+// z := 1
+// y := z + x
+// If x is undeclared, then this function would return `y := z + x`, so that we
+// can insert `x := ` on the line before `y := z + x`.
+//
+// If stmt example:
+// if z == 1 {
+// } else if z == y {}
+// If y is undeclared, then this function would return `if z == 1 {`, because we cannot
+// insert a statement between an if and an else if statement. As a result, we need to find
+// the top of the if chain to insert `y := ` before.
+func StmtToInsertVarBefore(path []ast.Node) ast.Stmt {
+	enclosingIndex := -1
+	for i, p := range path {
+		if _, ok := p.(ast.Stmt); ok {
+			enclosingIndex = i
+			break
+		}
+	}
+	if enclosingIndex == -1 {
+		return nil
+	}
+	enclosingStmt := path[enclosingIndex]
+	switch enclosingStmt.(type) {
+	case *ast.IfStmt:
+		// The enclosingStmt is inside of the if declaration,
+		// We need to check if we are in an else-if stmt and
+		// get the base if statement.
+		return baseIfStmt(path, enclosingIndex)
+	case *ast.CaseClause:
+		// Get the enclosing switch stmt if the enclosingStmt is
+		// inside of the case statement.
+		for i := enclosingIndex + 1; i < len(path); i++ {
+			if node, ok := path[i].(*ast.SwitchStmt); ok {
+				return node
+			} else if node, ok := path[i].(*ast.TypeSwitchStmt); ok {
+				return node
+			}
+		}
+	}
+	if len(path) <= enclosingIndex+1 {
+		return enclosingStmt.(ast.Stmt)
+	}
+	// Check if the enclosing statement is inside another node.
+	switch expr := path[enclosingIndex+1].(type) {
+	case *ast.IfStmt:
+		// Get the base if statement.
+		return baseIfStmt(path, enclosingIndex+1)
+	case *ast.ForStmt:
+		if expr.Init == enclosingStmt || expr.Post == enclosingStmt {
+			return expr
+		}
+	}
+	return enclosingStmt.(ast.Stmt)
+}
+
+// baseIfStmt walks up the if/else-if chain until we get to
+// the top of the current if chain.
+func baseIfStmt(path []ast.Node, index int) ast.Stmt {
+	stmt := path[index]
+	for i := index + 1; i < len(path); i++ {
+		if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt {
+			stmt = node
+			continue
+		}
+		break
+	}
+	return stmt.(ast.Stmt)
+}
+
+// WalkASTWithParent walks the AST rooted at n. The semantics are
+// similar to ast.Inspect except it does not call f(nil).
+func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) {
+	var ancestors []ast.Node
+	ast.Inspect(n, func(n ast.Node) (recurse bool) {
+		if n == nil {
+			ancestors = ancestors[:len(ancestors)-1]
+			return false
+		}
+
+		var parent ast.Node
+		if len(ancestors) > 0 {
+			parent = ancestors[len(ancestors)-1]
+		}
+		ancestors = append(ancestors, n)
+		return f(n, parent)
+	})
+}
+
+// MatchingIdents finds the names of all identifiers in 'node' that match any of the given types.
+// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within
+// the scope of each identifier we select. Otherwise, we will insert a variable at 'pos' that
+// is unrecognized.
+func MatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]string {
+
+	// Initialize matches to contain the variable types we are searching for.
+	matches := make(map[types.Type][]string)
+	for _, typ := range typs {
+		if typ == nil {
+			continue // TODO(adonovan): is this reachable?
+		}
+		matches[typ] = nil // create entry
+	}
+
+	seen := map[types.Object]struct{}{}
+	ast.Inspect(node, func(n ast.Node) bool {
+		if n == nil {
+			return false
+		}
+		// Prevent circular definitions. If 'pos' is within an assignment statement, do not
+		// allow any identifiers in that assignment statement to be selected. Otherwise,
+		// we could do the following, where 'x' satisfies the type of 'f0':
+		//
+		// x := fakeStruct{f0: x}
+		//
+		if assign, ok := n.(*ast.AssignStmt); ok && pos > assign.Pos() && pos <= assign.End() {
+			return false
+		}
+		if n.End() > pos {
+			return n.Pos() <= pos
+		}
+		ident, ok := n.(*ast.Ident)
+		if !ok || ident.Name == "_" {
+			return true
+		}
+		obj := info.Defs[ident]
+		if obj == nil || obj.Type() == nil {
+			return true
+		}
+		if _, ok := obj.(*types.TypeName); ok {
+			return true
+		}
+		// Prevent duplicates in matches' values.
+		if _, ok = seen[obj]; ok {
+			return true
+		}
+		seen[obj] = struct{}{}
+		// Find the scope for the given position. Then, check whether the object
+		// exists within the scope.
+		innerScope := pkg.Scope().Innermost(pos)
+		if innerScope == nil {
+			return true
+		}
+		_, foundObj := innerScope.LookupParent(ident.Name, pos)
+		if foundObj != obj {
+			return true
+		}
+		// The object must match one of the types that we are searching for.
+		// TODO(adonovan): opt: use typeutil.Map?
+		if names, ok := matches[obj.Type()]; ok {
+			matches[obj.Type()] = append(names, ident.Name)
+		} else {
+			// If the object type does not exactly match
+			// any of the target types, greedily find the first
+			// target type that the object type can satisfy.
+			for typ := range matches {
+				if equivalentTypes(obj.Type(), typ) {
+					matches[typ] = append(matches[typ], ident.Name)
+				}
+			}
+		}
+		return true
+	})
+	return matches
+}
+
+func equivalentTypes(want, got types.Type) bool {
+	if types.Identical(want, got) {
+		return true
+	}
+	// Code segment to help check for untyped equality from (golang/go#32146).
+	if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 {
+		if lhs, ok := got.Underlying().(*types.Basic); ok {
+			return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType
+		}
+	}
+	return types.AssignableTo(want, got)
+}
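
A sketch of how ZeroValue's result might be rendered into source text by a quick fix. analysisinternal is internal to x/tools, so this is illustrative only; fset, f, pkg, and typ are assumed to come from a type-checked file.

	package example

	import (
		"bytes"
		"fmt"
		"go/ast"
		"go/format"
		"go/token"
		"go/types"

		"golang.org/x/tools/internal/analysisinternal"
	)

	func zeroValueSource(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) (string, error) {
		expr := analysisinternal.ZeroValue(f, pkg, typ)
		if expr == nil {
			return "", fmt.Errorf("no zero value expression for %s", typ)
		}
		var buf bytes.Buffer
		if err := format.Node(&buf, fset, expr); err != nil {
			return "", err
		}
		return buf.String(), nil // e.g. "0", `""`, "false", "nil", or a composite literal
	}
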
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go
new file mode 100644
index 0000000..3950772
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go
@@ -0,0 +1,113 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysisinternal
+
+import (
+	"fmt"
+	"go/parser"
+	"go/token"
+	"strings"
+)
+
+// MustExtractDoc is like [ExtractDoc] but it panics on error.
+//
+// To use, define a doc.go file such as:
+//
+//	// Package halting defines an analyzer of program termination.
+//	//
+//	// # Analyzer halting
+//	//
+//	// halting: reports whether execution will halt.
+//	//
+//	// The halting analyzer reports a diagnostic for functions
+//	// that run forever. To suppress the diagnostics, try inserting
+//	// a 'break' statement into each loop.
+//	package halting
+//
+//	import _ "embed"
+//
+//	//go:embed doc.go
+//	var doc string
+//
+// And declare your analyzer as:
+//
+//	var Analyzer = &analysis.Analyzer{
+//		Name:             "halting",
+//		Doc:              analysisutil.MustExtractDoc(doc, "halting"),
+//		...
+//	}
+func MustExtractDoc(content, name string) string {
+	doc, err := ExtractDoc(content, name)
+	if err != nil {
+		panic(err)
+	}
+	return doc
+}
+
+// ExtractDoc extracts a section of a package doc comment from the
+// provided contents of an analyzer package's doc.go file.
+//
+// A section is a portion of the comment between one heading and
+// the next, using this form:
+//
+//	# Analyzer NAME
+//
+//	NAME: SUMMARY
+//
+//	Full description...
+//
+// where NAME matches the name argument, and SUMMARY is a brief
+// verb-phrase that describes the analyzer. The following lines, up
+// until the next heading or the end of the comment, contain the full
+// description. ExtractDoc returns the portion following the colon,
+// which is the form expected by Analyzer.Doc.
+//
+// Example:
+//
+//	# Analyzer printf
+//
+//	printf: checks consistency of calls to printf
+//
+//	The printf analyzer checks consistency of calls to printf.
+//	Here is the complete description...
+//
+// This notation allows a single doc comment to provide documentation
+// for multiple analyzers, each in its own section.
+// The HTML anchors generated for each heading are predictable.
+//
+// It returns an error if the content was not a valid Go source file
+// containing a package doc comment with a heading of the required
+// form.
+//
+// This machinery enables the package documentation (typically
+// accessible via the web at https://pkg.go.dev/) and the command
+// documentation (typically printed to a terminal) to be derived from
+// the same source and formatted appropriately.
+func ExtractDoc(content, name string) (string, error) {
+	if content == "" {
+		return "", fmt.Errorf("empty Go source file")
+	}
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "", content, parser.ParseComments|parser.PackageClauseOnly)
+	if err != nil {
+		return "", fmt.Errorf("not a Go source file")
+	}
+	if f.Doc == nil {
+		return "", fmt.Errorf("Go source file has no package doc comment")
+	}
+	for _, section := range strings.Split(f.Doc.Text(), "\n# ") {
+		if body := strings.TrimPrefix(section, "Analyzer "+name); body != section &&
+			body != "" &&
+			(body[0] == '\r' || body[0] == '\n') {
+			body = strings.TrimSpace(body)
+			rest := strings.TrimPrefix(body, name+":")
+			if rest == body {
+				return "", fmt.Errorf("'Analyzer %s' heading not followed by '%s: summary...' line", name, name)
+			}
+			return strings.TrimSpace(rest), nil
+		}
+	}
+	return "", fmt.Errorf("package doc comment contains no 'Analyzer %s' heading", name)
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/facts/facts.go b/src/cmd/vendor/golang.org/x/tools/internal/facts/facts.go
index 954b42d..f0aa97e 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/facts/facts.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/facts/facts.go
@@ -40,7 +40,7 @@
 	"encoding/gob"
 	"fmt"
 	"go/types"
-	"io/ioutil"
+	"io"
 	"log"
 	"reflect"
 	"sort"
@@ -195,7 +195,7 @@
 type GetPackageFunc = func(pkgPath string) *types.Package
 
 // Decode decodes all the facts relevant to the analysis of package
-// pkg. The read function reads serialized fact data from an external
+// pkgPath. The read function reads serialized fact data from an external
 // source for one of pkg's direct imports, identified by package path.
 // The empty file is a valid encoding of an empty fact set.
 //
@@ -204,7 +204,7 @@
 //
 // Concurrent calls to Decode are safe, so long as the
 // [GetPackageFunc] (if any) is also concurrency-safe.
-func (d *Decoder) Decode(read func(*types.Package) ([]byte, error)) (*Set, error) {
+func (d *Decoder) Decode(read func(pkgPath string) ([]byte, error)) (*Set, error) {
 	// Read facts from imported packages.
 	// Facts may describe indirectly imported packages, or their objects.
 	m := make(map[key]analysis.Fact) // one big bucket
@@ -218,7 +218,7 @@
 		}
 
 		// Read the gob-encoded facts.
-		data, err := read(imp)
+		data, err := read(imp.Path())
 		if err != nil {
 			return nil, fmt.Errorf("in %s, can't import facts for package %q: %v",
 				d.pkg.Path(), imp.Path(), err)
@@ -269,6 +269,7 @@
 // It may fail if one of the Facts could not be gob-encoded, but this is
 // a sign of a bug in an Analyzer.
 func (s *Set) Encode() []byte {
+	encoder := new(objectpath.Encoder)
 
 	// TODO(adonovan): opt: use a more efficient encoding
 	// that avoids repeating PkgPath for each fact.
@@ -281,9 +282,36 @@
 		if debug {
 			log.Printf("%v => %s\n", k, fact)
 		}
+
+		// Don't export facts that we imported from another
+		// package, unless they represent fields or methods,
+		// or package-level types.
+		// (Facts about packages, and other package-level
+		// objects, are only obtained from direct imports so
+		// they needn't be reexported.)
+		//
+		// This is analogous to the pruning done by "deep"
+		// export data for types, but not as precise because
+		// we aren't careful about which structs or methods
+		// we reexport: it should be only those referenced
+		// from the API of s.pkg.
+		// TODO(adonovan): opt: be more precise. e.g.
+		// intersect with the set of objects computed by
+		// importMap(s.pkg.Imports()).
+		// TODO(adonovan): opt: implement "shallow" facts.
+		if k.pkg != s.pkg {
+			if k.obj == nil {
+				continue // imported package fact
+			}
+			if _, isType := k.obj.(*types.TypeName); !isType &&
+				k.obj.Parent() == k.obj.Pkg().Scope() {
+				continue // imported fact about package-level non-type object
+			}
+		}
+
 		var object objectpath.Path
 		if k.obj != nil {
-			path, err := objectpath.For(k.obj)
+			path, err := encoder.For(k.obj)
 			if err != nil {
 				if debug {
 					log.Printf("discarding fact %s about %s\n", fact, k.obj)
@@ -322,7 +350,7 @@
 		if err := gob.NewEncoder(&buf).Encode(gobFacts); err != nil {
 			// Fact encoding should never fail. Identify the culprit.
 			for _, gf := range gobFacts {
-				if err := gob.NewEncoder(ioutil.Discard).Encode(gf); err != nil {
+				if err := gob.NewEncoder(io.Discard).Encode(gf); err != nil {
 					fact := gf.Fact
 					pkgpath := reflect.TypeOf(fact).Elem().PkgPath()
 					log.Panicf("internal error: gob encoding of analysis fact %s failed: %v; please report a bug against fact %T in package %q",
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/facts/imports.go b/src/cmd/vendor/golang.org/x/tools/internal/facts/imports.go
index b18e62d..1fe63ca 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/facts/imports.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/facts/imports.go
@@ -6,8 +6,6 @@
 
 import (
 	"go/types"
-
-	"golang.org/x/tools/internal/typeparams"
 )
 
 // importMap computes the import map for a package by traversing the
@@ -55,7 +53,7 @@
 			// infinite expansions:
 			//     type N[T any] struct { F *N[N[T]] }
 			// importMap() is called on such types when Analyzer.RunDespiteErrors is true.
-			T = typeparams.NamedTypeOrigin(T).(*types.Named)
+			T = T.Origin()
 			if !typs[T] {
 				typs[T] = true
 				addObj(T.Obj())
@@ -63,12 +61,12 @@
 				for i := 0; i < T.NumMethods(); i++ {
 					addObj(T.Method(i))
 				}
-				if tparams := typeparams.ForNamed(T); tparams != nil {
+				if tparams := T.TypeParams(); tparams != nil {
 					for i := 0; i < tparams.Len(); i++ {
 						addType(tparams.At(i))
 					}
 				}
-				if targs := typeparams.NamedTypeArgs(T); targs != nil {
+				if targs := T.TypeArgs(); targs != nil {
 					for i := 0; i < targs.Len(); i++ {
 						addType(targs.At(i))
 					}
@@ -88,7 +86,7 @@
 		case *types.Signature:
 			addType(T.Params())
 			addType(T.Results())
-			if tparams := typeparams.ForSignature(T); tparams != nil {
+			if tparams := T.TypeParams(); tparams != nil {
 				for i := 0; i < tparams.Len(); i++ {
 					addType(tparams.At(i))
 				}
@@ -108,11 +106,11 @@
 			for i := 0; i < T.NumEmbeddeds(); i++ {
 				addType(T.EmbeddedType(i)) // walk Embedded for implicits
 			}
-		case *typeparams.Union:
+		case *types.Union:
 			for i := 0; i < T.Len(); i++ {
 				addType(T.Term(i).Type())
 			}
-		case *typeparams.TypeParam:
+		case *types.TypeParam:
 			if !typs[T] {
 				typs[T] = true
 				addObj(T.Obj())
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/common.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/common.go
index b9e87c6..cdab988 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/common.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -23,6 +23,7 @@
 package typeparams
 
 import (
+	"fmt"
 	"go/ast"
 	"go/token"
 	"go/types"
@@ -41,7 +42,7 @@
 	switch e := n.(type) {
 	case *ast.IndexExpr:
 		return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
-	case *IndexListExpr:
+	case *ast.IndexListExpr:
 		return e.X, e.Lbrack, e.Indices, e.Rbrack
 	}
 	return nil, token.NoPos, nil, token.NoPos
@@ -62,7 +63,7 @@
 			Rbrack: rbrack,
 		}
 	default:
-		return &IndexListExpr{
+		return &ast.IndexListExpr{
 			X:       x,
 			Lbrack:  lbrack,
 			Indices: indices,
@@ -73,7 +74,7 @@
 
 // IsTypeParam reports whether t is a type parameter.
 func IsTypeParam(t types.Type) bool {
-	_, ok := t.(*TypeParam)
+	_, ok := t.(*types.TypeParam)
 	return ok
 }
 
@@ -99,11 +100,11 @@
 		// Receiver is a *types.Interface.
 		return fn
 	}
-	if ForNamed(named).Len() == 0 {
+	if named.TypeParams().Len() == 0 {
 		// Receiver base has no type parameters, so we can avoid the lookup below.
 		return fn
 	}
-	orig := NamedTypeOrigin(named)
+	orig := named.Origin()
 	gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
 
 	// This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In:
@@ -125,6 +126,11 @@
 		}
 	}
 
+	// In golang/go#61196, we observe another crash, this time inexplicable.
+	if gfn == nil {
+		panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods()))
+	}
+
 	return gfn.(*types.Func)
 }
 
@@ -151,7 +157,7 @@
 //
 // In this case, GenericAssignableTo reports that instantiations of Container
 // are assignable to the corresponding instantiation of Interface.
-func GenericAssignableTo(ctxt *Context, V, T types.Type) bool {
+func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
 	// If V and T are not both named, or do not have matching non-empty type
 	// parameter lists, fall back on types.AssignableTo.
 
@@ -161,9 +167,9 @@
 		return types.AssignableTo(V, T)
 	}
 
-	vtparams := ForNamed(VN)
-	ttparams := ForNamed(TN)
-	if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 {
+	vtparams := VN.TypeParams()
+	ttparams := TN.TypeParams()
+	if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
 		return types.AssignableTo(V, T)
 	}
 
@@ -176,7 +182,7 @@
 	// Minor optimization: ensure we share a context across the two
 	// instantiations below.
 	if ctxt == nil {
-		ctxt = NewContext()
+		ctxt = types.NewContext()
 	}
 
 	var targs []types.Type
@@ -184,12 +190,12 @@
 		targs = append(targs, vtparams.At(i))
 	}
 
-	vinst, err := Instantiate(ctxt, V, targs, true)
+	vinst, err := types.Instantiate(ctxt, V, targs, true)
 	if err != nil {
 		panic("type parameters should satisfy their own constraints")
 	}
 
-	tinst, err := Instantiate(ctxt, T, targs, true)
+	tinst, err := types.Instantiate(ctxt, T, targs, true)
 	if err != nil {
 		return false
 	}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/coretype.go
index 993135e..7ea8840 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/coretype.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -81,13 +81,13 @@
 // restrictions may be arbitrarily complex. For example, consider the
 // following:
 //
-//  type A interface{ ~string|~[]byte }
+//	type A interface{ ~string|~[]byte }
 //
-//  type B interface{ int|string }
+//	type B interface{ int|string }
 //
-//  type C interface { ~string|~int }
+//	type C interface { ~string|~int }
 //
-//  type T[P interface{ A|B; C }] int
+//	type T[P interface{ A|B; C }] int
 //
 // In this example, the structural type restriction of P is ~string|int: A|B
 // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
@@ -108,15 +108,15 @@
 //
 // _NormalTerms makes no guarantees about the order of terms, except that it
 // is deterministic.
-func _NormalTerms(typ types.Type) ([]*Term, error) {
+func _NormalTerms(typ types.Type) ([]*types.Term, error) {
 	switch typ := typ.(type) {
-	case *TypeParam:
+	case *types.TypeParam:
 		return StructuralTerms(typ)
-	case *Union:
+	case *types.Union:
 		return UnionTermSet(typ)
 	case *types.Interface:
 		return InterfaceTermSet(typ)
 	default:
-		return []*Term{NewTerm(false, typ)}, nil
+		return []*types.Term{types.NewTerm(false, typ)}, nil
 	}
 }
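
The *types.Term and *types.Union values that _NormalTerms now returns can also be constructed directly; a small sketch (the specific terms and printed output are illustrative):

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		// Build the union ~string | int out of individual terms, mirroring the
		// []*types.Term slices returned above.
		terms := []*types.Term{
			types.NewTerm(true, types.Typ[types.String]), // ~string
			types.NewTerm(false, types.Typ[types.Int]),   // int
		}
		u := types.NewUnion(terms)
		fmt.Println(u)         // ~string|int (exact spacing may differ)
		fmt.Println(u.Len())   // 2
		fmt.Println(u.Term(0)) // ~string
	}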
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go
deleted file mode 100644
index 1821239..0000000
--- a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.18
-// +build !go1.18
-
-package typeparams
-
-// Enabled reports whether type parameters are enabled in the current build
-// environment.
-const Enabled = false
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go
deleted file mode 100644
index d671488..0000000
--- a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package typeparams
-
-// Note: this constant is in a separate file as this is the only acceptable
-// diff between the <1.18 API of this package and the 1.18 API.
-
-// Enabled reports whether type parameters are enabled in the current build
-// environment.
-const Enabled = true
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/normalize.go
index 9c631b6..93c80fd 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/normalize.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -60,7 +60,7 @@
 //
 // StructuralTerms makes no guarantees about the order of terms, except that it
 // is deterministic.
-func StructuralTerms(tparam *TypeParam) ([]*Term, error) {
+func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
 	constraint := tparam.Constraint()
 	if constraint == nil {
 		return nil, fmt.Errorf("%s has nil constraint", tparam)
@@ -78,7 +78,7 @@
 //
 // See the documentation of StructuralTerms for more information on
 // normalization.
-func InterfaceTermSet(iface *types.Interface) ([]*Term, error) {
+func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
 	return computeTermSet(iface)
 }
 
@@ -88,11 +88,11 @@
 //
 // See the documentation of StructuralTerms for more information on
 // normalization.
-func UnionTermSet(union *Union) ([]*Term, error) {
+func UnionTermSet(union *types.Union) ([]*types.Term, error) {
 	return computeTermSet(union)
 }
 
-func computeTermSet(typ types.Type) ([]*Term, error) {
+func computeTermSet(typ types.Type) ([]*types.Term, error) {
 	tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
 	if err != nil {
 		return nil, err
@@ -103,9 +103,9 @@
 	if tset.terms.isAll() {
 		return nil, nil
 	}
-	var terms []*Term
+	var terms []*types.Term
 	for _, term := range tset.terms {
-		terms = append(terms, NewTerm(term.tilde, term.typ))
+		terms = append(terms, types.NewTerm(term.tilde, term.typ))
 	}
 	return terms, nil
 }
@@ -162,7 +162,7 @@
 		tset.terms = allTermlist
 		for i := 0; i < u.NumEmbeddeds(); i++ {
 			embedded := u.EmbeddedType(i)
-			if _, ok := embedded.Underlying().(*TypeParam); ok {
+			if _, ok := embedded.Underlying().(*types.TypeParam); ok {
 				return nil, fmt.Errorf("invalid embedded type %T", embedded)
 			}
 			tset2, err := computeTermSetInternal(embedded, seen, depth+1)
@@ -171,7 +171,7 @@
 			}
 			tset.terms = tset.terms.intersect(tset2.terms)
 		}
-	case *Union:
+	case *types.Union:
 		// The term set of a union is the union of term sets of its terms.
 		tset.terms = nil
 		for i := 0; i < u.Len(); i++ {
@@ -184,7 +184,7 @@
 					return nil, err
 				}
 				terms = tset2.terms
-			case *TypeParam, *Union:
+			case *types.TypeParam, *types.Union:
 				// A stand-alone type parameter or union is not permitted as union
 				// term.
 				return nil, fmt.Errorf("invalid union term %T", t)
@@ -199,7 +199,7 @@
 				return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
 			}
 		}
-	case *TypeParam:
+	case *types.TypeParam:
 		panic("unreachable")
 	default:
 		// For all other types, the term set is just a single non-tilde term
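
For reference, the shape of the data these cases walk is a constraint interface embedding a union. It can be inspected with the plain go/types API; the tiny package below is illustrative only:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"
	)

	const src = `
	package p
	type T[P interface{ ~string | int }] struct{}
	`

	func main() {
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
		if err != nil {
			panic(err)
		}

		named := pkg.Scope().Lookup("T").Type().(*types.Named)
		tparam := named.TypeParams().At(0) // P
		iface := tparam.Constraint().Underlying().(*types.Interface)

		// The constraint embeds one *types.Union; its terms are the raw input
		// that StructuralTerms/UnionTermSet normalize.
		union := iface.EmbeddedType(0).(*types.Union)
		for i := 0; i < union.Len(); i++ {
			t := union.Term(i)
			fmt.Printf("tilde=%v type=%v\n", t.Tilde(), t.Type()) // ~string, then int
		}
	}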
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/termlist.go
index 933106a..cbd12f8 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/termlist.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -30,7 +30,7 @@
 	var buf bytes.Buffer
 	for i, x := range xl {
 		if i > 0 {
-			buf.WriteString(" ∪ ")
+			buf.WriteString(" | ")
 		}
 		buf.WriteString(x.String())
 	}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
deleted file mode 100644
index b478897..0000000
--- a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.18
-// +build !go1.18
-
-package typeparams
-
-import (
-	"go/ast"
-	"go/token"
-	"go/types"
-)
-
-func unsupported() {
-	panic("type parameters are unsupported at this go version")
-}
-
-// IndexListExpr is a placeholder type, as type parameters are not supported at
-// this Go version. Its methods panic on use.
-type IndexListExpr struct {
-	ast.Expr
-	X       ast.Expr   // expression
-	Lbrack  token.Pos  // position of "["
-	Indices []ast.Expr // index expressions
-	Rbrack  token.Pos  // position of "]"
-}
-
-// ForTypeSpec returns an empty field list, as type parameters are not supported
-// at this Go version.
-func ForTypeSpec(*ast.TypeSpec) *ast.FieldList {
-	return nil
-}
-
-// ForFuncType returns an empty field list, as type parameters are not
-// supported at this Go version.
-func ForFuncType(*ast.FuncType) *ast.FieldList {
-	return nil
-}
-
-// TypeParam is a placeholder type, as type parameters are not supported at
-// this Go version. Its methods panic on use.
-type TypeParam struct{ types.Type }
-
-func (*TypeParam) Index() int             { unsupported(); return 0 }
-func (*TypeParam) Constraint() types.Type { unsupported(); return nil }
-func (*TypeParam) Obj() *types.TypeName   { unsupported(); return nil }
-
-// TypeParamList is a placeholder for an empty type parameter list.
-type TypeParamList struct{}
-
-func (*TypeParamList) Len() int          { return 0 }
-func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil }
-
-// TypeList is a placeholder for an empty type list.
-type TypeList struct{}
-
-func (*TypeList) Len() int          { return 0 }
-func (*TypeList) At(int) types.Type { unsupported(); return nil }
-
-// NewTypeParam is unsupported at this Go version, and panics.
-func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
-	unsupported()
-	return nil
-}
-
-// SetTypeParamConstraint is unsupported at this Go version, and panics.
-func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
-	unsupported()
-}
-
-// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or
-// typeParams is non-empty.
-func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
-	if len(recvTypeParams) != 0 || len(typeParams) != 0 {
-		panic("signatures cannot have type parameters at this Go version")
-	}
-	return types.NewSignature(recv, params, results, variadic)
-}
-
-// ForSignature returns an empty slice.
-func ForSignature(*types.Signature) *TypeParamList {
-	return nil
-}
-
-// RecvTypeParams returns a nil slice.
-func RecvTypeParams(sig *types.Signature) *TypeParamList {
-	return nil
-}
-
-// IsComparable returns false, as no interfaces are type-restricted at this Go
-// version.
-func IsComparable(*types.Interface) bool {
-	return false
-}
-
-// IsMethodSet returns true, as no interfaces are type-restricted at this Go
-// version.
-func IsMethodSet(*types.Interface) bool {
-	return true
-}
-
-// IsImplicit returns false, as no interfaces are implicit at this Go version.
-func IsImplicit(*types.Interface) bool {
-	return false
-}
-
-// MarkImplicit does nothing, because this Go version does not have implicit
-// interfaces.
-func MarkImplicit(*types.Interface) {}
-
-// ForNamed returns an empty type parameter list, as type parameters are not
-// supported at this Go version.
-func ForNamed(*types.Named) *TypeParamList {
-	return nil
-}
-
-// SetForNamed panics if tparams is non-empty.
-func SetForNamed(_ *types.Named, tparams []*TypeParam) {
-	if len(tparams) > 0 {
-		unsupported()
-	}
-}
-
-// NamedTypeArgs returns nil.
-func NamedTypeArgs(*types.Named) *TypeList {
-	return nil
-}
-
-// NamedTypeOrigin is the identity method at this Go version.
-func NamedTypeOrigin(named *types.Named) types.Type {
-	return named
-}
-
-// Term holds information about a structural type restriction.
-type Term struct {
-	tilde bool
-	typ   types.Type
-}
-
-func (m *Term) Tilde() bool      { return m.tilde }
-func (m *Term) Type() types.Type { return m.typ }
-func (m *Term) String() string {
-	pre := ""
-	if m.tilde {
-		pre = "~"
-	}
-	return pre + m.typ.String()
-}
-
-// NewTerm is unsupported at this Go version, and panics.
-func NewTerm(tilde bool, typ types.Type) *Term {
-	return &Term{tilde, typ}
-}
-
-// Union is a placeholder type, as type parameters are not supported at this Go
-// version. Its methods panic on use.
-type Union struct{ types.Type }
-
-func (*Union) Len() int         { return 0 }
-func (*Union) Term(i int) *Term { unsupported(); return nil }
-
-// NewUnion is unsupported at this Go version, and panics.
-func NewUnion(terms []*Term) *Union {
-	unsupported()
-	return nil
-}
-
-// InitInstanceInfo is a noop at this Go version.
-func InitInstanceInfo(*types.Info) {}
-
-// Instance is a placeholder type, as type parameters are not supported at this
-// Go version.
-type Instance struct {
-	TypeArgs *TypeList
-	Type     types.Type
-}
-
-// GetInstances returns a nil map, as type parameters are not supported at this
-// Go version.
-func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil }
-
-// Context is a placeholder type, as type parameters are not supported at
-// this Go version.
-type Context struct{}
-
-// NewContext returns a placeholder Context instance.
-func NewContext() *Context {
-	return &Context{}
-}
-
-// Instantiate is unsupported on this Go version, and panics.
-func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
-	unsupported()
-	return nil, nil
-}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
deleted file mode 100644
index 114a36b..0000000
--- a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package typeparams
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// IndexListExpr is an alias for ast.IndexListExpr.
-type IndexListExpr = ast.IndexListExpr
-
-// ForTypeSpec returns n.TypeParams.
-func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList {
-	if n == nil {
-		return nil
-	}
-	return n.TypeParams
-}
-
-// ForFuncType returns n.TypeParams.
-func ForFuncType(n *ast.FuncType) *ast.FieldList {
-	if n == nil {
-		return nil
-	}
-	return n.TypeParams
-}
-
-// TypeParam is an alias for types.TypeParam
-type TypeParam = types.TypeParam
-
-// TypeParamList is an alias for types.TypeParamList
-type TypeParamList = types.TypeParamList
-
-// TypeList is an alias for types.TypeList
-type TypeList = types.TypeList
-
-// NewTypeParam calls types.NewTypeParam.
-func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
-	return types.NewTypeParam(name, constraint)
-}
-
-// SetTypeParamConstraint calls tparam.SetConstraint(constraint).
-func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
-	tparam.SetConstraint(constraint)
-}
-
-// NewSignatureType calls types.NewSignatureType.
-func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
-	return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic)
-}
-
-// ForSignature returns sig.TypeParams()
-func ForSignature(sig *types.Signature) *TypeParamList {
-	return sig.TypeParams()
-}
-
-// RecvTypeParams returns sig.RecvTypeParams().
-func RecvTypeParams(sig *types.Signature) *TypeParamList {
-	return sig.RecvTypeParams()
-}
-
-// IsComparable calls iface.IsComparable().
-func IsComparable(iface *types.Interface) bool {
-	return iface.IsComparable()
-}
-
-// IsMethodSet calls iface.IsMethodSet().
-func IsMethodSet(iface *types.Interface) bool {
-	return iface.IsMethodSet()
-}
-
-// IsImplicit calls iface.IsImplicit().
-func IsImplicit(iface *types.Interface) bool {
-	return iface.IsImplicit()
-}
-
-// MarkImplicit calls iface.MarkImplicit().
-func MarkImplicit(iface *types.Interface) {
-	iface.MarkImplicit()
-}
-
-// ForNamed extracts the (possibly empty) type parameter object list from
-// named.
-func ForNamed(named *types.Named) *TypeParamList {
-	return named.TypeParams()
-}
-
-// SetForNamed sets the type params tparams on n. Each tparam must be of
-// dynamic type *types.TypeParam.
-func SetForNamed(n *types.Named, tparams []*TypeParam) {
-	n.SetTypeParams(tparams)
-}
-
-// NamedTypeArgs returns named.TypeArgs().
-func NamedTypeArgs(named *types.Named) *TypeList {
-	return named.TypeArgs()
-}
-
-// NamedTypeOrigin returns named.Orig().
-func NamedTypeOrigin(named *types.Named) types.Type {
-	return named.Origin()
-}
-
-// Term is an alias for types.Term.
-type Term = types.Term
-
-// NewTerm calls types.NewTerm.
-func NewTerm(tilde bool, typ types.Type) *Term {
-	return types.NewTerm(tilde, typ)
-}
-
-// Union is an alias for types.Union
-type Union = types.Union
-
-// NewUnion calls types.NewUnion.
-func NewUnion(terms []*Term) *Union {
-	return types.NewUnion(terms)
-}
-
-// InitInstanceInfo initializes info to record information about type and
-// function instances.
-func InitInstanceInfo(info *types.Info) {
-	info.Instances = make(map[*ast.Ident]types.Instance)
-}
-
-// Instance is an alias for types.Instance.
-type Instance = types.Instance
-
-// GetInstances returns info.Instances.
-func GetInstances(info *types.Info) map[*ast.Ident]Instance {
-	return info.Instances
-}
-
-// Context is an alias for types.Context.
-type Context = types.Context
-
-// NewContext calls types.NewContext.
-func NewContext() *Context {
-	return types.NewContext()
-}
-
-// Instantiate calls types.Instantiate.
-func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
-	return types.Instantiate(ctxt, typ, targs, validate)
-}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
index 7ddee28..7350bb7 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -10,11 +10,10 @@
 
 // A term describes elementary type sets:
 //
-//   ∅:  (*term)(nil)     == ∅                      // set of no types (empty set)
-//   𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse)
-//   T:  &term{false, T}  == {T}                    // set of type T
-//  ~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
-//
+//	 ∅:  (*term)(nil)     == ∅                      // set of no types (empty set)
+//	 𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse)
+//	 T:  &term{false, T}  == {T}                    // set of type T
+//	~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
 type term struct {
 	tilde bool // valid if typ != nil
 	typ   types.Type
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/versions/gover.go b/src/cmd/vendor/golang.org/x/tools/internal/versions/gover.go
new file mode 100644
index 0000000..bbabcd2
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/versions/gover.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a fork of internal/gover for use by x/tools until
+// go1.21 and earlier are no longer supported by x/tools.
+
+package versions
+
+import "strings"
+
+// A gover is a parsed Go version: major[.minor[.patch]][kind[pre]]
+// The numbers are the original decimal strings to avoid integer overflows
+// and since there is very little actual math. (Probably overflow doesn't matter in practice,
+// but at the time this code was written, there was an existing test that used
+// go1.99999999999, which does not fit in an int on 32-bit platforms.
+// The "big decimal" representation avoids the problem entirely.)
+type gover struct {
+	major string // decimal
+	minor string // decimal or ""
+	patch string // decimal or ""
+	kind  string // "", "alpha", "beta", "rc"
+	pre   string // decimal or ""
+}
+
+// compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as toolchain versions.
+// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
+// Malformed versions compare less than well-formed versions and equal to each other.
+// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
+func compare(x, y string) int {
+	vx := parse(x)
+	vy := parse(y)
+
+	if c := cmpInt(vx.major, vy.major); c != 0 {
+		return c
+	}
+	if c := cmpInt(vx.minor, vy.minor); c != 0 {
+		return c
+	}
+	if c := cmpInt(vx.patch, vy.patch); c != 0 {
+		return c
+	}
+	if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
+		return c
+	}
+	if c := cmpInt(vx.pre, vy.pre); c != 0 {
+		return c
+	}
+	return 0
+}
+
+// lang returns the Go language version. For example, lang("1.2.3") == "1.2".
+func lang(x string) string {
+	v := parse(x)
+	if v.minor == "" || v.major == "1" && v.minor == "0" {
+		return v.major
+	}
+	return v.major + "." + v.minor
+}
+
+// isValid reports whether the version x is valid.
+func isValid(x string) bool {
+	return parse(x) != gover{}
+}
+
+// parse parses the Go version string x into a version.
+// It returns the zero version if x is malformed.
+func parse(x string) gover {
+	var v gover
+
+	// Parse major version.
+	var ok bool
+	v.major, x, ok = cutInt(x)
+	if !ok {
+		return gover{}
+	}
+	if x == "" {
+		// Interpret "1" as "1.0.0".
+		v.minor = "0"
+		v.patch = "0"
+		return v
+	}
+
+	// Parse . before minor version.
+	if x[0] != '.' {
+		return gover{}
+	}
+
+	// Parse minor version.
+	v.minor, x, ok = cutInt(x[1:])
+	if !ok {
+		return gover{}
+	}
+	if x == "" {
+		// Patch missing is same as "0" for older versions.
+		// Starting in Go 1.21, patch missing is different from explicit .0.
+		if cmpInt(v.minor, "21") < 0 {
+			v.patch = "0"
+		}
+		return v
+	}
+
+	// Parse patch if present.
+	if x[0] == '.' {
+		v.patch, x, ok = cutInt(x[1:])
+		if !ok || x != "" {
+			// Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
+			// Allowing them would be a bit confusing because we already have:
+			//	1.21 < 1.21rc1
+			// But a prerelease of a patch would have the opposite effect:
+			//	1.21.3rc1 < 1.21.3
+			// We've never needed them before, so let's not start now.
+			return gover{}
+		}
+		return v
+	}
+
+	// Parse prerelease.
+	i := 0
+	for i < len(x) && (x[i] < '0' || '9' < x[i]) {
+		if x[i] < 'a' || 'z' < x[i] {
+			return gover{}
+		}
+		i++
+	}
+	if i == 0 {
+		return gover{}
+	}
+	v.kind, x = x[:i], x[i:]
+	if x == "" {
+		return v
+	}
+	v.pre, x, ok = cutInt(x)
+	if !ok || x != "" {
+		return gover{}
+	}
+
+	return v
+}
+
+// cutInt scans the leading decimal number at the start of x to an integer
+// and returns that value and the rest of the string.
+func cutInt(x string) (n, rest string, ok bool) {
+	i := 0
+	for i < len(x) && '0' <= x[i] && x[i] <= '9' {
+		i++
+	}
+	if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero
+		return "", "", false
+	}
+	return x[:i], x[i:], true
+}
+
+// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
+// (Copied from golang.org/x/mod/semver's compareInt.)
+func cmpInt(x, y string) int {
+	if x == y {
+		return 0
+	}
+	if len(x) < len(y) {
+		return -1
+	}
+	if len(x) > len(y) {
+		return +1
+	}
+	if x < y {
+		return -1
+	} else {
+		return +1
+	}
+}
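
The "big decimal" trick above compares version components as strings: with leading zeros disallowed, a longer decimal string is numerically larger, and equal-length strings compare lexically. A tiny standalone illustration (not part of this package):

	package main

	import (
		"fmt"
		"strings"
	)

	// cmpDecimal mirrors the length-then-lexical comparison used by cmpInt above.
	func cmpDecimal(x, y string) int {
		if x == y {
			return 0
		}
		if len(x) != len(y) {
			if len(x) < len(y) {
				return -1
			}
			return +1
		}
		return strings.Compare(x, y)
	}

	func main() {
		fmt.Println(cmpDecimal("9", "10"))                     // -1: "10" is longer, hence larger
		fmt.Println(cmpDecimal("99999999999", "100000000000")) // -1: works far beyond int range
	}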
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/versions/types.go b/src/cmd/vendor/golang.org/x/tools/internal/versions/types.go
new file mode 100644
index 0000000..562eef2
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/versions/types.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+	"go/types"
+)
+
+// GoVersion returns the Go version of the type package.
+// It returns zero if no version can be determined.
+func GoVersion(pkg *types.Package) string {
+	// TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
+	if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
+		return pkg.GoVersion()
+	}
+	return ""
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/versions/types_go121.go b/src/cmd/vendor/golang.org/x/tools/internal/versions/types_go121.go
new file mode 100644
index 0000000..a7b7920
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/versions/types_go121.go
@@ -0,0 +1,20 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.22
+// +build !go1.22
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersions always reports a file's Go version as the
+// zero version at this Go version.
+func FileVersions(info *types.Info, file *ast.File) string { return "" }
+
+// InitFileVersions is a noop at this Go version.
+func InitFileVersions(*types.Info) {}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/versions/types_go122.go b/src/cmd/vendor/golang.org/x/tools/internal/versions/types_go122.go
new file mode 100644
index 0000000..7b9ba89
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/versions/types_go122.go
@@ -0,0 +1,24 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+// +build go1.22
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersions maps a file to the file's semantic Go version.
+// The reported version is the zero version if a version cannot be determined.
+func FileVersions(info *types.Info, file *ast.File) string {
+	return info.FileVersions[file]
+}
+
+// InitFileVersions initializes info to record Go versions for Go files.
+func InitFileVersions(info *types.Info) {
+	info.FileVersions = make(map[*ast.File]string)
+}
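
A minimal sketch of filling and reading Info.FileVersions with the go1.22 type checker; the //go:build line and the exact reported value are illustrative assumptions:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"
	)

	const src = "//go:build go1.21\n\npackage p\n"

	func main() {
		fset := token.NewFileSet()
		// ParseComments is needed so the //go:build line survives into the AST.
		f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
		if err != nil {
			panic(err)
		}
		info := &types.Info{FileVersions: make(map[*ast.File]string)} // what InitFileVersions does
		conf := types.Config{GoVersion: "go1.22"}
		if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
			panic(err)
		}
		// Expected to report the file's effective language version: likely
		// "go1.21" here under the go1.22 per-file rules, and the zero version
		// on toolchains where the field is unused.
		fmt.Printf("file version: %q\n", info.FileVersions[f])
	}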
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/versions/versions.go b/src/cmd/vendor/golang.org/x/tools/internal/versions/versions.go
new file mode 100644
index 0000000..e16f6c3
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/versions/versions.go
@@ -0,0 +1,52 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// Note: If we use build tags to use go/versions when go >=1.22,
+// we run into go.dev/issue/53737. Under some operations users would see an
+// import of "go/versions" even if they would not compile the file.
+// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include
+// For this reason, this library is just a clone of go/versions for the moment.
+
+// Lang returns the Go language version for version x.
+// If x is not a valid version, Lang returns the empty string.
+// For example:
+//
+//	Lang("go1.21rc2") = "go1.21"
+//	Lang("go1.21.2") = "go1.21"
+//	Lang("go1.21") = "go1.21"
+//	Lang("go1") = "go1"
+//	Lang("bad") = ""
+//	Lang("1.21") = ""
+func Lang(x string) string {
+	v := lang(stripGo(x))
+	if v == "" {
+		return ""
+	}
+	return x[:2+len(v)] // "go"+v without allocation
+}
+
+// Compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as Go versions.
+// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
+// Invalid versions, including the empty string, compare less than
+// valid versions and equal to each other.
+// The language version "go1.21" compares less than the
+// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
+// Custom toolchain suffixes are ignored during comparison:
+// "go1.21.0" and "go1.21.0-bigcorp" are equal.
+func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) }
+
+// IsValid reports whether the version x is valid.
+func IsValid(x string) bool { return isValid(stripGo(x)) }
+
+// stripGo converts from a "go1.21" version to a "1.21" version.
+// If v does not start with "go", stripGo returns the empty string (a known invalid version).
+func stripGo(v string) string {
+	if len(v) < 2 || v[:2] != "go" {
+		return ""
+	}
+	return v[2:]
+}
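
Roughly the same semantics are available from the standard library's go/version package (new in Go 1.22), so the exported Lang/Compare/IsValid behavior can be sketched against that public API:

	package main

	import (
		"fmt"
		"go/version"
	)

	func main() {
		fmt.Println(version.Lang("go1.21rc2")) // go1.21
		fmt.Println(version.Lang("go1.21.2"))  // go1.21
		fmt.Println(version.Lang("1.21"))      // "" (missing "go" prefix)

		// The language version sorts before its release candidates and releases.
		fmt.Println(version.Compare("go1.21", "go1.21rc1") < 0)   // true
		fmt.Println(version.Compare("go1.21rc1", "go1.21.0") < 0) // true
	}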
diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt
index fe71cb4..d2caf1f 100644
--- a/src/cmd/vendor/modules.txt
+++ b/src/cmd/vendor/modules.txt
@@ -1,5 +1,5 @@
-# github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26
-## explicit; go 1.18
+# github.com/google/pprof v0.0.0-20230811205829-9131a7e9cc17
+## explicit; go 1.19
 github.com/google/pprof/driver
 github.com/google/pprof/internal/binutils
 github.com/google/pprof/internal/driver
@@ -14,17 +14,17 @@
 github.com/google/pprof/profile
 github.com/google/pprof/third_party/d3flamegraph
 github.com/google/pprof/third_party/svgpan
-# github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2
-## explicit; go 1.12
+# github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab
+## explicit; go 1.13
 github.com/ianlancetaylor/demangle
-# golang.org/x/arch v0.4.0
-## explicit; go 1.17
+# golang.org/x/arch v0.6.0
+## explicit; go 1.18
 golang.org/x/arch/arm/armasm
 golang.org/x/arch/arm64/arm64asm
 golang.org/x/arch/ppc64/ppc64asm
 golang.org/x/arch/x86/x86asm
-# golang.org/x/mod v0.12.0
-## explicit; go 1.17
+# golang.org/x/mod v0.14.0
+## explicit; go 1.18
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
@@ -34,24 +34,24 @@
 golang.org/x/mod/sumdb/note
 golang.org/x/mod/sumdb/tlog
 golang.org/x/mod/zip
-# golang.org/x/sync v0.3.0
-## explicit; go 1.17
+# golang.org/x/sync v0.5.0
+## explicit; go 1.18
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.10.0
-## explicit; go 1.17
-golang.org/x/sys/internal/unsafeheader
+# golang.org/x/sys v0.15.0
+## explicit; go 1.18
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.10.0
-## explicit; go 1.17
+# golang.org/x/term v0.15.0
+## explicit; go 1.18
 golang.org/x/term
-# golang.org/x/tools v0.11.1-0.20230712164437-1ca21856af7b
+# golang.org/x/tools v0.16.2-0.20231218185909-83bceaf2424d
 ## explicit; go 1.18
 golang.org/x/tools/cmd/bisect
 golang.org/x/tools/cover
 golang.org/x/tools/go/analysis
 golang.org/x/tools/go/analysis/internal/analysisflags
+golang.org/x/tools/go/analysis/passes/appends
 golang.org/x/tools/go/analysis/passes/asmdecl
 golang.org/x/tools/go/analysis/passes/assign
 golang.org/x/tools/go/analysis/passes/atomic
@@ -61,6 +61,7 @@
 golang.org/x/tools/go/analysis/passes/composite
 golang.org/x/tools/go/analysis/passes/copylock
 golang.org/x/tools/go/analysis/passes/ctrlflow
+golang.org/x/tools/go/analysis/passes/defers
 golang.org/x/tools/go/analysis/passes/directive
 golang.org/x/tools/go/analysis/passes/errorsas
 golang.org/x/tools/go/analysis/passes/framepointer
@@ -91,6 +92,8 @@
 golang.org/x/tools/go/cfg
 golang.org/x/tools/go/types/objectpath
 golang.org/x/tools/go/types/typeutil
+golang.org/x/tools/internal/analysisinternal
 golang.org/x/tools/internal/bisect
 golang.org/x/tools/internal/facts
 golang.org/x/tools/internal/typeparams
+golang.org/x/tools/internal/versions
diff --git a/src/cmd/vet/doc.go b/src/cmd/vet/doc.go
index ba5b5ed..5b2fa3d 100644
--- a/src/cmd/vet/doc.go
+++ b/src/cmd/vet/doc.go
@@ -27,28 +27,38 @@
 
 To list the available checks, run "go tool vet help":
 
-	asmdecl      report mismatches between assembly files and Go declarations
-	assign       check for useless assignments
-	atomic       check for common mistakes using the sync/atomic package
-	bools        check for common mistakes involving boolean operators
-	buildtag     check that +build tags are well-formed and correctly located
-	cgocall      detect some violations of the cgo pointer passing rules
-	composites   check for unkeyed composite literals
-	copylocks    check for locks erroneously passed by value
-	httpresponse check for mistakes using HTTP responses
-	loopclosure  check references to loop variables from within nested functions
-	lostcancel   check cancel func returned by context.WithCancel is called
-	nilfunc      check for useless comparisons between functions and nil
-	printf       check consistency of Printf format strings and arguments
-	shift        check for shifts that equal or exceed the width of the integer
-	slog         check for incorrect arguments to log/slog functions
-	stdmethods   check signature of methods of well-known interfaces
-	structtag    check that struct field tags conform to reflect.StructTag.Get
-	tests        check for common mistaken usages of tests and examples
-	unmarshal    report passing non-pointer or non-interface values to unmarshal
-	unreachable  check for unreachable code
-	unsafeptr    check for invalid conversions of uintptr to unsafe.Pointer
-	unusedresult check for unused results of calls to some functions
+	appends          check for missing values after append
+	asmdecl          report mismatches between assembly files and Go declarations
+	assign           check for useless assignments
+	atomic           check for common mistakes using the sync/atomic package
+	bools            check for common mistakes involving boolean operators
+	buildtag         check //go:build and // +build directives
+	cgocall          detect some violations of the cgo pointer passing rules
+	composites       check for unkeyed composite literals
+	copylocks        check for locks erroneously passed by value
+	defers           report common mistakes in defer statements
+	directive        check Go toolchain directives such as //go:debug
+	errorsas         report passing non-pointer or non-error values to errors.As
+	framepointer     report assembly that clobbers the frame pointer before saving it
+	httpresponse     check for mistakes using HTTP responses
+	ifaceassert      detect impossible interface-to-interface type assertions
+	loopclosure      check references to loop variables from within nested functions
+	lostcancel       check cancel func returned by context.WithCancel is called
+	nilfunc          check for useless comparisons between functions and nil
+	printf           check consistency of Printf format strings and arguments
+	shift            check for shifts that equal or exceed the width of the integer
+	sigchanyzer      check for unbuffered channel of os.Signal
+	slog             check for invalid structured logging calls
+	stdmethods       check signature of methods of well-known interfaces
+	stringintconv    check for string(int) conversions
+	structtag        check that struct field tags conform to reflect.StructTag.Get
+	testinggoroutine report calls to (*testing.T).Fatal from goroutines started by a test
+	tests            check for common mistaken usages of tests and examples
+	timeformat       check for calls of (time.Time).Format or time.Parse with 2006-02-01
+	unmarshal        report passing non-pointer or non-interface values to unmarshal
+	unreachable      check for unreachable code
+	unsafeptr        check for invalid conversions of uintptr to unsafe.Pointer
+	unusedresult     check for unused results of calls to some functions
 
 For details and flags of a particular check, such as printf, run "go tool vet help printf".
 
diff --git a/src/cmd/vet/main.go b/src/cmd/vet/main.go
index a90758f..c519728 100644
--- a/src/cmd/vet/main.go
+++ b/src/cmd/vet/main.go
@@ -9,6 +9,7 @@
 
 	"golang.org/x/tools/go/analysis/unitchecker"
 
+	"golang.org/x/tools/go/analysis/passes/appends"
 	"golang.org/x/tools/go/analysis/passes/asmdecl"
 	"golang.org/x/tools/go/analysis/passes/assign"
 	"golang.org/x/tools/go/analysis/passes/atomic"
@@ -17,6 +18,7 @@
 	"golang.org/x/tools/go/analysis/passes/cgocall"
 	"golang.org/x/tools/go/analysis/passes/composite"
 	"golang.org/x/tools/go/analysis/passes/copylock"
+	"golang.org/x/tools/go/analysis/passes/defers"
 	"golang.org/x/tools/go/analysis/passes/directive"
 	"golang.org/x/tools/go/analysis/passes/errorsas"
 	"golang.org/x/tools/go/analysis/passes/framepointer"
@@ -45,6 +47,7 @@
 	objabi.AddVersionFlag()
 
 	unitchecker.Main(
+		appends.Analyzer,
 		asmdecl.Analyzer,
 		assign.Analyzer,
 		atomic.Analyzer,
@@ -53,6 +56,7 @@
 		cgocall.Analyzer,
 		composite.Analyzer,
 		copylock.Analyzer,
+		defers.Analyzer,
 		directive.Analyzer,
 		errorsas.Analyzer,
 		framepointer.Analyzer,
diff --git a/src/cmd/vet/testdata/appends/appends.go b/src/cmd/vet/testdata/appends/appends.go
new file mode 100644
index 0000000..09ef3d2
--- /dev/null
+++ b/src/cmd/vet/testdata/appends/appends.go
@@ -0,0 +1,12 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the appends checker.
+
+package appends
+
+func AppendsTest() {
+	sli := []string{"a", "b", "c"}
+	sli = append(sli) // ERROR "append with no values"
+}
diff --git a/src/cmd/vet/vet_test.go b/src/cmd/vet/vet_test.go
index 8b29907..4bb0de0 100644
--- a/src/cmd/vet/vet_test.go
+++ b/src/cmd/vet/vet_test.go
@@ -62,6 +62,7 @@
 func TestVet(t *testing.T) {
 	t.Parallel()
 	for _, pkg := range []string{
+		"appends",
 		"asm",
 		"assign",
 		"atomic",
diff --git a/src/cmp.bash b/src/cmp.bash
index dac9ca0..5bca266 100644
--- a/src/cmp.bash
+++ b/src/cmp.bash
@@ -14,8 +14,8 @@
 # "go build <pkg>" assembly output for each package
 # and lists the packages with differences.
 #
-# It leaves and old.txt and new.txt file in the package
-# directories for the packages with differences.
+# For packages with differences it leaves files named
+# old.txt and new.txt.
 
 FLAGS1="-newexport=0"
 FLAGS2="-newexport=1"
diff --git a/src/cmp/cmp.go b/src/cmp/cmp.go
index 0fba5c1..4d1af6a 100644
--- a/src/cmp/cmp.go
+++ b/src/cmp/cmp.go
@@ -57,3 +57,15 @@
 func isNaN[T Ordered](x T) bool {
 	return x != x
 }
+
+// Or returns the first of its arguments that is not equal to the zero value.
+// If no argument is non-zero, it returns the zero value.
+func Or[T comparable](vals ...T) T {
+	var zero T
+	for _, val := range vals {
+		if val != zero {
+			return val
+		}
+	}
+	return zero
+}
diff --git a/src/cmp/cmp_test.go b/src/cmp/cmp_test.go
index b0c0dc3..e265464 100644
--- a/src/cmp/cmp_test.go
+++ b/src/cmp/cmp_test.go
@@ -6,12 +6,17 @@
 
 import (
 	"cmp"
+	"fmt"
 	"math"
+	"slices"
 	"sort"
 	"testing"
+	"unsafe"
 )
 
 var negzero = math.Copysign(0, -1)
+var nonnilptr uintptr = uintptr(unsafe.Pointer(&negzero))
+var nilptr uintptr = uintptr(unsafe.Pointer(nil))
 
 var tests = []struct {
 	x, y    any
@@ -43,6 +48,9 @@
 	{0.0, negzero, 0},
 	{negzero, 1.0, -1},
 	{negzero, -1.0, +1},
+	{nilptr, nonnilptr, -1},
+	{nonnilptr, nilptr, 1},
+	{nonnilptr, nonnilptr, 0},
 }
 
 func TestLess(t *testing.T) {
@@ -55,6 +63,8 @@
 			b = cmp.Less(test.x.(string), test.y.(string))
 		case float64:
 			b = cmp.Less(test.x.(float64), test.y.(float64))
+		case uintptr:
+			b = cmp.Less(test.x.(uintptr), test.y.(uintptr))
 		}
 		if b != (test.compare < 0) {
 			t.Errorf("Less(%v, %v) == %t, want %t", test.x, test.y, b, test.compare < 0)
@@ -72,6 +82,8 @@
 			c = cmp.Compare(test.x.(string), test.y.(string))
 		case float64:
 			c = cmp.Compare(test.x.(float64), test.y.(float64))
+		case uintptr:
+			c = cmp.Compare(test.x.(uintptr), test.y.(uintptr))
 		}
 		if c != test.compare {
 			t.Errorf("Compare(%v, %v) == %d, want %d", test.x, test.y, c, test.compare)
@@ -93,3 +105,73 @@
 		}
 	}
 }
+
+func TestOr(t *testing.T) {
+	cases := []struct {
+		in   []int
+		want int
+	}{
+		{nil, 0},
+		{[]int{0}, 0},
+		{[]int{1}, 1},
+		{[]int{0, 2}, 2},
+		{[]int{3, 0}, 3},
+		{[]int{4, 5}, 4},
+		{[]int{0, 6, 7}, 6},
+	}
+	for _, tc := range cases {
+		if got := cmp.Or(tc.in...); got != tc.want {
+			t.Errorf("cmp.Or(%v) = %v; want %v", tc.in, got, tc.want)
+		}
+	}
+}
+
+func ExampleOr() {
+	// Suppose we have some user input
+	// that may or may not be an empty string
+	userInput1 := ""
+	userInput2 := "some text"
+
+	fmt.Println(cmp.Or(userInput1, "default"))
+	fmt.Println(cmp.Or(userInput2, "default"))
+	fmt.Println(cmp.Or(userInput1, userInput2, "default"))
+	// Output:
+	// default
+	// some text
+	// some text
+}
+
+func ExampleOr_sort() {
+	type Order struct {
+		Product  string
+		Customer string
+		Price    float64
+	}
+	orders := []Order{
+		{"foo", "alice", 1.00},
+		{"bar", "bob", 3.00},
+		{"baz", "carol", 4.00},
+		{"foo", "alice", 2.00},
+		{"bar", "carol", 1.00},
+		{"foo", "bob", 4.00},
+	}
+	// Sort by customer first, product second, and last by higher price
+	slices.SortFunc(orders, func(a, b Order) int {
+		return cmp.Or(
+			cmp.Compare(a.Customer, b.Customer),
+			cmp.Compare(a.Product, b.Product),
+			cmp.Compare(b.Price, a.Price),
+		)
+	})
+	for _, order := range orders {
+		fmt.Printf("%s %s %.2f\n", order.Product, order.Customer, order.Price)
+	}
+
+	// Output:
+	// foo alice 2.00
+	// foo alice 1.00
+	// bar bob 3.00
+	// foo bob 4.00
+	// bar carol 1.00
+	// baz carol 4.00
+}
diff --git a/src/compress/bzip2/bit_reader.go b/src/compress/bzip2/bit_reader.go
index b451265..8c40777 100644
--- a/src/compress/bzip2/bit_reader.go
+++ b/src/compress/bzip2/bit_reader.go
@@ -32,7 +32,7 @@
 
 // ReadBits64 reads the given number of bits and returns them in the
 // least-significant part of a uint64. In the event of an error, it returns 0
-// and the error can be obtained by calling Err().
+// and the error can be obtained by calling bitReader.Err().
 func (br *bitReader) ReadBits64(bits uint) (n uint64) {
 	for bits > br.bits {
 		b, err := br.r.ReadByte()
diff --git a/src/compress/bzip2/bzip2.go b/src/compress/bzip2/bzip2.go
index 51054cc..8f88e38 100644
--- a/src/compress/bzip2/bzip2.go
+++ b/src/compress/bzip2/bzip2.go
@@ -41,7 +41,7 @@
 }
 
 // NewReader returns an io.Reader which decompresses bzip2 data from r.
-// If r does not also implement io.ByteReader,
+// If r does not also implement [io.ByteReader],
 // the decompressor may read more data than necessary from r.
 func NewReader(r io.Reader) io.Reader {
 	bz2 := new(reader)
diff --git a/src/compress/flate/deflate.go b/src/compress/flate/deflate.go
index b53764b..ea343b2 100644
--- a/src/compress/flate/deflate.go
+++ b/src/compress/flate/deflate.go
@@ -650,13 +650,13 @@
 	return nil
 }
 
-// NewWriter returns a new Writer compressing data at the given level.
-// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// NewWriter returns a new [Writer] compressing data at the given level.
+// Following zlib, levels range from 1 ([BestSpeed]) to 9 ([BestCompression]);
 // higher levels typically run slower but compress more. Level 0
-// (NoCompression) does not attempt any compression; it only adds the
+// ([NoCompression]) does not attempt any compression; it only adds the
 // necessary DEFLATE framing.
-// Level -1 (DefaultCompression) uses the default compression level.
-// Level -2 (HuffmanOnly) will use Huffman compression only, giving
+// Level -1 ([DefaultCompression]) uses the default compression level.
+// Level -2 ([HuffmanOnly]) will use Huffman compression only, giving
 // a very fast compression for all types of input, but sacrificing considerable
 // compression efficiency.
 //
@@ -670,11 +670,11 @@
 	return &dw, nil
 }
 
-// NewWriterDict is like NewWriter but initializes the new
-// Writer with a preset dictionary. The returned Writer behaves
+// NewWriterDict is like [NewWriter] but initializes the new
+// [Writer] with a preset dictionary. The returned [Writer] behaves
 // as if the dictionary had been written to it without producing
 // any compressed output. The compressed data written to w
-// can only be decompressed by a Reader initialized with the
+// can only be decompressed by a [Reader] initialized with the
 // same dictionary.
 func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
 	dw := &dictWriter{w}
@@ -698,7 +698,7 @@
 var errWriterClosed = errors.New("flate: closed writer")
 
 // A Writer takes data written to it and writes the compressed
-// form of that data to an underlying writer (see NewWriter).
+// form of that data to an underlying writer (see [NewWriter]).
 type Writer struct {
 	d    compressor
 	dict []byte
@@ -714,7 +714,7 @@
 // It is useful mainly in compressed network protocols, to ensure that
 // a remote reader has enough data to reconstruct a packet.
 // Flush does not return until the data has been written.
-// Calling Flush when there is no pending data still causes the Writer
+// Calling Flush when there is no pending data still causes the [Writer]
 // to emit a sync marker of at least 4 bytes.
 // If the underlying writer returns an error, Flush returns that error.
 //
@@ -731,7 +731,7 @@
 }
 
 // Reset discards the writer's state and makes it equivalent to
-// the result of NewWriter or NewWriterDict called with dst
+// the result of [NewWriter] or [NewWriterDict] called with dst
 // and w's level and dictionary.
 func (w *Writer) Reset(dst io.Writer) {
 	if dw, ok := w.d.w.writer.(*dictWriter); ok {
diff --git a/src/compress/flate/inflate.go b/src/compress/flate/inflate.go
index d7375f2..3c04445 100644
--- a/src/compress/flate/inflate.go
+++ b/src/compress/flate/inflate.go
@@ -65,8 +65,8 @@
 	return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
 }
 
-// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
-// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// Resetter resets a ReadCloser returned by [NewReader] or [NewReaderDict]
+// to switch to a new underlying [Reader]. This permits reusing a ReadCloser
 // instead of allocating a new one.
 type Resetter interface {
 	// Reset discards any buffered data and resets the Resetter as if it was
@@ -255,9 +255,9 @@
 	return true
 }
 
-// The actual read interface needed by NewReader.
+// The actual read interface needed by [NewReader].
 // If the passed in io.Reader does not also have ReadByte,
-// the NewReader will introduce its own buffering.
+// the [NewReader] will introduce its own buffering.
 type Reader interface {
 	io.Reader
 	io.ByteReader
@@ -798,12 +798,12 @@
 
 // NewReader returns a new ReadCloser that can be used
 // to read the uncompressed version of r.
-// If r does not also implement io.ByteReader,
+// If r does not also implement [io.ByteReader],
 // the decompressor may read more data than necessary from r.
-// The reader returns io.EOF after the final block in the DEFLATE stream has
+// The reader returns [io.EOF] after the final block in the DEFLATE stream has
 // been encountered. Any trailing data after the final block is ignored.
 //
-// The ReadCloser returned by NewReader also implements Resetter.
+// The [io.ReadCloser] returned by NewReader also implements [Resetter].
 func NewReader(r io.Reader) io.ReadCloser {
 	fixedHuffmanDecoderInit()
 
@@ -816,13 +816,13 @@
 	return &f
 }
 
-// NewReaderDict is like NewReader but initializes the reader
-// with a preset dictionary. The returned Reader behaves as if
+// NewReaderDict is like [NewReader] but initializes the reader
+// with a preset dictionary. The returned [Reader] behaves as if
 // the uncompressed data stream started with the given dictionary,
 // which has already been read. NewReaderDict is typically used
 // to read data compressed by NewWriterDict.
 //
-// The ReadCloser returned by NewReader also implements Resetter.
+// The ReadCloser returned by NewReaderDict also implements [Resetter].
 func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
 	fixedHuffmanDecoderInit()
 
diff --git a/src/compress/gzip/gunzip.go b/src/compress/gzip/gunzip.go
index ba8de97..6ca0552 100644
--- a/src/compress/gzip/gunzip.go
+++ b/src/compress/gzip/gunzip.go
@@ -45,7 +45,7 @@
 }
 
 // The gzip file stores a header giving metadata about the compressed file.
-// That header is exposed as the fields of the Writer and Reader structs.
+// That header is exposed as the fields of the [Writer] and [Reader] structs.
 //
 // Strings must be UTF-8 encoded and may only contain Unicode code points
 // U+0001 through U+00FF, due to limitations of the GZIP file format.
@@ -57,7 +57,7 @@
 	OS      byte      // operating system type
 }
 
-// A Reader is an io.Reader that can be read to retrieve
+// A Reader is an [io.Reader] that can be read to retrieve
 // uncompressed data from a gzip-format compressed file.
 //
 // In general, a gzip file can be a concatenation of gzip files,
@@ -66,10 +66,10 @@
 // Only the first header is recorded in the Reader fields.
 //
 // Gzip files store a length and checksum of the uncompressed data.
-// The Reader will return an ErrChecksum when Read
+// The Reader will return an [ErrChecksum] when [Reader.Read]
 // reaches the end of the uncompressed data if it does not
 // have the expected length or checksum. Clients should treat data
-// returned by Read as tentative until they receive the io.EOF
+// returned by [Reader.Read] as tentative until they receive the [io.EOF]
 // marking the end of the data.
 type Reader struct {
 	Header       // valid after NewReader or Reader.Reset
@@ -82,13 +82,13 @@
 	multistream  bool
 }
 
-// NewReader creates a new Reader reading the given reader.
-// If r does not also implement io.ByteReader,
+// NewReader creates a new [Reader] reading the given reader.
+// If r does not also implement [io.ByteReader],
 // the decompressor may read more data than necessary from r.
 //
-// It is the caller's responsibility to call Close on the Reader when done.
+// It is the caller's responsibility to call Close on the [Reader] when done.
 //
-// The Reader.Header fields will be valid in the Reader returned.
+// The [Reader.Header] fields will be valid in the [Reader] returned.
 func NewReader(r io.Reader) (*Reader, error) {
 	z := new(Reader)
 	if err := z.Reset(r); err != nil {
@@ -97,9 +97,9 @@
 	return z, nil
 }
 
-// Reset discards the Reader z's state and makes it equivalent to the
-// result of its original state from NewReader, but reading from r instead.
-// This permits reusing a Reader rather than allocating a new one.
+// Reset discards the [Reader] z's state and makes it equivalent to the
+// result of its original state from [NewReader], but reading from r instead.
+// This permits reusing a [Reader] rather than allocating a new one.
 func (z *Reader) Reset(r io.Reader) error {
 	*z = Reader{
 		decompressor: z.decompressor,
@@ -116,7 +116,7 @@
 
 // Multistream controls whether the reader supports multistream files.
 //
-// If enabled (the default), the Reader expects the input to be a sequence
+// If enabled (the default), the [Reader] expects the input to be a sequence
 // of individually gzipped data streams, each with its own header and
 // trailer, ending at EOF. The effect is that the concatenation of a sequence
 // of gzipped files is treated as equivalent to the gzip of the concatenation
@@ -125,11 +125,11 @@
 // Calling Multistream(false) disables this behavior; disabling the behavior
 // can be useful when reading file formats that distinguish individual gzip
 // data streams or mix gzip data streams with other data streams.
-// In this mode, when the Reader reaches the end of the data stream,
-// Read returns io.EOF. The underlying reader must implement io.ByteReader
+// In this mode, when the [Reader] reaches the end of the data stream,
+// [Reader.Read] returns [io.EOF]. The underlying reader must implement [io.ByteReader]
 // in order to be left positioned just after the gzip stream.
 // To start the next stream, call z.Reset(r) followed by z.Multistream(false).
-// If there is no next stream, z.Reset(r) will return io.EOF.
+// If there is no next stream, z.Reset(r) will return [io.EOF].
 func (z *Reader) Multistream(ok bool) {
 	z.multistream = ok
 }
@@ -242,7 +242,7 @@
 	return hdr, nil
 }
 
-// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
+// Read implements [io.Reader], reading uncompressed bytes from its underlying [Reader].
 func (z *Reader) Read(p []byte) (n int, err error) {
 	if z.err != nil {
 		return 0, z.err
@@ -284,7 +284,7 @@
 	return n, nil
 }
 
-// Close closes the Reader. It does not close the underlying io.Reader.
+// Close closes the [Reader]. It does not close the underlying [io.Reader].
 // In order for the GZIP checksum to be verified, the reader must be
-// fully consumed until the io.EOF.
+// fully consumed until the [io.EOF].
 func (z *Reader) Close() error { return z.decompressor.Close() }
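
As a sketch of the Multistream protocol described above (Reset plus Multistream(false) per stream, stopping when Reset returns io.EOF); the payload strings are illustrative:

	package main

	import (
		"bufio"
		"bytes"
		"compress/gzip"
		"io"
		"os"
	)

	func main() {
		// Two gzip streams concatenated back to back.
		var buf bytes.Buffer
		for _, s := range []string{"hello ", "gopher\n"} {
			zw := gzip.NewWriter(&buf)
			zw.Write([]byte(s))
			zw.Close()
		}

		// bufio.Reader implements io.ByteReader, so the reader is left
		// positioned just after each stream.
		br := bufio.NewReader(&buf)
		zr, err := gzip.NewReader(br)
		if err != nil {
			panic(err)
		}
		for {
			zr.Multistream(false) // Reset re-enables multistream, so disable each time
			if _, err := io.Copy(os.Stdout, zr); err != nil {
				panic(err)
			}
			if err := zr.Reset(br); err == io.EOF {
				break
			} else if err != nil {
				panic(err)
			}
		}
		zr.Close()
	}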
diff --git a/src/compress/gzip/gzip.go b/src/compress/gzip/gzip.go
index eaeb185..ab4598d 100644
--- a/src/compress/gzip/gzip.go
+++ b/src/compress/gzip/gzip.go
@@ -38,10 +38,10 @@
 	err         error
 }
 
-// NewWriter returns a new Writer.
+// NewWriter returns a new [Writer].
 // Writes to the returned writer are compressed and written to w.
 //
-// It is the caller's responsibility to call Close on the Writer when done.
+// It is the caller's responsibility to call Close on the [Writer] when done.
 // Writes may be buffered and not flushed until Close.
 //
 // Callers that wish to set the fields in Writer.Header must do so before
@@ -51,11 +51,11 @@
 	return z
 }
 
-// NewWriterLevel is like NewWriter but specifies the compression level instead
-// of assuming DefaultCompression.
+// NewWriterLevel is like [NewWriter] but specifies the compression level instead
+// of assuming [DefaultCompression].
 //
-// The compression level can be DefaultCompression, NoCompression, HuffmanOnly
-// or any integer value between BestSpeed and BestCompression inclusive.
+// The compression level can be [DefaultCompression], [NoCompression], [HuffmanOnly]
+// or any integer value between [BestSpeed] and [BestCompression] inclusive.
 // The error returned will be nil if the level is valid.
 func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
 	if level < HuffmanOnly || level > BestCompression {
@@ -81,9 +81,9 @@
 	}
 }
 
-// Reset discards the Writer z's state and makes it equivalent to the
-// result of its original state from NewWriter or NewWriterLevel, but
-// writing to w instead. This permits reusing a Writer rather than
+// Reset discards the [Writer] z's state and makes it equivalent to the
+// result of its original state from [NewWriter] or [NewWriterLevel], but
+// writing to w instead. This permits reusing a [Writer] rather than
 // allocating a new one.
 func (z *Writer) Reset(w io.Writer) {
 	z.init(w, z.level)
@@ -134,8 +134,8 @@
 	return err
 }
 
-// Write writes a compressed form of p to the underlying io.Writer. The
-// compressed bytes are not necessarily flushed until the Writer is closed.
+// Write writes a compressed form of p to the underlying [io.Writer]. The
+// compressed bytes are not necessarily flushed until the [Writer] is closed.
 func (z *Writer) Write(p []byte) (int, error) {
 	if z.err != nil {
 		return 0, z.err
@@ -222,9 +222,9 @@
 	return z.err
 }
 
-// Close closes the Writer by flushing any unwritten data to the underlying
-// io.Writer and writing the GZIP footer.
-// It does not close the underlying io.Writer.
+// Close closes the [Writer] by flushing any unwritten data to the underlying
+// [io.Writer] and writing the GZIP footer.
+// It does not close the underlying [io.Writer].
 func (z *Writer) Close() error {
 	if z.err != nil {
 		return z.err
diff --git a/src/compress/lzw/reader.go b/src/compress/lzw/reader.go
index 18df970..2cdfaa1 100644
--- a/src/compress/lzw/reader.go
+++ b/src/compress/lzw/reader.go
@@ -118,7 +118,7 @@
 	return code, nil
 }
 
-// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
+// Read implements io.Reader, reading uncompressed bytes from its underlying [Reader].
 func (r *Reader) Read(b []byte) (int, error) {
 	for {
 		if len(r.toRead) > 0 {
@@ -225,23 +225,23 @@
 
 var errClosed = errors.New("lzw: reader/writer is closed")
 
-// Close closes the Reader and returns an error for any future read operation.
-// It does not close the underlying io.Reader.
+// Close closes the [Reader] and returns an error for any future read operation.
+// It does not close the underlying [io.Reader].
 func (r *Reader) Close() error {
 	r.err = errClosed // in case any Reads come along
 	return nil
 }
 
-// Reset clears the Reader's state and allows it to be reused again
-// as a new Reader.
+// Reset clears the [Reader]'s state and allows it to be reused again
+// as a new [Reader].
 func (r *Reader) Reset(src io.Reader, order Order, litWidth int) {
 	*r = Reader{}
 	r.init(src, order, litWidth)
 }
 
-// NewReader creates a new io.ReadCloser.
-// Reads from the returned io.ReadCloser read and decompress data from r.
-// If r does not also implement io.ByteReader,
+// NewReader creates a new [io.ReadCloser].
+// Reads from the returned [io.ReadCloser] read and decompress data from r.
+// If r does not also implement [io.ByteReader],
 // the decompressor may read more data than necessary from r.
 // It is the caller's responsibility to call Close on the ReadCloser when
 // finished reading.
@@ -249,8 +249,8 @@
 // range [2,8] and is typically 8. It must equal the litWidth
 // used during compression.
 //
-// It is guaranteed that the underlying type of the returned io.ReadCloser
-// is a *Reader.
+// It is guaranteed that the underlying type of the returned [io.ReadCloser]
+// is a *[Reader].
 func NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser {
 	return newReader(r, order, litWidth)
 }
diff --git a/src/compress/lzw/writer.go b/src/compress/lzw/writer.go
index cf06ea8..99ad350 100644
--- a/src/compress/lzw/writer.go
+++ b/src/compress/lzw/writer.go
@@ -32,7 +32,7 @@
 )
 
 // Writer is an LZW compressor. It writes the compressed form of the data
-// to an underlying writer (see NewWriter).
+// to an underlying writer (see [NewWriter]).
 type Writer struct {
 	// w is the writer that compressed bytes are written to.
 	w writer
@@ -195,7 +195,7 @@
 	return n, nil
 }
 
-// Close closes the Writer, flushing any pending output. It does not close
+// Close closes the [Writer], flushing any pending output. It does not close
 // w's underlying writer.
 func (w *Writer) Close() error {
 	if w.err != nil {
@@ -238,22 +238,22 @@
 	return w.w.Flush()
 }
 
-// Reset clears the Writer's state and allows it to be reused again
-// as a new Writer.
+// Reset clears the [Writer]'s state and allows it to be reused again
+// as a new [Writer].
 func (w *Writer) Reset(dst io.Writer, order Order, litWidth int) {
 	*w = Writer{}
 	w.init(dst, order, litWidth)
 }
 
-// NewWriter creates a new io.WriteCloser.
-// Writes to the returned io.WriteCloser are compressed and written to w.
+// NewWriter creates a new [io.WriteCloser].
+// Writes to the returned [io.WriteCloser] are compressed and written to w.
 // It is the caller's responsibility to call Close on the WriteCloser when
 // finished writing.
 // The number of bits to use for literal codes, litWidth, must be in the
 // range [2,8] and is typically 8. Input bytes must be less than 1<<litWidth.
 //
-// It is guaranteed that the underlying type of the returned io.WriteCloser
-// is a *Writer.
+// It is guaranteed that the underlying type of the returned [io.WriteCloser]
+// is a *[Writer].
 func NewWriter(w io.Writer, order Order, litWidth int) io.WriteCloser {
 	return newWriter(w, order, litWidth)
 }
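
Likewise for the writer side documented above: the io.WriteCloser returned by lzw.NewWriter is guaranteed to be a *lzw.Writer, so after Close (which flushes pending output without closing the destination) it can be Reset to a new destination. A minimal sketch (illustrative only, not part of this patch):

package main

import (
	"bytes"
	"compress/lzw"
	"fmt"
)

func main() {
	var a, b bytes.Buffer
	wc := lzw.NewWriter(&a, lzw.MSB, 8)
	wc.Write([]byte("first stream"))
	wc.Close() // flushes pending output; a itself stays open

	// Documented guarantee: the concrete type is *lzw.Writer, so the
	// compressor can be reused for a new destination via Reset.
	if w, ok := wc.(*lzw.Writer); ok {
		w.Reset(&b, lzw.MSB, 8)
		w.Write([]byte("second stream"))
		w.Close()
	}
	fmt.Println(a.Len(), b.Len())
}
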
diff --git a/src/compress/zlib/reader.go b/src/compress/zlib/reader.go
index 10954ea..b4ba580 100644
--- a/src/compress/zlib/reader.go
+++ b/src/compress/zlib/reader.go
@@ -55,7 +55,7 @@
 	scratch      [4]byte
 }
 
-// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// Resetter resets a ReadCloser returned by [NewReader] or [NewReaderDict]
 // to switch to a new underlying Reader. This permits reusing a ReadCloser
 // instead of allocating a new one.
 type Resetter interface {
@@ -66,20 +66,20 @@
 
 // NewReader creates a new ReadCloser.
 // Reads from the returned ReadCloser read and decompress data from r.
-// If r does not implement io.ByteReader, the decompressor may read more
+// If r does not implement [io.ByteReader], the decompressor may read more
 // data than necessary from r.
 // It is the caller's responsibility to call Close on the ReadCloser when done.
 //
-// The ReadCloser returned by NewReader also implements Resetter.
+// The [io.ReadCloser] returned by NewReader also implements [Resetter].
 func NewReader(r io.Reader) (io.ReadCloser, error) {
 	return NewReaderDict(r, nil)
 }
 
-// NewReaderDict is like NewReader but uses a preset dictionary.
+// NewReaderDict is like [NewReader] but uses a preset dictionary.
 // NewReaderDict ignores the dictionary if the compressed data does not refer to it.
-// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary.
+// If the compressed data refers to a different dictionary, NewReaderDict returns [ErrDictionary].
 //
-// The ReadCloser returned by NewReaderDict also implements Resetter.
+// The ReadCloser returned by NewReaderDict also implements [Resetter].
 func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) {
 	z := new(reader)
 	err := z.Reset(r, dict)
@@ -119,9 +119,9 @@
 	return n, io.EOF
 }
 
-// Calling Close does not close the wrapped io.Reader originally passed to NewReader.
+// Calling Close does not close the wrapped [io.Reader] originally passed to [NewReader].
 // In order for the ZLIB checksum to be verified, the reader must be
-// fully consumed until the io.EOF.
+// fully consumed until the [io.EOF].
 func (z *reader) Close() error {
 	if z.err != nil && z.err != io.EOF {
 		return z.err
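
The Resetter interface documented above exists so one decompressor can serve several streams; a minimal sketch, reading each stream through io.EOF so the ZLIB checksum is verified (illustrative only, not part of this patch):

package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
)

func compress(s string) *bytes.Buffer {
	var buf bytes.Buffer
	w := zlib.NewWriter(&buf)
	w.Write([]byte(s))
	w.Close()
	return &buf
}

func main() {
	rc, err := zlib.NewReader(compress("first"))
	if err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(rc) // read through io.EOF so the checksum is checked
	fmt.Printf("%s\n", out)

	// The ReadCloser also implements Resetter, so it can be reused for the
	// next stream instead of allocating a new decompressor.
	if r, ok := rc.(zlib.Resetter); ok {
		r.Reset(compress("second"), nil)
		out, _ = io.ReadAll(rc)
		fmt.Printf("%s\n", out)
	}
	rc.Close()
}
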
diff --git a/src/container/heap/heap.go b/src/container/heap/heap.go
index 27de11e..3ad218e 100644
--- a/src/container/heap/heap.go
+++ b/src/container/heap/heap.go
@@ -21,13 +21,13 @@
 // for a type using the routines in this package.
 // Any type that implements it may be used as a
 // min-heap with the following invariants (established after
-// Init has been called or if the data is empty or sorted):
+// [Init] has been called or if the data is empty or sorted):
 //
 //	!h.Less(j, i) for 0 <= i < h.Len() and 2*i+1 <= j <= 2*i+2 and j < h.Len()
 //
-// Note that Push and Pop in this interface are for package heap's
+// Note that [Push] and [Pop] in this interface are for package heap's
 // implementation to call. To add and remove things from the heap,
-// use heap.Push and heap.Pop.
+// use [heap.Push] and [heap.Pop].
 type Interface interface {
 	sort.Interface
 	Push(x any) // add x as element Len()
@@ -55,7 +55,7 @@
 
 // Pop removes and returns the minimum element (according to Less) from the heap.
 // The complexity is O(log n) where n = h.Len().
-// Pop is equivalent to Remove(h, 0).
+// Pop is equivalent to [Remove](h, 0).
 func Pop(h Interface) any {
 	n := h.Len() - 1
 	h.Swap(0, n)
@@ -78,7 +78,7 @@
 
 // Fix re-establishes the heap ordering after the element at index i has changed its value.
 // Changing the value of the element at index i and then calling Fix is equivalent to,
-// but less expensive than, calling Remove(h, i) followed by a Push of the new value.
+// but less expensive than, calling [Remove](h, i) followed by a Push of the new value.
 // The complexity is O(log n) where n = h.Len().
 func Fix(h Interface, i int) {
 	if !down(h, i, h.Len()) {
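
A minimal sketch of a type satisfying heap.Interface as documented above: the interface's Push and Pop are for package heap to call, while callers add and remove elements through heap.Push and heap.Pop (illustrative only, not part of this patch):

package main

import (
	"container/heap"
	"fmt"
)

// intHeap is a min-heap of ints. Push and Pop exist for package heap;
// callers add and remove elements through heap.Push and heap.Pop.
type intHeap []int

func (h intHeap) Len() int           { return len(h) }
func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *intHeap) Push(x any)        { *h = append(*h, x.(int)) }
func (h *intHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := &intHeap{5, 2, 9}
	heap.Init(h) // establishes the documented min-heap invariant
	heap.Push(h, 1)
	fmt.Println(heap.Pop(h)) // 1, the minimum
}
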
diff --git a/src/context/afterfunc_test.go b/src/context/afterfunc_test.go
index 71f639a..7b75295 100644
--- a/src/context/afterfunc_test.go
+++ b/src/context/afterfunc_test.go
@@ -15,7 +15,7 @@
 // defined in context.go, that supports registering AfterFuncs.
 type afterFuncContext struct {
 	mu         sync.Mutex
-	afterFuncs map[*struct{}]func()
+	afterFuncs map[*byte]func()
 	done       chan struct{}
 	err        error
 }
@@ -50,9 +50,9 @@
 func (c *afterFuncContext) AfterFunc(f func()) func() bool {
 	c.mu.Lock()
 	defer c.mu.Unlock()
-	k := &struct{}{}
+	k := new(byte)
 	if c.afterFuncs == nil {
-		c.afterFuncs = make(map[*struct{}]func())
+		c.afterFuncs = make(map[*byte]func())
 	}
 	c.afterFuncs[k] = f
 	return func() bool {
@@ -106,11 +106,13 @@
 
 func TestCustomContextAfterFuncUnregisterCancel(t *testing.T) {
 	ctx0 := &afterFuncContext{}
-	_, cancel := context.WithCancel(ctx0)
-	if got, want := len(ctx0.afterFuncs), 1; got != want {
+	_, cancel1 := context.WithCancel(ctx0)
+	_, cancel2 := context.WithCancel(ctx0)
+	if got, want := len(ctx0.afterFuncs), 2; got != want {
 		t.Errorf("after WithCancel(ctx0): ctx0 has %v afterFuncs, want %v", got, want)
 	}
-	cancel()
+	cancel1()
+	cancel2()
 	if got, want := len(ctx0.afterFuncs), 0; got != want {
 		t.Errorf("after canceling WithCancel(ctx0): ctx0 has %v afterFuncs, want %v", got, want)
 	}
diff --git a/src/context/context.go b/src/context/context.go
index ee66b43..80e1787 100644
--- a/src/context/context.go
+++ b/src/context/context.go
@@ -286,7 +286,12 @@
 		defer cc.mu.Unlock()
 		return cc.cause
 	}
-	return nil
+	// There is no cancelCtxKey value, so we know that c is
+	// not a descendant of some Context created by WithCancelCause.
+	// Therefore, there is no specific cause to return.
+	// If this is not one of the standard Context types,
+	// it might still have an error even though it won't have a cause.
+	return c.Err()
 }
 
 // AfterFunc arranges to call f in its own goroutine after ctx is done
diff --git a/src/context/x_test.go b/src/context/x_test.go
index 57fe60b..b1012fa 100644
--- a/src/context/x_test.go
+++ b/src/context/x_test.go
@@ -408,8 +408,9 @@
 	t.Parallel()
 
 	r := rand.New(rand.NewSource(seed))
+	prefix := fmt.Sprintf("seed=%d", seed)
 	errorf := func(format string, a ...any) {
-		t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
+		t.Errorf(prefix+format, a...)
 	}
 	const (
 		minLayers = 30
@@ -867,6 +868,126 @@
 	}
 }
 
+// customCauseContext is a custom Context used to test context.Cause.
+type customCauseContext struct {
+	mu   sync.Mutex
+	done chan struct{}
+	err  error
+
+	cancelChild CancelFunc
+}
+
+func (ccc *customCauseContext) Deadline() (deadline time.Time, ok bool) {
+	return
+}
+
+func (ccc *customCauseContext) Done() <-chan struct{} {
+	ccc.mu.Lock()
+	defer ccc.mu.Unlock()
+	return ccc.done
+}
+
+func (ccc *customCauseContext) Err() error {
+	ccc.mu.Lock()
+	defer ccc.mu.Unlock()
+	return ccc.err
+}
+
+func (ccc *customCauseContext) Value(key any) any {
+	return nil
+}
+
+func (ccc *customCauseContext) cancel() {
+	ccc.mu.Lock()
+	ccc.err = Canceled
+	close(ccc.done)
+	cancelChild := ccc.cancelChild
+	ccc.mu.Unlock()
+
+	if cancelChild != nil {
+		cancelChild()
+	}
+}
+
+func (ccc *customCauseContext) setCancelChild(cancelChild CancelFunc) {
+	ccc.cancelChild = cancelChild
+}
+
+func TestCustomContextCause(t *testing.T) {
+	// Test if we cancel a custom context, Err and Cause return Canceled.
+	ccc := &customCauseContext{
+		done: make(chan struct{}),
+	}
+	ccc.cancel()
+	if got := ccc.Err(); got != Canceled {
+		t.Errorf("ccc.Err() = %v, want %v", got, Canceled)
+	}
+	if got := Cause(ccc); got != Canceled {
+		t.Errorf("Cause(ccc) = %v, want %v", got, Canceled)
+	}
+
+	// Test that if we pass a custom context to WithCancelCause,
+	// and then cancel that child context with a cause,
+	// that the cause of the child canceled context is correct
+	// but that the parent custom context is not canceled.
+	ccc = &customCauseContext{
+		done: make(chan struct{}),
+	}
+	ctx, causeFunc := WithCancelCause(ccc)
+	cause := errors.New("TestCustomContextCause")
+	causeFunc(cause)
+	if got := ctx.Err(); got != Canceled {
+		t.Errorf("after CancelCauseFunc ctx.Err() = %v, want %v", got, Canceled)
+	}
+	if got := Cause(ctx); got != cause {
+		t.Errorf("after CancelCauseFunc Cause(ctx) = %v, want %v", got, cause)
+	}
+	if got := ccc.Err(); got != nil {
+		t.Errorf("after CancelCauseFunc ccc.Err() = %v, want %v", got, nil)
+	}
+	if got := Cause(ccc); got != nil {
+		t.Errorf("after CancelCauseFunc Cause(ccc) = %v, want %v", got, nil)
+	}
+
+	// Test that if we now cancel the parent custom context,
+	// the cause of the child canceled context is still correct,
+	// and the parent custom context is canceled without a cause.
+	ccc.cancel()
+	if got := ctx.Err(); got != Canceled {
+		t.Errorf("after CancelCauseFunc ctx.Err() = %v, want %v", got, Canceled)
+	}
+	if got := Cause(ctx); got != cause {
+		t.Errorf("after CancelCauseFunc Cause(ctx) = %v, want %v", got, cause)
+	}
+	if got := ccc.Err(); got != Canceled {
+		t.Errorf("after CancelCauseFunc ccc.Err() = %v, want %v", got, Canceled)
+	}
+	if got := Cause(ccc); got != Canceled {
+		t.Errorf("after CancelCauseFunc Cause(ccc) = %v, want %v", got, Canceled)
+	}
+
+	// Test that if we associate a custom context with a child,
+	// then canceling the custom context cancels the child.
+	ccc = &customCauseContext{
+		done: make(chan struct{}),
+	}
+	ctx, cancelFunc := WithCancel(ccc)
+	ccc.setCancelChild(cancelFunc)
+	ccc.cancel()
+	if got := ctx.Err(); got != Canceled {
+		t.Errorf("after CancelCauseFunc ctx.Err() = %v, want %v", got, Canceled)
+	}
+	if got := Cause(ctx); got != Canceled {
+		t.Errorf("after CancelCauseFunc Cause(ctx) = %v, want %v", got, Canceled)
+	}
+	if got := ccc.Err(); got != Canceled {
+		t.Errorf("after CancelCauseFunc ccc.Err() = %v, want %v", got, Canceled)
+	}
+	if got := Cause(ccc); got != Canceled {
+		t.Errorf("after CancelCauseFunc Cause(ccc) = %v, want %v", got, Canceled)
+	}
+}
+
 func TestAfterFuncCalledAfterCancel(t *testing.T) {
 	ctx, cancel := WithCancel(Background())
 	donec := make(chan struct{})
diff --git a/src/crypto/aes/aes_gcm.go b/src/crypto/aes/aes_gcm.go
index f77d279..036705f 100644
--- a/src/crypto/aes/aes_gcm.go
+++ b/src/crypto/aes/aes_gcm.go
@@ -43,7 +43,7 @@
 var _ gcmAble = (*aesCipherGCM)(nil)
 
 // NewGCM returns the AES cipher wrapped in Galois Counter Mode. This is only
-// called by crypto/cipher.NewGCM via the gcmAble interface.
+// called by [crypto/cipher.NewGCM] via the gcmAble interface.
 func (c *aesCipherGCM) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
 	g := &gcmAsm{ks: c.enc, nonceSize: nonceSize, tagSize: tagSize}
 	gcmAesInit(&g.productTable, g.ks)
@@ -86,7 +86,7 @@
 	return
 }
 
-// Seal encrypts and authenticates plaintext. See the cipher.AEAD interface for
+// Seal encrypts and authenticates plaintext. See the [cipher.AEAD] interface for
 // details.
 func (g *gcmAsm) Seal(dst, nonce, plaintext, data []byte) []byte {
 	if len(nonce) != g.nonceSize {
@@ -126,7 +126,7 @@
 	return ret
 }
 
-// Open authenticates and decrypts ciphertext. See the cipher.AEAD interface
+// Open authenticates and decrypts ciphertext. See the [cipher.AEAD] interface
 // for details.
 func (g *gcmAsm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
 	if len(nonce) != g.nonceSize {
diff --git a/src/crypto/aes/asm_s390x.s b/src/crypto/aes/asm_s390x.s
index 0c60ac2..a233714 100644
--- a/src/crypto/aes/asm_s390x.s
+++ b/src/crypto/aes/asm_s390x.s
@@ -12,7 +12,7 @@
 	MOVD	length+32(FP), R5
 	MOVD	c+0(FP), R0
 loop:
-	WORD	$0xB92E0024 // cipher message (KM)
+	KM	R2, R4      // cipher message (KM)
 	BVS	loop        // branch back if interrupted
 	XOR	R0, R0
 	RET
@@ -29,7 +29,7 @@
 	MOVD	length+40(FP), R5
 	MOVD	c+0(FP), R0
 loop:
-	WORD	$0xB92F0024       // cipher message with chaining (KMC)
+	KMC	R2, R4            // cipher message with chaining (KMC)
 	BVS	loop              // branch back if interrupted
 	XOR	R0, R0
 	MVC	$16, 0(R1), 0(R8) // update iv
@@ -127,7 +127,7 @@
 	MOVD	src_base+56(FP), R6 // src
 	MOVD	src_len+64(FP), R7  // len
 loop:
-	WORD	$0xB92D2046         // cipher message with counter (KMCTR)
+	KMCTR	R4, R2, R6          // cipher message with counter (KMCTR)
 	BVS	loop                // branch back if interrupted
 	RET
 crash:
@@ -145,7 +145,7 @@
 	STMG	R4, R7, (R1)
 	LMG	data+16(FP), R2, R3 // R2=base, R3=len
 loop:
-	WORD    $0xB93E0002 // compute intermediate message digest (KIMD)
+	KIMD	R0, R2      // compute intermediate message digest (KIMD)
 	BVS     loop        // branch back if interrupted
 	MVC     $16, (R1), (R8)
 	MOVD	$0, R0
@@ -180,7 +180,7 @@
 	MVC	$8, 24(R8), 104(R1)
 
 kma:
-	WORD	$0xb9296024 // kma %r6,%r2,%r4
+	KMA	R2, R6, R4       // Cipher Message with Authentication
 	BVS	kma
 
 	MOVD	tag+104(FP), R2
diff --git a/src/crypto/aes/cipher.go b/src/crypto/aes/cipher.go
index 183c169..a9e6208 100644
--- a/src/crypto/aes/cipher.go
+++ b/src/crypto/aes/cipher.go
@@ -26,7 +26,7 @@
 	return "crypto/aes: invalid key size " + strconv.Itoa(int(k))
 }
 
-// NewCipher creates and returns a new cipher.Block.
+// NewCipher creates and returns a new [cipher.Block].
 // The key argument should be the AES key,
 // either 16, 24, or 32 bytes to select
 // AES-128, AES-192, or AES-256.
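
A minimal sketch of NewCipher as documented above, where the key length alone selects AES-128, AES-192, or AES-256 (illustrative only, not part of this patch; the all-zero key is just a placeholder):

package main

import (
	"crypto/aes"
	"fmt"
)

func main() {
	key := make([]byte, 16) // 16, 24 or 32 bytes selects AES-128, AES-192 or AES-256
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	src := make([]byte, aes.BlockSize)
	dst := make([]byte, aes.BlockSize)
	block.Encrypt(dst, src) // encrypts exactly one 16-byte block
	fmt.Printf("%x\n", dst)
}
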
diff --git a/src/crypto/aes/ctr_s390x.go b/src/crypto/aes/ctr_s390x.go
index 0d3a58e..9214215 100644
--- a/src/crypto/aes/ctr_s390x.go
+++ b/src/crypto/aes/ctr_s390x.go
@@ -32,7 +32,7 @@
 }
 
 // NewCTR returns a Stream which encrypts/decrypts using the AES block
-// cipher in counter mode. The length of iv must be the same as BlockSize.
+// cipher in counter mode. The length of iv must be the same as [BlockSize].
 func (c *aesCipherAsm) NewCTR(iv []byte) cipher.Stream {
 	if len(iv) != BlockSize {
 		panic("cipher.NewCTR: IV length must equal block size")
diff --git a/src/crypto/aes/gcm_ppc64x.go b/src/crypto/aes/gcm_ppc64x.go
index 44b2705..2a7f898 100644
--- a/src/crypto/aes/gcm_ppc64x.go
+++ b/src/crypto/aes/gcm_ppc64x.go
@@ -51,8 +51,10 @@
 	tagSize int
 }
 
+func counterCryptASM(nr int, out, in []byte, counter *[gcmBlockSize]byte, key *uint32)
+
 // NewGCM returns the AES cipher wrapped in Galois Counter Mode. This is only
-// called by crypto/cipher.NewGCM via the gcmAble interface.
+// called by [crypto/cipher.NewGCM] via the gcmAble interface.
 func (c *aesCipherAsm) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
 	var h1, h2 uint64
 	g := &gcmAsm{cipher: c, ks: c.enc, nonceSize: nonceSize, tagSize: tagSize}
@@ -114,34 +116,10 @@
 // into out. counter is the initial count value and will be updated with the next
 // count value. The length of out must be greater than or equal to the length
 // of in.
+// counterCrypt is implemented in assembly by counterCryptASM, which
+// allows the loop to be unrolled and optimized.
 func (g *gcmAsm) counterCrypt(out, in []byte, counter *[gcmBlockSize]byte) {
-	var mask [gcmBlockSize]byte
-
-	for len(in) >= gcmBlockSize {
-		// Hint to avoid bounds check
-		_, _ = in[15], out[15]
-		g.cipher.Encrypt(mask[:], counter[:])
-		gcmInc32(counter)
-
-		// XOR 16 bytes each loop iteration in 8 byte chunks
-		in0 := binary.LittleEndian.Uint64(in[0:])
-		in1 := binary.LittleEndian.Uint64(in[8:])
-		m0 := binary.LittleEndian.Uint64(mask[:8])
-		m1 := binary.LittleEndian.Uint64(mask[8:])
-		binary.LittleEndian.PutUint64(out[:8], in0^m0)
-		binary.LittleEndian.PutUint64(out[8:], in1^m1)
-		out = out[16:]
-		in = in[16:]
-	}
-
-	if len(in) > 0 {
-		g.cipher.Encrypt(mask[:], counter[:])
-		gcmInc32(counter)
-		// XOR leftover bytes
-		for i, inb := range in {
-			out[i] = inb ^ mask[i]
-		}
-	}
+	counterCryptASM(len(g.cipher.enc)/4-1, out, in, counter, &g.cipher.enc[0])
 }
 
 // increments the rightmost 32-bits of the count value by 1.
@@ -181,7 +159,7 @@
 	}
 }
 
-// Seal encrypts and authenticates plaintext. See the cipher.AEAD interface for
+// Seal encrypts and authenticates plaintext. See the [cipher.AEAD] interface for
 // details.
 func (g *gcmAsm) Seal(dst, nonce, plaintext, data []byte) []byte {
 	if len(nonce) != g.nonceSize {
@@ -205,7 +183,7 @@
 	return ret
 }
 
-// Open authenticates and decrypts ciphertext. See the cipher.AEAD interface
+// Open authenticates and decrypts ciphertext. See the [cipher.AEAD] interface
 // for details.
 func (g *gcmAsm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
 	if len(nonce) != g.nonceSize {
diff --git a/src/crypto/aes/gcm_ppc64x.s b/src/crypto/aes/gcm_ppc64x.s
index 72f0b8e..f661b27 100644
--- a/src/crypto/aes/gcm_ppc64x.s
+++ b/src/crypto/aes/gcm_ppc64x.s
@@ -4,7 +4,7 @@
 
 //go:build ppc64 || ppc64le
 
-// Based on CRYPTOGAMS code with the following comment:
+// Portions based on CRYPTOGAMS code with the following comment:
 // # ====================================================================
 // # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
 // # project. The module is, however, dual licensed under OpenSSL and
@@ -12,13 +12,17 @@
 // # details see http://www.openssl.org/~appro/cryptogams/.
 // # ====================================================================
 
-// This implementation is based on the ppc64 asm generated by the
-// script https://github.com/dot-asm/cryptogams/blob/master/ppc/ghashp8-ppc.pl
+// The implementations for gcmHash, gcmInit and gcmMul are based on the generated asm
+// from the script https://github.com/dot-asm/cryptogams/blob/master/ppc/ghashp8-ppc.pl
 // from commit d47afb3c.
 
 // Changes were made due to differences in the ABI and some register usage.
 // Some arguments were changed due to the way the Go code passes them.
 
+// Portions that use the stitched AES-GCM approach in counterCryptASM
+// are based on code found in
+// https://github.com/IBM/ipcri/blob/main/aes/p10_aes_gcm.s
+
 #include "textflag.h"
 
 #define XIP    R3
@@ -87,6 +91,292 @@
 
 #define VIN0   VIN
 
+#define ESPERM V10
+#define TMP2 V11
+
+// The following macros provide appropriate
+// implementations for endianness as well as
+// ISA specific for power8 and power9.
+#ifdef GOARCH_ppc64le
+#  ifdef GOPPC64_power9
+#define P8_LXVB16X(RA,RB,VT)   LXVB16X (RA)(RB), VT
+#define P8_STXVB16X(VS,RA,RB)  STXVB16X VS, (RA)(RB)
+#  else
+#define NEEDS_ESPERM
+#define P8_LXVB16X(RA,RB,VT) \
+	LXVD2X  (RA+RB), VT \
+	VPERM	VT, VT, ESPERM, VT
+
+#define P8_STXVB16X(VS,RA,RB) \
+	VPERM	VS, VS, ESPERM, TMP2; \
+	STXVD2X TMP2, (RA+RB)
+
+#  endif
+#else
+#define P8_LXVB16X(RA,RB,VT) \
+	LXVD2X  (RA+RB), VT
+
+#define P8_STXVB16X(VS,RA,RB) \
+	STXVD2X VS, (RA+RB)
+
+#endif
+
+#define MASK_PTR   R8
+
+#define MASKV   V0
+#define INV     V1
+
+// The following macros are used for
+// the stitched implementation within
+// counterCryptASM.
+
+// Load the initial GCM counter value
+// in V30 and set up the counter increment
+// in V31
+#define SETUP_COUNTER \
+	P8_LXVB16X(COUNTER, R0, V30); \
+	VSPLTISB $1, V28; \
+	VXOR V31, V31, V31; \
+	VSLDOI $1, V31, V28, V31
+
+// These macros set up the initial value
+// for a single encryption, or 4 or 8
+// stitched encryptions implemented
+// with interleaving vciphers.
+//
+// The input value for each encryption
+// is generated by XORing the counter
+// from V30 with the first key in VS0
+// and incrementing the counter.
+//
+// Single encryption in V15
+#define GEN_VCIPHER_INPUT \
+	XXLOR VS0, VS0, V29 \
+	VXOR V30, V29, V15; \
+	VADDUWM V30, V31, V30
+
+// 4 encryptions in V15 - V18
+#define GEN_VCIPHER_4_INPUTS \
+	XXLOR VS0, VS0, V29; \
+	VXOR V30, V29, V15; \
+	VADDUWM V30, V31, V30; \
+	VXOR V30, V29, V16; \
+	VADDUWM V30, V31, V30; \
+	VXOR V30, V29, V17; \
+	VADDUWM V30, V31, V30; \
+	VXOR V30, V29, V18; \
+	VADDUWM V30, V31, V30
+
+// 8 encryptions in V15 - V22
+#define GEN_VCIPHER_8_INPUTS \
+	XXLOR VS0, VS0, V29; \
+	VXOR V30, V29, V15; \
+	VADDUWM V30, V31, V30; \
+	VXOR V30, V29, V16; \
+	VADDUWM V30, V31, V30; \
+	VXOR V30, V29, V17; \
+	VADDUWM V30, V31, V30; \
+	VXOR V30, V29, V18; \
+	VADDUWM V30, V31, V30; \
+	VXOR V30, V29, V19; \
+	VADDUWM V30, V31, V30; \
+	VXOR V30, V29, V20; \
+	VADDUWM V30, V31, V30; \
+	VXOR V30, V29, V21; \
+	VADDUWM V30, V31, V30; \
+	VXOR V30, V29, V22; \
+	VADDUWM V30, V31, V30
+
+// Load the keys to be used for
+// encryption based on key_len.
+// Keys are in VS0 - VS14
+// depending on key_len.
+// Valid keys sizes are verified
+// here. CR2 is set and used
+// throughout to check key_len.
+#define LOAD_KEYS(blk_key, key_len) \
+	MOVD	$16, R16; \
+	MOVD	$32, R17; \
+	MOVD	$48, R18; \
+	MOVD	$64, R19; \
+	LXVD2X (blk_key)(R0), VS0; \
+	LXVD2X (blk_key)(R16), VS1; \
+	LXVD2X (blk_key)(R17), VS2; \
+	LXVD2X (blk_key)(R18), VS3; \
+	LXVD2X (blk_key)(R19), VS4; \
+	ADD $64, R16; \
+	ADD $64, R17; \
+	ADD $64, R18; \
+	ADD $64, R19; \
+	LXVD2X (blk_key)(R16), VS5; \
+	LXVD2X (blk_key)(R17), VS6; \
+	LXVD2X (blk_key)(R18), VS7; \
+	LXVD2X (blk_key)(R19), VS8; \
+	ADD $64, R16; \
+	ADD $64, R17; \
+	ADD $64, R18; \
+	ADD $64, R19; \
+	LXVD2X (blk_key)(R16), VS9; \
+	LXVD2X (blk_key)(R17), VS10; \
+	CMP key_len, $12, CR2; \
+	CMP key_len, $10; \
+	BEQ keysLoaded; \
+	LXVD2X (blk_key)(R18), VS11; \
+	LXVD2X (blk_key)(R19), VS12; \
+	BEQ CR2, keysLoaded; \
+	ADD $64, R16; \
+	ADD $64, R17; \
+	LXVD2X (blk_key)(R16), VS13; \
+	LXVD2X (blk_key)(R17), VS14; \
+	CMP key_len, $14; \
+	BEQ keysLoaded; \
+	MOVD R0,0(R0); \
+keysLoaded:
+
+// Encrypt 1 (vin) with first 9
+// keys from VS1 - VS9.
+#define VCIPHER_1X9_KEYS(vin) \
+	XXLOR VS1, VS1, V23; \
+	XXLOR VS2, VS2, V24; \
+	XXLOR VS3, VS3, V25; \
+	XXLOR VS4, VS4, V26; \
+	XXLOR VS5, VS5, V27; \
+	VCIPHER vin, V23, vin; \
+	VCIPHER vin, V24, vin; \
+	VCIPHER vin, V25, vin; \
+	VCIPHER vin, V26, vin; \
+	VCIPHER vin, V27, vin; \
+	XXLOR VS6, VS6, V23; \
+	XXLOR VS7, VS7, V24; \
+	XXLOR VS8, VS8, V25; \
+	XXLOR VS9, VS9, V26; \
+	VCIPHER vin, V23, vin; \
+	VCIPHER vin, V24, vin; \
+	VCIPHER vin, V25, vin; \
+	VCIPHER	vin, V26, vin
+
+// Encrypt 1 value (vin) with
+// 2 specified keys
+#define VCIPHER_1X2_KEYS(vin, key1, key2) \
+	XXLOR key1, key1, V25; \
+	XXLOR key2, key2, V26; \
+	VCIPHER vin, V25, vin; \
+	VCIPHER vin, V26, vin
+
+// Encrypt 4 values in V15 - V18
+// with the specified key from
+// VS1 - VS9.
+#define VCIPHER_4X1_KEY(key) \
+	XXLOR key, key, V23; \
+	VCIPHER V15, V23, V15; \
+	VCIPHER V16, V23, V16; \
+	VCIPHER V17, V23, V17; \
+	VCIPHER V18, V23, V18
+
+// Encrypt 8 values in V15 - V22
+// with the specified key,
+// assuming it is a VSreg
+#define VCIPHER_8X1_KEY(key) \
+	XXLOR key, key, V23; \
+	VCIPHER V15, V23, V15; \
+	VCIPHER V16, V23, V16; \
+	VCIPHER V17, V23, V17; \
+	VCIPHER V18, V23, V18; \
+	VCIPHER V19, V23, V19; \
+	VCIPHER V20, V23, V20; \
+	VCIPHER V21, V23, V21; \
+	VCIPHER V22, V23, V22
+
+// Load input block into V1-V4
+// in big endian order and
+// update blk_inp by 64.
+#define LOAD_INPUT_BLOCK64(blk_inp) \
+	MOVD $16, R16; \
+	MOVD $32, R17; \
+	MOVD $48, R18; \
+	P8_LXVB16X(blk_inp,R0,V1); \
+	P8_LXVB16X(blk_inp,R16,V2); \
+	P8_LXVB16X(blk_inp,R17,V3); \
+	P8_LXVB16X(blk_inp,R18,V4); \
+	ADD $64, blk_inp
+
+// Load input block into V1-V8
+// in big endian order and
+// Update blk_inp by 128
+#define LOAD_INPUT_BLOCK128(blk_inp) \
+	MOVD $16, R16; \
+	MOVD $32, R17; \
+	MOVD $48, R18; \
+	MOVD $64, R19; \
+	MOVD $80, R20; \
+	MOVD $96, R21; \
+	MOVD $112, R22; \
+	P8_LXVB16X(blk_inp,R0,V1); \
+	P8_LXVB16X(blk_inp,R16,V2); \
+	P8_LXVB16X(blk_inp,R17,V3); \
+	P8_LXVB16X(blk_inp,R18,V4); \
+	P8_LXVB16X(blk_inp,R19,V5); \
+	P8_LXVB16X(blk_inp,R20,V6); \
+	P8_LXVB16X(blk_inp,R21,V7); \
+	P8_LXVB16X(blk_inp,R22,V8); \
+	ADD $128, blk_inp
+
+// Finish encryption on 8 streams and
+// XOR with input block
+#define VCIPHERLAST8_XOR_INPUT \
+	VCIPHERLAST     V15, V23, V15; \
+	VCIPHERLAST     V16, V23, V16; \
+	VCIPHERLAST     V17, V23, V17; \
+	VCIPHERLAST     V18, V23, V18; \
+	VCIPHERLAST     V19, V23, V19; \
+	VCIPHERLAST     V20, V23, V20; \
+	VCIPHERLAST     V21, V23, V21; \
+	VCIPHERLAST     V22, V23, V22; \
+	XXLXOR          V1, V15, V1; \
+	XXLXOR          V2, V16, V2; \
+	XXLXOR          V3, V17, V3; \
+	XXLXOR          V4, V18, V4; \
+	XXLXOR          V5, V19, V5; \
+	XXLXOR          V6, V20, V6; \
+	XXLXOR          V7, V21, V7; \
+	XXLXOR          V8, V22, V8
+
+// Finish encryption on 4 streams and
+// XOR with input block
+#define VCIPHERLAST4_XOR_INPUT \
+	VCIPHERLAST     V15, V23, V15; \
+	VCIPHERLAST     V16, V23, V16; \
+	VCIPHERLAST     V17, V23, V17; \
+	VCIPHERLAST     V18, V23, V18; \
+	XXLXOR          V1, V15, V1; \
+	XXLXOR          V2, V16, V2; \
+	XXLXOR          V3, V17, V3; \
+	XXLXOR          V4, V18, V4
+
+// Store output block from V1-V8
+// in big endian order and
+// Update blk_out by 128
+#define STORE_OUTPUT_BLOCK128(blk_out) \
+	P8_STXVB16X(V1,blk_out,R0); \
+	P8_STXVB16X(V2,blk_out,R16); \
+	P8_STXVB16X(V3,blk_out,R17); \
+	P8_STXVB16X(V4,blk_out,R18); \
+	P8_STXVB16X(V5,blk_out,R19); \
+	P8_STXVB16X(V6,blk_out,R20); \
+	P8_STXVB16X(V7,blk_out,R21); \
+	P8_STXVB16X(V8,blk_out,R22); \
+	ADD $128, blk_out
+
+// Store output block from V1-V4
+// in big endian order and
+// Update blk_out by 64
+#define STORE_OUTPUT_BLOCK64(blk_out) \
+	P8_STXVB16X(V1,blk_out,R0); \
+	P8_STXVB16X(V2,blk_out,R16); \
+	P8_STXVB16X(V3,blk_out,R17); \
+	P8_STXVB16X(V4,blk_out,R18); \
+	ADD $64, blk_out
+
 // func gcmInit(productTable *[256]byte, h []byte)
 TEXT ·gcmInit(SB), NOSPLIT, $0-32
 	MOVD productTable+0(FP), XIP
@@ -588,3 +878,226 @@
 #endif
 	STXVD2X VXL, (XIP+R0)      // write out Xi
 	RET
+
+#define BLK_INP    R3
+#define BLK_OUT    R4
+#define BLK_KEY    R5
+#define KEY_LEN    R6
+#define BLK_IDX    R7
+#define IDX        R8
+#define IN_LEN     R9
+#define COUNTER    R10
+#define CONPTR     R14
+#define MASK       V5
+
+// Implementation of the counterCrypt function in assembler.
+// Original loop is unrolled to allow for multiple encryption
+// streams to be done in parallel, which is achieved by interleaving
+// vcipher instructions from each stream. This is also referred to as
+// stitching, and provides significant performance improvements.
+// Some macros are defined which enable execution for big or little
+// endian as well as different ISA targets.
+//func (g *gcmAsm) counterCrypt(out, in []byte, counter *[gcmBlockSize]byte, key[gcmBlockSize]uint32)
+//func counterCryptASM(xr, out, in, counter, key)
+TEXT ·counterCryptASM(SB), NOSPLIT, $16-72
+	MOVD	xr(FP), KEY_LEN
+	MOVD    out+8(FP), BLK_OUT
+	MOVD    out_len+16(FP), R8
+	MOVD    in+32(FP), BLK_INP
+	MOVD    in_len+40(FP), IN_LEN
+	MOVD    counter+56(FP), COUNTER
+	MOVD    key+64(FP), BLK_KEY
+
+// Set up permute string when needed.
+#ifdef NEEDS_ESPERM
+	MOVD    $·rcon(SB), R14
+	LVX     (R14), ESPERM   // Permute value for P8_ macros.
+#endif
+	SETUP_COUNTER		// V30 Counter V31 BE {0, 0, 0, 1}
+	LOAD_KEYS(BLK_KEY, KEY_LEN)	// VS1 - VS10/12/14 based on keysize
+	CMP     IN_LEN, $128
+	BLT	block64
+block128_loop:
+	// Do 8 encryptions in parallel by setting
+	// input values in V15-V22 and executing
+	// vcipher on the updated value and the keys.
+	GEN_VCIPHER_8_INPUTS
+	VCIPHER_8X1_KEY(VS1)
+	VCIPHER_8X1_KEY(VS2)
+	VCIPHER_8X1_KEY(VS3)
+	VCIPHER_8X1_KEY(VS4)
+	VCIPHER_8X1_KEY(VS5)
+	VCIPHER_8X1_KEY(VS6)
+	VCIPHER_8X1_KEY(VS7)
+	VCIPHER_8X1_KEY(VS8)
+	VCIPHER_8X1_KEY(VS9)
+	// Additional encryptions are done based on
+	// the key length, with the last key moved
+	// to V23 for use with VCIPHERLAST.
+	// CR2 = CMP key_len, $12
+	XXLOR VS10, VS10, V23
+	BLT	CR2, block128_last // key_len = 10
+	VCIPHER_8X1_KEY(VS10)
+	VCIPHER_8X1_KEY(VS11)
+	XXLOR VS12,VS12,V23
+	BEQ	CR2, block128_last // key_len = 12
+	VCIPHER_8X1_KEY(VS12)
+	VCIPHER_8X1_KEY(VS13)
+	XXLOR VS14,VS14,V23	// key_len = 14
+block128_last:
+	// vcipher encryptions are in V15-V22 at this
+	// point with vcipherlast remaining to be done.
+	// Load input block into V1-V8, setting index offsets
+	// in R16-R22 to use with the STORE.
+	LOAD_INPUT_BLOCK128(BLK_INP)
+	// Do VCIPHERLAST on the last key for each encryption
+	// stream and XOR the result with the corresponding
+	// value from the input block.
+	VCIPHERLAST8_XOR_INPUT
+	// Store the results (8*16) and update BLK_OUT by 128.
+	STORE_OUTPUT_BLOCK128(BLK_OUT)
+	ADD	$-128, IN_LEN	// input size
+	CMP     IN_LEN, $128	// check if >= blocksize
+	BGE	block128_loop	// next input block
+	CMP	IN_LEN, $0
+	BEQ	done
+block64:
+	CMP	IN_LEN, $64	// Check if >= 64
+	BLT	block16_loop
+	// Do 4 encryptions in parallel by setting
+	// input values in V15-V18 and executing
+	// vcipher on the updated value and the keys.
+	GEN_VCIPHER_4_INPUTS
+	VCIPHER_4X1_KEY(VS1)
+	VCIPHER_4X1_KEY(VS2)
+	VCIPHER_4X1_KEY(VS3)
+	VCIPHER_4X1_KEY(VS4)
+	VCIPHER_4X1_KEY(VS5)
+	VCIPHER_4X1_KEY(VS6)
+	VCIPHER_4X1_KEY(VS7)
+	VCIPHER_4X1_KEY(VS8)
+	VCIPHER_4X1_KEY(VS9)
+	// Check key length based on CR2
+	// Move last key to V23 for use with later vcipherlast
+	XXLOR	VS10, VS10, V23
+	BLT	CR2, block64_last	// size = 10
+	VCIPHER_4X1_KEY(VS10)		// Encrypt next 2 keys
+	VCIPHER_4X1_KEY(VS11)
+	XXLOR	VS12, VS12, V23
+	BEQ	CR2, block64_last	// size = 12
+	VCIPHER_4X1_KEY(VS12)		// Encrypt last 2 keys
+	VCIPHER_4X1_KEY(VS13)
+	XXLOR	VS14, VS14, V23		// size = 14
+block64_last:
+	LOAD_INPUT_BLOCK64(BLK_INP)	// Load 64 bytes of input
+	// Do VCIPHERLAST on the last key for each encryption
+	// stream and XOR the result with the corresponding
+	// value from the input block.
+	VCIPHERLAST4_XOR_INPUT
+	// Store the results (4*16) and update BLK_OUT by 64.
+	STORE_OUTPUT_BLOCK64(BLK_OUT)
+	ADD	$-64, IN_LEN		// decrement input block length
+	CMP	IN_LEN, $0		// check for remaining length
+	BEQ	done
+block16_loop:
+	CMP	IN_LEN, $16		// More input
+	BLT	final_block		// If not, then handle partial block
+	// Single encryption, no stitching
+	GEN_VCIPHER_INPUT		// Generate input value for single encryption
+	VCIPHER_1X9_KEYS(V15)		// Encrypt V15 value with 9 keys
+	XXLOR	VS10, VS10, V23		// Last key -> V23 for later vcipherlast
+	// Key length based on CR2. (LT=10, EQ=12, GT=14)
+	BLT	CR2, block16_last	// Finish for key size 10
+	VCIPHER_1X2_KEYS(V15, VS10, VS11) // Encrypt V15 with 2 more keys
+	XXLOR	VS12, VS12, V23		// Last key -> V23 for later vcipherlast
+	BEQ	CR2, block16_last	// Finish for key size 12
+	VCIPHER_1X2_KEYS(V15, VS12, VS13) // Encrypt V15 with last 2 keys
+	XXLOR	VS14, VS14, V23		// Last key -> V23 for vcipherlast with key size 14
+block16_last:
+	P8_LXVB16X(BLK_INP, R0, V1)	// Load input
+	VCIPHERLAST V15, V23, V15	// Encrypt last value in V23
+	XXLXOR	V15, V1, V1		// XOR with input
+	P8_STXVB16X(V1,R0,BLK_OUT)	// Store final encryption value to output
+	ADD	$16, BLK_INP		// Increment input pointer
+	ADD	$16, BLK_OUT		// Increment output pointer
+	ADD	$-16, IN_LEN		// Decrement input length
+	BR	block16_loop		// Check for next
+final_block:
+	CMP	IN_LEN, $0
+	BEQ	done
+	GEN_VCIPHER_INPUT		// Generate input value for partial encryption
+	VCIPHER_1X9_KEYS(V15)		// Encrypt V15 with 9 keys
+	XXLOR	VS10, VS10, V23		// Save possible last key
+	BLT	CR2, final_block_last
+	VCIPHER_1X2_KEYS(V15, VS10, VS11)	// Encrypt V15 with next 2 keys
+	XXLOR	VS12, VS12, V23		// Save possible last key
+	BEQ	CR2, final_block_last
+	VCIPHER_1X2_KEYS(V15, VS12, VS13) // Encrypt V15 with last 2 keys
+	XXLOR	VS14, VS14, V23		// Save last key
+final_block_last:
+	VCIPHERLAST V15, V23, V15	// Finish encryption
+#ifdef GOPPC64_power10
+	// set up length
+	SLD	$56, IN_LEN, R17
+	LXVLL	BLK_INP, R17, V25
+	VXOR	V25, V15, V25
+	STXVLL	V25, BLK_OUT, R17
+#else
+	ADD	$32, R1, MASK_PTR
+	MOVD	$0, R16
+	P8_STXVB16X(V15, MASK_PTR, R0)
+	CMP	IN_LEN, $8
+	BLT	next4
+	MOVD	0(MASK_PTR), R14
+	MOVD	0(BLK_INP), R15
+	XOR	R14, R15, R14
+	MOVD	R14, 0(BLK_OUT)
+	ADD	$8, R16
+	ADD	$-8, IN_LEN
+next4:
+	CMP	IN_LEN, $4
+	BLT	next2
+	MOVWZ	(BLK_INP)(R16), R15
+	MOVWZ	(MASK_PTR)(R16), R14
+	XOR	R14, R15, R14
+	MOVW	R14, (R16)(BLK_OUT)
+	ADD	$4, R16
+	ADD	$-4, IN_LEN
+next2:
+	CMP	IN_LEN, $2
+	BLT	next1
+	MOVHZ	(BLK_INP)(R16), R15
+	MOVHZ	(MASK_PTR)(R16), R14
+	XOR	R14, R15, R14
+	MOVH	R14, (R16)(BLK_OUT)
+	ADD	$2, R16
+	ADD	$-2, IN_LEN
+next1:
+	CMP	IN_LEN, $1
+	BLT	done
+	MOVBZ	(MASK_PTR)(R16), R14
+	MOVBZ	(BLK_INP)(R16), R15
+	XOR	R14, R15, R14
+	MOVB	R14, (R16)(BLK_OUT)
+#endif
+done:
+	// Save the updated counter value
+	P8_STXVB16X(V30, COUNTER, R0)
+	// Clear the keys
+	XXLXOR	VS0, VS0, VS0
+	XXLXOR	VS1, VS1, VS1
+	XXLXOR	VS2, VS2, VS2
+	XXLXOR	VS3, VS3, VS3
+	XXLXOR	VS4, VS4, VS4
+	XXLXOR	VS5, VS5, VS5
+	XXLXOR	VS6, VS6, VS6
+	XXLXOR	VS7, VS7, VS7
+	XXLXOR	VS8, VS8, VS8
+	XXLXOR	VS9, VS9, VS9
+	XXLXOR	VS10, VS10, VS10
+	XXLXOR	VS11, VS11, VS11
+	XXLXOR	VS12, VS12, VS12
+	XXLXOR	VS13, VS13, VS13
+	XXLXOR	VS14, VS14, VS14
+	RET
+
diff --git a/src/crypto/aes/gcm_s390x.go b/src/crypto/aes/gcm_s390x.go
index d95f169..cf0e28a 100644
--- a/src/crypto/aes/gcm_s390x.go
+++ b/src/crypto/aes/gcm_s390x.go
@@ -57,7 +57,7 @@
 var _ gcmAble = (*aesCipherAsm)(nil)
 
 // NewGCM returns the AES cipher wrapped in Galois Counter Mode. This is only
-// called by crypto/cipher.NewGCM via the gcmAble interface.
+// called by [crypto/cipher.NewGCM] via the gcmAble interface.
 func (c *aesCipherAsm) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
 	var hk gcmHashKey
 	c.Encrypt(hk[:], hk[:])
@@ -200,7 +200,7 @@
 	}
 }
 
-// Seal encrypts and authenticates plaintext. See the cipher.AEAD interface for
+// Seal encrypts and authenticates plaintext. See the [cipher.AEAD] interface for
 // details.
 func (g *gcmAsm) Seal(dst, nonce, plaintext, data []byte) []byte {
 	if len(nonce) != g.nonceSize {
@@ -229,7 +229,7 @@
 	return ret
 }
 
-// Open authenticates and decrypts ciphertext. See the cipher.AEAD interface
+// Open authenticates and decrypts ciphertext. See the [cipher.AEAD] interface
 // for details.
 func (g *gcmAsm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
 	if len(nonce) != g.nonceSize {
@@ -301,7 +301,7 @@
 //go:noescape
 func kmaGCM(fn code, key, dst, src, aad []byte, tag *[16]byte, cnt *gcmCount)
 
-// Seal encrypts and authenticates plaintext. See the cipher.AEAD interface for
+// Seal encrypts and authenticates plaintext. See the [cipher.AEAD] interface for
 // details.
 func (g *gcmKMA) Seal(dst, nonce, plaintext, data []byte) []byte {
 	if len(nonce) != g.nonceSize {
@@ -326,7 +326,7 @@
 	return ret
 }
 
-// Open authenticates and decrypts ciphertext. See the cipher.AEAD interface
+// Open authenticates and decrypts ciphertext. See the [cipher.AEAD] interface
 // for details.
 func (g *gcmKMA) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
 	if len(nonce) != g.nonceSize {
diff --git a/src/crypto/boring/notboring_test.go b/src/crypto/boring/notboring_test.go
index ffe18e9..0701628 100644
--- a/src/crypto/boring/notboring_test.go
+++ b/src/crypto/boring/notboring_test.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (goexperiment.boringcrypto && !boringcrypto) || (!goexperiment.boringcrypto && boringcrypto)
-// +build goexperiment.boringcrypto,!boringcrypto !goexperiment.boringcrypto,boringcrypto
 
 package boring_test
 
diff --git a/src/crypto/cipher/cfb.go b/src/crypto/cipher/cfb.go
index aae3575..7e3f969 100644
--- a/src/crypto/cipher/cfb.go
+++ b/src/crypto/cipher/cfb.go
@@ -50,15 +50,15 @@
 	}
 }
 
-// NewCFBEncrypter returns a Stream which encrypts with cipher feedback mode,
-// using the given Block. The iv must be the same length as the Block's block
+// NewCFBEncrypter returns a [Stream] which encrypts with cipher feedback mode,
+// using the given [Block]. The iv must be the same length as the [Block]'s block
 // size.
 func NewCFBEncrypter(block Block, iv []byte) Stream {
 	return newCFB(block, iv, false)
 }
 
-// NewCFBDecrypter returns a Stream which decrypts with cipher feedback mode,
-// using the given Block. The iv must be the same length as the Block's block
+// NewCFBDecrypter returns a [Stream] which decrypts with cipher feedback mode,
+// using the given [Block]. The iv must be the same length as the [Block]'s block
 // size.
 func NewCFBDecrypter(block Block, iv []byte) Stream {
 	return newCFB(block, iv, true)
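
A minimal CFB round trip using the constructors documented above; the IV must be exactly one block long (illustrative only, not part of this patch; in practice use a unique random IV per message):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 16)
	rand.Read(key)
	block, _ := aes.NewCipher(key)

	iv := make([]byte, block.BlockSize()) // must be exactly one block long
	rand.Read(iv)

	plaintext := []byte("cipher feedback mode")
	ciphertext := make([]byte, len(plaintext))
	cipher.NewCFBEncrypter(block, iv).XORKeyStream(ciphertext, plaintext)

	decrypted := make([]byte, len(ciphertext))
	cipher.NewCFBDecrypter(block, iv).XORKeyStream(decrypted, ciphertext)
	fmt.Printf("%s\n", decrypted)
}
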
diff --git a/src/crypto/cipher/ctr.go b/src/crypto/cipher/ctr.go
index 3ac0ff7..eac8e26 100644
--- a/src/crypto/cipher/ctr.go
+++ b/src/crypto/cipher/ctr.go
@@ -34,8 +34,8 @@
 	NewCTR(iv []byte) Stream
 }
 
-// NewCTR returns a Stream which encrypts/decrypts using the given Block in
-// counter mode. The length of iv must be the same as the Block's block size.
+// NewCTR returns a [Stream] which encrypts/decrypts using the given [Block] in
+// counter mode. The length of iv must be the same as the [Block]'s block size.
 func NewCTR(block Block, iv []byte) Stream {
 	if ctr, ok := block.(ctrAble); ok {
 		return ctr.NewCTR(iv)
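
A minimal CTR round trip for NewCTR as documented above; re-creating the Stream with the same key and IV regenerates the keystream, so the same XOR decrypts (illustrative only, not part of this patch; never reuse a key/IV pair on real data):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	key := make([]byte, 32) // AES-256 for illustration
	block, _ := aes.NewCipher(key)

	iv := make([]byte, block.BlockSize()) // all-zero IV, demonstration only
	stream := cipher.NewCTR(block, iv)

	plaintext := []byte("counter mode is a stream cipher")
	ciphertext := make([]byte, len(plaintext))
	stream.XORKeyStream(ciphertext, plaintext)

	// The same key/IV pair reproduces the keystream, so XOR decrypts.
	stream = cipher.NewCTR(block, iv)
	decrypted := make([]byte, len(ciphertext))
	stream.XORKeyStream(decrypted, ciphertext)
	fmt.Printf("%s\n", decrypted)
}
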
diff --git a/src/crypto/cipher/gcm.go b/src/crypto/cipher/gcm.go
index 477d26a..928771f 100644
--- a/src/crypto/cipher/gcm.go
+++ b/src/crypto/cipher/gcm.go
@@ -80,8 +80,8 @@
 // with the standard nonce length.
 //
 // In general, the GHASH operation performed by this implementation of GCM is not constant-time.
-// An exception is when the underlying Block was created by aes.NewCipher
-// on systems with hardware support for AES. See the crypto/aes package documentation for details.
+// An exception is when the underlying [Block] was created by aes.NewCipher
+// on systems with hardware support for AES. See the [crypto/aes] package documentation for details.
 func NewGCM(cipher Block) (AEAD, error) {
 	return newGCMWithNonceAndTagSize(cipher, gcmStandardNonceSize, gcmTagSize)
 }
@@ -92,7 +92,7 @@
 //
 // Only use this function if you require compatibility with an existing
 // cryptosystem that uses non-standard nonce lengths. All other users should use
-// NewGCM, which is faster and more resistant to misuse.
+// [NewGCM], which is faster and more resistant to misuse.
 func NewGCMWithNonceSize(cipher Block, size int) (AEAD, error) {
 	return newGCMWithNonceAndTagSize(cipher, size, gcmTagSize)
 }
@@ -104,7 +104,7 @@
 //
 // Only use this function if you require compatibility with an existing
 // cryptosystem that uses non-standard tag lengths. All other users should use
-// NewGCM, which is more resistant to misuse.
+// [NewGCM], which is more resistant to misuse.
 func NewGCMWithTagSize(cipher Block, tagSize int) (AEAD, error) {
 	return newGCMWithNonceAndTagSize(cipher, gcmStandardNonceSize, tagSize)
 }
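
A minimal Seal/Open round trip for NewGCM as documented above, using the standard 12-byte nonce and 16-byte tag (illustrative only, not part of this patch; generate a fresh random nonce per message):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 16)
	rand.Read(key)

	block, _ := aes.NewCipher(key)
	aead, err := cipher.NewGCM(block) // standard 12-byte nonce, 16-byte tag
	if err != nil {
		panic(err)
	}

	nonce := make([]byte, aead.NonceSize())
	rand.Read(nonce)

	ciphertext := aead.Seal(nil, nonce, []byte("hello, gcm"), nil)
	plaintext, err := aead.Open(nil, nonce, ciphertext, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext)
}
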
diff --git a/src/crypto/cipher/gcm_test.go b/src/crypto/cipher/gcm_test.go
index 0d53e47..3556146 100644
--- a/src/crypto/cipher/gcm_test.go
+++ b/src/crypto/cipher/gcm_test.go
@@ -19,13 +19,27 @@
 var aesGCMTests = []struct {
 	key, nonce, plaintext, ad, result string
 }{
-	{
+	{ // key=16, plaintext=null
 		"11754cd72aec309bf52f7687212e8957",
 		"3c819d9a9bed087615030b65",
 		"",
 		"",
 		"250327c674aaf477aef2675748cf6971",
 	},
+	{ // key=24, plaintext=null
+		"e2e001a36c60d2bf40d69ff5b2b1161ea218db263be16a4e",
+		"3c819d9a9bed087615030b65",
+		"",
+		"",
+		"c7b8da1fe2e3dccc4071ba92a0a57ba8",
+	},
+	{ // key=32, plaintext=null
+		"5394e890d37ba55ec9d5f327f15680f6a63ef5279c79331643ad0af6d2623525",
+		"3c819d9a9bed087615030b65",
+		"",
+		"",
+		"d9b260d4bc4630733ffb642f5ce45726",
+	},
 	{
 		"ca47248ac0b6f8372a97ac43508308ed",
 		"ffd2b598feabc9019262d2be",
@@ -68,7 +82,8 @@
 		"c94c410194c765e3dcc7964379758ed3",
 		"94dca8edfcf90bb74b153c8d48a17930",
 	},
-	{
+
+	{ // key=16, plaintext=16
 		"7fddb57453c241d03efbed3ac44e371c",
 		"ee283a3fc75575e33efd4887",
 		"d5de42b461646c255c87bd2962d3b9a2",
@@ -82,7 +97,43 @@
 		"",
 		"0e1bde206a07a9c2c1b65300f8c649972b4401346697138c7a4891ee59867d0c",
 	},
-	{
+	{ // key=24, plaintext=16
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c",
+		"54cc7dc2c37ec006bcc6d1da",
+		"007c5e5b3e59df24a7c355584fc1518d",
+		"",
+		"7bd53594c28b6c6596feb240199cad4c9badb907fd65bde541b8df3bd444d3a8",
+	},
+	{ // key=32, plaintext=16
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+		"54cc7dc2c37ec006bcc6d1da",
+		"007c5e5b3e59df24a7c355584fc1518d",
+		"",
+		"d50b9e252b70945d4240d351677eb10f937cdaef6f2822b6a3191654ba41b197",
+	},
+	{ // key=16, plaintext=23
+		"ab72c77b97cb5fe9a382d9fe81ffdbed",
+		"54cc7dc2c37ec006bcc6d1da",
+		"007c5e5b3e59df24a7c355584fc1518dabcdefab",
+		"",
+		"0e1bde206a07a9c2c1b65300f8c64997b73381a6ff6bc24c5146fbd73361f4fe",
+	},
+	{ // key=24, plaintext=23
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c",
+		"54cc7dc2c37ec006bcc6d1da",
+		"007c5e5b3e59df24a7c355584fc1518dabcdefab",
+		"",
+		"7bd53594c28b6c6596feb240199cad4c23b86a96d423cffa929e68541dc16b28",
+	},
+	{ // key=32, plaintext=23
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+		"54cc7dc2c37ec006bcc6d1da",
+		"007c5e5b3e59df24a7c355584fc1518dabcdefab",
+		"",
+		"d50b9e252b70945d4240d351677eb10f27fd385388ad3b72b96a2d5dea1240ae",
+	},
+
+	{ // key=16, plaintext=51
 		"fe47fcce5fc32665d2ae399e4eec72ba",
 		"5adb9609dbaeb58cbd6e7275",
 		"7c0e88c88899a779228465074797cd4c2e1498d259b54390b85e3eef1c02df60e743f1b840382c4bccaf3bafb4ca8429bea063",
@@ -110,78 +161,172 @@
 		"0a8a18a7150e940c3d87b38e73baee9a5c049ee21795663e264b694a949822b639092d0e67015e86363583fcf0ca645af9f43375f05fdb4ce84f411dcbca73c2220dea03a20115d2e51398344b16bee1ed7c499b353d6c597af8",
 		"aaadbd5c92e9151ce3db7210b8714126b73e43436d242677afa50384f2149b831f1d573c7891c2a91fbc48db29967ec9542b2321b51ca862cb637cdd03b99a0f93b134",
 	},
-	{
+	{ //key=24 plaintext=51
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c",
+		"e1934f5db57cc983e6b180e7",
+		"73ed042327f70fe9c572a61545eda8b2a0c6e1d6c291ef19248e973aee6c312012f490c2c6f6166f4a59431e182663fcaea05a",
+		"0a8a18a7150e940c3d87b38e73baee9a5c049ee21795663e264b694a949822b639092d0e67015e86363583fcf0ca645af9f43375f05fdb4ce84f411dcbca73c2220dea03a20115d2e51398344b16bee1ed7c499b353d6c597af8",
+		"0736378955001d50773305975b3a534a4cd3614dd7300916301ae508cb7b45aa16e79435ca16b5557bcad5991bc52b971806863b15dc0b055748919b8ee91bc8477f68",
+	},
+	{ //key=32 plaintext=51
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+		"e1934f5db57cc983e6b180e7",
+		"73ed042327f70fe9c572a61545eda8b2a0c6e1d6c291ef19248e973aee6c312012f490c2c6f6166f4a59431e182663fcaea05a",
+		"0a8a18a7150e940c3d87b38e73baee9a5c049ee21795663e264b694a949822b639092d0e67015e86363583fcf0ca645af9f43375f05fdb4ce84f411dcbca73c2220dea03a20115d2e51398344b16bee1ed7c499b353d6c597af8",
+		"fc1ae2b5dcd2c4176c3f538b4c3cc21197f79e608cc3730167936382e4b1e5a7b75ae1678bcebd876705477eb0e0fdbbcda92fb9a0dc58c8d8f84fb590e0422e6077ef",
+	},
+	{ //key=16 plaintext=138
+		"d9f7d2411091f947b4d6f1e2d1f0fb2e",
+		"e1934f5db57cc983e6b180e7",
+		"67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b3aabbccddee",
+		"0a8a18a7150e940c3d87b38e73baee9a5c049ee21795663e264b694a949822b639092d0e67015e86363583fcf0ca645af9f43375f05fdb4ce84f411dcbca73c2220dea03a20115d2e51398344b16bee1ed7c499b353d6c597af8",
+		"be86d00ce4e150190f646eae0f670ad26b3af66db45d2ee3fd71badd2fe763396bdbca498f3f779c70b80ed2695943e15139b406e5147b3855a1441dfb7bd64954b581e3db0ddf26b1c759e2276a4c18a8e4ad4b890f473e61c78e60074bd0633961e87e66d0a1be77c51ab6b9bb3318ccdd43794ffc18a03a83c1d368eeea590a13407c7ef48efc66e26047f3ab9deed0412ce89e",
+	},
+	{ //key=24 plaintext=138
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c",
+		"e1934f5db57cc983e6b180e7",
+		"67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b3aabbccddee",
+		"0a8a18a7150e940c3d87b38e73baee9a5c049ee21795663e264b694a949822b639092d0e67015e86363583fcf0ca645af9f43375f05fdb4ce84f411dcbca73c2220dea03a20115d2e51398344b16bee1ed7c499b353d6c597af8",
+		"131d5ad9230858559b8c1929ec2c18be90d7d4630e49018262ce5c511688bd10622109403db8006014ce93905b0a16bf1d1411acc9e14edf09518bd5967ff4bc202805d4c2810810a093e996a0f56c9a3e3e593c783f68528c1a282ff6f4925902bb2b3d4cdd04b873663bf5fd9dd53b5df462e0424d038f249b10a99c0523200f8c92c3e8a178a25ee8e23b71308c88ec2cfe047e",
+	},
+	{ //key=32 plaintext=138
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+		"e1934f5db57cc983e6b180e7",
+		"67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b3aabbccddee",
+		"0a8a18a7150e940c3d87b38e73baee9a5c049ee21795663e264b694a949822b639092d0e67015e86363583fcf0ca645af9f43375f05fdb4ce84f411dcbca73c2220dea03a20115d2e51398344b16bee1ed7c499b353d6c597af8",
+		"e8318fe5aada811280804f35fb2a89e54bf32b4e55ba7b953547dadb39421d1dc39c7c127c6008b208010177f02fc093c8bbb8b3834d0e060d96dda96ba386c7c01224a4cac1edebffda4f9a64692bfbffb9f7c2999069fab84205224978a10d815d5ab8fa31e4e11630ba01c3b6cb99bef5772357ce86b83b4fb45ea7146402d560b6ad07de635b9366865e788a6bcdb132dcd079",
+	},
+	{ // key=16, plaintext=13
 		"fe9bb47deb3a61e423c2231841cfd1fb",
 		"4d328eb776f500a2f7fb47aa",
 		"f1cc3818e421876bb6b8bbd6c9",
 		"",
 		"b88c5c1977b35b517b0aeae96743fd4727fe5cdb4b5b42818dea7ef8c9",
 	},
-	{
+	{ // key=16, plaintext=13
 		"6703df3701a7f54911ca72e24dca046a",
 		"12823ab601c350ea4bc2488c",
 		"793cd125b0b84a043e3ac67717",
 		"",
 		"b2051c80014f42f08735a7b0cd38e6bcd29962e5f2c13626b85a877101",
 	},
+	{ // key=24, plaintext=13
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c",
+		"12823ab601c350ea4bc2488c",
+		"793cd125b0b84a043e3ac67717",
+		"",
+		"e888c2f438caedd4189d26c59f53439b8a7caec29e98c33ebf7e5712d6",
+	},
+	{ // key=32, plaintext=13
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+		"12823ab601c350ea4bc2488c",
+		"793cd125b0b84a043e3ac67717",
+		"",
+		"e796c39074c7783a38193e3f8d46b355adacca7198d16d879fbfeac6e3",
+	},
+
 	// These cases test non-standard nonce sizes.
-	{
+	{ // key=16, plaintext=0
 		"1672c3537afa82004c6b8a46f6f0d026",
 		"05",
 		"",
 		"",
 		"8e2ad721f9455f74d8b53d3141f27e8e",
 	},
-	{
+	{ //key=16, plaintext=32
 		"9a4fea86a621a91ab371e492457796c0",
 		"75",
 		"ca6131faf0ff210e4e693d6c31c109fc5b6f54224eb120f37de31dc59ec669b6",
 		"4f6e2585c161f05a9ae1f2f894e9f0ab52b45d0f",
 		"5698c0a384241d30004290aac56bb3ece6fe8eacc5c4be98954deb9c3ff6aebf5d50e1af100509e1fba2a5e8a0af9670",
 	},
-	{
+	{ //key=24, plaintext=32
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c",
+		"75",
+		"ca6131faf0ff210e4e693d6c31c109fc5b6f54224eb120f37de31dc59ec669b6",
+		"4f6e2585c161f05a9ae1f2f894e9f0ab52b45d0f",
+		"2709b357ec8334a074dbd5c4c352b216cfd1c8bd66343c5d43bfc6bd3b2b6cd0e3a82315d56ea5e4961c9ef3bc7e4042",
+	},
+	{ //key=32, plaintext=32
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+		"75",
+		"ca6131faf0ff210e4e693d6c31c109fc5b6f54224eb120f37de31dc59ec669b6",
+		"4f6e2585c161f05a9ae1f2f894e9f0ab52b45d0f",
+		"d73bebe722c5e312fe910ba71d5a6a063a4297203f819103dfa885a8076d095545a999affde3dbac2b5be6be39195ed0",
+	},
+	{ // key=16, plaintext=0
 		"d0f1f4defa1e8c08b4b26d576392027c",
 		"42b4f01eb9f5a1ea5b1eb73b0fb0baed54f387ecaa0393c7d7dffc6af50146ecc021abf7eb9038d4303d91f8d741a11743166c0860208bcc02c6258fd9511a2fa626f96d60b72fcff773af4e88e7a923506e4916ecbd814651e9f445adef4ad6a6b6c7290cc13b956130eef5b837c939fcac0cbbcc9656cd75b13823ee5acdac",
 		"",
 		"",
 		"7ab49b57ddf5f62c427950111c5c4f0d",
 	},
-	{
+	{ //key=16, plaintext=13
 		"4a0c00a3d284dea9d4bf8b8dde86685e",
 		"f8cbe82588e784bcacbe092cd9089b51e01527297f635bf294b3aa787d91057ef23869789698ac960707857f163ecb242135a228ad93964f5dc4a4d7f88fd7b3b07dd0a5b37f9768fb05a523639f108c34c661498a56879e501a2321c8a4a94d7e1b89db255ac1f685e185263368e99735ebe62a7f2931b47282be8eb165e4d7",
 		"6d4bf87640a6a48a50d28797b7",
 		"8d8c7ffc55086d539b5a8f0d1232654c",
 		"0d803ec309482f35b8e6226f2b56303239298e06b281c2d51aaba3c125",
 	},
-	{
+	{ //key=16, plaintext=128
 		"0e18a844ac5bf38e4cd72d9b0942e506",
 		"0870d4b28a2954489a0abcd5",
 		"67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b3",
 		"05eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea9",
 		"cace28f4976afd72e3c5128167eb788fbf6634dda0a2f53148d00f6fa557f5e9e8f736c12e450894af56cb67f7d99e1027258c8571bd91ee3b7360e0d508aa1f382411a16115f9c05251cc326d4016f62e0eb8151c048465b0c6c8ff12558d43310e18b2cb1889eec91557ce21ba05955cf4c1d4847aadfb1b0a83f3a3b82b7efa62a5f03c5d6eda381a85dd78dbc55c",
 	},
-	{
+	{ //key=24, plaintext=128
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c",
+		"0870d4b28a2954489a0abcd5",
+		"67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b3",
+		"05eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea9",
+		"303157d398376a8d51e39eabdd397f45b65f81f09acbe51c726ae85867e1675cad178580bb31c7f37c1af3644bd36ac436e9459139a4903d95944f306e415da709134dccde9d2b2d7d196b6740c196d9d10caa45296cf577a6e15d7ddf3576c20c503617d6a9e6b6d2be09ae28410a1210700a463a5b3b8d391abe9dac217e76a6f78306b5ebe759a5986b7d6682db0b",
+	},
+	{ //key=32, plaintext=128
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+		"0870d4b28a2954489a0abcd5",
+		"67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b3",
+		"05eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea9",
+		"e4f13934744125b9c35935ed4c5ac7d0c16434d52eadef1da91c6abb62bc757f01e3e42f628f030d750826adceb961f0675b81de48376b181d8781c6a0ccd0f34872ef6901b97ff7c2e152426b3257fb91f6a43f47befaaf7a2136fd0c97de8c48517ce047a5641141092c717b151b44f0794a164b5861f0a77271d1bdbc332e9e43d3b9828ccfdbd4ae338da5baf7a9",
+	},
+
+	{ //key=16, plaintext=512
 		"1f6c3a3bc0542aabba4ef8f6c7169e73",
 		"f3584606472b260e0dd2ebb2",
 		"67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b305eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea90870d4b28a2954489a0abcd50e18a844ac5bf38e4cd72d9b0942e506c433afcda3847f2dadd47647de321cec4ac430f62023856cfbb20704f4ec0bb920ba86c33e05f1ecd96733b79950a3e314d3d934f75ea0f210a8f6059401beb4bc4478fa4969e623d01ada696a7e4c7e5125b34884533a94fb319990325744ee9bbce9e525cf08f5e9e25e5360aad2b2d085fa54d835e8d466826498d9a8877565705a8a3f62802944de7ca5894e5759d351adac869580ec17e485f18c0c66f17cc07cbb22fce466da610b63af62bc83b4692f3affaf271693ac071fb86d11342d8def4f89d4b66335c1c7e4248367d8ed9612ec453902d8e50af89d7709d1a596c1f41f",
 		"95aa82ca6c49ae90cd1668baac7aa6f2b4a8ca99b2c2372acb08cf61c9c3805e6e0328da4cd76a19edd2d3994c798b0022569ad418d1fee4d9cd45a391c601ffc92ad91501432fee150287617c13629e69fc7281cd7165a63eab49cf714bce3a75a74f76ea7e64ff81eb61fdfec39b67bf0de98c7e4e32bdf97c8c6ac75ba43c02f4b2ed7216ecf3014df000108b67cf99505b179f8ed4980a6103d1bca70dbe9bbfab0ed59801d6e5f2d6f67d3ec5168e212e2daf02c6b963c98a1f7097de0c56891a2b211b01070dd8fd8b16c2a1a4e3cfd292d2984b3561d555d16c33ddc2bcf7edde13efe520c7e2abdda44d81881c531aeeeb66244c3b791ea8acfb6a68",
 		"55864065117e07650ca650a0f0d9ef4b02aee7c58928462fddb49045bf85355b4653fa26158210a7f3ef5b3ca48612e8b7adf5c025c1b821960af770d935df1c9a1dd25077d6b1c7f937b2e20ce981b07980880214698f3fad72fa370b3b7da257ce1d0cf352bc5304fada3e0f8927bd4e5c1abbffa563bdedcb567daa64faaed748cb361732200ba3506836a3c1c82aafa14c76dc07f6c4277ff2c61325f91fdbd6c1883e745fcaadd5a6d692eeaa5ad56eead6a9d74a595d22757ed89532a4b8831e2b9e2315baea70a9b95d228f09d491a5ed5ab7076766703457e3159bbb9b17b329525669863153079448c68cd2f200c0be9d43061a60639cb59d50993d276c05caaa565db8ce633b2673e4012bebbca02b1a64d779d04066f3e949ece173825885ec816468c819a8129007cc05d8785c48077d09eb1abcba14508dde85a6f16a744bc95faef24888d53a8020515ab20307efaecbdf143a26563c67989bceedc2d6d2bb9699bb6c615d93767e4158c1124e3b6c723aaa47796e59a60d3696cd85adfae9a62f2c02c22009f80ed494bdc587f31dd892c253b5c6d6b7db078fa72d23474ee54f8144d6561182d71c862941dbc0b2cb37a4d4b23cbad5637e6be901cc73f16d5aec39c60dddee631511e57b47520b61ae1892d2d1bd2b486e30faec892f171b6de98d96108016fac805604761f8e74742b3bb7dc8a290a46bf697c3e4446e6e65832cbae7cf1aaad1",
 	},
-	{
+	{ //key=24, plaintext=512
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c",
+		"f3584606472b260e0dd2ebb2",
+		"67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b305eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea90870d4b28a2954489a0abcd50e18a844ac5bf38e4cd72d9b0942e506c433afcda3847f2dadd47647de321cec4ac430f62023856cfbb20704f4ec0bb920ba86c33e05f1ecd96733b79950a3e314d3d934f75ea0f210a8f6059401beb4bc4478fa4969e623d01ada696a7e4c7e5125b34884533a94fb319990325744ee9bbce9e525cf08f5e9e25e5360aad2b2d085fa54d835e8d466826498d9a8877565705a8a3f62802944de7ca5894e5759d351adac869580ec17e485f18c0c66f17cc07cbb22fce466da610b63af62bc83b4692f3affaf271693ac071fb86d11342d8def4f89d4b66335c1c7e4248367d8ed9612ec453902d8e50af89d7709d1a596c1f41f",
+		"95aa82ca6c49ae90cd1668baac7aa6f2b4a8ca99b2c2372acb08cf61c9c3805e6e0328da4cd76a19edd2d3994c798b0022569ad418d1fee4d9cd45a391c601ffc92ad91501432fee150287617c13629e69fc7281cd7165a63eab49cf714bce3a75a74f76ea7e64ff81eb61fdfec39b67bf0de98c7e4e32bdf97c8c6ac75ba43c02f4b2ed7216ecf3014df000108b67cf99505b179f8ed4980a6103d1bca70dbe9bbfab0ed59801d6e5f2d6f67d3ec5168e212e2daf02c6b963c98a1f7097de0c56891a2b211b01070dd8fd8b16c2a1a4e3cfd292d2984b3561d555d16c33ddc2bcf7edde13efe520c7e2abdda44d81881c531aeeeb66244c3b791ea8acfb6a68",
+		"9daa466c7174dfde72b435fb6041ed7ff8ab8b1b96edb90437c3cc2e7e8a7c2c3629bae3bcaede99ee926ef4c55571e504e1c516975f6c719611c4da74acc23bbc79b3a67491f84d573e0293aa0cf5d775dde93fc466d5babd3e93a6506c0261021ac184f571ab190df83c32b41a67eaaa8dde27c02b08f15cabc75e46d1f9634f32f9233b2cb975386ff3a5e16b6ea2e2e4215cb33beb4de39a861d7f4a02165cd763f8252b2d60ac45d65a70735a8806a8fec3ca9d37c2cdcb21d2bd5c08d350e4bbdfb11dca344b9bee17e71ee0df3449fd9f9581c6b5483843b457534afb4240585f38ac22aa59a68a167fed6f1be0a5b072b2461f16c976b9aa0f5f2f5988818b01faa025ac7788212d92d222f7c14fe6e8f644c8cd117bb8def5a0217dad4f05cbb334ff9ccf819a4a085ed7c19928ddc40edc931b47339f456ccd423b5c0c1cdc96278006b29de945cdceb0737771e14562fff2aba40606f6046da5031647308682060412812317962bb68be3b42876f0905d52da51ec6345677fe86613828f488cc5685a4b973e48babd109a56d1a1effb286133dc2a94b4ada5707d3a7825941fea1a7502693afc7fe5d810bb0050d98aa6b80801e13b563954a35c31f57d5ba1ddb1a2be26426e2fe7bcd13ba183d80ac1c556b7ec2069b01de1450431a1c2e27848e1f5f4af013bce9080aebd2bb0f1de9f7bb460771c266d48ff4cf84a66f82630657db861c032971079",
+	},
+	{ //key=32, plaintext=512
+		"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+		"f3584606472b260e0dd2ebb2",
+		"67c6697351ff4aec29cdbaabf2fbe3467cc254f81be8e78d765a2e63339fc99a66320db73158a35a255d051758e95ed4abb2cdc69bb454110e827441213ddc8770e93ea141e1fc673e017e97eadc6b968f385c2aecb03bfb32af3c54ec18db5c021afe43fbfaaa3afb29d1e6053c7c9475d8be6189f95cbba8990f95b1ebf1b305eff700e9a13ae5ca0bcbd0484764bd1f231ea81c7b64c514735ac55e4b79633b706424119e09dcaad4acf21b10af3b33cde3504847155cbb6f2219ba9b7df50be11a1c7f23f829f8a41b13b5ca4ee8983238e0794d3d34bc5f4e77facb6c05ac86212baa1a55a2be70b5733b045cd33694b3afe2f0e49e4f321549fd824ea90870d4b28a2954489a0abcd50e18a844ac5bf38e4cd72d9b0942e506c433afcda3847f2dadd47647de321cec4ac430f62023856cfbb20704f4ec0bb920ba86c33e05f1ecd96733b79950a3e314d3d934f75ea0f210a8f6059401beb4bc4478fa4969e623d01ada696a7e4c7e5125b34884533a94fb319990325744ee9bbce9e525cf08f5e9e25e5360aad2b2d085fa54d835e8d466826498d9a8877565705a8a3f62802944de7ca5894e5759d351adac869580ec17e485f18c0c66f17cc07cbb22fce466da610b63af62bc83b4692f3affaf271693ac071fb86d11342d8def4f89d4b66335c1c7e4248367d8ed9612ec453902d8e50af89d7709d1a596c1f41f",
+		"95aa82ca6c49ae90cd1668baac7aa6f2b4a8ca99b2c2372acb08cf61c9c3805e6e0328da4cd76a19edd2d3994c798b0022569ad418d1fee4d9cd45a391c601ffc92ad91501432fee150287617c13629e69fc7281cd7165a63eab49cf714bce3a75a74f76ea7e64ff81eb61fdfec39b67bf0de98c7e4e32bdf97c8c6ac75ba43c02f4b2ed7216ecf3014df000108b67cf99505b179f8ed4980a6103d1bca70dbe9bbfab0ed59801d6e5f2d6f67d3ec5168e212e2daf02c6b963c98a1f7097de0c56891a2b211b01070dd8fd8b16c2a1a4e3cfd292d2984b3561d555d16c33ddc2bcf7edde13efe520c7e2abdda44d81881c531aeeeb66244c3b791ea8acfb6a68",
+		"793d34afb982ab70b0e204e1e7243314a19e987d9ab7662f58c3dc6064c9be35667ad53b115c610cfc229f4e5b3e8aae7aac97ce66d1d20b92da3860701b5006dd1385e173e3af7a5a9bb7da85c0434cd55a40fb9c482a0b36f0782846a7f16d05b40a08f0ad9a633f9a1e99e69e6b8039a0f2a91be40f193f4ce3bed1886dab1b0a6112f91503684c1e5afb938b9497166a7147badd1cc19c73e8b9f22e0dcbd18996868d7ad47755e677ee6e6ec87094cab7ee35feb96017c474261ba7391b18a72451e6daa7f38e162358c5d84788c974e614acc362b887c56b756df5aeacdda09b11d35a1f97daaceb5ca1b40a78b6058f7e1d26ad945be6ef74a8e72729f9ab2e3e7dda88d8f803e26e84a34ac07a7cecf5b6be23a4aa1ac6897f23169d894d53369b27673cf2438af9c6b53a2fa412c74dc075c617029e571f4c2951b1cdd63d33765af9d9d20e12430a83784c2bca8603f11521fa97f2e45398b4a385176701c6f416720ca0816bf51a3e0b4c7a28a89f0616a296423760f0f2f471e1def8a2f43956f79790a6b64dfdbb8159236ebd7fe1049e8e005e231e5f1936bfdccbda8cf0cb5116af758dfd6732dfa77ac3e6faf0996c13473292da363f01ddcb6a524dbf1d5d608f57c146173a9b169f979e101fe581f749764fd87119ae301958c8e9a9bfd16249e564ffbb304bc2ca4c34713a20fb858b47c83ce768e04f149884504c0515345631401f829e3259",
+	},
+
+	{ //key=16, plaintext=293
 		"0795d80bc7f40f4d41c280271a2e4f7f",
 		"ff824c906594aff365d3cb1f",
 		"1ad4e74d127f935beee57cff920665babe7ce56227377afe570ba786193ded3412d4812453157f42fafc418c02a746c1232c234a639d49baa8f041c12e2ef540027764568ce49886e0d913e28059a3a485c6eee96337a30b28e4cd5612c2961539fa6bc5de034cbedc5fa15db844013e0bef276e27ca7a4faf47a5c1093bd643354108144454d221b3737e6cb87faac36ed131959babe44af2890cfcc4e23ffa24470e689ce0894f5407bb0c8665cff536008ad2ac6f1c9ef8289abd0bd9b72f21c597bda5210cf928c805af2dd4a464d52e36819d521f967bba5386930ab5b4cf4c71746d7e6e964673457348e9d71d170d9eb560bd4bdb779e610ba816bf776231ebd0af5966f5cdab6815944032ab4dd060ad8dab880549e910f1ffcf6862005432afad",
 		"98a47a430d8fd74dc1829a91e3481f8ed024d8ba34c9b903321b04864db333e558ae28653dffb2",
 		"3b8f91443480e647473a0a0b03d571c622b7e70e4309a02c9bb7980053010d865e6aec161354dc9f481b2cd5213e09432b57ec4e58fbd0a8549dd15c8c4e74a6529f75fad0ce5a9e20e2beeb2f91eb638bf88999968de438d2f1cedbfb0a1c81f9e8e7362c738e0fddd963692a4f4df9276b7f040979ce874cf6fa3de26da0713784bdb25e4efcb840554ef5b38b5fe8380549a496bd8e423a7456df6f4ae78a07ebe2276a8e22fc2243ec4f78abe0c99c733fd67c8c492699fa5ee2289cdd0a8d469bf883520ee74efb854bfadc7366a49ee65ca4e894e3335e2b672618d362eee12a577dd8dc2ba55c49c1fc3ad68180e9b112d0234d4aa28f5661f1e036450ca6f18be0166676bd80f8a4890c6ddea306fabb7ff3cb2860aa32a827e3a312912a2dfa70f6bc1c07de238448f2d751bd0cf15bf7",
 	},
-	{
+	{ //key=24, plaintext=293
 		"e2e001a36c60d2bf40d69ff5b2b1161ea218db263be16a4e",
 		"84230643130d05425826641e",
 		"adb034f3f4a7ca45e2993812d113a9821d50df151af978bccc6d3bc113e15bc0918fb385377dca1916022ce816d56a332649484043c0fc0f2d37d040182b00a9bbb42ef231f80b48fb3730110d9a4433e38c73264c703579a705b9c031b969ec6d98de9f90e9e78b21179c2eb1e061946cd4bbb844f031ecf6eaac27a4151311adf1b03eda97c9fbae66295f468af4b35faf6ba39f9d8f95873bbc2b51cf3dfec0ed3c9b850696336cc093b24a8765a936d14dd56edc6bf518272169f75e67b74ba452d0aae90416a997c8f31e2e9d54ffea296dc69462debc8347b3e1af6a2d53bdfdfda601134f98db42b609df0a08c9347590c8d86e845bb6373d65a26ab85f67b50569c85401a396b8ad76c2b53ff62bcfbf033e435ef47b9b591d05117c6dc681d68e",
 		"d5d7316b8fdee152942148bff007c22e4b2022c6bc7be3c18c5f2e52e004e0b5dc12206bf002bd",
 		"f2c39423ee630dfe961da81909159dba018ce09b1073a12a477108316af5b7a31f86be6a0548b572d604bd115ea737dde899e0bd7f7ac9b23e38910dc457551ecc15c814a9f46d8432a1a36097dc1afe2712d1ba0838fa88cb55d9f65a2e9bece0dbf8999562503989041a2c87d7eb80ef649769d2f4978ce5cf9664f2bd0849646aa81cb976e45e1ade2f17a8126219e917aadbb4bae5e2c4b3f57bbc7f13fcc807df7842d9727a1b389e0b749e5191482adacabd812627c6eae2c7a30caf0844ad2a22e08f39edddf0ae10413e47db433dfe3febbb5a5cec9ade21fbba1e548247579395880b747669a8eb7e2ec0c1bff7fed2defdb92b07a14edf07b1bde29c31ab052ff1214e6b5ebbefcb8f21b5d6f8f6e07ee57ad6e14d4e142cb3f51bb465ab3a28a2a12f01b7514ad0463f2bde0d71d221",
 	},
-	{
+	{ //key=32, plaintext=293
 		"5394e890d37ba55ec9d5f327f15680f6a63ef5279c79331643ad0af6d2623525",
 		"815e840b7aca7af3b324583f",
 		"8e63067cd15359f796b43c68f093f55fdf3589fc5f2fdfad5f9d156668a617f7091d73da71cdd207810e6f71a165d0809a597df9885ca6e8f9bb4e616166586b83cc45f49917fc1a256b8bc7d05c476ab5c4633e20092619c4747b26dad3915e9fd65238ee4e5213badeda8a3a22f5efe6582d0762532026c89b4ca26fdd000eb45347a2a199b55b7790e6b1b2dba19833ce9f9522c0bcea5b088ccae68dd99ae0203c81b9f1dd3181c3e2339e83ccd1526b67742b235e872bea5111772aab574ae7d904d9b6355a79178e179b5ae8edc54f61f172bf789ea9c9af21f45b783e4251421b077776808f04972a5e801723cf781442378ce0e0568f014aea7a882dcbcb48d342be53d1c2ebfb206b12443a8a587cc1e55ca23beca385d61d0d03e9d84cbc1b0a",
diff --git a/src/crypto/cipher/io.go b/src/crypto/cipher/io.go
index 0974ac7..b70285b 100644
--- a/src/crypto/cipher/io.go
+++ b/src/crypto/cipher/io.go
@@ -9,7 +9,7 @@
 // The Stream* objects are so simple that all their members are public. Users
 // can create them themselves.
 
-// StreamReader wraps a Stream into an io.Reader. It calls XORKeyStream
+// StreamReader wraps a [Stream] into an [io.Reader]. It calls XORKeyStream
 // to process each slice of data which passes through.
 type StreamReader struct {
 	S Stream
@@ -22,10 +22,10 @@
 	return
 }
 
-// StreamWriter wraps a Stream into an io.Writer. It calls XORKeyStream
-// to process each slice of data which passes through. If any Write call
-// returns short then the StreamWriter is out of sync and must be discarded.
-// A StreamWriter has no internal buffering; Close does not need
+// StreamWriter wraps a [Stream] into an io.Writer. It calls XORKeyStream
+// to process each slice of data which passes through. If any [StreamWriter.Write]
+// call returns short then the StreamWriter is out of sync and must be discarded.
+// A StreamWriter has no internal buffering; [StreamWriter.Close] does not need
 // to be called to flush write data.
 type StreamWriter struct {
 	S   Stream
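
A minimal usage sketch of the StreamWriter documented above, assuming an illustrative AES key, IV, and message (not values from this diff); any cipher.Stream, such as the OFB stream from the next file, works the same way.

// Sketch only: key, IV, and message are illustrative assumptions.
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	key := bytes.Repeat([]byte{0x11}, 16)           // example 128-bit AES key
	iv := bytes.Repeat([]byte{0x22}, aes.BlockSize) // IV must equal the block size

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	var ciphertext bytes.Buffer
	w := cipher.StreamWriter{S: cipher.NewOFB(block, iv), W: &ciphertext}
	if _, err := w.Write([]byte("hello gopher")); err != nil {
		panic(err) // a short write means the StreamWriter is out of sync; discard it
	}
	fmt.Printf("%x\n", ciphertext.Bytes())
}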
diff --git a/src/crypto/cipher/ofb.go b/src/crypto/cipher/ofb.go
index 1195fdd..bdfc977 100644
--- a/src/crypto/cipher/ofb.go
+++ b/src/crypto/cipher/ofb.go
@@ -18,7 +18,7 @@
 	outUsed int
 }
 
-// NewOFB returns a Stream that encrypts or decrypts using the block cipher b
+// NewOFB returns a [Stream] that encrypts or decrypts using the block cipher b
 // in output feedback mode. The initialization vector iv's length must be equal
 // to b's block size.
 func NewOFB(b Block, iv []byte) Stream {
diff --git a/src/crypto/crypto.go b/src/crypto/crypto.go
index 10a1cd8..2774a6b 100644
--- a/src/crypto/crypto.go
+++ b/src/crypto/crypto.go
@@ -15,7 +15,7 @@
 // package.
 type Hash uint
 
-// HashFunc simply returns the value of h so that Hash implements SignerOpts.
+// HashFunc simply returns the value of h so that [Hash] implements [SignerOpts].
 func (h Hash) HashFunc() Hash {
 	return h
 }
@@ -171,7 +171,7 @@
 //	    Equal(x crypto.PrivateKey) bool
 //	}
 //
-// as well as purpose-specific interfaces such as Signer and Decrypter, which
+// as well as purpose-specific interfaces such as [Signer] and [Decrypter], which
 // can be used for increased type safety within applications.
 type PrivateKey any
 
@@ -198,7 +198,7 @@
 	Sign(rand io.Reader, digest []byte, opts SignerOpts) (signature []byte, err error)
 }
 
-// SignerOpts contains options for signing with a Signer.
+// SignerOpts contains options for signing with a [Signer].
 type SignerOpts interface {
 	// HashFunc returns an identifier for the hash function used to produce
 	// the message passed to Signer.Sign, or else zero to indicate that no
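
To make the Signer/SignerOpts pairing above concrete, here is a small sketch using an Ed25519 key as the Signer; Ed25519 signs the message directly, so the options value is crypto.Hash(0). The key and message are illustrative assumptions.

// Sketch only: key and message are illustrative assumptions.
package main

import (
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	var signer crypto.Signer = priv // ed25519.PrivateKey implements crypto.Signer
	msg := []byte("message")
	sig, err := signer.Sign(rand.Reader, msg, crypto.Hash(0)) // Hash(0): no pre-hashing
	if err != nil {
		panic(err)
	}
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}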
diff --git a/src/crypto/des/block.go b/src/crypto/des/block.go
index e029976..c525ab0 100644
--- a/src/crypto/des/block.go
+++ b/src/crypto/des/block.go
@@ -35,16 +35,6 @@
 	binary.BigEndian.PutUint64(dst, permuteFinalBlock(preOutput))
 }
 
-// Encrypt one block from src into dst, using the subkeys.
-func encryptBlock(subkeys []uint64, dst, src []byte) {
-	cryptBlock(subkeys, dst, src, false)
-}
-
-// Decrypt one block from src into dst, using the subkeys.
-func decryptBlock(subkeys []uint64, dst, src []byte) {
-	cryptBlock(subkeys, dst, src, true)
-}
-
 // DES Feistel function. feistelBox must be initialized via
 // feistelBoxOnce.Do(initFeistelBox) first.
 func feistel(l, r uint32, k0, k1 uint64) (lout, rout uint32) {
diff --git a/src/crypto/des/cipher.go b/src/crypto/des/cipher.go
index ece764f..b0f456e 100644
--- a/src/crypto/des/cipher.go
+++ b/src/crypto/des/cipher.go
@@ -25,7 +25,7 @@
 	subkeys [16]uint64
 }
 
-// NewCipher creates and returns a new cipher.Block.
+// NewCipher creates and returns a new [cipher.Block].
 func NewCipher(key []byte) (cipher.Block, error) {
 	if len(key) != 8 {
 		return nil, KeySizeError(len(key))
@@ -48,7 +48,7 @@
 	if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
 		panic("crypto/des: invalid buffer overlap")
 	}
-	encryptBlock(c.subkeys[:], dst, src)
+	cryptBlock(c.subkeys[:], dst, src, false)
 }
 
 func (c *desCipher) Decrypt(dst, src []byte) {
@@ -61,7 +61,7 @@
 	if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
 		panic("crypto/des: invalid buffer overlap")
 	}
-	decryptBlock(c.subkeys[:], dst, src)
+	cryptBlock(c.subkeys[:], dst, src, true)
 }
 
 // A tripleDESCipher is an instance of TripleDES encryption.
@@ -69,7 +69,7 @@
 	cipher1, cipher2, cipher3 desCipher
 }
 
-// NewTripleDESCipher creates and returns a new cipher.Block.
+// NewTripleDESCipher creates and returns a new [cipher.Block].
 func NewTripleDESCipher(key []byte) (cipher.Block, error) {
 	if len(key) != 24 {
 		return nil, KeySizeError(len(key))
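
For reference, a small sketch of using the cipher.Block returned by NewTripleDESCipher on a single block; the 24-byte key and the plaintext are illustrative assumptions.

// Sketch only: key and plaintext are illustrative assumptions.
package main

import (
	"crypto/des"
	"fmt"
)

func main() {
	key := []byte("123456789012345678901234") // 24 bytes: K1 || K2 || K3
	c, err := des.NewTripleDESCipher(key)
	if err != nil {
		panic(err)
	}

	src := []byte("8bytes!!") // exactly one DES block
	dst := make([]byte, des.BlockSize)
	c.Encrypt(dst, src)

	plain := make([]byte, des.BlockSize)
	c.Decrypt(plain, dst)
	fmt.Printf("%x -> %s\n", dst, plain)
}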
diff --git a/src/crypto/des/des_test.go b/src/crypto/des/des_test.go
index 690a49f..7bebcd9 100644
--- a/src/crypto/des/des_test.go
+++ b/src/crypto/des/des_test.go
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package des
+package des_test
 
 import (
 	"bytes"
+	"crypto/cipher"
+	"crypto/des"
 	"testing"
 )
 
@@ -1260,12 +1262,12 @@
 		[]byte{0x63, 0xfa, 0xc0, 0xd0, 0x34, 0xd9, 0xf7, 0x93}},
 }
 
-func newCipher(key []byte) *desCipher {
-	c, err := NewCipher(key)
+func newCipher(key []byte) cipher.Block {
+	c, err := des.NewCipher(key)
 	if err != nil {
 		panic("NewCipher failed: " + err.Error())
 	}
-	return c.(*desCipher)
+	return c
 }
 
 // Use the known weak keys to test DES implementation
@@ -1274,7 +1276,7 @@
 		var encrypt = func(in []byte) (out []byte) {
 			c := newCipher(tt.key)
 			out = make([]byte, len(in))
-			encryptBlock(c.subkeys[:], out, in)
+			c.Encrypt(out, in)
 			return
 		}
 
@@ -1295,7 +1297,7 @@
 		var encrypt = func(key, in []byte) (out []byte) {
 			c := newCipher(key)
 			out = make([]byte, len(in))
-			encryptBlock(c.subkeys[:], out, in)
+			c.Encrypt(out, in)
 			return
 		}
 
@@ -1315,7 +1317,7 @@
 	for i, tt := range encryptDESTests {
 		c := newCipher(tt.key)
 		out := make([]byte, len(tt.in))
-		encryptBlock(c.subkeys[:], out, tt.in)
+		c.Encrypt(out, tt.in)
 
 		if !bytes.Equal(out, tt.out) {
 			t.Errorf("#%d: result: %x want: %x", i, out, tt.out)
@@ -1327,7 +1329,7 @@
 	for i, tt := range encryptDESTests {
 		c := newCipher(tt.key)
 		plain := make([]byte, len(tt.in))
-		decryptBlock(c.subkeys[:], plain, tt.out)
+		c.Decrypt(plain, tt.out)
 
 		if !bytes.Equal(plain, tt.in) {
 			t.Errorf("#%d: result: %x want: %x", i, plain, tt.in)
@@ -1337,7 +1339,7 @@
 
 func TestEncryptTripleDES(t *testing.T) {
 	for i, tt := range encryptTripleDESTests {
-		c, _ := NewTripleDESCipher(tt.key)
+		c, _ := des.NewTripleDESCipher(tt.key)
 		out := make([]byte, len(tt.in))
 		c.Encrypt(out, tt.in)
 
@@ -1349,7 +1351,7 @@
 
 func TestDecryptTripleDES(t *testing.T) {
 	for i, tt := range encryptTripleDESTests {
-		c, _ := NewTripleDESCipher(tt.key)
+		c, _ := des.NewTripleDESCipher(tt.key)
 
 		plain := make([]byte, len(tt.in))
 		c.Decrypt(plain, tt.out)
@@ -1363,7 +1365,7 @@
 // Defined in Pub 800-20
 func TestVariablePlaintextKnownAnswer(t *testing.T) {
 	for i, tt := range tableA1Tests {
-		c, _ := NewTripleDESCipher(tableA1Key)
+		c, _ := des.NewTripleDESCipher(tableA1Key)
 
 		out := make([]byte, len(tt.in))
 		c.Encrypt(out, tt.in)
@@ -1377,7 +1379,7 @@
 // Defined in Pub 800-20
 func TestVariableCiphertextKnownAnswer(t *testing.T) {
 	for i, tt := range tableA1Tests {
-		c, _ := NewTripleDESCipher(tableA1Key)
+		c, _ := des.NewTripleDESCipher(tableA1Key)
 
 		plain := make([]byte, len(tt.out))
 		c.Decrypt(plain, tt.out)
@@ -1393,7 +1395,7 @@
 // 0x01... key produces the original plaintext
 func TestInversePermutationKnownAnswer(t *testing.T) {
 	for i, tt := range tableA1Tests {
-		c, _ := NewTripleDESCipher(tableA1Key)
+		c, _ := des.NewTripleDESCipher(tableA1Key)
 
 		plain := make([]byte, len(tt.in))
 		c.Encrypt(plain, tt.out)
@@ -1409,7 +1411,7 @@
 // 0x01... key produces the corresponding ciphertext
 func TestInitialPermutationKnownAnswer(t *testing.T) {
 	for i, tt := range tableA1Tests {
-		c, _ := NewTripleDESCipher(tableA1Key)
+		c, _ := des.NewTripleDESCipher(tableA1Key)
 
 		out := make([]byte, len(tt.in))
 		c.Decrypt(out, tt.in)
@@ -1423,7 +1425,7 @@
 // Defined in Pub 800-20
 func TestVariableKeyKnownAnswerEncrypt(t *testing.T) {
 	for i, tt := range tableA2Tests {
-		c, _ := NewTripleDESCipher(tt.key)
+		c, _ := des.NewTripleDESCipher(tt.key)
 
 		out := make([]byte, len(tableA2Plaintext))
 		c.Encrypt(out, tableA2Plaintext)
@@ -1437,7 +1439,7 @@
 // Defined in Pub 800-20
 func TestVariableKeyKnownAnswerDecrypt(t *testing.T) {
 	for i, tt := range tableA2Tests {
-		c, _ := NewTripleDESCipher(tt.key)
+		c, _ := des.NewTripleDESCipher(tt.key)
 
 		out := make([]byte, len(tt.out))
 		c.Decrypt(out, tt.out)
@@ -1451,7 +1453,7 @@
 // Defined in Pub 800-20
 func TestPermutationOperationKnownAnswerEncrypt(t *testing.T) {
 	for i, tt := range tableA3Tests {
-		c, _ := NewTripleDESCipher(tt.key)
+		c, _ := des.NewTripleDESCipher(tt.key)
 
 		out := make([]byte, len(tableA3Plaintext))
 		c.Encrypt(out, tableA3Plaintext)
@@ -1465,7 +1467,7 @@
 // Defined in Pub 800-20
 func TestPermutationOperationKnownAnswerDecrypt(t *testing.T) {
 	for i, tt := range tableA3Tests {
-		c, _ := NewTripleDESCipher(tt.key)
+		c, _ := des.NewTripleDESCipher(tt.key)
 
 		out := make([]byte, len(tt.out))
 		c.Decrypt(out, tt.out)
@@ -1479,7 +1481,7 @@
 // Defined in Pub 800-20
 func TestSubstitutionTableKnownAnswerEncrypt(t *testing.T) {
 	for i, tt := range tableA4Tests {
-		c, _ := NewTripleDESCipher(tt.key)
+		c, _ := des.NewTripleDESCipher(tt.key)
 
 		out := make([]byte, len(tt.in))
 		c.Encrypt(out, tt.in)
@@ -1493,7 +1495,7 @@
 // Defined in Pub 800-20
 func TestSubstitutionTableKnownAnswerDecrypt(t *testing.T) {
 	for i, tt := range tableA4Tests {
-		c, _ := NewTripleDESCipher(tt.key)
+		c, _ := des.NewTripleDESCipher(tt.key)
 
 		out := make([]byte, len(tt.out))
 		c.Decrypt(out, tt.out)
@@ -1504,31 +1506,9 @@
 	}
 }
 
-func TestInitialPermute(t *testing.T) {
-	for i := uint(0); i < 64; i++ {
-		bit := uint64(1) << i
-		got := permuteInitialBlock(bit)
-		want := uint64(1) << finalPermutation[63-i]
-		if got != want {
-			t.Errorf("permute(%x) = %x, want %x", bit, got, want)
-		}
-	}
-}
-
-func TestFinalPermute(t *testing.T) {
-	for i := uint(0); i < 64; i++ {
-		bit := uint64(1) << i
-		got := permuteFinalBlock(bit)
-		want := uint64(1) << initialPermutation[63-i]
-		if got != want {
-			t.Errorf("permute(%x) = %x, want %x", bit, got, want)
-		}
-	}
-}
-
 func BenchmarkEncrypt(b *testing.B) {
 	tt := encryptDESTests[0]
-	c, err := NewCipher(tt.key)
+	c, err := des.NewCipher(tt.key)
 	if err != nil {
 		b.Fatal("NewCipher:", err)
 	}
@@ -1542,7 +1522,7 @@
 
 func BenchmarkDecrypt(b *testing.B) {
 	tt := encryptDESTests[0]
-	c, err := NewCipher(tt.key)
+	c, err := des.NewCipher(tt.key)
 	if err != nil {
 		b.Fatal("NewCipher:", err)
 	}
@@ -1556,7 +1536,7 @@
 
 func BenchmarkTDESEncrypt(b *testing.B) {
 	tt := encryptTripleDESTests[0]
-	c, err := NewTripleDESCipher(tt.key)
+	c, err := des.NewTripleDESCipher(tt.key)
 	if err != nil {
 		b.Fatal("NewCipher:", err)
 	}
@@ -1570,7 +1550,7 @@
 
 func BenchmarkTDESDecrypt(b *testing.B) {
 	tt := encryptTripleDESTests[0]
-	c, err := NewTripleDESCipher(tt.key)
+	c, err := des.NewTripleDESCipher(tt.key)
 	if err != nil {
 		b.Fatal("NewCipher:", err)
 	}
diff --git a/src/crypto/des/internal_test.go b/src/crypto/des/internal_test.go
new file mode 100644
index 0000000..f309b01
--- /dev/null
+++ b/src/crypto/des/internal_test.go
@@ -0,0 +1,29 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package des
+
+import "testing"
+
+func TestInitialPermute(t *testing.T) {
+	for i := uint(0); i < 64; i++ {
+		bit := uint64(1) << i
+		got := permuteInitialBlock(bit)
+		want := uint64(1) << finalPermutation[63-i]
+		if got != want {
+			t.Errorf("permute(%x) = %x, want %x", bit, got, want)
+		}
+	}
+}
+
+func TestFinalPermute(t *testing.T) {
+	for i := uint(0); i < 64; i++ {
+		bit := uint64(1) << i
+		got := permuteFinalBlock(bit)
+		want := uint64(1) << initialPermutation[63-i]
+		if got != want {
+			t.Errorf("permute(%x) = %x, want %x", bit, got, want)
+		}
+	}
+}
diff --git a/src/crypto/dsa/dsa.go b/src/crypto/dsa/dsa.go
index a833599..4524bd4 100644
--- a/src/crypto/dsa/dsa.go
+++ b/src/crypto/dsa/dsa.go
@@ -155,7 +155,7 @@
 }
 
 // GenerateKey generates a public&private key pair. The Parameters of the
-// PrivateKey must already be valid (see GenerateParameters).
+// [PrivateKey] must already be valid (see [GenerateParameters]).
 func GenerateKey(priv *PrivateKey, rand io.Reader) error {
 	if priv.P == nil || priv.Q == nil || priv.G == nil {
 		return errors.New("crypto/dsa: parameters not set up before generating key")
@@ -200,7 +200,7 @@
 // to the byte-length of the subgroup. This function does not perform that
 // truncation itself.
 //
-// Be aware that calling Sign with an attacker-controlled PrivateKey may
+// Be aware that calling Sign with an attacker-controlled [PrivateKey] may
 // require an arbitrary amount of CPU.
 func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
 	randutil.MaybeReadByte(rand)
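
A minimal sketch of the flow these comments describe: parameters must be generated before GenerateKey, and the caller truncates the hash to the subgroup size before Sign. The parameter sizes, message, and hash choice are illustrative assumptions.

// Sketch only: parameter sizes, message, and hash are illustrative assumptions.
package main

import (
	"crypto/dsa"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	var priv dsa.PrivateKey
	if err := dsa.GenerateParameters(&priv.Parameters, rand.Reader, dsa.L1024N160); err != nil {
		panic(err)
	}
	if err := dsa.GenerateKey(&priv, rand.Reader); err != nil {
		panic(err)
	}

	sum := sha256.Sum256([]byte("message"))
	digest := sum[:20] // truncate to the byte length of the subgroup (N = 160 bits)
	r, s, err := dsa.Sign(rand.Reader, &priv, digest)
	if err != nil {
		panic(err)
	}
	fmt.Println(dsa.Verify(&priv.PublicKey, digest, r, s)) // true
}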
diff --git a/src/crypto/ecdh/ecdh.go b/src/crypto/ecdh/ecdh.go
index b86f521..b7c26f9 100644
--- a/src/crypto/ecdh/ecdh.go
+++ b/src/crypto/ecdh/ecdh.go
@@ -44,7 +44,7 @@
 	// selected public keys can cause ECDH to return an error.
 	NewPublicKey(key []byte) (*PublicKey, error)
 
-	// ecdh performs a ECDH exchange and returns the shared secret. It's exposed
+	// ecdh performs an ECDH exchange and returns the shared secret. It's exposed
 	// as the PrivateKey.ECDH method.
 	//
 	// The private method also allow us to expand the ECDH interface with more
@@ -114,14 +114,14 @@
 	publicKeyOnce sync.Once
 }
 
-// ECDH performs a ECDH exchange and returns the shared secret. The PrivateKey
-// and PublicKey must use the same curve.
+// ECDH performs an ECDH exchange and returns the shared secret. The [PrivateKey]
+// and [PublicKey] must use the same curve.
 //
 // For NIST curves, this performs ECDH as specified in SEC 1, Version 2.0,
 // Section 3.3.1, and returns the x-coordinate encoded according to SEC 1,
 // Version 2.0, Section 2.3.5. The result is never the point at infinity.
 //
-// For X25519, this performs ECDH as specified in RFC 7748, Section 6.1. If
+// For [X25519], this performs ECDH as specified in RFC 7748, Section 6.1. If
 // the result is the all-zero value, ECDH returns an error.
 func (k *PrivateKey) ECDH(remote *PublicKey) ([]byte, error) {
 	if k.curve != remote.curve {
@@ -141,7 +141,7 @@
 // Equal returns whether x represents the same private key as k.
 //
 // Note that there can be equivalent private keys with different encodings which
-// would return false from this check but behave the same way as inputs to ECDH.
+// would return false from this check but behave the same way as inputs to [ECDH].
 //
 // This check is performed in constant time as long as the key types and their
 // curve match.
@@ -182,7 +182,7 @@
 }
 
 // Public implements the implicit interface of all standard library private
-// keys. See the docs of crypto.PrivateKey.
+// keys. See the docs of [crypto.PrivateKey].
 func (k *PrivateKey) Public() crypto.PublicKey {
 	return k.PublicKey()
 }
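
A short sketch of the PrivateKey.ECDH exchange documented above, shown over X25519; both sides derive the same shared secret. Error handling is elided for brevity.

// Sketch only: errors are elided for brevity.
package main

import (
	"bytes"
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := ecdh.X25519()
	alice, _ := curve.GenerateKey(rand.Reader)
	bob, _ := curve.GenerateKey(rand.Reader)

	s1, _ := alice.ECDH(bob.PublicKey())
	s2, _ := bob.ECDH(alice.PublicKey())
	fmt.Println(bytes.Equal(s1, s2)) // true: both sides agree on the secret
}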
diff --git a/src/crypto/ecdh/nist.go b/src/crypto/ecdh/nist.go
index 01354fa..b366491 100644
--- a/src/crypto/ecdh/nist.go
+++ b/src/crypto/ecdh/nist.go
@@ -211,7 +211,7 @@
 	return p.BytesX()
 }
 
-// P256 returns a Curve which implements NIST P-256 (FIPS 186-3, section D.2.3),
+// P256 returns a [Curve] which implements NIST P-256 (FIPS 186-3, section D.2.3),
 // also known as secp256r1 or prime256v1.
 //
 // Multiple invocations of this function will return the same value, which can
@@ -230,7 +230,7 @@
 	0xbc, 0xe6, 0xfa, 0xad, 0xa7, 0x17, 0x9e, 0x84,
 	0xf3, 0xb9, 0xca, 0xc2, 0xfc, 0x63, 0x25, 0x51}
 
-// P384 returns a Curve which implements NIST P-384 (FIPS 186-3, section D.2.4),
+// P384 returns a [Curve] which implements NIST P-384 (FIPS 186-3, section D.2.4),
 // also known as secp384r1.
 //
 // Multiple invocations of this function will return the same value, which can
@@ -251,7 +251,7 @@
 	0x58, 0x1a, 0x0d, 0xb2, 0x48, 0xb0, 0xa7, 0x7a,
 	0xec, 0xec, 0x19, 0x6a, 0xcc, 0xc5, 0x29, 0x73}
 
-// P521 returns a Curve which implements NIST P-521 (FIPS 186-3, section D.2.5),
+// P521 returns a [Curve] which implements NIST P-521 (FIPS 186-3, section D.2.5),
 // also known as secp521r1.
 //
 // Multiple invocations of this function will return the same value, which can
diff --git a/src/crypto/ecdh/x25519.go b/src/crypto/ecdh/x25519.go
index dbc3ea9..998e758 100644
--- a/src/crypto/ecdh/x25519.go
+++ b/src/crypto/ecdh/x25519.go
@@ -17,7 +17,7 @@
 	x25519SharedSecretSize = 32
 )
 
-// X25519 returns a Curve which implements the X25519 function over Curve25519
+// X25519 returns a [Curve] which implements the X25519 function over Curve25519
 // (RFC 7748, Section 5).
 //
 // Multiple invocations of this function will return the same value, so it can
diff --git a/src/crypto/ecdsa/ecdsa.go b/src/crypto/ecdsa/ecdsa.go
index e150377..3ed15a8 100644
--- a/src/crypto/ecdsa/ecdsa.go
+++ b/src/crypto/ecdsa/ecdsa.go
@@ -68,7 +68,7 @@
 // Equal reports whether pub and x have the same value.
 //
 // Two keys are only considered to have the same value if they have the same Curve value.
-// Note that for example elliptic.P256() and elliptic.P256().Params() are different
+// Note that for example [elliptic.P256] and elliptic.P256().Params() are different
 // values, as the latter is a generic not constant time implementation.
 func (pub *PublicKey) Equal(x crypto.PublicKey) bool {
 	xx, ok := x.(*PublicKey)
@@ -91,7 +91,7 @@
 
 // ECDH returns k as a [ecdh.PrivateKey]. It returns an error if the key is
 // invalid according to the definition of [ecdh.Curve.NewPrivateKey], or if the
-// Curve is not supported by crypto/ecdh.
+// Curve is not supported by [crypto/ecdh].
 func (k *PrivateKey) ECDH() (*ecdh.PrivateKey, error) {
 	c := curveToECDH(k.Curve)
 	if c == nil {
@@ -124,7 +124,7 @@
 
 // Equal reports whether priv and x have the same value.
 //
-// See PublicKey.Equal for details on how Curve is compared.
+// See [PublicKey.Equal] for details on how Curve is compared.
 func (priv *PrivateKey) Equal(x crypto.PrivateKey) bool {
 	xx, ok := x.(*PrivateKey)
 	if !ok {
@@ -145,7 +145,7 @@
 //
 // This method implements crypto.Signer, which is an interface to support keys
 // where the private part is kept in, for example, a hardware module. Common
-// uses can use the SignASN1 function in this package directly.
+// uses can use the [SignASN1] function in this package directly.
 func (priv *PrivateKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
 	return SignASN1(rand, priv, digest)
 }
diff --git a/src/crypto/ecdsa/ecdsa_legacy.go b/src/crypto/ecdsa/ecdsa_legacy.go
index 12a40e4..0b8489a 100644
--- a/src/crypto/ecdsa/ecdsa_legacy.go
+++ b/src/crypto/ecdsa/ecdsa_legacy.go
@@ -54,7 +54,7 @@
 // using the private key, priv. If the hash is longer than the bit-length of the
 // private key's curve order, the hash will be truncated to that length. It
 // returns the signature as a pair of integers. Most applications should use
-// SignASN1 instead of dealing directly with r, s.
+// [SignASN1] instead of dealing directly with r, s.
 func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
 	sig, err := SignASN1(rand, priv, hash)
 	if err != nil {
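
A minimal sketch of the SignASN1 path that the comment above recommends over the raw (r, s) pair; the curve, message, and hash are illustrative assumptions.

// Sketch only: curve, message, and hash are illustrative assumptions.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256([]byte("message"))
	sig, err := ecdsa.SignASN1(rand.Reader, priv, digest[:])
	if err != nil {
		panic(err)
	}
	fmt.Println(ecdsa.VerifyASN1(&priv.PublicKey, digest[:], sig)) // true
}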
diff --git a/src/crypto/ecdsa/ecdsa_s390x.s b/src/crypto/ecdsa/ecdsa_s390x.s
index ba5b3bf..ea1f446 100644
--- a/src/crypto/ecdsa/ecdsa_s390x.s
+++ b/src/crypto/ecdsa/ecdsa_s390x.s
@@ -10,7 +10,7 @@
 	MOVD params+8(FP), R1 // address parameter block
 
 loop:
-	WORD $0xB93A0008 // compute digital signature authentication
+	KDSA R0, R4      // compute digital signature authentication
 	BVS  loop        // branch back if interrupted
 	BGT  retry       // signing unsuccessful, but retry with new CSPRN
 	BLT  error       // condition code of 1 indicates a failure
diff --git a/src/crypto/elliptic/elliptic.go b/src/crypto/elliptic/elliptic.go
index 96555ad..290a8e5 100644
--- a/src/crypto/elliptic/elliptic.go
+++ b/src/crypto/elliptic/elliptic.go
@@ -24,10 +24,10 @@
 //
 // Note that the conventional point at infinity (0, 0) is not considered on the
 // curve, although it can be returned by Add, Double, ScalarMult, or
-// ScalarBaseMult (but not the Unmarshal or UnmarshalCompressed functions).
+// ScalarBaseMult (but not the [Unmarshal] or [UnmarshalCompressed] functions).
 //
-// Using Curve implementations besides those returned by P224(), P256(), P384(),
-// and P521() is deprecated.
+// Using Curve implementations besides those returned by [P224], [P256], [P384],
+// and [P521] is deprecated.
 type Curve interface {
 	// Params returns the parameters for the curve.
 	Params() *CurveParams
@@ -70,7 +70,7 @@
 // GenerateKey returns a public/private key pair. The private key is
 // generated using the given reader, which must return random data.
 //
-// Deprecated: for ECDH, use the GenerateKey methods of the crypto/ecdh package;
+// Deprecated: for ECDH, use the GenerateKey methods of the [crypto/ecdh] package;
 // for ECDSA, use the GenerateKey function of the crypto/ecdsa package.
 func GenerateKey(curve Curve, rand io.Reader) (priv []byte, x, y *big.Int, err error) {
 	N := curve.Params().N
@@ -144,7 +144,7 @@
 // Assert that the known curves implement unmarshaler.
 var _ = []unmarshaler{p224, p256, p384, p521}
 
-// Unmarshal converts a point, serialized by Marshal, into an x, y pair. It is
+// Unmarshal converts a point, serialized by [Marshal], into an x, y pair. It is
 // an error if the point is not in uncompressed form, is not on the curve, or is
 // the point at infinity. On error, x = nil.
 //
@@ -174,7 +174,7 @@
 	return
 }
 
-// UnmarshalCompressed converts a point, serialized by MarshalCompressed, into
+// UnmarshalCompressed converts a point, serialized by [MarshalCompressed], into
 // an x, y pair. It is an error if the point is not in compressed form, is not
 // on the curve, or is the point at infinity. On error, x = nil.
 func UnmarshalCompressed(curve Curve, data []byte) (x, y *big.Int) {
@@ -230,8 +230,8 @@
 	initP521()
 }
 
-// P224 returns a Curve which implements NIST P-224 (FIPS 186-3, section D.2.2),
-// also known as secp224r1. The CurveParams.Name of this Curve is "P-224".
+// P224 returns a [Curve] which implements NIST P-224 (FIPS 186-3, section D.2.2),
+// also known as secp224r1. The CurveParams.Name of this [Curve] is "P-224".
 //
 // Multiple invocations of this function will return the same value, so it can
 // be used for equality checks and switch statements.
@@ -242,8 +242,8 @@
 	return p224
 }
 
-// P256 returns a Curve which implements NIST P-256 (FIPS 186-3, section D.2.3),
-// also known as secp256r1 or prime256v1. The CurveParams.Name of this Curve is
+// P256 returns a [Curve] which implements NIST P-256 (FIPS 186-3, section D.2.3),
+// also known as secp256r1 or prime256v1. The CurveParams.Name of this [Curve] is
 // "P-256".
 //
 // Multiple invocations of this function will return the same value, so it can
@@ -255,8 +255,8 @@
 	return p256
 }
 
-// P384 returns a Curve which implements NIST P-384 (FIPS 186-3, section D.2.4),
-// also known as secp384r1. The CurveParams.Name of this Curve is "P-384".
+// P384 returns a [Curve] which implements NIST P-384 (FIPS 186-3, section D.2.4),
+// also known as secp384r1. The CurveParams.Name of this [Curve] is "P-384".
 //
 // Multiple invocations of this function will return the same value, so it can
 // be used for equality checks and switch statements.
@@ -267,8 +267,8 @@
 	return p384
 }
 
-// P521 returns a Curve which implements NIST P-521 (FIPS 186-3, section D.2.5),
-// also known as secp521r1. The CurveParams.Name of this Curve is "P-521".
+// P521 returns a [Curve] which implements NIST P-521 (FIPS 186-3, section D.2.5),
+// also known as secp521r1. The CurveParams.Name of this [Curve] is "P-521".
 //
 // Multiple invocations of this function will return the same value, so it can
 // be used for equality checks and switch statements.
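
A small round trip through MarshalCompressed and UnmarshalCompressed on P-256, the serialization these comments refer to; the ECDSA key is generated only to obtain a point on the curve.

// Sketch only: the ECDSA key is generated just to get a point on the curve.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	data := elliptic.MarshalCompressed(elliptic.P256(), priv.X, priv.Y)
	x, y := elliptic.UnmarshalCompressed(elliptic.P256(), data)
	fmt.Println(x.Cmp(priv.X) == 0 && y.Cmp(priv.Y) == 0) // true
}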
diff --git a/src/crypto/elliptic/params.go b/src/crypto/elliptic/params.go
index 1ae57fa..716e2c0 100644
--- a/src/crypto/elliptic/params.go
+++ b/src/crypto/elliptic/params.go
@@ -7,10 +7,10 @@
 import "math/big"
 
 // CurveParams contains the parameters of an elliptic curve and also provides
-// a generic, non-constant time implementation of Curve.
+// a generic, non-constant time implementation of [Curve].
 //
 // The generic Curve implementation is deprecated, and using custom curves
-// (those not returned by P224(), P256(), P384(), and P521()) is not guaranteed
+// (those not returned by [P224], [P256], [P384], and [P521]) is not guaranteed
 // to provide any security property.
 type CurveParams struct {
 	P       *big.Int // the order of the underlying field
@@ -47,12 +47,12 @@
 	return x3
 }
 
-// IsOnCurve implements Curve.IsOnCurve.
+// IsOnCurve implements [Curve.IsOnCurve].
 //
-// Deprecated: the CurveParams methods are deprecated and are not guaranteed to
-// provide any security property. For ECDH, use the crypto/ecdh package.
-// For ECDSA, use the crypto/ecdsa package with a Curve value returned directly
-// from P224(), P256(), P384(), or P521().
+// Deprecated: the [CurveParams] methods are deprecated and are not guaranteed to
+// provide any security property. For ECDH, use the [crypto/ecdh] package.
+// For ECDSA, use the [crypto/ecdsa] package with a [Curve] value returned directly
+// from [P224], [P256], [P384], or [P521].
 func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
 	// If there is a dedicated constant-time implementation for this curve operation,
 	// use that instead of the generic one.
@@ -101,12 +101,12 @@
 	return
 }
 
-// Add implements Curve.Add.
+// Add implements [Curve.Add].
 //
-// Deprecated: the CurveParams methods are deprecated and are not guaranteed to
-// provide any security property. For ECDH, use the crypto/ecdh package.
-// For ECDSA, use the crypto/ecdsa package with a Curve value returned directly
-// from P224(), P256(), P384(), or P521().
+// Deprecated: the [CurveParams] methods are deprecated and are not guaranteed to
+// provide any security property. For ECDH, use the [crypto/ecdh] package.
+// For ECDSA, use the [crypto/ecdsa] package with a [Curve] value returned directly
+// from [P224], [P256], [P384], or [P521].
 func (curve *CurveParams) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
 	// If there is a dedicated constant-time implementation for this curve operation,
 	// use that instead of the generic one.
@@ -199,12 +199,12 @@
 	return x3, y3, z3
 }
 
-// Double implements Curve.Double.
+// Double implements [Curve.Double].
 //
-// Deprecated: the CurveParams methods are deprecated and are not guaranteed to
-// provide any security property. For ECDH, use the crypto/ecdh package.
-// For ECDSA, use the crypto/ecdsa package with a Curve value returned directly
-// from P224(), P256(), P384(), or P521().
+// Deprecated: the [CurveParams] methods are deprecated and are not guaranteed to
+// provide any security property. For ECDH, use the [crypto/ecdh] package.
+// For ECDSA, use the [crypto/ecdsa] package with a [Curve] value returned directly
+// from [P224], [P256], [P384], or [P521].
 func (curve *CurveParams) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
 	// If there is a dedicated constant-time implementation for this curve operation,
 	// use that instead of the generic one.
@@ -278,12 +278,12 @@
 	return x3, y3, z3
 }
 
-// ScalarMult implements Curve.ScalarMult.
+// ScalarMult implements [Curve.ScalarMult].
 //
-// Deprecated: the CurveParams methods are deprecated and are not guaranteed to
-// provide any security property. For ECDH, use the crypto/ecdh package.
-// For ECDSA, use the crypto/ecdsa package with a Curve value returned directly
-// from P224(), P256(), P384(), or P521().
+// Deprecated: the [CurveParams] methods are deprecated and are not guaranteed to
+// provide any security property. For ECDH, use the [crypto/ecdh] package.
+// For ECDSA, use the [crypto/ecdsa] package with a [Curve] value returned directly
+// from [P224], [P256], [P384], or [P521].
 func (curve *CurveParams) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
 	// If there is a dedicated constant-time implementation for this curve operation,
 	// use that instead of the generic one.
@@ -308,12 +308,12 @@
 	return curve.affineFromJacobian(x, y, z)
 }
 
-// ScalarBaseMult implements Curve.ScalarBaseMult.
+// ScalarBaseMult implements [Curve.ScalarBaseMult].
 //
-// Deprecated: the CurveParams methods are deprecated and are not guaranteed to
-// provide any security property. For ECDH, use the crypto/ecdh package.
-// For ECDSA, use the crypto/ecdsa package with a Curve value returned directly
-// from P224(), P256(), P384(), or P521().
+// Deprecated: the [CurveParams] methods are deprecated and are not guaranteed to
+// provide any security property. For ECDH, use the [crypto/ecdh] package.
+// For ECDSA, use the [crypto/ecdsa] package with a [Curve] value returned directly
+// from [P224], [P256], [P384], or [P521].
 func (curve *CurveParams) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
 	// If there is a dedicated constant-time implementation for this curve operation,
 	// use that instead of the generic one.
diff --git a/src/crypto/hmac/hmac.go b/src/crypto/hmac/hmac.go
index 35b9d5a..46ec81b 100644
--- a/src/crypto/hmac/hmac.go
+++ b/src/crypto/hmac/hmac.go
@@ -120,12 +120,12 @@
 	h.marshaled = true
 }
 
-// New returns a new HMAC hash using the given hash.Hash type and key.
-// New functions like sha256.New from crypto/sha256 can be used as h.
+// New returns a new HMAC hash using the given [hash.Hash] type and key.
+// New functions like sha256.New from [crypto/sha256] can be used as h.
 // h must return a new Hash every time it is called.
 // Note that unlike other hash implementations in the standard library,
-// the returned Hash does not implement encoding.BinaryMarshaler
-// or encoding.BinaryUnmarshaler.
+// the returned Hash does not implement [encoding.BinaryMarshaler]
+// or [encoding.BinaryUnmarshaler].
 func New(h func() hash.Hash, key []byte) hash.Hash {
 	if boring.Enabled {
 		hm := boring.NewHMAC(h, key)
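
A minimal sketch of hmac.New with sha256.New as the hash constructor, as the comment above suggests; the key and message are illustrative assumptions.

// Sketch only: key and message are illustrative assumptions.
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

func main() {
	mac := hmac.New(sha256.New, []byte("example key"))
	mac.Write([]byte("message"))
	tag := mac.Sum(nil)
	fmt.Printf("%x\n", tag)
	// When verifying, compare tags with hmac.Equal to avoid leaking timing.
}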
diff --git a/src/crypto/internal/bigmod/nat.go b/src/crypto/internal/bigmod/nat.go
index 5605e9f..7fdd8ef 100644
--- a/src/crypto/internal/bigmod/nat.go
+++ b/src/crypto/internal/bigmod/nat.go
@@ -318,14 +318,48 @@
 // rr returns R*R with R = 2^(_W * n) and n = len(m.nat.limbs).
 func rr(m *Modulus) *Nat {
 	rr := NewNat().ExpandFor(m)
-	// R*R is 2^(2 * _W * n). We can safely get 2^(_W * (n - 1)) by setting the
-	// most significant limb to 1. We then get to R*R by shifting left by _W
-	// n + 1 times.
-	n := len(rr.limbs)
-	rr.limbs[n-1] = 1
-	for i := n - 1; i < 2*n; i++ {
-		rr.shiftIn(0, m) // x = x * 2^_W mod m
+	n := uint(len(rr.limbs))
+	mLen := uint(m.BitLen())
+	logR := _W * n
+
+	// We start by computing R = 2^(_W * n) mod m. We can get pretty close, to
+	// 2^⌊log₂m⌋, by setting the highest bit we can without having to reduce.
+	rr.limbs[n-1] = 1 << ((mLen - 1) % _W)
+	// Then we double until we reach 2^(_W * n).
+	for i := mLen - 1; i < logR; i++ {
+		rr.Add(rr, m)
 	}
+
+	// Next we need to get from R to 2^(_W * n) R mod m (aka from one to R in
+	// the Montgomery domain, meaning we can use Montgomery multiplication now).
+	// We could do that by doubling _W * n times, or with a square-and-double
+	// chain log2(_W * n) long. Turns out the fastest thing is to start out with
+	// doublings, and switch to square-and-double once the exponent is large
+	// enough to justify the cost of the multiplications.
+
+	// The threshold is selected experimentally as a linear function of n.
+	threshold := n / 4
+
+	// We calculate how many of the most-significant bits of the exponent we can
+	// compute before crossing the threshold, and we do it with doublings.
+	i := bits.UintSize
+	for logR>>i <= threshold {
+		i--
+	}
+	for k := uint(0); k < logR>>i; k++ {
+		rr.Add(rr, m)
+	}
+
+	// Then we process the remaining bits of the exponent with a
+	// square-and-double chain.
+	for i > 0 {
+		rr.montgomeryMul(rr, rr, m)
+		i--
+		if logR>>i&1 != 0 {
+			rr.Add(rr, m)
+		}
+	}
+
 	return rr
 }
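
As a toy cross-check of the quantity this function computes (not the bigmod implementation itself), the identity R*R mod m can be reproduced with math/big for a single 64-bit limb, starting the doubling chain at 2^(BitLen-1) as the comment describes; the modulus is an illustrative assumption.

// Sketch only: a math/big illustration for one 64-bit limb, not bigmod code.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	m, _ := new(big.Int).SetString("ffffffffffffffc5", 16) // example odd modulus
	const logR = 64                                        // one 64-bit limb, so R = 2^64

	// Reach R mod m by doublings, starting from 2^(BitLen-1).
	r := new(big.Int).Lsh(big.NewInt(1), uint(m.BitLen()-1))
	for i := m.BitLen() - 1; i < logR; i++ {
		r.Lsh(r, 1).Mod(r, m)
	}

	want := new(big.Int).Exp(big.NewInt(2), big.NewInt(2*logR), m) // 2^(2*logR) mod m
	got := new(big.Int).Mod(new(big.Int).Mul(r, r), m)             // (R mod m)^2 mod m
	fmt.Println(got.Cmp(want) == 0)                                // true: both equal R*R mod m
}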
 
@@ -745,26 +779,21 @@
 	return out.montgomeryReduction(m)
 }
 
-// ExpShort calculates out = x^e mod m.
+// ExpShortVarTime calculates out = x^e mod m.
 //
 // The output will be resized to the size of m and overwritten. x must already
-// be reduced modulo m. This leaks the exact bit size of the exponent.
-func (out *Nat) ExpShort(x *Nat, e uint, m *Modulus) *Nat {
-	xR := NewNat().set(x).montgomeryRepresentation(m)
-
-	out.resetFor(m)
-	out.limbs[0] = 1
-	out.montgomeryRepresentation(m)
-
+// be reduced modulo m. This leaks the exponent through timing side-channels.
+func (out *Nat) ExpShortVarTime(x *Nat, e uint, m *Modulus) *Nat {
 	// For short exponents, precomputing a table and using a window like in Exp
-	// doesn't pay off. Instead, we do a simple constant-time conditional
-	// square-and-multiply chain, skipping the initial run of zeroes.
-	tmp := NewNat().ExpandFor(m)
-	for i := bits.UintSize - bitLen(e); i < bits.UintSize; i++ {
+	// doesn't pay off. Instead, we do a simple conditional square-and-multiply
+	// chain, skipping the initial run of zeroes.
+	xR := NewNat().set(x).montgomeryRepresentation(m)
+	out.set(xR)
+	for i := bits.UintSize - bitLen(e) + 1; i < bits.UintSize; i++ {
 		out.montgomeryMul(out, out, m)
-		k := (e >> (bits.UintSize - i - 1)) & 1
-		tmp.montgomeryMul(out, xR, m)
-		out.assign(ctEq(k, 1), tmp)
+		if k := (e >> (bits.UintSize - i - 1)) & 1; k != 0 {
+			out.montgomeryMul(out, xR, m)
+		}
 	}
 	return out.montgomeryReduction(m)
 }
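
The same square-and-multiply chain, sketched with plain uint64 arithmetic so the bit scanning is easy to follow; this is an illustration under stated assumptions (e > 0, a modulus small enough that products fit in 64 bits), not the Montgomery-domain code above.

// Sketch only: assumes e > 0 and a modulus small enough that out*out fits in uint64.
package main

import (
	"fmt"
	"math/bits"
)

func expShortVarTime(x, e, m uint64) uint64 {
	out := x % m // the most significant set bit of e is handled by this initialization
	for i := 64 - bits.Len64(e) + 1; i < 64; i++ {
		out = out * out % m // square for every remaining exponent bit
		if (e>>(64-i-1))&1 != 0 {
			out = out * x % m // multiply only when the bit is set (this is the timing leak)
		}
	}
	return out
}

func main() {
	fmt.Println(expShortVarTime(3, 12, 13)) // 3^12 mod 13 = 1
}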
diff --git a/src/crypto/internal/bigmod/nat_asm.go b/src/crypto/internal/bigmod/nat_asm.go
index 5eb91e1..0283b07 100644
--- a/src/crypto/internal/bigmod/nat_asm.go
+++ b/src/crypto/internal/bigmod/nat_asm.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && (386 || amd64 || arm || arm64 || ppc64 || ppc64le || s390x)
+//go:build !purego && (386 || amd64 || arm || arm64 || ppc64 || ppc64le || riscv64 || s390x)
 
 package bigmod
 
diff --git a/src/crypto/internal/bigmod/nat_noasm.go b/src/crypto/internal/bigmod/nat_noasm.go
index eff1253..71f38da 100644
--- a/src/crypto/internal/bigmod/nat_noasm.go
+++ b/src/crypto/internal/bigmod/nat_noasm.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build purego || !(386 || amd64 || arm || arm64 || ppc64 || ppc64le || s390x)
+//go:build purego || !(386 || amd64 || arm || arm64 || ppc64 || ppc64le || riscv64 || s390x)
 
 package bigmod
 
diff --git a/src/crypto/internal/bigmod/nat_riscv64.s b/src/crypto/internal/bigmod/nat_riscv64.s
new file mode 100644
index 0000000..c1d9cc0
--- /dev/null
+++ b/src/crypto/internal/bigmod/nat_riscv64.s
@@ -0,0 +1,91 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !purego
+
+#include "textflag.h"
+
+// func addMulVVW1024(z, x *uint, y uint) (c uint)
+TEXT ·addMulVVW1024(SB),$0-32
+	MOV	$16, X30
+	JMP	addMulVVWx(SB)
+
+// func addMulVVW1536(z, x *uint, y uint) (c uint)
+TEXT ·addMulVVW1536(SB),$0-32
+	MOV	$24, X30
+	JMP	addMulVVWx(SB)
+
+// func addMulVVW2048(z, x *uint, y uint) (c uint)
+TEXT ·addMulVVW2048(SB),$0-32
+	MOV	$32, X30
+	JMP	addMulVVWx(SB)
+
+TEXT addMulVVWx(SB),NOFRAME|NOSPLIT,$0
+	MOV	z+0(FP), X5
+	MOV	x+8(FP), X7
+	MOV	y+16(FP), X6
+	MOV	$0, X29
+
+	BEQZ	X30, done
+loop:
+	MOV	0*8(X5), X10	// z[0]
+	MOV	1*8(X5), X13	// z[1]
+	MOV	2*8(X5), X16	// z[2]
+	MOV	3*8(X5), X19	// z[3]
+
+	MOV	0*8(X7), X8	// x[0]
+	MOV	1*8(X7), X11	// x[1]
+	MOV	2*8(X7), X14	// x[2]
+	MOV	3*8(X7), X17	// x[3]
+
+	MULHU	X8, X6, X9	// z_hi[0] = x[0] * y
+	MUL	X8, X6, X8	// z_lo[0] = x[0] * y
+	ADD	X8, X10, X21	// z_lo[0] = x[0] * y + z[0]
+	SLTU	X8, X21, X22
+	ADD	X9, X22, X9	// z_hi[0] = x[0] * y + z[0]
+	ADD	X21, X29, X10	// z_lo[0] = x[0] * y + z[0] + c
+	SLTU	X21, X10, X22
+	ADD	X9, X22, X29	// next c
+
+	MULHU	X11, X6, X12	// z_hi[1] = x[1] * y
+	MUL	X11, X6, X11	// z_lo[1] = x[1] * y
+	ADD	X11, X13, X21	// z_lo[1] = x[1] * y + z[1]
+	SLTU	X11, X21, X22
+	ADD	X12, X22, X12	// z_hi[1] = x[1] * y + z[1]
+	ADD	X21, X29, X13	// z_lo[1] = x[1] * y + z[1] + c
+	SLTU	X21, X13, X22
+	ADD	X12, X22, X29	// next c
+
+	MULHU	X14, X6, X15	// z_hi[2] = x[2] * y
+	MUL	X14, X6, X14	// z_lo[2] = x[2] * y
+	ADD	X14, X16, X21	// z_lo[2] = x[2] * y + z[2]
+	SLTU	X14, X21, X22
+	ADD	X15, X22, X15	// z_hi[2] = x[2] * y + z[2]
+	ADD	X21, X29, X16	// z_lo[2] = x[2] * y + z[2] + c
+	SLTU	X21, X16, X22
+	ADD	X15, X22, X29	// next c
+
+	MULHU	X17, X6, X18	// z_hi[3] = x[3] * y
+	MUL	X17, X6, X17	// z_lo[3] = x[3] * y
+	ADD	X17, X19, X21	// z_lo[3] = x[3] * y + z[3]
+	SLTU	X17, X21, X22
+	ADD	X18, X22, X18	// z_hi[3] = x[3] * y + z[3]
+	ADD	X21, X29, X19	// z_lo[3] = x[3] * y + z[3] + c
+	SLTU	X21, X19, X22
+	ADD	X18, X22, X29	// next c
+
+	MOV	X10, 0*8(X5)	// z[0]
+	MOV	X13, 1*8(X5)	// z[1]
+	MOV	X16, 2*8(X5)	// z[2]
+	MOV	X19, 3*8(X5)	// z[3]
+
+	ADD	$32, X5
+	ADD	$32, X7
+
+	SUB	$4, X30
+	BNEZ	X30, loop
+
+done:
+	MOV	X29, c+24(FP)
+	RET
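
For readers following the assembly, a portable Go sketch of the same z += x*y multiply-accumulate with carry, using math/bits in place of MULHU/MUL and the SLTU carry chains; the helper name and the tiny inputs are illustrative assumptions.

// Sketch only: a portable illustration of the multiply-accumulate, not the assembly.
package main

import (
	"fmt"
	"math/bits"
)

func addMulVVWSketch(z, x []uint, y uint) (carry uint) {
	for i := range z {
		hi, lo := bits.Mul(x[i], y)    // like MULHU / MUL
		lo, c := bits.Add(lo, z[i], 0) // like ADD plus SLTU carry detection
		hi += c
		lo, c = bits.Add(lo, carry, 0)
		hi += c
		z[i] = lo
		carry = hi
	}
	return carry
}

func main() {
	z := []uint{1, 2}
	x := []uint{3, 4}
	c := addMulVVWSketch(z, x, 5)
	fmt.Println(c, z) // 0 [16 22]
}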
diff --git a/src/crypto/internal/bigmod/nat_test.go b/src/crypto/internal/bigmod/nat_test.go
index 76e5570..7a956e3 100644
--- a/src/crypto/internal/bigmod/nat_test.go
+++ b/src/crypto/internal/bigmod/nat_test.go
@@ -303,7 +303,7 @@
 	m := modulusFromBytes([]byte{13})
 	x := &Nat{[]uint{3}}
 	out := &Nat{[]uint{0}}
-	out.ExpShort(x, 12, m)
+	out.ExpShortVarTime(x, 12, m)
 	expected := &Nat{[]uint{1}}
 	if out.Equal(expected) != 1 {
 		t.Errorf("%+v != %+v", out, expected)
diff --git a/src/crypto/internal/boring/aes.go b/src/crypto/internal/boring/aes.go
index 6fae1d5..8819f57 100644
--- a/src/crypto/internal/boring/aes.go
+++ b/src/crypto/internal/boring/aes.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !msan
 
 package boring
 
diff --git a/src/crypto/internal/boring/boring.go b/src/crypto/internal/boring/boring.go
index 102380a..ded36a9 100644
--- a/src/crypto/internal/boring/boring.go
+++ b/src/crypto/internal/boring/boring.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !msan
 
 package boring
 
diff --git a/src/crypto/internal/boring/doc.go b/src/crypto/internal/boring/doc.go
index 6060fe5..091e0d6 100644
--- a/src/crypto/internal/boring/doc.go
+++ b/src/crypto/internal/boring/doc.go
@@ -10,7 +10,7 @@
 // Enabled reports whether BoringCrypto is available.
 // When enabled is false, all functions in this package panic.
 //
-// BoringCrypto is only available on linux/amd64 systems.
+// BoringCrypto is only available on linux/amd64 and linux/arm64 systems.
 const Enabled = available
 
 // A BigInt is the raw words from a BigInt.
diff --git a/src/crypto/internal/boring/ecdh.go b/src/crypto/internal/boring/ecdh.go
index 8f46d81..6a5d174 100644
--- a/src/crypto/internal/boring/ecdh.go
+++ b/src/crypto/internal/boring/ecdh.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !msan
 
 package boring
 
diff --git a/src/crypto/internal/boring/ecdsa.go b/src/crypto/internal/boring/ecdsa.go
index e15f368..2adfdb2 100644
--- a/src/crypto/internal/boring/ecdsa.go
+++ b/src/crypto/internal/boring/ecdsa.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !msan
 
 package boring
 
diff --git a/src/crypto/internal/boring/hmac.go b/src/crypto/internal/boring/hmac.go
index 6241a65..ae926da 100644
--- a/src/crypto/internal/boring/hmac.go
+++ b/src/crypto/internal/boring/hmac.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !msan
 
 package boring
 
diff --git a/src/crypto/internal/boring/notboring.go b/src/crypto/internal/boring/notboring.go
index 1c5e4c7..361dec9 100644
--- a/src/crypto/internal/boring/notboring.go
+++ b/src/crypto/internal/boring/notboring.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !(boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan && cgo)
+//go:build !(boringcrypto && linux && (amd64 || arm64) && !android && !msan && cgo)
 
 package boring
 
diff --git a/src/crypto/internal/boring/rand.go b/src/crypto/internal/boring/rand.go
index 7639c01..556b98a 100644
--- a/src/crypto/internal/boring/rand.go
+++ b/src/crypto/internal/boring/rand.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !msan
 
 package boring
 
diff --git a/src/crypto/internal/boring/rsa.go b/src/crypto/internal/boring/rsa.go
index fa693ea..e3baa44 100644
--- a/src/crypto/internal/boring/rsa.go
+++ b/src/crypto/internal/boring/rsa.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !msan
 
 package boring
 
diff --git a/src/crypto/internal/boring/sha.go b/src/crypto/internal/boring/sha.go
index cf82f3f..a49c119 100644
--- a/src/crypto/internal/boring/sha.go
+++ b/src/crypto/internal/boring/sha.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !msan
 
 package boring
 
diff --git a/src/crypto/internal/boring/sig/sig_other.s b/src/crypto/internal/boring/sig/sig_other.s
index 2bbb1df..f7ef4cd 100644
--- a/src/crypto/internal/boring/sig/sig_other.s
+++ b/src/crypto/internal/boring/sig/sig_other.s
@@ -8,7 +8,6 @@
 // On other platforms (those using this source file), they don't.
 
 //go:build !amd64
-// +build !amd64
 
 TEXT ·BoringCrypto(SB),$0
 	RET
diff --git a/src/crypto/internal/edwards25519/field/fe_alias_test.go b/src/crypto/internal/edwards25519/field/fe_alias_test.go
index bf1efdc..0c81239 100644
--- a/src/crypto/internal/edwards25519/field/fe_alias_test.go
+++ b/src/crypto/internal/edwards25519/field/fe_alias_test.go
@@ -129,9 +129,9 @@
 		var err error
 		switch {
 		case tt.oneArgF != nil:
-			err = quick.Check(checkAliasingOneArg(tt.oneArgF), &quick.Config{MaxCountScale: 1 << 8})
+			err = quick.Check(checkAliasingOneArg(tt.oneArgF), quickCheckConfig(256))
 		case tt.twoArgsF != nil:
-			err = quick.Check(checkAliasingTwoArgs(tt.twoArgsF), &quick.Config{MaxCountScale: 1 << 8})
+			err = quick.Check(checkAliasingTwoArgs(tt.twoArgsF), quickCheckConfig(256))
 		}
 		if err != nil {
 			t.Errorf("%v: %v", tt.name, err)
diff --git a/src/crypto/internal/edwards25519/field/fe_arm64.s b/src/crypto/internal/edwards25519/field/fe_arm64.s
index 751ab2a..3126a43 100644
--- a/src/crypto/internal/edwards25519/field/fe_arm64.s
+++ b/src/crypto/internal/edwards25519/field/fe_arm64.s
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build arm64,gc,!purego
+//go:build arm64 && gc && !purego
 
 #include "textflag.h"
 
diff --git a/src/crypto/internal/edwards25519/field/fe_generic.go b/src/crypto/internal/edwards25519/field/fe_generic.go
index 3582df8..86f5fd9 100644
--- a/src/crypto/internal/edwards25519/field/fe_generic.go
+++ b/src/crypto/internal/edwards25519/field/fe_generic.go
@@ -156,7 +156,7 @@
 	rr4 := r4.lo&maskLow51Bits + c3
 
 	// Now all coefficients fit into 64-bit registers but are still too large to
-	// be passed around as a Element. We therefore do one last carry chain,
+	// be passed around as an Element. We therefore do one last carry chain,
 	// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
 	*v = Element{rr0, rr1, rr2, rr3, rr4}
 	v.carryPropagate()
diff --git a/src/crypto/internal/edwards25519/field/fe_test.go b/src/crypto/internal/edwards25519/field/fe_test.go
index 945a024..a24fbfeb 100644
--- a/src/crypto/internal/edwards25519/field/fe_test.go
+++ b/src/crypto/internal/edwards25519/field/fe_test.go
@@ -21,9 +21,15 @@
 	return hex.EncodeToString(v.Bytes())
 }
 
-// quickCheckConfig1024 will make each quickcheck test run (1024 * -quickchecks)
-// times. The default value of -quickchecks is 100.
-var quickCheckConfig1024 = &quick.Config{MaxCountScale: 1 << 10}
+// quickCheckConfig returns a quick.Config that scales the max count by the
+// given factor if the -short flag is not set.
+func quickCheckConfig(slowScale int) *quick.Config {
+	cfg := new(quick.Config)
+	if !testing.Short() {
+		cfg.MaxCountScale = float64(slowScale)
+	}
+	return cfg
+}
 
 func generateFieldElement(rand *mathrand.Rand) Element {
 	const maskLow52Bits = (1 << 52) - 1
@@ -114,7 +120,7 @@
 		return t1.Equal(t2) == 1 && isInBounds(t1) && isInBounds(t2)
 	}
 
-	if err := quick.Check(multiplyDistributesOverAdd, quickCheckConfig1024); err != nil {
+	if err := quick.Check(multiplyDistributesOverAdd, quickCheckConfig(1024)); err != nil {
 		t.Error(err)
 	}
 }
@@ -419,7 +425,7 @@
 		return t1.Equal(t2) == 1 && isInBounds(t1) && isInBounds(t2)
 	}
 
-	if err := quick.Check(mult32EquivalentToMul, quickCheckConfig1024); err != nil {
+	if err := quick.Check(mult32EquivalentToMul, quickCheckConfig(1024)); err != nil {
 		t.Error(err)
 	}
 }
@@ -498,7 +504,7 @@
 		return *t1 == *t2 && isInBounds(t2)
 	}
 
-	if err := quick.Check(asmLikeGeneric, quickCheckConfig1024); err != nil {
+	if err := quick.Check(asmLikeGeneric, quickCheckConfig(1024)); err != nil {
 		t.Error(err)
 	}
 
@@ -522,7 +528,7 @@
 		return t1 == t2 && isInBounds(&t2)
 	}
 
-	if err := quick.Check(asmLikeGeneric, quickCheckConfig1024); err != nil {
+	if err := quick.Check(asmLikeGeneric, quickCheckConfig(1024)); err != nil {
 		t.Error(err)
 	}
 }
@@ -546,7 +552,7 @@
 			b1 == b2 && isInBounds(&b2)
 	}
 
-	if err := quick.Check(asmLikeGeneric, quickCheckConfig1024); err != nil {
+	if err := quick.Check(asmLikeGeneric, quickCheckConfig(1024)); err != nil {
 		t.Error(err)
 	}
 }
diff --git a/src/crypto/internal/edwards25519/scalar_alias_test.go b/src/crypto/internal/edwards25519/scalar_alias_test.go
index 4d83441..1893a7f 100644
--- a/src/crypto/internal/edwards25519/scalar_alias_test.go
+++ b/src/crypto/internal/edwards25519/scalar_alias_test.go
@@ -100,7 +100,7 @@
 			}, v, x, y)
 		},
 	} {
-		err := quick.Check(f, &quick.Config{MaxCountScale: 1 << 5})
+		err := quick.Check(f, quickCheckConfig(32))
 		if err != nil {
 			t.Errorf("%v: %v", name, err)
 		}
diff --git a/src/crypto/internal/edwards25519/scalar_test.go b/src/crypto/internal/edwards25519/scalar_test.go
index 67bcdaf..05551ef 100644
--- a/src/crypto/internal/edwards25519/scalar_test.go
+++ b/src/crypto/internal/edwards25519/scalar_test.go
@@ -14,6 +14,16 @@
 	"testing/quick"
 )
 
+// quickCheckConfig returns a quick.Config that scales the max count by the
+// given factor if the -short flag is not set.
+func quickCheckConfig(slowScale int) *quick.Config {
+	cfg := new(quick.Config)
+	if !testing.Short() {
+		cfg.MaxCountScale = float64(slowScale)
+	}
+	return cfg
+}
+
 var scOneBytes = [32]byte{1}
 var scOne, _ = new(Scalar).SetCanonicalBytes(scOneBytes[:])
 var scMinusOne, _ = new(Scalar).SetCanonicalBytes(scalarMinusOneBytes[:])
@@ -53,15 +63,11 @@
 	return reflect.ValueOf(val)
 }
 
-// quickCheckConfig1024 will make each quickcheck test run (1024 * -quickchecks)
-// times. The default value of -quickchecks is 100.
-var quickCheckConfig1024 = &quick.Config{MaxCountScale: 1 << 10}
-
 func TestScalarGenerate(t *testing.T) {
 	f := func(sc Scalar) bool {
 		return isReduced(sc.Bytes())
 	}
-	if err := quick.Check(f, quickCheckConfig1024); err != nil {
+	if err := quick.Check(f, quickCheckConfig(1024)); err != nil {
 		t.Errorf("generated unreduced scalar: %v", err)
 	}
 }
@@ -76,7 +82,7 @@
 		repr := sc.Bytes()
 		return bytes.Equal(in[:], repr) && isReduced(repr)
 	}
-	if err := quick.Check(f1, quickCheckConfig1024); err != nil {
+	if err := quick.Check(f1, quickCheckConfig(1024)); err != nil {
 		t.Errorf("failed bytes->scalar->bytes round-trip: %v", err)
 	}
 
@@ -86,7 +92,7 @@
 		}
 		return sc1 == sc2
 	}
-	if err := quick.Check(f2, quickCheckConfig1024); err != nil {
+	if err := quick.Check(f2, quickCheckConfig(1024)); err != nil {
 		t.Errorf("failed scalar->bytes->scalar round-trip: %v", err)
 	}
 
@@ -115,7 +121,7 @@
 		inBig := bigIntFromLittleEndianBytes(in[:])
 		return inBig.Mod(inBig, mod).Cmp(scBig) == 0
 	}
-	if err := quick.Check(f, quickCheckConfig1024); err != nil {
+	if err := quick.Check(f, quickCheckConfig(1024)); err != nil {
 		t.Error(err)
 	}
 }
@@ -175,7 +181,7 @@
 		return t1 == t2 && isReduced(reprT1) && isReduced(reprT2)
 	}
 
-	if err := quick.Check(multiplyDistributesOverAdd, quickCheckConfig1024); err != nil {
+	if err := quick.Check(multiplyDistributesOverAdd, quickCheckConfig(1024)); err != nil {
 		t.Error(err)
 	}
 }
@@ -194,7 +200,7 @@
 		return t1 == t2 && isReduced(t1.Bytes())
 	}
 
-	if err := quick.Check(addLikeSubNeg, quickCheckConfig1024); err != nil {
+	if err := quick.Check(addLikeSubNeg, quickCheckConfig(1024)); err != nil {
 		t.Error(err)
 	}
 }
diff --git a/src/crypto/internal/edwards25519/scalarmult_test.go b/src/crypto/internal/edwards25519/scalarmult_test.go
index 6c92ab3..4a00c79 100644
--- a/src/crypto/internal/edwards25519/scalarmult_test.go
+++ b/src/crypto/internal/edwards25519/scalarmult_test.go
@@ -10,10 +10,6 @@
 )
 
 var (
-	// quickCheckConfig32 will make each quickcheck test run (32 * -quickchecks)
-	// times. The default value of -quickchecks is 100.
-	quickCheckConfig32 = &quick.Config{MaxCountScale: 1 << 5}
-
 	// a random scalar generated using dalek.
 	dalekScalar, _ = (&Scalar{}).SetCanonicalBytes([]byte{219, 106, 114, 9, 174, 249, 155, 89, 69, 203, 201, 93, 92, 116, 234, 187, 78, 115, 103, 172, 182, 98, 62, 103, 187, 136, 13, 100, 248, 110, 12, 4})
 	// the above, times the edwards25519 basepoint.
@@ -83,7 +79,7 @@
 		return check.Equal(&r) == 1
 	}
 
-	if err := quick.Check(scalarMultDistributesOverAdd, quickCheckConfig32); err != nil {
+	if err := quick.Check(scalarMultDistributesOverAdd, quickCheckConfig(32)); err != nil {
 		t.Error(err)
 	}
 }
@@ -105,7 +101,7 @@
 		return p.Equal(&q) == 1
 	}
 
-	if err := quick.Check(scalarMultNonIdentityPoint, quickCheckConfig32); err != nil {
+	if err := quick.Check(scalarMultNonIdentityPoint, quickCheckConfig(32)); err != nil {
 		t.Error(err)
 	}
 }
@@ -149,7 +145,7 @@
 		return p.Equal(&q) == 1
 	}
 
-	if err := quick.Check(scalarMultMatchesBaseMult, quickCheckConfig32); err != nil {
+	if err := quick.Check(scalarMultMatchesBaseMult, quickCheckConfig(32)); err != nil {
 		t.Error(err)
 	}
 }
@@ -177,7 +173,7 @@
 		return p.Equal(&check) == 1
 	}
 
-	if err := quick.Check(varTimeDoubleBaseMultMatchesBaseMult, quickCheckConfig32); err != nil {
+	if err := quick.Check(varTimeDoubleBaseMultMatchesBaseMult, quickCheckConfig(32)); err != nil {
 		t.Error(err)
 	}
 }
diff --git a/src/crypto/md5/md5.go b/src/crypto/md5/md5.go
index ccee4ea..83e9e4c 100644
--- a/src/crypto/md5/md5.go
+++ b/src/crypto/md5/md5.go
@@ -96,7 +96,7 @@
 }
 
 // New returns a new hash.Hash computing the MD5 checksum. The Hash also
-// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
+// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to
 // marshal and unmarshal the internal state of the hash.
 func New() hash.Hash {
 	d := new(digest)
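Annotation (not part of the patch): the doc comment above now links encoding.BinaryMarshaler and encoding.BinaryUnmarshaler. A minimal sketch of what that buys callers — a partially written MD5 state can be snapshotted and resumed, yielding the same digest as hashing the input in one pass:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding"
	"fmt"
)

func main() {
	h1 := md5.New()
	h1.Write([]byte("hello "))

	// Snapshot the partially written internal state.
	state, err := h1.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Restore it into a fresh hash and finish writing.
	h2 := md5.New()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	h2.Write([]byte("world"))

	want := md5.Sum([]byte("hello world"))
	fmt.Println(bytes.Equal(h2.Sum(nil), want[:])) // true
}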
diff --git a/src/crypto/md5/md5block_amd64.s b/src/crypto/md5/md5block_amd64.s
index 7c7d92d..75c8074 100644
--- a/src/crypto/md5/md5block_amd64.s
+++ b/src/crypto/md5/md5block_amd64.s
@@ -25,6 +25,7 @@
 	MOVL	(1*4)(BP),	BX
 	MOVL	(2*4)(BP),	CX
 	MOVL	(3*4)(BP),	DX
+	MOVL	$0xffffffff,	R11
 
 	CMPQ	SI,		DI
 	JEQ	end
@@ -40,14 +41,15 @@
 
 #define ROUND1(a, b, c, d, index, const, shift) \
 	XORL	c, R9; \
-	LEAL	const(a)(R8*1), a; \
+	ADDL	$const, a; \
+	ADDL	R8, a; \
 	ANDL	b, R9; \
-	XORL d, R9; \
-	MOVL (index*4)(SI), R8; \
-	ADDL R9, a; \
-	ROLL $shift, a; \
-	MOVL c, R9; \
-	ADDL b, a
+	XORL	d, R9; \
+	MOVL	(index*4)(SI), R8; \
+	ADDL	R9, a; \
+	ROLL	$shift, a; \
+	MOVL	c, R9; \
+	ADDL	b, a
 
 	ROUND1(AX,BX,CX,DX, 1,0xd76aa478, 7);
 	ROUND1(DX,AX,BX,CX, 2,0xe8c7b756,12);
@@ -64,21 +66,23 @@
 	ROUND1(AX,BX,CX,DX,13,0x6b901122, 7);
 	ROUND1(DX,AX,BX,CX,14,0xfd987193,12);
 	ROUND1(CX,DX,AX,BX,15,0xa679438e,17);
-	ROUND1(BX,CX,DX,AX, 0,0x49b40821,22);
+	ROUND1(BX,CX,DX,AX, 1,0x49b40821,22);
 
-	MOVL	(1*4)(SI),	R8
 	MOVL	DX,		R9
 	MOVL	DX,		R10
 
+// Uses https://github.com/animetosho/md5-optimisation#dependency-shortcut-in-g-function
+
 #define ROUND2(a, b, c, d, index, const, shift) \
-	NOTL	R9; \
-	LEAL	const(a)(R8*1),a; \
+	XORL	R11, R9; \
+	ADDL	$const,	a; \
+	ADDL	R8,	a; \
 	ANDL	b,		R10; \
 	ANDL	c,		R9; \
 	MOVL	(index*4)(SI),R8; \
-	ORL	R9,		R10; \
+	ADDL	R9,	a; \
+	ADDL	R10,	a; \
 	MOVL	c,		R9; \
-	ADDL	R10,		a; \
 	MOVL	c,		R10; \
 	ROLL	$shift,	a; \
 	ADDL	b,		a
@@ -98,22 +102,34 @@
 	ROUND2(AX,BX,CX,DX, 2,0xa9e3e905, 5);
 	ROUND2(DX,AX,BX,CX, 7,0xfcefa3f8, 9);
 	ROUND2(CX,DX,AX,BX,12,0x676f02d9,14);
-	ROUND2(BX,CX,DX,AX, 0,0x8d2a4c8a,20);
+	ROUND2(BX,CX,DX,AX, 5,0x8d2a4c8a,20);
 
-	MOVL	(5*4)(SI),	R8
 	MOVL	CX,		R9
 
-#define ROUND3(a, b, c, d, index, const, shift) \
-	LEAL	const(a)(R8*1),a; \
-	MOVL	(index*4)(SI),R8; \
-	XORL	d,		R9; \
+// Uses https://github.com/animetosho/md5-optimisation#h-function-re-use
+
+#define ROUND3FIRST(a, b, c, d, index, const, shift) \
+	MOVL	d,		R9; \
+	XORL	c,		R9; \
 	XORL	b,		R9; \
+	ADDL	$const,	a; \
+	ADDL	R8,		a; \
+	MOVL	(index*4)(SI),R8; \
 	ADDL	R9,		a; \
 	ROLL	$shift,		a; \
-	MOVL	b,		R9; \
 	ADDL	b,		a
 
-	ROUND3(AX,BX,CX,DX, 8,0xfffa3942, 4);
+#define ROUND3(a, b, c, d, index, const, shift) \
+	XORL	a,		R9; \
+	XORL	b,		R9; \
+	ADDL	$const,	a; \
+	ADDL	R8,		a; \
+	MOVL	(index*4)(SI),R8; \
+	ADDL	R9,		a; \
+	ROLL	$shift,		a; \
+	ADDL	b,		a
+
+	ROUND3FIRST(AX,BX,CX,DX, 8,0xfffa3942, 4);
 	ROUND3(DX,AX,BX,CX,11,0x8771f681,11);
 	ROUND3(CX,DX,AX,BX,14,0x6d9d6122,16);
 	ROUND3(BX,CX,DX,AX, 1,0xfde5380c,23);
@@ -130,13 +146,13 @@
 	ROUND3(CX,DX,AX,BX, 2,0x1fa27cf8,16);
 	ROUND3(BX,CX,DX,AX, 0,0xc4ac5665,23);
 
-	MOVL	(0*4)(SI),	R8
-	MOVL	$0xffffffff,	R9
+	MOVL	R11,	R9
 	XORL	DX,		R9
 
 #define ROUND4(a, b, c, d, index, const, shift) \
-	LEAL	const(a)(R8*1),a; \
-	ORL	b,		R9; \
+	ADDL	$const,	a; \
+	ADDL	R8,		a; \
+	ORL		b,		R9; \
 	XORL	c,		R9; \
 	ADDL	R9,		a; \
 	MOVL	(index*4)(SI),R8; \
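Annotation (not part of the patch): the ROUND2 rewrite above drops the ORL in favor of two ADDLs, following the linked md5-optimisation note — the masked terms (x AND y) and (z AND NOT y) can never set the same bit, so OR-ing them equals adding them, and each term can be folded into the accumulator independently. A small stand-alone check of that identity (my own sketch, not from the change):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	for i := 0; i < 1_000_000; i++ {
		b, c, d := rand.Uint32(), rand.Uint32(), rand.Uint32()
		// (b AND d) has bits only where d is 1; (c AND NOT d) only where d is 0.
		or := (b & d) | (c & ^d)
		sum := (b & d) + (c & ^d)
		if or != sum {
			fmt.Printf("mismatch: b=%#x c=%#x d=%#x\n", b, c, d)
			return
		}
	}
	fmt.Println("OR and ADD agree for the disjoint G-function terms")
}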
diff --git a/src/crypto/rand/rand.go b/src/crypto/rand/rand.go
index 62738e2..d0dcc7c 100644
--- a/src/crypto/rand/rand.go
+++ b/src/crypto/rand/rand.go
@@ -15,7 +15,7 @@
 // available, /dev/urandom otherwise.
 // On OpenBSD and macOS, Reader uses getentropy(2).
 // On other Unix-like systems, Reader reads from /dev/urandom.
-// On Windows systems, Reader uses the RtlGenRandom API.
+// On Windows systems, Reader uses the ProcessPrng API.
 // On JS/Wasm, Reader uses the Web Crypto API.
 // On WASIP1/Wasm, Reader uses random_get from wasi_snapshot_preview1.
 var Reader io.Reader
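Annotation (not part of the patch): whichever platform source backs Reader — now ProcessPrng on Windows — callers read random bytes the same way. A minimal usage sketch; rand.Read fills the whole slice or returns an error:

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil { // n == len(key) iff err == nil
		panic(err)
	}
	fmt.Printf("%x\n", key)
}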
diff --git a/src/crypto/rand/rand_getentropy.go b/src/crypto/rand/rand_getentropy.go
index 68f921b..2102504 100644
--- a/src/crypto/rand/rand_getentropy.go
+++ b/src/crypto/rand/rand_getentropy.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (darwin && !ios) || openbsd
+//go:build (darwin && !ios) || openbsd || netbsd
 
 package rand
 
diff --git a/src/crypto/rand/rand_getrandom.go b/src/crypto/rand/rand_getrandom.go
index 46c4133..09e9ae8 100644
--- a/src/crypto/rand/rand_getrandom.go
+++ b/src/crypto/rand/rand_getrandom.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build dragonfly || freebsd || linux || netbsd || solaris
+//go:build dragonfly || freebsd || linux || solaris
 
 package rand
 
@@ -21,7 +21,7 @@
 		//     is returned by a single call to getrandom() on systems where int
 		//     has a size of 32 bits.
 		maxGetRandomRead = (1 << 25) - 1
-	case "dragonfly", "freebsd", "illumos", "netbsd", "solaris":
+	case "dragonfly", "freebsd", "illumos", "solaris":
 		maxGetRandomRead = 1 << 8
 	default:
 		panic("no maximum specified for GetRandom")
diff --git a/src/crypto/rand/rand_windows.go b/src/crypto/rand/rand_windows.go
index 6c0655c..7380f1f 100644
--- a/src/crypto/rand/rand_windows.go
+++ b/src/crypto/rand/rand_windows.go
@@ -15,11 +15,8 @@
 
 type rngReader struct{}
 
-func (r *rngReader) Read(b []byte) (n int, err error) {
-	// RtlGenRandom only returns 1<<32-1 bytes at a time. We only read at
-	// most 1<<31-1 bytes at a time so that  this works the same on 32-bit
-	// and 64-bit systems.
-	if err := batched(windows.RtlGenRandom, 1<<31-1)(b); err != nil {
+func (r *rngReader) Read(b []byte) (int, error) {
+	if err := windows.ProcessPrng(b); err != nil {
 		return 0, err
 	}
 	return len(b), nil
diff --git a/src/crypto/rand/util.go b/src/crypto/rand/util.go
index 11b1a28..fd67ba2 100644
--- a/src/crypto/rand/util.go
+++ b/src/crypto/rand/util.go
@@ -12,7 +12,7 @@
 )
 
 // Prime returns a number of the given bit length that is prime with high probability.
-// Prime will return error for any error returned by rand.Read or if bits < 2.
+// Prime will return error for any error returned by [rand.Read] or if bits < 2.
 func Prime(rand io.Reader, bits int) (*big.Int, error) {
 	if bits < 2 {
 		return nil, errors.New("crypto/rand: prime size must be at least 2-bit")
diff --git a/src/crypto/rc4/rc4.go b/src/crypto/rc4/rc4.go
index f08da0e..67452ec 100644
--- a/src/crypto/rc4/rc4.go
+++ b/src/crypto/rc4/rc4.go
@@ -26,7 +26,7 @@
 	return "crypto/rc4: invalid key size " + strconv.Itoa(int(k))
 }
 
-// NewCipher creates and returns a new Cipher. The key argument should be the
+// NewCipher creates and returns a new [Cipher]. The key argument should be the
 // RC4 key, at least 1 byte and at most 256 bytes.
 func NewCipher(key []byte) (*Cipher, error) {
 	k := len(key)
@@ -45,7 +45,7 @@
 	return &c, nil
 }
 
-// Reset zeros the key data and makes the Cipher unusable.
+// Reset zeros the key data and makes the [Cipher] unusable.
 //
 // Deprecated: Reset can't guarantee that the key will be entirely removed from
 // the process's memory.
diff --git a/src/crypto/rsa/example_test.go b/src/crypto/rsa/example_test.go
index d07ee7d..d176743 100644
--- a/src/crypto/rsa/example_test.go
+++ b/src/crypto/rsa/example_test.go
@@ -78,7 +78,7 @@
 		return
 	}
 
-	fmt.Printf("Plaintext: %s\n", string(plaintext))
+	fmt.Printf("Plaintext: %s\n", plaintext)
 }
 
 func ExampleSignPKCS1v15() {
@@ -149,7 +149,7 @@
 		return
 	}
 
-	fmt.Printf("Plaintext: %s\n", string(plaintext))
+	fmt.Printf("Plaintext: %s\n", plaintext)
 
 	// Remember that encryption only provides confidentiality. The
 	// ciphertext should be signed before authenticity is assumed and, even
diff --git a/src/crypto/rsa/pkcs1v15.go b/src/crypto/rsa/pkcs1v15.go
index 55fea1a..2705036 100644
--- a/src/crypto/rsa/pkcs1v15.go
+++ b/src/crypto/rsa/pkcs1v15.go
@@ -16,7 +16,7 @@
 // This file implements encryption and decryption using PKCS #1 v1.5 padding.
 
 // PKCS1v15DecryptOptions is for passing options to PKCS #1 v1.5 decryption using
-// the crypto.Decrypter interface.
+// the [crypto.Decrypter] interface.
 type PKCS1v15DecryptOptions struct {
 	// SessionKeyLen is the length of the session key that is being
 	// decrypted. If not zero, then a padding error during decryption will
diff --git a/src/crypto/rsa/pss.go b/src/crypto/rsa/pss.go
index 3a377cc..b63b6eb 100644
--- a/src/crypto/rsa/pss.go
+++ b/src/crypto/rsa/pss.go
@@ -266,7 +266,7 @@
 	Hash crypto.Hash
 }
 
-// HashFunc returns opts.Hash so that PSSOptions implements crypto.SignerOpts.
+// HashFunc returns opts.Hash so that [PSSOptions] implements [crypto.SignerOpts].
 func (opts *PSSOptions) HashFunc() crypto.Hash {
 	return opts.Hash
 }
diff --git a/src/crypto/rsa/rsa.go b/src/crypto/rsa/rsa.go
index f0aef1f..9342930 100644
--- a/src/crypto/rsa/rsa.go
+++ b/src/crypto/rsa/rsa.go
@@ -43,6 +43,10 @@
 var bigOne = big.NewInt(1)
 
 // A PublicKey represents the public part of an RSA key.
+//
+// The value of the modulus N is considered secret by this library and protected
+// from leaking through timing side-channels. However, neither the value of the
+// exponent E nor the precise bit size of N are similarly protected.
 type PublicKey struct {
 	N *big.Int // modulus
 	E int      // public exponent
@@ -150,11 +154,11 @@
 }
 
 // Sign signs digest with priv, reading randomness from rand. If opts is a
-// *PSSOptions then the PSS algorithm will be used, otherwise PKCS #1 v1.5 will
+// *[PSSOptions] then the PSS algorithm will be used, otherwise PKCS #1 v1.5 will
 // be used. digest must be the result of hashing the input message using
 // opts.HashFunc().
 //
-// This method implements crypto.Signer, which is an interface to support keys
+// This method implements [crypto.Signer], which is an interface to support keys
 // where the private part is kept in, for example, a hardware module. Common
 // uses should use the Sign* functions in this package directly.
 func (priv *PrivateKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
@@ -166,8 +170,8 @@
 }
 
 // Decrypt decrypts ciphertext with priv. If opts is nil or of type
-// *PKCS1v15DecryptOptions then PKCS #1 v1.5 decryption is performed. Otherwise
-// opts must have type *OAEPOptions and OAEP decryption is done.
+// *[PKCS1v15DecryptOptions] then PKCS #1 v1.5 decryption is performed. Otherwise
+// opts must have type *[OAEPOptions] and OAEP decryption is done.
 func (priv *PrivateKey) Decrypt(rand io.Reader, ciphertext []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) {
 	if opts == nil {
 		return DecryptPKCS1v15(rand, priv, ciphertext)
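Annotation (not part of the patch): the Decrypt doc above describes dispatch on the options type. A hedged, self-contained sketch of the OAEP path through the crypto.Decrypter interface; the 2048-bit key size and SHA-256 are my own choices for the example:

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	ciphertext, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, &priv.PublicKey, []byte("hello"), nil)
	if err != nil {
		panic(err)
	}

	// Passing *rsa.OAEPOptions selects OAEP decryption; the hash must match
	// the one used for encryption. The rand argument is ignored here.
	plaintext, err := priv.Decrypt(nil, ciphertext, &rsa.OAEPOptions{Hash: crypto.SHA256})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext)
}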
@@ -288,7 +292,7 @@
 //
 // Deprecated: The use of this function with a number of primes different from
 // two is not recommended for the above security, compatibility, and performance
-// reasons. Use GenerateKey instead.
+// reasons. Use [GenerateKey] instead.
 //
 // [On the Security of Multi-prime RSA]: http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
 func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (*PrivateKey, error) {
@@ -471,17 +475,13 @@
 }
 
 // ErrMessageTooLong is returned when attempting to encrypt or sign a message
-// which is too large for the size of the key. When using SignPSS, this can also
+// which is too large for the size of the key. When using [SignPSS], this can also
 // be returned if the size of the salt is too large.
 var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA key size")
 
 func encrypt(pub *PublicKey, plaintext []byte) ([]byte, error) {
 	boring.Unreachable()
 
-	// Most of the CPU time for encryption and verification is spent in this
-	// NewModulusFromBig call, because PublicKey doesn't have a Precomputed
-	// field. If performance becomes an issue, consider placing a private
-	// sync.Once on PublicKey to compute this.
 	N, err := bigmod.NewModulusFromBig(pub.N)
 	if err != nil {
 		return nil, err
@@ -492,7 +492,7 @@
 	}
 	e := uint(pub.E)
 
-	return bigmod.NewNat().ExpShort(m, e, N).Bytes(N), nil
+	return bigmod.NewNat().ExpShortVarTime(m, e, N).Bytes(N), nil
 }
 
 // EncryptOAEP encrypts the given message with RSA-OAEP.
@@ -686,7 +686,7 @@
 	}
 
 	if check {
-		c1 := bigmod.NewNat().ExpShort(m, uint(priv.E), N)
+		c1 := bigmod.NewNat().ExpShortVarTime(m, uint(priv.E), N)
 		if c1.Equal(c) != 1 {
 			return nil, ErrDecryption
 		}
@@ -704,7 +704,7 @@
 // The random parameter is legacy and ignored, and it can be nil.
 //
 // The label parameter must match the value given when encrypting. See
-// EncryptOAEP for details.
+// [EncryptOAEP] for details.
 func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error) {
 	return decryptOAEP(hash, hash, random, priv, ciphertext, label)
 }
diff --git a/src/crypto/rsa/rsa_test.go b/src/crypto/rsa/rsa_test.go
index 3278a7f..2afa045 100644
--- a/src/crypto/rsa/rsa_test.go
+++ b/src/crypto/rsa/rsa_test.go
@@ -330,6 +330,13 @@
 
 func parseKey(s string) *PrivateKey {
 	p, _ := pem.Decode([]byte(s))
+	if p.Type == "PRIVATE KEY" {
+		k, err := x509.ParsePKCS8PrivateKey(p.Bytes)
+		if err != nil {
+			panic(err)
+		}
+		return k.(*PrivateKey)
+	}
 	k, err := x509.ParsePKCS1PrivateKey(p.Bytes)
 	if err != nil {
 		panic(err)
@@ -337,125 +344,128 @@
 	return k
 }
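Annotation (not part of the patch): parseKey now accepts both PEM forms — PKCS #8 blocks (type "PRIVATE KEY", parsed with ParsePKCS8PrivateKey and type-asserted to *rsa.PrivateKey) and legacy PKCS #1 blocks. The fixtures below spell the type "TESTING KEY", presumably rewritten to "PRIVATE KEY" by the testingKey helper (not shown in this diff) so secret scanners ignore them. A stand-alone sketch of the same dual handling outside the test:

package keyutil

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// ParseRSAPrivateKey accepts either a PKCS #8 or a PKCS #1 PEM block.
func ParseRSAPrivateKey(pemText string) (*rsa.PrivateKey, error) {
	block, _ := pem.Decode([]byte(pemText))
	if block == nil {
		return nil, fmt.Errorf("no PEM block found")
	}
	switch block.Type {
	case "PRIVATE KEY": // PKCS #8
		k, err := x509.ParsePKCS8PrivateKey(block.Bytes)
		if err != nil {
			return nil, err
		}
		rsaKey, ok := k.(*rsa.PrivateKey)
		if !ok {
			return nil, fmt.Errorf("PKCS #8 block does not contain an RSA key")
		}
		return rsaKey, nil
	case "RSA PRIVATE KEY": // PKCS #1
		return x509.ParsePKCS1PrivateKey(block.Bytes)
	default:
		return nil, fmt.Errorf("unsupported PEM type %q", block.Type)
	}
}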
 
-var test2048Key = parseKey(testingKey(`-----BEGIN RSA TESTING KEY-----
-MIIEnwIBAAKCAQBxY8hCshkKiXCUKydkrtQtQSRke28w4JotocDiVqou4k55DEDJ
-akvWbXXDcakV4HA8R2tOGgbxvTjFo8EK470w9O9ipapPUSrRRaBsSOlkaaIs6OYh
-4FLwZpqMNBVVEtguVUR/C34Y2pS9kRrHs6q+cGhDZolkWT7nGy5eSEvPDHg0EBq1
-1hu6HmPmI3r0BInONqJg2rcK3U++wk1lnbD3ysCZsKOqRUms3n/IWKeTqXXmz2XK
-J2t0NSXwiDmA9q0Gm+w0bXh3lzhtUP4MlzS+lnx9hK5bjzSbCUB5RXwMDG/uNMQq
-C4MmA4BPceSfMyAIFjdRLGy/K7gbb2viOYRtAgEDAoIBAEuX2tchZgcGSw1yGkMf
-OB4rbZhSSiCVvB5r1ew5xsnsNFCy1ducMo7zo9ehG2Pq9X2E8jQRWfZ+JdkX1gdC
-fiCjSkHDxt+LceDZFZ2F8O2bwXNF7sFAN0rvEbLNY44MkB7jgv9c/rs8YykLZy/N
-HH71mteZsO2Q1JoSHumFh99cwWHFhLxYh64qFeeH6Gqx6AM2YVBWHgs7OuKOvc8y
-zUbf8xftPht1kMwwDR1XySiEYtBtn74JflK3DcT8oxOuCZBuX6sMJHKbVP41zDj+
-FJZBmpAvNfCEYJUr1Hg+DpMLqLUg+D6v5vpliburbk9LxcKFZyyZ9QVe7GoqMLBu
-eGsCgYEAummUj4MMKWJC2mv5rj/dt2pj2/B2HtP2RLypai4et1/Ru9nNk8cjMLzC
-qXz6/RLuJ7/eD7asFS3y7EqxKxEmW0G8tTHjnzR/3wnpVipuWnwCDGU032HJVd13
-LMe51GH97qLzuDZjMCz+VlbCNdSslMgWWK0XmRnN7Yqxvh6ao2kCgYEAm7fTRBhF
-JtKcaJ7d8BQb9l8BNHfjayYOMq5CxoCyxa2pGBv/Mrnxv73Twp9Z/MP0ue5M5nZt
-GMovpP5cGdJLQ2w5p4H3opcuWeYW9Yyru2EyCEAI/hD/Td3QVP0ukc19BDuPl5Wg
-eIFs218uiVOU4pw3w+Et5B1PZ/F+ZLr5LGUCgYB8RmMKV11w7CyRnVEe1T56Ru09
-Svlp4qQt0xucHr8k6ovSkTO32hd10yxw/fyot0lv1T61JHK4yUydhyDHYMQ81n3O
-IUJqIv/qBpuOxvQ8UqwIQ3iU69uOk6TIhSaNlqlJwffQJEIgHf7kOdbOjchjMA7l
-yLpmETPzscvUFGcXmwKBgGfP4i1lg283EvBp6Uq4EqQ/ViL6l5zECXce1y8Ady5z
-xhASqiHRS9UpN9cU5qiCoyae3e75nhCGym3+6BE23Nede8UBT8G6HuaZZKOzHSeW
-IVrVW1QLVN6T4DioybaI/gLSX7pjwFBWSJI/dFuNDexoJS1AyUK+NO/2VEMnUMhD
-AoGAOsdn3Prnh/mjC95vraHCLap0bRBSexMdx77ImHgtFUUcSaT8DJHs+NZw1RdM
-SZA0J+zVQ8q7B11jIgz5hMz+chedwoRjTL7a8VRTKHFmmBH0zlEuV7L79w6HkRCQ
-VRg10GUN6heGLv0aOHbPdobcuVDH4sgOqpT1QnOuce34sQs=
------END RSA TESTING KEY-----`))
+var test2048Key = parseKey(testingKey(`-----BEGIN TESTING KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDNoyFUYeDuqw+k
+iyv47iBy/udbWmQdpbUZ8JobHv8uQrvL7sQN6l83teHgNJsXqtiLF3MC+K+XI6Dq
+hxUWfQwLip8WEnv7Jx/+53S8yp/CS4Jw86Q1bQHbZjFDpcoqSuwAxlegw18HNZCY
+fpipYnA1lYCm+MTjtgXJQbjA0dwUGCf4BDMqt+76Jk3XZF5975rftbkGoT9eu8Jt
+Xs5F5Xkwd8q3fkQz+fpLW4u9jrfFyQ61RRFkYrCjlhtGjYIzBHGgQM4n/sNXhiy5
+h0tA7Xa6NyYrN/OXe/Y1K8Rz/tzlvbMoxgZgtBuKo1N3m8ckFi7hUVK2eNv7GoAb
+teTTPrg/AgMBAAECggEAAnfsVpmsL3R0Bh4gXRpPeM63H6e1a8B8kyVwiO9o0cXX
+gKp9+P39izfB0Kt6lyCj/Wg+wOQT7rg5qy1yIw7fBHGmcjquxh3uN0s3YZ+Vcym6
+SAY5f0vh/OyJN9r3Uv8+Pc4jtb7So7QDzdWeZurssBmUB0avAMRdGNFGP5SyILcz
+l3Q59hTxQ4czRHKjZ06L1/sA+tFVbO1j39FN8nMOU/ovLF4lAmZTkQ6AP6n6XPHP
+B8Nq7jSYz6RDO200jzp6UsdrnjjkJRbzOxN/fn+ckCP+WYuq+y/d05ET9PdVa4qI
+Jyr80D9QgHmfztcecvYwoskGnkb2F4Tmp0WnAj/xVQKBgQD4TrMLyyHdbAr5hoSi
+p+r7qBQxnHxPe2FKO7aqagi4iPEHauEDgwPIcsOYota1ACiSs3BaESdJAClbqPYd
+HDI4c2DZ6opux6WYkSju+tVXYW6qarR3fzrP3fUCdz2c2NfruWOqq8YmjzAhTNPm
+YzvtzTdwheNYV0Vi71t1SfZmfQKBgQDUAgSUcrgXdGDnSbaNe6KwjY5oZWOQfZe2
+DUhqfN/JRFZj+EMfIIh6OQXnZqkp0FeRdfRAFl8Yz8ESHEs4j+TikLJEeOdfmYLS
+TWxlMPDTUGbUvSf4g358NJ8TlfYA7dYpSTNPXMRSLtsz1palmaDBTE/V2xKtTH6p
+VglRNRUKawKBgCPqBh2TkN9czC2RFkgMb4FcqycN0jEQ0F6TSnVVhtNiAzKmc8s1
+POvWJZJDIzjkv/mP+JUeXAdD/bdjNc26EU126rA6KzGgsMPjYv9FymusDPybGGUc
+Qt5j5RcpNgEkn/5ZPyAlXjCfjz+RxChTfAyGHRmqU9qoLMIFir3pJ7llAoGBAMNH
+sIxENwlzqyafoUUlEq/pU7kZWuJmrO2FwqRDraYoCiM/NCRhxRQ/ng6NY1gejepw
+abD2alXiV4alBSxubne6rFmhvA00y2mG40c6Ezmxn2ZpbX3dMQ6bMcPKp7QnXtLc
+mCSL4FGK02ImUNDsd0RVVFw51DRId4rmsuJYMK9NAoGAKlYdc4784ixTD2ZICIOC
+ZWPxPAyQUEA7EkuUhAX1bVNG6UJTYA8kmGcUCG4jPTgWzi00IyUUr8jK7efyU/zs
+qiJuVs1bia+flYIQpysMl1VzZh8gW1nkB4SVPm5l2wBvVJDIr9Mc6rueC/oVNkh2
+fLVGuFoTVIu2bF0cWAjNNMg=
+-----END TESTING KEY-----`))
 
-var test3072Key = parseKey(testingKey(`-----BEGIN RSA TESTING KEY-----
-MIIG5AIBAAKCAYEAuvg7HHdVlr2kKZzRw9xs/uZqR6JK21izBdg8D52YPqEdMIhG
-BSuOrejT6HiDaJcyCkeNxj7E2dKWacIV4UytlPvDnSL9dQduytl31YQ01J5i20r3
-Kp1etZDEDltly1eVKcbdQTsr26oSQCojYYiYOj+q8w/rzH3WSEuMs04TMwxCR0CC
-nStVsNWw5zL45n26mxDgDdPK/i3OJTinTvPUDysB/V0c8tRaQ8U6YesmgSYGIMe0
-bx5l9k1RJbISGIipmS1IVdNAHSnxqJTUG+9k8SHzp5bvqPeiuVLMZeEdqPHwkNHW
-37LNO28nN+B0xhc4wvEFvggrMf58oO3oy16AzBsWDKSOQnsagc4gQtrJ4N4WOibT
-/LJB76RLoNyJ+Ov7Ue8ngqR3r3EM8I9AAkj2+3fo+DxcLuE9qOVnrHYFRqq+EYQe
-lKSg3Z0EHb7XF35xXeAFbpEXSVuidBRm+emgLkZ2n313hz6jUg3FdE3vLMYHvxly
-ROzgsz0cNOAH3jnXAgMBAAECggGBAILJqe/buk9cET3aqRGtW8FjRO0fJeYSQgjQ
-nhL+VsVYxqZwbSqosYIN4E46HxJG0YZHT3Fh7ynAGd+ZGN0lWjdhdhCxrUL0FBhp
-z13YwWwJ73UfF48DzoCL59lzLd30Qi+bIKLE1YUvjty7nUxY1MPKTbcBaBz/2alw
-z9eNwfhvlt1ozvVKnwK4OKtCCMKTKLnYMCL8CH+NYyq+Wqrr/Wcu2pF1VQ64ZPwL
-Ny/P4nttMdQ0Xo9sYD7PDvije+0VivsoT8ZatLt06fCwxEIue2uucIQjXCgO8Igm
-pZwBEWDfy+NHtTKrFpyKf357S8veDwdU14GjviY8JFH8Bg8PBn3i38635m0o7xMG
-pRlQi5x1zbHy4riOEjyjCIRVCKwKT5HEYNK5Uu3aQnlOV7CzxBLNp5lyioAIGOBC
-RKJabN5vbUqJjxaQ39tA29DtfA3+r30aMO+QzOl5hrjJV7A7ueV3dbjp+fDV0LPq
-QrJ68IvHPi3hfqVlP1UM2s4T69kcYQKBwQDoj+rZVb3Aq0JZ8LraR3nA1yFw4NfA
-SZ/Ne36rIySiy5z+qY9p6WRNLGLrusSIfmbmvStswAliIdh1cZTAUsIF5+kQvBQg
-VlxJW/nY5hTktIDOZPDaI77jid1iZLID3VXEm6dXY/Hv7DiUORudXAHoy6HZm2Jt
-kSkIplSeSfASqidj9Bv7V27ttCcMLu0ImdX4JyWoXkVuzBuxKAgiemtLS5IPN8tw
-m/o2lMaP8/sCMpXrlo2VS3TMsfJyRI/JGoMCgcEAzdAH1TKNeQ3ghzRdlw5NAs31
-VbcYzjz8HRkNhOsQCs++1ib7H2MZ3HPLpAa3mBJ+VfXO479G23yI7f2zpiUzRuVY
-cTMHw5Ln7FXfBro5eu/ruyNzKiWPElP8VK814HI5u5XqUU05BsQbe6AjSGHoU6P6
-PfSDzaw8hGW78GcZu4/EkZp/0TXW+1HUGgU+ObgmG+PnyIMHIt99i7qrOVbNmG9n
-uNwGwmfFzNqAtVLbLcVyBV5TR+Ze3ZAwjnVaH5MdAoHBAOg5ncd8KMjVuqHZEpyY
-tulraQcwXgCzBBHJ+YimxRSSwahCZOTbm768TeMaUtoBbnuF9nDXqgcFyQItct5B
-RWFkXITLakWINwtB/tEpnz9pRx3SCfeprhnENv7jkibtw5FZ5NYNBTAQ78aC6CJQ
-F9AAVxPWZ4kFZLYwcVrGdiYNJtxWjAKFIk3WkQ9HZIYsJ09ut9nSmP60bgqO8OCM
-4csEIUt06X7/IfGSylxAwytEnBPt+F9WQ8GLB5A3CmVERQKBwGmBR0Knk5aG4p7s
-3T1ee2QAqM+z+Odgo+1WtnN4/NROAwpNGVbRuqQkSDRhrSQr9s+iHtjpaS2C/b7i
-24FEeLDTSS9edZBwcqvYqWgNdwHqk/FvDs6ASoOewi+3UespIydihqf+6kjppx0M
-zomAh1S5LsMr4ZVBwhQtAtcOQ0a/QIlTpkpdS0OygwSDw45bNE3/2wYTBUl/QCCt
-JLFUKjkGgylkwaJPCDsnl+tb+jfQi87st8yX7/GsxPeCeRzOkQKBwGPcu2OgZfsl
-dMHz0LwKOEattrkDujpIoNxyTrBN4fX0RdhTgfRrqsEkrH/4XG5VTtc7K7sBgx7f
-IwP1uUAx5v16QDA1Z+oFBXwmI7atdKRM34kl1Q0i60z83ahgA/9bAsSpcA23LtM4
-u2PRX3YNXb9kUcSbod2tVfXyiu7wl6NlsYw5PeF8A8m7QicaeXR6t8NB02XqQ4k+
-BoyV2DVuoxSZKOMti0piQIIacSZWEbgyslwNxW99JRVfA2xKJGjUqA==
------END RSA TESTING KEY-----`))
+var test3072Key = parseKey(testingKey(`-----BEGIN TESTING KEY-----
+MIIG/gIBADANBgkqhkiG9w0BAQEFAASCBugwggbkAgEAAoIBgQDJrvevql7G07LM
+xQAwAA1Oo8qUAkWfmpgrpxIUZE1QTyMCDaspQJGBBR2+iStrzi2NnWvyBz3jJWFZ
+LepnsMUFSXj5Ez6bEt2x9YbLAAVGhI6USrGAKqRdJ77+F7yIVCJWcV4vtTyN86IO
+UaHObwCR8GX7MUwJiRxDUZtYxJcwTMHSs4OWxNnqc+A8yRKn85CsCx0X9I1DULq+
+5BL8gF3MUXvb2zYzIOGI1s3lXOo9tHVcRVB1eV7dZHDyYGxZ4Exj9eKhiOL52hE6
+ZPTWCCKbQnyBV3HYe+t8DscOG/IzaAzLrx1s6xnqKEe5lUQ03Ty9QN3tpqqLsC4b
+CUkdk6Ma43KXGkCmoPaGCkssSc9qOrwHrqoMkOnZDWOJ5mKHhINKWV/U7p54T7tx
+FWI3PFvvYevoPf7cQdJcChbIBvQ+LEuVZvmljhONUjIGKBaqBz5Sjv7Fd5BNnBGz
+8NwH6tYdT9kdTkCZdfrazbuhLxN0mhhXp2sePRV2KZsB7i7cUJMCAwEAAQKCAYAT
+fqunbxmehhu237tUaHTg1e6WHvVu54kaUxm+ydvlTY5N5ldV801Sl4AtXjdJwjy0
+qcj430qpTarawsLxMezhcB2BlKLNEjucC5EeHIrmAEMt7LMP90868prAweJHRTv/
+zLvfcwPURClf0Uk0L0Dyr7Y+hnXZ8scTb2x2M06FQdjMY+4Yy+oKgm05mEVgNv1p
+e+DcjhbSMRf+rVoeeSQCmhprATCnLDWmE1QEqIC7OoR2SPxC1rAHnhatfwo00nwz
+rciN5YSOqoGa1WMNv6ut0HJWZnu5nR1OuZpaf+zrxlthMxPwhhPq0211J4fZviTO
+WLnubXD3/G9TN1TszeFuO7Ty8HYYkTJ3RLRrTRrfwhOtOJ4tkuwSJol3QIs1asab
+wYabuqyTv4+6JeoMBSLnMoA8rXSW9ti4gvJ1h8xMqmMF6e91Z0Fn7fvP5MCn/t8H
+8cIPhYLOhdPH5JMqxozb/a1s+JKvRTLnAXxNjlmyXzNvC+3Ixp4q9O8dWJ8Gt+EC
+gcEA+12m6iMXU3tBw1cYDcs/Jc0hOVgMAMgtnWZ4+p8RSucO/74bq82kdyAOJxao
+spAcK03NnpRBDcYsSyuQrE6AXQYel1Gj98mMtOirwt2T9vH5fHT6oKsqEu03hYIB
+5cggeie4wqKAOb9tVdShJk7YBJUgIXnAcqqmkD4oeUGzUV0QseQtspEHUJSqBQ9n
+yR4DmyMECgLm47S9LwPMtgRh9ADLBaZeuIRdBEKCDPgNkdya/dLb8u8kE8Ox3T3R
++r2hAoHBAM1m1ZNqP9bEa74jZkpMxDN+vUdN7rZcxcpHu1nyii8OzXEopB+jByFA
+lmMqnKt8z5DRD0dmHXzOggnKJGO2j63/XFaVmsaXcM2B8wlRCqwm4mBE/bYCEKJl
+xqkDveICzwb1paWSgmFkjc6DN2g1jUd3ptOORuU38onrSphPHFxgyNlNTcOcXvxb
+GW4R8iPinvpkY3shluWqRQTvai1+gNQlmKMdqXvreUjKqJFCOhoRUVG/MDv8IdP2
+tXq43+UZswKBwQDSErOzi74r25/bVAdbR9gvjF7O4OGvKZzNpd1HfvbhxXcIjuXr
+UEK5+AU777ju+ndATZahiD9R9qP/8pnHFxg6JiocxnMlW8EHVEhv4+SMBjA+Ljlj
+W4kfJjc3ka5qTjWuQVIs/8fv+yayC7DeJhhsxACFWY5Xhn0LoZcLt7fYMNIKCauT
+R5d4ZbYt4nEXaMkUt0/h2gkCloNhLmjAWatPU/ZYc3FH/f8K11Z+5jPZCihSJw4A
+2pEpH2yffNHnHuECgcEAmxIWEHNYuwYT6brEETgfsFjxAZI+tIMZ+HtrYJ8R4DEm
+vVXXguMMEPi4ESosmfNiqYyMInVfscgeuNFZ48YCd3Sg++V6so/G5ABFwjTi/9Fj
+exbbDLxGXrTD5PokMyu3rSNr6bLQqELIJK8/93bmsJwO4Q07TPaOL73p1U90s/GF
+8TjBivrVY2RLsKPv0VPYfmWoDV/wkneYH/+4g5xMGt4/fHZ6bEn8iQ4ncXM0dlW4
+tSTIf6D80RAjNwG4VzitAoHAA8GLh22w+Cx8RPsj6xdrUiVFE+nNMMgeY8Mdjsrq
+Fh4jJb+4zwSML9R6iJu/LH5B7Fre2Te8QrYP+k/jIHPYJtGesVt/WlAtpDCNsC3j
+8CBzxwL6zkN+46pph35jPKUSaQQ2r8euNMp/sirkYcP8PpbdtifXCjN08QQIKsqj
+17IGHe9jZX/EVnSshCkXOBHG31buV10k5GSkeKcoDrkpp25wQ6FjW9L3Q68y6Y8r
+8h02sdAMB9Yc2A4EgzOySWoD
+-----END TESTING KEY-----`))
 
-var test4096Key = parseKey(testingKey(`-----BEGIN RSA TESTING KEY-----
-MIIJKQIBAAKCAgEAwTmi+2MLTSm6GbsKksOHCMdIRsPwLlPtJQiMEjnKq4YEPSaC
-HXWQTza0KL/PySjhgw3Go5pC7epXlA9o1I+rbx4J3AwxC+xUUJqs3U0AETtzC1JD
-r3+/aP5KJzXp7IQXe1twEyHbQDCy3XUFhB0tZpIuAx82VSzMv4c6h6KPaES24ljd
-OxJJLPTYVECG2NbYBeKZPxyGNIkHn6/6rJDxnlICvLVBMrPaxsvN04ck55SRIglw
-MWmxpPTRFkMFERY7b2C33BuVICB8tXccvNwgtrNOmaWd6yjESZOYMyJQLi0QHMan
-ObuZw2PeUR+9gAE3R8/ji/i1VLYeVfC6TKzhziq5NKeBXzjSGOS7XyjvxrzypUco
-HiAUyVGKtouRFyOe4gr4xxZpljIEoN4TsBWSbM8GH6n5uFmEKvFnBR5KDRCwFfvI
-JudWm/oWptzQUyqRvzNtv4OgU9YVnx/fY3hyaD5ZnVZjUZzAjo3o8WSwmuTbZbJ1
-gX3pDRPw3g0naBm6rMEWPV4YR93be/mBERxWua6IrPPptRh9WYAJP4bkwk9V0F8U
-Ydk1URLeETAyFScNgukcKzpNw+OeCze2Blvrenf9goHefIpMzv4/ulEr7/v80ESq
-qd9CAwpz7cRe5+g18v5rFTEHESTCCq+rOFI5L59UX4VvE7CGOzcPLLZjlcMCAwEA
-AQKCAgB3/09UR0IxfYRxjlMWqg8mSHx+VhjG7KANq60xdGqE8wmW4F9V5DjmuNZR
-qC1mg9jpBpkh6R8/mZUiAh/cQgz5SPJekcOz3+TM2gIYvUUZbo4XrdMTHobEsYdj
-qnvHwpDCrxp/BzueNaAfIBl43pXfaVDh53RamSPeniCfMzlUS7g4AXACy2xeWwAt
-8pTL/UDTBtKc+x3talwts6A9oxYqeEvy3a3Lyx5G7zK39unYV896D9p5FWaZRuDC
-roRrBB+NH8ePDiIifYp1N6/FKf+29swNZ2kXLY4ZE2wl9V1OD/Y9qLEZjYQEb/UU
-9F0/LYIjOtvZhW83WJKmVIWeMI9Z4UooOztJJK0XOqSDsXVaEMgrF9D4E8BnKdWp
-ddM5E0nNXpLEV/SsoUyAMjArjImf8HjmJA45Px+BBGxdIv5PCyvUUD2R/6WbHOdh
-glH49I4SpVKGICV+qhLdSZkjWaItECwbsw5CeXrcOPjVCrNGOOKI8FdQN7S9JRiN
-Th6pTL1ezDUOx2Sq1M/h++ucd7akzsxm6my3leNYHxxyX7/PnQgUDyoXwQ1azAtB
-8PmMe7JAxuMjwFJJXn1Sgoq0ml0RkRkrj18+UMiz32qX8OtN+x44LkC7TnMNXqiA
-ohmzYy4WJRc3LyyTMWGrH00Zckc8oBvjf/rWy5X1nWz+DcuQIQKCAQEA6x92d8A9
-WR4qeHRY6zfHaaza8Z9vFUUUwebPxDy82Q6znu6nXNB/Q+OuhGohqcODUC8dj2qv
-7AyKdukzZzPTNSWNoSnr3c3nGpOzXxFntGOMFB83nmeoYGJEo3RertQO8QG2Dkis
-Ix9uKU6u2m5ijnH5cqHs2OcRbl2b+6mkRvPY2YxI0EqSXnMa1jpjeCKpZDW89iLU
-rm7x6vqyffqVaTt4PHj47p5QIUj8cRkVtAvUuIzM/R2g/epiytTo4iRM28rVLRnK
-28BtTtXZBT6Xy4UWX0fLSOUm2Hr1jiUJIc+Adb2h+zh69MBtBG6JRBiK7zwx7HxE
-c6sFzNvfMei99QKCAQEA0mHNpqmHuHb+wNdAEiKz4hCnYyuLDy+lZp/uQRkiODqV
-eUxAHRK1OP8yt45ZBxyaLcuRvAgK/ETg/QlYWUuAXvUWVGq9Ycv3WrpjUL0DHvuo
-rBfWTSiTNWH9sbDoCapiJMDe28ELBXVp1dCKuei/EReRHYg/vJn+GdPaZL60rlQg
-qCMou3jOXw94/Y05QcJQNkoLmVEEEwkbwrfXWvjShRbKNsv5kJydgPRfnsu5JSue
-Ydkx/Io4+4xz6vjfDDjgFFfvOJJjouFkYGWIDuT5JViIVBVK1F3XrkzOYUjoBzo7
-xDJkZrgNyNIpWXdzwfb8WTCJAOTHMk9DSB4lkk651wKCAQBKMTtovjidjm9IYy5L
-yuYZ6nmMFQswYwQRy4t0GNZeh80WMaiOGRyPh6DiF7tXnmIpQzTItJmemrZ2n0+h
-GTFka90tJdVPwFFUiZboQM3Alkj1cIRUb9Ep2Nhf27Ck6jVsx2VzTGtFCf3w+ush
-8gMXf89+5KmgKAnQEanO19EGspuSyjmPwHg/ZYLqZrJMjmN1Q5/E62jBQjEEPOdl
-6VSMSD/AlUu3wCz409cUuR2oGrOdKJDmrhrHBNb3ugdilKHMGUz7VlA015umbMR2
-azHq/qv4lOcIsYZ4eRRTLkybZqbagGREqaXi5XWBGIAoBLaSlyQJw4y2ExlZc2gS
-j6ahAoIBAQCwzdsL1mumHfMI050X4Kw2L3LNCBoMwCkL7xpHAT1d7fYSg39aL4+3
-f9j6pBmzvVjhZbRrRoMc8TH31XO3T5lptCV4+l+AIe8WA5BVmRNXZX2ia0IBhDj6
-4whW3eqTvOpQIvrnyfteMgeo1mLPzIdOcPTW0dtmwC/pOr7Obergmvj69NlVfDhL
-cXBn/diBqDDK/z1yMsDu0nfPE7tby8L4cGeu14s7+jLv3e/CP0mwsFChwOueZfdv
-h+EfNtoUpnPDBQeZDoXHrA40aP+ILOwpc5bWuzIw+VC6PfgvkBrXgBwcTZFNNh73
-h4+Sja3t84it1/k7lAjIAg70O8mthJXvAoIBAQDUUqWxqQN76gY2CPuXrwIvWvfP
-Z9U2Lv5ZTmY75L20CWRY0os0hAF68vCwxLpfeUMUTSokwa5L/l1gHwA2Zqm1977W
-9wV2Iiyqmkz9u3fu5YNOlezSoffOvAf/GUvSQ9HJ/VGqFdy2bC6NE81HRxojxeeY
-7ZmNlJrcsupyWmpUTpAd4cRVaCjcZQRoj+uIYCbgtV6/RD5VXHcPTd9wR7pjZPv7
-239qVdVU4ahkSZP6ikeN/wOEegWS0i/cKSgYmLBpWFGze3EKvHdEzurqPNCr5zo2
-jd7HGMtCpvqFx/7wUl09ac/kHeY+Ob2KduWinSPm5+jI6dPohnGx/wBEVCWh
------END RSA TESTING KEY-----`))
+var test4096Key = parseKey(testingKey(`-----BEGIN TESTING KEY-----
+MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCmH55T2e8fdUaL
+iWVL2yI7d/wOu/sxI4nVGoiRMiSMlMZlOEZ4oJY6l2y9N/b8ftwoIpjYO8CBk5au
+x2Odgpuz+FJyHppvKakUIeAn4940zoNkRe/iptybIuH5tCBygjs0y1617TlR/c5+
+FF5YRkzsEJrGcLqXzj0hDyrwdplBOv1xz2oHYlvKWWcVMR/qgwoRuj65Ef262t/Q
+ELH3+fFLzIIstFTk2co2WaALquOsOB6xGOJSAAr8cIAWe+3MqWM8DOcgBuhABA42
+9IhbBBw0uqTXUv/TGi6tcF29H2buSxAx/Wm6h2PstLd6IJAbWHAa6oTz87H0S6XZ
+v42cYoFhHma1OJw4id1oOZMFDTPDbHxgUnr2puSU+Fpxrj9+FWwViKE4j0YatbG9
+cNVpx9xo4NdvOkejWUrqziRorMZTk/zWKz0AkGQzTN3PrX0yy61BoWfznH/NXZ+o
+j3PqVtkUs6schoIYvrUcdhTCrlLwGSHhU1VKNGAUlLbNrIYTQNgt2gqvjLEsn4/i
+PgS1IsuDHIc7nGjzvKcuR0UeYCDkmBQqKrdhGbdJ1BRohzLdm+woRpjrqmUCbMa5
+VWWldJen0YyAlxNILvXMD117azeduseM1sZeGA9L8MmE12auzNbKr371xzgANSXn
+jRuyrblAZKc10kYStrcEmJdfNlzYAwIDAQABAoICABdQBpsD0W/buFuqm2GKzgIE
+c4Xp0XVy5EvYnmOp4sEru6/GtvUErDBqwaLIMMv8TY8AU+y8beaBPLsoVg1rn8gg
+yAklzExfT0/49QkEDFHizUOMIP7wpbLLsWSmZ4tKRV7CT3c+ZDXiZVECML84lmDm
+b6H7feQB2EhEZaU7L4Sc76ZCEkIZBoKeCz5JF46EdyxHs7erE61eO9xqC1+eXsNh
+Xr9BS0yWV69K4o/gmnS3p2747AHP6brFWuRM3fFDsB5kPScccQlSyF/j7yK+r+qi
+arGg/y+z0+sZAr6gooQ8Wnh5dJXtnBNCxSDJYw/DWHAeiyvk/gsndo3ZONlCZZ9u
+bpwBYx3hA2wTa5GUQxFM0KlI7Ftr9Cescf2jN6Ia48C6FcQsepMzD3jaMkLir8Jk
+/YD/s5KPzNvwPAyLnf7x574JeWuuxTIPx6b/fHVtboDK6j6XQnzrN2Hy3ngvlEFo
+zuGYVvtrz5pJXWGVSjZWG1kc9iXCdHKpmFdPj7XhU0gugTzQ/e5uRIqdOqfNLI37
+fppSuWkWd5uaAg0Zuhd+2L4LG2GhVdfFa1UeHBe/ncFKz1km9Bmjvt04TpxlRnVG
+wHxJZKlxpxCZ3AuLNUMP/QazPXO8OIfGOCbwkgFiqRY32mKDUvmEADBBoYpk/wBv
+qV99g5gvYFC5Le4QLzOJAoIBAQDcnqnK2tgkISJhsLs2Oj8vEcT7dU9vVnPSxTcC
+M0F+8ITukn33K0biUlA+ktcQaF+eeLjfbjkn/H0f2Ajn++ldT56MgAFutZkYvwxJ
+2A6PVB3jesauSpe8aqoKMDIj8HSA3+AwH+yU+yA9r5EdUq1S6PscP+5Wj22+thAa
+l65CFD77C0RX0lly5zdjQo3Vyca2HYGm/cshFCPRZc66TPjNAHFthbqktKjMQ91H
+Hg+Gun2zv8KqeSzMDeHnef4rVaWMIyIBzpu3QdkKPUXMQQxvJ+RW7+MORV9VjE7Z
+KVnHa/6x9n+jvtQ0ydHc2n0NOp6BQghTCB2G3w3JJfmPcRSNAoIBAQDAw6mPddoz
+UUzANMOYcFtos4EaWfTQE2okSLVAmLY2gtAK6ldTv6X9xl0IiC/DmWqiNZJ/WmVI
+glkp6iZhxBSmqov0X9P0M+jdz7CRnbZDFhQWPxSPicurYuPKs52IC08HgIrwErzT
+/lh+qRXEqzT8rTdftywj5fE89w52NPHBsMS07VhFsJtU4aY2Yl8y1PHeumXU6h66
+yTvoCLLxJPiLIg9PgvbMF+RiYyomIg75gwfx4zWvIvWdXifQBC88fE7lP2u5gtWL
+JUJaMy6LNKHn8YezvwQp0dRecvvoqzoApOuHfsPASHb9cfvcy/BxDXFMJO4QWCi1
+6WLaR835nKLPAoIBAFw7IHSjxNRl3b/FaJ6k/yEoZpdRVaIQHF+y/uo2j10IJCqw
+p2SbfQjErLNcI/jCCadwhKkzpUVoMs8LO73v/IF79aZ7JR4pYRWNWQ/N+VhGLDCb
+dVAL8x9b4DZeK7gGoE34SfsUfY1S5wmiyiHeHIOazs/ikjsxvwmJh3X2j20klafR
+8AJe9/InY2plunHz5tTfxQIQ+8iaaNbzntcXsrPRSZol2/9bX231uR4wHQGQGVj6
+A+HMwsOT0is5Pt7S8WCCl4b13vdf2eKD9xgK4a3emYEWzG985PwYqiXzOYs7RMEV
+cgr8ji57aPbRiJHtPbJ/7ob3z5BA07yR2aDz/0kCggEAZDyajHYNLAhHr98AIuGy
+NsS5CpnietzNoeaJEfkXL0tgoXxwQqVyzH7827XtmHnLgGP5NO4tosHdWbVflhEf
+Z/dhZYb7MY5YthcMyvvGziXJ9jOBHo7Z8Nowd7Rk41x2EQGfve0QcfBd1idYoXch
+y47LL6OReW1Vv4z84Szw1fZ0o1yUPVDzxPS9uKP4uvcOevJUh53isuB3nVYArvK5
+p6fjbEY+zaxS33KPdVrajJa9Z+Ptg4/bRqSycTHr2jkN0ZnkC4hkQMH0OfFJb6vD
+0VfAaBCZOqHZG/AQ3FFFjRY1P7UEV5WXAn3mKU+HTVJfKug9PxSIvueIttcF3Zm8
+8wKCAQAM43+DnGW1w34jpsTAeOXC5mhIz7J8spU6Uq5bJIheEE2AbX1z+eRVErZX
+1WsRNPsNrQfdt/b5IKboBbSYKoGxxRMngJI1eJqyj4LxZrACccS3euAlcU1q+3oN
+T10qfQol54KjGld/HVDhzbsZJxzLDqvPlroWgwLdOLDMXhwJYfTnqMEQkaG4Aawr
+3P14+Zp/woLiPWw3iZFcL/bt23IOa9YI0NoLhp5MFNXfIuzx2FhVz6BUSeVfQ6Ko
+Nx2YZ03g6Kt6B6c43LJx1a/zEPYSZcPERgWOSHlcjmwRfTs6uoN9xt1qs4zEUaKv
+Axreud3rJ0rekUp6rI1joG717Wls
+-----END TESTING KEY-----`))
 
 func BenchmarkDecryptPKCS1v15(b *testing.B) {
 	b.Run("2048", func(b *testing.B) { benchmarkDecryptPKCS1v15(b, test2048Key) })
diff --git a/src/crypto/sha1/boring.go b/src/crypto/sha1/boring.go
deleted file mode 100644
index b5786d1..0000000
--- a/src/crypto/sha1/boring.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Extra indirection here so that when building go_bootstrap
-// cmd/internal/boring is not even imported, so that we don't
-// have to maintain changes to cmd/dist's deps graph.
-
-//go:build !cmd_go_bootstrap && cgo
-// +build !cmd_go_bootstrap,cgo
-
-package sha1
-
-import (
-	"crypto/internal/boring"
-	"hash"
-)
-
-const boringEnabled = boring.Enabled
-
-func boringNewSHA1() hash.Hash { return boring.NewSHA1() }
-
-func boringUnreachable() { boring.Unreachable() }
-
-func boringSHA1(p []byte) [20]byte { return boring.SHA1(p) }
diff --git a/src/crypto/sha1/fallback_test.go b/src/crypto/sha1/fallback_test.go
index 45d1f57..aa1d413 100644
--- a/src/crypto/sha1/fallback_test.go
+++ b/src/crypto/sha1/fallback_test.go
@@ -16,7 +16,7 @@
 // implementation cannot be used.
 // See also TestBlockGeneric.
 func TestGenericPath(t *testing.T) {
-	if useAsm == false {
+	if !useAsm {
 		t.Skipf("assembly implementation unavailable")
 	}
 	useAsm = false
diff --git a/src/crypto/sha1/notboring.go b/src/crypto/sha1/notboring.go
deleted file mode 100644
index 42ef879..0000000
--- a/src/crypto/sha1/notboring.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build cmd_go_bootstrap || !cgo
-// +build cmd_go_bootstrap !cgo
-
-package sha1
-
-import (
-	"hash"
-)
-
-const boringEnabled = false
-
-func boringNewSHA1() hash.Hash { panic("boringcrypto: not available") }
-
-func boringUnreachable() {}
-
-func boringSHA1([]byte) [20]byte { panic("boringcrypto: not available") }
diff --git a/src/crypto/sha1/sha1.go b/src/crypto/sha1/sha1.go
index 43ab72a..ac10fa1 100644
--- a/src/crypto/sha1/sha1.go
+++ b/src/crypto/sha1/sha1.go
@@ -10,6 +10,7 @@
 
 import (
 	"crypto"
+	"crypto/internal/boring"
 	"encoding/binary"
 	"errors"
 	"hash"
@@ -104,11 +105,11 @@
 }
 
 // New returns a new hash.Hash computing the SHA1 checksum. The Hash also
-// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
+// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to
 // marshal and unmarshal the internal state of the hash.
 func New() hash.Hash {
-	if boringEnabled {
-		return boringNewSHA1()
+	if boring.Enabled {
+		return boring.NewSHA1()
 	}
 	d := new(digest)
 	d.Reset()
@@ -120,7 +121,7 @@
 func (d *digest) BlockSize() int { return BlockSize }
 
 func (d *digest) Write(p []byte) (nn int, err error) {
-	boringUnreachable()
+	boring.Unreachable()
 	nn = len(p)
 	d.len += uint64(nn)
 	if d.nx > 0 {
@@ -144,7 +145,7 @@
 }
 
 func (d *digest) Sum(in []byte) []byte {
-	boringUnreachable()
+	boring.Unreachable()
 	// Make a copy of d so that caller can keep writing and summing.
 	d0 := *d
 	hash := d0.checkSum()
@@ -184,7 +185,7 @@
 	return digest
 }
 
-// ConstantTimeSum computes the same result of Sum() but in constant time
+// ConstantTimeSum computes the same result of [Sum] but in constant time
 func (d *digest) ConstantTimeSum(in []byte) []byte {
 	d0 := *d
 	hash := d0.constSum()
@@ -254,8 +255,8 @@
 
 // Sum returns the SHA-1 checksum of the data.
 func Sum(data []byte) [Size]byte {
-	if boringEnabled {
-		return boringSHA1(data)
+	if boring.Enabled {
+		return boring.SHA1(data)
 	}
 	var d digest
 	d.Reset()
diff --git a/src/crypto/sha1/sha1block_s390x.s b/src/crypto/sha1/sha1block_s390x.s
index 6ba6883..0fb7aef 100644
--- a/src/crypto/sha1/sha1block_s390x.s
+++ b/src/crypto/sha1/sha1block_s390x.s
@@ -12,7 +12,7 @@
 	CMPBEQ R4, $0, generic
 
 loop:
-	WORD $0xB93E0002 // KIMD R2
+	KIMD R0, R2      // compute intermediate message digest (KIMD)
 	BVS  loop        // continue if interrupted
 	RET
 
diff --git a/src/crypto/sha256/sha256.go b/src/crypto/sha256/sha256.go
index 2deafbc..0cc7fca 100644
--- a/src/crypto/sha256/sha256.go
+++ b/src/crypto/sha256/sha256.go
@@ -144,8 +144,8 @@
 }
 
 // New returns a new hash.Hash computing the SHA256 checksum. The Hash
-// also implements encoding.BinaryMarshaler and
-// encoding.BinaryUnmarshaler to marshal and unmarshal the internal
+// also implements [encoding.BinaryMarshaler] and
+// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
 // state of the hash.
 func New() hash.Hash {
 	if boring.Enabled {
diff --git a/src/crypto/sha256/sha256block_s390x.s b/src/crypto/sha256/sha256block_s390x.s
index 81b1b38..9c30136 100644
--- a/src/crypto/sha256/sha256block_s390x.s
+++ b/src/crypto/sha256/sha256block_s390x.s
@@ -12,7 +12,7 @@
 	CMPBEQ R4, $0, generic
 
 loop:
-	WORD $0xB93E0002 // KIMD R2
+	KIMD R0, R2      // compute intermediate message digest (KIMD)
 	BVS  loop        // continue if interrupted
 	RET
 
diff --git a/src/crypto/sha512/fallback_test.go b/src/crypto/sha512/fallback_test.go
index db5b13c..ac0c2c3 100644
--- a/src/crypto/sha512/fallback_test.go
+++ b/src/crypto/sha512/fallback_test.go
@@ -16,7 +16,7 @@
 // implementation cannot be used.
 // See also TestBlockGeneric.
 func TestGenericPath(t *testing.T) {
-	if useAsm == false {
+	if !useAsm {
 		t.Skipf("assembly implementation unavailable")
 	}
 	useAsm = false
diff --git a/src/crypto/sha512/sha512block_s390x.s b/src/crypto/sha512/sha512block_s390x.s
index f221bd1..9fdf343 100644
--- a/src/crypto/sha512/sha512block_s390x.s
+++ b/src/crypto/sha512/sha512block_s390x.s
@@ -12,7 +12,7 @@
 	CMPBEQ R4, $0, generic
 
 loop:
-	WORD $0xB93E0002 // KIMD R2
+	KIMD R0, R2      // compute intermediate message digest (KIMD)
 	BVS  loop        // continue if interrupted
 	RET
 
diff --git a/src/crypto/subtle/xor_amd64.s b/src/crypto/subtle/xor_amd64.s
index 8b04b58..949424f 100644
--- a/src/crypto/subtle/xor_amd64.s
+++ b/src/crypto/subtle/xor_amd64.s
@@ -18,6 +18,7 @@
 aligned:
 	MOVQ $0, AX // position in slices
 
+	PCALIGN $16
 loop16b:
 	MOVOU (SI)(AX*1), X0   // XOR 16byte forwards.
 	MOVOU (CX)(AX*1), X1
@@ -28,6 +29,7 @@
 	JNE   loop16b
 	RET
 
+	PCALIGN $16
 loop_1b:
 	SUBQ  $1, DX           // XOR 1byte backwards.
 	MOVB  (SI)(DX*1), DI
diff --git a/src/crypto/subtle/xor_ppc64x.s b/src/crypto/subtle/xor_ppc64x.s
index 72bb80d..0de4350 100644
--- a/src/crypto/subtle/xor_ppc64x.s
+++ b/src/crypto/subtle/xor_ppc64x.s
@@ -13,75 +13,130 @@
 	MOVD	b+16(FP), R5	// R5 = b
 	MOVD	n+24(FP), R6	// R6 = n
 
-	CMPU	R6, $32, CR7	// Check if n ≥ 32 bytes
+	CMPU	R6, $64, CR7	// Check if n ≥ 64 bytes
 	MOVD	R0, R8		// R8 = index
-	CMPU	R6, $8, CR6	// Check if 8 ≤ n < 32 bytes
-	BLT	CR6, small	// Smaller than 8
-	BLT	CR7, xor16	// Case for 16 ≤ n < 32 bytes
+	CMPU	R6, $8, CR6	// Check if 8 ≤ n < 64 bytes
+	BLE	CR6, small	// <= 8
+	BLT	CR7, xor32	// Case for 32 ≤ n < 64 bytes
 
-	// Case for n ≥ 32 bytes
-preloop32:
-	SRD	$5, R6, R7	// Setup loop counter
+	// Case for n ≥ 64 bytes
+preloop64:
+	SRD	$6, R6, R7	// Set up loop counter
 	MOVD	R7, CTR
 	MOVD	$16, R10
-	ANDCC	$31, R6, R9	// Check for tailing bytes for later
-loop32:
-	LXVD2X		(R4)(R8), VS32		// VS32 = a[i,...,i+15]
-	LXVD2X		(R4)(R10), VS34
-	LXVD2X		(R5)(R8), VS33		// VS33 = b[i,...,i+15]
-	LXVD2X		(R5)(R10), VS35
-	XXLXOR		VS32, VS33, VS32	// VS34 = a[] ^ b[]
-	XXLXOR		VS34, VS35, VS34
-	STXVD2X		VS32, (R3)(R8)		// Store to dst
-	STXVD2X		VS34, (R3)(R10)
-	ADD		$32, R8			// Update index
-	ADD		$32, R10
-	BC		16, 0, loop32		// bdnz loop16
-
-	BEQ		CR0, done
-
-	MOVD		R9, R6
-	CMP		R6, $8
-	BLT		small
+	MOVD	$32, R14
+	MOVD	$48, R15
+	ANDCC	$63, R6, R9	// Check for tailing bytes for later
+	PCALIGN $16
+	// Case for >= 64 bytes
+	// Process 64 bytes per iteration
+	// Load 4 vectors of a and b
+	// XOR the corresponding vectors
+	// from a and b and store the result
+loop64:
+	LXVD2X	(R4)(R8), VS32
+	LXVD2X	(R4)(R10), VS34
+	LXVD2X	(R4)(R14), VS36
+	LXVD2X	(R4)(R15), VS38
+	LXVD2X	(R5)(R8), VS33
+	LXVD2X	(R5)(R10), VS35
+	LXVD2X	(R5)(R14), VS37
+	LXVD2X	(R5)(R15), VS39
+	XXLXOR	VS32, VS33, VS32
+	XXLXOR	VS34, VS35, VS34
+	XXLXOR	VS36, VS37, VS36
+	XXLXOR	VS38, VS39, VS38
+	STXVD2X	VS32, (R3)(R8)
+	STXVD2X	VS34, (R3)(R10)
+	STXVD2X	VS36, (R3)(R14)
+	STXVD2X	VS38, (R3)(R15)
+	ADD	$64, R8
+	ADD	$64, R10
+	ADD	$64, R14
+	ADD	$64, R15
+	BDNZ	loop64
+	BC	12,2,LR		// BEQLR
+	MOVD	R9, R6
+	CMP	R6, $8
+	BLE	small
+	// Case for 8 <= n < 64 bytes
+	// Process 32 bytes if available
+xor32:
+	CMP	R6, $32
+	BLT	xor16
+	ADD	$16, R8, R9
+	LXVD2X	(R4)(R8), VS32
+	LXVD2X	(R4)(R9), VS33
+	LXVD2X	(R5)(R8), VS34
+	LXVD2X	(R5)(R9), VS35
+	XXLXOR	VS32, VS34, VS32
+	XXLXOR	VS33, VS35, VS33
+	STXVD2X	VS32, (R3)(R8)
+	STXVD2X	VS33, (R3)(R9)
+	ADD	$32, R8
+	ADD	$-32, R6
+	CMP	R6, $8
+	BLE	small
+	// Case for 8 <= n < 32 bytes
+	// Process 16 bytes if available
 xor16:
-	CMP		R6, $16
-	BLT		xor8
-	LXVD2X		(R4)(R8), VS32
-	LXVD2X		(R5)(R8), VS33
-	XXLXOR		VS32, VS33, VS32
-	STXVD2X		VS32, (R3)(R8)
-	ADD		$16, R8
-	ADD		$-16, R6
-	CMP		R6, $8
-	BLT		small
-xor8:
-	// Case for 8 ≤ n < 16 bytes
-	MOVD    (R4)(R8), R14   // R14 = a[i,...,i+7]
-	MOVD    (R5)(R8), R15   // R15 = b[i,...,i+7]
-	XOR     R14, R15, R16   // R16 = a[] ^ b[]
-	SUB     $8, R6          // n = n - 8
-	MOVD    R16, (R3)(R8)   // Store to dst
-	ADD     $8, R8
-
-	// Check if we're finished
-	CMP     R6, R0
-	BGT     small
-	RET
-
-	// Case for n < 8 bytes and tailing bytes from the
-	// previous cases.
+	CMP	R6, $16
+	BLT	xor8
+	LXVD2X	(R4)(R8), VS32
+	LXVD2X	(R5)(R8), VS33
+	XXLXOR	VS32, VS33, VS32
+	STXVD2X	VS32, (R3)(R8)
+	ADD	$16, R8
+	ADD	$-16, R6
 small:
 	CMP	R6, R0
-	BEQ	done
-	MOVD	R6, CTR		// Setup loop counter
-
-loop:
+	BC	12,2,LR		// BEQLR
+xor8:
+#ifdef GOPPC64_power10
+	SLD	$56,R6,R17
+	ADD	R4,R8,R18
+	ADD	R5,R8,R19
+	ADD	R3,R8,R20
+	LXVL	R18,R17,V0
+	LXVL	R19,R17,V1
+	VXOR	V0,V1,V1
+	STXVL	V1,R20,R17
+	RET
+#else
+	CMP	R6, $8
+	BLT	xor4
+	// Case for 8 ≤ n < 16 bytes
+	MOVD	(R4)(R8), R14   // R14 = a[i,...,i+7]
+	MOVD	(R5)(R8), R15   // R15 = b[i,...,i+7]
+	XOR	R14, R15, R16   // R16 = a[] ^ b[]
+	SUB	$8, R6          // n = n - 8
+	MOVD	R16, (R3)(R8)   // Store to dst
+	ADD	$8, R8
+xor4:
+	CMP	R6, $4
+	BLT	xor2
+	MOVWZ	(R4)(R8), R14
+	MOVWZ	(R5)(R8), R15
+	XOR	R14, R15, R16
+	MOVW	R16, (R3)(R8)
+	ADD	$4,R8
+	ADD	$-4,R6
+xor2:
+	CMP	R6, $2
+	BLT	xor1
+	MOVHZ	(R4)(R8), R14
+	MOVHZ	(R5)(R8), R15
+	XOR	R14, R15, R16
+	MOVH	R16, (R3)(R8)
+	ADD	$2,R8
+	ADD	$-2,R6
+xor1:
+	CMP	R6, R0
+	BC	12,2,LR		// BEQLR
 	MOVBZ	(R4)(R8), R14	// R14 = a[i]
 	MOVBZ	(R5)(R8), R15	// R15 = b[i]
 	XOR	R14, R15, R16	// R16 = a[i] ^ b[i]
 	MOVB	R16, (R3)(R8)	// Store to dst
-	ADD	$1, R8
-	BC	16, 0, loop	// bdnz loop
-
+#endif
 done:
 	RET
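Annotation (not part of the patch): the PPC64 assembly above backs crypto/subtle.XORBytes, which XORs x and y element-wise into dst for the length of the shorter argument and returns the number of bytes written. A small usage sketch:

package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	a := []byte{0x00, 0xff, 0x0f, 0xf0}
	b := []byte{0xff, 0xff, 0x00, 0x0f}
	dst := make([]byte, len(a))
	n := subtle.XORBytes(dst, a, b)
	fmt.Println(n, dst) // 4 [255 0 15 255]
}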
diff --git a/src/crypto/tls/boring_test.go b/src/crypto/tls/boring_test.go
index ba68f35..085ff57 100644
--- a/src/crypto/tls/boring_test.go
+++ b/src/crypto/tls/boring_test.go
@@ -200,7 +200,7 @@
 	}()
 
 	for _, sigHash := range defaultSupportedSignatureAlgorithms {
-		t.Run(fmt.Sprintf("%#x", sigHash), func(t *testing.T) {
+		t.Run(fmt.Sprintf("%v", sigHash), func(t *testing.T) {
 			serverConfig := testConfig.Clone()
 			serverConfig.Certificates = make([]Certificate, 1)
 
diff --git a/src/crypto/tls/cipher_suites.go b/src/crypto/tls/cipher_suites.go
index 589e8b6..6f5bc37 100644
--- a/src/crypto/tls/cipher_suites.go
+++ b/src/crypto/tls/cipher_suites.go
@@ -45,18 +45,13 @@
 
 // CipherSuites returns a list of cipher suites currently implemented by this
 // package, excluding those with security issues, which are returned by
-// InsecureCipherSuites.
+// [InsecureCipherSuites].
 //
 // The list is sorted by ID. Note that the default cipher suites selected by
 // this package might depend on logic that can't be captured by a static list,
 // and might not match those returned by this function.
 func CipherSuites() []*CipherSuite {
 	return []*CipherSuite{
-		{TLS_RSA_WITH_AES_128_CBC_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
-		{TLS_RSA_WITH_AES_256_CBC_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
-		{TLS_RSA_WITH_AES_128_GCM_SHA256, "TLS_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
-		{TLS_RSA_WITH_AES_256_GCM_SHA384, "TLS_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
-
 		{TLS_AES_128_GCM_SHA256, "TLS_AES_128_GCM_SHA256", supportedOnlyTLS13, false},
 		{TLS_AES_256_GCM_SHA384, "TLS_AES_256_GCM_SHA384", supportedOnlyTLS13, false},
 		{TLS_CHACHA20_POLY1305_SHA256, "TLS_CHACHA20_POLY1305_SHA256", supportedOnlyTLS13, false},
@@ -78,14 +73,18 @@
 // this package and which have security issues.
 //
 // Most applications should not use the cipher suites in this list, and should
-// only use those returned by CipherSuites.
+// only use those returned by [CipherSuites].
 func InsecureCipherSuites() []*CipherSuite {
 	// This list includes RC4, CBC_SHA256, and 3DES cipher suites. See
 	// cipherSuitesPreferenceOrder for details.
 	return []*CipherSuite{
 		{TLS_RSA_WITH_RC4_128_SHA, "TLS_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
 		{TLS_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, true},
+		{TLS_RSA_WITH_AES_128_CBC_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, true},
+		{TLS_RSA_WITH_AES_256_CBC_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, true},
 		{TLS_RSA_WITH_AES_128_CBC_SHA256, "TLS_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
+		{TLS_RSA_WITH_AES_128_GCM_SHA256, "TLS_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, true},
+		{TLS_RSA_WITH_AES_256_GCM_SHA384, "TLS_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, true},
 		{TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
 		{TLS_ECDHE_RSA_WITH_RC4_128_SHA, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
 		{TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, true},
@@ -322,22 +321,47 @@
 	TLS_RSA_WITH_RC4_128_SHA,
 }
 
-// disabledCipherSuites are not used unless explicitly listed in
-// Config.CipherSuites. They MUST be at the end of cipherSuitesPreferenceOrder.
-var disabledCipherSuites = []uint16{
+// disabledCipherSuites are not used unless explicitly listed in Config.CipherSuites.
+var disabledCipherSuites = map[uint16]bool{
 	// CBC_SHA256
-	TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
-	TLS_RSA_WITH_AES_128_CBC_SHA256,
+	TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: true,
+	TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:   true,
+	TLS_RSA_WITH_AES_128_CBC_SHA256:         true,
 
 	// RC4
-	TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA,
-	TLS_RSA_WITH_RC4_128_SHA,
+	TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: true,
+	TLS_ECDHE_RSA_WITH_RC4_128_SHA:   true,
+	TLS_RSA_WITH_RC4_128_SHA:         true,
 }
 
-var (
-	defaultCipherSuitesLen = len(cipherSuitesPreferenceOrder) - len(disabledCipherSuites)
-	defaultCipherSuites    = cipherSuitesPreferenceOrder[:defaultCipherSuitesLen]
-)
+// rsaKexCiphers contains the ciphers which use RSA based key exchange,
+// which we also disable by default unless a GODEBUG is set.
+var rsaKexCiphers = map[uint16]bool{
+	TLS_RSA_WITH_RC4_128_SHA:        true,
+	TLS_RSA_WITH_3DES_EDE_CBC_SHA:   true,
+	TLS_RSA_WITH_AES_128_CBC_SHA:    true,
+	TLS_RSA_WITH_AES_256_CBC_SHA:    true,
+	TLS_RSA_WITH_AES_128_CBC_SHA256: true,
+	TLS_RSA_WITH_AES_128_GCM_SHA256: true,
+	TLS_RSA_WITH_AES_256_GCM_SHA384: true,
+}
+
+var defaultCipherSuites []uint16
+var defaultCipherSuitesWithRSAKex []uint16
+
+func init() {
+	defaultCipherSuites = make([]uint16, 0, len(cipherSuitesPreferenceOrder))
+	defaultCipherSuitesWithRSAKex = make([]uint16, 0, len(cipherSuitesPreferenceOrder))
+	for _, c := range cipherSuitesPreferenceOrder {
+		if disabledCipherSuites[c] {
+			continue
+		}
+		if !rsaKexCiphers[c] {
+			defaultCipherSuites = append(defaultCipherSuites, c)
+		}
+		defaultCipherSuitesWithRSAKex = append(defaultCipherSuitesWithRSAKex, c)
+	}
+}
 
 // defaultCipherSuitesTLS13 is also the preference order, since there are no
 // disabled by default TLS 1.3 cipher suites. The same AES vs ChaCha20 logic as
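Annotation (not part of the patch): with the init loop above, RSA key-exchange suites are dropped from the default list but kept in defaultCipherSuitesWithRSAKex, which is selected when GODEBUG=tlsrsakex=1 is set. A hedged sketch of the practical effect: an application that still needs such a suite can either set that GODEBUG at the process level or list the suite explicitly, since an explicit Config.CipherSuites bypasses the default filtering.

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	cfg := &tls.Config{
		// Explicitly opting back in to a TLS 1.2 RSA key exchange suite.
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // preferred, forward secret
			tls.TLS_RSA_WITH_AES_128_GCM_SHA256,       // legacy fallback, no forward secrecy
		},
		MinVersion: tls.VersionTLS12,
	}
	for _, id := range cfg.CipherSuites {
		fmt.Printf("%#04x %s\n", id, tls.CipherSuiteName(id))
	}
}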
diff --git a/src/crypto/tls/common.go b/src/crypto/tls/common.go
index e0885a0..849e8b0 100644
--- a/src/crypto/tls/common.go
+++ b/src/crypto/tls/common.go
@@ -18,6 +18,7 @@
 	"crypto/x509"
 	"errors"
 	"fmt"
+	"internal/godebug"
 	"io"
 	"net"
 	"strings"
@@ -303,11 +304,13 @@
 // ExportKeyingMaterial returns length bytes of exported key material in a new
 // slice as defined in RFC 5705. If context is nil, it is not used as part of
 // the seed. If the connection was set to allow renegotiation via
-// Config.Renegotiation, this function will return an error.
+// Config.Renegotiation, or if the connections supports neither TLS 1.3 nor
+// Extended Master Secret, this function will return an error.
 //
-// There are conditions in which the returned values might not be unique to a
-// connection. See the Security Considerations sections of RFC 5705 and RFC 7627,
-// and https://mitls.org/pages/attacks/3SHAKE#channelbindings.
+// Exporting key material without Extended Master Secret or TLS 1.3 was disabled
+// in Go 1.22 due to security issues (see the Security Considerations sections
+// of RFC 5705 and RFC 7627), but can be re-enabled with the GODEBUG setting
+// tlsunsafeekm=1.
 func (cs *ConnectionState) ExportKeyingMaterial(label string, context []byte, length int) ([]byte, error) {
 	return cs.ekm(label, context, length)
 }
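Annotation (not part of the patch): a minimal sketch of exporting keying material per RFC 5705 once a handshake has completed. After this change the call fails unless the connection negotiated TLS 1.3 or Extended Master Secret (or GODEBUG=tlsunsafeekm=1 is set); the label, output length, and host below are illustrative only.

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func exportBindingKey(conn *tls.Conn) ([]byte, error) {
	state := conn.ConnectionState()
	// Label and context are application-defined; 32 bytes of output here.
	return state.ExportKeyingMaterial("EXPERIMENTAL my-app binding", nil, 32)
}

func main() {
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{MinVersion: tls.VersionTLS12})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ekm, err := exportBindingKey(conn)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", ekm)
}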
@@ -669,7 +672,9 @@
 	// the list is ignored. Note that TLS 1.3 ciphersuites are not configurable.
 	//
 	// If CipherSuites is nil, a safe default list is used. The default cipher
-	// suites might change over time.
+	// suites might change over time. In Go 1.22 RSA key exchange based cipher
+	// suites were removed from the default list, but can be re-added with the
+	// GODEBUG setting tlsrsakex=1.
 	CipherSuites []uint16
 
 	// PreferServerCipherSuites is a legacy field and has no effect.
@@ -732,14 +737,11 @@
 
 	// MinVersion contains the minimum TLS version that is acceptable.
 	//
-	// By default, TLS 1.2 is currently used as the minimum when acting as a
-	// client, and TLS 1.0 when acting as a server. TLS 1.0 is the minimum
-	// supported by this package, both as a client and as a server.
+	// By default, TLS 1.2 is currently used as the minimum. TLS 1.0 is the
+	// minimum supported by this package.
 	//
-	// The client-side default can temporarily be reverted to TLS 1.0 by
-	// including the value "x509sha1=1" in the GODEBUG environment variable.
-	// Note that this option will be removed in Go 1.19 (but it will still be
-	// possible to set this field to VersionTLS10 explicitly).
+	// The server-side default can be reverted to TLS 1.0 by including the value
+	// "tls10server=1" in the GODEBUG environment variable.
 	MinVersion uint16
 
 	// MaxVersion contains the maximum TLS version that is acceptable.
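Annotation (not part of the patch): per the MinVersion comment above, a server that must still accept TLS 1.0 after this change has two options — set GODEBUG=tls10server=1, or set Config.MinVersion explicitly, which always takes precedence over the default. A hedged sketch of the latter; certificate and key paths are placeholders.

package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		TLSConfig: &tls.Config{
			MinVersion: tls.VersionTLS10, // explicit opt-in to the legacy floor
		},
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}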
@@ -822,7 +824,7 @@
 // ticket, and the lifetime we set for all tickets we send.
 const maxSessionTicketLifetime = 7 * 24 * time.Hour
 
-// Clone returns a shallow clone of c or nil if c is nil. It is safe to clone a Config that is
+// Clone returns a shallow clone of c or nil if c is nil. It is safe to clone a [Config] that is
 // being used concurrently by a TLS client or server.
 func (c *Config) Clone() *Config {
 	if c == nil {
@@ -1006,6 +1008,8 @@
 	return t()
 }
 
+var tlsrsakex = godebug.New("tlsrsakex")
+
 func (c *Config) cipherSuites() []uint16 {
 	if needFIPS() {
 		return fipsCipherSuites(c)
@@ -1013,6 +1017,9 @@
 	if c.CipherSuites != nil {
 		return c.CipherSuites
 	}
+	if tlsrsakex.Value() == "1" {
+		return defaultCipherSuitesWithRSAKex
+	}
 	return defaultCipherSuites
 }
 
@@ -1028,15 +1035,18 @@
 const roleClient = true
 const roleServer = false
 
+var tls10server = godebug.New("tls10server")
+
 func (c *Config) supportedVersions(isClient bool) []uint16 {
 	versions := make([]uint16, 0, len(supportedVersions))
 	for _, v := range supportedVersions {
 		if needFIPS() && (v < fipsMinVersion(c) || v > fipsMaxVersion(c)) {
 			continue
 		}
-		if (c == nil || c.MinVersion == 0) &&
-			isClient && v < VersionTLS12 {
-			continue
+		if (c == nil || c.MinVersion == 0) && v < VersionTLS12 {
+			if isClient || tls10server.Value() != "1" {
+				continue
+			}
 		}
 		if c != nil && c.MinVersion != 0 && v < c.MinVersion {
 			continue
@@ -1157,9 +1167,9 @@
 // the client that sent the ClientHello. Otherwise, it returns an error
 // describing the reason for the incompatibility.
 //
-// If this ClientHelloInfo was passed to a GetConfigForClient or GetCertificate
-// callback, this method will take into account the associated Config. Note that
-// if GetConfigForClient returns a different Config, the change can't be
+// If this [ClientHelloInfo] was passed to a GetConfigForClient or GetCertificate
+// callback, this method will take into account the associated [Config]. Note that
+// if GetConfigForClient returns a different [Config], the change can't be
 // accounted for by this method.
 //
 // This function will call x509.ParseCertificate unless c.Leaf is set, which can
@@ -1450,7 +1460,7 @@
 	state      *ClientSessionState
 }
 
-// NewLRUClientSessionCache returns a ClientSessionCache with the given
+// NewLRUClientSessionCache returns a [ClientSessionCache] with the given
 // capacity that uses an LRU strategy. If capacity is < 1, a default capacity
 // is used instead.
 func NewLRUClientSessionCache(capacity int) ClientSessionCache {
@@ -1499,7 +1509,7 @@
 	c.m[sessionKey] = elem
 }
 
-// Get returns the ClientSessionState value associated with a given key. It
+// Get returns the [ClientSessionState] value associated with a given key. It
 // returns (nil, false) if no value is found.
 func (c *lruSessionCache) Get(sessionKey string) (*ClientSessionState, bool) {
 	c.Lock()
diff --git a/src/crypto/tls/conn.go b/src/crypto/tls/conn.go
index c04bd48..0e46698 100644
--- a/src/crypto/tls/conn.go
+++ b/src/crypto/tls/conn.go
@@ -15,6 +15,7 @@
 	"errors"
 	"fmt"
 	"hash"
+	"internal/godebug"
 	"io"
 	"net"
 	"sync"
@@ -136,21 +137,21 @@
 }
 
 // SetDeadline sets the read and write deadlines associated with the connection.
-// A zero value for t means Read and Write will not time out.
+// A zero value for t means [Conn.Read] and [Conn.Write] will not time out.
 // After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
 func (c *Conn) SetDeadline(t time.Time) error {
 	return c.conn.SetDeadline(t)
 }
 
 // SetReadDeadline sets the read deadline on the underlying connection.
-// A zero value for t means Read will not time out.
+// A zero value for t means [Conn.Read] will not time out.
 func (c *Conn) SetReadDeadline(t time.Time) error {
 	return c.conn.SetReadDeadline(t)
 }
 
 // SetWriteDeadline sets the write deadline on the underlying connection.
-// A zero value for t means Write will not time out.
-// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
+// A zero value for t means [Conn.Write] will not time out.
+// After a [Conn.Write] has timed out, the TLS state is corrupt and all future writes will return the same error.
 func (c *Conn) SetWriteDeadline(t time.Time) error {
 	return c.conn.SetWriteDeadline(t)
 }
@@ -1173,10 +1174,10 @@
 
 // Write writes data to the connection.
 //
-// As Write calls Handshake, in order to prevent indefinite blocking a deadline
-// must be set for both Read and Write before Write is called when the handshake
-// has not yet completed. See SetDeadline, SetReadDeadline, and
-// SetWriteDeadline.
+// As Write calls [Conn.Handshake], in order to prevent indefinite blocking a deadline
+// must be set for both [Conn.Read] and Write before Write is called when the handshake
+// has not yet completed. See [Conn.SetDeadline], [Conn.SetReadDeadline], and
+// [Conn.SetWriteDeadline].
 func (c *Conn) Write(b []byte) (int, error) {
 	// interlock with Close below
 	for {
@@ -1348,10 +1349,10 @@
 
 // Read reads data from the connection.
 //
-// As Read calls Handshake, in order to prevent indefinite blocking a deadline
-// must be set for both Read and Write before Read is called when the handshake
-// has not yet completed. See SetDeadline, SetReadDeadline, and
-// SetWriteDeadline.
+// As Read calls [Conn.Handshake], in order to prevent indefinite blocking a deadline
+// must be set for both Read and [Conn.Write] before Read is called when the handshake
+// has not yet completed. See [Conn.SetDeadline], [Conn.SetReadDeadline], and
+// [Conn.SetWriteDeadline].
 func (c *Conn) Read(b []byte) (int, error) {
 	if err := c.Handshake(); err != nil {
 		return 0, err
@@ -1435,7 +1436,7 @@
 
 // CloseWrite shuts down the writing side of the connection. It should only be
 // called once the handshake has completed and does not call CloseWrite on the
-// underlying connection. Most callers should just use Close.
+// underlying connection. Most callers should just use [Conn.Close].
 func (c *Conn) CloseWrite() error {
 	if !c.isHandshakeComplete.Load() {
 		return errEarlyCloseWrite
@@ -1463,10 +1464,10 @@
 // protocol if it has not yet been run.
 //
 // Most uses of this package need not call Handshake explicitly: the
-// first Read or Write will call it automatically.
+// first [Conn.Read] or [Conn.Write] will call it automatically.
 //
 // For control over canceling or setting a timeout on a handshake, use
-// HandshakeContext or the Dialer's DialContext method instead.
+// [Conn.HandshakeContext] or the [Dialer]'s DialContext method instead.
 //
 // In order to avoid denial of service attacks, the maximum RSA key size allowed
 // in certificates sent by either the TLS server or client is limited to 8192
@@ -1485,7 +1486,7 @@
 // connection.
 //
 // Most uses of this package need not call HandshakeContext explicitly: the
-// first Read or Write will call it automatically.
+// first [Conn.Read] or [Conn.Write] will call it automatically.
 func (c *Conn) HandshakeContext(ctx context.Context) error {
 	// Delegate to unexported method for named return
 	// without confusing documented signature.
@@ -1599,6 +1600,8 @@
 	return c.connectionStateLocked()
 }
 
+var tlsunsafeekm = godebug.New("tlsunsafeekm")
+
 func (c *Conn) connectionStateLocked() ConnectionState {
 	var state ConnectionState
 	state.HandshakeComplete = c.isHandshakeComplete.Load()
@@ -1620,7 +1623,15 @@
 		}
 	}
 	if c.config.Renegotiation != RenegotiateNever {
-		state.ekm = noExportedKeyingMaterial
+		state.ekm = noEKMBecauseRenegotiation
+	} else if c.vers != VersionTLS13 && !c.extMasterSecret {
+		state.ekm = func(label string, context []byte, length int) ([]byte, error) {
+			if tlsunsafeekm.Value() == "1" {
+				tlsunsafeekm.IncNonDefault()
+				return c.ekm(label, context, length)
+			}
+			return noEKMBecauseNoEMS(label, context, length)
+		}
 	} else {
 		state.ekm = c.ekm
 	}
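
A sketch of what the change above means for callers (the helper and its label are illustrative assumptions): on a pre-TLS 1.3 connection that did not negotiate Extended Master Secret, ExportKeyingMaterial now fails unless the process runs with GODEBUG=tlsunsafeekm=1.

package example

import (
	"crypto/tls"
	"log"
)

// exportEKM asks the connection for 32 bytes of keying material; conn is
// assumed to be an already-handshaken *tls.Conn.
func exportEKM(conn *tls.Conn) []byte {
	ekm, err := conn.ConnectionState().ExportKeyingMaterial("EXPERIMENTAL example", nil, 32)
	if err != nil {
		// Without TLS 1.3 or EMS the export is refused; the error text points
		// at the tlsunsafeekm=1 override for legacy peers.
		log.Printf("keying material unavailable: %v", err)
		return nil
	}
	return ekm
}
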
diff --git a/src/crypto/tls/handshake_client.go b/src/crypto/tls/handshake_client.go
index 4649f36..f016e01 100644
--- a/src/crypto/tls/handshake_client.go
+++ b/src/crypto/tls/handshake_client.go
@@ -526,6 +526,10 @@
 		return errors.New("tls: server chose an unconfigured cipher suite")
 	}
 
+	if hs.c.config.CipherSuites == nil && rsaKexCiphers[hs.suite.id] {
+		tlsrsakex.IncNonDefault()
+	}
+
 	hs.c.cipherSuite = hs.suite.id
 	return nil
 }
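
For illustration, a minimal way a client can keep using an RSA key-exchange suite now that it is counted as non-default (the helper is an assumption; an explicit CipherSuites list sidesteps the nil-CipherSuites default, and GODEBUG=tlsrsakex=1 is the knob the counter above is tied to):

package example

import "crypto/tls"

// rsaKexClientConfig explicitly lists an RSA key-exchange suite, which only
// exists for pre-TLS 1.3 handshakes.
func rsaKexClientConfig() *tls.Config {
	return &tls.Config{
		MaxVersion:   tls.VersionTLS12,
		CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_GCM_SHA256},
	}
}
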
diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go
index a2052ceb..ee9e79a 100644
--- a/src/crypto/tls/handshake_client_test.go
+++ b/src/crypto/tls/handshake_client_test.go
@@ -2783,44 +2783,19 @@
 -----END CERTIFICATE-----`
 
 func TestHandshakeRSATooBig(t *testing.T) {
-	for _, tc := range []struct {
-		name              string
-		godebug           string
-		expectedServerErr string
-		expectedClientErr string
-	}{
-		{
-			name:              "key too large",
-			expectedServerErr: "tls: server sent certificate containing RSA key larger than 8192 bits",
-			expectedClientErr: "tls: client sent certificate containing RSA key larger than 8192 bits",
-		},
-		{
-			name:    "acceptable key (GODEBUG=tlsmaxrsasize=8193)",
-			godebug: "tlsmaxrsasize=8193",
-		},
-	} {
-		t.Run(tc.name, func(t *testing.T) {
-			if tc.godebug != "" {
-				t.Setenv("GODEBUG", tc.godebug)
-			}
+	testCert, _ := pem.Decode([]byte(largeRSAKeyCertPEM))
 
-			testCert, _ := pem.Decode([]byte(largeRSAKeyCertPEM))
+	c := &Conn{conn: &discardConn{}, config: testConfig.Clone()}
 
-			c := &Conn{conn: &discardConn{}, config: testConfig.Clone()}
+	expectedErr := "tls: server sent certificate containing RSA key larger than 8192 bits"
+	err := c.verifyServerCertificate([][]byte{testCert.Bytes})
+	if err == nil || err.Error() != expectedErr {
+		t.Errorf("Conn.verifyServerCertificate unexpected error: want %q, got %q", expectedErr, err)
+	}
 
-			err := c.verifyServerCertificate([][]byte{testCert.Bytes})
-			if tc.expectedServerErr == "" && err != nil {
-				t.Errorf("Conn.verifyServerCertificate unexpected error: %s", err)
-			} else if tc.expectedServerErr != "" && (err == nil || err.Error() != tc.expectedServerErr) {
-				t.Errorf("Conn.verifyServerCertificate unexpected error: want %q, got %q", tc.expectedServerErr, err)
-			}
-
-			err = c.processCertsFromClient(Certificate{Certificate: [][]byte{testCert.Bytes}})
-			if tc.expectedClientErr == "" && err != nil {
-				t.Errorf("Conn.processCertsFromClient unexpected error: %s", err)
-			} else if tc.expectedClientErr != "" && (err == nil || err.Error() != tc.expectedClientErr) {
-				t.Errorf("Conn.processCertsFromClient unexpected error: want %q, got %q", tc.expectedClientErr, err)
-			}
-		})
+	expectedErr = "tls: client sent certificate containing RSA key larger than 8192 bits"
+	err = c.processCertsFromClient(Certificate{Certificate: [][]byte{testCert.Bytes}})
+	if err == nil || err.Error() != expectedErr {
+		t.Errorf("Conn.processCertsFromClient unexpected error: want %q, got %q", expectedErr, err)
 	}
 }
diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go
index 996b23b..8129e9c 100644
--- a/src/crypto/tls/handshake_server.go
+++ b/src/crypto/tls/handshake_server.go
@@ -168,6 +168,10 @@
 	c.in.version = c.vers
 	c.out.version = c.vers
 
+	if c.config.MinVersion == 0 && c.vers < VersionTLS12 {
+		tls10server.IncNonDefault()
+	}
+
 	return clientHello, nil
 }
 
@@ -366,6 +370,10 @@
 	}
 	c.cipherSuite = hs.suite.id
 
+	if c.config.CipherSuites == nil && rsaKexCiphers[hs.suite.id] {
+		tlsrsakex.IncNonDefault()
+	}
+
 	for _, id := range hs.clientHello.cipherSuites {
 		if id == TLS_FALLBACK_SCSV {
 			// The client is doing a fallback connection. See RFC 7507.
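
Likewise, a sketch of keeping TLS 1.0/1.1 available on a server once a zero MinVersion no longer reaches them by default (the helper is an assumption; GODEBUG=tls10server=1 is the knob the counter above is tied to):

package example

import "crypto/tls"

// legacyServerConfig opts a server back in to TLS 1.0; certs is an assumed,
// already-loaded certificate list.
func legacyServerConfig(certs []tls.Certificate) *tls.Config {
	return &tls.Config{
		Certificates: certs,
		MinVersion:   tls.VersionTLS10,
	}
}
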
diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go
index 04abdcc..15db760 100644
--- a/src/crypto/tls/handshake_server_test.go
+++ b/src/crypto/tls/handshake_server_test.go
@@ -389,21 +389,22 @@
 func TestVersion(t *testing.T) {
 	serverConfig := &Config{
 		Certificates: testConfig.Certificates,
-		MaxVersion:   VersionTLS11,
+		MaxVersion:   VersionTLS13,
 	}
 	clientConfig := &Config{
 		InsecureSkipVerify: true,
-		MinVersion:         VersionTLS10,
+		MinVersion:         VersionTLS12,
 	}
 	state, _, err := testHandshake(t, clientConfig, serverConfig)
 	if err != nil {
 		t.Fatalf("handshake failed: %s", err)
 	}
-	if state.Version != VersionTLS11 {
-		t.Fatalf("incorrect version %x, should be %x", state.Version, VersionTLS11)
+	if state.Version != VersionTLS13 {
+		t.Fatalf("incorrect version %x, should be %x", state.Version, VersionTLS13)
 	}
 
 	clientConfig.MinVersion = 0
+	serverConfig.MaxVersion = VersionTLS11
 	_, _, err = testHandshake(t, clientConfig, serverConfig)
 	if err == nil {
 		t.Fatalf("expected failure to connect with TLS 1.0/1.1")
@@ -487,17 +488,17 @@
 		InsecureSkipVerify: true,
 		ClientSessionCache: NewLRUClientSessionCache(1),
 		ServerName:         "servername",
-		MinVersion:         VersionTLS10,
+		MinVersion:         VersionTLS12,
 	}
 
-	// Establish a session at TLS 1.1.
-	clientConfig.MaxVersion = VersionTLS11
+	// Establish a session at TLS 1.3.
+	clientConfig.MaxVersion = VersionTLS13
 	_, _, err := testHandshake(t, clientConfig, serverConfig)
 	if err != nil {
 		t.Fatalf("handshake failed: %s", err)
 	}
 
-	// The client session cache now contains a TLS 1.1 session.
+	// The client session cache now contains a TLS 1.3 session.
 	state, _, err := testHandshake(t, clientConfig, serverConfig)
 	if err != nil {
 		t.Fatalf("handshake failed: %s", err)
@@ -507,7 +508,7 @@
 	}
 
 	// Test that the server will decline to resume at a lower version.
-	clientConfig.MaxVersion = VersionTLS10
+	clientConfig.MaxVersion = VersionTLS12
 	state, _, err = testHandshake(t, clientConfig, serverConfig)
 	if err != nil {
 		t.Fatalf("handshake failed: %s", err)
@@ -516,7 +517,7 @@
 		t.Fatalf("handshake resumed at a lower version")
 	}
 
-	// The client session cache now contains a TLS 1.0 session.
+	// The client session cache now contains a TLS 1.2 session.
 	state, _, err = testHandshake(t, clientConfig, serverConfig)
 	if err != nil {
 		t.Fatalf("handshake failed: %s", err)
@@ -526,7 +527,7 @@
 	}
 
 	// Test that the server will decline to resume at a higher version.
-	clientConfig.MaxVersion = VersionTLS11
+	clientConfig.MaxVersion = VersionTLS13
 	state, _, err = testHandshake(t, clientConfig, serverConfig)
 	if err != nil {
 		t.Fatalf("handshake failed: %s", err)
@@ -1170,6 +1171,7 @@
 func TestFallbackSCSV(t *testing.T) {
 	serverConfig := Config{
 		Certificates: testConfig.Certificates,
+		MinVersion:   VersionTLS11,
 	}
 	test := &serverTest{
 		name:   "FallbackSCSV",
diff --git a/src/crypto/tls/handshake_server_tls13.go b/src/crypto/tls/handshake_server_tls13.go
index 07b1a38..21d798d 100644
--- a/src/crypto/tls/handshake_server_tls13.go
+++ b/src/crypto/tls/handshake_server_tls13.go
@@ -240,8 +240,15 @@
 	c.clientProtocol = selectedProto
 
 	if c.quic != nil {
+		// RFC 9001 Section 4.2: Clients MUST NOT offer TLS versions older than 1.3.
+		for _, v := range hs.clientHello.supportedVersions {
+			if v < VersionTLS13 {
+				c.sendAlert(alertProtocolVersion)
+				return errors.New("tls: client offered TLS version older than TLS 1.3")
+			}
+		}
+		// RFC 9001 Section 8.2.
 		if hs.clientHello.quicTransportParameters == nil {
-			// RFC 9001 Section 8.2.
 			c.sendAlert(alertMissingExtension)
 			return errors.New("tls: client did not send a quic_transport_parameters extension")
 		}
diff --git a/src/crypto/tls/prf.go b/src/crypto/tls/prf.go
index 20bac96..a7fa337 100644
--- a/src/crypto/tls/prf.go
+++ b/src/crypto/tls/prf.go
@@ -252,13 +252,20 @@
 	h.buffer = nil
 }
 
-// noExportedKeyingMaterial is used as a value of
+// noEKMBecauseRenegotiation is used as a value of
 // ConnectionState.ekm when renegotiation is enabled and thus
 // we wish to fail all key-material export requests.
-func noExportedKeyingMaterial(label string, context []byte, length int) ([]byte, error) {
+func noEKMBecauseRenegotiation(label string, context []byte, length int) ([]byte, error) {
 	return nil, errors.New("crypto/tls: ExportKeyingMaterial is unavailable when renegotiation is enabled")
 }
 
+// noEKMBecauseNoEMS is used as a value of ConnectionState.ekm when Extended
+// Master Secret is not negotiated and thus we wish to fail all key-material
+// export requests.
+func noEKMBecauseNoEMS(label string, context []byte, length int) ([]byte, error) {
+	return nil, errors.New("crypto/tls: ExportKeyingMaterial is unavailable when neither TLS 1.3 nor Extended Master Secret are negotiated; override with GODEBUG=tlsunsafeekm=1")
+}
+
 // ekmFromMasterSecret generates exported keying material as defined in RFC 5705.
 func ekmFromMasterSecret(version uint16, suite *cipherSuite, masterSecret, clientRandom, serverRandom []byte) func(string, []byte, int) ([]byte, error) {
 	return func(label string, context []byte, length int) ([]byte, error) {
diff --git a/src/crypto/tls/quic.go b/src/crypto/tls/quic.go
index ba5c2af..3518169 100644
--- a/src/crypto/tls/quic.go
+++ b/src/crypto/tls/quic.go
@@ -46,7 +46,7 @@
 	sessionTicketSent bool
 }
 
-// A QUICConfig configures a QUICConn.
+// A QUICConfig configures a [QUICConn].
 type QUICConfig struct {
 	TLSConfig *Config
 }
@@ -163,7 +163,7 @@
 }
 
 // Start starts the client or server handshake protocol.
-// It may produce connection events, which may be read with NextEvent.
+// It may produce connection events, which may be read with [QUICConn.NextEvent].
 //
 // Start must be called at most once.
 func (q *QUICConn) Start(ctx context.Context) error {
@@ -182,7 +182,7 @@
 }
 
 // NextEvent returns the next event occurring on the connection.
-// It returns an event with a Kind of QUICNoEvent when no events are available.
+// It returns an event with a Kind of [QUICNoEvent] when no events are available.
 func (q *QUICConn) NextEvent() QUICEvent {
 	qs := q.conn.quic
 	if last := qs.nextEvent - 1; last >= 0 && len(qs.events[last].Data) > 0 {
@@ -214,7 +214,7 @@
 }
 
 // HandleData handles handshake bytes received from the peer.
-// It may produce connection events, which may be read with NextEvent.
+// It may produce connection events, which may be read with [QUICConn.NextEvent].
 func (q *QUICConn) HandleData(level QUICEncryptionLevel, data []byte) error {
 	c := q.conn
 	if c.in.level != level {
@@ -258,7 +258,7 @@
 }
 
 // SendSessionTicket sends a session ticket to the client.
-// It produces connection events, which may be read with NextEvent.
+// It produces connection events, which may be read with [QUICConn.NextEvent].
 // Currently, it can only be called once.
 func (q *QUICConn) SendSessionTicket(opts QUICSessionTicketOptions) error {
 	c := q.conn
@@ -283,7 +283,7 @@
 // SetTransportParameters sets the transport parameters to send to the peer.
 //
 // Server connections may delay setting the transport parameters until after
-// receiving the client's transport parameters. See QUICTransportParametersRequired.
+// receiving the client's transport parameters. See [QUICTransportParametersRequired].
 func (q *QUICConn) SetTransportParameters(params []byte) {
 	if params == nil {
 		params = []byte{}
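
The doc links above all point at the QUICConn event model; a minimal sketch of the loop they imply (caller code is assumed, event handling elided):

package example

import (
	"context"
	"crypto/tls"
)

// drainEvents starts the handshake and drains pending events until the
// connection reports QUICNoEvent; a real QUIC stack would act on each event
// kind and feed peer handshake bytes back in via HandleData.
func drainEvents(ctx context.Context, cfg *tls.QUICConfig) error {
	conn := tls.QUICClient(cfg)
	if err := conn.Start(ctx); err != nil {
		return err
	}
	for {
		ev := conn.NextEvent()
		if ev.Kind == tls.QUICNoEvent {
			return nil
		}
		// Handle ev here (write keys, handshake data to send, and so on).
	}
}
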
diff --git a/src/crypto/tls/ticket.go b/src/crypto/tls/ticket.go
index b43101f..b71e3af 100644
--- a/src/crypto/tls/ticket.go
+++ b/src/crypto/tls/ticket.go
@@ -69,7 +69,7 @@
 	// To allow different layers in a protocol stack to share this field,
 	// applications must only append to it, not replace it, and must use entries
 	// that can be recognized even if out of order (for example, by starting
-	// with a id and version prefix).
+	// with an id and version prefix).
 	Extra [][]byte
 
 	// EarlyData indicates whether the ticket can be used for 0-RTT in a QUIC
@@ -305,7 +305,7 @@
 	}, nil
 }
 
-// EncryptTicket encrypts a ticket with the Config's configured (or default)
+// EncryptTicket encrypts a ticket with the [Config]'s configured (or default)
 // session ticket keys. It can be used as a [Config.WrapSession] implementation.
 func (c *Config) EncryptTicket(cs ConnectionState, ss *SessionState) ([]byte, error) {
 	ticketKeys := c.ticketKeys(nil)
diff --git a/src/crypto/tls/tls.go b/src/crypto/tls/tls.go
index b529c70..8509b7d 100644
--- a/src/crypto/tls/tls.go
+++ b/src/crypto/tls/tls.go
@@ -71,7 +71,7 @@
 }
 
 // NewListener creates a Listener which accepts connections from an inner
-// Listener and wraps each connection with Server.
+// Listener and wraps each connection with [Server].
 // The configuration config must be non-nil and must include
 // at least one certificate or else set GetCertificate.
 func NewListener(inner net.Listener, config *Config) net.Listener {
@@ -109,10 +109,10 @@
 // handshake as a whole.
 //
 // DialWithDialer interprets a nil configuration as equivalent to the zero
-// configuration; see the documentation of Config for the defaults.
+// configuration; see the documentation of [Config] for the defaults.
 //
 // DialWithDialer uses context.Background internally; to specify the context,
-// use Dialer.DialContext with NetDialer set to the desired dialer.
+// use [Dialer.DialContext] with NetDialer set to the desired dialer.
 func DialWithDialer(dialer *net.Dialer, network, addr string, config *Config) (*Conn, error) {
 	return dial(context.Background(), dialer, network, addr, config)
 }
@@ -189,10 +189,10 @@
 // Dial connects to the given network address and initiates a TLS
 // handshake, returning the resulting TLS connection.
 //
-// The returned Conn, if any, will always be of type *Conn.
+// The returned [Conn], if any, will always be of type *[Conn].
 //
 // Dial uses context.Background internally; to specify the context,
-// use DialContext.
+// use [Dialer.DialContext].
 func (d *Dialer) Dial(network, addr string) (net.Conn, error) {
 	return d.DialContext(context.Background(), network, addr)
 }
@@ -212,7 +212,7 @@
 // connected, any expiration of the context will not affect the
 // connection.
 //
-// The returned Conn, if any, will always be of type *Conn.
+// The returned [Conn], if any, will always be of type *[Conn].
 func (d *Dialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
 	c, err := dial(ctx, d.netDialer(), network, addr, d.Config)
 	if err != nil {
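
A small sketch of the [Dialer.DialContext] pattern the comments above recommend (the address and timeout are arbitrary assumptions):

package example

import (
	"context"
	"crypto/tls"
	"net"
	"time"
)

// dialWithTimeout bounds the dial and handshake with a context; once the
// connection is established, cancellation no longer affects it.
func dialWithTimeout(addr string) (net.Conn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	d := &tls.Dialer{Config: &tls.Config{MinVersion: tls.VersionTLS12}}
	return d.DialContext(ctx, "tcp", addr)
}
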
diff --git a/src/crypto/tls/tls_test.go b/src/crypto/tls/tls_test.go
index c3f16c7..42a0272 100644
--- a/src/crypto/tls/tls_test.go
+++ b/src/crypto/tls/tls_test.go
@@ -1288,7 +1288,8 @@
 			SignatureSchemes:  []SignatureScheme{PKCS1WithSHA1},
 			SupportedVersions: []uint16{VersionTLS13, VersionTLS12},
 			config: &Config{
-				MaxVersion: VersionTLS12,
+				CipherSuites: []uint16{TLS_RSA_WITH_AES_128_GCM_SHA256},
+				MaxVersion:   VersionTLS12,
 			},
 		}, ""}, // Check that mutual version selection works.
 
@@ -1365,6 +1366,7 @@
 			SupportedPoints:   []uint8{pointFormatUncompressed},
 			SignatureSchemes:  []SignatureScheme{Ed25519},
 			SupportedVersions: []uint16{VersionTLS10},
+			config:            &Config{MinVersion: VersionTLS10},
 		}, "doesn't support Ed25519"},
 		{ed25519Cert, &ClientHelloInfo{
 			CipherSuites:      []uint16{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256},
@@ -1379,10 +1381,14 @@
 			SupportedCurves:   []CurveID{CurveP256}, // only relevant for ECDHE support
 			SupportedPoints:   []uint8{pointFormatUncompressed},
 			SupportedVersions: []uint16{VersionTLS10},
+			config:            &Config{MinVersion: VersionTLS10},
 		}, ""},
 		{rsaCert, &ClientHelloInfo{
 			CipherSuites:      []uint16{TLS_RSA_WITH_AES_128_GCM_SHA256},
 			SupportedVersions: []uint16{VersionTLS12},
+			config: &Config{
+				CipherSuites: []uint16{TLS_RSA_WITH_AES_128_GCM_SHA256},
+			},
 		}, ""}, // static RSA fallback
 	}
 	for i, tt := range tests {
@@ -1484,24 +1490,21 @@
 	if len(cipherSuitesPreferenceOrderNoAES) != len(cipherSuitesPreferenceOrder) {
 		t.Errorf("cipherSuitesPreferenceOrderNoAES is not the same size as cipherSuitesPreferenceOrder")
 	}
+	if len(defaultCipherSuites) >= len(defaultCipherSuitesWithRSAKex) {
+		t.Errorf("defaultCipherSuitesWithRSAKex should be longer than defaultCipherSuites")
+	}
 
-	// Check that disabled suites are at the end of the preference lists, and
-	// that they are marked insecure.
-	for i, id := range disabledCipherSuites {
-		offset := len(cipherSuitesPreferenceOrder) - len(disabledCipherSuites)
-		if cipherSuitesPreferenceOrder[offset+i] != id {
-			t.Errorf("disabledCipherSuites[%d]: not at the end of cipherSuitesPreferenceOrder", i)
-		}
-		if cipherSuitesPreferenceOrderNoAES[offset+i] != id {
-			t.Errorf("disabledCipherSuites[%d]: not at the end of cipherSuitesPreferenceOrderNoAES", i)
-		}
-		c := CipherSuiteByID(id)
-		if c == nil {
-			t.Errorf("%#04x: no CipherSuite entry", id)
-			continue
-		}
-		if !c.Insecure {
-			t.Errorf("%#04x: disabled by default but not marked insecure", id)
+	// Check that disabled suites are marked insecure.
+	for _, badSuites := range []map[uint16]bool{disabledCipherSuites, rsaKexCiphers} {
+		for id := range badSuites {
+			c := CipherSuiteByID(id)
+			if c == nil {
+				t.Errorf("%#04x: no CipherSuite entry", id)
+				continue
+			}
+			if !c.Insecure {
+				t.Errorf("%#04x: disabled by default but not marked insecure", id)
+			}
 		}
 	}
 
diff --git a/src/crypto/x509/cert_pool.go b/src/crypto/x509/cert_pool.go
index e9b2c12..e4c5694 100644
--- a/src/crypto/x509/cert_pool.go
+++ b/src/crypto/x509/cert_pool.go
@@ -44,6 +44,11 @@
 	// fewer allocations.
 	rawSubject []byte
 
+	// constraint is a function to run against a chain when it is a candidate to
+	// be added to the chain. This allows adding arbitrary constraints that are
+	// not specified in the certificate itself.
+	constraint func([]*Certificate) error
+
 	// getCert returns the certificate.
 	//
 	// It is not meant to do network operations or anything else
@@ -73,8 +78,9 @@
 }
 
 // cert returns cert index n in s.
-func (s *CertPool) cert(n int) (*Certificate, error) {
-	return s.lazyCerts[n].getCert()
+func (s *CertPool) cert(n int) (*Certificate, func([]*Certificate) error, error) {
+	cert, err := s.lazyCerts[n].getCert()
+	return cert, s.lazyCerts[n].constraint, err
 }
 
 // Clone returns a copy of s.
@@ -116,9 +122,14 @@
 	return loadSystemRoots()
 }
 
-// findPotentialParents returns the indexes of certificates in s which might
-// have signed cert.
-func (s *CertPool) findPotentialParents(cert *Certificate) []*Certificate {
+type potentialParent struct {
+	cert       *Certificate
+	constraint func([]*Certificate) error
+}
+
+// findPotentialParents returns the certificates in s which might have signed
+// cert.
+func (s *CertPool) findPotentialParents(cert *Certificate) []potentialParent {
 	if s == nil {
 		return nil
 	}
@@ -129,21 +140,21 @@
 	//   AKID and SKID match
 	//   AKID present, SKID missing / AKID missing, SKID present
 	//   AKID and SKID don't match
-	var matchingKeyID, oneKeyID, mismatchKeyID []*Certificate
+	var matchingKeyID, oneKeyID, mismatchKeyID []potentialParent
 	for _, c := range s.byName[string(cert.RawIssuer)] {
-		candidate, err := s.cert(c)
+		candidate, constraint, err := s.cert(c)
 		if err != nil {
 			continue
 		}
 		kidMatch := bytes.Equal(candidate.SubjectKeyId, cert.AuthorityKeyId)
 		switch {
 		case kidMatch:
-			matchingKeyID = append(matchingKeyID, candidate)
+			matchingKeyID = append(matchingKeyID, potentialParent{candidate, constraint})
 		case (len(candidate.SubjectKeyId) == 0 && len(cert.AuthorityKeyId) > 0) ||
 			(len(candidate.SubjectKeyId) > 0 && len(cert.AuthorityKeyId) == 0):
-			oneKeyID = append(oneKeyID, candidate)
+			oneKeyID = append(oneKeyID, potentialParent{candidate, constraint})
 		default:
-			mismatchKeyID = append(mismatchKeyID, candidate)
+			mismatchKeyID = append(mismatchKeyID, potentialParent{candidate, constraint})
 		}
 	}
 
@@ -151,7 +162,7 @@
 	if found == 0 {
 		return nil
 	}
-	candidates := make([]*Certificate, 0, found)
+	candidates := make([]potentialParent, 0, found)
 	candidates = append(candidates, matchingKeyID...)
 	candidates = append(candidates, oneKeyID...)
 	candidates = append(candidates, mismatchKeyID...)
@@ -172,7 +183,7 @@
 	}
 	s.addCertFunc(sha256.Sum224(cert.Raw), string(cert.RawSubject), func() (*Certificate, error) {
 		return cert, nil
-	})
+	}, nil)
 }
 
 // addCertFunc adds metadata about a certificate to a pool, along with
@@ -180,7 +191,7 @@
 //
 // The rawSubject is Certificate.RawSubject and must be non-empty.
 // The getCert func may be called 0 or more times.
-func (s *CertPool) addCertFunc(rawSum224 sum224, rawSubject string, getCert func() (*Certificate, error)) {
+func (s *CertPool) addCertFunc(rawSum224 sum224, rawSubject string, getCert func() (*Certificate, error), constraint func([]*Certificate) error) {
 	if getCert == nil {
 		panic("getCert can't be nil")
 	}
@@ -194,6 +205,7 @@
 	s.lazyCerts = append(s.lazyCerts, lazyCert{
 		rawSubject: []byte(rawSubject),
 		getCert:    getCert,
+		constraint: constraint,
 	})
 	s.byName[rawSubject] = append(s.byName[rawSubject], len(s.lazyCerts)-1)
 }
@@ -231,7 +243,7 @@
 				certBytes = nil
 			})
 			return lazyCert.v, nil
-		})
+		}, nil)
 		ok = true
 	}
 
@@ -241,7 +253,7 @@
 // Subjects returns a list of the DER-encoded subjects of
 // all of the certificates in the pool.
 //
-// Deprecated: if s was returned by SystemCertPool, Subjects
+// Deprecated: if s was returned by [SystemCertPool], Subjects
 // will not include the system roots.
 func (s *CertPool) Subjects() [][]byte {
 	res := make([][]byte, s.len())
@@ -266,3 +278,17 @@
 	}
 	return true
 }
+
+// AddCertWithConstraint adds a certificate to the pool with the additional
+// constraint. When Certificate.Verify builds a chain which is rooted by cert,
+// it will additionally pass the whole chain to constraint to determine its
+// validity. If constraint returns a non-nil error, the chain will be discarded.
+// constraint may be called concurrently from multiple goroutines.
+func (s *CertPool) AddCertWithConstraint(cert *Certificate, constraint func([]*Certificate) error) {
+	if cert == nil {
+		panic("adding nil Certificate to CertPool")
+	}
+	s.addCertFunc(sha256.Sum224(cert.Raw), string(cert.RawSubject), func() (*Certificate, error) {
+		return cert, nil
+	}, constraint)
+}
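
A minimal sketch of the new AddCertWithConstraint API above (the cutoff policy and the root parameter are assumptions for illustration):

package example

import (
	"crypto/x509"
	"errors"
	"time"
)

// newConstrainedPool trusts root only for chains whose certificates were all
// issued before an arbitrary cutoff; the constraint runs on every candidate
// chain rooted by root during Certificate.Verify.
func newConstrainedPool(root *x509.Certificate) *x509.CertPool {
	cutoff := time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC)
	pool := x509.NewCertPool()
	pool.AddCertWithConstraint(root, func(chain []*x509.Certificate) error {
		for _, c := range chain {
			if c.NotBefore.After(cutoff) {
				return errors.New("root is not trusted for certificates issued after the cutoff")
			}
		}
		return nil
	})
	return pool
}
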
diff --git a/src/crypto/x509/oid.go b/src/crypto/x509/oid.go
new file mode 100644
index 0000000..5359af6
--- /dev/null
+++ b/src/crypto/x509/oid.go
@@ -0,0 +1,273 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+	"bytes"
+	"encoding/asn1"
+	"errors"
+	"math"
+	"math/big"
+	"math/bits"
+	"strconv"
+	"strings"
+)
+
+var (
+	errInvalidOID = errors.New("invalid oid")
+)
+
+// An OID represents an ASN.1 OBJECT IDENTIFIER.
+type OID struct {
+	der []byte
+}
+
+func newOIDFromDER(der []byte) (OID, bool) {
+	if len(der) == 0 || der[len(der)-1]&0x80 != 0 {
+		return OID{}, false
+	}
+
+	start := 0
+	for i, v := range der {
+		// ITU-T X.690, section 8.19.2:
+		// The subidentifier shall be encoded in the fewest possible octets,
+		// that is, the leading octet of the subidentifier shall not have the value 0x80.
+		if i == start && v == 0x80 {
+			return OID{}, false
+		}
+		if v&0x80 == 0 {
+			start = i + 1
+		}
+	}
+
+	return OID{der}, true
+}
+
+// OIDFromInts creates a new OID using ints, where each integer is a separate component.
+func OIDFromInts(oid []uint64) (OID, error) {
+	if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
+		return OID{}, errInvalidOID
+	}
+
+	length := base128IntLength(oid[0]*40 + oid[1])
+	for _, v := range oid[2:] {
+		length += base128IntLength(v)
+	}
+
+	der := make([]byte, 0, length)
+	der = appendBase128Int(der, oid[0]*40+oid[1])
+	for _, v := range oid[2:] {
+		der = appendBase128Int(der, v)
+	}
+	return OID{der}, nil
+}
+
+func base128IntLength(n uint64) int {
+	if n == 0 {
+		return 1
+	}
+	return (bits.Len64(n) + 6) / 7
+}
+
+func appendBase128Int(dst []byte, n uint64) []byte {
+	for i := base128IntLength(n) - 1; i >= 0; i-- {
+		o := byte(n >> uint(i*7))
+		o &= 0x7f
+		if i != 0 {
+			o |= 0x80
+		}
+		dst = append(dst, o)
+	}
+	return dst
+}
+
+// Equal returns true when oid and other represent the same Object Identifier.
+func (oid OID) Equal(other OID) bool {
+	// There is only one possible DER encoding of
+	// each unique Object Identifier.
+	return bytes.Equal(oid.der, other.der)
+}
+
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, failed bool) {
+	offset = initOffset
+	var ret64 int64
+	for shifted := 0; offset < len(bytes); shifted++ {
+		// 5 * 7 bits per byte == 35 bits of data
+		// Thus the representation is either non-minimal or too large for an int32
+		if shifted == 5 {
+			failed = true
+			return
+		}
+		ret64 <<= 7
+		b := bytes[offset]
+		// integers should be minimally encoded, so the leading octet should
+		// never be 0x80
+		if shifted == 0 && b == 0x80 {
+			failed = true
+			return
+		}
+		ret64 |= int64(b & 0x7f)
+		offset++
+		if b&0x80 == 0 {
+			ret = int(ret64)
+			// Ensure that the returned value fits in an int on all platforms
+			if ret64 > math.MaxInt32 {
+				failed = true
+			}
+			return
+		}
+	}
+	failed = true
+	return
+}
+
+// EqualASN1OID returns whether an OID equals an asn1.ObjectIdentifier. If
+// asn1.ObjectIdentifier cannot represent the OID specified by oid, because
+// a component of OID requires more than 31 bits, it returns false.
+func (oid OID) EqualASN1OID(other asn1.ObjectIdentifier) bool {
+	if len(other) < 2 {
+		return false
+	}
+	v, offset, failed := parseBase128Int(oid.der, 0)
+	if failed {
+		// This should never happen, since we've already parsed the OID,
+		// but just in case.
+		return false
+	}
+	if v < 80 {
+		a, b := v/40, v%40
+		if other[0] != a || other[1] != b {
+			return false
+		}
+	} else {
+		a, b := 2, v-80
+		if other[0] != a || other[1] != b {
+			return false
+		}
+	}
+
+	i := 2
+	for ; offset < len(oid.der); i++ {
+		v, offset, failed = parseBase128Int(oid.der, offset)
+		if failed {
+			// Again, shouldn't happen, since we've already parsed
+			// the OID, but better safe than sorry.
+			return false
+		}
+		if v != other[i] {
+			return false
+		}
+	}
+
+	return i == len(other)
+}
+
+// String returns the string representation of the Object Identifier.
+func (oid OID) String() string {
+	var b strings.Builder
+	b.Grow(32)
+	const (
+		valSize         = 64 // size in bits of val.
+		bitsPerByte     = 7
+		maxValSafeShift = (1 << (valSize - bitsPerByte)) - 1
+	)
+	var (
+		start    = 0
+		val      = uint64(0)
+		numBuf   = make([]byte, 0, 21)
+		bigVal   *big.Int
+		overflow bool
+	)
+	for i, v := range oid.der {
+		curVal := v & 0x7F
+		valEnd := v&0x80 == 0
+		if valEnd {
+			if start != 0 {
+				b.WriteByte('.')
+			}
+		}
+		if !overflow && val > maxValSafeShift {
+			if bigVal == nil {
+				bigVal = new(big.Int)
+			}
+			bigVal = bigVal.SetUint64(val)
+			overflow = true
+		}
+		if overflow {
+			bigVal = bigVal.Lsh(bigVal, bitsPerByte).Or(bigVal, big.NewInt(int64(curVal)))
+			if valEnd {
+				if start == 0 {
+					b.WriteString("2.")
+					bigVal = bigVal.Sub(bigVal, big.NewInt(80))
+				}
+				numBuf = bigVal.Append(numBuf, 10)
+				b.Write(numBuf)
+				numBuf = numBuf[:0]
+				val = 0
+				start = i + 1
+				overflow = false
+			}
+			continue
+		}
+		val <<= bitsPerByte
+		val |= uint64(curVal)
+		if valEnd {
+			if start == 0 {
+				if val < 80 {
+					b.Write(strconv.AppendUint(numBuf, val/40, 10))
+					b.WriteByte('.')
+					b.Write(strconv.AppendUint(numBuf, val%40, 10))
+				} else {
+					b.WriteString("2.")
+					b.Write(strconv.AppendUint(numBuf, val-80, 10))
+				}
+			} else {
+				b.Write(strconv.AppendUint(numBuf, val, 10))
+			}
+			val = 0
+			start = i + 1
+		}
+	}
+	return b.String()
+}
+
+func (oid OID) toASN1OID() (asn1.ObjectIdentifier, bool) {
+	out := make([]int, 0, len(oid.der)+1)
+
+	const (
+		valSize         = 31 // amount of usable bits of val for OIDs.
+		bitsPerByte     = 7
+		maxValSafeShift = (1 << (valSize - bitsPerByte)) - 1
+	)
+
+	val := 0
+
+	for _, v := range oid.der {
+		if val > maxValSafeShift {
+			return nil, false
+		}
+
+		val <<= bitsPerByte
+		val |= int(v & 0x7F)
+
+		if v&0x80 == 0 {
+			if len(out) == 0 {
+				if val < 80 {
+					out = append(out, val/40)
+					out = append(out, val%40)
+				} else {
+					out = append(out, 2)
+					out = append(out, val-80)
+				}
+				val = 0
+				continue
+			}
+			out = append(out, val)
+			val = 0
+		}
+	}
+
+	return out, true
+}
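
A short sketch of the exported surface this file adds (the OID value is arbitrary; the printed forms follow from the encoding above):

package example

import (
	"crypto/x509"
	"encoding/asn1"
	"fmt"
)

// oidDemo builds an OID from integer components and compares it with the
// legacy asn1.ObjectIdentifier form, which cannot hold components wider than
// 31 bits.
func oidDemo() error {
	oid, err := x509.OIDFromInts([]uint64{1, 2, 840, 113549, 1, 1, 11})
	if err != nil {
		return err
	}
	fmt.Println(oid.String()) // 1.2.840.113549.1.1.11

	legacy := asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
	fmt.Println(oid.EqualASN1OID(legacy)) // true
	return nil
}
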
diff --git a/src/crypto/x509/oid_test.go b/src/crypto/x509/oid_test.go
new file mode 100644
index 0000000..b2be107
--- /dev/null
+++ b/src/crypto/x509/oid_test.go
@@ -0,0 +1,110 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+	"encoding/asn1"
+	"math"
+	"testing"
+)
+
+func TestOID(t *testing.T) {
+	var tests = []struct {
+		raw   []byte
+		valid bool
+		str   string
+		ints  []uint64
+	}{
+		{[]byte{}, false, "", nil},
+		{[]byte{0x80, 0x01}, false, "", nil},
+		{[]byte{0x01, 0x80, 0x01}, false, "", nil},
+
+		{[]byte{1, 2, 3}, true, "0.1.2.3", []uint64{0, 1, 2, 3}},
+		{[]byte{41, 2, 3}, true, "1.1.2.3", []uint64{1, 1, 2, 3}},
+		{[]byte{86, 2, 3}, true, "2.6.2.3", []uint64{2, 6, 2, 3}},
+
+		{[]byte{41, 255, 255, 255, 127}, true, "1.1.268435455", []uint64{1, 1, 268435455}},
+		{[]byte{41, 0x87, 255, 255, 255, 127}, true, "1.1.2147483647", []uint64{1, 1, 2147483647}},
+		{[]byte{41, 255, 255, 255, 255, 127}, true, "1.1.34359738367", []uint64{1, 1, 34359738367}},
+		{[]byte{42, 255, 255, 255, 255, 255, 255, 255, 255, 127}, true, "1.2.9223372036854775807", []uint64{1, 2, 9223372036854775807}},
+		{[]byte{43, 0x81, 255, 255, 255, 255, 255, 255, 255, 255, 127}, true, "1.3.18446744073709551615", []uint64{1, 3, 18446744073709551615}},
+		{[]byte{44, 0x83, 255, 255, 255, 255, 255, 255, 255, 255, 127}, true, "1.4.36893488147419103231", nil},
+		{[]byte{85, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127}, true, "2.5.1180591620717411303423", nil},
+		{[]byte{85, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127}, true, "2.5.19342813113834066795298815", nil},
+
+		{[]byte{255, 255, 255, 127}, true, "2.268435375", []uint64{2, 268435375}},
+		{[]byte{0x87, 255, 255, 255, 127}, true, "2.2147483567", []uint64{2, 2147483567}},
+		{[]byte{255, 127}, true, "2.16303", []uint64{2, 16303}},
+		{[]byte{255, 255, 255, 255, 127}, true, "2.34359738287", []uint64{2, 34359738287}},
+		{[]byte{255, 255, 255, 255, 255, 255, 255, 255, 127}, true, "2.9223372036854775727", []uint64{2, 9223372036854775727}},
+		{[]byte{0x81, 255, 255, 255, 255, 255, 255, 255, 255, 127}, true, "2.18446744073709551535", []uint64{2, 18446744073709551535}},
+		{[]byte{0x83, 255, 255, 255, 255, 255, 255, 255, 255, 127}, true, "2.36893488147419103151", nil},
+		{[]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 127}, true, "2.1180591620717411303343", nil},
+		{[]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127}, true, "2.19342813113834066795298735", nil},
+	}
+
+	for _, v := range tests {
+		oid, ok := newOIDFromDER(v.raw)
+		if ok != v.valid {
+			if ok {
+				t.Errorf("%v: unexpected success while parsing: %v", v.raw, oid)
+			} else {
+				t.Errorf("%v: unexpected failure while parsing", v.raw)
+			}
+			continue
+		}
+
+		if !ok {
+			continue
+		}
+
+		if str := oid.String(); str != v.str {
+			t.Errorf("%v: oid.String() = %v, want; %v", v.raw, str, v.str)
+		}
+
+		var asn1OID asn1.ObjectIdentifier
+		for _, v := range v.ints {
+			if v > math.MaxInt32 {
+				asn1OID = nil
+				break
+			}
+			asn1OID = append(asn1OID, int(v))
+		}
+
+		o, ok := oid.toASN1OID()
+		if shouldOk := asn1OID != nil; shouldOk != ok {
+			if ok {
+				t.Errorf("%v: oid.toASN1OID() unexpected success", v.raw)
+			} else {
+				t.Errorf("%v: oid.toASN1OID() unexpected fauilure", v.raw)
+			}
+			continue
+		}
+
+		if asn1OID != nil {
+			if !o.Equal(asn1OID) {
+				t.Errorf("%v: oid.toASN1OID(asn1OID).Equal(oid) = false, want: true", v.raw)
+			}
+		}
+
+		if v.ints != nil {
+			oid2, err := OIDFromInts(v.ints)
+			if err != nil {
+				t.Errorf("%v: OIDFromInts() unexpected error: %v", v.raw, err)
+			}
+			if !oid2.Equal(oid) {
+				t.Errorf("%v: %#v.Equal(%#v) = false, want: true", v.raw, oid2, oid)
+			}
+		}
+	}
+}
+
+func mustNewOIDFromInts(t *testing.T, ints []uint64) OID {
+	oid, err := OIDFromInts(ints)
+	if err != nil {
+		t.Fatalf("OIDFromInts(%v) unexpected error: %v", ints, err)
+	}
+	return oid
+}
diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go
index 6695212..812b0d2 100644
--- a/src/crypto/x509/parser.go
+++ b/src/crypto/x509/parser.go
@@ -435,23 +435,23 @@
 	return extKeyUsages, unknownUsages, nil
 }
 
-func parseCertificatePoliciesExtension(der cryptobyte.String) ([]asn1.ObjectIdentifier, error) {
-	var oids []asn1.ObjectIdentifier
+func parseCertificatePoliciesExtension(der cryptobyte.String) ([]OID, error) {
+	var oids []OID
 	if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
 		return nil, errors.New("x509: invalid certificate policies")
 	}
 	for !der.Empty() {
 		var cp cryptobyte.String
-		if !der.ReadASN1(&cp, cryptobyte_asn1.SEQUENCE) {
+		var OIDBytes cryptobyte.String
+		if !der.ReadASN1(&cp, cryptobyte_asn1.SEQUENCE) || !cp.ReadASN1(&OIDBytes, cryptobyte_asn1.OBJECT_IDENTIFIER) {
 			return nil, errors.New("x509: invalid certificate policies")
 		}
-		var oid asn1.ObjectIdentifier
-		if !cp.ReadASN1ObjectIdentifier(&oid) {
+		oid, ok := newOIDFromDER(OIDBytes)
+		if !ok {
 			return nil, errors.New("x509: invalid certificate policies")
 		}
 		oids = append(oids, oid)
 	}
-
 	return oids, nil
 }
 
@@ -748,10 +748,16 @@
 				}
 				out.SubjectKeyId = skid
 			case 32:
-				out.PolicyIdentifiers, err = parseCertificatePoliciesExtension(e.Value)
+				out.Policies, err = parseCertificatePoliciesExtension(e.Value)
 				if err != nil {
 					return err
 				}
+				out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, 0, len(out.Policies))
+				for _, oid := range out.Policies {
+					if oid, ok := oid.toASN1OID(); ok {
+						out.PolicyIdentifiers = append(out.PolicyIdentifiers, oid)
+					}
+				}
 			default:
 				// Unknown extensions are recorded if critical.
 				unhandled = true
@@ -1011,7 +1017,7 @@
 // the actual encoded version, so the version for X.509v2 is 1.
 const x509v2Version = 1
 
-// ParseRevocationList parses a X509 v2 Certificate Revocation List from the given
+// ParseRevocationList parses a X509 v2 [Certificate] Revocation List from the given
 // ASN.1 DER data.
 func ParseRevocationList(der []byte) (*RevocationList, error) {
 	rl := &RevocationList{}
diff --git a/src/crypto/x509/pem_decrypt.go b/src/crypto/x509/pem_decrypt.go
index 682923a..4f96cde 100644
--- a/src/crypto/x509/pem_decrypt.go
+++ b/src/crypto/x509/pem_decrypt.go
@@ -113,7 +113,7 @@
 // password used to encrypt it and returns a slice of decrypted DER encoded
 // bytes. It inspects the DEK-Info header to determine the algorithm used for
 // decryption. If no DEK-Info header is present, an error is returned. If an
-// incorrect password is detected an IncorrectPasswordError is returned. Because
+// incorrect password is detected an [IncorrectPasswordError] is returned. Because
 // of deficiencies in the format, it's not always possible to detect an
 // incorrect password. In these cases no error will be returned but the
 // decrypted DER bytes will be random noise.
diff --git a/src/crypto/x509/pkcs1.go b/src/crypto/x509/pkcs1.go
index f9d3840..94c7bbb 100644
--- a/src/crypto/x509/pkcs1.go
+++ b/src/crypto/x509/pkcs1.go
@@ -41,7 +41,7 @@
 	E int
 }
 
-// ParsePKCS1PrivateKey parses an RSA private key in PKCS #1, ASN.1 DER form.
+// ParsePKCS1PrivateKey parses an [RSA] private key in PKCS #1, ASN.1 DER form.
 //
 // This kind of key is commonly encoded in PEM blocks of type "RSA PRIVATE KEY".
 func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) {
@@ -96,11 +96,11 @@
 	return key, nil
 }
 
-// MarshalPKCS1PrivateKey converts an RSA private key to PKCS #1, ASN.1 DER form.
+// MarshalPKCS1PrivateKey converts an [RSA] private key to PKCS #1, ASN.1 DER form.
 //
 // This kind of key is commonly encoded in PEM blocks of type "RSA PRIVATE KEY".
-// For a more flexible key format which is not RSA specific, use
-// MarshalPKCS8PrivateKey.
+// For a more flexible key format which is not [RSA] specific, use
+// [MarshalPKCS8PrivateKey].
 func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte {
 	key.Precompute()
 
@@ -132,7 +132,7 @@
 	return b
 }
 
-// ParsePKCS1PublicKey parses an RSA public key in PKCS #1, ASN.1 DER form.
+// ParsePKCS1PublicKey parses an [RSA] public key in PKCS #1, ASN.1 DER form.
 //
 // This kind of key is commonly encoded in PEM blocks of type "RSA PUBLIC KEY".
 func ParsePKCS1PublicKey(der []byte) (*rsa.PublicKey, error) {
@@ -161,7 +161,7 @@
 	}, nil
 }
 
-// MarshalPKCS1PublicKey converts an RSA public key to PKCS #1, ASN.1 DER form.
+// MarshalPKCS1PublicKey converts an [RSA] public key to PKCS #1, ASN.1 DER form.
 //
 // This kind of key is commonly encoded in PEM blocks of type "RSA PUBLIC KEY".
 func MarshalPKCS1PublicKey(key *rsa.PublicKey) []byte {
diff --git a/src/crypto/x509/pkcs8.go b/src/crypto/x509/pkcs8.go
index 74b2f99..08e9da4 100644
--- a/src/crypto/x509/pkcs8.go
+++ b/src/crypto/x509/pkcs8.go
@@ -27,8 +27,8 @@
 
 // ParsePKCS8PrivateKey parses an unencrypted private key in PKCS #8, ASN.1 DER form.
 //
-// It returns a *rsa.PrivateKey, an *ecdsa.PrivateKey, an ed25519.PrivateKey (not
-// a pointer), or an *ecdh.PrivateKey (for X25519). More types might be supported
+// It returns a *[rsa.PrivateKey], an *[ecdsa.PrivateKey], an [ed25519.PrivateKey] (not
+// a pointer), or an *[ecdh.PrivateKey] (for X25519). More types might be supported
 // in the future.
 //
 // This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY".
@@ -93,8 +93,8 @@
 
 // MarshalPKCS8PrivateKey converts a private key to PKCS #8, ASN.1 DER form.
 //
-// The following key types are currently supported: *rsa.PrivateKey,
-// *ecdsa.PrivateKey, ed25519.PrivateKey (not a pointer), and *ecdh.PrivateKey.
+// The following key types are currently supported: *[rsa.PrivateKey],
+// *[ecdsa.PrivateKey], [ed25519.PrivateKey] (not a pointer), and *[ecdh.PrivateKey].
 // Unsupported key types result in an error.
 //
 // This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY".
diff --git a/src/crypto/x509/pkix/pkix.go b/src/crypto/x509/pkix/pkix.go
index 22a50ee..dfc6abc 100644
--- a/src/crypto/x509/pkix/pkix.go
+++ b/src/crypto/x509/pkix/pkix.go
@@ -102,7 +102,7 @@
 }
 
 // AttributeTypeAndValueSET represents a set of ASN.1 sequences of
-// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10).
+// [AttributeTypeAndValue] sequences from RFC 2986 (PKCS #10).
 type AttributeTypeAndValueSET struct {
 	Type  asn1.ObjectIdentifier
 	Value [][]AttributeTypeAndValue `asn1:"set"`
@@ -119,7 +119,7 @@
 // Name represents an X.509 distinguished name. This only includes the common
 // elements of a DN. Note that Name is only an approximation of the X.509
 // structure. If an accurate representation is needed, asn1.Unmarshal the raw
-// subject or issuer as an RDNSequence.
+// subject or issuer as an [RDNSequence].
 type Name struct {
 	Country, Organization, OrganizationalUnit []string
 	Locality, Province                        []string
@@ -138,7 +138,7 @@
 	ExtraNames []AttributeTypeAndValue
 }
 
-// FillFromRDNSequence populates n from the provided RDNSequence.
+// FillFromRDNSequence populates n from the provided [RDNSequence].
 // Multi-entry RDNs are flattened, all entries are added to the
 // relevant n fields, and the grouping is not preserved.
 func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
@@ -211,7 +211,7 @@
 	return append(in, s)
 }
 
-// ToRDNSequence converts n into a single RDNSequence. The following
+// ToRDNSequence converts n into a single [RDNSequence]. The following
 // attributes are encoded as multi-value RDNs:
 //
 //   - Country
diff --git a/src/crypto/x509/root_linux.go b/src/crypto/x509/root_linux.go
index e32989b..8e79ccb 100644
--- a/src/crypto/x509/root_linux.go
+++ b/src/crypto/x509/root_linux.go
@@ -4,6 +4,8 @@
 
 package x509
 
+import "internal/goos"
+
 // Possible certificate files; stop after finding one.
 var certFiles = []string{
 	"/etc/ssl/certs/ca-certificates.crt",                // Debian/Ubuntu/Gentoo etc.
@@ -16,7 +18,15 @@
 
 // Possible directories with certificate files; all will be read.
 var certDirectories = []string{
-	"/etc/ssl/certs",               // SLES10/SLES11, https://golang.org/issue/12139
-	"/etc/pki/tls/certs",           // Fedora/RHEL
-	"/system/etc/security/cacerts", // Android
+	"/etc/ssl/certs",     // SLES10/SLES11, https://golang.org/issue/12139
+	"/etc/pki/tls/certs", // Fedora/RHEL
+}
+
+func init() {
+	if goos.IsAndroid == 1 {
+		certDirectories = append(certDirectories,
+			"/system/etc/security/cacerts",    // Android system roots
+			"/data/misc/keychain/certs-added", // User trusted CA folder
+		)
+	}
 }
diff --git a/src/crypto/x509/root_windows.go b/src/crypto/x509/root_windows.go
index 11a4257..4bea108 100644
--- a/src/crypto/x509/root_windows.go
+++ b/src/crypto/x509/root_windows.go
@@ -45,7 +45,7 @@
 
 	if opts.Intermediates != nil {
 		for i := 0; i < opts.Intermediates.len(); i++ {
-			intermediate, err := opts.Intermediates.cert(i)
+			intermediate, _, err := opts.Intermediates.cert(i)
 			if err != nil {
 				return nil, err
 			}
diff --git a/src/crypto/x509/sec1.go b/src/crypto/x509/sec1.go
index 6bfba0d..8e81bd4 100644
--- a/src/crypto/x509/sec1.go
+++ b/src/crypto/x509/sec1.go
@@ -42,7 +42,7 @@
 //
 // This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY".
 // For a more flexible key format which is not EC specific, use
-// MarshalPKCS8PrivateKey.
+// [MarshalPKCS8PrivateKey].
 func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
 	oid, ok := oidFromNamedCurve(key.Curve)
 	if !ok {
diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go
index 345d434..6efbff2 100644
--- a/src/crypto/x509/verify.go
+++ b/src/crypto/x509/verify.go
@@ -752,7 +752,7 @@
 		return nil, errNotParsed
 	}
 	for i := 0; i < opts.Intermediates.len(); i++ {
-		c, err := opts.Intermediates.cert(i)
+		c, _, err := opts.Intermediates.cert(i)
 		if err != nil {
 			return nil, fmt.Errorf("crypto/x509: error fetching intermediate: %w", err)
 		}
@@ -898,8 +898,8 @@
 		hintCert *Certificate
 	)
 
-	considerCandidate := func(certType int, candidate *Certificate) {
-		if alreadyInChain(candidate, currentChain) {
+	considerCandidate := func(certType int, candidate potentialParent) {
+		if candidate.cert.PublicKey == nil || alreadyInChain(candidate.cert, currentChain) {
 			return
 		}
 
@@ -912,29 +912,39 @@
 			return
 		}
 
-		if err := c.CheckSignatureFrom(candidate); err != nil {
+		if err := c.CheckSignatureFrom(candidate.cert); err != nil {
 			if hintErr == nil {
 				hintErr = err
-				hintCert = candidate
+				hintCert = candidate.cert
 			}
 			return
 		}
 
-		err = candidate.isValid(certType, currentChain, opts)
+		err = candidate.cert.isValid(certType, currentChain, opts)
 		if err != nil {
 			if hintErr == nil {
 				hintErr = err
-				hintCert = candidate
+				hintCert = candidate.cert
 			}
 			return
 		}
 
+		if candidate.constraint != nil {
+			if err := candidate.constraint(currentChain); err != nil {
+				if hintErr == nil {
+					hintErr = err
+					hintCert = candidate.cert
+				}
+				return
+			}
+		}
+
 		switch certType {
 		case rootCertificate:
-			chains = append(chains, appendToFreshChain(currentChain, candidate))
+			chains = append(chains, appendToFreshChain(currentChain, candidate.cert))
 		case intermediateCertificate:
 			var childChains [][]*Certificate
-			childChains, err = candidate.buildChains(appendToFreshChain(currentChain, candidate), sigChecks, opts)
+			childChains, err = candidate.cert.buildChains(appendToFreshChain(currentChain, candidate.cert), sigChecks, opts)
 			chains = append(chains, childChains...)
 		}
 	}
diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go
index 3551b47..8a7a5f6 100644
--- a/src/crypto/x509/verify_test.go
+++ b/src/crypto/x509/verify_test.go
@@ -16,9 +16,11 @@
 	"fmt"
 	"internal/testenv"
 	"math/big"
+	"os/exec"
 	"reflect"
 	"runtime"
 	"sort"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -368,7 +370,7 @@
 		},
 	},
 	{
-		// When there are two parents, one with a incorrect subject but matching SKID
+		// When there are two parents, one with an incorrect subject but matching SKID
 		// and one with a correct subject but missing SKID, the latter should be
 		// considered as a possible parent.
 		leaf:        leafMatchingAKIDMatchingIssuer,
@@ -1867,17 +1869,40 @@
 	}
 }
 
+func macosMajorVersion(t *testing.T) (int, error) {
+	cmd := testenv.Command(t, "sw_vers", "-productVersion")
+	out, err := cmd.Output()
+	if err != nil {
+		if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+			return 0, fmt.Errorf("%v: %v\n%s", cmd, err, ee.Stderr)
+		}
+		return 0, fmt.Errorf("%v: %v", cmd, err)
+	}
+	before, _, ok := strings.Cut(string(out), ".")
+	major, err := strconv.Atoi(before)
+	if !ok || err != nil {
+		return 0, fmt.Errorf("%v: unexpected output: %q", cmd, out)
+	}
+
+	return major, nil
+}
+
 func TestIssue51759(t *testing.T) {
 	if runtime.GOOS != "darwin" {
 		t.Skip("only affects darwin")
 	}
-	builder := testenv.Builder()
-	if builder == "" {
-		t.Skip("only run this test on the builders, as we have no reasonable way to gate tests on macOS versions elsewhere")
-	}
-	if builder == "darwin-amd64-10_14" || builder == "darwin-amd64-10_15" {
+
+	testenv.MustHaveExecPath(t, "sw_vers")
+	if vers, err := macosMajorVersion(t); err != nil {
+		if builder := testenv.Builder(); builder != "" {
+			t.Fatalf("unable to determine macOS version: %s", err)
+		} else {
+			t.Skip("unable to determine macOS version")
+		}
+	} else if vers < 11 {
 		t.Skip("behavior only enforced in macOS 11 and after")
 	}
+
 	// badCertData contains a cert that we parse as valid
 	// but that macOS SecCertificateCreateWithData rejects.
 	const badCertData = "0\x82\x01U0\x82\x01\a\xa0\x03\x02\x01\x02\x02\x01\x020\x05\x06\x03+ep0R1P0N\x06\x03U\x04\x03\x13Gderpkey8dc58100b2493614ee1692831a461f3f4dd3f9b3b088e244f887f81b4906ac260\x1e\x17\r220112235755Z\x17\r220313235755Z0R1P0N\x06\x03U\x04\x03\x13Gderpkey8dc58100b2493614ee1692831a461f3f4dd3f9b3b088e244f887f81b4906ac260*0\x05\x06\x03+ep\x03!\x00bA\xd8e\xadW\xcb\xefZ\x89\xb5\"\x1eR\x9d\xba\x0e:\x1042Q@\u007f\xbd\xfb{ks\x04\xd1£\x020\x000\x05\x06\x03+ep\x03A\x00[\xa7\x06y\x86(\x94\x97\x9eLwA\x00\x01x\xaa\xbc\xbd Ê]\n(΅!ف0\xf5\x9a%I\x19<\xffo\xf1\xeaaf@\xb1\xa7\xaf\xfd\xe9R\xc7\x0f\x8d&\xd5\xfc\x0f;Ϙ\x82\x84a\xbc\r"
@@ -1918,11 +1943,13 @@
 	Subject        string
 	Type           int
 	MutateTemplate func(*Certificate)
+	Constraint     func([]*Certificate) error
 }
 
 type rootDescription struct {
 	Subject        string
 	MutateTemplate func(*Certificate)
+	Constraint     func([]*Certificate) error
 }
 
 type trustGraphDescription struct {
@@ -1975,19 +2002,23 @@
 
 	certs := map[string]*Certificate{}
 	keys := map[string]crypto.Signer{}
-	roots := []*Certificate{}
+	rootPool := NewCertPool()
 	for _, r := range d.Roots {
 		k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
 		if err != nil {
 			t.Fatalf("failed to generate test key: %s", err)
 		}
 		root := genCertEdge(t, r.Subject, k, r.MutateTemplate, rootCertificate, nil, nil)
-		roots = append(roots, root)
+		if r.Constraint != nil {
+			rootPool.AddCertWithConstraint(root, r.Constraint)
+		} else {
+			rootPool.AddCert(root)
+		}
 		certs[r.Subject] = root
 		keys[r.Subject] = k
 	}
 
-	intermediates := []*Certificate{}
+	intermediatePool := NewCertPool()
 	var leaf *Certificate
 	for _, e := range d.Graph {
 		issuerCert, ok := certs[e.Issuer]
@@ -2013,18 +2044,14 @@
 		if e.Subject == d.Leaf {
 			leaf = cert
 		} else {
-			intermediates = append(intermediates, cert)
+			if e.Constraint != nil {
+				intermediatePool.AddCertWithConstraint(cert, e.Constraint)
+			} else {
+				intermediatePool.AddCert(cert)
+			}
 		}
 	}
 
-	rootPool, intermediatePool := NewCertPool(), NewCertPool()
-	for i := len(roots) - 1; i >= 0; i-- {
-		rootPool.AddCert(roots[i])
-	}
-	for i := len(intermediates) - 1; i >= 0; i-- {
-		intermediatePool.AddCert(intermediates[i])
-	}
-
 	return rootPool, intermediatePool, leaf
 }
 
@@ -2480,6 +2507,78 @@
 			},
 			expectedChains: []string{"CN=leaf -> CN=inter -> CN=root"},
 		},
+		{
+			// A code constraint on the root, applying to one of two intermediates in the graph, should
+			// result in only one valid chain.
+			name: "code constrained root, two paths, one valid",
+			graph: trustGraphDescription{
+				Roots: []rootDescription{{Subject: "root", Constraint: func(chain []*Certificate) error {
+					for _, c := range chain {
+						if c.Subject.CommonName == "inter a" {
+							return errors.New("bad")
+						}
+					}
+					return nil
+				}}},
+				Leaf: "leaf",
+				Graph: []trustGraphEdge{
+					{
+						Issuer:  "root",
+						Subject: "inter a",
+						Type:    intermediateCertificate,
+					},
+					{
+						Issuer:  "root",
+						Subject: "inter b",
+						Type:    intermediateCertificate,
+					},
+					{
+						Issuer:  "inter a",
+						Subject: "inter c",
+						Type:    intermediateCertificate,
+					},
+					{
+						Issuer:  "inter b",
+						Subject: "inter c",
+						Type:    intermediateCertificate,
+					},
+					{
+						Issuer:  "inter c",
+						Subject: "leaf",
+						Type:    leafCertificate,
+					},
+				},
+			},
+			expectedChains: []string{"CN=leaf -> CN=inter c -> CN=inter b -> CN=root"},
+		},
+		{
+			// A code constraint on the root, applying to the only path, should result in an error.
+			name: "code constrained root, one invalid path",
+			graph: trustGraphDescription{
+				Roots: []rootDescription{{Subject: "root", Constraint: func(chain []*Certificate) error {
+					for _, c := range chain {
+						if c.Subject.CommonName == "leaf" {
+							return errors.New("bad")
+						}
+					}
+					return nil
+				}}},
+				Leaf: "leaf",
+				Graph: []trustGraphEdge{
+					{
+						Issuer:  "root",
+						Subject: "inter",
+						Type:    intermediateCertificate,
+					},
+					{
+						Issuer:  "inter",
+						Subject: "leaf",
+						Type:    leafCertificate,
+					},
+				},
+			},
+			expectedErr: "x509: certificate signed by unknown authority (possibly because of \"bad\" while trying to verify candidate authority certificate \"root\")",
+		},
 	}
 
 	for _, tc := range tests {
@@ -2693,3 +2792,22 @@
 	}
 
 }
+
+func TestVerifyNilPubKey(t *testing.T) {
+	c := &Certificate{
+		RawIssuer:      []byte{1, 2, 3},
+		AuthorityKeyId: []byte{1, 2, 3},
+	}
+	opts := &VerifyOptions{}
+	opts.Roots = NewCertPool()
+	r := &Certificate{
+		RawSubject:   []byte{1, 2, 3},
+		SubjectKeyId: []byte{1, 2, 3},
+	}
+	opts.Roots.AddCert(r)
+
+	_, err := c.buildChains([]*Certificate{r}, nil, opts)
+	if _, ok := err.(UnknownAuthorityError); !ok {
+		t.Fatalf("buildChains returned unexpected error, got: %v, want %v", err, UnknownAuthorityError{})
+	}
+}
diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go
index 9d80b1d..f33283b 100644
--- a/src/crypto/x509/x509.go
+++ b/src/crypto/x509/x509.go
@@ -63,8 +63,8 @@
 // ParsePKIXPublicKey parses a public key in PKIX, ASN.1 DER form. The encoded
 // public key is a SubjectPublicKeyInfo structure (see RFC 5280, Section 4.1).
 //
-// It returns a *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey,
-// ed25519.PublicKey (not a pointer), or *ecdh.PublicKey (for X25519).
+// It returns a *[rsa.PublicKey], *[dsa.PublicKey], *[ecdsa.PublicKey],
+// [ed25519.PublicKey] (not a pointer), or *[ecdh.PublicKey] (for X25519).
 // More types might be supported in the future.
 //
 // This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY".
@@ -142,8 +142,8 @@
 // The encoded public key is a SubjectPublicKeyInfo structure
 // (see RFC 5280, Section 4.1).
 //
-// The following key types are currently supported: *rsa.PublicKey,
-// *ecdsa.PublicKey, ed25519.PublicKey (not a pointer), and *ecdh.PublicKey.
+// The following key types are currently supported: *[rsa.PublicKey],
+// *[ecdsa.PublicKey], [ed25519.PublicKey] (not a pointer), and *[ecdh.PublicKey].
 // Unsupported key types result in an error.
 //
 // This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY".
@@ -772,14 +772,22 @@
 	// CRL Distribution Points
 	CRLDistributionPoints []string
 
+	// PolicyIdentifiers contains asn1.ObjectIdentifiers, the components
+	// of which are limited to int32. If a certificate contains a policy which
+	// cannot be represented by asn1.ObjectIdentifier, it will not be included in
+	// PolicyIdentifiers, but will be present in Policies, which contains all parsed
+	// policy OIDs.
 	PolicyIdentifiers []asn1.ObjectIdentifier
+
+	// Policies contains all policy identifiers included in the certificate.
+	Policies []OID
 }
 
 // ErrUnsupportedAlgorithm results from attempting to perform an operation that
 // involves algorithms that are not currently implemented.
 var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented")
 
-// An InsecureAlgorithmError indicates that the SignatureAlgorithm used to
+// An InsecureAlgorithmError indicates that the [SignatureAlgorithm] used to
 // generate the signature is not secure, and the signature has been rejected.
 //
 // To temporarily restore support for SHA-1 signatures, include the value
@@ -881,6 +889,7 @@
 		if details.algo == algo {
 			hashType = details.hash
 			pubKeyAlgo = details.pubKeyAlgo
+			break
 		}
 	}
 
@@ -941,7 +950,7 @@
 
 // CheckCRLSignature checks that the signature in crl is from c.
 //
-// Deprecated: Use RevocationList.CheckSignatureFrom instead.
+// Deprecated: Use [RevocationList.CheckSignatureFrom] instead.
 func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) error {
 	algo := getSignatureAlgorithmFromAI(crl.SignatureAlgorithm)
 	return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
@@ -1092,6 +1101,8 @@
 	return nil
 }
 
+var usePoliciesField = godebug.New("x509usepolicies")
+
 func buildCertExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte, subjectKeyId []byte) (ret []pkix.Extension, err error) {
 	ret = make([]pkix.Extension, 10 /* maximum number of elements. */)
 	n := 0
@@ -1177,9 +1188,10 @@
 		n++
 	}
 
-	if len(template.PolicyIdentifiers) > 0 &&
+	usePolicies := usePoliciesField.Value() == "1"
+	if ((!usePolicies && len(template.PolicyIdentifiers) > 0) || (usePolicies && len(template.Policies) > 0)) &&
 		!oidInExtensions(oidExtensionCertificatePolicies, template.ExtraExtensions) {
-		ret[n], err = marshalCertificatePolicies(template.PolicyIdentifiers)
+		ret[n], err = marshalCertificatePolicies(template.Policies, template.PolicyIdentifiers)
 		if err != nil {
 			return nil, err
 		}
@@ -1364,14 +1376,35 @@
 	return ext, err
 }
 
-func marshalCertificatePolicies(policyIdentifiers []asn1.ObjectIdentifier) (pkix.Extension, error) {
+func marshalCertificatePolicies(policies []OID, policyIdentifiers []asn1.ObjectIdentifier) (pkix.Extension, error) {
 	ext := pkix.Extension{Id: oidExtensionCertificatePolicies}
-	policies := make([]policyInformation, len(policyIdentifiers))
-	for i, policy := range policyIdentifiers {
-		policies[i].Policy = policy
-	}
+
+	b := cryptobyte.NewBuilder(make([]byte, 0, 128))
+	b.AddASN1(cryptobyte_asn1.SEQUENCE, func(child *cryptobyte.Builder) {
+		if usePoliciesField.Value() == "1" {
+			usePoliciesField.IncNonDefault()
+			for _, v := range policies {
+				child.AddASN1(cryptobyte_asn1.SEQUENCE, func(child *cryptobyte.Builder) {
+					child.AddASN1(cryptobyte_asn1.OBJECT_IDENTIFIER, func(child *cryptobyte.Builder) {
+						if len(v.der) == 0 {
+							child.SetError(errors.New("invalid policy object identifier"))
+							return
+						}
+						child.AddBytes(v.der)
+					})
+				})
+			}
+		} else {
+			for _, v := range policyIdentifiers {
+				child.AddASN1(cryptobyte_asn1.SEQUENCE, func(child *cryptobyte.Builder) {
+					child.AddASN1ObjectIdentifier(v)
+				})
+			}
+		}
+	})
+
 	var err error
-	ext.Value, err = asn1.Marshal(policies)
+	ext.Value, err = b.Bytes()
 	return ext, err
 }
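
Outside the standard library, the same certificatePolicies encoding can be sketched with the external golang.org/x/crypto/cryptobyte module, which the vendored cryptobyte package used above mirrors. A minimal, self-contained sketch (the OID value is arbitrary):

	package main

	import (
		encasn1 "encoding/asn1"
		"fmt"

		"golang.org/x/crypto/cryptobyte"
		cbasn1 "golang.org/x/crypto/cryptobyte/asn1"
	)

	func main() {
		// certificatePolicies is a SEQUENCE OF PolicyInformation; each
		// PolicyInformation is a SEQUENCE whose first element is the policy OID.
		b := cryptobyte.NewBuilder(nil)
		b.AddASN1(cbasn1.SEQUENCE, func(child *cryptobyte.Builder) {
			child.AddASN1(cbasn1.SEQUENCE, func(child *cryptobyte.Builder) {
				child.AddASN1ObjectIdentifier(encasn1.ObjectIdentifier{1, 2, 3})
			})
		})
		der, err := b.Bytes()
		fmt.Printf("%x %v\n", der, err) // DER for a policies extension value holding OID 1.2.3
	}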
 
@@ -1511,7 +1544,8 @@
 //   - PermittedEmailAddresses
 //   - PermittedIPRanges
 //   - PermittedURIDomains
-//   - PolicyIdentifiers
+//   - PolicyIdentifiers (see note below)
+//   - Policies (see note below)
 //   - SerialNumber
 //   - SignatureAlgorithm
 //   - Subject
@@ -1535,6 +1569,13 @@
 //
 // If SubjectKeyId from template is empty and the template is a CA, SubjectKeyId
 // will be generated from the hash of the public key.
+//
+// The PolicyIdentifiers and Policies fields are both used to marshal
+// certificate policy OIDs. By default, only PolicyIdentifiers is marshaled,
+// but if the GODEBUG setting "x509usepolicies" has the value "1", the Policies
+// field will be marshaled instead of the PolicyIdentifiers field. The Policies
+// field can be used to marshal policy OIDs which have components larger than
+// 31 bits.
 func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv any) ([]byte, error) {
 	key, ok := priv.(crypto.Signer)
 	if !ok {
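
A usage sketch of the Policies/GODEBUG behavior documented above (the key type, names, and OID values are arbitrary; x509.OIDFromInts is the Go 1.22 constructor for OID):

	package main

	import (
		"crypto/ecdsa"
		"crypto/elliptic"
		"crypto/rand"
		"crypto/x509"
		"crypto/x509/pkix"
		"fmt"
		"math/big"
		"time"
	)

	func main() {
		// A policy OID with a component above 31 bits: representable by
		// x509.OID but not by asn1.ObjectIdentifier.
		oid, err := x509.OIDFromInts([]uint64{1, 2, 3, 1 << 32})
		if err != nil {
			panic(err)
		}
		tmpl := &x509.Certificate{
			SerialNumber: big.NewInt(1),
			Subject:      pkix.Name{CommonName: "policy demo"},
			NotBefore:    time.Now(),
			NotAfter:     time.Now().Add(time.Hour),
			Policies:     []x509.OID{oid},
		}
		key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
		if err != nil {
			panic(err)
		}
		// The Policies field is only marshaled when the process runs with
		// GODEBUG=x509usepolicies=1; otherwise PolicyIdentifiers is used.
		der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
		if err != nil {
			panic(err)
		}
		cert, err := x509.ParseCertificate(der)
		if err != nil {
			panic(err)
		}
		fmt.Println(cert.Policies) // empty unless x509usepolicies=1 was set at process start
	}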
@@ -1679,7 +1720,7 @@
 // will transparently handle PEM encoding as long as there isn't any leading
 // garbage.
 //
-// Deprecated: Use ParseRevocationList instead.
+// Deprecated: Use [ParseRevocationList] instead.
 func ParseCRL(crlBytes []byte) (*pkix.CertificateList, error) {
 	if bytes.HasPrefix(crlBytes, pemCRLPrefix) {
 		block, _ := pem.Decode(crlBytes)
@@ -1692,7 +1733,7 @@
 
 // ParseDERCRL parses a DER encoded CRL from the given bytes.
 //
-// Deprecated: Use ParseRevocationList instead.
+// Deprecated: Use [ParseRevocationList] instead.
 func ParseDERCRL(derBytes []byte) (*pkix.CertificateList, error) {
 	certList := new(pkix.CertificateList)
 	if rest, err := asn1.Unmarshal(derBytes, certList); err != nil {
@@ -1707,7 +1748,7 @@
 // contains the given list of revoked certificates.
 //
 // Deprecated: this method does not generate an RFC 5280 conformant X.509 v2 CRL.
-// To generate a standards compliant CRL, use CreateRevocationList instead.
+// To generate a standards compliant CRL, use [CreateRevocationList] instead.
 func (c *Certificate) CreateCRL(rand io.Reader, priv any, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
 	key, ok := priv.(crypto.Signer)
 	if !ok {
@@ -2192,7 +2233,7 @@
 	ExtraExtensions []pkix.Extension
 }
 
-// RevocationList represents a Certificate Revocation List (CRL) as specified
+// RevocationList represents a [Certificate] Revocation List (CRL) as specified
 // by RFC 5280.
 type RevocationList struct {
 	// Raw contains the complete ASN.1 DER content of the CRL (tbsCertList,
@@ -2277,13 +2318,13 @@
 	Extensions          []pkix.Extension          `asn1:"tag:0,optional,explicit"`
 }
 
-// CreateRevocationList creates a new X.509 v2 Certificate Revocation List,
+// CreateRevocationList creates a new X.509 v2 [Certificate] Revocation List,
 // according to RFC 5280, based on template.
 //
 // The CRL is signed by priv which should be the private key associated with
 // the public key in the issuer certificate.
 //
-// The issuer may not be nil, and the crlSign bit must be set in KeyUsage in
+// The issuer may not be nil, and the crlSign bit must be set in [KeyUsage] in
 // order to use it as a CRL issuer.
 //
 // The issuer distinguished name CRL field and authority key identifier
diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go
index 19deeab..ead0453 100644
--- a/src/crypto/x509/x509_test.go
+++ b/src/crypto/x509/x509_test.go
@@ -24,12 +24,14 @@
 	"fmt"
 	"internal/testenv"
 	"io"
+	"math"
 	"math/big"
 	"net"
 	"net/url"
 	"os/exec"
 	"reflect"
 	"runtime"
+	"slices"
 	"strings"
 	"testing"
 	"time"
@@ -671,6 +673,7 @@
 			URIs:           []*url.URL{parseURI("https://foo.com/wibble#foo")},
 
 			PolicyIdentifiers:       []asn1.ObjectIdentifier{[]int{1, 2, 3}},
+			Policies:                []OID{mustNewOIDFromInts(t, []uint64{1, 2, 3, math.MaxUint32, math.MaxUint64})},
 			PermittedDNSDomains:     []string{".example.com", "example.com"},
 			ExcludedDNSDomains:      []string{"bar.example.com"},
 			PermittedIPRanges:       []*net.IPNet{parseCIDR("192.168.1.1/16"), parseCIDR("1.2.3.4/8")},
@@ -3652,7 +3655,7 @@
 	}
 
 	// This is an unrelated OCSP response, which will fail signature verification
-	// but shouldn't return a InsecureAlgorithmError, since SHA1 should be allowed
+	// but shouldn't return an InsecureAlgorithmError, since SHA1 should be allowed
 	// for OCSP.
 	ocspTBSHex := "30819fa2160414884451ff502a695e2d88f421bad90cf2cecbea7c180f32303133303631383037323434335a30743072304a300906052b0e03021a0500041448b60d38238df8456e4ee5843ea394111802979f0414884451ff502a695e2d88f421bad90cf2cecbea7c021100f78b13b946fc9635d8ab49de9d2148218000180f32303133303631383037323434335aa011180f32303133303632323037323434335a"
 	ocspTBS, err := hex.DecodeString(ocspTBSHex)
@@ -3917,3 +3920,82 @@
 		t.Fatal("ParseCertificateRequest should succeed when parsing CSR with duplicate attributes")
 	}
 }
+
+func TestCertificateOIDPolicies(t *testing.T) {
+	template := Certificate{
+		SerialNumber:      big.NewInt(1),
+		Subject:           pkix.Name{CommonName: "Cert"},
+		NotBefore:         time.Unix(1000, 0),
+		NotAfter:          time.Unix(100000, 0),
+		PolicyIdentifiers: []asn1.ObjectIdentifier{[]int{1, 2, 3}},
+	}
+
+	var expectPolicyIdentifiers = []asn1.ObjectIdentifier{
+		[]int{1, 2, 3},
+	}
+
+	var expectPolicies = []OID{
+		mustNewOIDFromInts(t, []uint64{1, 2, 3}),
+	}
+
+	certDER, err := CreateCertificate(rand.Reader, &template, &template, rsaPrivateKey.Public(), rsaPrivateKey)
+	if err != nil {
+		t.Fatalf("CreateCertificate() unexpected error: %v", err)
+	}
+
+	cert, err := ParseCertificate(certDER)
+	if err != nil {
+		t.Fatalf("ParseCertificate() unexpected error: %v", err)
+	}
+
+	if !slices.EqualFunc(cert.PolicyIdentifiers, expectPolicyIdentifiers, slices.Equal) {
+		t.Errorf("cert.PolicyIdentifiers = %v, want: %v", cert.PolicyIdentifiers, expectPolicyIdentifiers)
+	}
+
+	if !slices.EqualFunc(cert.Policies, expectPolicies, OID.Equal) {
+		t.Errorf("cert.Policies = %v, want: %v", cert.Policies, expectPolicies)
+	}
+}
+
+func TestCertificatePoliciesGODEBUG(t *testing.T) {
+	template := Certificate{
+		SerialNumber:      big.NewInt(1),
+		Subject:           pkix.Name{CommonName: "Cert"},
+		NotBefore:         time.Unix(1000, 0),
+		NotAfter:          time.Unix(100000, 0),
+		PolicyIdentifiers: []asn1.ObjectIdentifier{[]int{1, 2, 3}},
+		Policies:          []OID{mustNewOIDFromInts(t, []uint64{1, 2, math.MaxUint32 + 1})},
+	}
+
+	expectPolicies := []OID{mustNewOIDFromInts(t, []uint64{1, 2, 3})}
+	certDER, err := CreateCertificate(rand.Reader, &template, &template, rsaPrivateKey.Public(), rsaPrivateKey)
+	if err != nil {
+		t.Fatalf("CreateCertificate() unexpected error: %v", err)
+	}
+
+	cert, err := ParseCertificate(certDER)
+	if err != nil {
+		t.Fatalf("ParseCertificate() unexpected error: %v", err)
+	}
+
+	if !slices.EqualFunc(cert.Policies, expectPolicies, OID.Equal) {
+		t.Errorf("cert.Policies = %v, want: %v", cert.Policies, expectPolicies)
+	}
+
+	t.Setenv("GODEBUG", "x509usepolicies=1")
+	expectPolicies = []OID{mustNewOIDFromInts(t, []uint64{1, 2, math.MaxUint32 + 1})}
+
+	certDER, err = CreateCertificate(rand.Reader, &template, &template, rsaPrivateKey.Public(), rsaPrivateKey)
+	if err != nil {
+		t.Fatalf("CreateCertificate() unexpected error: %v", err)
+	}
+
+	cert, err = ParseCertificate(certDER)
+	if err != nil {
+		t.Fatalf("ParseCertificate() unexpected error: %v", err)
+	}
+
+	if !slices.EqualFunc(cert.Policies, expectPolicies, OID.Equal) {
+		t.Errorf("cert.Policies = %v, want: %v", cert.Policies, expectPolicies)
+	}
+}
diff --git a/src/database/sql/convert.go b/src/database/sql/convert.go
index ffc4e49..cca5d15 100644
--- a/src/database/sql/convert.go
+++ b/src/database/sql/convert.go
@@ -203,7 +203,6 @@
 	}
 
 	return nvargs, nil
-
 }
 
 // convertAssign is the same as convertAssignRows, but without the optional
@@ -529,7 +528,7 @@
 	return
 }
 
-var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+var valuerReflectType = reflect.TypeFor[driver.Valuer]()
 
 // callValuerValue returns vr.Value(), with one exception:
 // If vr.Value is an auto-generated method on a pointer type and the
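
The reflect.TypeFor function used above is new in Go 1.22 and is equivalent to the older reflect.TypeOf((*T)(nil)).Elem() idiom for interface types; a quick check:

	package main

	import (
		"database/sql/driver"
		"fmt"
		"reflect"
	)

	func main() {
		// Both expressions yield the reflect.Type of the driver.Valuer interface.
		byTypeOf := reflect.TypeOf((*driver.Valuer)(nil)).Elem()
		byTypeFor := reflect.TypeFor[driver.Valuer]()
		fmt.Println(byTypeOf == byTypeFor) // true
	}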
diff --git a/src/database/sql/driver/driver.go b/src/database/sql/driver/driver.go
index daf282b..da310bf 100644
--- a/src/database/sql/driver/driver.go
+++ b/src/database/sql/driver/driver.go
@@ -5,36 +5,37 @@
 // Package driver defines interfaces to be implemented by database
 // drivers as used by package sql.
 //
-// Most code should use package sql.
+// Most code should use the [database/sql] package.
 //
 // The driver interface has evolved over time. Drivers should implement
-// Connector and DriverContext interfaces.
-// The Connector.Connect and Driver.Open methods should never return ErrBadConn.
-// ErrBadConn should only be returned from Validator, SessionResetter, or
+// [Connector] and [DriverContext] interfaces.
+// The Connector.Connect and Driver.Open methods should never return [ErrBadConn].
+// [ErrBadConn] should only be returned from [Validator], [SessionResetter], or
 // a query method if the connection is already in an invalid (e.g. closed) state.
 //
-// All Conn implementations should implement the following interfaces:
-// Pinger, SessionResetter, and Validator.
+// All [Conn] implementations should implement the following interfaces:
+// [Pinger], [SessionResetter], and [Validator].
 //
-// If named parameters or context are supported, the driver's Conn should implement:
-// ExecerContext, QueryerContext, ConnPrepareContext, and ConnBeginTx.
+// If named parameters or context are supported, the driver's [Conn] should implement:
+// [ExecerContext], [QueryerContext], [ConnPrepareContext], and [ConnBeginTx].
 //
-// To support custom data types, implement NamedValueChecker. NamedValueChecker
+// To support custom data types, implement [NamedValueChecker]. [NamedValueChecker]
 // also allows queries to accept per-query options as a parameter by returning
-// ErrRemoveArgument from CheckNamedValue.
+// [ErrRemoveArgument] from CheckNamedValue.
 //
-// If multiple result sets are supported, Rows should implement RowsNextResultSet.
+// If multiple result sets are supported, [Rows] should implement [RowsNextResultSet].
 // If the driver knows how to describe the types present in the returned result
-// it should implement the following interfaces: RowsColumnTypeScanType,
-// RowsColumnTypeDatabaseTypeName, RowsColumnTypeLength, RowsColumnTypeNullable,
-// and RowsColumnTypePrecisionScale. A given row value may also return a Rows
+// it should implement the following interfaces: [RowsColumnTypeScanType],
+// [RowsColumnTypeDatabaseTypeName], [RowsColumnTypeLength], [RowsColumnTypeNullable],
+// and [RowsColumnTypePrecisionScale]. A given row value may also return a [Rows]
 // type, which may represent a database cursor value.
 //
-// Before a connection is returned to the connection pool after use, IsValid is
-// called if implemented. Before a connection is reused for another query,
-// ResetSession is called if implemented. If a connection is never returned to the
-// connection pool but immediately reused, then ResetSession is called prior to
-// reuse but IsValid is not called.
+// If a [Conn] implements [Validator], then the IsValid method is called
+// before returning the connection to the connection pool. If an entry in the
+// connection pool implements [SessionResetter], then ResetSession
+// is called before reusing the connection for another query. If a connection is
+// never returned to the connection pool but is immediately reused, then
+// ResetSession is called prior to reuse but IsValid is not called.
 package driver
 
 import (
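
A minimal sketch of a hypothetical connection type wiring up the optional interfaces recommended by the package documentation above (fakeConn and its behavior are assumptions for illustration, not a real driver):

	package fakedriver

	import (
		"context"
		"database/sql/driver"
		"errors"
	)

	type fakeConn struct {
		closed bool
	}

	// Required driver.Conn methods.
	func (c *fakeConn) Prepare(query string) (driver.Stmt, error) {
		return nil, errors.New("fakedriver: Prepare not implemented in this sketch")
	}

	func (c *fakeConn) Close() error {
		c.closed = true
		return nil
	}

	func (c *fakeConn) Begin() (driver.Tx, error) {
		return nil, errors.New("fakedriver: Begin not implemented in this sketch")
	}

	// Optional interfaces recommended by the package documentation.
	func (c *fakeConn) Ping(ctx context.Context) error { return nil }

	func (c *fakeConn) IsValid() bool { return !c.closed }

	func (c *fakeConn) ResetSession(ctx context.Context) error {
		if c.closed {
			return driver.ErrBadConn // signal a bad connection to database/sql
		}
		return nil
	}

	// Compile-time checks that the interfaces are satisfied.
	var (
		_ driver.Conn            = (*fakeConn)(nil)
		_ driver.Pinger          = (*fakeConn)(nil)
		_ driver.Validator       = (*fakeConn)(nil)
		_ driver.SessionResetter = (*fakeConn)(nil)
	)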
@@ -44,7 +45,7 @@
 )
 
 // Value is a value that drivers must be able to handle.
-// It is either nil, a type handled by a database driver's NamedValueChecker
+// It is either nil, a type handled by a database driver's [NamedValueChecker]
 // interface, or an instance of one of these types:
 //
 //	int64
@@ -54,10 +55,10 @@
 //	string
 //	time.Time
 //
-// If the driver supports cursors, a returned Value may also implement the Rows interface
+// If the driver supports cursors, a returned Value may also implement the [Rows] interface
 // in this package. This is used, for example, when a user selects a cursor
-// such as "select cursor(select * from my_table) from dual". If the Rows
-// from the select is closed, the cursor Rows will also be closed.
+// such as "select cursor(select * from my_table) from dual". If the [Rows]
+// from the select is closed, the cursor [Rows] will also be closed.
 type Value any
 
 // NamedValue holds both the value name and value.
@@ -78,7 +79,7 @@
 // Driver is the interface that must be implemented by a database
 // driver.
 //
-// Database drivers may implement DriverContext for access
+// Database drivers may implement [DriverContext] for access
 // to contexts and to parse the name only once for a pool of connections,
 // instead of once per connection.
 type Driver interface {
@@ -94,12 +95,12 @@
 	Open(name string) (Conn, error)
 }
 
-// If a Driver implements DriverContext, then sql.DB will call
-// OpenConnector to obtain a Connector and then invoke
-// that Connector's Connect method to obtain each needed connection,
-// instead of invoking the Driver's Open method for each connection.
+// If a [Driver] implements DriverContext, then [database/sql.DB] will call
+// OpenConnector to obtain a [Connector] and then invoke
+// that [Connector]'s Connect method to obtain each needed connection,
+// instead of invoking the [Driver]'s Open method for each connection.
 // The two-step sequence allows drivers to parse the name just once
-// and also provides access to per-Conn contexts.
+// and also provides access to per-[Conn] contexts.
 type DriverContext interface {
 	// OpenConnector must parse the name in the same format that Driver.Open
 	// parses the name parameter.
@@ -110,14 +111,14 @@
 // and can create any number of equivalent Conns for use
 // by multiple goroutines.
 //
-// A Connector can be passed to sql.OpenDB, to allow drivers
-// to implement their own sql.DB constructors, or returned by
-// DriverContext's OpenConnector method, to allow drivers
+// A Connector can be passed to [database/sql.OpenDB], to allow drivers
+// to implement their own [database/sql.DB] constructors, or returned by
+// [DriverContext]'s OpenConnector method, to allow drivers
 // access to context and to avoid repeated parsing of driver
 // configuration.
 //
-// If a Connector implements io.Closer, the sql package's DB.Close
-// method will call Close and return error (if any).
+// If a Connector implements [io.Closer], the [database/sql.DB.Close]
+// method will call the Close method and return error (if any).
 type Connector interface {
 	// Connect returns a connection to the database.
 	// Connect may return a cached connection (one previously
@@ -147,9 +148,9 @@
 // documented.
 var ErrSkip = errors.New("driver: skip fast-path; continue as if unimplemented")
 
-// ErrBadConn should be returned by a driver to signal to the sql
-// package that a driver.Conn is in a bad state (such as the server
-// having earlier closed the connection) and the sql package should
+// ErrBadConn should be returned by a driver to signal to the [database/sql]
+// package that a driver.[Conn] is in a bad state (such as the server
+// having earlier closed the connection) and the [database/sql] package should
 // retry on a new connection.
 //
 // To prevent duplicate operations, ErrBadConn should NOT be returned
@@ -157,69 +158,69 @@
 // performed the operation. Even if the server sends back an error,
 // you shouldn't return ErrBadConn.
 //
-// Errors will be checked using errors.Is. An error may
+// Errors will be checked using [errors.Is]. An error may
 // wrap ErrBadConn or implement the Is(error) bool method.
 var ErrBadConn = errors.New("driver: bad connection")
 
-// Pinger is an optional interface that may be implemented by a Conn.
+// Pinger is an optional interface that may be implemented by a [Conn].
 //
-// If a Conn does not implement Pinger, the sql package's DB.Ping and
-// DB.PingContext will check if there is at least one Conn available.
+// If a [Conn] does not implement Pinger, the [database/sql.DB.Ping] and
+// [database/sql.DB.PingContext] will check if there is at least one [Conn] available.
 //
-// If Conn.Ping returns ErrBadConn, DB.Ping and DB.PingContext will remove
-// the Conn from pool.
+// If Conn.Ping returns [ErrBadConn], [database/sql.DB.Ping] and [database/sql.DB.PingContext] will remove
+// the [Conn] from pool.
 type Pinger interface {
 	Ping(ctx context.Context) error
 }
 
-// Execer is an optional interface that may be implemented by a Conn.
+// Execer is an optional interface that may be implemented by a [Conn].
 //
-// If a Conn implements neither ExecerContext nor Execer,
-// the sql package's DB.Exec will first prepare a query, execute the statement,
+// If a [Conn] implements neither [ExecerContext] nor [Execer],
+// the [database/sql.DB.Exec] will first prepare a query, execute the statement,
 // and then close the statement.
 //
-// Exec may return ErrSkip.
+// Exec may return [ErrSkip].
 //
-// Deprecated: Drivers should implement ExecerContext instead.
+// Deprecated: Drivers should implement [ExecerContext] instead.
 type Execer interface {
 	Exec(query string, args []Value) (Result, error)
 }
 
-// ExecerContext is an optional interface that may be implemented by a Conn.
+// ExecerContext is an optional interface that may be implemented by a [Conn].
 //
-// If a Conn does not implement ExecerContext, the sql package's DB.Exec
-// will fall back to Execer; if the Conn does not implement Execer either,
-// DB.Exec will first prepare a query, execute the statement, and then
+// If a [Conn] does not implement [ExecerContext], the [database/sql.DB.Exec]
+// will fall back to [Execer]; if the [Conn] does not implement [Execer] either,
+// [database/sql.DB.Exec] will first prepare a query, execute the statement, and then
 // close the statement.
 //
-// ExecContext may return ErrSkip.
+// ExecContext may return [ErrSkip].
 //
 // ExecContext must honor the context timeout and return when the context is canceled.
 type ExecerContext interface {
 	ExecContext(ctx context.Context, query string, args []NamedValue) (Result, error)
 }
 
-// Queryer is an optional interface that may be implemented by a Conn.
+// Queryer is an optional interface that may be implemented by a [Conn].
 //
-// If a Conn implements neither QueryerContext nor Queryer,
-// the sql package's DB.Query will first prepare a query, execute the statement,
+// If a [Conn] implements neither [QueryerContext] nor [Queryer],
+// the [database/sql.DB.Query] will first prepare a query, execute the statement,
 // and then close the statement.
 //
-// Query may return ErrSkip.
+// Query may return [ErrSkip].
 //
-// Deprecated: Drivers should implement QueryerContext instead.
+// Deprecated: Drivers should implement [QueryerContext] instead.
 type Queryer interface {
 	Query(query string, args []Value) (Rows, error)
 }
 
-// QueryerContext is an optional interface that may be implemented by a Conn.
+// QueryerContext is an optional interface that may be implemented by a [Conn].
 //
-// If a Conn does not implement QueryerContext, the sql package's DB.Query
-// will fall back to Queryer; if the Conn does not implement Queryer either,
-// DB.Query will first prepare a query, execute the statement, and then
+// If a [Conn] does not implement [QueryerContext], the [database/sql.DB.Query]
+// will fall back to [Queryer]; if the [Conn] does not implement [Queryer] either,
+// [database/sql.DB.Query] will first prepare a query, execute the statement, and then
 // close the statement.
 //
-// QueryContext may return ErrSkip.
+// QueryContext may return [ErrSkip].
 //
 // QueryContext must honor the context timeout and return when the context is canceled.
 type QueryerContext interface {
@@ -253,7 +254,7 @@
 	Begin() (Tx, error)
 }
 
-// ConnPrepareContext enhances the Conn interface with context.
+// ConnPrepareContext enhances the [Conn] interface with context.
 type ConnPrepareContext interface {
 	// PrepareContext returns a prepared statement, bound to this connection.
 	// context is for the preparation of the statement,
@@ -261,21 +262,21 @@
 	PrepareContext(ctx context.Context, query string) (Stmt, error)
 }
 
-// IsolationLevel is the transaction isolation level stored in TxOptions.
+// IsolationLevel is the transaction isolation level stored in [TxOptions].
 //
-// This type should be considered identical to sql.IsolationLevel along
+// This type should be considered identical to [database/sql.IsolationLevel] along
 // with any values defined on it.
 type IsolationLevel int
 
 // TxOptions holds the transaction options.
 //
-// This type should be considered identical to sql.TxOptions.
+// This type should be considered identical to [database/sql.TxOptions].
 type TxOptions struct {
 	Isolation IsolationLevel
 	ReadOnly  bool
 }
 
-// ConnBeginTx enhances the Conn interface with context and TxOptions.
+// ConnBeginTx enhances the [Conn] interface with context and [TxOptions].
 type ConnBeginTx interface {
 	// BeginTx starts and returns a new transaction.
 	// If the context is canceled by the user the sql package will
@@ -292,7 +293,7 @@
 	BeginTx(ctx context.Context, opts TxOptions) (Tx, error)
 }
 
-// SessionResetter may be implemented by Conn to allow drivers to reset the
+// SessionResetter may be implemented by [Conn] to allow drivers to reset the
 // session state associated with the connection and to signal a bad connection.
 type SessionResetter interface {
 	// ResetSession is called prior to executing a query on the connection
@@ -301,7 +302,7 @@
 	ResetSession(ctx context.Context) error
 }
 
-// Validator may be implemented by Conn to allow drivers to
+// Validator may be implemented by [Conn] to allow drivers to
 // signal if a connection is valid or if it should be discarded.
 //
 // If implemented, drivers may return the underlying error from queries,
@@ -324,7 +325,7 @@
 	RowsAffected() (int64, error)
 }
 
-// Stmt is a prepared statement. It is bound to a Conn and not
+// Stmt is a prepared statement. It is bound to a [Conn] and not
 // used by multiple goroutines concurrently.
 type Stmt interface {
 	// Close closes the statement.
@@ -360,7 +361,7 @@
 	Query(args []Value) (Rows, error)
 }
 
-// StmtExecContext enhances the Stmt interface by providing Exec with context.
+// StmtExecContext enhances the [Stmt] interface by providing Exec with context.
 type StmtExecContext interface {
 	// ExecContext executes a query that doesn't return rows, such
 	// as an INSERT or UPDATE.
@@ -369,7 +370,7 @@
 	ExecContext(ctx context.Context, args []NamedValue) (Result, error)
 }
 
-// StmtQueryContext enhances the Stmt interface by providing Query with context.
+// StmtQueryContext enhances the [Stmt] interface by providing Query with context.
 type StmtQueryContext interface {
 	// QueryContext executes a query that may return rows, such as a
 	// SELECT.
@@ -378,26 +379,26 @@
 	QueryContext(ctx context.Context, args []NamedValue) (Rows, error)
 }
 
-// ErrRemoveArgument may be returned from NamedValueChecker to instruct the
-// sql package to not pass the argument to the driver query interface.
+// ErrRemoveArgument may be returned from [NamedValueChecker] to instruct the
+// [database/sql] package to not pass the argument to the driver query interface.
 // Return when accepting query specific options or structures that aren't
 // SQL query arguments.
 var ErrRemoveArgument = errors.New("driver: remove argument from query")
 
-// NamedValueChecker may be optionally implemented by Conn or Stmt. It provides
+// NamedValueChecker may be optionally implemented by [Conn] or [Stmt]. It provides
 // the driver more control to handle Go and database types beyond the default
-// Values types allowed.
+// [Value] types allowed.
 //
-// The sql package checks for value checkers in the following order,
+// The [database/sql] package checks for value checkers in the following order,
 // stopping at the first found match: Stmt.NamedValueChecker, Conn.NamedValueChecker,
-// Stmt.ColumnConverter, DefaultParameterConverter.
+// Stmt.ColumnConverter, [DefaultParameterConverter].
 //
-// If CheckNamedValue returns ErrRemoveArgument, the NamedValue will not be included in
+// If CheckNamedValue returns [ErrRemoveArgument], the [NamedValue] will not be included in
 // the final query arguments. This may be used to pass special options to
 // the query itself.
 //
-// If ErrSkip is returned the column converter error checking
-// path is used for the argument. Drivers may wish to return ErrSkip after
+// If [ErrSkip] is returned the column converter error checking
+// path is used for the argument. Drivers may wish to return [ErrSkip] after
 // they have exhausted their own special cases.
 type NamedValueChecker interface {
 	// CheckNamedValue is called before passing arguments to the driver
@@ -406,11 +407,11 @@
 	CheckNamedValue(*NamedValue) error
 }
 
-// ColumnConverter may be optionally implemented by Stmt if the
+// ColumnConverter may be optionally implemented by [Stmt] if the
 // statement is aware of its own columns' types and can convert from
-// any type to a driver Value.
+// any type to a driver [Value].
 //
-// Deprecated: Drivers should implement NamedValueChecker.
+// Deprecated: Drivers should implement [NamedValueChecker].
 type ColumnConverter interface {
 	// ColumnConverter returns a ValueConverter for the provided
 	// column index. If the type of a specific column isn't known
@@ -442,7 +443,7 @@
 	Next(dest []Value) error
 }
 
-// RowsNextResultSet extends the Rows interface by providing a way to signal
+// RowsNextResultSet extends the [Rows] interface by providing a way to signal
 // the driver to advance to the next result set.
 type RowsNextResultSet interface {
 	Rows
@@ -458,15 +459,15 @@
 	NextResultSet() error
 }
 
-// RowsColumnTypeScanType may be implemented by Rows. It should return
+// RowsColumnTypeScanType may be implemented by [Rows]. It should return
 // the value type that can be used to scan types into. For example, the database
-// column type "bigint" this should return "reflect.TypeOf(int64(0))".
+// column type "bigint" this should return "[reflect.TypeOf](int64(0))".
 type RowsColumnTypeScanType interface {
 	Rows
 	ColumnTypeScanType(index int) reflect.Type
 }
 
-// RowsColumnTypeDatabaseTypeName may be implemented by Rows. It should return the
+// RowsColumnTypeDatabaseTypeName may be implemented by [Rows]. It should return the
 // database system type name without the length. Type names should be uppercase.
 // Examples of returned types: "VARCHAR", "NVARCHAR", "VARCHAR2", "CHAR", "TEXT",
 // "DECIMAL", "SMALLINT", "INT", "BIGINT", "BOOL", "[]BIGINT", "JSONB", "XML",
@@ -476,10 +477,10 @@
 	ColumnTypeDatabaseTypeName(index int) string
 }
 
-// RowsColumnTypeLength may be implemented by Rows. It should return the length
+// RowsColumnTypeLength may be implemented by [Rows]. It should return the length
 // of the column type if the column is a variable length type. If the column is
 // not a variable length type ok should return false.
-// If length is not limited other than system limits, it should return math.MaxInt64.
+// If length is not limited other than system limits, it should return [math.MaxInt64].
 // The following are examples of returned values for various types:
 //
 //	TEXT          (math.MaxInt64, true)
@@ -493,7 +494,7 @@
 	ColumnTypeLength(index int) (length int64, ok bool)
 }
 
-// RowsColumnTypeNullable may be implemented by Rows. The nullable value should
+// RowsColumnTypeNullable may be implemented by [Rows]. The nullable value should
 // be true if it is known the column may be null, or false if the column is known
 // to be not nullable.
 // If the column nullability is unknown, ok should be false.
@@ -502,7 +503,7 @@
 	ColumnTypeNullable(index int) (nullable, ok bool)
 }
 
-// RowsColumnTypePrecisionScale may be implemented by Rows. It should return
+// RowsColumnTypePrecisionScale may be implemented by [Rows]. It should return
 // the precision and scale for decimal types. If not applicable, ok should be false.
 // The following are examples of returned values for various types:
 //
@@ -520,7 +521,7 @@
 	Rollback() error
 }
 
-// RowsAffected implements Result for an INSERT or UPDATE operation
+// RowsAffected implements [Result] for an INSERT or UPDATE operation
 // which mutates a number of rows.
 type RowsAffected int64
 
@@ -534,9 +535,9 @@
 	return int64(v), nil
 }
 
-// ResultNoRows is a pre-defined Result for drivers to return when a DDL
+// ResultNoRows is a pre-defined [Result] for drivers to return when a DDL
 // command (such as a CREATE TABLE) succeeds. It returns an error for both
-// LastInsertId and RowsAffected.
+// LastInsertId and [RowsAffected].
 var ResultNoRows noRows
 
 type noRows struct{}
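
Tying together the NamedValueChecker and ErrRemoveArgument documentation above, a hedged sketch of a connection that accepts a hypothetical per-query option value and removes it from the final argument list:

	package fakedriver

	import (
		"database/sql/driver"
		"time"
	)

	// QueryTimeout is a hypothetical per-query option that callers would pass
	// as an ordinary argument, e.g. db.QueryContext(ctx, q, QueryTimeout(time.Second)).
	type QueryTimeout time.Duration

	type optionConn struct {
		timeout time.Duration
	}

	// CheckNamedValue intercepts arguments before they reach the query methods.
	func (c *optionConn) CheckNamedValue(nv *driver.NamedValue) error {
		if d, ok := nv.Value.(QueryTimeout); ok {
			c.timeout = time.Duration(d)    // record the per-query option...
			return driver.ErrRemoveArgument // ...and exclude it from the final args
		}
		return driver.ErrSkip // default conversion path for everything else
	}

	var _ driver.NamedValueChecker = (*optionConn)(nil)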
diff --git a/src/database/sql/driver/types.go b/src/database/sql/driver/types.go
index fa98df7..0380572 100644
--- a/src/database/sql/driver/types.go
+++ b/src/database/sql/driver/types.go
@@ -17,15 +17,15 @@
 // driver package to provide consistent implementations of conversions
 // between drivers. The ValueConverters have several uses:
 //
-//   - converting from the Value types as provided by the sql package
+//   - converting from the [Value] types as provided by the sql package
 //     into a database table's specific column type and making sure it
 //     fits, such as making sure a particular int64 fits in a
 //     table's uint16 column.
 //
 //   - converting a value as given from the database into one of the
-//     driver Value types.
+//     driver [Value] types.
 //
-//   - by the sql package, for converting from a driver's Value type
+//   - by the [database/sql] package, for converting from a driver's [Value] type
 //     to a user's type in a scan.
 type ValueConverter interface {
 	// ConvertValue converts a value to a driver Value.
@@ -35,14 +35,14 @@
 // Valuer is the interface providing the Value method.
 //
 // Types implementing Valuer interface are able to convert
-// themselves to a driver Value.
+// themselves to a driver [Value].
 type Valuer interface {
 	// Value returns a driver Value.
 	// Value must not panic.
 	Value() (Value, error)
 }
 
-// Bool is a ValueConverter that converts input values to bools.
+// Bool is a [ValueConverter] that converts input values to bool.
 //
 // The conversion rules are:
 //   - booleans are returned unchanged
@@ -50,7 +50,7 @@
 //     1 is true
 //     0 is false,
 //     other integers are an error
-//   - for strings and []byte, same rules as strconv.ParseBool
+//   - for strings and []byte, same rules as [strconv.ParseBool]
 //   - all other types are an error
 var Bool boolType
 
@@ -97,7 +97,7 @@
 	return nil, fmt.Errorf("sql/driver: couldn't convert %v (%T) into type bool", src, src)
 }
 
-// Int32 is a ValueConverter that converts input values to int64,
+// Int32 is a [ValueConverter] that converts input values to int64,
 // respecting the limits of an int32 value.
 var Int32 int32Type
 
@@ -130,7 +130,7 @@
 	return nil, fmt.Errorf("sql/driver: unsupported value %v (type %T) converting to int32", v, v)
 }
 
-// String is a ValueConverter that converts its input to a string.
+// String is a [ValueConverter] that converts its input to a string.
 // If the value is already a string or []byte, it's unchanged.
 // If the value is of another type, conversion to string is done
 // with fmt.Sprintf("%v", v).
@@ -146,8 +146,8 @@
 	return fmt.Sprintf("%v", v), nil
 }
 
-// Null is a type that implements ValueConverter by allowing nil
-// values but otherwise delegating to another ValueConverter.
+// Null is a type that implements [ValueConverter] by allowing nil
+// values but otherwise delegating to another [ValueConverter].
 type Null struct {
 	Converter ValueConverter
 }
@@ -159,8 +159,8 @@
 	return n.Converter.ConvertValue(v)
 }
 
-// NotNull is a type that implements ValueConverter by disallowing nil
-// values but otherwise delegating to another ValueConverter.
+// NotNull is a type that implements [ValueConverter] by disallowing nil
+// values but otherwise delegating to another [ValueConverter].
 type NotNull struct {
 	Converter ValueConverter
 }
@@ -172,7 +172,7 @@
 	return n.Converter.ConvertValue(v)
 }
 
-// IsValue reports whether v is a valid Value parameter type.
+// IsValue reports whether v is a valid [Value] parameter type.
 func IsValue(v any) bool {
 	if v == nil {
 		return true
@@ -186,32 +186,33 @@
 	return false
 }
 
-// IsScanValue is equivalent to IsValue.
+// IsScanValue is equivalent to [IsValue].
 // It exists for compatibility.
 func IsScanValue(v any) bool {
 	return IsValue(v)
 }
 
 // DefaultParameterConverter is the default implementation of
-// ValueConverter that's used when a Stmt doesn't implement
-// ColumnConverter.
+// [ValueConverter] that's used when a [Stmt] doesn't implement
+// [ColumnConverter].
 //
 // DefaultParameterConverter returns its argument directly if
-// IsValue(arg). Otherwise, if the argument implements Valuer, its
-// Value method is used to return a Value. As a fallback, the provided
-// argument's underlying type is used to convert it to a Value:
+// IsValue(arg). Otherwise, if the argument implements [Valuer], its
+// Value method is used to return a [Value]. As a fallback, the provided
+// argument's underlying type is used to convert it to a [Value]:
 // underlying integer types are converted to int64, floats to float64,
 // bool, string, and []byte to themselves. If the argument is a nil
-// pointer, ConvertValue returns a nil Value. If the argument is a
-// non-nil pointer, it is dereferenced and ConvertValue is called
-// recursively. Other types are an error.
+// pointer, defaultConverter.ConvertValue returns a nil [Value].
+// If the argument is a non-nil pointer, it is dereferenced and
+// defaultConverter.ConvertValue is called recursively. Other types
+// are an error.
 var DefaultParameterConverter defaultConverter
 
 type defaultConverter struct{}
 
 var _ ValueConverter = defaultConverter{}
 
-var valuerReflectType = reflect.TypeOf((*Valuer)(nil)).Elem()
+var valuerReflectType = reflect.TypeFor[Valuer]()
 
 // callValuerValue returns vr.Value(), with one exception:
 // If vr.Value is an auto-generated method on a pointer type and the
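
A small illustration of the converters documented above; the printed results in the comments follow from the Null and Int32 conversion rules:

	package main

	import (
		"database/sql/driver"
		"fmt"
	)

	func main() {
		// Null delegates to Int32 but lets nil through unchanged.
		conv := driver.Null{Converter: driver.Int32}

		v, err := conv.ConvertValue(nil)
		fmt.Println(v, err) // <nil> <nil>

		v, err = conv.ConvertValue(int64(7))
		fmt.Println(v, err) // 7 <nil>

		_, err = conv.ConvertValue(int64(1) << 40)
		fmt.Println(err != nil) // true: the value does not fit in an int32
	}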
diff --git a/src/database/sql/fakedb_test.go b/src/database/sql/fakedb_test.go
index cfeb3b3..c6c3172 100644
--- a/src/database/sql/fakedb_test.go
+++ b/src/database/sql/fakedb_test.go
@@ -1251,33 +1251,33 @@
 func colTypeToReflectType(typ string) reflect.Type {
 	switch typ {
 	case "bool":
-		return reflect.TypeOf(false)
+		return reflect.TypeFor[bool]()
 	case "nullbool":
-		return reflect.TypeOf(NullBool{})
+		return reflect.TypeFor[NullBool]()
 	case "int16":
-		return reflect.TypeOf(int16(0))
+		return reflect.TypeFor[int16]()
 	case "nullint16":
-		return reflect.TypeOf(NullInt16{})
+		return reflect.TypeFor[NullInt16]()
 	case "int32":
-		return reflect.TypeOf(int32(0))
+		return reflect.TypeFor[int32]()
 	case "nullint32":
-		return reflect.TypeOf(NullInt32{})
+		return reflect.TypeFor[NullInt32]()
 	case "string":
-		return reflect.TypeOf("")
+		return reflect.TypeFor[string]()
 	case "nullstring":
-		return reflect.TypeOf(NullString{})
+		return reflect.TypeFor[NullString]()
 	case "int64":
-		return reflect.TypeOf(int64(0))
+		return reflect.TypeFor[int64]()
 	case "nullint64":
-		return reflect.TypeOf(NullInt64{})
+		return reflect.TypeFor[NullInt64]()
 	case "float64":
-		return reflect.TypeOf(float64(0))
+		return reflect.TypeFor[float64]()
 	case "nullfloat64":
-		return reflect.TypeOf(NullFloat64{})
+		return reflect.TypeFor[NullFloat64]()
 	case "datetime":
-		return reflect.TypeOf(time.Time{})
+		return reflect.TypeFor[time.Time]()
 	case "any":
-		return reflect.TypeOf(new(any)).Elem()
+		return reflect.TypeFor[any]()
 	}
 	panic("invalid fakedb column type of " + typ)
 }
diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go
index 836fe83..4f1197d 100644
--- a/src/database/sql/sql.go
+++ b/src/database/sql/sql.go
@@ -73,11 +73,11 @@
 }
 
 // A NamedArg is a named argument. NamedArg values may be used as
-// arguments to Query or Exec and bind to the corresponding named
+// arguments to [DB.Query] or [DB.Exec] and bind to the corresponding named
 // parameter in the SQL statement.
 //
 // For a more concise way to create NamedArg values, see
-// the Named function.
+// the [Named] function.
 type NamedArg struct {
 	_NamedFieldsRequired struct{}
 
@@ -95,7 +95,7 @@
 	Value any
 }
 
-// Named provides a more concise way to create NamedArg values.
+// Named provides a more concise way to create [NamedArg] values.
 //
 // Example usage:
 //
@@ -115,10 +115,10 @@
 	return NamedArg{Name: name, Value: value}
 }
 
-// IsolationLevel is the transaction isolation level used in TxOptions.
+// IsolationLevel is the transaction isolation level used in [TxOptions].
 type IsolationLevel int
 
-// Various isolation levels that drivers may support in BeginTx.
+// Various isolation levels that drivers may support in [DB.BeginTx].
 // If a driver does not support a given isolation level an error may be returned.
 //
 // See https://en.wikipedia.org/wiki/Isolation_(database_systems)#Isolation_levels.
@@ -159,7 +159,7 @@
 
 var _ fmt.Stringer = LevelDefault
 
-// TxOptions holds the transaction options to be used in DB.BeginTx.
+// TxOptions holds the transaction options to be used in [DB.BeginTx].
 type TxOptions struct {
 	// Isolation is the transaction isolation level.
 	// If zero, the driver or database's default level is used.
@@ -168,12 +168,12 @@
 }
 
 // RawBytes is a byte slice that holds a reference to memory owned by
-// the database itself. After a Scan into a RawBytes, the slice is only
-// valid until the next call to Next, Scan, or Close.
+// the database itself. After a [Rows.Scan] into a RawBytes, the slice is only
+// valid until the next call to [Rows.Next], [Rows.Scan], or [Rows.Close].
 type RawBytes []byte
 
 // NullString represents a string that may be null.
-// NullString implements the Scanner interface so
+// NullString implements the [Scanner] interface so
 // it can be used as a scan destination:
 //
 //	var s NullString
@@ -189,7 +189,7 @@
 	Valid  bool // Valid is true if String is not NULL
 }
 
-// Scan implements the Scanner interface.
+// Scan implements the [Scanner] interface.
 func (ns *NullString) Scan(value any) error {
 	if value == nil {
 		ns.String, ns.Valid = "", false
@@ -199,7 +199,7 @@
 	return convertAssign(&ns.String, value)
 }
 
-// Value implements the driver Valuer interface.
+// Value implements the [driver.Valuer] interface.
 func (ns NullString) Value() (driver.Value, error) {
 	if !ns.Valid {
 		return nil, nil
@@ -208,14 +208,14 @@
 }
 
 // NullInt64 represents an int64 that may be null.
-// NullInt64 implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
+// NullInt64 implements the [Scanner] interface so
+// it can be used as a scan destination, similar to [NullString].
 type NullInt64 struct {
 	Int64 int64
 	Valid bool // Valid is true if Int64 is not NULL
 }
 
-// Scan implements the Scanner interface.
+// Scan implements the [Scanner] interface.
 func (n *NullInt64) Scan(value any) error {
 	if value == nil {
 		n.Int64, n.Valid = 0, false
@@ -225,7 +225,7 @@
 	return convertAssign(&n.Int64, value)
 }
 
-// Value implements the driver Valuer interface.
+// Value implements the [driver.Valuer] interface.
 func (n NullInt64) Value() (driver.Value, error) {
 	if !n.Valid {
 		return nil, nil
@@ -234,14 +234,14 @@
 }
 
 // NullInt32 represents an int32 that may be null.
-// NullInt32 implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
+// NullInt32 implements the [Scanner] interface so
+// it can be used as a scan destination, similar to [NullString].
 type NullInt32 struct {
 	Int32 int32
 	Valid bool // Valid is true if Int32 is not NULL
 }
 
-// Scan implements the Scanner interface.
+// Scan implements the [Scanner] interface.
 func (n *NullInt32) Scan(value any) error {
 	if value == nil {
 		n.Int32, n.Valid = 0, false
@@ -251,7 +251,7 @@
 	return convertAssign(&n.Int32, value)
 }
 
-// Value implements the driver Valuer interface.
+// Value implements the [driver.Valuer] interface.
 func (n NullInt32) Value() (driver.Value, error) {
 	if !n.Valid {
 		return nil, nil
@@ -260,14 +260,14 @@
 }
 
 // NullInt16 represents an int16 that may be null.
-// NullInt16 implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
+// NullInt16 implements the [Scanner] interface so
+// it can be used as a scan destination, similar to [NullString].
 type NullInt16 struct {
 	Int16 int16
 	Valid bool // Valid is true if Int16 is not NULL
 }
 
-// Scan implements the Scanner interface.
+// Scan implements the [Scanner] interface.
 func (n *NullInt16) Scan(value any) error {
 	if value == nil {
 		n.Int16, n.Valid = 0, false
@@ -278,7 +278,7 @@
 	return err
 }
 
-// Value implements the driver Valuer interface.
+// Value implements the [driver.Valuer] interface.
 func (n NullInt16) Value() (driver.Value, error) {
 	if !n.Valid {
 		return nil, nil
@@ -287,14 +287,14 @@
 }
 
 // NullByte represents a byte that may be null.
-// NullByte implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
+// NullByte implements the [Scanner] interface so
+// it can be used as a scan destination, similar to [NullString].
 type NullByte struct {
 	Byte  byte
 	Valid bool // Valid is true if Byte is not NULL
 }
 
-// Scan implements the Scanner interface.
+// Scan implements the [Scanner] interface.
 func (n *NullByte) Scan(value any) error {
 	if value == nil {
 		n.Byte, n.Valid = 0, false
@@ -305,7 +305,7 @@
 	return err
 }
 
-// Value implements the driver Valuer interface.
+// Value implements the [driver.Valuer] interface.
 func (n NullByte) Value() (driver.Value, error) {
 	if !n.Valid {
 		return nil, nil
@@ -314,14 +314,14 @@
 }
 
 // NullFloat64 represents a float64 that may be null.
-// NullFloat64 implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
+// NullFloat64 implements the [Scanner] interface so
+// it can be used as a scan destination, similar to [NullString].
 type NullFloat64 struct {
 	Float64 float64
 	Valid   bool // Valid is true if Float64 is not NULL
 }
 
-// Scan implements the Scanner interface.
+// Scan implements the [Scanner] interface.
 func (n *NullFloat64) Scan(value any) error {
 	if value == nil {
 		n.Float64, n.Valid = 0, false
@@ -331,7 +331,7 @@
 	return convertAssign(&n.Float64, value)
 }
 
-// Value implements the driver Valuer interface.
+// Value implements the [driver.Valuer] interface.
 func (n NullFloat64) Value() (driver.Value, error) {
 	if !n.Valid {
 		return nil, nil
@@ -340,14 +340,14 @@
 }
 
 // NullBool represents a bool that may be null.
-// NullBool implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
+// NullBool implements the [Scanner] interface so
+// it can be used as a scan destination, similar to [NullString].
 type NullBool struct {
 	Bool  bool
 	Valid bool // Valid is true if Bool is not NULL
 }
 
-// Scan implements the Scanner interface.
+// Scan implements the [Scanner] interface.
 func (n *NullBool) Scan(value any) error {
 	if value == nil {
 		n.Bool, n.Valid = false, false
@@ -357,7 +357,7 @@
 	return convertAssign(&n.Bool, value)
 }
 
-// Value implements the driver Valuer interface.
+// Value implements the [driver.Valuer] interface.
 func (n NullBool) Value() (driver.Value, error) {
 	if !n.Valid {
 		return nil, nil
@@ -365,15 +365,15 @@
 	return n.Bool, nil
 }
 
-// NullTime represents a time.Time that may be null.
-// NullTime implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
+// NullTime represents a [time.Time] that may be null.
+// NullTime implements the [Scanner] interface so
+// it can be used as a scan destination, similar to [NullString].
 type NullTime struct {
 	Time  time.Time
 	Valid bool // Valid is true if Time is not NULL
 }
 
-// Scan implements the Scanner interface.
+// Scan implements the [Scanner] interface.
 func (n *NullTime) Scan(value any) error {
 	if value == nil {
 		n.Time, n.Valid = time.Time{}, false
@@ -383,7 +383,7 @@
 	return convertAssign(&n.Time, value)
 }
 
-// Value implements the driver Valuer interface.
+// Value implements the [driver.Valuer] interface.
 func (n NullTime) Value() (driver.Value, error) {
 	if !n.Valid {
 		return nil, nil
@@ -391,7 +391,40 @@
 	return n.Time, nil
 }
 
-// Scanner is an interface used by Scan.
+// Null represents a value that may be null.
+// Null implements the [Scanner] interface so
+// it can be used as a scan destination:
+//
+//	var s Null[string]
+//	err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
+//	...
+//	if s.Valid {
+//	   // use s.V
+//	} else {
+//	   // NULL value
+//	}
+type Null[T any] struct {
+	V     T
+	Valid bool
+}
+
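+// Scan implements the [Scanner] interface.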
+func (n *Null[T]) Scan(value any) error {
+	if value == nil {
+		n.V, n.Valid = *new(T), false
+		return nil
+	}
+	n.Valid = true
+	return convertAssign(&n.V, value)
+}
+
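+// Value implements the [driver.Valuer] interface.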
+func (n Null[T]) Value() (driver.Value, error) {
+	if !n.Valid {
+		return nil, nil
+	}
+	return n.V, nil
+}
+
+// Scanner is an interface used by [Rows.Scan].
 type Scanner interface {
 	// Scan assigns a value from a database driver.
 	//
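
Beyond scanning, Null[T] also implements the driver Valuer interface, so it can be passed as a query parameter; a hypothetical sketch (the users table, nickname column, and "?" placeholder style are assumptions):

	package example

	import (
		"context"
		"database/sql"
	)

	// clearNickname writes SQL NULL: the zero Null[string] has Valid == false,
	// so its Value method returns nil. Setting V and Valid = true would write
	// the string instead.
	func clearNickname(ctx context.Context, db *sql.DB, id int64) error {
		var nick sql.Null[string]
		_, err := db.ExecContext(ctx, "UPDATE users SET nickname = ? WHERE id = ?", nick, id)
		return err
	}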
@@ -435,8 +468,8 @@
 	In bool
 }
 
-// ErrNoRows is returned by Scan when QueryRow doesn't return a
-// row. In such a case, QueryRow returns a placeholder *Row value that
+// ErrNoRows is returned by [Row.Scan] when [DB.QueryRow] doesn't return a
+// row. In such a case, QueryRow returns a placeholder [*Row] value that
 // defers this error until a Scan.
 var ErrNoRows = errors.New("sql: no rows in result set")
 
@@ -447,11 +480,11 @@
 // The sql package creates and frees connections automatically; it
 // also maintains a free pool of idle connections. If the database has
 // a concept of per-connection state, such state can be reliably observed
-// within a transaction (Tx) or connection (Conn). Once DB.Begin is called, the
-// returned Tx is bound to a single connection. Once Commit or
-// Rollback is called on the transaction, that transaction's
-// connection is returned to DB's idle connection pool. The pool size
-// can be controlled with SetMaxIdleConns.
+// within a transaction ([Tx]) or connection ([Conn]). Once [DB.Begin] is called, the
+// returned [Tx] is bound to a single connection. Once [Tx.Commit] or
+// [Tx.Rollback] is called on the transaction, that transaction's
+// connection is returned to [DB]'s idle connection pool. The pool size
+// can be controlled with [DB.SetMaxIdleConns].
 type DB struct {
 	// Total time waited for new connections.
 	waitDuration atomic.Int64
@@ -744,7 +777,7 @@
 
 // This is the size of the connectionOpener request chan (DB.openerCh).
 // This value should be larger than the maximum typical value
-// used for db.maxOpen. If maxOpen is significantly larger than
+// used for DB.maxOpen. If maxOpen is significantly larger than
 // connectionRequestQueueSize then it is possible for ALL calls into the *DB
 // to block until the connectionOpener can satisfy the backlog of requests.
 var connectionRequestQueueSize = 1000000
@@ -762,22 +795,22 @@
 	return t.driver
 }
 
-// OpenDB opens a database using a Connector, allowing drivers to
+// OpenDB opens a database using a [driver.Connector], allowing drivers to
 // bypass a string based data source name.
 //
 // Most users will open a database via a driver-specific connection
-// helper function that returns a *DB. No database drivers are included
+// helper function that returns a [*DB]. No database drivers are included
 // in the Go standard library. See https://golang.org/s/sqldrivers for
 // a list of third-party drivers.
 //
 // OpenDB may just validate its arguments without creating a connection
 // to the database. To verify that the data source name is valid, call
-// Ping.
+// [DB.Ping].
 //
-// The returned DB is safe for concurrent use by multiple goroutines
+// The returned [DB] is safe for concurrent use by multiple goroutines
 // and maintains its own pool of idle connections. Thus, the OpenDB
 // function should be called just once. It is rarely necessary to
-// close a DB.
+// close a [DB].
 func OpenDB(c driver.Connector) *DB {
 	ctx, cancel := context.WithCancel(context.Background())
 	db := &DB{
@@ -798,18 +831,18 @@
 // database name and connection information.
 //
 // Most users will open a database via a driver-specific connection
-// helper function that returns a *DB. No database drivers are included
+// helper function that returns a [*DB]. No database drivers are included
 // in the Go standard library. See https://golang.org/s/sqldrivers for
 // a list of third-party drivers.
 //
 // Open may just validate its arguments without creating a connection
 // to the database. To verify that the data source name is valid, call
-// Ping.
+// [DB.Ping].
 //
-// The returned DB is safe for concurrent use by multiple goroutines
+// The returned [DB] is safe for concurrent use by multiple goroutines
 // and maintains its own pool of idle connections. Thus, the Open
 // function should be called just once. It is rarely necessary to
-// close a DB.
+// close a [DB].
 func Open(driverName, dataSourceName string) (*DB, error) {
 	driversMu.RLock()
 	driveri, ok := drivers[driverName]
@@ -861,8 +894,8 @@
 // Ping verifies a connection to the database is still alive,
 // establishing a connection if necessary.
 //
-// Ping uses context.Background internally; to specify the context, use
-// PingContext.
+// Ping uses [context.Background] internally; to specify the context, use
+// [DB.PingContext].
 func (db *DB) Ping() error {
 	return db.PingContext(context.Background())
 }
@@ -871,7 +904,7 @@
 // Close then waits for all queries that have started processing on the server
 // to finish.
 //
-// It is rare to Close a DB, as the DB handle is meant to be
+// It is rare to Close a [DB], as the [DB] handle is meant to be
 // long-lived and shared between many goroutines.
 func (db *DB) Close() error {
 	db.mu.Lock()
@@ -931,12 +964,7 @@
 	if db.maxLifetime <= 0 {
 		return db.maxIdleTime
 	}
-
-	min := db.maxIdleTime
-	if min > db.maxLifetime {
-		min = db.maxLifetime
-	}
-	return min
+	return min(db.maxIdleTime, db.maxLifetime)
 }
 
 // SetMaxIdleConns sets the maximum number of connections in the idle
@@ -1548,7 +1576,7 @@
 // PrepareContext creates a prepared statement for later queries or executions.
 // Multiple queries or executions may be run concurrently from the
 // returned statement.
-// The caller must call the statement's Close method
+// The caller must call the statement's [*Stmt.Close] method
 // when the statement is no longer needed.
 //
 // The provided context is used for the preparation of the statement, not for the
@@ -1568,11 +1596,11 @@
 // Prepare creates a prepared statement for later queries or executions.
 // Multiple queries or executions may be run concurrently from the
 // returned statement.
-// The caller must call the statement's Close method
+// The caller must call the statement's [*Stmt.Close] method
 // when the statement is no longer needed.
 //
-// Prepare uses context.Background internally; to specify the context, use
-// PrepareContext.
+// Prepare uses [context.Background] internally; to specify the context, use
+// [DB.PrepareContext].
 func (db *DB) Prepare(query string) (*Stmt, error) {
 	return db.PrepareContext(context.Background(), query)
 }
@@ -1641,8 +1669,8 @@
 // Exec executes a query without returning any rows.
 // The args are for any placeholder parameters in the query.
 //
-// Exec uses context.Background internally; to specify the context, use
-// ExecContext.
+// Exec uses [context.Background] internally; to specify the context, use
+// [DB.ExecContext].
 func (db *DB) Exec(query string, args ...any) (Result, error) {
 	return db.ExecContext(context.Background(), query, args...)
 }
@@ -1711,8 +1739,8 @@
 // Query executes a query that returns rows, typically a SELECT.
 // The args are for any placeholder parameters in the query.
 //
-// Query uses context.Background internally; to specify the context, use
-// QueryContext.
+// Query uses [context.Background] internally; to specify the context, use
+// [DB.QueryContext].
 func (db *DB) Query(query string, args ...any) (*Rows, error) {
 	return db.QueryContext(context.Background(), query, args...)
 }
@@ -1796,9 +1824,9 @@
 
 // QueryRowContext executes a query that is expected to return at most one row.
 // QueryRowContext always returns a non-nil value. Errors are deferred until
-// Row's Scan method is called.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
+// [Row]'s Scan method is called.
+// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+// Otherwise, [*Row.Scan] scans the first selected row and discards
 // the rest.
 func (db *DB) QueryRowContext(ctx context.Context, query string, args ...any) *Row {
 	rows, err := db.QueryContext(ctx, query, args...)
@@ -1807,13 +1835,13 @@
 
 // QueryRow executes a query that is expected to return at most one row.
 // QueryRow always returns a non-nil value. Errors are deferred until
-// Row's Scan method is called.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
+// [Row]'s Scan method is called.
+// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+// Otherwise, [*Row.Scan] scans the first selected row and discards
 // the rest.
 //
-// QueryRow uses context.Background internally; to specify the context, use
-// QueryRowContext.
+// QueryRow uses [context.Background] internally; to specify the context, use
+// [DB.QueryRowContext].
 func (db *DB) QueryRow(query string, args ...any) *Row {
 	return db.QueryRowContext(context.Background(), query, args...)
 }
@@ -1822,10 +1850,10 @@
 //
 // The provided context is used until the transaction is committed or rolled back.
 // If the context is canceled, the sql package will roll back
-// the transaction. Tx.Commit will return an error if the context provided to
+// the transaction. [Tx.Commit] will return an error if the context provided to
 // BeginTx is canceled.
 //
-// The provided TxOptions is optional and may be nil if defaults should be used.
+// The provided [TxOptions] is optional and may be nil if defaults should be used.
 // If a non-default isolation level is used that the driver doesn't support,
 // an error will be returned.
 func (db *DB) BeginTx(ctx context.Context, opts *TxOptions) (*Tx, error) {
@@ -1843,8 +1871,8 @@
 // Begin starts a transaction. The default isolation level is dependent on
 // the driver.
 //
-// Begin uses context.Background internally; to specify the context, use
-// BeginTx.
+// Begin uses [context.Background] internally; to specify the context, use
+// [DB.BeginTx].
 func (db *DB) Begin() (*Tx, error) {
 	return db.BeginTx(context.Background(), nil)
 }
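The BeginTx comment above ties the transaction's lifetime to the context. A hedged sketch of the usual commit-or-rollback shape, passing a nil *TxOptions for driver defaults; the account statements are illustrative:

package dbexample

import (
	"context"
	"database/sql"
)

// transfer runs two statements in one transaction. Rollback after a
// successful Commit returns sql.ErrTxDone, so the deferred call is safe.
func transfer(ctx context.Context, db *sql.DB, from, to int64, amount int) error {
	tx, err := db.BeginTx(ctx, nil) // nil *TxOptions: use driver defaults
	if err != nil {
		return err
	}
	defer tx.Rollback()

	if _, err := tx.ExecContext(ctx, "UPDATE accounts SET balance = balance - ? WHERE id = ?", amount, from); err != nil {
		return err
	}
	if _, err := tx.ExecContext(ctx, "UPDATE accounts SET balance = balance + ? WHERE id = ?", amount, to); err != nil {
		return err
	}
	return tx.Commit()
}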
@@ -1903,7 +1931,7 @@
 // Queries run on the same Conn will be run in the same database session.
 //
 // Every Conn must be returned to the database pool after use by
-// calling Conn.Close.
+// calling [Conn.Close].
 func (db *DB) Conn(ctx context.Context) (*Conn, error) {
 	var dc *driverConn
 	var err error
@@ -1927,14 +1955,14 @@
 type releaseConn func(error)
 
 // Conn represents a single database connection rather than a pool of database
-// connections. Prefer running queries from DB unless there is a specific
+// connections. Prefer running queries from [DB] unless there is a specific
 // need for a continuous single database connection.
 //
-// A Conn must call Close to return the connection to the database pool
+// A Conn must call [Conn.Close] to return the connection to the database pool
 // and may do so concurrently with a running query.
 //
-// After a call to Close, all operations on the
-// connection fail with ErrConnDone.
+// After a call to [Conn.Close], all operations on the
+// connection fail with [ErrConnDone].
 type Conn struct {
 	db *DB
 
@@ -1951,9 +1979,9 @@
 	// Once done, all operations fail with ErrConnDone.
 	done atomic.Bool
 
-	// releaseConn is a cache of c.closemuRUnlockCondReleaseConn
+	releaseConnOnce sync.Once
+	// releaseConnCache is a cache of c.closemuRUnlockCondReleaseConn
 	// to save allocations in a call to grabConn.
-	releaseConnOnce  sync.Once
 	releaseConnCache releaseConn
 }
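The Conn comments above pin down the lifecycle: one session, returned to the pool with Close, ErrConnDone afterwards. A minimal sketch of that usage:

package dbexample

import (
	"context"
	"database/sql"
)

// withSession runs f on one pinned connection so that session state (for
// example, temporary tables) survives between statements, then returns the
// connection to the pool. After Close, further use fails with sql.ErrConnDone.
func withSession(ctx context.Context, db *sql.DB, f func(*sql.Conn) error) error {
	conn, err := db.Conn(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()
	return f(conn)
}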
 
@@ -2001,9 +2029,9 @@
 
 // QueryRowContext executes a query that is expected to return at most one row.
 // QueryRowContext always returns a non-nil value. Errors are deferred until
-// Row's Scan method is called.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
+// the [*Row.Scan] method is called.
+// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+// Otherwise, the [*Row.Scan] scans the first selected row and discards
 // the rest.
 func (c *Conn) QueryRowContext(ctx context.Context, query string, args ...any) *Row {
 	rows, err := c.QueryContext(ctx, query, args...)
@@ -2013,7 +2041,7 @@
 // PrepareContext creates a prepared statement for later queries or executions.
 // Multiple queries or executions may be run concurrently from the
 // returned statement.
-// The caller must call the statement's Close method
+// The caller must call the statement's [*Stmt.Close] method
 // when the statement is no longer needed.
 //
 // The provided context is used for the preparation of the statement, not for the
@@ -2029,8 +2057,8 @@
 // Raw executes f exposing the underlying driver connection for the
 // duration of f. The driverConn must not be used outside of f.
 //
-// Once f returns and err is not driver.ErrBadConn, the Conn will continue to be usable
-// until Conn.Close is called.
+// Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable
+// until [Conn.Close] is called.
 func (c *Conn) Raw(f func(driverConn any) error) (err error) {
 	var dc *driverConn
 	var release releaseConn
@@ -2063,10 +2091,10 @@
 //
 // The provided context is used until the transaction is committed or rolled back.
 // If the context is canceled, the sql package will roll back
-// the transaction. Tx.Commit will return an error if the context provided to
+// the transaction. [Tx.Commit] will return an error if the context provided to
 // BeginTx is canceled.
 //
-// The provided TxOptions is optional and may be nil if defaults should be used.
+// The provided [TxOptions] is optional and may be nil if defaults should be used.
 // If a non-default isolation level is used that the driver doesn't support,
 // an error will be returned.
 func (c *Conn) BeginTx(ctx context.Context, opts *TxOptions) (*Tx, error) {
@@ -2107,7 +2135,7 @@
 }
 
 // Close returns the connection to the connection pool.
-// All operations after a Close will return with ErrConnDone.
+// All operations after a Close will return with [ErrConnDone].
 // Close is safe to call concurrently with other operations and will
 // block until all other operations finish. It may be useful to first
 // cancel any used context and then call close directly after.
@@ -2117,14 +2145,14 @@
 
 // Tx is an in-progress database transaction.
 //
-// A transaction must end with a call to Commit or Rollback.
+// A transaction must end with a call to [Tx.Commit] or [Tx.Rollback].
 //
-// After a call to Commit or Rollback, all operations on the
-// transaction fail with ErrTxDone.
+// After a call to [Tx.Commit] or [Tx.Rollback], all operations on the
+// transaction fail with [ErrTxDone].
 //
 // The statements prepared for a transaction by calling
-// the transaction's Prepare or Stmt methods are closed
-// by the call to Commit or Rollback.
+// the transaction's [Tx.Prepare] or [Tx.Stmt] methods are closed
+// by the call to [Tx.Commit] or [Tx.Rollback].
 type Tx struct {
 	db *DB
 
@@ -2229,7 +2257,7 @@
 }
 
 // closemuRUnlockRelease is used as a func(error) method value in
-// ExecContext and QueryContext. Unlocking in the releaseConn keeps
+// [DB.ExecContext] and [DB.QueryContext]. Unlocking in the releaseConn keeps
 // the driver conn from being returned to the connection pool until
 // the Rows has been closed.
 func (tx *Tx) closemuRUnlockRelease(error) {
@@ -2326,7 +2354,7 @@
 // The returned statement operates within the transaction and will be closed
 // when the transaction has been committed or rolled back.
 //
-// To use an existing prepared statement on this transaction, see Tx.Stmt.
+// To use an existing prepared statement on this transaction, see [Tx.Stmt].
 //
 // The provided context will be used for the preparation of the statement, not
 // for the execution of the returned statement. The returned statement
@@ -2352,10 +2380,10 @@
 // The returned statement operates within the transaction and will be closed
 // when the transaction has been committed or rolled back.
 //
-// To use an existing prepared statement on this transaction, see Tx.Stmt.
+// To use an existing prepared statement on this transaction, see [Tx.Stmt].
 //
-// Prepare uses context.Background internally; to specify the context, use
-// PrepareContext.
+// Prepare uses [context.Background] internally; to specify the context, use
+// [Tx.PrepareContext].
 func (tx *Tx) Prepare(query string) (*Stmt, error) {
 	return tx.PrepareContext(context.Background(), query)
 }
@@ -2462,8 +2490,8 @@
 // The returned statement operates within the transaction and will be closed
 // when the transaction has been committed or rolled back.
 //
-// Stmt uses context.Background internally; to specify the context, use
-// StmtContext.
+// Stmt uses [context.Background] internally; to specify the context, use
+// [Tx.StmtContext].
 func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
 	return tx.StmtContext(context.Background(), stmt)
 }
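Tx.StmtContext is the route the comments above point to for reusing a DB-prepared statement inside a transaction; the transaction-specific copy is closed when the transaction ends. A short sketch, with an assumed single-parameter INSERT statement:

package dbexample

import (
	"context"
	"database/sql"
)

// insertAll reuses a statement prepared on the DB inside a transaction via
// StmtContext; the per-transaction copy is cleaned up with the transaction.
func insertAll(ctx context.Context, db *sql.DB, insert *sql.Stmt, names []string) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	txInsert := tx.StmtContext(ctx, insert)
	for _, name := range names {
		if _, err := txInsert.ExecContext(ctx, name); err != nil {
			return err
		}
	}
	return tx.Commit()
}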
@@ -2481,8 +2509,8 @@
 // Exec executes a query that doesn't return rows.
 // For example: an INSERT and UPDATE.
 //
-// Exec uses context.Background internally; to specify the context, use
-// ExecContext.
+// Exec uses [context.Background] internally; to specify the context, use
+// [Tx.ExecContext].
 func (tx *Tx) Exec(query string, args ...any) (Result, error) {
 	return tx.ExecContext(context.Background(), query, args...)
 }
@@ -2499,17 +2527,17 @@
 
 // Query executes a query that returns rows, typically a SELECT.
 //
-// Query uses context.Background internally; to specify the context, use
-// QueryContext.
+// Query uses [context.Background] internally; to specify the context, use
+// [Tx.QueryContext].
 func (tx *Tx) Query(query string, args ...any) (*Rows, error) {
 	return tx.QueryContext(context.Background(), query, args...)
 }
 
 // QueryRowContext executes a query that is expected to return at most one row.
 // QueryRowContext always returns a non-nil value. Errors are deferred until
-// Row's Scan method is called.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
+// [Row]'s Scan method is called.
+// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+// Otherwise, the [*Row.Scan] scans the first selected row and discards
 // the rest.
 func (tx *Tx) QueryRowContext(ctx context.Context, query string, args ...any) *Row {
 	rows, err := tx.QueryContext(ctx, query, args...)
@@ -2518,13 +2546,13 @@
 
 // QueryRow executes a query that is expected to return at most one row.
 // QueryRow always returns a non-nil value. Errors are deferred until
-// Row's Scan method is called.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
+// [Row]'s Scan method is called.
+// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+// Otherwise, the [*Row.Scan] scans the first selected row and discards
 // the rest.
 //
-// QueryRow uses context.Background internally; to specify the context, use
-// QueryRowContext.
+// QueryRow uses [context.Background] internally; to specify the context, use
+// [Tx.QueryRowContext].
 func (tx *Tx) QueryRow(query string, args ...any) *Row {
 	return tx.QueryRowContext(context.Background(), query, args...)
 }
@@ -2556,11 +2584,11 @@
 // Stmt is a prepared statement.
 // A Stmt is safe for concurrent use by multiple goroutines.
 //
-// If a Stmt is prepared on a Tx or Conn, it will be bound to a single
-// underlying connection forever. If the Tx or Conn closes, the Stmt will
+// If a Stmt is prepared on a [Tx] or [Conn], it will be bound to a single
+// underlying connection forever. If the [Tx] or [Conn] closes, the Stmt will
 // become unusable and all operations will return an error.
-// If a Stmt is prepared on a DB, it will remain usable for the lifetime of the
-// DB. When the Stmt needs to execute on a new underlying connection, it will
+// If a Stmt is prepared on a [DB], it will remain usable for the lifetime of the
+// [DB]. When the Stmt needs to execute on a new underlying connection, it will
 // prepare itself on the new connection automatically.
 type Stmt struct {
 	// Immutable:
@@ -2601,7 +2629,7 @@
 }
 
 // ExecContext executes a prepared statement with the given arguments and
-// returns a Result summarizing the effect of the statement.
+// returns a [Result] summarizing the effect of the statement.
 func (s *Stmt) ExecContext(ctx context.Context, args ...any) (Result, error) {
 	s.closemu.RLock()
 	defer s.closemu.RUnlock()
@@ -2622,10 +2650,10 @@
 }
 
 // Exec executes a prepared statement with the given arguments and
-// returns a Result summarizing the effect of the statement.
+// returns a [Result] summarizing the effect of the statement.
 //
-// Exec uses context.Background internally; to specify the context, use
-// ExecContext.
+// Exec uses [context.Background] internally; to specify the context, use
+// [Stmt.ExecContext].
 func (s *Stmt) Exec(args ...any) (Result, error) {
 	return s.ExecContext(context.Background(), args...)
 }
@@ -2741,7 +2769,7 @@
 }
 
 // QueryContext executes a prepared query statement with the given arguments
-// and returns the query results as a *Rows.
+// and returns the query results as a [*Rows].
 func (s *Stmt) QueryContext(ctx context.Context, args ...any) (*Rows, error) {
 	s.closemu.RLock()
 	defer s.closemu.RUnlock()
@@ -2792,8 +2820,8 @@
 // Query executes a prepared query statement with the given arguments
 // and returns the query results as a *Rows.
 //
-// Query uses context.Background internally; to specify the context, use
-// QueryContext.
+// Query uses [context.Background] internally; to specify the context, use
+// [Stmt.QueryContext].
 func (s *Stmt) Query(args ...any) (*Rows, error) {
 	return s.QueryContext(context.Background(), args...)
 }
@@ -2810,9 +2838,9 @@
 
 // QueryRowContext executes a prepared query statement with the given arguments.
 // If an error occurs during the execution of the statement, that error will
-// be returned by a call to Scan on the returned *Row, which is always non-nil.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
+// be returned by a call to Scan on the returned [*Row], which is always non-nil.
+// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+// Otherwise, the [*Row.Scan] scans the first selected row and discards
 // the rest.
 func (s *Stmt) QueryRowContext(ctx context.Context, args ...any) *Row {
 	rows, err := s.QueryContext(ctx, args...)
@@ -2824,9 +2852,9 @@
 
 // QueryRow executes a prepared query statement with the given arguments.
 // If an error occurs during the execution of the statement, that error will
-// be returned by a call to Scan on the returned *Row, which is always non-nil.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
+// be returned by a call to Scan on the returned [*Row], which is always non-nil.
+// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
+// Otherwise, the [*Row.Scan] scans the first selected row and discards
 // the rest.
 //
 // Example usage:
@@ -2834,8 +2862,8 @@
 //	var name string
 //	err := nameByUseridStmt.QueryRow(id).Scan(&name)
 //
-// QueryRow uses context.Background internally; to specify the context, use
-// QueryRowContext.
+// QueryRow uses [context.Background] internally; to specify the context, use
+// [Stmt.QueryRowContext].
 func (s *Stmt) QueryRow(args ...any) *Row {
 	return s.QueryRowContext(context.Background(), args...)
 }
@@ -2885,7 +2913,7 @@
 }
 
 // Rows is the result of a query. Its cursor starts before the first row
-// of the result set. Use Next to advance from row to row.
+// of the result set. Use [Rows.Next] to advance from row to row.
 type Rows struct {
 	dc          *driverConn // owned; must call releaseConn when closed to release
 	releaseConn func(error)
@@ -2973,12 +3001,12 @@
 	rs.close(ctx.Err())
 }
 
-// Next prepares the next result row for reading with the Scan method. It
+// Next prepares the next result row for reading with the [Rows.Scan] method. It
 // returns true on success, or false if there is no next result row or an error
-// happened while preparing it. Err should be consulted to distinguish between
+// happened while preparing it. [Rows.Err] should be consulted to distinguish between
 // the two cases.
 //
-// Every call to Scan, even the first one, must be preceded by a call to Next.
+// Every call to [Rows.Scan], even the first one, must be preceded by a call to [Rows.Next].
 func (rs *Rows) Next() bool {
 	// If the user's calling Next, they're done with their previous row's Scan
 	// results (any RawBytes memory), so we can release the read lock that would
@@ -3039,10 +3067,10 @@
 
 // NextResultSet prepares the next result set for reading. It reports whether
 // there are further result sets, or false if there is no further result set
-// or if there is an error advancing to it. The Err method should be consulted
+// or if there is an error advancing to it. The [Rows.Err] method should be consulted
 // to distinguish between the two cases.
 //
-// After calling NextResultSet, the Next method should always be called before
+// After calling NextResultSet, the [Rows.Next] method should always be called before
 // scanning. If there are further result sets they may not have rows in the result
 // set.
 func (rs *Rows) NextResultSet() bool {
@@ -3085,7 +3113,7 @@
 }
 
 // Err returns the error, if any, that was encountered during iteration.
-// Err may be called after an explicit or implicit Close.
+// Err may be called after an explicit or implicit [Rows.Close].
 func (rs *Rows) Err() error {
 	// Return any context error that might've happened during row iteration,
 	// but only if we haven't reported the final Next() = false after rows
@@ -3162,7 +3190,7 @@
 
 // Length returns the column type length for variable length column types such
 // as text and binary field types. If the type length is unbounded the value will
-// be math.MaxInt64 (any database limits will still apply).
+// be [math.MaxInt64] (any database limits will still apply).
 // If the column type is not variable length, such as an int, or if not supported
 // by the driver ok is false.
 func (ci *ColumnType) Length() (length int64, ok bool) {
@@ -3175,7 +3203,7 @@
 	return ci.precision, ci.scale, ci.hasPrecisionScale
 }
 
-// ScanType returns a Go type suitable for scanning into using Rows.Scan.
+// ScanType returns a Go type suitable for scanning into using [Rows.Scan].
 // If a driver does not support this property ScanType will return
 // the type of an empty interface.
 func (ci *ColumnType) ScanType() reflect.Type {
@@ -3190,7 +3218,7 @@
 
 // DatabaseTypeName returns the database system name of the column type. If an empty
 // string is returned, then the driver type name is not supported.
-// Consult your driver documentation for a list of driver data types. Length specifiers
+// Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers
 // are not included.
 // Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL",
 // "INT", and "BIGINT".
@@ -3211,7 +3239,7 @@
 		if prop, ok := rowsi.(driver.RowsColumnTypeScanType); ok {
 			ci.scanType = prop.ColumnTypeScanType(i)
 		} else {
-			ci.scanType = reflect.TypeOf(new(any)).Elem()
+			ci.scanType = reflect.TypeFor[any]()
 		}
 		if prop, ok := rowsi.(driver.RowsColumnTypeDatabaseTypeName); ok {
 			ci.databaseType = prop.ColumnTypeDatabaseTypeName(i)
@@ -3231,7 +3259,7 @@
 
 // Scan copies the columns in the current row into the values pointed
 // at by dest. The number of values in dest must be the same as the
-// number of columns in Rows.
+// number of columns in [Rows].
 //
 // Scan converts columns read from the database into the following
 // common Go types and special types provided by the sql package:
@@ -3264,30 +3292,30 @@
 // If a dest argument has type *[]byte, Scan saves in that argument a
 // copy of the corresponding data. The copy is owned by the caller and
 // can be modified and held indefinitely. The copy can be avoided by
-// using an argument of type *RawBytes instead; see the documentation
-// for RawBytes for restrictions on its use.
+// using an argument of type [*RawBytes] instead; see the documentation
+// for [RawBytes] for restrictions on its use.
 //
 // If an argument has type *interface{}, Scan copies the value
 // provided by the underlying driver without conversion. When scanning
 // from a source value of type []byte to *interface{}, a copy of the
 // slice is made and the caller owns the result.
 //
-// Source values of type time.Time may be scanned into values of type
+// Source values of type [time.Time] may be scanned into values of type
 // *time.Time, *interface{}, *string, or *[]byte. When converting to
-// the latter two, time.RFC3339Nano is used.
+// the latter two, [time.RFC3339Nano] is used.
 //
 // Source values of type bool may be scanned into types *bool,
-// *interface{}, *string, *[]byte, or *RawBytes.
+// *interface{}, *string, *[]byte, or [*RawBytes].
 //
 // For scanning into *bool, the source may be true, false, 1, 0, or
-// string inputs parseable by strconv.ParseBool.
+// string inputs parseable by [strconv.ParseBool].
 //
 // Scan can also convert a cursor returned from a query, such as
 // "select cursor(select * from my_table) from dual", into a
-// *Rows value that can itself be scanned from. The parent
-// select query will close any cursor *Rows if the parent *Rows is closed.
+// [*Rows] value that can itself be scanned from. The parent
+// select query will close any cursor [*Rows] if the parent [*Rows] is closed.
 //
-// If any of the first arguments implementing Scanner returns an error,
+// If any of the first arguments implementing [Scanner] returns an error,
 // that error will be wrapped in the returned error.
 func (rs *Rows) Scan(dest ...any) error {
 	if rs.closemuScanHold {
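The Scan rules documented above show up most often in the standard iteration loop: Next before every Scan, a deferred Close, and an Err check once the loop ends. A sketch with an illustrative query:

package dbexample

import (
	"context"
	"database/sql"
)

// allNames shows the canonical Rows loop: every Scan is preceded by Next,
// Close is deferred, and Err is consulted once iteration stops.
func allNames(ctx context.Context, db *sql.DB) ([]string, error) {
	rows, err := db.QueryContext(ctx, "SELECT name FROM users")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}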
@@ -3354,10 +3382,10 @@
 // hook through a test only mutex.
 var rowsCloseHook = func() func(*Rows, *error) { return nil }
 
-// Close closes the Rows, preventing further enumeration. If Next is called
+// Close closes the [Rows], preventing further enumeration. If [Rows.Next] is called
 // and returns false and there are no further result sets,
-// the Rows are closed automatically and it will suffice to check the
-// result of Err. Close is idempotent and does not affect the result of Err.
+// the [Rows] are closed automatically and it will suffice to check the
+// result of [Rows.Err]. Close is idempotent and does not affect the result of [Rows.Err].
 func (rs *Rows) Close() error {
 	// If the user's calling Close, they're done with their previous row's Scan
 	// results (any RawBytes memory), so we can release the read lock that would
@@ -3399,7 +3427,7 @@
 	return err
 }
 
-// Row is the result of calling QueryRow to select a single row.
+// Row is the result of calling [DB.QueryRow] to select a single row.
 type Row struct {
 	// One of these two will be non-nil:
 	err  error // deferred error for easy chaining
@@ -3407,10 +3435,10 @@
 }
 
 // Scan copies the columns from the matched row into the values
-// pointed at by dest. See the documentation on Rows.Scan for details.
+// pointed at by dest. See the documentation on [Rows.Scan] for details.
 // If more than one row matches the query,
 // Scan uses the first row and discards the rest. If no row matches
-// the query, Scan returns ErrNoRows.
+// the query, Scan returns [ErrNoRows].
 func (r *Row) Scan(dest ...any) error {
 	if r.err != nil {
 		return r.err
@@ -3451,9 +3479,9 @@
 }
 
 // Err provides a way for wrapping packages to check for
-// query errors without calling Scan.
+// query errors without calling [Row.Scan].
 // Err returns the error, if any, that was encountered while running the query.
-// If this error is not nil, this error will also be returned from Scan.
+// If this error is not nil, this error will also be returned from [Row.Scan].
 func (r *Row) Err() error {
 	return r.err
 }
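Row.Err exists for wrapping packages that want to surface a query error before handing the Row on for scanning. A minimal sketch of such a wrapper:

package dbexample

import (
	"context"
	"database/sql"
)

// queryOne surfaces query errors eagerly via Row.Err while still handing the
// *Row back for the caller to Scan.
func queryOne(ctx context.Context, db *sql.DB, query string, args ...any) (*sql.Row, error) {
	row := db.QueryRowContext(ctx, query, args...)
	if err := row.Err(); err != nil {
		return nil, err
	}
	return row, nil
}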
diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go
index e6a5cd9..c38a348 100644
--- a/src/database/sql/sql_test.go
+++ b/src/database/sql/sql_test.go
@@ -1803,6 +1803,18 @@
 	nullTestRun(t, spec)
 }
 
+func TestGenericNullStringParam(t *testing.T) {
+	spec := nullTestSpec{"nullstring", "string", [6]nullTestRow{
+		{Null[string]{"aqua", true}, "", Null[string]{"aqua", true}},
+		{Null[string]{"brown", false}, "", Null[string]{"", false}},
+		{"chartreuse", "", Null[string]{"chartreuse", true}},
+		{Null[string]{"darkred", true}, "", Null[string]{"darkred", true}},
+		{Null[string]{"eel", false}, "", Null[string]{"", false}},
+		{"foo", Null[string]{"black", false}, nil},
+	}}
+	nullTestRun(t, spec)
+}
+
 func TestNullInt64Param(t *testing.T) {
 	spec := nullTestSpec{"nullint64", "int64", [6]nullTestRow{
 		{NullInt64{31, true}, 1, NullInt64{31, true}},
@@ -1916,8 +1928,9 @@
 	}
 
 	// Can't put null val into non-null col
-	if _, err := stmt.Exec(6, "bob", spec.rows[5].nullParam, spec.rows[5].notNullParam); err == nil {
-		t.Errorf("expected error inserting nil val with prepared statement Exec")
+	row5 := spec.rows[5]
+	if _, err := stmt.Exec(6, "bob", row5.nullParam, row5.notNullParam); err == nil {
+		t.Errorf("expected error inserting nil val with prepared statement Exec: NULL=%#v, NOT-NULL=%#v", row5.nullParam, row5.notNullParam)
 	}
 
 	_, err = db.Exec("INSERT|t|id=?,name=?,nullf=?", 999, nil, nil)
@@ -3756,7 +3769,7 @@
 		cancel()
 
 		// Wait for the context to cancel and tx to rollback.
-		for tx.isDone() == false {
+		for !tx.isDone() {
 			time.Sleep(pollDuration)
 		}
 	}
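The new TestGenericNullStringParam above exercises the generic Null[T] type; a hedged sketch of how it reads in application code, with illustrative table and column names:

package dbexample

import (
	"context"
	"database/sql"
)

// nickname scans a nullable text column into the generic sql.Null[string]
// rather than the older sql.NullString; Valid reports whether the column
// was non-NULL.
func nickname(ctx context.Context, db *sql.DB, id int64) (string, bool, error) {
	var n sql.Null[string]
	err := db.QueryRowContext(ctx, "SELECT nickname FROM users WHERE id = ?", id).Scan(&n)
	if err != nil {
		return "", false, err
	}
	return n.V, n.Valid, nil
}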
diff --git a/src/debug/buildinfo/buildinfo.go b/src/debug/buildinfo/buildinfo.go
index 3409356..1bfdd02 100644
--- a/src/debug/buildinfo/buildinfo.go
+++ b/src/debug/buildinfo/buildinfo.go
@@ -89,10 +89,10 @@
 	// ReadData reads and returns up to size bytes starting at virtual address addr.
 	ReadData(addr, size uint64) ([]byte, error)
 
-	// DataStart returns the virtual address of the segment or section that
+	// DataStart returns the virtual address and size of the segment or section that
 	// should contain build information. This is either a specially named section
 	// or the first writable non-zero data segment.
-	DataStart() uint64
+	DataStart() (uint64, uint64)
 }
 
 // readRawBuildInfo extracts the Go toolchain version and module information
@@ -148,13 +148,16 @@
 		return "", "", errUnrecognizedFormat
 	}
 
-	// Read the first 64kB of dataAddr to find the build info blob.
+	// Read segment or section to find the build info blob.
 	// On some platforms, the blob will be in its own section, and DataStart
 	// returns the address of that section. On others, it's somewhere in the
 	// data segment; the linker puts it near the beginning.
 	// See cmd/link/internal/ld.Link.buildinfo.
-	dataAddr := x.DataStart()
-	data, err := x.ReadData(dataAddr, 64*1024)
+	dataAddr, dataSize := x.DataStart()
+	if dataSize == 0 {
+		return "", "", errNotGoExe
+	}
+	data, err := x.ReadData(dataAddr, dataSize)
 	if err != nil {
 		return "", "", err
 	}
@@ -234,7 +237,7 @@
 
 func decodeString(data []byte) (s string, rest []byte) {
 	u, n := binary.Uvarint(data)
-	if n <= 0 || u >= uint64(len(data)-n) {
+	if n <= 0 || u > uint64(len(data)-n) {
 		return "", nil
 	}
 	return string(data[n : uint64(n)+u]), data[uint64(n)+u:]
@@ -273,18 +276,18 @@
 	return nil, errUnrecognizedFormat
 }
 
-func (x *elfExe) DataStart() uint64 {
+func (x *elfExe) DataStart() (uint64, uint64) {
 	for _, s := range x.f.Sections {
 		if s.Name == ".go.buildinfo" {
-			return s.Addr
+			return s.Addr, s.Size
 		}
 	}
 	for _, p := range x.f.Progs {
 		if p.Type == elf.PT_LOAD && p.Flags&(elf.PF_X|elf.PF_W) == elf.PF_W {
-			return p.Vaddr
+			return p.Vaddr, p.Memsz
 		}
 	}
-	return 0
+	return 0, 0
 }
 
 // peExe is the PE (Windows Portable Executable) implementation of the exe interface.
@@ -316,7 +319,7 @@
 	return nil, errUnrecognizedFormat
 }
 
-func (x *peExe) DataStart() uint64 {
+func (x *peExe) DataStart() (uint64, uint64) {
 	// Assume data is first writable section.
 	const (
 		IMAGE_SCN_CNT_CODE               = 0x00000020
@@ -332,10 +335,10 @@
 	for _, sect := range x.f.Sections {
 		if sect.VirtualAddress != 0 && sect.Size != 0 &&
 			sect.Characteristics&^IMAGE_SCN_ALIGN_32BYTES == IMAGE_SCN_CNT_INITIALIZED_DATA|IMAGE_SCN_MEM_READ|IMAGE_SCN_MEM_WRITE {
-			return uint64(sect.VirtualAddress) + x.imageBase()
+			return uint64(sect.VirtualAddress) + x.imageBase(), uint64(sect.VirtualSize)
 		}
 	}
-	return 0
+	return 0, 0
 }
 
 // machoExe is the Mach-O (Apple macOS/iOS) implementation of the exe interface.
@@ -363,11 +366,11 @@
 	return nil, errUnrecognizedFormat
 }
 
-func (x *machoExe) DataStart() uint64 {
+func (x *machoExe) DataStart() (uint64, uint64) {
 	// Look for section named "__go_buildinfo".
 	for _, sec := range x.f.Sections {
 		if sec.Name == "__go_buildinfo" {
-			return sec.Addr
+			return sec.Addr, sec.Size
 		}
 	}
 	// Try the first non-empty writable segment.
@@ -375,10 +378,10 @@
 	for _, load := range x.f.Loads {
 		seg, ok := load.(*macho.Segment)
 		if ok && seg.Addr != 0 && seg.Filesz != 0 && seg.Prot == RW && seg.Maxprot == RW {
-			return seg.Addr
+			return seg.Addr, seg.Memsz
 		}
 	}
-	return 0
+	return 0, 0
 }
 
 // xcoffExe is the XCOFF (AIX eXtended COFF) implementation of the exe interface.
@@ -399,11 +402,11 @@
 	return nil, errors.New("address not mapped")
 }
 
-func (x *xcoffExe) DataStart() uint64 {
+func (x *xcoffExe) DataStart() (uint64, uint64) {
 	if s := x.f.SectionByType(xcoff.STYP_DATA); s != nil {
-		return s.VirtualAddress
+		return s.VirtualAddress, s.Size
 	}
-	return 0
+	return 0, 0
 }
 
 // plan9objExe is the Plan 9 a.out implementation of the exe interface.
@@ -411,11 +414,11 @@
 	f *plan9obj.File
 }
 
-func (x *plan9objExe) DataStart() uint64 {
+func (x *plan9objExe) DataStart() (uint64, uint64) {
 	if s := x.f.Section("data"); s != nil {
-		return uint64(s.Offset)
+		return uint64(s.Offset), uint64(s.Size)
 	}
-	return 0
+	return 0, 0
 }
 
 func (x *plan9objExe) ReadData(addr, size uint64) ([]byte, error) {
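The DataStart change above bounds the read to the reported segment or section size rather than a fixed 64kB window; none of that is visible through the public API. A sketch of the usual entry point, reading the running binary's own build info:

package main

import (
	"debug/buildinfo"
	"fmt"
	"log"
	"os"
)

// Prints the Go version and main module path recorded in this executable's
// build info blob, which the code above locates via DataStart/ReadData.
func main() {
	exe, err := os.Executable()
	if err != nil {
		log.Fatal(err)
	}
	info, err := buildinfo.ReadFile(exe)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.GoVersion, info.Main.Path)
}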
diff --git a/src/debug/buildinfo/buildinfo_test.go b/src/debug/buildinfo/buildinfo_test.go
index 290e370..ea7228c 100644
--- a/src/debug/buildinfo/buildinfo_test.go
+++ b/src/debug/buildinfo/buildinfo_test.go
@@ -236,7 +236,7 @@
 									t.Fatalf("unexpected success; want error containing %q", tc.wantErr)
 								}
 								got := info.String()
-								if clean := cleanOutputForComparison(string(got)); got != tc.want && clean != tc.want {
+								if clean := cleanOutputForComparison(got); got != tc.want && clean != tc.want {
 									t.Fatalf("got:\n%s\nwant:\n%s", got, tc.want)
 								}
 							}
diff --git a/src/debug/dwarf/const.go b/src/debug/dwarf/const.go
index c0a74b0..ea52460 100644
--- a/src/debug/dwarf/const.go
+++ b/src/debug/dwarf/const.go
@@ -8,7 +8,7 @@
 
 //go:generate stringer -type Attr -trimprefix=Attr
 
-// An Attr identifies the attribute type in a DWARF Entry's Field.
+// An Attr identifies the attribute type in a DWARF [Entry.Field].
 type Attr uint32
 
 const (
@@ -203,7 +203,7 @@
 
 //go:generate stringer -type Tag -trimprefix=Tag
 
-// A Tag is the classification (the type) of an Entry.
+// A Tag is the classification (the type) of an [Entry].
 type Tag uint32
 
 const (
diff --git a/src/debug/dwarf/entry.go b/src/debug/dwarf/entry.go
index f1215d2..4541d74 100644
--- a/src/debug/dwarf/entry.go
+++ b/src/debug/dwarf/entry.go
@@ -237,7 +237,7 @@
 	Field    []Field
 }
 
-// A Field is a single attribute/value pair in an Entry.
+// A Field is a single attribute/value pair in an [Entry].
 //
 // A value can be one of several "attribute classes" defined by DWARF.
 // The Go types corresponding to each class are:
@@ -258,8 +258,8 @@
 //	macptr            int64          ClassMacPtr
 //	rangelistptr      int64          ClassRangeListPtr
 //
-// For unrecognized or vendor-defined attributes, Class may be
-// ClassUnknown.
+// For unrecognized or vendor-defined attributes, [Class] may be
+// [ClassUnknown].
 type Field struct {
 	Attr  Attr
 	Val   any
@@ -376,7 +376,7 @@
 	return "dwarf." + i.String()
 }
 
-// Val returns the value associated with attribute Attr in Entry,
+// Val returns the value associated with attribute [Attr] in [Entry],
 // or nil if there is no such attribute.
 //
 // A common idiom is to merge the check for nil return with
@@ -390,8 +390,8 @@
 	return nil
 }
 
-// AttrField returns the Field associated with attribute Attr in
-// Entry, or nil if there is no such attribute.
+// AttrField returns the [Field] associated with attribute [Attr] in
+// [Entry], or nil if there is no such attribute.
 func (e *Entry) AttrField(a Attr) *Field {
 	for i, f := range e.Field {
 		if f.Attr == a {
@@ -401,8 +401,8 @@
 	return nil
 }
 
-// An Offset represents the location of an Entry within the DWARF info.
-// (See Reader.Seek.)
+// An Offset represents the location of an [Entry] within the DWARF info.
+// (See [Reader.Seek].)
 type Offset uint32
 
 // Entry reads a single entry from buf, decoding
@@ -791,11 +791,11 @@
 	return e
 }
 
-// A Reader allows reading Entry structures from a DWARF “info” section.
-// The Entry structures are arranged in a tree. The Reader's Next function
+// A Reader allows reading [Entry] structures from a DWARF “info” section.
+// The [Entry] structures are arranged in a tree. The [Reader.Next] function
 // returns successive entries from a pre-order traversal of the tree.
 // If an entry has children, its Children field will be true, and the children
-// follow, terminated by an Entry with Tag 0.
+// follow, terminated by an [Entry] with [Tag] 0.
 type Reader struct {
 	b            buf
 	d            *Data
@@ -807,7 +807,7 @@
 	cu           *Entry // current compilation unit
 }
 
-// Reader returns a new Reader for Data.
+// Reader returns a new Reader for [Data].
 // The reader is positioned at byte offset 0 in the DWARF “info” section.
 func (d *Data) Reader() *Reader {
 	r := &Reader{d: d}
@@ -826,7 +826,7 @@
 	return r.b.order
 }
 
-// Seek positions the Reader at offset off in the encoded entry stream.
+// Seek positions the [Reader] at offset off in the encoded entry stream.
 // Offset 0 can be used to denote the first entry.
 func (r *Reader) Seek(off Offset) {
 	d := r.d
@@ -874,7 +874,7 @@
 // Next reads the next entry from the encoded entry stream.
 // It returns nil, nil when it reaches the end of the section.
 // It returns an error if the current offset is invalid or the data at the
-// offset cannot be decoded as a valid Entry.
+// offset cannot be decoded as a valid [Entry].
 func (r *Reader) Next() (*Entry, error) {
 	if r.err != nil {
 		return nil, r.err
@@ -906,8 +906,8 @@
 }
 
 // SkipChildren skips over the child entries associated with
-// the last Entry returned by Next. If that Entry did not have
-// children or Next has not been called, SkipChildren is a no-op.
+// the last [Entry] returned by [Reader.Next]. If that [Entry] did not have
+// children or [Reader.Next] has not been called, SkipChildren is a no-op.
 func (r *Reader) SkipChildren() {
 	if r.err != nil || !r.lastChildren {
 		return
@@ -950,9 +950,9 @@
 	return r.b.off
 }
 
-// SeekPC returns the Entry for the compilation unit that includes pc,
+// SeekPC returns the [Entry] for the compilation unit that includes pc,
 // and positions the reader to read the children of that unit.  If pc
-// is not covered by any unit, SeekPC returns ErrUnknownPC and the
+// is not covered by any unit, SeekPC returns [ErrUnknownPC] and the
 // position of the reader is undefined.
 //
 // Because compilation units can describe multiple regions of the
@@ -996,7 +996,7 @@
 }
 
 // Ranges returns the PC ranges covered by e, a slice of [low,high) pairs.
-// Only some entry types, such as TagCompileUnit or TagSubprogram, have PC
+// Only some entry types, such as [TagCompileUnit] or [TagSubprogram], have PC
 // ranges; for others, this will return nil with no error.
 func (d *Data) Ranges(e *Entry) ([][2]uint64, error) {
 	var ret [][2]uint64
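The Reader and Entry doc links above describe a pre-order walk terminated by Tag-0 entries. A sketch that lists compile-unit names from an ELF binary; the command-line handling is illustrative and error handling is kept minimal:

package main

import (
	"debug/dwarf"
	"debug/elf"
	"fmt"
	"log"
	"os"
)

// Walks the DWARF "info" section of the ELF file named on the command line,
// printing each compilation unit's name and skipping its children.
func main() {
	f, err := elf.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	d, err := f.DWARF()
	if err != nil {
		log.Fatal(err)
	}
	r := d.Reader()
	for {
		e, err := r.Next()
		if err != nil {
			log.Fatal(err)
		}
		if e == nil { // end of the "info" section
			break
		}
		if e.Tag == dwarf.TagCompileUnit {
			name, _ := e.Val(dwarf.AttrName).(string)
			fmt.Println(name)
			r.SkipChildren()
		}
	}
}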
diff --git a/src/debug/dwarf/line.go b/src/debug/dwarf/line.go
index 4df4a17..3a02c8e 100644
--- a/src/debug/dwarf/line.go
+++ b/src/debug/dwarf/line.go
@@ -12,11 +12,11 @@
 	"strings"
 )
 
-// A LineReader reads a sequence of LineEntry structures from a DWARF
+// A LineReader reads a sequence of [LineEntry] structures from a DWARF
 // "line" section for a single compilation unit. LineEntries occur in
-// order of increasing PC and each LineEntry gives metadata for the
-// instructions from that LineEntry's PC to just before the next
-// LineEntry's PC. The last entry will have its EndSequence field set.
+// order of increasing PC and each [LineEntry] gives metadata for the
+// instructions from that [LineEntry]'s PC to just before the next
+// [LineEntry]'s PC. The last entry will have the [LineEntry.EndSequence] field set.
 type LineReader struct {
 	buf buf
 
@@ -137,7 +137,7 @@
 }
 
 // LineReader returns a new reader for the line table of compilation
-// unit cu, which must be an Entry with tag TagCompileUnit.
+// unit cu, which must be an [Entry] with tag [TagCompileUnit].
 //
 // If this compilation unit has no line table, it returns nil, nil.
 func (d *Data) LineReader(cu *Entry) (*LineReader, error) {
@@ -474,7 +474,7 @@
 
 // Next sets *entry to the next row in this line table and moves to
 // the next row. If there are no more entries and the line table is
-// properly terminated, it returns io.EOF.
+// properly terminated, it returns [io.EOF].
 //
 // Rows are always in order of increasing entry.Address, but
 // entry.Line may go forward or backward.
@@ -662,9 +662,9 @@
 	return LineReaderPos{r.buf.off, len(r.fileEntries), r.state, r.fileIndex}
 }
 
-// Seek restores the line table reader to a position returned by Tell.
+// Seek restores the line table reader to a position returned by [LineReader.Tell].
 //
-// The argument pos must have been returned by a call to Tell on this
+// The argument pos must have been returned by a call to [LineReader.Tell] on this
 // line table.
 func (r *LineReader) Seek(pos LineReaderPos) {
 	r.buf.off = pos.off
@@ -712,7 +712,7 @@
 // Files returns the file name table of this compilation unit as of
 // the current position in the line table. The file name table may be
 // referenced from attributes in this compilation unit such as
-// AttrDeclFile.
+// [AttrDeclFile].
 //
 // Entry 0 is always nil, since file index 0 represents "no file".
 //
@@ -729,12 +729,12 @@
 // seek PC is not covered by any entry in the line table.
 var ErrUnknownPC = errors.New("ErrUnknownPC")
 
-// SeekPC sets *entry to the LineEntry that includes pc and positions
+// SeekPC sets *entry to the [LineEntry] that includes pc and positions
 // the reader on the next entry in the line table. If necessary, this
 // will seek backwards to find pc.
 //
 // If pc is not covered by any entry in this line table, SeekPC
-// returns ErrUnknownPC. In this case, *entry and the final seek
+// returns [ErrUnknownPC]. In this case, *entry and the final seek
 // position are unspecified.
 //
 // Note that DWARF line tables only permit sequential, forward scans.
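LineReader.Next signals the end of a properly terminated line table with io.EOF, per the doc link above. A sketch that dumps file:line rows for one compilation unit, assuming cu is a TagCompileUnit entry obtained as in the previous sketch:

package dwarfexample

import (
	"debug/dwarf"
	"fmt"
	"io"
)

// dumpLines prints address, file, and line for every row in cu's line table.
// A nil, nil return from LineReader means the unit has no line table.
func dumpLines(d *dwarf.Data, cu *dwarf.Entry) error {
	lr, err := d.LineReader(cu)
	if err != nil || lr == nil {
		return err
	}
	var le dwarf.LineEntry
	for {
		if err := lr.Next(&le); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		if le.File != nil {
			fmt.Printf("%#x %s:%d\n", le.Address, le.File.Name, le.Line)
		}
	}
}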
diff --git a/src/debug/dwarf/open.go b/src/debug/dwarf/open.go
index 994b726..0901341 100644
--- a/src/debug/dwarf/open.go
+++ b/src/debug/dwarf/open.go
@@ -52,10 +52,10 @@
 
 var errSegmentSelector = errors.New("non-zero segment_selector size not supported")
 
-// New returns a new Data object initialized from the given parameters.
+// New returns a new [Data] object initialized from the given parameters.
 // Rather than calling this function directly, clients should typically use
-// the DWARF method of the File type of the appropriate package debug/elf,
-// debug/macho, or debug/pe.
+// the DWARF method of the File type of the appropriate package [debug/elf],
+// [debug/macho], or [debug/pe].
 //
 // The []byte arguments are the data from the corresponding debug section
 // in the object file; for example, for an ELF object, abbrev is the contents of
diff --git a/src/debug/dwarf/type.go b/src/debug/dwarf/type.go
index a95c4c7..627d3a1 100644
--- a/src/debug/dwarf/type.go
+++ b/src/debug/dwarf/type.go
@@ -11,7 +11,7 @@
 import "strconv"
 
 // A Type conventionally represents a pointer to any of the
-// specific Type structures (CharType, StructType, etc.).
+// specific Type structures ([CharType], [StructType], etc.).
 type Type interface {
 	Common() *CommonType
 	String() string
@@ -34,7 +34,7 @@
 
 // A BasicType holds fields common to all basic types.
 //
-// See the documentation for StructField for more info on the interpretation of
+// See the documentation for [StructField] for more info on the interpretation of
 // the BitSize/BitOffset/DataBitOffset fields.
 type BasicType struct {
 	CommonType
@@ -277,7 +277,7 @@
 
 // An EnumType represents an enumerated type.
 // The only indication of its native integer type is its ByteSize
-// (inside CommonType).
+// (inside [CommonType]).
 type EnumType struct {
 	CommonType
 	EnumName string
diff --git a/src/debug/dwarf/typeunit.go b/src/debug/dwarf/typeunit.go
index 27aa078..8ecf876 100644
--- a/src/debug/dwarf/typeunit.go
+++ b/src/debug/dwarf/typeunit.go
@@ -129,7 +129,7 @@
 	return tur.tu.unit.asize
 }
 
-// Next reads the next Entry from the type unit.
+// Next reads the next [Entry] from the type unit.
 func (tur *typeUnitReader) Next() (*Entry, error) {
 	if tur.err != nil {
 		return nil, tur.err
diff --git a/src/debug/elf/elf.go b/src/debug/elf/elf.go
index c982c68..e783677 100644
--- a/src/debug/elf/elf.go
+++ b/src/debug/elf/elf.go
@@ -13,7 +13,7 @@
  * $FreeBSD: src/sys/sparc64/include/elf.h,v 1.12 2003/09/25 01:10:26 peter Exp $
  * "System V ABI" (http://www.sco.com/developers/gabi/latest/ch4.eheader.html)
  * "ELF for the ARM® 64-bit Architecture (AArch64)" (ARM IHI 0056B)
- * "RISC-V ELF psABI specification" (https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.adoc)
+ * "RISC-V ELF psABI specification" (https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-elf.adoc)
  * llvm/BinaryFormat/ELF.h - ELF constants and structures
  *
  * Copyright (c) 1996-1998 John D. Polstra.  All rights reserved.
@@ -2216,6 +2216,8 @@
 	R_MIPS_TLS_TPREL64     R_MIPS = 48 /* TP-relative offset, 64 bit */
 	R_MIPS_TLS_TPREL_HI16  R_MIPS = 49 /* TP-relative offset, high 16 bits */
 	R_MIPS_TLS_TPREL_LO16  R_MIPS = 50 /* TP-relative offset, low 16 bits */
+
+	R_MIPS_PC32 R_MIPS = 248 /* 32 bit PC relative reference */
 )
 
 var rmipsStrings = []intName{
@@ -2267,6 +2269,7 @@
 	{48, "R_MIPS_TLS_TPREL64"},
 	{49, "R_MIPS_TLS_TPREL_HI16"},
 	{50, "R_MIPS_TLS_TPREL_LO16"},
+	{248, "R_MIPS_PC32"},
 }
 
 func (i R_MIPS) String() string   { return stringName(uint32(i), rmipsStrings, false) }
@@ -2365,6 +2368,15 @@
 	R_LARCH_TLS_GD_HI20                R_LARCH = 98
 	R_LARCH_32_PCREL                   R_LARCH = 99
 	R_LARCH_RELAX                      R_LARCH = 100
+	R_LARCH_DELETE                     R_LARCH = 101
+	R_LARCH_ALIGN                      R_LARCH = 102
+	R_LARCH_PCREL20_S2                 R_LARCH = 103
+	R_LARCH_CFA                        R_LARCH = 104
+	R_LARCH_ADD6                       R_LARCH = 105
+	R_LARCH_SUB6                       R_LARCH = 106
+	R_LARCH_ADD_ULEB128                R_LARCH = 107
+	R_LARCH_SUB_ULEB128                R_LARCH = 108
+	R_LARCH_64_PCREL                   R_LARCH = 109
 )
 
 var rlarchStrings = []intName{
@@ -2457,6 +2469,15 @@
 	{98, "R_LARCH_TLS_GD_HI20"},
 	{99, "R_LARCH_32_PCREL"},
 	{100, "R_LARCH_RELAX"},
+	{101, "R_LARCH_DELETE"},
+	{102, "R_LARCH_ALIGN"},
+	{103, "R_LARCH_PCREL20_S2"},
+	{104, "R_LARCH_CFA"},
+	{105, "R_LARCH_ADD6"},
+	{106, "R_LARCH_SUB6"},
+	{107, "R_LARCH_ADD_ULEB128"},
+	{108, "R_LARCH_SUB_ULEB128"},
+	{109, "R_LARCH_64_PCREL"},
 }
 
 func (i R_LARCH) String() string   { return stringName(uint32(i), rlarchStrings, false) }
diff --git a/src/debug/elf/file.go b/src/debug/elf/file.go
index 7485337..7228447 100644
--- a/src/debug/elf/file.go
+++ b/src/debug/elf/file.go
@@ -29,17 +29,6 @@
 	"strings"
 )
 
-// seekStart, seekCurrent, seekEnd are copies of
-// io.SeekStart, io.SeekCurrent, and io.SeekEnd.
-// We can't use the ones from package io because
-// we want this code to build with Go 1.4 during
-// cmd/dist bootstrap.
-const (
-	seekStart   int = 0
-	seekCurrent int = 1
-	seekEnd     int = 2
-)
-
 // TODO: error reporting detail
 
 /*
@@ -114,7 +103,7 @@
 // Even if the section is stored compressed in the ELF file,
 // Data returns uncompressed data.
 //
-// For an SHT_NOBITS section, Data always returns a non-nil error.
+// For an [SHT_NOBITS] section, Data always returns a non-nil error.
 func (s *Section) Data() ([]byte, error) {
 	return saferio.ReadData(s.Open(), s.Size)
 }
@@ -132,7 +121,7 @@
 // Even if the section is stored compressed in the ELF file,
 // the ReadSeeker reads uncompressed data.
 //
-// For an SHT_NOBITS section, all calls to the opened reader
+// For an [SHT_NOBITS] section, all calls to the opened reader
 // will return a non-nil error.
 func (s *Section) Open() io.ReadSeeker {
 	if s.Type == SHT_NOBITS {
@@ -245,7 +234,7 @@
 	return msg
 }
 
-// Open opens the named file using os.Open and prepares it for use as an ELF binary.
+// Open opens the named file using [os.Open] and prepares it for use as an ELF binary.
 func Open(name string) (*File, error) {
 	f, err := os.Open(name)
 	if err != nil {
@@ -260,8 +249,8 @@
 	return ff, nil
 }
 
-// Close closes the File.
-// If the File was created using NewFile directly instead of Open,
+// Close closes the [File].
+// If the [File] was created using [NewFile] directly instead of [Open],
 // Close has no effect.
 func (f *File) Close() error {
 	var err error
@@ -283,7 +272,7 @@
 	return nil
 }
 
-// NewFile creates a new File for accessing an ELF binary in an underlying reader.
+// NewFile creates a new [File] for accessing an ELF binary in an underlying reader.
 // The ELF binary is expected to start at position 0 in the ReaderAt.
 func NewFile(r io.ReaderAt) (*File, error) {
 	sr := io.NewSectionReader(r, 0, 1<<63-1)
@@ -332,7 +321,7 @@
 	switch f.Class {
 	case ELFCLASS32:
 		hdr := new(Header32)
-		sr.Seek(0, seekStart)
+		sr.Seek(0, io.SeekStart)
 		if err := binary.Read(sr, f.ByteOrder, hdr); err != nil {
 			return nil, err
 		}
@@ -351,7 +340,7 @@
 		shstrndx = int(hdr.Shstrndx)
 	case ELFCLASS64:
 		hdr := new(Header64)
-		sr.Seek(0, seekStart)
+		sr.Seek(0, io.SeekStart)
 		if err := binary.Read(sr, f.ByteOrder, hdr); err != nil {
 			return nil, err
 		}
@@ -402,7 +391,7 @@
 	f.Progs = make([]*Prog, phnum)
 	for i := 0; i < phnum; i++ {
 		off := phoff + int64(i)*int64(phentsize)
-		sr.Seek(off, seekStart)
+		sr.Seek(off, io.SeekStart)
 		p := new(Prog)
 		switch f.Class {
 		case ELFCLASS32:
@@ -453,7 +442,7 @@
 	// header at index 0.
 	if shoff > 0 && shnum == 0 {
 		var typ, link uint32
-		sr.Seek(shoff, seekStart)
+		sr.Seek(shoff, io.SeekStart)
 		switch f.Class {
 		case ELFCLASS32:
 			sh := new(Section32)
@@ -498,7 +487,7 @@
 	}
 
 	// Read section headers
-	c := saferio.SliceCap((*Section)(nil), uint64(shnum))
+	c := saferio.SliceCap[Section](uint64(shnum))
 	if c < 0 {
 		return nil, &FormatError{0, "too many sections", shnum}
 	}
@@ -506,7 +495,7 @@
 	names := make([]uint32, 0, c)
 	for i := 0; i < shnum; i++ {
 		off := shoff + int64(i)*int64(shentsize)
-		sr.Seek(off, seekStart)
+		sr.Seek(off, io.SeekStart)
 		s := new(Section)
 		switch f.Class {
 		case ELFCLASS32:
@@ -625,7 +614,7 @@
 	return nil, nil, errors.New("not implemented")
 }
 
-// ErrNoSymbols is returned by File.Symbols and File.DynamicSymbols
+// ErrNoSymbols is returned by [File.Symbols] and [File.DynamicSymbols]
 // if there is no such section in the File.
 var ErrNoSymbols = errors.New("no symbol section")
 
@@ -639,8 +628,10 @@
 	if err != nil {
 		return nil, nil, fmt.Errorf("cannot load symbol section: %w", err)
 	}
-	symtab := bytes.NewReader(data)
-	if symtab.Len()%Sym32Size != 0 {
+	if len(data) == 0 {
+		return nil, nil, errors.New("symbol section is empty")
+	}
+	if len(data)%Sym32Size != 0 {
 		return nil, nil, errors.New("length of symbol section is not a multiple of SymSize")
 	}
 
@@ -650,15 +641,19 @@
 	}
 
 	// The first entry is all zeros.
-	var skip [Sym32Size]byte
-	symtab.Read(skip[:])
+	data = data[Sym32Size:]
 
-	symbols := make([]Symbol, symtab.Len()/Sym32Size)
+	symbols := make([]Symbol, len(data)/Sym32Size)
 
 	i := 0
 	var sym Sym32
-	for symtab.Len() > 0 {
-		binary.Read(symtab, f.ByteOrder, &sym)
+	for len(data) > 0 {
+		sym.Name = f.ByteOrder.Uint32(data[0:4])
+		sym.Value = f.ByteOrder.Uint32(data[4:8])
+		sym.Size = f.ByteOrder.Uint32(data[8:12])
+		sym.Info = data[12]
+		sym.Other = data[13]
+		sym.Shndx = f.ByteOrder.Uint16(data[14:16])
 		str, _ := getString(strdata, int(sym.Name))
 		symbols[i].Name = str
 		symbols[i].Info = sym.Info
@@ -667,6 +662,7 @@
 		symbols[i].Value = uint64(sym.Value)
 		symbols[i].Size = uint64(sym.Size)
 		i++
+		data = data[Sym32Size:]
 	}
 
 	return symbols, strdata, nil
@@ -682,8 +678,7 @@
 	if err != nil {
 		return nil, nil, fmt.Errorf("cannot load symbol section: %w", err)
 	}
-	symtab := bytes.NewReader(data)
-	if symtab.Len()%Sym64Size != 0 {
+	if len(data)%Sym64Size != 0 {
 		return nil, nil, errors.New("length of symbol section is not a multiple of Sym64Size")
 	}
 
@@ -693,15 +688,19 @@
 	}
 
 	// The first entry is all zeros.
-	var skip [Sym64Size]byte
-	symtab.Read(skip[:])
+	data = data[Sym64Size:]
 
-	symbols := make([]Symbol, symtab.Len()/Sym64Size)
+	symbols := make([]Symbol, len(data)/Sym64Size)
 
 	i := 0
 	var sym Sym64
-	for symtab.Len() > 0 {
-		binary.Read(symtab, f.ByteOrder, &sym)
+	for len(data) > 0 {
+		sym.Name = f.ByteOrder.Uint32(data[0:4])
+		sym.Info = data[4]
+		sym.Other = data[5]
+		sym.Shndx = f.ByteOrder.Uint16(data[6:8])
+		sym.Value = f.ByteOrder.Uint64(data[8:16])
+		sym.Size = f.ByteOrder.Uint64(data[16:24])
 		str, _ := getString(strdata, int(sym.Name))
 		symbols[i].Name = str
 		symbols[i].Info = sym.Info
@@ -710,6 +709,7 @@
 		symbols[i].Value = sym.Value
 		symbols[i].Size = sym.Size
 		i++
+		data = data[Sym64Size:]
 	}
 
 	return symbols, strdata, nil
@@ -1434,10 +1434,10 @@
 // DynamicSymbols returns the dynamic symbol table for f. The symbols
 // will be listed in the order they appear in f.
 //
-// If f has a symbol version table, the returned Symbols will have
-// initialized Version and Library fields.
+// If f has a symbol version table, the returned [File.Symbols] will have
+// initialized [Version] and Library fields.
 //
-// For compatibility with Symbols, DynamicSymbols omits the null symbol at index 0.
+// For compatibility with [File.Symbols], [File.DynamicSymbols] omits the null symbol at index 0.
 // After retrieving the symbols as symtab, an externally supplied index x
 // corresponds to symtab[x-1], not symtab[x].
 func (f *File) DynamicSymbols() ([]Symbol, error) {
@@ -1590,8 +1590,8 @@
 // DynString returns the strings listed for the given tag in the file's dynamic
 // section.
 //
-// The tag must be one that takes string values: DT_NEEDED, DT_SONAME, DT_RPATH, or
-// DT_RUNPATH.
+// The tag must be one that takes string values: [DT_NEEDED], [DT_SONAME], [DT_RPATH], or
+// [DT_RUNPATH].
 func (f *File) DynString(tag DynTag) ([]string, error) {
 	switch tag {
 	case DT_NEEDED, DT_SONAME, DT_RPATH, DT_RUNPATH:
@@ -1607,6 +1607,15 @@
 	if err != nil {
 		return nil, err
 	}
+
+	dynSize := 8
+	if f.Class == ELFCLASS64 {
+		dynSize = 16
+	}
+	if len(d)%dynSize != 0 {
+		return nil, errors.New("length of dynamic section is not a multiple of dynamic entry size")
+	}
+
 	str, err := f.stringTable(ds.Link)
 	if err != nil {
 		return nil, err
@@ -1647,6 +1656,14 @@
 		return nil, err
 	}
 
+	dynSize := 8
+	if f.Class == ELFCLASS64 {
+		dynSize = 16
+	}
+	if len(d)%dynSize != 0 {
+		return nil, errors.New("length of dynamic section is not a multiple of dynamic entry size")
+	}
+
 	// Parse the .dynamic section as a string of bytes.
 	var vals []uint64
 	for len(d) > 0 {
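Alongside the hardening above (empty or misaligned symbol and dynamic sections are now rejected), the doc links point at the two common read paths, DynString and Symbols. A sketch combining them; the command-line handling is illustrative:

package main

import (
	"debug/elf"
	"errors"
	"fmt"
	"log"
	"os"
)

// Lists the DT_NEEDED libraries and the symbol count of the ELF file named
// on the command line, tolerating stripped binaries via ErrNoSymbols.
func main() {
	f, err := elf.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	needed, err := f.DynString(elf.DT_NEEDED)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("needed:", needed)

	syms, err := f.Symbols()
	if errors.Is(err, elf.ErrNoSymbols) {
		fmt.Println("no symbol table")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("symbols:", len(syms))
}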
diff --git a/src/debug/elf/file_test.go b/src/debug/elf/file_test.go
index 51a3634..5dd83a2 100644
--- a/src/debug/elf/file_test.go
+++ b/src/debug/elf/file_test.go
@@ -10,6 +10,7 @@
 	"compress/zlib"
 	"debug/dwarf"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io"
 	"math/rand"
@@ -18,6 +19,7 @@
 	"path"
 	"reflect"
 	"runtime"
+	"slices"
 	"strings"
 	"testing"
 )
@@ -28,6 +30,7 @@
 	sections []SectionHeader
 	progs    []ProgHeader
 	needed   []string
+	symbols  []Symbol
 }
 
 var fileTests = []fileTest{
@@ -74,6 +77,82 @@
 			{PT_DYNAMIC, PF_R + PF_W, 0x60c, 0x804960c, 0x804960c, 0x98, 0x98, 0x4},
 		},
 		[]string{"libc.so.6"},
+		[]Symbol{
+			{"", 3, 0, 1, 134512852, 0, "", ""},
+			{"", 3, 0, 2, 134512876, 0, "", ""},
+			{"", 3, 0, 3, 134513020, 0, "", ""},
+			{"", 3, 0, 4, 134513292, 0, "", ""},
+			{"", 3, 0, 5, 134513480, 0, "", ""},
+			{"", 3, 0, 6, 134513512, 0, "", ""},
+			{"", 3, 0, 7, 134513532, 0, "", ""},
+			{"", 3, 0, 8, 134513612, 0, "", ""},
+			{"", 3, 0, 9, 134513996, 0, "", ""},
+			{"", 3, 0, 10, 134514008, 0, "", ""},
+			{"", 3, 0, 11, 134518268, 0, "", ""},
+			{"", 3, 0, 12, 134518280, 0, "", ""},
+			{"", 3, 0, 13, 134518284, 0, "", ""},
+			{"", 3, 0, 14, 134518436, 0, "", ""},
+			{"", 3, 0, 15, 134518444, 0, "", ""},
+			{"", 3, 0, 16, 134518452, 0, "", ""},
+			{"", 3, 0, 17, 134518456, 0, "", ""},
+			{"", 3, 0, 18, 134518484, 0, "", ""},
+			{"", 3, 0, 19, 0, 0, "", ""},
+			{"", 3, 0, 20, 0, 0, "", ""},
+			{"", 3, 0, 21, 0, 0, "", ""},
+			{"", 3, 0, 22, 0, 0, "", ""},
+			{"", 3, 0, 23, 0, 0, "", ""},
+			{"", 3, 0, 24, 0, 0, "", ""},
+			{"", 3, 0, 25, 0, 0, "", ""},
+			{"", 3, 0, 26, 0, 0, "", ""},
+			{"", 3, 0, 27, 0, 0, "", ""},
+			{"", 3, 0, 28, 0, 0, "", ""},
+			{"", 3, 0, 29, 0, 0, "", ""},
+			{"crt1.c", 4, 0, 65521, 0, 0, "", ""},
+			{"/usr/src/lib/csu/i386-elf/crti.S", 4, 0, 65521, 0, 0, "", ""},
+			{"<command line>", 4, 0, 65521, 0, 0, "", ""},
+			{"<built-in>", 4, 0, 65521, 0, 0, "", ""},
+			{"/usr/src/lib/csu/i386-elf/crti.S", 4, 0, 65521, 0, 0, "", ""},
+			{"crtstuff.c", 4, 0, 65521, 0, 0, "", ""},
+			{"__CTOR_LIST__", 1, 0, 14, 134518436, 0, "", ""},
+			{"__DTOR_LIST__", 1, 0, 15, 134518444, 0, "", ""},
+			{"__EH_FRAME_BEGIN__", 1, 0, 12, 134518280, 0, "", ""},
+			{"__JCR_LIST__", 1, 0, 16, 134518452, 0, "", ""},
+			{"p.0", 1, 0, 11, 134518276, 0, "", ""},
+			{"completed.1", 1, 0, 18, 134518484, 1, "", ""},
+			{"__do_global_dtors_aux", 2, 0, 8, 134513760, 0, "", ""},
+			{"object.2", 1, 0, 18, 134518488, 24, "", ""},
+			{"frame_dummy", 2, 0, 8, 134513836, 0, "", ""},
+			{"crtstuff.c", 4, 0, 65521, 0, 0, "", ""},
+			{"__CTOR_END__", 1, 0, 14, 134518440, 0, "", ""},
+			{"__DTOR_END__", 1, 0, 15, 134518448, 0, "", ""},
+			{"__FRAME_END__", 1, 0, 12, 134518280, 0, "", ""},
+			{"__JCR_END__", 1, 0, 16, 134518452, 0, "", ""},
+			{"__do_global_ctors_aux", 2, 0, 8, 134513960, 0, "", ""},
+			{"/usr/src/lib/csu/i386-elf/crtn.S", 4, 0, 65521, 0, 0, "", ""},
+			{"<command line>", 4, 0, 65521, 0, 0, "", ""},
+			{"<built-in>", 4, 0, 65521, 0, 0, "", ""},
+			{"/usr/src/lib/csu/i386-elf/crtn.S", 4, 0, 65521, 0, 0, "", ""},
+			{"hello.c", 4, 0, 65521, 0, 0, "", ""},
+			{"printf", 18, 0, 0, 0, 44, "", ""},
+			{"_DYNAMIC", 17, 0, 65521, 134518284, 0, "", ""},
+			{"__dso_handle", 17, 2, 11, 134518272, 0, "", ""},
+			{"_init", 18, 0, 6, 134513512, 0, "", ""},
+			{"environ", 17, 0, 18, 134518512, 4, "", ""},
+			{"__deregister_frame_info", 32, 0, 0, 0, 0, "", ""},
+			{"__progname", 17, 0, 11, 134518268, 4, "", ""},
+			{"_start", 18, 0, 8, 134513612, 145, "", ""},
+			{"__bss_start", 16, 0, 65521, 134518484, 0, "", ""},
+			{"main", 18, 0, 8, 134513912, 46, "", ""},
+			{"_init_tls", 18, 0, 0, 0, 5, "", ""},
+			{"_fini", 18, 0, 9, 134513996, 0, "", ""},
+			{"atexit", 18, 0, 0, 0, 43, "", ""},
+			{"_edata", 16, 0, 65521, 134518484, 0, "", ""},
+			{"_GLOBAL_OFFSET_TABLE_", 17, 0, 65521, 134518456, 0, "", ""},
+			{"_end", 16, 0, 65521, 134518516, 0, "", ""},
+			{"exit", 18, 0, 0, 0, 68, "", ""},
+			{"_Jv_RegisterClasses", 32, 0, 0, 0, 0, "", ""},
+			{"__register_frame_info", 32, 0, 0, 0, 0, "", ""},
+		},
 	},
 	{
 		"testdata/gcc-amd64-linux-exec",
@@ -128,6 +207,81 @@
 			{PT_LOOS + 0x474E551, PF_R + PF_W, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8},
 		},
 		[]string{"libc.so.6"},
+		[]Symbol{
+			{"", 3, 0, 1, 4194816, 0, "", ""},
+			{"", 3, 0, 2, 4194844, 0, "", ""},
+			{"", 3, 0, 3, 4194880, 0, "", ""},
+			{"", 3, 0, 4, 4194920, 0, "", ""},
+			{"", 3, 0, 5, 4194952, 0, "", ""},
+			{"", 3, 0, 6, 4195048, 0, "", ""},
+			{"", 3, 0, 7, 4195110, 0, "", ""},
+			{"", 3, 0, 8, 4195120, 0, "", ""},
+			{"", 3, 0, 9, 4195152, 0, "", ""},
+			{"", 3, 0, 10, 4195176, 0, "", ""},
+			{"", 3, 0, 11, 4195224, 0, "", ""},
+			{"", 3, 0, 12, 4195248, 0, "", ""},
+			{"", 3, 0, 13, 4195296, 0, "", ""},
+			{"", 3, 0, 14, 4195732, 0, "", ""},
+			{"", 3, 0, 15, 4195748, 0, "", ""},
+			{"", 3, 0, 16, 4195768, 0, "", ""},
+			{"", 3, 0, 17, 4195808, 0, "", ""},
+			{"", 3, 0, 18, 6293128, 0, "", ""},
+			{"", 3, 0, 19, 6293144, 0, "", ""},
+			{"", 3, 0, 20, 6293160, 0, "", ""},
+			{"", 3, 0, 21, 6293168, 0, "", ""},
+			{"", 3, 0, 22, 6293584, 0, "", ""},
+			{"", 3, 0, 23, 6293592, 0, "", ""},
+			{"", 3, 0, 24, 6293632, 0, "", ""},
+			{"", 3, 0, 25, 6293656, 0, "", ""},
+			{"", 3, 0, 26, 0, 0, "", ""},
+			{"", 3, 0, 27, 0, 0, "", ""},
+			{"", 3, 0, 28, 0, 0, "", ""},
+			{"", 3, 0, 29, 0, 0, "", ""},
+			{"", 3, 0, 30, 0, 0, "", ""},
+			{"", 3, 0, 31, 0, 0, "", ""},
+			{"", 3, 0, 32, 0, 0, "", ""},
+			{"", 3, 0, 33, 0, 0, "", ""},
+			{"init.c", 4, 0, 65521, 0, 0, "", ""},
+			{"initfini.c", 4, 0, 65521, 0, 0, "", ""},
+			{"call_gmon_start", 2, 0, 13, 4195340, 0, "", ""},
+			{"crtstuff.c", 4, 0, 65521, 0, 0, "", ""},
+			{"__CTOR_LIST__", 1, 0, 18, 6293128, 0, "", ""},
+			{"__DTOR_LIST__", 1, 0, 19, 6293144, 0, "", ""},
+			{"__JCR_LIST__", 1, 0, 20, 6293160, 0, "", ""},
+			{"__do_global_dtors_aux", 2, 0, 13, 4195376, 0, "", ""},
+			{"completed.6183", 1, 0, 25, 6293656, 1, "", ""},
+			{"p.6181", 1, 0, 24, 6293648, 0, "", ""},
+			{"frame_dummy", 2, 0, 13, 4195440, 0, "", ""},
+			{"crtstuff.c", 4, 0, 65521, 0, 0, "", ""},
+			{"__CTOR_END__", 1, 0, 18, 6293136, 0, "", ""},
+			{"__DTOR_END__", 1, 0, 19, 6293152, 0, "", ""},
+			{"__FRAME_END__", 1, 0, 17, 4195968, 0, "", ""},
+			{"__JCR_END__", 1, 0, 20, 6293160, 0, "", ""},
+			{"__do_global_ctors_aux", 2, 0, 13, 4195680, 0, "", ""},
+			{"initfini.c", 4, 0, 65521, 0, 0, "", ""},
+			{"hello.c", 4, 0, 65521, 0, 0, "", ""},
+			{"_GLOBAL_OFFSET_TABLE_", 1, 2, 23, 6293592, 0, "", ""},
+			{"__init_array_end", 0, 2, 18, 6293124, 0, "", ""},
+			{"__init_array_start", 0, 2, 18, 6293124, 0, "", ""},
+			{"_DYNAMIC", 1, 2, 21, 6293168, 0, "", ""},
+			{"data_start", 32, 0, 24, 6293632, 0, "", ""},
+			{"__libc_csu_fini", 18, 0, 13, 4195520, 2, "", ""},
+			{"_start", 18, 0, 13, 4195296, 0, "", ""},
+			{"__gmon_start__", 32, 0, 0, 0, 0, "", ""},
+			{"_Jv_RegisterClasses", 32, 0, 0, 0, 0, "", ""},
+			{"puts@@GLIBC_2.2.5", 18, 0, 0, 0, 396, "", ""},
+			{"_fini", 18, 0, 14, 4195732, 0, "", ""},
+			{"__libc_start_main@@GLIBC_2.2.5", 18, 0, 0, 0, 450, "", ""},
+			{"_IO_stdin_used", 17, 0, 15, 4195748, 4, "", ""},
+			{"__data_start", 16, 0, 24, 6293632, 0, "", ""},
+			{"__dso_handle", 17, 2, 24, 6293640, 0, "", ""},
+			{"__libc_csu_init", 18, 0, 13, 4195536, 137, "", ""},
+			{"__bss_start", 16, 0, 65521, 6293656, 0, "", ""},
+			{"_end", 16, 0, 65521, 6293664, 0, "", ""},
+			{"_edata", 16, 0, 65521, 6293656, 0, "", ""},
+			{"main", 18, 0, 13, 4195480, 27, "", ""},
+			{"_init", 18, 0, 11, 4195224, 0, "", ""},
+		},
 	},
 	{
 		"testdata/hello-world-core.gz",
@@ -153,6 +307,7 @@
 			{Type: PT_LOAD, Flags: PF_X + PF_R, Off: 0x3b000, Vaddr: 0xffffffffff600000, Paddr: 0x0, Filesz: 0x1000, Memsz: 0x1000, Align: 0x1000},
 		},
 		nil,
+		nil,
 	},
 	{
 		"testdata/compressed-32.obj",
@@ -182,6 +337,23 @@
 		},
 		[]ProgHeader{},
 		nil,
+		[]Symbol{
+			{"hello.c", 4, 0, 65521, 0, 0, "", ""},
+			{"", 3, 0, 1, 0, 0, "", ""},
+			{"", 3, 0, 3, 0, 0, "", ""},
+			{"", 3, 0, 4, 0, 0, "", ""},
+			{"", 3, 0, 5, 0, 0, "", ""},
+			{"", 3, 0, 6, 0, 0, "", ""},
+			{"", 3, 0, 8, 0, 0, "", ""},
+			{"", 3, 0, 9, 0, 0, "", ""},
+			{"", 3, 0, 11, 0, 0, "", ""},
+			{"", 3, 0, 13, 0, 0, "", ""},
+			{"", 3, 0, 15, 0, 0, "", ""},
+			{"", 3, 0, 16, 0, 0, "", ""},
+			{"", 3, 0, 14, 0, 0, "", ""},
+			{"main", 18, 0, 1, 0, 23, "", ""},
+			{"puts", 16, 0, 0, 0, 0, "", ""},
+		},
 	},
 	{
 		"testdata/compressed-64.obj",
@@ -211,6 +383,69 @@
 		},
 		[]ProgHeader{},
 		nil,
+		[]Symbol{
+			{"hello.c", 4, 0, 65521, 0, 0, "", ""},
+			{"", 3, 0, 1, 0, 0, "", ""},
+			{"", 3, 0, 3, 0, 0, "", ""},
+			{"", 3, 0, 4, 0, 0, "", ""},
+			{"", 3, 0, 5, 0, 0, "", ""},
+			{"", 3, 0, 6, 0, 0, "", ""},
+			{"", 3, 0, 8, 0, 0, "", ""},
+			{"", 3, 0, 9, 0, 0, "", ""},
+			{"", 3, 0, 11, 0, 0, "", ""},
+			{"", 3, 0, 13, 0, 0, "", ""},
+			{"", 3, 0, 15, 0, 0, "", ""},
+			{"", 3, 0, 16, 0, 0, "", ""},
+			{"", 3, 0, 14, 0, 0, "", ""},
+			{"main", 18, 0, 1, 0, 27, "", ""},
+			{"puts", 16, 0, 0, 0, 0, "", ""},
+		},
+	},
+	{
+		"testdata/go-relocation-test-gcc620-sparc64.obj",
+		FileHeader{Class: ELFCLASS64, Data: ELFDATA2MSB, Version: EV_CURRENT, OSABI: ELFOSABI_NONE, ABIVersion: 0x0, ByteOrder: binary.BigEndian, Type: ET_REL, Machine: EM_SPARCV9, Entry: 0x0},
+		[]SectionHeader{
+			{"", SHT_NULL, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+			{".text", SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR, 0x0, 0x40, 0x2c, 0x0, 0x0, 0x4, 0x0, 0x2c},
+			{".rela.text", SHT_RELA, SHF_INFO_LINK, 0x0, 0xa58, 0x48, 0x13, 0x1, 0x8, 0x18, 0x48},
+			{".data", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x0, 0x6c, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0},
+			{".bss", SHT_NOBITS, SHF_WRITE + SHF_ALLOC, 0x0, 0x6c, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0},
+			{".rodata", SHT_PROGBITS, SHF_ALLOC, 0x0, 0x70, 0xd, 0x0, 0x0, 0x8, 0x0, 0xd},
+			{".debug_info", SHT_PROGBITS, 0x0, 0x0, 0x7d, 0x346, 0x0, 0x0, 0x1, 0x0, 0x346},
+			{".rela.debug_info", SHT_RELA, SHF_INFO_LINK, 0x0, 0xaa0, 0x630, 0x13, 0x6, 0x8, 0x18, 0x630},
+			{".debug_abbrev", SHT_PROGBITS, 0x0, 0x0, 0x3c3, 0xf1, 0x0, 0x0, 0x1, 0x0, 0xf1},
+			{".debug_aranges", SHT_PROGBITS, 0x0, 0x0, 0x4b4, 0x30, 0x0, 0x0, 0x1, 0x0, 0x30},
+			{".rela.debug_aranges", SHT_RELA, SHF_INFO_LINK, 0x0, 0x10d0, 0x30, 0x13, 0x9, 0x8, 0x18, 0x30},
+			{".debug_line", SHT_PROGBITS, 0x0, 0x0, 0x4e4, 0xd3, 0x0, 0x0, 0x1, 0x0, 0xd3},
+			{".rela.debug_line", SHT_RELA, SHF_INFO_LINK, 0x0, 0x1100, 0x18, 0x13, 0xb, 0x8, 0x18, 0x18},
+			{".debug_str", SHT_PROGBITS, SHF_MERGE + SHF_STRINGS, 0x0, 0x5b7, 0x2a3, 0x0, 0x0, 0x1, 0x1, 0x2a3},
+			{".comment", SHT_PROGBITS, SHF_MERGE + SHF_STRINGS, 0x0, 0x85a, 0x2e, 0x0, 0x0, 0x1, 0x1, 0x2e},
+			{".note.GNU-stack", SHT_PROGBITS, 0x0, 0x0, 0x888, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0},
+			{".debug_frame", SHT_PROGBITS, 0x0, 0x0, 0x888, 0x38, 0x0, 0x0, 0x8, 0x0, 0x38},
+			{".rela.debug_frame", SHT_RELA, SHF_INFO_LINK, 0x0, 0x1118, 0x30, 0x13, 0x10, 0x8, 0x18, 0x30},
+			{".shstrtab", SHT_STRTAB, 0x0, 0x0, 0x1148, 0xb3, 0x0, 0x0, 0x1, 0x0, 0xb3},
+			{".symtab", SHT_SYMTAB, 0x0, 0x0, 0x8c0, 0x180, 0x14, 0xe, 0x8, 0x18, 0x180},
+			{".strtab", SHT_STRTAB, 0x0, 0x0, 0xa40, 0x13, 0x0, 0x0, 0x1, 0x0, 0x13},
+		},
+		[]ProgHeader{},
+		nil,
+		[]Symbol{
+			{"hello.c", 4, 0, 65521, 0, 0, "", ""},
+			{"", 3, 0, 1, 0, 0, "", ""},
+			{"", 3, 0, 3, 0, 0, "", ""},
+			{"", 3, 0, 4, 0, 0, "", ""},
+			{"", 3, 0, 5, 0, 0, "", ""},
+			{"", 3, 0, 6, 0, 0, "", ""},
+			{"", 3, 0, 8, 0, 0, "", ""},
+			{"", 3, 0, 9, 0, 0, "", ""},
+			{"", 3, 0, 11, 0, 0, "", ""},
+			{"", 3, 0, 13, 0, 0, "", ""},
+			{"", 3, 0, 15, 0, 0, "", ""},
+			{"", 3, 0, 16, 0, 0, "", ""},
+			{"", 3, 0, 14, 0, 0, "", ""},
+			{"main", 18, 0, 1, 0, 44, "", ""},
+			{"puts", 16, 0, 0, 0, 0, "", ""},
+		},
 	},
 }
 
@@ -273,6 +508,22 @@
 		if !reflect.DeepEqual(tl, fl) {
 			t.Errorf("open %s: DT_NEEDED = %v, want %v", tt.file, tl, fl)
 		}
+		symbols, err := f.Symbols()
+		if tt.symbols == nil {
+			if !errors.Is(err, ErrNoSymbols) {
+				t.Errorf("open %s: Symbols() expected ErrNoSymbols, have nil", tt.file)
+			}
+			if symbols != nil {
+				t.Errorf("open %s: Symbols() expected no symbols, have %v", tt.file, symbols)
+			}
+		} else {
+			if err != nil {
+				t.Errorf("open %s: Symbols() unexpected error %v", tt.file, err)
+			}
+			if !slices.Equal(symbols, tt.symbols) {
+				t.Errorf("open %s: Symbols() = %v, want %v", tt.file, symbols, tt.symbols)
+			}
+		}
 	}
 }
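
For reference, a minimal sketch of how a caller consumes the API this new test exercises: open an ELF file, treat ErrNoSymbols as "stripped binary" rather than a hard failure, and otherwise walk the table. The input path is hypothetical.

	package main

	import (
		"debug/elf"
		"errors"
		"fmt"
		"log"
	)

	func main() {
		f, err := elf.Open("testdata/gcc-amd64-linux-exec") // hypothetical path
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		syms, err := f.Symbols()
		if errors.Is(err, elf.ErrNoSymbols) {
			fmt.Println("no symbol table (stripped binary)")
			return
		}
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range syms {
			fmt.Printf("%s at %#x (%d bytes)\n", s.Name, s.Value, s.Size)
		}
	}
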
 
@@ -1088,7 +1339,7 @@
 		binary.Write(&buf, binary.LittleEndian, Section32{
 			Name:      0x1B,
 			Type:      uint32(SHT_PROGBITS),
-			Flags:     uint32(uint32(SHF_ALLOC | SHF_EXECINSTR)),
+			Flags:     uint32(SHF_ALLOC | SHF_EXECINSTR),
 			Off:       0x34,
 			Addralign: 0x01,
 		})
@@ -1288,3 +1539,41 @@
 		t.Errorf("DWARF = %v; want %q", err, want)
 	}
 }
+
+func BenchmarkSymbols64(b *testing.B) {
+	const testdata = "testdata/gcc-amd64-linux-exec"
+	f, err := Open(testdata)
+	if err != nil {
+		b.Fatalf("could not read %s: %v", testdata, err)
+	}
+	defer f.Close()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		symbols, err := f.Symbols()
+		if err != nil {
+			b.Fatalf("Symbols(): got unexpected error %v", err)
+		}
+		if len(symbols) != 73 {
+			b.Errorf("\nhave %d symbols\nwant %d symbols\n", len(symbols), 73)
+		}
+	}
+}
+
+func BenchmarkSymbols32(b *testing.B) {
+	const testdata = "testdata/gcc-386-freebsd-exec"
+	f, err := Open(testdata)
+	if err != nil {
+		b.Fatalf("could not read %s: %v", testdata, err)
+	}
+	defer f.Close()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		symbols, err := f.Symbols()
+		if err != nil {
+			b.Fatalf("Symbols(): got unexpected error %v", err)
+		}
+		if len(symbols) != 74 {
+			b.Errorf("\nhave %d symbols\nwant %d symbols\n", len(symbols), 74)
+		}
+	}
+}
diff --git a/src/debug/elf/reader.go b/src/debug/elf/reader.go
index a458436..eab4373 100644
--- a/src/debug/elf/reader.go
+++ b/src/debug/elf/reader.go
@@ -63,11 +63,11 @@
 func (r *readSeekerFromReader) Seek(offset int64, whence int) (int64, error) {
 	var newOffset int64
 	switch whence {
-	case seekStart:
+	case io.SeekStart:
 		newOffset = offset
-	case seekCurrent:
+	case io.SeekCurrent:
 		newOffset = r.offset + offset
-	case seekEnd:
+	case io.SeekEnd:
 		newOffset = r.size + offset
 	default:
 		return 0, os.ErrInvalid
diff --git a/src/debug/gosym/pclntab.go b/src/debug/gosym/pclntab.go
index a87e6cf..6592932 100644
--- a/src/debug/gosym/pclntab.go
+++ b/src/debug/gosym/pclntab.go
@@ -29,7 +29,7 @@
 
 // A LineTable is a data structure mapping program counters to line numbers.
 //
-// In Go 1.1 and earlier, each function (represented by a Func) had its own LineTable,
+// In Go 1.1 and earlier, each function (represented by a [Func]) had its own LineTable,
 // and the line number corresponded to a numbering of all source lines in the
 // program, across all files. That absolute line number would then have to be
 // converted separately to a file name and line number within the file.
@@ -39,7 +39,7 @@
 // numbers, just line numbers within specific files.
 //
 // For the most part, LineTable's methods should be treated as an internal
-// detail of the package; callers should use the methods on Table instead.
+// detail of the package; callers should use the methods on [Table] instead.
 type LineTable struct {
 	Data []byte
 	PC   uint64
diff --git a/src/debug/gosym/symtab.go b/src/debug/gosym/symtab.go
index d87b312..bf38927 100644
--- a/src/debug/gosym/symtab.go
+++ b/src/debug/gosym/symtab.go
@@ -567,7 +567,7 @@
 }
 
 // LineToPC looks up the first program counter on the given line in
-// the named file. It returns UnknownPathError or UnknownLineError if
+// the named file. It returns [UnknownFileError] or [UnknownLineError] if
 // there is an error looking up this line.
 func (t *Table) LineToPC(file string, line int) (pc uint64, fn *Func, err error) {
 	obj, ok := t.Files[file]
diff --git a/src/debug/macho/fat.go b/src/debug/macho/fat.go
index 679cefb..f9601f8 100644
--- a/src/debug/macho/fat.go
+++ b/src/debug/macho/fat.go
@@ -36,11 +36,11 @@
 	*File
 }
 
-// ErrNotFat is returned from NewFatFile or OpenFat when the file is not a
+// ErrNotFat is returned from [NewFatFile] or [OpenFat] when the file is not a
 // universal binary but may be a thin binary, based on its magic number.
 var ErrNotFat = &FormatError{0, "not a fat Mach-O file", nil}
 
-// NewFatFile creates a new FatFile for accessing all the Mach-O images in a
+// NewFatFile creates a new [FatFile] for accessing all the Mach-O images in a
 // universal binary. The Mach-O binary is expected to start at position 0 in
 // the ReaderAt.
 func NewFatFile(r io.ReaderAt) (*FatFile, error) {
@@ -86,7 +86,7 @@
 
 	// Following the fat_header comes narch fat_arch structs that index
 	// Mach-O images further in the file.
-	c := saferio.SliceCap((*FatArch)(nil), uint64(narch))
+	c := saferio.SliceCap[FatArch](uint64(narch))
 	if c < 0 {
 		return nil, &FormatError{offset, "too many images", nil}
 	}
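
The call above now uses the type-parameterized form of internal/saferio.SliceCap. That package cannot be imported outside the standard library, so the following is only a hedged sketch of the pattern it serves (guarding a slice allocation against an untrusted element count read from a file header); the name sliceCap, the placeholder element type, and the 1 MiB limit are assumptions for illustration, not the real implementation.

	package main

	import (
		"fmt"
		"unsafe"
	)

	// sliceCap is a hypothetical stand-in for a generic "safe capacity" helper:
	// it returns an initial capacity for a slice of n elements of type E, clamped
	// so that an attacker-controlled count cannot force a huge up-front allocation,
	// or -1 if n is invalid. The 1 MiB chunk limit is assumed for this sketch.
	func sliceCap[E any](n uint64) int {
		const chunk = 1 << 20 // assumed cap on the up-front allocation, in bytes
		var zero E
		size := uint64(unsafe.Sizeof(zero))
		if size > 0 && n > (1<<63-1)/size {
			return -1 // n elements of E cannot be addressed at all
		}
		if n > uint64(^uint(0)>>1) {
			return -1 // would overflow int
		}
		if size*n > chunk {
			return int(chunk / size) // start small and let append grow the slice
		}
		return int(n)
	}

	func main() {
		fmt.Println(sliceCap[[20]byte](4))       // small count: allocate exactly 4
		fmt.Println(sliceCap[[20]byte](1 << 40)) // huge count: clamp the initial capacity
	}
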
@@ -127,7 +127,7 @@
 	return &ff, nil
 }
 
-// OpenFat opens the named file using os.Open and prepares it for use as a Mach-O
+// OpenFat opens the named file using [os.Open] and prepares it for use as a Mach-O
 // universal binary.
 func OpenFat(name string) (*FatFile, error) {
 	f, err := os.Open(name)
diff --git a/src/debug/macho/file.go b/src/debug/macho/file.go
index 7cba339..7b11bb2 100644
--- a/src/debug/macho/file.go
+++ b/src/debug/macho/file.go
@@ -197,7 +197,7 @@
 	return msg
 }
 
-// Open opens the named file using os.Open and prepares it for use as a Mach-O binary.
+// Open opens the named file using [os.Open] and prepares it for use as a Mach-O binary.
 func Open(name string) (*File, error) {
 	f, err := os.Open(name)
 	if err != nil {
@@ -212,8 +212,8 @@
 	return ff, nil
 }
 
-// Close closes the File.
-// If the File was created using NewFile directly instead of Open,
+// Close closes the [File].
+// If the [File] was created using [NewFile] directly instead of [Open],
 // Close has no effect.
 func (f *File) Close() error {
 	var err error
@@ -224,7 +224,7 @@
 	return err
 }
 
-// NewFile creates a new File for accessing a Mach-O binary in an underlying reader.
+// NewFile creates a new [File] for accessing a Mach-O binary in an underlying reader.
 // The Mach-O binary is expected to start at position 0 in the ReaderAt.
 func NewFile(r io.ReaderAt) (*File, error) {
 	f := new(File)
@@ -263,7 +263,7 @@
 	if err != nil {
 		return nil, err
 	}
-	c := saferio.SliceCap((*Load)(nil), uint64(f.Ncmd))
+	c := saferio.SliceCap[Load](uint64(f.Ncmd))
 	if c < 0 {
 		return nil, &FormatError{offset, "too many load commands", nil}
 	}
@@ -472,7 +472,7 @@
 
 func (f *File) parseSymtab(symdat, strtab, cmddat []byte, hdr *SymtabCmd, offset int64) (*Symtab, error) {
 	bo := f.ByteOrder
-	c := saferio.SliceCap((*Symbol)(nil), uint64(hdr.Nsyms))
+	c := saferio.SliceCap[Symbol](uint64(hdr.Nsyms))
 	if c < 0 {
 		return nil, &FormatError{offset, "too many symbols", nil}
 	}
diff --git a/src/debug/pe/file.go b/src/debug/pe/file.go
index 06c1601..ed63a11 100644
--- a/src/debug/pe/file.go
+++ b/src/debug/pe/file.go
@@ -27,9 +27,6 @@
 	"strings"
 )
 
-// Avoid use of post-Go 1.4 io features, to make safe for toolchain bootstrap.
-const seekStart = 0
-
 // A File represents an open PE file.
 type File struct {
 	FileHeader
@@ -42,7 +39,7 @@
 	closer io.Closer
 }
 
-// Open opens the named file using os.Open and prepares it for use as a PE binary.
+// Open opens the named file using [os.Open] and prepares it for use as a PE binary.
 func Open(name string) (*File, error) {
 	f, err := os.Open(name)
 	if err != nil {
@@ -57,8 +54,8 @@
 	return ff, nil
 }
 
-// Close closes the File.
-// If the File was created using NewFile directly instead of Open,
+// Close closes the [File].
+// If the [File] was created using [NewFile] directly instead of [Open],
 // Close has no effect.
 func (f *File) Close() error {
 	var err error
@@ -71,7 +68,7 @@
 
 // TODO(brainman): add Load function, as a replacement for NewFile, that does not call removeAuxSymbols (for performance)
 
-// NewFile creates a new File for accessing a PE binary in an underlying reader.
+// NewFile creates a new [File] for accessing a PE binary in an underlying reader.
 func NewFile(r io.ReaderAt) (*File, error) {
 	f := new(File)
 	sr := io.NewSectionReader(r, 0, 1<<63-1)
@@ -92,7 +89,7 @@
 	} else {
 		base = int64(0)
 	}
-	sr.Seek(base, seekStart)
+	sr.Seek(base, io.SeekStart)
 	if err := binary.Read(sr, binary.LittleEndian, &f.FileHeader); err != nil {
 		return nil, err
 	}
@@ -129,7 +126,7 @@
 	}
 
 	// Seek past file header.
-	_, err = sr.Seek(base+int64(binary.Size(f.FileHeader)), seekStart)
+	_, err = sr.Seek(base+int64(binary.Size(f.FileHeader)), io.SeekStart)
 	if err != nil {
 		return nil, err
 	}
diff --git a/src/debug/pe/section.go b/src/debug/pe/section.go
index 70d0c22..f34134b 100644
--- a/src/debug/pe/section.go
+++ b/src/debug/pe/section.go
@@ -54,7 +54,7 @@
 	if sh.NumberOfRelocations <= 0 {
 		return nil, nil
 	}
-	_, err := r.Seek(int64(sh.PointerToRelocations), seekStart)
+	_, err := r.Seek(int64(sh.PointerToRelocations), io.SeekStart)
 	if err != nil {
 		return nil, fmt.Errorf("fail to seek to %q section relocations: %v", sh.Name, err)
 	}
@@ -66,7 +66,7 @@
 	return relocs, nil
 }
 
-// SectionHeader is similar to SectionHeader32 with Name
+// SectionHeader is similar to [SectionHeader32] with Name
 // field replaced by Go string.
 type SectionHeader struct {
 	Name                 string
diff --git a/src/debug/pe/string.go b/src/debug/pe/string.go
index a156bbe..6cd08ae 100644
--- a/src/debug/pe/string.go
+++ b/src/debug/pe/string.go
@@ -31,7 +31,7 @@
 		return nil, nil
 	}
 	offset := fh.PointerToSymbolTable + COFFSymbolSize*fh.NumberOfSymbols
-	_, err := r.Seek(int64(offset), seekStart)
+	_, err := r.Seek(int64(offset), io.SeekStart)
 	if err != nil {
 		return nil, fmt.Errorf("fail to seek to string table: %v", err)
 	}
diff --git a/src/debug/pe/symbol.go b/src/debug/pe/symbol.go
index c33a4fc..6e8d9d1 100644
--- a/src/debug/pe/symbol.go
+++ b/src/debug/pe/symbol.go
@@ -55,11 +55,11 @@
 	if fh.NumberOfSymbols <= 0 {
 		return nil, nil
 	}
-	_, err := r.Seek(int64(fh.PointerToSymbolTable), seekStart)
+	_, err := r.Seek(int64(fh.PointerToSymbolTable), io.SeekStart)
 	if err != nil {
 		return nil, fmt.Errorf("fail to seek to symbol table: %v", err)
 	}
-	c := saferio.SliceCap((*COFFSymbol)(nil), uint64(fh.NumberOfSymbols))
+	c := saferio.SliceCap[COFFSymbol](uint64(fh.NumberOfSymbols))
 	if c < 0 {
 		return nil, errors.New("too many symbols; file may be corrupt")
 	}
@@ -141,7 +141,7 @@
 	return syms, nil
 }
 
-// Symbol is similar to COFFSymbol with Name field replaced
+// Symbol is similar to [COFFSymbol] with Name field replaced
 // by Go string. Symbol also does not have NumberOfAuxSymbols.
 type Symbol struct {
 	Name          string
@@ -182,7 +182,7 @@
 
 // COFFSymbolReadSectionDefAux returns a blob of auxiliary information
 // (including COMDAT info) for a section definition symbol. Here 'idx'
-// is the index of a section symbol in the main COFFSymbol array for
+// is the index of a section symbol in the main [COFFSymbol] array for
 // the File. Return value is a pointer to the appropriate aux symbol
 // struct. For more info, see:
 //
diff --git a/src/debug/plan9obj/file.go b/src/debug/plan9obj/file.go
index ad74c72..0880c3c 100644
--- a/src/debug/plan9obj/file.go
+++ b/src/debug/plan9obj/file.go
@@ -100,7 +100,7 @@
 	return msg
 }
 
-// Open opens the named file using os.Open and prepares it for use as a Plan 9 a.out binary.
+// Open opens the named file using [os.Open] and prepares it for use as a Plan 9 a.out binary.
 func Open(name string) (*File, error) {
 	f, err := os.Open(name)
 	if err != nil {
@@ -115,8 +115,8 @@
 	return ff, nil
 }
 
-// Close closes the File.
-// If the File was created using NewFile directly instead of Open,
+// Close closes the [File].
+// If the [File] was created using [NewFile] directly instead of [Open],
 // Close has no effect.
 func (f *File) Close() error {
 	var err error
@@ -136,7 +136,7 @@
 	return 0, &formatError{0, "bad magic number", magic}
 }
 
-// NewFile creates a new File for accessing a Plan 9 binary in an underlying reader.
+// NewFile creates a new [File] for accessing a Plan 9 binary in an underlying reader.
 // The Plan 9 binary is expected to start at position 0 in the ReaderAt.
 func NewFile(r io.ReaderAt) (*File, error) {
 	sr := io.NewSectionReader(r, 0, 1<<63-1)
@@ -309,7 +309,7 @@
 	return syms, nil
 }
 
-// ErrNoSymbols is returned by File.Symbols if there is no such section
+// ErrNoSymbols is returned by [File.Symbols] if there is no such section
 // in the File.
 var ErrNoSymbols = errors.New("no symbol section")
 
diff --git a/src/embed/embed.go b/src/embed/embed.go
index 8d155eb..b7bb160 100644
--- a/src/embed/embed.go
+++ b/src/embed/embed.go
@@ -5,7 +5,7 @@
 // Package embed provides access to files embedded in the running Go program.
 //
 // Go source files that import "embed" can use the //go:embed directive
-// to initialize a variable of type string, []byte, or FS with the contents of
+// to initialize a variable of type string, []byte, or [FS] with the contents of
 // files read from the package directory or subdirectories at compile time.
 //
 // For example, here are three ways to embed a file named hello.txt
@@ -45,7 +45,7 @@
 // Only blank lines and ‘//’ line comments are permitted between the directive and the declaration.
 //
 // The type of the variable must be a string type, or a slice of a byte type,
-// or FS (or an alias of FS).
+// or [FS] (or an alias of [FS]).
 //
 // For example:
 //
@@ -104,16 +104,16 @@
 // the contents of that file.
 //
 // The //go:embed directive requires importing "embed", even when using a string or []byte.
-// In source files that don't refer to embed.FS, use a blank import (import _ "embed").
+// In source files that don't refer to [embed.FS], use a blank import (import _ "embed").
 //
 // # File Systems
 //
 // For embedding a single file, a variable of type string or []byte is often best.
-// The FS type enables embedding a tree of files, such as a directory of static
+// The [FS] type enables embedding a tree of files, such as a directory of static
 // web server content, as in the example above.
 //
-// FS implements the io/fs package's FS interface, so it can be used with any package that
-// understands file systems, including net/http, text/template, and html/template.
+// FS implements the [io/fs] package's [FS] interface, so it can be used with any package that
+// understands file systems, including [net/http], [text/template], and [html/template].
 //
 // For example, given the content variable in the example above, we can write:
 //
@@ -299,9 +299,9 @@
 	return files[i:j]
 }
 
-// Open opens the named file for reading and returns it as an fs.File.
+// Open opens the named file for reading and returns it as an [fs.File].
 //
-// The returned file implements io.Seeker and io.ReaderAt when the file is not a directory.
+// The returned file implements [io.Seeker] and [io.ReaderAt] when the file is not a directory.
 func (f FS) Open(name string) (fs.File, error) {
 	file := f.lookup(name)
 	if file == nil {
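
To ground the //go:embed documentation updates above, a minimal self-contained program using each supported variable kind (string, []byte, and embed.FS); hello.txt and the static directory are hypothetical files in the package directory.

	package main

	import (
		"embed"
		"fmt"
		"io/fs"
	)

	//go:embed hello.txt
	var greeting string // whole file as a string

	//go:embed hello.txt
	var greetingBytes []byte // whole file as bytes

	//go:embed static
	var static embed.FS // a tree of files

	func main() {
		fmt.Println(greeting, len(greetingBytes))
		_ = fs.WalkDir(static, ".", func(path string, d fs.DirEntry, err error) error {
			fmt.Println(path)
			return err
		})
	}
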
diff --git a/src/embed/example_test.go b/src/embed/example_test.go
index 5498c27..b92eb52 100644
--- a/src/embed/example_test.go
+++ b/src/embed/example_test.go
@@ -14,9 +14,9 @@
 var content embed.FS
 
 func Example() {
-	mutex := http.NewServeMux()
-	mutex.Handle("/", http.FileServer(http.FS(content)))
-	err := http.ListenAndServe(":8080", mutex)
+	mux := http.NewServeMux()
+	mux.Handle("/", http.FileServer(http.FS(content)))
+	err := http.ListenAndServe(":8080", mux)
 	if err != nil {
 		log.Fatal(err)
 	}
diff --git a/src/encoding/ascii85/ascii85.go b/src/encoding/ascii85/ascii85.go
index 1f1fb00..18bf9f0 100644
--- a/src/encoding/ascii85/ascii85.go
+++ b/src/encoding/ascii85/ascii85.go
@@ -15,12 +15,12 @@
  * Encoder
  */
 
-// Encode encodes src into at most MaxEncodedLen(len(src))
+// Encode encodes src into at most [MaxEncodedLen](len(src))
 // bytes of dst, returning the actual number of bytes written.
 //
 // The encoding handles 4-byte chunks, using a special encoding
 // for the last fragment, so Encode is not appropriate for use on
-// individual blocks of a large data stream. Use NewEncoder() instead.
+// individual blocks of a large data stream. Use [NewEncoder] instead.
 //
 // Often, ascii85-encoded data is wrapped in <~ and ~> symbols.
 // Encode does not add these.
@@ -173,7 +173,7 @@
 // Decode decodes src into dst, returning both the number
 // of bytes written to dst and the number consumed from src.
 // If src contains invalid ascii85 data, Decode will return the
-// number of bytes successfully written and a CorruptInputError.
+// number of bytes successfully written and a [CorruptInputError].
 // Decode ignores space and control characters in src.
 // Often, ascii85-encoded data is wrapped in <~ and ~> symbols.
 // Decode expects these to have been stripped by the caller.
@@ -182,7 +182,7 @@
 // end of the input stream and processes it completely rather
 // than wait for the completion of another 32-bit block.
 //
-// NewDecoder wraps an io.Reader interface around Decode.
+// [NewDecoder] wraps an [io.Reader] interface around Decode.
 func Decode(dst, src []byte, flush bool) (ndst, nsrc int, err error) {
 	var v uint32
 	var nb int
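
Rounding out the ascii85 doc updates, a small usage sketch of the sizing contract described above: Encode fills at most MaxEncodedLen(len(src)) bytes and reports how many it actually wrote. The sample text is arbitrary.

	package main

	import (
		"encoding/ascii85"
		"fmt"
	)

	func main() {
		src := []byte("Hello, Gophers!")
		dst := make([]byte, ascii85.MaxEncodedLen(len(src)))
		n := ascii85.Encode(dst, src)
		fmt.Printf("%s\n", dst[:n]) // ascii85 text, without <~ ~> wrappers

		out := make([]byte, len(src))
		ndst, _, err := ascii85.Decode(out, dst[:n], true)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", out[:ndst])
	}
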
diff --git a/src/encoding/asn1/asn1.go b/src/encoding/asn1/asn1.go
index e7bf793..781ab87 100644
--- a/src/encoding/asn1/asn1.go
+++ b/src/encoding/asn1/asn1.go
@@ -211,7 +211,7 @@
 
 // NULL
 
-// NullRawValue is a RawValue with its Tag set to the ASN.1 NULL type tag (5).
+// NullRawValue is a [RawValue] with its Tag set to the ASN.1 NULL type tag (5).
 var NullRawValue = RawValue{Tag: TagNull}
 
 // NullBytes contains bytes representing the DER-encoded ASN.1 NULL type.
@@ -656,14 +656,14 @@
 }
 
 var (
-	bitStringType        = reflect.TypeOf(BitString{})
-	objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
-	enumeratedType       = reflect.TypeOf(Enumerated(0))
-	flagType             = reflect.TypeOf(Flag(false))
-	timeType             = reflect.TypeOf(time.Time{})
-	rawValueType         = reflect.TypeOf(RawValue{})
-	rawContentsType      = reflect.TypeOf(RawContent(nil))
-	bigIntType           = reflect.TypeOf((*big.Int)(nil))
+	bitStringType        = reflect.TypeFor[BitString]()
+	objectIdentifierType = reflect.TypeFor[ObjectIdentifier]()
+	enumeratedType       = reflect.TypeFor[Enumerated]()
+	flagType             = reflect.TypeFor[Flag]()
+	timeType             = reflect.TypeFor[time.Time]()
+	rawValueType         = reflect.TypeFor[RawValue]()
+	rawContentsType      = reflect.TypeFor[RawContent]()
+	bigIntType           = reflect.TypeFor[*big.Int]()
 )
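
The hunk above swaps reflect.TypeOf on a zero (or typed-nil) value for the Go 1.22 generic reflect.TypeFor. The two forms are equivalent for these types, and TypeFor avoids the (*T)(nil) placeholder for pointer types; a small check:

	package main

	import (
		"fmt"
		"math/big"
		"reflect"
		"time"
	)

	func main() {
		// Concrete type: both forms yield the same reflect.Type.
		fmt.Println(reflect.TypeFor[time.Time]() == reflect.TypeOf(time.Time{})) // true

		// Pointer type: TypeFor needs no typed-nil placeholder.
		fmt.Println(reflect.TypeFor[*big.Int]() == reflect.TypeOf((*big.Int)(nil))) // true
	}
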
 
 // invalidLength reports whether offset + length > sliceLength, or if the
@@ -1031,34 +1031,33 @@
 // fields in val will not be included in rest, as these are considered
 // valid elements of the SEQUENCE and not trailing data.
 //
-// An ASN.1 INTEGER can be written to an int, int32, int64,
-// or *big.Int (from the math/big package).
-// If the encoded value does not fit in the Go type,
-// Unmarshal returns a parse error.
+//   - An ASN.1 INTEGER can be written to an int, int32, int64,
+//     or *[big.Int].
+//     If the encoded value does not fit in the Go type,
+//     Unmarshal returns a parse error.
 //
-// An ASN.1 BIT STRING can be written to a BitString.
+//   - An ASN.1 BIT STRING can be written to a [BitString].
 //
-// An ASN.1 OCTET STRING can be written to a []byte.
+//   - An ASN.1 OCTET STRING can be written to a []byte.
 //
-// An ASN.1 OBJECT IDENTIFIER can be written to an
-// ObjectIdentifier.
+//   - An ASN.1 OBJECT IDENTIFIER can be written to an [ObjectIdentifier].
 //
-// An ASN.1 ENUMERATED can be written to an Enumerated.
+//   - An ASN.1 ENUMERATED can be written to an [Enumerated].
 //
-// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time.
+//   - An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a [time.Time].
 //
-// An ASN.1 PrintableString, IA5String, or NumericString can be written to a string.
+//   - An ASN.1 PrintableString, IA5String, or NumericString can be written to a string.
 //
-// Any of the above ASN.1 values can be written to an interface{}.
-// The value stored in the interface has the corresponding Go type.
-// For integers, that type is int64.
+//   - Any of the above ASN.1 values can be written to an interface{}.
+//     The value stored in the interface has the corresponding Go type.
+//     For integers, that type is int64.
 //
-// An ASN.1 SEQUENCE OF x or SET OF x can be written
-// to a slice if an x can be written to the slice's element type.
+//   - An ASN.1 SEQUENCE OF x or SET OF x can be written
+//     to a slice if an x can be written to the slice's element type.
 //
-// An ASN.1 SEQUENCE or SET can be written to a struct
-// if each of the elements in the sequence can be
-// written to the corresponding element in the struct.
+//   - An ASN.1 SEQUENCE or SET can be written to a struct
+//     if each of the elements in the sequence can be
+//     written to the corresponding element in the struct.
 //
 // The following tags on struct fields have special meaning to Unmarshal:
 //
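
A brief sketch of the mappings in the list above: an ASN.1 INTEGER round-trips through a Go int and a PrintableString through a Go string. The record struct is invented for illustration.

	package main

	import (
		"encoding/asn1"
		"fmt"
		"log"
	)

	type record struct {
		Serial int    // ASN.1 INTEGER
		Name   string // ASN.1 PrintableString
	}

	func main() {
		der, err := asn1.Marshal(record{Serial: 42, Name: "gopher"})
		if err != nil {
			log.Fatal(err)
		}

		var out record
		rest, err := asn1.Unmarshal(der, &out)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%+v, %d trailing bytes\n", out, len(rest))
	}
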
diff --git a/src/encoding/asn1/marshal.go b/src/encoding/asn1/marshal.go
index c243349..d8c8fe1 100644
--- a/src/encoding/asn1/marshal.go
+++ b/src/encoding/asn1/marshal.go
@@ -721,7 +721,7 @@
 
 // Marshal returns the ASN.1 encoding of val.
 //
-// In addition to the struct tags recognised by Unmarshal, the following can be
+// In addition to the struct tags recognized by Unmarshal, the following can be
 // used:
 //
 //	ia5:         causes strings to be marshaled as ASN.1, IA5String values
diff --git a/src/encoding/base32/base32.go b/src/encoding/base32/base32.go
index 3dc37b0..4a61199 100644
--- a/src/encoding/base32/base32.go
+++ b/src/encoding/base32/base32.go
@@ -7,6 +7,7 @@
 
 import (
 	"io"
+	"slices"
 	"strconv"
 )
 
@@ -19,15 +20,18 @@
 // introduced for SASL GSSAPI and standardized in RFC 4648.
 // The alternate "base32hex" encoding is used in DNSSEC.
 type Encoding struct {
-	encode    [32]byte
-	decodeMap [256]byte
+	encode    [32]byte   // mapping of symbol index to symbol byte value
+	decodeMap [256]uint8 // mapping of symbol byte value to symbol index
 	padChar   rune
 }
 
 const (
-	StdPadding          rune = '=' // Standard padding character
-	NoPadding           rune = -1  // No padding
-	decodeMapInitialize      = "" +
+	StdPadding rune = '=' // Standard padding character
+	NoPadding  rune = -1  // No padding
+)
+
+const (
+	decodeMapInitialize = "" +
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
@@ -44,14 +48,16 @@
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+	invalidIndex = '\xff'
 )
 
-const encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
-const encodeHex = "0123456789ABCDEFGHIJKLMNOPQRSTUV"
-
-// NewEncoding returns a new Encoding defined by the given alphabet,
-// which must be a 32-byte string. The alphabet is treated as sequence
-// of byte values without any special treatment for multi-byte UTF-8.
+// NewEncoding returns a new padded Encoding defined by the given alphabet,
+// which must be a 32-byte string that contains unique byte values and
+// does not contain the padding character or CR / LF ('\r', '\n').
+// The alphabet is treated as a sequence of byte values
+// without any special treatment for multi-byte UTF-8.
+// The resulting Encoding uses the default padding character ('='),
+// which may be changed or disabled via [Encoding.WithPadding].
 func NewEncoding(encoder string) *Encoding {
 	if len(encoder) != 32 {
 		panic("encoding alphabet is not 32-bytes long")
@@ -63,37 +69,41 @@
 	copy(e.decodeMap[:], decodeMapInitialize)
 
 	for i := 0; i < len(encoder); i++ {
-		e.decodeMap[encoder[i]] = byte(i)
+		// Note: While we document that the alphabet cannot contain
+		// the padding character, we do not enforce it since we do not know
+		// if the caller intends to switch the padding from StdPadding later.
+		switch {
+		case encoder[i] == '\n' || encoder[i] == '\r':
+			panic("encoding alphabet contains newline character")
+		case e.decodeMap[encoder[i]] != invalidIndex:
+			panic("encoding alphabet includes duplicate symbols")
+		}
+		e.decodeMap[encoder[i]] = uint8(i)
 	}
 	return e
 }
 
-// StdEncoding is the standard base32 encoding, as defined in
-// RFC 4648.
-var StdEncoding = NewEncoding(encodeStd)
+// StdEncoding is the standard base32 encoding, as defined in RFC 4648.
+var StdEncoding = NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZ234567")
 
 // HexEncoding is the “Extended Hex Alphabet” defined in RFC 4648.
 // It is typically used in DNS.
-var HexEncoding = NewEncoding(encodeHex)
+var HexEncoding = NewEncoding("0123456789ABCDEFGHIJKLMNOPQRSTUV")
 
 // WithPadding creates a new encoding identical to enc except
 // with a specified padding character, or NoPadding to disable padding.
-// The padding character must not be '\r' or '\n', must not
-// be contained in the encoding's alphabet and must be a rune equal or
-// below '\xff'.
+// The padding character must not be '\r' or '\n',
+// must not be contained in the encoding's alphabet,
+// must not be negative, and must be a rune equal to or below '\xff'.
 // Padding characters above '\x7f' are encoded as their exact byte value
 // rather than using the UTF-8 representation of the codepoint.
 func (enc Encoding) WithPadding(padding rune) *Encoding {
-	if padding == '\r' || padding == '\n' || padding > 0xff {
+	switch {
+	case padding < NoPadding || padding == '\r' || padding == '\n' || padding > 0xff:
 		panic("invalid padding")
+	case padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex:
+		panic("padding contained in alphabet")
 	}
-
-	for i := 0; i < len(enc.encode); i++ {
-		if rune(enc.encode[i]) == padding {
-			panic("padding contained in alphabet")
-		}
-	}
-
 	enc.padChar = padding
 	return &enc
 }
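
WithPadding now also rejects a padding rune that collides with the alphabet, via the decode map; typical use is unchanged, for example:

	package main

	import (
		"encoding/base32"
		"fmt"
	)

	func main() {
		// Disable padding entirely (e.g. for identifiers that must stay short).
		raw := base32.StdEncoding.WithPadding(base32.NoPadding)
		fmt.Println(raw.EncodeToString([]byte("hi"))) // NBUQ

		// Swap in a different padding character.
		dash := base32.StdEncoding.WithPadding('-')
		fmt.Println(dash.EncodeToString([]byte("hi"))) // NBUQ----
	}
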
@@ -102,85 +112,87 @@
  * Encoder
  */
 
-// Encode encodes src using the encoding enc, writing
-// EncodedLen(len(src)) bytes to dst.
+// Encode encodes src using the encoding enc,
+// writing [Encoding.EncodedLen](len(src)) bytes to dst.
 //
 // The encoding pads the output to a multiple of 8 bytes,
 // so Encode is not appropriate for use on individual blocks
-// of a large data stream. Use NewEncoder() instead.
+// of a large data stream. Use [NewEncoder] instead.
 func (enc *Encoding) Encode(dst, src []byte) {
-	for len(src) > 0 {
-		var b [8]byte
-
-		// Unpack 8x 5-bit source blocks into a 5 byte
-		// destination quantum
-		switch len(src) {
-		default:
-			b[7] = src[4] & 0x1F
-			b[6] = src[4] >> 5
-			fallthrough
-		case 4:
-			b[6] |= (src[3] << 3) & 0x1F
-			b[5] = (src[3] >> 2) & 0x1F
-			b[4] = src[3] >> 7
-			fallthrough
-		case 3:
-			b[4] |= (src[2] << 1) & 0x1F
-			b[3] = (src[2] >> 4) & 0x1F
-			fallthrough
-		case 2:
-			b[3] |= (src[1] << 4) & 0x1F
-			b[2] = (src[1] >> 1) & 0x1F
-			b[1] = (src[1] >> 6) & 0x1F
-			fallthrough
-		case 1:
-			b[1] |= (src[0] << 2) & 0x1F
-			b[0] = src[0] >> 3
-		}
-
-		// Encode 5-bit blocks using the base32 alphabet
-		size := len(dst)
-		if size >= 8 {
-			// Common case, unrolled for extra performance
-			dst[0] = enc.encode[b[0]&31]
-			dst[1] = enc.encode[b[1]&31]
-			dst[2] = enc.encode[b[2]&31]
-			dst[3] = enc.encode[b[3]&31]
-			dst[4] = enc.encode[b[4]&31]
-			dst[5] = enc.encode[b[5]&31]
-			dst[6] = enc.encode[b[6]&31]
-			dst[7] = enc.encode[b[7]&31]
-		} else {
-			for i := 0; i < size; i++ {
-				dst[i] = enc.encode[b[i]&31]
-			}
-		}
-
-		// Pad the final quantum
-		if len(src) < 5 {
-			if enc.padChar == NoPadding {
-				break
-			}
-
-			dst[7] = byte(enc.padChar)
-			if len(src) < 4 {
-				dst[6] = byte(enc.padChar)
-				dst[5] = byte(enc.padChar)
-				if len(src) < 3 {
-					dst[4] = byte(enc.padChar)
-					if len(src) < 2 {
-						dst[3] = byte(enc.padChar)
-						dst[2] = byte(enc.padChar)
-					}
-				}
-			}
-
-			break
-		}
-
-		src = src[5:]
-		dst = dst[8:]
+	if len(src) == 0 {
+		return
 	}
+	// enc is a pointer receiver, so the use of enc.encode within the hot
+	// loop below means a nil check at every operation. Lift that nil check
+	// outside of the loop to speed up the encoder.
+	_ = enc.encode
+
+	di, si := 0, 0
+	n := (len(src) / 5) * 5
+	for si < n {
+		// Combining two 32 bit loads allows the same code to be used
+		// for 32 and 64 bit platforms.
+		hi := uint32(src[si+0])<<24 | uint32(src[si+1])<<16 | uint32(src[si+2])<<8 | uint32(src[si+3])
+		lo := hi<<8 | uint32(src[si+4])
+
+		dst[di+0] = enc.encode[(hi>>27)&0x1F]
+		dst[di+1] = enc.encode[(hi>>22)&0x1F]
+		dst[di+2] = enc.encode[(hi>>17)&0x1F]
+		dst[di+3] = enc.encode[(hi>>12)&0x1F]
+		dst[di+4] = enc.encode[(hi>>7)&0x1F]
+		dst[di+5] = enc.encode[(hi>>2)&0x1F]
+		dst[di+6] = enc.encode[(lo>>5)&0x1F]
+		dst[di+7] = enc.encode[(lo)&0x1F]
+
+		si += 5
+		di += 8
+	}
+
+	// Add the remaining small block
+	remain := len(src) - si
+	if remain == 0 {
+		return
+	}
+
+	// Encode the remaining bytes in reverse order.
+	val := uint32(0)
+	switch remain {
+	case 4:
+		val |= uint32(src[si+3])
+		dst[di+6] = enc.encode[val<<3&0x1F]
+		dst[di+5] = enc.encode[val>>2&0x1F]
+		fallthrough
+	case 3:
+		val |= uint32(src[si+2]) << 8
+		dst[di+4] = enc.encode[val>>7&0x1F]
+		fallthrough
+	case 2:
+		val |= uint32(src[si+1]) << 16
+		dst[di+3] = enc.encode[val>>12&0x1F]
+		dst[di+2] = enc.encode[val>>17&0x1F]
+		fallthrough
+	case 1:
+		val |= uint32(src[si+0]) << 24
+		dst[di+1] = enc.encode[val>>22&0x1F]
+		dst[di+0] = enc.encode[val>>27&0x1F]
+	}
+
+	// Pad the final quantum
+	if enc.padChar != NoPadding {
+		nPad := (remain * 8 / 5) + 1
+		for i := nPad; i < 8; i++ {
+			dst[di+i] = byte(enc.padChar)
+		}
+	}
+}
+
+// AppendEncode appends the base32 encoded src to dst
+// and returns the extended buffer.
+func (enc *Encoding) AppendEncode(dst, src []byte) []byte {
+	n := enc.EncodedLen(len(src))
+	dst = slices.Grow(dst, n)
+	enc.Encode(dst[len(dst):][:n], src)
+	return dst[:len(dst)+n]
 }
 
 // EncodeToString returns the base32 encoding of src.
@@ -271,7 +283,7 @@
 // of an input buffer of length n.
 func (enc *Encoding) EncodedLen(n int) int {
 	if enc.padChar == NoPadding {
-		return (n*8 + 4) / 5
+		return n/5*8 + (n%5*8+4)/5
 	}
 	return (n + 4) / 5 * 8
 }
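
The unpadded formula now computes whole 5-byte groups before rounding up the leftover bits, which avoids the intermediate n*8 overflow the new tests below exercise. For n = 7: 7/5*8 + (7%5*8+4)/5 = 8 + 20/5 = 12 symbols, i.e. 56 bits fit in 12 five-bit characters. A quick check against the encoder:

	package main

	import (
		"encoding/base32"
		"fmt"
	)

	func main() {
		raw := base32.StdEncoding.WithPadding(base32.NoPadding)
		src := make([]byte, 7)
		fmt.Println(raw.EncodedLen(7))                // 12
		fmt.Println(len(raw.EncodeToString(src)))     // 12, matches
		fmt.Println(base32.StdEncoding.EncodedLen(7)) // 16 with '=' padding
	}
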
@@ -374,10 +386,10 @@
 }
 
 // Decode decodes src using the encoding enc. It writes at most
-// DecodedLen(len(src)) bytes to dst and returns the number of bytes
+// [Encoding.DecodedLen](len(src)) bytes to dst and returns the number of bytes
 // written. If src contains invalid base32 data, it will return the
-// number of bytes successfully written and CorruptInputError.
-// New line characters (\r and \n) are ignored.
+// number of bytes successfully written and [CorruptInputError].
+// Newline characters (\r and \n) are ignored.
 func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
 	buf := make([]byte, len(src))
 	l := stripNewlines(buf, src)
@@ -385,6 +397,22 @@
 	return
 }
 
+// AppendDecode appends the base32 decoded src to dst
+// and returns the extended buffer.
+// If the input is malformed, it returns the partially decoded src and an error.
+func (enc *Encoding) AppendDecode(dst, src []byte) ([]byte, error) {
+	// Compute the output size without padding to avoid over allocating.
+	n := len(src)
+	for n > 0 && rune(src[n-1]) == enc.padChar {
+		n--
+	}
+	n = decodedLen(n, NoPadding)
+
+	dst = slices.Grow(dst, n)
+	n, err := enc.Decode(dst[len(dst):][:n], src)
+	return dst[:len(dst)+n], err
+}
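
Usage of the two new append-style helpers; the prefixes and payload here are arbitrary:

	package main

	import (
		"encoding/base32"
		"fmt"
	)

	func main() {
		buf := []byte("len=")
		buf = base32.StdEncoding.AppendEncode(buf, []byte("hi"))
		fmt.Println(string(buf)) // len=NBUQ====

		dec, err := base32.StdEncoding.AppendDecode([]byte("got:"), []byte("NBUQ===="))
		if err != nil {
			panic(err)
		}
		fmt.Println(string(dec)) // got:hi
	}
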
+
 // DecodeString returns the bytes represented by the base32 string s.
 func (enc *Encoding) DecodeString(s string) ([]byte, error) {
 	buf := []byte(s)
@@ -544,9 +572,12 @@
 // DecodedLen returns the maximum length in bytes of the decoded data
 // corresponding to n bytes of base32-encoded data.
 func (enc *Encoding) DecodedLen(n int) int {
-	if enc.padChar == NoPadding {
-		return n * 5 / 8
-	}
+	return decodedLen(n, enc.padChar)
+}
 
+func decodedLen(n int, padChar rune) int {
+	if padChar == NoPadding {
+		return n/8*5 + n%8*5/8
+	}
 	return n / 8 * 5
 }
diff --git a/src/encoding/base32/base32_test.go b/src/encoding/base32/base32_test.go
index 8118531..33638ad 100644
--- a/src/encoding/base32/base32_test.go
+++ b/src/encoding/base32/base32_test.go
@@ -8,6 +8,8 @@
 	"bytes"
 	"errors"
 	"io"
+	"math"
+	"strconv"
 	"strings"
 	"testing"
 )
@@ -55,6 +57,8 @@
 	for _, p := range pairs {
 		got := StdEncoding.EncodeToString([]byte(p.decoded))
 		testEqual(t, "Encode(%q) = %q, want %q", p.decoded, got, p.encoded)
+		dst := StdEncoding.AppendEncode([]byte("lead"), []byte(p.decoded))
+		testEqual(t, `AppendEncode("lead", %q) = %q, want %q`, p.decoded, string(dst), "lead"+p.encoded)
 	}
 }
 
@@ -97,13 +101,22 @@
 		if len(p.encoded) > 0 {
 			testEqual(t, "Decode(%q) = end %v, want %v", p.encoded, end, (p.encoded[len(p.encoded)-1] == '='))
 		}
-		testEqual(t, "Decode(%q) = %q, want %q", p.encoded,
-			string(dbuf[0:count]),
-			p.decoded)
+		testEqual(t, "Decode(%q) = %q, want %q", p.encoded, string(dbuf[0:count]), p.decoded)
 
 		dbuf, err = StdEncoding.DecodeString(p.encoded)
 		testEqual(t, "DecodeString(%q) = error %v, want %v", p.encoded, err, error(nil))
 		testEqual(t, "DecodeString(%q) = %q, want %q", p.encoded, string(dbuf), p.decoded)
+
+		dst, err := StdEncoding.AppendDecode([]byte("lead"), []byte(p.encoded))
+		testEqual(t, "AppendDecode(%q) = error %v, want %v", p.encoded, err, error(nil))
+		testEqual(t, `AppendDecode("lead", %q) = %q, want %q`, p.encoded, string(dst), "lead"+p.decoded)
+
+		dst2, err := StdEncoding.AppendDecode(dst[:0:len(p.decoded)], []byte(p.encoded))
+		testEqual(t, "AppendDecode(%q) = error %v, want %v", p.encoded, err, error(nil))
+		testEqual(t, `AppendDecode("", %q) = %q, want %q`, p.encoded, string(dst2), p.decoded)
+		if len(dst) > 0 && len(dst2) > 0 && &dst[0] != &dst2[0] {
+			t.Errorf("unexpected capacity growth: got %d, want %d", cap(dst2), cap(dst))
+		}
 	}
 }
 
@@ -679,52 +692,86 @@
 	}
 }
 
-func TestEncodedDecodedLen(t *testing.T) {
+func TestEncodedLen(t *testing.T) {
+	var rawStdEncoding = StdEncoding.WithPadding(NoPadding)
 	type test struct {
-		in      int
-		wantEnc int
-		wantDec int
+		enc  *Encoding
+		n    int
+		want int64
 	}
-	data := bytes.Repeat([]byte("x"), 100)
-	for _, test := range []struct {
-		name  string
-		enc   *Encoding
-		cases []test
-	}{
-		{"StdEncoding", StdEncoding, []test{
-			{0, 0, 0},
-			{1, 8, 5},
-			{5, 8, 5},
-			{6, 16, 10},
-			{10, 16, 10},
-		}},
-		{"NoPadding", StdEncoding.WithPadding(NoPadding), []test{
-			{0, 0, 0},
-			{1, 2, 1},
-			{2, 4, 2},
-			{5, 8, 5},
-			{6, 10, 6},
-			{7, 12, 7},
-			{10, 16, 10},
-			{11, 18, 11},
-		}},
-	} {
-		t.Run(test.name, func(t *testing.T) {
-			for _, tc := range test.cases {
-				encLen := test.enc.EncodedLen(tc.in)
-				decLen := test.enc.DecodedLen(encLen)
-				enc := test.enc.EncodeToString(data[:tc.in])
-				if len(enc) != encLen {
-					t.Fatalf("EncodedLen(%d) = %d but encoded to %q (%d)", tc.in, encLen, enc, len(enc))
-				}
-				if encLen != tc.wantEnc {
-					t.Fatalf("EncodedLen(%d) = %d; want %d", tc.in, encLen, tc.wantEnc)
-				}
-				if decLen != tc.wantDec {
-					t.Fatalf("DecodedLen(%d) = %d; want %d", encLen, decLen, tc.wantDec)
-				}
-			}
-		})
+	tests := []test{
+		{StdEncoding, 0, 0},
+		{StdEncoding, 1, 8},
+		{StdEncoding, 2, 8},
+		{StdEncoding, 3, 8},
+		{StdEncoding, 4, 8},
+		{StdEncoding, 5, 8},
+		{StdEncoding, 6, 16},
+		{StdEncoding, 10, 16},
+		{StdEncoding, 11, 24},
+		{rawStdEncoding, 0, 0},
+		{rawStdEncoding, 1, 2},
+		{rawStdEncoding, 2, 4},
+		{rawStdEncoding, 3, 5},
+		{rawStdEncoding, 4, 7},
+		{rawStdEncoding, 5, 8},
+		{rawStdEncoding, 6, 10},
+		{rawStdEncoding, 7, 12},
+		{rawStdEncoding, 10, 16},
+		{rawStdEncoding, 11, 18},
+	}
+	// check overflow
+	switch strconv.IntSize {
+	case 32:
+		tests = append(tests, test{rawStdEncoding, (math.MaxInt-4)/8 + 1, 429496730})
+		tests = append(tests, test{rawStdEncoding, math.MaxInt/8*5 + 4, math.MaxInt})
+	case 64:
+		tests = append(tests, test{rawStdEncoding, (math.MaxInt-4)/8 + 1, 1844674407370955162})
+		tests = append(tests, test{rawStdEncoding, math.MaxInt/8*5 + 4, math.MaxInt})
+	}
+	for _, tt := range tests {
+		if got := tt.enc.EncodedLen(tt.n); int64(got) != tt.want {
+			t.Errorf("EncodedLen(%d): got %d, want %d", tt.n, got, tt.want)
+		}
+	}
+}
+
+func TestDecodedLen(t *testing.T) {
+	var rawStdEncoding = StdEncoding.WithPadding(NoPadding)
+	type test struct {
+		enc  *Encoding
+		n    int
+		want int64
+	}
+	tests := []test{
+		{StdEncoding, 0, 0},
+		{StdEncoding, 8, 5},
+		{StdEncoding, 16, 10},
+		{StdEncoding, 24, 15},
+		{rawStdEncoding, 0, 0},
+		{rawStdEncoding, 2, 1},
+		{rawStdEncoding, 4, 2},
+		{rawStdEncoding, 5, 3},
+		{rawStdEncoding, 7, 4},
+		{rawStdEncoding, 8, 5},
+		{rawStdEncoding, 10, 6},
+		{rawStdEncoding, 12, 7},
+		{rawStdEncoding, 16, 10},
+		{rawStdEncoding, 18, 11},
+	}
+	// check overflow
+	switch strconv.IntSize {
+	case 32:
+		tests = append(tests, test{rawStdEncoding, math.MaxInt/5 + 1, 268435456})
+		tests = append(tests, test{rawStdEncoding, math.MaxInt, 1342177279})
+	case 64:
+		tests = append(tests, test{rawStdEncoding, math.MaxInt/5 + 1, 1152921504606846976})
+		tests = append(tests, test{rawStdEncoding, math.MaxInt, 5764607523034234879})
+	}
+	for _, tt := range tests {
+		if got := tt.enc.DecodedLen(tt.n); int64(got) != tt.want {
+			t.Errorf("DecodedLen(%d): got %d, want %d", tt.n, got, tt.want)
+		}
 	}
 }
 
diff --git a/src/encoding/base64/base64.go b/src/encoding/base64/base64.go
index 6aa8a15..87f4586 100644
--- a/src/encoding/base64/base64.go
+++ b/src/encoding/base64/base64.go
@@ -8,6 +8,7 @@
 import (
 	"encoding/binary"
 	"io"
+	"slices"
 	"strconv"
 )
 
@@ -21,16 +22,19 @@
 // (RFC 1421).  RFC 4648 also defines an alternate encoding, which is
 // the standard encoding with - and _ substituted for + and /.
 type Encoding struct {
-	encode    [64]byte
-	decodeMap [256]byte
+	encode    [64]byte   // mapping of symbol index to symbol byte value
+	decodeMap [256]uint8 // mapping of symbol byte value to symbol index
 	padChar   rune
 	strict    bool
 }
 
 const (
-	StdPadding          rune = '=' // Standard padding character
-	NoPadding           rune = -1  // No padding
-	decodeMapInitialize      = "" +
+	StdPadding rune = '=' // Standard padding character
+	NoPadding  rune = -1  // No padding
+)
+
+const (
+	decodeMapInitialize = "" +
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
@@ -47,26 +51,20 @@
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
 		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+	invalidIndex = '\xff'
 )
 
-const encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
-const encodeURL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
-
 // NewEncoding returns a new padded Encoding defined by the given alphabet,
-// which must be a 64-byte string that does not contain the padding character
-// or CR / LF ('\r', '\n'). The alphabet is treated as sequence of byte values
+// which must be a 64-byte string that contains unique byte values and
+// does not contain the padding character or CR / LF ('\r', '\n').
+// The alphabet is treated as a sequence of byte values
 // without any special treatment for multi-byte UTF-8.
 // The resulting Encoding uses the default padding character ('='),
-// which may be changed or disabled via WithPadding.
+// which may be changed or disabled via [Encoding.WithPadding].
 func NewEncoding(encoder string) *Encoding {
 	if len(encoder) != 64 {
 		panic("encoding alphabet is not 64-bytes long")
 	}
-	for i := 0; i < len(encoder); i++ {
-		if encoder[i] == '\n' || encoder[i] == '\r' {
-			panic("encoding alphabet contains newline character")
-		}
-	}
 
 	e := new(Encoding)
 	e.padChar = StdPadding
@@ -74,29 +72,34 @@
 	copy(e.decodeMap[:], decodeMapInitialize)
 
 	for i := 0; i < len(encoder); i++ {
-		e.decodeMap[encoder[i]] = byte(i)
+		// Note: While we document that the alphabet cannot contain
+		// the padding character, we do not enforce it since we do not know
+		// if the caller intends to switch the padding from StdPadding later.
+		switch {
+		case encoder[i] == '\n' || encoder[i] == '\r':
+			panic("encoding alphabet contains newline character")
+		case e.decodeMap[encoder[i]] != invalidIndex:
+			panic("encoding alphabet includes duplicate symbols")
+		}
+		e.decodeMap[encoder[i]] = uint8(i)
 	}
 	return e
 }
 
 // WithPadding creates a new encoding identical to enc except
-// with a specified padding character, or NoPadding to disable padding.
-// The padding character must not be '\r' or '\n', must not
-// be contained in the encoding's alphabet and must be a rune equal or
-// below '\xff'.
+// with a specified padding character, or [NoPadding] to disable padding.
+// The padding character must not be '\r' or '\n',
+// must not be contained in the encoding's alphabet,
+// must not be negative, and must be a rune equal to or below '\xff'.
 // Padding characters above '\x7f' are encoded as their exact byte value
 // rather than using the UTF-8 representation of the codepoint.
 func (enc Encoding) WithPadding(padding rune) *Encoding {
-	if padding == '\r' || padding == '\n' || padding > 0xff {
+	switch {
+	case padding < NoPadding || padding == '\r' || padding == '\n' || padding > 0xff:
 		panic("invalid padding")
+	case padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex:
+		panic("padding contained in alphabet")
 	}
-
-	for i := 0; i < len(enc.encode); i++ {
-		if rune(enc.encode[i]) == padding {
-			panic("padding contained in alphabet")
-		}
-	}
-
 	enc.padChar = padding
 	return &enc
 }
@@ -112,34 +115,33 @@
 	return &enc
 }
 
-// StdEncoding is the standard base64 encoding, as defined in
-// RFC 4648.
-var StdEncoding = NewEncoding(encodeStd)
+// StdEncoding is the standard base64 encoding, as defined in RFC 4648.
+var StdEncoding = NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
 
 // URLEncoding is the alternate base64 encoding defined in RFC 4648.
 // It is typically used in URLs and file names.
-var URLEncoding = NewEncoding(encodeURL)
+var URLEncoding = NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_")
 
 // RawStdEncoding is the standard raw, unpadded base64 encoding,
 // as defined in RFC 4648 section 3.2.
-// This is the same as StdEncoding but omits padding characters.
+// This is the same as [StdEncoding] but omits padding characters.
 var RawStdEncoding = StdEncoding.WithPadding(NoPadding)
 
 // RawURLEncoding is the unpadded alternate base64 encoding defined in RFC 4648.
 // It is typically used in URLs and file names.
-// This is the same as URLEncoding but omits padding characters.
+// This is the same as [URLEncoding] but omits padding characters.
 var RawURLEncoding = URLEncoding.WithPadding(NoPadding)
 
 /*
  * Encoder
  */
 
-// Encode encodes src using the encoding enc, writing
-// EncodedLen(len(src)) bytes to dst.
+// Encode encodes src using the encoding enc,
+// writing [Encoding.EncodedLen](len(src)) bytes to dst.
 //
 // The encoding pads the output to a multiple of 4 bytes,
 // so Encode is not appropriate for use on individual blocks
-// of a large data stream. Use NewEncoder() instead.
+// of a large data stream. Use [NewEncoder] instead.
 func (enc *Encoding) Encode(dst, src []byte) {
 	if len(src) == 0 {
 		return
@@ -191,6 +193,15 @@
 	}
 }
 
+// AppendEncode appends the base64 encoded src to dst
+// and returns the extended buffer.
+func (enc *Encoding) AppendEncode(dst, src []byte) []byte {
+	n := enc.EncodedLen(len(src))
+	dst = slices.Grow(dst, n)
+	enc.Encode(dst[len(dst):][:n], src)
+	return dst[:len(dst)+n]
+}
+
 // EncodeToString returns the base64 encoding of src.
 func (enc *Encoding) EncodeToString(src []byte) string {
 	buf := make([]byte, enc.EncodedLen(len(src)))
@@ -278,7 +289,7 @@
 // of an input buffer of length n.
 func (enc *Encoding) EncodedLen(n int) int {
 	if enc.padChar == NoPadding {
-		return (n*8 + 5) / 6 // minimum # chars at 6 bits per char
+		return n/3*4 + (n%3*8+5)/6 // minimum # chars at 6 bits per char
 	}
 	return (n + 2) / 3 * 4 // minimum # 4-char quanta, 3 bytes each
 }
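
The base64 sibling of the base32 change above: whole 3-byte groups contribute 4 characters each, and the remainder is rounded up at 6 bits per character. For n = 5: 5/3*4 + (5%3*8+5)/6 = 4 + 21/6 = 7 characters for 40 bits of input. For example:

	package main

	import (
		"encoding/base64"
		"fmt"
	)

	func main() {
		fmt.Println(base64.RawStdEncoding.EncodedLen(5)) // 7
		fmt.Println(base64.StdEncoding.EncodedLen(5))    // 8 (padded to a 4-char quantum)
	}
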
@@ -395,6 +406,22 @@
 	return si, dlen - 1, err
 }
 
+// AppendDecode appends the base64 decoded src to dst
+// and returns the extended buffer.
+// If the input is malformed, it returns the partially decoded src and an error.
+func (enc *Encoding) AppendDecode(dst, src []byte) ([]byte, error) {
+	// Compute the output size without padding to avoid over allocating.
+	n := len(src)
+	for n > 0 && rune(src[n-1]) == enc.padChar {
+		n--
+	}
+	n = decodedLen(n, NoPadding)
+
+	dst = slices.Grow(dst, n)
+	n, err := enc.Decode(dst[len(dst):][:n], src)
+	return dst[:len(dst)+n], err
+}
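
As with the base32 counterpart, a short usage sketch with arbitrary values:

	package main

	import (
		"encoding/base64"
		"fmt"
	)

	func main() {
		msg := base64.StdEncoding.AppendEncode([]byte("data:"), []byte("ok"))
		fmt.Println(string(msg)) // data:b2s=

		plain, err := base64.StdEncoding.AppendDecode(nil, []byte("b2s="))
		if err != nil {
			panic(err)
		}
		fmt.Println(string(plain)) // ok
	}
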
+
 // DecodeString returns the bytes represented by the base64 string s.
 func (enc *Encoding) DecodeString(s string) ([]byte, error) {
 	dbuf := make([]byte, enc.DecodedLen(len(s)))
@@ -480,9 +507,9 @@
 }
 
 // Decode decodes src using the encoding enc. It writes at most
-// DecodedLen(len(src)) bytes to dst and returns the number of bytes
+// [Encoding.DecodedLen](len(src)) bytes to dst and returns the number of bytes
 // written. If src contains invalid base64 data, it will return the
-// number of bytes successfully written and CorruptInputError.
+// number of bytes successfully written and [CorruptInputError].
 // New line characters (\r and \n) are ignored.
 func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
 	if len(src) == 0 {
@@ -621,9 +648,13 @@
 // DecodedLen returns the maximum length in bytes of the decoded data
 // corresponding to n bytes of base64-encoded data.
 func (enc *Encoding) DecodedLen(n int) int {
-	if enc.padChar == NoPadding {
+	return decodedLen(n, enc.padChar)
+}
+
+func decodedLen(n int, padChar rune) int {
+	if padChar == NoPadding {
 		// Unpadded data may end with partial block of 2-3 characters.
-		return n * 6 / 8
+		return n/4*3 + n%4*6/8
 	}
 	// Padded base64 should always be a multiple of 4 characters in length.
 	return n / 4 * 3
diff --git a/src/encoding/base64/base64_test.go b/src/encoding/base64/base64_test.go
index 0ad88eb..7f5ebd8 100644
--- a/src/encoding/base64/base64_test.go
+++ b/src/encoding/base64/base64_test.go
@@ -9,8 +9,10 @@
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"reflect"
 	"runtime/debug"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -68,6 +70,8 @@
 	return rawRef(urlRef(ref))
 }
 
+const encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+
 // A nonstandard encoding with a funny padding character, for testing
 var funnyEncoding = NewEncoding(encodeStd).WithPadding(rune('@'))
 
@@ -111,8 +115,9 @@
 	for _, p := range pairs {
 		for _, tt := range encodingTests {
 			got := tt.enc.EncodeToString([]byte(p.decoded))
-			testEqual(t, "Encode(%q) = %q, want %q", p.decoded,
-				got, tt.conv(p.encoded))
+			testEqual(t, "Encode(%q) = %q, want %q", p.decoded, got, tt.conv(p.encoded))
+			dst := tt.enc.AppendEncode([]byte("lead"), []byte(p.decoded))
+			testEqual(t, `AppendEncode("lead", %q) = %q, want %q`, p.decoded, string(dst), "lead"+tt.conv(p.encoded))
 		}
 	}
 }
@@ -160,6 +165,17 @@
 			dbuf, err = tt.enc.DecodeString(encoded)
 			testEqual(t, "DecodeString(%q) = error %v, want %v", encoded, err, error(nil))
 			testEqual(t, "DecodeString(%q) = %q, want %q", encoded, string(dbuf), p.decoded)
+
+			dst, err := tt.enc.AppendDecode([]byte("lead"), []byte(encoded))
+			testEqual(t, "AppendDecode(%q) = error %v, want %v", p.encoded, err, error(nil))
+			testEqual(t, `AppendDecode("lead", %q) = %q, want %q`, p.encoded, string(dst), "lead"+p.decoded)
+
+			dst2, err := tt.enc.AppendDecode(dst[:0:len(p.decoded)], []byte(encoded))
+			testEqual(t, "AppendDecode(%q) = error %v, want %v", p.encoded, err, error(nil))
+			testEqual(t, `AppendDecode("", %q) = %q, want %q`, p.encoded, string(dst2), p.decoded)
+			if len(dst) > 0 && len(dst2) > 0 && &dst[0] != &dst2[0] {
+				t.Errorf("unexpected capacity growth: got %d, want %d", cap(dst2), cap(dst))
+			}
 		}
 	}
 }
@@ -262,11 +278,12 @@
 }
 
 func TestEncodedLen(t *testing.T) {
-	for _, tt := range []struct {
+	type test struct {
 		enc  *Encoding
 		n    int
-		want int
-	}{
+		want int64
+	}
+	tests := []test{
 		{RawStdEncoding, 0, 0},
 		{RawStdEncoding, 1, 2},
 		{RawStdEncoding, 2, 3},
@@ -278,19 +295,30 @@
 		{StdEncoding, 3, 4},
 		{StdEncoding, 4, 8},
 		{StdEncoding, 7, 12},
-	} {
-		if got := tt.enc.EncodedLen(tt.n); got != tt.want {
+	}
+	// check overflow
+	switch strconv.IntSize {
+	case 32:
+		tests = append(tests, test{RawStdEncoding, (math.MaxInt-5)/8 + 1, 357913942})
+		tests = append(tests, test{RawStdEncoding, math.MaxInt/4*3 + 2, math.MaxInt})
+	case 64:
+		tests = append(tests, test{RawStdEncoding, (math.MaxInt-5)/8 + 1, 1537228672809129302})
+		tests = append(tests, test{RawStdEncoding, math.MaxInt/4*3 + 2, math.MaxInt})
+	}
+	for _, tt := range tests {
+		if got := tt.enc.EncodedLen(tt.n); int64(got) != tt.want {
 			t.Errorf("EncodedLen(%d): got %d, want %d", tt.n, got, tt.want)
 		}
 	}
 }
 
 func TestDecodedLen(t *testing.T) {
-	for _, tt := range []struct {
+	type test struct {
 		enc  *Encoding
 		n    int
-		want int
-	}{
+		want int64
+	}
+	tests := []test{
 		{RawStdEncoding, 0, 0},
 		{RawStdEncoding, 2, 1},
 		{RawStdEncoding, 3, 2},
@@ -299,8 +327,18 @@
 		{StdEncoding, 0, 0},
 		{StdEncoding, 4, 3},
 		{StdEncoding, 8, 6},
-	} {
-		if got := tt.enc.DecodedLen(tt.n); got != tt.want {
+	}
+	// check overflow
+	switch strconv.IntSize {
+	case 32:
+		tests = append(tests, test{RawStdEncoding, math.MaxInt/6 + 1, 268435456})
+		tests = append(tests, test{RawStdEncoding, math.MaxInt, 1610612735})
+	case 64:
+		tests = append(tests, test{RawStdEncoding, math.MaxInt/6 + 1, 1152921504606846976})
+		tests = append(tests, test{RawStdEncoding, math.MaxInt, 6917529027641081855})
+	}
+	for _, tt := range tests {
+		if got := tt.enc.DecodedLen(tt.n); int64(got) != tt.want {
 			t.Errorf("DecodedLen(%d): got %d, want %d", tt.n, got, tt.want)
 		}
 	}
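
For context on the AppendEncode/AppendDecode methods exercised by the updated base64 tests above, here is a minimal usage sketch; the sample inputs and the commented outputs are illustrative, not taken from the change itself.

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// AppendEncode appends the base64 form of src to an existing buffer.
	buf := []byte("prefix:")
	buf = base64.StdEncoding.AppendEncode(buf, []byte("hello"))
	fmt.Printf("%s\n", buf) // prefix:aGVsbG8=

	// AppendDecode appends the decoded bytes; malformed input returns an error.
	out, err := base64.StdEncoding.AppendDecode(nil, []byte("aGVsbG8="))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // hello
}
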
diff --git a/src/encoding/binary/binary.go b/src/encoding/binary/binary.go
index 3fb18a7..f001be8 100644
--- a/src/encoding/binary/binary.go
+++ b/src/encoding/binary/binary.go
@@ -17,8 +17,8 @@
 //
 // This package favors simplicity over efficiency. Clients that require
 // high-performance serialization, especially for large data structures,
-// should look at more advanced solutions such as the encoding/gob
-// package or protocol buffers.
+// should look at more advanced solutions such as the [encoding/gob]
+// package or [google.golang.org/protobuf] for protocol buffers.
 package binary
 
 import (
@@ -31,6 +31,8 @@
 
 // A ByteOrder specifies how to convert byte slices into
 // 16-, 32-, or 64-bit unsigned integers.
+//
+// It is implemented by [LittleEndian], [BigEndian], and [NativeEndian].
 type ByteOrder interface {
 	Uint16([]byte) uint16
 	Uint32([]byte) uint32
@@ -43,6 +45,8 @@
 
 // AppendByteOrder specifies how to append 16-, 32-, or 64-bit unsigned integers
 // into a byte slice.
+//
+// It is implemented by [LittleEndian], [BigEndian], and [NativeEndian].
 type AppendByteOrder interface {
 	AppendUint16([]byte, uint16) []byte
 	AppendUint32([]byte, uint32) []byte
@@ -50,10 +54,10 @@
 	String() string
 }
 
-// LittleEndian is the little-endian implementation of ByteOrder and AppendByteOrder.
+// LittleEndian is the little-endian implementation of [ByteOrder] and [AppendByteOrder].
 var LittleEndian littleEndian
 
-// BigEndian is the big-endian implementation of ByteOrder and AppendByteOrder.
+// BigEndian is the big-endian implementation of [ByteOrder] and [AppendByteOrder].
 var BigEndian bigEndian
 
 type littleEndian struct{}
@@ -227,9 +231,9 @@
 // When reading into a struct, all non-blank fields must be exported
 // or Read may panic.
 //
-// The error is EOF only if no bytes were read.
-// If an EOF happens after reading some but not all the bytes,
-// Read returns ErrUnexpectedEOF.
+// The error is [io.EOF] only if no bytes were read.
+// If an [io.EOF] happens after reading some but not all the bytes,
+// Read returns [io.ErrUnexpectedEOF].
 func Read(r io.Reader, order ByteOrder, data any) error {
 	// Fast path for basic types and slices.
 	if n := intDataSize(data); n != 0 {
@@ -460,7 +464,7 @@
 	return err
 }
 
-// Size returns how many bytes Write would generate to encode the value v, which
+// Size returns how many bytes [Write] would generate to encode the value v, which
 // must be a fixed-size value or a slice of fixed-size values, or a pointer to such data.
 // If v is neither of these, Size returns -1.
 func Size(v any) int {
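
A short sketch of the [ByteOrder]/[AppendByteOrder] behavior documented above, using Read with a fixed-size value; the sample bytes are illustrative.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Read fills the value from r using the chosen ByteOrder implementation.
	r := bytes.NewReader([]byte{0x01, 0x02, 0x03, 0x04})
	var v uint32
	if err := binary.Read(r, binary.BigEndian, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%08x\n", v) // 01020304

	// AppendByteOrder appends fixed-width integers to a byte slice.
	buf := binary.LittleEndian.AppendUint16(nil, 0xCAFE)
	fmt.Printf("% x\n", buf) // fe ca
}
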
diff --git a/src/encoding/binary/native_endian_big.go b/src/encoding/binary/native_endian_big.go
index 1a24354..bcc8e30 100644
--- a/src/encoding/binary/native_endian_big.go
+++ b/src/encoding/binary/native_endian_big.go
@@ -10,5 +10,5 @@
 	bigEndian
 }
 
-// NativeEndian is the native-endian implementation of ByteOrder and AppendByteOrder.
+// NativeEndian is the native-endian implementation of [ByteOrder] and [AppendByteOrder].
 var NativeEndian nativeEndian
diff --git a/src/encoding/binary/native_endian_little.go b/src/encoding/binary/native_endian_little.go
index 67b41ae..38d3e9b 100644
--- a/src/encoding/binary/native_endian_little.go
+++ b/src/encoding/binary/native_endian_little.go
@@ -10,5 +10,5 @@
 	littleEndian
 }
 
-// NativeEndian is the native-endian implementation of ByteOrder and AppendByteOrder.
+// NativeEndian is the native-endian implementation of [ByteOrder] and [AppendByteOrder].
 var NativeEndian nativeEndian
diff --git a/src/encoding/binary/varint.go b/src/encoding/binary/varint.go
index 7b14fb2..64dd9d6 100644
--- a/src/encoding/binary/varint.go
+++ b/src/encoding/binary/varint.go
@@ -37,7 +37,7 @@
 )
 
 // AppendUvarint appends the varint-encoded form of x,
-// as generated by PutUvarint, to buf and returns the extended buffer.
+// as generated by [PutUvarint], to buf and returns the extended buffer.
 func AppendUvarint(buf []byte, x uint64) []byte {
 	for x >= 0x80 {
 		buf = append(buf, byte(x)|0x80)
@@ -88,7 +88,7 @@
 }
 
 // AppendVarint appends the varint-encoded form of x,
-// as generated by PutVarint, to buf and returns the extended buffer.
+// as generated by [PutVarint], to buf and returns the extended buffer.
 func AppendVarint(buf []byte, x int64) []byte {
 	ux := uint64(x) << 1
 	if x < 0 {
@@ -126,9 +126,9 @@
 var errOverflow = errors.New("binary: varint overflows a 64-bit integer")
 
 // ReadUvarint reads an encoded unsigned integer from r and returns it as a uint64.
-// The error is EOF only if no bytes were read.
-// If an EOF happens after reading some but not all the bytes,
-// ReadUvarint returns io.ErrUnexpectedEOF.
+// The error is [io.EOF] only if no bytes were read.
+// If an [io.EOF] happens after reading some but not all the bytes,
+// ReadUvarint returns [io.ErrUnexpectedEOF].
 func ReadUvarint(r io.ByteReader) (uint64, error) {
 	var x uint64
 	var s uint
@@ -153,9 +153,9 @@
 }
 
 // ReadVarint reads an encoded signed integer from r and returns it as an int64.
-// The error is EOF only if no bytes were read.
-// If an EOF happens after reading some but not all the bytes,
-// ReadVarint returns io.ErrUnexpectedEOF.
+// The error is [io.EOF] only if no bytes were read.
+// If an [io.EOF] happens after reading some but not all the bytes,
+// ReadVarint returns [io.ErrUnexpectedEOF].
 func ReadVarint(r io.ByteReader) (int64, error) {
 	ux, err := ReadUvarint(r) // ok to continue in presence of error
 	x := int64(ux >> 1)
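
A minimal round-trip sketch of the AppendUvarint/ReadUvarint pairing described in the doc comments above; the value 300 and its two-byte encoding are illustrative.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// AppendUvarint emits the same bytes PutUvarint would, extending buf.
	buf := binary.AppendUvarint(nil, 300)
	fmt.Printf("% x\n", buf) // ac 02

	// ReadUvarint consumes the encoding from any io.ByteReader.
	v, err := binary.ReadUvarint(bytes.NewReader(buf))
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 300
}
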
diff --git a/src/encoding/csv/reader.go b/src/encoding/csv/reader.go
index c6a8ed0..d9cab86 100644
--- a/src/encoding/csv/reader.go
+++ b/src/encoding/csv/reader.go
@@ -62,7 +62,7 @@
 )
 
 // A ParseError is returned for parsing errors.
-// Line numbers are 1-indexed and columns are 0-indexed.
+// Line and column numbers are 1-indexed.
 type ParseError struct {
 	StartLine int   // Line where the record starts
 	Line      int   // Line where the error occurred
@@ -82,7 +82,7 @@
 
 func (e *ParseError) Unwrap() error { return e.Err }
 
-// These are the errors that can be returned in ParseError.Err.
+// These are the errors that can be returned in [ParseError.Err].
 var (
 	ErrBareQuote  = errors.New("bare \" in non-quoted-field")
 	ErrQuote      = errors.New("extraneous or missing \" in quoted-field")
@@ -100,9 +100,9 @@
 
 // A Reader reads records from a CSV-encoded file.
 //
-// As returned by NewReader, a Reader expects input conforming to RFC 4180.
+// As returned by [NewReader], a Reader expects input conforming to RFC 4180.
 // The exported fields can be changed to customize the details before the
-// first call to Read or ReadAll.
+// first call to [Reader.Read] or [Reader.ReadAll].
 //
 // The Reader converts all \r\n sequences in its input to plain \n,
 // including in multiline field values, so that the returned data does
@@ -186,12 +186,12 @@
 
 // Read reads one record (a slice of fields) from r.
 // If the record has an unexpected number of fields,
-// Read returns the record along with the error ErrFieldCount.
+// Read returns the record along with the error [ErrFieldCount].
 // If the record contains a field that cannot be parsed,
 // Read returns a partial record along with the parse error.
 // The partial record contains all fields read before the error.
-// If there is no data left to be read, Read returns nil, io.EOF.
-// If ReuseRecord is true, the returned slice may be shared
+// If there is no data left to be read, Read returns nil, [io.EOF].
+// If [Reader.ReuseRecord] is true, the returned slice may be shared
 // between multiple calls to Read.
 func (r *Reader) Read() (record []string, err error) {
 	if r.ReuseRecord {
@@ -205,7 +205,7 @@
 
 // FieldPos returns the line and column corresponding to
 // the start of the field with the given index in the slice most recently
-// returned by Read. Numbering of lines and columns starts at 1;
+// returned by [Reader.Read]. Numbering of lines and columns starts at 1;
 // columns are counted in bytes, not runes.
 //
 // If this is called with an out-of-bounds index, it panics.
@@ -231,7 +231,7 @@
 
 // ReadAll reads all the remaining records from r.
 // Each record is a slice of fields.
-// A successful call returns err == nil, not err == io.EOF. Because ReadAll is
+// A successful call returns err == nil, not err == [io.EOF]. Because ReadAll is
 // defined to read until EOF, it does not treat end of file as an error to be
 // reported.
 func (r *Reader) ReadAll() (records [][]string, err error) {
@@ -249,7 +249,7 @@
 
 // readLine reads the next line (with the trailing endline).
 // If EOF is hit without a trailing endline, it will be omitted.
-// If some bytes were read, then the error is never io.EOF.
+// If some bytes were read, then the error is never [io.EOF].
 // The result is only valid until the next call to readLine.
 func (r *Reader) readLine() ([]byte, error) {
 	line, err := r.r.ReadSlice('\n')
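
A small sketch of the Read loop the csv.Reader doc comments above describe, including the nil, [io.EOF] termination condition; the input data is illustrative.

package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"strings"
)

func main() {
	r := csv.NewReader(strings.NewReader("name,age\nalice,30\nbob,25\n"))
	for {
		record, err := r.Read()
		if err == io.EOF { // Read returns nil, io.EOF once the input is exhausted.
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(record)
	}
}
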
diff --git a/src/encoding/csv/writer.go b/src/encoding/csv/writer.go
index ac64b4d..ff3142f 100644
--- a/src/encoding/csv/writer.go
+++ b/src/encoding/csv/writer.go
@@ -14,19 +14,21 @@
 
 // A Writer writes records using CSV encoding.
 //
-// As returned by NewWriter, a Writer writes records terminated by a
+// As returned by [NewWriter], a Writer writes records terminated by a
 // newline and uses ',' as the field delimiter. The exported fields can be
-// changed to customize the details before the first call to Write or WriteAll.
+// changed to customize the details before
+// the first call to [Writer.Write] or [Writer.WriteAll].
 //
-// Comma is the field delimiter.
+// [Writer.Comma] is the field delimiter.
 //
-// If UseCRLF is true, the Writer ends each output line with \r\n instead of \n.
+// If [Writer.UseCRLF] is true,
+// the Writer ends each output line with \r\n instead of \n.
 //
 // The writes of individual records are buffered.
 // After all data has been written, the client should call the
-// Flush method to guarantee all data has been forwarded to
-// the underlying io.Writer.  Any errors that occurred should
-// be checked by calling the Error method.
+// [Writer.Flush] method to guarantee all data has been forwarded to
+// the underlying [io.Writer].  Any errors that occurred should
+// be checked by calling the [Writer.Error] method.
 type Writer struct {
 	Comma   rune // Field delimiter (set to ',' by NewWriter)
 	UseCRLF bool // True to use \r\n as the line terminator
@@ -43,8 +45,8 @@
 
 // Write writes a single CSV record to w along with any necessary quoting.
 // A record is a slice of strings with each string being one field.
-// Writes are buffered, so Flush must eventually be called to ensure
-// that the record is written to the underlying io.Writer.
+// Writes are buffered, so [Writer.Flush] must eventually be called to ensure
+// that the record is written to the underlying [io.Writer].
 func (w *Writer) Write(record []string) error {
 	if !validDelim(w.Comma) {
 		return errInvalidDelim
@@ -118,20 +120,21 @@
 	return err
 }
 
-// Flush writes any buffered data to the underlying io.Writer.
-// To check if an error occurred during the Flush, call Error.
+// Flush writes any buffered data to the underlying [io.Writer].
+// To check if an error occurred during Flush, call [Writer.Error].
 func (w *Writer) Flush() {
 	w.w.Flush()
 }
 
-// Error reports any error that has occurred during a previous Write or Flush.
+// Error reports any error that has occurred during
+// a previous [Writer.Write] or [Writer.Flush].
 func (w *Writer) Error() error {
 	_, err := w.w.Write(nil)
 	return err
 }
 
-// WriteAll writes multiple CSV records to w using Write and then calls Flush,
-// returning any error from the Flush.
+// WriteAll writes multiple CSV records to w using [Writer.Write] and
+// then calls [Writer.Flush], returning any error from the Flush.
 func (w *Writer) WriteAll(records [][]string) error {
 	for _, record := range records {
 		err := w.Write(record)
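
A short sketch of the buffered-write pattern the csv.Writer doc comments above describe: Write, then Flush, then check Error. The records are illustrative.

package main

import (
	"encoding/csv"
	"os"
)

func main() {
	w := csv.NewWriter(os.Stdout)
	for _, record := range [][]string{{"name", "age"}, {"alice", "30"}} {
		if err := w.Write(record); err != nil {
			panic(err)
		}
	}
	// Writes are buffered: Flush forwards them to the underlying io.Writer,
	// and Error reports anything that failed during Write or Flush.
	w.Flush()
	if err := w.Error(); err != nil {
		panic(err)
	}
}
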
diff --git a/src/encoding/gob/codec_test.go b/src/encoding/gob/codec_test.go
index 1b8f195..ec56ad5 100644
--- a/src/encoding/gob/codec_test.go
+++ b/src/encoding/gob/codec_test.go
@@ -14,6 +14,7 @@
 	"strings"
 	"testing"
 	"time"
+	"unsafe"
 )
 
 var doFuzzTests = flag.Bool("gob.fuzz", false, "run the fuzz tests, which are large and very slow")
@@ -1566,7 +1567,9 @@
 
 func TestLargeSlice(t *testing.T) {
 	t.Run("byte", func(t *testing.T) {
-		t.Parallel()
+		if unsafe.Sizeof(uintptr(0)) > 4 {
+			t.Parallel() // Only run in parallel in a large address space
+		}
 		s := make([]byte, 10<<21)
 		for i := range s {
 			s[i] = byte(i)
@@ -1576,7 +1579,9 @@
 		testEncodeDecode(t, st, rt)
 	})
 	t.Run("int8", func(t *testing.T) {
-		t.Parallel()
+		if unsafe.Sizeof(uintptr(0)) > 4 {
+			t.Parallel()
+		}
 		s := make([]int8, 10<<21)
 		for i := range s {
 			s[i] = int8(i)
@@ -1586,7 +1591,9 @@
 		testEncodeDecode(t, st, rt)
 	})
 	t.Run("struct", func(t *testing.T) {
-		t.Parallel()
+		if unsafe.Sizeof(uintptr(0)) > 4 {
+			t.Parallel()
+		}
 		s := make([]StringPair, 1<<21)
 		for i := range s {
 			s[i].A = string(rune(i))
@@ -1597,7 +1604,9 @@
 		testEncodeDecode(t, st, rt)
 	})
 	t.Run("string", func(t *testing.T) {
-		t.Parallel()
+		if unsafe.Sizeof(uintptr(0)) > 4 {
+			t.Parallel()
+		}
 		s := make([]string, 1<<21)
 		for i := range s {
 			s[i] = string(rune(i))
diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go
index c0b054e..d178b2b 100644
--- a/src/encoding/gob/decode.go
+++ b/src/encoding/gob/decode.go
@@ -222,7 +222,7 @@
 // decAlloc takes a value and returns a settable value that can
 // be assigned to. If the value is a pointer, decAlloc guarantees it points to storage.
 // The callers to the individual decoders are expected to have used decAlloc.
-// The individual decoders don't need to it.
+// The individual decoders don't need it.
 func decAlloc(v reflect.Value) reflect.Value {
 	for v.Kind() == reflect.Pointer {
 		if v.IsNil() {
@@ -370,7 +370,7 @@
 		errorf("bad %s slice length: %d", value.Type(), n)
 	}
 	if value.Cap() < n {
-		safe := saferio.SliceCap((*byte)(nil), uint64(n))
+		safe := saferio.SliceCap[byte](uint64(n))
 		if safe < 0 {
 			errorf("%s slice too big: %d elements", value.Type(), n)
 		}
@@ -395,7 +395,7 @@
 			value.SetLen(ln)
 			sub := value.Slice(i, ln)
 			if _, err := state.b.Read(sub.Bytes()); err != nil {
-				errorf("error decoding []byte at %d: %s", err, i)
+				errorf("error decoding []byte at %d: %s", i, err)
 			}
 			i = ln
 		}
@@ -656,7 +656,7 @@
 		errorf("%s slice too big: %d elements of %d bytes", typ.Elem(), u, size)
 	}
 	if value.Cap() < n {
-		safe := saferio.SliceCap(reflect.Zero(reflect.PtrTo(typ.Elem())).Interface(), uint64(n))
+		safe := saferio.SliceCapWithSize(size, uint64(n))
 		if safe < 0 {
 			errorf("%s slice too big: %d elements of %d bytes", typ.Elem(), u, size)
 		}
@@ -1197,7 +1197,7 @@
 // emptyStruct is the type we compile into when ignoring a struct value.
 type emptyStruct struct{}
 
-var emptyStructType = reflect.TypeOf((*emptyStruct)(nil)).Elem()
+var emptyStructType = reflect.TypeFor[emptyStruct]()
 
 // getIgnoreEnginePtr returns the engine for the specified type when the value is to be discarded.
 func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, err error) {
diff --git a/src/encoding/gob/decoder.go b/src/encoding/gob/decoder.go
index 5b77adc..c4b6088 100644
--- a/src/encoding/gob/decoder.go
+++ b/src/encoding/gob/decoder.go
@@ -37,9 +37,9 @@
 	err          error
 }
 
-// NewDecoder returns a new decoder that reads from the io.Reader.
-// If r does not also implement io.ByteReader, it will be wrapped in a
-// bufio.Reader.
+// NewDecoder returns a new decoder that reads from the [io.Reader].
+// If r does not also implement [io.ByteReader], it will be wrapped in a
+// [bufio.Reader].
 func NewDecoder(r io.Reader) *Decoder {
 	dec := new(Decoder)
 	// We use the ability to read bytes as a plausible surrogate for buffering.
@@ -188,7 +188,7 @@
 // If e is nil, the value will be discarded. Otherwise,
 // the value underlying e must be a pointer to the
 // correct type for the next data item received.
-// If the input is at EOF, Decode returns io.EOF and
+// If the input is at EOF, Decode returns [io.EOF] and
 // does not modify e.
 func (dec *Decoder) Decode(e any) error {
 	if e == nil {
@@ -208,7 +208,7 @@
 // If v is the zero reflect.Value (v.Kind() == Invalid), DecodeValue discards the value.
 // Otherwise, it stores the value into v. In that case, v must represent
 // a non-nil pointer to data or be an assignable reflect.Value (v.CanSet())
-// If the input is at EOF, DecodeValue returns io.EOF and
+// If the input is at EOF, DecodeValue returns [io.EOF] and
 // does not modify v.
 func (dec *Decoder) DecodeValue(v reflect.Value) error {
 	if v.IsValid() {
diff --git a/src/encoding/gob/doc.go b/src/encoding/gob/doc.go
index 53c47e7..3f26ed8 100644
--- a/src/encoding/gob/doc.go
+++ b/src/encoding/gob/doc.go
@@ -4,12 +4,12 @@
 
 /*
 Package gob manages streams of gobs - binary values exchanged between an
-Encoder (transmitter) and a Decoder (receiver). A typical use is transporting
+[Encoder] (transmitter) and a [Decoder] (receiver). A typical use is transporting
 arguments and results of remote procedure calls (RPCs) such as those provided by
 [net/rpc].
 
 The implementation compiles a custom codec for each data type in the stream and
-is most efficient when a single Encoder is used to transmit a stream of values,
+is most efficient when a single [Encoder] is used to transmit a stream of values,
 amortizing the cost of compilation.
 
 # Basics
@@ -21,10 +21,10 @@
 as they have no value. Recursive types work fine, but
 recursive values (data with cycles) are problematic. This may change.
 
-To use gobs, create an Encoder and present it with a series of data items as
-values or addresses that can be dereferenced to values. The Encoder makes sure
+To use gobs, create an [Encoder] and present it with a series of data items as
+values or addresses that can be dereferenced to values. The [Encoder] makes sure
 all type information is sent before it is needed. At the receive side, a
-Decoder retrieves values from the encoded stream and unpacks them into local
+[Decoder] retrieves values from the encoded stream and unpacks them into local
 variables.
 
 # Types and Values
@@ -93,12 +93,12 @@
 at the top level will fail. A struct field of chan or func type is treated exactly
 like an unexported field and is ignored.
 
-Gob can encode a value of any type implementing the GobEncoder or
-encoding.BinaryMarshaler interfaces by calling the corresponding method,
+Gob can encode a value of any type implementing the [GobEncoder] or
+[encoding.BinaryMarshaler] interfaces by calling the corresponding method,
 in that order of preference.
 
-Gob can decode a value of any type implementing the GobDecoder or
-encoding.BinaryUnmarshaler interfaces by calling the corresponding method,
+Gob can decode a value of any type implementing the [GobDecoder] or
+[encoding.BinaryUnmarshaler] interfaces by calling the corresponding method,
 again in that order of preference.
 
 # Encoding Details
@@ -131,7 +131,7 @@
 example, -129=^128=(^256>>1) encodes as (FE 01 01).
 
 Floating-point numbers are always sent as a representation of a float64 value.
-That value is converted to a uint64 using math.Float64bits. The uint64 is then
+That value is converted to a uint64 using [math.Float64bits]. The uint64 is then
 byte-reversed and sent as a regular unsigned integer. The byte-reversal means the
 exponent and high-precision part of the mantissa go first. Since the low bits are
 often zero, this can save encoding bytes. For instance, 17.0 is encoded in only
@@ -168,22 +168,22 @@
 treated, for transmission, as members of a single "interface" type, analogous to
 int or []byte - in effect they're all treated as interface{}. Interface values
 are transmitted as a string identifying the concrete type being sent (a name
-that must be pre-defined by calling Register), followed by a byte count of the
+that must be pre-defined by calling [Register]), followed by a byte count of the
 length of the following data (so the value can be skipped if it cannot be
 stored), followed by the usual encoding of concrete (dynamic) value stored in
 the interface value. (A nil interface value is identified by the empty string
 and transmits no value.) Upon receipt, the decoder verifies that the unpacked
 concrete item satisfies the interface of the receiving variable.
 
-If a value is passed to Encode and the type is not a struct (or pointer to struct,
+If a value is passed to [Encoder.Encode] and the type is not a struct (or pointer to struct,
 etc.), for simplicity of processing it is represented as a struct of one field.
 The only visible effect of this is to encode a zero byte after the value, just as
 after the last field of an encoded struct, so that the decode algorithm knows when
 the top-level value is complete.
 
 The representation of types is described below. When a type is defined on a given
-connection between an Encoder and Decoder, it is assigned a signed integer type
-id. When Encoder.Encode(v) is called, it makes sure there is an id assigned for
+connection between an [Encoder] and [Decoder], it is assigned a signed integer type
+id. When [Encoder.Encode](v) is called, it makes sure there is an id assigned for
 the type of v and all its elements and then it sends the pair (typeid, encoded-v)
 where typeid is the type id of the encoded type of v and encoded-v is the gob
 encoding of the value v.
@@ -280,7 +280,7 @@
 # Security
 
 This package is not designed to be hardened against adversarial inputs, and is
-outside the scope of https://go.dev/security/policy. In particular, the Decoder
+outside the scope of https://go.dev/security/policy. In particular, the [Decoder]
 does only basic sanity checking on decoded input sizes, and its limits are not
 configurable. Care should be taken when decoding gob data from untrusted
 sources, which may consume significant resources.
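
A minimal [Encoder]/[Decoder] round trip matching the Basics section of the doc above, assuming a simple exported struct; the Point type and values are illustrative.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

type Point struct{ X, Y int }

func main() {
	var network bytes.Buffer

	// The Encoder sends type information before the first value of each type.
	enc := gob.NewEncoder(&network)
	if err := enc.Encode(Point{X: 1, Y: 2}); err != nil {
		panic(err)
	}

	// The Decoder unpacks values from the stream into local variables.
	var p Point
	dec := gob.NewDecoder(&network)
	if err := dec.Decode(&p); err != nil {
		panic(err)
	}
	fmt.Println(p) // {1 2}
}
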
diff --git a/src/encoding/gob/encoder.go b/src/encoding/gob/encoder.go
index aa41393..7d46152 100644
--- a/src/encoding/gob/encoder.go
+++ b/src/encoding/gob/encoder.go
@@ -30,7 +30,7 @@
 const maxLength = 9 // Maximum size of an encoded length.
 var spaceForLength = make([]byte, maxLength)
 
-// NewEncoder returns a new encoder that will transmit on the io.Writer.
+// NewEncoder returns a new encoder that will transmit on the [io.Writer].
 func NewEncoder(w io.Writer) *Encoder {
 	enc := new(Encoder)
 	enc.w = []io.Writer{w}
@@ -39,7 +39,7 @@
 	return enc
 }
 
-// writer() returns the innermost writer the encoder is using
+// writer returns the innermost writer the encoder is using.
 func (enc *Encoder) writer() io.Writer {
 	return enc.w[len(enc.w)-1]
 }
diff --git a/src/encoding/gob/encoder_test.go b/src/encoding/gob/encoder_test.go
index 484be43..d99b071 100644
--- a/src/encoding/gob/encoder_test.go
+++ b/src/encoding/gob/encoder_test.go
@@ -1017,7 +1017,7 @@
 
 // Test that a failed compilation doesn't leave around an executable encoder.
 // Issue 3723.
-func TestMutipleEncodingsOfBadType(t *testing.T) {
+func TestMultipleEncodingsOfBadType(t *testing.T) {
 	x := Bug4Public{
 		Name:   "name",
 		Secret: Bug4Secret{1},
diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go
index 6fefd36..ae806fc 100644
--- a/src/encoding/gob/gobencdec_test.go
+++ b/src/encoding/gob/gobencdec_test.go
@@ -806,7 +806,7 @@
 	defer func() { maxIgnoreNestingDepth = oldNestingDepth }()
 	b := new(bytes.Buffer)
 	enc := NewEncoder(b)
-	typ := reflect.TypeOf(int(0))
+	typ := reflect.TypeFor[int]()
 	nested := reflect.ArrayOf(1, typ)
 	for i := 0; i < 100; i++ {
 		nested = reflect.ArrayOf(1, nested)
diff --git a/src/encoding/gob/type.go b/src/encoding/gob/type.go
index bd7d919..30d8ca6 100644
--- a/src/encoding/gob/type.go
+++ b/src/encoding/gob/type.go
@@ -24,7 +24,7 @@
 	base        reflect.Type // the base type after all indirections
 	indir       int          // number of indirections to reach the base type
 	externalEnc int          // xGob, xBinary, or xText
-	externalDec int          // xGob, xBinary or xText
+	externalDec int          // xGob, xBinary, or xText
 	encIndir    int8         // number of indirections to reach the receiver type; may be negative
 	decIndir    int8         // number of indirections to reach the receiver type; may be negative
 }
@@ -103,14 +103,14 @@
 }
 
 var (
-	gobEncoderInterfaceType        = reflect.TypeOf((*GobEncoder)(nil)).Elem()
-	gobDecoderInterfaceType        = reflect.TypeOf((*GobDecoder)(nil)).Elem()
-	binaryMarshalerInterfaceType   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
-	binaryUnmarshalerInterfaceType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
-	textMarshalerInterfaceType     = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
-	textUnmarshalerInterfaceType   = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+	gobEncoderInterfaceType        = reflect.TypeFor[GobEncoder]()
+	gobDecoderInterfaceType        = reflect.TypeFor[GobDecoder]()
+	binaryMarshalerInterfaceType   = reflect.TypeFor[encoding.BinaryMarshaler]()
+	binaryUnmarshalerInterfaceType = reflect.TypeFor[encoding.BinaryUnmarshaler]()
+	textMarshalerInterfaceType     = reflect.TypeFor[encoding.TextMarshaler]()
+	textUnmarshalerInterfaceType   = reflect.TypeFor[encoding.TextUnmarshaler]()
 
-	wireTypeType = reflect.TypeOf((*wireType)(nil)).Elem()
+	wireTypeType = reflect.TypeFor[wireType]()
 )
 
 // implementsInterface reports whether the type implements the
@@ -279,12 +279,12 @@
 func init() {
 	// Some magic numbers to make sure there are no surprises.
 	checkId(16, tWireType)
-	checkId(17, mustGetTypeInfo(reflect.TypeOf((*arrayType)(nil)).Elem()).id)
-	checkId(18, mustGetTypeInfo(reflect.TypeOf((*CommonType)(nil)).Elem()).id)
-	checkId(19, mustGetTypeInfo(reflect.TypeOf((*sliceType)(nil)).Elem()).id)
-	checkId(20, mustGetTypeInfo(reflect.TypeOf((*structType)(nil)).Elem()).id)
-	checkId(21, mustGetTypeInfo(reflect.TypeOf((*fieldType)(nil)).Elem()).id)
-	checkId(23, mustGetTypeInfo(reflect.TypeOf((*mapType)(nil)).Elem()).id)
+	checkId(17, mustGetTypeInfo(reflect.TypeFor[arrayType]()).id)
+	checkId(18, mustGetTypeInfo(reflect.TypeFor[CommonType]()).id)
+	checkId(19, mustGetTypeInfo(reflect.TypeFor[sliceType]()).id)
+	checkId(20, mustGetTypeInfo(reflect.TypeFor[structType]()).id)
+	checkId(21, mustGetTypeInfo(reflect.TypeFor[fieldType]()).id)
+	checkId(23, mustGetTypeInfo(reflect.TypeFor[mapType]()).id)
 
 	copy(builtinIdToTypeSlice[:], idToTypeSlice)
 
@@ -828,7 +828,7 @@
 	concreteTypeToName sync.Map // map[reflect.Type]string
 )
 
-// RegisterName is like Register but uses the provided name rather than the
+// RegisterName is like [Register] but uses the provided name rather than the
 // type's default.
 func RegisterName(name string, value any) {
 	if name == "" {
diff --git a/src/encoding/gob/type_test.go b/src/encoding/gob/type_test.go
index f5f8db8..8d4c6d7 100644
--- a/src/encoding/gob/type_test.go
+++ b/src/encoding/gob/type_test.go
@@ -49,15 +49,15 @@
 
 // Reregister some basic types to check registration is idempotent.
 func TestReregistration(t *testing.T) {
-	newtyp := getTypeUnlocked("int", reflect.TypeOf(int(0)))
+	newtyp := getTypeUnlocked("int", reflect.TypeFor[int]())
 	if newtyp != tInt.gobType() {
 		t.Errorf("reregistration of %s got new type", newtyp.string())
 	}
-	newtyp = getTypeUnlocked("uint", reflect.TypeOf(uint(0)))
+	newtyp = getTypeUnlocked("uint", reflect.TypeFor[uint]())
 	if newtyp != tUint.gobType() {
 		t.Errorf("reregistration of %s got new type", newtyp.string())
 	}
-	newtyp = getTypeUnlocked("string", reflect.TypeOf("hello"))
+	newtyp = getTypeUnlocked("string", reflect.TypeFor[string]())
 	if newtyp != tString.gobType() {
 		t.Errorf("reregistration of %s got new type", newtyp.string())
 	}
@@ -145,7 +145,7 @@
 }
 
 func TestStructType(t *testing.T) {
-	sstruct := getTypeUnlocked("Foo", reflect.TypeOf(Foo{}))
+	sstruct := getTypeUnlocked("Foo", reflect.TypeFor[Foo]())
 	str := sstruct.string()
 	// If we can print it correctly, we built it correctly.
 	expected := "Foo = struct { A int; B int; C string; D bytes; E float; F float; G Bar = struct { X string; }; H Bar; I Foo; }"
diff --git a/src/encoding/hex/hex.go b/src/encoding/hex/hex.go
index 375f583..791d2bd 100644
--- a/src/encoding/hex/hex.go
+++ b/src/encoding/hex/hex.go
@@ -9,6 +9,7 @@
 	"errors"
 	"fmt"
 	"io"
+	"slices"
 	"strings"
 )
 
@@ -37,9 +38,9 @@
 // Specifically, it returns n * 2.
 func EncodedLen(n int) int { return n * 2 }
 
-// Encode encodes src into EncodedLen(len(src))
+// Encode encodes src into [EncodedLen](len(src))
 // bytes of dst. As a convenience, it returns the number
-// of bytes written to dst, but this value is always EncodedLen(len(src)).
+// of bytes written to dst, but this value is always [EncodedLen](len(src)).
 // Encode implements hexadecimal encoding.
 func Encode(dst, src []byte) int {
 	j := 0
@@ -51,9 +52,18 @@
 	return len(src) * 2
 }
 
+// AppendEncode appends the hexadecimally encoded src to dst
+// and returns the extended buffer.
+func AppendEncode(dst, src []byte) []byte {
+	n := EncodedLen(len(src))
+	dst = slices.Grow(dst, n)
+	Encode(dst[len(dst):][:n], src)
+	return dst[:len(dst)+n]
+}
+
 // ErrLength reports an attempt to decode an odd-length input
-// using Decode or DecodeString.
-// The stream-based Decoder returns io.ErrUnexpectedEOF instead of ErrLength.
+// using [Decode] or [DecodeString].
+// The stream-based Decoder returns [io.ErrUnexpectedEOF] instead of ErrLength.
 var ErrLength = errors.New("encoding/hex: odd length hex string")
 
 // InvalidByteError values describe errors resulting from an invalid byte in a hex string.
@@ -67,7 +77,7 @@
 // Specifically, it returns x / 2.
 func DecodedLen(x int) int { return x / 2 }
 
-// Decode decodes src into DecodedLen(len(src)) bytes,
+// Decode decodes src into [DecodedLen](len(src)) bytes,
 // returning the actual number of bytes written to dst.
 //
 // Decode expects that src contains only hexadecimal
@@ -102,6 +112,16 @@
 	return i, nil
 }
 
+// AppendDecode appends the hexadecimally decoded src to dst
+// and returns the extended buffer.
+// If the input is malformed, it returns the partially decoded src and an error.
+func AppendDecode(dst, src []byte) ([]byte, error) {
+	n := DecodedLen(len(src))
+	dst = slices.Grow(dst, n)
+	n, err := Decode(dst[len(dst):][:n], src)
+	return dst[:len(dst)+n], err
+}
+
 // EncodeToString returns the hexadecimal encoding of src.
 func EncodeToString(src []byte) string {
 	dst := make([]byte, EncodedLen(len(src)))
@@ -151,7 +171,7 @@
 	out [bufferSize]byte // output buffer
 }
 
-// NewEncoder returns an io.Writer that writes lowercase hexadecimal characters to w.
+// NewEncoder returns an [io.Writer] that writes lowercase hexadecimal characters to w.
 func NewEncoder(w io.Writer) io.Writer {
 	return &encoder{w: w}
 }
@@ -179,7 +199,7 @@
 	arr [bufferSize]byte // backing array for in
 }
 
-// NewDecoder returns an io.Reader that decodes hexadecimal characters from r.
+// NewDecoder returns an [io.Reader] that decodes hexadecimal characters from r.
 // NewDecoder expects that r contain only an even number of hexadecimal characters.
 func NewDecoder(r io.Reader) io.Reader {
 	return &decoder{r: r}
@@ -218,7 +238,7 @@
 	return numDec, nil
 }
 
-// Dumper returns a WriteCloser that writes a hex dump of all written data to
+// Dumper returns an [io.WriteCloser] that writes a hex dump of all written data to
 // w. The format of the dump matches the output of `hexdump -C` on the command
 // line.
 func Dumper(w io.Writer) io.WriteCloser {
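
A minimal usage sketch of the AppendEncode/AppendDecode helpers added above; the buffer contents and commented outputs are illustrative.

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// AppendEncode grows dst as needed and appends the hex form of src.
	buf := []byte("id=")
	buf = hex.AppendEncode(buf, []byte{0xDE, 0xAD})
	fmt.Printf("%s\n", buf) // id=dead

	// AppendDecode appends the decoded bytes, returning an error for bad input.
	out, err := hex.AppendDecode(nil, []byte("beef"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out) // be ef
}
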
diff --git a/src/encoding/hex/hex_test.go b/src/encoding/hex/hex_test.go
index a820fe7..03331ea 100644
--- a/src/encoding/hex/hex_test.go
+++ b/src/encoding/hex/hex_test.go
@@ -37,6 +37,11 @@
 		if string(dst) != test.enc {
 			t.Errorf("#%d: got: %#v want: %#v", i, dst, test.enc)
 		}
+		dst = []byte("lead")
+		dst = AppendEncode(dst, test.dec)
+		if string(dst) != "lead"+test.enc {
+			t.Errorf("#%d: got: %#v want: %#v", i, dst, "lead"+test.enc)
+		}
 	}
 }
 
@@ -52,6 +57,13 @@
 		} else if !bytes.Equal(dst, test.dec) {
 			t.Errorf("#%d: got: %#v want: %#v", i, dst, test.dec)
 		}
+		dst = []byte("lead")
+		dst, err = AppendDecode(dst, []byte(test.enc))
+		if err != nil {
+			t.Errorf("#%d: AppendDecode error: %v", i, err)
+		} else if string(dst) != "lead"+string(test.dec) {
+			t.Errorf("#%d: got: %#v want: %#v", i, dst, "lead"+string(test.dec))
+		}
 	}
 }
 
diff --git a/src/encoding/json/bench_test.go b/src/encoding/json/bench_test.go
index d3af0dc..f7bcf80 100644
--- a/src/encoding/json/bench_test.go
+++ b/src/encoding/json/bench_test.go
@@ -93,7 +93,7 @@
 		enc := NewEncoder(io.Discard)
 		for pb.Next() {
 			if err := enc.Encode(&codeStruct); err != nil {
-				b.Fatal("Encode:", err)
+				b.Fatalf("Encode error: %v", err)
 			}
 		}
 	})
@@ -120,10 +120,10 @@
 		enc := NewEncoder(io.Discard)
 		for pb.Next() {
 			if err := enc.Encode(&codeStruct); err != nil {
-				b.Fatal("Encode:", err)
+				b.Fatalf("Encode error: %v", err)
 			}
 			if _, err := Marshal(dummy); err == nil {
-				b.Fatal("expect an error here")
+				b.Fatal("Marshal error: got nil, want non-nil")
 			}
 		}
 	})
@@ -140,7 +140,7 @@
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
 			if _, err := Marshal(&codeStruct); err != nil {
-				b.Fatal("Marshal:", err)
+				b.Fatalf("Marshal error: %v", err)
 			}
 		}
 	})
@@ -166,10 +166,10 @@
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
 			if _, err := Marshal(&codeStruct); err != nil {
-				b.Fatal("Marshal:", err)
+				b.Fatalf("Marshal error: %v", err)
 			}
 			if _, err := Marshal(dummy); err == nil {
-				b.Fatal("expect an error here")
+				b.Fatal("Marshal error: got nil, want non-nil")
 			}
 		}
 	})
@@ -188,7 +188,7 @@
 	return func(b *testing.B) {
 		for i := 0; i < b.N; i++ {
 			if _, err := Marshal(v); err != nil {
-				b.Fatal("Marshal:", err)
+				b.Fatalf("Marshal error: %v", err)
 			}
 		}
 	}
@@ -215,10 +215,10 @@
 	return func(b *testing.B) {
 		for i := 0; i < b.N; i++ {
 			if _, err := Marshal(v); err != nil {
-				b.Fatal("Marshal:", err)
+				b.Fatalf("Marshal error: %v", err)
 			}
 			if _, err := Marshal(dummy); err == nil {
-				b.Fatal("expect an error here")
+				b.Fatal("Marshal error: got nil, want non-nil")
 			}
 		}
 	}
@@ -246,6 +246,22 @@
 	b.Run("4096", benchMarshalBytesError(4096))
 }
 
+func BenchmarkMarshalMap(b *testing.B) {
+	b.ReportAllocs()
+	m := map[string]int{
+		"key3": 3,
+		"key2": 2,
+		"key1": 1,
+	}
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			if _, err := Marshal(m); err != nil {
+				b.Fatal("Marshal:", err)
+			}
+		}
+	})
+}
+
 func BenchmarkCodeDecoder(b *testing.B) {
 	b.ReportAllocs()
 	if codeJSON == nil {
@@ -264,7 +280,7 @@
 			buf.WriteByte('\n')
 			buf.WriteByte('\n')
 			if err := dec.Decode(&r); err != nil {
-				b.Fatal("Decode:", err)
+				b.Fatalf("Decode error: %v", err)
 			}
 		}
 	})
@@ -281,7 +297,7 @@
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		if err := dec.Decode(&out); err != nil {
-			b.Fatal("Decode:", err)
+			b.Fatalf("Decode error: %v", err)
 		}
 		r.Seek(0, 0)
 	}
@@ -295,7 +311,7 @@
 	buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n")
 	var x any
 	if err := dec.Decode(&x); err != nil {
-		b.Fatal("Decode:", err)
+		b.Fatalf("Decode error: %v", err)
 	}
 	ones := strings.Repeat(" 1\n", 300000) + "\n\n\n"
 	b.StartTimer()
@@ -304,8 +320,11 @@
 			buf.WriteString(ones)
 		}
 		x = nil
-		if err := dec.Decode(&x); err != nil || x != 1.0 {
-			b.Fatalf("Decode: %v after %d", err, i)
+		switch err := dec.Decode(&x); {
+		case err != nil:
+			b.Fatalf("Decode error: %v", err)
+		case x != 1.0:
+			b.Fatalf("Decode: got %v want 1.0", i)
 		}
 	}
 }
@@ -321,7 +340,7 @@
 		for pb.Next() {
 			var r codeResponse
 			if err := Unmarshal(codeJSON, &r); err != nil {
-				b.Fatal("Unmarshal:", err)
+				b.Fatalf("Unmarshal error: %v", err)
 			}
 		}
 	})
@@ -339,7 +358,7 @@
 		var r codeResponse
 		for pb.Next() {
 			if err := Unmarshal(codeJSON, &r); err != nil {
-				b.Fatal("Unmarshal:", err)
+				b.Fatalf("Unmarshal error: %v", err)
 			}
 		}
 	})
@@ -353,7 +372,7 @@
 		var s string
 		for pb.Next() {
 			if err := Unmarshal(data, &s); err != nil {
-				b.Fatal("Unmarshal:", err)
+				b.Fatalf("Unmarshal error: %v", err)
 			}
 		}
 	})
@@ -366,7 +385,7 @@
 		var f float64
 		for pb.Next() {
 			if err := Unmarshal(data, &f); err != nil {
-				b.Fatal("Unmarshal:", err)
+				b.Fatalf("Unmarshal error: %v", err)
 			}
 		}
 	})
@@ -379,7 +398,20 @@
 		var x int64
 		for pb.Next() {
 			if err := Unmarshal(data, &x); err != nil {
-				b.Fatal("Unmarshal:", err)
+				b.Fatalf("Unmarshal error: %v", err)
+			}
+		}
+	})
+}
+
+func BenchmarkUnmarshalMap(b *testing.B) {
+	b.ReportAllocs()
+	data := []byte(`{"key1":"value1","key2":"value2","key3":"value3"}`)
+	b.RunParallel(func(pb *testing.PB) {
+		x := make(map[string]string, 3)
+		for pb.Next() {
+			if err := Unmarshal(data, &x); err != nil {
+				b.Fatalf("Unmarshal error: %v", err)
 			}
 		}
 	})
@@ -392,7 +424,7 @@
 		var s struct{}
 		for pb.Next() {
 			if err := Unmarshal(j, &s); err != nil {
-				b.Fatal(err)
+				b.Fatalf("Unmarshal error: %v", err)
 			}
 		}
 	})
@@ -408,7 +440,7 @@
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
 			if _, err := Marshal(&j); err != nil {
-				b.Fatal(err)
+				b.Fatalf("Marshal error: %v", err)
 			}
 		}
 	})
@@ -421,7 +453,7 @@
 		var s struct{}
 		for pb.Next() {
 			if err := Unmarshal(j, &s); err != nil {
-				b.Fatal(err)
+				b.Fatalf("Unmarshal error: %v", err)
 			}
 		}
 	})
@@ -437,7 +469,7 @@
 	// Dynamically generate many new types.
 	types := make([]reflect.Type, maxTypes)
 	fs := []reflect.StructField{{
-		Type:  reflect.TypeOf(""),
+		Type:  reflect.TypeFor[string](),
 		Index: []int{0},
 	}}
 	for i := range types {
@@ -504,7 +536,7 @@
 
 		for pb.Next() {
 			if err := enc.Encode(&m); err != nil {
-				b.Fatal("Encode:", err)
+				b.Fatalf("Encode error: %v", err)
 			}
 		}
 	})
@@ -519,7 +551,7 @@
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
 			if err := NewEncoder(io.Discard).Encode(v); err != nil {
-				b.Fatal(err)
+				b.Fatalf("Encode error: %v", err)
 			}
 		}
 	})
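
The new map benchmarks above measure Marshal and Unmarshal of small string-keyed maps; note that Marshal emits map keys in sorted order. A tiny sketch, with illustrative map contents:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	m := map[string]int{"key3": 3, "key1": 1, "key2": 2}
	b, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"key1":1,"key2":2,"key3":3} (keys are sorted)
}
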
diff --git a/src/encoding/json/decode.go b/src/encoding/json/decode.go
index 53470d8..bc1891f 100644
--- a/src/encoding/json/decode.go
+++ b/src/encoding/json/decode.go
@@ -21,10 +21,10 @@
 
 // Unmarshal parses the JSON-encoded data and stores the result
 // in the value pointed to by v. If v is nil or not a pointer,
-// Unmarshal returns an InvalidUnmarshalError.
+// Unmarshal returns an [InvalidUnmarshalError].
 //
 // Unmarshal uses the inverse of the encodings that
-// Marshal uses, allocating maps, slices, and pointers as necessary,
+// [Marshal] uses, allocating maps, slices, and pointers as necessary,
 // with the following additional rules:
 //
 // To unmarshal JSON into a pointer, Unmarshal first handles the case of
@@ -33,28 +33,28 @@
 // the value pointed at by the pointer. If the pointer is nil, Unmarshal
 // allocates a new value for it to point to.
 //
-// To unmarshal JSON into a value implementing the Unmarshaler interface,
-// Unmarshal calls that value's UnmarshalJSON method, including
+// To unmarshal JSON into a value implementing [Unmarshaler],
+// Unmarshal calls that value's [Unmarshaler.UnmarshalJSON] method, including
 // when the input is a JSON null.
-// Otherwise, if the value implements encoding.TextUnmarshaler
-// and the input is a JSON quoted string, Unmarshal calls that value's
-// UnmarshalText method with the unquoted form of the string.
+// Otherwise, if the value implements [encoding.TextUnmarshaler]
+// and the input is a JSON quoted string, Unmarshal calls
+// [encoding.TextUnmarshaler.UnmarshalText] with the unquoted form of the string.
 //
 // To unmarshal JSON into a struct, Unmarshal matches incoming object
-// keys to the keys used by Marshal (either the struct field name or its tag),
+// keys to the keys used by [Marshal] (either the struct field name or its tag),
 // preferring an exact match but also accepting a case-insensitive match. By
 // default, object keys which don't have a corresponding struct field are
-// ignored (see Decoder.DisallowUnknownFields for an alternative).
+// ignored (see [Decoder.DisallowUnknownFields] for an alternative).
 //
 // To unmarshal JSON into an interface value,
 // Unmarshal stores one of these in the interface value:
 //
-//	bool, for JSON booleans
-//	float64, for JSON numbers
-//	string, for JSON strings
-//	[]interface{}, for JSON arrays
-//	map[string]interface{}, for JSON objects
-//	nil for JSON null
+//   - bool, for JSON booleans
+//   - float64, for JSON numbers
+//   - string, for JSON strings
+//   - []interface{}, for JSON arrays
+//   - map[string]interface{}, for JSON objects
+//   - nil for JSON null
 //
 // To unmarshal a JSON array into a slice, Unmarshal resets the slice length
 // to zero and then appends each element to the slice.
@@ -72,16 +72,16 @@
 // use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
 // reuses the existing map, keeping existing entries. Unmarshal then stores
 // key-value pairs from the JSON object into the map. The map's key type must
-// either be any string type, an integer, implement json.Unmarshaler, or
-// implement encoding.TextUnmarshaler.
+// either be any string type, an integer, implement [json.Unmarshaler], or
+// implement [encoding.TextUnmarshaler].
 //
-// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError.
+// If the JSON-encoded data contain a syntax error, Unmarshal returns a [SyntaxError].
 //
 // If a JSON value is not appropriate for a given target type,
 // or if a JSON number overflows the target type, Unmarshal
 // skips that field and completes the unmarshaling as best it can.
 // If no more serious errors are encountered, Unmarshal returns
-// an UnmarshalTypeError describing the earliest such error. In any
+// an [UnmarshalTypeError] describing the earliest such error. In any
 // case, it's not guaranteed that all the remaining fields following
 // the problematic one will be unmarshaled into the target object.
 //
@@ -114,7 +114,7 @@
 // a JSON value. UnmarshalJSON must copy the JSON data
 // if it wishes to retain the data after returning.
 //
-// By convention, to approximate the behavior of Unmarshal itself,
+// By convention, to approximate the behavior of [Unmarshal] itself,
 // Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
 type Unmarshaler interface {
 	UnmarshalJSON([]byte) error
@@ -151,8 +151,8 @@
 	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
 }
 
-// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
-// (The argument to Unmarshal must be a non-nil pointer.)
+// An InvalidUnmarshalError describes an invalid argument passed to [Unmarshal].
+// (The argument to [Unmarshal] must be a non-nil pointer.)
 type InvalidUnmarshalError struct {
 	Type reflect.Type
 }
@@ -591,7 +591,7 @@
 }
 
 var nullLiteral = []byte("null")
-var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()
 
 // object consumes an object from d.data[d.off-1:], decoding into v.
 // The first byte ('{') of the object has been read already.
@@ -762,17 +762,17 @@
 		if v.Kind() == reflect.Map {
 			kt := t.Key()
 			var kv reflect.Value
-			switch {
-			case reflect.PointerTo(kt).Implements(textUnmarshalerType):
+			if reflect.PointerTo(kt).Implements(textUnmarshalerType) {
 				kv = reflect.New(kt)
 				if err := d.literalStore(item, kv, true); err != nil {
 					return err
 				}
 				kv = kv.Elem()
-			case kt.Kind() == reflect.String:
-				kv = reflect.ValueOf(key).Convert(kt)
-			default:
+			} else {
 				switch kt.Kind() {
+				case reflect.String:
+					kv = reflect.New(kt).Elem()
+					kv.SetString(string(key))
 				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 					s := string(key)
 					n, err := strconv.ParseInt(s, 10, 64)
@@ -780,7 +780,8 @@
 						d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
 						break
 					}
-					kv = reflect.ValueOf(n).Convert(kt)
+					kv = reflect.New(kt).Elem()
+					kv.SetInt(n)
 				case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
 					s := string(key)
 					n, err := strconv.ParseUint(s, 10, 64)
@@ -788,7 +789,8 @@
 						d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
 						break
 					}
-					kv = reflect.ValueOf(n).Convert(kt)
+					kv = reflect.New(kt).Elem()
+					kv.SetUint(n)
 				default:
 					panic("json: Unexpected key type") // should never occur
 				}
@@ -827,12 +829,12 @@
 	}
 	f, err := strconv.ParseFloat(s, 64)
 	if err != nil {
-		return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)}
+		return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeFor[float64](), Offset: int64(d.off)}
 	}
 	return f, nil
 }
 
-var numberType = reflect.TypeOf(Number(""))
+var numberType = reflect.TypeFor[Number]()
 
 // literalStore decodes a literal stored in item into v.
 //
@@ -842,7 +844,7 @@
 func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
 	// Check for unmarshaler.
 	if len(item) == 0 {
-		//Empty string given
+		// Empty string given.
 		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
 		return nil
 	}
@@ -960,13 +962,12 @@
 			}
 			panic(phasePanicMsg)
 		}
-		s := string(item)
 		switch v.Kind() {
 		default:
 			if v.Kind() == reflect.String && v.Type() == numberType {
 				// s must be a valid number, because it's
 				// already been tokenized.
-				v.SetString(s)
+				v.SetString(string(item))
 				break
 			}
 			if fromQuoted {
@@ -974,7 +975,7 @@
 			}
 			d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
 		case reflect.Interface:
-			n, err := d.convertNumber(s)
+			n, err := d.convertNumber(string(item))
 			if err != nil {
 				d.saveError(err)
 				break
@@ -986,25 +987,25 @@
 			v.Set(reflect.ValueOf(n))
 
 		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-			n, err := strconv.ParseInt(s, 10, 64)
+			n, err := strconv.ParseInt(string(item), 10, 64)
 			if err != nil || v.OverflowInt(n) {
-				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+				d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
 				break
 			}
 			v.SetInt(n)
 
 		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-			n, err := strconv.ParseUint(s, 10, 64)
+			n, err := strconv.ParseUint(string(item), 10, 64)
 			if err != nil || v.OverflowUint(n) {
-				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+				d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
 				break
 			}
 			v.SetUint(n)
 
 		case reflect.Float32, reflect.Float64:
-			n, err := strconv.ParseFloat(s, v.Type().Bits())
+			n, err := strconv.ParseFloat(string(item), v.Type().Bits())
 			if err != nil || v.OverflowFloat(n) {
-				d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+				d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
 				break
 			}
 			v.SetFloat(n)
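
A small sketch of unmarshaling into an interface value, which stores the generic types listed in the Unmarshal documentation above (bool, float64, string, []interface{}, map[string]interface{}, or nil); the JSON input is illustrative.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var v any
	data := []byte(`{"name":"gopher","age":13,"tags":["go","json"],"retired":false}`)
	if err := json.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	m := v.(map[string]any)
	fmt.Printf("%T %v\n", m["age"], m["age"])   // float64 13
	fmt.Printf("%T %v\n", m["tags"], m["tags"]) // []interface {} [go json]
}
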
diff --git a/src/encoding/json/decode_test.go b/src/encoding/json/decode_test.go
index c2c036b..a10c1e1 100644
--- a/src/encoding/json/decode_test.go
+++ b/src/encoding/json/decode_test.go
@@ -57,7 +57,7 @@
 type SS string
 
 func (*SS) UnmarshalJSON(data []byte) error {
-	return &UnmarshalTypeError{Value: "number", Type: reflect.TypeOf(SS(""))}
+	return &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[SS]()}
 }
 
 // ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and
@@ -387,16 +387,6 @@
 	Data map[string]string `json:"data"`
 }
 
-type unmarshalTest struct {
-	in                    string
-	ptr                   any // new(type)
-	out                   any
-	err                   error
-	useNumber             bool
-	golden                bool
-	disallowUnknownFields bool
-}
-
 type B struct {
 	B bool `json:",string"`
 }
@@ -406,179 +396,203 @@
 	J **int
 }
 
-var unmarshalTests = []unmarshalTest{
+var unmarshalTests = []struct {
+	CaseName
+	in                    string
+	ptr                   any // new(type)
+	out                   any
+	err                   error
+	useNumber             bool
+	golden                bool
+	disallowUnknownFields bool
+}{
 	// basic types
-	{in: `true`, ptr: new(bool), out: true},
-	{in: `1`, ptr: new(int), out: 1},
-	{in: `1.2`, ptr: new(float64), out: 1.2},
-	{in: `-5`, ptr: new(int16), out: int16(-5)},
-	{in: `2`, ptr: new(Number), out: Number("2"), useNumber: true},
-	{in: `2`, ptr: new(Number), out: Number("2")},
-	{in: `2`, ptr: new(any), out: float64(2.0)},
-	{in: `2`, ptr: new(any), out: Number("2"), useNumber: true},
-	{in: `"a\u1234"`, ptr: new(string), out: "a\u1234"},
-	{in: `"http:\/\/"`, ptr: new(string), out: "http://"},
-	{in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"},
-	{in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"},
-	{in: "null", ptr: new(any), out: nil},
-	{in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7, "T", "X"}},
-	{in: `{"X": 23}`, ptr: new(T), out: T{}, err: &UnmarshalTypeError{"number", reflect.TypeOf(""), 8, "T", "X"}}, {in: `{"x": 1}`, ptr: new(tx), out: tx{}},
-	{in: `{"x": 1}`, ptr: new(tx), out: tx{}},
-	{in: `{"x": 1}`, ptr: new(tx), err: fmt.Errorf("json: unknown field \"x\""), disallowUnknownFields: true},
-	{in: `{"S": 23}`, ptr: new(W), out: W{}, err: &UnmarshalTypeError{"number", reflect.TypeOf(SS("")), 0, "W", "S"}},
-	{in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}},
-	{in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true},
-	{in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(any), out: ifaceNumAsFloat64},
-	{in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(any), out: ifaceNumAsNumber, useNumber: true},
+	{CaseName: Name(""), in: `true`, ptr: new(bool), out: true},
+	{CaseName: Name(""), in: `1`, ptr: new(int), out: 1},
+	{CaseName: Name(""), in: `1.2`, ptr: new(float64), out: 1.2},
+	{CaseName: Name(""), in: `-5`, ptr: new(int16), out: int16(-5)},
+	{CaseName: Name(""), in: `2`, ptr: new(Number), out: Number("2"), useNumber: true},
+	{CaseName: Name(""), in: `2`, ptr: new(Number), out: Number("2")},
+	{CaseName: Name(""), in: `2`, ptr: new(any), out: float64(2.0)},
+	{CaseName: Name(""), in: `2`, ptr: new(any), out: Number("2"), useNumber: true},
+	{CaseName: Name(""), in: `"a\u1234"`, ptr: new(string), out: "a\u1234"},
+	{CaseName: Name(""), in: `"http:\/\/"`, ptr: new(string), out: "http://"},
+	{CaseName: Name(""), in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"},
+	{CaseName: Name(""), in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"},
+	{CaseName: Name(""), in: "null", ptr: new(any), out: nil},
+	{CaseName: Name(""), in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeFor[string](), 7, "T", "X"}},
+	{CaseName: Name(""), in: `{"X": 23}`, ptr: new(T), out: T{}, err: &UnmarshalTypeError{"number", reflect.TypeFor[string](), 8, "T", "X"}},
+	{CaseName: Name(""), in: `{"x": 1}`, ptr: new(tx), out: tx{}},
+	{CaseName: Name(""), in: `{"x": 1}`, ptr: new(tx), out: tx{}},
+	{CaseName: Name(""), in: `{"x": 1}`, ptr: new(tx), err: fmt.Errorf("json: unknown field \"x\""), disallowUnknownFields: true},
+	{CaseName: Name(""), in: `{"S": 23}`, ptr: new(W), out: W{}, err: &UnmarshalTypeError{"number", reflect.TypeFor[SS](), 0, "W", "S"}},
+	{CaseName: Name(""), in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}},
+	{CaseName: Name(""), in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true},
+	{CaseName: Name(""), in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(any), out: ifaceNumAsFloat64},
+	{CaseName: Name(""), in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(any), out: ifaceNumAsNumber, useNumber: true},
 
 	// raw values with whitespace
-	{in: "\n true ", ptr: new(bool), out: true},
-	{in: "\t 1 ", ptr: new(int), out: 1},
-	{in: "\r 1.2 ", ptr: new(float64), out: 1.2},
-	{in: "\t -5 \n", ptr: new(int16), out: int16(-5)},
-	{in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"},
+	{CaseName: Name(""), in: "\n true ", ptr: new(bool), out: true},
+	{CaseName: Name(""), in: "\t 1 ", ptr: new(int), out: 1},
+	{CaseName: Name(""), in: "\r 1.2 ", ptr: new(float64), out: 1.2},
+	{CaseName: Name(""), in: "\t -5 \n", ptr: new(int16), out: int16(-5)},
+	{CaseName: Name(""), in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"},
 
 	// Z has a "-" tag.
-	{in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}},
-	{in: `{"Y": 1, "Z": 2}`, ptr: new(T), err: fmt.Errorf("json: unknown field \"Z\""), disallowUnknownFields: true},
+	{CaseName: Name(""), in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}},
+	{CaseName: Name(""), in: `{"Y": 1, "Z": 2}`, ptr: new(T), err: fmt.Errorf("json: unknown field \"Z\""), disallowUnknownFields: true},
 
-	{in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}},
-	{in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), err: fmt.Errorf("json: unknown field \"alphabet\""), disallowUnknownFields: true},
-	{in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}},
-	{in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}},
-	{in: `{"alphabet": "xyz"}`, ptr: new(U), err: fmt.Errorf("json: unknown field \"alphabet\""), disallowUnknownFields: true},
+	{CaseName: Name(""), in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}},
+	{CaseName: Name(""), in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), err: fmt.Errorf("json: unknown field \"alphabet\""), disallowUnknownFields: true},
+	{CaseName: Name(""), in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}},
+	{CaseName: Name(""), in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}},
+	{CaseName: Name(""), in: `{"alphabet": "xyz"}`, ptr: new(U), err: fmt.Errorf("json: unknown field \"alphabet\""), disallowUnknownFields: true},
 
 	// syntax errors
-	{in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}},
-	{in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}},
-	{in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true},
-	{in: `[2, 3`, err: &SyntaxError{msg: "unexpected end of JSON input", Offset: 5}},
-	{in: `{"F3": -}`, ptr: new(V), out: V{F3: Number("-")}, err: &SyntaxError{msg: "invalid character '}' in numeric literal", Offset: 9}},
+	{CaseName: Name(""), in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}},
+	{CaseName: Name(""), in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}},
+	{CaseName: Name(""), in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true},
+	{CaseName: Name(""), in: `[2, 3`, err: &SyntaxError{msg: "unexpected end of JSON input", Offset: 5}},
+	{CaseName: Name(""), in: `{"F3": -}`, ptr: new(V), out: V{F3: Number("-")}, err: &SyntaxError{msg: "invalid character '}' in numeric literal", Offset: 9}},
 
 	// raw value errors
-	{in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
-	{in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}},
-	{in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
-	{in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}},
-	{in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
-	{in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}},
-	{in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
-	{in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}},
+	{CaseName: Name(""), in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+	{CaseName: Name(""), in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}},
+	{CaseName: Name(""), in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+	{CaseName: Name(""), in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}},
+	{CaseName: Name(""), in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+	{CaseName: Name(""), in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}},
+	{CaseName: Name(""), in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+	{CaseName: Name(""), in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}},
 
 	// array tests
-	{in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}},
-	{in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}},
-	{in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}},
-	{in: `[1, 2, 3]`, ptr: new(MustNotUnmarshalJSON), err: errors.New("MustNotUnmarshalJSON was used")},
+	{CaseName: Name(""), in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}},
+	{CaseName: Name(""), in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}},
+	{CaseName: Name(""), in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}},
+	{CaseName: Name(""), in: `[1, 2, 3]`, ptr: new(MustNotUnmarshalJSON), err: errors.New("MustNotUnmarshalJSON was used")},
 
 	// empty array to interface test
-	{in: `[]`, ptr: new([]any), out: []any{}},
-	{in: `null`, ptr: new([]any), out: []any(nil)},
-	{in: `{"T":[]}`, ptr: new(map[string]any), out: map[string]any{"T": []any{}}},
-	{in: `{"T":null}`, ptr: new(map[string]any), out: map[string]any{"T": any(nil)}},
+	{CaseName: Name(""), in: `[]`, ptr: new([]any), out: []any{}},
+	{CaseName: Name(""), in: `null`, ptr: new([]any), out: []any(nil)},
+	{CaseName: Name(""), in: `{"T":[]}`, ptr: new(map[string]any), out: map[string]any{"T": []any{}}},
+	{CaseName: Name(""), in: `{"T":null}`, ptr: new(map[string]any), out: map[string]any{"T": any(nil)}},
 
 	// composite tests
-	{in: allValueIndent, ptr: new(All), out: allValue},
-	{in: allValueCompact, ptr: new(All), out: allValue},
-	{in: allValueIndent, ptr: new(*All), out: &allValue},
-	{in: allValueCompact, ptr: new(*All), out: &allValue},
-	{in: pallValueIndent, ptr: new(All), out: pallValue},
-	{in: pallValueCompact, ptr: new(All), out: pallValue},
-	{in: pallValueIndent, ptr: new(*All), out: &pallValue},
-	{in: pallValueCompact, ptr: new(*All), out: &pallValue},
+	{CaseName: Name(""), in: allValueIndent, ptr: new(All), out: allValue},
+	{CaseName: Name(""), in: allValueCompact, ptr: new(All), out: allValue},
+	{CaseName: Name(""), in: allValueIndent, ptr: new(*All), out: &allValue},
+	{CaseName: Name(""), in: allValueCompact, ptr: new(*All), out: &allValue},
+	{CaseName: Name(""), in: pallValueIndent, ptr: new(All), out: pallValue},
+	{CaseName: Name(""), in: pallValueCompact, ptr: new(All), out: pallValue},
+	{CaseName: Name(""), in: pallValueIndent, ptr: new(*All), out: &pallValue},
+	{CaseName: Name(""), in: pallValueCompact, ptr: new(*All), out: &pallValue},
 
 	// unmarshal interface test
-	{in: `{"T":false}`, ptr: new(unmarshaler), out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called
-	{in: `{"T":false}`, ptr: new(*unmarshaler), out: &umtrue},
-	{in: `[{"T":false}]`, ptr: new([]unmarshaler), out: umslice},
-	{in: `[{"T":false}]`, ptr: new(*[]unmarshaler), out: &umslice},
-	{in: `{"M":{"T":"x:y"}}`, ptr: new(ustruct), out: umstruct},
+	{CaseName: Name(""), in: `{"T":false}`, ptr: new(unmarshaler), out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called
+	{CaseName: Name(""), in: `{"T":false}`, ptr: new(*unmarshaler), out: &umtrue},
+	{CaseName: Name(""), in: `[{"T":false}]`, ptr: new([]unmarshaler), out: umslice},
+	{CaseName: Name(""), in: `[{"T":false}]`, ptr: new(*[]unmarshaler), out: &umslice},
+	{CaseName: Name(""), in: `{"M":{"T":"x:y"}}`, ptr: new(ustruct), out: umstruct},
 
 	// UnmarshalText interface test
-	{in: `"x:y"`, ptr: new(unmarshalerText), out: umtrueXY},
-	{in: `"x:y"`, ptr: new(*unmarshalerText), out: &umtrueXY},
-	{in: `["x:y"]`, ptr: new([]unmarshalerText), out: umsliceXY},
-	{in: `["x:y"]`, ptr: new(*[]unmarshalerText), out: &umsliceXY},
-	{in: `{"M":"x:y"}`, ptr: new(ustructText), out: umstructXY},
+	{CaseName: Name(""), in: `"x:y"`, ptr: new(unmarshalerText), out: umtrueXY},
+	{CaseName: Name(""), in: `"x:y"`, ptr: new(*unmarshalerText), out: &umtrueXY},
+	{CaseName: Name(""), in: `["x:y"]`, ptr: new([]unmarshalerText), out: umsliceXY},
+	{CaseName: Name(""), in: `["x:y"]`, ptr: new(*[]unmarshalerText), out: &umsliceXY},
+	{CaseName: Name(""), in: `{"M":"x:y"}`, ptr: new(ustructText), out: umstructXY},
 
 	// integer-keyed map test
 	{
-		in:  `{"-1":"a","0":"b","1":"c"}`,
-		ptr: new(map[int]string),
-		out: map[int]string{-1: "a", 0: "b", 1: "c"},
+		CaseName: Name(""),
+		in:       `{"-1":"a","0":"b","1":"c"}`,
+		ptr:      new(map[int]string),
+		out:      map[int]string{-1: "a", 0: "b", 1: "c"},
 	},
 	{
-		in:  `{"0":"a","10":"c","9":"b"}`,
-		ptr: new(map[u8]string),
-		out: map[u8]string{0: "a", 9: "b", 10: "c"},
+		CaseName: Name(""),
+		in:       `{"0":"a","10":"c","9":"b"}`,
+		ptr:      new(map[u8]string),
+		out:      map[u8]string{0: "a", 9: "b", 10: "c"},
 	},
 	{
-		in:  `{"-9223372036854775808":"min","9223372036854775807":"max"}`,
-		ptr: new(map[int64]string),
-		out: map[int64]string{math.MinInt64: "min", math.MaxInt64: "max"},
+		CaseName: Name(""),
+		in:       `{"-9223372036854775808":"min","9223372036854775807":"max"}`,
+		ptr:      new(map[int64]string),
+		out:      map[int64]string{math.MinInt64: "min", math.MaxInt64: "max"},
 	},
 	{
-		in:  `{"18446744073709551615":"max"}`,
-		ptr: new(map[uint64]string),
-		out: map[uint64]string{math.MaxUint64: "max"},
+		CaseName: Name(""),
+		in:       `{"18446744073709551615":"max"}`,
+		ptr:      new(map[uint64]string),
+		out:      map[uint64]string{math.MaxUint64: "max"},
 	},
 	{
-		in:  `{"0":false,"10":true}`,
-		ptr: new(map[uintptr]bool),
-		out: map[uintptr]bool{0: false, 10: true},
+		CaseName: Name(""),
+		in:       `{"0":false,"10":true}`,
+		ptr:      new(map[uintptr]bool),
+		out:      map[uintptr]bool{0: false, 10: true},
 	},
 
 	// Check that MarshalText and UnmarshalText take precedence
 	// over default integer handling in map keys.
 	{
-		in:  `{"u2":4}`,
-		ptr: new(map[u8marshal]int),
-		out: map[u8marshal]int{2: 4},
+		CaseName: Name(""),
+		in:       `{"u2":4}`,
+		ptr:      new(map[u8marshal]int),
+		out:      map[u8marshal]int{2: 4},
 	},
 	{
-		in:  `{"2":4}`,
-		ptr: new(map[u8marshal]int),
-		err: errMissingU8Prefix,
+		CaseName: Name(""),
+		in:       `{"2":4}`,
+		ptr:      new(map[u8marshal]int),
+		err:      errMissingU8Prefix,
 	},
 
 	// integer-keyed map errors
 	{
-		in:  `{"abc":"abc"}`,
-		ptr: new(map[int]string),
-		err: &UnmarshalTypeError{Value: "number abc", Type: reflect.TypeOf(0), Offset: 2},
+		CaseName: Name(""),
+		in:       `{"abc":"abc"}`,
+		ptr:      new(map[int]string),
+		err:      &UnmarshalTypeError{Value: "number abc", Type: reflect.TypeFor[int](), Offset: 2},
 	},
 	{
-		in:  `{"256":"abc"}`,
-		ptr: new(map[uint8]string),
-		err: &UnmarshalTypeError{Value: "number 256", Type: reflect.TypeOf(uint8(0)), Offset: 2},
+		CaseName: Name(""),
+		in:       `{"256":"abc"}`,
+		ptr:      new(map[uint8]string),
+		err:      &UnmarshalTypeError{Value: "number 256", Type: reflect.TypeFor[uint8](), Offset: 2},
 	},
 	{
-		in:  `{"128":"abc"}`,
-		ptr: new(map[int8]string),
-		err: &UnmarshalTypeError{Value: "number 128", Type: reflect.TypeOf(int8(0)), Offset: 2},
+		CaseName: Name(""),
+		in:       `{"128":"abc"}`,
+		ptr:      new(map[int8]string),
+		err:      &UnmarshalTypeError{Value: "number 128", Type: reflect.TypeFor[int8](), Offset: 2},
 	},
 	{
-		in:  `{"-1":"abc"}`,
-		ptr: new(map[uint8]string),
-		err: &UnmarshalTypeError{Value: "number -1", Type: reflect.TypeOf(uint8(0)), Offset: 2},
+		CaseName: Name(""),
+		in:       `{"-1":"abc"}`,
+		ptr:      new(map[uint8]string),
+		err:      &UnmarshalTypeError{Value: "number -1", Type: reflect.TypeFor[uint8](), Offset: 2},
 	},
 	{
-		in:  `{"F":{"a":2,"3":4}}`,
-		ptr: new(map[string]map[int]int),
-		err: &UnmarshalTypeError{Value: "number a", Type: reflect.TypeOf(int(0)), Offset: 7},
+		CaseName: Name(""),
+		in:       `{"F":{"a":2,"3":4}}`,
+		ptr:      new(map[string]map[int]int),
+		err:      &UnmarshalTypeError{Value: "number a", Type: reflect.TypeFor[int](), Offset: 7},
 	},
 	{
-		in:  `{"F":{"a":2,"3":4}}`,
-		ptr: new(map[string]map[uint]int),
-		err: &UnmarshalTypeError{Value: "number a", Type: reflect.TypeOf(uint(0)), Offset: 7},
+		CaseName: Name(""),
+		in:       `{"F":{"a":2,"3":4}}`,
+		ptr:      new(map[string]map[uint]int),
+		err:      &UnmarshalTypeError{Value: "number a", Type: reflect.TypeFor[uint](), Offset: 7},
 	},
 
 	// Map keys can be encoding.TextUnmarshalers.
-	{in: `{"x:y":true}`, ptr: new(map[unmarshalerText]bool), out: ummapXY},
+	{CaseName: Name(""), in: `{"x:y":true}`, ptr: new(map[unmarshalerText]bool), out: ummapXY},
 	// If multiple values for the same key exist, only the most recent value is used.
-	{in: `{"x:y":false,"x:y":true}`, ptr: new(map[unmarshalerText]bool), out: ummapXY},
+	{CaseName: Name(""), in: `{"x:y":false,"x:y":true}`, ptr: new(map[unmarshalerText]bool), out: ummapXY},
 
 	{
+		CaseName: Name(""),
 		in: `{
 			"Level0": 1,
 			"Level1b": 2,
@@ -634,93 +648,109 @@
 		},
 	},
 	{
-		in:  `{"hello": 1}`,
-		ptr: new(Ambig),
-		out: Ambig{First: 1},
+		CaseName: Name(""),
+		in:       `{"hello": 1}`,
+		ptr:      new(Ambig),
+		out:      Ambig{First: 1},
 	},
 
 	{
-		in:  `{"X": 1,"Y":2}`,
-		ptr: new(S5),
-		out: S5{S8: S8{S9: S9{Y: 2}}},
+		CaseName: Name(""),
+		in:       `{"X": 1,"Y":2}`,
+		ptr:      new(S5),
+		out:      S5{S8: S8{S9: S9{Y: 2}}},
 	},
 	{
+		CaseName:              Name(""),
 		in:                    `{"X": 1,"Y":2}`,
 		ptr:                   new(S5),
 		err:                   fmt.Errorf("json: unknown field \"X\""),
 		disallowUnknownFields: true,
 	},
 	{
-		in:  `{"X": 1,"Y":2}`,
-		ptr: new(S10),
-		out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}},
+		CaseName: Name(""),
+		in:       `{"X": 1,"Y":2}`,
+		ptr:      new(S10),
+		out:      S10{S13: S13{S8: S8{S9: S9{Y: 2}}}},
 	},
 	{
+		CaseName:              Name(""),
 		in:                    `{"X": 1,"Y":2}`,
 		ptr:                   new(S10),
 		err:                   fmt.Errorf("json: unknown field \"X\""),
 		disallowUnknownFields: true,
 	},
 	{
-		in:  `{"I": 0, "I": null, "J": null}`,
-		ptr: new(DoublePtr),
-		out: DoublePtr{I: nil, J: nil},
+		CaseName: Name(""),
+		in:       `{"I": 0, "I": null, "J": null}`,
+		ptr:      new(DoublePtr),
+		out:      DoublePtr{I: nil, J: nil},
 	},
 
 	// invalid UTF-8 is coerced to valid UTF-8.
 	{
-		in:  "\"hello\xffworld\"",
-		ptr: new(string),
-		out: "hello\ufffdworld",
+		CaseName: Name(""),
+		in:       "\"hello\xffworld\"",
+		ptr:      new(string),
+		out:      "hello\ufffdworld",
 	},
 	{
-		in:  "\"hello\xc2\xc2world\"",
-		ptr: new(string),
-		out: "hello\ufffd\ufffdworld",
+		CaseName: Name(""),
+		in:       "\"hello\xc2\xc2world\"",
+		ptr:      new(string),
+		out:      "hello\ufffd\ufffdworld",
 	},
 	{
-		in:  "\"hello\xc2\xffworld\"",
-		ptr: new(string),
-		out: "hello\ufffd\ufffdworld",
+		CaseName: Name(""),
+		in:       "\"hello\xc2\xffworld\"",
+		ptr:      new(string),
+		out:      "hello\ufffd\ufffdworld",
 	},
 	{
-		in:  "\"hello\\ud800world\"",
-		ptr: new(string),
-		out: "hello\ufffdworld",
+		CaseName: Name(""),
+		in:       "\"hello\\ud800world\"",
+		ptr:      new(string),
+		out:      "hello\ufffdworld",
 	},
 	{
-		in:  "\"hello\\ud800\\ud800world\"",
-		ptr: new(string),
-		out: "hello\ufffd\ufffdworld",
+		CaseName: Name(""),
+		in:       "\"hello\\ud800\\ud800world\"",
+		ptr:      new(string),
+		out:      "hello\ufffd\ufffdworld",
 	},
 	{
-		in:  "\"hello\\ud800\\ud800world\"",
-		ptr: new(string),
-		out: "hello\ufffd\ufffdworld",
+		CaseName: Name(""),
+		in:       "\"hello\\ud800\\ud800world\"",
+		ptr:      new(string),
+		out:      "hello\ufffd\ufffdworld",
 	},
 	{
-		in:  "\"hello\xed\xa0\x80\xed\xb0\x80world\"",
-		ptr: new(string),
-		out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld",
+		CaseName: Name(""),
+		in:       "\"hello\xed\xa0\x80\xed\xb0\x80world\"",
+		ptr:      new(string),
+		out:      "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld",
 	},
 
 	// Used to be issue 8305, but time.Time implements encoding.TextUnmarshaler so this works now.
 	{
-		in:  `{"2009-11-10T23:00:00Z": "hello world"}`,
-		ptr: new(map[time.Time]string),
-		out: map[time.Time]string{time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC): "hello world"},
+		CaseName: Name(""),
+		in:       `{"2009-11-10T23:00:00Z": "hello world"}`,
+		ptr:      new(map[time.Time]string),
+		out:      map[time.Time]string{time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC): "hello world"},
 	},
 
 	// issue 8305
 	{
-		in:  `{"2009-11-10T23:00:00Z": "hello world"}`,
-		ptr: new(map[Point]string),
-		err: &UnmarshalTypeError{Value: "object", Type: reflect.TypeOf(map[Point]string{}), Offset: 1},
+		CaseName: Name(""),
+		in:       `{"2009-11-10T23:00:00Z": "hello world"}`,
+		ptr:      new(map[Point]string),
+		err:      &UnmarshalTypeError{Value: "object", Type: reflect.TypeFor[map[Point]string](), Offset: 1},
 	},
 	{
-		in:  `{"asdf": "hello world"}`,
-		ptr: new(map[unmarshaler]string),
-		err: &UnmarshalTypeError{Value: "object", Type: reflect.TypeOf(map[unmarshaler]string{}), Offset: 1},
+		CaseName: Name(""),
+		in:       `{"asdf": "hello world"}`,
+		ptr:      new(map[unmarshaler]string),
+		err:      &UnmarshalTypeError{Value: "object", Type: reflect.TypeFor[map[unmarshaler]string](), Offset: 1},
 	},
 
 	// related to issue 13783.
@@ -731,124 +761,139 @@
 	// successfully unmarshaled. The custom unmarshalers were accessible in earlier
 	// versions of Go, even though the custom marshaler was not.
 	{
-		in:  `"AQID"`,
-		ptr: new([]byteWithMarshalJSON),
-		out: []byteWithMarshalJSON{1, 2, 3},
+		CaseName: Name(""),
+		in:       `"AQID"`,
+		ptr:      new([]byteWithMarshalJSON),
+		out:      []byteWithMarshalJSON{1, 2, 3},
 	},
 	{
-		in:     `["Z01","Z02","Z03"]`,
-		ptr:    new([]byteWithMarshalJSON),
-		out:    []byteWithMarshalJSON{1, 2, 3},
-		golden: true,
+		CaseName: Name(""),
+		in:       `["Z01","Z02","Z03"]`,
+		ptr:      new([]byteWithMarshalJSON),
+		out:      []byteWithMarshalJSON{1, 2, 3},
+		golden:   true,
 	},
 	{
-		in:  `"AQID"`,
-		ptr: new([]byteWithMarshalText),
-		out: []byteWithMarshalText{1, 2, 3},
+		CaseName: Name(""),
+		in:       `"AQID"`,
+		ptr:      new([]byteWithMarshalText),
+		out:      []byteWithMarshalText{1, 2, 3},
 	},
 	{
-		in:     `["Z01","Z02","Z03"]`,
-		ptr:    new([]byteWithMarshalText),
-		out:    []byteWithMarshalText{1, 2, 3},
-		golden: true,
+		CaseName: Name(""),
+		in:       `["Z01","Z02","Z03"]`,
+		ptr:      new([]byteWithMarshalText),
+		out:      []byteWithMarshalText{1, 2, 3},
+		golden:   true,
 	},
 	{
-		in:  `"AQID"`,
-		ptr: new([]byteWithPtrMarshalJSON),
-		out: []byteWithPtrMarshalJSON{1, 2, 3},
+		CaseName: Name(""),
+		in:       `"AQID"`,
+		ptr:      new([]byteWithPtrMarshalJSON),
+		out:      []byteWithPtrMarshalJSON{1, 2, 3},
 	},
 	{
-		in:     `["Z01","Z02","Z03"]`,
-		ptr:    new([]byteWithPtrMarshalJSON),
-		out:    []byteWithPtrMarshalJSON{1, 2, 3},
-		golden: true,
+		CaseName: Name(""),
+		in:       `["Z01","Z02","Z03"]`,
+		ptr:      new([]byteWithPtrMarshalJSON),
+		out:      []byteWithPtrMarshalJSON{1, 2, 3},
+		golden:   true,
 	},
 	{
-		in:  `"AQID"`,
-		ptr: new([]byteWithPtrMarshalText),
-		out: []byteWithPtrMarshalText{1, 2, 3},
+		CaseName: Name(""),
+		in:       `"AQID"`,
+		ptr:      new([]byteWithPtrMarshalText),
+		out:      []byteWithPtrMarshalText{1, 2, 3},
 	},
 	{
-		in:     `["Z01","Z02","Z03"]`,
-		ptr:    new([]byteWithPtrMarshalText),
-		out:    []byteWithPtrMarshalText{1, 2, 3},
-		golden: true,
+		CaseName: Name(""),
+		in:       `["Z01","Z02","Z03"]`,
+		ptr:      new([]byteWithPtrMarshalText),
+		out:      []byteWithPtrMarshalText{1, 2, 3},
+		golden:   true,
 	},
 
 	// ints work with the marshaler but not the base64 []byte case
 	{
-		in:     `["Z01","Z02","Z03"]`,
-		ptr:    new([]intWithMarshalJSON),
-		out:    []intWithMarshalJSON{1, 2, 3},
-		golden: true,
+		CaseName: Name(""),
+		in:       `["Z01","Z02","Z03"]`,
+		ptr:      new([]intWithMarshalJSON),
+		out:      []intWithMarshalJSON{1, 2, 3},
+		golden:   true,
 	},
 	{
-		in:     `["Z01","Z02","Z03"]`,
-		ptr:    new([]intWithMarshalText),
-		out:    []intWithMarshalText{1, 2, 3},
-		golden: true,
+		CaseName: Name(""),
+		in:       `["Z01","Z02","Z03"]`,
+		ptr:      new([]intWithMarshalText),
+		out:      []intWithMarshalText{1, 2, 3},
+		golden:   true,
 	},
 	{
-		in:     `["Z01","Z02","Z03"]`,
-		ptr:    new([]intWithPtrMarshalJSON),
-		out:    []intWithPtrMarshalJSON{1, 2, 3},
-		golden: true,
+		CaseName: Name(""),
+		in:       `["Z01","Z02","Z03"]`,
+		ptr:      new([]intWithPtrMarshalJSON),
+		out:      []intWithPtrMarshalJSON{1, 2, 3},
+		golden:   true,
 	},
 	{
-		in:     `["Z01","Z02","Z03"]`,
-		ptr:    new([]intWithPtrMarshalText),
-		out:    []intWithPtrMarshalText{1, 2, 3},
-		golden: true,
+		CaseName: Name(""),
+		in:       `["Z01","Z02","Z03"]`,
+		ptr:      new([]intWithPtrMarshalText),
+		out:      []intWithPtrMarshalText{1, 2, 3},
+		golden:   true,
 	},
 
-	{in: `0.000001`, ptr: new(float64), out: 0.000001, golden: true},
-	{in: `1e-7`, ptr: new(float64), out: 1e-7, golden: true},
-	{in: `100000000000000000000`, ptr: new(float64), out: 100000000000000000000.0, golden: true},
-	{in: `1e+21`, ptr: new(float64), out: 1e21, golden: true},
-	{in: `-0.000001`, ptr: new(float64), out: -0.000001, golden: true},
-	{in: `-1e-7`, ptr: new(float64), out: -1e-7, golden: true},
-	{in: `-100000000000000000000`, ptr: new(float64), out: -100000000000000000000.0, golden: true},
-	{in: `-1e+21`, ptr: new(float64), out: -1e21, golden: true},
-	{in: `999999999999999900000`, ptr: new(float64), out: 999999999999999900000.0, golden: true},
-	{in: `9007199254740992`, ptr: new(float64), out: 9007199254740992.0, golden: true},
-	{in: `9007199254740993`, ptr: new(float64), out: 9007199254740992.0, golden: false},
+	{CaseName: Name(""), in: `0.000001`, ptr: new(float64), out: 0.000001, golden: true},
+	{CaseName: Name(""), in: `1e-7`, ptr: new(float64), out: 1e-7, golden: true},
+	{CaseName: Name(""), in: `100000000000000000000`, ptr: new(float64), out: 100000000000000000000.0, golden: true},
+	{CaseName: Name(""), in: `1e+21`, ptr: new(float64), out: 1e21, golden: true},
+	{CaseName: Name(""), in: `-0.000001`, ptr: new(float64), out: -0.000001, golden: true},
+	{CaseName: Name(""), in: `-1e-7`, ptr: new(float64), out: -1e-7, golden: true},
+	{CaseName: Name(""), in: `-100000000000000000000`, ptr: new(float64), out: -100000000000000000000.0, golden: true},
+	{CaseName: Name(""), in: `-1e+21`, ptr: new(float64), out: -1e21, golden: true},
+	{CaseName: Name(""), in: `999999999999999900000`, ptr: new(float64), out: 999999999999999900000.0, golden: true},
+	{CaseName: Name(""), in: `9007199254740992`, ptr: new(float64), out: 9007199254740992.0, golden: true},
+	{CaseName: Name(""), in: `9007199254740993`, ptr: new(float64), out: 9007199254740992.0, golden: false},
 
 	{
-		in:  `{"V": {"F2": "hello"}}`,
-		ptr: new(VOuter),
+		CaseName: Name(""),
+		in:       `{"V": {"F2": "hello"}}`,
+		ptr:      new(VOuter),
 		err: &UnmarshalTypeError{
 			Value:  "string",
 			Struct: "V",
 			Field:  "V.F2",
-			Type:   reflect.TypeOf(int32(0)),
+			Type:   reflect.TypeFor[int32](),
 			Offset: 20,
 		},
 	},
 	{
-		in:  `{"V": {"F4": {}, "F2": "hello"}}`,
-		ptr: new(VOuter),
+		CaseName: Name(""),
+		in:       `{"V": {"F4": {}, "F2": "hello"}}`,
+		ptr:      new(VOuter),
 		err: &UnmarshalTypeError{
 			Value:  "string",
 			Struct: "V",
 			Field:  "V.F2",
-			Type:   reflect.TypeOf(int32(0)),
+			Type:   reflect.TypeFor[int32](),
 			Offset: 30,
 		},
 	},
 
 	// issue 15146.
 	// invalid inputs in TestErrorMessageFromMisusedString below.
-	{in: `{"B":"true"}`, ptr: new(B), out: B{true}, golden: true},
-	{in: `{"B":"false"}`, ptr: new(B), out: B{false}, golden: true},
-	{in: `{"B": "maybe"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "maybe" into bool`)},
-	{in: `{"B": "tru"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "tru" into bool`)},
-	{in: `{"B": "False"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "False" into bool`)},
-	{in: `{"B": "null"}`, ptr: new(B), out: B{false}},
-	{in: `{"B": "nul"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "nul" into bool`)},
-	{in: `{"B": [2, 3]}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal unquoted value into bool`)},
+	{CaseName: Name(""), in: `{"B":"true"}`, ptr: new(B), out: B{true}, golden: true},
+	{CaseName: Name(""), in: `{"B":"false"}`, ptr: new(B), out: B{false}, golden: true},
+	{CaseName: Name(""), in: `{"B": "maybe"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "maybe" into bool`)},
+	{CaseName: Name(""), in: `{"B": "tru"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "tru" into bool`)},
+	{CaseName: Name(""), in: `{"B": "False"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "False" into bool`)},
+	{CaseName: Name(""), in: `{"B": "null"}`, ptr: new(B), out: B{false}},
+	{CaseName: Name(""), in: `{"B": "nul"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "nul" into bool`)},
+	{CaseName: Name(""), in: `{"B": [2, 3]}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal unquoted value into bool`)},
 
 	// additional tests for disallowUnknownFields
 	{
+		CaseName: Name(""),
 		in: `{
 			"Level0": 1,
 			"Level1b": 2,
@@ -876,6 +921,7 @@
 		disallowUnknownFields: true,
 	},
 	{
+		CaseName: Name(""),
 		in: `{
 			"Level0": 1,
 			"Level1b": 2,
@@ -905,122 +951,136 @@
 	// issue 26444
 	// UnmarshalTypeError without field & struct values
 	{
-		in:  `{"data":{"test1": "bob", "test2": 123}}`,
-		ptr: new(mapStringToStringData),
-		err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeOf(""), Offset: 37, Struct: "mapStringToStringData", Field: "data"},
+		CaseName: Name(""),
+		in:       `{"data":{"test1": "bob", "test2": 123}}`,
+		ptr:      new(mapStringToStringData),
+		err:      &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[string](), Offset: 37, Struct: "mapStringToStringData", Field: "data"},
 	},
 	{
-		in:  `{"data":{"test1": 123, "test2": "bob"}}`,
-		ptr: new(mapStringToStringData),
-		err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeOf(""), Offset: 21, Struct: "mapStringToStringData", Field: "data"},
+		CaseName: Name(""),
+		in:       `{"data":{"test1": 123, "test2": "bob"}}`,
+		ptr:      new(mapStringToStringData),
+		err:      &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[string](), Offset: 21, Struct: "mapStringToStringData", Field: "data"},
 	},
 
 	// trying to decode JSON arrays or objects via TextUnmarshaler
 	{
-		in:  `[1, 2, 3]`,
-		ptr: new(MustNotUnmarshalText),
-		err: &UnmarshalTypeError{Value: "array", Type: reflect.TypeOf(&MustNotUnmarshalText{}), Offset: 1},
+		CaseName: Name(""),
+		in:       `[1, 2, 3]`,
+		ptr:      new(MustNotUnmarshalText),
+		err:      &UnmarshalTypeError{Value: "array", Type: reflect.TypeFor[*MustNotUnmarshalText](), Offset: 1},
 	},
 	{
-		in:  `{"foo": "bar"}`,
-		ptr: new(MustNotUnmarshalText),
-		err: &UnmarshalTypeError{Value: "object", Type: reflect.TypeOf(&MustNotUnmarshalText{}), Offset: 1},
+		CaseName: Name(""),
+		in:       `{"foo": "bar"}`,
+		ptr:      new(MustNotUnmarshalText),
+		err:      &UnmarshalTypeError{Value: "object", Type: reflect.TypeFor[*MustNotUnmarshalText](), Offset: 1},
 	},
 	// #22369
 	{
-		in:  `{"PP": {"T": {"Y": "bad-type"}}}`,
-		ptr: new(P),
+		CaseName: Name(""),
+		in:       `{"PP": {"T": {"Y": "bad-type"}}}`,
+		ptr:      new(P),
 		err: &UnmarshalTypeError{
 			Value:  "string",
 			Struct: "T",
 			Field:  "PP.T.Y",
-			Type:   reflect.TypeOf(int(0)),
+			Type:   reflect.TypeFor[int](),
 			Offset: 29,
 		},
 	},
 	{
-		in:  `{"Ts": [{"Y": 1}, {"Y": 2}, {"Y": "bad-type"}]}`,
-		ptr: new(PP),
+		CaseName: Name(""),
+		in:       `{"Ts": [{"Y": 1}, {"Y": 2}, {"Y": "bad-type"}]}`,
+		ptr:      new(PP),
 		err: &UnmarshalTypeError{
 			Value:  "string",
 			Struct: "T",
 			Field:  "Ts.Y",
-			Type:   reflect.TypeOf(int(0)),
+			Type:   reflect.TypeFor[int](),
 			Offset: 29,
 		},
 	},
 	// #14702
 	{
-		in:  `invalid`,
-		ptr: new(Number),
+		CaseName: Name(""),
+		in:       `invalid`,
+		ptr:      new(Number),
 		err: &SyntaxError{
 			msg:    "invalid character 'i' looking for beginning of value",
 			Offset: 1,
 		},
 	},
 	{
-		in:  `"invalid"`,
-		ptr: new(Number),
-		err: fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", `"invalid"`),
+		CaseName: Name(""),
+		in:       `"invalid"`,
+		ptr:      new(Number),
+		err:      fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", `"invalid"`),
 	},
 	{
-		in:  `{"A":"invalid"}`,
-		ptr: new(struct{ A Number }),
-		err: fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", `"invalid"`),
+		CaseName: Name(""),
+		in:       `{"A":"invalid"}`,
+		ptr:      new(struct{ A Number }),
+		err:      fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", `"invalid"`),
 	},
 	{
-		in: `{"A":"invalid"}`,
+		CaseName: Name(""),
+		in:       `{"A":"invalid"}`,
 		ptr: new(struct {
 			A Number `json:",string"`
 		}),
 		err: fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into json.Number", `invalid`),
 	},
 	{
-		in:  `{"A":"invalid"}`,
-		ptr: new(map[string]Number),
-		err: fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", `"invalid"`),
+		CaseName: Name(""),
+		in:       `{"A":"invalid"}`,
+		ptr:      new(map[string]Number),
+		err:      fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", `"invalid"`),
 	},
 }
 
 func TestMarshal(t *testing.T) {
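+	// allValue and pallValue must encode to exactly their precomputed compact JSON forms.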
 	b, err := Marshal(allValue)
 	if err != nil {
-		t.Fatalf("Marshal allValue: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	if string(b) != allValueCompact {
-		t.Errorf("Marshal allValueCompact")
+		t.Errorf("Marshal:")
 		diff(t, b, []byte(allValueCompact))
 		return
 	}
 
 	b, err = Marshal(pallValue)
 	if err != nil {
-		t.Fatalf("Marshal pallValue: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	if string(b) != pallValueCompact {
-		t.Errorf("Marshal pallValueCompact")
+		t.Errorf("Marshal:")
 		diff(t, b, []byte(pallValueCompact))
 		return
 	}
 }
 
-var badUTF8 = []struct {
-	in, out string
-}{
-	{"hello\xffworld", `"hello\ufffdworld"`},
-	{"", `""`},
-	{"\xff", `"\ufffd"`},
-	{"\xff\xff", `"\ufffd\ufffd"`},
-	{"a\xffb", `"a\ufffdb"`},
-	{"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`},
-}
-
-func TestMarshalBadUTF8(t *testing.T) {
-	for _, tt := range badUTF8 {
-		b, err := Marshal(tt.in)
-		if string(b) != tt.out || err != nil {
-			t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out)
-		}
+func TestMarshalInvalidUTF8(t *testing.T) {
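+	// Marshal replaces invalid UTF-8 byte sequences in strings with the Unicode replacement character, written as \ufffd.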
+	tests := []struct {
+		CaseName
+		in   string
+		want string
+	}{
+		{Name(""), "hello\xffworld", `"hello\ufffdworld"`},
+		{Name(""), "", `""`},
+		{Name(""), "\xff", `"\ufffd"`},
+		{Name(""), "\xff\xff", `"\ufffd\ufffd"`},
+		{Name(""), "a\xffb", `"a\ufffdb"`},
+		{Name(""), "\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			got, err := Marshal(tt.in)
+			if string(got) != tt.want || err != nil {
+				t.Errorf("%s: Marshal(%q):\n\tgot:  (%q, %v)\n\twant: (%q, nil)", tt.Where, tt.in, got, err, tt.want)
+			}
+		})
 	}
 }
 
@@ -1028,11 +1088,11 @@
 	var n Number
 	out, err := Marshal(n)
 	if err != nil {
-		t.Fatal(err)
+		t.Fatalf("Marshal error: %v", err)
 	}
-	outStr := string(out)
-	if outStr != "0" {
-		t.Fatalf("Invalid zero val for Number: %q", outStr)
+	got := string(out)
+	if got != "0" {
+		t.Fatalf("Marshal: got %s, want 0", got)
 	}
 }
 
@@ -1068,109 +1128,98 @@
 			Q: 18,
 		},
 	}
-	b, err := Marshal(top)
+	got, err := Marshal(top)
 	if err != nil {
-		t.Fatal(err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}"
-	if string(b) != want {
-		t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want)
+	if string(got) != want {
+		t.Errorf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
 func equalError(a, b error) bool {
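+	// Two errors are considered equal if both are nil or their Error strings match.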
-	if a == nil {
-		return b == nil
-	}
-	if b == nil {
-		return a == nil
+	if a == nil || b == nil {
+		return a == nil && b == nil
 	}
 	return a.Error() == b.Error()
 }
 
 func TestUnmarshal(t *testing.T) {
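+	// Each case is validated with checkValid, decoded with a Decoder configured from the test flags,
+	// compared against tt.out or tt.err, and round-tripped through Marshal when it succeeds.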
-	for i, tt := range unmarshalTests {
-		var scan scanner
-		in := []byte(tt.in)
-		if err := checkValid(in, &scan); err != nil {
-			if !equalError(err, tt.err) {
-				t.Errorf("#%d: checkValid: %#v", i, err)
-				continue
+	for _, tt := range unmarshalTests {
+		t.Run(tt.Name, func(t *testing.T) {
+			in := []byte(tt.in)
+			var scan scanner
+			if err := checkValid(in, &scan); err != nil {
+				if !equalError(err, tt.err) {
+					t.Fatalf("%s: checkValid error: %#v", tt.Where, err)
+				}
 			}
-		}
-		if tt.ptr == nil {
-			continue
-		}
-
-		typ := reflect.TypeOf(tt.ptr)
-		if typ.Kind() != reflect.Pointer {
-			t.Errorf("#%d: unmarshalTest.ptr %T is not a pointer type", i, tt.ptr)
-			continue
-		}
-		typ = typ.Elem()
-
-		// v = new(right-type)
-		v := reflect.New(typ)
-
-		if !reflect.DeepEqual(tt.ptr, v.Interface()) {
-			// There's no reason for ptr to point to non-zero data,
-			// as we decode into new(right-type), so the data is
-			// discarded.
-			// This can easily mean tests that silently don't test
-			// what they should. To test decoding into existing
-			// data, see TestPrefilled.
-			t.Errorf("#%d: unmarshalTest.ptr %#v is not a pointer to a zero value", i, tt.ptr)
-			continue
-		}
-
-		dec := NewDecoder(bytes.NewReader(in))
-		if tt.useNumber {
-			dec.UseNumber()
-		}
-		if tt.disallowUnknownFields {
-			dec.DisallowUnknownFields()
-		}
-		if err := dec.Decode(v.Interface()); !equalError(err, tt.err) {
-			t.Errorf("#%d: %v, want %v", i, err, tt.err)
-			continue
-		} else if err != nil {
-			continue
-		}
-		if !reflect.DeepEqual(v.Elem().Interface(), tt.out) {
-			t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out)
-			data, _ := Marshal(v.Elem().Interface())
-			println(string(data))
-			data, _ = Marshal(tt.out)
-			println(string(data))
-			continue
-		}
-
-		// Check round trip also decodes correctly.
-		if tt.err == nil {
-			enc, err := Marshal(v.Interface())
-			if err != nil {
-				t.Errorf("#%d: error re-marshaling: %v", i, err)
-				continue
+			if tt.ptr == nil {
+				return
 			}
-			if tt.golden && !bytes.Equal(enc, in) {
-				t.Errorf("#%d: remarshal mismatch:\nhave: %s\nwant: %s", i, enc, in)
+
+			typ := reflect.TypeOf(tt.ptr)
+			if typ.Kind() != reflect.Pointer {
+				t.Fatalf("%s: unmarshalTest.ptr %T is not a pointer type", tt.Where, tt.ptr)
 			}
-			vv := reflect.New(reflect.TypeOf(tt.ptr).Elem())
-			dec = NewDecoder(bytes.NewReader(enc))
+			typ = typ.Elem()
+
+			// v = new(right-type)
+			v := reflect.New(typ)
+
+			if !reflect.DeepEqual(tt.ptr, v.Interface()) {
+				// There's no reason for ptr to point to non-zero data,
+				// as we decode into new(right-type), so the data is
+				// discarded.
+				// This can easily mean tests that silently don't test
+				// what they should. To test decoding into existing
+				// data, see TestPrefilled.
+				t.Fatalf("%s: unmarshalTest.ptr %#v is not a pointer to a zero value", tt.Where, tt.ptr)
+			}
+
+			dec := NewDecoder(bytes.NewReader(in))
 			if tt.useNumber {
 				dec.UseNumber()
 			}
-			if err := dec.Decode(vv.Interface()); err != nil {
-				t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err)
-				continue
+			if tt.disallowUnknownFields {
+				dec.DisallowUnknownFields()
 			}
-			if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) {
-				t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface())
-				t.Errorf("     In: %q", strings.Map(noSpace, string(in)))
-				t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc)))
-				continue
+			if err := dec.Decode(v.Interface()); !equalError(err, tt.err) {
+				t.Fatalf("%s: Decode error:\n\tgot:  %v\n\twant: %v", tt.Where, err, tt.err)
+			} else if err != nil {
+				return
 			}
-		}
+			if got := v.Elem().Interface(); !reflect.DeepEqual(got, tt.out) {
+				gotJSON, _ := Marshal(got)
+				wantJSON, _ := Marshal(tt.out)
+				t.Fatalf("%s: Decode:\n\tgot:  %#+v\n\twant: %#+v\n\n\tgotJSON:  %s\n\twantJSON: %s", tt.Where, got, tt.out, gotJSON, wantJSON)
+			}
+
+			// Check round trip also decodes correctly.
+			if tt.err == nil {
+				enc, err := Marshal(v.Interface())
+				if err != nil {
+					t.Fatalf("%s: Marshal error after roundtrip: %v", tt.Where, err)
+				}
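+				// A golden case means tt.in is the canonical encoding, so re-marshaling must reproduce it byte for byte.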
+				if tt.golden && !bytes.Equal(enc, in) {
+					t.Errorf("%s: Marshal:\n\tgot:  %s\n\twant: %s", tt.Where, enc, in)
+				}
+				vv := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+				dec = NewDecoder(bytes.NewReader(enc))
+				if tt.useNumber {
+					dec.UseNumber()
+				}
+				if err := dec.Decode(vv.Interface()); err != nil {
+					t.Fatalf("%s: Decode(%#q) error after roundtrip: %v", tt.Where, enc, err)
+				}
+				if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) {
+					t.Fatalf("%s: Decode:\n\tgot:  %#+v\n\twant: %#+v\n\n\tgotJSON:  %s\n\twantJSON: %s",
+						tt.Where, v.Elem().Interface(), vv.Elem().Interface(),
+						stripWhitespace(string(enc)), stripWhitespace(string(in)))
+				}
+			}
+		})
 	}
 }
 
@@ -1178,48 +1227,50 @@
 	initBig()
 	var v any
 	if err := Unmarshal(jsonBig, &v); err != nil {
-		t.Fatalf("Unmarshal: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 	b, err := Marshal(v)
 	if err != nil {
-		t.Fatalf("Marshal: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	if !bytes.Equal(jsonBig, b) {
-		t.Errorf("Marshal jsonBig")
+		t.Errorf("Marshal:")
 		diff(t, b, jsonBig)
 		return
 	}
 }
 
-var numberTests = []struct {
-	in       string
-	i        int64
-	intErr   string
-	f        float64
-	floatErr string
-}{
-	{in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1},
-	{in: "-12", i: -12, f: -12.0},
-	{in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"},
-}
-
 // Independent of Decode, basic coverage of the accessors in Number
 func TestNumberAccessors(t *testing.T) {
-	for _, tt := range numberTests {
-		n := Number(tt.in)
-		if s := n.String(); s != tt.in {
-			t.Errorf("Number(%q).String() is %q", tt.in, s)
-		}
-		if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i {
-			t.Errorf("Number(%q).Int64() is %d", tt.in, i)
-		} else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) {
-			t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err)
-		}
-		if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f {
-			t.Errorf("Number(%q).Float64() is %g", tt.in, f)
-		} else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) {
-			t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err)
-		}
+	tests := []struct {
+		CaseName
+		in       string
+		i        int64
+		intErr   string
+		f        float64
+		floatErr string
+	}{
+		{CaseName: Name(""), in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1},
+		{CaseName: Name(""), in: "-12", i: -12, f: -12.0},
+		{CaseName: Name(""), in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			n := Number(tt.in)
+			if got := n.String(); got != tt.in {
+				t.Errorf("%s: Number(%q).String() = %s, want %s", tt.Where, tt.in, got, tt.in)
+			}
+			if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i {
+				t.Errorf("%s: Number(%q).Int64() = %d, want %d", tt.Where, tt.in, i, tt.i)
+			} else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) {
+				t.Errorf("%s: Number(%q).Int64() error:\n\tgot:  %v\n\twant: %v", tt.Where, tt.in, err, tt.intErr)
+			}
+			if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f {
+				t.Errorf("%s: Number(%q).Float64() = %g, want %g", tt.Where, tt.in, f, tt.f)
+			} else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) {
+				t.Errorf("%s: Number(%q).Float64() error:\n\tgot:  %v\n\twant: %v", tt.Where, tt.in, err, tt.floatErr)
+			}
+		})
 	}
 }
 
@@ -1230,14 +1281,14 @@
 	}
 	b, err := Marshal(s0)
 	if err != nil {
-		t.Fatalf("Marshal: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	var s1 []byte
 	if err := Unmarshal(b, &s1); err != nil {
-		t.Fatalf("Unmarshal: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 	if !bytes.Equal(s0, s1) {
-		t.Errorf("Marshal large byte slice")
+		t.Errorf("Marshal:")
 		diff(t, s0, s1)
 	}
 }
@@ -1250,10 +1301,10 @@
 	var xint Xint
 	var i any = &xint
 	if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil {
-		t.Fatalf("Unmarshal: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 	if xint.X != 1 {
-		t.Fatalf("Did not write to xint")
+		t.Fatalf("xint.X = %d, want 1", xint.X)
 	}
 }
 
@@ -1264,59 +1315,51 @@
 		t.Fatalf("Unmarshal: %v", err)
 	}
 	if xint.X != 1 {
-		t.Fatalf("Did not write to xint")
+		t.Fatalf("xint.X = %d, want 1", xint.X)
 	}
 }
 
 func TestEscape(t *testing.T) {
 	const input = `"foobar"<html>` + " [\u2028 \u2029]"
-	const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"`
-	b, err := Marshal(input)
+	const want = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"`
+	got, err := Marshal(input)
 	if err != nil {
 		t.Fatalf("Marshal error: %v", err)
 	}
-	if s := string(b); s != expected {
-		t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected)
+	if string(got) != want {
+		t.Errorf("Marshal(%#q):\n\tgot:  %s\n\twant: %s", input, got, want)
 	}
 }
 
-// WrongString is a struct that's misusing the ,string modifier.
-type WrongString struct {
-	Message string `json:"result,string"`
-}
-
-type wrongStringTest struct {
-	in, err string
-}
-
-var wrongStringTests = []wrongStringTest{
-	{`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`},
-	{`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`},
-	{`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`},
-	{`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`},
-	{`{"result":"\""}`, `json: invalid use of ,string struct tag, trying to unmarshal "\"" into string`},
-	{`{"result":"\"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "\"foo" into string`},
-}
-
 // If people misuse the ,string modifier, the error message should be
 // helpful, telling the user that they're doing it wrong.
 func TestErrorMessageFromMisusedString(t *testing.T) {
-	for n, tt := range wrongStringTests {
-		r := strings.NewReader(tt.in)
-		var s WrongString
-		err := NewDecoder(r).Decode(&s)
-		got := fmt.Sprintf("%v", err)
-		if got != tt.err {
-			t.Errorf("%d. got err = %q, want %q", n, got, tt.err)
-		}
+	// WrongString is a struct that's misusing the ,string modifier.
+	type WrongString struct {
+		Message string `json:"result,string"`
 	}
-}
-
-func noSpace(c rune) rune {
-	if isSpace(byte(c)) { //only used for ascii
-		return -1
+	tests := []struct {
+		CaseName
+		in, err string
+	}{
+		{Name(""), `{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`},
+		{Name(""), `{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`},
+		{Name(""), `{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`},
+		{Name(""), `{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`},
+		{Name(""), `{"result":"\""}`, `json: invalid use of ,string struct tag, trying to unmarshal "\"" into string`},
+		{Name(""), `{"result":"\"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "\"foo" into string`},
 	}
-	return c
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			r := strings.NewReader(tt.in)
+			var s WrongString
+			err := NewDecoder(r).Decode(&s)
+			got := fmt.Sprintf("%v", err)
+			if got != tt.err {
+				t.Errorf("%s: Decode error:\n\tgot:  %s\n\twant: %s", tt.Where, got, tt.err)
+			}
+		})
+	}
 }
 
 type All struct {
@@ -1546,7 +1589,7 @@
 	"PInterface": null
 }`
 
-var allValueCompact = strings.Map(noSpace, allValueIndent)
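+// allValueCompact is the expected compact encoding of allValue: allValueIndent with all whitespace stripped.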
+var allValueCompact = stripWhitespace(allValueIndent)
 
 var pallValueIndent = `{
 	"Bool": false,
@@ -1635,7 +1678,7 @@
 	"PInterface": 5.2
 }`
 
-var pallValueCompact = strings.Map(noSpace, pallValueIndent)
+var pallValueCompact = stripWhitespace(pallValueIndent)
 
 func TestRefUnmarshal(t *testing.T) {
 	type S struct {
@@ -1656,10 +1699,10 @@
 
 	var got S
 	if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil {
-		t.Fatalf("Unmarshal: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 	if !reflect.DeepEqual(got, want) {
-		t.Errorf("got %+v, want %+v", got, want)
+		t.Errorf("Unmarshal:\n\tgot:  %+v\n\twant: %+v", got, want)
 	}
 }
 
@@ -1672,13 +1715,12 @@
 	}
 	data := `{"Number1":"1", "Number2":""}`
 	dec := NewDecoder(strings.NewReader(data))
-	var t2 T2
-	err := dec.Decode(&t2)
-	if err == nil {
-		t.Fatal("Decode: did not return error")
-	}
-	if t2.Number1 != 1 {
-		t.Fatal("Decode: did not set Number1")
+	var got T2
+	switch err := dec.Decode(&got); {
+	case err == nil:
+		t.Fatalf("Decode error: got nil, want non-nil")
+	case got.Number1 != 1:
+		t.Fatalf("Decode: got.Number1 = %d, want 1", got.Number1)
 	}
 }
 
@@ -1695,12 +1737,13 @@
 	s.B = 1
 	s.C = new(int)
 	*s.C = 2
-	err := Unmarshal(data, &s)
-	if err != nil {
-		t.Fatalf("Unmarshal: %v", err)
-	}
-	if s.B != 1 || s.C != nil {
-		t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C)
+	switch err := Unmarshal(data, &s); {
+	case err != nil:
+		t.Fatalf("Unmarshal error: %v", err)
+	case s.B != 1:
+		t.Fatalf("Unmarshal: s.B = %d, want 1", s.B)
+	case s.C != nil:
+		t.Fatalf("Unmarshal: s.C = %v, want nil", s.C)
 	}
 }
 
@@ -1716,37 +1759,38 @@
 	return pp
 }
 
-var interfaceSetTests = []struct {
-	pre  any
-	json string
-	post any
-}{
-	{"foo", `"bar"`, "bar"},
-	{"foo", `2`, 2.0},
-	{"foo", `true`, true},
-	{"foo", `null`, nil},
-
-	{nil, `null`, nil},
-	{new(int), `null`, nil},
-	{(*int)(nil), `null`, nil},
-	{new(*int), `null`, new(*int)},
-	{(**int)(nil), `null`, nil},
-	{intp(1), `null`, nil},
-	{intpp(nil), `null`, intpp(nil)},
-	{intpp(intp(1)), `null`, intpp(nil)},
-}
-
 func TestInterfaceSet(t *testing.T) {
-	for _, tt := range interfaceSetTests {
-		b := struct{ X any }{tt.pre}
-		blob := `{"X":` + tt.json + `}`
-		if err := Unmarshal([]byte(blob), &b); err != nil {
-			t.Errorf("Unmarshal %#q: %v", blob, err)
-			continue
-		}
-		if !reflect.DeepEqual(b.X, tt.post) {
-			t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post)
-		}
+	tests := []struct {
+		CaseName
+		pre  any
+		json string
+		post any
+	}{
+		{Name(""), "foo", `"bar"`, "bar"},
+		{Name(""), "foo", `2`, 2.0},
+		{Name(""), "foo", `true`, true},
+		{Name(""), "foo", `null`, nil},
+
+		{Name(""), nil, `null`, nil},
+		{Name(""), new(int), `null`, nil},
+		{Name(""), (*int)(nil), `null`, nil},
+		{Name(""), new(*int), `null`, new(*int)},
+		{Name(""), (**int)(nil), `null`, nil},
+		{Name(""), intp(1), `null`, nil},
+		{Name(""), intpp(nil), `null`, intpp(nil)},
+		{Name(""), intpp(intp(1)), `null`, intpp(nil)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
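+			// Decode into a struct whose any-typed field X is pre-set to tt.pre and check that it ends up as tt.post.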
+			b := struct{ X any }{tt.pre}
+			blob := `{"X":` + tt.json + `}`
+			if err := Unmarshal([]byte(blob), &b); err != nil {
+				t.Fatalf("%s: Unmarshal(%#q) error: %v", tt.Where, blob, err)
+			}
+			if !reflect.DeepEqual(b.X, tt.post) {
+				t.Errorf("%s: Unmarshal(%#q):\n\tpre.X:  %#v\n\tgot.X:  %#v\n\twant.X: %#v", tt.Where, blob, tt.pre, b.X, tt.post)
+			}
+		})
 	}
 }
 
@@ -1924,24 +1968,18 @@
 
 func TestStringKind(t *testing.T) {
 	type stringKind string
-
-	var m1, m2 map[stringKind]int
-	m1 = map[stringKind]int{
-		"foo": 42,
-	}
-
-	data, err := Marshal(m1)
+	want := map[stringKind]int{"foo": 42}
+	data, err := Marshal(want)
 	if err != nil {
-		t.Errorf("Unexpected error marshaling: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
-
-	err = Unmarshal(data, &m2)
+	var got map[stringKind]int
+	err = Unmarshal(data, &got)
 	if err != nil {
-		t.Errorf("Unexpected error unmarshaling: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
-
-	if !reflect.DeepEqual(m1, m2) {
-		t.Error("Items should be equal after encoding and then decoding")
+	if !reflect.DeepEqual(got, want) {
+		t.Fatalf("Marshal/Unmarshal mismatch:\n\tgot:  %v\n\twant: %v", got, want)
 	}
 }
 
@@ -1950,20 +1988,18 @@
 // Issue 8962.
 func TestByteKind(t *testing.T) {
 	type byteKind []byte
-
-	a := byteKind("hello")
-
-	data, err := Marshal(a)
+	want := byteKind("hello")
+	data, err := Marshal(want)
 	if err != nil {
-		t.Error(err)
+		t.Fatalf("Marshal error: %v", err)
 	}
-	var b byteKind
-	err = Unmarshal(data, &b)
+	var got byteKind
+	err = Unmarshal(data, &got)
 	if err != nil {
-		t.Fatal(err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
-	if !reflect.DeepEqual(a, b) {
-		t.Errorf("expected %v == %v", a, b)
+	if !reflect.DeepEqual(got, want) {
+		t.Fatalf("Marshal/Unmarshal mismatch:\n\tgot:  %v\n\twant: %v", got, want)
 	}
 }
 
@@ -1971,63 +2007,68 @@
 // Issue 12921.
 func TestSliceOfCustomByte(t *testing.T) {
 	type Uint8 uint8
-
-	a := []Uint8("hello")
-
-	data, err := Marshal(a)
+	want := []Uint8("hello")
+	data, err := Marshal(want)
 	if err != nil {
-		t.Fatal(err)
+		t.Fatalf("Marshal error: %v", err)
 	}
-	var b []Uint8
-	err = Unmarshal(data, &b)
+	var got []Uint8
+	err = Unmarshal(data, &got)
 	if err != nil {
-		t.Fatal(err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
-	if !reflect.DeepEqual(a, b) {
-		t.Fatalf("expected %v == %v", a, b)
+	if !reflect.DeepEqual(got, want) {
+		t.Fatalf("Marshal/Unmarshal mismatch:\n\tgot:  %v\n\twant: %v", got, want)
 	}
 }
 
-var decodeTypeErrorTests = []struct {
-	dest any
-	src  string
-}{
-	{new(string), `{"user": "name"}`}, // issue 4628.
-	{new(error), `{}`},                // issue 4222
-	{new(error), `[]`},
-	{new(error), `""`},
-	{new(error), `123`},
-	{new(error), `true`},
-}
-
 func TestUnmarshalTypeError(t *testing.T) {
-	for _, item := range decodeTypeErrorTests {
-		err := Unmarshal([]byte(item.src), item.dest)
-		if _, ok := err.(*UnmarshalTypeError); !ok {
-			t.Errorf("expected type error for Unmarshal(%q, type %T): got %T",
-				item.src, item.dest, err)
-		}
+	tests := []struct {
+		CaseName
+		dest any
+		in   string
+	}{
+		{Name(""), new(string), `{"user": "name"}`}, // issue 4628.
+		{Name(""), new(error), `{}`},                // issue 4222
+		{Name(""), new(error), `[]`},
+		{Name(""), new(error), `""`},
+		{Name(""), new(error), `123`},
+		{Name(""), new(error), `true`},
 	}
-}
-
-var unmarshalSyntaxTests = []string{
-	"tru",
-	"fals",
-	"nul",
-	"123e",
-	`"hello`,
-	`[1,2,3`,
-	`{"key":1`,
-	`{"key":1,`,
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			err := Unmarshal([]byte(tt.in), tt.dest)
+			if _, ok := err.(*UnmarshalTypeError); !ok {
+				t.Errorf("%s: Unmarshal(%#q, %T):\n\tgot:  %T\n\twant: %T",
+					tt.Where, tt.in, tt.dest, err, new(UnmarshalTypeError))
+			}
+		})
+	}
 }
 
 func TestUnmarshalSyntax(t *testing.T) {
 	var x any
-	for _, src := range unmarshalSyntaxTests {
-		err := Unmarshal([]byte(src), &x)
-		if _, ok := err.(*SyntaxError); !ok {
-			t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err)
-		}
+	tests := []struct {
+		CaseName
+		in string
+	}{
+		{Name(""), "tru"},
+		{Name(""), "fals"},
+		{Name(""), "nul"},
+		{Name(""), "123e"},
+		{Name(""), `"hello`},
+		{Name(""), `[1,2,3`},
+		{Name(""), `{"key":1`},
+		{Name(""), `{"key":1,`},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			err := Unmarshal([]byte(tt.in), &x)
+			if _, ok := err.(*SyntaxError); !ok {
+				t.Errorf("%s: Unmarshal(%#q, any):\n\tgot:  %T\n\twant: %T",
+					tt.Where, tt.in, err, new(SyntaxError))
+			}
+		})
 	}
 }
 
@@ -2048,10 +2089,10 @@
 	out := &unexportedFields{}
 	err := Unmarshal([]byte(input), out)
 	if err != nil {
-		t.Errorf("got error %v, expected nil", err)
+		t.Errorf("Unmarshal error: %v", err)
 	}
 	if !reflect.DeepEqual(out, want) {
-		t.Errorf("got %q, want %q", out, want)
+		t.Errorf("Unmarshal:\n\tgot:  %+v\n\twant: %+v", out, want)
 	}
 }
 
@@ -2073,12 +2114,11 @@
 
 func TestUnmarshalJSONLiteralError(t *testing.T) {
 	var t3 Time3339
-	err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3)
-	if err == nil {
-		t.Fatalf("expected error; got time %v", time.Time(t3))
-	}
-	if !strings.Contains(err.Error(), "range") {
-		t.Errorf("got err = %v; want out of range error", err)
+	switch err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3); {
+	case err == nil:
+		t.Fatalf("Unmarshal error: got nil, want non-nil")
+	case !strings.Contains(err.Error(), "range"):
+		t.Errorf("Unmarshal error:\n\tgot:  %v\n\twant: out of range", err)
 	}
 }
 
@@ -2091,7 +2131,7 @@
 
 	err := Unmarshal([]byte(json), &dest)
 	if err != nil {
-		t.Errorf("got error %q, want nil", err)
+		t.Errorf("Unmarshal error: %v", err)
 	}
 }
 
@@ -2100,99 +2140,102 @@
 // Issues 4900 and 8837, among others.
 func TestPrefilled(t *testing.T) {
 	// Values here change, cannot reuse table across runs.
-	var prefillTests = []struct {
+	tests := []struct {
+		CaseName
 		in  string
 		ptr any
 		out any
-	}{
-		{
-			in:  `{"X": 1, "Y": 2}`,
-			ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5},
-			out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5},
-		},
-		{
-			in:  `{"X": 1, "Y": 2}`,
-			ptr: &map[string]any{"X": float32(3), "Y": int16(4), "Z": 1.5},
-			out: &map[string]any{"X": float64(1), "Y": float64(2), "Z": 1.5},
-		},
-		{
-			in:  `[2]`,
-			ptr: &[]int{1},
-			out: &[]int{2},
-		},
-		{
-			in:  `[2, 3]`,
-			ptr: &[]int{1},
-			out: &[]int{2, 3},
-		},
-		{
-			in:  `[2, 3]`,
-			ptr: &[...]int{1},
-			out: &[...]int{2},
-		},
-		{
-			in:  `[3]`,
-			ptr: &[...]int{1, 2},
-			out: &[...]int{3, 0},
-		},
+	}{{
+		CaseName: Name(""),
+		in:       `{"X": 1, "Y": 2}`,
+		ptr:      &XYZ{X: float32(3), Y: int16(4), Z: 1.5},
+		out:      &XYZ{X: float64(1), Y: float64(2), Z: 1.5},
+	}, {
+		CaseName: Name(""),
+		in:       `{"X": 1, "Y": 2}`,
+		ptr:      &map[string]any{"X": float32(3), "Y": int16(4), "Z": 1.5},
+		out:      &map[string]any{"X": float64(1), "Y": float64(2), "Z": 1.5},
+	}, {
+		CaseName: Name(""),
+		in:       `[2]`,
+		ptr:      &[]int{1},
+		out:      &[]int{2},
+	}, {
+		CaseName: Name(""),
+		in:       `[2, 3]`,
+		ptr:      &[]int{1},
+		out:      &[]int{2, 3},
+	}, {
+		CaseName: Name(""),
+		in:       `[2, 3]`,
+		ptr:      &[...]int{1},
+		out:      &[...]int{2},
+	}, {
+		CaseName: Name(""),
+		in:       `[3]`,
+		ptr:      &[...]int{1, 2},
+		out:      &[...]int{3, 0},
+	}}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			ptrstr := fmt.Sprintf("%v", tt.ptr)
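+			// Record the prefilled value for error messages before Unmarshal overwrites tt.ptr.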
+			err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here
+			if err != nil {
+				t.Errorf("%s: Unmarshal error: %v", tt.Where, err)
+			}
+			if !reflect.DeepEqual(tt.ptr, tt.out) {
+				t.Errorf("%s: Unmarshal(%#q, %s):\n\tgot:  %v\n\twant: %v", tt.Where, tt.in, ptrstr, tt.ptr, tt.out)
+			}
+		})
 	}
-
-	for _, tt := range prefillTests {
-		ptrstr := fmt.Sprintf("%v", tt.ptr)
-		err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here
-		if err != nil {
-			t.Errorf("Unmarshal: %v", err)
-		}
-		if !reflect.DeepEqual(tt.ptr, tt.out) {
-			t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out)
-		}
-	}
-}
-
-var invalidUnmarshalTests = []struct {
-	v    any
-	want string
-}{
-	{nil, "json: Unmarshal(nil)"},
-	{struct{}{}, "json: Unmarshal(non-pointer struct {})"},
-	{(*int)(nil), "json: Unmarshal(nil *int)"},
 }
 
 func TestInvalidUnmarshal(t *testing.T) {
 	buf := []byte(`{"a":"1"}`)
-	for _, tt := range invalidUnmarshalTests {
-		err := Unmarshal(buf, tt.v)
-		if err == nil {
-			t.Errorf("Unmarshal expecting error, got nil")
-			continue
-		}
-		if got := err.Error(); got != tt.want {
-			t.Errorf("Unmarshal = %q; want %q", got, tt.want)
-		}
+	tests := []struct {
+		CaseName
+		v    any
+		want string
+	}{
+		{Name(""), nil, "json: Unmarshal(nil)"},
+		{Name(""), struct{}{}, "json: Unmarshal(non-pointer struct {})"},
+		{Name(""), (*int)(nil), "json: Unmarshal(nil *int)"},
 	}
-}
-
-var invalidUnmarshalTextTests = []struct {
-	v    any
-	want string
-}{
-	{nil, "json: Unmarshal(nil)"},
-	{struct{}{}, "json: Unmarshal(non-pointer struct {})"},
-	{(*int)(nil), "json: Unmarshal(nil *int)"},
-	{new(net.IP), "json: cannot unmarshal number into Go value of type *net.IP"},
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			err := Unmarshal(buf, tt.v)
+			if err == nil {
+				t.Fatalf("%s: Unmarshal error: got nil, want non-nil", tt.Where)
+			}
+			if got := err.Error(); got != tt.want {
+				t.Errorf("%s: Unmarshal error:\n\tgot:  %s\n\twant: %s", tt.Where, got, tt.want)
+			}
+		})
+	}
 }
 
 func TestInvalidUnmarshalText(t *testing.T) {
 	buf := []byte(`123`)
-	for _, tt := range invalidUnmarshalTextTests {
-		err := Unmarshal(buf, tt.v)
-		if err == nil {
-			t.Errorf("Unmarshal expecting error, got nil")
-			continue
-		}
-		if got := err.Error(); got != tt.want {
-			t.Errorf("Unmarshal = %q; want %q", got, tt.want)
-		}
+	tests := []struct {
+		CaseName
+		v    any
+		want string
+	}{
+		{Name(""), nil, "json: Unmarshal(nil)"},
+		{Name(""), struct{}{}, "json: Unmarshal(non-pointer struct {})"},
+		{Name(""), (*int)(nil), "json: Unmarshal(nil *int)"},
+		{Name(""), new(net.IP), "json: cannot unmarshal number into Go value of type *net.IP"},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			err := Unmarshal(buf, tt.v)
+			if err == nil {
+				t.Fatalf("%s: Unmarshal error: got nil, want non-nil", tt.Where)
+			}
+			if got := err.Error(); got != tt.want {
+				t.Errorf("%s: Unmarshal error:\n\tgot:  %s\n\twant: %s", tt.Where, got, tt.want)
+			}
+		})
 	}
 }
 
@@ -2211,12 +2254,12 @@
 
 	data, err := Marshal(item)
 	if err != nil {
-		t.Fatalf("Marshal: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 
 	err = Unmarshal(data, &item)
 	if err != nil {
-		t.Fatalf("Unmarshal: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 }
 
@@ -2275,43 +2318,50 @@
 	)
 
 	tests := []struct {
+		CaseName
 		in  string
 		ptr any
 		out any
 		err error
 	}{{
 		// Error since we cannot set S1.embed1, but still able to set S1.R.
-		in:  `{"R":2,"Q":1}`,
-		ptr: new(S1),
-		out: &S1{R: 2},
-		err: fmt.Errorf("json: cannot set embedded pointer to unexported struct: json.embed1"),
+		CaseName: Name(""),
+		in:       `{"R":2,"Q":1}`,
+		ptr:      new(S1),
+		out:      &S1{R: 2},
+		err:      fmt.Errorf("json: cannot set embedded pointer to unexported struct: json.embed1"),
 	}, {
 		// The top level Q field takes precedence.
-		in:  `{"Q":1}`,
-		ptr: new(S2),
-		out: &S2{Q: 1},
+		CaseName: Name(""),
+		in:       `{"Q":1}`,
+		ptr:      new(S2),
+		out:      &S2{Q: 1},
 	}, {
 		// No issue with non-pointer variant.
-		in:  `{"R":2,"Q":1}`,
-		ptr: new(S3),
-		out: &S3{embed1: embed1{Q: 1}, R: 2},
+		CaseName: Name(""),
+		in:       `{"R":2,"Q":1}`,
+		ptr:      new(S3),
+		out:      &S3{embed1: embed1{Q: 1}, R: 2},
 	}, {
 		// No error since both embedded structs have field R, which annihilate each other.
 		// Thus, no attempt is made at setting S4.embed1.
-		in:  `{"R":2}`,
-		ptr: new(S4),
-		out: new(S4),
+		CaseName: Name(""),
+		in:       `{"R":2}`,
+		ptr:      new(S4),
+		out:      new(S4),
 	}, {
 		// Error since we cannot set S5.embed1, but still able to set S5.R.
-		in:  `{"R":2,"Q":1}`,
-		ptr: new(S5),
-		out: &S5{R: 2},
-		err: fmt.Errorf("json: cannot set embedded pointer to unexported struct: json.embed3"),
+		CaseName: Name(""),
+		in:       `{"R":2,"Q":1}`,
+		ptr:      new(S5),
+		out:      &S5{R: 2},
+		err:      fmt.Errorf("json: cannot set embedded pointer to unexported struct: json.embed3"),
 	}, {
 		// Issue 24152, ensure decodeState.indirect does not panic.
-		in:  `{"embed1": {"Q": 1}}`,
-		ptr: new(S6),
-		out: &S6{embed1{1}},
+		CaseName: Name(""),
+		in:       `{"embed1": {"Q": 1}}`,
+		ptr:      new(S6),
+		out:      &S6{embed1{1}},
 	}, {
 		// Issue 24153, check that we can still set forwarded fields even in
 		// the presence of a name conflict.
@@ -2325,64 +2375,74 @@
 		// it should be impossible for an external package to set either Q.
 		//
 		// It is probably okay for a future reflect change to break this.
-		in:  `{"embed1": {"Q": 1}, "Q": 2}`,
-		ptr: new(S7),
-		out: &S7{embed1{1}, embed2{2}},
+		CaseName: Name(""),
+		in:       `{"embed1": {"Q": 1}, "Q": 2}`,
+		ptr:      new(S7),
+		out:      &S7{embed1{1}, embed2{2}},
 	}, {
 		// Issue 24153, similar to the S7 case.
-		in:  `{"embed1": {"Q": 1}, "embed2": {"Q": 2}, "Q": 3}`,
-		ptr: new(S8),
-		out: &S8{embed1{1}, embed2{2}, 3},
+		CaseName: Name(""),
+		in:       `{"embed1": {"Q": 1}, "embed2": {"Q": 2}, "Q": 3}`,
+		ptr:      new(S8),
+		out:      &S8{embed1{1}, embed2{2}, 3},
 	}, {
 		// Issue 28145, similar to the cases above.
-		in:  `{"embed": {}}`,
-		ptr: new(S9),
-		out: &S9{},
+		CaseName: Name(""),
+		in:       `{"embed": {}}`,
+		ptr:      new(S9),
+		out:      &S9{},
 	}}
-
-	for i, tt := range tests {
-		err := Unmarshal([]byte(tt.in), tt.ptr)
-		if !equalError(err, tt.err) {
-			t.Errorf("#%d: %v, want %v", i, err, tt.err)
-		}
-		if !reflect.DeepEqual(tt.ptr, tt.out) {
-			t.Errorf("#%d: mismatch\ngot:  %#+v\nwant: %#+v", i, tt.ptr, tt.out)
-		}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			err := Unmarshal([]byte(tt.in), tt.ptr)
+			if !equalError(err, tt.err) {
+				t.Errorf("%s: Unmarshal error:\n\tgot:  %v\n\twant: %v", tt.Where, err, tt.err)
+			}
+			if !reflect.DeepEqual(tt.ptr, tt.out) {
+				t.Errorf("%s: Unmarshal:\n\tgot:  %#+v\n\twant: %#+v", tt.Where, tt.ptr, tt.out)
+			}
+		})
 	}
 }
 
 func TestUnmarshalErrorAfterMultipleJSON(t *testing.T) {
 	tests := []struct {
+		CaseName
 		in  string
 		err error
 	}{{
-		in:  `1 false null :`,
-		err: &SyntaxError{"invalid character ':' looking for beginning of value", 14},
+		CaseName: Name(""),
+		in:       `1 false null :`,
+		err:      &SyntaxError{"invalid character ':' looking for beginning of value", 14},
 	}, {
-		in:  `1 [] [,]`,
-		err: &SyntaxError{"invalid character ',' looking for beginning of value", 7},
+		CaseName: Name(""),
+		in:       `1 [] [,]`,
+		err:      &SyntaxError{"invalid character ',' looking for beginning of value", 7},
 	}, {
-		in:  `1 [] [true:]`,
-		err: &SyntaxError{"invalid character ':' after array element", 11},
+		CaseName: Name(""),
+		in:       `1 [] [true:]`,
+		err:      &SyntaxError{"invalid character ':' after array element", 11},
 	}, {
-		in:  `1  {}    {"x"=}`,
-		err: &SyntaxError{"invalid character '=' after object key", 14},
+		CaseName: Name(""),
+		in:       `1  {}    {"x"=}`,
+		err:      &SyntaxError{"invalid character '=' after object key", 14},
 	}, {
-		in:  `falsetruenul#`,
-		err: &SyntaxError{"invalid character '#' in literal null (expecting 'l')", 13},
+		CaseName: Name(""),
+		in:       `falsetruenul#`,
+		err:      &SyntaxError{"invalid character '#' in literal null (expecting 'l')", 13},
 	}}
-	for i, tt := range tests {
-		dec := NewDecoder(strings.NewReader(tt.in))
-		var err error
-		for {
-			var v any
-			if err = dec.Decode(&v); err != nil {
-				break
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			dec := NewDecoder(strings.NewReader(tt.in))
+			var err error
+			for err == nil {
+				var v any
+				err = dec.Decode(&v)
 			}
-		}
-		if !reflect.DeepEqual(err, tt.err) {
-			t.Errorf("#%d: got %#v, want %#v", i, err, tt.err)
-		}
+			if !reflect.DeepEqual(err, tt.err) {
+				t.Errorf("%s: Decode error:\n\tgot:  %v\n\twant: %v", tt.Where, err, tt.err)
+			}
+		})
 	}
 }
 
@@ -2408,7 +2468,7 @@
 	data := []byte(`{"a": "b"}`)
 
 	if err := Unmarshal(data, v); err != nil {
-		t.Fatal(err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 }
 
@@ -2424,11 +2484,11 @@
 func TestUnmarshalMapWithTextUnmarshalerStringKey(t *testing.T) {
 	var p map[textUnmarshalerString]string
 	if err := Unmarshal([]byte(`{"FOO": "1"}`), &p); err != nil {
-		t.Fatalf("Unmarshal unexpected error: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 
 	if _, ok := p["foo"]; !ok {
-		t.Errorf(`Key "foo" does not exist in map: %v`, p)
+		t.Errorf(`key "foo" missing in map: %v`, p)
 	}
 }
 
@@ -2436,28 +2496,28 @@
 	// See golang.org/issues/38105.
 	var p map[textUnmarshalerString]string
 	if err := Unmarshal([]byte(`{"开源":"12345开源"}`), &p); err != nil {
-		t.Fatalf("Unmarshal unexpected error: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 	if _, ok := p["开源"]; !ok {
-		t.Errorf(`Key "开源" does not exist in map: %v`, p)
+		t.Errorf(`key "开源" missing in map: %v`, p)
 	}
 
 	// See golang.org/issues/38126.
 	type T struct {
 		F1 string `json:"F1,string"`
 	}
-	t1 := T{"aaa\tbbb"}
+	wantT := T{"aaa\tbbb"}
 
-	b, err := Marshal(t1)
+	b, err := Marshal(wantT)
 	if err != nil {
-		t.Fatalf("Marshal unexpected error: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
-	var t2 T
-	if err := Unmarshal(b, &t2); err != nil {
-		t.Fatalf("Unmarshal unexpected error: %v", err)
+	var gotT T
+	if err := Unmarshal(b, &gotT); err != nil {
+		t.Fatalf("Unmarshal error: %v", err)
 	}
-	if t1 != t2 {
-		t.Errorf("Marshal and Unmarshal roundtrip mismatch: want %q got %q", t1, t2)
+	if gotT != wantT {
+		t.Errorf("Marshal/Unmarshal roundtrip:\n\tgot:  %q\n\twant: %q", gotT, wantT)
 	}
 
 	// See golang.org/issues/39555.
@@ -2465,107 +2525,93 @@
 
 	encoded, err := Marshal(input)
 	if err != nil {
-		t.Fatalf("Marshal unexpected error: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	var got map[textUnmarshalerString]string
 	if err := Unmarshal(encoded, &got); err != nil {
-		t.Fatalf("Unmarshal unexpected error: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 	want := map[textUnmarshalerString]string{"foo": "", `"`: ""}
-	if !reflect.DeepEqual(want, got) {
-		t.Fatalf("Unexpected roundtrip result:\nwant: %q\ngot:  %q", want, got)
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("Marshal/Unmarshal roundtrip:\n\tgot:  %q\n\twant: %q", got, want)
 	}
 }
 
 func TestUnmarshalMaxDepth(t *testing.T) {
-	testcases := []struct {
-		name        string
+	tests := []struct {
+		CaseName
 		data        string
 		errMaxDepth bool
-	}{
-		{
-			name:        "ArrayUnderMaxNestingDepth",
-			data:        `{"a":` + strings.Repeat(`[`, 10000-1) + strings.Repeat(`]`, 10000-1) + `}`,
-			errMaxDepth: false,
-		},
-		{
-			name:        "ArrayOverMaxNestingDepth",
-			data:        `{"a":` + strings.Repeat(`[`, 10000) + strings.Repeat(`]`, 10000) + `}`,
-			errMaxDepth: true,
-		},
-		{
-			name:        "ArrayOverStackDepth",
-			data:        `{"a":` + strings.Repeat(`[`, 3000000) + strings.Repeat(`]`, 3000000) + `}`,
-			errMaxDepth: true,
-		},
-		{
-			name:        "ObjectUnderMaxNestingDepth",
-			data:        `{"a":` + strings.Repeat(`{"a":`, 10000-1) + `0` + strings.Repeat(`}`, 10000-1) + `}`,
-			errMaxDepth: false,
-		},
-		{
-			name:        "ObjectOverMaxNestingDepth",
-			data:        `{"a":` + strings.Repeat(`{"a":`, 10000) + `0` + strings.Repeat(`}`, 10000) + `}`,
-			errMaxDepth: true,
-		},
-		{
-			name:        "ObjectOverStackDepth",
-			data:        `{"a":` + strings.Repeat(`{"a":`, 3000000) + `0` + strings.Repeat(`}`, 3000000) + `}`,
-			errMaxDepth: true,
-		},
-	}
+	}{{
+		CaseName:    Name("ArrayUnderMaxNestingDepth"),
+		data:        `{"a":` + strings.Repeat(`[`, 10000-1) + strings.Repeat(`]`, 10000-1) + `}`,
+		errMaxDepth: false,
+	}, {
+		CaseName:    Name("ArrayOverMaxNestingDepth"),
+		data:        `{"a":` + strings.Repeat(`[`, 10000) + strings.Repeat(`]`, 10000) + `}`,
+		errMaxDepth: true,
+	}, {
+		CaseName:    Name("ArrayOverStackDepth"),
+		data:        `{"a":` + strings.Repeat(`[`, 3000000) + strings.Repeat(`]`, 3000000) + `}`,
+		errMaxDepth: true,
+	}, {
+		CaseName:    Name("ObjectUnderMaxNestingDepth"),
+		data:        `{"a":` + strings.Repeat(`{"a":`, 10000-1) + `0` + strings.Repeat(`}`, 10000-1) + `}`,
+		errMaxDepth: false,
+	}, {
+		CaseName:    Name("ObjectOverMaxNestingDepth"),
+		data:        `{"a":` + strings.Repeat(`{"a":`, 10000) + `0` + strings.Repeat(`}`, 10000) + `}`,
+		errMaxDepth: true,
+	}, {
+		CaseName:    Name("ObjectOverStackDepth"),
+		data:        `{"a":` + strings.Repeat(`{"a":`, 3000000) + `0` + strings.Repeat(`}`, 3000000) + `}`,
+		errMaxDepth: true,
+	}}
 
 	targets := []struct {
-		name     string
+		CaseName
 		newValue func() any
-	}{
-		{
-			name: "unstructured",
-			newValue: func() any {
-				var v any
-				return &v
-			},
+	}{{
+		CaseName: Name("unstructured"),
+		newValue: func() any {
+			var v any
+			return &v
 		},
-		{
-			name: "typed named field",
-			newValue: func() any {
-				v := struct {
-					A any `json:"a"`
-				}{}
-				return &v
-			},
+	}, {
+		CaseName: Name("typed named field"),
+		newValue: func() any {
+			v := struct {
+				A any `json:"a"`
+			}{}
+			return &v
 		},
-		{
-			name: "typed missing field",
-			newValue: func() any {
-				v := struct {
-					B any `json:"b"`
-				}{}
-				return &v
-			},
+	}, {
+		CaseName: Name("typed missing field"),
+		newValue: func() any {
+			v := struct {
+				B any `json:"b"`
+			}{}
+			return &v
 		},
-		{
-			name: "custom unmarshaler",
-			newValue: func() any {
-				v := unmarshaler{}
-				return &v
-			},
+	}, {
+		CaseName: Name("custom unmarshaler"),
+		newValue: func() any {
+			v := unmarshaler{}
+			return &v
 		},
-	}
+	}}
 
-	for _, tc := range testcases {
+	for _, tt := range tests {
 		for _, target := range targets {
-			t.Run(target.name+"-"+tc.name, func(t *testing.T) {
-				err := Unmarshal([]byte(tc.data), target.newValue())
-				if !tc.errMaxDepth {
+			t.Run(target.Name+"-"+tt.Name, func(t *testing.T) {
+				err := Unmarshal([]byte(tt.data), target.newValue())
+				if !tt.errMaxDepth {
 					if err != nil {
-						t.Errorf("unexpected error: %v", err)
+						t.Errorf("%s: %s: Unmarshal error: %v", tt.Where, target.Where, err)
 					}
 				} else {
-					if err == nil {
-						t.Errorf("expected error containing 'exceeded max depth', got none")
-					} else if !strings.Contains(err.Error(), "exceeded max depth") {
-						t.Errorf("expected error containing 'exceeded max depth', got: %v", err)
+					if err == nil || !strings.Contains(err.Error(), "exceeded max depth") {
+						t.Errorf("%s: %s: Unmarshal error:\n\tgot:  %v\n\twant: exceeded max depth", tt.Where, target.Where, err)
 					}
 				}
 			})
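
The rewritten decode tests above rely on a CaseName/Name/Where helper trio that is defined elsewhere in this patch, outside this hunk. As a rough sketch of the pattern — the names and details below are illustrative assumptions, not the patch's actual definitions — such a helper can record each table entry's file:line via runtime.Callers, so a failing subtest reports where the case literal lives:

package jsontest

import (
	"fmt"
	"path"
	"runtime"
	"testing"
)

// CaseName pairs an optional subtest name with the source position of the
// table entry, so error messages can read "decode_test.go:123: ...".
type CaseName struct {
	Name  string
	Where casePos
}

// casePos holds the single program counter recorded by Name.
type casePos struct{ pc [1]uintptr }

func (p casePos) String() string {
	frames := runtime.CallersFrames(p.pc[:])
	frame, _ := frames.Next()
	return fmt.Sprintf("%s:%d", path.Base(frame.File), frame.Line)
}

// Name records its caller (the test-table literal) and returns a CaseName
// suitable for embedding in a table-driven test case.
func Name(name string) (c CaseName) {
	c.Name = name
	runtime.Callers(2, c.Where.pc[:])
	return c
}

func TestCaseNameSketch(t *testing.T) {
	tests := []struct {
		CaseName
		in, want string
	}{
		{Name(""), "hello", "hello"},
	}
	for _, tt := range tests {
		t.Run(tt.Name, func(t *testing.T) {
			if tt.in != tt.want {
				t.Errorf("%s: got %q, want %q", tt.Where, tt.in, tt.want)
			}
		})
	}
}

With this shape, formatting tt.Where with %s prints something like decode_test.go:123, which is why the converted tests no longer need index-based error messages.
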
diff --git a/src/encoding/json/encode.go b/src/encoding/json/encode.go
index 6da0bd9..d6f6900 100644
--- a/src/encoding/json/encode.go
+++ b/src/encoding/json/encode.go
@@ -17,6 +17,7 @@
 	"fmt"
 	"math"
 	"reflect"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -28,30 +29,30 @@
 // Marshal returns the JSON encoding of v.
 //
 // Marshal traverses the value v recursively.
-// If an encountered value implements the Marshaler interface
-// and is not a nil pointer, Marshal calls its MarshalJSON method
-// to produce JSON. If no MarshalJSON method is present but the
-// value implements encoding.TextMarshaler instead, Marshal calls
-// its MarshalText method and encodes the result as a JSON string.
+// If an encountered value implements [Marshaler]
+// and is not a nil pointer, Marshal calls [Marshaler.MarshalJSON]
+// to produce JSON. If no [Marshaler.MarshalJSON] method is present but the
+// value implements [encoding.TextMarshaler] instead, Marshal calls
+// [encoding.TextMarshaler.MarshalText] and encodes the result as a JSON string.
 // The nil pointer exception is not strictly necessary
 // but mimics a similar, necessary exception in the behavior of
-// UnmarshalJSON.
+// [Unmarshaler.UnmarshalJSON].
 //
 // Otherwise, Marshal uses the following type-dependent default encodings:
 //
 // Boolean values encode as JSON booleans.
 //
-// Floating point, integer, and Number values encode as JSON numbers.
+// Floating point, integer, and [Number] values encode as JSON numbers.
 // NaN and +/-Inf values will return an [UnsupportedValueError].
 //
 // String values encode as JSON strings coerced to valid UTF-8,
 // replacing invalid bytes with the Unicode replacement rune.
 // So that the JSON will be safe to embed inside HTML <script> tags,
-// the string is encoded using HTMLEscape,
+// the string is encoded using [HTMLEscape],
 // which escapes "<", ">", "&", U+2028, and U+2029
 // to "\u003c", "\u003e", "\u0026", "\u2028", and "\u2029".
-// This replacement can be disabled when using an Encoder,
-// by calling SetEscapeHTML(false).
+// This replacement can be disabled when using an [Encoder],
+// by calling [Encoder.SetEscapeHTML](false).
 //
 // Array and slice values encode as JSON arrays, except that
 // []byte encodes as a base64-encoded string, and a nil slice
@@ -108,7 +109,7 @@
 // only Unicode letters, digits, and ASCII punctuation except quotation
 // marks, backslash, and comma.
 //
-// Anonymous struct fields are usually marshaled as if their inner exported fields
+// Embedded struct fields are usually marshaled as if their inner exported fields
 // were fields in the outer struct, subject to the usual Go visibility rules amended
 // as described in the next paragraph.
 // An anonymous struct field with a name given in its JSON tag is treated as
@@ -135,11 +136,11 @@
 // a JSON tag of "-".
 //
 // Map values encode as JSON objects. The map's key type must either be a
-// string, an integer type, or implement encoding.TextMarshaler. The map keys
+// string, an integer type, or implement [encoding.TextMarshaler]. The map keys
 // are sorted and used as JSON object keys by applying the following rules,
 // subject to the UTF-8 coercion described for string values above:
 //   - keys of any string type are used directly
-//   - encoding.TextMarshalers are marshaled
+//   - keys that implement [encoding.TextMarshaler] are marshaled
 //   - integer keys are converted to strings
 //
 // Pointer values encode as the value pointed to.
@@ -150,7 +151,7 @@
 //
 // Channel, complex, and function values cannot be encoded in JSON.
 // Attempting to encode such a value causes Marshal to return
-// an UnsupportedTypeError.
+// an [UnsupportedTypeError].
 //
 // JSON cannot represent cyclic data structures and Marshal does not
 // handle them. Passing cyclic structures to Marshal will result in
@@ -168,7 +169,7 @@
 	return buf, nil
 }
 
-// MarshalIndent is like Marshal but applies Indent to format the output.
+// MarshalIndent is like [Marshal] but applies [Indent] to format the output.
 // Each JSON element in the output will begin on a new line beginning with prefix
 // followed by one or more copies of indent according to the indentation nesting.
 func MarshalIndent(v any, prefix, indent string) ([]byte, error) {
@@ -190,7 +191,7 @@
 	MarshalJSON() ([]byte, error)
 }
 
-// An UnsupportedTypeError is returned by Marshal when attempting
+// An UnsupportedTypeError is returned by [Marshal] when attempting
 // to encode an unsupported value type.
 type UnsupportedTypeError struct {
 	Type reflect.Type
@@ -200,7 +201,7 @@
 	return "json: unsupported type: " + e.Type.String()
 }
 
-// An UnsupportedValueError is returned by Marshal when attempting
+// An UnsupportedValueError is returned by [Marshal] when attempting
 // to encode an unsupported value.
 type UnsupportedValueError struct {
 	Value reflect.Value
@@ -211,9 +212,9 @@
 	return "json: unsupported value: " + e.Str
 }
 
-// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// Before Go 1.2, an InvalidUTF8Error was returned by [Marshal] when
 // attempting to encode a string value with invalid UTF-8 sequences.
-// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// As of Go 1.2, [Marshal] instead coerces the string to valid UTF-8 by
 // replacing invalid bytes with the Unicode replacement rune U+FFFD.
 //
 // Deprecated: No longer used; kept for compatibility.
@@ -225,7 +226,8 @@
 	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
 }
 
-// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method.
+// A MarshalerError represents an error from calling a
+// [Marshaler.MarshalJSON] or [encoding.TextMarshaler.MarshalText] method.
 type MarshalerError struct {
 	Type       reflect.Type
 	Err        error
@@ -245,7 +247,7 @@
 // Unwrap returns the underlying error.
 func (e *MarshalerError) Unwrap() error { return e.Err }
 
-var hex = "0123456789abcdef"
+const hex = "0123456789abcdef"
 
 // An encodeState encodes JSON into a bytes.Buffer.
 type encodeState struct {
@@ -305,16 +307,12 @@
 	switch v.Kind() {
 	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
 		return v.Len() == 0
-	case reflect.Bool:
-		return v.Bool() == false
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Interface, reflect.Pointer:
-		return v.IsNil()
+	case reflect.Bool,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+		reflect.Float32, reflect.Float64,
+		reflect.Interface, reflect.Pointer:
+		return v.IsZero()
 	}
 	return false
 }
@@ -371,8 +369,8 @@
 }
 
 var (
-	marshalerType     = reflect.TypeOf((*Marshaler)(nil)).Elem()
-	textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	marshalerType     = reflect.TypeFor[Marshaler]()
+	textMarshalerType = reflect.TypeFor[encoding.TextMarshaler]()
 )
 
 // newTypeEncoder constructs an encoderFunc for a type.
@@ -739,16 +737,20 @@
 	e.WriteByte('{')
 
 	// Extract and sort the keys.
-	sv := make([]reflectWithString, v.Len())
-	mi := v.MapRange()
+	var (
+		sv  = make([]reflectWithString, v.Len())
+		mi  = v.MapRange()
+		err error
+	)
 	for i := 0; mi.Next(); i++ {
-		sv[i].k = mi.Key()
-		sv[i].v = mi.Value()
-		if err := sv[i].resolve(); err != nil {
+		if sv[i].ks, err = resolveKeyName(mi.Key()); err != nil {
 			e.error(fmt.Errorf("json: encoding error for type %q: %q", v.Type().String(), err.Error()))
 		}
+		sv[i].v = mi.Value()
 	}
-	sort.Slice(sv, func(i, j int) bool { return sv[i].ks < sv[j].ks })
+	slices.SortFunc(sv, func(i, j reflectWithString) int {
+		return strings.Compare(i.ks, j.ks)
+	})
 
 	for i, kv := range sv {
 		if i > 0 {
@@ -781,15 +783,11 @@
 		e.WriteString("null")
 		return
 	}
-	s := v.Bytes()
-	encodedLen := base64.StdEncoding.EncodedLen(len(s))
-	e.Grow(len(`"`) + encodedLen + len(`"`))
 
-	// TODO(https://go.dev/issue/53693): Use base64.Encoding.AppendEncode.
+	s := v.Bytes()
 	b := e.AvailableBuffer()
 	b = append(b, '"')
-	base64.StdEncoding.Encode(b[len(b):][:encodedLen], s)
-	b = b[:len(b)+encodedLen]
+	b = base64.StdEncoding.AppendEncode(b, s)
 	b = append(b, '"')
 	e.Write(b)
 }
@@ -931,31 +929,26 @@
 }
 
 type reflectWithString struct {
-	k  reflect.Value
 	v  reflect.Value
 	ks string
 }
 
-func (w *reflectWithString) resolve() error {
-	if w.k.Kind() == reflect.String {
-		w.ks = w.k.String()
-		return nil
+func resolveKeyName(k reflect.Value) (string, error) {
+	if k.Kind() == reflect.String {
+		return k.String(), nil
 	}
-	if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok {
-		if w.k.Kind() == reflect.Pointer && w.k.IsNil() {
-			return nil
+	if tm, ok := k.Interface().(encoding.TextMarshaler); ok {
+		if k.Kind() == reflect.Pointer && k.IsNil() {
+			return "", nil
 		}
 		buf, err := tm.MarshalText()
-		w.ks = string(buf)
-		return err
+		return string(buf), err
 	}
-	switch w.k.Kind() {
+	switch k.Kind() {
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		w.ks = strconv.FormatInt(w.k.Int(), 10)
-		return nil
+		return strconv.FormatInt(k.Int(), 10), nil
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		w.ks = strconv.FormatUint(w.k.Uint(), 10)
-		return nil
+		return strconv.FormatUint(k.Uint(), 10), nil
 	}
 	panic("unexpected map key type")
 }
@@ -973,6 +966,10 @@
 			switch b {
 			case '\\', '"':
 				dst = append(dst, '\\', b)
+			case '\b':
+				dst = append(dst, '\\', 'b')
+			case '\f':
+				dst = append(dst, '\\', 'f')
 			case '\n':
 				dst = append(dst, '\\', 'n')
 			case '\r':
@@ -980,7 +977,7 @@
 			case '\t':
 				dst = append(dst, '\\', 't')
 			default:
-				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// This encodes bytes < 0x20 except for \b, \f, \n, \r and \t.
 				// If escapeHTML is set, it also escapes <, >, and &
 				// because they can lead to security holes when
 				// user-controlled strings are rendered into JSON
@@ -1013,7 +1010,7 @@
 		// but don't work in JSONP, which has to be evaluated as JavaScript,
 		// and can lead to security holes there. It is valid JSON to
 		// escape them, so we do so unconditionally.
-		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		// See https://en.wikipedia.org/wiki/JSON#Safety.
 		if c == '\u2028' || c == '\u2029' {
 			dst = append(dst, src[start:i]...)
 			dst = append(dst, '\\', 'u', '2', '0', '2', hex[c&0xF])
@@ -1168,7 +1165,7 @@
 					if count[f.typ] > 1 {
 						// If there were multiple instances, add a second,
 						// so that the annihilation code will see a duplicate.
-						// It only cares about the distinction between 1 or 2,
+						// It only cares about the distinction between 1 and 2,
 						// so don't bother generating any more copies.
 						fields = append(fields, fields[len(fields)-1])
 					}
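
The encode.go hunk above swaps several older idioms for newer standard-library APIs: reflect.TypeFor (Go 1.22) in place of reflect.TypeOf((*T)(nil)).Elem(), slices.SortFunc with strings.Compare (Go 1.21+) in place of sort.Slice, and base64.Encoding.AppendEncode (Go 1.22) in place of the manual Grow/Encode sequence. A small standalone illustration of those three calls, separate from the patch itself:

package main

import (
	"encoding"
	"encoding/base64"
	"fmt"
	"reflect"
	"slices"
	"strings"
)

func main() {
	// reflect.TypeFor yields the reflect.Type for an interface type directly,
	// without the (*T)(nil) indirection.
	tm := reflect.TypeFor[encoding.TextMarshaler]()
	fmt.Println(tm) // encoding.TextMarshaler

	// slices.SortFunc sorts with a three-way comparison function;
	// strings.Compare has exactly that shape for string elements.
	keys := []string{"b", "a", "c"}
	slices.SortFunc(keys, strings.Compare)
	fmt.Println(keys) // [a b c]

	// AppendEncode appends the base64 encoding of src to dst in one call,
	// replacing the explicit EncodedLen/Grow/Encode bookkeeping.
	b := append([]byte(nil), '"')
	b = base64.StdEncoding.AppendEncode(b, []byte("abc"))
	b = append(b, '"')
	fmt.Println(string(b)) // "YWJj"
}

Each call keeps the same observable behavior as the code it replaces; the switch is about clarity and avoiding manual buffer bookkeeping.
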
diff --git a/src/encoding/json/encode_test.go b/src/encoding/json/encode_test.go
index d027972..53259f4 100644
--- a/src/encoding/json/encode_test.go
+++ b/src/encoding/json/encode_test.go
@@ -44,7 +44,8 @@
 	Sto struct{} `json:"sto,omitempty"`
 }
 
-var optionalsExpected = `{
+func TestOmitEmpty(t *testing.T) {
+	var want = `{
  "sr": "",
  "omitempty": 0,
  "slr": null,
@@ -55,8 +56,6 @@
  "str": {},
  "sto": {}
 }`
-
-func TestOmitEmpty(t *testing.T) {
 	var o Optionals
 	o.Sw = "something"
 	o.Mr = map[string]any{}
@@ -64,10 +63,10 @@
 
 	got, err := MarshalIndent(&o, "", " ")
 	if err != nil {
-		t.Fatal(err)
+		t.Fatalf("MarshalIndent error: %v", err)
 	}
-	if got := string(got); got != optionalsExpected {
-		t.Errorf(" got: %s\nwant: %s\n", got, optionalsExpected)
+	if got := string(got); got != want {
+		t.Errorf("MarshalIndent:\n\tgot:  %s\n\twant: %s\n", indentNewlines(got), indentNewlines(want))
 	}
 }
 
@@ -81,62 +80,57 @@
 
 func TestRoundtripStringTag(t *testing.T) {
 	tests := []struct {
-		name string
+		CaseName
 		in   StringTag
 		want string // empty to just test that we roundtrip
-	}{
-		{
-			name: "AllTypes",
-			in: StringTag{
-				BoolStr:    true,
-				IntStr:     42,
-				UintptrStr: 44,
-				StrStr:     "xzbit",
-				NumberStr:  "46",
-			},
-			want: `{
-				"BoolStr": "true",
-				"IntStr": "42",
-				"UintptrStr": "44",
-				"StrStr": "\"xzbit\"",
-				"NumberStr": "46"
-			}`,
+	}{{
+		CaseName: Name("AllTypes"),
+		in: StringTag{
+			BoolStr:    true,
+			IntStr:     42,
+			UintptrStr: 44,
+			StrStr:     "xzbit",
+			NumberStr:  "46",
 		},
-		{
-			// See golang.org/issues/38173.
-			name: "StringDoubleEscapes",
-			in: StringTag{
-				StrStr:    "\b\f\n\r\t\"\\",
-				NumberStr: "0", // just to satisfy the roundtrip
-			},
-			want: `{
-				"BoolStr": "false",
-				"IntStr": "0",
-				"UintptrStr": "0",
-				"StrStr": "\"\\u0008\\u000c\\n\\r\\t\\\"\\\\\"",
-				"NumberStr": "0"
-			}`,
+		want: `{
+	"BoolStr": "true",
+	"IntStr": "42",
+	"UintptrStr": "44",
+	"StrStr": "\"xzbit\"",
+	"NumberStr": "46"
+}`,
+	}, {
+		// See golang.org/issues/38173.
+		CaseName: Name("StringDoubleEscapes"),
+		in: StringTag{
+			StrStr:    "\b\f\n\r\t\"\\",
+			NumberStr: "0", // just to satisfy the roundtrip
 		},
-	}
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			// Indent with a tab prefix to make the multi-line string
-			// literals in the table nicer to read.
-			got, err := MarshalIndent(&test.in, "\t\t\t", "\t")
+		want: `{
+	"BoolStr": "false",
+	"IntStr": "0",
+	"UintptrStr": "0",
+	"StrStr": "\"\\b\\f\\n\\r\\t\\\"\\\\\"",
+	"NumberStr": "0"
+}`,
+	}}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			got, err := MarshalIndent(&tt.in, "", "\t")
 			if err != nil {
-				t.Fatal(err)
+				t.Fatalf("%s: MarshalIndent error: %v", tt.Where, err)
 			}
-			if got := string(got); got != test.want {
-				t.Fatalf(" got: %s\nwant: %s\n", got, test.want)
+			if got := string(got); got != tt.want {
+				t.Fatalf("%s: MarshalIndent:\n\tgot:  %s\n\twant: %s", tt.Where, stripWhitespace(got), stripWhitespace(tt.want))
 			}
 
 			// Verify that it round-trips.
 			var s2 StringTag
 			if err := Unmarshal(got, &s2); err != nil {
-				t.Fatalf("Decode: %v", err)
+				t.Fatalf("%s: Unmarshal error: %v", tt.Where, err)
 			}
-			if !reflect.DeepEqual(test.in, s2) {
-				t.Fatalf("decode didn't match.\nsource: %#v\nEncoded as:\n%s\ndecode: %#v", test.in, string(got), s2)
+			if !reflect.DeepEqual(s2, tt.in) {
+				t.Fatalf("%s: Decode:\n\tinput: %s\n\tgot:  %#v\n\twant: %#v", tt.Where, indentNewlines(string(got)), s2, tt.in)
 			}
 		})
 	}
@@ -149,21 +143,21 @@
 
 func TestEncodeRenamedByteSlice(t *testing.T) {
 	s := renamedByteSlice("abc")
-	result, err := Marshal(s)
+	got, err := Marshal(s)
 	if err != nil {
-		t.Fatal(err)
+		t.Fatalf("Marshal error: %v", err)
 	}
-	expect := `"YWJj"`
-	if string(result) != expect {
-		t.Errorf(" got %s want %s", result, expect)
+	want := `"YWJj"`
+	if string(got) != want {
+		t.Errorf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 	r := renamedRenamedByteSlice("abc")
-	result, err = Marshal(r)
+	got, err = Marshal(r)
 	if err != nil {
-		t.Fatal(err)
+		t.Fatalf("Marshal error: %v", err)
 	}
-	if string(result) != expect {
-		t.Errorf(" got %s want %s", result, expect)
+	if string(got) != want {
+		t.Errorf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
@@ -212,36 +206,40 @@
 
 func TestSamePointerNoCycle(t *testing.T) {
 	if _, err := Marshal(samePointerNoCycle); err != nil {
-		t.Fatalf("unexpected error: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 }
 
 func TestSliceNoCycle(t *testing.T) {
 	if _, err := Marshal(sliceNoCycle); err != nil {
-		t.Fatalf("unexpected error: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 }
 
-var unsupportedValues = []any{
-	math.NaN(),
-	math.Inf(-1),
-	math.Inf(1),
-	pointerCycle,
-	pointerCycleIndirect,
-	mapCycle,
-	sliceCycle,
-	recursiveSliceCycle,
-}
-
 func TestUnsupportedValues(t *testing.T) {
-	for _, v := range unsupportedValues {
-		if _, err := Marshal(v); err != nil {
-			if _, ok := err.(*UnsupportedValueError); !ok {
-				t.Errorf("for %v, got %T want UnsupportedValueError", v, err)
+	tests := []struct {
+		CaseName
+		in any
+	}{
+		{Name(""), math.NaN()},
+		{Name(""), math.Inf(-1)},
+		{Name(""), math.Inf(1)},
+		{Name(""), pointerCycle},
+		{Name(""), pointerCycleIndirect},
+		{Name(""), mapCycle},
+		{Name(""), sliceCycle},
+		{Name(""), recursiveSliceCycle},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			if _, err := Marshal(tt.in); err != nil {
+				if _, ok := err.(*UnsupportedValueError); !ok {
+					t.Errorf("%s: Marshal error:\n\tgot:  %T\n\twant: %T", tt.Where, err, new(UnsupportedValueError))
+				}
+			} else {
+				t.Errorf("%s: Marshal error: got nil, want non-nil", tt.Where)
 			}
-		} else {
-			t.Errorf("for %v, expected error", v)
-		}
+		})
 	}
 }
 
@@ -253,11 +251,11 @@
 	}
 	got, err := Marshal(m)
 	if err != nil {
-		t.Errorf("Marshal() error: %v", err)
+		t.Errorf("Marshal error: %v", err)
 	}
 	want := `{"TF:NaN":"1","TF:NaN":"1"}`
 	if string(got) != want {
-		t.Errorf("Marshal() = %s, want %s", got, want)
+		t.Errorf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
@@ -322,10 +320,10 @@
 	const want = `{"R0":"ref","R1":"ref","R2":"\"ref\"","R3":"\"ref\"","V0":"val","V1":"val","V2":"\"val\"","V3":"\"val\""}`
 	b, err := Marshal(&s)
 	if err != nil {
-		t.Fatalf("Marshal: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	if got := string(b); got != want {
-		t.Errorf("got %q, want %q", got, want)
+		t.Errorf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
@@ -348,33 +346,33 @@
 	want := `"\u003c\u0026\u003e"`
 	b, err := Marshal(c)
 	if err != nil {
-		t.Fatalf("Marshal(c): %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	if got := string(b); got != want {
-		t.Errorf("Marshal(c) = %#q, want %#q", got, want)
+		t.Errorf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 
 	var ct CText
 	want = `"\"\u003c\u0026\u003e\""`
 	b, err = Marshal(ct)
 	if err != nil {
-		t.Fatalf("Marshal(ct): %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	if got := string(b); got != want {
-		t.Errorf("Marshal(ct) = %#q, want %#q", got, want)
+		t.Errorf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
 func TestAnonymousFields(t *testing.T) {
 	tests := []struct {
-		label     string     // Test name
+		CaseName
 		makeInput func() any // Function to create input value
 		want      string     // Expected JSON output
 	}{{
 		// Both S1 and S2 have a field named X. From the perspective of S,
 		// it is ambiguous which one X refers to.
 		// This should not serialize either field.
-		label: "AmbiguousField",
+		CaseName: Name("AmbiguousField"),
 		makeInput: func() any {
 			type (
 				S1 struct{ x, X int }
@@ -388,7 +386,7 @@
 		},
 		want: `{}`,
 	}, {
-		label: "DominantField",
+		CaseName: Name("DominantField"),
 		// Both S1 and S2 have a field named X, but since S has an X field as
 		// well, it takes precedence over S1.X and S2.X.
 		makeInput: func() any {
@@ -406,7 +404,7 @@
 		want: `{"X":6}`,
 	}, {
 		// Unexported embedded field of non-struct type should not be serialized.
-		label: "UnexportedEmbeddedInt",
+		CaseName: Name("UnexportedEmbeddedInt"),
 		makeInput: func() any {
 			type (
 				myInt int
@@ -417,7 +415,7 @@
 		want: `{}`,
 	}, {
 		// Exported embedded field of non-struct type should be serialized.
-		label: "ExportedEmbeddedInt",
+		CaseName: Name("ExportedEmbeddedInt"),
 		makeInput: func() any {
 			type (
 				MyInt int
@@ -429,7 +427,7 @@
 	}, {
 		// Unexported embedded field of pointer to non-struct type
 		// should not be serialized.
-		label: "UnexportedEmbeddedIntPointer",
+		CaseName: Name("UnexportedEmbeddedIntPointer"),
 		makeInput: func() any {
 			type (
 				myInt int
@@ -443,7 +441,7 @@
 	}, {
 		// Exported embedded field of pointer to non-struct type
 		// should be serialized.
-		label: "ExportedEmbeddedIntPointer",
+		CaseName: Name("ExportedEmbeddedIntPointer"),
 		makeInput: func() any {
 			type (
 				MyInt int
@@ -458,7 +456,7 @@
 		// Exported fields of embedded structs should have their
 		// exported fields be serialized regardless of whether the struct types
 		// themselves are exported.
-		label: "EmbeddedStruct",
+		CaseName: Name("EmbeddedStruct"),
 		makeInput: func() any {
 			type (
 				s1 struct{ x, X int }
@@ -475,7 +473,7 @@
 		// Exported fields of pointers to embedded structs should have their
 		// exported fields be serialized regardless of whether the struct types
 		// themselves are exported.
-		label: "EmbeddedStructPointer",
+		CaseName: Name("EmbeddedStructPointer"),
 		makeInput: func() any {
 			type (
 				s1 struct{ x, X int }
@@ -491,7 +489,7 @@
 	}, {
 		// Exported fields on embedded unexported structs at multiple levels
 		// of nesting should still be serialized.
-		label: "NestedStructAndInts",
+		CaseName: Name("NestedStructAndInts"),
 		makeInput: func() any {
 			type (
 				MyInt1 int
@@ -518,7 +516,7 @@
 		// If an anonymous struct pointer field is nil, we should ignore
 		// the embedded fields behind it. Not properly doing so may
 		// result in the wrong output or reflect panics.
-		label: "EmbeddedFieldBehindNilPointer",
+		CaseName: Name("EmbeddedFieldBehindNilPointer"),
 		makeInput: func() any {
 			type (
 				S2 struct{ Field string }
@@ -530,13 +528,13 @@
 	}}
 
 	for _, tt := range tests {
-		t.Run(tt.label, func(t *testing.T) {
+		t.Run(tt.Name, func(t *testing.T) {
 			b, err := Marshal(tt.makeInput())
 			if err != nil {
-				t.Fatalf("Marshal() = %v, want nil error", err)
+				t.Fatalf("%s: Marshal error: %v", tt.Where, err)
 			}
 			if string(b) != tt.want {
-				t.Fatalf("Marshal() = %q, want %q", b, tt.want)
+				t.Fatalf("%s: Marshal:\n\tgot:  %s\n\twant: %s", tt.Where, b, tt.want)
 			}
 		})
 	}
@@ -588,31 +586,34 @@
 
 // See golang.org/issue/16042 and golang.org/issue/34235.
 func TestNilMarshal(t *testing.T) {
-	testCases := []struct {
-		v    any
+	tests := []struct {
+		CaseName
+		in   any
 		want string
 	}{
-		{v: nil, want: `null`},
-		{v: new(float64), want: `0`},
-		{v: []any(nil), want: `null`},
-		{v: []string(nil), want: `null`},
-		{v: map[string]string(nil), want: `null`},
-		{v: []byte(nil), want: `null`},
-		{v: struct{ M string }{"gopher"}, want: `{"M":"gopher"}`},
-		{v: struct{ M Marshaler }{}, want: `{"M":null}`},
-		{v: struct{ M Marshaler }{(*nilJSONMarshaler)(nil)}, want: `{"M":"0zenil0"}`},
-		{v: struct{ M any }{(*nilJSONMarshaler)(nil)}, want: `{"M":null}`},
-		{v: struct{ M encoding.TextMarshaler }{}, want: `{"M":null}`},
-		{v: struct{ M encoding.TextMarshaler }{(*nilTextMarshaler)(nil)}, want: `{"M":"0zenil0"}`},
-		{v: struct{ M any }{(*nilTextMarshaler)(nil)}, want: `{"M":null}`},
+		{Name(""), nil, `null`},
+		{Name(""), new(float64), `0`},
+		{Name(""), []any(nil), `null`},
+		{Name(""), []string(nil), `null`},
+		{Name(""), map[string]string(nil), `null`},
+		{Name(""), []byte(nil), `null`},
+		{Name(""), struct{ M string }{"gopher"}, `{"M":"gopher"}`},
+		{Name(""), struct{ M Marshaler }{}, `{"M":null}`},
+		{Name(""), struct{ M Marshaler }{(*nilJSONMarshaler)(nil)}, `{"M":"0zenil0"}`},
+		{Name(""), struct{ M any }{(*nilJSONMarshaler)(nil)}, `{"M":null}`},
+		{Name(""), struct{ M encoding.TextMarshaler }{}, `{"M":null}`},
+		{Name(""), struct{ M encoding.TextMarshaler }{(*nilTextMarshaler)(nil)}, `{"M":"0zenil0"}`},
+		{Name(""), struct{ M any }{(*nilTextMarshaler)(nil)}, `{"M":null}`},
 	}
-
-	for _, tt := range testCases {
-		out, err := Marshal(tt.v)
-		if err != nil || string(out) != tt.want {
-			t.Errorf("Marshal(%#v) = %#q, %#v, want %#q, nil", tt.v, out, err, tt.want)
-			continue
-		}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			switch got, err := Marshal(tt.in); {
+			case err != nil:
+				t.Fatalf("%s: Marshal error: %v", tt.Where, err)
+			case string(got) != tt.want:
+				t.Fatalf("%s: Marshal:\n\tgot:  %s\n\twant: %s", tt.Where, got, tt.want)
+			}
+		})
 	}
 }
 
@@ -624,12 +625,12 @@
 	}
 	b, err := Marshal(v)
 	if err != nil {
-		t.Fatal("Marshal:", err)
+		t.Fatal("Marshal error:", err)
 	}
 	want := `{"S":"B"}`
 	got := string(b)
 	if got != want {
-		t.Fatalf("Marshal: got %s want %s", got, want)
+		t.Fatalf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 	// Now check that the duplicate field, S, does not appear.
 	x := BugX{
@@ -637,12 +638,12 @@
 	}
 	b, err = Marshal(x)
 	if err != nil {
-		t.Fatal("Marshal:", err)
+		t.Fatal("Marshal error:", err)
 	}
 	want = `{"A":23}`
 	got = string(b)
 	if got != want {
-		t.Fatalf("Marshal: got %s want %s", got, want)
+		t.Fatalf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
@@ -664,12 +665,12 @@
 	}
 	b, err := Marshal(v)
 	if err != nil {
-		t.Fatal("Marshal:", err)
+		t.Fatal("Marshal error:", err)
 	}
 	want := `{"S":"BugD"}`
 	got := string(b)
 	if got != want {
-		t.Fatalf("Marshal: got %s want %s", got, want)
+		t.Fatalf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
@@ -691,12 +692,12 @@
 	}
 	b, err := Marshal(v)
 	if err != nil {
-		t.Fatal("Marshal:", err)
+		t.Fatal("Marshal error:", err)
 	}
 	want := `{}`
 	got := string(b)
 	if got != want {
-		t.Fatalf("Marshal: got %s want %s", got, want)
+		t.Fatalf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
@@ -706,9 +707,8 @@
 	}
 	x := Foo{Number(`invalid`)}
 
-	b, err := Marshal(&x)
-	if err == nil {
-		t.Errorf("Marshal(&x) = %#q; want error", b)
+	if _, err := Marshal(&x); err == nil {
+		t.Fatalf("Marshal error: got nil, want non-nil")
 	}
 }
 
@@ -724,26 +724,26 @@
 	}
 	dummy := Dummy{Name: "Dummy"}
 	dummy.Next = &dummy
-	if b, err := Marshal(dummy); err == nil {
-		t.Errorf("Marshal(dummy) = %#q; want error", b)
+	if _, err := Marshal(dummy); err == nil {
+		t.Errorf("Marshal error: got nil, want non-nil")
 	}
 
 	type Data struct {
 		A string
 		I int
 	}
-	data := Data{A: "a", I: 1}
-	b, err := Marshal(data)
+	want := Data{A: "a", I: 1}
+	b, err := Marshal(want)
 	if err != nil {
-		t.Errorf("Marshal(%v) = %v", data, err)
+		t.Errorf("Marshal error: %v", err)
 	}
 
-	var data2 Data
-	if err := Unmarshal(b, &data2); err != nil {
-		t.Errorf("Unmarshal(%v) = %v", data2, err)
+	var got Data
+	if err := Unmarshal(b, &got); err != nil {
+		t.Errorf("Unmarshal error: %v", err)
 	}
-	if data2 != data {
-		t.Errorf("expect: %v, but get: %v", data, data2)
+	if got != want {
+		t.Errorf("Unmarshal:\n\tgot:  %v\n\twant: %v", got, want)
 	}
 }
 
@@ -753,7 +753,7 @@
 	want.Write([]byte(`{"M":"\u003chtml\u003efoo \u0026\u2028 \u2029\u003c/html\u003e"}`))
 	HTMLEscape(&b, []byte(m))
 	if !bytes.Equal(b.Bytes(), want.Bytes()) {
-		t.Errorf("HTMLEscape(&b, []byte(m)) = %s; want %s", b.Bytes(), want.Bytes())
+		t.Errorf("HTMLEscape:\n\tgot:  %s\n\twant: %s", b.Bytes(), want.Bytes())
 	}
 }
 
@@ -765,21 +765,19 @@
 	var n int64 = 42
 	b, err := Marshal(stringPointer{N: &n})
 	if err != nil {
-		t.Fatalf("Marshal: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	if got, want := string(b), `{"n":"42"}`; got != want {
-		t.Errorf("Marshal = %s, want %s", got, want)
+		t.Fatalf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 	var back stringPointer
-	err = Unmarshal(b, &back)
-	if err != nil {
-		t.Fatalf("Unmarshal: %v", err)
-	}
-	if back.N == nil {
-		t.Fatalf("Unmarshaled nil N field")
-	}
-	if *back.N != 42 {
-		t.Fatalf("*N = %d; want 42", *back.N)
+	switch err = Unmarshal(b, &back); {
+	case err != nil:
+		t.Fatalf("Unmarshal error: %v", err)
+	case back.N == nil:
+		t.Fatalf("Unmarshal: back.N = nil, want non-nil")
+	case *back.N != 42:
+		t.Fatalf("Unmarshal: *back.N = %d, want 42", *back.N)
 	}
 }
 
@@ -795,11 +793,11 @@
 	{"\x05", `"\u0005"`},
 	{"\x06", `"\u0006"`},
 	{"\x07", `"\u0007"`},
-	{"\x08", `"\u0008"`},
+	{"\x08", `"\b"`},
 	{"\x09", `"\t"`},
 	{"\x0a", `"\n"`},
 	{"\x0b", `"\u000b"`},
-	{"\x0c", `"\u000c"`},
+	{"\x0c", `"\f"`},
 	{"\x0d", `"\r"`},
 	{"\x0e", `"\u000e"`},
 	{"\x0f", `"\u000f"`},
@@ -825,7 +823,7 @@
 	for _, tt := range encodeStringTests {
 		b, err := Marshal(tt.in)
 		if err != nil {
-			t.Errorf("Marshal(%q): %v", tt.in, err)
+			t.Errorf("Marshal(%q) error: %v", tt.in, err)
 			continue
 		}
 		out := string(b)
@@ -863,65 +861,67 @@
 
 // Issue 13783
 func TestEncodeBytekind(t *testing.T) {
-	testdata := []struct {
-		data any
+	tests := []struct {
+		CaseName
+		in   any
 		want string
 	}{
-		{byte(7), "7"},
-		{jsonbyte(7), `{"JB":7}`},
-		{textbyte(4), `"TB:4"`},
-		{jsonint(5), `{"JI":5}`},
-		{textint(1), `"TI:1"`},
-		{[]byte{0, 1}, `"AAE="`},
-		{[]jsonbyte{0, 1}, `[{"JB":0},{"JB":1}]`},
-		{[][]jsonbyte{{0, 1}, {3}}, `[[{"JB":0},{"JB":1}],[{"JB":3}]]`},
-		{[]textbyte{2, 3}, `["TB:2","TB:3"]`},
-		{[]jsonint{5, 4}, `[{"JI":5},{"JI":4}]`},
-		{[]textint{9, 3}, `["TI:9","TI:3"]`},
-		{[]int{9, 3}, `[9,3]`},
-		{[]textfloat{12, 3}, `["TF:12.00","TF:3.00"]`},
+		{Name(""), byte(7), "7"},
+		{Name(""), jsonbyte(7), `{"JB":7}`},
+		{Name(""), textbyte(4), `"TB:4"`},
+		{Name(""), jsonint(5), `{"JI":5}`},
+		{Name(""), textint(1), `"TI:1"`},
+		{Name(""), []byte{0, 1}, `"AAE="`},
+		{Name(""), []jsonbyte{0, 1}, `[{"JB":0},{"JB":1}]`},
+		{Name(""), [][]jsonbyte{{0, 1}, {3}}, `[[{"JB":0},{"JB":1}],[{"JB":3}]]`},
+		{Name(""), []textbyte{2, 3}, `["TB:2","TB:3"]`},
+		{Name(""), []jsonint{5, 4}, `[{"JI":5},{"JI":4}]`},
+		{Name(""), []textint{9, 3}, `["TI:9","TI:3"]`},
+		{Name(""), []int{9, 3}, `[9,3]`},
+		{Name(""), []textfloat{12, 3}, `["TF:12.00","TF:3.00"]`},
 	}
-	for _, d := range testdata {
-		js, err := Marshal(d.data)
-		if err != nil {
-			t.Error(err)
-			continue
-		}
-		got, want := string(js), d.want
-		if got != want {
-			t.Errorf("got %s, want %s", got, want)
-		}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			b, err := Marshal(tt.in)
+			if err != nil {
+				t.Errorf("%s: Marshal error: %v", tt.Where, err)
+			}
+			got, want := string(b), tt.want
+			if got != want {
+				t.Errorf("%s: Marshal:\n\tgot:  %s\n\twant: %s", tt.Where, got, want)
+			}
+		})
 	}
 }
 
 func TestTextMarshalerMapKeysAreSorted(t *testing.T) {
-	b, err := Marshal(map[unmarshalerText]int{
+	got, err := Marshal(map[unmarshalerText]int{
 		{"x", "y"}: 1,
 		{"y", "x"}: 2,
 		{"a", "z"}: 3,
 		{"z", "a"}: 4,
 	})
 	if err != nil {
-		t.Fatalf("Failed to Marshal text.Marshaler: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	const want = `{"a:z":3,"x:y":1,"y:x":2,"z:a":4}`
-	if string(b) != want {
-		t.Errorf("Marshal map with text.Marshaler keys: got %#q, want %#q", b, want)
+	if string(got) != want {
+		t.Errorf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
 // https://golang.org/issue/33675
 func TestNilMarshalerTextMapKey(t *testing.T) {
-	b, err := Marshal(map[*unmarshalerText]int{
+	got, err := Marshal(map[*unmarshalerText]int{
 		(*unmarshalerText)(nil): 1,
 		{"A", "B"}:              2,
 	})
 	if err != nil {
-		t.Fatalf("Failed to Marshal *text.Marshaler: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
 	const want = `{"":1,"A:B":2}`
-	if string(b) != want {
-		t.Errorf("Marshal map with *text.Marshaler keys: got %#q, want %#q", b, want)
+	if string(got) != want {
+		t.Errorf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
@@ -960,7 +960,7 @@
 		}
 		bout, err := Marshal(vf)
 		if err != nil {
-			t.Errorf("Marshal(%T(%g)): %v", vf, vf, err)
+			t.Errorf("Marshal(%T(%g)) error: %v", vf, vf, err)
 			nfail++
 			return
 		}
@@ -969,12 +969,12 @@
 		// result must convert back to the same float
 		g, err := strconv.ParseFloat(out, bits)
 		if err != nil {
-			t.Errorf("Marshal(%T(%g)) = %q, cannot parse back: %v", vf, vf, out, err)
+			t.Errorf("ParseFloat(%q) error: %v", out, err)
 			nfail++
 			return
 		}
 		if f != g || fmt.Sprint(f) != fmt.Sprint(g) { // fmt.Sprint handles ±0
-			t.Errorf("Marshal(%T(%g)) = %q (is %g, not %g)", vf, vf, out, float32(g), vf)
+			t.Errorf("ParseFloat(%q):\n\tgot:  %g\n\twant: %g", out, float32(g), vf)
 			nfail++
 			return
 		}
@@ -985,7 +985,7 @@
 		}
 		for _, re := range bad {
 			if re.MatchString(out) {
-				t.Errorf("Marshal(%T(%g)) = %q, must not match /%s/", vf, vf, out, re)
+				t.Errorf("Marshal(%T(%g)) = %q; must not match /%s/", vf, vf, out, re)
 				nfail++
 				return
 			}
@@ -1049,87 +1049,90 @@
 	)
 
 	tests := []struct {
+		CaseName
 		in   any
 		want string
 		ok   bool
 	}{
 		// Test with nil RawMessage.
-		{rawNil, "null", true},
-		{&rawNil, "null", true},
-		{[]any{rawNil}, "[null]", true},
-		{&[]any{rawNil}, "[null]", true},
-		{[]any{&rawNil}, "[null]", true},
-		{&[]any{&rawNil}, "[null]", true},
-		{struct{ M RawMessage }{rawNil}, `{"M":null}`, true},
-		{&struct{ M RawMessage }{rawNil}, `{"M":null}`, true},
-		{struct{ M *RawMessage }{&rawNil}, `{"M":null}`, true},
-		{&struct{ M *RawMessage }{&rawNil}, `{"M":null}`, true},
-		{map[string]any{"M": rawNil}, `{"M":null}`, true},
-		{&map[string]any{"M": rawNil}, `{"M":null}`, true},
-		{map[string]any{"M": &rawNil}, `{"M":null}`, true},
-		{&map[string]any{"M": &rawNil}, `{"M":null}`, true},
-		{T1{rawNil}, "{}", true},
-		{T2{&rawNil}, `{"M":null}`, true},
-		{&T1{rawNil}, "{}", true},
-		{&T2{&rawNil}, `{"M":null}`, true},
+		{Name(""), rawNil, "null", true},
+		{Name(""), &rawNil, "null", true},
+		{Name(""), []any{rawNil}, "[null]", true},
+		{Name(""), &[]any{rawNil}, "[null]", true},
+		{Name(""), []any{&rawNil}, "[null]", true},
+		{Name(""), &[]any{&rawNil}, "[null]", true},
+		{Name(""), struct{ M RawMessage }{rawNil}, `{"M":null}`, true},
+		{Name(""), &struct{ M RawMessage }{rawNil}, `{"M":null}`, true},
+		{Name(""), struct{ M *RawMessage }{&rawNil}, `{"M":null}`, true},
+		{Name(""), &struct{ M *RawMessage }{&rawNil}, `{"M":null}`, true},
+		{Name(""), map[string]any{"M": rawNil}, `{"M":null}`, true},
+		{Name(""), &map[string]any{"M": rawNil}, `{"M":null}`, true},
+		{Name(""), map[string]any{"M": &rawNil}, `{"M":null}`, true},
+		{Name(""), &map[string]any{"M": &rawNil}, `{"M":null}`, true},
+		{Name(""), T1{rawNil}, "{}", true},
+		{Name(""), T2{&rawNil}, `{"M":null}`, true},
+		{Name(""), &T1{rawNil}, "{}", true},
+		{Name(""), &T2{&rawNil}, `{"M":null}`, true},
 
 		// Test with empty, but non-nil, RawMessage.
-		{rawEmpty, "", false},
-		{&rawEmpty, "", false},
-		{[]any{rawEmpty}, "", false},
-		{&[]any{rawEmpty}, "", false},
-		{[]any{&rawEmpty}, "", false},
-		{&[]any{&rawEmpty}, "", false},
-		{struct{ X RawMessage }{rawEmpty}, "", false},
-		{&struct{ X RawMessage }{rawEmpty}, "", false},
-		{struct{ X *RawMessage }{&rawEmpty}, "", false},
-		{&struct{ X *RawMessage }{&rawEmpty}, "", false},
-		{map[string]any{"nil": rawEmpty}, "", false},
-		{&map[string]any{"nil": rawEmpty}, "", false},
-		{map[string]any{"nil": &rawEmpty}, "", false},
-		{&map[string]any{"nil": &rawEmpty}, "", false},
-		{T1{rawEmpty}, "{}", true},
-		{T2{&rawEmpty}, "", false},
-		{&T1{rawEmpty}, "{}", true},
-		{&T2{&rawEmpty}, "", false},
+		{Name(""), rawEmpty, "", false},
+		{Name(""), &rawEmpty, "", false},
+		{Name(""), []any{rawEmpty}, "", false},
+		{Name(""), &[]any{rawEmpty}, "", false},
+		{Name(""), []any{&rawEmpty}, "", false},
+		{Name(""), &[]any{&rawEmpty}, "", false},
+		{Name(""), struct{ X RawMessage }{rawEmpty}, "", false},
+		{Name(""), &struct{ X RawMessage }{rawEmpty}, "", false},
+		{Name(""), struct{ X *RawMessage }{&rawEmpty}, "", false},
+		{Name(""), &struct{ X *RawMessage }{&rawEmpty}, "", false},
+		{Name(""), map[string]any{"nil": rawEmpty}, "", false},
+		{Name(""), &map[string]any{"nil": rawEmpty}, "", false},
+		{Name(""), map[string]any{"nil": &rawEmpty}, "", false},
+		{Name(""), &map[string]any{"nil": &rawEmpty}, "", false},
+		{Name(""), T1{rawEmpty}, "{}", true},
+		{Name(""), T2{&rawEmpty}, "", false},
+		{Name(""), &T1{rawEmpty}, "{}", true},
+		{Name(""), &T2{&rawEmpty}, "", false},
 
 		// Test with RawMessage with some text.
 		//
 		// The tests below marked with Issue6458 used to generate "ImZvbyI=" instead "foo".
 		// This behavior was intentionally changed in Go 1.8.
 		// See https://golang.org/issues/14493#issuecomment-255857318
-		{rawText, `"foo"`, true}, // Issue6458
-		{&rawText, `"foo"`, true},
-		{[]any{rawText}, `["foo"]`, true},  // Issue6458
-		{&[]any{rawText}, `["foo"]`, true}, // Issue6458
-		{[]any{&rawText}, `["foo"]`, true},
-		{&[]any{&rawText}, `["foo"]`, true},
-		{struct{ M RawMessage }{rawText}, `{"M":"foo"}`, true}, // Issue6458
-		{&struct{ M RawMessage }{rawText}, `{"M":"foo"}`, true},
-		{struct{ M *RawMessage }{&rawText}, `{"M":"foo"}`, true},
-		{&struct{ M *RawMessage }{&rawText}, `{"M":"foo"}`, true},
-		{map[string]any{"M": rawText}, `{"M":"foo"}`, true},  // Issue6458
-		{&map[string]any{"M": rawText}, `{"M":"foo"}`, true}, // Issue6458
-		{map[string]any{"M": &rawText}, `{"M":"foo"}`, true},
-		{&map[string]any{"M": &rawText}, `{"M":"foo"}`, true},
-		{T1{rawText}, `{"M":"foo"}`, true}, // Issue6458
-		{T2{&rawText}, `{"M":"foo"}`, true},
-		{&T1{rawText}, `{"M":"foo"}`, true},
-		{&T2{&rawText}, `{"M":"foo"}`, true},
+		{Name(""), rawText, `"foo"`, true}, // Issue6458
+		{Name(""), &rawText, `"foo"`, true},
+		{Name(""), []any{rawText}, `["foo"]`, true},  // Issue6458
+		{Name(""), &[]any{rawText}, `["foo"]`, true}, // Issue6458
+		{Name(""), []any{&rawText}, `["foo"]`, true},
+		{Name(""), &[]any{&rawText}, `["foo"]`, true},
+		{Name(""), struct{ M RawMessage }{rawText}, `{"M":"foo"}`, true}, // Issue6458
+		{Name(""), &struct{ M RawMessage }{rawText}, `{"M":"foo"}`, true},
+		{Name(""), struct{ M *RawMessage }{&rawText}, `{"M":"foo"}`, true},
+		{Name(""), &struct{ M *RawMessage }{&rawText}, `{"M":"foo"}`, true},
+		{Name(""), map[string]any{"M": rawText}, `{"M":"foo"}`, true},  // Issue6458
+		{Name(""), &map[string]any{"M": rawText}, `{"M":"foo"}`, true}, // Issue6458
+		{Name(""), map[string]any{"M": &rawText}, `{"M":"foo"}`, true},
+		{Name(""), &map[string]any{"M": &rawText}, `{"M":"foo"}`, true},
+		{Name(""), T1{rawText}, `{"M":"foo"}`, true}, // Issue6458
+		{Name(""), T2{&rawText}, `{"M":"foo"}`, true},
+		{Name(""), &T1{rawText}, `{"M":"foo"}`, true},
+		{Name(""), &T2{&rawText}, `{"M":"foo"}`, true},
 	}
 
-	for i, tt := range tests {
-		b, err := Marshal(tt.in)
-		if ok := (err == nil); ok != tt.ok {
-			if err != nil {
-				t.Errorf("test %d, unexpected failure: %v", i, err)
-			} else {
-				t.Errorf("test %d, unexpected success", i)
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			b, err := Marshal(tt.in)
+			if ok := (err == nil); ok != tt.ok {
+				if err != nil {
+					t.Errorf("%s: Marshal error: %v", tt.Where, err)
+				} else {
+					t.Errorf("%s: Marshal error: got nil, want non-nil", tt.Where)
+				}
 			}
-		}
-		if got := string(b); got != tt.want {
-			t.Errorf("test %d, Marshal(%#v) = %q, want %q", i, tt.in, got, tt.want)
-		}
+			if got := string(b); got != tt.want {
+				t.Errorf("%s: Marshal:\n\tinput: %#v\n\tgot:  %s\n\twant: %s", tt.Where, tt.in, got, tt.want)
+			}
+		})
 	}
 }
 
@@ -1153,12 +1156,12 @@
 	}{}
 	b, err := Marshal(v)
 	if err != nil {
-		t.Fatal("Marshal:", err)
+		t.Fatal("Marshal error:", err)
 	}
 	want := `{"A0":0,"À":0,"Aβ":0}`
 	got := string(b)
 	if got != want {
-		t.Fatalf("Marshal: got %s want %s", got, want)
+		t.Fatalf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
@@ -1168,23 +1171,51 @@
 	errText := "json: test error"
 
 	tests := []struct {
+		CaseName
 		err  *MarshalerError
 		want string
-	}{
-		{
-			&MarshalerError{st, fmt.Errorf(errText), ""},
-			"json: error calling MarshalJSON for type " + st.String() + ": " + errText,
-		},
-		{
-			&MarshalerError{st, fmt.Errorf(errText), "TestMarshalerError"},
-			"json: error calling TestMarshalerError for type " + st.String() + ": " + errText,
-		},
-	}
+	}{{
+		Name(""),
+		&MarshalerError{st, fmt.Errorf(errText), ""},
+		"json: error calling MarshalJSON for type " + st.String() + ": " + errText,
+	}, {
+		Name(""),
+		&MarshalerError{st, fmt.Errorf(errText), "TestMarshalerError"},
+		"json: error calling TestMarshalerError for type " + st.String() + ": " + errText,
+	}}
 
-	for i, tt := range tests {
-		got := tt.err.Error()
-		if got != tt.want {
-			t.Errorf("MarshalerError test %d, got: %s, want: %s", i, got, tt.want)
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			got := tt.err.Error()
+			if got != tt.want {
+				t.Errorf("%s: Error:\n\tgot:  %s\n\twant: %s", tt.Where, got, tt.want)
+			}
+		})
+	}
+}
+
+type marshaledValue string
+
+func (v marshaledValue) MarshalJSON() ([]byte, error) {
+	return []byte(v), nil
+}
+
+func TestIssue63379(t *testing.T) {
+	for _, v := range []string{
+		"[]<",
+		"[]>",
+		"[]&",
+		"[]\u2028",
+		"[]\u2029",
+		"{}<",
+		"{}>",
+		"{}&",
+		"{}\u2028",
+		"{}\u2029",
+	} {
+		_, err := Marshal(marshaledValue(v))
+		if err == nil {
+			t.Errorf("expected error for %q", v)
 		}
 	}
 }
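
The encode_test.go changes above (encodeStringTests and the StringDoubleEscapes case) track a behavior change in string escaping: U+0008 and U+000C are now written as the short JSON escapes \b and \f instead of \u0008 and \u000c. A quick, self-contained illustration of the observable difference, assuming a toolchain that includes this change:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	b, err := json.Marshal("\b\f\n\r\t")
	if err != nil {
		panic(err)
	}
	// With this change:   "\b\f\n\r\t"
	// Before this change: "\u0008\u000c\n\r\t"
	fmt.Println(string(b))
}
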
diff --git a/src/encoding/json/indent.go b/src/encoding/json/indent.go
index 26bb5d2..01bfdf6 100644
--- a/src/encoding/json/indent.go
+++ b/src/encoding/json/indent.go
@@ -53,29 +53,37 @@
 	start := 0
 	for i, c := range src {
 		if escape && (c == '<' || c == '>' || c == '&') {
-			dst = append(dst, src[start:i]...)
+			if start < i {
+				dst = append(dst, src[start:i]...)
+			}
 			dst = append(dst, '\\', 'u', '0', '0', hex[c>>4], hex[c&0xF])
 			start = i + 1
 		}
 		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
 		if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
-			dst = append(dst, src[start:i]...)
+			if start < i {
+				dst = append(dst, src[start:i]...)
+			}
 			dst = append(dst, '\\', 'u', '2', '0', '2', hex[src[i+2]&0xF])
-			start = i + len("\u2029")
+			start = i + 3
 		}
 		v := scan.step(scan, c)
 		if v >= scanSkipSpace {
 			if v == scanError {
 				break
 			}
-			dst = append(dst, src[start:i]...)
+			if start < i {
+				dst = append(dst, src[start:i]...)
+			}
 			start = i + 1
 		}
 	}
 	if scan.eof() == scanError {
 		return dst[:origLen], scan.err
 	}
-	dst = append(dst, src[start:]...)
+	if start < len(src) {
+		dst = append(dst, src[start:]...)
+	}
 	return dst, nil
 }
 
diff --git a/src/encoding/json/scanner.go b/src/encoding/json/scanner.go
index 4c43f5f..da6ea2a 100644
--- a/src/encoding/json/scanner.go
+++ b/src/encoding/json/scanner.go
@@ -43,7 +43,7 @@
 }
 
 // A SyntaxError is a description of a JSON syntax error.
-// Unmarshal will return a SyntaxError if the JSON can't be parsed.
+// [Unmarshal] will return a SyntaxError if the JSON can't be parsed.
 type SyntaxError struct {
 	msg    string // description of error
 	Offset int64  // error occurred after reading Offset bytes
diff --git a/src/encoding/json/scanner_test.go b/src/encoding/json/scanner_test.go
index 3474b3e..068439d 100644
--- a/src/encoding/json/scanner_test.go
+++ b/src/encoding/json/scanner_test.go
@@ -9,51 +9,59 @@
 	"math"
 	"math/rand"
 	"reflect"
+	"strings"
 	"testing"
 )
 
-var validTests = []struct {
-	data string
-	ok   bool
-}{
-	{`foo`, false},
-	{`}{`, false},
-	{`{]`, false},
-	{`{}`, true},
-	{`{"foo":"bar"}`, true},
-	{`{"foo":"bar","bar":{"baz":["qux"]}}`, true},
+func indentNewlines(s string) string {
+	return strings.Join(strings.Split(s, "\n"), "\n\t")
+}
+
+func stripWhitespace(s string) string {
+	return strings.Map(func(r rune) rune {
+		if r == ' ' || r == '\n' || r == '\r' || r == '\t' {
+			return -1
+		}
+		return r
+	}, s)
 }
 
 func TestValid(t *testing.T) {
-	for _, tt := range validTests {
-		if ok := Valid([]byte(tt.data)); ok != tt.ok {
-			t.Errorf("Valid(%#q) = %v, want %v", tt.data, ok, tt.ok)
-		}
+	tests := []struct {
+		CaseName
+		data string
+		ok   bool
+	}{
+		{Name(""), `foo`, false},
+		{Name(""), `}{`, false},
+		{Name(""), `{]`, false},
+		{Name(""), `{}`, true},
+		{Name(""), `{"foo":"bar"}`, true},
+		{Name(""), `{"foo":"bar","bar":{"baz":["qux"]}}`, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			if ok := Valid([]byte(tt.data)); ok != tt.ok {
+				t.Errorf("%s: Valid(`%s`) = %v, want %v", tt.Where, tt.data, ok, tt.ok)
+			}
+		})
 	}
 }
 
-// Tests of simple examples.
-
-type example struct {
-	compact string
-	indent  string
-}
-
-var examples = []example{
-	{`1`, `1`},
-	{`{}`, `{}`},
-	{`[]`, `[]`},
-	{`{"":2}`, "{\n\t\"\": 2\n}"},
-	{`[3]`, "[\n\t3\n]"},
-	{`[1,2,3]`, "[\n\t1,\n\t2,\n\t3\n]"},
-	{`{"x":1}`, "{\n\t\"x\": 1\n}"},
-	{ex1, ex1i},
-	{"{\"\":\"<>&\u2028\u2029\"}", "{\n\t\"\": \"<>&\u2028\u2029\"\n}"}, // See golang.org/issue/34070
-}
-
-var ex1 = `[true,false,null,"x",1,1.5,0,-5e+2]`
-
-var ex1i = `[
+func TestCompactAndIndent(t *testing.T) {
+	tests := []struct {
+		CaseName
+		compact string
+		indent  string
+	}{
+		{Name(""), `1`, `1`},
+		{Name(""), `{}`, `{}`},
+		{Name(""), `[]`, `[]`},
+		{Name(""), `{"":2}`, "{\n\t\"\": 2\n}"},
+		{Name(""), `[3]`, "[\n\t3\n]"},
+		{Name(""), `[1,2,3]`, "[\n\t1,\n\t2,\n\t3\n]"},
+		{Name(""), `{"x":1}`, "{\n\t\"x\": 1\n}"},
+		{Name(""), `[true,false,null,"x",1,1.5,0,-5e+2]`, `[
 	true,
 	false,
 	null,
@@ -62,25 +70,40 @@
 	1.5,
 	0,
 	-5e+2
-]`
-
-func TestCompact(t *testing.T) {
+]`},
+		{Name(""), "{\"\":\"<>&\u2028\u2029\"}", "{\n\t\"\": \"<>&\u2028\u2029\"\n}"}, // See golang.org/issue/34070
+	}
 	var buf bytes.Buffer
-	for _, tt := range examples {
-		buf.Reset()
-		if err := Compact(&buf, []byte(tt.compact)); err != nil {
-			t.Errorf("Compact(%#q): %v", tt.compact, err)
-		} else if s := buf.String(); s != tt.compact {
-			t.Errorf("Compact(%#q) = %#q, want original", tt.compact, s)
-		}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			buf.Reset()
+			if err := Compact(&buf, []byte(tt.compact)); err != nil {
+				t.Errorf("%s: Compact error: %v", tt.Where, err)
+			} else if got := buf.String(); got != tt.compact {
+				t.Errorf("%s: Compact:\n\tgot:  %s\n\twant: %s", tt.Where, indentNewlines(got), indentNewlines(tt.compact))
+			}
 
-		buf.Reset()
-		if err := Compact(&buf, []byte(tt.indent)); err != nil {
-			t.Errorf("Compact(%#q): %v", tt.indent, err)
-			continue
-		} else if s := buf.String(); s != tt.compact {
-			t.Errorf("Compact(%#q) = %#q, want %#q", tt.indent, s, tt.compact)
-		}
+			buf.Reset()
+			if err := Compact(&buf, []byte(tt.indent)); err != nil {
+				t.Errorf("%s: Compact error: %v", tt.Where, err)
+			} else if got := buf.String(); got != tt.compact {
+				t.Errorf("%s: Compact:\n\tgot:  %s\n\twant: %s", tt.Where, indentNewlines(got), indentNewlines(tt.compact))
+			}
+
+			buf.Reset()
+			if err := Indent(&buf, []byte(tt.indent), "", "\t"); err != nil {
+				t.Errorf("%s: Indent error: %v", tt.Where, err)
+			} else if got := buf.String(); got != tt.indent {
+				t.Errorf("%s: Indent:\n\tgot:  %s\n\twant: %s", tt.Where, indentNewlines(got), indentNewlines(tt.indent))
+			}
+
+			buf.Reset()
+			if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
+				t.Errorf("%s: Indent error: %v", tt.Where, err)
+			} else if got := buf.String(); got != tt.indent {
+				t.Errorf("%s: Indent:\n\tgot:  %s\n\twant: %s", tt.Where, indentNewlines(got), indentNewlines(tt.indent))
+			}
+		})
 	}
 }
 
@@ -88,38 +111,21 @@
 	// U+2028 and U+2029 should be escaped inside strings.
 	// They should not appear outside strings.
 	tests := []struct {
+		CaseName
 		in, compact string
 	}{
-		{"{\"\u2028\": 1}", "{\"\u2028\":1}"},
-		{"{\"\u2029\" :2}", "{\"\u2029\":2}"},
+		{Name(""), "{\"\u2028\": 1}", "{\"\u2028\":1}"},
+		{Name(""), "{\"\u2029\" :2}", "{\"\u2029\":2}"},
 	}
 	for _, tt := range tests {
-		var buf bytes.Buffer
-		if err := Compact(&buf, []byte(tt.in)); err != nil {
-			t.Errorf("Compact(%q): %v", tt.in, err)
-		} else if s := buf.String(); s != tt.compact {
-			t.Errorf("Compact(%q) = %q, want %q", tt.in, s, tt.compact)
-		}
-	}
-}
-
-func TestIndent(t *testing.T) {
-	var buf bytes.Buffer
-	for _, tt := range examples {
-		buf.Reset()
-		if err := Indent(&buf, []byte(tt.indent), "", "\t"); err != nil {
-			t.Errorf("Indent(%#q): %v", tt.indent, err)
-		} else if s := buf.String(); s != tt.indent {
-			t.Errorf("Indent(%#q) = %#q, want original", tt.indent, s)
-		}
-
-		buf.Reset()
-		if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
-			t.Errorf("Indent(%#q): %v", tt.compact, err)
-			continue
-		} else if s := buf.String(); s != tt.indent {
-			t.Errorf("Indent(%#q) = %#q, want %#q", tt.compact, s, tt.indent)
-		}
+		t.Run(tt.Name, func(t *testing.T) {
+			var buf bytes.Buffer
+			if err := Compact(&buf, []byte(tt.in)); err != nil {
+				t.Errorf("%s: Compact error: %v", tt.Where, err)
+			} else if got := buf.String(); got != tt.compact {
+				t.Errorf("%s: Compact:\n\tgot:  %s\n\twant: %s", tt.Where, indentNewlines(got), indentNewlines(tt.compact))
+			}
+		})
 	}
 }
 
@@ -129,11 +135,11 @@
 	initBig()
 	var buf bytes.Buffer
 	if err := Compact(&buf, jsonBig); err != nil {
-		t.Fatalf("Compact: %v", err)
+		t.Fatalf("Compact error: %v", err)
 	}
 	b := buf.Bytes()
 	if !bytes.Equal(b, jsonBig) {
-		t.Error("Compact(jsonBig) != jsonBig")
+		t.Error("Compact:")
 		diff(t, b, jsonBig)
 		return
 	}
@@ -144,23 +150,23 @@
 	initBig()
 	var buf bytes.Buffer
 	if err := Indent(&buf, jsonBig, "", "\t"); err != nil {
-		t.Fatalf("Indent1: %v", err)
+		t.Fatalf("Indent error: %v", err)
 	}
 	b := buf.Bytes()
 	if len(b) == len(jsonBig) {
 		// jsonBig is compact (no unnecessary spaces);
 		// indenting should make it bigger
-		t.Fatalf("Indent(jsonBig) did not get bigger")
+		t.Fatalf("Indent did not expand the input")
 	}
 
 	// should be idempotent
 	var buf1 bytes.Buffer
 	if err := Indent(&buf1, b, "", "\t"); err != nil {
-		t.Fatalf("Indent2: %v", err)
+		t.Fatalf("Indent error: %v", err)
 	}
 	b1 := buf1.Bytes()
 	if !bytes.Equal(b1, b) {
-		t.Error("Indent(Indent(jsonBig)) != Indent(jsonBig)")
+		t.Error("Indent(Indent(jsonBig)) != Indent(jsonBig):")
 		diff(t, b1, b)
 		return
 	}
@@ -168,40 +174,40 @@
 	// should get back to original
 	buf1.Reset()
 	if err := Compact(&buf1, b); err != nil {
-		t.Fatalf("Compact: %v", err)
+		t.Fatalf("Compact error: %v", err)
 	}
 	b1 = buf1.Bytes()
 	if !bytes.Equal(b1, jsonBig) {
-		t.Error("Compact(Indent(jsonBig)) != jsonBig")
+		t.Error("Compact(Indent(jsonBig)) != jsonBig:")
 		diff(t, b1, jsonBig)
 		return
 	}
 }
 
-type indentErrorTest struct {
-	in  string
-	err error
-}
-
-var indentErrorTests = []indentErrorTest{
-	{`{"X": "foo", "Y"}`, &SyntaxError{"invalid character '}' after object key", 17}},
-	{`{"X": "foo" "Y": "bar"}`, &SyntaxError{"invalid character '\"' after object key:value pair", 13}},
-}
-
 func TestIndentErrors(t *testing.T) {
-	for i, tt := range indentErrorTests {
-		slice := make([]uint8, 0)
-		buf := bytes.NewBuffer(slice)
-		if err := Indent(buf, []uint8(tt.in), "", ""); err != nil {
-			if !reflect.DeepEqual(err, tt.err) {
-				t.Errorf("#%d: Indent: %#v", i, err)
-				continue
+	tests := []struct {
+		CaseName
+		in  string
+		err error
+	}{
+		{Name(""), `{"X": "foo", "Y"}`, &SyntaxError{"invalid character '}' after object key", 17}},
+		{Name(""), `{"X": "foo" "Y": "bar"}`, &SyntaxError{"invalid character '\"' after object key:value pair", 13}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			slice := make([]uint8, 0)
+			buf := bytes.NewBuffer(slice)
+			if err := Indent(buf, []uint8(tt.in), "", ""); err != nil {
+				if !reflect.DeepEqual(err, tt.err) {
+					t.Fatalf("%s: Indent error:\n\tgot:  %v\n\twant: %v", tt.Where, err, tt.err)
+				}
 			}
-		}
+		})
 	}
 }
 
 func diff(t *testing.T, a, b []byte) {
+	t.Helper()
 	for i := 0; ; i++ {
 		if i >= len(a) || i >= len(b) || a[i] != b[i] {
 			j := i - 10
@@ -215,10 +221,7 @@
 }
 
 func trim(b []byte) []byte {
-	if len(b) > 20 {
-		return b[0:20]
-	}
-	return b
+	return b[:min(len(b), 20)]
 }
 
 // Generate a random JSON object.
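
For reference, a small usage sketch (not part of the patch) of the exported Compact and Indent functions that TestCompactAndIndent, TestCompactBig, and TestIndentBig exercise:

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
	)

	func main() {
		src := []byte(`{"x": 1, "y": [2, 3]}`)

		// Compact removes insignificant whitespace.
		var compact bytes.Buffer
		if err := json.Compact(&compact, src); err != nil {
			panic(err)
		}
		fmt.Println(compact.String()) // {"x":1,"y":[2,3]}

		// Indent re-expands it; compacting the indented form round-trips,
		// which is the property the Big tests rely on.
		var indented bytes.Buffer
		if err := json.Indent(&indented, compact.Bytes(), "", "\t"); err != nil {
			panic(err)
		}
		fmt.Println(indented.String())
	}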
diff --git a/src/encoding/json/stream.go b/src/encoding/json/stream.go
index b4146a3..5c98d1d 100644
--- a/src/encoding/json/stream.go
+++ b/src/encoding/json/stream.go
@@ -33,7 +33,7 @@
 }
 
 // UseNumber causes the Decoder to unmarshal a number into an interface{} as a
-// Number instead of as a float64.
+// [Number] instead of as a float64.
 func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
 
 // DisallowUnknownFields causes the Decoder to return an error when the destination
@@ -44,7 +44,7 @@
 // Decode reads the next JSON-encoded value from its
 // input and stores it in the value pointed to by v.
 //
-// See the documentation for Unmarshal for details about
+// See the documentation for [Unmarshal] for details about
 // the conversion of JSON into a Go value.
 func (dec *Decoder) Decode(v any) error {
 	if dec.err != nil {
@@ -79,7 +79,7 @@
 }
 
 // Buffered returns a reader of the data remaining in the Decoder's
-// buffer. The reader is valid until the next call to Decode.
+// buffer. The reader is valid until the next call to [Decoder.Decode].
 func (dec *Decoder) Buffered() io.Reader {
 	return bytes.NewReader(dec.buf[dec.scanp:])
 }
@@ -196,7 +196,7 @@
 // Encode writes the JSON encoding of v to the stream,
 // followed by a newline character.
 //
-// See the documentation for Marshal for details about the
+// See the documentation for [Marshal] for details about the
 // conversion of Go values to JSON.
 func (enc *Encoder) Encode(v any) error {
 	if enc.err != nil {
@@ -253,7 +253,7 @@
 }
 
 // RawMessage is a raw encoded JSON value.
-// It implements Marshaler and Unmarshaler and can
+// It implements [Marshaler] and [Unmarshaler] and can
 // be used to delay JSON decoding or precompute a JSON encoding.
 type RawMessage []byte
 
@@ -279,12 +279,12 @@
 
 // A Token holds a value of one of these types:
 //
-//	Delim, for the four JSON delimiters [ ] { }
-//	bool, for JSON booleans
-//	float64, for JSON numbers
-//	Number, for JSON numbers
-//	string, for JSON string literals
-//	nil, for JSON null
+//   - [Delim], for the four JSON delimiters [ ] { }
+//   - bool, for JSON booleans
+//   - float64, for JSON numbers
+//   - [Number], for JSON numbers
+//   - string, for JSON string literals
+//   - nil, for JSON null
 type Token any
 
 const (
@@ -354,14 +354,14 @@
 }
 
 // Token returns the next JSON token in the input stream.
-// At the end of the input stream, Token returns nil, io.EOF.
+// At the end of the input stream, Token returns nil, [io.EOF].
 //
 // Token guarantees that the delimiters [ ] { } it returns are
 // properly nested and matched: if Token encounters an unexpected
 // delimiter in the input, it will return an error.
 //
 // The input stream consists of basic JSON values—bool, string,
-// number, and null—along with delimiters [ ] { } of type Delim
+// number, and null—along with delimiters [ ] { } of type [Delim]
 // to mark the start and end of arrays and objects.
 // Commas and colons are elided.
 func (dec *Decoder) Token() (Token, error) {
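
A short sketch (not part of the patch) of the Token/Delim contract documented above: Token returns nil, io.EOF at the end of the input and guarantees matched delimiters.

	package main

	import (
		"encoding/json"
		"fmt"
		"io"
		"strings"
	)

	func main() {
		dec := json.NewDecoder(strings.NewReader(`{"a": [1, "x", true]}`))
		for {
			tok, err := dec.Token()
			if err == io.EOF { // end of stream: nil, io.EOF
				break
			}
			if err != nil {
				panic(err)
			}
			switch v := tok.(type) {
			case json.Delim: // one of [ ] { }
				fmt.Println("delim:", v)
			default: // bool, float64, Number, string, or nil
				fmt.Printf("value: %T %v\n", v, v)
			}
		}
	}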
diff --git a/src/encoding/json/stream_test.go b/src/encoding/json/stream_test.go
index 97f9fbd..32ede8c 100644
--- a/src/encoding/json/stream_test.go
+++ b/src/encoding/json/stream_test.go
@@ -6,17 +6,44 @@
 
 import (
 	"bytes"
+	"fmt"
 	"io"
 	"log"
 	"net"
 	"net/http"
 	"net/http/httptest"
+	"path"
 	"reflect"
+	"runtime"
 	"runtime/debug"
 	"strings"
 	"testing"
 )
 
+// TODO(https://go.dev/issue/52751): Replace with native testing support.
+
+// CaseName is a case name annotated with a file and line.
+type CaseName struct {
+	Name  string
+	Where CasePos
+}
+
+// Name annotates a case name with the file and line of the caller.
+func Name(s string) (c CaseName) {
+	c.Name = s
+	runtime.Callers(2, c.Where.pc[:])
+	return c
+}
+
+// CasePos represents a file and line number.
+type CasePos struct{ pc [1]uintptr }
+
+func (pos CasePos) String() string {
+	frames := runtime.CallersFrames(pos.pc[:])
+	frame, _ := frames.Next()
+	return fmt.Sprintf("%s:%d", path.Base(frame.File), frame.Line)
+}
+
 // Test values for the stream test.
 // One of each JSON kind.
 var streamTest = []any{
@@ -49,11 +76,11 @@
 		enc.SetIndent("", "")
 		for j, v := range streamTest[0:i] {
 			if err := enc.Encode(v); err != nil {
-				t.Fatalf("encode #%d: %v", j, err)
+				t.Fatalf("#%d.%d Encode error: %v", i, j, err)
 			}
 		}
 		if have, want := buf.String(), nlines(streamEncoded, i); have != want {
-			t.Errorf("encoding %d items: mismatch", i)
+			t.Errorf("encoding %d items: mismatch:", i)
 			diff(t, []byte(have), []byte(want))
 			break
 		}
@@ -76,24 +103,24 @@
 	var buf bytes.Buffer
 	enc := NewEncoder(&buf)
 	if err := enc.Encode(dummy); err == nil {
-		t.Errorf("Encode(dummy) == nil; want error")
+		t.Errorf("Encode(dummy) error: got nil, want non-nil")
 	}
 
 	type Data struct {
 		A string
 		I int
 	}
-	data := Data{A: "a", I: 1}
-	if err := enc.Encode(data); err != nil {
-		t.Errorf("Marshal(%v) = %v", data, err)
+	want := Data{A: "a", I: 1}
+	if err := enc.Encode(want); err != nil {
+		t.Errorf("Marshal error: %v", err)
 	}
 
-	var data2 Data
-	if err := Unmarshal(buf.Bytes(), &data2); err != nil {
-		t.Errorf("Unmarshal(%v) = %v", data2, err)
+	var got Data
+	if err := Unmarshal(buf.Bytes(), &got); err != nil {
+		t.Errorf("Unmarshal error: %v", err)
 	}
-	if data2 != data {
-		t.Errorf("expect: %v, but get: %v", data, data2)
+	if got != want {
+		t.Errorf("Marshal/Unmarshal roundtrip:\n\tgot:  %v\n\twant: %v", got, want)
 	}
 }
 
@@ -122,7 +149,7 @@
 		enc.Encode(v)
 	}
 	if have, want := buf.String(), streamEncodedIndent; have != want {
-		t.Error("indented encoding mismatch")
+		t.Error("Encode mismatch:")
 		diff(t, []byte(have), []byte(want))
 	}
 }
@@ -160,50 +187,51 @@
 		Bar string `json:"bar,string"`
 	}{`<html>foobar</html>`}
 
-	for _, tt := range []struct {
-		name       string
+	tests := []struct {
+		CaseName
 		v          any
 		wantEscape string
 		want       string
 	}{
-		{"c", c, `"\u003c\u0026\u003e"`, `"<&>"`},
-		{"ct", ct, `"\"\u003c\u0026\u003e\""`, `"\"<&>\""`},
-		{`"<&>"`, "<&>", `"\u003c\u0026\u003e"`, `"<&>"`},
+		{Name("c"), c, `"\u003c\u0026\u003e"`, `"<&>"`},
+		{Name("ct"), ct, `"\"\u003c\u0026\u003e\""`, `"\"<&>\""`},
+		{Name(`"<&>"`), "<&>", `"\u003c\u0026\u003e"`, `"<&>"`},
 		{
-			"tagStruct", tagStruct,
+			Name("tagStruct"), tagStruct,
 			`{"\u003c\u003e\u0026#! ":0,"Invalid":0}`,
 			`{"<>&#! ":0,"Invalid":0}`,
 		},
 		{
-			`"<str>"`, marshalerStruct,
+			Name(`"<str>"`), marshalerStruct,
 			`{"NonPtr":"\u003cstr\u003e","Ptr":"\u003cstr\u003e"}`,
 			`{"NonPtr":"<str>","Ptr":"<str>"}`,
 		},
 		{
-			"stringOption", stringOption,
+			Name("stringOption"), stringOption,
 			`{"bar":"\"\\u003chtml\\u003efoobar\\u003c/html\\u003e\""}`,
 			`{"bar":"\"<html>foobar</html>\""}`,
 		},
-	} {
-		var buf strings.Builder
-		enc := NewEncoder(&buf)
-		if err := enc.Encode(tt.v); err != nil {
-			t.Errorf("Encode(%s): %s", tt.name, err)
-			continue
-		}
-		if got := strings.TrimSpace(buf.String()); got != tt.wantEscape {
-			t.Errorf("Encode(%s) = %#q, want %#q", tt.name, got, tt.wantEscape)
-		}
-		buf.Reset()
-		enc.SetEscapeHTML(false)
-		if err := enc.Encode(tt.v); err != nil {
-			t.Errorf("SetEscapeHTML(false) Encode(%s): %s", tt.name, err)
-			continue
-		}
-		if got := strings.TrimSpace(buf.String()); got != tt.want {
-			t.Errorf("SetEscapeHTML(false) Encode(%s) = %#q, want %#q",
-				tt.name, got, tt.want)
-		}
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			var buf strings.Builder
+			enc := NewEncoder(&buf)
+			if err := enc.Encode(tt.v); err != nil {
+				t.Fatalf("%s: Encode(%s) error: %s", tt.Where, tt.Name, err)
+			}
+			if got := strings.TrimSpace(buf.String()); got != tt.wantEscape {
+				t.Errorf("%s: Encode(%s):\n\tgot:  %s\n\twant: %s", tt.Where, tt.Name, got, tt.wantEscape)
+			}
+			buf.Reset()
+			enc.SetEscapeHTML(false)
+			if err := enc.Encode(tt.v); err != nil {
+				t.Fatalf("%s: SetEscapeHTML(false) Encode(%s) error: %s", tt.Where, tt.Name, err)
+			}
+			if got := strings.TrimSpace(buf.String()); got != tt.want {
+				t.Errorf("%s: SetEscapeHTML(false) Encode(%s):\n\tgot:  %s\n\twant: %s",
+					tt.Where, tt.Name, got, tt.want)
+			}
+		})
 	}
 }
 
@@ -224,14 +252,14 @@
 		dec := NewDecoder(&buf)
 		for j := range out {
 			if err := dec.Decode(&out[j]); err != nil {
-				t.Fatalf("decode #%d/%d: %v", j, i, err)
+				t.Fatalf("decode #%d/%d error: %v", j, i, err)
 			}
 		}
 		if !reflect.DeepEqual(out, streamTest[0:i]) {
-			t.Errorf("decoding %d items: mismatch", i)
+			t.Errorf("decoding %d items: mismatch:", i)
 			for j := range out {
 				if !reflect.DeepEqual(out[j], streamTest[j]) {
-					t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j])
+					t.Errorf("#%d:\n\tgot:  %v\n\twant: %v", j, out[j], streamTest[j])
 				}
 			}
 			break
@@ -250,14 +278,14 @@
 		t.Fatal(err)
 	}
 	if m.Name != "Gopher" {
-		t.Errorf("Name = %q; want Gopher", m.Name)
+		t.Errorf("Name = %s, want Gopher", m.Name)
 	}
 	rest, err := io.ReadAll(d.Buffered())
 	if err != nil {
 		t.Fatal(err)
 	}
-	if g, w := string(rest), " extra "; g != w {
-		t.Errorf("Remaining = %q; want %q", g, w)
+	if got, want := string(rest), " extra "; got != want {
+		t.Errorf("Remaining = %s, want %s", got, want)
 	}
 }
 
@@ -282,20 +310,20 @@
 		Y  float32
 	}
 	const raw = `["\u0056",null]`
-	const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
-	err := Unmarshal([]byte(msg), &data)
+	const want = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
+	err := Unmarshal([]byte(want), &data)
 	if err != nil {
-		t.Fatalf("Unmarshal: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 	if string([]byte(data.Id)) != raw {
-		t.Fatalf("Raw mismatch: have %#q want %#q", []byte(data.Id), raw)
+		t.Fatalf("Unmarshal:\n\tgot:  %s\n\twant: %s", []byte(data.Id), raw)
 	}
-	b, err := Marshal(&data)
+	got, err := Marshal(&data)
 	if err != nil {
-		t.Fatalf("Marshal: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
-	if string(b) != msg {
-		t.Fatalf("Marshal: have %#q want %#q", b, msg)
+	if string(got) != want {
+		t.Fatalf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
@@ -306,159 +334,156 @@
 		IdPtr *RawMessage
 		Y     float32
 	}
-	const msg = `{"X":0.1,"Id":null,"IdPtr":null,"Y":0.2}`
-	err := Unmarshal([]byte(msg), &data)
+	const want = `{"X":0.1,"Id":null,"IdPtr":null,"Y":0.2}`
+	err := Unmarshal([]byte(want), &data)
 	if err != nil {
-		t.Fatalf("Unmarshal: %v", err)
+		t.Fatalf("Unmarshal error: %v", err)
 	}
 	if want, got := "null", string(data.Id); want != got {
-		t.Fatalf("Raw mismatch: have %q, want %q", got, want)
+		t.Fatalf("Unmarshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 	if data.IdPtr != nil {
-		t.Fatalf("Raw pointer mismatch: have non-nil, want nil")
+		t.Fatalf("pointer mismatch: got non-nil, want nil")
 	}
-	b, err := Marshal(&data)
+	got, err := Marshal(&data)
 	if err != nil {
-		t.Fatalf("Marshal: %v", err)
+		t.Fatalf("Marshal error: %v", err)
 	}
-	if string(b) != msg {
-		t.Fatalf("Marshal: have %#q want %#q", b, msg)
+	if string(got) != want {
+		t.Fatalf("Marshal:\n\tgot:  %s\n\twant: %s", got, want)
 	}
 }
 
-var blockingTests = []string{
-	`{"x": 1}`,
-	`[1, 2, 3]`,
-}
-
 func TestBlocking(t *testing.T) {
-	for _, enc := range blockingTests {
-		r, w := net.Pipe()
-		go w.Write([]byte(enc))
-		var val any
-
-		// If Decode reads beyond what w.Write writes above,
-		// it will block, and the test will deadlock.
-		if err := NewDecoder(r).Decode(&val); err != nil {
-			t.Errorf("decoding %s: %v", enc, err)
-		}
-		r.Close()
-		w.Close()
+	tests := []struct {
+		CaseName
+		in string
+	}{
+		{Name(""), `{"x": 1}`},
+		{Name(""), `[1, 2, 3]`},
 	}
-}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			r, w := net.Pipe()
+			go w.Write([]byte(tt.in))
+			var val any
 
-type tokenStreamCase struct {
-	json      string
-	expTokens []any
+			// If Decode reads beyond what w.Write writes above,
+			// it will block, and the test will deadlock.
+			if err := NewDecoder(r).Decode(&val); err != nil {
+				t.Errorf("%s: NewDecoder(%s).Decode error: %v", tt.Where, tt.in, err)
+			}
+			r.Close()
+			w.Close()
+		})
+	}
 }
 
 type decodeThis struct {
 	v any
 }
 
-var tokenStreamCases = []tokenStreamCase{
-	// streaming token cases
-	{json: `10`, expTokens: []any{float64(10)}},
-	{json: ` [10] `, expTokens: []any{
-		Delim('['), float64(10), Delim(']')}},
-	{json: ` [false,10,"b"] `, expTokens: []any{
-		Delim('['), false, float64(10), "b", Delim(']')}},
-	{json: `{ "a": 1 }`, expTokens: []any{
-		Delim('{'), "a", float64(1), Delim('}')}},
-	{json: `{"a": 1, "b":"3"}`, expTokens: []any{
-		Delim('{'), "a", float64(1), "b", "3", Delim('}')}},
-	{json: ` [{"a": 1},{"a": 2}] `, expTokens: []any{
-		Delim('['),
-		Delim('{'), "a", float64(1), Delim('}'),
-		Delim('{'), "a", float64(2), Delim('}'),
-		Delim(']')}},
-	{json: `{"obj": {"a": 1}}`, expTokens: []any{
-		Delim('{'), "obj", Delim('{'), "a", float64(1), Delim('}'),
-		Delim('}')}},
-	{json: `{"obj": [{"a": 1}]}`, expTokens: []any{
-		Delim('{'), "obj", Delim('['),
-		Delim('{'), "a", float64(1), Delim('}'),
-		Delim(']'), Delim('}')}},
-
-	// streaming tokens with intermittent Decode()
-	{json: `{ "a": 1 }`, expTokens: []any{
-		Delim('{'), "a",
-		decodeThis{float64(1)},
-		Delim('}')}},
-	{json: ` [ { "a" : 1 } ] `, expTokens: []any{
-		Delim('['),
-		decodeThis{map[string]any{"a": float64(1)}},
-		Delim(']')}},
-	{json: ` [{"a": 1},{"a": 2}] `, expTokens: []any{
-		Delim('['),
-		decodeThis{map[string]any{"a": float64(1)}},
-		decodeThis{map[string]any{"a": float64(2)}},
-		Delim(']')}},
-	{json: `{ "obj" : [ { "a" : 1 } ] }`, expTokens: []any{
-		Delim('{'), "obj", Delim('['),
-		decodeThis{map[string]any{"a": float64(1)}},
-		Delim(']'), Delim('}')}},
-
-	{json: `{"obj": {"a": 1}}`, expTokens: []any{
-		Delim('{'), "obj",
-		decodeThis{map[string]any{"a": float64(1)}},
-		Delim('}')}},
-	{json: `{"obj": [{"a": 1}]}`, expTokens: []any{
-		Delim('{'), "obj",
-		decodeThis{[]any{
-			map[string]any{"a": float64(1)},
-		}},
-		Delim('}')}},
-	{json: ` [{"a": 1} {"a": 2}] `, expTokens: []any{
-		Delim('['),
-		decodeThis{map[string]any{"a": float64(1)}},
-		decodeThis{&SyntaxError{"expected comma after array element", 11}},
-	}},
-	{json: `{ "` + strings.Repeat("a", 513) + `" 1 }`, expTokens: []any{
-		Delim('{'), strings.Repeat("a", 513),
-		decodeThis{&SyntaxError{"expected colon after object key", 518}},
-	}},
-	{json: `{ "\a" }`, expTokens: []any{
-		Delim('{'),
-		&SyntaxError{"invalid character 'a' in string escape code", 3},
-	}},
-	{json: ` \a`, expTokens: []any{
-		&SyntaxError{"invalid character '\\\\' looking for beginning of value", 1},
-	}},
-}
-
 func TestDecodeInStream(t *testing.T) {
-	for ci, tcase := range tokenStreamCases {
+	tests := []struct {
+		CaseName
+		json      string
+		expTokens []any
+	}{
+		// streaming token cases
+		{CaseName: Name(""), json: `10`, expTokens: []any{float64(10)}},
+		{CaseName: Name(""), json: ` [10] `, expTokens: []any{
+			Delim('['), float64(10), Delim(']')}},
+		{CaseName: Name(""), json: ` [false,10,"b"] `, expTokens: []any{
+			Delim('['), false, float64(10), "b", Delim(']')}},
+		{CaseName: Name(""), json: `{ "a": 1 }`, expTokens: []any{
+			Delim('{'), "a", float64(1), Delim('}')}},
+		{CaseName: Name(""), json: `{"a": 1, "b":"3"}`, expTokens: []any{
+			Delim('{'), "a", float64(1), "b", "3", Delim('}')}},
+		{CaseName: Name(""), json: ` [{"a": 1},{"a": 2}] `, expTokens: []any{
+			Delim('['),
+			Delim('{'), "a", float64(1), Delim('}'),
+			Delim('{'), "a", float64(2), Delim('}'),
+			Delim(']')}},
+		{CaseName: Name(""), json: `{"obj": {"a": 1}}`, expTokens: []any{
+			Delim('{'), "obj", Delim('{'), "a", float64(1), Delim('}'),
+			Delim('}')}},
+		{CaseName: Name(""), json: `{"obj": [{"a": 1}]}`, expTokens: []any{
+			Delim('{'), "obj", Delim('['),
+			Delim('{'), "a", float64(1), Delim('}'),
+			Delim(']'), Delim('}')}},
 
-		dec := NewDecoder(strings.NewReader(tcase.json))
-		for i, etk := range tcase.expTokens {
+		// streaming tokens with intermittent Decode()
+		{CaseName: Name(""), json: `{ "a": 1 }`, expTokens: []any{
+			Delim('{'), "a",
+			decodeThis{float64(1)},
+			Delim('}')}},
+		{CaseName: Name(""), json: ` [ { "a" : 1 } ] `, expTokens: []any{
+			Delim('['),
+			decodeThis{map[string]any{"a": float64(1)}},
+			Delim(']')}},
+		{CaseName: Name(""), json: ` [{"a": 1},{"a": 2}] `, expTokens: []any{
+			Delim('['),
+			decodeThis{map[string]any{"a": float64(1)}},
+			decodeThis{map[string]any{"a": float64(2)}},
+			Delim(']')}},
+		{CaseName: Name(""), json: `{ "obj" : [ { "a" : 1 } ] }`, expTokens: []any{
+			Delim('{'), "obj", Delim('['),
+			decodeThis{map[string]any{"a": float64(1)}},
+			Delim(']'), Delim('}')}},
 
-			var tk any
-			var err error
+		{CaseName: Name(""), json: `{"obj": {"a": 1}}`, expTokens: []any{
+			Delim('{'), "obj",
+			decodeThis{map[string]any{"a": float64(1)}},
+			Delim('}')}},
+		{CaseName: Name(""), json: `{"obj": [{"a": 1}]}`, expTokens: []any{
+			Delim('{'), "obj",
+			decodeThis{[]any{
+				map[string]any{"a": float64(1)},
+			}},
+			Delim('}')}},
+		{CaseName: Name(""), json: ` [{"a": 1} {"a": 2}] `, expTokens: []any{
+			Delim('['),
+			decodeThis{map[string]any{"a": float64(1)}},
+			decodeThis{&SyntaxError{"expected comma after array element", 11}},
+		}},
+		{CaseName: Name(""), json: `{ "` + strings.Repeat("a", 513) + `" 1 }`, expTokens: []any{
+			Delim('{'), strings.Repeat("a", 513),
+			decodeThis{&SyntaxError{"expected colon after object key", 518}},
+		}},
+		{CaseName: Name(""), json: `{ "\a" }`, expTokens: []any{
+			Delim('{'),
+			&SyntaxError{"invalid character 'a' in string escape code", 3},
+		}},
+		{CaseName: Name(""), json: ` \a`, expTokens: []any{
+			&SyntaxError{"invalid character '\\\\' looking for beginning of value", 1},
+		}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			dec := NewDecoder(strings.NewReader(tt.json))
+			for i, want := range tt.expTokens {
+				var got any
+				var err error
 
-			if dt, ok := etk.(decodeThis); ok {
-				etk = dt.v
-				err = dec.Decode(&tk)
-			} else {
-				tk, err = dec.Token()
-			}
-			if experr, ok := etk.(error); ok {
-				if err == nil || !reflect.DeepEqual(err, experr) {
-					t.Errorf("case %v: Expected error %#v in %q, but was %#v", ci, experr, tcase.json, err)
+				if dt, ok := want.(decodeThis); ok {
+					want = dt.v
+					err = dec.Decode(&got)
+				} else {
+					got, err = dec.Token()
 				}
-				break
-			} else if err == io.EOF {
-				t.Errorf("case %v: Unexpected EOF in %q", ci, tcase.json)
-				break
-			} else if err != nil {
-				t.Errorf("case %v: Unexpected error '%#v' in %q", ci, err, tcase.json)
-				break
+				if errWant, ok := want.(error); ok {
+					if err == nil || !reflect.DeepEqual(err, errWant) {
+						t.Fatalf("%s:\n\tinput: %s\n\tgot error:  %v\n\twant error: %v", tt.Where, tt.json, err, errWant)
+					}
+					break
+				} else if err != nil {
+					t.Fatalf("%s:\n\tinput: %s\n\tgot error:  %v\n\twant error: nil", tt.Where, tt.json, err)
+				}
+				if !reflect.DeepEqual(got, want) {
+					t.Fatalf("%s: token %d:\n\tinput: %s\n\tgot:  %T(%v)\n\twant: %T(%v)", tt.Where, i, tt.json, got, got, want, want)
+				}
 			}
-			if !reflect.DeepEqual(tk, etk) {
-				t.Errorf(`case %v: %q @ %v expected %T(%v) was %T(%v)`, ci, tcase.json, i, etk, etk, tk, tk)
-				break
-			}
-		}
+		})
 	}
 }
 
@@ -472,7 +497,7 @@
 	defer ts.Close()
 	res, err := http.Get(ts.URL)
 	if err != nil {
-		log.Fatalf("GET failed: %v", err)
+		log.Fatalf("http.Get error: %v", err)
 	}
 	defer res.Body.Close()
 
@@ -483,15 +508,15 @@
 	d := NewDecoder(res.Body)
 	err = d.Decode(&foo)
 	if err != nil {
-		t.Fatalf("Decode: %v", err)
+		t.Fatalf("Decode error: %v", err)
 	}
 	if foo.Foo != "bar" {
-		t.Errorf("decoded %q; want \"bar\"", foo.Foo)
+		t.Errorf(`Decode: got %q, want "bar"`, foo.Foo)
 	}
 
 	// make sure we get the EOF the second time
 	err = d.Decode(&foo)
 	if err != io.EOF {
-		t.Errorf("err = %v; want io.EOF", err)
+		t.Errorf("Decode error:\n\tgot:  %v\n\twant: io.EOF", err)
 	}
 }
diff --git a/src/encoding/json/tagkey_test.go b/src/encoding/json/tagkey_test.go
index 6330efd..d432cd7 100644
--- a/src/encoding/json/tagkey_test.go
+++ b/src/encoding/json/tagkey_test.go
@@ -72,49 +72,50 @@
 	W string `json:"Ελλάδα"`
 }
 
-var structTagObjectKeyTests = []struct {
-	raw   any
-	value string
-	key   string
-}{
-	{basicLatin2xTag{"2x"}, "2x", "$%-/"},
-	{basicLatin3xTag{"3x"}, "3x", "0123456789"},
-	{basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
-	{basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
-	{basicLatin6xTag{"6x"}, "6x", "abcdefghijklmno"},
-	{basicLatin7xTag{"7x"}, "7x", "pqrstuvwxyz"},
-	{miscPlaneTag{"いろはにほへと"}, "いろはにほへと", "色は匂へど"},
-	{dashTag{"foo"}, "foo", "-"},
-	{emptyTag{"Pour Moi"}, "Pour Moi", "W"},
-	{misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
-	{badFormatTag{"Orfevre"}, "Orfevre", "Y"},
-	{badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
-	{percentSlashTag{"brut"}, "brut", "text/html%"},
-	{punctuationTag{"Union Rags"}, "Union Rags", "!#$%&()*+-./:;<=>?@[]^_{|}~ "},
-	{spaceTag{"Perreddu"}, "Perreddu", "With space"},
-	{unicodeTag{"Loukanikos"}, "Loukanikos", "Ελλάδα"},
-}
-
 func TestStructTagObjectKey(t *testing.T) {
-	for _, tt := range structTagObjectKeyTests {
-		b, err := Marshal(tt.raw)
-		if err != nil {
-			t.Fatalf("Marshal(%#q) failed: %v", tt.raw, err)
-		}
-		var f any
-		err = Unmarshal(b, &f)
-		if err != nil {
-			t.Fatalf("Unmarshal(%#q) failed: %v", b, err)
-		}
-		for i, v := range f.(map[string]any) {
-			switch i {
-			case tt.key:
-				if s, ok := v.(string); !ok || s != tt.value {
-					t.Fatalf("Unexpected value: %#q, want %v", s, tt.value)
-				}
-			default:
-				t.Fatalf("Unexpected key: %#q, from %#q", i, b)
+	tests := []struct {
+		CaseName
+		raw   any
+		value string
+		key   string
+	}{
+		{Name(""), basicLatin2xTag{"2x"}, "2x", "$%-/"},
+		{Name(""), basicLatin3xTag{"3x"}, "3x", "0123456789"},
+		{Name(""), basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
+		{Name(""), basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
+		{Name(""), basicLatin6xTag{"6x"}, "6x", "abcdefghijklmno"},
+		{Name(""), basicLatin7xTag{"7x"}, "7x", "pqrstuvwxyz"},
+		{Name(""), miscPlaneTag{"いろはにほへと"}, "いろはにほへと", "色は匂へど"},
+		{Name(""), dashTag{"foo"}, "foo", "-"},
+		{Name(""), emptyTag{"Pour Moi"}, "Pour Moi", "W"},
+		{Name(""), misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
+		{Name(""), badFormatTag{"Orfevre"}, "Orfevre", "Y"},
+		{Name(""), badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
+		{Name(""), percentSlashTag{"brut"}, "brut", "text/html%"},
+		{Name(""), punctuationTag{"Union Rags"}, "Union Rags", "!#$%&()*+-./:;<=>?@[]^_{|}~ "},
+		{Name(""), spaceTag{"Perreddu"}, "Perreddu", "With space"},
+		{Name(""), unicodeTag{"Loukanikos"}, "Loukanikos", "Ελλάδα"},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			b, err := Marshal(tt.raw)
+			if err != nil {
+				t.Fatalf("%s: Marshal error: %v", tt.Where, err)
 			}
-		}
+			var f any
+			err = Unmarshal(b, &f)
+			if err != nil {
+				t.Fatalf("%s: Unmarshal error: %v", tt.Where, err)
+			}
+			for k, v := range f.(map[string]any) {
+				if k == tt.key {
+					if s, ok := v.(string); !ok || s != tt.value {
+						t.Fatalf("%s: Unmarshal(%#q) value:\n\tgot:  %q\n\twant: %q", tt.Where, b, s, tt.value)
+					}
+				} else {
+					t.Fatalf("%s: Unmarshal(%#q): unexpected key: %q", tt.Where, b, k)
+				}
+			}
+		})
 	}
 }
diff --git a/src/encoding/json/tags_test.go b/src/encoding/json/tags_test.go
index 8ba8ddd..1d2323d 100644
--- a/src/encoding/json/tags_test.go
+++ b/src/encoding/json/tags_test.go
@@ -22,7 +22,7 @@
 		{"bar", false},
 	} {
 		if opts.Contains(tt.opt) != tt.want {
-			t.Errorf("Contains(%q) = %v", tt.opt, !tt.want)
+			t.Errorf("Contains(%q) = %v, want %v", tt.opt, !tt.want, tt.want)
 		}
 	}
 }
diff --git a/src/encoding/pem/pem.go b/src/encoding/pem/pem.go
index d26e4c8..4b4f749 100644
--- a/src/encoding/pem/pem.go
+++ b/src/encoding/pem/pem.go
@@ -25,7 +25,7 @@
 //	base64-encoded Bytes
 //	-----END Type-----
 //
-// where Headers is a possibly empty sequence of Key: Value lines.
+// where [Block.Headers] is a possibly empty sequence of Key: Value lines.
 type Block struct {
 	Type    string            // The type, taken from the preamble (i.e. "RSA PRIVATE KEY").
 	Headers map[string]string // Optional headers.
@@ -306,7 +306,7 @@
 //
 // If b has invalid headers and cannot be encoded,
 // EncodeToMemory returns nil. If it is important to
-// report details about this error case, use Encode instead.
+// report details about this error case, use [Encode] instead.
 func EncodeToMemory(b *Block) []byte {
 	var buf bytes.Buffer
 	if err := Encode(&buf, b); err != nil {
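
A hedged sketch (not part of the patch) of the Encode/EncodeToMemory distinction the doc comment above describes: EncodeToMemory signals invalid headers only by returning nil, while Encode reports the reason. The colon-in-key header used here is assumed to be one of the invalid cases.

	package main

	import (
		"encoding/pem"
		"fmt"
		"os"
	)

	func main() {
		block := &pem.Block{
			Type:    "MESSAGE",
			Headers: map[string]string{"Bad:Key": "v"}, // ':' in a key cannot be encoded
			Bytes:   []byte("hello"),
		}

		if pem.EncodeToMemory(block) == nil {
			fmt.Println("EncodeToMemory returned nil")
		}
		if err := pem.Encode(os.Stdout, block); err != nil {
			fmt.Println("Encode error:", err)
		}
	}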
diff --git a/src/encoding/xml/marshal.go b/src/encoding/xml/marshal.go
index ae39846..05b5542 100644
--- a/src/encoding/xml/marshal.go
+++ b/src/encoding/xml/marshal.go
@@ -17,7 +17,7 @@
 )
 
 const (
-	// Header is a generic XML header suitable for use with the output of Marshal.
+	// Header is a generic XML header suitable for use with the output of [Marshal].
 	// This is not automatically added to any output of this package,
 	// it is provided as a convenience.
 	Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
@@ -34,7 +34,7 @@
 //
 // The name for the XML elements is taken from, in order of preference:
 //   - the tag on the XMLName field, if the data is a struct
-//   - the value of the XMLName field of type Name
+//   - the value of the XMLName field of type [Name]
 //   - the tag of the struct field used to obtain the data
 //   - the name of the struct field used to obtain the data
 //   - the name of the marshaled type
@@ -62,9 +62,9 @@
 //     string of length zero.
 //   - an anonymous struct field is handled as if the fields of its
 //     value were part of the outer struct.
-//   - a field implementing Marshaler is written by calling its MarshalXML
+//   - a field implementing [Marshaler] is written by calling its MarshalXML
 //     method.
-//   - a field implementing encoding.TextMarshaler is written by encoding the
+//   - a field implementing [encoding.TextMarshaler] is written by encoding the
 //     result of its MarshalText method as text.
 //
 // If a field uses a tag "a>b>c", then the element c will be nested inside
@@ -74,7 +74,7 @@
 // If the XML name for a struct field is defined by both the field tag and the
 // struct's XMLName field, the names must match.
 //
-// See MarshalIndent for an example.
+// See [MarshalIndent] for an example.
 //
 // Marshal will return an error if asked to marshal a channel, function, or map.
 func Marshal(v any) ([]byte, error) {
@@ -96,7 +96,7 @@
 // By convention, arrays or slices are typically encoded as a sequence
 // of elements, one per entry.
 // Using start as the element tag is not required, but doing so
-// will enable Unmarshal to match the XML elements to the correct
+// will enable [Unmarshal] to match the XML elements to the correct
 // struct field.
 // One common implementation strategy is to construct a separate
 // value with a layout corresponding to the desired XML and then
@@ -114,9 +114,9 @@
 //
 // MarshalXMLAttr returns an XML attribute with the encoded value of the receiver.
 // Using name as the attribute name is not required, but doing so
-// will enable Unmarshal to match the attribute to the correct
+// will enable [Unmarshal] to match the attribute to the correct
 // struct field.
-// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute
+// If MarshalXMLAttr returns the zero attribute [Attr]{}, no attribute
 // will be generated in the output.
 // MarshalXMLAttr is used only for struct fields with the
 // "attr" option in the field tag.
@@ -124,7 +124,7 @@
 	MarshalXMLAttr(name Name) (Attr, error)
 }
 
-// MarshalIndent works like Marshal, but each XML element begins on a new
+// MarshalIndent works like [Marshal], but each XML element begins on a new
 // indented line that starts with prefix and is followed by one or more
 // copies of indent according to the nesting depth.
 func MarshalIndent(v any, prefix, indent string) ([]byte, error) {
@@ -162,10 +162,10 @@
 
 // Encode writes the XML encoding of v to the stream.
 //
-// See the documentation for Marshal for details about the conversion
+// See the documentation for [Marshal] for details about the conversion
 // of Go values to XML.
 //
-// Encode calls Flush before returning.
+// Encode calls [Encoder.Flush] before returning.
 func (enc *Encoder) Encode(v any) error {
 	err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil)
 	if err != nil {
@@ -177,10 +177,10 @@
 // EncodeElement writes the XML encoding of v to the stream,
 // using start as the outermost tag in the encoding.
 //
-// See the documentation for Marshal for details about the conversion
+// See the documentation for [Marshal] for details about the conversion
 // of Go values to XML.
 //
-// EncodeElement calls Flush before returning.
+// EncodeElement calls [Encoder.Flush] before returning.
 func (enc *Encoder) EncodeElement(v any, start StartElement) error {
 	err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start)
 	if err != nil {
@@ -196,16 +196,16 @@
 )
 
 // EncodeToken writes the given XML token to the stream.
-// It returns an error if StartElement and EndElement tokens are not properly matched.
+// It returns an error if [StartElement] and [EndElement] tokens are not properly matched.
 //
-// EncodeToken does not call Flush, because usually it is part of a larger operation
-// such as Encode or EncodeElement (or a custom Marshaler's MarshalXML invoked
+// EncodeToken does not call [Encoder.Flush], because usually it is part of a larger operation
+// such as [Encoder.Encode] or [Encoder.EncodeElement] (or a custom [Marshaler]'s MarshalXML invoked
 // during those), and those will call Flush when finished.
 // Callers that create an Encoder and then invoke EncodeToken directly, without
 // using Encode or EncodeElement, need to call Flush when finished to ensure
 // that the XML is written to the underlying writer.
 //
-// EncodeToken allows writing a ProcInst with Target set to "xml" only as the first token
+// EncodeToken allows writing a [ProcInst] with Target set to "xml" only as the first token
 // in the stream.
 func (enc *Encoder) EncodeToken(t Token) error {
 
@@ -303,7 +303,7 @@
 }
 
 // Flush flushes any buffered XML to the underlying writer.
-// See the EncodeToken documentation for details about when it is necessary.
+// See the [Encoder.EncodeToken] documentation for details about when it is necessary.
 func (enc *Encoder) Flush() error {
 	return enc.p.w.Flush()
 }
@@ -415,9 +415,9 @@
 }
 
 var (
-	marshalerType     = reflect.TypeOf((*Marshaler)(nil)).Elem()
-	marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem()
-	textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	marshalerType     = reflect.TypeFor[Marshaler]()
+	marshalerAttrType = reflect.TypeFor[MarshalerAttr]()
+	textMarshalerType = reflect.TypeFor[encoding.TextMarshaler]()
 )
 
 // marshalValue writes one or more XML elements representing val.
@@ -543,7 +543,7 @@
 		}
 	}
 
-	// If a empty name was found, namespace is overridden with an empty space
+	// If an empty name was found, namespace is overridden with an empty space
 	if tinfo.xmlname != nil && start.Name.Space == "" &&
 		tinfo.xmlname.xmlns == "" && tinfo.xmlname.name == "" &&
 		len(p.tags) != 0 && p.tags[len(p.tags)-1].Space != "" {
@@ -797,7 +797,7 @@
 		// [...]byte
 		var bytes []byte
 		if val.CanAddr() {
-			bytes = val.Slice(0, val.Len()).Bytes()
+			bytes = val.Bytes()
 		} else {
 			bytes = make([]byte, val.Len())
 			reflect.Copy(reflect.ValueOf(bytes), val)
@@ -1106,7 +1106,7 @@
 	return nil
 }
 
-// UnsupportedTypeError is returned when Marshal encounters a type
+// UnsupportedTypeError is returned when [Marshal] encounters a type
 // that cannot be converted into XML.
 type UnsupportedTypeError struct {
 	Type reflect.Type
@@ -1120,16 +1120,12 @@
 	switch v.Kind() {
 	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
 		return v.Len() == 0
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Interface, reflect.Pointer:
-		return v.IsNil()
+	case reflect.Bool,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+		reflect.Float32, reflect.Float64,
+		reflect.Interface, reflect.Pointer:
+		return v.IsZero()
 	}
 	return false
 }
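
The isEmptyValue rewrite above folds the per-kind zero checks into reflect.Value.IsZero without changing which fields ",omitempty" drops; a small sketch (not part of the patch) of the observable behavior:

	package main

	import (
		"encoding/xml"
		"fmt"
	)

	type Item struct {
		Name  string  `xml:"name"`
		Count int     `xml:"count,omitempty"` // zero int is omitted
		Price float64 `xml:"price,omitempty"` // zero float is omitted
		Note  *string `xml:"note,omitempty"`  // nil pointer is omitted
	}

	func main() {
		out, err := xml.Marshal(Item{Name: "widget"})
		if err != nil {
			panic(err)
		}
		fmt.Println(string(out)) // <Item><name>widget</name></Item>
	}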
diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go
index c1c843e..3cc4968 100644
--- a/src/encoding/xml/read.go
+++ b/src/encoding/xml/read.go
@@ -19,7 +19,7 @@
 // an XML element is an order-dependent collection of anonymous
 // values, while a data structure is an order-independent collection
 // of named values.
-// See package json for a textual representation more suitable
+// See [encoding/json] for a textual representation more suitable
 // to data structures.
 
 // Unmarshal parses the XML-encoded data and stores the result in
@@ -96,7 +96,7 @@
 // If Unmarshal encounters a field type that implements the Unmarshaler
 // interface, Unmarshal calls its UnmarshalXML method to produce the value from
 // the XML element.  Otherwise, if the value implements
-// encoding.TextUnmarshaler, Unmarshal calls that value's UnmarshalText method.
+// [encoding.TextUnmarshaler], Unmarshal calls that value's UnmarshalText method.
 //
 // Unmarshal maps an XML element to a string or []byte by saving the
 // concatenation of that element's character data in the string or
@@ -105,7 +105,7 @@
 // Unmarshal maps an attribute value to a string or []byte by saving
 // the value in the string or slice.
 //
-// Unmarshal maps an attribute value to an Attr by saving the attribute,
+// Unmarshal maps an attribute value to an [Attr] by saving the attribute,
 // including its name, in the Attr.
 //
 // Unmarshal maps an XML element or attribute value to a slice by
@@ -134,16 +134,16 @@
 	return NewDecoder(bytes.NewReader(data)).Decode(v)
 }
 
-// Decode works like Unmarshal, except it reads the decoder
+// Decode works like [Unmarshal], except it reads the decoder
 // stream to find the start element.
 func (d *Decoder) Decode(v any) error {
 	return d.DecodeElement(v, nil)
 }
 
-// DecodeElement works like Unmarshal except that it takes
+// DecodeElement works like [Unmarshal] except that it takes
 // a pointer to the start XML element to decode into v.
 // It is useful when a client reads some raw XML tokens itself
-// but also wants to defer to Unmarshal for some elements.
+// but also wants to defer to [Unmarshal] for some elements.
 func (d *Decoder) DecodeElement(v any, start *StartElement) error {
 	val := reflect.ValueOf(v)
 	if val.Kind() != reflect.Pointer {
@@ -184,7 +184,7 @@
 // an XML attribute description of themselves.
 //
 // UnmarshalXMLAttr decodes a single XML attribute.
-// If it returns an error, the outer call to Unmarshal stops and
+// If it returns an error, the outer call to [Unmarshal] stops and
 // returns that error.
 // UnmarshalXMLAttr is used only for struct fields with the
 // "attr" option in the field tag.
@@ -304,10 +304,10 @@
 }
 
 var (
-	attrType            = reflect.TypeOf(Attr{})
-	unmarshalerType     = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
-	unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
-	textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+	attrType            = reflect.TypeFor[Attr]()
+	unmarshalerType     = reflect.TypeFor[Unmarshaler]()
+	unmarshalerAttrType = reflect.TypeFor[UnmarshalerAttr]()
+	textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()
 )
 
 const (
diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go
index 3e85fca..ce99894 100644
--- a/src/encoding/xml/read_test.go
+++ b/src/encoding/xml/read_test.go
@@ -326,10 +326,10 @@
 var badPathTests = []struct {
 	v, e any
 }{
-	{&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}},
-	{&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}},
-	{&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}},
-	{&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}},
+	{&BadPathTestA{}, &TagPathError{reflect.TypeFor[BadPathTestA](), "First", "items>item1", "Second", "items"}},
+	{&BadPathTestB{}, &TagPathError{reflect.TypeFor[BadPathTestB](), "First", "items>item1", "Second", "items>item1>value"}},
+	{&BadPathTestC{}, &TagPathError{reflect.TypeFor[BadPathTestC](), "First", "", "Second", "First"}},
+	{&BadPathTestD{}, &TagPathError{reflect.TypeFor[BadPathTestD](), "First", "", "Second", "First"}},
 }
 
 func TestUnmarshalBadPaths(t *testing.T) {
diff --git a/src/encoding/xml/typeinfo.go b/src/encoding/xml/typeinfo.go
index 2f123fd..b18ed28 100644
--- a/src/encoding/xml/typeinfo.go
+++ b/src/encoding/xml/typeinfo.go
@@ -46,7 +46,7 @@
 
 var tinfoMap sync.Map // map[reflect.Type]*typeInfo
 
-var nameType = reflect.TypeOf(Name{})
+var nameType = reflect.TypeFor[Name]()
 
 // getTypeInfo returns the typeInfo structure with details necessary
 // for marshaling and unmarshaling typ.
@@ -251,13 +251,6 @@
 	return nil
 }
 
-func min(a, b int) int {
-	if a <= b {
-		return a
-	}
-	return b
-}
-
 // addFieldInfo adds finfo to tinfo.fields if there are no
 // conflicts, or if conflicts arise from previous fields that were
 // obtained from deeper embedded structures than finfo. In the latter
diff --git a/src/encoding/xml/xml.go b/src/encoding/xml/xml.go
index d121986..73eedad 100644
--- a/src/encoding/xml/xml.go
+++ b/src/encoding/xml/xml.go
@@ -34,7 +34,7 @@
 
 // A Name represents an XML name (Local) annotated
 // with a name space identifier (Space).
-// In tokens returned by Decoder.Token, the Space identifier
+// In tokens returned by [Decoder.Token], the Space identifier
 // is given as a canonical URL, not the short prefix used
 // in the document being parsed.
 type Name struct {
@@ -48,7 +48,7 @@
 }
 
 // A Token is an interface holding one of the token types:
-// StartElement, EndElement, CharData, Comment, ProcInst, or Directive.
+// [StartElement], [EndElement], [CharData], [Comment], [ProcInst], or [Directive].
 type Token any
 
 // A StartElement represents an XML start element.
@@ -127,14 +127,14 @@
 }
 
 // A TokenReader is anything that can decode a stream of XML tokens, including a
-// Decoder.
+// [Decoder].
 //
 // When Token encounters an error or end-of-file condition after successfully
 // reading a token, it returns the token. It may return the (non-nil) error from
 // the same call or return the error (and a nil token) from a subsequent call.
 // An instance of this general case is that a TokenReader returning a non-nil
 // token at the end of the token stream may return either io.EOF or a nil error.
-// The next Read should return nil, io.EOF.
+// The next Read should return nil, [io.EOF].
 //
 // Implementations of Token are discouraged from returning a nil token with a
 // nil error. Callers should treat a return of nil, nil as indicating that
@@ -216,7 +216,7 @@
 }
 
 // NewDecoder creates a new XML parser reading from r.
-// If r does not implement io.ByteReader, NewDecoder will
+// If r does not implement [io.ByteReader], NewDecoder will
 // do its own buffering.
 func NewDecoder(r io.Reader) *Decoder {
 	d := &Decoder{
@@ -246,28 +246,28 @@
 }
 
 // Token returns the next XML token in the input stream.
-// At the end of the input stream, Token returns nil, io.EOF.
+// At the end of the input stream, Token returns nil, [io.EOF].
 //
 // Slices of bytes in the returned token data refer to the
 // parser's internal buffer and remain valid only until the next
-// call to Token. To acquire a copy of the bytes, call CopyToken
+// call to Token. To acquire a copy of the bytes, call [CopyToken]
 // or the token's Copy method.
 //
 // Token expands self-closing elements such as <br>
 // into separate start and end elements returned by successive calls.
 //
-// Token guarantees that the StartElement and EndElement
+// Token guarantees that the [StartElement] and [EndElement]
 // tokens it returns are properly nested and matched:
 // if Token encounters an unexpected end element
 // or EOF before all expected end elements,
 // it will return an error.
 //
-// If CharsetReader is called and returns an error,
+// If [Decoder.CharsetReader] is called and returns an error,
 // the error is wrapped and returned.
 //
 // Token implements XML name spaces as described by
 // https://www.w3.org/TR/REC-xml-names/. Each of the
-// Name structures contained in the Token has the Space
+// [Name] structures contained in the Token has the Space
 // set to the URL identifying its name space when known.
 // If Token encounters an unrecognized name space prefix,
 // it uses the prefix as the Space rather than report an error.
@@ -534,7 +534,7 @@
 
 var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method")
 
-// RawToken is like Token but does not verify that
+// RawToken is like [Decoder.Token] but does not verify that
 // start and end elements match and does not translate
 // name space prefixes to their corresponding URLs.
 func (d *Decoder) RawToken() (Token, error) {
@@ -1596,7 +1596,7 @@
 // HTMLEntity is an entity map containing translations for the
 // standard HTML entity characters.
 //
-// See the Decoder.Strict and Decoder.Entity fields' documentation.
+// See the [Decoder.Strict] and [Decoder.Entity] fields' documentation.
 var HTMLEntity map[string]string = htmlEntity
 
 var htmlEntity = map[string]string{
@@ -1865,7 +1865,7 @@
 // HTMLAutoClose is the set of HTML elements that
 // should be considered to close automatically.
 //
-// See the Decoder.Strict and Decoder.Entity fields' documentation.
+// See the [Decoder.Strict] and [Decoder.Entity] fields' documentation.
 var HTMLAutoClose []string = htmlAutoClose
 
 var htmlAutoClose = []string{
@@ -1993,9 +1993,9 @@
 	p.WriteString(s[last:])
 }
 
-// Escape is like EscapeText but omits the error return value.
+// Escape is like [EscapeText] but omits the error return value.
 // It is provided for backwards compatibility with Go 1.0.
-// Code targeting Go 1.1 or later should use EscapeText.
+// Code targeting Go 1.1 or later should use [EscapeText].
 func Escape(w io.Writer, s []byte) {
 	EscapeText(w, s)
 }
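
A minimal sketch (not part of the patch) of the Escape/EscapeText pair referenced above: EscapeText reports write errors, while Escape keeps the Go 1.0 signature and drops them.

	package main

	import (
		"encoding/xml"
		"os"
	)

	func main() {
		// Writes: &lt;a href=&#34;x&#34;&gt;&amp;&lt;/a&gt;
		if err := xml.EscapeText(os.Stdout, []byte(`<a href="x">&</a>`)); err != nil {
			panic(err)
		}
	}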
diff --git a/src/errors/errors.go b/src/errors/errors.go
index 4139777..9e3860a 100644
--- a/src/errors/errors.go
+++ b/src/errors/errors.go
@@ -26,7 +26,7 @@
 // itself followed by the tree of each of its children in turn
 // (pre-order, depth-first traversal).
 //
-// Is examines the tree of its first argument looking for an error that
+// [Is] examines the tree of its first argument looking for an error that
 // matches the second. It reports whether it finds a match. It should be
 // used in preference to simple equality checks:
 //
@@ -38,7 +38,7 @@
 //
 // because the former will succeed if err wraps [io/fs.ErrExist].
 //
-// As examines the tree of its first argument looking for an error that can be
+// [As] examines the tree of its first argument looking for an error that can be
 // assigned to its second argument, which must be a pointer. If it succeeds, it
 // performs the assignment and returns true. Otherwise, it returns false. The form
 //
@@ -80,7 +80,7 @@
 //
 //	errors.Is(err, errors.ErrUnsupported)
 //
-// either by directly wrapping ErrUnsupported or by implementing an Is method.
+// either by directly wrapping ErrUnsupported or by implementing an [Is] method.
 //
 // Functions and methods should document the cases in which an error
 // wrapping this will be returned.
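
A runnable sketch (not part of the patch) of the Is-versus-equality point made in the package comment above, using fs.ErrNotExist in place of fs.ErrExist:

	package main

	import (
		"errors"
		"fmt"
		"io/fs"
		"os"
	)

	func main() {
		_, err := os.Open("no-such-file")

		// Is walks err's tree, so it matches even though os.Open
		// returns a *fs.PathError wrapping the sentinel.
		fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

		// Plain equality compares the wrapper itself and fails.
		fmt.Println(err == fs.ErrNotExist) // false
	}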
diff --git a/src/errors/example_test.go b/src/errors/example_test.go
index beb5edc..1976f05 100644
--- a/src/errors/example_test.go
+++ b/src/errors/example_test.go
@@ -105,7 +105,7 @@
 	err2 := fmt.Errorf("error2: [%w]", err1)
 	fmt.Println(err2)
 	fmt.Println(errors.Unwrap(err2))
-	// Output
+	// Output:
 	// error2: [error1]
 	// error1
 }
diff --git a/src/errors/join.go b/src/errors/join.go
index 1c486d5..349fc06 100644
--- a/src/errors/join.go
+++ b/src/errors/join.go
@@ -4,6 +4,10 @@
 
 package errors
 
+import (
+	"unsafe"
+)
+
 // Join returns an error that wraps the given errors.
 // Any nil error values are discarded.
 // Join returns nil if every value in errs is nil.
@@ -38,14 +42,19 @@
 }
 
 func (e *joinError) Error() string {
-	var b []byte
-	for i, err := range e.errs {
-		if i > 0 {
-			b = append(b, '\n')
-		}
+	// Since Join returns nil if every value in errs is nil,
+	// e.errs cannot be empty.
+	if len(e.errs) == 1 {
+		return e.errs[0].Error()
+	}
+
+	b := []byte(e.errs[0].Error())
+	for _, err := range e.errs[1:] {
+		b = append(b, '\n')
 		b = append(b, err.Error()...)
 	}
-	return string(b)
+	// At this point, b has at least one byte '\n'.
+	return unsafe.String(&b[0], len(b))
 }
 
 func (e *joinError) Unwrap() []error {
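
A short sketch (not part of the patch) of the joinError.Error contract the rewrite above preserves: nil values are discarded, messages are newline-joined, and Join of only nils is nil.

	package main

	import (
		"errors"
		"fmt"
	)

	func main() {
		err1 := errors.New("first")
		err2 := errors.New("second")

		joined := errors.Join(err1, nil, err2)
		fmt.Printf("%q\n", joined.Error()) // "first\nsecond"

		fmt.Println(errors.Join(nil, nil) == nil) // true
	}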
diff --git a/src/errors/wrap.go b/src/errors/wrap.go
index 2c934ee..88ee0a9 100644
--- a/src/errors/wrap.go
+++ b/src/errors/wrap.go
@@ -27,8 +27,8 @@
 // Is reports whether any error in err's tree matches target.
 //
 // The tree consists of err itself, followed by the errors obtained by repeatedly
-// calling Unwrap. When err wraps multiple errors, Is examines err followed by a
-// depth-first traversal of its children.
+// calling its Unwrap() error or Unwrap() []error method. When err wraps multiple
+// errors, Is examines err followed by a depth-first traversal of its children.
 //
 // An error is considered to match a target if it is equal to that target or if
 // it implements a method Is(error) bool such that Is(target) returns true.
@@ -40,15 +40,19 @@
 //
 // then Is(MyError{}, fs.ErrExist) returns true. See [syscall.Errno.Is] for
 // an example in the standard library. An Is method should only shallowly
-// compare err and the target and not call Unwrap on either.
+// compare err and the target and not call [Unwrap] on either.
 func Is(err, target error) bool {
 	if target == nil {
 		return err == target
 	}
 
 	isComparable := reflectlite.TypeOf(target).Comparable()
+	return is(err, target, isComparable)
+}
+
+func is(err, target error, targetComparable bool) bool {
 	for {
-		if isComparable && err == target {
+		if targetComparable && err == target {
 			return true
 		}
 		if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
@@ -62,7 +66,7 @@
 			}
 		case interface{ Unwrap() []error }:
 			for _, err := range x.Unwrap() {
-				if Is(err, target) {
+				if is(err, target, targetComparable) {
 					return true
 				}
 			}
@@ -77,8 +81,8 @@
 // target to that error value and returns true. Otherwise, it returns false.
 //
 // The tree consists of err itself, followed by the errors obtained by repeatedly
-// calling Unwrap. When err wraps multiple errors, As examines err followed by a
-// depth-first traversal of its children.
+// calling its Unwrap() error or Unwrap() []error method. When err wraps multiple
+// errors, As examines err followed by a depth-first traversal of its children.
 //
 // An error matches target if the error's concrete value is assignable to the value
 // pointed to by target, or if the error has a method As(interface{}) bool such that
@@ -106,9 +110,13 @@
 	if targetType.Kind() != reflectlite.Interface && !targetType.Implements(errorType) {
 		panic("errors: *target must be interface or implement error")
 	}
+	return as(err, target, val, targetType)
+}
+
+func as(err error, target any, targetVal reflectlite.Value, targetType reflectlite.Type) bool {
 	for {
 		if reflectlite.TypeOf(err).AssignableTo(targetType) {
-			val.Elem().Set(reflectlite.ValueOf(err))
+			targetVal.Elem().Set(reflectlite.ValueOf(err))
 			return true
 		}
 		if x, ok := err.(interface{ As(any) bool }); ok && x.As(target) {
@@ -122,7 +130,10 @@
 			}
 		case interface{ Unwrap() []error }:
 			for _, err := range x.Unwrap() {
-				if As(err, target) {
+				if err == nil {
+					continue
+				}
+				if as(err, target, targetVal, targetType) {
 					return true
 				}
 			}
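
A sketch (not part of the patch) of the multi-error traversal that the new is/as helpers above implement: Is and As follow Unwrap() []error children depth-first. The codeError type is hypothetical.

	package main

	import (
		"errors"
		"fmt"
	)

	// codeError is a hypothetical error type used only to give As a target.
	type codeError struct{ code int }

	func (e codeError) Error() string { return fmt.Sprintf("code %d", e.code) }

	func main() {
		sentinel := errors.New("sentinel")
		err := fmt.Errorf("wrapped: %w", errors.Join(sentinel, codeError{404}))

		fmt.Println(errors.Is(err, sentinel)) // true

		var ce codeError
		if errors.As(err, &ce) {
			fmt.Println(ce.code) // 404
		}
	}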
diff --git a/src/errors/wrap_test.go b/src/errors/wrap_test.go
index ca9dc0f..0a7bc5d 100644
--- a/src/errors/wrap_test.go
+++ b/src/errors/wrap_test.go
@@ -238,6 +238,27 @@
 	}
 }
 
+func BenchmarkIs(b *testing.B) {
+	err1 := errors.New("1")
+	err2 := multiErr{multiErr{multiErr{err1, errorT{"a"}}, errorT{"b"}}}
+
+	for i := 0; i < b.N; i++ {
+		if !errors.Is(err2, err1) {
+			b.Fatal("Is failed")
+		}
+	}
+}
+
+func BenchmarkAs(b *testing.B) {
+	err := multiErr{multiErr{multiErr{errors.New("a"), errorT{"a"}}, errorT{"b"}}}
+	for i := 0; i < b.N; i++ {
+		var target errorT
+		if !errors.As(err, &target) {
+			b.Fatal("As failed")
+		}
+	}
+}
+
 func TestUnwrap(t *testing.T) {
 	err1 := errors.New("1")
 	erra := wrapped{"wrap 2", err1}
diff --git a/src/expvar/expvar.go b/src/expvar/expvar.go
index 300d8c2..954d63d 100644
--- a/src/expvar/expvar.go
+++ b/src/expvar/expvar.go
@@ -23,7 +23,6 @@
 
 import (
 	"encoding/json"
-	"fmt"
 	"log"
 	"math"
 	"net/http"
@@ -31,9 +30,9 @@
 	"runtime"
 	"sort"
 	"strconv"
-	"strings"
 	"sync"
 	"sync/atomic"
+	"unicode/utf8"
 )
 
 // Var is an abstract type for all exported variables.
@@ -44,28 +43,37 @@
 	String() string
 }
 
-// Int is a 64-bit integer variable that satisfies the Var interface.
+type jsonVar interface {
+	// appendJSON appends the JSON representation of the receiver to b.
+	appendJSON(b []byte) []byte
+}
+
+// Int is a 64-bit integer variable that satisfies the [Var] interface.
 type Int struct {
-	i int64
+	i atomic.Int64
 }
 
 func (v *Int) Value() int64 {
-	return atomic.LoadInt64(&v.i)
+	return v.i.Load()
 }
 
 func (v *Int) String() string {
-	return strconv.FormatInt(atomic.LoadInt64(&v.i), 10)
+	return string(v.appendJSON(nil))
+}
+
+func (v *Int) appendJSON(b []byte) []byte {
+	return strconv.AppendInt(b, v.i.Load(), 10)
 }
 
 func (v *Int) Add(delta int64) {
-	atomic.AddInt64(&v.i, delta)
+	v.i.Add(delta)
 }
 
 func (v *Int) Set(value int64) {
-	atomic.StoreInt64(&v.i, value)
+	v.i.Store(value)
 }
 
-// Float is a 64-bit float variable that satisfies the Var interface.
+// Float is a 64-bit float variable that satisfies the [Var] interface.
 type Float struct {
 	f atomic.Uint64
 }
@@ -75,8 +83,11 @@
 }
 
 func (v *Float) String() string {
-	return strconv.FormatFloat(
-		math.Float64frombits(v.f.Load()), 'g', -1, 64)
+	return string(v.appendJSON(nil))
+}
+
+func (v *Float) appendJSON(b []byte) []byte {
+	return strconv.AppendFloat(b, math.Float64frombits(v.f.Load()), 'g', -1, 64)
 }
 
 // Add adds delta to v.
@@ -97,37 +108,58 @@
 	v.f.Store(math.Float64bits(value))
 }
 
-// Map is a string-to-Var map variable that satisfies the Var interface.
+// Map is a string-to-Var map variable that satisfies the [Var] interface.
 type Map struct {
 	m      sync.Map // map[string]Var
 	keysMu sync.RWMutex
 	keys   []string // sorted
 }
 
-// KeyValue represents a single entry in a Map.
+// KeyValue represents a single entry in a [Map].
 type KeyValue struct {
 	Key   string
 	Value Var
 }
 
 func (v *Map) String() string {
-	var b strings.Builder
-	fmt.Fprintf(&b, "{")
+	return string(v.appendJSON(nil))
+}
+
+func (v *Map) appendJSON(b []byte) []byte {
+	return v.appendJSONMayExpand(b, false)
+}
+
+func (v *Map) appendJSONMayExpand(b []byte, expand bool) []byte {
+	afterCommaDelim := byte(' ')
+	mayAppendNewline := func(b []byte) []byte { return b }
+	if expand {
+		afterCommaDelim = '\n'
+		mayAppendNewline = func(b []byte) []byte { return append(b, '\n') }
+	}
+
+	b = append(b, '{')
+	b = mayAppendNewline(b)
 	first := true
 	v.Do(func(kv KeyValue) {
 		if !first {
-			fmt.Fprintf(&b, ", ")
-		}
-		fmt.Fprintf(&b, "%q: ", kv.Key)
-		if kv.Value != nil {
-			fmt.Fprintf(&b, "%v", kv.Value)
-		} else {
-			fmt.Fprint(&b, "null")
+			b = append(b, ',', afterCommaDelim)
 		}
 		first = false
+		b = appendJSONQuote(b, kv.Key)
+		b = append(b, ':', ' ')
+		switch v := kv.Value.(type) {
+		case nil:
+			b = append(b, "null"...)
+		case jsonVar:
+			b = v.appendJSON(b)
+		default:
+			b = append(b, v.String()...)
+		}
 	})
-	fmt.Fprintf(&b, "}")
-	return b.String()
+	b = mayAppendNewline(b)
+	b = append(b, '}')
+	b = mayAppendNewline(b)
+	return b
 }
 
 // Init removes all keys from the map.
@@ -176,7 +208,7 @@
 	v.m.Store(key, av)
 }
 
-// Add adds delta to the *Int value stored under the given map key.
+// Add adds delta to the *[Int] value stored under the given map key.
 func (v *Map) Add(key string, delta int64) {
 	i, ok := v.m.Load(key)
 	if !ok {
@@ -193,7 +225,7 @@
 	}
 }
 
-// AddFloat adds delta to the *Float value stored under the given map key.
+// AddFloat adds delta to the *[Float] value stored under the given map key.
 func (v *Map) AddFloat(key string, delta float64) {
 	i, ok := v.m.Load(key)
 	if !ok {
@@ -234,7 +266,7 @@
 	}
 }
 
-// String is a string variable, and satisfies the Var interface.
+// String is a string variable, and satisfies the [Var] interface.
 type String struct {
 	s atomic.Value // string
 }
@@ -244,19 +276,21 @@
 	return p
 }
 
-// String implements the Var interface. To get the unquoted string
-// use Value.
+// String implements the [Var] interface. To get the unquoted string
+// use [String.Value].
 func (v *String) String() string {
-	s := v.Value()
-	b, _ := json.Marshal(s)
-	return string(b)
+	return string(v.appendJSON(nil))
+}
+
+func (v *String) appendJSON(b []byte) []byte {
+	return appendJSONQuote(b, v.Value())
 }
 
 func (v *String) Set(value string) {
 	v.s.Store(value)
 }
 
-// Func implements Var by calling the function
+// Func implements [Var] by calling the function
 // and formatting the returned value using JSON.
 type Func func() any
 
@@ -270,31 +304,25 @@
 }
 
 // All published variables.
-var (
-	vars      sync.Map // map[string]Var
-	varKeysMu sync.RWMutex
-	varKeys   []string // sorted
-)
+var vars Map
 
 // Publish declares a named exported variable. This should be called from a
 // package's init function when it creates its Vars. If the name is already
 // registered then this will log.Panic.
 func Publish(name string, v Var) {
-	if _, dup := vars.LoadOrStore(name, v); dup {
+	if _, dup := vars.m.LoadOrStore(name, v); dup {
 		log.Panicln("Reuse of exported var name:", name)
 	}
-	varKeysMu.Lock()
-	defer varKeysMu.Unlock()
-	varKeys = append(varKeys, name)
-	sort.Strings(varKeys)
+	vars.keysMu.Lock()
+	defer vars.keysMu.Unlock()
+	vars.keys = append(vars.keys, name)
+	sort.Strings(vars.keys)
 }
 
 // Get retrieves a named exported variable. It returns nil if the name has
 // not been registered.
 func Get(name string) Var {
-	i, _ := vars.Load(name)
-	v, _ := i.(Var)
-	return v
+	return vars.Get(name)
 }
 
 // Convenience functions for creating new exported variables.
@@ -327,26 +355,12 @@
 // The global variable map is locked during the iteration,
 // but existing entries may be concurrently updated.
 func Do(f func(KeyValue)) {
-	varKeysMu.RLock()
-	defer varKeysMu.RUnlock()
-	for _, k := range varKeys {
-		val, _ := vars.Load(k)
-		f(KeyValue{k, val.(Var)})
-	}
+	vars.Do(f)
 }
 
 func expvarHandler(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	fmt.Fprintf(w, "{\n")
-	first := true
-	Do(func(kv KeyValue) {
-		if !first {
-			fmt.Fprintf(w, ",\n")
-		}
-		first = false
-		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
-	})
-	fmt.Fprintf(w, "\n}\n")
+	w.Write(vars.appendJSONMayExpand(nil, true))
 }
 
 // Handler returns the expvar HTTP Handler.
@@ -371,3 +385,32 @@
 	Publish("cmdline", Func(cmdline))
 	Publish("memstats", Func(memstats))
 }
+
+// TODO: Use json.appendString instead.
+func appendJSONQuote(b []byte, s string) []byte {
+	const hex = "0123456789abcdef"
+	b = append(b, '"')
+	for _, r := range s {
+		switch {
+		case r < ' ' || r == '\\' || r == '"' || r == '<' || r == '>' || r == '&' || r == '\u2028' || r == '\u2029':
+			switch r {
+			case '\\', '"':
+				b = append(b, '\\', byte(r))
+			case '\n':
+				b = append(b, '\\', 'n')
+			case '\r':
+				b = append(b, '\\', 'r')
+			case '\t':
+				b = append(b, '\\', 't')
+			default:
+				b = append(b, '\\', 'u', hex[(r>>12)&0xf], hex[(r>>8)&0xf], hex[(r>>4)&0xf], hex[(r>>0)&0xf])
+			}
+		case r < utf8.RuneSelf:
+			b = append(b, byte(r))
+		default:
+			b = utf8.AppendRune(b, r)
+		}
+	}
+	b = append(b, '"')
+	return b
+}
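
The expvar rewrite above replaces fmt-based formatting with appendJSON methods but keeps the observable behavior: published variables still render as JSON. A small usage sketch (variable names are illustrative):

	package main

	import (
		"expvar"
		"fmt"
	)

	func main() {
		// Counters are typically published once, e.g. from an init function.
		hits := expvar.NewInt("hits")
		hits.Add(3)

		byPath := expvar.NewMap("hitsByPath")
		byPath.Add("/index", 2)
		byPath.Add("/about", 1)

		// String() still returns JSON, now built via the internal appendJSON path.
		fmt.Println(hits.String())   // 3
		fmt.Println(byPath.String()) // {"/about": 1, "/index": 2}

		// Get retrieves a published variable by name, or nil if absent.
		if v := expvar.Get("hits"); v != nil {
			fmt.Println(v.String())
		}
	}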
diff --git a/src/expvar/expvar_test.go b/src/expvar/expvar_test.go
index ee98b5e..b827c4d 100644
--- a/src/expvar/expvar_test.go
+++ b/src/expvar/expvar_test.go
@@ -22,12 +22,12 @@
 // RemoveAll removes all exported variables.
 // This is for tests only.
 func RemoveAll() {
-	varKeysMu.Lock()
-	defer varKeysMu.Unlock()
-	for _, k := range varKeys {
-		vars.Delete(k)
+	vars.keysMu.Lock()
+	defer vars.keysMu.Unlock()
+	for _, k := range vars.keys {
+		vars.m.Delete(k)
 	}
-	varKeys = nil
+	vars.keys = nil
 }
 
 func TestNil(t *testing.T) {
@@ -487,6 +487,28 @@
 	}
 }
 
+func BenchmarkMapString(b *testing.B) {
+	var m, m1, m2 Map
+	m.Set("map1", &m1)
+	m1.Add("a", 1)
+	m1.Add("z", 2)
+	m.Set("map2", &m2)
+	for i := 0; i < 9; i++ {
+		m2.Add(strconv.Itoa(i), int64(i))
+	}
+	var s1, s2 String
+	m.Set("str1", &s1)
+	s1.Set("hello, world!")
+	m.Set("str2", &s2)
+	s2.Set("fizz buzz")
+	b.ResetTimer()
+
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		_ = m.String()
+	}
+}
+
 func BenchmarkRealworldExpvarUsage(b *testing.B) {
 	var (
 		bytesSent Int
@@ -622,3 +644,20 @@
 	}
 	wg.Wait()
 }
+
+func TestAppendJSONQuote(t *testing.T) {
+	var b []byte
+	for i := 0; i < 128; i++ {
+		b = append(b, byte(i))
+	}
+	b = append(b, "\u2028\u2029"...)
+	got := string(appendJSONQuote(nil, string(b[:])))
+	want := `"` +
+		`\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\u0008\t\n\u000b\u000c\r\u000e\u000f` +
+		`\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
+		` !\"#$%\u0026'()*+,-./0123456789:;\u003c=\u003e?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_` +
+		"`" + `abcdefghijklmnopqrstuvwxyz{|}~` + "\x7f" + `\u2028\u2029"`
+	if got != want {
+		t.Errorf("appendJSONQuote mismatch:\ngot  %v\nwant %v", got, want)
+	}
+}
diff --git a/src/flag/flag.go b/src/flag/flag.go
index 9d3e8d3..1669e9a 100644
--- a/src/flag/flag.go
+++ b/src/flag/flag.go
@@ -7,7 +7,7 @@
 
 # Usage
 
-Define flags using flag.String(), Bool(), Int(), etc.
+Define flags using [flag.String], [Bool], [Int], etc.
 
 This declares an integer flag, -n, stored in the pointer nFlag, with type *int:
 
@@ -41,8 +41,8 @@
 	fmt.Println("flagvar has value ", flagvar)
 
 After parsing, the arguments following the flags are available as the
-slice flag.Args() or individually as flag.Arg(i).
-The arguments are indexed from 0 through flag.NArg()-1.
+slice [flag.Args] or individually as [flag.Arg](i).
+The arguments are indexed from 0 through [flag.NArg]-1.
 
 # Command line flag syntax
 
@@ -74,9 +74,9 @@
 Duration flags accept any input valid for time.ParseDuration.
 
 The default set of command-line flags is controlled by
-top-level functions.  The FlagSet type allows one to define
+top-level functions.  The [FlagSet] type allows one to define
 independent sets of flags, such as to implement subcommands
-in a command-line interface. The methods of FlagSet are
+in a command-line interface. The methods of [FlagSet] are
 analogous to the top-level functions for the command-line
 flag set.
 */
@@ -355,26 +355,26 @@
 // rather than using the next command-line argument.
 //
 // Set is called once, in command line order, for each flag present.
-// The flag package may call the String method with a zero-valued receiver,
+// The flag package may call the [String] method with a zero-valued receiver,
 // such as a nil pointer.
 type Value interface {
 	String() string
 	Set(string) error
 }
 
-// Getter is an interface that allows the contents of a Value to be retrieved.
-// It wraps the Value interface, rather than being part of it, because it
-// appeared after Go 1 and its compatibility rules. All Value types provided
-// by this package satisfy the Getter interface, except the type used by Func.
+// Getter is an interface that allows the contents of a [Value] to be retrieved.
+// It wraps the [Value] interface, rather than being part of it, because it
+// appeared after Go 1 and its compatibility rules. All [Value] types provided
+// by this package satisfy the [Getter] interface, except the type used by [Func].
 type Getter interface {
 	Value
 	Get() any
 }
 
-// ErrorHandling defines how FlagSet.Parse behaves if the parse fails.
+// ErrorHandling defines how [FlagSet.Parse] behaves if the parse fails.
 type ErrorHandling int
 
-// These constants cause FlagSet.Parse to behave as described if the parse fails.
+// These constants cause [FlagSet.Parse] to behave as described if the parse fails.
 const (
 	ContinueOnError ErrorHandling = iota // Return a descriptive error.
 	ExitOnError                          // Call os.Exit(2) or for -h/-help Exit(0).
@@ -382,9 +382,9 @@
 )
 
 // A FlagSet represents a set of defined flags. The zero value of a FlagSet
-// has no name and has ContinueOnError error handling.
+// has no name and has [ContinueOnError] error handling.
 //
-// Flag names must be unique within a FlagSet. An attempt to define a flag whose
+// [Flag] names must be unique within a FlagSet. An attempt to define a flag whose
 // name is already in use will cause a panic.
 type FlagSet struct {
 	// Usage is the function called when an error occurs while parsing flags.
@@ -426,7 +426,7 @@
 	return result
 }
 
-// Output returns the destination for usage and error messages. os.Stderr is returned if
+// Output returns the destination for usage and error messages. [os.Stderr] is returned if
 // output was not set or was set to nil.
 func (f *FlagSet) Output() io.Writer {
 	if f.output == nil {
@@ -446,7 +446,7 @@
 }
 
 // SetOutput sets the destination for usage and error messages.
-// If output is nil, os.Stderr is used.
+// If output is nil, [os.Stderr] is used.
 func (f *FlagSet) SetOutput(output io.Writer) {
 	f.output = output
 }
@@ -479,12 +479,12 @@
 	CommandLine.Visit(fn)
 }
 
-// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+// Lookup returns the [Flag] structure of the named flag, returning nil if none exists.
 func (f *FlagSet) Lookup(name string) *Flag {
 	return f.formal[name]
 }
 
-// Lookup returns the Flag structure of the named command-line flag,
+// Lookup returns the [Flag] structure of the named command-line flag,
 // returning nil if none exists.
 func Lookup(name string) *Flag {
 	return CommandLine.formal[name]
@@ -675,7 +675,7 @@
 //	-I directory
 //		search directory for include files.
 //
-// To change the destination for flag messages, call CommandLine.SetOutput.
+// To change the destination for flag messages, call [CommandLine].SetOutput.
 func PrintDefaults() {
 	CommandLine.PrintDefaults()
 }
@@ -695,14 +695,14 @@
 // for how to write your own usage function.
 
 // Usage prints a usage message documenting all defined command-line flags
-// to CommandLine's output, which by default is os.Stderr.
+// to [CommandLine]'s output, which by default is [os.Stderr].
 // It is called when an error occurs while parsing flags.
 // The function is a variable that may be changed to point to a custom function.
-// By default it prints a simple header and calls PrintDefaults; for details about the
-// format of the output and how to control it, see the documentation for PrintDefaults.
+// By default it prints a simple header and calls [PrintDefaults]; for details about the
+// format of the output and how to control it, see the documentation for [PrintDefaults].
 // Custom usage functions may choose to exit the program; by default exiting
 // happens anyway as the command line's error handling strategy is set to
-// ExitOnError.
+// [ExitOnError].
 var Usage = func() {
 	fmt.Fprintf(CommandLine.Output(), "Usage of %s:\n", os.Args[0])
 	PrintDefaults()
@@ -1002,10 +1002,10 @@
 }
 
 // Var defines a flag with the specified name and usage string. The type and
-// value of the flag are represented by the first argument, of type Value, which
-// typically holds a user-defined implementation of Value. For instance, the
+// value of the flag are represented by the first argument, of type [Value], which
+// typically holds a user-defined implementation of [Value]. For instance, the
 // caller could create a flag that turns a comma-separated string into a slice
-// of strings by giving the slice the methods of Value; in particular, Set would
+// of strings by giving the slice the methods of [Value]; in particular, [Set] would
 // decompose the comma-separated string into the slice.
 func (f *FlagSet) Var(value Value, name string, usage string) {
 	// Flag must not begin "-" or contain "=".
@@ -1037,10 +1037,10 @@
 }
 
 // Var defines a flag with the specified name and usage string. The type and
-// value of the flag are represented by the first argument, of type Value, which
-// typically holds a user-defined implementation of Value. For instance, the
+// value of the flag are represented by the first argument, of type [Value], which
+// typically holds a user-defined implementation of [Value]. For instance, the
 // caller could create a flag that turns a comma-separated string into a slice
-// of strings by giving the slice the methods of Value; in particular, Set would
+// of strings by giving the slice the methods of [Value]; in particular, [Set] would
 // decompose the comma-separated string into the slice.
 func Var(value Value, name string, usage string) {
 	CommandLine.Var(value, name, usage)
@@ -1147,9 +1147,9 @@
 }
 
 // Parse parses flag definitions from the argument list, which should not
-// include the command name. Must be called after all flags in the FlagSet
+// include the command name. Must be called after all flags in the [FlagSet]
 // are defined and before flags are accessed by the program.
-// The return value will be ErrHelp if -help or -h were set but not defined.
+// The return value will be [ErrHelp] if -help or -h were set but not defined.
 func (f *FlagSet) Parse(arguments []string) error {
 	f.parsed = true
 	f.args = arguments
@@ -1181,7 +1181,7 @@
 	return f.parsed
 }
 
-// Parse parses the command-line flags from os.Args[1:]. Must be called
+// Parse parses the command-line flags from [os.Args][1:]. Must be called
 // after all flags are defined and before flags are accessed by the program.
 func Parse() {
 	// Ignore errors; CommandLine is set for ExitOnError.
@@ -1193,8 +1193,8 @@
 	return CommandLine.Parsed()
 }
 
-// CommandLine is the default set of command-line flags, parsed from os.Args.
-// The top-level functions such as BoolVar, Arg, and so on are wrappers for the
+// CommandLine is the default set of command-line flags, parsed from [os.Args].
+// The top-level functions such as [BoolVar], [Arg], and so on are wrappers for the
 // methods of CommandLine.
 var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
 
@@ -1223,8 +1223,8 @@
 }
 
 // Init sets the name and error handling property for a flag set.
-// By default, the zero FlagSet uses an empty name and the
-// ContinueOnError error handling policy.
+// By default, the zero [FlagSet] uses an empty name and the
+// [ContinueOnError] error handling policy.
 func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
 	f.name = name
 	f.errorHandling = errorHandling
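
As the revised package comment notes, [FlagSet] supports independent flag sets such as subcommands alongside the default command-line set. A hedged sketch of that pattern (the command and flag names are made up):

	package main

	import (
		"flag"
		"fmt"
		"os"
	)

	func main() {
		// Top-level flags, parsed from os.Args[1:].
		verbose := flag.Bool("v", false, "verbose output")
		flag.Parse()

		// An independent FlagSet for a hypothetical "serve" subcommand.
		serveCmd := flag.NewFlagSet("serve", flag.ExitOnError)
		port := serveCmd.Int("port", 8080, "port to listen on")

		if flag.NArg() > 0 && flag.Arg(0) == "serve" {
			// Parse only the arguments that follow the subcommand name.
			serveCmd.Parse(flag.Args()[1:])
			fmt.Println("serve on port", *port, "verbose:", *verbose)
			return
		}
		fmt.Fprintln(os.Stderr, "usage: prog [-v] serve [-port N]")
	}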
diff --git a/src/flag/flag_test.go b/src/flag/flag_test.go
index 57c88f0..8e9ae31 100644
--- a/src/flag/flag_test.go
+++ b/src/flag/flag_test.go
@@ -701,7 +701,7 @@
 	}
 
 	for _, test := range tests {
-		cmd := exec.Command(os.Args[0], "-test.run=TestExitCode")
+		cmd := exec.Command(os.Args[0], "-test.run=^TestExitCode$")
 		cmd.Env = append(
 			os.Environ(),
 			"GO_CHILD_FLAG="+test.flag,
diff --git a/src/fmt/doc.go b/src/fmt/doc.go
index 9785ed9..1cda484 100644
--- a/src/fmt/doc.go
+++ b/src/fmt/doc.go
@@ -180,7 +180,8 @@
 implements the GoStringer interface, that will be invoked.
 
 If the format (which is implicitly %v for Println etc.) is valid
-for a string (%s %q %v %x %X), the following two rules apply:
+for a string (%s %q %x %X), or is %v but not %#v,
+the following two rules apply:
 
 4. If an operand implements the error interface, the Error method
 will be invoked to convert the object to a string, which will then
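
The doc change above narrows rule 4: the Error and String methods are consulted for the string verbs and for plain %v, but not for %#v. A small sketch illustrating the distinction, with a hypothetical error type:

	package main

	import "fmt"

	type myErr struct{ Code int }

	func (e *myErr) Error() string { return fmt.Sprintf("failed with code %d", e.Code) }

	func main() {
		err := &myErr{Code: 7}

		fmt.Printf("%v\n", err)  // failed with code 7  (Error method is used)
		fmt.Printf("%s\n", err)  // failed with code 7
		fmt.Printf("%#v\n", err) // &main.myErr{Code:7} (Go-syntax form, Error not called)
	}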
diff --git a/src/fmt/print.go b/src/fmt/print.go
index 9c3bd3e..cb393bd 100644
--- a/src/fmt/print.go
+++ b/src/fmt/print.go
@@ -112,8 +112,8 @@
 	*b = append(*b, c)
 }
 
-func (bp *buffer) writeRune(r rune) {
-	*bp = utf8.AppendRune(*bp, r)
+func (b *buffer) writeRune(r rune) {
+	*b = utf8.AppendRune(*b, r)
 }
 
 // pp is used to store a printer's state and is reused with sync.Pool to avoid allocations.
@@ -336,7 +336,7 @@
 }
 
 // getField gets the i'th field of the struct value.
-// If the field is itself is an interface, return a value for
+// If the field itself is a non-nil interface, return a value for
 // the thing inside the interface, not the interface itself.
 func getField(v reflect.Value, i int) reflect.Value {
 	val := v.Field(i)
@@ -550,7 +550,7 @@
 	var u uintptr
 	switch value.Kind() {
 	case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.Slice, reflect.UnsafePointer:
-		u = value.Pointer()
+		u = uintptr(value.UnsafePointer())
 	default:
 		p.badVerb(verb)
 		return
@@ -872,12 +872,10 @@
 			t := f.Type()
 			if t.Elem().Kind() == reflect.Uint8 {
 				var bytes []byte
-				if f.Kind() == reflect.Slice {
+				if f.Kind() == reflect.Slice || f.CanAddr() {
 					bytes = f.Bytes()
-				} else if f.CanAddr() {
-					bytes = f.Slice(0, f.Len()).Bytes()
 				} else {
-					// We have an array, but we cannot Slice() a non-addressable array,
+					// We have an array, but we cannot Bytes() a non-addressable array,
 					// so we build a slice by hand. This is a rare case but it would be nice
 					// if reflection could help a little more.
 					bytes = make([]byte, f.Len())
@@ -916,7 +914,7 @@
 	case reflect.Pointer:
 		// pointer to array or slice or struct? ok at top level
 		// but not embedded (avoid loops)
-		if depth == 0 && f.Pointer() != 0 {
+		if depth == 0 && f.UnsafePointer() != nil {
 			switch a := f.Elem(); a.Kind() {
 			case reflect.Array, reflect.Slice, reflect.Struct, reflect.Map:
 				p.buf.writeByte('&')
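
The %x byte path above now calls f.Bytes() whenever the value is a byte slice or an addressable byte array, relying on reflect.Value.Bytes accepting addressable arrays. A brief, illustrative sketch of that reflect behavior:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		arr := [4]byte{0xde, 0xad, 0xbe, 0xef}

		// Addressable array: reached through a pointer, so Bytes() works.
		v := reflect.ValueOf(&arr).Elem()
		fmt.Printf("%x\n", v.Bytes()) // deadbeef

		// Non-addressable array: Bytes() is not available, which is why the
		// printer falls back to copying the bytes by hand in that case.
		nv := reflect.ValueOf(arr)
		fmt.Println(nv.CanAddr()) // false
	}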
diff --git a/src/go.mod b/src/go.mod
index 3b24053..c18ae77 100644
--- a/src/go.mod
+++ b/src/go.mod
@@ -1,13 +1,13 @@
 module std
 
-go 1.21
+go 1.22
 
 require (
-	golang.org/x/crypto v0.11.1-0.20230711161743-2e82bdd1719d
-	golang.org/x/net v0.12.1-0.20231027154334-5ca955b1789c
+	golang.org/x/crypto v0.16.1-0.20231129163542-152cdb1503eb
+	golang.org/x/net v0.19.0
 )
 
 require (
-	golang.org/x/sys v0.10.0 // indirect
-	golang.org/x/text v0.11.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
 )
diff --git a/src/go.sum b/src/go.sum
index caf8ff0..7c35198 100644
--- a/src/go.sum
+++ b/src/go.sum
@@ -1,8 +1,8 @@
-golang.org/x/crypto v0.11.1-0.20230711161743-2e82bdd1719d h1:LiA25/KWKuXfIq5pMIBq1s5hz3HQxhJJSu/SUGlD+SM=
-golang.org/x/crypto v0.11.1-0.20230711161743-2e82bdd1719d/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
-golang.org/x/net v0.12.1-0.20231027154334-5ca955b1789c h1:d+VvAxu4S13DWtf73R5eY//VaCk3aUcVdyYjM1SX7zw=
-golang.org/x/net v0.12.1-0.20231027154334-5ca955b1789c/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
-golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
-golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/crypto v0.16.1-0.20231129163542-152cdb1503eb h1:1ceSY7sk6sJuiDREHpfyrqDnDljsLfEP2GuTClhBBfI=
+golang.org/x/crypto v0.16.1-0.20231129163542-152cdb1503eb/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
diff --git a/src/go/ast/ast.go b/src/go/ast/ast.go
index c439052..ef96cc4 100644
--- a/src/go/ast/ast.go
+++ b/src/go/ast/ast.go
@@ -59,7 +59,7 @@
 //
 // The Text field contains the comment text without carriage returns (\r) that
 // may have been present in the source. Because a comment's end position is
-// computed using len(Text), the position reported by End() does not match the
+// computed using len(Text), the position reported by [Comment.End] does not match the
 // true source end position for comments containing carriage returns.
 type Comment struct {
 	Slash token.Pos // position of "/" starting the comment
@@ -192,7 +192,7 @@
 // A Field represents a Field declaration list in a struct type,
 // a method list in an interface type, or a parameter/result declaration
 // in a signature.
-// Field.Names is nil for unnamed parameters (parameter lists which only contain types)
+// [Field.Names] is nil for unnamed parameters (parameter lists which only contain types)
 // and embedded struct fields. In the latter case, the field name is the type name.
 type Field struct {
 	Doc     *CommentGroup // associated documentation; or nil
@@ -257,7 +257,7 @@
 	return token.NoPos
 }
 
-// NumFields returns the number of parameters or struct fields represented by a FieldList.
+// NumFields returns the number of parameters or struct fields represented by a [FieldList].
 func (f *FieldList) NumFields() int {
 	n := 0
 	if f != nil {
@@ -287,7 +287,7 @@
 	Ident struct {
 		NamePos token.Pos // identifier position
 		Name    string    // identifier name
-		Obj     *Object   // denoted object; or nil
+		Obj     *Object   // denoted object, or nil. Deprecated: see Object.
 	}
 
 	// An Ellipsis node stands for the "..." type in a
@@ -575,7 +575,7 @@
 // ----------------------------------------------------------------------------
 // Convenience functions for Idents
 
-// NewIdent creates a new Ident without position.
+// NewIdent creates a new [Ident] without position.
 // Useful for ASTs generated by code other than the Go parser.
 func NewIdent(name string) *Ident { return &Ident{token.NoPos, name, nil} }
 
@@ -1028,12 +1028,12 @@
 // when a File's syntax tree is modified: For printing, comments are interspersed
 // between tokens based on their position. If syntax tree nodes are
 // removed or moved, relevant comments in their vicinity must also be removed
-// (from the File.Comments list) or moved accordingly (by updating their
-// positions). A CommentMap may be used to facilitate some of these operations.
+// (from the [File.Comments] list) or moved accordingly (by updating their
+// positions). A [CommentMap] may be used to facilitate some of these operations.
 //
 // Whether and how a comment is associated with a node depends on the
 // interpretation of the syntax tree by the manipulating program: Except for Doc
-// and Comment comments directly associated with nodes, the remaining comments
+// and [Comment] comments directly associated with nodes, the remaining comments
 // are "free-floating" (see also issues #18593, #20744).
 type File struct {
 	Doc     *CommentGroup // associated documentation; or nil
@@ -1042,9 +1042,9 @@
 	Decls   []Decl        // top-level declarations; or nil
 
 	FileStart, FileEnd token.Pos       // start and end of entire file
-	Scope              *Scope          // package scope (this file only)
+	Scope              *Scope          // package scope (this file only). Deprecated: see Object
 	Imports            []*ImportSpec   // imports in this file
-	Unresolved         []*Ident        // unresolved identifiers in this file
+	Unresolved         []*Ident        // unresolved identifiers in this file. Deprecated: see Object
 	Comments           []*CommentGroup // list of all comments in the source file
 	GoVersion          string          // minimum Go version required by //go:build or // +build directives
 }
@@ -1064,6 +1064,8 @@
 
 // A Package node represents a set of source files
 // collectively building a Go package.
+//
+// Deprecated: use the type checker [go/types] instead; see [Object].
 type Package struct {
 	Name    string             // package name
 	Scope   *Scope             // package scope across all files
@@ -1110,3 +1112,14 @@
 	}
 	return "", false
 }
+
+// Unparen returns the expression with any enclosing parentheses removed.
+func Unparen(e Expr) Expr {
+	for {
+		paren, ok := e.(*ParenExpr)
+		if !ok {
+			return e
+		}
+		e = paren.X
+	}
+}
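
Unparen, newly added above, strips any number of enclosing parentheses from an expression. A short usage sketch:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
	)

	func main() {
		// Parse a deliberately over-parenthesized expression.
		expr, err := parser.ParseExpr("((x + 1))")
		if err != nil {
			panic(err)
		}

		inner := ast.Unparen(expr)
		fmt.Printf("outer: %T, inner: %T\n", expr, inner)
		// outer: *ast.ParenExpr, inner: *ast.BinaryExpr
	}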
diff --git a/src/go/ast/commentmap.go b/src/go/ast/commentmap.go
index 4196e47..a9488f5 100644
--- a/src/go/ast/commentmap.go
+++ b/src/go/ast/commentmap.go
@@ -29,7 +29,7 @@
 }
 
 // A CommentMap maps an AST node to a list of comment groups
-// associated with it. See NewCommentMap for a description of
+// associated with it. See [NewCommentMap] for a description of
 // the association.
 type CommentMap map[Node][]*CommentGroup
 
diff --git a/src/go/ast/filter.go b/src/go/ast/filter.go
index c9e733a..5c12ed1 100644
--- a/src/go/ast/filter.go
+++ b/src/go/ast/filter.go
@@ -21,7 +21,7 @@
 // only exported nodes remain: all top-level identifiers which are not exported
 // and their associated information (such as type, initial value, or function
 // body) are removed. Non-exported fields and methods of exported types are
-// stripped. The File.Comments list is not changed.
+// stripped. The [File.Comments] list is not changed.
 //
 // FileExports reports whether there are exported declarations.
 func FileExports(src *File) bool {
@@ -246,7 +246,7 @@
 // interface method names, but not from parameter lists) that don't
 // pass through the filter f. If the declaration is empty afterwards,
 // the declaration is removed from the AST. Import declarations are
-// always removed. The File.Comments list is not changed.
+// always removed. The [File.Comments] list is not changed.
 //
 // FilterFile reports whether there are any top-level declarations
 // left after filtering.
@@ -293,7 +293,7 @@
 // ----------------------------------------------------------------------------
 // Merging of package files
 
-// The MergeMode flags control the behavior of MergePackageFiles.
+// The MergeMode flags control the behavior of [MergePackageFiles].
 type MergeMode uint
 
 const (
diff --git a/src/go/ast/print.go b/src/go/ast/print.go
index 85e6943..d1aad50 100644
--- a/src/go/ast/print.go
+++ b/src/go/ast/print.go
@@ -14,7 +14,7 @@
 	"reflect"
 )
 
-// A FieldFilter may be provided to Fprint to control the output.
+// A FieldFilter may be provided to [Fprint] to control the output.
 type FieldFilter func(name string, value reflect.Value) bool
 
 // NotNilFilter returns true for field values that are not nil;
@@ -32,7 +32,7 @@
 // to that file set. Otherwise positions are printed as integer
 // values (file set specific offsets).
 //
-// A non-nil FieldFilter f may be provided to control the output:
+// A non-nil [FieldFilter] f may be provided to control the output:
 // struct fields for which f(fieldname, fieldvalue) is true are
 // printed; all others are filtered from the output. Unexported
 // struct fields are never printed.
diff --git a/src/go/ast/resolve.go b/src/go/ast/resolve.go
index 970aa88..b1d0bae 100644
--- a/src/go/ast/resolve.go
+++ b/src/go/ast/resolve.go
@@ -58,18 +58,22 @@
 // check the map to see if it is already present in the imports map.
 // If so, the Importer can return the map entry. Otherwise, the
 // Importer should load the package data for the given path into
-// a new *Object (pkg), record pkg in the imports map, and then
+// a new *[Object] (pkg), record pkg in the imports map, and then
 // return pkg.
+//
+// Deprecated: use the type checker [go/types] instead; see [Object].
 type Importer func(imports map[string]*Object, path string) (pkg *Object, err error)
 
-// NewPackage creates a new Package node from a set of File nodes. It resolves
+// NewPackage creates a new [Package] node from a set of [File] nodes. It resolves
 // unresolved identifiers across files and updates each file's Unresolved list
 // accordingly. If a non-nil importer and universe scope are provided, they are
 // used to resolve identifiers not declared in any of the package files. Any
 // remaining unresolved identifiers are reported as undeclared. If the files
 // belong to different packages, one package name is selected and files with
 // different package names are reported and then ignored.
-// The result is a package node and a scanner.ErrorList if there were errors.
+// The result is a package node and a [scanner.ErrorList] if there were errors.
+//
+// Deprecated: use the type checker [go/types] instead; see [Object].
 func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error) {
 	var p pkgBuilder
 	p.fset = fset
diff --git a/src/go/ast/scope.go b/src/go/ast/scope.go
index 8882212..039ca58 100644
--- a/src/go/ast/scope.go
+++ b/src/go/ast/scope.go
@@ -15,6 +15,8 @@
 // A Scope maintains the set of named language entities declared
 // in the scope and a link to the immediately surrounding (outer)
 // scope.
+//
+// Deprecated: use the type checker [go/types] instead; see [Object].
 type Scope struct {
 	Outer   *Scope
 	Objects map[string]*Object
@@ -69,6 +71,19 @@
 //	Kind    Data type         Data value
 //	Pkg     *Scope            package scope
 //	Con     int               iota for the respective declaration
+//
+// Deprecated: The relationship between Idents and Objects cannot be
+// correctly computed without type information. For example, the
+// expression T{K: 0} may denote a struct, map, slice, or array
+// literal, depending on the type of T. If T is a struct, then K
+// refers to a field of T, whereas for the other types it refers to a
+// value in the environment.
+//
+// New programs should set the [parser.SkipObjectResolution] parser
+// flag to disable syntactic object resolution (which also saves CPU
+// and memory), and instead use the type checker [go/types] if object
+// resolution is desired. See the Defs, Uses, and Implicits fields of
+// the [types.Info] struct for details.
 type Object struct {
 	Kind ObjKind
 	Name string // declared name
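
The deprecation note above points to parser.SkipObjectResolution plus the go/types checker. A hedged sketch of that workflow (the package path and source text are made up):

	package main

	import (
		"fmt"
		"go/ast"
		"go/importer"
		"go/parser"
		"go/token"
		"go/types"
	)

	const src = "package p\nfunc add(a, b int) int { return a + b }\nvar total = add(1, 2)\n"

	func main() {
		fset := token.NewFileSet()
		// SkipObjectResolution: do not populate the deprecated ast.Object graph.
		f, err := parser.ParseFile(fset, "p.go", src, parser.SkipObjectResolution)
		if err != nil {
			panic(err)
		}

		info := &types.Info{
			Defs: make(map[*ast.Ident]types.Object),
			Uses: make(map[*ast.Ident]types.Object),
		}
		conf := types.Config{Importer: importer.Default()}
		if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
			panic(err)
		}

		// Identifier-to-object resolution now comes from the type checker.
		for id, obj := range info.Uses {
			fmt.Println(id.Name, "->", obj)
		}
	}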
diff --git a/src/go/ast/walk.go b/src/go/ast/walk.go
index a293c99..87e190f 100644
--- a/src/go/ast/walk.go
+++ b/src/go/ast/walk.go
@@ -6,8 +6,8 @@
 
 import "fmt"
 
-// A Visitor's Visit method is invoked for each node encountered by Walk.
-// If the result visitor w is not nil, Walk visits each of the children
+// A Visitor's Visit method is invoked for each node encountered by [Walk].
+// If the result visitor w is not nil, [Walk] visits each of the children
 // of node with the visitor w, followed by a call of w.Visit(nil).
 type Visitor interface {
 	Visit(node Node) (w Visitor)
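
The Visitor contract described above (visit the node, then its children with w, then w.Visit(nil)) is most often used indirectly through ast.Inspect, which wraps Walk with a function visitor. A small sketch:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
	)

	func main() {
		expr, err := parser.ParseExpr("f(a, b+1)")
		if err != nil {
			panic(err)
		}

		// Returning false from the callback stops descent into that node's children.
		ast.Inspect(expr, func(n ast.Node) bool {
			if id, ok := n.(*ast.Ident); ok {
				fmt.Println("ident:", id.Name)
			}
			return true
		})
		// ident: f
		// ident: a
		// ident: b
	}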
diff --git a/src/go/build/build.go b/src/go/build/build.go
index dd6cdc9..9ce3700 100644
--- a/src/go/build/build.go
+++ b/src/go/build/build.go
@@ -495,13 +495,13 @@
 	return p.Name == "main"
 }
 
-// ImportDir is like Import but processes the Go package found in
+// ImportDir is like [Import] but processes the Go package found in
 // the named directory.
 func (ctxt *Context) ImportDir(dir string, mode ImportMode) (*Package, error) {
 	return ctxt.Import(".", dir, mode)
 }
 
-// NoGoError is the error used by Import to describe a directory
+// NoGoError is the error used by [Import] to describe a directory
 // containing no buildable Go source files. (It may still contain
 // test files, files hidden by build tags, and so on.)
 type NoGoError struct {
@@ -549,7 +549,7 @@
 //   - files with build constraints not satisfied by the context
 //
 // If an error occurs, Import returns a non-nil error and a non-nil
-// *Package containing partial information.
+// *[Package] containing partial information.
 func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Package, error) {
 	p := &Package{
 		ImportPath: path,
@@ -1389,7 +1389,7 @@
 }
 
 // MatchFile reports whether the file with the given name in the given directory
-// matches the context and would be included in a Package created by ImportDir
+// matches the context and would be included in a [Package] created by [ImportDir]
 // of that directory.
 //
 // MatchFile considers the name of the file and may use ctxt.OpenFile to
@@ -1687,6 +1687,11 @@
 			continue
 		}
 
+		// #cgo (nocallback|noescape) <function name>
+		if fields := strings.Fields(line); len(fields) == 3 && (fields[1] == "nocallback" || fields[1] == "noescape") {
+			continue
+		}
+
 		// Split at colon.
 		line, argstr, ok := strings.Cut(strings.TrimSpace(line[4:]), ":")
 		if !ok {
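
The new branch above skips over the Go 1.22 `#cgo nocallback` and `#cgo noescape` annotations while scanning cgo directives. A hedged sketch of how such a preamble might look (the C function and package name are illustrative):

	package fastsum

	/*
	#cgo noescape c_sum
	#cgo nocallback c_sum

	int c_sum(int a, int b) { return a + b; }
	*/
	import "C"

	// Sum calls into C; the annotations promise that the arguments do not
	// escape and that c_sum never calls back into Go.
	func Sum(a, b int) int {
		return int(C.c_sum(C.int(a), C.int(b)))
	}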
diff --git a/src/go/build/constraint/expr.go b/src/go/build/constraint/expr.go
index 505cbff..e590123 100644
--- a/src/go/build/constraint/expr.go
+++ b/src/go/build/constraint/expr.go
@@ -17,7 +17,7 @@
 )
 
 // An Expr is a build tag constraint expression.
-// The underlying concrete type is *AndExpr, *OrExpr, *NotExpr, or *TagExpr.
+// The underlying concrete type is *[AndExpr], *[OrExpr], *[NotExpr], or *[TagExpr].
 type Expr interface {
 	// String returns the string form of the expression,
 	// using the boolean syntax used in //go:build lines.
@@ -33,7 +33,7 @@
 	isExpr()
 }
 
-// A TagExpr is an Expr for the single tag Tag.
+// A TagExpr is an [Expr] for the single tag Tag.
 type TagExpr struct {
 	Tag string // for example, “linux” or “cgo”
 }
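
The Expr interface documented above is typically obtained from a //go:build line via constraint.Parse and then evaluated against a tag set. A minimal sketch:

	package main

	import (
		"fmt"
		"go/build/constraint"
	)

	func main() {
		expr, err := constraint.Parse("//go:build linux && (amd64 || arm64)")
		if err != nil {
			panic(err)
		}

		// Pretend the build context defines these tags.
		tags := map[string]bool{"linux": true, "arm64": true}

		ok := expr.Eval(func(tag string) bool { return tags[tag] })
		fmt.Println(expr, "=>", ok) // linux && (amd64 || arm64) => true
	}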
diff --git a/src/go/build/constraint/vers.go b/src/go/build/constraint/vers.go
index 34c44dc..c842188 100644
--- a/src/go/build/constraint/vers.go
+++ b/src/go/build/constraint/vers.go
@@ -66,7 +66,7 @@
 		if z.Tag == "go1" {
 			return 0
 		}
-		_, v, _ := stringsCut(z.Tag, "go1.")
+		_, v, _ := strings.Cut(z.Tag, "go1.")
 		n, err := strconv.Atoi(v)
 		if err != nil {
 			// not a go1.N tag
@@ -76,14 +76,6 @@
 	}
 }
 
-// TODO: Delete, replace calls with strings.Cut once Go bootstrap toolchain is bumped.
-func stringsCut(s, sep string) (before, after string, found bool) {
-	if i := strings.Index(s, sep); i >= 0 {
-		return s[:i], s[i+len(sep):], true
-	}
-	return s, "", false
-}
-
 // andVersion returns the minimum Go version
 // implied by the AND of two minimum Go versions,
 // which is the max of the versions.
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index 592f2fd..7ce8d34 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -45,20 +45,26 @@
 	  internal/cpu, internal/goarch, internal/godebugs,
 	  internal/goexperiment, internal/goos,
 	  internal/goversion, internal/nettrace, internal/platform,
+	  internal/trace/traceviewer/format,
 	  log/internal,
 	  unicode/utf8, unicode/utf16, unicode,
 	  unsafe;
 
 	# These packages depend only on internal/goarch and unsafe.
 	internal/goarch, unsafe
-	< internal/abi;
+	< internal/abi, internal/chacha8rand;
 
 	unsafe < maps;
 
 	# RUNTIME is the core runtime group of packages, all of them very light-weight.
-	internal/abi, internal/cpu, internal/goarch,
-	internal/coverage/rtcov, internal/godebugs, internal/goexperiment,
-	internal/goos, unsafe
+	internal/abi,
+	internal/chacha8rand,
+	internal/coverage/rtcov,
+	internal/cpu,
+	internal/goarch,
+	internal/godebugs,
+	internal/goexperiment,
+	internal/goos
 	< internal/bytealg
 	< internal/itoa
 	< internal/unsafeheader
@@ -77,8 +83,18 @@
 	< internal/oserror, math/bits
 	< RUNTIME;
 
-	RUNTIME
-	< sort
+	internal/race
+	< iter;
+
+	# slices depends on unsafe for overlapping check, cmp for comparison
+	# semantics, and math/bits for calculating the bit length of numbers.
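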
+	unsafe, cmp, math/bits
+	< slices;
+
+	RUNTIME, slices
+	< sort;
+
+	sort
 	< container/heap;
 
 	RUNTIME
@@ -124,7 +140,7 @@
 	< math/cmplx;
 
 	MATH
-	< math/rand;
+	< math/rand, math/rand/v2;
 
 	MATH
 	< runtime/metrics;
@@ -203,7 +219,7 @@
 
 	# encodings
 	# core ones do not use fmt.
-	io, strconv
+	io, strconv, slices
 	< encoding;
 
 	encoding, reflect
@@ -223,11 +239,6 @@
 	< hash
 	< hash/adler32, hash/crc32, hash/crc64, hash/fnv;
 
-	# slices depends on unsafe for overlapping check, cmp for comparison
-	# semantics, and math/bits for # calculating bitlength of numbers.
-	unsafe, cmp, math/bits
-	< slices;
-
 	# math/big
 	FMT, encoding/binary, math/rand
 	< math/big;
@@ -268,6 +279,8 @@
 
 	# go parser and friends.
 	FMT
+	< internal/gover
+	< go/version
 	< go/token
 	< go/scanner
 	< go/ast
@@ -286,7 +299,10 @@
 	math/big, go/token
 	< go/constant;
 
-	container/heap, go/constant, go/parser, internal/goversion, internal/types/errors
+	FMT, internal/goexperiment
+	< internal/buildcfg;
+
+	container/heap, go/constant, go/parser, internal/buildcfg, internal/goversion, internal/types/errors
 	< go/types;
 
 	# The vast majority of standard library packages should not be resorting to regexp.
@@ -297,9 +313,6 @@
 	go/doc/comment, go/parser, internal/lazyregexp, text/template
 	< go/doc;
 
-	FMT, internal/goexperiment
-	< internal/buildcfg;
-
 	go/build/constraint, go/doc, go/parser, internal/buildcfg, internal/goroot, internal/goversion, internal/platform
 	< go/build;
 
@@ -562,7 +575,7 @@
 	< net/rpc/jsonrpc;
 
 	# System Information
-	internal/cpu, sync
+	bufio, bytes, internal/cpu, io, os, strings, sync
 	< internal/sysinfo;
 
 	# Test-only
@@ -570,15 +583,15 @@
 	< testing/iotest
 	< testing/fstest;
 
-	log/slog
-	< testing/slogtest;
-
 	FMT, flag, math/rand
 	< testing/quick;
 
 	FMT, DEBUG, flag, runtime/trace, internal/sysinfo, math/rand
 	< testing;
 
+	log/slog, testing
+	< testing/slogtest;
+
 	FMT, crypto/sha256, encoding/json, go/ast, go/parser, go/token,
 	internal/godebug, math/rand, encoding/hex, crypto/sha256
 	< internal/fuzz;
@@ -601,12 +614,46 @@
 	syscall
 	< os/exec/internal/fdtest;
 
-	FMT, container/heap, math/rand
-	< internal/trace;
-
 	FMT
 	< internal/diff, internal/txtar;
 
+	# v2 execution trace parser.
+	FMT
+	< internal/trace/v2/event;
+
+	internal/trace/v2/event
+	< internal/trace/v2/event/go122;
+
+	FMT, io, internal/trace/v2/event/go122
+	< internal/trace/v2/version;
+
+	FMT, encoding/binary, internal/trace/v2/version
+	< internal/trace/v2/raw;
+
+	FMT, encoding/binary, internal/trace/v2/version
+	< internal/trace/v2;
+
+	regexp, internal/trace/v2, internal/trace/v2/raw, internal/txtar
+	< internal/trace/v2/testtrace;
+
+	regexp, internal/txtar, internal/trace/v2, internal/trace/v2/raw
+	< internal/trace/v2/internal/testgen/go122;
+
+	FMT, container/heap, math/rand, internal/trace/v2
+	< internal/trace;
+
+	# cmd/trace dependencies.
+	FMT,
+	embed,
+	encoding/json,
+	html/template,
+	internal/profile,
+	internal/trace,
+	internal/trace/traceviewer/format,
+	net/http
+	< internal/trace/traceviewer;
+
+	# Coverage.
 	FMT, crypto/md5, encoding/binary, regexp, sort, text/tabwriter, unsafe,
 	internal/coverage, internal/coverage/uleb128
 	< internal/coverage/cmerge,
@@ -622,6 +669,9 @@
 	internal/coverage/cmerge
 	< internal/coverage/cformat;
 
+	internal/coverage, crypto/sha256, FMT
+	< cmd/internal/cov/covcmd;
+
     encoding/json,
 	runtime/debug,
 	internal/coverage/calloc,
diff --git a/src/go/build/doc.go b/src/go/build/doc.go
index cd1d3fd..da11e49 100644
--- a/src/go/build/doc.go
+++ b/src/go/build/doc.go
@@ -93,6 +93,6 @@
 // as end-user documentation.
 //
 // "go build" and other commands no longer support binary-only-packages.
-// Import and ImportDir will still set the BinaryOnly flag in packages
+// [Import] and [ImportDir] will still set the BinaryOnly flag in packages
 // containing these comments for use in tools and error messages.
 package build
diff --git a/src/go/constant/value.go b/src/go/constant/value.go
index ae300c7..3f1fd3c 100644
--- a/src/go/constant/value.go
+++ b/src/go/constant/value.go
@@ -25,7 +25,7 @@
 
 //go:generate stringer -type Kind
 
-// Kind specifies the kind of value represented by a Value.
+// Kind specifies the kind of value represented by a [Value].
 type Kind int
 
 const (
@@ -379,13 +379,13 @@
 // ----------------------------------------------------------------------------
 // Factories
 
-// MakeUnknown returns the Unknown value.
+// MakeUnknown returns the [Unknown] value.
 func MakeUnknown() Value { return unknownVal{} }
 
-// MakeBool returns the Bool value for b.
+// MakeBool returns the [Bool] value for b.
 func MakeBool(b bool) Value { return boolVal(b) }
 
-// MakeString returns the String value for s.
+// MakeString returns the [String] value for s.
 func MakeString(s string) Value {
 	if s == "" {
 		return &emptyString // common case
@@ -395,10 +395,10 @@
 
 var emptyString stringVal
 
-// MakeInt64 returns the Int value for x.
+// MakeInt64 returns the [Int] value for x.
 func MakeInt64(x int64) Value { return int64Val(x) }
 
-// MakeUint64 returns the Int value for x.
+// MakeUint64 returns the [Int] value for x.
 func MakeUint64(x uint64) Value {
 	if x < 1<<63 {
 		return int64Val(int64(x))
@@ -406,9 +406,9 @@
 	return intVal{newInt().SetUint64(x)}
 }
 
-// MakeFloat64 returns the Float value for x.
+// MakeFloat64 returns the [Float] value for x.
 // If x is -0.0, the result is 0.0.
-// If x is not finite, the result is an Unknown.
+// If x is not finite, the result is an [Unknown].
 func MakeFloat64(x float64) Value {
 	if math.IsInf(x, 0) || math.IsNaN(x) {
 		return unknownVal{}
@@ -421,9 +421,9 @@
 
 // MakeFromLiteral returns the corresponding integer, floating-point,
 // imaginary, character, or string value for a Go literal string. The
-// tok value must be one of token.INT, token.FLOAT, token.IMAG,
-// token.CHAR, or token.STRING. The final argument must be zero.
-// If the literal string syntax is invalid, the result is an Unknown.
+// tok value must be one of [token.INT], [token.FLOAT], [token.IMAG],
+// [token.CHAR], or [token.STRING]. The final argument must be zero.
+// If the literal string syntax is invalid, the result is an [Unknown].
 func MakeFromLiteral(lit string, tok token.Token, zero uint) Value {
 	if zero != 0 {
 		panic("MakeFromLiteral called with non-zero last argument")
@@ -475,8 +475,8 @@
 // For unknown arguments the result is the zero value for the respective
 // accessor type, except for Sign, where the result is 1.
 
-// BoolVal returns the Go boolean value of x, which must be a Bool or an Unknown.
-// If x is Unknown, the result is false.
+// BoolVal returns the Go boolean value of x, which must be a [Bool] or an [Unknown].
+// If x is [Unknown], the result is false.
 func BoolVal(x Value) bool {
 	switch x := x.(type) {
 	case boolVal:
@@ -488,8 +488,8 @@
 	}
 }
 
-// StringVal returns the Go string value of x, which must be a String or an Unknown.
-// If x is Unknown, the result is "".
+// StringVal returns the Go string value of x, which must be a [String] or an [Unknown].
+// If x is [Unknown], the result is "".
 func StringVal(x Value) string {
 	switch x := x.(type) {
 	case *stringVal:
@@ -502,8 +502,8 @@
 }
 
 // Int64Val returns the Go int64 value of x and whether the result is exact;
-// x must be an Int or an Unknown. If the result is not exact, its value is undefined.
-// If x is Unknown, the result is (0, false).
+// x must be an [Int] or an [Unknown]. If the result is not exact, its value is undefined.
+// If x is [Unknown], the result is (0, false).
 func Int64Val(x Value) (int64, bool) {
 	switch x := x.(type) {
 	case int64Val:
@@ -518,8 +518,8 @@
 }
 
 // Uint64Val returns the Go uint64 value of x and whether the result is exact;
-// x must be an Int or an Unknown. If the result is not exact, its value is undefined.
-// If x is Unknown, the result is (0, false).
+// x must be an [Int] or an [Unknown]. If the result is not exact, its value is undefined.
+// If x is [Unknown], the result is (0, false).
 func Uint64Val(x Value) (uint64, bool) {
 	switch x := x.(type) {
 	case int64Val:
@@ -533,7 +533,7 @@
 	}
 }
 
-// Float32Val is like Float64Val but for float32 instead of float64.
+// Float32Val is like [Float64Val] but for float32 instead of float64.
 func Float32Val(x Value) (float32, bool) {
 	switch x := x.(type) {
 	case int64Val:
@@ -555,10 +555,10 @@
 }
 
 // Float64Val returns the nearest Go float64 value of x and whether the result is exact;
-// x must be numeric or an Unknown, but not Complex. For values too small (too close to 0)
-// to represent as float64, Float64Val silently underflows to 0. The result sign always
+// x must be numeric or an [Unknown], but not [Complex]. For values too small (too close to 0)
+// to represent as float64, [Float64Val] silently underflows to 0. The result sign always
 // matches the sign of x, even for 0.
-// If x is Unknown, the result is (0, false).
+// If x is [Unknown], the result is (0, false).
 func Float64Val(x Value) (float64, bool) {
 	switch x := x.(type) {
 	case int64Val:
@@ -609,7 +609,7 @@
 	}
 }
 
-// Make returns the Value for x.
+// Make returns the [Value] for x.
 //
 //	type of x        result Kind
 //	----------------------------
@@ -640,8 +640,8 @@
 }
 
 // BitLen returns the number of bits required to represent
-// the absolute value x in binary representation; x must be an Int or an Unknown.
-// If x is Unknown, the result is 0.
+// the absolute value x in binary representation; x must be an [Int] or an [Unknown].
+// If x is [Unknown], the result is 0.
 func BitLen(x Value) int {
 	switch x := x.(type) {
 	case int64Val:
@@ -660,8 +660,8 @@
 }
 
 // Sign returns -1, 0, or 1 depending on whether x < 0, x == 0, or x > 0;
-// x must be numeric or Unknown. For complex values x, the sign is 0 if x == 0,
-// otherwise it is != 0. If x is Unknown, the result is 1.
+// x must be numeric or [Unknown]. For complex values x, the sign is 0 if x == 0,
+// otherwise it is != 0. If x is [Unknown], the result is 1.
 func Sign(x Value) int {
 	switch x := x.(type) {
 	case int64Val:
@@ -698,7 +698,7 @@
 )
 
 // Bytes returns the bytes for the absolute value of x in little-
-// endian binary representation; x must be an Int.
+// endian binary representation; x must be an [Int].
 func Bytes(x Value) []byte {
 	var t intVal
 	switch x := x.(type) {
@@ -729,7 +729,7 @@
 	return bytes[:i]
 }
 
-// MakeFromBytes returns the Int value given the bytes of its little-endian
+// MakeFromBytes returns the [Int] value given the bytes of its little-endian
 // binary representation. An empty byte slice argument represents 0.
 func MakeFromBytes(bytes []byte) Value {
 	words := make([]big.Word, (len(bytes)+(wordSize-1))/wordSize)
@@ -759,9 +759,9 @@
 	return makeInt(newInt().SetBits(words[:i]))
 }
 
-// Num returns the numerator of x; x must be Int, Float, or Unknown.
-// If x is Unknown, or if it is too large or small to represent as a
-// fraction, the result is Unknown. Otherwise the result is an Int
+// Num returns the numerator of x; x must be [Int], [Float], or [Unknown].
+// If x is [Unknown], or if it is too large or small to represent as a
+// fraction, the result is [Unknown]. Otherwise the result is an [Int]
 // with the same sign as x.
 func Num(x Value) Value {
 	switch x := x.(type) {
@@ -782,9 +782,9 @@
 	return unknownVal{}
 }
 
-// Denom returns the denominator of x; x must be Int, Float, or Unknown.
-// If x is Unknown, or if it is too large or small to represent as a
-// fraction, the result is Unknown. Otherwise the result is an Int >= 1.
+// Denom returns the denominator of x; x must be [Int], [Float], or [Unknown].
+// If x is [Unknown], or if it is too large or small to represent as a
+// fraction, the result is [Unknown]. Otherwise the result is an [Int] >= 1.
 func Denom(x Value) Value {
 	switch x := x.(type) {
 	case int64Val, intVal:
@@ -804,9 +804,9 @@
 	return unknownVal{}
 }
 
-// MakeImag returns the Complex value x*i;
-// x must be Int, Float, or Unknown.
-// If x is Unknown, the result is Unknown.
+// MakeImag returns the [Complex] value x*i;
+// x must be [Int], [Float], or [Unknown].
+// If x is [Unknown], the result is [Unknown].
 func MakeImag(x Value) Value {
 	switch x.(type) {
 	case unknownVal:
@@ -819,7 +819,7 @@
 }
 
 // Real returns the real part of x, which must be a numeric or unknown value.
-// If x is Unknown, the result is Unknown.
+// If x is [Unknown], the result is [Unknown].
 func Real(x Value) Value {
 	switch x := x.(type) {
 	case unknownVal, int64Val, intVal, ratVal, floatVal:
@@ -832,7 +832,7 @@
 }
 
 // Imag returns the imaginary part of x, which must be a numeric or unknown value.
-// If x is Unknown, the result is Unknown.
+// If x is [Unknown], the result is [Unknown].
 func Imag(x Value) Value {
 	switch x := x.(type) {
 	case unknownVal:
@@ -849,8 +849,8 @@
 // ----------------------------------------------------------------------------
 // Numeric conversions
 
-// ToInt converts x to an Int value if x is representable as an Int.
-// Otherwise it returns an Unknown.
+// ToInt converts x to an [Int] value if x is representable as an [Int].
+// Otherwise it returns an [Unknown].
 func ToInt(x Value) Value {
 	switch x := x.(type) {
 	case int64Val, intVal:
@@ -903,8 +903,8 @@
 	return unknownVal{}
 }
 
-// ToFloat converts x to a Float value if x is representable as a Float.
-// Otherwise it returns an Unknown.
+// ToFloat converts x to a [Float] value if x is representable as a [Float].
+// Otherwise it returns an [Unknown].
 func ToFloat(x Value) Value {
 	switch x := x.(type) {
 	case int64Val:
@@ -924,8 +924,8 @@
 	return unknownVal{}
 }
 
-// ToComplex converts x to a Complex value if x is representable as a Complex.
-// Otherwise it returns an Unknown.
+// ToComplex converts x to a [Complex] value if x is representable as a [Complex].
+// Otherwise it returns an [Unknown].
 func ToComplex(x Value) Value {
 	switch x := x.(type) {
 	case int64Val, intVal, ratVal, floatVal:
@@ -954,7 +954,7 @@
 // UnaryOp returns the result of the unary expression op y.
 // The operation must be defined for the operand.
 // If prec > 0 it specifies the ^ (xor) result size in bits.
-// If y is Unknown, the result is Unknown.
+// If y is [Unknown], the result is [Unknown].
 func UnaryOp(op token.Token, y Value, prec uint) Value {
 	switch op {
 	case token.ADD:
@@ -1093,12 +1093,12 @@
 
 // BinaryOp returns the result of the binary expression x op y.
 // The operation must be defined for the operands. If one of the
-// operands is Unknown, the result is Unknown.
-// BinaryOp doesn't handle comparisons or shifts; use Compare
-// or Shift instead.
+// operands is [Unknown], the result is [Unknown].
+// BinaryOp doesn't handle comparisons or shifts; use [Compare]
+// or [Shift] instead.
 //
-// To force integer division of Int operands, use op == token.QUO_ASSIGN
-// instead of token.QUO; the result is guaranteed to be Int in this case.
+// To force integer division of [Int] operands, use op == [token.QUO_ASSIGN]
+// instead of [token.QUO]; the result is guaranteed to be [Int] in this case.
 // Division by zero leads to a run-time panic.
 func BinaryOp(x_ Value, op token.Token, y_ Value) Value {
 	x, y := match(x_, y_)
@@ -1277,8 +1277,8 @@
 func quo(x, y Value) Value { return BinaryOp(x, token.QUO, y) }
 
 // Shift returns the result of the shift expression x op s
-// with op == token.SHL or token.SHR (<< or >>). x must be
-// an Int or an Unknown. If x is Unknown, the result is x.
+// with op == [token.SHL] or [token.SHR] (<< or >>). x must be
+// an [Int] or an [Unknown]. If x is [Unknown], the result is x.
 func Shift(x Value, op token.Token, s uint) Value {
 	switch x := x.(type) {
 	case unknownVal:
@@ -1332,7 +1332,7 @@
 
 // Compare returns the result of the comparison x op y.
 // The comparison must be defined for the operands.
-// If one of the operands is Unknown, the result is
+// If one of the operands is [Unknown], the result is
 // false.
 func Compare(x_ Value, op token.Token, y_ Value) bool {
 	x, y := match(x_, y_)
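
The doc comments above reference MakeFromLiteral, BinaryOp, Compare, and the token.QUO_ASSIGN trick for forcing integer division. A brief sketch of those calls:

	package main

	import (
		"fmt"
		"go/constant"
		"go/token"
	)

	func main() {
		x := constant.MakeFromLiteral("42", token.INT, 0)
		y := constant.MakeFromLiteral("5", token.INT, 0)

		// token.QUO would yield an exact rational; QUO_ASSIGN forces integer division.
		q := constant.BinaryOp(x, token.QUO_ASSIGN, y)
		r := constant.BinaryOp(x, token.REM, y)
		fmt.Println(q, r) // 8 2

		// Comparisons go through Compare rather than BinaryOp.
		fmt.Println(constant.Compare(x, token.GTR, y)) // true
	}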
diff --git a/src/go/doc/comment/html.go b/src/go/doc/comment/html.go
index bc076f6..9244509 100644
--- a/src/go/doc/comment/html.go
+++ b/src/go/doc/comment/html.go
@@ -10,13 +10,13 @@
 	"strconv"
 )
 
-// An htmlPrinter holds the state needed for printing a Doc as HTML.
+// An htmlPrinter holds the state needed for printing a [Doc] as HTML.
 type htmlPrinter struct {
 	*Printer
 	tight bool
 }
 
-// HTML returns an HTML formatting of the Doc.
+// HTML returns an HTML formatting of the [Doc].
 // See the [Printer] documentation for ways to customize the HTML output.
 func (p *Printer) HTML(d *Doc) []byte {
 	hp := &htmlPrinter{Printer: p}
diff --git a/src/go/doc/comment/parse.go b/src/go/doc/comment/parse.go
index 62a0f8f..4d5784a 100644
--- a/src/go/doc/comment/parse.go
+++ b/src/go/doc/comment/parse.go
@@ -5,7 +5,7 @@
 package comment
 
 import (
-	"sort"
+	"slices"
 	"strings"
 	"unicode"
 	"unicode/utf8"
@@ -176,7 +176,7 @@
 func (*DocLink) text() {}
 
 // A Parser is a doc comment parser.
-// The fields in the struct can be filled in before calling Parse
+// The fields in the struct can be filled in before calling [Parser.Parse]
 // in order to customize the details of the parsing process.
 type Parser struct {
 	// Words is a map of Go identifier words that
@@ -260,14 +260,12 @@
 }
 
 func isStdPkg(path string) bool {
-	// TODO(rsc): Use sort.Find once we don't have to worry about
-	// copying this code into older Go environments.
-	i := sort.Search(len(stdPkgs), func(i int) bool { return stdPkgs[i] >= path })
-	return i < len(stdPkgs) && stdPkgs[i] == path
+	_, ok := slices.BinarySearch(stdPkgs, path)
+	return ok
 }
 
 // DefaultLookupPackage is the default package lookup
-// function, used when [Parser].LookupPackage is nil.
+// function, used when [Parser.LookupPackage] is nil.
 // It recognizes names of the packages from the standard
 // library with single-element import paths, such as math,
 // which would otherwise be impossible to name.
@@ -281,7 +279,7 @@
 	return "", false
 }
 
-// Parse parses the doc comment text and returns the *Doc form.
+// Parse parses the doc comment text and returns the *[Doc] form.
 // Comment markers (/* // and */) in the text must have already been removed.
 func (p *Parser) Parse(text string) *Doc {
 	lines := unindent(strings.Split(text, "\n"))
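
The isStdPkg rewrite above leans on slices.BinarySearch returning both an index and a found flag over the sorted stdPkgs table. A minimal sketch of that call pattern, with a made-up sample slice standing in for the generated table:

package main

import (
	"fmt"
	"slices"
)

func main() {
	// The slice must be sorted, as stdPkgs is.
	pkgs := []string{"bytes", "fmt", "math", "strings"}

	_, ok := slices.BinarySearch(pkgs, "math")
	fmt.Println(ok) // true

	_, ok = slices.BinarySearch(pkgs, "example.com/x")
	fmt.Println(ok) // false
}
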
diff --git a/src/go/doc/comment/print.go b/src/go/doc/comment/print.go
index e1c070d..a6ae821 100644
--- a/src/go/doc/comment/print.go
+++ b/src/go/doc/comment/print.go
@@ -150,7 +150,7 @@
 	*Printer
 }
 
-// Comment returns the standard Go formatting of the Doc,
+// Comment returns the standard Go formatting of the [Doc],
 // without any comment markers.
 func (p *Printer) Comment(d *Doc) []byte {
 	cp := &commentPrinter{Printer: p}
diff --git a/src/go/doc/comment/text.go b/src/go/doc/comment/text.go
index 6f9c2e2..4e4214e 100644
--- a/src/go/doc/comment/text.go
+++ b/src/go/doc/comment/text.go
@@ -21,7 +21,7 @@
 	width      int
 }
 
-// Text returns a textual formatting of the Doc.
+// Text returns a textual formatting of the [Doc].
 // See the [Printer] documentation for ways to customize the text output.
 func (p *Printer) Text(d *Doc) []byte {
 	tp := &textPrinter{
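
The doc-link updates above ([Parser.Parse], HTML, Comment, Text) all describe the same round trip: parse a doc comment into a *Doc, then render it with a Printer. A small usage sketch:

package main

import (
	"fmt"
	"go/doc/comment"
)

func main() {
	var p comment.Parser
	d := p.Parse("Hello returns a greeting.\n\nSee [fmt.Sprintf] for formatting details.\n")

	var pr comment.Printer
	fmt.Printf("%s", pr.Text(d)) // plain-text rendering
	fmt.Printf("%s", pr.HTML(d)) // HTML rendering
}
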
diff --git a/src/go/doc/doc.go b/src/go/doc/doc.go
index eefadfa..4d01ae4 100644
--- a/src/go/doc/doc.go
+++ b/src/go/doc/doc.go
@@ -96,7 +96,7 @@
 	Body     string    // note body text
 }
 
-// Mode values control the operation of New and NewFromFiles.
+// Mode values control the operation of [New] and [NewFromFiles].
 type Mode int
 
 const (
@@ -116,7 +116,7 @@
 
 // New computes the package documentation for the given package AST.
 // New takes ownership of the AST pkg and may edit or overwrite it.
-// To have the Examples fields populated, use NewFromFiles and include
+// To have the [Examples] fields populated, use [NewFromFiles] and include
 // the package's _test.go files.
 func New(pkg *ast.Package, importPath string, mode Mode) *Package {
 	var r reader
@@ -198,9 +198,9 @@
 // Examples found in _test.go files are associated with the corresponding
 // type, function, method, or the package, based on their name.
 // If the example has a suffix in its name, it is set in the
-// Example.Suffix field. Examples with malformed names are skipped.
+// [Example.Suffix] field. [Examples] with malformed names are skipped.
 //
-// Optionally, a single extra argument of type Mode can be provided to
+// Optionally, a single extra argument of type [Mode] can be provided to
 // control low-level aspects of the documentation extraction behavior.
 //
 // NewFromFiles takes ownership of the AST files and may edit them,
diff --git a/src/go/doc/example.go b/src/go/doc/example.go
index 65ca885..6687ba8 100644
--- a/src/go/doc/example.go
+++ b/src/go/doc/example.go
@@ -35,7 +35,7 @@
 // Examples returns the examples found in testFiles, sorted by Name field.
 // The Order fields record the order in which the examples were encountered.
 // The Suffix field is not populated when Examples is called directly, it is
-// only populated by NewFromFiles for examples it finds in _test.go files.
+// only populated by [NewFromFiles] for examples it finds in _test.go files.
 //
 // Playable Examples must be in a package whose name ends in "_test".
 // An Example is "playable" (the Play field is non-nil) in either of these
diff --git a/src/go/doc/synopsis.go b/src/go/doc/synopsis.go
index 3c9e7e9..159bed5 100644
--- a/src/go/doc/synopsis.go
+++ b/src/go/doc/synopsis.go
@@ -54,7 +54,7 @@
 // That sentence ends after the first period followed by space and not
 // preceded by exactly one uppercase letter, or at the first paragraph break.
 // The result string has no \n, \r, or \t characters and uses only single
-// spaces between words. If text starts with any of the IllegalPrefixes,
+// spaces between words. If text starts with any of the [IllegalPrefixes],
 // the result is the empty string.
 func (p *Package) Synopsis(text string) string {
 	text = firstSentence(text)
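
As the New/NewFromFiles and Synopsis comments above note, NewFromFiles is the entry point that also populates Examples when _test.go files are included. A hedged sketch of the usual flow (import path and source are invented for illustration):

package main

import (
	"fmt"
	"go/ast"
	"go/doc"
	"go/parser"
	"go/token"
)

const src = `// Package p says hello.
package p

// Hello returns a greeting.
func Hello() string { return "hi" }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Pass the package's _test.go files here as well to get Examples populated.
	pkg, err := doc.NewFromFiles(fset, []*ast.File{f}, "example.com/p")
	if err != nil {
		panic(err)
	}
	for _, fn := range pkg.Funcs {
		fmt.Println(fn.Name, "-", pkg.Synopsis(fn.Doc))
	}
}
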
diff --git a/src/go/format/format.go b/src/go/format/format.go
index 3837cb4..01e3f00 100644
--- a/src/go/format/format.go
+++ b/src/go/format/format.go
@@ -42,11 +42,11 @@
 
 // Node formats node in canonical gofmt style and writes the result to dst.
 //
-// The node type must be *ast.File, *printer.CommentedNode, []ast.Decl,
-// []ast.Stmt, or assignment-compatible to ast.Expr, ast.Decl, ast.Spec,
-// or ast.Stmt. Node does not modify node. Imports are not sorted for
+// The node type must be *[ast.File], *[printer.CommentedNode], [][ast.Decl],
+// [][ast.Stmt], or assignment-compatible to [ast.Expr], [ast.Decl], [ast.Spec],
+// or [ast.Stmt]. Node does not modify node. Imports are not sorted for
 // nodes representing partial source files (for instance, if the node is
-// not an *ast.File or a *printer.CommentedNode not wrapping an *ast.File).
+// not an *[ast.File] or a *[printer.CommentedNode] not wrapping an *[ast.File]).
 //
 // The function may return early (before the entire result is written)
 // and return a formatting error, for instance due to an incorrect AST.
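
The Node doc comment above lists the accepted node types; a common use is formatting an expression produced by go/parser. A minimal sketch following that pattern:

package main

import (
	"bytes"
	"fmt"
	"go/format"
	"go/parser"
	"go/token"
)

func main() {
	// ast.Expr values are assignment-compatible with what Node accepts.
	expr, err := parser.ParseExpr("(6+2*3)/4")
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := format.Node(&buf, token.NewFileSet(), expr); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // printed with canonical gofmt spacing
}
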
diff --git a/src/go/importer/importer.go b/src/go/importer/importer.go
index 23118d3..8a8fb0e 100644
--- a/src/go/importer/importer.go
+++ b/src/go/importer/importer.go
@@ -69,16 +69,16 @@
 	return nil
 }
 
-// For calls ForCompiler with a new FileSet.
+// For calls [ForCompiler] with a new FileSet.
 //
-// Deprecated: Use ForCompiler, which populates a FileSet
+// Deprecated: Use [ForCompiler], which populates a FileSet
 // with the positions of objects created by the importer.
 func For(compiler string, lookup Lookup) types.Importer {
 	return ForCompiler(token.NewFileSet(), compiler, lookup)
 }
 
 // Default returns an Importer for the compiler that built the running binary.
-// If available, the result implements types.ImporterFrom.
+// If available, the result implements [types.ImporterFrom].
 func Default() types.Importer {
 	return For(runtime.Compiler, nil)
 }
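
The For/ForCompiler/Default edits above are doc-only; the recommended call remains ForCompiler so that object positions land in a caller-supplied FileSet. A short sketch, assuming the "source" compiler (which type-checks from source) is acceptable for the use case:

package main

import (
	"fmt"
	"go/importer"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	imp := importer.ForCompiler(fset, "source", nil)
	pkg, err := imp.Import("fmt")
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg.Path(), pkg.Name())
}
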
diff --git a/src/go/internal/gccgoimporter/parser.go b/src/go/internal/gccgoimporter/parser.go
index de9df0b..a7d2094 100644
--- a/src/go/internal/gccgoimporter/parser.go
+++ b/src/go/internal/gccgoimporter/parser.go
@@ -1063,7 +1063,7 @@
 		p.typeData = append(p.typeData, allTypeData[to.offset:to.offset+to.length])
 	}
 
-	for i := 1; i < int(exportedp1); i++ {
+	for i := 1; i < exportedp1; i++ {
 		p.parseSavedType(pkg, i, nil)
 	}
 }
diff --git a/src/go/internal/gcimporter/gcimporter.go b/src/go/internal/gcimporter/gcimporter.go
index 93b33d1..15ff93f 100644
--- a/src/go/internal/gcimporter/gcimporter.go
+++ b/src/go/internal/gcimporter/gcimporter.go
@@ -8,6 +8,7 @@
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"fmt"
 	"go/build"
 	"go/token"
@@ -25,7 +26,7 @@
 // debugging/development support
 const debug = false
 
-var exportMap sync.Map // package dir → func() (string, bool)
+var exportMap sync.Map // package dir → func() (string, error)
 
 // lookupGorootExport returns the location of the export data
 // (normally found in the build cache, but located in GOROOT/pkg
@@ -34,37 +35,42 @@
 // (We use the package's directory instead of its import path
 // mainly to simplify handling of the packages in src/vendor
 // and cmd/vendor.)
-func lookupGorootExport(pkgDir string) (string, bool) {
+func lookupGorootExport(pkgDir string) (string, error) {
 	f, ok := exportMap.Load(pkgDir)
 	if !ok {
 		var (
 			listOnce   sync.Once
 			exportPath string
+			err        error
 		)
-		f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
+		f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
 			listOnce.Do(func() {
 				cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
 				cmd.Dir = build.Default.GOROOT
-				cmd.Env = append(cmd.Environ(), "GOROOT="+build.Default.GOROOT)
+				cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
 				var output []byte
-				output, err := cmd.Output()
+				output, err = cmd.Output()
 				if err != nil {
+					if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+						err = errors.New(string(ee.Stderr))
+					}
 					return
 				}
 
 				exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
 				if len(exports) != 1 {
+					err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
 					return
 				}
 
 				exportPath = exports[0]
 			})
 
-			return exportPath, exportPath != ""
+			return exportPath, err
 		})
 	}
 
-	return f.(func() (string, bool))()
+	return f.(func() (string, error))()
 }
 
 var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
@@ -73,10 +79,9 @@
 // path based on package information provided by build.Import (using
 // the build.Default build.Context). A relative srcDir is interpreted
 // relative to the current working directory.
-// If no file was found, an empty filename is returned.
-func FindPkg(path, srcDir string) (filename, id string) {
+func FindPkg(path, srcDir string) (filename, id string, err error) {
 	if path == "" {
-		return
+		return "", "", errors.New("path is empty")
 	}
 
 	var noext string
@@ -87,16 +92,19 @@
 		if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
 			srcDir = abs
 		}
-		bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+		var bp *build.Package
+		bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
 		if bp.PkgObj == "" {
-			var ok bool
 			if bp.Goroot && bp.Dir != "" {
-				filename, ok = lookupGorootExport(bp.Dir)
+				filename, err = lookupGorootExport(bp.Dir)
+				if err == nil {
+					_, err = os.Stat(filename)
+				}
+				if err == nil {
+					return filename, bp.ImportPath, nil
+				}
 			}
-			if !ok {
-				id = path // make sure we have an id to print in error message
-				return
-			}
+			goto notfound
 		} else {
 			noext = strings.TrimSuffix(bp.PkgObj, ".a")
 		}
@@ -121,21 +129,23 @@
 		}
 	}
 
-	if filename != "" {
-		if f, err := os.Stat(filename); err == nil && !f.IsDir() {
-			return
-		}
-	}
 	// try extensions
 	for _, ext := range pkgExts {
 		filename = noext + ext
-		if f, err := os.Stat(filename); err == nil && !f.IsDir() {
-			return
+		f, statErr := os.Stat(filename)
+		if statErr == nil && !f.IsDir() {
+			return filename, id, nil
+		}
+		if err == nil {
+			err = statErr
 		}
 	}
 
-	filename = "" // not found
-	return
+notfound:
+	if err == nil {
+		return "", path, fmt.Errorf("can't find import: %q", path)
+	}
+	return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
 }
 
 // Import imports a gc-generated package given its import path and srcDir, adds
@@ -163,12 +173,12 @@
 		rc = f
 	} else {
 		var filename string
-		filename, id = FindPkg(path, srcDir)
+		filename, id, err = FindPkg(path, srcDir)
 		if filename == "" {
 			if path == "unsafe" {
 				return types.Unsafe, nil
 			}
-			return nil, fmt.Errorf("can't find import: %q", id)
+			return nil, err
 		}
 
 		// no need to re-import if the package was imported completely before
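
The lookupGorootExport rewrite above changes the cached closures from (string, bool) to (string, error) and, when `go list` fails, surfaces its stderr rather than the bare exit status. That stderr-unwrapping pattern is general; a hedged standalone sketch (the helper name runGoList is made up):

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

// runGoList returns go list's stdout, preferring its stderr text as the
// error message when the command fails.
func runGoList(args ...string) (string, error) {
	cmd := exec.Command("go", append([]string{"list"}, args...)...)
	out, err := cmd.Output()
	if err != nil {
		var ee *exec.ExitError
		if errors.As(err, &ee) && len(ee.Stderr) > 0 {
			return "", errors.New(string(ee.Stderr))
		}
		return "", err
	}
	return string(out), nil
}

func main() {
	out, err := runGoList("-f", "{{.Dir}}", "fmt")
	fmt.Println(out, err)
}
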
diff --git a/src/go/internal/gcimporter/gcimporter_test.go b/src/go/internal/gcimporter/gcimporter_test.go
index 25ff402..07ab135 100644
--- a/src/go/internal/gcimporter/gcimporter_test.go
+++ b/src/go/internal/gcimporter/gcimporter_test.go
@@ -391,7 +391,7 @@
 	{"math.Pi", "const Pi untyped float"},
 	{"math.Sin", "func Sin(x float64) float64"},
 	{"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"},
-	{"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string)"},
+	{"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string, err error)"},
 
 	// interfaces
 	{"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"},
diff --git a/src/go/internal/gcimporter/ureader.go b/src/go/internal/gcimporter/ureader.go
index ac85a41..5397a27 100644
--- a/src/go/internal/gcimporter/ureader.go
+++ b/src/go/internal/gcimporter/ureader.go
@@ -118,7 +118,7 @@
 	// tparams is a slice of the constructed TypeParams for the element.
 	tparams []*types.TypeParam
 
-	// devived is a slice of types derived from tparams, which may be
+	// derived is a slice of types derived from tparams, which may be
 	// instantiated while reading the current element.
 	derived      []derivedInfo
 	derivedTypes []types.Type // lazily instantiated from derived
diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go
index 73cb162..11d4264 100644
--- a/src/go/parser/interface.go
+++ b/src/go/parser/interface.go
@@ -53,30 +53,32 @@
 	Trace                                             // print a trace of parsed productions
 	DeclarationErrors                                 // report declaration errors
 	SpuriousErrors                                    // same as AllErrors, for backward-compatibility
-	SkipObjectResolution                              // don't resolve identifiers to objects - see ParseFile
+	SkipObjectResolution                              // skip deprecated identifier resolution; see ParseFile
 	AllErrors            = SpuriousErrors             // report all errors (not just the first 10 on different lines)
 )
 
 // ParseFile parses the source code of a single Go source file and returns
-// the corresponding ast.File node. The source code may be provided via
+// the corresponding [ast.File] node. The source code may be provided via
 // the filename of the source file, or via the src parameter.
 //
 // If src != nil, ParseFile parses the source from src and the filename is
 // only used when recording position information. The type of the argument
-// for the src parameter must be string, []byte, or io.Reader.
+// for the src parameter must be string, []byte, or [io.Reader].
 // If src == nil, ParseFile parses the file specified by filename.
 //
-// The mode parameter controls the amount of source text parsed and other
-// optional parser functionality. If the SkipObjectResolution mode bit is set,
-// the object resolution phase of parsing will be skipped, causing File.Scope,
-// File.Unresolved, and all Ident.Obj fields to be nil.
+// The mode parameter controls the amount of source text parsed and
+// other optional parser functionality. If the [SkipObjectResolution]
+// mode bit is set (recommended), the object resolution phase of
+// parsing will be skipped, causing File.Scope, File.Unresolved, and
+// all Ident.Obj fields to be nil. Those fields are deprecated; see
+// [ast.Object] for details.
 //
 // Position information is recorded in the file set fset, which must not be
 // nil.
 //
 // If the source couldn't be read, the returned AST is nil and the error
 // indicates the specific failure. If the source was read but syntax
-// errors were found, the result is a partial AST (with ast.Bad* nodes
+// errors were found, the result is a partial AST (with [ast.Bad]* nodes
 // representing the fragments of erroneous source code). Multiple errors
 // are returned via a scanner.ErrorList which is sorted by source position.
 func ParseFile(fset *token.FileSet, filename string, src any, mode Mode) (f *ast.File, err error) {
@@ -124,13 +126,13 @@
 	return
 }
 
-// ParseDir calls ParseFile for all files with names ending in ".go" in the
+// ParseDir calls [ParseFile] for all files with names ending in ".go" in the
 // directory specified by path and returns a map of package name -> package
 // AST with all the packages found.
 //
-// If filter != nil, only the files with fs.FileInfo entries passing through
+// If filter != nil, only the files with [fs.FileInfo] entries passing through
 // the filter (and ending in ".go") are considered. The mode bits are passed
-// to ParseFile unchanged. Position information is recorded in fset, which
+// to [ParseFile] unchanged. Position information is recorded in fset, which
 // must not be nil.
 //
 // If the directory couldn't be read, a nil map and the respective error are
@@ -177,13 +179,13 @@
 }
 
 // ParseExprFrom is a convenience function for parsing an expression.
-// The arguments have the same meaning as for ParseFile, but the source must
+// The arguments have the same meaning as for [ParseFile], but the source must
 // be a valid Go (type or value) expression. Specifically, fset must not
 // be nil.
 //
 // If the source couldn't be read, the returned AST is nil and the error
 // indicates the specific failure. If the source was read but syntax
-// errors were found, the result is a partial AST (with ast.Bad* nodes
+// errors were found, the result is a partial AST (with [ast.Bad]* nodes
 // representing the fragments of erroneous source code). Multiple errors
 // are returned via a scanner.ErrorList which is sorted by source position.
 func ParseExprFrom(fset *token.FileSet, filename string, src any, mode Mode) (expr ast.Expr, err error) {
@@ -230,7 +232,7 @@
 // The position information recorded in the AST is undefined. The filename used
 // in error messages is the empty string.
 //
-// If syntax errors were found, the result is a partial AST (with ast.Bad* nodes
+// If syntax errors were found, the result is a partial AST (with [ast.Bad]* nodes
 // representing the fragments of erroneous source code). Multiple errors are
 // returned via a scanner.ErrorList which is sorted by source position.
 func ParseExpr(x string) (ast.Expr, error) {
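
The updated ParseFile documentation above now recommends setting SkipObjectResolution, since File.Scope, File.Unresolved, and Ident.Obj are deprecated. A minimal sketch of parsing with that mode bit set:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package p

func Hello() string { return "hi" }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments|parser.SkipObjectResolution)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		if fn, ok := n.(*ast.FuncDecl); ok {
			fmt.Println(fset.Position(fn.Pos()), fn.Name.Name)
		}
		return true
	})
}
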
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
index e1d941e..17808b3 100644
--- a/src/go/parser/parser.go
+++ b/src/go/parser/parser.go
@@ -880,26 +880,33 @@
 
 	// Type parameters are the only parameter list closed by ']'.
 	tparams := closing == token.RBRACK
-	// Type set notation is ok in type parameter lists.
-	typeSetsOK := tparams
 
-	pos := p.pos
+	pos0 := p.pos
 	if name0 != nil {
-		pos = name0.Pos()
+		pos0 = name0.Pos()
+	} else if typ0 != nil {
+		pos0 = typ0.Pos()
 	}
 
+	// Note: The code below matches the corresponding code in the syntax
+	//       parser closely. Changes must be reflected in either parser.
+	//       For the code to match, we use the local []field list that
+	//       corresponds to []syntax.Field. At the end, the list must be
+	//       converted into an []*ast.Field.
+
 	var list []field
 	var named int // number of parameters that have an explicit name and type
+	var typed int // number of parameters that have an explicit type
 
 	for name0 != nil || p.tok != closing && p.tok != token.EOF {
 		var par field
 		if typ0 != nil {
-			if typeSetsOK {
+			if tparams {
 				typ0 = p.embeddedElem(typ0)
 			}
 			par = field{name0, typ0}
 		} else {
-			par = p.parseParamDecl(name0, typeSetsOK)
+			par = p.parseParamDecl(name0, tparams)
 		}
 		name0 = nil // 1st name was consumed if present
 		typ0 = nil  // 1st typ was consumed if present
@@ -908,6 +915,9 @@
 			if par.name != nil && par.typ != nil {
 				named++
 			}
+			if par.typ != nil {
+				typed++
+			}
 		}
 		if !p.atComma("parameter list", closing) {
 			break
@@ -919,10 +929,7 @@
 		return // not uncommon
 	}
 
-	// TODO(gri) parameter distribution and conversion to []*ast.Field
-	//           can be combined and made more efficient
-
-	// distribute parameter types
+	// distribute parameter types (len(list) > 0)
 	if named == 0 {
 		// all unnamed => found names are type names
 		for i := 0; i < len(list); i++ {
@@ -933,42 +940,71 @@
 			}
 		}
 		if tparams {
-			p.error(pos, "type parameters must be named")
+			// This is the same error handling as below, adjusted for type parameters only.
+			// See comment below for details. (go.dev/issue/64534)
+			var errPos token.Pos
+			var msg string
+			if named == typed /* same as typed == 0 */ {
+				errPos = p.pos // position error at closing ]
+				msg = "missing type constraint"
+			} else {
+				errPos = pos0 // position at opening [ or first name
+				msg = "missing type parameter name"
+				if len(list) == 1 {
+					msg += " or invalid array length"
+				}
+			}
+			p.error(errPos, msg)
 		}
 	} else if named != len(list) {
-		// some named => all must be named
-		ok := true
-		var typ ast.Expr
-		missingName := pos
+		// some named or we're in a type parameter list => all must be named
+		var errPos token.Pos // left-most error position (or invalid)
+		var typ ast.Expr     // current type (from right to left)
 		for i := len(list) - 1; i >= 0; i-- {
 			if par := &list[i]; par.typ != nil {
 				typ = par.typ
 				if par.name == nil {
-					ok = false
-					missingName = par.typ.Pos()
+					errPos = typ.Pos()
 					n := ast.NewIdent("_")
-					n.NamePos = typ.Pos() // correct position
+					n.NamePos = errPos // correct position
 					par.name = n
 				}
 			} else if typ != nil {
 				par.typ = typ
 			} else {
 				// par.typ == nil && typ == nil => we only have a par.name
-				ok = false
-				missingName = par.name.Pos()
-				par.typ = &ast.BadExpr{From: par.name.Pos(), To: p.pos}
+				errPos = par.name.Pos()
+				par.typ = &ast.BadExpr{From: errPos, To: p.pos}
 			}
 		}
-		if !ok {
+		if errPos.IsValid() {
+			var msg string
 			if tparams {
-				p.error(missingName, "type parameters must be named")
+				// Not all parameters are named because named != len(list).
+				// If named == typed we must have parameters that have no types,
+				// and they must be at the end of the parameter list, otherwise
+				// the types would have been filled in by the right-to-left sweep
+				// above and we wouldn't have an error. Since we are in a type
+				// parameter list, the missing types are constraints.
+				if named == typed {
+					errPos = p.pos // position error at closing ]
+					msg = "missing type constraint"
+				} else {
+					msg = "missing type parameter name"
+					// go.dev/issue/60812
+					if len(list) == 1 {
+						msg += " or invalid array length"
+					}
+				}
 			} else {
-				p.error(pos, "mixed named and unnamed parameters")
+				msg = "mixed named and unnamed parameters"
 			}
+			p.error(errPos, msg)
 		}
 	}
 
-	// convert list []*ast.Field
+	// Convert list to []*ast.Field.
+	// If list contains types only, each type gets its own ast.Field.
 	if named == 0 {
 		// parameter list consists of types only
 		for _, par := range list {
@@ -978,7 +1014,8 @@
 		return
 	}
 
-	// parameter list consists of named parameters with types
+	// If the parameter list consists of named parameters with types,
+	// collect all names with the same types into a single ast.Field.
 	var names []*ast.Ident
 	var typ ast.Expr
 	addParams := func() {
@@ -1545,7 +1582,7 @@
 		if ncolons == 2 {
 			slice3 = true
 			// Check presence of middle and final index here rather than during type-checking
-			// to prevent erroneous programs from passing through gofmt (was issue 7305).
+			// to prevent erroneous programs from passing through gofmt (was go.dev/issue/7305).
 			if index[1] == nil {
 				p.error(colons[0], "middle index required in 3-index slice")
 				index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
@@ -1654,14 +1691,6 @@
 	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
 }
 
-// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
-func unparen(x ast.Expr) ast.Expr {
-	if p, isParen := x.(*ast.ParenExpr); isParen {
-		x = unparen(p.X)
-	}
-	return x
-}
-
 func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
 	if p.trace {
 		defer un(trace(p, "PrimaryExpr"))
@@ -1706,7 +1735,7 @@
 		case token.LBRACE:
 			// operand may have returned a parenthesized complit
 			// type; accept it but complain if we have a complit
-			t := unparen(x)
+			t := ast.Unparen(x)
 			// determine if '{' belongs to a composite literal or a block statement
 			switch t.(type) {
 			case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr:
@@ -1949,7 +1978,7 @@
 
 func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
 	x := p.parseRhs() // could be a conversion: (some type)(x)
-	if t := unparen(x); t != x {
+	if t := ast.Unparen(x); t != x {
 		p.error(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", callType))
 		x = t
 	}
@@ -2542,7 +2571,7 @@
 	closePos := p.expect(token.RBRACK)
 	spec.TypeParams = &ast.FieldList{Opening: openPos, List: list, Closing: closePos}
 	// Let the type checker decide whether to accept type parameters on aliases:
-	// see issue #46477.
+	// see go.dev/issue/46477.
 	if p.tok == token.ASSIGN {
 		// type alias
 		spec.Assign = p.pos
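
The parseParameterList rework above replaces the old "type parameters must be named" message with "missing type constraint" or "missing type parameter name", positioned per go.dev/issue/64534 and go.dev/issue/60812. A quick way to observe the new message (exact position formatting may vary):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	_, err := parser.ParseFile(token.NewFileSet(), "x.go",
		"package p\n\ntype S[a, b] struct{}\n", 0)
	fmt.Println(err) // reports "missing type constraint" at the closing ]
}
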
diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go
index 0848aca..43b3416 100644
--- a/src/go/parser/parser_test.go
+++ b/src/go/parser/parser_test.go
@@ -319,7 +319,7 @@
 /* 3a */ // 3b
 /* 3c */ const e = 2.7182
 
-// Example from issue 3139
+// Example from go.dev/issue/3139
 func ExampleCount() {
 	fmt.Println(strings.Count("cheese", "e"))
 	fmt.Println(strings.Count("five", "")) // before & after each rune
@@ -335,7 +335,7 @@
 		{"/* 1a */", "/* 1b */", "/* 1c */", "// 1d"},
 		{"/* 2a\n*/", "// 2b"},
 		{"/* 3a */", "// 3b", "/* 3c */"},
-		{"// Example from issue 3139"},
+		{"// Example from go.dev/issue/3139"},
 		{"// before & after each rune"},
 		{"// Output:", "// 3", "// 5"},
 	}
@@ -573,7 +573,7 @@
 var parseDepthTests = []struct {
 	name   string
 	format string
-	// multiplier is used when a single statement may result in more than one
+	// parseMultiplier is used when a single statement may result in more than one
 	// change in the depth level, for instance "1+(..." produces a BinaryExpr
 	// followed by a UnaryExpr, which increments the depth twice. The test
 	// case comment explains which nodes are triggering the multiple depth
@@ -735,7 +735,7 @@
 	}
 }
 
-// proposal #50429
+// proposal go.dev/issue/50429
 func TestRangePos(t *testing.T) {
 	testcases := []string{
 		"package p; func _() { for range x {} }",
diff --git a/src/go/parser/resolver.go b/src/go/parser/resolver.go
index f8ff618..d1e1834 100644
--- a/src/go/parser/resolver.go
+++ b/src/go/parser/resolver.go
@@ -136,7 +136,7 @@
 		obj.Decl = decl
 		obj.Data = data
 		// Identifiers (for receiver type parameters) are written to the scope, but
-		// never set as the resolved object. See issue #50956.
+		// never set as the resolved object. See go.dev/issue/50956.
 		if _, ok := decl.(*ast.Ident); !ok {
 			ident.Obj = obj
 		}
@@ -209,7 +209,7 @@
 			}
 			assert(obj.Name != "", "obj with no name")
 			// Identifiers (for receiver type parameters) are written to the scope,
-			// but never set as the resolved object. See issue #50956.
+			// but never set as the resolved object. See go.dev/issue/50956.
 			if _, ok := obj.Decl.(*ast.Ident); !ok {
 				ident.Obj = obj
 			}
@@ -234,7 +234,7 @@
 
 func (r *resolver) walkLHS(list []ast.Expr) {
 	for _, expr := range list {
-		expr := unparen(expr)
+		expr := ast.Unparen(expr)
 		if _, ok := expr.(*ast.Ident); !ok && expr != nil {
 			ast.Walk(r, expr)
 		}
@@ -285,7 +285,7 @@
 		}
 		for _, e := range n.Elts {
 			if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
-				// See issue #45160: try to resolve composite lit keys, but don't
+				// See go.dev/issue/45160: try to resolve composite lit keys, but don't
 				// collect them as unresolved if resolution failed. This replicates
 				// existing behavior when resolving during parsing.
 				if ident, _ := kv.Key.(*ast.Ident); ident != nil {
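
The resolver now calls ast.Unparen, the exported replacement (new in go1.22) for the local unparen helpers deleted above. A small sketch of what it does:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

func main() {
	e, err := parser.ParseExpr("((x + y))")
	if err != nil {
		panic(err)
	}
	// Unparen strips any number of enclosing ParenExprs.
	fmt.Printf("%T -> %T\n", e, ast.Unparen(e)) // *ast.ParenExpr -> *ast.BinaryExpr
}
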
diff --git a/src/go/parser/short_test.go b/src/go/parser/short_test.go
index f9575e1..3a34e8c 100644
--- a/src/go/parser/short_test.go
+++ b/src/go/parser/short_test.go
@@ -43,7 +43,7 @@
 	`package p; func _() { map[int]int{}[0]++; map[int]int{}[0] += 1 }`,
 	`package p; func _(x interface{f()}) { interface{f()}(x).f() }`,
 	`package p; func _(x chan int) { chan int(x) <- 0 }`,
-	`package p; const (x = 0; y; z)`, // issue 9639
+	`package p; const (x = 0; y; z)`, // go.dev/issue/9639
 	`package p; var _ = map[P]int{P{}:0, {}:1}`,
 	`package p; var _ = map[*P]int{&P{}:0, {}:1}`,
 	`package p; type T = int`,
@@ -172,21 +172,21 @@
 	`package p; type _ struct{ *( /* ERROR "cannot parenthesize embedded type" */ int) }`,
 	`package p; type _ struct{ *( /* ERROR "cannot parenthesize embedded type" */ []byte) }`,
 
-	// issue 8656
+	// go.dev/issue/8656
 	`package p; func f() (a b string /* ERROR "missing ','" */ , ok bool)`,
 
-	// issue 9639
+	// go.dev/issue/9639
 	`package p; var x, y, z; /* ERROR "expected type" */`,
 
-	// issue 12437
+	// go.dev/issue/12437
 	`package p; var _ = struct { x int, /* ERROR "expected ';', found ','" */ }{};`,
 	`package p; var _ = struct { x int, /* ERROR "expected ';', found ','" */ y float }{};`,
 
-	// issue 11611
+	// go.dev/issue/11611
 	`package p; type _ struct { int, } /* ERROR "expected 'IDENT', found '}'" */ ;`,
 	`package p; type _ struct { int, float } /* ERROR "expected type, found '}'" */ ;`,
 
-	// issue 13475
+	// go.dev/issue/13475
 	`package p; func f() { if true {} else ; /* ERROR "expected if statement or block" */ }`,
 	`package p; func f() { if true {} else defer /* ERROR "expected if statement or block" */ f() }`,
 
@@ -195,8 +195,7 @@
 	`package p; var _ func[ /* ERROR "must have no type parameters" */ T any](T)`,
 	`package p; func _[]/* ERROR "empty type parameter list" */()`,
 
-	// TODO(rfindley) a better location would be after the ']'
-	`package p; type _[A /* ERROR "type parameters must be named" */ ,] struct{ A }`,
+	`package p; type _[A,] /* ERROR "missing type constraint" */ struct{ A }`,
 
 	`package p; func _[type /* ERROR "found 'type'" */ P, *Q interface{}]()`,
 
diff --git a/src/go/parser/testdata/issue11377.src b/src/go/parser/testdata/issue11377.src
index 1c43800..a19b86e 100644
--- a/src/go/parser/testdata/issue11377.src
+++ b/src/go/parser/testdata/issue11377.src
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Test case for issue 11377: Better synchronization of
+// Test case for go.dev/issue/11377: Better synchronization of
 // parser after certain syntax errors.
 
 package p
diff --git a/src/go/parser/testdata/issue23434.src b/src/go/parser/testdata/issue23434.src
index 24a0832..f04ca1d 100644
--- a/src/go/parser/testdata/issue23434.src
+++ b/src/go/parser/testdata/issue23434.src
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Test case for issue 23434: Better synchronization of
+// Test case for go.dev/issue/23434: Better synchronization of
 // parser after missing type. There should be exactly
 // one error each time, with no follow-on errors.
 
diff --git a/src/go/parser/testdata/issue3106.src b/src/go/parser/testdata/issue3106.src
index 2db10be..37dfb2a 100644
--- a/src/go/parser/testdata/issue3106.src
+++ b/src/go/parser/testdata/issue3106.src
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Test case for issue 3106: Better synchronization of
+// Test case for go.dev/issue/3106: Better synchronization of
 // parser after certain syntax errors.
 
 package main
diff --git a/src/go/parser/testdata/issue34946.src b/src/go/parser/testdata/issue34946.src
index 6bb15e1..87b703d 100644
--- a/src/go/parser/testdata/issue34946.src
+++ b/src/go/parser/testdata/issue34946.src
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Test case for issue 34946: Better synchronization of
+// Test case for go.dev/issue/34946: Better synchronization of
 // parser for function declarations that start their
 // body's opening { on a new line.
 
diff --git a/src/go/parser/testdata/issue44504.src b/src/go/parser/testdata/issue44504.src
index 7791f4a..c46c79f 100644
--- a/src/go/parser/testdata/issue44504.src
+++ b/src/go/parser/testdata/issue44504.src
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Test case for issue 44504: panic due to duplicate resolution of slice/index
+// Test case for go.dev/issue/44504: panic due to duplicate resolution of slice/index
 // operands. We should not try to resolve a LHS expression with invalid syntax.
 
 package p
diff --git a/src/go/parser/testdata/issue49175.go2 b/src/go/parser/testdata/issue49175.go2
index cf1c83c..df303ad 100644
--- a/src/go/parser/testdata/issue49175.go2
+++ b/src/go/parser/testdata/issue49175.go2
@@ -10,4 +10,4 @@
 func _[_ []t]() {}
 func _[_ [1]t]() {}
 
-type t [t /* ERROR "type parameters must be named" */ [0]]t
+type t [t /* ERROR "missing type parameter name or invalid array length" */ [0]]t
diff --git a/src/go/parser/testdata/issue64534.src b/src/go/parser/testdata/issue64534.src
new file mode 100644
index 0000000..006cc93
--- /dev/null
+++ b/src/go/parser/testdata/issue64534.src
@@ -0,0 +1,10 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for go.dev/issue/64534.
+// Parser should not panic during object resolution.
+
+package main
+
+func _[A /* ERROR "missing type parameter name" */ $(B](){}}
diff --git a/src/go/parser/testdata/resolution/typeparams.go2 b/src/go/parser/testdata/resolution/typeparams.go2
index 7395ca2..0f894d7 100644
--- a/src/go/parser/testdata/resolution/typeparams.go2
+++ b/src/go/parser/testdata/resolution/typeparams.go2
@@ -47,5 +47,5 @@
   var t1var /* =@t1var */ T1 /* @T1 */
 }
 
-// From issue #39634
+// From go.dev/issue/39634
 func(*ph1[e, e])h(d)
diff --git a/src/go/parser/testdata/tparams.go2 b/src/go/parser/testdata/tparams.go2
index 1a9a6c6..3293b55 100644
--- a/src/go/parser/testdata/tparams.go2
+++ b/src/go/parser/testdata/tparams.go2
@@ -4,8 +4,8 @@
 
 package p
 
-type _[a /* ERROR "type parameters must be named" */, b] struct{}
-type _[a t, b t, c /* ERROR "type parameters must be named" */ ] struct{}
+type _[a, b] /* ERROR "missing type constraint" */ struct{}
+type _[a t, b t, c]  /* ERROR "missing type constraint" */ struct{}
 type _ struct {
 	t [n]byte
 	t[a]
@@ -25,13 +25,13 @@
 }
 
 func _[] /* ERROR "empty type parameter list" */ ()
-func _[a /* ERROR "type parameters must be named" */, b ]()
-func _[a t, b t, c /* ERROR "type parameters must be named" */ ]()
+func _[a, b ] /* ERROR "missing type constraint" */ ()
+func _[a t, b t, c] /* ERROR "missing type constraint" */ ()
 
 // TODO(rfindley) incorrect error message (see existing TODO in parser)
 func f[a b, 0 /* ERROR "expected '\)', found 0" */ ] ()
 
-// issue #49482
+// go.dev/issue/49482
 type (
 	_[a *[]int] struct{}
 	_[a *t,] struct{}
@@ -43,7 +43,7 @@
 	_[a *struct{}|~t] struct{}
 )
 
-// issue #51488
+// go.dev/issue/51488
 type (
 	_[a *t|t,] struct{}
 	_[a *t|t, b t] struct{}
@@ -52,3 +52,14 @@
 	_[a ([]t)] struct{}
 	_[a ([]t)|t] struct{}
 )
+
+// go.dev/issue/60812
+type (
+	_ [t]struct{}
+	_ [[]t]struct{}
+	_ [[t]t]struct{}
+	_ [t /* ERROR "missing type parameter name or invalid array length" */ [t]]struct{}
+	_ [t t[t], t /* ERROR "missing type parameter name" */ [t]]struct{}
+	_ [t /* ERROR "missing type parameter name" */ [t], t t[t]]struct{}
+	_ [t /* ERROR "missing type parameter name" */ [t], t[t]]struct{} // report only first error
+)
diff --git a/src/go/parser/testdata/typeset.go2 b/src/go/parser/testdata/typeset.go2
index 7844c22..3d90d76 100644
--- a/src/go/parser/testdata/typeset.go2
+++ b/src/go/parser/testdata/typeset.go2
@@ -61,12 +61,12 @@
         _[~t|~t] t
 )
 
-type _[_ t, t /* ERROR "type parameters must be named" */ ] t
-type _[_ ~t, t /* ERROR "type parameters must be named" */ ] t
-type _[_ t, ~ /* ERROR "type parameters must be named" */ t] t
-type _[_ ~t, ~ /* ERROR "type parameters must be named" */ t] t
+type _[_ t, t] /* ERROR "missing type constraint" */ t
+type _[_ ~t, t] /* ERROR "missing type constraint" */ t
+type _[_ t, ~ /* ERROR "missing type parameter name" */ t] t
+type _[_ ~t, ~ /* ERROR "missing type parameter name" */ t] t
 
-type _[_ t|t, t /* ERROR "type parameters must be named" */ |t] t
-type _[_ ~t|t, t /* ERROR "type parameters must be named" */ |t] t
-type _[_ t|t, ~ /* ERROR "type parameters must be named" */ t|t] t
-type _[_ ~t|t, ~ /* ERROR "type parameters must be named" */ t|t] t
+type _[_ t|t, t /* ERROR "missing type parameter name" */ |t] t
+type _[_ ~t|t, t /* ERROR "missing type parameter name" */ |t] t
+type _[_ t|t, ~ /* ERROR "missing type parameter name" */ t|t] t
+type _[_ ~t|t, ~ /* ERROR "missing type parameter name" */ t|t] t
diff --git a/src/go/printer/nodes.go b/src/go/printer/nodes.go
index e41ffc1..a4651e0 100644
--- a/src/go/printer/nodes.go
+++ b/src/go/printer/nodes.go
@@ -44,10 +44,7 @@
 // linebreaks. At the moment there is no easy way to know about
 // future (not yet interspersed) comments in this function.
 func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (nbreaks int) {
-	n := nlimit(line - p.pos.Line)
-	if n < min {
-		n = min
-	}
+	n := max(nlimit(line-p.pos.Line), min)
 	if n > 0 {
 		p.print(ws)
 		if newSection {
@@ -670,9 +667,7 @@
 		h4, h5, mp := walkBinary(l)
 		has4 = has4 || h4
 		has5 = has5 || h5
-		if maxProblem < mp {
-			maxProblem = mp
-		}
+		maxProblem = max(maxProblem, mp)
 	}
 
 	switch r := e.Y.(type) {
@@ -685,9 +680,7 @@
 		h4, h5, mp := walkBinary(r)
 		has4 = has4 || h4
 		has5 = has5 || h5
-		if maxProblem < mp {
-			maxProblem = mp
-		}
+		maxProblem = max(maxProblem, mp)
 
 	case *ast.StarExpr:
 		if e.Op == token.QUO { // `*/`
@@ -699,9 +692,7 @@
 		case "/*", "&&", "&^":
 			maxProblem = 5
 		case "++", "--":
-			if maxProblem < 4 {
-				maxProblem = 4
-			}
+			maxProblem = max(maxProblem, 4)
 		}
 	}
 	return
@@ -983,15 +974,24 @@
 		if len(x.Args) > 1 {
 			depth++
 		}
-		var wasIndented bool
-		if _, ok := x.Fun.(*ast.FuncType); ok {
-			// conversions to literal function types require parentheses around the type
-			p.print(token.LPAREN)
-			wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth)
-			p.print(token.RPAREN)
-		} else {
-			wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth)
+
+		// Conversions to literal function types or <-chan
+		// types require parentheses around the type.
+		paren := false
+		switch t := x.Fun.(type) {
+		case *ast.FuncType:
+			paren = true
+		case *ast.ChanType:
+			paren = t.Dir == ast.RECV
 		}
+		if paren {
+			p.print(token.LPAREN)
+		}
+		wasIndented := p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth)
+		if paren {
+			p.print(token.RPAREN)
+		}
+
 		p.setPos(x.Lparen)
 		p.print(token.LPAREN)
 		if x.Ellipsis.IsValid() {
@@ -1739,7 +1739,7 @@
 	p.setPos(d.Pos())
 	p.print(d.Tok, blank)
 
-	if d.Lparen.IsValid() || len(d.Specs) > 1 {
+	if d.Lparen.IsValid() || len(d.Specs) != 1 {
 		// group of parenthesized declarations
 		p.setPos(d.Lparen)
 		p.print(token.LPAREN)
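
Several printer changes above replace hand-rolled comparisons with the min/max built-ins (available since go1.21). For reference, a tiny illustration:

package main

import "fmt"

func main() {
	// Equivalent to: n := -3; if n < 0 { n = 0 }
	n := max(-3, 0)
	// Equivalent to: v := 9; if v > 4 { v = 4 }
	v := min(9, 4)
	fmt.Println(n, v) // 0 4
}
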
diff --git a/src/go/printer/printer.go b/src/go/printer/printer.go
index 5cf4e4b..32be3d6 100644
--- a/src/go/printer/printer.go
+++ b/src/go/printer/printer.go
@@ -861,10 +861,7 @@
 
 // nlimit limits n to maxNewlines.
 func nlimit(n int) int {
-	if n > maxNewlines {
-		n = maxNewlines
-	}
-	return n
+	return min(n, maxNewlines)
 }
 
 func mayCombine(prev token.Token, next byte) (b bool) {
@@ -1413,7 +1410,7 @@
 }
 
 // A CommentedNode bundles an AST node and corresponding comments.
-// It may be provided as argument to any of the Fprint functions.
+// It may be provided as argument to any of the [Fprint] functions.
 type CommentedNode struct {
 	Node     any // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt
 	Comments []*ast.CommentGroup
@@ -1421,14 +1418,14 @@
 
 // Fprint "pretty-prints" an AST node to output for a given configuration cfg.
 // Position information is interpreted relative to the file set fset.
-// The node type must be *ast.File, *CommentedNode, []ast.Decl, []ast.Stmt,
-// or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt.
+// The node type must be *[ast.File], *[CommentedNode], [][ast.Decl], [][ast.Stmt],
+// or assignment-compatible to [ast.Expr], [ast.Decl], [ast.Spec], or [ast.Stmt].
 func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node any) error {
 	return cfg.fprint(output, fset, node, make(map[ast.Node]int))
 }
 
 // Fprint "pretty-prints" an AST node to output.
-// It calls Config.Fprint with default settings.
+// It calls [Config.Fprint] with default settings.
 // Note that gofmt uses tabs for indentation but spaces for alignment;
 // use format.Node (package go/format) for output that matches gofmt.
 func Fprint(output io.Writer, fset *token.FileSet, node any) error {
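
The Fprint/Config.Fprint updates above only add doc links; usage is unchanged. A minimal sketch (as the doc comment notes, use go/format for gofmt-identical output):

package main

import (
	"go/ast"
	"go/printer"
	"go/token"
	"os"
)

func main() {
	expr := &ast.BinaryExpr{
		X:  &ast.Ident{Name: "a"},
		Op: token.ADD,
		Y:  &ast.Ident{Name: "b"},
	}
	// Prints "a + b" using default settings.
	printer.Fprint(os.Stdout, token.NewFileSet(), expr)
}
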
diff --git a/src/go/printer/printer_test.go b/src/go/printer/printer_test.go
index 3a8ce60..6d5b559 100644
--- a/src/go/printer/printer_test.go
+++ b/src/go/printer/printer_test.go
@@ -604,6 +604,29 @@
 	}
 }
 
+// TestChanType tests that the tree for <-(<-chan int), without
+// ParenExpr, is correctly formatted with parens.
+// Test case for issue #63362.
+func TestChanType(t *testing.T) {
+	expr := &ast.UnaryExpr{
+		Op: token.ARROW,
+		X: &ast.CallExpr{
+			Fun: &ast.ChanType{
+				Dir:   ast.RECV,
+				Value: &ast.Ident{Name: "int"},
+			},
+			Args: []ast.Expr{&ast.Ident{Name: "nil"}},
+		},
+	}
+	var buf bytes.Buffer
+	if err := Fprint(&buf, fset, expr); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := buf.String(), `<-(<-chan int)(nil)`; got != want {
+		t.Fatalf("got:\n%s\nwant:\n%s\n", got, want)
+	}
+}
+
 type limitWriter struct {
 	remaining int
 	errCount  int
@@ -825,3 +848,18 @@
 		t.Errorf("unexpected Fprint output:\n%s", buf.Bytes())
 	}
 }
+
+// TestEmptyDecl tests that empty decls for const, var, import are printed with
+// valid syntax e.g "var ()" instead of just "var", which is invalid and cannot
+// be parsed.
+func TestEmptyDecl(t *testing.T) { // issue 63566
+	for _, tok := range []token.Token{token.IMPORT, token.CONST, token.TYPE, token.VAR} {
+		var buf bytes.Buffer
+		Fprint(&buf, token.NewFileSet(), &ast.GenDecl{Tok: tok})
+		got := buf.String()
+		want := tok.String() + " ()"
+		if got != want {
+			t.Errorf("got %q, want %q", got, want)
+		}
+	}
+}
diff --git a/src/go/printer/testdata/parser.go b/src/go/printer/testdata/parser.go
index bb06c8d..11795b4 100644
--- a/src/go/printer/testdata/parser.go
+++ b/src/go/printer/testdata/parser.go
@@ -1127,7 +1127,7 @@
 
 // checkExpr checks that x is an expression (and not a type).
 func (p *parser) checkExpr(x ast.Expr) ast.Expr {
-	switch t := unparen(x).(type) {
+	switch t := ast.Unparen(x).(type) {
 	case *ast.BadExpr:
 	case *ast.Ident:
 	case *ast.BasicLit:
@@ -1200,18 +1200,10 @@
 	return x
 }
 
-// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
-func unparen(x ast.Expr) ast.Expr {
-	if p, isParen := x.(*ast.ParenExpr); isParen {
-		x = unparen(p.X)
-	}
-	return x
-}
-
 // checkExprOrType checks that x is an expression or a type
 // (and not a raw type such as [...]T).
 func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
-	switch t := unparen(x).(type) {
+	switch t := ast.Unparen(x).(type) {
 	case *ast.ParenExpr:
 		panic("unreachable")
 	case *ast.UnaryExpr:
diff --git a/src/go/scanner/errors.go b/src/go/scanner/errors.go
index 3e9c365..3230f13 100644
--- a/src/go/scanner/errors.go
+++ b/src/go/scanner/errors.go
@@ -11,7 +11,7 @@
 	"sort"
 )
 
-// In an ErrorList, an error is represented by an *Error.
+// In an [ErrorList], an error is represented by an *Error.
 // The position Pos, if valid, points to the beginning of
 // the offending token, and the error condition is described
 // by Msg.
@@ -34,15 +34,15 @@
 // The zero value for an ErrorList is an empty ErrorList ready to use.
 type ErrorList []*Error
 
-// Add adds an Error with given position and error message to an ErrorList.
+// Add adds an [Error] with given position and error message to an [ErrorList].
 func (p *ErrorList) Add(pos token.Position, msg string) {
 	*p = append(*p, &Error{pos, msg})
 }
 
-// Reset resets an ErrorList to no errors.
+// Reset resets an [ErrorList] to no errors.
 func (p *ErrorList) Reset() { *p = (*p)[0:0] }
 
-// ErrorList implements the sort Interface.
+// [ErrorList] implements the sort Interface.
 func (p ErrorList) Len() int      { return len(p) }
 func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
 
@@ -64,14 +64,14 @@
 	return p[i].Msg < p[j].Msg
 }
 
-// Sort sorts an ErrorList. *Error entries are sorted by position,
-// other errors are sorted by error message, and before any *Error
+// Sort sorts an [ErrorList]. *[Error] entries are sorted by position,
+// other errors are sorted by error message, and before any *[Error]
 // entry.
 func (p ErrorList) Sort() {
 	sort.Sort(p)
 }
 
-// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
+// RemoveMultiples sorts an [ErrorList] and removes all but the first error per line.
 func (p *ErrorList) RemoveMultiples() {
 	sort.Sort(p)
 	var last token.Position // initial last.Line is != any legal error line
@@ -86,7 +86,7 @@
 	*p = (*p)[0:i]
 }
 
-// An ErrorList implements the error interface.
+// An [ErrorList] implements the error interface.
 func (p ErrorList) Error() string {
 	switch len(p) {
 	case 0:
@@ -107,7 +107,7 @@
 }
 
 // PrintError is a utility function that prints a list of errors to w,
-// one error per line, if the err parameter is an ErrorList. Otherwise
+// one error per line, if the err parameter is an [ErrorList]. Otherwise
 // it prints the err string.
 func PrintError(w io.Writer, err error) {
 	if list, ok := err.(ErrorList); ok {
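
The errors.go changes above are doc-link only; ErrorList is still the type that go/parser returns for syntax errors, and PrintError remains the convenience printer. A short sketch:

package main

import (
	"go/parser"
	"go/scanner"
	"go/token"
	"os"
)

func main() {
	_, err := parser.ParseFile(token.NewFileSet(), "bad.go",
		"package p\n\nfunc f() { if }\n", 0)
	// err is a scanner.ErrorList; PrintError writes one error per line.
	scanner.PrintError(os.Stderr, err)
}
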
diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go
index 75f835d..8ca7466 100644
--- a/src/go/scanner/scanner.go
+++ b/src/go/scanner/scanner.go
@@ -17,7 +17,7 @@
 	"unicode/utf8"
 )
 
-// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
+// An ErrorHandler may be provided to [Scanner.Init]. If a syntax error is
 // encountered and a handler was installed, the handler is called with a
 // position and an error message. The position points to the beginning of
 // the offending token.
@@ -25,7 +25,7 @@
 
 // A Scanner holds the scanner's internal state while processing
 // a given text. It can be allocated as part of another data
-// structure but must be initialized via Init before use.
+// structure but must be initialized via [Scanner.Init] before use.
 type Scanner struct {
 	// immutable state
 	file *token.File  // source file handle
@@ -113,9 +113,9 @@
 // line information which is already present is ignored. Init causes a
 // panic if the file size does not match the src size.
 //
-// Calls to Scan will invoke the error handler err if they encounter a
+// Calls to [Scanner.Scan] will invoke the error handler err if they encounter a
 // syntax error and err is not nil. Also, for each error encountered,
-// the Scanner field ErrorCount is incremented by one. The mode parameter
+// the [Scanner] field ErrorCount is incremented by one. The mode parameter
 // determines how comments are handled.
 //
 // Note that Init may call err if there is an error in the first character
@@ -759,20 +759,20 @@
 
 // Scan scans the next token and returns the token position, the token,
 // and its literal string if applicable. The source end is indicated by
-// token.EOF.
+// [token.EOF].
 //
-// If the returned token is a literal (token.IDENT, token.INT, token.FLOAT,
-// token.IMAG, token.CHAR, token.STRING) or token.COMMENT, the literal string
+// If the returned token is a literal ([token.IDENT], [token.INT], [token.FLOAT],
+// [token.IMAG], [token.CHAR], [token.STRING]) or [token.COMMENT], the literal string
 // has the corresponding value.
 //
 // If the returned token is a keyword, the literal string is the keyword.
 //
-// If the returned token is token.SEMICOLON, the corresponding
+// If the returned token is [token.SEMICOLON], the corresponding
 // literal string is ";" if the semicolon was present in the source,
 // and "\n" if the semicolon was inserted because of a newline or
 // at EOF.
 //
-// If the returned token is token.ILLEGAL, the literal string is the
+// If the returned token is [token.ILLEGAL], the literal string is the
 // offending character.
 //
 // In all other cases, Scan returns an empty literal string.
@@ -943,7 +943,13 @@
 		default:
 			// next reports unexpected BOMs - don't repeat
 			if ch != bom {
-				s.errorf(s.file.Offset(pos), "illegal character %#U", ch)
+				// Report an informative error for U+201[CD] quotation
+				// marks, which are easily introduced via copy and paste.
+				if ch == '“' || ch == '”' {
+					s.errorf(s.file.Offset(pos), "curly quotation mark %q (use neutral %q)", ch, '"')
+				} else {
+					s.errorf(s.file.Offset(pos), "illegal character %#U", ch)
+				}
 			}
 			insertSemi = s.insertSemi // preserve insertSemi info
 			tok = token.ILLEGAL
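
The scanner change above adds a friendlier diagnostic for U+201C/U+201D quotation marks. A standalone sketch that drives Scanner.Init/Scan directly and lets the error handler report the new message:

package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

func main() {
	src := []byte("x := “hello”\n")

	fset := token.NewFileSet()
	file := fset.AddFile("curly.go", fset.Base(), len(src))

	var s scanner.Scanner
	s.Init(file, src, func(pos token.Position, msg string) {
		fmt.Println(pos, msg) // prints the curly-quotation-mark message
	}, scanner.ScanComments)

	for {
		_, tok, _ := s.Scan()
		if tok == token.EOF {
			break
		}
	}
}
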
diff --git a/src/go/scanner/scanner_test.go b/src/go/scanner/scanner_test.go
index 9046148..916a40a 100644
--- a/src/go/scanner/scanner_test.go
+++ b/src/go/scanner/scanner_test.go
@@ -813,6 +813,7 @@
 	{`"` + "abc\ufeffdef" + `"`, token.STRING, 4, `"` + "abc\ufeffdef" + `"`, "illegal byte order mark"}, // only first BOM is ignored
 	{"abc\x00def", token.IDENT, 3, "abc", "illegal character NUL"},
 	{"abc\x00", token.IDENT, 3, "abc", "illegal character NUL"},
+	{"“abc”", token.ILLEGAL, 0, "abc", `curly quotation mark '“' (use neutral '"')`},
 }
 
 func TestScanErrors(t *testing.T) {
diff --git a/src/go/token/example_test.go b/src/go/token/example_test.go
index 0011703..7e5017e 100644
--- a/src/go/token/example_test.go
+++ b/src/go/token/example_test.go
@@ -66,7 +66,7 @@
 		fmt.Printf("%s: %s\n", fmtPosition, kind)
 	}
 
-	//Output:
+	// Output:
 	//
 	// main.go:3:1: import
 	// main.go:5:1: import
diff --git a/src/go/token/position.go b/src/go/token/position.go
index a644382..0b2ace0 100644
--- a/src/go/token/position.go
+++ b/src/go/token/position.go
@@ -54,7 +54,7 @@
 }
 
 // Pos is a compact encoding of a source position within a file set.
-// It can be converted into a Position for a more convenient, but much
+// It can be converted into a [Position] for a more convenient, but much
 // larger, representation.
 //
 // The Pos value for a given file is a number in the range [base, base+size],
@@ -65,9 +65,9 @@
 // representing the first byte in the file.
 //
 // To create the Pos value for a specific source offset (measured in bytes),
-// first add the respective file to the current file set using FileSet.AddFile
-// and then call File.Pos(offset) for that file. Given a Pos value p
-// for a specific file set fset, the corresponding Position value is
+// first add the respective file to the current file set using [FileSet.AddFile]
+// and then call [File.Pos](offset) for that file. Given a Pos value p
+// for a specific file set fset, the corresponding [Position] value is
 // obtained by calling fset.Position(p).
 //
 // Pos values can be compared directly with the usual comparison operators:
@@ -77,10 +77,10 @@
 // to the respective file set before the file implied by q.
 type Pos int
 
-// The zero value for Pos is NoPos; there is no file and line information
+// The zero value for [Pos] is NoPos; there is no file and line information
 // associated with it, and NoPos.IsValid() is false. NoPos is always
-// smaller than any other Pos value. The corresponding Position value
-// for NoPos is the zero value for Position.
+// smaller than any other [Pos] value. The corresponding [Position] value
+// for NoPos is the zero value for [Position].
 const NoPos Pos = 0
 
 // IsValid reports whether the position is valid.
@@ -91,7 +91,7 @@
 // -----------------------------------------------------------------------------
 // File
 
-// A File is a handle for a file belonging to a FileSet.
+// A File is a handle for a file belonging to a [FileSet].
 // A File has a name, size, and line offset table.
 type File struct {
 	name string // file name as provided to AddFile
@@ -140,7 +140,7 @@
 
 // MergeLine merges a line with the following line. It is akin to replacing
 // the newline character at the end of the line with a space (to not change the
-// remaining offsets). To obtain the line number, consult e.g. Position.Line.
+// remaining offsets). To obtain the line number, consult e.g. [Position.Line].
 // MergeLine will panic if given an invalid line number.
 func (f *File) MergeLine(line int) {
 	if line < 1 {
@@ -160,7 +160,7 @@
 	f.lines = f.lines[:len(f.lines)-1]
 }
 
-// Lines returns the effective line offset table of the form described by SetLines.
+// Lines returns the effective line offset table of the form described by [File.SetLines].
 // Callers must not mutate the result.
 func (f *File) Lines() []int {
 	f.mutex.Lock()
@@ -214,8 +214,8 @@
 	f.mutex.Unlock()
 }
 
-// LineStart returns the Pos value of the start of the specified line.
-// It ignores any alternative positions set using AddLineColumnInfo.
+// LineStart returns the [Pos] value of the start of the specified line.
+// It ignores any alternative positions set using [File.AddLineColumnInfo].
 // LineStart panics if the 1-based line number is invalid.
 func (f *File) LineStart(line int) Pos {
 	if line < 1 {
@@ -239,7 +239,7 @@
 	Line, Column int
 }
 
-// AddLineInfo is like AddLineColumnInfo with a column = 1 argument.
+// AddLineInfo is like [File.AddLineColumnInfo] with a column = 1 argument.
 // It is here for backward-compatibility for code prior to Go 1.11.
 func (f *File) AddLineInfo(offset int, filename string, line int) {
 	f.AddLineColumnInfo(offset, filename, line, 1)
@@ -272,7 +272,7 @@
 }
 
 // Offset returns the offset for the given file position p;
-// p must be a valid Pos value in that file.
+// p must be a valid [Pos] value in that file.
 // f.Offset(f.Pos(offset)) == offset.
 func (f *File) Offset(p Pos) int {
 	if int(p) < f.base || int(p) > f.base+f.size {
@@ -282,7 +282,7 @@
 }
 
 // Line returns the line number for the given file position p;
-// p must be a Pos value in that file or NoPos.
+// p must be a [Pos] value in that file or [NoPos].
 func (f *File) Line(p Pos) int {
 	return f.Position(p).Line
 }
@@ -365,21 +365,21 @@
 //
 // The byte offsets for each file in a file set are mapped into
 // distinct (integer) intervals, one interval [base, base+size]
-// per file. Base represents the first byte in the file, and size
-// is the corresponding file size. A Pos value is a value in such
-// an interval. By determining the interval a Pos value belongs
+// per file. [FileSet.Base] represents the first byte in the file, and size
+// is the corresponding file size. A [Pos] value is a value in such
+// an interval. By determining the interval a [Pos] value belongs
 // to, the file, its file base, and thus the byte offset (position)
-// the Pos value is representing can be computed.
+// the [Pos] value is representing can be computed.
 //
 // When adding a new file, a file base must be provided. That can
 // be any integer value that is past the end of any interval of any
-// file already in the file set. For convenience, FileSet.Base provides
+// file already in the file set. For convenience, [FileSet.Base] provides
 // such a value, which is simply the end of the Pos interval of the most
 // recently added file, plus one. Unless there is a need to extend an
-// interval later, using the FileSet.Base should be used as argument
-// for FileSet.AddFile.
+// interval later, using the [FileSet.Base] should be used as argument
+// for [FileSet.AddFile].
 //
-// A File may be removed from a FileSet when it is no longer needed.
+// A [File] may be removed from a FileSet when it is no longer needed.
 // This may reduce memory usage in a long-running application.
 type FileSet struct {
 	mutex sync.RWMutex         // protects the file set
@@ -396,7 +396,7 @@
 }
 
 // Base returns the minimum base offset that must be provided to
-// AddFile when adding the next file.
+// [FileSet.AddFile] when adding the next file.
 func (s *FileSet) Base() int {
 	s.mutex.RLock()
 	b := s.base
@@ -406,18 +406,18 @@
 
 // AddFile adds a new file with a given filename, base offset, and file size
 // to the file set s and returns the file. Multiple files may have the same
-// name. The base offset must not be smaller than the FileSet's Base(), and
+// name. The base offset must not be smaller than the [FileSet.Base], and
 // size must not be negative. As a special case, if a negative base is provided,
-// the current value of the FileSet's Base() is used instead.
+// the current value of the [FileSet.Base] is used instead.
 //
-// Adding the file will set the file set's Base() value to base + size + 1
+// Adding the file will set the file set's [FileSet.Base] value to base + size + 1
 // as the minimum base value for the next file. The following relationship
-// exists between a Pos value p for a given file offset offs:
+// exists between a [Pos] value p for a given file offset offs:
 //
 //	int(p) = base + offs
 //
 // with offs in the range [0, size] and thus p in the range [base, base+size].
-// For convenience, File.Pos may be used to create file-specific position
+// For convenience, [File.Pos] may be used to create file-specific position
 // values from a file offset.
 func (s *FileSet) AddFile(filename string, base, size int) *File {
 	// Allocate f outside the critical section.
@@ -447,9 +447,9 @@
 	return f
 }
 
-// RemoveFile removes a file from the FileSet so that subsequent
-// queries for its Pos interval yield a negative result.
-// This reduces the memory usage of a long-lived FileSet that
+// RemoveFile removes a file from the [FileSet] so that subsequent
+// queries for its [Pos] interval yield a negative result.
+// This reduces the memory usage of a long-lived [FileSet] that
 // encounters an unbounded stream of files.
 //
 // Removing a file that does not belong to the set has no effect.
@@ -510,7 +510,7 @@
 }
 
 // File returns the file that contains the position p.
-// If no such file is found (for instance for p == NoPos),
+// If no such file is found (for instance for p == [NoPos]),
 // the result is nil.
 func (s *FileSet) File(p Pos) (f *File) {
 	if p != NoPos {
@@ -519,10 +519,10 @@
 	return
 }
 
-// PositionFor converts a Pos p in the fileset into a Position value.
+// PositionFor converts a [Pos] p in the fileset into a [Position] value.
 // If adjusted is set, the position may be adjusted by position-altering
 // //line comments; otherwise those comments are ignored.
-// p must be a Pos value in s or NoPos.
+// p must be a [Pos] value in s or [NoPos].
 func (s *FileSet) PositionFor(p Pos, adjusted bool) (pos Position) {
 	if p != NoPos {
 		if f := s.file(p); f != nil {
@@ -532,7 +532,7 @@
 	return
 }
 
-// Position converts a Pos p in the fileset into a Position value.
+// Position converts a [Pos] p in the fileset into a Position value.
 // Calling s.Position(p) is equivalent to calling s.PositionFor(p, true).
 func (s *FileSet) Position(p Pos) (pos Position) {
 	return s.PositionFor(p, true)
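
As an editorial aside, the following minimal sketch (not part of this patch, using only the standard go/token API) illustrates the File/FileSet workflow documented above: [FileSet.Base] supplies the base offset for [FileSet.AddFile], and [File.Pos], [File.Offset], and [FileSet.Position] convert between byte offsets and Pos values.

package main

import (
	"fmt"
	"go/token"
)

func main() {
	src := "package p\n\nvar x int\n"

	fset := token.NewFileSet()
	// Use fset.Base() as the base offset for the new file, as recommended above.
	f := fset.AddFile("p.go", fset.Base(), len(src))
	f.SetLinesForContent([]byte(src)) // compute the line offset table

	p := f.Pos(11)                // Pos of the byte at offset 11, the start of "var"
	fmt.Println(fset.Position(p)) // p.go:3:1
	fmt.Println(f.Offset(p))      // 11 (f.Offset(f.Pos(offset)) == offset)
	fmt.Println(f.Line(p))        // 3
}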
diff --git a/src/go/token/token.go b/src/go/token/token.go
index 3ae10d8..aa5d6e0 100644
--- a/src/go/token/token.go
+++ b/src/go/token/token.go
@@ -235,9 +235,9 @@
 
 // String returns the string corresponding to the token tok.
 // For operators, delimiters, and keywords the string is the actual
-// token character sequence (e.g., for the token ADD, the string is
+// token character sequence (e.g., for the token [ADD], the string is
 // "+"). For all other tokens the string corresponds to the token
-// constant name (e.g. for the token IDENT, the string is "IDENT").
+// constant name (e.g. for the token [IDENT], the string is "IDENT").
 func (tok Token) String() string {
 	s := ""
 	if 0 <= tok && tok < Token(len(tokens)) {
@@ -288,7 +288,7 @@
 	}
 }
 
-// Lookup maps an identifier to its keyword token or IDENT (if not a keyword).
+// Lookup maps an identifier to its keyword token or [IDENT] (if not a keyword).
 func Lookup(ident string) Token {
 	if tok, is_keyword := keywords[ident]; is_keyword {
 		return tok
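
For illustration (not part of the patch), [Token.String] and [Lookup] behave as described in the comments above:

package main

import (
	"fmt"
	"go/token"
)

func main() {
	fmt.Println(token.Lookup("func")) // FUNC: "func" is a keyword
	fmt.Println(token.Lookup("foo"))  // IDENT: not a keyword
	fmt.Println(token.ADD.String())   // "+"
	fmt.Println(token.IDENT.String()) // "IDENT"
}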
diff --git a/src/go/types/alias.go b/src/go/types/alias.go
new file mode 100644
index 0000000..6043c0a
--- /dev/null
+++ b/src/go/types/alias.go
@@ -0,0 +1,90 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import "fmt"
+
+// An Alias represents an alias type.
+// Whether or not Alias types are created is controlled by the
+// gotypesalias setting with the GODEBUG environment variable.
+// For gotypesalias=1, alias declarations produce an Alias type.
+// Otherwise, the alias information is only in the type name,
+// which points directly to the actual (aliased) type.
+type Alias struct {
+	obj     *TypeName // corresponding declared alias object
+	fromRHS Type      // RHS of type alias declaration; may be an alias
+	actual  Type      // actual (aliased) type; never an alias
+}
+
+// NewAlias creates a new Alias type with the given type name and rhs.
+// rhs must not be nil.
+func NewAlias(obj *TypeName, rhs Type) *Alias {
+	alias := (*Checker)(nil).newAlias(obj, rhs)
+	// Ensure that alias.actual is set (#65455).
+	unalias(alias)
+	return alias
+}
+
+func (a *Alias) Obj() *TypeName   { return a.obj }
+func (a *Alias) Underlying() Type { return unalias(a).Underlying() }
+func (a *Alias) String() string   { return TypeString(a, nil) }
+
+// Type accessors
+
+// Unalias returns t if it is not an alias type;
+// otherwise it follows t's alias chain until it
+// reaches a non-alias type which is then returned.
+// Consequently, the result is never an alias type.
+func Unalias(t Type) Type {
+	if a0, _ := t.(*Alias); a0 != nil {
+		return unalias(a0)
+	}
+	return t
+}
+
+func unalias(a0 *Alias) Type {
+	if a0.actual != nil {
+		return a0.actual
+	}
+	var t Type
+	for a := a0; a != nil; a, _ = t.(*Alias) {
+		t = a.fromRHS
+	}
+	if t == nil {
+		panic(fmt.Sprintf("non-terminated alias %s", a0.obj.name))
+	}
+	a0.actual = t
+	return t
+}
+
+// asNamed returns t as *Named if that is t's
+// actual type. It returns nil otherwise.
+func asNamed(t Type) *Named {
+	n, _ := Unalias(t).(*Named)
+	return n
+}
+
+// newAlias creates a new Alias type with the given type name and rhs.
+// rhs must not be nil.
+func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias {
+	assert(rhs != nil)
+	a := &Alias{obj, rhs, nil}
+	if obj.typ == nil {
+		obj.typ = a
+	}
+
+	// Ensure that a.actual is set at the end of type checking.
+	if check != nil {
+		check.needsCleanup(a)
+	}
+
+	return a
+}
+
+func (a *Alias) cleanup() {
+	Unalias(a)
+}
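
The new Alias API above can be exercised directly; the sketch below (illustrative only, mirroring the TestNewAlias_Issue65455 pattern later in this patch) builds an alias A for int and resolves it with Unalias. Whether the type checker itself produces Alias types for declarations such as "type A = int" is controlled by the gotypesalias GODEBUG setting, as noted in the Alias doc comment.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// Construct an alias A = int without running the type checker.
	obj := types.NewTypeName(token.NoPos, nil, "A", nil)
	alias := types.NewAlias(obj, types.Typ[types.Int])

	fmt.Println(alias)                                        // the alias type A
	fmt.Println(types.Unalias(alias) == types.Typ[types.Int]) // true: Unalias resolves the chain
	fmt.Println(alias.Underlying())                           // int
}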
diff --git a/src/go/types/api.go b/src/go/types/api.go
index ad4c1a2..796fe05 100644
--- a/src/go/types/api.go
+++ b/src/go/types/api.go
@@ -4,23 +4,23 @@
 
 // Package types declares the data types and implements
 // the algorithms for type-checking of Go packages. Use
-// Config.Check to invoke the type checker for a package.
-// Alternatively, create a new type checker with NewChecker
-// and invoke it incrementally by calling Checker.Files.
+// [Config.Check] to invoke the type checker for a package.
+// Alternatively, create a new type checker with [NewChecker]
+// and invoke it incrementally by calling [Checker.Files].
 //
 // Type-checking consists of several interdependent phases:
 //
 // Name resolution maps each identifier (ast.Ident) in the program to the
-// language object (Object) it denotes.
-// Use Info.{Defs,Uses,Implicits} for the results of name resolution.
+// language object ([Object]) it denotes.
+// Use [Info].{Defs,Uses,Implicits} for the results of name resolution.
 //
 // Constant folding computes the exact constant value (constant.Value)
 // for every expression (ast.Expr) that is a compile-time constant.
 // Use Info.Types[expr].Value for the results of constant folding.
 //
-// Type inference computes the type (Type) of every expression (ast.Expr)
+// [Type] inference computes the type ([Type]) of every expression ([ast.Expr])
 // and checks for compliance with the language specification.
-// Use Info.Types[expr].Type for the results of type inference.
+// Use [Info.Types][expr].Type for the results of type inference.
 //
 // For a tutorial, see https://golang.org/s/types-tutorial.
 package types
@@ -73,7 +73,7 @@
 //
 // CAUTION: This interface does not support the import of locally
 // vendored packages. See https://golang.org/s/go15vendor.
-// If possible, external implementations should implement ImporterFrom.
+// If possible, external implementations should implement [ImporterFrom].
 type Importer interface {
 	// Import returns the imported package for the given import path.
 	// The semantics is like for ImporterFrom.ImportFrom except that
@@ -263,6 +263,15 @@
 	// scope, the function scopes are embedded in the file scope of the file
 	// containing the function declaration.
 	//
+	// The Scope of a function contains the declarations of any
+	// type parameters, parameters, and named results, plus any
+	// local declarations in the body block.
+	// It is coextensive with the complete extent of the
+	// function's syntax ([*ast.FuncDecl] or [*ast.FuncLit]).
+	// The Scopes mapping does not contain an entry for the
+	// function body ([*ast.BlockStmt]); the function's scope is
+	// associated with the [*ast.FuncType].
+	//
 	// The following node types may appear in Scopes:
 	//
 	//     *ast.File
@@ -285,6 +294,13 @@
 	// in source order. Variables without an initialization expression do not
 	// appear in this list.
 	InitOrder []*Initializer
+
+	// FileVersions maps a file to its Go version string.
+	// If the file doesn't specify a version, the reported
+	// string is Config.GoVersion.
+	// Version strings begin with “go”, like “go1.21”, and
+	// are suitable for use with the [go/version] package.
+	FileVersions map[*ast.File]string
 }
 
 func (info *Info) recordTypes() bool {
@@ -308,8 +324,8 @@
 // ObjectOf returns the object denoted by the specified id,
 // or nil if not found.
 //
-// If id is an embedded struct field, ObjectOf returns the field (*Var)
-// it defines, not the type (*TypeName) it uses.
+// If id is an embedded struct field, [Info.ObjectOf] returns the field (*[Var])
+// it defines, not the type (*[TypeName]) it uses.
 //
 // Precondition: the Uses and Defs maps are populated.
 func (info *Info) ObjectOf(id *ast.Ident) Object {
@@ -319,6 +335,23 @@
 	return info.Uses[id]
 }
 
+// PkgNameOf returns the local package name defined by the import,
+// or nil if not found.
+//
+// For dot-imports, the package name is ".".
+//
+// Precondition: the Defs and Implicits maps are populated.
+func (info *Info) PkgNameOf(imp *ast.ImportSpec) *PkgName {
+	var obj Object
+	if imp.Name != nil {
+		obj = info.Defs[imp.Name]
+	} else {
+		obj = info.Implicits[imp]
+	}
+	pkgname, _ := obj.(*PkgName)
+	return pkgname
+}
+
 // TypeAndValue reports the type and value (for constants)
 // of the corresponding expression.
 type TypeAndValue struct {
@@ -380,8 +413,8 @@
 }
 
 // Instance reports the type arguments and instantiated type for type and
-// function instantiations. For type instantiations, Type will be of dynamic
-// type *Named. For function instantiations, Type will be of dynamic type
+// function instantiations. For type instantiations, [Type] will be of dynamic
+// type *[Named]. For function instantiations, [Type] will be of dynamic type
 // *Signature.
 type Instance struct {
 	TypeArgs *TypeList
@@ -411,10 +444,10 @@
 
 // Check type-checks a package and returns the resulting package object and
 // the first error if any. Additionally, if info != nil, Check populates each
-// of the non-nil maps in the Info struct.
+// of the non-nil maps in the [Info] struct.
 //
 // The package is marked as complete if no errors occurred, otherwise it is
-// incomplete. See Config.Error for controlling behavior in the presence of
+// incomplete. See [Config.Error] for controlling behavior in the presence of
 // errors.
 //
 // The package is specified by a list of *ast.Files and corresponding
@@ -424,80 +457,3 @@
 	pkg := NewPackage(path, "")
 	return pkg, NewChecker(conf, fset, pkg, info).Files(files)
 }
-
-// AssertableTo reports whether a value of type V can be asserted to have type T.
-//
-// The behavior of AssertableTo is unspecified in three cases:
-//   - if T is Typ[Invalid]
-//   - if V is a generalized interface; i.e., an interface that may only be used
-//     as a type constraint in Go code
-//   - if T is an uninstantiated generic type
-func AssertableTo(V *Interface, T Type) bool {
-	// Checker.newAssertableTo suppresses errors for invalid types, so we need special
-	// handling here.
-	if T.Underlying() == Typ[Invalid] {
-		return false
-	}
-	return (*Checker)(nil).newAssertableTo(nopos, V, T, nil)
-}
-
-// AssignableTo reports whether a value of type V is assignable to a variable
-// of type T.
-//
-// The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an
-// uninstantiated generic type.
-func AssignableTo(V, T Type) bool {
-	x := operand{mode: value, typ: V}
-	ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
-	return ok
-}
-
-// ConvertibleTo reports whether a value of type V is convertible to a value of
-// type T.
-//
-// The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an
-// uninstantiated generic type.
-func ConvertibleTo(V, T Type) bool {
-	x := operand{mode: value, typ: V}
-	return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
-}
-
-// Implements reports whether type V implements interface T.
-//
-// The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated
-// generic type.
-func Implements(V Type, T *Interface) bool {
-	if T.Empty() {
-		// All types (even Typ[Invalid]) implement the empty interface.
-		return true
-	}
-	// Checker.implements suppresses errors for invalid types, so we need special
-	// handling here.
-	if V.Underlying() == Typ[Invalid] {
-		return false
-	}
-	return (*Checker)(nil).implements(0, V, T, false, nil)
-}
-
-// Satisfies reports whether type V satisfies the constraint T.
-//
-// The behavior of Satisfies is unspecified if V is Typ[Invalid] or an uninstantiated
-// generic type.
-func Satisfies(V Type, T *Interface) bool {
-	return (*Checker)(nil).implements(0, V, T, true, nil)
-}
-
-// Identical reports whether x and y are identical types.
-// Receivers of Signature types are ignored.
-func Identical(x, y Type) bool {
-	var c comparer
-	return c.identical(x, y, nil)
-}
-
-// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
-// Receivers of Signature types are ignored.
-func IdenticalIgnoreTags(x, y Type) bool {
-	var c comparer
-	c.ignoreTags = true
-	return c.identical(x, y, nil)
-}
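
A minimal sketch of the type-checking entry point documented above (illustrative, not part of the patch; it assumes importer.Default can resolve "fmt" in the local environment): Config.Check populates the non-nil Info maps, and the new Info.PkgNameOf reports the local name defined by an import.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p

import "fmt"

var x = fmt.Sprint(42)
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	info := &types.Info{
		Defs:      make(map[*ast.Ident]types.Object),
		Implicits: make(map[ast.Node]types.Object),
		Types:     make(map[ast.Expr]types.TypeAndValue),
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{file}, info)
	if err != nil {
		panic(err)
	}

	fmt.Println(pkg.Name())                            // p
	fmt.Println(info.PkgNameOf(file.Imports[0]).Name()) // fmt (local name of the import)
}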
diff --git a/src/go/types/api_predicates.go b/src/go/types/api_predicates.go
new file mode 100644
index 0000000..d712afe
--- /dev/null
+++ b/src/go/types/api_predicates.go
@@ -0,0 +1,86 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements exported type predicates.
+
+package types
+
+// AssertableTo reports whether a value of type V can be asserted to have type T.
+//
+// The behavior of AssertableTo is unspecified in three cases:
+//   - if T is Typ[Invalid]
+//   - if V is a generalized interface; i.e., an interface that may only be used
+//     as a type constraint in Go code
+//   - if T is an uninstantiated generic type
+func AssertableTo(V *Interface, T Type) bool {
+	// Checker.newAssertableTo suppresses errors for invalid types, so we need special
+	// handling here.
+	if !isValid(T.Underlying()) {
+		return false
+	}
+	return (*Checker)(nil).newAssertableTo(nopos, V, T, nil)
+}
+
+// AssignableTo reports whether a value of type V is assignable to a variable
+// of type T.
+//
+// The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
+func AssignableTo(V, T Type) bool {
+	x := operand{mode: value, typ: V}
+	ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
+	return ok
+}
+
+// ConvertibleTo reports whether a value of type V is convertible to a value of
+// type T.
+//
+// The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
+func ConvertibleTo(V, T Type) bool {
+	x := operand{mode: value, typ: V}
+	return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
+}
+
+// Implements reports whether type V implements interface T.
+//
+// The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated
+// generic type.
+func Implements(V Type, T *Interface) bool {
+	if T.Empty() {
+		// All types (even Typ[Invalid]) implement the empty interface.
+		return true
+	}
+	// Checker.implements suppresses errors for invalid types, so we need special
+	// handling here.
+	if !isValid(V.Underlying()) {
+		return false
+	}
+	return (*Checker)(nil).implements(nopos, V, T, false, nil)
+}
+
+// Satisfies reports whether type V satisfies the constraint T.
+//
+// The behavior of Satisfies is unspecified if V is Typ[Invalid] or an uninstantiated
+// generic type.
+func Satisfies(V Type, T *Interface) bool {
+	return (*Checker)(nil).implements(nopos, V, T, true, nil)
+}
+
+// Identical reports whether x and y are identical types.
+// Receivers of [Signature] types are ignored.
+func Identical(x, y Type) bool {
+	var c comparer
+	return c.identical(x, y, nil)
+}
+
+// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
+// Receivers of [Signature] types are ignored.
+func IdenticalIgnoreTags(x, y Type) bool {
+	var c comparer
+	c.ignoreTags = true
+	return c.identical(x, y, nil)
+}
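
The exported predicates moved into this file can be used without a Checker; a small illustrative sketch (not part of the patch):

package main

import (
	"fmt"
	"go/types"
)

func main() {
	fmt.Println(types.Identical(types.Typ[types.Int], types.Typ[types.Int]))      // true
	fmt.Println(types.AssignableTo(types.Typ[types.Int], types.Typ[types.Int64])) // false: distinct types

	// Every type implements the empty interface.
	empty := types.NewInterfaceType(nil, nil)
	empty.Complete()
	fmt.Println(types.Implements(types.Typ[types.String], empty)) // true
}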
diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go
index a4ce86f..52f0009 100644
--- a/src/go/types/api_test.go
+++ b/src/go/types/api_test.go
@@ -11,11 +11,13 @@
 	"go/importer"
 	"go/parser"
 	"go/token"
+	"internal/goversion"
 	"internal/testenv"
 	"reflect"
 	"regexp"
 	"sort"
 	"strings"
+	"sync"
 	"testing"
 
 	. "go/types"
@@ -25,7 +27,7 @@
 var nopos token.Pos
 
 func mustParse(fset *token.FileSet, src string) *ast.File {
-	f, err := parser.ParseFile(fset, pkgName(src), src, 0)
+	f, err := parser.ParseFile(fset, pkgName(src), src, parser.ParseComments)
 	if err != nil {
 		panic(err) // so we don't need to pass *testing.T
 	}
@@ -959,6 +961,81 @@
 	}
 }
 
+func TestPkgNameOf(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+
+	const src = `
+package p
+
+import (
+	. "os"
+	_ "io"
+	"math"
+	"path/filepath"
+	snort "sort"
+)
+
+// avoid imported and not used errors
+var (
+	_ = Open // os.Open
+	_ = math.Sin
+	_ = filepath.Abs
+	_ = snort.Ints
+)
+`
+
+	var tests = []struct {
+		path string // path string enclosed in "'s
+		want string
+	}{
+		{`"os"`, "."},
+		{`"io"`, "_"},
+		{`"math"`, "math"},
+		{`"path/filepath"`, "filepath"},
+		{`"sort"`, "snort"},
+	}
+
+	fset := token.NewFileSet()
+	f := mustParse(fset, src)
+	info := Info{
+		Defs:      make(map[*ast.Ident]Object),
+		Implicits: make(map[ast.Node]Object),
+	}
+	var conf Config
+	conf.Importer = importer.Default()
+	_, err := conf.Check("p", fset, []*ast.File{f}, &info)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// map import paths to importDecl
+	imports := make(map[string]*ast.ImportSpec)
+	for _, s := range f.Decls[0].(*ast.GenDecl).Specs {
+		if imp, _ := s.(*ast.ImportSpec); imp != nil {
+			imports[imp.Path.Value] = imp
+		}
+	}
+
+	for _, test := range tests {
+		imp := imports[test.path]
+		if imp == nil {
+			t.Fatalf("invalid test case: import path %s not found", test.path)
+		}
+		got := info.PkgNameOf(imp)
+		if got == nil {
+			t.Fatalf("import %s: package name not found", test.path)
+		}
+		if got.Name() != test.want {
+			t.Errorf("import %s: got %s; want %s", test.path, got.Name(), test.want)
+		}
+	}
+
+	// test non-existing importDecl
+	if got := info.PkgNameOf(new(ast.ImportSpec)); got != nil {
+		t.Errorf("got %s for non-existing import declaration", got.Name())
+	}
+}
+
 func predString(tv TypeAndValue) string {
 	var buf strings.Builder
 	pred := func(b bool, s string) {
@@ -1819,12 +1896,12 @@
 type T struct{}
 var Y, _ = lib.X, X
 
-func F(){
+func F[T *U, U any](param1, param2 int) /*param1=undef*/ (res1 /*res1=undef*/, res2 int) /*param1=var:12*/ /*res1=var:12*/ /*U=typename:12*/ {
 	const pi, e = 3.1415, /*pi=undef*/ 2.71828 /*pi=const:13*/ /*e=const:13*/
 	type /*t=undef*/ t /*t=typename:14*/ *t
 	print(Y) /*Y=var:10*/
 	x, Y := Y, /*x=undef*/ /*Y=var:10*/ Pi /*x=var:16*/ /*Y=var:16*/ ; _ = x; _ = Y
-	var F = /*F=func:12*/ F /*F=var:17*/ ; _ = F
+	var F = /*F=func:12*/ F[*int, int] /*F=var:17*/ ; _ = F
 
 	var a []int
 	for i, x := range a /*i=undef*/ /*x=var:16*/ { _ = i; _ = x }
@@ -1843,6 +1920,10 @@
         	println(int)
         default /*int=var:31*/ :
         }
+
+	_ = param1
+	_ = res1
+	return
 }
 /*main=undef*/
 `
@@ -1904,8 +1985,29 @@
 
 		_, gotObj := inner.LookupParent(id.Name, id.Pos())
 		if gotObj != wantObj {
-			t.Errorf("%s: got %v, want %v",
-				fset.Position(id.Pos()), gotObj, wantObj)
+			// Print the scope tree of mainScope in case of error.
+			var printScopeTree func(indent string, s *Scope)
+			printScopeTree = func(indent string, s *Scope) {
+				t.Logf("%sscope %s %v-%v = %v",
+					indent,
+					ScopeComment(s),
+					s.Pos(),
+					s.End(),
+					s.Names())
+				for i := range s.NumChildren() {
+					printScopeTree(indent+"  ", s.Child(i))
+				}
+			}
+			printScopeTree("", mainScope)
+
+			t.Errorf("%s: Scope(%s).LookupParent(%s@%v) got %v, want %v [scopePos=%v]",
+				fset.Position(id.Pos()),
+				ScopeComment(inner),
+				id.Name,
+				id.Pos(),
+				gotObj,
+				wantObj,
+				ObjectScopePos(wantObj))
 			continue
 		}
 	}
@@ -2094,6 +2196,12 @@
 	iface.Complete()
 }
 
+func TestNewAlias_Issue65455(t *testing.T) {
+	obj := NewTypeName(nopos, nil, "A", nil)
+	alias := NewAlias(obj, Typ[Int])
+	alias.Underlying() // must not panic
+}
+
 func TestIssue15305(t *testing.T) {
 	const src = "package p; func f() int16; var _ = f(undef)"
 	fset := token.NewFileSet()
@@ -2323,6 +2431,60 @@
 	}
 }
 
+func TestInstantiateConcurrent(t *testing.T) {
+	const src = `package p
+
+type I[P any] interface {
+	m(P)
+	n() P
+}
+
+type J = I[int]
+
+type Nested[P any] *interface{b(P)}
+
+type K = Nested[string]
+`
+	pkg := mustTypecheck(src, nil, nil)
+
+	insts := []*Interface{
+		pkg.Scope().Lookup("J").Type().Underlying().(*Interface),
+		pkg.Scope().Lookup("K").Type().Underlying().(*Pointer).Elem().(*Interface),
+	}
+
+	// Use the interface instances concurrently.
+	for _, inst := range insts {
+		var (
+			counts  [2]int      // method counts
+			methods [2][]string // method strings
+		)
+		var wg sync.WaitGroup
+		for i := 0; i < 2; i++ {
+			i := i
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+
+				counts[i] = inst.NumMethods()
+				for mi := 0; mi < counts[i]; mi++ {
+					methods[i] = append(methods[i], inst.Method(mi).String())
+				}
+			}()
+		}
+		wg.Wait()
+
+		if counts[0] != counts[1] {
+			t.Errorf("mismatching method counts for %s: %d vs %d", inst, counts[0], counts[1])
+			continue
+		}
+		for i := 0; i < counts[0]; i++ {
+			if m0, m1 := methods[0][i], methods[1][i]; m0 != m1 {
+				t.Errorf("mismatching methods for %s: %s vs %s", inst, m0, m1)
+			}
+		}
+	}
+}
+
 func TestInstantiateErrors(t *testing.T) {
 	tests := []struct {
 		src    string // by convention, T must be the type being instantiated
@@ -2718,3 +2880,69 @@
 		t.Errorf("src1: unexpected error: got %v", err)
 	}
 }
+
+func TestModuleVersion(t *testing.T) {
+	// version go1.dd must be able to typecheck go1.dd.0, go1.dd.1, etc.
+	goversion := fmt.Sprintf("go1.%d", goversion.Version)
+	for _, v := range []string{
+		goversion,
+		goversion + ".0",
+		goversion + ".1",
+		goversion + ".rc",
+	} {
+		conf := Config{GoVersion: v}
+		pkg := mustTypecheck("package p", &conf, nil)
+		if pkg.GoVersion() != conf.GoVersion {
+			t.Errorf("got %s; want %s", pkg.GoVersion(), conf.GoVersion)
+		}
+	}
+}
+
+func TestFileVersions(t *testing.T) {
+	for _, test := range []struct {
+		goVersion   string
+		fileVersion string
+		wantVersion string
+	}{
+		{"", "", ""},                   // no versions specified
+		{"go1.19", "", "go1.19"},       // module version specified
+		{"", "go1.20", ""},             // file upgrade ignored
+		{"go1.19", "go1.20", "go1.20"}, // file upgrade permitted
+		{"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted
+		{"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21)
+
+		// versions containing release numbers
+		// (file versions containing release numbers are considered invalid)
+		{"go1.19.0", "", "go1.19.0"},         // no file version specified
+		{"go1.20", "go1.20.1", "go1.20"},     // file upgrade ignored
+		{"go1.20.1", "go1.20", "go1.20.1"},   // file upgrade ignored
+		{"go1.20.1", "go1.21", "go1.21"},     // file upgrade permitted
+		{"go1.20.1", "go1.19", "go1.20.1"},   // file downgrade not permitted
+		{"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version)
+		{"go1.21.1", "go1.19", "go1.19"},     // file downgrade permitted (module version is >= go1.21)
+	} {
+		var src string
+		if test.fileVersion != "" {
+			src = "//go:build " + test.fileVersion + "\n"
+		}
+		src += "package p"
+
+		conf := Config{GoVersion: test.goVersion}
+		versions := make(map[*ast.File]string)
+		var info Info
+		info.FileVersions = versions
+		mustTypecheck(src, &conf, &info)
+
+		n := 0
+		for _, v := range versions {
+			want := test.wantVersion
+			if v != want {
+				t.Errorf("%q: unexpected file version: got %q, want %q", src, v, want)
+			}
+			n++
+		}
+		if n != 1 {
+			t.Errorf("%q: incorrect number of map entries: got %d", src, n)
+		}
+	}
+}
diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go
index 1ea5114..ac9e7bd 100644
--- a/src/go/types/assignments.go
+++ b/src/go/types/assignments.go
@@ -101,7 +101,7 @@
 }
 
 func (check *Checker) initConst(lhs *Const, x *operand) {
-	if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
+	if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) {
 		if lhs.typ == nil {
 			lhs.typ = Typ[Invalid]
 		}
@@ -136,7 +136,7 @@
 // or Typ[Invalid] in case of an error.
 // If the initialization check fails, x.mode is set to invalid.
 func (check *Checker) initVar(lhs *Var, x *operand, context string) {
-	if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
+	if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) {
 		if lhs.typ == nil {
 			lhs.typ = Typ[Invalid]
 		}
@@ -201,7 +201,7 @@
 		v.used = v_used // restore v.used
 	}
 
-	if x.mode == invalid || x.typ == Typ[Invalid] {
+	if x.mode == invalid || !isValid(x.typ) {
 		return Typ[Invalid]
 	}
 
@@ -231,9 +231,9 @@
 // assignVar checks the assignment lhs = rhs (if x == nil), or lhs = x (if x != nil).
 // If x != nil, it must be the evaluation of rhs (and rhs will be ignored).
 // If the assignment check fails and x != nil, x.mode is set to invalid.
-func (check *Checker) assignVar(lhs, rhs ast.Expr, x *operand) {
+func (check *Checker) assignVar(lhs, rhs ast.Expr, x *operand, context string) {
 	T := check.lhsVar(lhs) // nil if lhs is _
-	if T == Typ[Invalid] {
+	if !isValid(T) {
 		if x != nil {
 			x.mode = invalid
 		} else {
@@ -243,12 +243,18 @@
 	}
 
 	if x == nil {
+		var target *target
+		// avoid calling ExprString if not needed
+		if T != nil {
+			if _, ok := under(T).(*Signature); ok {
+				target = newTarget(T, ExprString(lhs))
+			}
+		}
 		x = new(operand)
-		check.expr(T, x, rhs)
+		check.expr(target, x, rhs)
 	}
 
-	context := "assignment"
-	if T == nil {
+	if T == nil && context == "assignment" {
 		context = "assignment to _ identifier"
 	}
 	check.assignment(x, T, context)
@@ -281,7 +287,7 @@
 		switch {
 		case t == nil:
 			fallthrough // should not happen but be cautious
-		case t == Typ[Invalid]:
+		case !isValid(t):
 			s = "unknown type"
 		case isUntyped(t):
 			if isNumeric(t) {
@@ -368,7 +374,11 @@
 	if l == r && !isCall {
 		var x operand
 		for i, lhs := range lhs {
-			check.expr(lhs.typ, &x, orig_rhs[i])
+			desc := lhs.name
+			if returnStmt != nil && desc == "" {
+				desc = "result variable"
+			}
+			check.expr(newTarget(lhs.typ, desc), &x, orig_rhs[i])
 			check.initVar(lhs, &x, context)
 		}
 		return
@@ -442,7 +452,7 @@
 	// each value can be assigned to its corresponding variable.
 	if l == r && !isCall {
 		for i, lhs := range lhs {
-			check.assignVar(lhs, orig_rhs[i], nil)
+			check.assignVar(lhs, orig_rhs[i], nil, "assignment")
 		}
 		return
 	}
@@ -463,7 +473,7 @@
 	r = len(rhs)
 	if l == r {
 		for i, lhs := range lhs {
-			check.assignVar(lhs, nil, rhs[i])
+			check.assignVar(lhs, nil, rhs[i], "assignment")
 		}
 		// Only record comma-ok expression if both assignments succeeded
 		// (go.dev/issue/59371).
diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go
index 11eacef..9015736 100644
--- a/src/go/types/builtins.go
+++ b/src/go/types/builtins.go
@@ -205,7 +205,7 @@
 
 		if mode == invalid {
 			// avoid error if underlying type is invalid
-			if under(x.typ) != Typ[Invalid] {
+			if isValid(under(x.typ)) {
 				code := InvalidCap
 				if id == _Len {
 					code = InvalidLen
@@ -489,7 +489,7 @@
 		// (no argument evaluated yet)
 		arg0 := argList[0]
 		T := check.varType(arg0)
-		if T == Typ[Invalid] {
+		if !isValid(T) {
 			return
 		}
 
@@ -599,7 +599,7 @@
 		// new(T)
 		// (no argument evaluated yet)
 		T := check.varType(argList[0])
-		if T == Typ[Invalid] {
+		if !isValid(T) {
 			return
 		}
 
@@ -798,7 +798,7 @@
 		// unsafe.Slice(ptr *T, len IntegerType) []T
 		check.verifyVersionf(call.Fun, go1_17, "unsafe.Slice")
 
-		ptr, _ := under(x.typ).(*Pointer) // TODO(gri) should this be coreType rather than under?
+		ptr, _ := coreType(x.typ).(*Pointer)
 		if ptr == nil {
 			check.errorf(x, InvalidUnsafeSlice, invalidArg+"%s is not a pointer", x)
 			return
@@ -819,7 +819,7 @@
 		// unsafe.SliceData(slice []T) *T
 		check.verifyVersionf(call.Fun, go1_20, "unsafe.SliceData")
 
-		slice, _ := under(x.typ).(*Slice) // TODO(gri) should this be coreType rather than under?
+		slice, _ := coreType(x.typ).(*Slice)
 		if slice == nil {
 			check.errorf(x, InvalidUnsafeSliceData, invalidArg+"%s is not a slice", x)
 			return
@@ -922,7 +922,7 @@
 	// Cycles are only possible through *Named types.
 	// The seen map is used to detect cycles and track
 	// the results of previously seen types.
-	if named, _ := t.(*Named); named != nil {
+	if named := asNamed(t); named != nil {
 		if v, ok := seen[named]; ok {
 			return v
 		}
@@ -953,7 +953,7 @@
 }
 
 // applyTypeFunc applies f to x. If x is a type parameter,
-// the result is a type parameter constrained by an new
+// the result is a type parameter constrained by a new
 // interface bound. The type bounds for that interface
 // are computed by applying f to each of the type bounds
 // of x. If any of these applications of f return nil,
@@ -1034,13 +1034,4 @@
 	return typ
 }
 
-// unparen returns e with any enclosing parentheses stripped.
-func unparen(e ast.Expr) ast.Expr {
-	for {
-		p, ok := e.(*ast.ParenExpr)
-		if !ok {
-			return e
-		}
-		e = p.X
-	}
-}
+func unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) }
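
For context, the unsafe.Slice builtin checked above converts a pointer and a length into a slice; the switch from under to coreType additionally allows the pointer argument's type to be a type parameter whose core type is a pointer. A basic, non-generic usage sketch (not part of the patch):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	a := [4]int{1, 2, 3, 4}
	// unsafe.Slice(ptr, len) yields a []int of length 4 backed by the array.
	s := unsafe.Slice(&a[0], len(a))
	fmt.Println(s) // [1 2 3 4]
}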
diff --git a/src/go/types/call.go b/src/go/types/call.go
index f00290a..c7de3bd 100644
--- a/src/go/types/call.go
+++ b/src/go/types/call.go
@@ -18,8 +18,8 @@
 // funcInst type-checks a function instantiation.
 // The incoming x must be a generic function.
 // If ix != nil, it provides some or all of the type arguments (ix.Indices).
-// If target type tsig != nil, the signature may be used to infer missing type
-// arguments of x, if any. At least one of tsig or inst must be provided.
+// If T != nil, it may be used to infer missing type arguments of x, if any.
+// At least one of T or ix must be provided.
 //
 // There are two modes of operation:
 //
@@ -34,8 +34,8 @@
 //
 // If an error (other than a version error) occurs in any case, it is reported
 // and x.mode is set to invalid.
-func (check *Checker) funcInst(tsig *Signature, pos token.Pos, x *operand, ix *typeparams.IndexExpr, infer bool) ([]Type, []ast.Expr) {
-	assert(tsig != nil || ix != nil)
+func (check *Checker) funcInst(T *target, pos token.Pos, x *operand, ix *typeparams.IndexExpr, infer bool) ([]Type, []ast.Expr) {
+	assert(T != nil || ix != nil)
 
 	var instErrPos positioner
 	if ix != nil {
@@ -89,7 +89,8 @@
 		//
 		var args []*operand
 		var params []*Var
-		if tsig != nil && sig.tparams != nil {
+		var reverse bool
+		if T != nil && sig.tparams != nil {
 			if !versionErr && !check.allowVersion(check.pkg, instErrPos, go1_21) {
 				if ix != nil {
 					check.versionErrorf(instErrPos, go1_21, "partially instantiated function in assignment")
@@ -102,16 +103,17 @@
 			// The type of the argument operand is tsig, which is the type of the LHS in an assignment
 			// or the result type in a return statement. Create a pseudo-expression for that operand
 			// that makes sense when reported in error messages from infer, below.
-			expr := ast.NewIdent("variable in assignment")
+			expr := ast.NewIdent(T.desc)
 			expr.NamePos = x.Pos() // correct position
-			args = []*operand{{mode: value, expr: expr, typ: tsig}}
+			args = []*operand{{mode: value, expr: expr, typ: T.sig}}
+			reverse = true
 		}
 
 		// Rename type parameters to avoid problems with recursive instantiations.
 		// Note that NewTuple(params...) below is (*Tuple)(nil) if len(params) == 0, as desired.
 		tparams, params2 := check.renameTParams(pos, sig.TypeParams().list(), NewTuple(params...))
 
-		targs = check.infer(atPos(pos), tparams, targs, params2.(*Tuple), args)
+		targs = check.infer(atPos(pos), tparams, targs, params2.(*Tuple), args, reverse)
 		if targs == nil {
 			// error was already reported
 			x.mode = invalid
@@ -577,8 +579,7 @@
 				// Before we change the type (type parameter renaming, below), make
 				// a clone of it as otherwise we implicitly modify the object's type
 				// (go.dev/issues/63260).
-				clone := *asig
-				asig = &clone
+				asig = clone(asig)
 				// Rename type parameters for cases like f(g, g); this gives each
 				// generic function argument a unique type identity (go.dev/issues/59956).
 				// TODO(gri) Consider only doing this if a function argument appears
@@ -611,7 +612,7 @@
 
 	// infer missing type arguments of callee and function arguments
 	if len(tparams) > 0 {
-		targs = check.infer(call, tparams, targs, sigParams, args)
+		targs = check.infer(call, tparams, targs, sigParams, args, false)
 		if targs == nil {
 			// TODO(gri) If infer inferred the first targs[:n], consider instantiating
 			//           the call signature for better error messages/gopls behavior.
@@ -668,7 +669,7 @@
 	"_Cmacro_", // function to evaluate the expanded expression
 }
 
-func (check *Checker) selector(x *operand, e *ast.SelectorExpr, def *Named, wantType bool) {
+func (check *Checker) selector(x *operand, e *ast.SelectorExpr, def *TypeName, wantType bool) {
 	// these must be declared before the "goto Error" statements
 	var (
 		obj      Object
@@ -769,8 +770,8 @@
 	switch x.mode {
 	case typexpr:
 		// don't crash for "type T T.x" (was go.dev/issue/51509)
-		if def != nil && x.typ == def {
-			check.cycleError([]Object{def.obj})
+		if def != nil && def.typ == x.typ {
+			check.cycleError([]Object{def})
 			goto Error
 		}
 	case builtin:
@@ -803,7 +804,7 @@
 	obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
 	if obj == nil {
 		// Don't report another error if the underlying type was invalid (go.dev/issue/49541).
-		if under(x.typ) == Typ[Invalid] {
+		if !isValid(under(x.typ)) {
 			goto Error
 		}
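
The target-based inference wired through funcInst above corresponds to the Go 1.21 language feature in which missing type arguments of a generic function may be inferred from the assignment's target type; an illustrative sketch (not part of the patch, requires a go1.21 or newer language version):

package main

import "fmt"

func pick[T any](x, y T) T { return x }

func main() {
	// The type argument for pick is inferred from the target type of the assignment.
	var f func(int, int) int = pick
	fmt.Println(f(1, 2)) // 1
}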
 
diff --git a/src/go/types/check.go b/src/go/types/check.go
index 3b0f5e4..85fd074 100644
--- a/src/go/types/check.go
+++ b/src/go/types/check.go
@@ -12,8 +12,9 @@
 	"go/ast"
 	"go/constant"
 	"go/token"
-	"internal/goversion"
+	"internal/godebug"
 	. "internal/types/errors"
+	"strings"
 )
 
 // nopos indicates an unknown position
@@ -22,6 +23,9 @@
 // debugging/development support
 const debug = false // leave on during development
 
+// gotypesalias controls the use of Alias types
+var gotypesalias = godebug.New("gotypesalias")
+
 // exprInfo stores information about an untyped expression.
 type exprInfo struct {
 	isLhs bool // expression is lhs operand of a shift with delayed type-check
@@ -90,16 +94,22 @@
 }
 
 // A Checker maintains the state of the type checker.
-// It must be created with NewChecker.
+// It must be created with [NewChecker].
 type Checker struct {
 	// package information
 	// (initialized by NewChecker, valid for the life-time of checker)
+
+	// If EnableAlias is set, alias declarations produce an Alias type.
+	// Otherwise the alias information is only in the type name, which
+	// points directly to the actual (aliased) type.
+	enableAlias bool
+
 	conf *Config
 	ctxt *Context // context for de-duplicating instances
 	fset *token.FileSet
 	pkg  *Package
 	*Info
-	version version                // accepted language version
+	version goVersion              // accepted language version
 	nextID  uint64                 // unique Id for type parameters (first valid Id is 1)
 	objMap  map[Object]*declInfo   // maps package-level objects and (non-interface) methods to declaration info
 	impMap  map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
@@ -119,7 +129,7 @@
 	// (initialized by Files, valid only for the duration of check.Files;
 	// maps and lists are allocated on demand)
 	files         []*ast.File               // package files
-	posVers       map[*token.File]version   // Pos -> Go version mapping
+	versions      map[*ast.File]string      // maps files to version strings (each file has an entry)
 	imports       []*PkgName                // list of imported packages
 	dotImportMap  map[dotImportKey]*PkgName // maps dot-imported objects to the package they were dot-imported through
 	recvTParamMap map[*ast.Ident]*TypeParam // maps blank receiver type parameters to their type
@@ -154,9 +164,14 @@
 	from.addDep(to)
 }
 
+// Note: The following three alias-related functions are only used
+//       when Alias types are not enabled.
+
 // brokenAlias records that alias doesn't have a determined type yet.
 // It also sets alias.typ to Typ[Invalid].
+// Not used if check.enableAlias is set.
 func (check *Checker) brokenAlias(alias *TypeName) {
+	assert(!check.enableAlias)
 	if check.brokenAliases == nil {
 		check.brokenAliases = make(map[*TypeName]bool)
 	}
@@ -166,13 +181,15 @@
 
 // validAlias records that alias has the valid type typ (possibly Typ[Invalid]).
 func (check *Checker) validAlias(alias *TypeName, typ Type) {
+	assert(!check.enableAlias)
 	delete(check.brokenAliases, alias)
 	alias.typ = typ
 }
 
 // isBrokenAlias reports whether alias doesn't have a determined type yet.
 func (check *Checker) isBrokenAlias(alias *TypeName) bool {
-	return alias.typ == Typ[Invalid] && check.brokenAliases[alias]
+	assert(!check.enableAlias)
+	return check.brokenAliases[alias]
 }
 
 func (check *Checker) rememberUntyped(e ast.Expr, lhs bool, mode operandMode, typ *Basic, val constant.Value) {
@@ -221,8 +238,8 @@
 	check.cleaners = append(check.cleaners, c)
 }
 
-// NewChecker returns a new Checker instance for a given package.
-// Package files may be added incrementally via checker.Files.
+// NewChecker returns a new [Checker] instance for a given package.
+// [Package] files may be added incrementally via checker.Files.
 func NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Checker {
 	// make sure we have a configuration
 	if conf == nil {
@@ -241,13 +258,15 @@
 	// (previously, pkg.goVersion was mutated here: go.dev/issue/61212)
 
 	return &Checker{
-		conf:   conf,
-		ctxt:   conf.Context,
-		fset:   fset,
-		pkg:    pkg,
-		Info:   info,
-		objMap: make(map[Object]*declInfo),
-		impMap: make(map[importKey]*Package),
+		enableAlias: gotypesalias.Value() == "1",
+		conf:        conf,
+		ctxt:        conf.Context,
+		fset:        fset,
+		pkg:         pkg,
+		Info:        info,
+		version:     asGoVersion(conf.GoVersion),
+		objMap:      make(map[Object]*declInfo),
+		impMap:      make(map[importKey]*Package),
 	}
 }
 
@@ -287,40 +306,52 @@
 		}
 	}
 
-	for _, file := range check.files {
-		v, _ := parseGoVersion(file.GoVersion)
-		if v.major > 0 {
-			if v.equal(check.version) {
-				continue
-			}
-			// Go 1.21 introduced the feature of setting the go.mod
-			// go line to an early version of Go and allowing //go:build lines
-			// to “upgrade” the Go version in a given file.
-			// We can do that backwards compatibly.
-			// Go 1.21 also introduced the feature of allowing //go:build lines
-			// to “downgrade” the Go version in a given file.
-			// That can't be done compatibly in general, since before the
-			// build lines were ignored and code got the module's Go version.
-			// To work around this, downgrades are only allowed when the
-			// module's Go version is Go 1.21 or later.
-			// If there is no check.version, then we don't really know what Go version to apply.
-			// Legacy tools may do this, and they historically have accepted everything.
-			// Preserve that behavior by ignoring //go:build constraints entirely in that case.
-			if (v.before(check.version) && check.version.before(version{1, 21})) || check.version.equal(version{0, 0}) {
-				continue
-			}
-			if check.posVers == nil {
-				check.posVers = make(map[*token.File]version)
-			}
-			check.posVers[check.fset.File(file.FileStart)] = v
-		}
+	// reuse Info.FileVersions if provided
+	versions := check.Info.FileVersions
+	if versions == nil {
+		versions = make(map[*ast.File]string)
 	}
-}
+	check.versions = versions
 
-// A posVers records that the file starting at pos declares the Go version vers.
-type posVers struct {
-	pos  token.Pos
-	vers version
+	pkgVersionOk := check.version.isValid()
+	downgradeOk := check.version.cmp(go1_21) >= 0
+
+	// determine Go version for each file
+	for _, file := range check.files {
+		// use unaltered Config.GoVersion by default
+		// (This version string may contain dot-release numbers as in go1.20.1,
+		// unlike file versions which are Go language versions only, if valid.)
+		v := check.conf.GoVersion
+		// use the file version, if applicable
+		// (file versions are either the empty string or of the form go1.dd)
+		if pkgVersionOk {
+			fileVersion := asGoVersion(file.GoVersion)
+			if fileVersion.isValid() {
+				cmp := fileVersion.cmp(check.version)
+				// Go 1.21 introduced the feature of setting the go.mod
+				// go line to an early version of Go and allowing //go:build lines
+				// to “upgrade” (cmp > 0) the Go version in a given file.
+				// We can do that backwards compatibly.
+				//
+				// Go 1.21 also introduced the feature of allowing //go:build lines
+				// to “downgrade” (cmp < 0) the Go version in a given file.
+				// That can't be done compatibly in general, since before the
+				// build lines were ignored and code got the module's Go version.
+				// To work around this, downgrades are only allowed when the
+				// module's Go version is Go 1.21 or later.
+				//
+				// If there is no valid check.version, then we don't really know what
+				// Go version to apply.
+				// Legacy tools may do this, and they historically have accepted everything.
+				// Preserve that behavior by ignoring //go:build constraints entirely in that
+				// case (!pkgVersionOk).
+				if cmp > 0 || cmp < 0 && downgradeOk {
+					v = file.GoVersion
+				}
+			}
+		}
+		versions[file] = v
+	}
 }
 
 // A bailout panic is used for early termination.
@@ -350,11 +381,8 @@
 		return nil
 	}
 
-	check.version, err = parseGoVersion(check.conf.GoVersion)
-	if err != nil {
-		return err
-	}
-	if check.version.after(version{1, goversion.Version}) {
+	// Note: NewChecker doesn't return an error, so we need to check the version here.
+	if check.version.cmp(go_current) > 0 {
 		return fmt.Errorf("package requires newer Go version %v", check.version)
 	}
 	if check.conf.FakeImportC && check.conf.go115UsesCgo {
@@ -505,7 +533,7 @@
 		assert(val != nil)
 		// We check allBasic(typ, IsConstType) here as constant expressions may be
 		// recorded as type parameters.
-		assert(typ == Typ[Invalid] || allBasic(typ, IsConstType))
+		assert(!isValid(typ) || allBasic(typ, IsConstType))
 	}
 	if m := check.Types; m != nil {
 		m[x] = TypeAndValue{mode, typ, val}
@@ -591,7 +619,12 @@
 	case *ast.SelectorExpr:
 		return x.Sel
 	}
-	panic("instantiated ident not found")
+
+	// extra debugging of #63933
+	var buf strings.Builder
+	buf.WriteString("instantiated ident not found; please report: ")
+	ast.Fprint(&buf, token.NewFileSet(), expr, ast.NotNilFilter)
+	panic(buf.String())
 }
 
 func (check *Checker) recordDef(id *ast.Ident, obj Object) {
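
The per-file version logic above feeds Info.FileVersions. The sketch below (illustrative, not part of the patch) type-checks a file whose //go:build line upgrades the module's Go version and reads the recorded file version; parser.ParseComments is used so the build constraint is available to the type checker.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = "//go:build go1.21\n\npackage p\n"

	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	info := &types.Info{FileVersions: make(map[*ast.File]string)}
	conf := types.Config{GoVersion: "go1.19"} // module version
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}

	// The //go:build line upgrades this file's version past the module version.
	fmt.Println(info.FileVersions[file]) // expected: go1.21
}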
diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go
index 9093a46..fc9723a 100644
--- a/src/go/types/check_test.go
+++ b/src/go/types/check_test.go
@@ -38,12 +38,14 @@
 	"go/parser"
 	"go/scanner"
 	"go/token"
+	"internal/buildcfg"
 	"internal/testenv"
 	"internal/types/errors"
 	"os"
 	"path/filepath"
 	"reflect"
 	"regexp"
+	"runtime"
 	"strconv"
 	"strings"
 	"testing"
@@ -122,32 +124,35 @@
 
 // testFiles type-checks the package consisting of the given files, and
 // compares the resulting errors with the ERROR annotations in the source.
+// Except for manual tests, each package is type-checked twice, once without
+// use of Alias types, and once with Alias types.
 //
 // The srcs slice contains the file content for the files named in the
-// filenames slice. The manual parameter specifies whether this is a 'manual'
-// test.
+// filenames slice. The manual parameter specifies whether this is a
+// 'manual' test.
 //
 // If provided, opts may be used to mutate the Config before type-checking.
 func testFiles(t *testing.T, filenames []string, srcs [][]byte, manual bool, opts ...func(*Config)) {
+	// Alias types are disabled by default
+	testFilesImpl(t, filenames, srcs, manual, opts...)
+	if !manual {
+		t.Setenv("GODEBUG", "gotypesalias=1")
+		testFilesImpl(t, filenames, srcs, manual, opts...)
+	}
+}
+
+func testFilesImpl(t *testing.T, filenames []string, srcs [][]byte, manual bool, opts ...func(*Config)) {
 	if len(filenames) == 0 {
 		t.Fatal("no source files")
 	}
 
-	var conf Config
-	flags := flag.NewFlagSet("", flag.PanicOnError)
-	flags.StringVar(&conf.GoVersion, "lang", "", "")
-	flags.BoolVar(&conf.FakeImportC, "fakeImportC", false, "")
-	if err := parseFlags(srcs[0], flags); err != nil {
-		t.Fatal(err)
-	}
-
+	// parse files
 	files, errlist := parseFiles(t, filenames, srcs, parser.AllErrors)
-
 	pkgName := "<no package>"
 	if len(files) > 0 {
 		pkgName = files[0].Name.Name
 	}
-
 	listErrors := manual && !*verifyErrors
 	if listErrors && len(errlist) > 0 {
 		t.Errorf("--- %s:", pkgName)
@@ -156,7 +161,8 @@
 		}
 	}
 
-	// typecheck and collect typechecker errors
+	// set up typechecker
+	var conf Config
 	*boolFieldAddr(&conf, "_Trace") = manual && testing.Verbose()
 	conf.Importer = importer.Default()
 	conf.Error = func(err error) {
@@ -174,12 +180,51 @@
 		}
 	}
 
+	// apply custom configuration
 	for _, opt := range opts {
 		opt(&conf)
 	}
 
-	conf.Check(pkgName, fset, files, nil)
+	// apply flag setting (overrides custom configuration)
+	var goexperiment, gotypesalias string
+	flags := flag.NewFlagSet("", flag.PanicOnError)
+	flags.StringVar(&conf.GoVersion, "lang", "", "")
+	flags.StringVar(&goexperiment, "goexperiment", "", "")
+	flags.BoolVar(&conf.FakeImportC, "fakeImportC", false, "")
+	flags.StringVar(&gotypesalias, "gotypesalias", "", "")
+	if err := parseFlags(srcs[0], flags); err != nil {
+		t.Fatal(err)
+	}
 
+	exp, err := buildcfg.ParseGOEXPERIMENT(runtime.GOOS, runtime.GOARCH, goexperiment)
+	if err != nil {
+		t.Fatal(err)
+	}
+	old := buildcfg.Experiment
+	defer func() {
+		buildcfg.Experiment = old
+	}()
+	buildcfg.Experiment = *exp
+
+	// By default, gotypesalias is not set.
+	if gotypesalias != "" {
+		t.Setenv("GODEBUG", "gotypesalias="+gotypesalias)
+	}
+
+	// Provide Config.Info with all maps so that info recording is tested.
+	info := Info{
+		Types:        make(map[ast.Expr]TypeAndValue),
+		Instances:    make(map[*ast.Ident]Instance),
+		Defs:         make(map[*ast.Ident]Object),
+		Uses:         make(map[*ast.Ident]Object),
+		Implicits:    make(map[ast.Node]Object),
+		Selections:   make(map[*ast.SelectorExpr]*Selection),
+		Scopes:       make(map[ast.Node]*Scope),
+		FileVersions: make(map[*ast.File]string),
+	}
+
+	// typecheck
+	conf.Check(pkgName, fset, files, &info)
 	if listErrors {
 		return
 	}
@@ -373,6 +418,12 @@
 }
 
 func TestCheck(t *testing.T) {
+	old := buildcfg.Experiment.RangeFunc
+	defer func() {
+		buildcfg.Experiment.RangeFunc = old
+	}()
+	buildcfg.Experiment.RangeFunc = true
+
 	DefPredeclaredTestFuncs()
 	testDirFiles(t, "../../internal/types/testdata/check", false)
 }
diff --git a/src/go/types/context.go b/src/go/types/context.go
index 56368e1..ce9bbf3 100644
--- a/src/go/types/context.go
+++ b/src/go/types/context.go
@@ -81,7 +81,7 @@
 		h.typeList(targs)
 	}
 
-	return strings.Replace(buf.String(), " ", "#", -1) // ReplaceAll is not available in Go1.4
+	return strings.ReplaceAll(buf.String(), " ", "#")
 }
 
 // lookup returns an existing instantiation of orig with targs, if it exists.
diff --git a/src/go/types/conversions.go b/src/go/types/conversions.go
index 2fa3f92..2be17ee 100644
--- a/src/go/types/conversions.go
+++ b/src/go/types/conversions.go
@@ -42,6 +42,14 @@
 	case constArg && isConstType(T):
 		// constant conversion
 		ok = constConvertibleTo(T, &x.val)
+		// A conversion from an integer constant to an integer type
+		// can only fail if there's overflow. Give a concise error.
+		// (go.dev/issue/63563)
+		if !ok && isInteger(x.typ) && isInteger(T) {
+			check.errorf(x, InvalidConversion, "constant %s overflows %s", x.val, T)
+			x.mode = invalid
+			return
+		}
 	case constArg && isTypeParam(T):
 		// x is convertible to T if it is convertible
 		// to each specific type in the type set of T.
@@ -58,7 +66,12 @@
 				return true
 			}
 			if !constConvertibleTo(u, nil) {
-				cause = check.sprintf("cannot convert %s to type %s (in %s)", x, u, T)
+				if isInteger(x.typ) && isInteger(u) {
+					// see comment above on constant conversion
+					cause = check.sprintf("constant %s overflows %s (in %s)", x.val, u, T)
+				} else {
+					cause = check.sprintf("cannot convert %s to type %s (in %s)", x, u, T)
+				}
 				return false
 			}
 			return true
diff --git a/src/go/types/decl.go b/src/go/types/decl.go
index af8ec84..9f8c44a 100644
--- a/src/go/types/decl.go
+++ b/src/go/types/decl.go
@@ -53,7 +53,7 @@
 
 // objDecl type-checks the declaration of obj in its respective (file) environment.
 // For the meaning of def, see Checker.definedType, in typexpr.go.
-func (check *Checker) objDecl(obj Object, def *Named) {
+func (check *Checker) objDecl(obj Object, def *TypeName) {
 	if check.conf._Trace && obj.Type() == nil {
 		if check.indent == 0 {
 			fmt.Println() // empty line between top-level objects for readability
@@ -249,10 +249,14 @@
 			// the syntactic information. We should consider storing
 			// this information explicitly in the object.
 			var alias bool
-			if d := check.objMap[obj]; d != nil {
-				alias = d.tdecl.Assign.IsValid() // package-level object
+			if check.enableAlias {
+				alias = obj.IsAlias()
 			} else {
-				alias = obj.IsAlias() // function local object
+				if d := check.objMap[obj]; d != nil {
+					alias = d.tdecl.Assign.IsValid() // package-level object
+				} else {
+					alias = obj.IsAlias() // function local object
+				}
 			}
 			if !alias {
 				ndef++
@@ -320,7 +324,11 @@
 	// If obj is a type alias, mark it as valid (not broken) in order to avoid follow-on errors.
 	tname, _ := obj.(*TypeName)
 	if tname != nil && tname.IsAlias() {
-		check.validAlias(tname, Typ[Invalid])
+		// If we use Alias nodes, it is initialized with Typ[Invalid].
+		// TODO(gri) Adjust this code if we initialize with nil.
+		if !check.enableAlias {
+			check.validAlias(tname, Typ[Invalid])
+		}
 	}
 
 	// report a more concise error for self references
@@ -456,7 +464,7 @@
 		if !isConstType(t) {
 			// don't report an error if the type is an invalid C (defined) type
 			// (go.dev/issue/22090)
-			if under(t) != Typ[Invalid] {
+			if isValid(under(t)) {
 				check.errorf(typ, InvalidConstType, "invalid constant type %s", t)
 			}
 			obj.typ = Typ[Invalid]
@@ -510,7 +518,7 @@
 	if lhs == nil || len(lhs) == 1 {
 		assert(lhs == nil || lhs[0] == obj)
 		var x operand
-		check.expr(obj.typ, &x, init)
+		check.expr(newTarget(obj.typ, obj.name), &x, init)
 		check.initVar(obj, &x, "variable declaration")
 		return
 	}
@@ -544,7 +552,7 @@
 
 // isImportedConstraint reports whether typ is an imported type constraint.
 func (check *Checker) isImportedConstraint(typ Type) bool {
-	named, _ := typ.(*Named)
+	named := asNamed(typ)
 	if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil {
 		return false
 	}
@@ -552,38 +560,50 @@
 	return u != nil && !u.IsMethodSet()
 }
 
-func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *Named) {
+func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *TypeName) {
 	assert(obj.typ == nil)
 
 	var rhs Type
 	check.later(func() {
-		if t, _ := obj.typ.(*Named); t != nil { // type may be invalid
+		if t := asNamed(obj.typ); t != nil { // type may be invalid
 			check.validType(t)
 		}
 		// If typ is local, an error was already reported where typ is specified/defined.
 		_ = check.isImportedConstraint(rhs) && check.verifyVersionf(tdecl.Type, go1_18, "using type constraint %s", rhs)
 	}).describef(obj, "validType(%s)", obj.Name())
 
-	alias := tdecl.Assign.IsValid()
-	if alias && tdecl.TypeParams.NumFields() != 0 {
+	aliasDecl := tdecl.Assign.IsValid()
+	if aliasDecl && tdecl.TypeParams.NumFields() != 0 {
 		// The parser will ensure this but we may still get an invalid AST.
 		// Complain and continue as regular type definition.
 		check.error(atPos(tdecl.Assign), BadDecl, "generic type cannot be alias")
-		alias = false
+		aliasDecl = false
 	}
 
 	// alias declaration
-	if alias {
+	if aliasDecl {
 		check.verifyVersionf(atPos(tdecl.Assign), go1_9, "type aliases")
-		check.brokenAlias(obj)
-		rhs = check.typ(tdecl.Type)
-		check.validAlias(obj, rhs)
+		if check.enableAlias {
+			// TODO(gri) Should be able to use nil instead of Typ[Invalid] to mark
+			//           the alias as incomplete. Currently this causes problems
+			//           with certain cycles. Investigate.
+			alias := check.newAlias(obj, Typ[Invalid])
+			setDefType(def, alias)
+			rhs = check.definedType(tdecl.Type, obj)
+			assert(rhs != nil)
+			alias.fromRHS = rhs
+			Unalias(alias) // resolve alias.actual
+		} else {
+			check.brokenAlias(obj)
+			rhs = check.typ(tdecl.Type)
+			check.validAlias(obj, rhs)
+		}
 		return
 	}
 
 	// type definition or generic type declaration
 	named := check.newNamed(obj, nil, nil)
-	def.setUnderlying(named)
+	setDefType(def, named)
 
 	if tdecl.TypeParams != nil {
 		check.openScope(tdecl, "type parameters")
@@ -592,7 +612,7 @@
 	}
 
 	// determine underlying type of named
-	rhs = check.definedType(tdecl.Type, named)
+	rhs = check.definedType(tdecl.Type, obj)
 	assert(rhs != nil)
 	named.fromRHS = rhs
 
@@ -618,8 +638,9 @@
 	// Declare type parameters up-front, with empty interface as type bound.
 	// The scope of type parameters starts at the beginning of the type parameter
 	// list (so we can have mutually recursive parameterized interfaces).
+	scopePos := list.Pos()
 	for _, f := range list.List {
-		tparams = check.declareTypeParams(tparams, f.Names)
+		tparams = check.declareTypeParams(tparams, f.Names, scopePos)
 	}
 
 	// Set the type parameters before collecting the type constraints because
@@ -688,7 +709,7 @@
 	return check.typ(x)
 }
 
-func (check *Checker) declareTypeParams(tparams []*TypeParam, names []*ast.Ident) []*TypeParam {
+func (check *Checker) declareTypeParams(tparams []*TypeParam, names []*ast.Ident, scopePos token.Pos) []*TypeParam {
 	// Use Typ[Invalid] for the type constraint to ensure that a type
 	// is present even if the actual constraint has not been assigned
 	// yet.
@@ -697,8 +718,8 @@
 	//           are not properly set yet.
 	for _, name := range names {
 		tname := NewTypeName(name.Pos(), check.pkg, name.Name, nil)
-		tpar := check.newTypeParam(tname, Typ[Invalid])          // assigns type to tpar as a side-effect
-		check.declare(check.scope, name, tname, check.scope.pos) // TODO(gri) check scope position
+		tpar := check.newTypeParam(tname, Typ[Invalid]) // assigns type to tpar as a side-effect
+		check.declare(check.scope, name, tname, scopePos)
 		tparams = append(tparams, tpar)
 	}
 
@@ -726,7 +747,7 @@
 
 	// spec: "If the base type is a struct type, the non-blank method
 	// and field names must be distinct."
-	base, _ := obj.typ.(*Named) // shouldn't fail but be conservative
+	base := asNamed(obj.typ) // shouldn't fail but be conservative
 	if base != nil {
 		assert(base.TypeArgs().Len() == 0) // collectMethods should not be called on an instantiated type
 
@@ -815,6 +836,11 @@
 	check.funcType(sig, fdecl.Recv, fdecl.Type)
 	obj.color_ = saved
 
+	// Set the scope's extent to the complete "func (...) { ... }"
+	// so that Scope.Innermost works correctly.
+	sig.scope.pos = fdecl.Pos()
+	sig.scope.end = fdecl.End()
+
 	if fdecl.Type.TypeParams.NumFields() > 0 && fdecl.Body == nil {
 		check.softErrorf(fdecl.Name, BadDecl, "generic function is missing function body")
 	}
diff --git a/src/go/types/errorcalls_test.go b/src/go/types/errorcalls_test.go
index d76c06d..0238909 100644
--- a/src/go/types/errorcalls_test.go
+++ b/src/go/types/errorcalls_test.go
@@ -1,6 +1,6 @@
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE ast.
+// license that can be found in the LICENSE file.
 
 package types_test
 
diff --git a/src/go/types/errors.go b/src/go/types/errors.go
index 14d0383..63b0d9d 100644
--- a/src/go/types/errors.go
+++ b/src/go/types/errors.go
@@ -228,7 +228,7 @@
 		panic("no error code provided")
 	}
 
-	// If we have an URL for error codes, add a link to the first line.
+	// If we have a URL for error codes, add a link to the first line.
 	if errp.code != 0 && check.conf._ErrorURL != "" {
 		u := fmt.Sprintf(check.conf._ErrorURL, errp.code)
 		if i := strings.Index(msg, "\n"); i >= 0 {
@@ -316,7 +316,7 @@
 	check.report(err)
 }
 
-func (check *Checker) versionErrorf(at positioner, v version, format string, args ...interface{}) {
+func (check *Checker) versionErrorf(at positioner, v goVersion, format string, args ...interface{}) {
 	msg := check.sprintf(format, args...)
 	var err *error_
 	err = newErrorf(at, UnsupportedFeature, "%s requires %s or later", msg, v)
diff --git a/src/go/types/eval.go b/src/go/types/eval.go
index ff2af58..3618441 100644
--- a/src/go/types/eval.go
+++ b/src/go/types/eval.go
@@ -18,7 +18,7 @@
 // set.
 //
 // The meaning of the parameters fset, pkg, and pos is the
-// same as in CheckExpr. An error is returned if expr cannot
+// same as in [CheckExpr]. An error is returned if expr cannot
 // be parsed successfully, or the resulting expr AST cannot be
 // type-checked.
 func Eval(fset *token.FileSet, pkg *Package, pos token.Pos, expr string) (_ TypeAndValue, err error) {
@@ -36,11 +36,11 @@
 }
 
 // CheckExpr type checks the expression expr as if it had appeared at position
-// pos of package pkg. Type information about the expression is recorded in
+// pos of package pkg. [Type] information about the expression is recorded in
 // info. The expression may be an identifier denoting an uninstantiated generic
 // function or type.
 //
-// If pkg == nil, the Universe scope is used and the provided
+// If pkg == nil, the [Universe] scope is used and the provided
 // position pos is ignored. If pkg != nil, and pos is invalid,
 // the package scope is used. Otherwise, pos must belong to the
 // package.
@@ -48,7 +48,7 @@
 // An error is returned if pos is not within the package or
 // if the node cannot be type-checked.
 //
-// Note: Eval and CheckExpr should not be used instead of running Check
+// Note: [Eval] and CheckExpr should not be used instead of running Check
 // to compute types and values, but in addition to Check, as these
 // functions ignore the context in which an expression is used (e.g., an
 // assignment). Thus, top-level untyped constants will return an
diff --git a/src/go/types/eval_test.go b/src/go/types/eval_test.go
index 4e995af..dd9bd7f 100644
--- a/src/go/types/eval_test.go
+++ b/src/go/types/eval_test.go
@@ -139,7 +139,7 @@
 				/* c => , struct{c int} */
 				_ = c
 			}
-			_ = func(a, b, c int) /* c => , string */ {
+			_ = func(a, b, c int /* c => , string */) /* c => , int */ {
 				/* c => , int */
 			}
 			_ = c
diff --git a/src/go/types/expr.go b/src/go/types/expr.go
index fd776c2..8651dda 100644
--- a/src/go/types/expr.go
+++ b/src/go/types/expr.go
@@ -356,7 +356,7 @@
 // If x is a constant operand, the returned constant.Value will be the
 // representation of x in this context.
 func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, constant.Value, Code) {
-	if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] {
+	if x.mode == invalid || isTyped(x.typ) || !isValid(target) {
 		return x.typ, nil, 0
 	}
 	// x is untyped
@@ -452,7 +452,7 @@
 // If switchCase is true, the operator op is ignored.
 func (check *Checker) comparison(x, y *operand, op token.Token, switchCase bool) {
 	// Avoid spurious errors if any of the operands has an invalid type (go.dev/issue/54405).
-	if x.typ == Typ[Invalid] || y.typ == Typ[Invalid] {
+	if !isValid(x.typ) || !isValid(y.typ) {
 		x.mode = invalid
 		return
 	}
@@ -810,7 +810,7 @@
 	if !Identical(x.typ, y.typ) {
 		// only report an error if we have valid types
 		// (otherwise we had an error reported elsewhere already)
-		if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] {
+		if isValid(x.typ) && isValid(y.typ) {
 			var posn positioner = x
 			if e != nil {
 				posn = e
@@ -941,18 +941,32 @@
 	statement
 )
 
-// TODO(gri) In rawExpr below, consider using T instead of hint and
-//           some sort of "operation mode" instead of allowGeneric.
-//           May be clearer and less error-prone.
+// target represents the (signature) type and description of the LHS
+// variable of an assignment, or of a function result variable.
+type target struct {
+	sig  *Signature
+	desc string
+}
+
+// newTarget creates a new target for the given type and description.
+// The result is nil if typ is not a signature.
+func newTarget(typ Type, desc string) *target {
+	if typ != nil {
+		if sig, _ := under(typ).(*Signature); sig != nil {
+			return &target{sig, desc}
+		}
+	}
+	return nil
+}
 
 // rawExpr typechecks expression e and initializes x with the expression
 // value or type. If an error occurred, x.mode is set to invalid.
-// If a non-nil target type T is given and e is a generic function
-// or function call, T is used to infer the type arguments for e.
+// If a non-nil target T is given and e is a generic function,
+// T is used to infer the type arguments for e.
 // If hint != nil, it is the type of a composite literal element.
 // If allowGeneric is set, the operand type may be an uninstantiated
 // parameterized type or function value.
-func (check *Checker) rawExpr(T Type, x *operand, e ast.Expr, hint Type, allowGeneric bool) exprKind {
+func (check *Checker) rawExpr(T *target, x *operand, e ast.Expr, hint Type, allowGeneric bool) exprKind {
 	if check.conf._Trace {
 		check.trace(e.Pos(), "-- expr %s", e)
 		check.indent++
@@ -974,9 +988,9 @@
 }
 
 // If x is a generic type, or a generic function whose type arguments cannot be inferred
-// from a non-nil target type T, nonGeneric reports an error and invalidates x.mode and x.typ.
+// from a non-nil target T, nonGeneric reports an error and invalidates x.mode and x.typ.
 // Otherwise it leaves x alone.
-func (check *Checker) nonGeneric(T Type, x *operand) {
+func (check *Checker) nonGeneric(T *target, x *operand) {
 	if x.mode == invalid || x.mode == novalue {
 		return
 	}
@@ -989,10 +1003,8 @@
 	case *Signature:
 		if t.tparams != nil {
 			if enableReverseTypeInference && T != nil {
-				if tsig, _ := under(T).(*Signature); tsig != nil {
-					check.funcInst(tsig, x.Pos(), x, nil, true)
-					return
-				}
+				check.funcInst(T, x.Pos(), x, nil, true)
+				return
 			}
 			what = "function"
 		}
@@ -1007,7 +1019,7 @@
 // exprInternal contains the core of type checking of expressions.
 // Must only be called by rawExpr.
 // (See rawExpr for an explanation of the parameters.)
-func (check *Checker) exprInternal(T Type, x *operand, e ast.Expr, hint Type) exprKind {
+func (check *Checker) exprInternal(T *target, x *operand, e ast.Expr, hint Type) exprKind {
 	// make sure x has a valid state in case of bailout
 	// (was go.dev/issue/5770)
 	x.mode = invalid
@@ -1059,6 +1071,10 @@
 
 	case *ast.FuncLit:
 		if sig, ok := check.typ(e.Type).(*Signature); ok {
+			// Set the Scope's extent to the complete "func (...) {...}"
+			// so that Scope.Innermost works correctly.
+			sig.scope.pos = e.Pos()
+			sig.scope.end = e.End()
 			if !check.conf.IgnoreFuncBodies && e.Body != nil {
 				// Anonymous functions are considered part of the
 				// init expression/func declaration which contains
@@ -1290,7 +1306,7 @@
 				check.use(e)
 			}
 			// if utyp is invalid, an error was reported before
-			if utyp != Typ[Invalid] {
+			if isValid(utyp) {
 				check.errorf(e, InvalidLit, "invalid composite literal type %s", typ)
 				goto Error
 			}
@@ -1311,11 +1327,10 @@
 	case *ast.IndexExpr, *ast.IndexListExpr:
 		ix := typeparams.UnpackIndexExpr(e)
 		if check.indexExpr(x, ix) {
-			var tsig *Signature
-			if enableReverseTypeInference && T != nil {
-				tsig, _ = under(T).(*Signature)
+			if !enableReverseTypeInference {
+				T = nil
 			}
-			check.funcInst(tsig, e.Pos(), x, ix, true)
+			check.funcInst(T, e.Pos(), x, ix, true)
 		}
 		if x.mode == invalid {
 			goto Error
@@ -1334,7 +1349,7 @@
 		}
 		// x.(type) expressions are handled explicitly in type switches
 		if e.Type == nil {
-			// Don't use invalidAST because this can occur in the AST produced by
+			// Don't use InvalidSyntaxTree because this can occur in the AST produced by
 			// go/parser.
 			check.error(e, BadTypeKeyword, "use of .(type) outside type switch")
 			goto Error
@@ -1348,7 +1363,7 @@
 			goto Error
 		}
 		T := check.varType(e.Type)
-		if T == Typ[Invalid] {
+		if !isValid(T) {
 			goto Error
 		}
 		check.typeAssertion(e, x, T, false)
@@ -1490,11 +1505,11 @@
 }
 
 // expr typechecks expression e and initializes x with the expression value.
-// If a non-nil target type T is given and e is a generic function
-// or function call, T is used to infer the type arguments for e.
+// If a non-nil target T is given and e is a generic function or
+// a function call, T is used to infer the type arguments for e.
 // The result must be a single value.
 // If an error occurred, x.mode is set to invalid.
-func (check *Checker) expr(T Type, x *operand, e ast.Expr) {
+func (check *Checker) expr(T *target, x *operand, e ast.Expr) {
 	check.rawExpr(T, x, e, nil, false)
 	check.exclude(x, 1<<novalue|1<<builtin|1<<typexpr)
 	check.singleValue(x)
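
The target type and newTarget above carry the type of an assignment's left-hand side into expression checking so that type arguments of a generic function on the right-hand side can be inferred from it ("reverse" type inference, gated by enableReverseTypeInference). A minimal sketch of what this enables at the language level, assuming a Go 1.21 or newer toolchain; this example is mine, not part of the patch:

package main

import "fmt"

// id is a generic identity function.
func id[P any](x P) P { return x }

func main() {
	// id is not called here, so P cannot be inferred from call arguments;
	// instead it is inferred from the type of the assignment target f
	// (P = int). This is the direction of inference the target type carries.
	var f func(int) int = id
	fmt.Println(f(41) + 1) // 42
}
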
diff --git a/src/go/types/exprstring_test.go b/src/go/types/exprstring_test.go
index 604ceb9..75bdc0e 100644
--- a/src/go/types/exprstring_test.go
+++ b/src/go/types/exprstring_test.go
@@ -21,6 +21,7 @@
 	dup(`'a'`),
 	dup(`"foo"`),
 	dup("`bar`"),
+	dup("any"),
 
 	// func and composite literals
 	{"func(){}", "(func() literal)"},
@@ -46,14 +47,18 @@
 	// new interfaces
 	dup("interface{int}"),
 	dup("interface{~int}"),
-	dup("interface{~int}"),
+
+	// generic constraints
+	dup("interface{~a | ~b | ~c; ~int | ~string; float64; m()}"),
 	dup("interface{int | string}"),
 	dup("interface{~int | ~string; float64; m()}"),
-
-	// See above.
-	// dup("interface{type a, b, c; ~int | ~string; float64; m()}"),
 	dup("interface{~T[int, string] | string}"),
 
+	// generic types
+	dup("x[T]"),
+	dup("x[N | A | S]"),
+	dup("x[N, A]"),
+
 	// non-type expressions
 	dup("(x)"),
 	dup("x.f"),
@@ -101,6 +106,12 @@
 	dup("f(s...)"),
 	dup("f(a, s...)"),
 
+	// generic functions
+	dup("f[T]()"),
+	dup("f[T](T)"),
+	dup("f[T, T1]()"),
+	dup("f[T, T1](T, T1)"),
+
 	dup("*x"),
 	dup("&x"),
 	dup("x + y"),
diff --git a/src/go/types/gcsizes.go b/src/go/types/gcsizes.go
new file mode 100644
index 0000000..4329cc2
--- /dev/null
+++ b/src/go/types/gcsizes.go
@@ -0,0 +1,172 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+type gcSizes struct {
+	WordSize int64 // word size in bytes - must be >= 4 (32bits)
+	MaxAlign int64 // maximum alignment in bytes - must be >= 1
+}
+
+func (s *gcSizes) Alignof(T Type) (result int64) {
+	defer func() {
+		assert(result >= 1)
+	}()
+
+	// For arrays and structs, alignment is defined in terms
+	// of alignment of the elements and fields, respectively.
+	switch t := under(T).(type) {
+	case *Array:
+		// spec: "For a variable x of array type: unsafe.Alignof(x)
+		// is the same as unsafe.Alignof(x[0]), but at least 1."
+		return s.Alignof(t.elem)
+	case *Struct:
+		if len(t.fields) == 0 && _IsSyncAtomicAlign64(T) {
+			// Special case: sync/atomic.align64 is an
+			// empty struct we recognize as a signal that
+			// the struct it contains must be
+			// 64-bit-aligned.
+			//
+			// This logic is equivalent to the logic in
+			// cmd/compile/internal/types/size.go:calcStructOffset
+			return 8
+		}
+
+		// spec: "For a variable x of struct type: unsafe.Alignof(x)
+		// is the largest of the values unsafe.Alignof(x.f) for each
+		// field f of x, but at least 1."
+		max := int64(1)
+		for _, f := range t.fields {
+			if a := s.Alignof(f.typ); a > max {
+				max = a
+			}
+		}
+		return max
+	case *Slice, *Interface:
+		// Multiword data structures are effectively structs
+		// in which each element has size WordSize.
+		// Type parameters lead to variable sizes/alignments;
+		// StdSizes.Alignof won't be called for them.
+		assert(!isTypeParam(T))
+		return s.WordSize
+	case *Basic:
+		// Strings are like slices and interfaces.
+		if t.Info()&IsString != 0 {
+			return s.WordSize
+		}
+	case *TypeParam, *Union:
+		unreachable()
+	}
+	a := s.Sizeof(T) // may be 0 or negative
+	// spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
+	if a < 1 {
+		return 1
+	}
+	// complex{64,128} are aligned like [2]float{32,64}.
+	if isComplex(T) {
+		a /= 2
+	}
+	if a > s.MaxAlign {
+		return s.MaxAlign
+	}
+	return a
+}
+
+func (s *gcSizes) Offsetsof(fields []*Var) []int64 {
+	offsets := make([]int64, len(fields))
+	var offs int64
+	for i, f := range fields {
+		if offs < 0 {
+			// all remaining offsets are too large
+			offsets[i] = -1
+			continue
+		}
+		// offs >= 0
+		a := s.Alignof(f.typ)
+		offs = align(offs, a) // possibly < 0 if align overflows
+		offsets[i] = offs
+		if d := s.Sizeof(f.typ); d >= 0 && offs >= 0 {
+			offs += d // ok to overflow to < 0
+		} else {
+			offs = -1 // f.typ or offs is too large
+		}
+	}
+	return offsets
+}
+
+func (s *gcSizes) Sizeof(T Type) int64 {
+	switch t := under(T).(type) {
+	case *Basic:
+		assert(isTyped(T))
+		k := t.kind
+		if int(k) < len(basicSizes) {
+			if s := basicSizes[k]; s > 0 {
+				return int64(s)
+			}
+		}
+		if k == String {
+			return s.WordSize * 2
+		}
+	case *Array:
+		n := t.len
+		if n <= 0 {
+			return 0
+		}
+		// n > 0
+		esize := s.Sizeof(t.elem)
+		if esize < 0 {
+			return -1 // element too large
+		}
+		if esize == 0 {
+			return 0 // 0-size element
+		}
+		// esize > 0
+		// Final size is esize * n; and size must be <= maxInt64.
+		const maxInt64 = 1<<63 - 1
+		if esize > maxInt64/n {
+			return -1 // esize * n overflows
+		}
+		return esize * n
+	case *Slice:
+		return s.WordSize * 3
+	case *Struct:
+		n := t.NumFields()
+		if n == 0 {
+			return 0
+		}
+		offsets := s.Offsetsof(t.fields)
+		offs := offsets[n-1]
+		size := s.Sizeof(t.fields[n-1].typ)
+		if offs < 0 || size < 0 {
+			return -1 // type too large
+		}
+		// gc: The last field of a non-zero-sized struct is not allowed to
+		// have size 0.
+		if offs > 0 && size == 0 {
+			size = 1
+		}
+		// gc: Size includes alignment padding.
+		return align(offs+size, s.Alignof(t)) // may overflow to < 0 which is ok
+	case *Interface:
+		// Type parameters lead to variable sizes/alignments;
+		// StdSizes.Sizeof won't be called for them.
+		assert(!isTypeParam(T))
+		return s.WordSize * 2
+	case *TypeParam, *Union:
+		unreachable()
+	}
+	return s.WordSize // catch-all
+}
+
+// gcSizesFor returns the Sizes used by gc for an architecture.
+// The result is a nil *gcSizes pointer (which is not a valid types.Sizes)
+// if a compiler/architecture pair is not known.
+func gcSizesFor(compiler, arch string) *gcSizes {
+	if compiler != "gc" {
+		return nil
+	}
+	return gcArchSizes[arch]
+}
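
gcSizes mirrors the gc compiler's layout rules (strings and interfaces are two words, slices three, a trailing zero-sized struct field gets one byte, and so on). These numbers are reachable through the exported types.SizesFor; a small sketch, with output values assuming a 64-bit gc target (my example, not part of the patch):

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// SizesFor("gc", "amd64") exposes the gc compiler's layout rules.
	sizes := types.SizesFor("gc", "amd64")

	// struct { a bool; b string }
	s := types.NewStruct([]*types.Var{
		types.NewField(token.NoPos, nil, "a", types.Typ[types.Bool], false),
		types.NewField(token.NoPos, nil, "b", types.Typ[types.String], false),
	}, nil)

	// bool sits at offset 0; the string is word-aligned at offset 8 and
	// occupies two words, so the struct is 24 bytes, aligned to 8.
	fmt.Println(sizes.Sizeof(s), sizes.Alignof(s))                     // 24 8
	fmt.Println(sizes.Offsetsof([]*types.Var{s.Field(0), s.Field(1)})) // [0 8]
}
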
diff --git a/src/go/types/generate_test.go b/src/go/types/generate_test.go
index 2578cbb..59c0a97 100644
--- a/src/go/types/generate_test.go
+++ b/src/go/types/generate_test.go
@@ -95,14 +95,17 @@
 type action func(in *ast.File)
 
 var filemap = map[string]action{
-	"array.go":        nil,
-	"basic.go":        nil,
-	"chan.go":         nil,
-	"const.go":        func(f *ast.File) { fixTokenPos(f) },
-	"context.go":      nil,
-	"context_test.go": nil,
-	"gccgosizes.go":   nil,
-	"hilbert_test.go": func(f *ast.File) { renameImportPath(f, `"cmd/compile/internal/types2"`, `"go/types"`) },
+	"alias.go":          nil,
+	"array.go":          nil,
+	"api_predicates.go": nil,
+	"basic.go":          nil,
+	"chan.go":           nil,
+	"const.go":          func(f *ast.File) { fixTokenPos(f) },
+	"context.go":        nil,
+	"context_test.go":   nil,
+	"gccgosizes.go":     nil,
+	"gcsizes.go":        func(f *ast.File) { renameIdents(f, "IsSyncAtomicAlign64->_IsSyncAtomicAlign64") },
+	"hilbert_test.go":   func(f *ast.File) { renameImportPath(f, `"cmd/compile/internal/types2"`, `"go/types"`) },
 	"infer.go": func(f *ast.File) {
 		fixTokenPos(f)
 		fixInferSig(f)
@@ -114,7 +117,7 @@
 	"main_test.go":        nil,
 	"map.go":              nil,
 	"named.go":            func(f *ast.File) { fixTokenPos(f); fixTraceSel(f) },
-	"object.go":           func(f *ast.File) { fixTokenPos(f); renameIdent(f, "NewTypeNameLazy", "_NewTypeNameLazy") },
+	"object.go":           func(f *ast.File) { fixTokenPos(f); renameIdents(f, "NewTypeNameLazy->_NewTypeNameLazy") },
 	"object_test.go":      func(f *ast.File) { renameImportPath(f, `"cmd/compile/internal/types2"`, `"go/types"`) },
 	"objset.go":           nil,
 	"package.go":          nil,
@@ -122,11 +125,10 @@
 	"predicates.go":       nil,
 	"scope.go": func(f *ast.File) {
 		fixTokenPos(f)
-		renameIdent(f, "Squash", "squash")
-		renameIdent(f, "InsertLazy", "_InsertLazy")
+		renameIdents(f, "Squash->squash", "InsertLazy->_InsertLazy")
 	},
 	"selection.go":     nil,
-	"sizes.go":         func(f *ast.File) { renameIdent(f, "IsSyncAtomicAlign64", "_IsSyncAtomicAlign64") },
+	"sizes.go":         func(f *ast.File) { renameIdents(f, "IsSyncAtomicAlign64->_IsSyncAtomicAlign64") },
 	"slice.go":         nil,
 	"subst.go":         func(f *ast.File) { fixTokenPos(f); fixTraceSel(f) },
 	"termlist.go":      nil,
@@ -141,20 +143,29 @@
 	"universe.go":      fixGlobalTypVarDecl,
 	"util_test.go":     fixTokenPos,
 	"validtype.go":     nil,
-	"version_test.go":  nil,
 }
 
 // TODO(gri) We should be able to make these rewriters more configurable/composable.
 //           For now this is a good starting point.
 
-// renameIdent renames an identifier.
-// Note: This doesn't change the use of the identifier in comments.
-func renameIdent(f *ast.File, from, to string) {
+// renameIdents renames identifiers: each renames entry is of the form from->to.
+// Note: This doesn't change the use of the identifiers in comments.
+func renameIdents(f *ast.File, renames ...string) {
+	var list [][]string
+	for _, r := range renames {
+		s := strings.Split(r, "->")
+		if len(s) != 2 {
+			panic("invalid rename entry: " + r)
+		}
+		list = append(list, s)
+	}
 	ast.Inspect(f, func(n ast.Node) bool {
 		switch n := n.(type) {
 		case *ast.Ident:
-			if n.Name == from {
-				n.Name = to
+			for _, r := range list {
+				if n.Name == r[0] {
+					n.Name = r[1]
+				}
 			}
 			return false
 		}
@@ -211,7 +222,7 @@
 	ast.Inspect(f, func(n ast.Node) bool {
 		switch n := n.(type) {
 		case *ast.FuncDecl:
-			if n.Name.Name == "infer" || n.Name.Name == "infer1" || n.Name.Name == "infer2" {
+			if n.Name.Name == "infer" {
 				// rewrite (pos token.Pos, ...) to (posn positioner, ...)
 				par := n.Type.Params.List[0]
 				if len(par.Names) == 1 && par.Names[0].Name == "pos" {
@@ -232,10 +243,8 @@
 						n.Args[0] = arg
 						return false
 					}
-				case "errorf", "infer1", "infer2":
+				case "errorf":
 					// rewrite check.errorf(pos, ...) to check.errorf(posn, ...)
-					// rewrite check.infer1(pos, ...) to check.infer1(posn, ...)
-					// rewrite check.infer2(pos, ...) to check.infer2(posn, ...)
 					if ident, _ := n.Args[0].(*ast.Ident); ident != nil && ident.Name == "pos" {
 						pos := n.Args[0].Pos()
 						arg := newIdent(pos, "posn")
diff --git a/src/go/types/index.go b/src/go/types/index.go
index c1c0f40..6f532a9 100644
--- a/src/go/types/index.go
+++ b/src/go/types/index.go
@@ -30,7 +30,7 @@
 		x.mode = invalid
 		// TODO(gri) here we re-evaluate e.X - try to avoid this
 		x.typ = check.varType(e.Orig)
-		if x.typ != Typ[Invalid] {
+		if isValid(x.typ) {
 			x.mode = typexpr
 		}
 		return false
@@ -421,7 +421,7 @@
 		validIndex := false
 		eval := e
 		if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
-			if typ, i := check.index(kv.Key, length); typ != Typ[Invalid] {
+			if typ, i := check.index(kv.Key, length); isValid(typ) {
 				if i >= 0 {
 					index = i
 					validIndex = true
diff --git a/src/go/types/infer.go b/src/go/types/infer.go
index cb76344..889de00 100644
--- a/src/go/types/infer.go
+++ b/src/go/types/infer.go
@@ -26,10 +26,16 @@
 // based on the given type parameters tparams, type arguments targs, function parameters params, and
 // function arguments args, if any. There must be at least one type parameter, no more type arguments
 // than type parameters, and params and args must match in number (incl. zero).
+// If reverse is set, an error message's contents are reversed for a better error message for some
+// errors related to reverse type inference (where the function call is synthetic).
 // If successful, infer returns the complete list of given and inferred type arguments, one for each
 // type parameter. Otherwise the result is nil and appropriate errors will be reported.
-func (check *Checker) infer(posn positioner, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand) (inferred []Type) {
-	if debug {
+func (check *Checker) infer(posn positioner, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand, reverse bool) (inferred []Type) {
+	// Don't verify result conditions if there's no error handler installed:
+	// in that case, an error leads to an exit panic and the result value may
+	// be incorrect. But in that case it doesn't matter because callers won't
+	// be able to use it either.
+	if check.conf.Error != nil {
 		defer func() {
 			assert(inferred == nil || len(inferred) == len(tparams) && !containsNil(inferred))
 		}()
@@ -54,6 +60,14 @@
 		return targs
 	}
 
+	// If we have invalid (ordinary) arguments, an error was reported before.
+	// Avoid additional inference errors and exit early (go.dev/issue/60434).
+	for _, arg := range args {
+		if arg.mode == invalid {
+			return nil
+		}
+	}
+
 	// Make sure we have a "full" list of type arguments, some of which may
 	// be nil (unknown). Make a copy so as to not clobber the incoming slice.
 	if len(targs) < n {
@@ -100,7 +114,7 @@
 	// Terminology: generic parameter = function parameter with a type-parameterized type
 	u := newUnifier(tparams, targs, check.allowVersion(check.pkg, posn, go1_21))
 
-	errorf := func(kind string, tpar, targ Type, arg *operand) {
+	errorf := func(tpar, targ Type, arg *operand) {
 		// provide a better error message if we can
 		targs := u.inferred(tparams)
 		if targs[0] == nil {
@@ -115,7 +129,7 @@
 				}
 			}
 			if allFailed {
-				check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match %s (cannot infer %s)", kind, targ, arg.expr, tpar, typeParamsString(tparams))
+				check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match %s (cannot infer %s)", targ, arg.expr, tpar, typeParamsString(tparams))
 				return
 			}
 		}
@@ -127,9 +141,13 @@
 		// InvalidTypeArg). We can't differentiate these cases, so fall back on
 		// the more general CannotInferTypeArgs.
 		if inferred != tpar {
-			check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match inferred type %s for %s", kind, targ, arg.expr, inferred, tpar)
+			if reverse {
+				check.errorf(arg, CannotInferTypeArgs, "inferred type %s for %s does not match type %s of %s", inferred, tpar, targ, arg.expr)
+			} else {
+				check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match inferred type %s for %s", targ, arg.expr, inferred, tpar)
+			}
 		} else {
-			check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match %s", kind, targ, arg.expr, tpar)
+			check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match %s", targ, arg.expr, tpar)
 		}
 	}
 
@@ -158,7 +176,7 @@
 			// Collect the indices of untyped arguments and handle them later.
 			if isTyped(arg.typ) {
 				if !u.unify(par.typ, arg.typ, assign) {
-					errorf("type", par.typ, arg.typ, arg)
+					errorf(par.typ, arg.typ, arg)
 					return nil
 				}
 			} else if _, ok := par.typ.(*TypeParam); ok && !arg.isNil() {
@@ -540,6 +558,9 @@
 	case *Basic:
 		// nothing to do
 
+	case *Alias:
+		return w.isParameterized(Unalias(t))
+
 	case *Array:
 		return w.isParameterized(t.elem)
 
@@ -691,6 +712,9 @@
 	case *Basic:
 		// nothing to do
 
+	case *Alias:
+		w.typ(Unalias(t))
+
 	case *Array:
 		w.typ(t.elem)
 
diff --git a/src/go/types/instantiate.go b/src/go/types/instantiate.go
index 088b433..bf7ecc5 100644
--- a/src/go/types/instantiate.go
+++ b/src/go/types/instantiate.go
@@ -124,7 +124,8 @@
 		assert(expanding == nil) // function instances cannot be reached from Named types
 
 		tparams := orig.TypeParams()
-		if !check.validateTArgLen(pos, tparams.Len(), len(targs)) {
+		// TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here)
+		if !check.validateTArgLen(pos, orig.String(), tparams.Len(), len(targs)) {
 			return Typ[Invalid]
 		}
 		if tparams.Len() == 0 {
@@ -152,19 +153,27 @@
 	return updateContexts(res)
 }
 
-// validateTArgLen verifies that the length of targs and tparams matches,
-// reporting an error if not. If validation fails and check is nil,
-// validateTArgLen panics.
-func (check *Checker) validateTArgLen(pos token.Pos, ntparams, ntargs int) bool {
-	if ntargs != ntparams {
-		// TODO(gri) provide better error message
-		if check != nil {
-			check.errorf(atPos(pos), WrongTypeArgCount, "got %d arguments but %d type parameters", ntargs, ntparams)
-			return false
-		}
-		panic(fmt.Sprintf("%v: got %d arguments but %d type parameters", pos, ntargs, ntparams))
+// validateTArgLen checks that the number of type arguments (got) matches the
+// number of type parameters (want); if they don't match an error is reported.
+// If validation fails and check is nil, validateTArgLen panics.
+func (check *Checker) validateTArgLen(pos token.Pos, name string, want, got int) bool {
+	var qual string
+	switch {
+	case got < want:
+		qual = "not enough"
+	case got > want:
+		qual = "too many"
+	default:
+		return true
 	}
-	return true
+
+	msg := check.sprintf("%s type arguments for type %s: have %d, want %d", qual, name, got, want)
+	if check != nil {
+		check.error(atPos(pos), WrongTypeArgCount, msg)
+		return false
+	}
+
+	panic(fmt.Sprintf("%v: %s", pos, msg))
 }
 
 func (check *Checker) verify(pos token.Pos, tparams []*TypeParam, targs []Type, ctxt *Context) (int, error) {
@@ -194,10 +203,10 @@
 func (check *Checker) implements(pos token.Pos, V, T Type, constraint bool, cause *string) bool {
 	Vu := under(V)
 	Tu := under(T)
-	if Vu == Typ[Invalid] || Tu == Typ[Invalid] {
+	if !isValid(Vu) || !isValid(Tu) {
 		return true // avoid follow-on errors
 	}
-	if p, _ := Vu.(*Pointer); p != nil && under(p.base) == Typ[Invalid] {
+	if p, _ := Vu.(*Pointer); p != nil && !isValid(under(p.base)) {
 		return true // avoid follow-on errors (see go.dev/issue/49541 for an example)
 	}
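
validateTArgLen now distinguishes too few from too many type arguments and names the offending type. A self-contained sketch (mine, not part of the patch) that surfaces the new message through the public API; the exact position and type-name rendering may differ slightly:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p

type Pair[K comparable, V any] struct {
	Key K
	Val V
}

var _ Pair[int]
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{
		// Report errors via a handler instead of stopping at the first one.
		Error: func(err error) {
			// Expected to print the new message shape, e.g.
			//   not enough type arguments for type Pair: have 1, want 2
			fmt.Println(err)
		},
	}
	conf.Check("p", fset, []*ast.File{f}, nil)
}
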
 
diff --git a/src/go/types/interface.go b/src/go/types/interface.go
index f2bb15e..01bbb08 100644
--- a/src/go/types/interface.go
+++ b/src/go/types/interface.go
@@ -94,17 +94,17 @@
 func (t *Interface) NumExplicitMethods() int { return len(t.methods) }
 
 // ExplicitMethod returns the i'th explicitly declared method of interface t for 0 <= i < t.NumExplicitMethods().
-// The methods are ordered by their unique Id.
+// The methods are ordered by their unique [Id].
 func (t *Interface) ExplicitMethod(i int) *Func { return t.methods[i] }
 
 // NumEmbeddeds returns the number of embedded types in interface t.
 func (t *Interface) NumEmbeddeds() int { return len(t.embeddeds) }
 
-// Embedded returns the i'th embedded defined (*Named) type of interface t for 0 <= i < t.NumEmbeddeds().
+// Embedded returns the i'th embedded defined (*[Named]) type of interface t for 0 <= i < t.NumEmbeddeds().
 // The result is nil if the i'th embedded type is not a defined type.
 //
-// Deprecated: Use EmbeddedType which is not restricted to defined (*Named) types.
-func (t *Interface) Embedded(i int) *Named { tname, _ := t.embeddeds[i].(*Named); return tname }
+// Deprecated: Use [Interface.EmbeddedType] which is not restricted to defined (*[Named]) types.
+func (t *Interface) Embedded(i int) *Named { return asNamed(t.embeddeds[i]) }
 
 // EmbeddedType returns the i'th embedded type of interface t for 0 <= i < t.NumEmbeddeds().
 func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] }
@@ -130,7 +130,7 @@
 func (t *Interface) IsImplicit() bool { return t.implicit }
 
 // Complete computes the interface's type set. It must be called by users of
-// NewInterfaceType and NewInterface after the interface's embedded types are
+// [NewInterfaceType] and [NewInterface] after the interface's embedded types are
 // fully defined and before using the interface type in any way other than to
 // form other types. The interface must not contain duplicate methods or a
 // panic occurs. Complete returns the receiver.
@@ -151,11 +151,12 @@
 // Implementation
 
 func (t *Interface) cleanup() {
+	t.typeSet() // any interface that escapes type checking must be safe for concurrent use
 	t.check = nil
 	t.embedPos = nil
 }
 
-func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *Named) {
+func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *TypeName) {
 	addEmbedded := func(pos token.Pos, typ Type) {
 		ityp.embeddeds = append(ityp.embeddeds, typ)
 		if ityp.embedPos == nil {
@@ -181,27 +182,27 @@
 		typ := check.typ(f.Type)
 		sig, _ := typ.(*Signature)
 		if sig == nil {
-			if typ != Typ[Invalid] {
+			if isValid(typ) {
 				check.errorf(f.Type, InvalidSyntaxTree, "%s is not a method signature", typ)
 			}
 			continue // ignore
 		}
 
-		// Always type-check method type parameters but complain if they are not enabled.
-		// (This extra check is needed here because interface method signatures don't have
-		// a receiver specification.)
+		// The go/parser doesn't accept method type parameters but an ast.FuncType may have them.
 		if sig.tparams != nil {
 			var at positioner = f.Type
 			if ftyp, _ := f.Type.(*ast.FuncType); ftyp != nil && ftyp.TypeParams != nil {
 				at = ftyp.TypeParams
 			}
-			check.error(at, InvalidMethodTypeParams, "methods cannot have type parameters")
+			check.error(at, InvalidSyntaxTree, "methods cannot have type parameters")
 		}
 
 		// use named receiver type if available (for better error messages)
 		var recvTyp Type = ityp
 		if def != nil {
-			recvTyp = def
+			if named := asNamed(def.typ); named != nil {
+				recvTyp = named
+			}
 		}
 		sig.recv = NewVar(name.Pos(), check.pkg, "", recvTyp)
 
diff --git a/src/go/types/issues_test.go b/src/go/types/issues_test.go
index 4a559cb..6f9d597 100644
--- a/src/go/types/issues_test.go
+++ b/src/go/types/issues_test.go
@@ -931,6 +931,22 @@
 	conf.Check(f.Name.Name, fset, []*ast.File{f}, nil) // must not panic
 }
 
+func TestIssue61938(t *testing.T) {
+	const src = `
+package p
+
+func f[T any]() {}
+func _()        { f() }
+`
+	// no error handler provided (this issue)
+	var conf Config
+	typecheck(src, &conf, nil) // must not panic
+
+	// with error handler (sanity check)
+	conf.Error = func(error) {}
+	typecheck(src, &conf, nil) // must not panic
+}
+
 func TestIssue63260(t *testing.T) {
 	const src = `
 package p
@@ -974,3 +990,116 @@
 		t.Fatalf("types of v and T are not pointer-identical: %p != %p", v.Type().(*TypeParam), T)
 	}
 }
+
+func TestIssue44410(t *testing.T) {
+	const src = `
+package p
+
+type A = []int
+type S struct{ A }
+`
+
+	t.Setenv("GODEBUG", "gotypesalias=1")
+	pkg := mustTypecheck(src, nil, nil)
+
+	S := pkg.Scope().Lookup("S")
+	if S == nil {
+		t.Fatal("object S not found")
+	}
+
+	got := S.String()
+	const want = "type p.S struct{p.A}"
+	if got != want {
+		t.Fatalf("got %q; want %q", got, want)
+	}
+}
+
+func TestIssue59831(t *testing.T) {
+	// Package a exports a type S with an unexported method m;
+	// the tests check the error messages when m is not found.
+	const asrc = `package a; type S struct{}; func (S) m() {}`
+	apkg := mustTypecheck(asrc, nil, nil)
+
+	// Package b exports a type S with an exported method m;
+	// the tests check the error messages when M is not found.
+	const bsrc = `package b; type S struct{}; func (S) M() {}`
+	bpkg := mustTypecheck(bsrc, nil, nil)
+
+	tests := []struct {
+		imported *Package
+		src, err string
+	}{
+		// tests importing a (or nothing)
+		{apkg, `package a1; import "a"; var _ interface { M() } = a.S{}`,
+			"a.S does not implement interface{M()} (missing method M) have m() want M()"},
+
+		{apkg, `package a2; import "a"; var _ interface { m() } = a.S{}`,
+			"a.S does not implement interface{m()} (unexported method m)"}, // test for issue
+
+		{nil, `package a3; type S struct{}; func (S) m(); var _ interface { M() } = S{}`,
+			"S does not implement interface{M()} (missing method M) have m() want M()"},
+
+		{nil, `package a4; type S struct{}; func (S) m(); var _ interface { m() } = S{}`,
+			""}, // no error expected
+
+		{nil, `package a5; type S struct{}; func (S) m(); var _ interface { n() } = S{}`,
+			"S does not implement interface{n()} (missing method n)"},
+
+		// tests importing b (or nothing)
+		{bpkg, `package b1; import "b"; var _ interface { m() } = b.S{}`,
+			"b.S does not implement interface{m()} (missing method m) have M() want m()"},
+
+		{bpkg, `package b2; import "b"; var _ interface { M() } = b.S{}`,
+			""}, // no error expected
+
+		{nil, `package b3; type S struct{}; func (S) M(); var _ interface { M() } = S{}`,
+			""}, // no error expected
+
+		{nil, `package b4; type S struct{}; func (S) M(); var _ interface { m() } = S{}`,
+			"S does not implement interface{m()} (missing method m) have M() want m()"},
+
+		{nil, `package b5; type S struct{}; func (S) M(); var _ interface { n() } = S{}`,
+			"S does not implement interface{n()} (missing method n)"},
+	}
+
+	for _, test := range tests {
+		// typecheck test source
+		conf := Config{Importer: importHelper{pkg: test.imported}}
+		pkg, err := typecheck(test.src, &conf, nil)
+		if err == nil {
+			if test.err != "" {
+				t.Errorf("package %s: got no error, want %q", pkg.Name(), test.err)
+			}
+			continue
+		}
+		if test.err == "" {
+			t.Errorf("package %s: got %q, want no error", pkg.Name(), err.Error())
+		}
+
+		// flatten reported error message
+		errmsg := strings.ReplaceAll(err.Error(), "\n", " ")
+		errmsg = strings.ReplaceAll(errmsg, "\t", "")
+
+		// verify error message
+		if !strings.Contains(errmsg, test.err) {
+			t.Errorf("package %s: got %q, want %q", pkg.Name(), errmsg, test.err)
+		}
+	}
+}
+
+func TestIssue64759(t *testing.T) {
+	const src = `
+//go:build go1.18
+package p
+
+func f[S ~[]E, E any](S) {}
+
+func _() {
+	f([]string{})
+}
+`
+	// Per the go:build directive, the source must typecheck
+	// even though the (module) Go version is set to go1.17.
+	conf := Config{GoVersion: "go1.17"}
+	mustTypecheck(src, &conf, nil)
+}
diff --git a/src/go/types/lookup.go b/src/go/types/lookup.go
index d96dd86..7723c43 100644
--- a/src/go/types/lookup.go
+++ b/src/go/types/lookup.go
@@ -56,7 +56,7 @@
 	// Thus, if we have a named pointer type, proceed with the underlying
 	// pointer type but discard the result if it is a method since we would
 	// not have found it for T (see also go.dev/issue/8590).
-	if t, _ := T.(*Named); t != nil {
+	if t := asNamed(T); t != nil {
 		if p, _ := t.Underlying().(*Pointer); p != nil {
 			obj, index, indirect = lookupFieldOrMethodImpl(p, false, pkg, name, false)
 			if _, ok := obj.(*Func); ok {
@@ -98,7 +98,7 @@
 // and missingMethod (the latter doesn't care about struct fields).
 //
 // If foldCase is true, method names are considered equal if they are equal
-// with case folding.
+// with case folding, irrespective of which package they are in.
 //
 // The resulting object may not be fully type-checked.
 func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string, foldCase bool) (obj Object, index []int, indirect bool) {
@@ -140,7 +140,7 @@
 
 			// If we have a named type, we may have associated methods.
 			// Look for those first.
-			if named, _ := typ.(*Named); named != nil {
+			if named := asNamed(typ); named != nil {
 				if alt := seen.lookup(named); alt != nil {
 					// We have seen this type before, at a more shallow depth
 					// (note that multiples of this type at the current depth
@@ -345,6 +345,7 @@
 		ok = iota
 		notFound
 		wrongName
+		unexported
 		wrongSig
 		ambigSel
 		ptrRecv
@@ -390,6 +391,11 @@
 					f, _ = obj.(*Func)
 					if f != nil {
 						state = wrongName
+						if f.name == m.name {
+							// If the names are equal, f must be unexported
+							// (otherwise the package wouldn't matter).
+							state = unexported
+						}
 					}
 				}
 				break
@@ -438,8 +444,9 @@
 			}
 		case wrongName:
 			fs, ms := check.funcString(f, false), check.funcString(m, false)
-			*cause = check.sprintf("(missing method %s)\n\t\thave %s\n\t\twant %s",
-				m.Name(), fs, ms)
+			*cause = check.sprintf("(missing method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms)
+		case unexported:
+			*cause = check.sprintf("(unexported method %s)", m.Name())
 		case wrongSig:
 			fs, ms := check.funcString(f, false), check.funcString(m, false)
 			if fs == ms {
@@ -447,8 +454,18 @@
 				// Add package information to disambiguate (go.dev/issue/54258).
 				fs, ms = check.funcString(f, true), check.funcString(m, true)
 			}
-			*cause = check.sprintf("(wrong type for method %s)\n\t\thave %s\n\t\twant %s",
-				m.Name(), fs, ms)
+			if fs == ms {
+				// We still have "want Foo, have Foo".
+				// This is most likely due to different type parameters with
+				// the same name appearing in the instantiated signatures
+				// (go.dev/issue/61685).
+				// Rather than reporting this misleading error cause, for now
+				// just point out that the method signature is incorrect.
+				// TODO(gri) should find a good way to report the root cause
+				*cause = check.sprintf("(wrong type for method %s)", m.Name())
+				break
+			}
+			*cause = check.sprintf("(wrong type for method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms)
 		case ambigSel:
 			*cause = check.sprintf("(ambiguous selector %s.%s)", V, m.Name())
 		case ptrRecv:
@@ -529,7 +546,7 @@
 // with an underlying pointer type!) and returns its base and true.
 // Otherwise it returns (typ, false).
 func deref(typ Type) (Type, bool) {
-	if p, _ := typ.(*Pointer); p != nil {
+	if p, _ := Unalias(typ).(*Pointer); p != nil {
 		// p.base should never be nil, but be conservative
 		if p.base == nil {
 			if debug {
@@ -574,11 +591,12 @@
 }
 
 // lookupMethod returns the index of and method with matching package and name, or (-1, nil).
-// If foldCase is true, method names are considered equal if they are equal with case folding.
+// If foldCase is true, method names are considered equal if they are equal with case folding
+// and their packages are ignored (e.g., pkg1.m, pkg1.M, pkg2.m, and pkg2.M are all equal).
 func lookupMethod(methods []*Func, pkg *Package, name string, foldCase bool) (int, *Func) {
 	if name != "_" {
 		for i, m := range methods {
-			if (m.name == name || foldCase && strings.EqualFold(m.name, name)) && m.sameId(pkg, m.name) {
+			if m.sameId(pkg, name) || foldCase && strings.EqualFold(m.name, name) {
 				return i, m
 			}
 		}
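
The new unexported state backs the "(unexported method m)" cause exercised by TestIssue59831 above. A condensed, runnable sketch of the scenario it diagnoses; mapImporter, the package names, and the sources are my own scaffolding, not part of the patch:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

// mapImporter resolves imports from packages type-checked in memory.
type mapImporter map[string]*types.Package

func (m mapImporter) Import(path string) (*types.Package, error) { return m[path], nil }

// check parses and type-checks a single-file package.
func check(path, src string, imp types.Importer) (*types.Package, error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, path+".go", src, 0)
	if err != nil {
		return nil, err
	}
	conf := types.Config{Importer: imp}
	return conf.Check(path, fset, []*ast.File{f}, nil)
}

func main() {
	// Package a declares S with an unexported method m.
	apkg, err := check("a", `package a; type S struct{}; func (S) m() {}`, nil)
	if err != nil {
		panic(err)
	}

	// Package q requires interface{ m() }. a.S does have a method named m,
	// but unexported method identities include the package: a.m is not q.m.
	// The checker now reports the cause "(unexported method m)".
	_, err = check("q", `package q; import "a"; var _ interface{ m() } = a.S{}`, mapImporter{"a": apkg})
	fmt.Println(err)
}
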
diff --git a/src/go/types/methodset.go b/src/go/types/methodset.go
index 0d9d9b4..ac8f0bd 100644
--- a/src/go/types/methodset.go
+++ b/src/go/types/methodset.go
@@ -13,7 +13,7 @@
 )
 
 // A MethodSet is an ordered set of concrete or abstract (interface) methods;
-// a method is a MethodVal selection, and they are ordered by ascending m.Obj().Id().
+// a method is a [MethodVal] selection, and they are ordered by ascending m.Obj().Id().
 // The zero value for a MethodSet is a ready-to-use empty method set.
 type MethodSet struct {
 	list []*Selection
@@ -80,7 +80,7 @@
 	// (spec: "The type denoted by T is called the receiver base type;
 	// it must not be a pointer or interface type and it must be declared
 	// in the same package as the method.").
-	if t, _ := T.(*Named); t != nil && isPointer(t) {
+	if t := asNamed(T); t != nil && isPointer(t) {
 		return &emptyMethodSet
 	}
 
@@ -117,7 +117,7 @@
 
 			// If we have a named type, we may have associated methods.
 			// Look for those first.
-			if named, _ := typ.(*Named); named != nil {
+			if named := asNamed(typ); named != nil {
 				if alt := seen.lookup(named); alt != nil {
 					// We have seen this type before, at a more shallow depth
 					// (note that multiples of this type at the current depth
diff --git a/src/go/types/mono.go b/src/go/types/mono.go
index ebf4d8c..7411339 100644
--- a/src/go/types/mono.go
+++ b/src/go/types/mono.go
@@ -206,7 +206,7 @@
 	// type parameters.
 	var do func(typ Type)
 	do = func(typ Type) {
-		switch typ := typ.(type) {
+		switch typ := Unalias(typ).(type) {
 		default:
 			panic("unexpected type")
 
diff --git a/src/go/types/named.go b/src/go/types/named.go
index 413eaad..21c0de2 100644
--- a/src/go/types/named.go
+++ b/src/go/types/named.go
@@ -143,7 +143,7 @@
 // If the given type name obj doesn't have a type yet, its type is set to the returned named type.
 // The underlying type must not be a *Named.
 func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
-	if _, ok := underlying.(*Named); ok {
+	if asNamed(underlying) != nil {
 		panic("underlying type must not be *Named")
 	}
 	return (*Checker)(nil).newNamed(obj, underlying, methods)
@@ -226,7 +226,7 @@
 	atomic.StoreUint32(&n.state_, uint32(state))
 }
 
-// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
+// newNamed is like NewNamed but with a *Checker receiver.
 func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
 	typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods}
 	if obj.typ == nil {
@@ -436,7 +436,7 @@
 	if underlying == nil {
 		panic("underlying type must not be nil")
 	}
-	if _, ok := underlying.(*Named); ok {
+	if asNamed(underlying) != nil {
 		panic("underlying type must not be *Named")
 	}
 	t.resolve().underlying = underlying
@@ -455,7 +455,8 @@
 	}
 }
 
-func (t *Named) Underlying() Type { return t.resolve().underlying }
+// TODO(gri) Investigate if Unalias can be moved to where underlying is set.
+func (t *Named) Underlying() Type { return Unalias(t.resolve().underlying) }
 func (t *Named) String() string   { return TypeString(t, nil) }
 
 // ----------------------------------------------------------------------------
@@ -552,12 +553,6 @@
 	return u
 }
 
-func (n *Named) setUnderlying(typ Type) {
-	if n != nil {
-		n.underlying = typ
-	}
-}
-
 func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
 	n.resolve()
 	// If n is an instance, we may not have yet instantiated all of its methods.
@@ -600,7 +595,7 @@
 	orig := n.inst.orig
 	targs := n.inst.targs
 
-	if _, unexpanded := orig.underlying.(*Named); unexpanded {
+	if asNamed(orig.underlying) != nil {
 		// We should only get a Named underlying type here during type checking
 		// (for example, in recursive type declarations).
 		assert(check != nil)
@@ -635,11 +630,18 @@
 				old := iface
 				iface = check.newInterface()
 				iface.embeddeds = old.embeddeds
+				assert(old.complete) // otherwise we are copying incomplete data
 				iface.complete = old.complete
 				iface.implicit = old.implicit // should be false but be conservative
 				underlying = iface
 			}
 			iface.methods = methods
+			iface.tset = nil // recompute type set with new methods
+
+			// If check != nil, check.newInterface will have saved the interface for later completion.
+			if check == nil { // golang/go#61561: all newly created interfaces must be fully evaluated
+				iface.typeSet()
+			}
 		}
 	}
 
@@ -651,7 +653,7 @@
 //
 // TODO(rfindley): eliminate this function or give it a better name.
 func safeUnderlying(typ Type) Type {
-	if t, _ := typ.(*Named); t != nil {
+	if t := asNamed(typ); t != nil {
 		return t.underlying
 	}
 	return typ.Underlying()
diff --git a/src/go/types/object.go b/src/go/types/object.go
index e47ef2e..51b3886 100644
--- a/src/go/types/object.go
+++ b/src/go/types/object.go
@@ -287,6 +287,8 @@
 	switch t := obj.typ.(type) {
 	case nil:
 		return false
+	// case *Alias:
+	//	handled by default case
 	case *Basic:
 		// unsafe.Pointer is not an alias.
 		if obj.pkg == Unsafe {
@@ -408,6 +410,12 @@
 	return obj
 }
 
+// Pkg returns the package to which the function belongs.
+//
+// The result is nil for methods of types in the Universe scope,
+// like method Error of the error built-in interface type.
+func (obj *Func) Pkg() *Package { return obj.object.Pkg() }
+
 // hasPtrRecv reports whether the receiver is of the form *T for the given method obj.
 func (obj *Func) hasPtrRecv() bool {
 	// If a method's receiver type is set, use that as the source of truth for the receiver.
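
The newly documented behavior of Func.Pkg can be observed directly on a method of a Universe-scope type; a short sketch (mine, not part of the patch):

package main

import (
	"fmt"
	"go/types"
)

func main() {
	// The predeclared error type lives in the Universe scope.
	errType := types.Universe.Lookup("error").Type()
	iface := errType.Underlying().(*types.Interface)
	m := iface.Method(0) // the Error method

	// As documented above, Func.Pkg is nil for methods of Universe types.
	fmt.Println(m.Name(), m.Pkg() == nil) // Error true
}
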
diff --git a/src/go/types/operand.go b/src/go/types/operand.go
index d7719fd..d5c1634 100644
--- a/src/go/types/operand.go
+++ b/src/go/types/operand.go
@@ -159,7 +159,7 @@
 
 	// <typ>
 	if hasType {
-		if x.typ != Typ[Invalid] {
+		if isValid(x.typ) {
 			var intro string
 			if isGeneric(x.typ) {
 				intro = " of generic type "
@@ -232,7 +232,7 @@
 // if assignableTo is invoked through an exported API call, i.e., when all
 // methods have been type-checked.
 func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Code) {
-	if x.mode == invalid || T == Typ[Invalid] {
+	if x.mode == invalid || !isValid(T) {
 		return true, 0 // avoid spurious errors
 	}
 
diff --git a/src/go/types/predicates.go b/src/go/types/predicates.go
index b821b58..cac2b3c 100644
--- a/src/go/types/predicates.go
+++ b/src/go/types/predicates.go
@@ -8,6 +8,9 @@
 
 package types
 
+// isValid reports whether t is a valid type.
+func isValid(t Type) bool { return Unalias(t) != Typ[Invalid] }
+
 // The isX predicates below report whether t is an X.
 // If t is a type parameter the result is false; i.e.,
 // these predicates don't look inside a type parameter.
@@ -49,7 +52,7 @@
 // for all specific types of the type parameter's type set.
 // allBasic(t, info) is an optimized version of isBasic(coreType(t), info).
 func allBasic(t Type, info BasicInfo) bool {
-	if tpar, _ := t.(*TypeParam); tpar != nil {
+	if tpar, _ := Unalias(t).(*TypeParam); tpar != nil {
 		return tpar.is(func(t *term) bool { return t != nil && isBasic(t.typ, info) })
 	}
 	return isBasic(t, info)
@@ -59,7 +62,7 @@
 // predeclared types, defined types, and type parameters.
 // hasName may be called with types that are not fully set up.
 func hasName(t Type) bool {
-	switch t.(type) {
+	switch Unalias(t).(type) {
 	case *Basic, *Named, *TypeParam:
 		return true
 	}
@@ -70,7 +73,7 @@
 // This includes all non-defined types, but also basic types.
 // isTypeLit may be called with types that are not fully set up.
 func isTypeLit(t Type) bool {
-	switch t.(type) {
+	switch Unalias(t).(type) {
 	case *Named, *TypeParam:
 		return false
 	}
@@ -81,8 +84,10 @@
 // constant or boolean. isTyped may be called with types that
 // are not fully set up.
 func isTyped(t Type) bool {
-	// isTyped is called with types that are not fully
-	// set up. Must not call under()!
+	// Alias or Named types cannot denote untyped types,
+	// thus we don't need to call Unalias or under
+	// (which would be unsafe to do for types that are
+	// not fully set up).
 	b, _ := t.(*Basic)
 	return b == nil || b.info&IsUntyped == 0
 }
@@ -105,7 +110,7 @@
 
 // isTypeParam reports whether t is a type parameter.
 func isTypeParam(t Type) bool {
-	_, ok := t.(*TypeParam)
+	_, ok := Unalias(t).(*TypeParam)
 	return ok
 }
 
@@ -114,7 +119,7 @@
 // use anywhere, but it may report a false negative if the type set has not been
 // computed yet.
 func hasEmptyTypeset(t Type) bool {
-	if tpar, _ := t.(*TypeParam); tpar != nil && tpar.bound != nil {
+	if tpar, _ := Unalias(t).(*TypeParam); tpar != nil && tpar.bound != nil {
 		iface, _ := safeUnderlying(tpar.bound).(*Interface)
 		return iface != nil && iface.tset != nil && iface.tset.IsEmpty()
 	}
@@ -126,7 +131,7 @@
 // TODO(gri) should we include signatures or assert that they are not present?
 func isGeneric(t Type) bool {
 	// A parameterized type is only generic if it doesn't have an instantiation already.
-	named, _ := t.(*Named)
+	named := asNamed(t)
 	return named != nil && named.obj != nil && named.inst == nil && named.TypeParams().Len() > 0
 }
 
@@ -220,11 +225,14 @@
 
 // For changes to this code the corresponding changes should be made to unifier.nify.
 func (c *comparer) identical(x, y Type, p *ifacePair) bool {
+	x = Unalias(x)
+	y = Unalias(y)
+
 	if x == y {
 		return true
 	}
 
-	if c.ignoreInvalids && (x == Typ[Invalid] || y == Typ[Invalid]) {
+	if c.ignoreInvalids && (!isValid(x) || !isValid(y)) {
 		return true
 	}
 
@@ -437,7 +445,7 @@
 		// Two named types are identical if their type names originate
 		// in the same type declaration; if they are instantiated they
 		// must have identical type argument lists.
-		if y, ok := y.(*Named); ok {
+		if y := asNamed(y); y != nil {
 			// check type arguments before origins to match unifier
 			// (for correct source code we need to do all checks so
 			// order doesn't matter)
@@ -451,7 +459,7 @@
 					return false
 				}
 			}
-			return indenticalOrigin(x, y)
+			return identicalOrigin(x, y)
 		}
 
 	case *TypeParam:
@@ -468,7 +476,7 @@
 }
 
 // identicalOrigin reports whether x and y originated in the same declaration.
-func indenticalOrigin(x, y *Named) bool {
+func identicalOrigin(x, y *Named) bool {
 	// TODO(gri) is this correct?
 	return x.Origin().obj == y.Origin().obj
 }
@@ -494,7 +502,7 @@
 // it returns the incoming type for all other types. The default type
 // for untyped nil is untyped nil.
 func Default(t Type) Type {
-	if t, ok := t.(*Basic); ok {
+	if t, ok := Unalias(t).(*Basic); ok {
 		switch t.kind {
 		case UntypedBool:
 			return Typ[Bool]
@@ -532,3 +540,9 @@
 	}
 	return nil
 }
+
+// clone makes a "flat copy" of *p and returns a pointer to the copy.
+func clone[P *T, T any](p P) P {
+	c := *p
+	return &c
+}
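
Most of the changes above thread Unalias through the predicates so that alias types (materialized as *Alias nodes when GODEBUG=gotypesalias=1) behave like the types they denote. A minimal sketch using the exported Unalias (my example, not part of the patch):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p; type A = []int; var x A`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	x := pkg.Scope().Lookup("x")
	// Whether x.Type() is a *types.Alias depends on the gotypesalias
	// setting; Unalias always resolves to the aliased type.
	fmt.Println(types.Unalias(x.Type())) // []int
}
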
diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go
index 6397b39..f8283447 100644
--- a/src/go/types/resolver.go
+++ b/src/go/types/resolver.go
@@ -659,32 +659,39 @@
 		}
 	}
 
-	// We process non-alias type declarations first, followed by alias declarations,
-	// and then everything else. This appears to avoid most situations where the type
-	// of an alias is needed before it is available.
-	// There may still be cases where this is not good enough (see also go.dev/issue/25838).
-	// In those cases Checker.ident will report an error ("invalid use of type alias").
-	var aliasList []*TypeName
-	var othersList []Object // everything that's not a type
-	// phase 1: non-alias type declarations
-	for _, obj := range objList {
-		if tname, _ := obj.(*TypeName); tname != nil {
-			if check.objMap[tname].tdecl.Assign.IsValid() {
-				aliasList = append(aliasList, tname)
-			} else {
-				check.objDecl(obj, nil)
-			}
-		} else {
-			othersList = append(othersList, obj)
+	if check.enableAlias {
+		// With Alias nodes we can process declarations in any order.
+		for _, obj := range objList {
+			check.objDecl(obj, nil)
 		}
-	}
-	// phase 2: alias type declarations
-	for _, obj := range aliasList {
-		check.objDecl(obj, nil)
-	}
-	// phase 3: all other declarations
-	for _, obj := range othersList {
-		check.objDecl(obj, nil)
+	} else {
+		// Without Alias nodes, we process non-alias type declarations first, followed by
+		// alias declarations, and then everything else. This appears to avoid most situations
+		// where the type of an alias is needed before it is available.
+		// There may still be cases where this is not good enough (see also go.dev/issue/25838).
+		// In those cases Checker.ident will report an error ("invalid use of type alias").
+		var aliasList []*TypeName
+		var othersList []Object // everything that's not a type
+		// phase 1: non-alias type declarations
+		for _, obj := range objList {
+			if tname, _ := obj.(*TypeName); tname != nil {
+				if check.objMap[tname].tdecl.Assign.IsValid() {
+					aliasList = append(aliasList, tname)
+				} else {
+					check.objDecl(obj, nil)
+				}
+			} else {
+				othersList = append(othersList, obj)
+			}
+		}
+		// phase 2: alias type declarations
+		for _, obj := range aliasList {
+			check.objDecl(obj, nil)
+		}
+		// phase 3: all other declarations
+		for _, obj := range othersList {
+			check.objDecl(obj, nil)
+		}
 	}
 
 	// At this point we may have a non-empty check.methods map; this means that not all
diff --git a/src/go/types/selection.go b/src/go/types/selection.go
index c79e13c..50d340c 100644
--- a/src/go/types/selection.go
+++ b/src/go/types/selection.go
@@ -15,6 +15,39 @@
 
 // SelectionKind describes the kind of a selector expression x.f
 // (excluding qualified identifiers).
+//
+// If x is a struct or *struct, a selector expression x.f may denote a
+// sequence of selection operations x.a.b.c.f. The SelectionKind
+// describes the kind of the final (explicit) operation; all the
+// previous (implicit) operations are always field selections.
+// Each element of Indices specifies an implicit field (a, b, c)
+// by its index in the struct type of the field selection operand.
+//
+// For a FieldVal operation, the final selection refers to the field
+// specified by Selection.Obj.
+//
+// For a MethodVal operation, the final selection refers to a method.
+// If the "pointerness" of the method's declared receiver does not
+// match that of the effective receiver after implicit field
+// selection, then an & or * operation is implicitly applied to the
+// receiver variable or value.
+// So, x.f denotes (&x.a.b.c).f when f requires a pointer receiver but
+// x.a.b.c is a non-pointer variable; and it denotes (*x.a.b.c).f when
+// f requires a non-pointer receiver but x.a.b.c is a pointer value.
+//
+// All pointer indirections, whether due to implicit or explicit field
+// selections or * operations inserted for "pointerness", panic if
+// applied to a nil pointer, so a method call x.f() may panic even
+// before the function call.
+//
+// By contrast, a MethodExpr operation T.f is essentially equivalent
+// to a function literal of the form:
+//
+//	func(x T, args) (results) { return x.f(args) }
+//
+// Consequently, any implicit field selections and * operations
+// inserted for "pointerness" are not evaluated until the function is
+// called, so a T.f or (*T).f expression never panics.
 type SelectionKind int
 
 const (
@@ -104,6 +137,11 @@
 
 // Indirect reports whether any pointer indirection was required to get from
 // x to f in x.f.
+//
+// Beware: Indirect spuriously returns true (Go issue #8353) for a
+// MethodVal selection in which the receiver argument and parameter
+// both have type *T so there is no indirection.
+// Unfortunately, a fix is too risky.
 func (s *Selection) Indirect() bool { return s.indirect }
 
 func (s *Selection) String() string { return SelectionString(s, nil) }
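A small illustrative sketch (not part of the patch) of the selection kinds and implicit operations documented above, assuming ordinary go/types semantics:

package p

type T struct{ x int }

func (t *T) M() int { return t.x }

type S struct{ T } // S embeds T

func demo(s S, p *S) int {
	_ = s.x   // FieldVal: implicit field selection s.T.x
	_ = s.M() // MethodVal: receiver is (&s.T); & inserted for the pointer receiver
	_ = p.M() // MethodVal: panics before the call if p is nil (implicit p.T deref)

	f := (*T).M // MethodExpr: behaves like func(t *T) int { return t.M() }
	return f(&s.T)
}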
diff --git a/src/go/types/signature.go b/src/go/types/signature.go
index 8285f1b..cad42cb 100644
--- a/src/go/types/signature.go
+++ b/src/go/types/signature.go
@@ -7,6 +7,7 @@
 import (
 	"fmt"
 	"go/ast"
+	"go/token"
 	. "internal/types/errors"
 )
 
@@ -34,7 +35,7 @@
 // is variadic, it must have at least one parameter, and the last parameter
 // must be of unnamed slice type.
 //
-// Deprecated: Use NewSignatureType instead which allows for type parameters.
+// Deprecated: Use [NewSignatureType] instead which allows for type parameters.
 func NewSignature(recv *Var, params, results *Tuple, variadic bool) *Signature {
 	return NewSignatureType(recv, nil, nil, params, results, variadic)
 }
@@ -76,7 +77,7 @@
 // function. It is ignored when comparing signatures for identity.
 //
 // For an abstract method, Recv returns the enclosing interface either
-// as a *Named or an *Interface. Due to embedding, an interface may
+// as a *[Named] or an *[Interface]. Due to embedding, an interface may
 // contain methods whose receiver type is a different interface.
 func (s *Signature) Recv() *Var { return s.recv }
 
@@ -115,7 +116,10 @@
 		// - the receiver specification acts as local declaration for its type parameters, which may be blank
 		_, rname, rparams := check.unpackRecv(recvPar.List[0].Type, true)
 		if len(rparams) > 0 {
-			tparams := check.declareTypeParams(nil, rparams)
+			// The scope of the type parameter T in "func (r T[T]) f()"
+			// starts after f, not at "r"; see #52038.
+			scopePos := ftyp.Params.Pos()
+			tparams := check.declareTypeParams(nil, rparams, scopePos)
 			sig.rparams = bindTParams(tparams)
 			// Blank identifiers don't get declared, so naive type-checking of the
 			// receiver type expression would fail in Checker.collectParams below,
@@ -140,7 +144,7 @@
 				// Also: Don't report an error via genericType since it will be reported
 				//       again when we type-check the signature.
 				// TODO(gri) maybe the receiver should be marked as invalid instead?
-				if recv, _ := check.genericType(rname, nil).(*Named); recv != nil {
+				if recv := asNamed(check.genericType(rname, nil)); recv != nil {
 					recvTParams = recv.TypeParams().list()
 				}
 			}
@@ -176,13 +180,18 @@
 		}
 	}
 
-	// Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
-	// declarations and then squash that scope into the parent scope (and report any redeclarations at
-	// that time).
+	// Use a temporary scope for all parameter declarations and then
+	// squash that scope into the parent scope (and report any
+	// redeclarations at that time).
+	//
+	// TODO(adonovan): now that each declaration has the correct
+	// scopePos, there should be no need for scope squashing.
+	// Audit to ensure all lookups honor scopePos and simplify.
 	scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
-	recvList, _ := check.collectParams(scope, recvPar, false)
-	params, variadic := check.collectParams(scope, ftyp.Params, true)
-	results, _ := check.collectParams(scope, ftyp.Results, false)
+	scopePos := ftyp.End() // all parameters' scopes start after the signature
+	recvList, _ := check.collectParams(scope, recvPar, false, scopePos)
+	params, variadic := check.collectParams(scope, ftyp.Params, true, scopePos)
+	results, _ := check.collectParams(scope, ftyp.Results, false, scopePos)
 	scope.squash(func(obj, alt Object) {
 		check.errorf(obj, DuplicateDecl, "%s redeclared in this block", obj.Name())
 		check.reportAltDecl(alt)
@@ -211,13 +220,14 @@
 		check.later(func() {
 			// spec: "The receiver type must be of the form T or *T where T is a type name."
 			rtyp, _ := deref(recv.typ)
-			if rtyp == Typ[Invalid] {
+			atyp := Unalias(rtyp)
+			if !isValid(atyp) {
 				return // error was reported before
 			}
 			// spec: "The type denoted by T is called the receiver base type; it must not
 			// be a pointer or interface type and it must be declared in the same package
 			// as the method."
-			switch T := rtyp.(type) {
+			switch T := atyp.(type) {
 			case *Named:
 				// The receiver type may be an instantiated type referred to
 				// by an alias (which cannot have receiver parameters for now).
@@ -261,7 +271,7 @@
 
 // collectParams declares the parameters of list in scope and returns the corresponding
 // variable list.
-func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, variadicOk bool) (params []*Var, variadic bool) {
+func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, variadicOk bool, scopePos token.Pos) (params []*Var, variadic bool) {
 	if list == nil {
 		return
 	}
@@ -289,7 +299,7 @@
 					// ok to continue
 				}
 				par := NewParam(name.Pos(), check.pkg, name.Name, typ)
-				check.declare(scope, name, par, scope.pos)
+				check.declare(scope, name, par, scopePos)
 				params = append(params, par)
 			}
 			named = true
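Illustrative only (not from the patch): because parameter scopes start after the signature and are squashed into the function-body scope, a body-level declaration that reuses a parameter name is rejected with the error reported by the squash callback above.

package p

func f(x int) {
	var x string // error: x redeclared in this block
	_ = x
}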
diff --git a/src/go/types/sizes.go b/src/go/types/sizes.go
index 2dcaebe..12a2140 100644
--- a/src/go/types/sizes.go
+++ b/src/go/types/sizes.go
@@ -114,8 +114,8 @@
 }
 
 func _IsSyncAtomicAlign64(T Type) bool {
-	named, ok := T.(*Named)
-	if !ok {
+	named := asNamed(T)
+	if named == nil {
 		return false
 	}
 	obj := named.Obj()
@@ -229,7 +229,7 @@
 }
 
 // common architecture word sizes and alignments
-var gcArchSizes = map[string]*StdSizes{
+var gcArchSizes = map[string]*gcSizes{
 	"386":      {4, 4},
 	"amd64":    {8, 8},
 	"amd64p32": {4, 8},
@@ -257,20 +257,17 @@
 // "386", "amd64", "amd64p32", "arm", "arm64", "loong64", "mips", "mipsle",
 // "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm".
 func SizesFor(compiler, arch string) Sizes {
-	var m map[string]*StdSizes
 	switch compiler {
 	case "gc":
-		m = gcArchSizes
+		if s := gcSizesFor(compiler, arch); s != nil {
+			return Sizes(s)
+		}
 	case "gccgo":
-		m = gccgoArchSizes
-	default:
-		return nil
+		if s, ok := gccgoArchSizes[arch]; ok {
+			return Sizes(s)
+		}
 	}
-	s, ok := m[arch]
-	if !ok {
-		return nil
-	}
-	return s
+	return nil
 }
 
 // stdSizes is used if Config.Sizes == nil.
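A usage sketch (not part of the patch) of the exported SizesFor entry point whose lookup is rewritten above; it returns nil for an unknown compiler or architecture.

package main

import (
	"fmt"
	"go/types"
)

func main() {
	if s := types.SizesFor("gc", "amd64"); s != nil {
		fmt.Println(s.Sizeof(types.Typ[types.Int64])) // 8
	}
	fmt.Println(types.SizesFor("gc", "no-such-arch") == nil) // true
}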
diff --git a/src/go/types/sizes_test.go b/src/go/types/sizes_test.go
index f2e7e8a..825bc1f 100644
--- a/src/go/types/sizes_test.go
+++ b/src/go/types/sizes_test.go
@@ -134,3 +134,62 @@
 		})
 	}
 }
+
+type gcSizeTest struct {
+	name string
+	src  string
+}
+
+var gcSizesTests = []gcSizeTest{
+	{
+		"issue60431",
+		`
+package main
+
+import "unsafe"
+
+// The foo struct size is expected to be rounded up to 16 bytes.
+type foo struct {
+	a int64
+	b bool
+}
+
+func main() {
+	assert(unsafe.Sizeof(foo{}) == 16)
+}`,
+	},
+	{
+		"issue60734",
+		`
+package main
+
+import (
+	"unsafe"
+)
+
+// The Data struct size is expected to be rounded up to 16 bytes.
+type Data struct {
+	Value  uint32   // 4 bytes
+	Label  [10]byte // 10 bytes
+	Active bool     // 1 byte
+	// padded with 1 byte to make it align
+}
+
+func main() {
+	assert(unsafe.Sizeof(Data{}) == 16)
+}
+`,
+	},
+}
+
+func TestGCSizes(t *testing.T) {
+	types.DefPredeclaredTestFuncs()
+	for _, tc := range gcSizesTests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			conf := types.Config{Importer: importer.Default(), Sizes: types.SizesFor("gc", "amd64")}
+			mustTypecheck(tc.src, &conf, nil)
+		})
+	}
+}
diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go
index 07c9222..a89cd85 100644
--- a/src/go/types/stdlib_test.go
+++ b/src/go/types/stdlib_test.go
@@ -192,7 +192,7 @@
 				lit = lit[:len(lit)-2]
 			}
 			contents := strings.TrimSpace(lit[2:])
-			if strings.HasPrefix(contents, "+build ") {
+			if strings.HasPrefix(contents, "go:build ") {
 				return "skip"
 			}
 			if first == "" {
@@ -237,6 +237,9 @@
 		filename := filepath.Join(path, f.Name())
 		goVersion := ""
 		if comment := firstComment(filename); comment != "" {
+			if strings.Contains(comment, "-goexperiment") {
+				continue // ignore this file
+			}
 			fields := strings.Fields(comment)
 			switch fields[0] {
 			case "skip", "compiledir":
@@ -309,6 +312,7 @@
 
 	testTestDir(t, filepath.Join(testenv.GOROOT(t), "test", "fixedbugs"),
 		"bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore
+		"bug398.go",      // go/types doesn't check for anonymous interface cycles (go.dev/issue/56103)
 		"issue6889.go",   // gc-specific test
 		"issue11362.go",  // canonical import path check
 		"issue16369.go",  // go/types handles this correctly - not an issue
@@ -327,6 +331,7 @@
 		"issue49767.go",  // go/types does not have constraints on channel element size
 		"issue49814.go",  // go/types does not have constraints on array size
 		"issue56103.go",  // anonymous interface cycles; will be a type checker error in 1.22
+		"issue52697.go",  // go/types does not have constraints on stack size
 
 		// These tests requires runtime/cgo.Incomplete, which is only available on some platforms.
 		// However, go/types does not know about build constraints.
diff --git a/src/go/types/stmt.go b/src/go/types/stmt.go
index 7869f37..80f3ac7 100644
--- a/src/go/types/stmt.go
+++ b/src/go/types/stmt.go
@@ -10,6 +10,7 @@
 	"go/ast"
 	"go/constant"
 	"go/token"
+	"internal/buildcfg"
 	. "internal/types/errors"
 	"sort"
 )
@@ -23,10 +24,6 @@
 		check.trace(body.Pos(), "-- %s: %s", name, sig)
 	}
 
-	// set function scope extent
-	sig.scope.pos = body.Pos()
-	sig.scope.end = body.End()
-
 	// save/restore current environment and set up function environment
 	// (and use 0 indentation at function start)
 	defer func(env environment, indent int) {
@@ -291,7 +288,7 @@
 			check.expr(nil, &dummy, e) // run e through expr so we get the usual Info recordings
 		} else {
 			T = check.varType(e)
-			if T == Typ[Invalid] {
+			if !isValid(T) {
 				continue L
 			}
 		}
@@ -332,7 +329,7 @@
 // 			hash = "<nil>" // avoid collision with a type named nil
 // 		} else {
 // 			T = check.varType(e)
-// 			if T == Typ[Invalid] {
+// 			if !isValid(T) {
 // 				continue L
 // 			}
 // 			hash = typeHash(T, nil)
@@ -463,7 +460,7 @@
 		if x.mode == invalid {
 			return
 		}
-		check.assignVar(s.X, nil, &x)
+		check.assignVar(s.X, nil, &x, "assignment")
 
 	case *ast.AssignStmt:
 		switch s.Tok {
@@ -495,7 +492,7 @@
 			if x.mode == invalid {
 				return
 			}
-			check.assignVar(s.Lhs[0], nil, &x)
+			check.assignVar(s.Lhs[0], nil, &x, "assignment")
 		}
 
 	case *ast.GoStmt:
@@ -827,136 +824,220 @@
 
 	case *ast.RangeStmt:
 		inner |= breakOk | continueOk
-
-		// check expression to iterate over
-		var x operand
-		check.expr(nil, &x, s.X)
-
-		// determine key/value types
-		var key, val Type
-		if x.mode != invalid {
-			// Ranging over a type parameter is permitted if it has a core type.
-			var cause string
-			u := coreType(x.typ)
-			switch t := u.(type) {
-			case nil:
-				cause = check.sprintf("%s has no core type", x.typ)
-			case *Chan:
-				if s.Value != nil {
-					check.softErrorf(s.Value, InvalidIterVar, "range over %s permits only one iteration variable", &x)
-					// ok to continue
-				}
-				if t.dir == SendOnly {
-					cause = "receive from send-only channel"
-				}
-			}
-			key, val = rangeKeyVal(u)
-			if key == nil || cause != "" {
-				if cause == "" {
-					check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s", &x)
-				} else {
-					check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s (%s)", &x, cause)
-				}
-				// ok to continue
-			}
-		}
-
-		// Open the for-statement block scope now, after the range clause.
-		// Iteration variables declared with := need to go in this scope (was go.dev/issue/51437).
-		check.openScope(s, "range")
-		defer check.closeScope()
-
-		// check assignment to/declaration of iteration variables
-		// (irregular assignment, cannot easily map to existing assignment checks)
-
-		// lhs expressions and initialization value (rhs) types
-		lhs := [2]ast.Expr{s.Key, s.Value}
-		rhs := [2]Type{key, val} // key, val may be nil
-
-		if s.Tok == token.DEFINE {
-			// short variable declaration
-			var vars []*Var
-			for i, lhs := range lhs {
-				if lhs == nil {
-					continue
-				}
-
-				// determine lhs variable
-				var obj *Var
-				if ident, _ := lhs.(*ast.Ident); ident != nil {
-					// declare new variable
-					name := ident.Name
-					obj = NewVar(ident.Pos(), check.pkg, name, nil)
-					check.recordDef(ident, obj)
-					// _ variables don't count as new variables
-					if name != "_" {
-						vars = append(vars, obj)
-					}
-				} else {
-					check.errorf(lhs, InvalidSyntaxTree, "cannot declare %s", lhs)
-					obj = NewVar(lhs.Pos(), check.pkg, "_", nil) // dummy variable
-				}
-
-				// initialize lhs variable
-				if typ := rhs[i]; typ != nil {
-					x.mode = value
-					x.expr = lhs // we don't have a better rhs expression to use here
-					x.typ = typ
-					check.initVar(obj, &x, "range clause")
-				} else {
-					obj.typ = Typ[Invalid]
-					obj.used = true // don't complain about unused variable
-				}
-			}
-
-			// declare variables
-			if len(vars) > 0 {
-				scopePos := s.Body.Pos()
-				for _, obj := range vars {
-					check.declare(check.scope, nil /* recordDef already called */, obj, scopePos)
-				}
-			} else {
-				check.error(inNode(s, s.TokPos), NoNewVar, "no new variables on left side of :=")
-			}
-		} else {
-			// ordinary assignment
-			for i, lhs := range lhs {
-				if lhs == nil {
-					continue
-				}
-				if typ := rhs[i]; typ != nil {
-					x.mode = value
-					x.expr = lhs // we don't have a better rhs expression to use here
-					x.typ = typ
-					check.assignVar(lhs, nil, &x)
-				}
-			}
-		}
-
-		check.stmt(inner, s.Body)
+		check.rangeStmt(inner, s)
 
 	default:
 		check.error(s, InvalidSyntaxTree, "invalid statement")
 	}
 }
 
+func (check *Checker) rangeStmt(inner stmtContext, s *ast.RangeStmt) {
+	// Convert go/ast form to local variables.
+	type Expr = ast.Expr
+	type identType = ast.Ident
+	identName := func(n *identType) string { return n.Name }
+	sKey, sValue := s.Key, s.Value
+	var sExtra ast.Expr = nil
+	isDef := s.Tok == token.DEFINE
+	rangeVar := s.X
+	noNewVarPos := inNode(s, s.TokPos)
+
+	// Everything from here on is shared between cmd/compile/internal/types2 and go/types.
+
+	// check expression to iterate over
+	var x operand
+	check.expr(nil, &x, rangeVar)
+
+	// determine key/value types
+	var key, val Type
+	if x.mode != invalid {
+		// Ranging over a type parameter is permitted if it has a core type.
+		k, v, cause, isFunc, ok := rangeKeyVal(x.typ, func(v goVersion) bool {
+			return check.allowVersion(check.pkg, x.expr, v)
+		})
+		switch {
+		case !ok && cause != "":
+			check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s: %s", &x, cause)
+		case !ok:
+			check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s", &x)
+		case k == nil && sKey != nil:
+			check.softErrorf(sKey, InvalidIterVar, "range over %s permits no iteration variables", &x)
+		case v == nil && sValue != nil:
+			check.softErrorf(sValue, InvalidIterVar, "range over %s permits only one iteration variable", &x)
+		case sExtra != nil:
+			check.softErrorf(sExtra, InvalidIterVar, "range clause permits at most two iteration variables")
+		case isFunc && ((k == nil) != (sKey == nil) || (v == nil) != (sValue == nil)):
+			var count string
+			switch {
+			case k == nil:
+				count = "no iteration variables"
+			case v == nil:
+				count = "one iteration variable"
+			default:
+				count = "two iteration variables"
+			}
+			check.softErrorf(&x, InvalidIterVar, "range over %s must have %s", &x, count)
+		}
+		key, val = k, v
+	}
+
+	// Open the for-statement block scope now, after the range clause.
+	// Iteration variables declared with := need to go in this scope (was go.dev/issue/51437).
+	check.openScope(s, "range")
+	defer check.closeScope()
+
+	// check assignment to/declaration of iteration variables
+	// (irregular assignment, cannot easily map to existing assignment checks)
+
+	// lhs expressions and initialization value (rhs) types
+	lhs := [2]Expr{sKey, sValue} // sKey, sValue may be nil
+	rhs := [2]Type{key, val}     // key, val may be nil
+
+	constIntRange := x.mode == constant_ && isInteger(x.typ)
+
+	if isDef {
+		// short variable declaration
+		var vars []*Var
+		for i, lhs := range lhs {
+			if lhs == nil {
+				continue
+			}
+
+			// determine lhs variable
+			var obj *Var
+			if ident, _ := lhs.(*identType); ident != nil {
+				// declare new variable
+				name := identName(ident)
+				obj = NewVar(ident.Pos(), check.pkg, name, nil)
+				check.recordDef(ident, obj)
+				// _ variables don't count as new variables
+				if name != "_" {
+					vars = append(vars, obj)
+				}
+			} else {
+				check.errorf(lhs, InvalidSyntaxTree, "cannot declare %s", lhs)
+				obj = NewVar(lhs.Pos(), check.pkg, "_", nil) // dummy variable
+			}
+
+			// initialize lhs variable
+			if constIntRange {
+				check.initVar(obj, &x, "range clause")
+			} else if typ := rhs[i]; typ != nil {
+				x.mode = value
+				x.expr = lhs // we don't have a better rhs expression to use here
+				x.typ = typ
+				check.initVar(obj, &x, "assignment") // error is on variable, use "assignment" not "range clause"
+			} else {
+				obj.typ = Typ[Invalid]
+				obj.used = true // don't complain about unused variable
+			}
+		}
+
+		// declare variables
+		if len(vars) > 0 {
+			scopePos := s.Body.Pos()
+			for _, obj := range vars {
+				check.declare(check.scope, nil /* recordDef already called */, obj, scopePos)
+			}
+		} else {
+			check.error(noNewVarPos, NoNewVar, "no new variables on left side of :=")
+		}
+	} else if sKey != nil /* lhs[0] != nil */ {
+		// ordinary assignment
+		for i, lhs := range lhs {
+			if lhs == nil {
+				continue
+			}
+
+			if constIntRange {
+				check.assignVar(lhs, nil, &x, "range clause")
+			} else if typ := rhs[i]; typ != nil {
+				x.mode = value
+				x.expr = lhs // we don't have a better rhs expression to use here
+				x.typ = typ
+				check.assignVar(lhs, nil, &x, "assignment") // error is on variable, use "assignment" not "range clause"
+			}
+		}
+	} else if constIntRange {
+		// If we don't have any iteration variables, we still need to
+		// check that a (possibly untyped) integer range expression x
+		// is valid.
+		// We do this by checking the assignment _ = x. This ensures
+		// that an untyped x can be converted to a value of type int.
+		check.assignment(&x, nil, "range clause")
+	}
+
+	check.stmt(inner, s.Body)
+}
+
 // rangeKeyVal returns the key and value type produced by a range clause
-// over an expression of type typ. If the range clause is not permitted
-// the results are nil.
-func rangeKeyVal(typ Type) (key, val Type) {
-	switch typ := arrayPtrDeref(typ).(type) {
+// over an expression of type typ.
+// If allowVersion != nil, it is used to check the required language version.
+// If the range clause is not permitted, rangeKeyVal returns ok = false.
+// When ok = false, rangeKeyVal may also return a reason in cause.
+func rangeKeyVal(typ Type, allowVersion func(goVersion) bool) (key, val Type, cause string, isFunc, ok bool) {
+	bad := func(cause string) (Type, Type, string, bool, bool) {
+		return Typ[Invalid], Typ[Invalid], cause, false, false
+	}
+	toSig := func(t Type) *Signature {
+		sig, _ := coreType(t).(*Signature)
+		return sig
+	}
+
+	orig := typ
+	switch typ := arrayPtrDeref(coreType(typ)).(type) {
+	case nil:
+		return bad("no core type")
 	case *Basic:
 		if isString(typ) {
-			return Typ[Int], universeRune // use 'rune' name
+			return Typ[Int], universeRune, "", false, true // use 'rune' name
+		}
+		if isInteger(typ) {
+			if allowVersion != nil && !allowVersion(go1_22) {
+				return bad("requires go1.22 or later")
+			}
+			return orig, nil, "", false, true
 		}
 	case *Array:
-		return Typ[Int], typ.elem
+		return Typ[Int], typ.elem, "", false, true
 	case *Slice:
-		return Typ[Int], typ.elem
+		return Typ[Int], typ.elem, "", false, true
 	case *Map:
-		return typ.key, typ.elem
+		return typ.key, typ.elem, "", false, true
 	case *Chan:
-		return typ.elem, Typ[Invalid]
+		if typ.dir == SendOnly {
+			return bad("receive from send-only channel")
+		}
+		return typ.elem, nil, "", false, true
+	case *Signature:
+		// TODO(gri) when this becomes enabled permanently, add version check
+		if !buildcfg.Experiment.RangeFunc {
+			break
+		}
+		assert(typ.Recv() == nil)
+		switch {
+		case typ.Params().Len() != 1:
+			return bad("func must be func(yield func(...) bool): wrong argument count")
+		case toSig(typ.Params().At(0).Type()) == nil:
+			return bad("func must be func(yield func(...) bool): argument is not func")
+		case typ.Results().Len() != 0:
+			return bad("func must be func(yield func(...) bool): unexpected results")
+		}
+		cb := toSig(typ.Params().At(0).Type())
+		assert(cb.Recv() == nil)
+		switch {
+		case cb.Params().Len() > 2:
+			return bad("func must be func(yield func(...) bool): yield func has too many parameters")
+		case cb.Results().Len() != 1 || !isBoolean(cb.Results().At(0).Type()):
+			return bad("func must be func(yield func(...) bool): yield func does not return bool")
+		}
+		if cb.Params().Len() >= 1 {
+			key = cb.Params().At(0).Type()
+		}
+		if cb.Params().Len() >= 2 {
+			val = cb.Params().At(1).Type()
+		}
+		return key, val, "", true, true
 	}
 	return
 }
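An illustrative sketch (not part of the patch) of the two new range forms handled by rangeKeyVal above: range over an integer (go1.22) and, behind GOEXPERIMENT=rangefunc, range over a func(yield func(...) bool).

package main

import "fmt"

func pairs(yield func(int, string) bool) {
	for i, s := range []string{"a", "b"} {
		if !yield(i, s) {
			return
		}
	}
}

func main() {
	for i := range 3 { // i has the integer's type (int here)
		fmt.Println(i)
	}
	for i, s := range pairs { // requires the rangefunc experiment in go1.22
		fmt.Println(i, s)
	}
}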
diff --git a/src/go/types/struct.go b/src/go/types/struct.go
index 7247a25..935a549 100644
--- a/src/go/types/struct.go
+++ b/src/go/types/struct.go
@@ -146,7 +146,7 @@
 				t, isPtr := deref(embeddedTyp)
 				switch u := under(t).(type) {
 				case *Basic:
-					if t == Typ[Invalid] {
+					if !isValid(t) {
 						// error was reported before
 						return
 					}
diff --git a/src/go/types/subst.go b/src/go/types/subst.go
index 30c48e1..1934eba 100644
--- a/src/go/types/subst.go
+++ b/src/go/types/subst.go
@@ -171,7 +171,9 @@
 		if mcopied || ecopied {
 			iface := subst.check.newInterface()
 			iface.embeddeds = embeddeds
+			iface.embedPos = t.embedPos
 			iface.implicit = t.implicit
+			assert(t.complete) // otherwise we are copying incomplete data
 			iface.complete = t.complete
 			// If we've changed the interface type, we may need to replace its
 			// receiver if the receiver type is the original interface. Receivers of
@@ -187,6 +189,11 @@
 			// need to create new interface methods to hold the instantiated
 			// receiver. This is handled by Named.expandUnderlying.
 			iface.methods, _ = replaceRecvType(methods, t, iface)
+
+			// If check != nil, check.newInterface will have saved the interface for later completion.
+			if subst.check == nil { // golang/go#61561: all newly created interfaces must be completed
+				iface.typeSet()
+			}
 			return iface
 		}
 
diff --git a/src/go/types/typeparam.go b/src/go/types/typeparam.go
index 763fcc6..a13f86c 100644
--- a/src/go/types/typeparam.go
+++ b/src/go/types/typeparam.go
@@ -11,11 +11,11 @@
 // Note: This is a uint32 rather than a uint64 because the
 // respective 64 bit atomic instructions are not available
 // on all platforms.
-var lastID uint32
+var lastID atomic.Uint32
 
 // nextID returns a value increasing monotonically by 1 with
 // each call, starting with 1. It may be called concurrently.
-func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }
+func nextID() uint64 { return uint64(lastID.Add(1)) }
 
 // A TypeParam represents a type parameter type.
 type TypeParam struct {
@@ -110,7 +110,7 @@
 	var ityp *Interface
 	switch u := under(bound).(type) {
 	case *Basic:
-		if u == Typ[Invalid] {
+		if !isValid(u) {
 			// error is reported elsewhere
 			return &emptyInterface
 		}
@@ -134,7 +134,7 @@
 		// pos is used for tracing output; start with the type parameter position.
 		pos := t.obj.pos
 		// use the (original or possibly instantiated) type bound position if we have one
-		if n, _ := bound.(*Named); n != nil {
+		if n := asNamed(bound); n != nil {
 			pos = n.obj.pos
 		}
 		computeInterfaceTypeSet(t.check, pos, ityp)
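Illustrative usage (not part of the patch): a nextID-style counter built on atomic.Uint32, as above, is safe to call concurrently.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var lastID atomic.Uint32

func nextID() uint64 { return uint64(lastID.Add(1)) }

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _ = nextID() }()
	}
	wg.Wait()
	fmt.Println(nextID()) // 5: four concurrent calls plus this one
}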
diff --git a/src/go/types/typeset.go b/src/go/types/typeset.go
index 206aa3d..d164749 100644
--- a/src/go/types/typeset.go
+++ b/src/go/types/typeset.go
@@ -288,7 +288,7 @@
 			assert(len(tset.methods) == 0)
 			terms = tset.terms
 		default:
-			if u == Typ[Invalid] {
+			if !isValid(u) {
 				continue
 			}
 			if check != nil && !check.verifyVersionf(atPos(pos), go1_18, "embedding non-interface type %s", typ) {
@@ -302,7 +302,6 @@
 		// separately. Here we only need to intersect the term lists and comparable bits.
 		allTerms, allComparable = intersectTermLists(allTerms, allComparable, terms, comparable)
 	}
-	ityp.embedPos = nil // not needed anymore (errors have been reported)
 
 	ityp.tset.comparable = allComparable
 	if len(allMethods) != 0 {
@@ -387,7 +386,7 @@
 			// For now we don't permit type parameters as constraints.
 			assert(!isTypeParam(t.typ))
 			terms = computeInterfaceTypeSet(check, pos, ui).terms
-		} else if u == Typ[Invalid] {
+		} else if !isValid(u) {
 			continue
 		} else {
 			if t.tilde && !Identical(t.typ, u) {
diff --git a/src/go/types/typestring.go b/src/go/types/typestring.go
index 9615e24..23bddb2 100644
--- a/src/go/types/typestring.go
+++ b/src/go/types/typestring.go
@@ -17,18 +17,18 @@
 )
 
 // A Qualifier controls how named package-level objects are printed in
-// calls to TypeString, ObjectString, and SelectionString.
+// calls to [TypeString], [ObjectString], and [SelectionString].
 //
 // These three formatting routines call the Qualifier for each
 // package-level object O, and if the Qualifier returns a non-empty
 // string p, the object is printed in the form p.O.
 // If it returns an empty string, only the object name O is printed.
 //
-// Using a nil Qualifier is equivalent to using (*Package).Path: the
+// Using a nil Qualifier is equivalent to using (*[Package]).Path: the
 // object is qualified by the import path, e.g., "encoding/json.Marshal".
 type Qualifier func(*Package) string
 
-// RelativeTo returns a Qualifier that fully qualifies members of
+// RelativeTo returns a [Qualifier] that fully qualifies members of
 // all packages other than pkg.
 func RelativeTo(pkg *Package) Qualifier {
 	if pkg == nil {
@@ -43,7 +43,7 @@
 }
 
 // TypeString returns the string representation of typ.
-// The Qualifier controls the printing of
+// The [Qualifier] controls the printing of
 // package-level objects, and may be nil.
 func TypeString(typ Type, qf Qualifier) string {
 	var buf bytes.Buffer
@@ -52,14 +52,14 @@
 }
 
 // WriteType writes the string representation of typ to buf.
-// The Qualifier controls the printing of
+// The [Qualifier] controls the printing of
 // package-level objects, and may be nil.
 func WriteType(buf *bytes.Buffer, typ Type, qf Qualifier) {
 	newTypeWriter(buf, qf).typ(typ)
 }
 
 // WriteSignature writes the representation of the signature sig to buf,
-// without a leading "func" keyword. The Qualifier controls the printing
+// without a leading "func" keyword. The [Qualifier] controls the printing
 // of package-level objects, and may be nil.
 func WriteSignature(buf *bytes.Buffer, sig *Signature, qf Qualifier) {
 	newTypeWriter(buf, qf).signature(sig)
@@ -219,7 +219,7 @@
 				w.string("any")
 				break
 			}
-			if t == universeComparable.Type().(*Named).underlying {
+			if t == asNamed(universeComparable.Type()).underlying {
 				w.string("interface{comparable}")
 				break
 			}
@@ -329,6 +329,13 @@
 			}
 		}
 
+	case *Alias:
+		w.typeName(t.obj)
+		if w.ctxt != nil {
+			// TODO(gri) do we need to print the alias type name, too?
+			w.typ(Unalias(t.obj.typ))
+		}
+
 	default:
 		// For externally defined implementations of Type.
 		// Note: In this case cycles won't be caught.
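A usage sketch (not part of the patch) of the Qualifier behavior documented above: a nil Qualifier qualifies by import path, while RelativeTo(pkg) omits the qualifier for pkg's own objects.

package main

import (
	"fmt"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")
	obj := types.NewTypeName(0, pkg, "T", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)

	fmt.Println(types.TypeString(named, nil))                   // example.com/p.T
	fmt.Println(types.TypeString(named, types.RelativeTo(pkg))) // T
}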
diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go
index ca390ab..c887b51 100644
--- a/src/go/types/typexpr.go
+++ b/src/go/types/typexpr.go
@@ -19,7 +19,7 @@
 // If an error occurred, x.mode is set to invalid.
 // For the meaning of def, see Checker.definedType, below.
 // If wantType is set, the identifier e is expected to denote a type.
-func (check *Checker) ident(x *operand, e *ast.Ident, def *Named, wantType bool) {
+func (check *Checker) ident(x *operand, e *ast.Ident, def *TypeName, wantType bool) {
 	x.mode = invalid
 	x.expr = e
 
@@ -79,7 +79,7 @@
 
 	case *Const:
 		check.addDeclDep(obj)
-		if typ == Typ[Invalid] {
+		if !isValid(typ) {
 			return
 		}
 		if obj == universeIota {
@@ -95,7 +95,7 @@
 		x.mode = constant_
 
 	case *TypeName:
-		if check.isBrokenAlias(obj) {
+		if !check.enableAlias && check.isBrokenAlias(obj) {
 			check.errorf(e, InvalidDeclCycle, "invalid use of type alias %s in recursive type (see go.dev/issue/50729)", obj.name)
 			return
 		}
@@ -109,7 +109,7 @@
 			obj.used = true
 		}
 		check.addDeclDep(obj)
-		if typ == Typ[Invalid] {
+		if !isValid(typ) {
 			return
 		}
 		x.mode = variable
@@ -173,10 +173,10 @@
 }
 
 // definedType is like typ but also accepts a type name def.
-// If def != nil, e is the type specification for the defined type def, declared
-// in a type declaration, and def.underlying will be set to the type of e before
-// any components of e are type-checked.
-func (check *Checker) definedType(e ast.Expr, def *Named) Type {
+// If def != nil, e is the type specification for the type named def, declared
+// in a type declaration, and def.typ.underlying will be set to the type of e
+// before any components of e are type-checked.
+func (check *Checker) definedType(e ast.Expr, def *TypeName) Type {
 	typ := check.typInternal(e, def)
 	assert(isTyped(typ))
 	if isGeneric(typ) {
@@ -193,7 +193,7 @@
 func (check *Checker) genericType(e ast.Expr, cause *string) Type {
 	typ := check.typInternal(e, nil)
 	assert(isTyped(typ))
-	if typ != Typ[Invalid] && !isGeneric(typ) {
+	if isValid(typ) && !isGeneric(typ) {
 		if cause != nil {
 			*cause = check.sprintf("%s is not a generic type", typ)
 		}
@@ -212,7 +212,7 @@
 
 // typInternal drives type checking of types.
 // Must only be called by definedType or genericType.
-func (check *Checker) typInternal(e0 ast.Expr, def *Named) (T Type) {
+func (check *Checker) typInternal(e0 ast.Expr, def *TypeName) (T Type) {
 	if check.conf._Trace {
 		check.trace(e0.Pos(), "-- type %s", e0)
 		check.indent++
@@ -243,7 +243,7 @@
 		switch x.mode {
 		case typexpr:
 			typ := x.typ
-			def.setUnderlying(typ)
+			setDefType(def, typ)
 			return typ
 		case invalid:
 			// ignore - error reported before
@@ -260,7 +260,7 @@
 		switch x.mode {
 		case typexpr:
 			typ := x.typ
-			def.setUnderlying(typ)
+			setDefType(def, typ)
 			return typ
 		case invalid:
 			// ignore - error reported before
@@ -283,13 +283,13 @@
 	case *ast.ArrayType:
 		if e.Len == nil {
 			typ := new(Slice)
-			def.setUnderlying(typ)
+			setDefType(def, typ)
 			typ.elem = check.varType(e.Elt)
 			return typ
 		}
 
 		typ := new(Array)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 		// Provide a more specific error when encountering a [...] array
 		// rather than leaving it to the handling of the ... expression.
 		if _, ok := e.Len.(*ast.Ellipsis); ok {
@@ -312,32 +312,32 @@
 
 	case *ast.StructType:
 		typ := new(Struct)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 		check.structType(typ, e)
 		return typ
 
 	case *ast.StarExpr:
 		typ := new(Pointer)
 		typ.base = Typ[Invalid] // avoid nil base in invalid recursive type declaration
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 		typ.base = check.varType(e.X)
 		return typ
 
 	case *ast.FuncType:
 		typ := new(Signature)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 		check.funcType(typ, nil, e)
 		return typ
 
 	case *ast.InterfaceType:
 		typ := check.newInterface()
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 		check.interfaceType(typ, e, def)
 		return typ
 
 	case *ast.MapType:
 		typ := new(Map)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 
 		typ.key = check.varType(e.Key)
 		typ.elem = check.varType(e.Value)
@@ -362,7 +362,7 @@
 
 	case *ast.ChanType:
 		typ := new(Chan)
-		def.setUnderlying(typ)
+		setDefType(def, typ)
 
 		dir := SendRecv
 		switch e.Dir {
@@ -387,11 +387,31 @@
 	}
 
 	typ := Typ[Invalid]
-	def.setUnderlying(typ)
+	setDefType(def, typ)
 	return typ
 }
 
-func (check *Checker) instantiatedType(ix *typeparams.IndexExpr, def *Named) (res Type) {
+func setDefType(def *TypeName, typ Type) {
+	if def != nil {
+		switch t := def.typ.(type) {
+		case *Alias:
+			// t.fromRHS should always be set, either to an invalid type
+			// in the beginning, or to typ in certain cyclic declarations.
+			if t.fromRHS != Typ[Invalid] && t.fromRHS != typ {
+				panic(sprintf(nil, nil, true, "t.fromRHS = %s, typ = %s\n", t.fromRHS, typ))
+			}
+			t.fromRHS = typ
+		case *Basic:
+			assert(t == Typ[Invalid])
+		case *Named:
+			t.underlying = typ
+		default:
+			panic(fmt.Sprintf("unexpected type %T", t))
+		}
+	}
+}
+
+func (check *Checker) instantiatedType(ix *typeparams.IndexExpr, def *TypeName) (res Type) {
 	if check.conf._Trace {
 		check.trace(ix.Pos(), "-- instantiating type %s with %s", ix.X, ix.Indices)
 		check.indent++
@@ -407,11 +427,11 @@
 	if cause != "" {
 		check.errorf(ix.Orig, NotAGenericType, invalidOp+"%s (%s)", ix.Orig, cause)
 	}
-	if gtyp == Typ[Invalid] {
+	if !isValid(gtyp) {
 		return gtyp // error already reported
 	}
 
-	orig, _ := gtyp.(*Named)
+	orig := asNamed(gtyp)
 	if orig == nil {
 		panic(fmt.Sprintf("%v: cannot instantiate %v", ix.Pos(), gtyp))
 	}
@@ -419,13 +439,13 @@
 	// evaluate arguments
 	targs := check.typeList(ix.Indices)
 	if targs == nil {
-		def.setUnderlying(Typ[Invalid]) // avoid errors later due to lazy instantiation
+		setDefType(def, Typ[Invalid]) // avoid errors later due to lazy instantiation
 		return Typ[Invalid]
 	}
 
 	// create the instance
-	inst := check.instance(ix.Pos(), orig, targs, nil, check.context()).(*Named)
-	def.setUnderlying(inst)
+	inst := asNamed(check.instance(ix.Pos(), orig, targs, nil, check.context()))
+	setDefType(def, inst)
 
 	// orig.tparams may not be set up, so we need to do expansion later.
 	check.later(func() {
@@ -434,7 +454,7 @@
 		// errors.
 		check.recordInstance(ix.Orig, inst.TypeArgs().list(), inst)
 
-		if check.validateTArgLen(ix.Pos(), inst.TypeParams().Len(), inst.TypeArgs().Len()) {
+		if check.validateTArgLen(ix.Pos(), inst.obj.name, inst.TypeParams().Len(), inst.TypeArgs().Len()) {
 			if i, err := check.verify(ix.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), check.context()); err != nil {
 				// best position for error reporting
 				pos := ix.Pos()
@@ -511,7 +531,7 @@
 	res := make([]Type, len(list)) // res != nil even if len(list) == 0
 	for i, x := range list {
 		t := check.varType(x)
-		if t == Typ[Invalid] {
+		if !isValid(t) {
 			res = nil
 		}
 		if res != nil {
diff --git a/src/go/types/under.go b/src/go/types/under.go
index f17d3bc..3838528 100644
--- a/src/go/types/under.go
+++ b/src/go/types/under.go
@@ -11,7 +11,7 @@
 // under must only be called when a type is known
 // to be fully set up.
 func under(t Type) Type {
-	if t, _ := t.(*Named); t != nil {
+	if t := asNamed(t); t != nil {
 		return t.under()
 	}
 	return t.Underlying()
diff --git a/src/go/types/unify.go b/src/go/types/unify.go
index 6680d97..d4889b9 100644
--- a/src/go/types/unify.go
+++ b/src/go/types/unify.go
@@ -293,6 +293,9 @@
 		u.depth--
 	}()
 
+	x = Unalias(x)
+	y = Unalias(y)
+
 	// nothing to do if x == y
 	if x == y {
 		return true
@@ -313,7 +316,7 @@
 	// Ensure that if we have at least one
 	// - defined type, make sure one is in y
 	// - type parameter recorded with u, make sure one is in x
-	if _, ok := x.(*Named); ok || u.asTypeParam(y) != nil {
+	if asNamed(x) != nil || u.asTypeParam(y) != nil {
 		if traceInference {
 			u.tracef("%s ≡ %s\t// swap", y, x)
 		}
@@ -337,7 +340,7 @@
 	// we will fail at function instantiation or argument assignment time.
 	//
 	// If we have at least one defined type, there is one in y.
-	if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
+	if ny := asNamed(y); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
 		if traceInference {
 			u.tracef("%s ≡ under %s", x, ny)
 		}
@@ -374,8 +377,8 @@
 				// We have a match, possibly through underlying types.
 				xi := asInterface(x)
 				yi := asInterface(y)
-				_, xn := x.(*Named)
-				_, yn := y.(*Named)
+				xn := asNamed(x) != nil
+				yn := asNamed(y) != nil
 				// If we have two interfaces, what to do depends on
 				// whether they are named and their method sets.
 				if xi != nil && yi != nil {
@@ -450,13 +453,6 @@
 	// x != y if we get here
 	assert(x != y)
 
-	// Type elements (array, slice, etc. elements) use emode for unification.
-	// Element types must match exactly if the types are used in an assignment.
-	emode := mode
-	if mode&assign != 0 {
-		emode |= exact
-	}
-
 	// If u.EnableInterfaceInference is set and we don't require exact unification,
 	// if both types are interfaces, one interface must have a subset of the
 	// methods of the other and corresponding method signatures must unify.
@@ -572,6 +568,13 @@
 		x, y = y, x
 	}
 
+	// Type elements (array, slice, etc. elements) use emode for unification.
+	// Element types must match exactly if the types are used in an assignment.
+	emode := mode
+	if mode&assign != 0 {
+		emode |= exact
+	}
+
 	switch x := x.(type) {
 	case *Basic:
 		// Basic types are singletons except for the rune and byte
@@ -730,7 +733,7 @@
 	case *Named:
 		// Two named types unify if their type names originate in the same type declaration.
 		// If they are instantiated, their type argument lists must unify.
-		if y, ok := y.(*Named); ok {
+		if y := asNamed(y); y != nil {
 			// Check type arguments before origins so they unify
 			// even if the origins don't match; for better error
 			// messages (see go.dev/issue/53692).
@@ -744,7 +747,7 @@
 					return false
 				}
 			}
-			return indenticalOrigin(x, y)
+			return identicalOrigin(x, y)
 		}
 
 	case *TypeParam:
diff --git a/src/go/types/union.go b/src/go/types/union.go
index 085f507..30365e3 100644
--- a/src/go/types/union.go
+++ b/src/go/types/union.go
@@ -18,7 +18,7 @@
 	terms []*Term // list of syntactical terms (not a canonicalized termlist)
 }
 
-// NewUnion returns a new Union type with the given terms.
+// NewUnion returns a new [Union] type with the given terms.
 // It is an error to create an empty union; they are syntactically not possible.
 func NewUnion(terms []*Term) *Union {
 	if len(terms) == 0 {
@@ -33,7 +33,7 @@
 func (u *Union) Underlying() Type { return u }
 func (u *Union) String() string   { return TypeString(u, nil) }
 
-// A Term represents a term in a Union.
+// A Term represents a term in a [Union].
 type Term term
 
 // NewTerm returns a new union term.
@@ -67,7 +67,7 @@
 			return term.typ // typ already recorded through check.typ in parseTilde
 		}
 		if len(terms) >= maxTermCount {
-			if u != Typ[Invalid] {
+			if isValid(u) {
 				check.errorf(x, InvalidUnion, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
 				u = Typ[Invalid]
 			}
@@ -81,7 +81,7 @@
 		}
 	}
 
-	if u == Typ[Invalid] {
+	if !isValid(u) {
 		return u
 	}
 
@@ -90,7 +90,7 @@
 	// Note: This is a quadratic algorithm, but unions tend to be short.
 	check.later(func() {
 		for i, t := range terms {
-			if t.typ == Typ[Invalid] {
+			if !isValid(t.typ) {
 				continue
 			}
 
diff --git a/src/go/types/universe.go b/src/go/types/universe.go
index cc4d42d..bde0293 100644
--- a/src/go/types/universe.go
+++ b/src/go/types/universe.go
@@ -267,7 +267,7 @@
 		return // nothing to do
 	}
 	// fix Obj link for named types
-	if typ, _ := obj.Type().(*Named); typ != nil {
+	if typ := asNamed(obj.Type()); typ != nil {
 		typ.obj = obj.(*TypeName)
 	}
 	// exported identifiers go into package unsafe
diff --git a/src/go/types/util_test.go b/src/go/types/util_test.go
index 2052372..70d376f 100644
--- a/src/go/types/util_test.go
+++ b/src/go/types/util_test.go
@@ -9,6 +9,11 @@
 
 package types
 
-import "go/token"
+import (
+	"go/token"
+)
 
 func CmpPos(p, q token.Pos) int { return cmpPos(p, q) }
+
+func ScopeComment(s *Scope) string        { return s.comment }
+func ObjectScopePos(obj Object) token.Pos { return obj.scopePos() }
diff --git a/src/go/types/validtype.go b/src/go/types/validtype.go
index d915fef..0638714 100644
--- a/src/go/types/validtype.go
+++ b/src/go/types/validtype.go
@@ -25,7 +25,7 @@
 // (say S->F->S) we have an invalid recursive type. The path list is the full
 // path of named types in a cycle, it is only needed for error reporting.
 func (check *Checker) validType0(typ Type, nest, path []*Named) bool {
-	switch t := typ.(type) {
+	switch t := Unalias(typ).(type) {
 	case nil:
 		// We should never see a nil type but be conservative and panic
 		// only in debug mode.
@@ -70,7 +70,7 @@
 		// Don't report a 2nd error if we already know the type is invalid
 		// (e.g., if a cycle was detected earlier, via under).
 		// Note: ensure that t.orig is fully resolved by calling Underlying().
-		if t.Underlying() == Typ[Invalid] {
+		if !isValid(t.Underlying()) {
 			return false
 		}
 
diff --git a/src/go/types/version.go b/src/go/types/version.go
index 108d9b3..f2466ed 100644
--- a/src/go/types/version.go
+++ b/src/go/types/version.go
@@ -8,90 +8,47 @@
 	"fmt"
 	"go/ast"
 	"go/token"
+	"go/version"
+	"internal/goversion"
 	"strings"
 )
 
-// A version represents a released Go version.
-type version struct {
-	major, minor int
+// A goVersion is a Go language version string of the form "go1.%d"
+// where d is the minor version number. goVersion strings don't
+// contain release numbers ("go1.20.1" is not a valid goVersion).
+type goVersion string
+
+// asGoVersion returns v as a goVersion (e.g., "go1.20.1" becomes "go1.20").
+// If v is not a valid Go version, the result is the empty string.
+func asGoVersion(v string) goVersion {
+	return goVersion(version.Lang(v))
 }
 
-func (v version) String() string {
-	return fmt.Sprintf("go%d.%d", v.major, v.minor)
+// isValid reports whether v is a valid Go version.
+func (v goVersion) isValid() bool {
+	return v != ""
 }
 
-func (v version) equal(u version) bool {
-	return v.major == u.major && v.minor == u.minor
+// cmp returns -1, 0, or +1 depending on whether x < y, x == y, or x > y,
+// interpreted as Go versions.
+func (x goVersion) cmp(y goVersion) int {
+	return version.Compare(string(x), string(y))
 }
 
-func (v version) before(u version) bool {
-	return v.major < u.major || v.major == u.major && v.minor < u.minor
-}
-
-func (v version) after(u version) bool {
-	return v.major > u.major || v.major == u.major && v.minor > u.minor
-}
-
-// Go versions that introduced language changes.
 var (
-	go0_0  = version{0, 0} // no version specified
-	go1_9  = version{1, 9}
-	go1_13 = version{1, 13}
-	go1_14 = version{1, 14}
-	go1_17 = version{1, 17}
-	go1_18 = version{1, 18}
-	go1_20 = version{1, 20}
-	go1_21 = version{1, 21}
-)
+	// Go versions that introduced language changes
+	go1_9  = asGoVersion("go1.9")
+	go1_13 = asGoVersion("go1.13")
+	go1_14 = asGoVersion("go1.14")
+	go1_17 = asGoVersion("go1.17")
+	go1_18 = asGoVersion("go1.18")
+	go1_20 = asGoVersion("go1.20")
+	go1_21 = asGoVersion("go1.21")
+	go1_22 = asGoVersion("go1.22")
 
-// parseGoVersion parses a Go version string (such as "go1.12")
-// and returns the version, or an error. If s is the empty
-// string, the version is 0.0.
-func parseGoVersion(s string) (v version, err error) {
-	bad := func() (version, error) {
-		return version{}, fmt.Errorf("invalid Go version syntax %q", s)
-	}
-	if s == "" {
-		return
-	}
-	if !strings.HasPrefix(s, "go") {
-		return bad()
-	}
-	s = s[len("go"):]
-	i := 0
-	for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
-		if i >= 10 || i == 0 && s[i] == '0' {
-			return bad()
-		}
-		v.major = 10*v.major + int(s[i]) - '0'
-	}
-	if i > 0 && i == len(s) {
-		return
-	}
-	if i == 0 || s[i] != '.' {
-		return bad()
-	}
-	s = s[i+1:]
-	if s == "0" {
-		// We really should not accept "go1.0",
-		// but we didn't reject it from the start
-		// and there are now programs that use it.
-		// So accept it.
-		return
-	}
-	i = 0
-	for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
-		if i >= 10 || i == 0 && s[i] == '0' {
-			return bad()
-		}
-		v.minor = 10*v.minor + int(s[i]) - '0'
-	}
-	// Accept any suffix after the minor number.
-	// We are only looking for the language version (major.minor)
-	// but want to accept any valid Go version, like go1.21.0
-	// and go1.21rc2.
-	return
-}
+	// current (deployed) Go version
+	go_current = asGoVersion(fmt.Sprintf("go1.%d", goversion.Version))
+)
 
 // langCompat reports an error if the representation of a numeric
 // literal is not compatible with the current language version.
@@ -122,33 +79,54 @@
 	}
 }
 
-// allowVersion reports whether the given package
-// is allowed to use version major.minor.
-func (check *Checker) allowVersion(pkg *Package, at positioner, v version) bool {
+// allowVersion reports whether the given package is allowed to use version v.
+func (check *Checker) allowVersion(pkg *Package, at positioner, v goVersion) bool {
 	// We assume that imported packages have all been checked,
 	// so we only have to check for the local package.
 	if pkg != check.pkg {
 		return true
 	}
 
-	// If the source file declares its Go version, use that to decide.
-	if check.posVers != nil {
-		if src, ok := check.posVers[check.fset.File(at.Pos())]; ok && src.major >= 1 {
-			return !src.before(v)
-		}
+	// If no explicit file version is specified,
+	// fileVersion corresponds to the module version.
+	var fileVersion goVersion
+	if pos := at.Pos(); pos.IsValid() {
+		// We need version.Lang below because file versions
+		// can be (unaltered) Config.GoVersion strings that
+		// may contain dot-release information.
+		fileVersion = asGoVersion(check.versions[check.fileFor(pos)])
 	}
-
-	// Otherwise fall back to the version in the checker.
-	return check.version.equal(go0_0) || !check.version.before(v)
+	return !fileVersion.isValid() || fileVersion.cmp(v) >= 0
 }
 
 // verifyVersionf is like allowVersion but also accepts a format string and arguments
 // which are used to report a version error if allowVersion returns false. It uses the
 // current package.
-func (check *Checker) verifyVersionf(at positioner, v version, format string, args ...interface{}) bool {
+func (check *Checker) verifyVersionf(at positioner, v goVersion, format string, args ...interface{}) bool {
 	if !check.allowVersion(check.pkg, at, v) {
 		check.versionErrorf(at, v, format, args...)
 		return false
 	}
 	return true
 }
+
+// TODO(gri) Consider a more direct (position-independent) mechanism
+//           to identify which file we're in so that version checks
+//           work correctly in the absence of correct position info.
+
+// fileFor returns the *ast.File which contains the position pos.
+// If there are no files, the result is nil.
+// The position must be valid.
+func (check *Checker) fileFor(pos token.Pos) *ast.File {
+	assert(pos.IsValid())
+	// Eval and CheckExpr tests may not have any source files.
+	if len(check.files) == 0 {
+		return nil
+	}
+	for _, file := range check.files {
+		if file.FileStart <= pos && pos < file.FileEnd {
+			return file
+		}
+	}
+	panic(check.sprintf("file not found for pos = %d (%s)", int(pos), check.fset.Position(pos)))
+}
diff --git a/src/go/types/version_test.go b/src/go/types/version_test.go
deleted file mode 100644
index d25f7f5..0000000
--- a/src/go/types/version_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
-
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types
-
-import "testing"
-
-var parseGoVersionTests = []struct {
-	in  string
-	out version
-}{
-	{"go1.21", version{1, 21}},
-	{"go1.21.0", version{1, 21}},
-	{"go1.21rc2", version{1, 21}},
-}
-
-func TestParseGoVersion(t *testing.T) {
-	for _, tt := range parseGoVersionTests {
-		if out, err := parseGoVersion(tt.in); out != tt.out || err != nil {
-			t.Errorf("parseGoVersion(%q) = %v, %v, want %v, nil", tt.in, out, err, tt.out)
-		}
-	}
-}
diff --git a/src/go/version/version.go b/src/go/version/version.go
new file mode 100644
index 0000000..6b8ee67
--- /dev/null
+++ b/src/go/version/version.go
@@ -0,0 +1,64 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package version provides operations on [Go versions]
+// in [Go toolchain name syntax]: strings like
+// "go1.20", "go1.21.0", "go1.22rc2", and "go1.23.4-bigcorp".
+//
+// [Go versions]: https://go.dev/doc/toolchain#version
+// [Go toolchain name syntax]: https://go.dev/doc/toolchain#name
+package version // import "go/version"
+
+import (
+	"internal/gover"
+	"strings"
+)
+
+// stripGo converts from a "go1.21-bigcorp" version to a "1.21" version.
+// If v does not start with "go", stripGo returns the empty string (a known invalid version).
+func stripGo(v string) string {
+	v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix.
+	if len(v) < 2 || v[:2] != "go" {
+		return ""
+	}
+	return v[2:]
+}
+
+// Lang returns the Go language version for version x.
+// If x is not a valid version, Lang returns the empty string.
+// For example:
+//
+//	Lang("go1.21rc2") = "go1.21"
+//	Lang("go1.21.2") = "go1.21"
+//	Lang("go1.21") = "go1.21"
+//	Lang("go1") = "go1"
+//	Lang("bad") = ""
+//	Lang("1.21") = ""
+func Lang(x string) string {
+	v := gover.Lang(stripGo(x))
+	if v == "" {
+		return ""
+	}
+	if strings.HasPrefix(x[2:], v) {
+		return x[:2+len(v)] // "go"+v without allocation
+	} else {
+		return "go" + v
+	}
+}
+
+// Compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as Go versions.
+// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
+// Invalid versions, including the empty string, compare less than
+// valid versions and equal to each other.
+// The language version "go1.21" compares less than the
+// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
+func Compare(x, y string) int {
+	return gover.Compare(stripGo(x), stripGo(y))
+}
+
+// IsValid reports whether the version x is valid.
+func IsValid(x string) bool {
+	return gover.IsValid(stripGo(x))
+}
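A usage sketch (not part of the patch) of the new go/version API:

package main

import (
	"fmt"
	"go/version"
)

func main() {
	fmt.Println(version.Lang("go1.21.3"))               // go1.21
	fmt.Println(version.IsValid("1.21"))                // false: missing "go" prefix
	fmt.Println(version.Compare("go1.21rc1", "go1.21")) // 1: the bare language version sorts first
}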
diff --git a/src/go/version/version_test.go b/src/go/version/version_test.go
new file mode 100644
index 0000000..ad83a25
--- /dev/null
+++ b/src/go/version/version_test.go
@@ -0,0 +1,106 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package version
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestCompare(t *testing.T) { test2(t, compareTests, "Compare", Compare) }
+
+var compareTests = []testCase2[string, string, int]{
+	{"", "", 0},
+	{"x", "x", 0},
+	{"", "x", 0},
+	{"1", "1.1", 0},
+	{"go1", "go1.1", -1},
+	{"go1.5", "go1.6", -1},
+	{"go1.5", "go1.10", -1},
+	{"go1.6", "go1.6.1", -1},
+	{"go1.19", "go1.19.0", 0},
+	{"go1.19rc1", "go1.19", -1},
+	{"go1.20", "go1.20.0", 0},
+	{"go1.20", "go1.20.0-bigcorp", 0},
+	{"go1.20rc1", "go1.20", -1},
+	{"go1.21", "go1.21.0", -1},
+	{"go1.21", "go1.21.0-bigcorp", -1},
+	{"go1.21", "go1.21rc1", -1},
+	{"go1.21rc1", "go1.21.0", -1},
+	{"go1.6", "go1.19", -1},
+	{"go1.19", "go1.19.1", -1},
+	{"go1.19rc1", "go1.19", -1},
+	{"go1.19rc1", "go1.19", -1},
+	{"go1.19rc1", "go1.19.1", -1},
+	{"go1.19rc1", "go1.19rc2", -1},
+	{"go1.19.0", "go1.19.1", -1},
+	{"go1.19rc1", "go1.19.0", -1},
+	{"go1.19alpha3", "go1.19beta2", -1},
+	{"go1.19beta2", "go1.19rc1", -1},
+	{"go1.1", "go1.99999999999999998", -1},
+	{"go1.99999999999999998", "go1.99999999999999999", -1},
+}
+
+func TestLang(t *testing.T) { test1(t, langTests, "Lang", Lang) }
+
+var langTests = []testCase1[string, string]{
+	{"bad", ""},
+	{"go1.2rc3", "go1.2"},
+	{"go1.2.3", "go1.2"},
+	{"go1.2", "go1.2"},
+	{"go1", "go1"},
+	{"go222", "go222.0"},
+	{"go1.999testmod", "go1.999"},
+}
+
+func TestIsValid(t *testing.T) { test1(t, isValidTests, "IsValid", IsValid) }
+
+var isValidTests = []testCase1[string, bool]{
+	{"", false},
+	{"1.2.3", false},
+	{"go1.2rc3", true},
+	{"go1.2.3", true},
+	{"go1.999testmod", true},
+	{"go1.600+auto", false},
+	{"go1.22", true},
+	{"go1.21.0", true},
+	{"go1.21rc2", true},
+	{"go1.21", true},
+	{"go1.20.0", true},
+	{"go1.20", true},
+	{"go1.19", true},
+	{"go1.3", true},
+	{"go1.2", true},
+	{"go1", true},
+}
+
+type testCase1[In, Out any] struct {
+	in  In
+	out Out
+}
+
+type testCase2[In1, In2, Out any] struct {
+	in1 In1
+	in2 In2
+	out Out
+}
+
+func test1[In, Out any](t *testing.T, tests []testCase1[In, Out], name string, f func(In) Out) {
+	t.Helper()
+	for _, tt := range tests {
+		if out := f(tt.in); !reflect.DeepEqual(out, tt.out) {
+			t.Errorf("%s(%v) = %v, want %v", name, tt.in, out, tt.out)
+		}
+	}
+}
+
+func test2[In1, In2, Out any](t *testing.T, tests []testCase2[In1, In2, Out], name string, f func(In1, In2) Out) {
+	t.Helper()
+	for _, tt := range tests {
+		if out := f(tt.in1, tt.in2); !reflect.DeepEqual(out, tt.out) {
+			t.Errorf("%s(%+v, %+v) = %+v, want %+v", name, tt.in1, tt.in2, out, tt.out)
+		}
+	}
+}
diff --git a/src/hash/adler32/adler32.go b/src/hash/adler32/adler32.go
index 38d644d..07695e9 100644
--- a/src/hash/adler32/adler32.go
+++ b/src/hash/adler32/adler32.go
@@ -38,8 +38,8 @@
 
 // New returns a new hash.Hash32 computing the Adler-32 checksum. Its
 // Sum method will lay the value out in big-endian byte order. The
-// returned Hash32 also implements encoding.BinaryMarshaler and
-// encoding.BinaryUnmarshaler to marshal and unmarshal the internal
+// returned Hash32 also implements [encoding.BinaryMarshaler] and
+// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
 // state of the hash.
 func New() hash.Hash32 {
 	d := new(digest)
@@ -74,16 +74,19 @@
 	return nil
 }
 
+// appendUint32 is semantically the same as [binary.BigEndian.AppendUint32]
+// We copied this function because we can not import "encoding/binary" here.
 func appendUint32(b []byte, x uint32) []byte {
-	a := [4]byte{
-		byte(x >> 24),
-		byte(x >> 16),
-		byte(x >> 8),
+	return append(b,
+		byte(x>>24),
+		byte(x>>16),
+		byte(x>>8),
 		byte(x),
-	}
-	return append(b, a[:]...)
+	)
 }
 
+// readUint32 is semantically the same as [binary.BigEndian.Uint32]
+// We copied this function because we can not import "encoding/binary" here.
 func readUint32(b []byte) uint32 {
 	_ = b[3]
 	return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
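Illustrative only (not part of the patch): the hand-rolled helpers above produce the same big-endian layout as encoding/binary, which hash/adler32 cannot import.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := binary.BigEndian.AppendUint32(nil, 0x01020304)
	fmt.Println(b)                          // [1 2 3 4]
	fmt.Println(binary.BigEndian.Uint32(b)) // 16909060
}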
diff --git a/src/hash/crc32/crc32.go b/src/hash/crc32/crc32.go
index e828089..170f05c 100644
--- a/src/hash/crc32/crc32.go
+++ b/src/hash/crc32/crc32.go
@@ -97,7 +97,7 @@
 	haveCastagnoli.Store(true)
 }
 
-// IEEETable is the table for the IEEE polynomial.
+// IEEETable is the table for the [IEEE] polynomial.
 var IEEETable = simpleMakeTable(IEEE)
 
 // ieeeTable8 is the slicing8Table for IEEE
@@ -118,8 +118,8 @@
 	}
 }
 
-// MakeTable returns a Table constructed from the specified polynomial.
-// The contents of this Table must not be modified.
+// MakeTable returns a [Table] constructed from the specified polynomial.
+// The contents of this [Table] must not be modified.
 func MakeTable(poly uint32) *Table {
 	switch poly {
 	case IEEE:
@@ -139,10 +139,10 @@
 	tab *Table
 }
 
-// New creates a new hash.Hash32 computing the CRC-32 checksum using the
-// polynomial represented by the Table. Its Sum method will lay the
+// New creates a new [hash.Hash32] computing the CRC-32 checksum using the
+// polynomial represented by the [Table]. Its Sum method will lay the
 // value out in big-endian byte order. The returned Hash32 also
-// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
+// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to
 // marshal and unmarshal the internal state of the hash.
 func New(tab *Table) hash.Hash32 {
 	if tab == IEEETable {
@@ -151,10 +151,10 @@
 	return &digest{0, tab}
 }
 
-// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum using
-// the IEEE polynomial. Its Sum method will lay the value out in
+// NewIEEE creates a new [hash.Hash32] computing the CRC-32 checksum using
+// the [IEEE] polynomial. Its Sum method will lay the value out in
 // big-endian byte order. The returned Hash32 also implements
-// encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to marshal
+// [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to marshal
 // and unmarshal the internal state of the hash.
 func NewIEEE() hash.Hash32 { return New(IEEETable) }
 
@@ -191,16 +191,19 @@
 	return nil
 }
 
+// appendUint32 is semantically the same as [binary.BigEndian.AppendUint32].
+// We copied this function because we cannot import "encoding/binary" here.
 func appendUint32(b []byte, x uint32) []byte {
-	a := [4]byte{
-		byte(x >> 24),
-		byte(x >> 16),
-		byte(x >> 8),
+	return append(b,
+		byte(x>>24),
+		byte(x>>16),
+		byte(x>>8),
 		byte(x),
-	}
-	return append(b, a[:]...)
+	)
 }
 
+// readUint32 is semantically the same as [binary.BigEndian.Uint32].
+// We copied this function because we cannot import "encoding/binary" here.
 func readUint32(b []byte) uint32 {
 	_ = b[3]
 	return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
@@ -242,11 +245,11 @@
 }
 
 // Checksum returns the CRC-32 checksum of data
-// using the polynomial represented by the Table.
+// using the polynomial represented by the [Table].
 func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) }
 
 // ChecksumIEEE returns the CRC-32 checksum of data
-// using the IEEE polynomial.
+// using the [IEEE] polynomial.
 func ChecksumIEEE(data []byte) uint32 {
 	ieeeOnce.Do(ieeeInit)
 	return updateIEEE(0, data)
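
As the updated doc links emphasize, the hash returned by crc32.New and crc32.NewIEEE implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler. A minimal usage sketch (assumed example input; not part of the diff): snapshot the state mid-stream and resume it in a second hash.

package main

import (
	"encoding"
	"fmt"
	"hash/crc32"
)

func main() {
	h1 := crc32.NewIEEE()
	h1.Write([]byte("hello, "))

	// Save the internal state after the first chunk.
	state, err := h1.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Restore it into a fresh hash and feed both the same remainder.
	h2 := crc32.NewIEEE()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	h1.Write([]byte("world"))
	h2.Write([]byte("world"))
	fmt.Println(h1.Sum32() == h2.Sum32()) // true
}
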
diff --git a/src/hash/crc64/crc64.go b/src/hash/crc64/crc64.go
index 26b2573..17ee8eb 100644
--- a/src/hash/crc64/crc64.go
+++ b/src/hash/crc64/crc64.go
@@ -43,8 +43,8 @@
 	slicing8TableECMA = makeSlicingBy8Table(makeTable(ECMA))
 }
 
-// MakeTable returns a Table constructed from the specified polynomial.
-// The contents of this Table must not be modified.
+// MakeTable returns a [Table] constructed from the specified polynomial.
+// The contents of this [Table] must not be modified.
 func MakeTable(poly uint64) *Table {
 	buildSlicing8TablesOnce()
 	switch poly {
@@ -93,9 +93,9 @@
 }
 
 // New creates a new hash.Hash64 computing the CRC-64 checksum using the
-// polynomial represented by the Table. Its Sum method will lay the
+// polynomial represented by the [Table]. Its Sum method will lay the
 // value out in big-endian byte order. The returned Hash64 also
-// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
+// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to
 // marshal and unmarshal the internal state of the hash.
 func New(tab *Table) hash.Hash64 { return &digest{0, tab} }
 
@@ -132,20 +132,23 @@
 	return nil
 }
 
+// appendUint64 is semantically the same as [binary.BigEndian.AppendUint64].
+// We copied this function because we cannot import "encoding/binary" here.
 func appendUint64(b []byte, x uint64) []byte {
-	a := [8]byte{
-		byte(x >> 56),
-		byte(x >> 48),
-		byte(x >> 40),
-		byte(x >> 32),
-		byte(x >> 24),
-		byte(x >> 16),
-		byte(x >> 8),
+	return append(b,
+		byte(x>>56),
+		byte(x>>48),
+		byte(x>>40),
+		byte(x>>32),
+		byte(x>>24),
+		byte(x>>16),
+		byte(x>>8),
 		byte(x),
-	}
-	return append(b, a[:]...)
+	)
 }
 
+// readUint64 is semantically the same as [binary.BigEndian.Uint64].
+// We copied this function because we cannot import "encoding/binary" here.
 func readUint64(b []byte) uint64 {
 	_ = b[7]
 	return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
@@ -210,7 +213,7 @@
 }
 
 // Checksum returns the CRC-64 checksum of data
-// using the polynomial represented by the Table.
+// using the polynomial represented by the [Table].
 func Checksum(data []byte, tab *Table) uint64 { return update(0, tab, data) }
 
 // tableSum returns the ISO checksum of table t.
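
For reference, a short sketch of the MakeTable/Checksum pairing that the [Table] doc links above describe (assumed input string; not part of the diff):

package main

import (
	"fmt"
	"hash/crc64"
)

func main() {
	table := crc64.MakeTable(crc64.ECMA) // the returned Table must not be modified
	sum := crc64.Checksum([]byte("hello world"), table)
	fmt.Printf("%#016x\n", sum)
}
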
diff --git a/src/hash/fnv/fnv.go b/src/hash/fnv/fnv.go
index 0fce177..29439e2 100644
--- a/src/hash/fnv/fnv.go
+++ b/src/hash/fnv/fnv.go
@@ -38,35 +38,35 @@
 	prime128Shift   = 24
 )
 
-// New32 returns a new 32-bit FNV-1 hash.Hash.
+// New32 returns a new 32-bit FNV-1 [hash.Hash].
 // Its Sum method will lay the value out in big-endian byte order.
 func New32() hash.Hash32 {
 	var s sum32 = offset32
 	return &s
 }
 
-// New32a returns a new 32-bit FNV-1a hash.Hash.
+// New32a returns a new 32-bit FNV-1a [hash.Hash].
 // Its Sum method will lay the value out in big-endian byte order.
 func New32a() hash.Hash32 {
 	var s sum32a = offset32
 	return &s
 }
 
-// New64 returns a new 64-bit FNV-1 hash.Hash.
+// New64 returns a new 64-bit FNV-1 [hash.Hash].
 // Its Sum method will lay the value out in big-endian byte order.
 func New64() hash.Hash64 {
 	var s sum64 = offset64
 	return &s
 }
 
-// New64a returns a new 64-bit FNV-1a hash.Hash.
+// New64a returns a new 64-bit FNV-1a [hash.Hash].
 // Its Sum method will lay the value out in big-endian byte order.
 func New64a() hash.Hash64 {
 	var s sum64a = offset64
 	return &s
 }
 
-// New128 returns a new 128-bit FNV-1 hash.Hash.
+// New128 returns a new 128-bit FNV-1 [hash.Hash].
 // Its Sum method will lay the value out in big-endian byte order.
 func New128() hash.Hash {
 	var s sum128
@@ -75,7 +75,7 @@
 	return &s
 }
 
-// New128a returns a new 128-bit FNV-1a hash.Hash.
+// New128a returns a new 128-bit FNV-1a [hash.Hash].
 // Its Sum method will lay the value out in big-endian byte order.
 func New128a() hash.Hash {
 	var s sum128a
@@ -241,7 +241,6 @@
 	b = append(b, magic64...)
 	b = appendUint64(b, uint64(*s))
 	return b, nil
-
 }
 
 func (s *sum64a) MarshalBinary() ([]byte, error) {
@@ -335,35 +334,41 @@
 	return nil
 }
 
+// readUint32 is semantically the same as [binary.BigEndian.Uint32].
+// We copied this function because we cannot import "encoding/binary" here.
 func readUint32(b []byte) uint32 {
 	_ = b[3]
 	return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
 }
 
+// appendUint32 is semantically the same as [binary.BigEndian.AppendUint32].
+// We copied this function because we cannot import "encoding/binary" here.
 func appendUint32(b []byte, x uint32) []byte {
-	a := [4]byte{
-		byte(x >> 24),
-		byte(x >> 16),
-		byte(x >> 8),
+	return append(b,
+		byte(x>>24),
+		byte(x>>16),
+		byte(x>>8),
 		byte(x),
-	}
-	return append(b, a[:]...)
+	)
 }
 
+// appendUint64 is semantically the same as [binary.BigEndian.AppendUint64].
+// We copied this function because we cannot import "encoding/binary" here.
 func appendUint64(b []byte, x uint64) []byte {
-	a := [8]byte{
-		byte(x >> 56),
-		byte(x >> 48),
-		byte(x >> 40),
-		byte(x >> 32),
-		byte(x >> 24),
-		byte(x >> 16),
-		byte(x >> 8),
+	return append(b,
+		byte(x>>56),
+		byte(x>>48),
+		byte(x>>40),
+		byte(x>>32),
+		byte(x>>24),
+		byte(x>>16),
+		byte(x>>8),
 		byte(x),
-	}
-	return append(b, a[:]...)
+	)
 }
 
+// readUint64 is semantically the same as [binary.BigEndian.Uint64].
+// We copied this function because we cannot import "encoding/binary" here.
 func readUint64(b []byte) uint64 {
 	_ = b[7]
 	return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
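
The fnv constructors change only in their doc links, but for completeness here is a minimal usage sketch of New64a (assumed input; not part of the diff):

package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	h := fnv.New64a() // 64-bit FNV-1a, as documented above
	h.Write([]byte("hello world"))
	fmt.Printf("%#016x\n", h.Sum64())
}
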
diff --git a/src/hash/hash.go b/src/hash/hash.go
index 62cf6a4..82c8103 100644
--- a/src/hash/hash.go
+++ b/src/hash/hash.go
@@ -9,9 +9,9 @@
 
 // Hash is the common interface implemented by all hash functions.
 //
-// Hash implementations in the standard library (e.g. hash/crc32 and
-// crypto/sha256) implement the encoding.BinaryMarshaler and
-// encoding.BinaryUnmarshaler interfaces. Marshaling a hash implementation
+// Hash implementations in the standard library (e.g. [hash/crc32] and
+// [crypto/sha256]) implement the [encoding.BinaryMarshaler] and
+// [encoding.BinaryUnmarshaler] interfaces. Marshaling a hash implementation
 // allows its internal state to be saved and used for additional processing
 // later, without having to re-write the data previously written to the hash.
 // The hash state may contain portions of the input in its original form,
diff --git a/src/hash/maphash/maphash.go b/src/hash/maphash/maphash.go
index c2e9e40..1e70a27 100644
--- a/src/hash/maphash/maphash.go
+++ b/src/hash/maphash/maphash.go
@@ -6,20 +6,20 @@
 // These hash functions are intended to be used to implement hash tables or
 // other data structures that need to map arbitrary strings or byte
 // sequences to a uniform distribution on unsigned 64-bit integers.
-// Each different instance of a hash table or data structure should use its own Seed.
+// Each different instance of a hash table or data structure should use its own [Seed].
 //
 // The hash functions are not cryptographically secure.
 // (See crypto/sha256 and crypto/sha512 for cryptographic use.)
 package maphash
 
 // A Seed is a random value that selects the specific hash function
-// computed by a Hash. If two Hashes use the same Seeds, they
+// computed by a [Hash]. If two Hashes use the same Seeds, they
 // will compute the same hash values for any given input.
 // If two Hashes use different Seeds, they are very likely to compute
 // distinct hash values for any given input.
 //
-// A Seed must be initialized by calling MakeSeed.
-// The zero seed is uninitialized and not valid for use with Hash's SetSeed method.
+// A Seed must be initialized by calling [MakeSeed].
+// The zero seed is uninitialized and not valid for use with [Hash]'s SetSeed method.
 //
 // Each Seed value is local to a single process and cannot be serialized
 // or otherwise recreated in a different process.
@@ -122,7 +122,7 @@
 }
 
 // WriteByte adds b to the sequence of bytes hashed by h.
-// It never fails; the error result is for implementing io.ByteWriter.
+// It never fails; the error result is for implementing [io.ByteWriter].
 func (h *Hash) WriteByte(b byte) error {
 	if h.n == len(h.buf) {
 		h.flush()
@@ -133,7 +133,7 @@
 }
 
 // Write adds b to the sequence of bytes hashed by h.
-// It always writes all of b and never fails; the count and error result are for implementing io.Writer.
+// It always writes all of b and never fails; the count and error result are for implementing [io.Writer].
 func (h *Hash) Write(b []byte) (int, error) {
 	size := len(b)
 	// Deal with bytes left over in h.buf.
@@ -165,7 +165,7 @@
 }
 
 // WriteString adds the bytes of s to the sequence of bytes hashed by h.
-// It always writes all of s and never fails; the count and error result are for implementing io.StringWriter.
+// It always writes all of s and never fails; the count and error result are for implementing [io.StringWriter].
 func (h *Hash) WriteString(s string) (int, error) {
 	// WriteString mirrors Write. See Write for comments.
 	size := len(s)
@@ -196,10 +196,10 @@
 	return h.seed
 }
 
-// SetSeed sets h to use seed, which must have been returned by MakeSeed
-// or by another Hash's Seed method.
-// Two Hash objects with the same seed behave identically.
-// Two Hash objects with different seeds will very likely behave differently.
+// SetSeed sets h to use seed, which must have been returned by [MakeSeed]
+// or by another [Hash.Seed] method.
+// Two [Hash] objects with the same seed behave identically.
+// Two [Hash] objects with different seeds will very likely behave differently.
 // Any bytes added to h before this call will be discarded.
 func (h *Hash) SetSeed(seed Seed) {
 	if seed.s == 0 {
@@ -230,7 +230,7 @@
 
 // Sum64 returns h's current 64-bit value, which depends on
 // h's seed and the sequence of bytes added to h since the
-// last call to Reset or SetSeed.
+// last call to [Hash.Reset] or [Hash.SetSeed].
 //
 // All bits of the Sum64 result are close to uniformly and
 // independently distributed, so it can be safely reduced
@@ -255,8 +255,8 @@
 }
 
 // Sum appends the hash's current 64-bit value to b.
-// It exists for implementing hash.Hash.
-// For direct calls, it is more efficient to use Sum64.
+// It exists for implementing [hash.Hash].
+// For direct calls, it is more efficient to use [Hash.Sum64].
 func (h *Hash) Sum(b []byte) []byte {
 	x := h.Sum64()
 	return append(b,
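
The reworded Seed and SetSeed docs boil down to: hashes that share a seed agree, hashes with different seeds almost certainly do not. A small sketch under those rules (assumed inputs; not part of the diff):

package main

import (
	"fmt"
	"hash/maphash"
)

func main() {
	seed := maphash.MakeSeed() // a Seed must come from MakeSeed or another Hash's Seed method

	var h1, h2 maphash.Hash
	h1.SetSeed(seed)
	h2.SetSeed(seed)

	h1.WriteString("hello")
	h2.WriteString("hello")
	fmt.Println(h1.Sum64() == h2.Sum64()) // true: same seed, same input

	var h3 maphash.Hash // a zero Hash picks its own random seed on first use
	h3.WriteString("hello")
	fmt.Println(h3.Sum64() == h1.Sum64()) // almost certainly false: different seeds
}
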
diff --git a/src/hash/maphash/maphash_runtime.go b/src/hash/maphash/maphash_runtime.go
index 98097ff..b831df2 100644
--- a/src/hash/maphash/maphash_runtime.go
+++ b/src/hash/maphash/maphash_runtime.go
@@ -10,8 +10,8 @@
 	"unsafe"
 )
 
-//go:linkname runtime_fastrand64 runtime.fastrand64
-func runtime_fastrand64() uint64
+//go:linkname runtime_rand runtime.rand
+func runtime_rand() uint64
 
 //go:linkname runtime_memhash runtime.memhash
 //go:noescape
@@ -39,5 +39,5 @@
 }
 
 func randUint64() uint64 {
-	return runtime_fastrand64()
+	return runtime_rand()
 }
diff --git a/src/hash/maphash/smhasher_test.go b/src/hash/maphash/smhasher_test.go
index a6e8a21..085036b 100644
--- a/src/hash/maphash/smhasher_test.go
+++ b/src/hash/maphash/smhasher_test.go
@@ -381,7 +381,7 @@
 	// find c such that Prob(mean-c*stddev < x < mean+c*stddev)^N > .9999
 	for c = 0.0; math.Pow(math.Erf(c/math.Sqrt(2)), float64(N)) < .9999; c += .1 {
 	}
-	c *= 8.0 // allowed slack - we don't need to be perfectly random
+	c *= 11.0 // allowed slack: 40% to 60% - we don't need to be perfectly random
 	mean := .5 * REP
 	stddev := .5 * math.Sqrt(REP)
 	low := int(mean - c*stddev)
diff --git a/src/html/template/content.go b/src/html/template/content.go
index 49d2f26..6a9eb4e 100644
--- a/src/html/template/content.go
+++ b/src/html/template/content.go
@@ -128,8 +128,8 @@
 }
 
 var (
-	errorType       = reflect.TypeOf((*error)(nil)).Elem()
-	fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+	errorType       = reflect.TypeFor[error]()
+	fmtStringerType = reflect.TypeFor[fmt.Stringer]()
 )
 
 // indirectToStringerOrError returns the value, after dereferencing as many times
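
reflect.TypeFor, used above in place of the reflect.TypeOf((*T)(nil)).Elem() idiom, was added in Go 1.22 and yields the same reflect.Type. A standalone sketch of the equivalence (not part of the diff):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	viaElem := reflect.TypeOf((*error)(nil)).Elem() // pre-Go 1.22 idiom
	viaTypeFor := reflect.TypeFor[error]()          // generic form used in this change
	fmt.Println(viaElem == viaTypeFor)              // true: both are the reflect.Type for error
}
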
diff --git a/src/html/template/context.go b/src/html/template/context.go
index 7987713..b78f0f7 100644
--- a/src/html/template/context.go
+++ b/src/html/template/context.go
@@ -21,10 +21,15 @@
 	delim   delim
 	urlPart urlPart
 	jsCtx   jsCtx
-	attr    attr
-	element element
-	n       parse.Node // for range break/continue
-	err     *Error
+	// jsBraceDepth contains the current depth, for each JS template literal
+	// string interpolation expression, of braces we've seen. This is used to
+	// determine if the next } will close a JS template literal string
+	// interpolation expression or not.
+	jsBraceDepth []int
+	attr         attr
+	element      element
+	n            parse.Node // for range break/continue
+	err          *Error
 }
 
 func (c context) String() string {
@@ -120,8 +125,8 @@
 	stateJSDqStr
 	// stateJSSqStr occurs inside a JavaScript single quoted string.
 	stateJSSqStr
-	// stateJSBqStr occurs inside a JavaScript back quoted string.
-	stateJSBqStr
+	// stateJSTmplLit occurs inside a JavaScript back quoted string.
+	stateJSTmplLit
 	// stateJSRegexp occurs inside a JavaScript regexp literal.
 	stateJSRegexp
 	// stateJSBlockCmt occurs inside a JavaScript /* block comment */.
@@ -175,14 +180,14 @@
 }
 
 // isInScriptLiteral returns true if s is one of the literal states within a
-// <script> tag, and as such occurances of "<!--", "<script", and "</script"
+// <script> tag, and as such occurrences of "<!--", "<script", and "</script"
 // need to be treated specially.
 func isInScriptLiteral(s state) bool {
 	// Ignore the comment states (stateJSBlockCmt, stateJSLineCmt,
 	// stateJSHTMLOpenCmt, stateJSHTMLCloseCmt) because their content is already
 	// omitted from the output.
 	switch s {
-	case stateJSDqStr, stateJSSqStr, stateJSBqStr, stateJSRegexp:
+	case stateJSDqStr, stateJSSqStr, stateJSTmplLit, stateJSRegexp:
 		return true
 	}
 	return false
diff --git a/src/html/template/error.go b/src/html/template/error.go
index a763924..805a788 100644
--- a/src/html/template/error.go
+++ b/src/html/template/error.go
@@ -221,6 +221,10 @@
 	// Discussion:
 	//   Package html/template does not support actions inside of JS template
 	//   literals.
+	//
+	// Deprecated: ErrJSTemplate is no longer returned when an action is present
+	// in a JS template literal. Actions inside of JS template literals are now
+	// escaped as expected.
 	ErrJSTemplate
 )
 
diff --git a/src/html/template/escape.go b/src/html/template/escape.go
index 01f6303..1eace16 100644
--- a/src/html/template/escape.go
+++ b/src/html/template/escape.go
@@ -62,22 +62,23 @@
 
 // funcMap maps command names to functions that render their inputs safe.
 var funcMap = template.FuncMap{
-	"_html_template_attrescaper":     attrEscaper,
-	"_html_template_commentescaper":  commentEscaper,
-	"_html_template_cssescaper":      cssEscaper,
-	"_html_template_cssvaluefilter":  cssValueFilter,
-	"_html_template_htmlnamefilter":  htmlNameFilter,
-	"_html_template_htmlescaper":     htmlEscaper,
-	"_html_template_jsregexpescaper": jsRegexpEscaper,
-	"_html_template_jsstrescaper":    jsStrEscaper,
-	"_html_template_jsvalescaper":    jsValEscaper,
-	"_html_template_nospaceescaper":  htmlNospaceEscaper,
-	"_html_template_rcdataescaper":   rcdataEscaper,
-	"_html_template_srcsetescaper":   srcsetFilterAndEscaper,
-	"_html_template_urlescaper":      urlEscaper,
-	"_html_template_urlfilter":       urlFilter,
-	"_html_template_urlnormalizer":   urlNormalizer,
-	"_eval_args_":                    evalArgs,
+	"_html_template_attrescaper":      attrEscaper,
+	"_html_template_commentescaper":   commentEscaper,
+	"_html_template_cssescaper":       cssEscaper,
+	"_html_template_cssvaluefilter":   cssValueFilter,
+	"_html_template_htmlnamefilter":   htmlNameFilter,
+	"_html_template_htmlescaper":      htmlEscaper,
+	"_html_template_jsregexpescaper":  jsRegexpEscaper,
+	"_html_template_jsstrescaper":     jsStrEscaper,
+	"_html_template_jstmpllitescaper": jsTmplLitEscaper,
+	"_html_template_jsvalescaper":     jsValEscaper,
+	"_html_template_nospaceescaper":   htmlNospaceEscaper,
+	"_html_template_rcdataescaper":    rcdataEscaper,
+	"_html_template_srcsetescaper":    srcsetFilterAndEscaper,
+	"_html_template_urlescaper":       urlEscaper,
+	"_html_template_urlfilter":        urlFilter,
+	"_html_template_urlnormalizer":    urlNormalizer,
+	"_eval_args_":                     evalArgs,
 }
 
 // escaper collects type inferences about templates and changes needed to make
@@ -227,16 +228,8 @@
 		c.jsCtx = jsCtxDivOp
 	case stateJSDqStr, stateJSSqStr:
 		s = append(s, "_html_template_jsstrescaper")
-	case stateJSBqStr:
-		if debugAllowActionJSTmpl.Value() == "1" {
-			debugAllowActionJSTmpl.IncNonDefault()
-			s = append(s, "_html_template_jsstrescaper")
-		} else {
-			return context{
-				state: stateError,
-				err:   errorf(ErrJSTemplate, n, n.Line, "%s appears in a JS template literal", n),
-			}
-		}
+	case stateJSTmplLit:
+		s = append(s, "_html_template_jstmpllitescaper")
 	case stateJSRegexp:
 		s = append(s, "_html_template_jsregexpescaper")
 	case stateCSS:
@@ -395,6 +388,9 @@
 	"_html_template_jsstrescaper": {
 		"_html_template_attrescaper": true,
 	},
+	"_html_template_jstmpllitescaper": {
+		"_html_template_attrescaper": true,
+	},
 	"_html_template_urlescaper": {
 		"_html_template_urlnormalizer": true,
 	},
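
End to end, the new _html_template_jstmpllitescaper means an action inside a JS template literal is now escaped rather than rejected with ErrJSTemplate. A sketch modeled on the escape_test.go cases below (template text and data mirror those cases; not part of the diff):

package main

import (
	"html/template"
	"os"
)

func main() {
	t := template.Must(template.New("t").Parse("<script>var a = `{{.}}`</script>"))
	// With the jstmpllitescaper in place this renders the action safely, e.g.
	// <script>var a = `\u0024\u007b asd \u0060\u0060 \u007d`</script>
	// instead of failing with "{{.}} appears in a JS template literal".
	if err := t.Execute(os.Stdout, "${ asd `` }"); err != nil {
		panic(err)
	}
}
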
diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
index 8a4f62e..497ead8 100644
--- a/src/html/template/escape_test.go
+++ b/src/html/template/escape_test.go
@@ -30,14 +30,14 @@
 
 func TestEscape(t *testing.T) {
 	data := struct {
-		F, T    bool
-		C, G, H string
-		A, E    []string
-		B, M    json.Marshaler
-		N       int
-		U       any  // untyped nil
-		Z       *int // typed nil
-		W       HTML
+		F, T       bool
+		C, G, H, I string
+		A, E       []string
+		B, M       json.Marshaler
+		N          int
+		U          any  // untyped nil
+		Z          *int // typed nil
+		W          HTML
 	}{
 		F: false,
 		T: true,
@@ -52,6 +52,7 @@
 		U: nil,
 		Z: nil,
 		W: HTML(`&iexcl;<b class="foo">Hello</b>, <textarea>O'World</textarea>!`),
+		I: "${ asd `` }",
 	}
 	pdata := &data
 
@@ -718,6 +719,21 @@
 			"<p name=\"{{.U}}\">",
 			"<p name=\"\">",
 		},
+		{
+			"JS template lit special characters",
+			"<script>var a = `{{.I}}`</script>",
+			"<script>var a = `\\u0024\\u007b asd \\u0060\\u0060 \\u007d`</script>",
+		},
+		{
+			"JS template lit special characters, nested lit",
+			"<script>var a = `${ `{{.I}}` }`</script>",
+			"<script>var a = `${ `\\u0024\\u007b asd \\u0060\\u0060 \\u007d` }`</script>",
+		},
+		{
+			"JS template lit, nested JS",
+			"<script>var a = `${ var a = \"{{\"a \\\" d\"}}\" }`</script>",
+			"<script>var a = `${ var a = \"a \\u0022 d\" }`</script>",
+		},
 	}
 
 	for _, test := range tests {
@@ -976,6 +992,31 @@
 			"<script>var a = `${a+b}`</script>`",
 			"",
 		},
+		{
+			"<script>var tmpl = `asd`;</script>",
+			``,
+		},
+		{
+			"<script>var tmpl = `${1}`;</script>",
+			``,
+		},
+		{
+			"<script>var tmpl = `${return ``}`;</script>",
+			``,
+		},
+		{
+			"<script>var tmpl = `${return {{.}} }`;</script>",
+			``,
+		},
+		{
+			"<script>var tmpl = `${ let a = {1:1} {{.}} }`;</script>",
+			``,
+		},
+		{
+			"<script>var tmpl = `asd ${return \"{\"}`;</script>",
+			``,
+		},
+
 		// Error cases.
 		{
 			"{{if .Cond}}<a{{end}}",
@@ -1122,10 +1163,6 @@
 			// html is allowed since it is the last command in the pipeline, but urlquery is not.
 			`predefined escaper "urlquery" disallowed in template`,
 		},
-		{
-			"<script>var tmpl = `asd {{.}}`;</script>",
-			`{{.}} appears in a JS template literal`,
-		},
 	}
 	for _, test := range tests {
 		buf := new(bytes.Buffer)
@@ -1349,7 +1386,7 @@
 		},
 		{
 			"<a onclick=\"`foo",
-			context{state: stateJSBqStr, delim: delimDoubleQuote, attr: attrScript},
+			context{state: stateJSTmplLit, delim: delimDoubleQuote, attr: attrScript},
 		},
 		{
 			`<A ONCLICK="'`,
@@ -1691,6 +1728,94 @@
 			`<svg:a svg:onclick="x()">`,
 			context{},
 		},
+		{
+			"<script>var a = `",
+			context{state: stateJSTmplLit, element: elementScript},
+		},
+		{
+			"<script>var a = `${",
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			"<script>var a = `${}",
+			context{state: stateJSTmplLit, element: elementScript},
+		},
+		{
+			"<script>var a = `${`",
+			context{state: stateJSTmplLit, element: elementScript},
+		},
+		{
+			"<script>var a = `${var a = \"",
+			context{state: stateJSDqStr, element: elementScript},
+		},
+		{
+			"<script>var a = `${var a = \"`",
+			context{state: stateJSDqStr, element: elementScript},
+		},
+		{
+			"<script>var a = `${var a = \"}",
+			context{state: stateJSDqStr, element: elementScript},
+		},
+		{
+			"<script>var a = `${``",
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			"<script>var a = `${`}",
+			context{state: stateJSTmplLit, element: elementScript},
+		},
+		{
+			"<script>`${ {} } asd`</script><script>`${ {} }",
+			context{state: stateJSTmplLit, element: elementScript},
+		},
+		{
+			"<script>var foo = `${ (_ => { return \"x\" })() + \"${",
+			context{state: stateJSDqStr, element: elementScript},
+		},
+		{
+			"<script>var a = `${ {</script><script>var b = `${ x }",
+			context{state: stateJSTmplLit, element: elementScript, jsCtx: jsCtxDivOp},
+		},
+		{
+			"<script>var foo = `x` + \"${",
+			context{state: stateJSDqStr, element: elementScript},
+		},
+		{
+			"<script>function f() { var a = `${}`; }",
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			"<script>{`${}`}",
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			"<script>`${ function f() { return `${1}` }() }`",
+			context{state: stateJS, element: elementScript, jsCtx: jsCtxDivOp},
+		},
+		{
+			"<script>function f() {`${ function f() { `${1}` } }`}",
+			context{state: stateJS, element: elementScript, jsCtx: jsCtxDivOp},
+		},
+		{
+			"<script>`${ { `` }",
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			"<script>`${ { }`",
+			context{state: stateJSTmplLit, element: elementScript},
+		},
+		{
+			"<script>var foo = `${ foo({ a: { c: `${",
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			"<script>var foo = `${ foo({ a: { c: `${ {{.}} }` }, b: ",
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			"<script>`${ `}",
+			context{state: stateJSTmplLit, element: elementScript},
+		},
 	}
 
 	for _, test := range tests {
diff --git a/src/html/template/exec_test.go b/src/html/template/exec_test.go
index 51923ff..0530215 100644
--- a/src/html/template/exec_test.go
+++ b/src/html/template/exec_test.go
@@ -268,8 +268,8 @@
 // of the max int boundary.
 // We do it this way so the test doesn't depend on ints being 32 bits.
 var (
-	bigInt  = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
-	bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
+	bigInt  = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeFor[int]().Bits()-1)-1))
+	bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeFor[int]().Bits()-1)))
 )
 
 var execTests = []execTest{
diff --git a/src/html/template/js.go b/src/html/template/js.go
index 4e05c14..d911ada 100644
--- a/src/html/template/js.go
+++ b/src/html/template/js.go
@@ -124,7 +124,7 @@
 	"void":       true,
 }
 
-var jsonMarshalType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
+var jsonMarshalType = reflect.TypeFor[json.Marshaler]()
 
 // indirectToJSONMarshaler returns the value, after dereferencing as many times
 // as necessary to reach the base type (or nil) or an implementation of json.Marshal.
@@ -171,13 +171,31 @@
 	// cyclic data. This may be an unacceptable DoS risk.
 	b, err := json.Marshal(a)
 	if err != nil {
-		// Put a space before comment so that if it is flush against
+		// While the standard JSON marshaller does not include user controlled
+		// information in the error message, if a type has a MarshalJSON method,
+		// the content of the error message is not guaranteed. Since we insert
+		// the error into the template, as part of a comment, we attempt to
+		// prevent the error from either terminating the comment, or the script
+		// block itself.
+		//
+		// In particular we:
+		//   * replace "*/" comment end tokens with "* /", which does not
+		//     terminate the comment
+		//   * replace "</script" with "\x3C/script", and "<!--" with
+		//     "\x3C!--", which prevents confusing script block termination
+		//     semantics
+		//
+		// We also put a space before the comment so that if it is flush against
 		// a division operator it is not turned into a line comment:
 		//     x/{{y}}
 		// turning into
 		//     x//* error marshaling y:
 		//          second line of error message */null
-		return fmt.Sprintf(" /* %s */null ", strings.ReplaceAll(err.Error(), "*/", "* /"))
+		errStr := err.Error()
+		errStr = strings.ReplaceAll(errStr, "*/", "* /")
+		errStr = strings.ReplaceAll(errStr, "</script", `\x3C/script`)
+		errStr = strings.ReplaceAll(errStr, "<!--", `\x3C!--`)
+		return fmt.Sprintf(" /* %s */null ", errStr)
 	}
 
 	// TODO: maybe post-process output to prevent it from containing
@@ -238,6 +256,11 @@
 	return replace(s, jsStrReplacementTable)
 }
 
+func jsTmplLitEscaper(args ...any) string {
+	s, _ := stringify(args...)
+	return replace(s, jsBqStrReplacementTable)
+}
+
 // jsRegexpEscaper behaves like jsStrEscaper but escapes regular expression
 // specials so the result is treated literally when included in a regular
 // expression literal. /foo{{.X}}bar/ matches the string "foo" followed by
@@ -324,6 +347,31 @@
 	'\\': `\\`,
 }
 
+// jsBqStrReplacementTable is like jsStrReplacementTable except it also contains
+// the special characters for JS template literals: $, {, and }.
+var jsBqStrReplacementTable = []string{
+	0:    `\u0000`,
+	'\t': `\t`,
+	'\n': `\n`,
+	'\v': `\u000b`, // "\v" == "v" on IE 6.
+	'\f': `\f`,
+	'\r': `\r`,
+	// Encode HTML specials as hex so the output can be embedded
+	// in HTML attributes without further encoding.
+	'"':  `\u0022`,
+	'`':  `\u0060`,
+	'&':  `\u0026`,
+	'\'': `\u0027`,
+	'+':  `\u002b`,
+	'/':  `\/`,
+	'<':  `\u003c`,
+	'>':  `\u003e`,
+	'\\': `\\`,
+	'$':  `\u0024`,
+	'{':  `\u007b`,
+	'}':  `\u007d`,
+}
+
 // jsStrNormReplacementTable is like jsStrReplacementTable but does not
 // overencode existing escapes since this table has no entry for `\`.
 var jsStrNormReplacementTable = []string{
diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
index 259dcfb..17cedce 100644
--- a/src/html/template/js_test.go
+++ b/src/html/template/js_test.go
@@ -5,6 +5,7 @@
 package template
 
 import (
+	"errors"
 	"math"
 	"strings"
 	"testing"
@@ -103,61 +104,72 @@
 	}
 }
 
+type jsonErrType struct{}
+
+func (e *jsonErrType) MarshalJSON() ([]byte, error) {
+	return nil, errors.New("beep */ boop </script blip <!--")
+}
+
 func TestJSValEscaper(t *testing.T) {
 	tests := []struct {
-		x  any
-		js string
+		x        any
+		js       string
+		skipNest bool
 	}{
-		{int(42), " 42 "},
-		{uint(42), " 42 "},
-		{int16(42), " 42 "},
-		{uint16(42), " 42 "},
-		{int32(-42), " -42 "},
-		{uint32(42), " 42 "},
-		{int16(-42), " -42 "},
-		{uint16(42), " 42 "},
-		{int64(-42), " -42 "},
-		{uint64(42), " 42 "},
-		{uint64(1) << 53, " 9007199254740992 "},
+		{int(42), " 42 ", false},
+		{uint(42), " 42 ", false},
+		{int16(42), " 42 ", false},
+		{uint16(42), " 42 ", false},
+		{int32(-42), " -42 ", false},
+		{uint32(42), " 42 ", false},
+		{int16(-42), " -42 ", false},
+		{uint16(42), " 42 ", false},
+		{int64(-42), " -42 ", false},
+		{uint64(42), " 42 ", false},
+		{uint64(1) << 53, " 9007199254740992 ", false},
 		// ulp(1 << 53) > 1 so this loses precision in JS
 		// but it is still a representable integer literal.
-		{uint64(1)<<53 + 1, " 9007199254740993 "},
-		{float32(1.0), " 1 "},
-		{float32(-1.0), " -1 "},
-		{float32(0.5), " 0.5 "},
-		{float32(-0.5), " -0.5 "},
-		{float32(1.0) / float32(256), " 0.00390625 "},
-		{float32(0), " 0 "},
-		{math.Copysign(0, -1), " -0 "},
-		{float64(1.0), " 1 "},
-		{float64(-1.0), " -1 "},
-		{float64(0.5), " 0.5 "},
-		{float64(-0.5), " -0.5 "},
-		{float64(0), " 0 "},
-		{math.Copysign(0, -1), " -0 "},
-		{"", `""`},
-		{"foo", `"foo"`},
+		{uint64(1)<<53 + 1, " 9007199254740993 ", false},
+		{float32(1.0), " 1 ", false},
+		{float32(-1.0), " -1 ", false},
+		{float32(0.5), " 0.5 ", false},
+		{float32(-0.5), " -0.5 ", false},
+		{float32(1.0) / float32(256), " 0.00390625 ", false},
+		{float32(0), " 0 ", false},
+		{math.Copysign(0, -1), " -0 ", false},
+		{float64(1.0), " 1 ", false},
+		{float64(-1.0), " -1 ", false},
+		{float64(0.5), " 0.5 ", false},
+		{float64(-0.5), " -0.5 ", false},
+		{float64(0), " 0 ", false},
+		{math.Copysign(0, -1), " -0 ", false},
+		{"", `""`, false},
+		{"foo", `"foo"`, false},
 		// Newlines.
-		{"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
+		{"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`, false},
 		// "\v" == "v" on IE 6 so use "\u000b" instead.
-		{"\t\x0b", `"\t\u000b"`},
-		{struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
-		{[]any{}, "[]"},
-		{[]any{42, "foo", nil}, `[42,"foo",null]`},
-		{[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`},
-		{"<!--", `"\u003c!--"`},
-		{"-->", `"--\u003e"`},
-		{"<![CDATA[", `"\u003c![CDATA["`},
-		{"]]>", `"]]\u003e"`},
-		{"</script", `"\u003c/script"`},
-		{"\U0001D11E", "\"\U0001D11E\""}, // or "\uD834\uDD1E"
-		{nil, " null "},
+		{"\t\x0b", `"\t\u000b"`, false},
+		{struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`, false},
+		{[]any{}, "[]", false},
+		{[]any{42, "foo", nil}, `[42,"foo",null]`, false},
+		{[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`, false},
+		{"<!--", `"\u003c!--"`, false},
+		{"-->", `"--\u003e"`, false},
+		{"<![CDATA[", `"\u003c![CDATA["`, false},
+		{"]]>", `"]]\u003e"`, false},
+		{"</script", `"\u003c/script"`, false},
+		{"\U0001D11E", "\"\U0001D11E\"", false}, // or "\uD834\uDD1E"
+		{nil, " null ", false},
+		{&jsonErrType{}, " /* json: error calling MarshalJSON for type *template.jsonErrType: beep * / boop \\x3C/script blip \\x3C!-- */null ", true},
 	}
 
 	for _, test := range tests {
 		if js := jsValEscaper(test.x); js != test.js {
 			t.Errorf("%+v: want\n\t%q\ngot\n\t%q", test.x, test.js, js)
 		}
+		if test.skipNest {
+			continue
+		}
 		// Make sure that escaping corner cases are not broken
 		// by nesting.
 		a := []any{test.x}
diff --git a/src/html/template/state_string.go b/src/html/template/state_string.go
index be7a920..eed1e8b 100644
--- a/src/html/template/state_string.go
+++ b/src/html/template/state_string.go
@@ -21,7 +21,7 @@
 	_ = x[stateJS-10]
 	_ = x[stateJSDqStr-11]
 	_ = x[stateJSSqStr-12]
-	_ = x[stateJSBqStr-13]
+	_ = x[stateJSTmplLit-13]
 	_ = x[stateJSRegexp-14]
 	_ = x[stateJSBlockCmt-15]
 	_ = x[stateJSLineCmt-16]
@@ -39,9 +39,9 @@
 	_ = x[stateDead-28]
 }
 
-const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateJSHTMLOpenCmtstateJSHTMLCloseCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
+const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSTmplLitstateJSRegexpstateJSBlockCmtstateJSLineCmtstateJSHTMLOpenCmtstateJSHTMLCloseCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
 
-var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 214, 233, 241, 254, 267, 280, 293, 304, 320, 335, 345, 354}
+var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 156, 169, 184, 198, 216, 235, 243, 256, 269, 282, 295, 306, 322, 337, 347, 356}
 
 func (i state) String() string {
 	if i >= state(len(_state_index)-1) {
diff --git a/src/html/template/transition.go b/src/html/template/transition.go
index 432c365..d5a05f6 100644
--- a/src/html/template/transition.go
+++ b/src/html/template/transition.go
@@ -27,8 +27,8 @@
 	stateJS:             tJS,
 	stateJSDqStr:        tJSDelimited,
 	stateJSSqStr:        tJSDelimited,
-	stateJSBqStr:        tJSDelimited,
 	stateJSRegexp:       tJSDelimited,
+	stateJSTmplLit:      tJSTmpl,
 	stateJSBlockCmt:     tBlockCmt,
 	stateJSLineCmt:      tLineCmt,
 	stateJSHTMLOpenCmt:  tLineCmt,
@@ -270,7 +270,7 @@
 
 // tJS is the context transition function for the JS state.
 func tJS(c context, s []byte) (context, int) {
-	i := bytes.IndexAny(s, "\"`'/<-#")
+	i := bytes.IndexAny(s, "\"`'/{}<-#")
 	if i == -1 {
 		// Entire input is non string, comment, regexp tokens.
 		c.jsCtx = nextJSCtx(s, c.jsCtx)
@@ -283,7 +283,7 @@
 	case '\'':
 		c.state, c.jsCtx = stateJSSqStr, jsCtxRegexp
 	case '`':
-		c.state, c.jsCtx = stateJSBqStr, jsCtxRegexp
+		c.state, c.jsCtx = stateJSTmplLit, jsCtxRegexp
 	case '/':
 		switch {
 		case i+1 < len(s) && s[i+1] == '/':
@@ -320,12 +320,66 @@
 		if i+1 < len(s) && s[i+1] == '!' {
 			c.state, i = stateJSLineCmt, i+1
 		}
+	case '{':
+		// We only care about tracking brace depth if we are inside of a
+		// template literal.
+		if len(c.jsBraceDepth) == 0 {
+			return c, i + 1
+		}
+		c.jsBraceDepth[len(c.jsBraceDepth)-1]++
+	case '}':
+		if len(c.jsBraceDepth) == 0 {
+			return c, i + 1
+		}
+		// There appear to be no cases where a brace can be escaped in the JS
+		// context without being a syntax error, so we can simply count "\}"
+		// as "}" and move on; the script is already broken, and fully fledged
+		// parsers will just fail anyway.
+		c.jsBraceDepth[len(c.jsBraceDepth)-1]--
+		if c.jsBraceDepth[len(c.jsBraceDepth)-1] >= 0 {
+			return c, i + 1
+		}
+		c.jsBraceDepth = c.jsBraceDepth[:len(c.jsBraceDepth)-1]
+		c.state = stateJSTmplLit
 	default:
 		panic("unreachable")
 	}
 	return c, i + 1
 }
 
+// tJSTmpl is the context transition function for the JS template literal state.
+func tJSTmpl(c context, s []byte) (context, int) {
+	var k int
+	for {
+		i := k + bytes.IndexAny(s[k:], "`\\$")
+		if i < k {
+			break
+		}
+		switch s[i] {
+		case '\\':
+			i++
+			if i == len(s) {
+				return context{
+					state: stateError,
+					err:   errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in JS string: %q", s),
+				}, len(s)
+			}
+		case '$':
+			if len(s) >= i+2 && s[i+1] == '{' {
+				c.jsBraceDepth = append(c.jsBraceDepth, 0)
+				c.state = stateJS
+				return c, i + 2
+			}
+		case '`':
+			// end
+			c.state = stateJS
+			return c, i + 1
+		}
+		k = i + 1
+	}
+
+	return c, len(s)
+}
+
 // tJSDelimited is the context transition function for the JS string and regexp
 // states.
 func tJSDelimited(c context, s []byte) (context, int) {
@@ -333,8 +387,6 @@
 	switch c.state {
 	case stateJSSqStr:
 		specials = `\'`
-	case stateJSBqStr:
-		specials = "`\\"
 	case stateJSRegexp:
 		specials = `\/[]`
 	}
diff --git a/src/image/color/color.go b/src/image/color/color.go
index 8895839..c700a58 100644
--- a/src/image/color/color.go
+++ b/src/image/color/color.go
@@ -137,13 +137,13 @@
 	return y, y, y, 0xffff
 }
 
-// Model can convert any Color to one from its own color model. The conversion
+// Model can convert any [Color] to one from its own color model. The conversion
 // may be lossy.
 type Model interface {
 	Convert(c Color) Color
 }
 
-// ModelFunc returns a Model that invokes f to implement the conversion.
+// ModelFunc returns a [Model] that invokes f to implement the conversion.
 func ModelFunc(f func(Color) Color) Model {
 	// Note: using *modelFunc as the implementation
 	// means that callers can still use comparisons
diff --git a/src/image/color/palette/palette.go b/src/image/color/palette/palette.go
index 2a4cdcb..f73d463 100644
--- a/src/image/color/palette/palette.go
+++ b/src/image/color/palette/palette.go
@@ -10,7 +10,7 @@
 
 // Plan9 is a 256-color palette that partitions the 24-bit RGB space
 // into 4×4×4 subdivision, with 4 shades in each subcube. Compared to the
-// WebSafe, the idea is to reduce the color resolution by dicing the
+// [WebSafe], the idea is to reduce the color resolution by dicing the
 // color cube into fewer cells, and to use the extra space to increase the
 // intensity resolution. This results in 16 gray shades (4 gray subcubes with
 // 4 samples in each), 13 shades of each primary and secondary color (3
diff --git a/src/image/color/ycbcr.go b/src/image/color/ycbcr.go
index 8b6d508..a6d17ab 100644
--- a/src/image/color/ycbcr.go
+++ b/src/image/color/ycbcr.go
@@ -225,7 +225,7 @@
 	return uint32(r), uint32(g), uint32(b), 0xffff
 }
 
-// YCbCrModel is the Model for Y'CbCr colors.
+// YCbCrModel is the [Model] for Y'CbCr colors.
 var YCbCrModel Model = ModelFunc(yCbCrModel)
 
 func yCbCrModel(c Color) Color {
@@ -287,7 +287,7 @@
 	return uint32(r) * a / 0xffff, uint32(g) * a / 0xffff, uint32(b) * a / 0xffff, a
 }
 
-// NYCbCrAModel is the Model for non-alpha-premultiplied Y'CbCr-with-alpha
+// NYCbCrAModel is the [Model] for non-alpha-premultiplied Y'CbCr-with-alpha
 // colors.
 var NYCbCrAModel Model = ModelFunc(nYCbCrAModel)
 
@@ -332,7 +332,7 @@
 	return uint8(c), uint8(m), uint8(y), uint8(0xff - w)
 }
 
-// CMYKToRGB converts a CMYK quadruple to an RGB triple.
+// CMYKToRGB converts a [CMYK] quadruple to an RGB triple.
 func CMYKToRGB(c, m, y, k uint8) (uint8, uint8, uint8) {
 	w := 0xffff - uint32(k)*0x101
 	r := (0xffff - uint32(c)*0x101) * w / 0xffff
@@ -360,7 +360,7 @@
 	return r, g, b, 0xffff
 }
 
-// CMYKModel is the Model for CMYK colors.
+// CMYKModel is the [Model] for CMYK colors.
 var CMYKModel Model = ModelFunc(cmykModel)
 
 func cmykModel(c Color) Color {
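
The [Model] links above all point at the same conversion mechanism; a tiny sketch of converting through a Model and back with CMYKToRGB (assumed input color; not part of the diff):

package main

import (
	"fmt"
	"image/color"
)

func main() {
	red := color.RGBA{R: 200, G: 30, B: 20, A: 255}

	// Convert through the CMYK model documented above; the conversion may be lossy.
	cmyk := color.CMYKModel.Convert(red).(color.CMYK)
	fmt.Printf("C=%d M=%d Y=%d K=%d\n", cmyk.C, cmyk.M, cmyk.Y, cmyk.K)

	// CMYKToRGB goes the other way.
	r, g, b := color.CMYKToRGB(cmyk.C, cmyk.M, cmyk.Y, cmyk.K)
	fmt.Println(r, g, b)
}
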
diff --git a/src/image/draw/bench_test.go b/src/image/draw/bench_test.go
index 55d25b8..956f4ef 100644
--- a/src/image/draw/bench_test.go
+++ b/src/image/draw/bench_test.go
@@ -186,7 +186,7 @@
 		x := 3 * i % (dstw - srcw)
 		y := 7 * i % (dsth - srch)
 
-		DrawMask(dst, dst.Bounds().Add(image.Pt(x, y)), src, image.ZP, mask, image.ZP, op)
+		DrawMask(dst, dst.Bounds().Add(image.Pt(x, y)), src, image.Point{}, mask, image.Point{}, op)
 	}
 }
 
diff --git a/src/image/draw/clip_test.go b/src/image/draw/clip_test.go
index 0abf53e..7bbbf7e 100644
--- a/src/image/draw/clip_test.go
+++ b/src/image/draw/clip_test.go
@@ -25,104 +25,104 @@
 		image.Rect(0, 0, 100, 100),
 		image.Rect(0, 0, 100, 100),
 		image.Rect(0, 0, 100, 100),
-		image.ZR,
-		image.ZP,
-		image.ZP,
+		image.Rectangle{},
+		image.Point{},
+		image.Point{},
 		true,
 		image.Rect(0, 0, 100, 100),
-		image.ZP,
-		image.ZP,
+		image.Point{},
+		image.Point{},
 	},
 	{
 		"clip dr",
 		image.Rect(0, 0, 100, 100),
 		image.Rect(40, 40, 60, 60),
 		image.Rect(0, 0, 100, 100),
-		image.ZR,
-		image.ZP,
-		image.ZP,
+		image.Rectangle{},
+		image.Point{},
+		image.Point{},
 		true,
 		image.Rect(40, 40, 60, 60),
 		image.Pt(40, 40),
-		image.ZP,
+		image.Point{},
 	},
 	{
 		"clip sr",
 		image.Rect(0, 0, 100, 100),
 		image.Rect(0, 0, 100, 100),
 		image.Rect(20, 20, 80, 80),
-		image.ZR,
-		image.ZP,
-		image.ZP,
+		image.Rectangle{},
+		image.Point{},
+		image.Point{},
 		true,
 		image.Rect(20, 20, 80, 80),
 		image.Pt(20, 20),
-		image.ZP,
+		image.Point{},
 	},
 	{
 		"clip dr and sr",
 		image.Rect(0, 0, 100, 100),
 		image.Rect(0, 0, 50, 100),
 		image.Rect(20, 20, 80, 80),
-		image.ZR,
-		image.ZP,
-		image.ZP,
+		image.Rectangle{},
+		image.Point{},
+		image.Point{},
 		true,
 		image.Rect(20, 20, 50, 80),
 		image.Pt(20, 20),
-		image.ZP,
+		image.Point{},
 	},
 	{
 		"clip dr and sr, sp outside sr (top-left)",
 		image.Rect(0, 0, 100, 100),
 		image.Rect(0, 0, 50, 100),
 		image.Rect(20, 20, 80, 80),
-		image.ZR,
+		image.Rectangle{},
 		image.Pt(15, 8),
-		image.ZP,
+		image.Point{},
 		true,
 		image.Rect(5, 12, 50, 72),
 		image.Pt(20, 20),
-		image.ZP,
+		image.Point{},
 	},
 	{
 		"clip dr and sr, sp outside sr (middle-left)",
 		image.Rect(0, 0, 100, 100),
 		image.Rect(0, 0, 50, 100),
 		image.Rect(20, 20, 80, 80),
-		image.ZR,
+		image.Rectangle{},
 		image.Pt(15, 66),
-		image.ZP,
+		image.Point{},
 		true,
 		image.Rect(5, 0, 50, 14),
 		image.Pt(20, 66),
-		image.ZP,
+		image.Point{},
 	},
 	{
 		"clip dr and sr, sp outside sr (bottom-left)",
 		image.Rect(0, 0, 100, 100),
 		image.Rect(0, 0, 50, 100),
 		image.Rect(20, 20, 80, 80),
-		image.ZR,
+		image.Rectangle{},
 		image.Pt(15, 91),
-		image.ZP,
+		image.Point{},
 		true,
-		image.ZR,
+		image.Rectangle{},
 		image.Pt(15, 91),
-		image.ZP,
+		image.Point{},
 	},
 	{
 		"clip dr and sr, sp inside sr",
 		image.Rect(0, 0, 100, 100),
 		image.Rect(0, 0, 50, 100),
 		image.Rect(20, 20, 80, 80),
-		image.ZR,
+		image.Rectangle{},
 		image.Pt(44, 33),
-		image.ZP,
+		image.Point{},
 		true,
 		image.Rect(0, 0, 36, 47),
 		image.Pt(44, 33),
-		image.ZP,
+		image.Point{},
 	},
 
 	// The following tests all have a non-nil mask.
@@ -132,8 +132,8 @@
 		image.Rect(20, 0, 100, 80),
 		image.Rect(0, 0, 50, 49),
 		image.Rect(0, 0, 46, 47),
-		image.ZP,
-		image.ZP,
+		image.Point{},
+		image.Point{},
 		false,
 		image.Rect(20, 0, 46, 47),
 		image.Pt(20, 0),
diff --git a/src/image/draw/draw.go b/src/image/draw/draw.go
index 920ebb9..1b7e90f 100644
--- a/src/image/draw/draw.go
+++ b/src/image/draw/draw.go
@@ -23,10 +23,10 @@
 	Set(x, y int, c color.Color)
 }
 
-// RGBA64Image extends both the Image and image.RGBA64Image interfaces with a
+// RGBA64Image extends both the [Image] and [image.RGBA64Image] interfaces with a
 // SetRGBA64 method to change a single pixel. SetRGBA64 is equivalent to
 // calling Set, but it can avoid allocations from converting concrete color
-// types to the color.Color interface type.
+// types to the [color.Color] interface type.
 type RGBA64Image interface {
 	image.RGBA64Image
 	Set(x, y int, c color.Color)
@@ -50,20 +50,20 @@
 	Src
 )
 
-// Draw implements the Drawer interface by calling the Draw function with this
-// Op.
+// Draw implements the [Drawer] interface by calling the Draw function with this
+// [Op].
 func (op Op) Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) {
 	DrawMask(dst, r, src, sp, nil, image.Point{}, op)
 }
 
-// Drawer contains the Draw method.
+// Drawer contains the [Draw] method.
 type Drawer interface {
 	// Draw aligns r.Min in dst with sp in src and then replaces the
 	// rectangle r in dst with the result of drawing src on dst.
 	Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point)
 }
 
-// FloydSteinberg is a Drawer that is the Src Op with Floyd-Steinberg error
+// FloydSteinberg is a [Drawer] that is the [Src] [Op] with Floyd-Steinberg error
 // diffusion.
 var FloydSteinberg Drawer = floydSteinberg{}
 
@@ -106,7 +106,7 @@
 		(sp.Y < r.Min.Y || (sp.Y == r.Min.Y && sp.X < r.Min.X))
 }
 
-// Draw calls DrawMask with a nil mask.
+// Draw calls [DrawMask] with a nil mask.
 func Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op) {
 	DrawMask(dst, r, src, sp, nil, image.Point{}, op)
 }
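
The test and example updates in this change replace the deprecated image.ZP and image.ZR with zero-value literals. A small sketch in the same style using draw.Draw (assumed colors and sizes; not part of the diff):

package main

import (
	"fmt"
	"image"
	"image/color"
	"image/draw"
)

func main() {
	dst := image.NewRGBA(image.Rect(0, 0, 16, 16))
	src := &image.Uniform{C: color.RGBA{R: 0xff, A: 0xff}}

	// image.Point{} replaces the deprecated image.ZP as the source origin.
	draw.Draw(dst, dst.Bounds(), src, image.Point{}, draw.Src)

	fmt.Println(dst.RGBAAt(8, 8)) // {255 0 0 255}
}
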
diff --git a/src/image/draw/draw_test.go b/src/image/draw/draw_test.go
index a34d1c3..ff9e71d 100644
--- a/src/image/draw/draw_test.go
+++ b/src/image/draw/draw_test.go
@@ -13,7 +13,7 @@
 	"testing/quick"
 )
 
-// slowestRGBA is a draw.Image like image.RGBA but it is a different type and
+// slowestRGBA is a draw.Image like image.RGBA, but it is a different type and
 // therefore does not trigger the draw.go fastest code paths.
 //
 // Unlike slowerRGBA, it does not implement the draw.RGBA64Image interface.
@@ -484,7 +484,7 @@
 				}
 
 				// Draw the (src, mask, op) onto a copy of dst using a slow but obviously correct implementation.
-				golden := makeGolden(dst, image.Rect(0, 0, 16, 16), test.src, image.ZP, test.mask, image.ZP, test.op)
+				golden := makeGolden(dst, image.Rect(0, 0, 16, 16), test.src, image.Point{}, test.mask, image.Point{}, test.op)
 				b := dst.Bounds()
 				if !b.Eq(golden.Bounds()) {
 					t.Errorf("draw %v %s on %T: bounds %v versus %v",
@@ -492,7 +492,7 @@
 					continue
 				}
 				// Draw the same combination onto the actual dst using the optimized DrawMask implementation.
-				DrawMask(dst, image.Rect(0, 0, 16, 16), test.src, image.ZP, test.mask, image.ZP, test.op)
+				DrawMask(dst, image.Rect(0, 0, 16, 16), test.src, image.Point{}, test.mask, image.Point{}, test.op)
 				if image.Pt(8, 8).In(r) {
 					// Check that the resultant pixel at (8, 8) matches what we expect
 					// (the expected value can be verified by hand).
@@ -527,13 +527,13 @@
 				src := m.SubImage(image.Rect(5+xoff, 5+yoff, 10+xoff, 10+yoff)).(*image.RGBA)
 				b := dst.Bounds()
 				// Draw the (src, mask, op) onto a copy of dst using a slow but obviously correct implementation.
-				golden := makeGolden(dst, b, src, src.Bounds().Min, nil, image.ZP, op)
+				golden := makeGolden(dst, b, src, src.Bounds().Min, nil, image.Point{}, op)
 				if !b.Eq(golden.Bounds()) {
 					t.Errorf("drawOverlap xoff=%d,yoff=%d: bounds %v versus %v", xoff, yoff, dst.Bounds(), golden.Bounds())
 					continue
 				}
 				// Draw the same combination onto the actual dst using the optimized DrawMask implementation.
-				DrawMask(dst, b, src, src.Bounds().Min, nil, image.ZP, op)
+				DrawMask(dst, b, src, src.Bounds().Min, nil, image.Point{}, op)
 				// Check that the resultant dst image matches the golden output.
 				for y := b.Min.Y; y < b.Max.Y; y++ {
 					for x := b.Min.X; x < b.Max.X; x++ {
@@ -596,7 +596,7 @@
 		// Draw 1 pixel at a time.
 		for y := b.Min.Y; y < b.Max.Y; y++ {
 			for x := b.Min.X; x < b.Max.X; x++ {
-				DrawMask(m, image.Rect(x, y, x+1, y+1), src, image.ZP, nil, image.ZP, Src)
+				DrawMask(m, image.Rect(x, y, x+1, y+1), src, image.Point{}, nil, image.Point{}, Src)
 			}
 		}
 		check("pixel")
@@ -604,20 +604,20 @@
 		c = color.RGBA{0, 22, 0, 255}
 		src = &image.Uniform{C: c}
 		for y := b.Min.Y; y < b.Max.Y; y++ {
-			DrawMask(m, image.Rect(b.Min.X, y, b.Max.X, y+1), src, image.ZP, nil, image.ZP, Src)
+			DrawMask(m, image.Rect(b.Min.X, y, b.Max.X, y+1), src, image.Point{}, nil, image.Point{}, Src)
 		}
 		check("row")
 		// Draw 1 column at a time.
 		c = color.RGBA{0, 0, 33, 255}
 		src = &image.Uniform{C: c}
 		for x := b.Min.X; x < b.Max.X; x++ {
-			DrawMask(m, image.Rect(x, b.Min.Y, x+1, b.Max.Y), src, image.ZP, nil, image.ZP, Src)
+			DrawMask(m, image.Rect(x, b.Min.Y, x+1, b.Max.Y), src, image.Point{}, nil, image.Point{}, Src)
 		}
 		check("column")
 		// Draw the whole image at once.
 		c = color.RGBA{44, 55, 66, 77}
 		src = &image.Uniform{C: c}
-		DrawMask(m, b, src, image.ZP, nil, image.ZP, Src)
+		DrawMask(m, b, src, image.Point{}, nil, image.Point{}, Src)
 		check("whole")
 	}
 }
diff --git a/src/image/draw/example_test.go b/src/image/draw/example_test.go
index 2ccc2f4..bfa6355 100644
--- a/src/image/draw/example_test.go
+++ b/src/image/draw/example_test.go
@@ -37,7 +37,7 @@
 		color.Gray{Y: 0},
 	})
 
-	draw.FloydSteinberg.Draw(pi, im.Bounds(), im, image.ZP)
+	draw.FloydSteinberg.Draw(pi, im.Bounds(), im, image.Point{})
 	shade := []string{" ", "░", "▒", "▓", "█"}
 	for i, p := range pi.Pix {
 		fmt.Print(shade[p])
diff --git a/src/image/format.go b/src/image/format.go
index 51d7ad9..7426afb 100644
--- a/src/image/format.go
+++ b/src/image/format.go
@@ -28,12 +28,12 @@
 	atomicFormats atomic.Value
 )
 
-// RegisterFormat registers an image format for use by Decode.
+// RegisterFormat registers an image format for use by [Decode].
 // Name is the name of the format, like "jpeg" or "png".
 // Magic is the magic prefix that identifies the format's encoding. The magic
 // string can contain "?" wildcards that each match any one byte.
-// Decode is the function that decodes the encoded image.
-// DecodeConfig is the function that decodes just its configuration.
+// [Decode] is the function that decodes the encoded image.
+// [DecodeConfig] is the function that decodes just its configuration.
 func RegisterFormat(name, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error)) {
 	formatsMu.Lock()
 	formats, _ := atomicFormats.Load().([]format)
diff --git a/src/image/geom.go b/src/image/geom.go
index e71aa61..7731b6b 100644
--- a/src/image/geom.go
+++ b/src/image/geom.go
@@ -67,12 +67,12 @@
 	return p == q
 }
 
-// ZP is the zero Point.
+// ZP is the zero [Point].
 //
-// Deprecated: Use a literal image.Point{} instead.
+// Deprecated: Use a literal [image.Point] instead.
 var ZP Point
 
-// Pt is shorthand for Point{X, Y}.
+// Pt is shorthand for [Point]{X, Y}.
 func Pt(X, Y int) Point {
 	return Point{X, Y}
 }
@@ -82,7 +82,7 @@
 // well-formed. A rectangle's methods always return well-formed outputs for
 // well-formed inputs.
 //
-// A Rectangle is also an Image whose bounds are the rectangle itself. At
+// A Rectangle is also an [Image] whose bounds are the rectangle itself. At
 // returns color.Opaque for points in the rectangle and color.Transparent
 // otherwise.
 type Rectangle struct {
@@ -238,7 +238,7 @@
 	return r
 }
 
-// At implements the Image interface.
+// At implements the [Image] interface.
 func (r Rectangle) At(x, y int) color.Color {
 	if (Point{x, y}).In(r) {
 		return color.Opaque
@@ -246,7 +246,7 @@
 	return color.Transparent
 }
 
-// RGBA64At implements the RGBA64Image interface.
+// RGBA64At implements the [RGBA64Image] interface.
 func (r Rectangle) RGBA64At(x, y int) color.RGBA64 {
 	if (Point{x, y}).In(r) {
 		return color.RGBA64{0xffff, 0xffff, 0xffff, 0xffff}
@@ -254,22 +254,22 @@
 	return color.RGBA64{}
 }
 
-// Bounds implements the Image interface.
+// Bounds implements the [Image] interface.
 func (r Rectangle) Bounds() Rectangle {
 	return r
 }
 
-// ColorModel implements the Image interface.
+// ColorModel implements the [Image] interface.
 func (r Rectangle) ColorModel() color.Model {
 	return color.Alpha16Model
 }
 
-// ZR is the zero Rectangle.
+// ZR is the zero [Rectangle].
 //
-// Deprecated: Use a literal image.Rectangle{} instead.
+// Deprecated: Use a literal [image.Rectangle] instead.
 var ZR Rectangle
 
-// Rect is shorthand for Rectangle{Pt(x0, y0), Pt(x1, y1)}. The returned
+// Rect is shorthand for [Rectangle]{Pt(x0, y0), [Pt](x1, y1)}. The returned
 // rectangle has minimum and maximum coordinates swapped if necessary so that
 // it is well-formed.
 func Rect(x0, y0, x1, y1 int) Rectangle {
diff --git a/src/image/gif/reader.go b/src/image/gif/reader.go
index 0867b10..b4ea4fd 100644
--- a/src/image/gif/reader.go
+++ b/src/image/gif/reader.go
@@ -156,7 +156,7 @@
 }
 
 // blockReader must implement io.Reader, but its Read shouldn't ever actually
-// be called in practice. The compress/lzw package will only call ReadByte.
+// be called in practice. The compress/lzw package will only call [blockReader.ReadByte].
 func (b *blockReader) Read(p []byte) (int, error) {
 	if len(p) == 0 || b.err != nil {
 		return 0, b.err
@@ -561,7 +561,7 @@
 }
 
 // Decode reads a GIF image from r and returns the first embedded
-// image as an image.Image.
+// image as an [image.Image].
 func Decode(r io.Reader) (image.Image, error) {
 	var d decoder
 	if err := d.decode(r, false, false); err != nil {
diff --git a/src/image/gif/reader_test.go b/src/image/gif/reader_test.go
index a7f943a..f90ebc5 100644
--- a/src/image/gif/reader_test.go
+++ b/src/image/gif/reader_test.go
@@ -28,7 +28,7 @@
 	trailerStr = "\x3b"
 )
 
-// lzw.NewReader wants a io.ByteReader, this ensures we're compatible.
+// lzw.NewReader wants an io.ByteReader, this ensures we're compatible.
 var _ io.ByteReader = (*blockReader)(nil)
 
 // lzwEncode returns an LZW encoding (with 2-bit literals) of in.
diff --git a/src/image/image.go b/src/image/image.go
index dfb70d4..f08182b 100644
--- a/src/image/image.go
+++ b/src/image/image.go
@@ -4,11 +4,11 @@
 
 // Package image implements a basic 2-D image library.
 //
-// The fundamental interface is called Image. An Image contains colors, which
+// The fundamental interface is called [Image]. An [Image] contains colors, which
 // are described in the image/color package.
 //
-// Values of the Image interface are created either by calling functions such
-// as NewRGBA and NewPaletted, or by calling Decode on an io.Reader containing
+// Values of the [Image] interface are created either by calling functions such
+// as [NewRGBA] and [NewPaletted], or by calling [Decode] on an [io.Reader] containing
 // image data in a format such as GIF, JPEG or PNG. Decoding any particular
 // image format requires the prior registration of a decoder function.
 // Registration is typically automatic as a side effect of initializing that
@@ -21,6 +21,20 @@
 //
 // See "The Go image package" for more details:
 // https://golang.org/doc/articles/image_package.html
+//
+// # Security Considerations
+//
+// The image package can be used to parse arbitrarily large images, which can
+// cause resource exhaustion on machines which do not have enough memory to
+// store them. When operating on arbitrary images, [DecodeConfig] should be called
+// before [Decode], so that the program can decide whether the image, as defined
+// in the returned header, can be safely decoded with the available resources. A
+// call to [Decode] which produces an extremely large image, as defined in the
+// header returned by [DecodeConfig], is not considered a security issue,
+// regardless of whether the image is itself malformed or not. A call to
+// [DecodeConfig] which returns a header which does not match the image returned
+// by [Decode] may be considered a security issue, and should be reported per the
+// [Go Security Policy](https://go.dev/security/policy).
 package image
 
 import (
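
A sketch of the pattern the new Security Considerations section recommends (the pixel limit, file handling, and file name are assumptions for illustration; not part of the diff): inspect the header with DecodeConfig before committing to a full Decode.

package main

import (
	"fmt"
	"image"
	_ "image/png" // register the PNG decoder for Decode/DecodeConfig
	"os"
)

const maxPixels = 10000 * 10000 // assumed application-specific limit

func decodeChecked(path string) (image.Image, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	cfg, _, err := image.DecodeConfig(f)
	if err != nil {
		return nil, err
	}
	if cfg.Width*cfg.Height > maxPixels {
		return nil, fmt.Errorf("image too large: %dx%d", cfg.Width, cfg.Height)
	}

	// Rewind and decode for real now that the dimensions look acceptable.
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	img, _, err := image.Decode(f)
	return img, err
}

func main() {
	img, err := decodeChecked("example.png")
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("bounds:", img.Bounds())
}
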
@@ -33,7 +47,7 @@
 	Width, Height int
 }
 
-// Image is a finite rectangular grid of color.Color values taken from a color
+// Image is a finite rectangular grid of [color.Color] values taken from a color
 // model.
 type Image interface {
 	// ColorModel returns the Image's color model.
@@ -47,7 +61,7 @@
 	At(x, y int) color.Color
 }
 
-// RGBA64Image is an Image whose pixels can be converted directly to a
+// RGBA64Image is an [Image] whose pixels can be converted directly to a
 // color.RGBA64.
 type RGBA64Image interface {
 	// RGBA64At returns the RGBA64 color of the pixel at (x, y). It is
@@ -59,7 +73,7 @@
 }
 
 // PalettedImage is an image whose colors may come from a limited palette.
-// If m is a PalettedImage and m.ColorModel() returns a color.Palette p,
+// If m is a PalettedImage and m.ColorModel() returns a [color.Palette] p,
 // then m.At(x, y) should be equivalent to p[m.ColorIndexAt(x, y)]. If m's
 // color model is not a color.Palette, then ColorIndexAt's behavior is
 // undefined.
@@ -84,7 +98,7 @@
 	return totalLength
 }
 
-// RGBA is an in-memory image whose At method returns color.RGBA values.
+// RGBA is an in-memory image whose At method returns [color.RGBA] values.
 type RGBA struct {
 	// Pix holds the image's pixels, in R, G, B, A order. The pixel at
 	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
@@ -209,7 +223,7 @@
 	return true
 }
 
-// NewRGBA returns a new RGBA image with the given bounds.
+// NewRGBA returns a new [RGBA] image with the given bounds.
 func NewRGBA(r Rectangle) *RGBA {
 	return &RGBA{
 		Pix:    make([]uint8, pixelBufferLength(4, r, "RGBA")),
@@ -218,7 +232,7 @@
 	}
 }
 
-// RGBA64 is an in-memory image whose At method returns color.RGBA64 values.
+// RGBA64 is an in-memory image whose At method returns [color.RGBA64] values.
 type RGBA64 struct {
 	// Pix holds the image's pixels, in R, G, B, A order and big-endian format. The pixel at
 	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*8].
@@ -326,7 +340,7 @@
 	return true
 }
 
-// NewRGBA64 returns a new RGBA64 image with the given bounds.
+// NewRGBA64 returns a new [RGBA64] image with the given bounds.
 func NewRGBA64(r Rectangle) *RGBA64 {
 	return &RGBA64{
 		Pix:    make([]uint8, pixelBufferLength(8, r, "RGBA64")),
@@ -335,7 +349,7 @@
 	}
 }
 
-// NRGBA is an in-memory image whose At method returns color.NRGBA values.
+// NRGBA is an in-memory image whose At method returns [color.NRGBA] values.
 type NRGBA struct {
 	// Pix holds the image's pixels, in R, G, B, A order. The pixel at
 	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
@@ -453,7 +467,7 @@
 	return true
 }
 
-// NewNRGBA returns a new NRGBA image with the given bounds.
+// NewNRGBA returns a new [NRGBA] image with the given bounds.
 func NewNRGBA(r Rectangle) *NRGBA {
 	return &NRGBA{
 		Pix:    make([]uint8, pixelBufferLength(4, r, "NRGBA")),
@@ -462,7 +476,7 @@
 	}
 }
 
-// NRGBA64 is an in-memory image whose At method returns color.NRGBA64 values.
+// NRGBA64 is an in-memory image whose At method returns [color.NRGBA64] values.
 type NRGBA64 struct {
 	// Pix holds the image's pixels, in R, G, B, A order and big-endian format. The pixel at
 	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*8].
@@ -597,7 +611,7 @@
 	return true
 }
 
-// NewNRGBA64 returns a new NRGBA64 image with the given bounds.
+// NewNRGBA64 returns a new [NRGBA64] image with the given bounds.
 func NewNRGBA64(r Rectangle) *NRGBA64 {
 	return &NRGBA64{
 		Pix:    make([]uint8, pixelBufferLength(8, r, "NRGBA64")),
@@ -606,7 +620,7 @@
 	}
 }
 
-// Alpha is an in-memory image whose At method returns color.Alpha values.
+// Alpha is an in-memory image whose At method returns [color.Alpha] values.
 type Alpha struct {
 	// Pix holds the image's pixels, as alpha values. The pixel at
 	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
@@ -705,7 +719,7 @@
 	return true
 }
 
-// NewAlpha returns a new Alpha image with the given bounds.
+// NewAlpha returns a new [Alpha] image with the given bounds.
 func NewAlpha(r Rectangle) *Alpha {
 	return &Alpha{
 		Pix:    make([]uint8, pixelBufferLength(1, r, "Alpha")),
@@ -714,7 +728,7 @@
 	}
 }
 
-// Alpha16 is an in-memory image whose At method returns color.Alpha16 values.
+// Alpha16 is an in-memory image whose At method returns [color.Alpha16] values.
 type Alpha16 struct {
 	// Pix holds the image's pixels, as alpha values in big-endian format. The pixel at
 	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*2].
@@ -816,7 +830,7 @@
 	return true
 }
 
-// NewAlpha16 returns a new Alpha16 image with the given bounds.
+// NewAlpha16 returns a new [Alpha16] image with the given bounds.
 func NewAlpha16(r Rectangle) *Alpha16 {
 	return &Alpha16{
 		Pix:    make([]uint8, pixelBufferLength(2, r, "Alpha16")),
@@ -825,7 +839,7 @@
 	}
 }
 
-// Gray is an in-memory image whose At method returns color.Gray values.
+// Gray is an in-memory image whose At method returns [color.Gray] values.
 type Gray struct {
 	// Pix holds the image's pixels, as gray values. The pixel at
 	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
@@ -913,7 +927,7 @@
 	return true
 }
 
-// NewGray returns a new Gray image with the given bounds.
+// NewGray returns a new [Gray] image with the given bounds.
 func NewGray(r Rectangle) *Gray {
 	return &Gray{
 		Pix:    make([]uint8, pixelBufferLength(1, r, "Gray")),
@@ -922,7 +936,7 @@
 	}
 }
 
-// Gray16 is an in-memory image whose At method returns color.Gray16 values.
+// Gray16 is an in-memory image whose At method returns [color.Gray16] values.
 type Gray16 struct {
 	// Pix holds the image's pixels, as gray values in big-endian format. The pixel at
 	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*2].
@@ -1013,7 +1027,7 @@
 	return true
 }
 
-// NewGray16 returns a new Gray16 image with the given bounds.
+// NewGray16 returns a new [Gray16] image with the given bounds.
 func NewGray16(r Rectangle) *Gray16 {
 	return &Gray16{
 		Pix:    make([]uint8, pixelBufferLength(2, r, "Gray16")),
@@ -1022,7 +1036,7 @@
 	}
 }
 
-// CMYK is an in-memory image whose At method returns color.CMYK values.
+// CMYK is an in-memory image whose At method returns [color.CMYK] values.
 type CMYK struct {
 	// Pix holds the image's pixels, in C, M, Y, K order. The pixel at
 	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
@@ -1261,7 +1275,7 @@
 	return true
 }
 
-// NewPaletted returns a new Paletted image with the given width, height and
+// NewPaletted returns a new [Paletted] image with the given width, height and
 // palette.
 func NewPaletted(r Rectangle, p color.Palette) *Paletted {
 	return &Paletted{
diff --git a/src/image/jpeg/fuzz_test.go b/src/image/jpeg/fuzz_test.go
index bd534a9..91a9914 100644
--- a/src/image/jpeg/fuzz_test.go
+++ b/src/image/jpeg/fuzz_test.go
@@ -49,16 +49,18 @@
 			var w bytes.Buffer
 			err := Encode(&w, img, &Options{Quality: q})
 			if err != nil {
-				t.Fatalf("failed to encode valid image: %s", err)
+				t.Errorf("failed to encode valid image: %s", err)
+				continue
 			}
 			img1, err := Decode(&w)
 			if err != nil {
-				t.Fatalf("failed to decode roundtripped image: %s", err)
+				t.Errorf("failed to decode roundtripped image: %s", err)
+				continue
 			}
 			got := img1.Bounds()
 			want := img.Bounds()
 			if !got.Eq(want) {
-				t.Fatalf("roundtripped image bounds have changed, got: %s, want: %s", got, want)
+				t.Errorf("roundtripped image bounds have changed, got: %s, want: %s", got, want)
 			}
 		}
 	})
diff --git a/src/image/jpeg/reader.go b/src/image/jpeg/reader.go
index 61f2b40..5aa51ad 100644
--- a/src/image/jpeg/reader.go
+++ b/src/image/jpeg/reader.go
@@ -86,7 +86,7 @@
 	53, 60, 61, 54, 47, 55, 62, 63,
 }
 
-// Deprecated: Reader is not used by the image/jpeg package and should
+// Deprecated: Reader is not used by the [image/jpeg] package and should
 // not be used by others. It is kept for compatibility.
 type Reader interface {
 	io.ByteReader
@@ -767,7 +767,7 @@
 	return img, nil
 }
 
-// Decode reads a JPEG image from r and returns it as an image.Image.
+// Decode reads a JPEG image from r and returns it as an [image.Image].
 func Decode(r io.Reader) (image.Image, error) {
 	var d decoder
 	return d.decode(r, false)
diff --git a/src/image/jpeg/writer.go b/src/image/jpeg/writer.go
index 0027f78..87c109a 100644
--- a/src/image/jpeg/writer.go
+++ b/src/image/jpeg/writer.go
@@ -12,14 +12,6 @@
 	"io"
 )
 
-// min returns the minimum of two integers.
-func min(x, y int) int {
-	if x < y {
-		return x
-	}
-	return y
-}
-
 // div returns a/b rounded to the nearest integer, instead of rounded to zero.
 func div(a, b int32) int32 {
 	if a >= 0 {
@@ -571,7 +563,7 @@
 }
 
 // Encode writes the Image m to w in JPEG 4:2:0 baseline format with the given
-// options. Default parameters are used if a nil *Options is passed.
+// options. Default parameters are used if a nil *[Options] is passed.
 func Encode(w io.Writer, m image.Image, o *Options) error {
 	b := m.Bounds()
 	if b.Dx() >= 1<<16 || b.Dy() >= 1<<16 {
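As a usage sketch for the Encode documentation above: encoding once with explicit options and once with the defaults selected by a nil *Options (the 64x64 image and the quality value 85 are arbitrary choices for illustration):

	package main

	import (
		"bytes"
		"image"
		"image/jpeg"
		"log"
	)

	func main() {
		img := image.NewRGBA(image.Rect(0, 0, 64, 64))
		var buf bytes.Buffer

		// Explicit options: Quality ranges from 1 to 100; higher means
		// better fidelity and larger output.
		if err := jpeg.Encode(&buf, img, &jpeg.Options{Quality: 85}); err != nil {
			log.Fatal(err)
		}

		// A nil *Options uses the default parameters (jpeg.DefaultQuality, 75).
		buf.Reset()
		if err := jpeg.Encode(&buf, img, nil); err != nil {
			log.Fatal(err)
		}
	}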
diff --git a/src/image/names.go b/src/image/names.go
index 17b0658..a2968fa 100644
--- a/src/image/names.go
+++ b/src/image/names.go
@@ -19,8 +19,8 @@
 	Opaque = NewUniform(color.Opaque)
 )
 
-// Uniform is an infinite-sized Image of uniform color.
-// It implements the color.Color, color.Model, and Image interfaces.
+// Uniform is an infinite-sized [Image] of uniform color.
+// It implements the [color.Color], [color.Model], and [Image] interfaces.
 type Uniform struct {
 	C color.Color
 }
@@ -52,7 +52,7 @@
 	return a == 0xffff
 }
 
-// NewUniform returns a new Uniform image of the given color.
+// NewUniform returns a new [Uniform] image of the given color.
 func NewUniform(c color.Color) *Uniform {
 	return &Uniform{c}
 }
diff --git a/src/image/png/fuzz_test.go b/src/image/png/fuzz_test.go
index 4b63945..ea4bf4e 100644
--- a/src/image/png/fuzz_test.go
+++ b/src/image/png/fuzz_test.go
@@ -56,16 +56,18 @@
 			e := &Encoder{CompressionLevel: l}
 			err = e.Encode(&w, img)
 			if err != nil {
-				t.Fatalf("failed to encode valid image: %s", err)
+				t.Errorf("failed to encode valid image: %s", err)
+				continue
 			}
 			img1, err := Decode(&w)
 			if err != nil {
-				t.Fatalf("failed to decode roundtripped image: %s", err)
+				t.Errorf("failed to decode roundtripped image: %s", err)
+				continue
 			}
 			got := img1.Bounds()
 			want := img.Bounds()
 			if !got.Eq(want) {
-				t.Fatalf("roundtripped image bounds have changed, got: %s, want: %s", got, want)
+				t.Errorf("roundtripped image bounds have changed, got: %s, want: %s", got, want)
 			}
 		}
 	})
diff --git a/src/image/png/reader.go b/src/image/png/reader.go
index 3a71734..020e91c 100644
--- a/src/image/png/reader.go
+++ b/src/image/png/reader.go
@@ -136,13 +136,6 @@
 
 func (e UnsupportedError) Error() string { return "png: unsupported feature: " + string(e) }
 
-func min(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
-
 func (d *decoder) parseIHDR(length uint32) error {
 	if length != 13 {
 		return FormatError("bad IHDR length")
@@ -971,7 +964,7 @@
 	return nil
 }
 
-// Decode reads a PNG image from r and returns it as an image.Image.
+// Decode reads a PNG image from r and returns it as an [image.Image].
 // The type of Image returned depends on the PNG contents.
 func Decode(r io.Reader) (image.Image, error) {
 	d := &decoder{
diff --git a/src/image/png/writer.go b/src/image/png/writer.go
index 0d747da..9f92ad3 100644
--- a/src/image/png/writer.go
+++ b/src/image/png/writer.go
@@ -25,7 +25,7 @@
 }
 
 // EncoderBufferPool is an interface for getting and returning temporary
-// instances of the EncoderBuffer struct. This can be used to reuse buffers
+// instances of the [EncoderBuffer] struct. This can be used to reuse buffers
 // when encoding multiple images.
 type EncoderBufferPool interface {
 	Get() *EncoderBuffer
@@ -190,7 +190,7 @@
 
 // An encoder is an io.Writer that satisfies writes by writing PNG IDAT chunks,
 // including an 8-byte header and 4-byte CRC checksum per Write call. Such calls
-// should be relatively infrequent, since writeIDATs uses a bufio.Writer.
+// should be relatively infrequent, since writeIDATs uses a [bufio.Writer].
 //
 // This method should only be called from writeIDATs (via writeImage).
 // No other code should treat an encoder as an io.Writer.
@@ -586,7 +586,7 @@
 func (e *encoder) writeIEND() { e.writeChunk(nil, "IEND") }
 
 // Encode writes the Image m to w in PNG format. Any Image may be
-// encoded, but images that are not image.NRGBA might be encoded lossily.
+// encoded, but images that are not [image.NRGBA] might be encoded lossily.
 func Encode(w io.Writer, m image.Image) error {
 	var e Encoder
 	return e.Encode(w, m)
diff --git a/src/image/ycbcr.go b/src/image/ycbcr.go
index 78f5ebe..5433311 100644
--- a/src/image/ycbcr.go
+++ b/src/image/ycbcr.go
@@ -294,7 +294,7 @@
 	return true
 }
 
-// NewNYCbCrA returns a new NYCbCrA image with the given bounds and subsample
+// NewNYCbCrA returns a new [NYCbCrA] image with the given bounds and subsample
 // ratio.
 func NewNYCbCrA(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *NYCbCrA {
 	w, h, cw, ch := yCbCrSize(r, subsampleRatio)
diff --git a/src/index/suffixarray/sais.go b/src/index/suffixarray/sais.go
index 3283aa3..b53700b 100644
--- a/src/index/suffixarray/sais.go
+++ b/src/index/suffixarray/sais.go
@@ -141,7 +141,7 @@
 // then the algorithm runs a little faster.
 // If sais_8_32 modifies tmp, it sets tmp[0] = -1 on return.
 func sais_8_32(text []byte, textMax int, sa, tmp []int32) {
-	if len(sa) != len(text) || len(tmp) < int(textMax) {
+	if len(sa) != len(text) || len(tmp) < textMax {
 		panic("suffixarray: misuse of sais_8_32")
 	}
 
diff --git a/src/index/suffixarray/suffixarray.go b/src/index/suffixarray/suffixarray.go
index 7fca0fd..5c222a1 100644
--- a/src/index/suffixarray/suffixarray.go
+++ b/src/index/suffixarray/suffixarray.go
@@ -70,8 +70,8 @@
 	return ints{nil, a.int64[i:j]}
 }
 
-// New creates a new Index for data.
-// Index creation time is O(N) for N = len(data).
+// New creates a new [Index] for data.
+// [Index] creation time is O(N) for N = len(data).
 func New(data []byte) *Index {
 	ix := &Index{data: data}
 	if len(data) <= maxData32 {
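A short usage sketch for New: building an index and querying it with the package's Lookup method (the "banana" data is an arbitrary example):

	package main

	import (
		"fmt"
		"index/suffixarray"
	)

	func main() {
		data := []byte("banana")

		// New builds the index in O(N) time for N = len(data).
		idx := suffixarray.New(data)

		// Lookup returns at most n byte offsets of the query; n < 0 means all of them.
		offsets := idx.Lookup([]byte("an"), -1)
		fmt.Println(offsets) // offsets 1 and 3, in unspecified order
	}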
diff --git a/src/internal/abi/abi_loong64.go b/src/internal/abi/abi_loong64.go
new file mode 100644
index 0000000..c2306ae
--- /dev/null
+++ b/src/internal/abi/abi_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.regabiargs
+
+package abi
+
+const (
+	// See abi_generic.go.
+
+	// R4 - R19
+	IntArgRegs = 16
+
+	// F0 - F15
+	FloatArgRegs = 16
+
+	EffectiveFloatRegSize = 8
+)
diff --git a/src/internal/abi/abi_test.go b/src/internal/abi/abi_test.go
index 44b9e78..e230c88 100644
--- a/src/internal/abi/abi_test.go
+++ b/src/internal/abi/abi_test.go
@@ -46,7 +46,7 @@
 	testenv.WriteImportcfg(t, importcfgfile, nil, "internal/abi")
 
 	// parse assembly code for symabi.
-	cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "asm", "-gensymabis", "-o", symabi, asmSrc)
+	cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "asm", "-p=p", "-gensymabis", "-o", symabi, asmSrc)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
 		t.Fatalf("go tool asm -gensymabis failed: %v\n%s", err, out)
diff --git a/src/internal/abi/compiletype.go b/src/internal/abi/compiletype.go
index d92adde..6f1a2d6 100644
--- a/src/internal/abi/compiletype.go
+++ b/src/internal/abi/compiletype.go
@@ -21,147 +21,5 @@
 // This exported function is in an internal package, so it may change to depend on ptrSize in the future.
 func UncommonSize() uint64 { return 4 + 2 + 2 + 4 + 4 }
 
-// IMethodSize returns sizeof(IMethod) for a compilation target with a given ptrSize
-func IMethodSize(ptrSize int) int { return 4 + 4 }
-
-// KindOff returns the offset of Type.Kind_ for a compilation target with a given ptrSize
-func KindOff(ptrSize int) int { return 2*ptrSize + 7 }
-
-// SizeOff returns the offset of Type.Size_ for a compilation target with a given ptrSize
-func SizeOff(ptrSize int) int { return 0 }
-
-// PtrBytes returns the offset of Type.PtrBytes for a compilation target with a given ptrSize
-func PtrBytesOff(ptrSize int) int { return ptrSize }
-
 // TFlagOff returns the offset of Type.TFlag for a compilation target with a given ptrSize
 func TFlagOff(ptrSize int) int { return 2*ptrSize + 4 }
-
-// Offset is for computing offsets of type data structures at compile/link time;
-// the target platform may not be the host platform.  Its state includes the
-// current offset, necessary alignment for the sequence of types, and the size
-// of pointers and alignment of slices, interfaces, and strings (this is for tearing-
-// resistant access to these types, if/when that is supported).
-type Offset struct {
-	off        uint64 // the current offset
-	align      uint8  // the required alignmentof the container
-	ptrSize    uint8  // the size of a pointer in bytes
-	sliceAlign uint8  // the alignment of slices (and interfaces and strings)
-}
-
-// NewOffset returns a new Offset with offset 0 and alignment 1.
-func NewOffset(ptrSize uint8, twoWordAlignSlices bool) Offset {
-	if twoWordAlignSlices {
-		return Offset{off: 0, align: 1, ptrSize: ptrSize, sliceAlign: 2 * ptrSize}
-	}
-	return Offset{off: 0, align: 1, ptrSize: ptrSize, sliceAlign: ptrSize}
-}
-
-func assertIsAPowerOfTwo(x uint8) {
-	if x == 0 {
-		panic("Zero is not a power of two")
-	}
-	if x&-x == x {
-		return
-	}
-	panic("Not a power of two")
-}
-
-// InitializedOffset returns a new Offset with specified offset, alignment, pointer size, and slice alignment.
-func InitializedOffset(off int, align uint8, ptrSize uint8, twoWordAlignSlices bool) Offset {
-	assertIsAPowerOfTwo(align)
-	o0 := NewOffset(ptrSize, twoWordAlignSlices)
-	o0.off = uint64(off)
-	o0.align = align
-	return o0
-}
-
-func (o Offset) align_(a uint8) Offset {
-	o.off = (o.off + uint64(a) - 1) & ^(uint64(a) - 1)
-	if o.align < a {
-		o.align = a
-	}
-	return o
-}
-
-// Align returns the offset obtained by aligning offset to a multiple of a.
-// a must be a power of two.
-func (o Offset) Align(a uint8) Offset {
-	assertIsAPowerOfTwo(a)
-	return o.align_(a)
-}
-
-// plus returns the offset obtained by appending a power-of-2-sized-and-aligned object to o.
-func (o Offset) plus(x uint64) Offset {
-	o = o.align_(uint8(x))
-	o.off += x
-	return o
-}
-
-// D8 returns the offset obtained by appending an 8-bit field to o.
-func (o Offset) D8() Offset {
-	return o.plus(1)
-}
-
-// D16 returns the offset obtained by appending a 16-bit field to o.
-func (o Offset) D16() Offset {
-	return o.plus(2)
-}
-
-// D32 returns the offset obtained by appending a 32-bit field to o.
-func (o Offset) D32() Offset {
-	return o.plus(4)
-}
-
-// D64 returns the offset obtained by appending a 64-bit field to o.
-func (o Offset) D64() Offset {
-	return o.plus(8)
-}
-
-// D64 returns the offset obtained by appending a pointer field to o.
-func (o Offset) P() Offset {
-	if o.ptrSize == 0 {
-		panic("This offset has no defined pointer size")
-	}
-	return o.plus(uint64(o.ptrSize))
-}
-
-// Slice returns the offset obtained by appending a slice field to o.
-func (o Offset) Slice() Offset {
-	o = o.align_(o.sliceAlign)
-	o.off += 3 * uint64(o.ptrSize)
-	// There's been discussion of whether slices should be 2-word aligned to allow
-	// use of aligned 2-word load/store to prevent tearing, this is future proofing.
-	// In general, for purposes of struct layout (and very likely default C layout
-	// compatibility) the "size" of a Go type is rounded up to its alignment.
-	return o.Align(o.sliceAlign)
-}
-
-// String returns the offset obtained by appending a string field to o.
-func (o Offset) String() Offset {
-	o = o.align_(o.sliceAlign)
-	o.off += 2 * uint64(o.ptrSize)
-	return o // We "know" it needs no further alignment
-}
-
-// Interface returns the offset obtained by appending an interface field to o.
-func (o Offset) Interface() Offset {
-	o = o.align_(o.sliceAlign)
-	o.off += 2 * uint64(o.ptrSize)
-	return o // We "know" it needs no further alignment
-}
-
-// Offset returns the struct-aligned offset (size) of o.
-// This is at least as large as the current internal offset; it may be larger.
-func (o Offset) Offset() uint64 {
-	return o.Align(o.align).off
-}
-
-func (o Offset) PlusUncommon() Offset {
-	o.off += UncommonSize()
-	return o
-}
-
-// CommonOffset returns the Offset to the data after the common portion of type data structures.
-func CommonOffset(ptrSize int, twoWordAlignSlices bool) Offset {
-	return InitializedOffset(CommonSize(ptrSize), uint8(ptrSize), uint8(ptrSize), twoWordAlignSlices)
-}
diff --git a/src/internal/abi/map.go b/src/internal/abi/map.go
index e5b0a0b..ad054e7 100644
--- a/src/internal/abi/map.go
+++ b/src/internal/abi/map.go
@@ -12,3 +12,6 @@
 	MapMaxKeyBytes     = 128 // Must fit in a uint8.
 	MapMaxElemBytes    = 128 // Must fit in a uint8.
 )
+
+// ZeroValSize is the size in bytes of runtime.zeroVal.
+const ZeroValSize = 1024
diff --git a/src/internal/abi/switch.go b/src/internal/abi/switch.go
new file mode 100644
index 0000000..9669fe5
--- /dev/null
+++ b/src/internal/abi/switch.go
@@ -0,0 +1,61 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+type InterfaceSwitch struct {
+	Cache  *InterfaceSwitchCache
+	NCases int
+
+	// Array of NCases elements.
+	// Each case must be a non-empty interface type.
+	Cases [1]*InterfaceType
+}
+
+type InterfaceSwitchCache struct {
+	Mask    uintptr                      // mask for index. Must be a power of 2 minus 1
+	Entries [1]InterfaceSwitchCacheEntry // Mask+1 entries total
+}
+
+type InterfaceSwitchCacheEntry struct {
+	// type of source value (a *Type)
+	Typ uintptr
+	// case # to dispatch to
+	Case int
+	// itab to use for resulting case variable (a *runtime.itab)
+	Itab uintptr
+}
+
+const go122InterfaceSwitchCache = true
+
+func UseInterfaceSwitchCache(goarch string) bool {
+	if !go122InterfaceSwitchCache {
+		return false
+	}
+	// We need an atomic load instruction to make the cache multithreaded-safe.
+	// (AtomicLoadPtr needs to be implemented in cmd/compile/internal/ssa/_gen/ARCH.rules.)
+	switch goarch {
+	case "amd64", "arm64", "loong64", "mips", "mipsle", "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x":
+		return true
+	default:
+		return false
+	}
+}
+
+type TypeAssert struct {
+	Cache   *TypeAssertCache
+	Inter   *InterfaceType
+	CanFail bool
+}
+type TypeAssertCache struct {
+	Mask    uintptr
+	Entries [1]TypeAssertCacheEntry
+}
+type TypeAssertCacheEntry struct {
+	// type of source value (a *runtime._type)
+	Typ uintptr
+	// itab to use for result (a *runtime.itab)
+	// nil if CanFail is set and conversion would fail.
+	Itab uintptr
+}
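To make the cache layout above concrete, here is a rough sketch of a direct-mapped lookup over entries of this shape. The slot-selection shift and the slice-backed variant are illustrative simplifications; the real lookup is done by compiler-generated code and the runtime over the trailing one-element arrays declared above:

	package sketch

	// switchCache mirrors the shape of InterfaceSwitchCache, but uses a real
	// slice so ordinary Go indexing is valid; the runtime version ends in a
	// one-element array and is indexed with unsafe pointer arithmetic instead.
	type switchCache struct {
		mask    uintptr            // power of two minus one
		entries []switchCacheEntry // mask+1 entries
	}

	type switchCacheEntry struct {
		typ     uintptr // the source value's type pointer; 0 means the slot is empty
		caseIdx int     // case number to dispatch to
		itab    uintptr // itab for the resulting case variable
	}

	// lookup probes exactly one slot: the type pointer, shifted and masked, picks it.
	// On a miss the caller falls back to a slow path, which may then fill the slot.
	func (c *switchCache) lookup(typ uintptr) (caseIdx int, itab uintptr, ok bool) {
		e := &c.entries[(typ>>3)&c.mask] // illustrative slot choice, not the runtime's hash
		if e.typ != typ {
			return 0, 0, false
		}
		return e.caseIdx, e.itab, true
	}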
diff --git a/src/internal/abi/symtab.go b/src/internal/abi/symtab.go
index bf6ea82..ce1b650 100644
--- a/src/internal/abi/symtab.go
+++ b/src/internal/abi/symtab.go
@@ -44,6 +44,7 @@
 	FuncID_asmcgocall
 	FuncID_asyncPreempt
 	FuncID_cgocallback
+	FuncID_corostart
 	FuncID_debugCallV2
 	FuncID_gcBgMarkWorker
 	FuncID_goexit
diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go
index 4794f5a..659fb7b 100644
--- a/src/internal/abi/type.go
+++ b/src/internal/abi/type.go
@@ -10,13 +10,13 @@
 
 // Type is the runtime representation of a Go type.
 //
-// Type is also referenced implicitly
-// (in the form of expressions involving constants and arch.PtrSize)
-// in cmd/compile/internal/reflectdata/reflect.go
-// and cmd/link/internal/ld/decodesym.go
-// (e.g. data[2*arch.PtrSize+4] references the TFlag field)
-// unsafe.OffsetOf(Type{}.TFlag) cannot be used directly in those
-// places because it varies with cross compilation and experiments.
+// Be careful about accessing this type at build time, as the version
+// of this type in the compiler/linker may not have the same layout
+// as the version in the target binary, due to pointer width
+// differences and any experiments. Use cmd/compile/internal/rttype
+// or the functions in compiletype.go to access this type instead.
+// (TODO: this admonition applies to every type in this package.
+// Put it in some shared location?)
 type Type struct {
 	Size_       uintptr
 	PtrBytes    uintptr // number of (prefix) bytes in the type that can contain pointers
@@ -111,6 +111,12 @@
 	// TFlagRegularMemory means that equal and hash functions can treat
 	// this type as a single region of t.size bytes.
 	TFlagRegularMemory TFlag = 1 << 3
+
+	// TFlagUnrolledBitmap marks special types that are unrolled-bitmap
+	// versions of types with GC programs.
+	// These types need to be deallocated when the underlying object
+	// is freed.
+	TFlagUnrolledBitmap TFlag = 1 << 4
 )
 
 // NameOff is the offset to a name from moduledata.types.  See resolveNameOff in runtime.
@@ -179,7 +185,7 @@
 }
 
 func (t *Type) GcSlice(begin, end uintptr) []byte {
-	return unsafeSliceFor(t.GCData, int(end))[begin:]
+	return unsafe.Slice(t.GCData, int(end))[begin:]
 }
 
 // Method on non-interface type
@@ -660,7 +666,7 @@
 		return ""
 	}
 	i, l := n.ReadVarint(1)
-	return unsafeStringFor(n.DataChecked(1+i, "non-empty string"), l)
+	return unsafe.String(n.DataChecked(1+i, "non-empty string"), l)
 }
 
 // Tag returns the tag string for n, or empty if there is none.
@@ -670,7 +676,7 @@
 	}
 	i, l := n.ReadVarint(1)
 	i2, l2 := n.ReadVarint(1 + i + l)
-	return unsafeStringFor(n.DataChecked(1+i+l+i2, "non-empty string"), l2)
+	return unsafe.String(n.DataChecked(1+i+l+i2, "non-empty string"), l2)
 }
 
 func NewName(n, tag string, exported, embedded bool) Name {
diff --git a/src/internal/abi/unsafestring_go119.go b/src/internal/abi/unsafestring_go119.go
deleted file mode 100644
index a710384..0000000
--- a/src/internal/abi/unsafestring_go119.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.20
-// +build !go1.20
-
-package abi
-
-import "unsafe"
-
-type (
-	stringHeader struct {
-		Data *byte
-		Len  int
-	}
-	sliceHeader struct {
-		Data *byte
-		Len  int
-		Cap  int
-	}
-)
-
-func unsafeStringFor(b *byte, l int) string {
-	h := stringHeader{Data: b, Len: l}
-	return *(*string)(unsafe.Pointer(&h))
-}
-
-func unsafeSliceFor(b *byte, l int) []byte {
-	h := sliceHeader{Data: b, Len: l, Cap: l}
-	return *(*[]byte)(unsafe.Pointer(&h))
-}
diff --git a/src/internal/abi/unsafestring_go120.go b/src/internal/abi/unsafestring_go120.go
deleted file mode 100644
index 93ff8ea..0000000
--- a/src/internal/abi/unsafestring_go120.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-// +build go1.20
-
-package abi
-
-import "unsafe"
-
-func unsafeStringFor(b *byte, l int) string {
-	return unsafe.String(b, l)
-}
-
-func unsafeSliceFor(b *byte, l int) []byte {
-	return unsafe.Slice(b, l)
-}
diff --git a/src/internal/bisect/bisect.go b/src/internal/bisect/bisect.go
index 48c796e..3e5a684 100644
--- a/src/internal/bisect/bisect.go
+++ b/src/internal/bisect/bisect.go
@@ -66,7 +66,7 @@
 //
 //	func ShouldEnable(file string, line int) bool {
 //		if m == nil {
-//			return false
+//			return true
 //		}
 //		h := bisect.Hash(file, line)
 //		if m.ShouldPrint(h) {
@@ -83,12 +83,12 @@
 //
 //	func ShouldEnable(file string, line int) bool {
 //		if m == nil {
-//			return false
+//			return true
 //		}
 //		h := bisect.Hash(file, line)
 //		if m.ShouldPrint(h) {
 //			if m.MarkerOnly() {
-//				bisect.PrintMarker(os.Stderr)
+//				bisect.PrintMarker(os.Stderr, h)
 //			} else {
 //				fmt.Fprintf(os.Stderr, "%v %s:%d\n", bisect.Marker(h), file, line)
 //			}
@@ -482,7 +482,6 @@
 		}
 	}
 	return m.ShouldEnable(h)
-
 }
 
 // Writer is the same interface as io.Writer.
@@ -495,7 +494,7 @@
 // It is appropriate to use when [Matcher.ShouldPrint] and [Matcher.MarkerOnly] both return true.
 func PrintMarker(w Writer, h uint64) error {
 	var buf [50]byte
-	b := AppendMarker(buf[:], h)
+	b := AppendMarker(buf[:0], h)
 	b = append(b, '\n')
 	_, err := w.Write(b)
 	return err
@@ -728,7 +727,7 @@
 
 func fnvUint64(h uint64, x uint64) uint64 {
 	for i := 0; i < 8; i++ {
-		h ^= uint64(x & 0xFF)
+		h ^= x & 0xFF
 		x >>= 8
 		h *= prime64
 	}
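The PrintMarker change above matters because buf[:] is a length-50 slice while buf[:0] is an empty slice over the same array, so appending to the former leaves 50 stray zero bytes ahead of the marker. A standalone sketch of the difference, using a hypothetical appendMarker in place of the real AppendMarker:

	package main

	import "fmt"

	// appendMarker stands in for bisect.AppendMarker: it appends a textual
	// marker for h to b and returns the extended slice.
	func appendMarker(b []byte, h uint64) []byte {
		return append(b, fmt.Sprintf("[bisect-match 0x%x]", h)...)
	}

	func main() {
		var buf [50]byte

		wrong := appendMarker(buf[:], 0x1234)  // length-50 slice: 50 zero bytes precede the marker
		right := appendMarker(buf[:0], 0x1234) // empty slice over the same array: just the marker

		fmt.Println(len(wrong), len(right))
	}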
diff --git a/src/internal/buildcfg/cfg.go b/src/internal/buildcfg/cfg.go
index b97b9c1..8b97a65 100644
--- a/src/internal/buildcfg/cfg.go
+++ b/src/internal/buildcfg/cfg.go
@@ -69,22 +69,61 @@
 	return int(defaultGOAMD64[len("v")] - '0')
 }
 
-func goarm() int {
+type goarmFeatures struct {
+	Version   int
+	SoftFloat bool
+}
+
+func (g goarmFeatures) String() string {
+	armStr := strconv.Itoa(g.Version)
+	if g.SoftFloat {
+		armStr += ",softfloat"
+	} else {
+		armStr += ",hardfloat"
+	}
+	return armStr
+}
+
+func goarm() (g goarmFeatures) {
+	const (
+		softFloatOpt = ",softfloat"
+		hardFloatOpt = ",hardfloat"
+	)
 	def := defaultGOARM
 	if GOOS == "android" && GOARCH == "arm" {
 		// Android arm devices always support GOARM=7.
 		def = "7"
 	}
-	switch v := envOr("GOARM", def); v {
-	case "5":
-		return 5
-	case "6":
-		return 6
-	case "7":
-		return 7
+	v := envOr("GOARM", def)
+
+	floatSpecified := false
+	if strings.HasSuffix(v, softFloatOpt) {
+		g.SoftFloat = true
+		floatSpecified = true
+		v = v[:len(v)-len(softFloatOpt)]
 	}
-	Error = fmt.Errorf("invalid GOARM: must be 5, 6, 7")
-	return int(def[0] - '0')
+	if strings.HasSuffix(v, hardFloatOpt) {
+		floatSpecified = true
+		v = v[:len(v)-len(hardFloatOpt)]
+	}
+
+	switch v {
+	case "5":
+		g.Version = 5
+	case "6":
+		g.Version = 6
+	case "7":
+		g.Version = 7
+	default:
+		Error = fmt.Errorf("invalid GOARM: must start with 5, 6, or 7, and may optionally end in either %q or %q", hardFloatOpt, softFloatOpt)
+		g.Version = int(def[0] - '0')
+	}
+
+	// 5 defaults to softfloat. 6 and 7 default to hardfloat.
+	if !floatSpecified && g.Version == 5 {
+		g.SoftFloat = true
+	}
+	return
 }
 
 func gomips() string {
@@ -182,7 +221,7 @@
 	case "amd64":
 		return "GOAMD64", fmt.Sprintf("v%d", GOAMD64)
 	case "arm":
-		return "GOARM", strconv.Itoa(GOARM)
+		return "GOARM", GOARM.String()
 	case "mips", "mipsle":
 		return "GOMIPS", GOMIPS
 	case "mips64", "mips64le":
@@ -207,7 +246,7 @@
 		return list
 	case "arm":
 		var list []string
-		for i := 5; i <= GOARM; i++ {
+		for i := 5; i <= GOARM.Version; i++ {
 			list = append(list, fmt.Sprintf("%s.%d", GOARCH, i))
 		}
 		return list
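With the goarm change above, GOARM carries both an architecture version and an optional floating-point suffix. A small sketch of the accepted spellings and the features they select, written as a standalone table since the real parser is unexported (the goarmFeatures mirror here is local to the example):

	package main

	import "fmt"

	type goarmFeatures struct {
		Version   int
		SoftFloat bool
	}

	func main() {
		// Accepted GOARM values after this change and the features they select.
		// GOARM=5 defaults to softfloat; GOARM=6 and GOARM=7 default to hardfloat.
		examples := map[string]goarmFeatures{
			"5":           {Version: 5, SoftFloat: true},
			"6":           {Version: 6, SoftFloat: false},
			"7":           {Version: 7, SoftFloat: false},
			"7,softfloat": {Version: 7, SoftFloat: true},
			"6,hardfloat": {Version: 6, SoftFloat: false},
		}
		for v, want := range examples {
			fmt.Printf("GOARM=%-12s -> %+v\n", v, want)
		}
	}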
diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go
index 513070c..a45cfaf 100644
--- a/src/internal/buildcfg/exp.go
+++ b/src/internal/buildcfg/exp.go
@@ -65,12 +65,16 @@
 	case "amd64", "arm64", "ppc64le", "ppc64", "riscv64":
 		regabiAlwaysOn = true
 		regabiSupported = true
+	case "loong64":
+		regabiSupported = true
 	}
 
 	baseline := goexperiment.Flags{
 		RegabiWrappers:   regabiSupported,
 		RegabiArgs:       regabiSupported,
 		CoverageRedesign: true,
+		AllocHeaders:     true,
+		ExecTracer2:      true,
 	}
 
 	// Start with the statically enabled set of experiments.
@@ -129,7 +133,7 @@
 		flags.RegabiWrappers = true
 		flags.RegabiArgs = true
 	}
-	// regabi is only supported on amd64, arm64, riscv64, ppc64 and ppc64le.
+	// regabi is only supported on amd64, arm64, loong64, riscv64, ppc64 and ppc64le.
 	if !regabiSupported {
 		flags.RegabiWrappers = false
 		flags.RegabiArgs = false
diff --git a/src/internal/bytealg/bytealg.go b/src/internal/bytealg/bytealg.go
index 28f2742..1103891 100644
--- a/src/internal/bytealg/bytealg.go
+++ b/src/internal/bytealg/bytealg.go
@@ -24,33 +24,12 @@
 // If MaxLen is not 0, make sure MaxLen >= 4.
 var MaxLen int
 
-// FIXME: the logic of HashStrBytes, HashStrRevBytes, IndexRabinKarpBytes and HashStr, HashStrRev,
-// IndexRabinKarp are exactly the same, except that the types are different. Can we eliminate
-// three of them without causing allocation?
-
 // PrimeRK is the prime base used in Rabin-Karp algorithm.
 const PrimeRK = 16777619
 
-// HashStrBytes returns the hash and the appropriate multiplicative
-// factor for use in Rabin-Karp algorithm.
-func HashStrBytes(sep []byte) (uint32, uint32) {
-	hash := uint32(0)
-	for i := 0; i < len(sep); i++ {
-		hash = hash*PrimeRK + uint32(sep[i])
-	}
-	var pow, sq uint32 = 1, PrimeRK
-	for i := len(sep); i > 0; i >>= 1 {
-		if i&1 != 0 {
-			pow *= sq
-		}
-		sq *= sq
-	}
-	return hash, pow
-}
-
 // HashStr returns the hash and the appropriate multiplicative
 // factor for use in Rabin-Karp algorithm.
-func HashStr(sep string) (uint32, uint32) {
+func HashStr[T string | []byte](sep T) (uint32, uint32) {
 	hash := uint32(0)
 	for i := 0; i < len(sep); i++ {
 		hash = hash*PrimeRK + uint32(sep[i])
@@ -65,26 +44,9 @@
 	return hash, pow
 }
 
-// HashStrRevBytes returns the hash of the reverse of sep and the
-// appropriate multiplicative factor for use in Rabin-Karp algorithm.
-func HashStrRevBytes(sep []byte) (uint32, uint32) {
-	hash := uint32(0)
-	for i := len(sep) - 1; i >= 0; i-- {
-		hash = hash*PrimeRK + uint32(sep[i])
-	}
-	var pow, sq uint32 = 1, PrimeRK
-	for i := len(sep); i > 0; i >>= 1 {
-		if i&1 != 0 {
-			pow *= sq
-		}
-		sq *= sq
-	}
-	return hash, pow
-}
-
 // HashStrRev returns the hash of the reverse of sep and the
 // appropriate multiplicative factor for use in Rabin-Karp algorithm.
-func HashStrRev(sep string) (uint32, uint32) {
+func HashStrRev[T string | []byte](sep T) (uint32, uint32) {
 	hash := uint32(0)
 	for i := len(sep) - 1; i >= 0; i-- {
 		hash = hash*PrimeRK + uint32(sep[i])
@@ -99,17 +61,17 @@
 	return hash, pow
 }
 
-// IndexRabinKarpBytes uses the Rabin-Karp search algorithm to return the index of the
-// first occurrence of substr in s, or -1 if not present.
-func IndexRabinKarpBytes(s, sep []byte) int {
+// IndexRabinKarp uses the Rabin-Karp search algorithm to return the index of the
+// first occurrence of sep in s, or -1 if not present.
+func IndexRabinKarp[T string | []byte](s, sep T) int {
 	// Rabin-Karp search
-	hashsep, pow := HashStrBytes(sep)
+	hashss, pow := HashStr(sep)
 	n := len(sep)
 	var h uint32
 	for i := 0; i < n; i++ {
 		h = h*PrimeRK + uint32(s[i])
 	}
-	if h == hashsep && Equal(s[:n], sep) {
+	if h == hashss && string(s[:n]) == string(sep) {
 		return 0
 	}
 	for i := n; i < len(s); {
@@ -117,33 +79,33 @@
 		h += uint32(s[i])
 		h -= pow * uint32(s[i-n])
 		i++
-		if h == hashsep && Equal(s[i-n:i], sep) {
+		if h == hashss && string(s[i-n:i]) == string(sep) {
 			return i - n
 		}
 	}
 	return -1
 }
 
-// IndexRabinKarp uses the Rabin-Karp search algorithm to return the index of the
-// first occurrence of substr in s, or -1 if not present.
-func IndexRabinKarp(s, substr string) int {
-	// Rabin-Karp search
-	hashss, pow := HashStr(substr)
-	n := len(substr)
+// LastIndexRabinKarp uses the Rabin-Karp search algorithm to return the last index of the
+// occurrence of sep in s, or -1 if not present.
+func LastIndexRabinKarp[T string | []byte](s, sep T) int {
+	// Rabin-Karp search from the end of the string
+	hashss, pow := HashStrRev(sep)
+	n := len(sep)
+	last := len(s) - n
 	var h uint32
-	for i := 0; i < n; i++ {
+	for i := len(s) - 1; i >= last; i-- {
 		h = h*PrimeRK + uint32(s[i])
 	}
-	if h == hashss && s[:n] == substr {
-		return 0
+	if h == hashss && string(s[last:]) == string(sep) {
+		return last
 	}
-	for i := n; i < len(s); {
+	for i := last - 1; i >= 0; i-- {
 		h *= PrimeRK
 		h += uint32(s[i])
-		h -= pow * uint32(s[i-n])
-		i++
-		if h == hashss && s[i-n:i] == substr {
-			return i - n
+		h -= pow * uint32(s[i+n])
+		if h == hashss && string(s[i:i+n]) == string(sep) {
+			return i
 		}
 	}
 	return -1
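The rewrite above collapses the separate string and []byte variants into one generic body constrained to string | []byte. A self-contained sketch of the same pattern, independent of internal/bytealg, with the hash and search logic mirroring the functions above:

	package main

	import "fmt"

	const primeRK = 16777619

	// hashStr mirrors the generic HashStr shape: one body serves string and []byte.
	func hashStr[T string | []byte](sep T) (uint32, uint32) {
		var hash uint32
		for i := 0; i < len(sep); i++ {
			hash = hash*primeRK + uint32(sep[i])
		}
		var pow, sq uint32 = 1, primeRK
		for i := len(sep); i > 0; i >>= 1 {
			if i&1 != 0 {
				pow *= sq
			}
			sq *= sq
		}
		return hash, pow
	}

	// indexRabinKarp returns the index of the first occurrence of sep in s, or -1.
	// The string conversions are used only in comparisons, which the compiler
	// can evaluate without allocating for either instantiation.
	func indexRabinKarp[T string | []byte](s, sep T) int {
		hashsep, pow := hashStr(sep)
		n := len(sep)
		if n > len(s) {
			return -1
		}
		var h uint32
		for i := 0; i < n; i++ {
			h = h*primeRK + uint32(s[i])
		}
		if h == hashsep && string(s[:n]) == string(sep) {
			return 0
		}
		for i := n; i < len(s); i++ {
			h = h*primeRK + uint32(s[i]) - pow*uint32(s[i-n])
			if h == hashsep && string(s[i-n+1:i+1]) == string(sep) {
				return i - n + 1
			}
		}
		return -1
	}

	func main() {
		fmt.Println(indexRabinKarp("chocolate", "late"))             // 5
		fmt.Println(indexRabinKarp([]byte("banana"), []byte("nan"))) // 2
	}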
diff --git a/src/internal/bytealg/compare_loong64.s b/src/internal/bytealg/compare_loong64.s
index c89c5a9..311449a 100644
--- a/src/internal/bytealg/compare_loong64.s
+++ b/src/internal/bytealg/compare_loong64.s
@@ -5,83 +5,102 @@
 #include "go_asm.h"
 #include "textflag.h"
 
-TEXT ·Compare(SB),NOSPLIT,$0-56
-	MOVV	a_base+0(FP), R6
-	MOVV	b_base+24(FP), R7
-	MOVV	a_len+8(FP), R4
-	MOVV	b_len+32(FP), R5
+TEXT ·Compare<ABIInternal>(SB),NOSPLIT,$0-56
+#ifndef GOEXPERIMENT_regabiargs
+	MOVV	a_base+0(FP), R4
+	MOVV	a_len+8(FP), R5
+	MOVV	b_base+24(FP), R6
+	MOVV	b_len+32(FP), R7
 	MOVV	$ret+48(FP), R13
+#else
+	// R4 = a_base
+	// R5 = a_len
+	// R6 = a_cap (unused)
+	// R7 = b_base (want in R6)
+	// R8 = b_len (want in R7)
+	// R9 = b_cap (unused)
+	MOVV	R7, R6
+	MOVV	R8, R7
+#endif
 	JMP	cmpbody<>(SB)
 
-TEXT runtime·cmpstring(SB),NOSPLIT,$0-40
-	MOVV	a_base+0(FP), R6
-	MOVV	b_base+16(FP), R7
-	MOVV	a_len+8(FP), R4
-	MOVV	b_len+24(FP), R5
+TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT,$0-40
+#ifndef GOEXPERIMENT_regabiargs
+	MOVV	a_base+0(FP), R4
+	MOVV	b_base+16(FP), R6
+	MOVV	a_len+8(FP), R5
+	MOVV	b_len+24(FP), R7
 	MOVV	$ret+32(FP), R13
+#endif
+	// R4 = a_base
+	// R5 = a_len
+	// R6 = b_base
+	// R7 = b_len
 	JMP	cmpbody<>(SB)
 
 // On entry:
-// R4 length of a
-// R5 length of b
-// R6 points to the start of a
-// R7 points to the start of b
+// R5 length of a
+// R7 length of b
+// R4 points to the start of a
+// R6 points to the start of b
 // R13 points to the return value (-1/0/1)
 TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0
-	BEQ	R6, R7, samebytes // same start of a and b
+	BEQ	R4, R6, samebytes // same start of a and b
 
-	SGTU	R4, R5, R9
+	SGTU	R5, R7, R9
 	BNE	R0, R9, r2_lt_r1
-	MOVV	R4, R14
+	MOVV	R5, R14
 	JMP	entry
 r2_lt_r1:
-	MOVV	R5, R14	// R14 is min(R4, R5)
+	MOVV	R7, R14	// R14 is min(R5, R7)
 entry:
-	ADDV	R6, R14, R12	// R6 start of a, R14 end of a
-	BEQ	R6, R12, samebytes // length is 0
+	ADDV	R4, R14, R12	// R4 start of a, R12 end of a
+	BEQ	R4, R12, samebytes // length is 0
 
 	SRLV	$4, R14		// R14 is number of chunks
 	BEQ	R0, R14, byte_loop
 
 	// make sure both a and b are aligned.
-	OR	R6, R7, R15
+	OR	R4, R6, R15
 	AND	$7, R15
 	BNE	R0, R15, byte_loop
 
 	PCALIGN	$16
 chunk16_loop:
 	BEQ	R0, R14, byte_loop
-	MOVV	(R6), R8
-	MOVV	(R7), R9
+	MOVV	(R4), R8
+	MOVV	(R6), R9
 	BNE	R8, R9, byte_loop
-	MOVV	8(R6), R16
-	MOVV	8(R7), R17
+	MOVV	8(R4), R16
+	MOVV	8(R6), R17
+	ADDV	$16, R4
 	ADDV	$16, R6
-	ADDV	$16, R7
 	SUBVU	$1, R14
 	BEQ	R16, R17, chunk16_loop
+	SUBV	$8, R4
 	SUBV	$8, R6
-	SUBV	$8, R7
 
 byte_loop:
-	BEQ	R6, R12, samebytes
-	MOVBU	(R6), R8
+	BEQ	R4, R12, samebytes
+	MOVBU	(R4), R8
+	ADDVU	$1, R4
+	MOVBU	(R6), R9
 	ADDVU	$1, R6
-	MOVBU	(R7), R9
-	ADDVU	$1, R7
 	BEQ	R8, R9, byte_loop
 
 byte_cmp:
-	SGTU	R8, R9, R12 // R12 = 1 if (R8 > R9)
-	BNE	R0, R12, ret
-	MOVV	$-1, R12
+	SGTU	R8, R9, R4 // R4 = 1 if (R8 > R9)
+	BNE	R0, R4, ret
+	MOVV	$-1, R4
 	JMP	ret
 
 samebytes:
-	SGTU	R4, R5, R8
-	SGTU	R5, R4, R9
-	SUBV	R9, R8, R12
+	SGTU	R5, R7, R8
+	SGTU	R7, R5, R9
+	SUBV	R9, R8, R4
 
 ret:
-	MOVV	R12, (R13)
+#ifndef GOEXPERIMENT_regabiargs
+	MOVV	R4, (R13)
+#endif
 	RET
diff --git a/src/internal/bytealg/compare_ppc64x.s b/src/internal/bytealg/compare_ppc64x.s
index 63c33ee..2629251 100644
--- a/src/internal/bytealg/compare_ppc64x.s
+++ b/src/internal/bytealg/compare_ppc64x.s
@@ -274,7 +274,16 @@
 	RET
 
 	PCALIGN $16
-cmp8:	// 8 - 15B
+cmp8:	// 8 - 15B (0 - 15B if GOPPC64_power10)
+#ifdef GOPPC64_power10
+	SLD	$56,R9,R9
+	LXVLL	R5,R9,V3	// Load bytes starting from MSB to LSB, unused are zero filled.
+	LXVLL	R6,R9,V4
+	VCMPUQ	V3,V4,CR0	// Compare as a 128b integer.
+	SETB_CR0(R6)
+	ISEL	CR0EQ,R3,R6,R3	// If equal, length determines the return value.
+	RET
+#else
 	CMP	R9,$8
 	BLT	cmp4
 	ANDCC	$7,R9,R9
@@ -330,3 +339,4 @@
 	SETB_CR0(R6)
 	ISEL	CR0EQ,R3,R6,R3
 	RET
+#endif
diff --git a/src/internal/bytealg/compare_riscv64.s b/src/internal/bytealg/compare_riscv64.s
index a4164a2..b1e1f7b 100644
--- a/src/internal/bytealg/compare_riscv64.s
+++ b/src/internal/bytealg/compare_riscv64.s
@@ -53,7 +53,7 @@
 	ADD	$8, X7, X7
 	SUB	X7, X5, X5
 align:
-	ADD	$-1, X7
+	SUB	$1, X7
 	MOVBU	0(X10), X8
 	MOVBU	0(X12), X9
 	BNE	X8, X9, cmp
@@ -79,7 +79,7 @@
 	BNE	X17, X18, cmp8b
 	ADD	$32, X10
 	ADD	$32, X12
-	ADD	$-32, X5
+	SUB	$32, X5
 	BGE	X5, X6, compare32
 	BEQZ	X5, cmp_len
 
@@ -95,7 +95,7 @@
 	BNE	X17, X18, cmp8b
 	ADD	$16, X10
 	ADD	$16, X12
-	ADD	$-16, X5
+	SUB	$16, X5
 	BEQZ	X5, cmp_len
 
 check8_unaligned:
@@ -128,7 +128,7 @@
 	BNE	X29, X30, cmp1h
 	ADD	$8, X10
 	ADD	$8, X12
-	ADD	$-8, X5
+	SUB	$8, X5
 	BGE	X5, X6, compare8_unaligned
 	BEQZ	X5, cmp_len
 
@@ -150,7 +150,7 @@
 	BNE	X19, X20, cmp1d
 	ADD	$4, X10
 	ADD	$4, X12
-	ADD	$-4, X5
+	SUB	$4, X5
 	BGE	X5, X6, compare4_unaligned
 
 compare1:
@@ -160,7 +160,7 @@
 	BNE	X8, X9, cmp
 	ADD	$1, X10
 	ADD	$1, X12
-	ADD	$-1, X5
+	SUB	$1, X5
 	JMP	compare1
 
 	// Compare 8 bytes of memory in X15/X16 that are known to differ.
diff --git a/src/internal/bytealg/count_amd64.s b/src/internal/bytealg/count_amd64.s
index efb17f8..3a8dc36 100644
--- a/src/internal/bytealg/count_amd64.s
+++ b/src/internal/bytealg/count_amd64.s
@@ -51,12 +51,13 @@
 
 	MOVQ SI, DI
 
-	CMPQ BX, $32
-	JA avx2
+	CMPQ BX, $64
+	JAE avx2
 sse:
 	LEAQ	-16(SI)(BX*1), AX	// AX = address of last 16 bytes
 	JMP	sseloopentry
 
+	PCALIGN $16
 sseloop:
 	// Move the next 16-byte chunk of the data into X1.
 	MOVOU	(DI), X1
@@ -161,43 +162,63 @@
 	JNE sse
 #endif
 	MOVD AX, X0
-	LEAQ -32(SI)(BX*1), R11
+	LEAQ -64(SI)(BX*1), R11
+	LEAQ (SI)(BX*1), R13
 	VPBROADCASTB  X0, Y1
+	PCALIGN $32
 avx2_loop:
 	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y4
 	VPCMPEQB Y1, Y2, Y3
+	VPCMPEQB Y1, Y4, Y5
 	VPMOVMSKB Y3, DX
+	VPMOVMSKB Y5, CX
 	POPCNTL DX, DX
+	POPCNTL CX, CX
 	ADDQ DX, R12
-	ADDQ $32, DI
+	ADDQ CX, R12
+	ADDQ $64, DI
 	CMPQ DI, R11
 	JLE avx2_loop
 
 	// If last block is already processed,
 	// skip to the end.
-	CMPQ DI, R11
+	//
+	// This check is NOT an optimization; if the input length is a
+	// multiple of 64, we must not go through the last leg of the
+	// function because the bit shift count passed to SALQ below would
+	// be 64, which is outside of the 0-63 range supported by those
+	// instructions.
+	//
+	// Tests in the bytes and strings packages with input lengths that
+	// are multiples of 64 will break if this condition were removed.
+	CMPQ DI, R13
 	JEQ endavx
 
-	// Load address of the last 32 bytes.
+	// Load address of the last 64 bytes.
 	// There is an overlap with the previous block.
 	MOVQ R11, DI
 	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y4
 	VPCMPEQB Y1, Y2, Y3
+	VPCMPEQB Y1, Y4, Y5
 	VPMOVMSKB Y3, DX
+	VPMOVMSKB Y5, CX
 	// Exit AVX mode.
 	VZEROUPPER
+	SALQ $32, CX
+	ORQ CX, DX
 
-	// Create mask to ignore overlap between previous 32 byte block
+	// Create mask to ignore overlap between previous 64 byte block
 	// and the next.
-	ANDQ $31, BX
-	MOVQ $32,CX
+	ANDQ $63, BX
+	MOVQ $64, CX
 	SUBQ BX, CX
-	MOVQ $0xFFFFFFFF, R10
-	SARQ CL, R10
+	MOVQ $0xFFFFFFFFFFFFFFFF, R10
 	SALQ CL, R10
 	// Apply mask
 	ANDQ R10, DX
-	POPCNTL DX, DX
+	POPCNTQ DX, DX
 	ADDQ DX, R12
 	MOVQ R12, (R8)
 	RET
diff --git a/src/internal/bytealg/count_arm64.s b/src/internal/bytealg/count_arm64.s
index 8cd703d..e616627 100644
--- a/src/internal/bytealg/count_arm64.s
+++ b/src/internal/bytealg/count_arm64.s
@@ -37,6 +37,7 @@
 	// Work with not 32-byte aligned head
 	BIC	$0x1f, R0, R3
 	ADD	$0x20, R3
+	PCALIGN $16
 head_loop:
 	MOVBU.P	1(R0), R5
 	CMP	R5, R1
@@ -60,6 +61,7 @@
 	// Clear the low 64-bit element of V7 and V8
 	VEOR	V7.B8, V7.B8, V7.B8
 	VEOR	V8.B8, V8.B8, V8.B8
+	PCALIGN $16
 	// Count the target byte in 32-byte chunk
 chunk_loop:
 	VLD1.P	(R0), [V1.B16, V2.B16]
diff --git a/src/internal/bytealg/count_ppc64x.s b/src/internal/bytealg/count_ppc64x.s
index 2d2490b..55e02ce 100644
--- a/src/internal/bytealg/count_ppc64x.s
+++ b/src/internal/bytealg/count_ppc64x.s
@@ -8,89 +8,147 @@
 #include "textflag.h"
 
 TEXT ·Count<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
-	// R3 = byte array pointer 
+	// R3 = byte array pointer
 	// R4 = length
-	MOVBZ R6, R5              // R5 = byte
-	BR    countbytebody<>(SB)
+	// R6 = byte to count
+	MTVRD	R6, V1		// move compare byte
+	MOVD	R6, R5
+	VSPLTB	$7, V1, V1	// replicate byte across V1
+	BR	countbytebody<>(SB)
 
 TEXT ·CountString<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-32
 	// R3 = byte array pointer
 	// R4 = length
-	MOVBZ R5, R5              // R5 = byte
-	BR    countbytebody<>(SB)
+	// R5 = byte to count
+	MTVRD	R5, V1		// move compare byte
+	VSPLTB	$7, V1, V1	// replicate byte across V1
+	BR	countbytebody<>(SB)
 
 // R3: addr of string
 // R4: len of string
 // R5: byte to count
+// V1: byte to count, splatted.
 // On exit:
 // R3: return value
-// endianness shouldn't matter since we are just counting and order
-// is irrelevant
 TEXT countbytebody<>(SB), NOSPLIT|NOFRAME, $0-0
-	DCBT (R3)    // Prepare cache line.
-	MOVD R0, R18 // byte count
-	MOVD R3, R19 // Save base address for calculating the index later.
-	MOVD R4, R16
+	MOVD	$0, R18 // byte count
 
-	MOVD   R5, R6
-	RLDIMI $8, R6, $48, R6
-	RLDIMI $16, R6, $32, R6
-	RLDIMI $32, R6, $0, R6  // fill reg with the byte to count
+#ifndef GOPPC64_power10
+	RLDIMI	$8, R5, $48, R5
+	RLDIMI	$16, R5, $32, R5
+	RLDIMI	$32, R5, $0, R5	// fill reg with the byte to count
+#endif
 
-	VSPLTISW $3, V4     // used for shift
-	MTVRD    R6, V1     // move compare byte
-	VSPLTB   $7, V1, V1 // replicate byte across V1
+	CMPU	R4, $32		// Check if it's a small string (<32 bytes)
+	BLT	tail		// Jump to the small string case
+	SRD	$5, R4, R20
+	MOVD	R20, CTR
+	MOVD	$16, R21
+	XXLXOR	V4, V4, V4
+	XXLXOR	V5, V5, V5
 
-	CMPU   R4, $32          // Check if it's a small string (<32 bytes)
-	BLT    tail             // Jump to the small string case
-	XXLXOR VS37, VS37, VS37 // clear V5 (aka VS37) to use as accumulator
-
+	PCALIGN	$16
 cmploop:
-	LXVW4X (R3), VS32 // load bytes from string
+	LXVD2X	(R0)(R3), V0	// Count 32B per loop with two vector accumulators.
+	LXVD2X	(R21)(R3), V2
+	VCMPEQUB V2, V1, V2
+	VCMPEQUB V0, V1, V0
+	VPOPCNTD V2, V2		// A match is 0xFF or 0. Count the bits into doubleword buckets.
+	VPOPCNTD V0, V0
+	VADDUDM	V0, V4, V4	// Accumulate the popcounts. They are 8x the count.
+	VADDUDM	V2, V5, V5	// The count will be fixed up afterwards.
+	ADD	$32, R3
+	BDNZ	cmploop
 
-	// when the bytes match, the corresponding byte contains all 1s
-	VCMPEQUB V1, V0, V2     // compare bytes
-	VPOPCNTD V2, V3         // each double word contains its count
-	VADDUDM  V3, V5, V5     // accumulate bit count in each double word
-	ADD      $16, R3, R3    // increment pointer
-	SUB      $16, R16, R16  // remaining bytes
-	CMP      R16, $16       // at least 16 remaining?
-	BGE      cmploop
-	VSRD     V5, V4, V5     // shift by 3 to convert bits to bytes
-	VSLDOI   $8, V5, V5, V6 // get the double word values from vector
-	MFVSRD   V5, R9
-	MFVSRD   V6, R10
-	ADD      R9, R10, R9
-	ADD      R9, R18, R18
+	VADDUDM	V4, V5, V5
+	MFVSRD	V5, R18
+	VSLDOI	$8, V5, V5, V5
+	MFVSRD	V5, R21
+	ADD	R21, R18, R18
+	ANDCC	$31, R4, R4
+	// Skip the tail processing if no bytes remaining.
+	BEQ	tail_0
 
-tail:
-	CMP R16, $8 // 8 bytes left?
-	BLT small
+#ifdef GOPPC64_power10
+	SRD	$3, R18, R18	// Fix the vector loop count before counting the tail on P10.
 
-	MOVD    (R3), R12     // load 8 bytes
-	CMPB    R12, R6, R17  // compare bytes
-	POPCNTD R17, R15      // bit count
-	SRD     $3, R15, R15  // byte count
-	ADD     R15, R18, R18 // add to byte count
+tail:	// Count the last 0 - 31 bytes.
+	CMP	R4, $16
+	BLE	small_tail_p10
+	LXV	0(R3), V0
+	VCMPEQUB V0, V1, V0
+	VCNTMBB	V0, $1, R14	// Sum the value of bit 0 of each byte of the compare into R14.
+	SRD	$56, R14, R14	// The result of VCNTMBB is shifted. Unshift it.
+	ADD	R14, R18, R18
+	ADD	$16, R3, R3
+	ANDCC	$15, R4, R4
 
-next1:
-	ADD $8, R3, R3
-	SUB $8, R16, R16 // remaining bytes
-	BR  tail
+small_tail_p10:
+	SLD	$56, R4, R6
+	LXVLL	R3, R6, V0
+	VCMPEQUB V0, V1, V0
+	VCLRRB	V0, R4, V0	// If <16B being compared, clear matches of the 16-R4 bytes.
+	VCNTMBB	V0, $1, R14	// Sum the value of bit 0 of each byte of the compare into R14.
+	SRD	$56, R14, R14	// The result of VCNTMBB is shifted. Unshift it.
+	ADD	R14, R18, R3
+	RET
 
-small:
-	CMP   $0, R16   // any remaining
-	BEQ   done
-	MOVBZ (R3), R12 // check each remaining byte
-	CMP   R12, R5
-	BNE   next2
-	ADD   $1, R18
+#else
+tail:	// Count the last 0 - 31 bytes.
+	CMP	R4, $16
+	BLT	tail_8
+	MOVD	(R3), R12
+	MOVD	8(R3), R14
+	CMPB	R12, R5, R12
+	CMPB	R14, R5, R14
+	POPCNTD	R12, R12
+	POPCNTD	R14, R14
+	ADD	R12, R18, R18
+	ADD	R14, R18, R18
+	ADD	$16, R3, R3
+	ADD	$-16, R4, R4
 
-next2:
-	SUB $1, R16
-	ADD $1, R3  // inc address
-	BR  small
+tail_8:	// Count the remaining 0 - 15 bytes.
+	CMP	R4, $8
+	BLT	tail_4
+	MOVD	(R3), R12
+	CMPB	R12, R5, R12
+	POPCNTD	R12, R12
+	ADD	R12, R18, R18
+	ADD	$8, R3, R3
+	ADD	$-8, R4, R4
 
-done:
-	MOVD R18, R3    // return count
+tail_4:	// Count the remaining 0 - 7 bytes.
+	CMP	R4, $4
+	BLT	tail_2
+	MOVWZ	(R3), R12
+	CMPB	R12, R5, R12
+	SLD	$32, R12, R12	// Remove non-participating matches.
+	POPCNTD	R12, R12
+	ADD	R12, R18, R18
+	ADD	$4, R3, R3
+	ADD	$-4, R4, R4
+
+tail_2:	// Count the remaining 0 - 3 bytes.
+	CMP	R4, $2
+	BLT	tail_1
+	MOVHZ	(R3), R12
+	CMPB	R12, R5, R12
+	SLD	$48, R12, R12	// Remove non-participating matches.
+	POPCNTD	R12, R12
+	ADD	R12, R18, R18
+	ADD	$2, R3, R3
+	ADD	$-2, R4, R4
+
+tail_1:	// Count the remaining 0 - 1 bytes.
+	CMP	R4, $1
+	BLT	tail_0
+	MOVBZ	(R3), R12
+	CMPB	R12, R5, R12
+	ANDCC	$0x8, R12, R12
+	ADD	R12, R18, R18
+#endif
+
+tail_0:	// No remaining tail to count.
+	SRD	$3, R18, R3	// Fixup count, it is off by 8x.
 	RET
diff --git a/src/internal/bytealg/count_riscv64.s b/src/internal/bytealg/count_riscv64.s
index d123cbd..3f255cd 100644
--- a/src/internal/bytealg/count_riscv64.s
+++ b/src/internal/bytealg/count_riscv64.s
@@ -14,6 +14,7 @@
 	MOV	ZERO, X14	// count
 	ADD	X10, X11	// end
 
+	PCALIGN	$16
 loop:
 	BEQ	X10, X11, done
 	MOVBU	(X10), X15
@@ -34,6 +35,7 @@
 	MOV	ZERO, X14	// count
 	ADD	X10, X11	// end
 
+	PCALIGN	$16
 loop:
 	BEQ	X10, X11, done
 	MOVBU	(X10), X15
diff --git a/src/internal/bytealg/equal_amd64.s b/src/internal/bytealg/equal_amd64.s
index d178a33..79a0520 100644
--- a/src/internal/bytealg/equal_amd64.s
+++ b/src/internal/bytealg/equal_amd64.s
@@ -52,6 +52,7 @@
 	JE	hugeloop_avx2
 
 	// 64 bytes at a time using xmm registers
+	PCALIGN $16
 hugeloop:
 	CMPQ	BX, $64
 	JB	bigloop
@@ -81,6 +82,7 @@
 #endif
 
 	// 64 bytes at a time using ymm registers
+	PCALIGN $16
 hugeloop_avx2:
 	CMPQ	BX, $64
 	JB	bigloop_avx2
@@ -105,6 +107,7 @@
 	VZEROUPPER
 
 	// 8 bytes at a time using 64-bit register
+	PCALIGN $16
 bigloop:
 	CMPQ	BX, $8
 	JBE	leftover
diff --git a/src/internal/bytealg/equal_loong64.s b/src/internal/bytealg/equal_loong64.s
index ba2a557..a3ad5c1 100644
--- a/src/internal/bytealg/equal_loong64.s
+++ b/src/internal/bytealg/equal_loong64.s
@@ -8,17 +8,21 @@
 #define	REGCTXT	R29
 
 // memequal(a, b unsafe.Pointer, size uintptr) bool
-TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25
+TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-25
+#ifndef GOEXPERIMENT_regabiargs
 	MOVV	a+0(FP), R4
 	MOVV	b+8(FP), R5
-	BEQ	R4, R5, eq
 	MOVV	size+16(FP), R6
+#endif
+	BEQ	R4, R5, eq
 	ADDV	R4, R6, R7
 	PCALIGN	$16
 loop:
 	BNE	R4, R7, test
 	MOVV	$1, R4
+#ifndef GOEXPERIMENT_regabiargs
 	MOVB	R4, ret+24(FP)
+#endif
 	RET
 test:
 	MOVBU	(R4), R9
@@ -27,17 +31,24 @@
 	ADDV	$1, R5
 	BEQ	R9, R10, loop
 
+	MOVB    R0, R4
+#ifndef GOEXPERIMENT_regabiargs
 	MOVB	R0, ret+24(FP)
+#endif
 	RET
 eq:
 	MOVV	$1, R4
+#ifndef GOEXPERIMENT_regabiargs
 	MOVB	R4, ret+24(FP)
+#endif
 	RET
 
 // memequal_varlen(a, b unsafe.Pointer) bool
-TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17
+TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT,$40-17
+#ifndef GOEXPERIMENT_regabiargs
 	MOVV	a+0(FP), R4
 	MOVV	b+8(FP), R5
+#endif
 	BEQ	R4, R5, eq
 	MOVV	8(REGCTXT), R6    // compiler stores size at offset 8 in the closure
 	MOVV	R4, 8(R3)
@@ -45,9 +56,13 @@
 	MOVV	R6, 24(R3)
 	JAL	runtime·memequal(SB)
 	MOVBU	32(R3), R4
+#ifndef GOEXPERIMENT_regabiargs
 	MOVB	R4, ret+16(FP)
+#endif
 	RET
 eq:
 	MOVV	$1, R4
+#ifndef GOEXPERIMENT_regabiargs
 	MOVB	R4, ret+16(FP)
+#endif
 	RET
diff --git a/src/internal/bytealg/equal_riscv64.s b/src/internal/bytealg/equal_riscv64.s
index 503aac5..7f470ce 100644
--- a/src/internal/bytealg/equal_riscv64.s
+++ b/src/internal/bytealg/equal_riscv64.s
@@ -41,7 +41,7 @@
 	ADD	$8, X9, X9
 	SUB	X9, X12, X12
 align:
-	ADD	$-1, X9
+	SUB	$1, X9
 	MOVBU	0(X10), X19
 	MOVBU	0(X11), X20
 	BNE	X19, X20, not_eq
@@ -67,7 +67,7 @@
 	BNE	X16, X17, not_eq
 	ADD	$32, X10
 	ADD	$32, X11
-	ADD	$-32, X12
+	SUB	$32, X12
 	BGE	X12, X9, loop32
 	BEQZ	X12, eq
 
@@ -83,7 +83,7 @@
 	BNE	X21, X22, not_eq
 	ADD	$16, X10
 	ADD	$16, X11
-	ADD	$-16, X12
+	SUB	$16, X12
 	BGE	X12, X23, loop16
 	BEQZ	X12, eq
 
@@ -105,7 +105,7 @@
 	BNE	X16, X17, not_eq
 	ADD	$4, X10
 	ADD	$4, X11
-	ADD	$-4, X12
+	SUB	$4, X12
 	BGE	X12, X23, loop4
 
 loop1:
@@ -115,7 +115,7 @@
 	BNE	X19, X20, not_eq
 	ADD	$1, X10
 	ADD	$1, X11
-	ADD	$-1, X12
+	SUB	$1, X12
 	JMP	loop1
 
 not_eq:
diff --git a/src/internal/bytealg/index_amd64.s b/src/internal/bytealg/index_amd64.s
index 0431491..31730e5 100644
--- a/src/internal/bytealg/index_amd64.s
+++ b/src/internal/bytealg/index_amd64.s
@@ -39,6 +39,7 @@
 	JA   _3_or_more
 	MOVW (R8), R8
 	LEAQ -1(DI)(DX*1), DX
+	PCALIGN $16
 loop2:
 	MOVW (DI), SI
 	CMPW SI,R8
@@ -250,6 +251,7 @@
 	LEAQ -15(DI)(DX*1), SI
 	MOVQ $16, R9
 	SUBQ AX, R9 // We advance by 16-len(sep) each iteration, so precalculate it into R9
+	PCALIGN $16
 loop_sse42:
 	// 0x0c means: unsigned byte compare (bits 0,1 are 00)
 	// for equality (bits 2,3 are 11)
diff --git a/src/internal/bytealg/indexbyte_amd64.s b/src/internal/bytealg/indexbyte_amd64.s
index 1ca70e3..c097dc6 100644
--- a/src/internal/bytealg/indexbyte_amd64.s
+++ b/src/internal/bytealg/indexbyte_amd64.s
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !plan9
+
 #include "go_asm.h"
 #include "textflag.h"
 
@@ -43,6 +45,7 @@
 	LEAQ	-16(SI)(BX*1), AX	// AX = address of last 16 bytes
 	JMP	sseloopentry
 
+	PCALIGN $16
 sseloop:
 	// Move the next 16-byte chunk of the data into X1.
 	MOVOU	(DI), X1
@@ -122,6 +125,8 @@
 	MOVD AX, X0
 	LEAQ -32(SI)(BX*1), R11
 	VPBROADCASTB  X0, Y1
+
+	PCALIGN $32
 avx2_loop:
 	VMOVDQU (DI), Y2
 	VPCMPEQB Y1, Y2, Y3
diff --git a/src/internal/bytealg/indexbyte_generic.go b/src/internal/bytealg/indexbyte_generic.go
index b89d34f..b7fffcf 100644
--- a/src/internal/bytealg/indexbyte_generic.go
+++ b/src/internal/bytealg/indexbyte_generic.go
@@ -2,7 +2,11 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !loong64 && !ppc64 && !ppc64le && !mips && !mipsle && !mips64 && !mips64le && !riscv64 && !wasm
+// Avoid IndexByte and IndexByteString on Plan 9 because it uses
+// SSE instructions on x86 machines, and those are classified as
+// floating point instructions, which are illegal in a note handler.
+
+//go:build !386 && (!amd64 || plan9) && !s390x && !arm && !arm64 && !loong64 && !ppc64 && !ppc64le && !mips && !mipsle && !mips64 && !mips64le && !riscv64 && !wasm
 
 package bytealg
 
diff --git a/src/internal/bytealg/indexbyte_loong64.s b/src/internal/bytealg/indexbyte_loong64.s
index 6049705..03e0660 100644
--- a/src/internal/bytealg/indexbyte_loong64.s
+++ b/src/internal/bytealg/indexbyte_loong64.s
@@ -5,11 +5,18 @@
 #include "go_asm.h"
 #include "textflag.h"
 
-TEXT ·IndexByte(SB),NOSPLIT,$0-40
+TEXT ·IndexByte<ABIInternal>(SB),NOSPLIT,$0-40
+#ifndef GOEXPERIMENT_regabiargs
 	MOVV	b_base+0(FP), R4
 	MOVV	b_len+8(FP), R5
-	MOVBU	c+24(FP), R6	// byte to find
-	MOVV	R4, R7		// store base for later
+	MOVBU	c+24(FP), R7	// byte to find
+#endif
+	// R4 = b_base
+	// R5 = b_len
+	// R6 = b_cap (unused)
+	// R7 = byte to find
+	AND	$0xff, R7
+	MOVV	R4, R6		// store base for later
 	ADDV	R4, R5		// end
 	ADDV	$-1, R4
 
@@ -18,21 +25,30 @@
 	ADDV	$1, R4
 	BEQ	R4, R5, notfound
 	MOVBU	(R4), R8
-	BNE	R6, R8, loop
+	BNE	R7, R8, loop
 
-	SUBV	R7, R4		// remove base
+	SUBV	R6, R4		// remove base
+#ifndef GOEXPERIMENT_regabiargs
 	MOVV	R4, ret+32(FP)
+#endif
 	RET
 
 notfound:
 	MOVV	$-1, R4
+#ifndef GOEXPERIMENT_regabiargs
 	MOVV	R4, ret+32(FP)
+#endif
 	RET
 
-TEXT ·IndexByteString(SB),NOSPLIT,$0-32
+TEXT ·IndexByteString<ABIInternal>(SB),NOSPLIT,$0-32
+#ifndef GOEXPERIMENT_regabiargs
 	MOVV	s_base+0(FP), R4
 	MOVV	s_len+8(FP), R5
 	MOVBU	c+16(FP), R6	// byte to find
+#endif
+	// R4 = s_base
+	// R5 = s_len
+	// R6 = byte to find
 	MOVV	R4, R7		// store base for later
 	ADDV	R4, R5		// end
 	ADDV	$-1, R4
@@ -45,10 +61,14 @@
 	BNE	R6, R8, loop
 
 	SUBV	R7, R4		// remove base
+#ifndef GOEXPERIMENT_regabiargs
 	MOVV	R4, ret+24(FP)
+#endif
 	RET
 
 notfound:
 	MOVV	$-1, R4
+#ifndef GOEXPERIMENT_regabiargs
 	MOVV	R4, ret+24(FP)
+#endif
 	RET
diff --git a/src/internal/bytealg/indexbyte_native.go b/src/internal/bytealg/indexbyte_native.go
index c5bb2df..8e46c31 100644
--- a/src/internal/bytealg/indexbyte_native.go
+++ b/src/internal/bytealg/indexbyte_native.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build 386 || amd64 || s390x || arm || arm64 || loong64 || ppc64 || ppc64le || mips || mipsle || mips64 || mips64le || riscv64 || wasm
+//go:build 386 || (amd64 && !plan9) || s390x || arm || arm64 || loong64 || ppc64 || ppc64le || mips || mipsle || mips64 || mips64le || riscv64 || wasm
 
 package bytealg
 
diff --git a/src/internal/bytealg/indexbyte_riscv64.s b/src/internal/bytealg/indexbyte_riscv64.s
index 8be78ed..de00983 100644
--- a/src/internal/bytealg/indexbyte_riscv64.s
+++ b/src/internal/bytealg/indexbyte_riscv64.s
@@ -13,7 +13,7 @@
 	AND	$0xff, X13
 	MOV	X10, X12		// store base for later
 	ADD	X10, X11		// end
-	ADD	$-1, X10
+	SUB	$1, X10
 
 loop:
 	ADD	$1, X10
@@ -35,7 +35,7 @@
 	AND	$0xff, X12
 	MOV	X10, X13		// store base for later
 	ADD	X10, X11		// end
-	ADD	$-1, X10
+	SUB	$1, X10
 
 loop:
 	ADD	$1, X10
diff --git a/src/internal/bytealg/lastindexbyte_generic.go b/src/internal/bytealg/lastindexbyte_generic.go
new file mode 100644
index 0000000..b905f53
--- /dev/null
+++ b/src/internal/bytealg/lastindexbyte_generic.go
@@ -0,0 +1,23 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytealg
+
+func LastIndexByte(s []byte, c byte) int {
+	for i := len(s) - 1; i >= 0; i-- {
+		if s[i] == c {
+			return i
+		}
+	}
+	return -1
+}
+
+func LastIndexByteString(s string, c byte) int {
+	for i := len(s) - 1; i >= 0; i-- {
+		if s[i] == c {
+			return i
+		}
+	}
+	return -1
+}
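
A quick illustration of the backward scan above (the helper is copied verbatim so the example is self-contained; note that it reports the last occurrence, unlike the forward IndexByte variants):

	package main

	import "fmt"

	// LastIndexByte is copied from the file added above.
	func LastIndexByte(s []byte, c byte) int {
		for i := len(s) - 1; i >= 0; i-- {
			if s[i] == c {
				return i
			}
		}
		return -1
	}

	func main() {
		fmt.Println(LastIndexByte([]byte("gopher"), 'p')) // 2
		fmt.Println(LastIndexByte([]byte("aba"), 'a'))    // 2 (last, not first, occurrence)
		fmt.Println(LastIndexByte([]byte("aba"), 'z'))    // -1
	}
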
diff --git a/src/internal/chacha8rand/chacha8.go b/src/internal/chacha8rand/chacha8.go
new file mode 100644
index 0000000..ce55c07
--- /dev/null
+++ b/src/internal/chacha8rand/chacha8.go
@@ -0,0 +1,197 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package chacha8rand implements a pseudorandom generator
+// based on ChaCha8. It is used by both runtime and math/rand/v2
+// and must have no dependencies.
+package chacha8rand
+
+const (
+	ctrInc = 4  // increment counter by 4 between block calls
+	ctrMax = 16 // reseed when counter reaches 16
+	chunk  = 32 // each chunk produced by block is 32 uint64s
+	reseed = 4  // reseed with 4 words
+)
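
These constants encode the cadence spelled out in chacha8_generic.go further down: four block calls per seed (counters 0, 4, 8, 12), with the final four words of the last block withheld to become the next seed. A standalone check of that arithmetic, using the same values:

	package main

	import "fmt"

	func main() {
		const (
			ctrInc = 4  // counter increment per block call
			ctrMax = 16 // reseed once the counter reaches 16
			chunk  = 32 // uint64s produced per block call
			reseed = 4  // uint64s withheld to form the next seed
		)
		calls := ctrMax / ctrInc       // 4 block calls per seed era
		out := calls*chunk - reseed    // 124 usable uint64s per seed era
		fmt.Println(calls, out, out*8) // 4 124 992 — the 992 bytes noted in chacha8_generic.go
	}
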
+
+// block is the chacha8rand block function.
+func block(seed *[4]uint64, blocks *[32]uint64, counter uint32)
+
+// A State holds the state for a single random generator.
+// It must be used from one goroutine at a time.
+// If used by multiple goroutines at a time, the goroutines
+// may see the same random values, but the code will not
+// crash or cause out-of-bounds memory accesses.
+type State struct {
+	buf  [32]uint64
+	seed [4]uint64
+	i    uint32
+	n    uint32
+	c    uint32
+}
+
+// Next returns the next random value, along with a boolean
+// indicating whether one was available.
+// If one is not available, the caller should call Refill
+// and then repeat the call to Next.
+//
+// Next is //go:nosplit to allow its use in the runtime
+// with per-m data without holding the per-m lock.
+//go:nosplit
+func (s *State) Next() (uint64, bool) {
+	i := s.i
+	if i >= s.n {
+		return 0, false
+	}
+	s.i = i + 1
+	return s.buf[i&31], true // i&31 eliminates bounds check
+}
+
+// Init seeds the State with the given seed value.
+func (s *State) Init(seed [32]byte) {
+	s.Init64([4]uint64{
+		leUint64(seed[0*8:]),
+		leUint64(seed[1*8:]),
+		leUint64(seed[2*8:]),
+		leUint64(seed[3*8:]),
+	})
+}
+
+// Init64 seeds the state with the given seed value.
+func (s *State) Init64(seed [4]uint64) {
+	s.seed = seed
+	block(&s.seed, &s.buf, 0)
+	s.c = 0
+	s.i = 0
+	s.n = chunk
+}
+
+// Refill refills the state with more random values.
+// After a call to Refill, an immediate call to Next will succeed
+// (unless multiple goroutines are incorrectly sharing a state).
+func (s *State) Refill() {
+	s.c += ctrInc
+	if s.c == ctrMax {
+		// Reseed with generated uint64s for forward secrecy.
+		// Normally this is done immediately after computing a block,
+		// but we do it immediately before computing the next block,
+		// to allow a much smaller serialized state (just the seed plus offset).
+		// This gives a delayed benefit for the forward secrecy
+		// (you can reconstruct the recent past given a memory dump),
+		// which we deem acceptable in exchange for the reduced size.
+		s.seed[0] = s.buf[len(s.buf)-reseed+0]
+		s.seed[1] = s.buf[len(s.buf)-reseed+1]
+		s.seed[2] = s.buf[len(s.buf)-reseed+2]
+		s.seed[3] = s.buf[len(s.buf)-reseed+3]
+		s.c = 0
+	}
+	block(&s.seed, &s.buf, s.c)
+	s.i = 0
+	s.n = uint32(len(s.buf))
+	if s.c == ctrMax-ctrInc {
+		s.n = uint32(len(s.buf)) - reseed
+	}
+}
+
+// Reseed reseeds the state with new random values.
+// After a call to Reseed, any previously returned random values
+// have been erased from the memory of the state and cannot be
+// recovered.
+func (s *State) Reseed() {
+	var seed [4]uint64
+	for i := range seed {
+		for {
+			x, ok := s.Next()
+			if ok {
+				seed[i] = x
+				break
+			}
+			s.Refill()
+		}
+	}
+	s.Init64(seed)
+}
+
+// Marshal marshals the state into a byte slice.
+// Marshal and Unmarshal are functions, not methods,
+// so that they will not be linked into the runtime
+// when it uses the State struct, since the runtime
+// does not need these.
+func Marshal(s *State) []byte {
+	data := make([]byte, 6*8)
+	copy(data, "chacha8:")
+	used := (s.c/ctrInc)*chunk + s.i
+	bePutUint64(data[1*8:], uint64(used))
+	for i, seed := range s.seed {
+		lePutUint64(data[(2+i)*8:], seed)
+	}
+	return data
+}
+
+type errUnmarshalChaCha8 struct{}
+
+func (*errUnmarshalChaCha8) Error() string {
+	return "invalid ChaCha8 encoding"
+}
+
+// Unmarshal unmarshals the state from a byte slice.
+func Unmarshal(s *State, data []byte) error {
+	if len(data) != 6*8 || string(data[:8]) != "chacha8:" {
+		return new(errUnmarshalChaCha8)
+	}
+	used := beUint64(data[1*8:])
+	if used > (ctrMax/ctrInc)*chunk-reseed {
+		return new(errUnmarshalChaCha8)
+	}
+	for i := range s.seed {
+		s.seed[i] = leUint64(data[(2+i)*8:])
+	}
+	s.c = ctrInc * (uint32(used) / chunk)
+	block(&s.seed, &s.buf, s.c)
+	s.i = uint32(used) % chunk
+	s.n = chunk
+	if s.c == ctrMax-ctrInc {
+		s.n = chunk - reseed
+	}
+	return nil
+}
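
Marshal and Unmarshal above pin down a 48-byte encoding: an 8-byte "chacha8:" tag, a big-endian count of values already consumed, and the four seed words in little-endian order. A sketch of reading that layout with encoding/binary (the decode helper and its field names are illustrative, not upstream API):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// decoded mirrors what Unmarshal recovers from the 48-byte encoding.
	type decoded struct {
		used uint64    // values already consumed in the current seed era
		seed [4]uint64 // current seed words
	}

	func decode(data []byte) (decoded, error) {
		var d decoded
		if len(data) != 6*8 || string(data[:8]) != "chacha8:" {
			return d, fmt.Errorf("invalid ChaCha8 encoding")
		}
		d.used = binary.BigEndian.Uint64(data[1*8:])
		for i := range d.seed {
			d.seed[i] = binary.LittleEndian.Uint64(data[(2+i)*8:])
		}
		return d, nil
	}

	func main() {
		data := make([]byte, 6*8)
		copy(data, "chacha8:") // a freshly initialized state: used == 0, zero seed
		fmt.Println(decode(data))
	}
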
+
+// binary.bigEndian.Uint64, copied to avoid dependency
+func beUint64(b []byte) uint64 {
+	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+		uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+// binary.bigEndian.PutUint64, copied to avoid dependency
+func bePutUint64(b []byte, v uint64) {
+	_ = b[7] // early bounds check to guarantee safety of writes below
+	b[0] = byte(v >> 56)
+	b[1] = byte(v >> 48)
+	b[2] = byte(v >> 40)
+	b[3] = byte(v >> 32)
+	b[4] = byte(v >> 24)
+	b[5] = byte(v >> 16)
+	b[6] = byte(v >> 8)
+	b[7] = byte(v)
+}
+
+// binary.littleEndian.Uint64, copied to avoid dependency
+func leUint64(b []byte) uint64 {
+	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// binary.littleEndian.PutUint64, copied to avoid dependency
+func lePutUint64(b []byte, v uint64) {
+	_ = b[7] // early bounds check to guarantee safety of writes below
+	b[0] = byte(v)
+	b[1] = byte(v >> 8)
+	b[2] = byte(v >> 16)
+	b[3] = byte(v >> 24)
+	b[4] = byte(v >> 32)
+	b[5] = byte(v >> 40)
+	b[6] = byte(v >> 48)
+	b[7] = byte(v >> 56)
+}
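
Putting the API together, callers drive a State with a Next/Refill loop, the same pattern the tests later in this patch use. internal/chacha8rand is importable only from within the standard library, so the sketch below is illustrative:

	package example

	import "internal/chacha8rand" // std-internal; shown for illustration only

	// next draws one value, refilling the 32-value buffer whenever it runs dry.
	func next(s *chacha8rand.State) uint64 {
		for {
			if x, ok := s.Next(); ok {
				return x
			}
			s.Refill()
		}
	}

	// use seeds a State, draws a couple of values, then erases its history.
	func use(seed [32]byte) (a, b uint64) {
		var s chacha8rand.State
		s.Init(seed)
		a, b = next(&s), next(&s)
		s.Reseed() // earlier output can no longer be reconstructed from s
		return a, b
	}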
diff --git a/src/internal/chacha8rand/chacha8_amd64.s b/src/internal/chacha8rand/chacha8_amd64.s
new file mode 100644
index 0000000..b56deb3
--- /dev/null
+++ b/src/internal/chacha8rand/chacha8_amd64.s
@@ -0,0 +1,174 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// ChaCha8 is ChaCha with 8 rounds.
+// See https://cr.yp.to/chacha/chacha-20080128.pdf.
+// See chacha8_generic.go for additional details.
+
+// ROL rotates the uint32s in register R left by N bits, using temporary T.
+#define ROL(N, R, T) \
+	MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R
+
+// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed.
+#ifdef GOAMD64_v2
+#define ROL16(R, T) PSHUFB ·rol16<>(SB), R
+#else
+#define ROL16(R, T) ROL(16, R, T)
+#endif
+
+// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed.
+#ifdef GOAMD64_v2
+#define ROL8(R, T) PSHUFB ·rol8<>(SB), R
+#else
+#define ROL8(R, T) ROL(8, R, T)
+#endif
+
+// QR is the ChaCha quarter-round on A, B, C, and D. T is an available temporary.
+#define QR(A, B, C, D, T) \
+	PADDD B, A; PXOR A, D; ROL16(D, T); \
+	PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B; \
+	PADDD B, A; PXOR A, D; ROL8(D, T); \
+	PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B
+
+// REPLREG replicates the register R into 4 uint32s in XR.
+#define REPLREG(R, XR) \
+	MOVQ R, XR; \
+	PSHUFD $0, XR, XR
+
+// REPL replicates the uint32 constant val into 4 uint32s in XR. It smashes DX.
+#define REPL(val, XR) \
+	MOVL $val, DX; \
+	REPLREG(DX, XR)
+
+// SEED copies the off'th uint32 of the seed into the register XR,
+// replicating it into all four stripes of the register.
+#define SEED(off, reg, XR) \
+	MOVL (4*off)(AX), reg; \
+	REPLREG(reg, XR) \
+
+// block runs 4 ChaCha8 block transformations in the four stripes of the X registers.
+
+// func block(seed *[8]uint32, blocks *[16][4]uint32, counter uint32)
+TEXT ·block<ABIInternal>(SB), NOSPLIT, $16
+	// seed in AX
+	// blocks in BX
+	// counter in CX
+
+	// Load initial constants into top row.
+	REPL(0x61707865, X0)
+	REPL(0x3320646e, X1)
+	REPL(0x79622d32, X2)
+	REPL(0x6b206574, X3)
+
+	// Load counter into bottom left cell.
+	// Each stripe gets a different counter: 0, 1, 2, 3.
+	// (PINSRD is not available in GOAMD64_v1,
+	// so just do it in memory on all systems.
+	// This is not on the critical path.)
+	MOVL CX, 0(SP)
+	INCL CX
+	MOVL CX, 4(SP)
+	INCL CX
+	MOVL CX, 8(SP)
+	INCL CX
+	MOVL CX, 12(SP)
+	MOVOU 0(SP), X12
+
+	// Load seed words into next two rows and into DI, SI, R8..R13
+	SEED(0, DI, X4)
+	SEED(1, SI, X5)
+	SEED(2, R8, X6)
+	SEED(3, R9, X7)
+	SEED(4, R10, X8)
+	SEED(5, R11, X9)
+	SEED(6, R12, X10)
+	SEED(7, R13, X11)
+
+	// Zeros for remaining two matrix entries.
+	// We have just enough XMM registers to hold the state,
+	// without one for the temporary, so we flush and restore
+	// some values to and from memory to provide a temporary.
+	// The initial temporary is X15, so zero its memory instead
+	// of X15 itself.
+	MOVL $0, DX
+	MOVQ DX, X13
+	MOVQ DX, X14
+	MOVOU X14, (15*16)(BX)
+
+	// 4 iterations. Each iteration is 8 quarter-rounds.
+	MOVL $4, DX
+loop:
+	QR(X0, X4, X8, X12, X15)
+	MOVOU X4, (4*16)(BX) // save X4
+	QR(X1, X5, X9, X13, X15)
+	MOVOU (15*16)(BX), X15 // reload X15; temp now X4
+	QR(X2, X6, X10, X14, X4)
+	QR(X3, X7, X11, X15, X4)
+
+	QR(X0, X5, X10, X15, X4)
+	MOVOU X15, (15*16)(BX) // save X15
+	QR(X1, X6, X11, X12, X4)
+	MOVOU (4*16)(BX), X4  // reload X4; temp now X15
+	QR(X2, X7, X8, X13, X15)
+	QR(X3, X4, X9, X14, X15)
+
+	DECL DX
+	JNZ loop
+
+	// Store interlaced blocks back to output buffer,
+	// adding original seed along the way.
+
+	// First the top and bottom rows.
+	MOVOU X0, (0*16)(BX)
+	MOVOU X1, (1*16)(BX)
+	MOVOU X2, (2*16)(BX)
+	MOVOU X3, (3*16)(BX)
+	MOVOU X12, (12*16)(BX)
+	MOVOU X13, (13*16)(BX)
+	MOVOU X14, (14*16)(BX)
+	// X15 has already been stored.
+
+	// Now we have X0-X3, X12-X15 available for temporaries.
+	// Add seed rows back to output. We left seed in DI, SI, R8..R13 above.
+	REPLREG(DI, X0)
+	REPLREG(SI, X1)
+	REPLREG(R8, X2)
+	REPLREG(R9, X3)
+	REPLREG(R10, X12)
+	REPLREG(R11, X13)
+	REPLREG(R12, X14)
+	REPLREG(R13, X15)
+	PADDD X0, X4
+	PADDD X1, X5
+	PADDD X2, X6
+	PADDD X3, X7
+	PADDD X12, X8
+	PADDD X13, X9
+	PADDD X14, X10
+	PADDD X15, X11
+	MOVOU X4, (4*16)(BX)
+	MOVOU X5, (5*16)(BX)
+	MOVOU X6, (6*16)(BX)
+	MOVOU X7, (7*16)(BX)
+	MOVOU X8, (8*16)(BX)
+	MOVOU X9, (9*16)(BX)
+	MOVOU X10, (10*16)(BX)
+	MOVOU X11, (11*16)(BX)
+
+	MOVL $0, AX
+	MOVQ AX, X15 // must be 0 on return
+
+	RET
+
+// rotate left 16 indexes for PSHUFB
+GLOBL ·rol16<>(SB), NOPTR|RODATA, $16
+DATA ·rol16<>+0(SB)/8, $0x0504070601000302
+DATA ·rol16<>+8(SB)/8, $0x0D0C0F0E09080B0A
+
+// rotate left 8 indexes for PSHUFB
+GLOBL ·rol8<>(SB), NOPTR|RODATA, $16
+DATA ·rol8<>+0(SB)/8, $0x0605040702010003
+DATA ·rol8<>+8(SB)/8, $0x0E0D0C0F0A09080B
diff --git a/src/internal/chacha8rand/chacha8_arm64.s b/src/internal/chacha8rand/chacha8_arm64.s
new file mode 100644
index 0000000..18e34dd
--- /dev/null
+++ b/src/internal/chacha8rand/chacha8_arm64.s
@@ -0,0 +1,104 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// QR is the ChaCha quarter-round on A, B, C, and D.
+// V30 is used as a temporary, and V31 is assumed to
+// hold the index table for rotate left 8.
+#define QR(A, B, C, D) \
+	VADD A.S4, B.S4, A.S4; VEOR D.B16, A.B16, D.B16;   VREV32 D.H8, D.H8; \
+	VADD C.S4, D.S4, C.S4; VEOR B.B16, C.B16, V30.B16; VSHL $12, V30.S4, B.S4; VSRI $20, V30.S4, B.S4 \
+	VADD A.S4, B.S4, A.S4; VEOR D.B16, A.B16, D.B16;   VTBL V31.B16, [D.B16], D.B16; \
+	VADD C.S4, D.S4, C.S4; VEOR B.B16, C.B16, V30.B16; VSHL  $7, V30.S4, B.S4; VSRI $25, V30.S4, B.S4
+
+// block runs 4 ChaCha8 block transformations in the four stripes of the V registers.
+
+// func block(seed *[8]uint32, blocks *[4][16]uint32, counter uint32)
+TEXT ·block<ABIInternal>(SB), NOSPLIT, $16
+	// seed in R0
+	// blocks in R1
+	// counter in R2
+
+	// Load initial constants into top row.
+	MOVD $·chachaConst(SB), R10
+	VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
+
+	// Load increment and rotate 8 constants into V30, V31.
+	MOVD $·chachaIncRot(SB), R11
+	VLD1 (R11), [V30.S4, V31.S4]
+
+	VLD4R.P 16(R0), [V4.S4, V5.S4, V6.S4, V7.S4]
+	VLD4R.P 16(R0), [V8.S4, V9.S4, V10.S4, V11.S4]
+
+	// store counter to memory to replicate its uint32 halves back out
+	MOVW R2, 0(RSP)
+	VLD1R 0(RSP), [V12.S4]
+
+	// Add 0, 1, 2, 3 to counter stripes.
+	VADD V30.S4, V12.S4, V12.S4
+
+	// Zeros for remaining two matrix entries.
+	VEOR V13.B16, V13.B16, V13.B16
+	VEOR V14.B16, V14.B16, V14.B16
+	VEOR V15.B16, V15.B16, V15.B16
+
+	// Save seed state for adding back later.
+	VMOV V4.B16, V20.B16
+	VMOV V5.B16, V21.B16
+	VMOV V6.B16, V22.B16
+	VMOV V7.B16, V23.B16
+	VMOV V8.B16, V24.B16
+	VMOV V9.B16, V25.B16
+	VMOV V10.B16, V26.B16
+	VMOV V11.B16, V27.B16
+
+	// 4 iterations. Each iteration is 8 quarter-rounds.
+	MOVD $4, R0
+loop:
+	QR(V0, V4, V8, V12)
+	QR(V1, V5, V9, V13)
+	QR(V2, V6, V10, V14)
+	QR(V3, V7, V11, V15)
+
+	QR(V0, V5, V10, V15)
+	QR(V1, V6, V11, V12)
+	QR(V2, V7, V8, V13)
+	QR(V3, V4, V9, V14)
+
+	SUB $1, R0
+	CBNZ R0, loop
+
+	// Add seed back.
+	VADD V4.S4, V20.S4, V4.S4
+	VADD V5.S4, V21.S4, V5.S4
+	VADD V6.S4, V22.S4, V6.S4
+	VADD V7.S4, V23.S4, V7.S4
+	VADD V8.S4, V24.S4, V8.S4
+	VADD V9.S4, V25.S4, V9.S4
+	VADD V10.S4, V26.S4, V10.S4
+	VADD V11.S4, V27.S4, V11.S4
+
+	// Store interlaced blocks back to output buffer.
+	VST1.P [ V0.B16,  V1.B16,  V2.B16,  V3.B16], 64(R1)
+	VST1.P [ V4.B16,  V5.B16,  V6.B16,  V7.B16], 64(R1)
+	VST1.P [ V8.B16,  V9.B16, V10.B16, V11.B16], 64(R1)
+	VST1.P [V12.B16, V13.B16, V14.B16, V15.B16], 64(R1)
+	RET
+
+GLOBL	·chachaConst(SB), NOPTR|RODATA, $32
+DATA	·chachaConst+0x00(SB)/4, $0x61707865
+DATA	·chachaConst+0x04(SB)/4, $0x3320646e
+DATA	·chachaConst+0x08(SB)/4, $0x79622d32
+DATA	·chachaConst+0x0c(SB)/4, $0x6b206574
+
+GLOBL	·chachaIncRot(SB), NOPTR|RODATA, $32
+DATA	·chachaIncRot+0x00(SB)/4, $0x00000000
+DATA	·chachaIncRot+0x04(SB)/4, $0x00000001
+DATA	·chachaIncRot+0x08(SB)/4, $0x00000002
+DATA	·chachaIncRot+0x0c(SB)/4, $0x00000003
+DATA	·chachaIncRot+0x10(SB)/4, $0x02010003
+DATA	·chachaIncRot+0x14(SB)/4, $0x06050407
+DATA	·chachaIncRot+0x18(SB)/4, $0x0A09080B
+DATA	·chachaIncRot+0x1c(SB)/4, $0x0E0D0C0F
diff --git a/src/internal/chacha8rand/chacha8_generic.go b/src/internal/chacha8rand/chacha8_generic.go
new file mode 100644
index 0000000..2a0f5cd
--- /dev/null
+++ b/src/internal/chacha8rand/chacha8_generic.go
@@ -0,0 +1,235 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// ChaCha8 is ChaCha with 8 rounds.
+// See https://cr.yp.to/chacha/chacha-20080128.pdf.
+//
+// ChaCha8 operates on a 4x4 matrix of uint32 values, initially set to:
+//
+//	const1 const2 const3 const4
+//	seed   seed   seed   seed
+//	seed   seed   seed   seed
+//	counter64     0      0
+//
+// We use the same constants as ChaCha20 does, a random seed,
+// and a counter. Running ChaCha8 on this input produces
+// a 4x4 matrix of pseudo-random values with as much entropy
+// as the seed.
+//
+// Given SIMD registers that can hold N uint32s, it is possible
+// to run N ChaCha8 block transformations in parallel by filling
+// the first register with the N copies of const1, the second
+// with N copies of const2, and so on, and then running the operations.
+//
+// Each iteration of ChaCha8Rand operates over 32 bytes of input and
+// produces 992 bytes of RNG output, plus 32 bytes of input for the next
+// iteration.
+//
+// The 32 bytes of input are used as a ChaCha8 key, with a zero nonce, to
+// produce 1024 bytes of output (16 blocks, with counters 0 to 15).
+// First, for each block, the values 0x61707865, 0x3320646e, 0x79622d32,
+// 0x6b206574 are subtracted from the 32-bit little-endian words at
+// position 0, 1, 2, and 3 respectively, and an increasing counter
+// starting at zero is subtracted from each word at position 12. Then,
+// this stream is permuted such that for each sequence of four blocks,
+// first we output the first four bytes of each block, then the next four
+// bytes of each block, and so on. Finally, the last 32 bytes of output
+// are used as the input of the next iteration, and the remaining 992
+// bytes are the RNG output.
+//
+// See https://c2sp.org/chacha8rand for additional details.
+//
+// Normal ChaCha20 implementations for encryption use this same
+// parallelism but then have to deinterlace the results so that
+// it appears the blocks were generated separately. For the purposes
+// of generating random numbers, the interlacing is fine.
+// We are simply locked in to preserving the 4-way interlacing
+// in any future optimizations.
+package chacha8rand
+
+import (
+	"internal/goarch"
+	"unsafe"
+)
+
+// setup sets up 4 ChaCha8 blocks in b32 with the counter and seed.
+// Note that b32 is [16][4]uint32 not [4][16]uint32: the blocks are interlaced
+// the same way they would be in a 4-way SIMD implementation.
+func setup(seed *[4]uint64, b32 *[16][4]uint32, counter uint32) {
+	// Convert to uint64 to do half as many stores to memory.
+	b := (*[16][2]uint64)(unsafe.Pointer(b32))
+
+	// Constants; same as in ChaCha20: "expand 32-byte k"
+	b[0][0] = 0x61707865_61707865
+	b[0][1] = 0x61707865_61707865
+
+	b[1][0] = 0x3320646e_3320646e
+	b[1][1] = 0x3320646e_3320646e
+
+	b[2][0] = 0x79622d32_79622d32
+	b[2][1] = 0x79622d32_79622d32
+
+	b[3][0] = 0x6b206574_6b206574
+	b[3][1] = 0x6b206574_6b206574
+
+	// Seed values.
+	var x64 uint64
+	var x uint32
+
+	x = uint32(seed[0])
+	x64 = uint64(x)<<32 | uint64(x)
+	b[4][0] = x64
+	b[4][1] = x64
+
+	x = uint32(seed[0] >> 32)
+	x64 = uint64(x)<<32 | uint64(x)
+	b[5][0] = x64
+	b[5][1] = x64
+
+	x = uint32(seed[1])
+	x64 = uint64(x)<<32 | uint64(x)
+	b[6][0] = x64
+	b[6][1] = x64
+
+	x = uint32(seed[1] >> 32)
+	x64 = uint64(x)<<32 | uint64(x)
+	b[7][0] = x64
+	b[7][1] = x64
+
+	x = uint32(seed[2])
+	x64 = uint64(x)<<32 | uint64(x)
+	b[8][0] = x64
+	b[8][1] = x64
+
+	x = uint32(seed[2] >> 32)
+	x64 = uint64(x)<<32 | uint64(x)
+	b[9][0] = x64
+	b[9][1] = x64
+
+	x = uint32(seed[3])
+	x64 = uint64(x)<<32 | uint64(x)
+	b[10][0] = x64
+	b[10][1] = x64
+
+	x = uint32(seed[3] >> 32)
+	x64 = uint64(x)<<32 | uint64(x)
+	b[11][0] = x64
+	b[11][1] = x64
+
+	// Counters.
+	if goarch.BigEndian {
+		b[12][0] = uint64(counter+0)<<32 | uint64(counter+1)
+		b[12][1] = uint64(counter+2)<<32 | uint64(counter+3)
+	} else {
+		b[12][0] = uint64(counter+0) | uint64(counter+1)<<32
+		b[12][1] = uint64(counter+2) | uint64(counter+3)<<32
+	}
+
+	// Zeros.
+	b[13][0] = 0
+	b[13][1] = 0
+	b[14][0] = 0
+	b[14][1] = 0
+
+	b[15][0] = 0
+	b[15][1] = 0
+}
+
+func _() {
+	// block and block_generic must have same type
+	x := block
+	x = block_generic
+	_ = x
+}
+
+// block_generic is the non-assembly block implementation,
+// for use on systems without special assembly.
+// Even on such systems, it is quite fast: on GOOS=386,
+// ChaCha8 using this code generates random values faster than PCG-DXSM.
+func block_generic(seed *[4]uint64, buf *[32]uint64, counter uint32) {
+	b := (*[16][4]uint32)(unsafe.Pointer(buf))
+
+	setup(seed, b, counter)
+
+	for i := range b[0] {
+		// Load block i from b[*][i] into local variables.
+		b0 := b[0][i]
+		b1 := b[1][i]
+		b2 := b[2][i]
+		b3 := b[3][i]
+		b4 := b[4][i]
+		b5 := b[5][i]
+		b6 := b[6][i]
+		b7 := b[7][i]
+		b8 := b[8][i]
+		b9 := b[9][i]
+		b10 := b[10][i]
+		b11 := b[11][i]
+		b12 := b[12][i]
+		b13 := b[13][i]
+		b14 := b[14][i]
+		b15 := b[15][i]
+
+		// 4 iterations of eight quarter-rounds each is 8 rounds
+		for round := 0; round < 4; round++ {
+			b0, b4, b8, b12 = qr(b0, b4, b8, b12)
+			b1, b5, b9, b13 = qr(b1, b5, b9, b13)
+			b2, b6, b10, b14 = qr(b2, b6, b10, b14)
+			b3, b7, b11, b15 = qr(b3, b7, b11, b15)
+
+			b0, b5, b10, b15 = qr(b0, b5, b10, b15)
+			b1, b6, b11, b12 = qr(b1, b6, b11, b12)
+			b2, b7, b8, b13 = qr(b2, b7, b8, b13)
+			b3, b4, b9, b14 = qr(b3, b4, b9, b14)
+		}
+
+		// Store block i back into b[*][i].
+		// Add b4..b11 back to the original key material,
+		// like in ChaCha20, to avoid trivial invertibility.
+		// There is no entropy in b0..b3 and b12..b15
+		// so we can skip the additions and save some time.
+		b[0][i] = b0
+		b[1][i] = b1
+		b[2][i] = b2
+		b[3][i] = b3
+		b[4][i] += b4
+		b[5][i] += b5
+		b[6][i] += b6
+		b[7][i] += b7
+		b[8][i] += b8
+		b[9][i] += b9
+		b[10][i] += b10
+		b[11][i] += b11
+		b[12][i] = b12
+		b[13][i] = b13
+		b[14][i] = b14
+		b[15][i] = b15
+	}
+
+	if goarch.BigEndian {
+		// On a big-endian system, reading the uint32 pairs as uint64s
+		// will word-swap them compared to little-endian, so we word-swap
+		// them here first to make the next swap get the right answer.
+		for i, x := range buf {
+			buf[i] = x>>32 | x<<32
+		}
+	}
+}
+
+// qr is the (inlinable) ChaCha8 quarter round.
+func qr(a, b, c, d uint32) (_a, _b, _c, _d uint32) {
+	a += b
+	d ^= a
+	d = d<<16 | d>>16
+	c += d
+	b ^= c
+	b = b<<12 | b>>20
+	a += b
+	d ^= a
+	d = d<<8 | d>>24
+	c += d
+	b ^= c
+	b = b<<7 | b>>25
+	return a, b, c, d
+}
diff --git a/src/internal/chacha8rand/chacha8_stub.s b/src/internal/chacha8rand/chacha8_stub.s
new file mode 100644
index 0000000..09be558
--- /dev/null
+++ b/src/internal/chacha8rand/chacha8_stub.s
@@ -0,0 +1,12 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 && !arm64
+
+#include "textflag.h"
+
+// func block(seed *[8]uint32, blocks *[16][4]uint32, counter uint32)
+TEXT ·block(SB), NOSPLIT, $0
+	JMP ·block_generic(SB)
+
diff --git a/src/internal/chacha8rand/export_test.go b/src/internal/chacha8rand/export_test.go
new file mode 100644
index 0000000..728aded
--- /dev/null
+++ b/src/internal/chacha8rand/export_test.go
@@ -0,0 +1,12 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package chacha8rand
+
+var Block = block
+var Block_generic = block_generic
+
+func Seed(s *State) [4]uint64 {
+	return s.seed
+}
diff --git a/src/internal/chacha8rand/rand_test.go b/src/internal/chacha8rand/rand_test.go
new file mode 100644
index 0000000..2975013
--- /dev/null
+++ b/src/internal/chacha8rand/rand_test.go
@@ -0,0 +1,202 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package chacha8rand_test
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	. "internal/chacha8rand"
+	"slices"
+	"testing"
+)
+
+func TestOutput(t *testing.T) {
+	var s State
+	s.Init(seed)
+	for i := range output {
+		for {
+			x, ok := s.Next()
+			if ok {
+				if x != output[i] {
+					t.Errorf("#%d: have %#x want %#x", i, x, output[i])
+				}
+				break
+			}
+			s.Refill()
+		}
+	}
+}
+
+func TestMarshal(t *testing.T) {
+	var s State
+	s.Init(seed)
+	for i := range output {
+		for {
+			b := Marshal(&s)
+			s = State{}
+			err := Unmarshal(&s, b)
+			if err != nil {
+				t.Fatalf("#%d: Unmarshal: %v", i, err)
+			}
+			x, ok := s.Next()
+			if ok {
+				if x != output[i] {
+					t.Fatalf("#%d: have %#x want %#x", i, x, output[i])
+				}
+				break
+			}
+			s.Refill()
+		}
+	}
+}
+
+func TestReseed(t *testing.T) {
+	var s State
+	s.Init(seed)
+	old := Seed(&s)
+	s.Reseed()
+	if Seed(&s) == old {
+		t.Errorf("Reseed did not change seed")
+	}
+}
+
+func BenchmarkBlock(b *testing.B) {
+	var seed [4]uint64
+	var blocks [32]uint64
+
+	for i := 0; i < b.N; i++ {
+		Block(&seed, &blocks, 0)
+	}
+	b.SetBytes(32 * 8)
+}
+
+func TestBlockGeneric(t *testing.T) {
+	var b1, b2 [32]uint64
+	s := seed // byte seed
+	seed := [4]uint64{
+		binary.LittleEndian.Uint64(s[0*8:]),
+		binary.LittleEndian.Uint64(s[1*8:]),
+		binary.LittleEndian.Uint64(s[2*8:]),
+		binary.LittleEndian.Uint64(s[3*8:]),
+	}
+
+	Block(&seed, &b1, 4)
+	Block_generic(&seed, &b2, 4)
+	if !slices.Equal(b1[:], b2[:]) {
+		var out bytes.Buffer
+		fmt.Fprintf(&out, "%-18s %-18s\n", "block", "block_generic")
+		for i := range b1 {
+			suffix := ""
+			if b1[i] != b2[i] {
+				suffix = " mismatch!"
+			}
+			fmt.Fprintf(&out, "%#016x %#016x%s\n", b1[i], b2[i], suffix)
+		}
+		t.Errorf("block and block_generic disagree:\n%s", out.String())
+	}
+}
+
+// Golden output test to make sure algorithm never changes,
+// so that its use in math/rand/v2 stays stable.
+// See https://c2sp.org/chacha8rand.
+
+var seed = [32]byte([]byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ123456"))
+
+var output = []uint64{
+	0xb773b6063d4616a5, 0x1160af22a66abc3c, 0x8c2599d9418d287c, 0x7ee07e037edc5cd6,
+	0xcfaa9ee02d1c16ad, 0x0e090eef8febea79, 0x3c82d271128b5b3e, 0x9c5addc11252a34f,
+	0xdf79bb617d6ceea6, 0x36d553591f9d736a, 0xeef0d14e181ee01f, 0x089bfc760ae58436,
+	0xd9e52b59cc2ad268, 0xeb2fb4444b1b8aba, 0x4f95c8a692c46661, 0xc3c6323217cae62c,
+	0x91ebb4367f4e2e7e, 0x784cf2c6a0ec9bc6, 0x5c34ec5c34eabe20, 0x4f0a8f515570daa8,
+	0xfc35dcb4113d6bf2, 0x5b0da44c645554bc, 0x6d963da3db21d9e1, 0xeeaefc3150e500f3,
+	0x2d37923dda3750a5, 0x380d7a626d4bc8b0, 0xeeaf68ede3d7ee49, 0xf4356695883b717c,
+	0x846a9021392495a4, 0x8e8510549630a61b, 0x18dc02545dbae493, 0x0f8f9ff0a65a3d43,
+	0xccf065f7190ff080, 0xfd76d1aa39673330, 0x95d232936cba6433, 0x6c7456d1070cbd17,
+	0x462acfdaff8c6562, 0x5bafab866d34fc6a, 0x0c862f78030a2988, 0xd39a83e407c3163d,
+	0xc00a2b7b45f22ebf, 0x564307c62466b1a9, 0x257e0424b0c072d4, 0x6fb55e99496c28fe,
+	0xae9873a88f5cd4e0, 0x4657362ac60d3773, 0x1c83f91ecdf23e8e, 0x6fdc0792c15387c0,
+	0x36dad2a30dfd2b5c, 0xa4b593290595bdb7, 0x4de18934e4cc02c5, 0xcdc0d604f015e3a7,
+	0xfba0dbf69ad80321, 0x60e8bea3d139de87, 0xd18a4d851ef48756, 0x6366447c2215f34a,
+	0x05682e97d3d007ee, 0x4c0e8978c6d54ab2, 0xcf1e9f6a6712edc2, 0x061439414c80cfd3,
+	0xd1a8b6e2745c0ead, 0x31a7918d45c410e8, 0xabcc61ad90216eec, 0x4040d92d2032a71a,
+	0x3cd2f66ffb40cd68, 0xdcd051c07295857a, 0xeab55cbcd9ab527e, 0x18471dce781bdaac,
+	0xf7f08cd144dc7252, 0x5804e0b13d7f40d1, 0x5cb1a446e4b2d35b, 0xe6d4a728d2138a06,
+	0x05223e40ca60dad8, 0x2d61ec3206ac6a68, 0xab692356874c17b8, 0xc30954417676de1c,
+	0x4f1ace3732225624, 0xfba9510813988338, 0x997f200f52752e11, 0x1116aaafe86221fa,
+	0x07ce3b5cb2a13519, 0x2956bc72bc458314, 0x4188b7926140eb78, 0x56ca6dbfd4adea4d,
+	0x7fe3c22349340ce5, 0x35c08f9c37675f8a, 0x11e1c7fbef5ed521, 0x98adc8464ec1bc75,
+	0xd163b2c73d1203f8, 0x8c761ee043a2f3f3, 0x24b99d6accecd7b7, 0x793e31aa112f0370,
+	0x8e87dc2a19285139, 0x4247ae04f7096e25, 0x514f3122926fe20f, 0xdc6fb3f045d2a7e9,
+	0x15cb30cecdd18eba, 0xcbc7fdecf6900274, 0x3fb5c696dc8ba021, 0xd1664417c8d274e6,
+	0x05f7e445ea457278, 0xf920bbca1b9db657, 0x0c1950b4da22cb99, 0xf875baf1af09e292,
+	0xbed3d7b84250f838, 0xf198e8080fd74160, 0xc9eda51d9b7ea703, 0xf709ef55439bf8f6,
+	0xd20c74feebf116fc, 0x305668eb146d7546, 0x829af3ec10d89787, 0x15b8f9697b551dbc,
+	0xfc823c6c8e64b8c9, 0x345585e8183b40bc, 0x674b4171d6581368, 0x1234d81cd670e9f7,
+	0x0e505210d8a55e19, 0xe8258d69eeeca0dc, 0x05d4c452e8baf67e, 0xe8dbe30116a45599,
+	0x1cf08ce1b1176f00, 0xccf7d0a4b81ecb49, 0x303fea136b2c430e, 0x861d6c139c06c871,
+	0x5f41df72e05e0487, 0x25bd7e1e1ae26b1d, 0xbe9f4004d662a41d, 0x65bf58d483188546,
+	0xd1b27cff69db13cc, 0x01a6663372c1bb36, 0x578dd7577b727f4d, 0x19c78f066c083cf6,
+	0xdbe014d4f9c391bb, 0x97fbb2dd1d13ffb3, 0x31c91e0af9ef8d4f, 0x094dfc98402a43ba,
+	0x069bd61bea37b752, 0x5b72d762e8d986ca, 0x72ee31865904bc85, 0xd1f5fdc5cd36c33e,
+	0xba9b4980a8947cad, 0xece8f05eac49ab43, 0x65fe1184abae38e7, 0x2d7cb9dea5d31452,
+	0xcc71489476e467e3, 0x4c03a258a578c68c, 0x00efdf9ecb0fd8fc, 0x9924cad471e2666d,
+	0x87f8668318f765e9, 0xcb4dc57c1b55f5d8, 0xd373835a86604859, 0xe526568b5540e482,
+	0x1f39040f08586fec, 0xb764f3f00293f8e6, 0x049443a2f6bd50a8, 0x76fec88697d3941a,
+	0x3efb70d039bae7a2, 0xe2f4611368eca8a8, 0x7c007a96e01d2425, 0xbbcce5768e69c5bf,
+	0x784fb4985c42aac3, 0xf72b5091aa223874, 0x3630333fb1e62e07, 0x8e7319ebdebbb8de,
+	0x2a3982bca959fa00, 0xb2b98b9f964ba9b3, 0xf7e31014adb71951, 0xebd0fca3703acc82,
+	0xec654e2a2fe6419a, 0xb326132d55a52e2c, 0x2248c57f44502978, 0x32710c2f342daf16,
+	0x0517b47b5acb2bec, 0x4c7a718fca270937, 0xd69142bed0bcc541, 0xe40ebcb8ff52ce88,
+	0x3e44a2dbc9f828d4, 0xc74c2f4f8f873f58, 0x3dbf648eb799e45b, 0x33f22475ee0e86f8,
+	0x1eb4f9ee16d47f65, 0x40f8d2b8712744e3, 0xb886b4da3cb14572, 0x2086326fbdd6f64d,
+	0xcc3de5907dd882b9, 0xa2e8b49a5ee909df, 0xdbfb8e7823964c10, 0x70dd6089ef0df8d5,
+	0x30141663cdd9c99f, 0x04b805325c240365, 0x7483d80314ac12d6, 0x2b271cb91aa7f5f9,
+	0x97e2245362abddf0, 0x5a84f614232a9fab, 0xf71125fcda4b7fa2, 0x1ca5a61d74b27267,
+	0x38cc6a9b3adbcb45, 0xdde1bb85dc653e39, 0xe9d0c8fa64f89fd4, 0x02c5fb1ecd2b4188,
+	0xf2bd137bca5756e5, 0xadefe25d121be155, 0x56cd1c3c5d893a8e, 0x4c50d337beb65bb9,
+	0x918c5151675cf567, 0xaba649ffcfb56a1e, 0x20c74ab26a2247cd, 0x71166bac853c08da,
+	0xb07befe2e584fc5d, 0xda45ff2a588dbf32, 0xdb98b03c4d75095e, 0x60285ae1aaa65a4c,
+	0xf93b686a263140b8, 0xde469752ee1c180e, 0xcec232dc04129aae, 0xeb916baa1835ea04,
+	0xd49c21c8b64388ff, 0x72a82d9658864888, 0x003348ef7eac66a8, 0x7f6f67e655b209eb,
+	0x532ffb0b7a941b25, 0xd940ade6128deede, 0xdf24f2a1af89fe23, 0x95aa3b4988195ae0,
+	0x3da649404f94be4a, 0x692dad132c3f7e27, 0x40aee76ecaaa9eb8, 0x1294a01e09655024,
+	0x6df797abdba4e4f5, 0xea2fb6024c1d7032, 0x5f4e0492295489fc, 0x57972914ea22e06a,
+	0x9a8137d133aad473, 0xa2e6dd6ae7cdf2f3, 0x9f42644f18086647, 0x16d03301c170bd3e,
+	0x908c416fa546656d, 0xe081503be22e123e, 0x077cf09116c4cc72, 0xcbd25cd264b7f229,
+	0x3db2f468ec594031, 0x46c00e734c9badd5, 0xd0ec0ac72075d861, 0x3037cb3cf80b7630,
+	0x574c3d7b3a2721c6, 0xae99906a0076824b, 0xb175a5418b532e70, 0xd8b3e251ee231ddd,
+	0xb433eec25dca1966, 0x530f30dc5cff9a93, 0x9ff03d98b53cd335, 0xafc4225076558cdf,
+	0xef81d3a28284402a, 0x110bdbf51c110a28, 0x9ae1b255d027e8f6, 0x7de3e0aa24688332,
+	0xe483c3ecd2067ee2, 0xf829328b276137e6, 0xa413ccad57562cad, 0xe6118e8b496acb1f,
+	0x8288dca6da5ec01f, 0xa53777dc88c17255, 0x8a00f1e0d5716eda, 0x618e6f47b7a720a8,
+	0x9e3907b0c692a841, 0x978b42ca963f34f3, 0x75e4b0cd98a7d7ef, 0xde4dbd6e0b5f4752,
+	0x0252e4153f34493f, 0x50f0e7d803734ef9, 0x237766a38ed167ee, 0x4124414001ee39a0,
+	0xd08df643e535bb21, 0x34f575b5a9a80b74, 0x2c343af87297f755, 0xcd8b6d99d821f7cb,
+	0xe376fd7256fc48ae, 0xe1b06e7334352885, 0xfa87b26f86c169eb, 0x36c1604665a971de,
+	0xdba147c2239c8e80, 0x6b208e69fc7f0e24, 0x8795395b6f2b60c3, 0x05dabee9194907f4,
+	0xb98175142f5ed902, 0x5e1701e2021ddc81, 0x0875aba2755eed08, 0x778d83289251de95,
+	0x3bfbe46a039ecb31, 0xb24704fce4cbd7f9, 0x6985ffe9a7c91e3d, 0xc8efb13df249dabb,
+	0xb1037e64b0f4c9f6, 0x55f69fd197d6b7c3, 0x672589d71d68a90c, 0xbebdb8224f50a77e,
+	0x3f589f80007374a7, 0xd307f4635954182a, 0xcff5850c10d4fd90, 0xc6da02dfb6408e15,
+	0x93daeef1e2b1a485, 0x65d833208aeea625, 0xe2b13fa13ed3b5fa, 0x67053538130fb68e,
+	0xc1042f6598218fa9, 0xee5badca749b8a2e, 0x6d22a3f947dae37d, 0xb62c6d1657f4dbaf,
+	0x6e007de69704c20b, 0x1af2b913fc3841d8, 0xdc0e47348e2e8e22, 0x9b1ddef1cf958b22,
+	0x632ed6b0233066b8, 0xddd02d3311bed8f2, 0xf147cfe1834656e9, 0x399aaa49d511597a,
+	0x6b14886979ec0309, 0x64fc4ac36b5afb97, 0xb82f78e07f7cf081, 0x10925c9a323d0e1b,
+	0xf451c79ee13c63f6, 0x7c2fc180317876c7, 0x35a12bd9eecb7d22, 0x335654a539621f90,
+	0xcc32a3f35db581f0, 0xc60748a80b2369cb, 0x7c4dd3b08591156b, 0xac1ced4b6de22291,
+	0xa32cfa2df134def5, 0x627108918dea2a53, 0x0555b1608fcb4ff4, 0x143ee7ac43aaa33c,
+	0xdae90ce7cf4fc218, 0x4d68fc2582bcf4b5, 0x37094e1849135d71, 0xf7857e09f3d49fd8,
+	0x007538c503768be7, 0xedf648ba2f6be601, 0xaa347664dd72513e, 0xbe63893c6ef23b86,
+	0x130b85710605af97, 0xdd765c6b1ef6ab56, 0xf3249a629a97dc6b, 0x2a114f9020fab8e5,
+	0x5a69e027cfc6ad08, 0x3c4ccb36f1a5e050, 0x2e9e7d596834f0a5, 0x2430be6858fce789,
+	0xe90b862f2466e597, 0x895e2884f159a9ec, 0x26ab8fa4902fcb57, 0xa6efff5c54e1fa50,
+	0x333ac4e5811a8255, 0xa58d515f02498611, 0xfe5a09dcb25c6ef4, 0x03898988ab5f5818,
+	0x289ff6242af6c617, 0x3d9dd59fd381ea23, 0x52d7d93d8a8aae51, 0xc76a123d511f786f,
+	0xf68901edaf00c46c, 0x8c630871b590de80, 0x05209c308991e091, 0x1f809f99b4788177,
+	0x11170c2eb6c19fd8, 0x44433c779062ba58, 0xc0acb51af1874c45, 0x9f2e134284809fa1,
+	0xedb523bd15c619fa, 0x02d97fd53ecc23c0, 0xacaf05a34462374c, 0xddd9c6d34bffa11f,
+}
diff --git a/src/internal/coverage/cmddefs.go b/src/internal/coverage/cmddefs.go
deleted file mode 100644
index 49376a4..0000000
--- a/src/internal/coverage/cmddefs.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package coverage
-
-// CoverPkgConfig is a bundle of information passed from the Go
-// command to the cover command during "go build -cover" runs. The
-// Go command creates and fills in a struct as below, then passes
-// file containing the encoded JSON for the struct to the "cover"
-// tool when instrumenting the source files in a Go package.
-type CoverPkgConfig struct {
-	// File into which cmd/cover should emit summary info
-	// when instrumentation is complete.
-	OutConfig string
-
-	// Import path for the package being instrumented.
-	PkgPath string
-
-	// Package name.
-	PkgName string
-
-	// Instrumentation granularity: one of "perfunc" or "perblock" (default)
-	Granularity string
-
-	// Module path for this package (empty if no go.mod in use)
-	ModulePath string
-
-	// Local mode indicates we're doing a coverage build or test of a
-	// package selected via local import path, e.g. "./..." or
-	// "./foo/bar" as opposed to a non-relative import path. See the
-	// corresponding field in cmd/go's PackageInternal struct for more
-	// info.
-	Local bool
-}
-
-// CoverFixupConfig contains annotations/notes generated by the
-// cmd/cover tool (during instrumentation) to be passed on to the
-// compiler when the instrumented code is compiled. The cmd/cover tool
-// creates a struct of this type, JSON-encodes it, and emits the
-// result to a file, which the Go command then passes to the compiler
-// when the instrumented package is built.
-type CoverFixupConfig struct {
-	// Name of the variable (created by cmd/cover) containing the
-	// encoded meta-data for the package.
-	MetaVar string
-
-	// Length of the meta-data.
-	MetaLen int
-
-	// Hash computed by cmd/cover of the meta-data.
-	MetaHash string
-
-	// Instrumentation strategy. For now this is always set to
-	// "normal", but in the future we may add new values (for example,
-	// if panic paths are instrumented, or if the instrumenter
-	// eliminates redundant counters).
-	Strategy string
-
-	// Prefix assigned to the names of counter variables generated
-	// during instrumentation by cmd/cover.
-	CounterPrefix string
-
-	// Name chosen for the package ID variable generated during
-	// instrumentation.
-	PkgIdVar string
-
-	// Counter mode (e.g. set/count/atomic)
-	CounterMode string
-
-	// Counter granularity (perblock or perfunc).
-	CounterGranularity string
-}
-
-// MetaFilePaths contains information generated by the Go command and
-// the read in by coverage test support functions within an executing
-// "go test -cover" binary.
-type MetaFileCollection struct {
-	ImportPaths       []string
-	MetaFileFragments []string
-}
-
-// Name of file within the "go test -cover" temp coverdir directory
-// containing a list of meta-data files for packages being tested
-// in a "go test -coverpkg=... ..." run. This constant is shared
-// by the Go command and by the coverage runtime.
-const MetaFilesFileName = "metafiles.txt"
diff --git a/src/internal/coverage/decodemeta/decodefile.go b/src/internal/coverage/decodemeta/decodefile.go
index 6580dd5..96e0765 100644
--- a/src/internal/coverage/decodemeta/decodefile.go
+++ b/src/internal/coverage/decodemeta/decodefile.go
@@ -147,7 +147,7 @@
 	return r.hdr.CMode
 }
 
-// CounterMode returns the counter granularity (single counter per
+// CounterGranularity returns the counter granularity (single counter per
 // function, or counter per block) selected when building for coverage
 // for the program that produced this meta-data file.
 func (r *CoverageMetaFileReader) CounterGranularity() coverage.CounterGranularity {
diff --git a/src/internal/coverage/defs.go b/src/internal/coverage/defs.go
index 8751b9f..340ac95 100644
--- a/src/internal/coverage/defs.go
+++ b/src/internal/coverage/defs.go
@@ -261,6 +261,20 @@
 	return "<invalid>"
 }
 
+// Name of file within the "go test -cover" temp coverdir directory
+// containing a list of meta-data files for packages being tested
+// in a "go test -coverpkg=... ..." run. This constant is shared
+// by the Go command and by the coverage runtime.
+const MetaFilesFileName = "metafiles.txt"
+
+// MetaFileCollection contains information generated by the Go command and
+// then read in by coverage test support functions within an executing
+// "go test -cover" binary.
+type MetaFileCollection struct {
+	ImportPaths       []string
+	MetaFileFragments []string
+}
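
MetaFileCollection and MetaFilesFileName (moved here from cmddefs.go) describe the metafiles.txt manifest that "go test -cover" leaves in the coverage temp directory. A hedged sketch of producing such a manifest; the JSON encoding and the file names below are assumptions for illustration, not stated in this hunk:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// MetaFileCollection mirrors the struct added above.
	type MetaFileCollection struct {
		ImportPaths       []string
		MetaFileFragments []string
	}

	func main() {
		mfc := MetaFileCollection{
			ImportPaths:       []string{"example.com/pkg"},         // hypothetical package
			MetaFileFragments: []string{"covmeta.example.com_pkg"}, // hypothetical file name
		}
		b, _ := json.Marshal(mfc) // assumption: the manifest is JSON-encoded
		fmt.Println(string(b))    // contents of the metafiles.txt manifest
	}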
+
 //.....................................................................
 //
 // Counter data definitions:
diff --git a/src/internal/coverage/pkid.go b/src/internal/coverage/pkid.go
index 8ddd44d..372a9cb 100644
--- a/src/internal/coverage/pkid.go
+++ b/src/internal/coverage/pkid.go
@@ -49,6 +49,7 @@
 	"internal/goarch",
 	"runtime/internal/atomic",
 	"internal/goos",
+	"internal/chacha8rand",
 	"runtime/internal/sys",
 	"internal/abi",
 	"runtime/internal/math",
diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go
index 1352810..d794e53 100644
--- a/src/internal/cpu/cpu.go
+++ b/src/internal/cpu/cpu.go
@@ -29,6 +29,9 @@
 	HasADX       bool
 	HasAVX       bool
 	HasAVX2      bool
+	HasAVX512F   bool
+	HasAVX512BW  bool
+	HasAVX512VL  bool
 	HasBMI1      bool
 	HasBMI2      bool
 	HasERMS      bool
@@ -48,10 +51,11 @@
 // The booleans in ARM contain the correspondingly named cpu feature bit.
 // The struct is padded to avoid false sharing.
 var ARM struct {
-	_        CacheLinePad
-	HasVFPv4 bool
-	HasIDIVA bool
-	_        CacheLinePad
+	_            CacheLinePad
+	HasVFPv4     bool
+	HasIDIVA     bool
+	HasV7Atomics bool
+	_            CacheLinePad
 }
 
 // The booleans in ARM64 contain the correspondingly named cpu feature bit.
@@ -212,6 +216,8 @@
 
 // indexByte returns the index of the first instance of c in s,
 // or -1 if c is not present in s.
+// indexByte is semantically the same as [strings.IndexByte].
+// We copy this function because "internal/cpu" should not have external dependencies.
 func indexByte(s string, c byte) int {
 	for i := 0; i < len(s); i++ {
 		if s[i] == c {
diff --git a/src/internal/cpu/cpu_arm.go b/src/internal/cpu/cpu_arm.go
index b624526..080e788 100644
--- a/src/internal/cpu/cpu_arm.go
+++ b/src/internal/cpu/cpu_arm.go
@@ -11,24 +11,38 @@
 // initialized.
 var HWCap uint
 var HWCap2 uint
+var Platform string
 
 // HWCAP/HWCAP2 bits. These are exposed by Linux and FreeBSD.
 const (
 	hwcap_VFPv4 = 1 << 16
 	hwcap_IDIVA = 1 << 17
+	hwcap_LPAE  = 1 << 20
 )
 
 func doinit() {
 	options = []option{
 		{Name: "vfpv4", Feature: &ARM.HasVFPv4},
 		{Name: "idiva", Feature: &ARM.HasIDIVA},
+		{Name: "v7atomics", Feature: &ARM.HasV7Atomics},
 	}
 
 	// HWCAP feature bits
 	ARM.HasVFPv4 = isSet(HWCap, hwcap_VFPv4)
 	ARM.HasIDIVA = isSet(HWCap, hwcap_IDIVA)
+	// lpae is required to make the 64-bit instructions LDRD and STRD (and variants) atomic.
+	// See ARMv7 manual section B1.6.
+	// We also need at least a v7 chip, for the DMB instruction.
+	ARM.HasV7Atomics = isSet(HWCap, hwcap_LPAE) && isV7(Platform)
 }
 
 func isSet(hwc uint, value uint) bool {
 	return hwc&value != 0
 }
+
+func isV7(s string) bool {
+	if s == "aarch64" {
+		return true
+	}
+	return s >= "v7" // will be something like v5, v7, v8, v8l
+}
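
The v7 check relies on the shape of the kernel-provided platform string, e.g. v5, v7, v8, v8l, so a plain lexical comparison against "v7" suffices, with "aarch64" special-cased because it would not compare as >= "v7". Exercising the function exactly as added above:

	package main

	import "fmt"

	// isV7 is copied from the change above.
	func isV7(s string) bool {
		if s == "aarch64" {
			return true
		}
		return s >= "v7" // will be something like v5, v7, v8, v8l
	}

	func main() {
		for _, p := range []string{"v5", "v6", "v7", "v8l", "aarch64"} {
			fmt.Println(p, isV7(p)) // v5, v6 -> false; v7, v8l, aarch64 -> true
		}
	}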
diff --git a/src/internal/cpu/cpu_riscv64.go b/src/internal/cpu/cpu_riscv64.go
index 54b8c33..2173fe8 100644
--- a/src/internal/cpu/cpu_riscv64.go
+++ b/src/internal/cpu/cpu_riscv64.go
@@ -4,7 +4,7 @@
 
 package cpu
 
-const CacheLinePadSize = 32
+const CacheLinePadSize = 64
 
 func doinit() {
 }
diff --git a/src/internal/cpu/cpu_s390x.s b/src/internal/cpu/cpu_s390x.s
index a1243aa..4ffbbde 100644
--- a/src/internal/cpu/cpu_s390x.s
+++ b/src/internal/cpu/cpu_s390x.s
@@ -16,48 +16,48 @@
 TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16
 	MOVD $0, R0         // set function code to 0 (KM-Query)
 	MOVD $ret+0(FP), R1 // address of 16-byte return value
-	WORD $0xB92E0024    // cipher message (KM)
+	KM   R2, R4         // cipher message (KM)
 	RET
 
 // func kmcQuery() queryResult
 TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16
 	MOVD $0, R0         // set function code to 0 (KMC-Query)
 	MOVD $ret+0(FP), R1 // address of 16-byte return value
-	WORD $0xB92F0024    // cipher message with chaining (KMC)
+	KMC  R2, R4         // cipher message with chaining (KMC)
 	RET
 
 // func kmctrQuery() queryResult
 TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16
 	MOVD $0, R0         // set function code to 0 (KMCTR-Query)
 	MOVD $ret+0(FP), R1 // address of 16-byte return value
-	WORD $0xB92D4024    // cipher message with counter (KMCTR)
+	KMCTR R2, R4, R4    // cipher message with counter (KMCTR)
 	RET
 
 // func kmaQuery() queryResult
 TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16
 	MOVD $0, R0         // set function code to 0 (KMA-Query)
 	MOVD $ret+0(FP), R1 // address of 16-byte return value
-	WORD $0xb9296024    // cipher message with authentication (KMA)
+	KMA  R2, R6, R4     // cipher message with authentication (KMA)
 	RET
 
 // func kimdQuery() queryResult
 TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16
 	MOVD $0, R0         // set function code to 0 (KIMD-Query)
 	MOVD $ret+0(FP), R1 // address of 16-byte return value
-	WORD $0xB93E0024    // compute intermediate message digest (KIMD)
+	KIMD R2, R4         // compute intermediate message digest (KIMD)
 	RET
 
 // func klmdQuery() queryResult
 TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16
 	MOVD $0, R0         // set function code to 0 (KLMD-Query)
 	MOVD $ret+0(FP), R1 // address of 16-byte return value
-	WORD $0xB93F0024    // compute last message digest (KLMD)
+	KLMD R2, R4         // compute last message digest (KLMD)
 	RET
 
 // func kdsaQuery() queryResult
 TEXT ·kdsaQuery(SB), NOSPLIT|NOFRAME, $0-16
 	MOVD $0, R0         // set function code to 0 (KDSA-Query)
 	MOVD $ret+0(FP), R1 // address of 16-byte return value
-	WORD $0xB93A0008    // compute digital signature authentication
+	KDSA R0, R4      // compute digital signature authentication
 	RET
 
diff --git a/src/internal/cpu/cpu_test.go b/src/internal/cpu/cpu_test.go
index b8c74f2..a6fe7f7 100644
--- a/src/internal/cpu/cpu_test.go
+++ b/src/internal/cpu/cpu_test.go
@@ -30,7 +30,7 @@
 
 	env := "GODEBUG=" + options
 
-	cmd := exec.Command(os.Args[0], "-test.run="+test)
+	cmd := exec.Command(os.Args[0], "-test.run=^"+test+"$")
 	cmd.Env = append(cmd.Env, env)
 
 	output, err := cmd.CombinedOutput()
diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go
index 96b8ef9..f8aa53a 100644
--- a/src/internal/cpu/cpu_x86.go
+++ b/src/internal/cpu/cpu_x86.go
@@ -34,12 +34,15 @@
 	cpuid_AVX       = 1 << 28
 
 	// ebx bits
-	cpuid_BMI1 = 1 << 3
-	cpuid_AVX2 = 1 << 5
-	cpuid_BMI2 = 1 << 8
-	cpuid_ERMS = 1 << 9
-	cpuid_ADX  = 1 << 19
-	cpuid_SHA  = 1 << 29
+	cpuid_BMI1     = 1 << 3
+	cpuid_AVX2     = 1 << 5
+	cpuid_BMI2     = 1 << 8
+	cpuid_ERMS     = 1 << 9
+	cpuid_AVX512F  = 1 << 16
+	cpuid_ADX      = 1 << 19
+	cpuid_SHA      = 1 << 29
+	cpuid_AVX512BW = 1 << 30
+	cpuid_AVX512VL = 1 << 31
 
 	// edx bits for CPUID 0x80000001
 	cpuid_RDTSCP = 1 << 27
@@ -77,6 +80,15 @@
 			option{Name: "bmi2", Feature: &X86.HasBMI2},
 			option{Name: "fma", Feature: &X86.HasFMA})
 	}
+	if level < 4 {
+		// These options are required at level 4. At lower levels
+		// they can be turned off.
+		options = append(options,
+			option{Name: "avx512f", Feature: &X86.HasAVX512F},
+			option{Name: "avx512bw", Feature: &X86.HasAVX512BW},
+			option{Name: "avx512vl", Feature: &X86.HasAVX512VL},
+		)
+	}
 
 	maxID, _, _, _ := cpuid(0, 0)
 
@@ -108,11 +120,18 @@
 	X86.HasFMA = isSet(ecx1, cpuid_FMA) && X86.HasOSXSAVE
 
 	osSupportsAVX := false
+	osSupportsAVX512 := false
 	// For XGETBV, OSXSAVE bit is required and sufficient.
 	if X86.HasOSXSAVE {
 		eax, _ := xgetbv()
 		// Check if XMM and YMM registers have OS support.
 		osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2)
+
+		// AVX512 detection does not work on Darwin,
+		// see https://github.com/golang/go/issues/49233
+		//
+		// Check if opmask, ZMMhi256 and Hi16_ZMM have OS support.
+		osSupportsAVX512 = osSupportsAVX && isSet(eax, 1<<5) && isSet(eax, 1<<6) && isSet(eax, 1<<7)
 	}
 
 	X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX
@@ -129,6 +148,12 @@
 	X86.HasADX = isSet(ebx7, cpuid_ADX)
 	X86.HasSHA = isSet(ebx7, cpuid_SHA)
 
+	X86.HasAVX512F = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512
+	if X86.HasAVX512F {
+		X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW)
+		X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL)
+	}
+
 	var maxExtendedInformation uint32
 	maxExtendedInformation, _, _, _ = cpuid(0x80000000, 0)
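
AVX-512 is only reported when both the CPUID leaf-7 EBX bits and the XGETBV (XCR0) state bits agree: XMM (bit 1), YMM (bit 2), opmask (bit 5), ZMM_Hi256 (bit 6) and Hi16_ZMM (bit 7) must all be OS-enabled. A standalone restatement of that mask check (the helper name is ours):

	package main

	import "fmt"

	// osSupportsAVX512 restates the XCR0 check from the change above.
	func osSupportsAVX512(xcr0 uint64) bool {
		const mask = 1<<1 | 1<<2 | 1<<5 | 1<<6 | 1<<7
		return xcr0&mask == mask
	}

	func main() {
		fmt.Println(osSupportsAVX512(0xe7)) // true: XMM/YMM plus all ZMM-related state enabled
		fmt.Println(osSupportsAVX512(0x07)) // false: only x87/SSE/AVX state enabled
	}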
 
diff --git a/src/internal/cpu/cpu_x86_test.go b/src/internal/cpu/cpu_x86_test.go
index 8564ccc..cc6552b 100644
--- a/src/internal/cpu/cpu_x86_test.go
+++ b/src/internal/cpu/cpu_x86_test.go
@@ -18,6 +18,24 @@
 	}
 }
 
+func TestX86ifAVX512FhasAVX2(t *testing.T) {
+	if X86.HasAVX512F && !X86.HasAVX2 {
+		t.Fatalf("HasAVX2 expected true when HasAVX512F is true, got false")
+	}
+}
+
+func TestX86ifAVX512BWhasAVX512F(t *testing.T) {
+	if X86.HasAVX512BW && !X86.HasAVX512F {
+		t.Fatalf("HasAVX512F expected true when HasAVX512BW is true, got false")
+	}
+}
+
+func TestX86ifAVX512VLhasAVX512F(t *testing.T) {
+	if X86.HasAVX512VL && !X86.HasAVX512F {
+		t.Fatalf("HasAVX512F expected true when HasAVX512VL is true, got false")
+	}
+}
+
 func TestDisableSSE3(t *testing.T) {
 	if GetGOAMD64level() > 1 {
 		t.Skip("skipping test: can't run on GOAMD64>v1 machines")
diff --git a/src/internal/diff/diff.go b/src/internal/diff/diff.go
index 0aeeb75..6a40b23 100644
--- a/src/internal/diff/diff.go
+++ b/src/internal/diff/diff.go
@@ -74,7 +74,7 @@
 			continue
 		}
 
-		// Expand matching lines as far possible,
+		// Expand matching lines as far as possible,
 		// establishing that x[start.x:end.x] == y[start.y:end.y].
 		// Note that on the first (or last) iteration we may (or definitely do)
 		// have an empty match: start.x==end.x and start.y==end.y.
diff --git a/src/internal/fmtsort/sort_test.go b/src/internal/fmtsort/sort_test.go
index cddcf70..7d5de9f 100644
--- a/src/internal/fmtsort/sort_test.go
+++ b/src/internal/fmtsort/sort_test.go
@@ -9,6 +9,7 @@
 	"internal/fmtsort"
 	"math"
 	"reflect"
+	"runtime"
 	"sort"
 	"strings"
 	"testing"
@@ -38,7 +39,7 @@
 	ct(reflect.TypeOf(chans[0]), chans[0], chans[1], chans[2]),
 	ct(reflect.TypeOf(toy{}), toy{0, 1}, toy{0, 2}, toy{1, -1}, toy{1, 1}),
 	ct(reflect.TypeOf([2]int{}), [2]int{1, 1}, [2]int{1, 2}, [2]int{2, 0}),
-	ct(reflect.TypeOf(any(any(0))), iFace, 1, 2, 3),
+	ct(reflect.TypeOf(any(0)), iFace, 1, 2, 3),
 }
 
 var iFace any
@@ -190,12 +191,15 @@
 var (
 	ints  [3]int
 	chans = makeChans()
+	pin   runtime.Pinner
 )
 
 func makeChans() []chan int {
 	cs := []chan int{make(chan int), make(chan int), make(chan int)}
 	// Order channels by address. See issue #49431.
-	// TODO: pin these pointers once pinning is available (#46787).
+	for i := range cs {
+		pin.Pin(reflect.ValueOf(cs[i]).UnsafePointer())
+	}
 	sort.Slice(cs, func(i, j int) bool {
 		return uintptr(reflect.ValueOf(cs[i]).UnsafePointer()) < uintptr(reflect.ValueOf(cs[j]).UnsafePointer())
 	})
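
The pins above keep the garbage collector from moving the channels between the address-based sort and later comparisons, which is what the removed TODO was waiting on. A standalone sketch of the same runtime.Pinner pattern:

	package main

	import (
		"fmt"
		"reflect"
		"runtime"
		"sort"
	)

	func main() {
		var pin runtime.Pinner
		cs := []chan int{make(chan int), make(chan int), make(chan int)}
		for i := range cs {
			pin.Pin(reflect.ValueOf(cs[i]).UnsafePointer()) // addresses stay stable while pinned
		}
		sort.Slice(cs, func(i, j int) bool {
			return uintptr(reflect.ValueOf(cs[i]).UnsafePointer()) < uintptr(reflect.ValueOf(cs[j]).UnsafePointer())
		})
		fmt.Println("ordered", len(cs), "channels by address")
		pin.Unpin() // release once the stable addresses are no longer needed
	}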
diff --git a/src/internal/fuzz/mutator.go b/src/internal/fuzz/mutator.go
index bb96066..9bba0d6 100644
--- a/src/internal/fuzz/mutator.go
+++ b/src/internal/fuzz/mutator.go
@@ -44,13 +44,6 @@
 	}
 }
 
-func min(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
-
 // mutate performs several mutations on the provided values.
 func (m *mutator) mutate(vals []any, maxBytes int) {
 	// TODO(katiehockman): pull some of these functions into helper methods and
@@ -81,7 +74,7 @@
 	case uint32:
 		vals[i] = uint32(m.mutateUInt(uint64(v), math.MaxUint32))
 	case uint64:
-		vals[i] = m.mutateUInt(uint64(v), maxUint)
+		vals[i] = m.mutateUInt(v, maxUint)
 	case float32:
 		vals[i] = float32(m.mutateFloat(float64(v), math.MaxFloat32))
 	case float64:
diff --git a/src/internal/fuzz/pcg.go b/src/internal/fuzz/pcg.go
index c9ea0af..4fe8aeb 100644
--- a/src/internal/fuzz/pcg.go
+++ b/src/internal/fuzz/pcg.go
@@ -30,7 +30,7 @@
 // creation and use, no reproducibility, no concurrency safety, just the
 // necessary methods, optimized for speed.
 
-var globalInc uint64 // PCG stream
+var globalInc atomic.Uint64 // PCG stream
 
 const multiplier uint64 = 6364136223846793005
 
@@ -63,7 +63,7 @@
 	if seed := godebugSeed(); seed != nil {
 		now = uint64(*seed)
 	}
-	inc := atomic.AddUint64(&globalInc, 1)
+	inc := globalInc.Add(1)
 	r.state = now
 	r.inc = (inc << 1) | 1
 	r.step()
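
The stream counter now uses the typed sync/atomic API rather than the package-level functions; both forms below are real sync/atomic APIs, shown side by side:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	var legacyInc uint64        // old style: a plain word, every access must remember atomic.*
	var globalInc atomic.Uint64 // new style: the type itself enforces atomic access

	func main() {
		a := atomic.AddUint64(&legacyInc, 1)
		b := globalInc.Add(1)
		fmt.Println(a, b) // 1 1
	}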
diff --git a/src/internal/fuzz/sys_windows.go b/src/internal/fuzz/sys_windows.go
index aa85be7..82c9703 100644
--- a/src/internal/fuzz/sys_windows.go
+++ b/src/internal/fuzz/sys_windows.go
@@ -85,12 +85,13 @@
 // run a worker process.
 func setWorkerComm(cmd *exec.Cmd, comm workerComm) {
 	mem := <-comm.memMu
-	memName := mem.f.Name()
+	memFD := mem.f.Fd()
 	comm.memMu <- mem
 	syscall.SetHandleInformation(syscall.Handle(comm.fuzzIn.Fd()), syscall.HANDLE_FLAG_INHERIT, 1)
 	syscall.SetHandleInformation(syscall.Handle(comm.fuzzOut.Fd()), syscall.HANDLE_FLAG_INHERIT, 1)
-	cmd.Env = append(cmd.Env, fmt.Sprintf("GO_TEST_FUZZ_WORKER_HANDLES=%x,%x,%q", comm.fuzzIn.Fd(), comm.fuzzOut.Fd(), memName))
-	cmd.SysProcAttr = &syscall.SysProcAttr{AdditionalInheritedHandles: []syscall.Handle{syscall.Handle(comm.fuzzIn.Fd()), syscall.Handle(comm.fuzzOut.Fd())}}
+	syscall.SetHandleInformation(syscall.Handle(memFD), syscall.HANDLE_FLAG_INHERIT, 1)
+	cmd.Env = append(cmd.Env, fmt.Sprintf("GO_TEST_FUZZ_WORKER_HANDLES=%x,%x,%x", comm.fuzzIn.Fd(), comm.fuzzOut.Fd(), memFD))
+	cmd.SysProcAttr = &syscall.SysProcAttr{AdditionalInheritedHandles: []syscall.Handle{syscall.Handle(comm.fuzzIn.Fd()), syscall.Handle(comm.fuzzOut.Fd()), syscall.Handle(memFD)}}
 }
 
 // getWorkerComm returns communication channels in the worker process.
@@ -99,19 +100,15 @@
 	if v == "" {
 		return workerComm{}, fmt.Errorf("GO_TEST_FUZZ_WORKER_HANDLES not set")
 	}
-	var fuzzInFD, fuzzOutFD uintptr
-	var memName string
-	if _, err := fmt.Sscanf(v, "%x,%x,%q", &fuzzInFD, &fuzzOutFD, &memName); err != nil {
+	var fuzzInFD, fuzzOutFD, memFileFD uintptr
+	if _, err := fmt.Sscanf(v, "%x,%x,%x", &fuzzInFD, &fuzzOutFD, &memFileFD); err != nil {
 		return workerComm{}, fmt.Errorf("parsing GO_TEST_FUZZ_WORKER_HANDLES=%s: %v", v, err)
 	}
 
 	fuzzIn := os.NewFile(fuzzInFD, "fuzz_in")
 	fuzzOut := os.NewFile(fuzzOutFD, "fuzz_out")
-	tmpFile, err := os.OpenFile(memName, os.O_RDWR, 0)
-	if err != nil {
-		return workerComm{}, fmt.Errorf("worker opening temp file: %w", err)
-	}
-	fi, err := tmpFile.Stat()
+	memFile := os.NewFile(memFileFD, "fuzz_mem")
+	fi, err := memFile.Stat()
 	if err != nil {
 		return workerComm{}, fmt.Errorf("worker checking temp file size: %w", err)
 	}
@@ -120,7 +117,7 @@
 		return workerComm{}, fmt.Errorf("fuzz temp file exceeds maximum size")
 	}
 	removeOnClose := false
-	mem, err := sharedMemMapFile(tmpFile, size, removeOnClose)
+	mem, err := sharedMemMapFile(memFile, size, removeOnClose)
 	if err != nil {
 		return workerComm{}, err
 	}
diff --git a/src/internal/godebug/godebug_test.go b/src/internal/godebug/godebug_test.go
index 8e46283..1ed0a36 100644
--- a/src/internal/godebug/godebug_test.go
+++ b/src/internal/godebug/godebug_test.go
@@ -7,6 +7,7 @@
 import (
 	"fmt"
 	. "internal/godebug"
+	"internal/race"
 	"internal/testenv"
 	"os"
 	"os/exec"
@@ -70,9 +71,39 @@
 	}
 }
 
+// TestPanicNilRace checks for a race in the runtime caused by use of runtime
+// atomics (not visible to usual race detection) to install the counter for
+// non-default panic(nil) semantics.  For #64649.
+func TestPanicNilRace(t *testing.T) {
+	if !race.Enabled {
+		t.Skip("Skipping test intended for use with -race.")
+	}
+	if os.Getenv("GODEBUG") != "panicnil=1" {
+		cmd := testenv.CleanCmdEnv(testenv.Command(t, os.Args[0], "-test.run=^TestPanicNilRace$", "-test.v", "-test.parallel=2", "-test.count=1"))
+		cmd.Env = append(cmd.Env, "GODEBUG=panicnil=1")
+		out, err := cmd.CombinedOutput()
+		t.Logf("output:\n%s", out)
+
+		if err != nil {
+			t.Errorf("Was not expecting a crash")
+		}
+		return
+	}
+
+	test := func(t *testing.T) {
+		t.Parallel()
+		defer func() {
+			recover()
+		}()
+		panic(nil)
+	}
+	t.Run("One", test)
+	t.Run("Two", test)
+}
+
 func TestCmdBisect(t *testing.T) {
 	testenv.MustHaveGoBuild(t)
-	out, err := exec.Command("go", "run", "cmd/vendor/golang.org/x/tools/cmd/bisect", "GODEBUG=buggy=1#PATTERN", os.Args[0], "-test.run=BisectTestCase").CombinedOutput()
+	out, err := exec.Command("go", "run", "cmd/vendor/golang.org/x/tools/cmd/bisect", "GODEBUG=buggy=1#PATTERN", os.Args[0], "-test.run=^TestBisectTestCase$").CombinedOutput()
 	if err != nil {
 		t.Fatalf("exec bisect: %v\n%s", err, out)
 	}
@@ -101,7 +132,7 @@
 
 // This test does nothing by itself, but you can run
 //
-//	bisect 'GODEBUG=buggy=1#PATTERN' go test -run=BisectTestCase
+//	bisect 'GODEBUG=buggy=1#PATTERN' go test -run='^TestBisectTestCase$'
 //
 // to see that the GODEBUG bisect support is working.
 // TestCmdBisect above does exactly that.
diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go
index b1711d9..a0a0672 100644
--- a/src/internal/godebugs/table.go
+++ b/src/internal/godebugs/table.go
@@ -29,9 +29,12 @@
 	{Name: "gocachehash", Package: "cmd/go"},
 	{Name: "gocachetest", Package: "cmd/go"},
 	{Name: "gocacheverify", Package: "cmd/go"},
+	{Name: "gotypesalias", Package: "go/types"},
 	{Name: "http2client", Package: "net/http"},
 	{Name: "http2debug", Package: "net/http", Opaque: true},
 	{Name: "http2server", Package: "net/http"},
+	{Name: "httplaxcontentlength", Package: "net/http", Changed: 22, Old: "1"},
+	{Name: "httpmuxgo121", Package: "net/http", Changed: 22, Old: "1"},
 	{Name: "installgoroot", Package: "go/build"},
 	{Name: "jstmpllitinterp", Package: "html/template"},
 	//{Name: "multipartfiles", Package: "mime/multipart"},
@@ -42,9 +45,13 @@
 	{Name: "panicnil", Package: "runtime", Changed: 21, Old: "1"},
 	{Name: "randautoseed", Package: "math/rand"},
 	{Name: "tarinsecurepath", Package: "archive/tar"},
+	{Name: "tls10server", Package: "crypto/tls", Changed: 22, Old: "1"},
 	{Name: "tlsmaxrsasize", Package: "crypto/tls"},
+	{Name: "tlsrsakex", Package: "crypto/tls", Changed: 22, Old: "1"},
+	{Name: "tlsunsafeekm", Package: "crypto/tls", Changed: 22, Old: "1"},
 	{Name: "x509sha1", Package: "crypto/x509"},
 	{Name: "x509usefallbackroots", Package: "crypto/x509"},
+	{Name: "x509usepolicies", Package: "crypto/x509"},
 	{Name: "zipinsecurepath", Package: "archive/zip"},
 }
 
@@ -54,7 +61,7 @@
 	lo := 0
 	hi := len(All)
 	for lo < hi {
-		m := lo + (hi-lo)>>1
+		m := int(uint(lo+hi) >> 1)
 		mid := All[m].Name
 		if name == mid {
 			return &All[m]
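
The midpoint is now computed as int(uint(lo+hi) >> 1), the standard form that stays correct even if lo+hi were to overflow a signed int. A small standalone binary search using the same idiom over a few of the names from the table above:

	package main

	import "fmt"

	func main() {
		names := []string{"gotypesalias", "http2client", "panicnil", "tlsrsakex"} // sorted, as All is
		target := "panicnil"
		lo, hi := 0, len(names)
		for lo < hi {
			m := int(uint(lo+hi) >> 1) // overflow-safe midpoint, as in the change above
			switch {
			case names[m] == target:
				fmt.Println("found at", m) // found at 2
				return
			case names[m] < target:
				lo = m + 1
			default:
				hi = m
			}
		}
		fmt.Println("not found")
	}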
diff --git a/src/internal/goexperiment/exp_allocheaders_off.go b/src/internal/goexperiment/exp_allocheaders_off.go
new file mode 100644
index 0000000..72c702f
--- /dev/null
+++ b/src/internal/goexperiment/exp_allocheaders_off.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.allocheaders
+
+package goexperiment
+
+const AllocHeaders = false
+const AllocHeadersInt = 0
diff --git a/src/internal/goexperiment/exp_allocheaders_on.go b/src/internal/goexperiment/exp_allocheaders_on.go
new file mode 100644
index 0000000..f9f2965
--- /dev/null
+++ b/src/internal/goexperiment/exp_allocheaders_on.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.allocheaders
+
+package goexperiment
+
+const AllocHeaders = true
+const AllocHeadersInt = 1
diff --git a/src/internal/goexperiment/exp_arenas_off.go b/src/internal/goexperiment/exp_arenas_off.go
index 9e40ebc..01f5332 100644
--- a/src/internal/goexperiment/exp_arenas_off.go
+++ b/src/internal/goexperiment/exp_arenas_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.arenas
-// +build !goexperiment.arenas
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_arenas_on.go b/src/internal/goexperiment/exp_arenas_on.go
index 92ef748..609dfbc 100644
--- a/src/internal/goexperiment/exp_arenas_on.go
+++ b/src/internal/goexperiment/exp_arenas_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.arenas
-// +build goexperiment.arenas
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_boringcrypto_off.go b/src/internal/goexperiment/exp_boringcrypto_off.go
index 020c75b..de71267 100644
--- a/src/internal/goexperiment/exp_boringcrypto_off.go
+++ b/src/internal/goexperiment/exp_boringcrypto_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.boringcrypto
-// +build !goexperiment.boringcrypto
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_boringcrypto_on.go b/src/internal/goexperiment/exp_boringcrypto_on.go
index 1454329..ce476fa 100644
--- a/src/internal/goexperiment/exp_boringcrypto_on.go
+++ b/src/internal/goexperiment/exp_boringcrypto_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.boringcrypto
-// +build goexperiment.boringcrypto
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_cacheprog_off.go b/src/internal/goexperiment/exp_cacheprog_off.go
index 29aa869..276855c 100644
--- a/src/internal/goexperiment/exp_cacheprog_off.go
+++ b/src/internal/goexperiment/exp_cacheprog_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.cacheprog
-// +build !goexperiment.cacheprog
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_cacheprog_on.go b/src/internal/goexperiment/exp_cacheprog_on.go
index 121b299..b959dd6 100644
--- a/src/internal/goexperiment/exp_cacheprog_on.go
+++ b/src/internal/goexperiment/exp_cacheprog_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.cacheprog
-// +build goexperiment.cacheprog
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_cgocheck2_off.go b/src/internal/goexperiment/exp_cgocheck2_off.go
index 77aa538..e99ad07 100644
--- a/src/internal/goexperiment/exp_cgocheck2_off.go
+++ b/src/internal/goexperiment/exp_cgocheck2_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.cgocheck2
-// +build !goexperiment.cgocheck2
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_cgocheck2_on.go b/src/internal/goexperiment/exp_cgocheck2_on.go
index 6201249..f6d1790 100644
--- a/src/internal/goexperiment/exp_cgocheck2_on.go
+++ b/src/internal/goexperiment/exp_cgocheck2_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.cgocheck2
-// +build goexperiment.cgocheck2
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_coverageredesign_off.go b/src/internal/goexperiment/exp_coverageredesign_off.go
index 95d3a6c..2c33177 100644
--- a/src/internal/goexperiment/exp_coverageredesign_off.go
+++ b/src/internal/goexperiment/exp_coverageredesign_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.coverageredesign
-// +build !goexperiment.coverageredesign
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_coverageredesign_on.go b/src/internal/goexperiment/exp_coverageredesign_on.go
index 330a234..3fc6c2f 100644
--- a/src/internal/goexperiment/exp_coverageredesign_on.go
+++ b/src/internal/goexperiment/exp_coverageredesign_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.coverageredesign
-// +build goexperiment.coverageredesign
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_exectracer2_off.go b/src/internal/goexperiment/exp_exectracer2_off.go
new file mode 100644
index 0000000..2f9c826
--- /dev/null
+++ b/src/internal/goexperiment/exp_exectracer2_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.exectracer2
+// +build !goexperiment.exectracer2
+
+package goexperiment
+
+const ExecTracer2 = false
+const ExecTracer2Int = 0
diff --git a/src/internal/goexperiment/exp_exectracer2_on.go b/src/internal/goexperiment/exp_exectracer2_on.go
new file mode 100644
index 0000000..f94a292
--- /dev/null
+++ b/src/internal/goexperiment/exp_exectracer2_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.exectracer2
+// +build goexperiment.exectracer2
+
+package goexperiment
+
+const ExecTracer2 = true
+const ExecTracer2Int = 1
diff --git a/src/internal/goexperiment/exp_fieldtrack_off.go b/src/internal/goexperiment/exp_fieldtrack_off.go
index e5e1326..ccced94 100644
--- a/src/internal/goexperiment/exp_fieldtrack_off.go
+++ b/src/internal/goexperiment/exp_fieldtrack_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.fieldtrack
-// +build !goexperiment.fieldtrack
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_fieldtrack_on.go b/src/internal/goexperiment/exp_fieldtrack_on.go
index 0d8c447..a497567 100644
--- a/src/internal/goexperiment/exp_fieldtrack_on.go
+++ b/src/internal/goexperiment/exp_fieldtrack_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.fieldtrack
-// +build goexperiment.fieldtrack
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_heapminimum512kib_off.go b/src/internal/goexperiment/exp_heapminimum512kib_off.go
index 09da431..d67c5bb 100644
--- a/src/internal/goexperiment/exp_heapminimum512kib_off.go
+++ b/src/internal/goexperiment/exp_heapminimum512kib_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.heapminimum512kib
-// +build !goexperiment.heapminimum512kib
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_heapminimum512kib_on.go b/src/internal/goexperiment/exp_heapminimum512kib_on.go
index bab684b..2d29c98 100644
--- a/src/internal/goexperiment/exp_heapminimum512kib_on.go
+++ b/src/internal/goexperiment/exp_heapminimum512kib_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.heapminimum512kib
-// +build goexperiment.heapminimum512kib
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_loopvar_off.go b/src/internal/goexperiment/exp_loopvar_off.go
index 1cd7ee1..cfede54 100644
--- a/src/internal/goexperiment/exp_loopvar_off.go
+++ b/src/internal/goexperiment/exp_loopvar_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.loopvar
-// +build !goexperiment.loopvar
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_loopvar_on.go b/src/internal/goexperiment/exp_loopvar_on.go
index e3c8980..e158e0a 100644
--- a/src/internal/goexperiment/exp_loopvar_on.go
+++ b/src/internal/goexperiment/exp_loopvar_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.loopvar
-// +build goexperiment.loopvar
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_newinliner_off.go b/src/internal/goexperiment/exp_newinliner_off.go
new file mode 100644
index 0000000..d94e736
--- /dev/null
+++ b/src/internal/goexperiment/exp_newinliner_off.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.newinliner
+
+package goexperiment
+
+const NewInliner = false
+const NewInlinerInt = 0
diff --git a/src/internal/goexperiment/exp_newinliner_on.go b/src/internal/goexperiment/exp_newinliner_on.go
new file mode 100644
index 0000000..6777dbc
--- /dev/null
+++ b/src/internal/goexperiment/exp_newinliner_on.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.newinliner
+
+package goexperiment
+
+const NewInliner = true
+const NewInlinerInt = 1
diff --git a/src/internal/goexperiment/exp_pagetrace_off.go b/src/internal/goexperiment/exp_pagetrace_off.go
index 789e883..142be47 100644
--- a/src/internal/goexperiment/exp_pagetrace_off.go
+++ b/src/internal/goexperiment/exp_pagetrace_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.pagetrace
-// +build !goexperiment.pagetrace
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_pagetrace_on.go b/src/internal/goexperiment/exp_pagetrace_on.go
index ea72b54..f3b1614 100644
--- a/src/internal/goexperiment/exp_pagetrace_on.go
+++ b/src/internal/goexperiment/exp_pagetrace_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.pagetrace
-// +build goexperiment.pagetrace
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_preemptibleloops_off.go b/src/internal/goexperiment/exp_preemptibleloops_off.go
index 7a26088..cddcc1b 100644
--- a/src/internal/goexperiment/exp_preemptibleloops_off.go
+++ b/src/internal/goexperiment/exp_preemptibleloops_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.preemptibleloops
-// +build !goexperiment.preemptibleloops
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_preemptibleloops_on.go b/src/internal/goexperiment/exp_preemptibleloops_on.go
index a9ca28c..7f474c0 100644
--- a/src/internal/goexperiment/exp_preemptibleloops_on.go
+++ b/src/internal/goexperiment/exp_preemptibleloops_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.preemptibleloops
-// +build goexperiment.preemptibleloops
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_rangefunc_off.go b/src/internal/goexperiment/exp_rangefunc_off.go
new file mode 100644
index 0000000..fc02820
--- /dev/null
+++ b/src/internal/goexperiment/exp_rangefunc_off.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.rangefunc
+
+package goexperiment
+
+const RangeFunc = false
+const RangeFuncInt = 0
diff --git a/src/internal/goexperiment/exp_rangefunc_on.go b/src/internal/goexperiment/exp_rangefunc_on.go
new file mode 100644
index 0000000..25e7bd3
--- /dev/null
+++ b/src/internal/goexperiment/exp_rangefunc_on.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.rangefunc
+
+package goexperiment
+
+const RangeFunc = true
+const RangeFuncInt = 1
diff --git a/src/internal/goexperiment/exp_regabiargs_off.go b/src/internal/goexperiment/exp_regabiargs_off.go
index 31a139b..a8c8536 100644
--- a/src/internal/goexperiment/exp_regabiargs_off.go
+++ b/src/internal/goexperiment/exp_regabiargs_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.regabiargs
-// +build !goexperiment.regabiargs
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_regabiargs_on.go b/src/internal/goexperiment/exp_regabiargs_on.go
index 9b26f3c..def3b94 100644
--- a/src/internal/goexperiment/exp_regabiargs_on.go
+++ b/src/internal/goexperiment/exp_regabiargs_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.regabiargs
-// +build goexperiment.regabiargs
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_regabiwrappers_off.go b/src/internal/goexperiment/exp_regabiwrappers_off.go
index bfa0fa3..a65ed36 100644
--- a/src/internal/goexperiment/exp_regabiwrappers_off.go
+++ b/src/internal/goexperiment/exp_regabiwrappers_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.regabiwrappers
-// +build !goexperiment.regabiwrappers
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_regabiwrappers_on.go b/src/internal/goexperiment/exp_regabiwrappers_on.go
index 11ffffb..d525c9a 100644
--- a/src/internal/goexperiment/exp_regabiwrappers_on.go
+++ b/src/internal/goexperiment/exp_regabiwrappers_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.regabiwrappers
-// +build goexperiment.regabiwrappers
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_staticlockranking_off.go b/src/internal/goexperiment/exp_staticlockranking_off.go
index 3d546c0..5fafff2 100644
--- a/src/internal/goexperiment/exp_staticlockranking_off.go
+++ b/src/internal/goexperiment/exp_staticlockranking_off.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build !goexperiment.staticlockranking
-// +build !goexperiment.staticlockranking
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/exp_staticlockranking_on.go b/src/internal/goexperiment/exp_staticlockranking_on.go
index 78188fb..dfd32a8 100644
--- a/src/internal/goexperiment/exp_staticlockranking_on.go
+++ b/src/internal/goexperiment/exp_staticlockranking_on.go
@@ -1,7 +1,6 @@
 // Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build goexperiment.staticlockranking
-// +build goexperiment.staticlockranking
 
 package goexperiment
 
diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go
index ae3cbaf..dacc4c3 100644
--- a/src/internal/goexperiment/flags.go
+++ b/src/internal/goexperiment/flags.go
@@ -109,4 +109,22 @@
 	// CacheProg adds support to cmd/go to use a child process to implement
 	// the build cache; see https://github.com/golang/go/issues/59719.
 	CacheProg bool
+
+	// NewInliner enables a new+improved version of the function
+	// inlining phase within the Go compiler.
+	NewInliner bool
+
+	// RangeFunc enables range over func.
+	RangeFunc bool
+
+	// Range enables range over int and func.
+	Range bool
+
+	// AllocHeaders enables a different, more efficient way for the GC to
+	// manage heap metadata.
+	AllocHeaders bool
+
+	// ExecTracer2 controls whether to use the new execution trace
+	// implementation.
+	ExecTracer2 bool
 }
diff --git a/src/internal/goexperiment/mkconsts.go b/src/internal/goexperiment/mkconsts.go
index 204ca9d..65c100f 100644
--- a/src/internal/goexperiment/mkconsts.go
+++ b/src/internal/goexperiment/mkconsts.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ignore
-// +build ignore
 
 // mkconsts generates const definition files for each GOEXPERIMENT.
 package main
@@ -52,13 +51,12 @@
 			data := fmt.Sprintf(`// Code generated by mkconsts.go. DO NOT EDIT.
 
 //go:build %s%s
-// +build %s%s
 
 package goexperiment
 
 const %s = %v
 const %sInt = %s
-`, pick(val, "!", ""), buildTag, pick(val, "!", ""), buildTag, f, val, f, pick(val, "0", "1"))
+`, pick(val, "!", ""), buildTag, f, val, f, pick(val, "0", "1"))
 			if err := os.WriteFile(name, []byte(data), 0666); err != nil {
 				log.Fatalf("writing %s: %v", name, err)
 			}
diff --git a/src/internal/gover/gover.go b/src/internal/gover/gover.go
new file mode 100644
index 0000000..2ad0684
--- /dev/null
+++ b/src/internal/gover/gover.go
@@ -0,0 +1,223 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gover implements support for Go toolchain versions like 1.21.0 and 1.21rc1.
+// (For historical reasons, Go does not use semver for its toolchains.)
+// This package provides the same basic analysis that golang.org/x/mod/semver does for semver.
+//
+// The go/version package should be imported instead of this one when possible.
+// Note that this package works on "1.21" while go/version works on "go1.21".
+package gover
+
+import (
+	"cmp"
+)
+
+// A Version is a parsed Go version: major[.Minor[.Patch]][kind[pre]]
+// The numbers are the original decimal strings to avoid integer overflows
+// and since there is very little actual math. (Probably overflow doesn't matter in practice,
+// but at the time this code was written, there was an existing test that used
+// go1.99999999999, which does not fit in an int on 32-bit platforms.
+// The "big decimal" representation avoids the problem entirely.)
+type Version struct {
+	Major string // decimal
+	Minor string // decimal or ""
+	Patch string // decimal or ""
+	Kind  string // "", "alpha", "beta", "rc"
+	Pre   string // decimal or ""
+}
+
+// Compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as toolchain versions.
+// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
+// Malformed versions compare less than well-formed versions and equal to each other.
+// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
+func Compare(x, y string) int {
+	vx := Parse(x)
+	vy := Parse(y)
+
+	if c := CmpInt(vx.Major, vy.Major); c != 0 {
+		return c
+	}
+	if c := CmpInt(vx.Minor, vy.Minor); c != 0 {
+		return c
+	}
+	if c := CmpInt(vx.Patch, vy.Patch); c != 0 {
+		return c
+	}
+	if c := cmp.Compare(vx.Kind, vy.Kind); c != 0 { // "" < alpha < beta < rc
+		return c
+	}
+	if c := CmpInt(vx.Pre, vy.Pre); c != 0 {
+		return c
+	}
+	return 0
+}
+
+// Max returns the maximum of x and y interpreted as toolchain versions,
+// compared using Compare.
+// If x and y compare equal, Max returns x.
+func Max(x, y string) string {
+	if Compare(x, y) < 0 {
+		return y
+	}
+	return x
+}
+
+// IsLang reports whether v denotes the overall Go language version
+// and not a specific release. Starting with the Go 1.21 release, "1.x" denotes
+// the overall language version; the first release is "1.x.0".
+// The distinction is important because the relative ordering is
+//
+//	1.21 < 1.21rc1 < 1.21.0
+//
+// meaning that Go 1.21rc1 and Go 1.21.0 will both handle go.mod files that
+// say "go 1.21", but Go 1.21rc1 will not handle files that say "go 1.21.0".
+func IsLang(x string) bool {
+	v := Parse(x)
+	return v != Version{} && v.Patch == "" && v.Kind == "" && v.Pre == ""
+}
+
+// Lang returns the Go language version. For example, Lang("1.2.3") == "1.2".
+func Lang(x string) string {
+	v := Parse(x)
+	if v.Minor == "" || v.Major == "1" && v.Minor == "0" {
+		return v.Major
+	}
+	return v.Major + "." + v.Minor
+}
+
+// IsValid reports whether the version x is valid.
+func IsValid(x string) bool {
+	return Parse(x) != Version{}
+}
+
+// Parse parses the Go version string x into a version.
+// It returns the zero version if x is malformed.
+func Parse(x string) Version {
+	var v Version
+
+	// Parse major version.
+	var ok bool
+	v.Major, x, ok = cutInt(x)
+	if !ok {
+		return Version{}
+	}
+	if x == "" {
+		// Interpret "1" as "1.0.0".
+		v.Minor = "0"
+		v.Patch = "0"
+		return v
+	}
+
+	// Parse . before minor version.
+	if x[0] != '.' {
+		return Version{}
+	}
+
+	// Parse minor version.
+	v.Minor, x, ok = cutInt(x[1:])
+	if !ok {
+		return Version{}
+	}
+	if x == "" {
+		// A missing patch is the same as "0" for older versions.
+		// Starting in Go 1.21, a missing patch is distinct from an explicit .0.
+		if CmpInt(v.Minor, "21") < 0 {
+			v.Patch = "0"
+		}
+		return v
+	}
+
+	// Parse patch if present.
+	if x[0] == '.' {
+		v.Patch, x, ok = cutInt(x[1:])
+		if !ok || x != "" {
+			// Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
+			// Allowing them would be a bit confusing because we already have:
+			//	1.21 < 1.21rc1
+			// But a prerelease of a patch would have the opposite effect:
+			//	1.21.3rc1 < 1.21.3
+			// We've never needed them before, so let's not start now.
+			return Version{}
+		}
+		return v
+	}
+
+	// Parse prerelease.
+	i := 0
+	for i < len(x) && (x[i] < '0' || '9' < x[i]) {
+		if x[i] < 'a' || 'z' < x[i] {
+			return Version{}
+		}
+		i++
+	}
+	if i == 0 {
+		return Version{}
+	}
+	v.Kind, x = x[:i], x[i:]
+	if x == "" {
+		return v
+	}
+	v.Pre, x, ok = cutInt(x)
+	if !ok || x != "" {
+		return Version{}
+	}
+
+	return v
+}
+
+// cutInt scans the leading decimal number at the start of x to an integer
+// and returns that value and the rest of the string.
+func cutInt(x string) (n, rest string, ok bool) {
+	i := 0
+	for i < len(x) && '0' <= x[i] && x[i] <= '9' {
+		i++
+	}
+	if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero
+		return "", "", false
+	}
+	return x[:i], x[i:], true
+}
+
+// CmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
+// (Copied from golang.org/x/mod/semver's compareInt.)
+func CmpInt(x, y string) int {
+	if x == y {
+		return 0
+	}
+	if len(x) < len(y) {
+		return -1
+	}
+	if len(x) > len(y) {
+		return +1
+	}
+	if x < y {
+		return -1
+	} else {
+		return +1
+	}
+}
+
+// DecInt returns the decimal string decremented by 1, or the empty string
+// if the decimal is all zeroes.
+// (Copied from golang.org/x/mod/module's decDecimal.)
+func DecInt(decimal string) string {
+	// Scan right to left turning 0s to 9s until you find a digit to decrement.
+	digits := []byte(decimal)
+	i := len(digits) - 1
+	for ; i >= 0 && digits[i] == '0'; i-- {
+		digits[i] = '9'
+	}
+	if i < 0 {
+		// decimal is all zeros
+		return ""
+	}
+	if i == 0 && digits[i] == '1' && len(digits) > 1 {
+		digits = digits[1:]
+	} else {
+		digits[i]--
+	}
+	return string(digits)
+}
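
To make the ordering rules above concrete, here is a hypothetical in-package example test (internal/gover cannot be imported from outside the standard library; this test is illustrative only and not part of the change), exercising the documented relation 1.21 < 1.21rc1 < 1.21.0:

package gover

import "testing"

// TestOrderingSketch is illustrative only.
func TestOrderingSketch(t *testing.T) {
	// Ascending per Compare: language version, then prereleases, then releases.
	order := []string{"1.21", "1.21rc1", "1.21.0", "1.21.1", "1.22"}
	for i := 0; i+1 < len(order); i++ {
		if Compare(order[i], order[i+1]) >= 0 {
			t.Errorf("Compare(%q, %q) >= 0, want < 0", order[i], order[i+1])
		}
	}
	if !IsLang("1.21") || IsLang("1.21.0") {
		t.Error(`IsLang: want true for "1.21", false for "1.21.0"`)
	}
}
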
diff --git a/src/internal/gover/gover_test.go b/src/internal/gover/gover_test.go
new file mode 100644
index 0000000..0edfb1f
--- /dev/null
+++ b/src/internal/gover/gover_test.go
@@ -0,0 +1,138 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gover
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestCompare(t *testing.T) { test2(t, compareTests, "Compare", Compare) }
+
+var compareTests = []testCase2[string, string, int]{
+	{"", "", 0},
+	{"x", "x", 0},
+	{"", "x", 0},
+	{"1", "1.1", -1},
+	{"1.5", "1.6", -1},
+	{"1.5", "1.10", -1},
+	{"1.6", "1.6.1", -1},
+	{"1.19", "1.19.0", 0},
+	{"1.19rc1", "1.19", -1},
+	{"1.20", "1.20.0", 0},
+	{"1.20rc1", "1.20", -1},
+	{"1.21", "1.21.0", -1},
+	{"1.21", "1.21rc1", -1},
+	{"1.21rc1", "1.21.0", -1},
+	{"1.6", "1.19", -1},
+	{"1.19", "1.19.1", -1},
+	{"1.19rc1", "1.19", -1},
+	{"1.19rc1", "1.19.1", -1},
+	{"1.19rc1", "1.19rc2", -1},
+	{"1.19.0", "1.19.1", -1},
+	{"1.19rc1", "1.19.0", -1},
+	{"1.19alpha3", "1.19beta2", -1},
+	{"1.19beta2", "1.19rc1", -1},
+	{"1.1", "1.99999999999999998", -1},
+	{"1.99999999999999998", "1.99999999999999999", -1},
+}
+
+func TestParse(t *testing.T) { test1(t, parseTests, "Parse", Parse) }
+
+var parseTests = []testCase1[string, Version]{
+	{"1", Version{"1", "0", "0", "", ""}},
+	{"1.2", Version{"1", "2", "0", "", ""}},
+	{"1.2.3", Version{"1", "2", "3", "", ""}},
+	{"1.2rc3", Version{"1", "2", "", "rc", "3"}},
+	{"1.20", Version{"1", "20", "0", "", ""}},
+	{"1.21", Version{"1", "21", "", "", ""}},
+	{"1.21rc3", Version{"1", "21", "", "rc", "3"}},
+	{"1.21.0", Version{"1", "21", "0", "", ""}},
+	{"1.24", Version{"1", "24", "", "", ""}},
+	{"1.24rc3", Version{"1", "24", "", "rc", "3"}},
+	{"1.24.0", Version{"1", "24", "0", "", ""}},
+	{"1.999testmod", Version{"1", "999", "", "testmod", ""}},
+	{"1.99999999999999999", Version{"1", "99999999999999999", "", "", ""}},
+}
+
+func TestLang(t *testing.T) { test1(t, langTests, "Lang", Lang) }
+
+var langTests = []testCase1[string, string]{
+	{"1.2rc3", "1.2"},
+	{"1.2.3", "1.2"},
+	{"1.2", "1.2"},
+	{"1", "1"},
+	{"1.999testmod", "1.999"},
+}
+
+func TestIsLang(t *testing.T) { test1(t, isLangTests, "IsLang", IsLang) }
+
+var isLangTests = []testCase1[string, bool]{
+	{"1.2rc3", false},
+	{"1.2.3", false},
+	{"1.999testmod", false},
+	{"1.22", true},
+	{"1.21", true},
+	{"1.20", false}, // == 1.20.0
+	{"1.19", false}, // == 1.19.0
+	{"1.3", false},  // == 1.3.0
+	{"1.2", false},  // == 1.2.0
+	{"1", false},    // == 1.0.0
+}
+
+func TestIsValid(t *testing.T) { test1(t, isValidTests, "IsValid", IsValid) }
+
+var isValidTests = []testCase1[string, bool]{
+	{"1.2rc3", true},
+	{"1.2.3", true},
+	{"1.999testmod", true},
+	{"1.600+auto", false},
+	{"1.22", true},
+	{"1.21.0", true},
+	{"1.21rc2", true},
+	{"1.21", true},
+	{"1.20.0", true},
+	{"1.20", true},
+	{"1.19", true},
+	{"1.3", true},
+	{"1.2", true},
+	{"1", true},
+}
+
+type testCase1[In, Out any] struct {
+	in  In
+	out Out
+}
+
+type testCase2[In1, In2, Out any] struct {
+	in1 In1
+	in2 In2
+	out Out
+}
+
+type testCase3[In1, In2, In3, Out any] struct {
+	in1 In1
+	in2 In2
+	in3 In3
+	out Out
+}
+
+func test1[In, Out any](t *testing.T, tests []testCase1[In, Out], name string, f func(In) Out) {
+	t.Helper()
+	for _, tt := range tests {
+		if out := f(tt.in); !reflect.DeepEqual(out, tt.out) {
+			t.Errorf("%s(%v) = %v, want %v", name, tt.in, out, tt.out)
+		}
+	}
+}
+
+func test2[In1, In2, Out any](t *testing.T, tests []testCase2[In1, In2, Out], name string, f func(In1, In2) Out) {
+	t.Helper()
+	for _, tt := range tests {
+		if out := f(tt.in1, tt.in2); !reflect.DeepEqual(out, tt.out) {
+			t.Errorf("%s(%+v, %+v) = %+v, want %+v", name, tt.in1, tt.in2, out, tt.out)
+		}
+	}
+}
diff --git a/src/internal/goversion/goversion.go b/src/internal/goversion/goversion.go
index 5a52f9e..770ef11 100644
--- a/src/internal/goversion/goversion.go
+++ b/src/internal/goversion/goversion.go
@@ -9,4 +9,4 @@
 //
 // It should be updated at the start of each development cycle to be
 // the version of the next Go 1.x release. See golang.org/issue/40705.
-const Version = 21
+const Version = 22
diff --git a/src/internal/intern/intern_test.go b/src/internal/intern/intern_test.go
index d1e409e..e87d7e4 100644
--- a/src/internal/intern/intern_test.go
+++ b/src/internal/intern/intern_test.go
@@ -171,9 +171,7 @@
 func clearMap() {
 	mu.Lock()
 	defer mu.Unlock()
-	for k := range valMap {
-		delete(valMap, k)
-	}
+	clear(valMap)
 }
 
 var (
diff --git a/src/internal/itoa/itoa.go b/src/internal/itoa/itoa.go
index c6062d9..4340ae0 100644
--- a/src/internal/itoa/itoa.go
+++ b/src/internal/itoa/itoa.go
@@ -31,3 +31,27 @@
 	buf[i] = byte('0' + val)
 	return string(buf[i:])
 }
+
+const hex = "0123456789abcdef"
+
+// Uitox converts val (a uint) to a hexadecimal string.
+func Uitox(val uint) string {
+	if val == 0 { // avoid string allocation
+		return "0x0"
+	}
+	var buf [20]byte // big enough for 64bit value base 16 + 0x
+	i := len(buf) - 1
+	for val >= 16 {
+		q := val / 16
+		buf[i] = hex[val%16]
+		i--
+		val = q
+	}
+	// val < 16
+	buf[i] = hex[val%16]
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+	return string(buf[i:])
+}
diff --git a/src/internal/itoa/itoa_test.go b/src/internal/itoa/itoa_test.go
index 71931c1..8bed888 100644
--- a/src/internal/itoa/itoa_test.go
+++ b/src/internal/itoa/itoa_test.go
@@ -38,3 +38,14 @@
 		}
 	}
 }
+
+func TestUitox(t *testing.T) {
+	tests := []uint{0, 1, 15, 100, 999, math.MaxUint32, uint(maxUint64)}
+	for _, tt := range tests {
+		got := itoa.Uitox(tt)
+		want := fmt.Sprintf("%#x", tt)
+		if want != got {
+			t.Fatalf("Uitox(%x) = %s, want %s", tt, got, want)
+		}
+	}
+}
diff --git a/src/internal/nettrace/nettrace.go b/src/internal/nettrace/nettrace.go
index 0a2bf92..7d46268 100644
--- a/src/internal/nettrace/nettrace.go
+++ b/src/internal/nettrace/nettrace.go
@@ -39,7 +39,7 @@
 	// goroutines.
 	ConnectStart func(network, addr string)
 
-	// ConnectStart is called after a Dial with the results, excluding
+	// ConnectDone is called after a Dial with the results, excluding
 	// Dials made during DNS lookups. It may also be called multiple
 	// times, like ConnectStart.
 	ConnectDone func(network, addr string, err error)
diff --git a/src/internal/platform/supported.go b/src/internal/platform/supported.go
index 230a952..82c66e2 100644
--- a/src/internal/platform/supported.go
+++ b/src/internal/platform/supported.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:generate go test . -run=TestGenerated -fix
+//go:generate go test . -run=^TestGenerated$ -fix
 
 package platform
 
@@ -38,7 +38,7 @@
 func MSanSupported(goos, goarch string) bool {
 	switch goos {
 	case "linux":
-		return goarch == "amd64" || goarch == "arm64"
+		return goarch == "amd64" || goarch == "arm64" || goarch == "loong64"
 	case "freebsd":
 		return goarch == "amd64"
 	default:
@@ -51,7 +51,7 @@
 func ASanSupported(goos, goarch string) bool {
 	switch goos {
 	case "linux":
-		return goarch == "arm64" || goarch == "amd64" || goarch == "riscv64" || goarch == "ppc64le"
+		return goarch == "arm64" || goarch == "amd64" || goarch == "loong64" || goarch == "riscv64" || goarch == "ppc64le"
 	default:
 		return false
 	}
@@ -85,9 +85,7 @@
 func MustLinkExternal(goos, goarch string, withCgo bool) bool {
 	if withCgo {
 		switch goarch {
-		case "loong64",
-			"mips", "mipsle", "mips64", "mips64le",
-			"riscv64":
+		case "loong64", "mips", "mipsle", "mips64", "mips64le":
 			// Internally linking cgo is incomplete on some architectures.
 			// https://go.dev/issue/14449
 			return true
@@ -99,7 +97,9 @@
 		case "ppc64":
 			// Big Endian PPC64 cgo internal linking is not implemented for aix or linux.
 			// https://go.dev/issue/8912
-			return true
+			if goos == "aix" || goos == "linux" {
+				return true
+			}
 		}
 
 		switch goos {
@@ -206,7 +206,7 @@
 
 	case "plugin":
 		switch platform {
-		case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/s390x", "linux/ppc64le",
+		case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/loong64", "linux/s390x", "linux/ppc64le",
 			"android/amd64", "android/386",
 			"darwin/amd64", "darwin/arm64",
 			"freebsd/amd64":
@@ -245,7 +245,7 @@
 		}
 		return true
 	case "darwin":
-		return goarch == "arm64"
+		return true
 	}
 	return false
 }
diff --git a/src/internal/platform/zosarch.go b/src/internal/platform/zosarch.go
index 7f5a290..7c3db53 100644
--- a/src/internal/platform/zosarch.go
+++ b/src/internal/platform/zosarch.go
@@ -49,6 +49,7 @@
 	{"openbsd", "arm64"},
 	{"openbsd", "mips64"},
 	{"openbsd", "ppc64"},
+	{"openbsd", "riscv64"},
 	{"plan9", "386"},
 	{"plan9", "amd64"},
 	{"plan9", "arm"},
@@ -101,7 +102,8 @@
 	{"openbsd", "arm"}:     {CgoSupported: true},
 	{"openbsd", "arm64"}:   {CgoSupported: true},
 	{"openbsd", "mips64"}:  {CgoSupported: true, Broken: true},
-	{"openbsd", "ppc64"}:   {Broken: true},
+	{"openbsd", "ppc64"}:   {},
+	{"openbsd", "riscv64"}: {Broken: true},
 	{"plan9", "386"}:       {},
 	{"plan9", "amd64"}:     {},
 	{"plan9", "arm"}:       {},
diff --git a/src/internal/poll/fd.go b/src/internal/poll/fd.go
index ef61d0c..4e038d0 100644
--- a/src/internal/poll/fd.go
+++ b/src/internal/poll/fd.go
@@ -81,3 +81,14 @@
 
 // TestHookDidWritev is a hook for testing writev.
 var TestHookDidWritev = func(wrote int) {}
+
+// String is an internal string type for methods and functions
+// that are not intended for use outside the standard library.
+//
+// Some packages in std that import internal/poll expose APIs (currently,
+// for example, net.rawConn) that are exported but meant only for internal
+// use. Using internal types such as poll.FD or poll.String in their
+// signatures makes those APIs uncallable from code outside the standard
+// library.
+type String string
diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go
index 9df39ed..2095a6a 100644
--- a/src/internal/poll/fd_windows.go
+++ b/src/internal/poll/fd_windows.go
@@ -1037,8 +1037,7 @@
 
 	var du windows.FILE_BASIC_INFO
 	du.FileAttributes = attrs
-	l := uint32(unsafe.Sizeof(d))
-	return windows.SetFileInformationByHandle(fd.Sysfd, windows.FileBasicInfo, uintptr(unsafe.Pointer(&du)), l)
+	return windows.SetFileInformationByHandle(fd.Sysfd, windows.FileBasicInfo, unsafe.Pointer(&du), uint32(unsafe.Sizeof(du)))
 }
 
 // Fchdir wraps syscall.Fchdir.
diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go
index 89315a8..0f55cad 100644
--- a/src/internal/poll/sendfile_bsd.go
+++ b/src/internal/poll/sendfile_bsd.go
@@ -13,18 +13,21 @@
 const maxSendfileSize int = 4 << 20
 
 // SendFile wraps the sendfile system call.
-func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error) {
+func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error, bool) {
 	if err := dstFD.writeLock(); err != nil {
-		return 0, err
+		return 0, err, false
 	}
 	defer dstFD.writeUnlock()
 	if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil {
-		return 0, err
+		return 0, err, false
 	}
 
 	dst := dstFD.Sysfd
-	var written int64
-	var err error
+	var (
+		written int64
+		err     error
+		handled = true
+	)
 	for remain > 0 {
 		n := maxSendfileSize
 		if int64(n) > remain {
@@ -52,8 +55,9 @@
 			// support) and syscall.EINVAL (fd types which
 			// don't implement sendfile)
 			err = err1
+			handled = false
 			break
 		}
 	}
-	return written, err
+	return written, err, handled
 }
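
The extra boolean result reports whether sendfile(2) handled the request at all, so std callers can distinguish "sendfile failed partway" from "sendfile could not be used for these descriptors". A hedged sketch of the intended call pattern (sendAll and fallbackCopy are made-up names, and the sketch glosses over offset bookkeeping a real caller would need):

// sendAll is illustrative only; internal/poll is importable just within std.
func sendAll(dst *poll.FD, src int, pos, remain int64) (int64, error) {
	written, err, handled := poll.SendFile(dst, src, pos, remain)
	if !handled {
		// sendfile(2) could not be used for these descriptors (for example
		// ENOTSUP or EINVAL), so fall back to a plain user-space copy.
		return fallbackCopy(dst, src, pos, remain) // hypothetical helper
	}
	return written, err
}
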
diff --git a/src/internal/poll/sendfile_solaris.go b/src/internal/poll/sendfile_solaris.go
index 7ae18f4..f9f685c 100644
--- a/src/internal/poll/sendfile_solaris.go
+++ b/src/internal/poll/sendfile_solaris.go
@@ -16,18 +16,21 @@
 const maxSendfileSize int = 4 << 20
 
 // SendFile wraps the sendfile system call.
-func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error) {
+func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error, bool) {
 	if err := dstFD.writeLock(); err != nil {
-		return 0, err
+		return 0, err, false
 	}
 	defer dstFD.writeUnlock()
 	if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil {
-		return 0, err
+		return 0, err, false
 	}
 
 	dst := dstFD.Sysfd
-	var written int64
-	var err error
+	var (
+		written int64
+		err     error
+		handled = true
+	)
 	for remain > 0 {
 		n := maxSendfileSize
 		if int64(n) > remain {
@@ -59,8 +62,9 @@
 			// support) and syscall.EINVAL (fd types which
 			// don't implement sendfile)
 			err = err1
+			handled = false
 			break
 		}
 	}
-	return written, err
+	return written, err, handled
 }
diff --git a/src/internal/poll/sockopt_windows.go b/src/internal/poll/sockopt_windows.go
index dd5fb70..f32bca4 100644
--- a/src/internal/poll/sockopt_windows.go
+++ b/src/internal/poll/sockopt_windows.go
@@ -6,15 +6,6 @@
 
 import "syscall"
 
-// Setsockopt wraps the setsockopt network call.
-func (fd *FD) Setsockopt(level, optname int32, optval *byte, optlen int32) error {
-	if err := fd.incref(); err != nil {
-		return err
-	}
-	defer fd.decref()
-	return syscall.Setsockopt(fd.Sysfd, level, optname, optval, optlen)
-}
-
 // WSAIoctl wraps the WSAIoctl network call.
 func (fd *FD) WSAIoctl(iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *syscall.Overlapped, completionRoutine uintptr) error {
 	if err := fd.incref(); err != nil {
diff --git a/src/internal/poll/splice_linux.go b/src/internal/poll/splice_linux.go
index 9505c5d..72cca34 100644
--- a/src/internal/poll/splice_linux.go
+++ b/src/internal/poll/splice_linux.go
@@ -13,6 +13,12 @@
 )
 
 const (
+	// spliceNonblock doesn't make the splice itself necessarily nonblocking
+	// (because the actual file descriptors that are spliced from/to may block
+	// unless they have the O_NONBLOCK flag set), but it makes the splice pipe
+	// operations nonblocking.
+	spliceNonblock = 0x2
+
 	// maxSpliceSize is the maximum amount of data Splice asks
 	// the kernel to move in a single call to splice(2).
 	// We use 1MB as Splice writes data through a pipe, and 1MB is the default maximum pipe buffer size,
@@ -89,7 +95,11 @@
 		return 0, err
 	}
 	for {
-		n, err := splice(pipefd, sock.Sysfd, max, 0)
+		// In theory, calling splice(2) with SPLICE_F_NONBLOCK could spin in an infinite loop here,
+		// because it could keep returning EAGAIN while the write end of the pipe is full.
+		// That is not a concern here: given how Splice sizes its transfers, the pipe buffer
+		// is always large enough to hold this data.
+		n, err := splice(pipefd, sock.Sysfd, max, spliceNonblock)
 		if err == syscall.EINTR {
 			continue
 		}
@@ -127,7 +137,14 @@
 	}
 	written := 0
 	for inPipe > 0 {
-		n, err := splice(sock.Sysfd, pipefd, inPipe, 0)
+		// In theory, calling splice(2) with SPLICE_F_NONBLOCK could spin in an infinite loop here,
+		// because it could keep returning EAGAIN while the read end of the pipe is empty.
+		// That is not a concern here: given how Splice fills the pipe, it already
+		// contains inPipe bytes of data.
+		n, err := splice(sock.Sysfd, pipefd, inPipe, spliceNonblock)
+		if err == syscall.EINTR {
+			continue
+		}
 		// Here, the condition n == 0 && err == nil should never be
 		// observed, since Splice controls the write side of the pipe.
 		if n > 0 {
diff --git a/src/internal/profile/encode.go b/src/internal/profile/encode.go
index 77d77f1..94d04bf 100644
--- a/src/internal/profile/encode.go
+++ b/src/internal/profile/encode.go
@@ -291,7 +291,7 @@
 	p.commentX = nil
 	p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
 	p.stringTable = nil
-	return nil
+	return err
 }
 
 func (p *ValueType) decoder() []decoder {
diff --git a/src/internal/profile/profile.go b/src/internal/profile/profile.go
index c779bb2..02d1bed 100644
--- a/src/internal/profile/profile.go
+++ b/src/internal/profile/profile.go
@@ -141,10 +141,14 @@
 		}
 		orig = data
 	}
-	if p, err = parseUncompressed(orig); err != nil {
-		if p, err = parseLegacy(orig); err != nil {
-			return nil, fmt.Errorf("parsing profile: %v", err)
-		}
+
+	var lErr error
+	p, pErr := parseUncompressed(orig)
+	if pErr != nil {
+		p, lErr = parseLegacy(orig)
+	}
+	if pErr != nil && lErr != nil {
+		return nil, fmt.Errorf("parsing profile: not a valid proto profile (%w) or legacy profile (%w)", pErr, lErr)
 	}
 
 	if err := p.CheckValid(); err != nil {
@@ -155,6 +159,7 @@
 
 var errUnrecognized = fmt.Errorf("unrecognized profile format")
 var errMalformed = fmt.Errorf("malformed profile format")
+var ErrNoData = fmt.Errorf("empty input file")
 
 func parseLegacy(data []byte) (*Profile, error) {
 	parsers := []func([]byte) (*Profile, error){
@@ -180,6 +185,10 @@
 }
 
 func parseUncompressed(data []byte) (*Profile, error) {
+	if len(data) == 0 {
+		return nil, ErrNoData
+	}
+
 	p := &Profile{}
 	if err := unmarshal(data, p); err != nil {
 		return nil, err
diff --git a/src/internal/profile/profile_test.go b/src/internal/profile/profile_test.go
index e1963f3..84158b6 100644
--- a/src/internal/profile/profile_test.go
+++ b/src/internal/profile/profile_test.go
@@ -5,24 +5,9 @@
 package profile
 
 import (
-	"bytes"
 	"testing"
 )
 
-func TestEmptyProfile(t *testing.T) {
-	var buf bytes.Buffer
-	p, err := Parse(&buf)
-	if err != nil {
-		t.Error("Want no error, got", err)
-	}
-	if p == nil {
-		t.Fatal("Want a valid profile, got <nil>")
-	}
-	if !p.Empty() {
-		t.Errorf("Profile should be empty, got %#v", p)
-	}
-}
-
 func TestParseContention(t *testing.T) {
 	tests := []struct {
 		name    string
diff --git a/src/internal/race/norace.go b/src/internal/race/norace.go
index 67b1305..da65048 100644
--- a/src/internal/race/norace.go
+++ b/src/internal/race/norace.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !race
-// +build !race
 
 package race
 
diff --git a/src/internal/race/race.go b/src/internal/race/race.go
index 40f2c99..d2c7e53e4 100644
--- a/src/internal/race/race.go
+++ b/src/internal/race/race.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build race
-// +build race
 
 package race
 
diff --git a/src/internal/reflectlite/all_test.go b/src/internal/reflectlite/all_test.go
index 820b4ae..a78f9ae 100644
--- a/src/internal/reflectlite/all_test.go
+++ b/src/internal/reflectlite/all_test.go
@@ -809,15 +809,15 @@
 		var i any
 		var v Value
 
-		// We can uncomment this when compiler escape analysis
-		// is good enough to see that the integer assigned to i
-		// does not escape and therefore need not be allocated.
-		//
-		// i = 42 + j
-		// v = ValueOf(i)
-		// if int(v.Int()) != 42+j {
-		// 	panic("wrong int")
-		// }
+		i = []int{j, j, j}
+		v = ValueOf(i)
+		if v.Len() != 3 {
+			panic("wrong length")
+		}
+	})
+	noAlloc(t, 100, func(j int) {
+		var i any
+		var v Value
 
 		i = func(j int) int { return j }
 		v = ValueOf(i)
diff --git a/src/internal/reflectlite/export_test.go b/src/internal/reflectlite/export_test.go
index 88be6e2..ea937b8 100644
--- a/src/internal/reflectlite/export_test.go
+++ b/src/internal/reflectlite/export_test.go
@@ -14,7 +14,7 @@
 	if v.kind() != Struct {
 		panic(&ValueError{"reflect.Value.Field", v.kind()})
 	}
-	tt := (*structType)(unsafe.Pointer(v.typ))
+	tt := (*structType)(unsafe.Pointer(v.typ()))
 	if uint(i) >= uint(len(tt.Fields)) {
 		panic("reflect: Field index out of range")
 	}
diff --git a/src/internal/reflectlite/type.go b/src/internal/reflectlite/type.go
index f13ce8f..e585d24 100644
--- a/src/internal/reflectlite/type.go
+++ b/src/internal/reflectlite/type.go
@@ -233,11 +233,15 @@
 // resolveNameOff resolves a name offset from a base pointer.
 // The (*rtype).nameOff method is a convenience wrapper for this function.
 // Implemented in the runtime package.
+//
+//go:noescape
 func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
 
 // resolveTypeOff resolves an *rtype offset from a base type.
 // The (*rtype).typeOff method is a convenience wrapper for this function.
 // Implemented in the runtime package.
+//
+//go:noescape
 func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
 
 func (t rtype) nameOff(off nameOff) abi.Name {
@@ -395,7 +399,9 @@
 // If i is a nil interface value, TypeOf returns nil.
 func TypeOf(i any) Type {
 	eface := *(*emptyInterface)(unsafe.Pointer(&i))
-	return toType(eface.typ)
+	// Noescape so this doesn't make i escape. See the comment
+	// at Value.typ for why this is safe.
+	return toType((*abi.Type)(noescape(unsafe.Pointer(eface.typ))))
 }
 
 func (t rtype) Implements(u Type) bool {
diff --git a/src/internal/reflectlite/value.go b/src/internal/reflectlite/value.go
index eb79894..c47e5ea 100644
--- a/src/internal/reflectlite/value.go
+++ b/src/internal/reflectlite/value.go
@@ -34,8 +34,9 @@
 // Using == on two Values does not compare the underlying values
 // they represent.
 type Value struct {
-	// typ holds the type of the value represented by a Value.
-	typ *abi.Type
+	// typ_ holds the type of the value represented by a Value.
+	// Access using the typ method to avoid escape of v.
+	typ_ *abi.Type
 
 	// Pointer-valued data or, if flagIndir is set, pointer to data.
 	// Valid when either flagIndir is set or typ.pointers() is true.
@@ -87,10 +88,19 @@
 	return 0
 }
 
+func (v Value) typ() *abi.Type {
+	// Types are either static (for compiler-created types) or
+	// heap-allocated but always reachable (for reflection-created
+	// types, held in the central map). So there is no need to
+	// escape types. noescape here helps avoid unnecessary escape
+	// of v.
+	return (*abi.Type)(noescape(unsafe.Pointer(v.typ_)))
+}
+
 // pointer returns the underlying pointer represented by v.
 // v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer
 func (v Value) pointer() unsafe.Pointer {
-	if v.typ.Size() != goarch.PtrSize || !v.typ.Pointers() {
+	if v.typ().Size() != goarch.PtrSize || !v.typ().Pointers() {
 		panic("can't call pointer on a non-pointer Value")
 	}
 	if v.flag&flagIndir != 0 {
@@ -101,7 +111,7 @@
 
 // packEface converts v to the empty interface.
 func packEface(v Value) any {
-	t := v.typ
+	t := v.typ()
 	var i any
 	e := (*emptyInterface)(unsafe.Pointer(&i))
 	// First, fill in the data portion of the interface.
@@ -228,7 +238,7 @@
 	switch k {
 	case abi.Interface:
 		var eface any
-		if v.typ.NumMethod() == 0 {
+		if v.typ().NumMethod() == 0 {
 			eface = *(*any)(v.ptr)
 		} else {
 			eface = (any)(*(*interface {
@@ -249,7 +259,7 @@
 		if ptr == nil {
 			return Value{}
 		}
-		tt := (*ptrType)(unsafe.Pointer(v.typ))
+		tt := (*ptrType)(unsafe.Pointer(v.typ()))
 		typ := tt.Elem
 		fl := v.flag&flagRO | flagIndir | flagAddr
 		fl |= flag(typ.Kind())
@@ -322,7 +332,11 @@
 }
 
 // implemented in runtime:
+
+//go:noescape
 func chanlen(unsafe.Pointer) int
+
+//go:noescape
 func maplen(unsafe.Pointer) int
 
 // Len returns v's length.
@@ -331,7 +345,7 @@
 	k := v.kind()
 	switch k {
 	case abi.Array:
-		tt := (*arrayType)(unsafe.Pointer(v.typ))
+		tt := (*arrayType)(unsafe.Pointer(v.typ()))
 		return int(tt.Len)
 	case abi.Chan:
 		return chanlen(v.pointer())
@@ -349,10 +363,10 @@
 
 // NumMethod returns the number of exported methods in the value's method set.
 func (v Value) numMethod() int {
-	if v.typ == nil {
+	if v.typ() == nil {
 		panic(&ValueError{"reflectlite.Value.NumMethod", abi.Invalid})
 	}
-	return v.typ.NumMethod()
+	return v.typ().NumMethod()
 }
 
 // Set assigns x to the value v.
@@ -365,9 +379,9 @@
 	if v.kind() == abi.Interface {
 		target = v.ptr
 	}
-	x = x.assignTo("reflectlite.Set", v.typ, target)
+	x = x.assignTo("reflectlite.Set", v.typ(), target)
 	if x.flag&flagIndir != 0 {
-		typedmemmove(v.typ, v.ptr, x.ptr)
+		typedmemmove(v.typ(), v.ptr, x.ptr)
 	} else {
 		*(*unsafe.Pointer)(v.ptr) = x.ptr
 	}
@@ -380,7 +394,7 @@
 		panic(&ValueError{"reflectlite.Value.Type", abi.Invalid})
 	}
 	// Method values not supported.
-	return toRType(v.typ)
+	return toRType(v.typ())
 }
 
 /*
@@ -388,6 +402,8 @@
  */
 
 // implemented in package runtime
+
+//go:noescape
 func unsafe_New(*abi.Type) unsafe.Pointer
 
 // ValueOf returns a new Value initialized to the concrete value
@@ -396,13 +412,6 @@
 	if i == nil {
 		return Value{}
 	}
-
-	// TODO: Maybe allow contents of a Value to live on the stack.
-	// For now we make the contents always escape to the heap. It
-	// makes life easier in a few places (see chanrecv/mapassign
-	// comment below).
-	escapes(i)
-
 	return unpackEface(i)
 }
 
@@ -415,14 +424,14 @@
 	// }
 
 	switch {
-	case directlyAssignable(dst, v.typ):
+	case directlyAssignable(dst, v.typ()):
 		// Overwrite type so that they match.
 		// Same memory layout, so no harm done.
 		fl := v.flag&(flagAddr|flagIndir) | v.flag.ro()
 		fl |= flag(dst.Kind())
 		return Value{dst, v.ptr, fl}
 
-	case implements(dst, v.typ):
+	case implements(dst, v.typ()):
 		if target == nil {
 			target = unsafe_New(dst)
 		}
@@ -442,7 +451,7 @@
 	}
 
 	// Failed.
-	panic(context + ": value of type " + toRType(v.typ).String() + " is not assignable to type " + toRType(dst).String())
+	panic(context + ": value of type " + toRType(v.typ()).String() + " is not assignable to type " + toRType(dst).String())
 }
 
 // arrayAt returns the i-th element of p,
@@ -476,3 +485,9 @@
 	b bool
 	x any
 }
+
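+// noescape hides a pointer from escape analysis: it returns p unchanged, but
+// the xor with zero keeps the compiler from treating the result as an alias
+// of the input, so passing a pointer through noescape does not force it to
+// escape. Use with care; the runtime carries the same helper.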
+//go:nosplit
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+	x := uintptr(p)
+	return unsafe.Pointer(x ^ 0)
+}
diff --git a/src/internal/saferio/io.go b/src/internal/saferio/io.go
index 66cc044..5c428e6 100644
--- a/src/internal/saferio/io.go
+++ b/src/internal/saferio/io.go
@@ -11,7 +11,7 @@
 
 import (
 	"io"
-	"reflect"
+	"unsafe"
 )
 
 // chunk is an arbitrary limit on how much memory we are willing
@@ -102,34 +102,31 @@
 	return buf, nil
 }
 
-// SliceCap returns the capacity to use when allocating a slice.
+// SliceCapWithSize returns the capacity to use when allocating a slice.
 // After the slice is allocated with the capacity, it should be
 // built using append. This will avoid allocating too much memory
 // if the capacity is large and incorrect.
 //
 // A negative result means that the value is always too big.
-//
-// The element type is described by passing a pointer to a value of that type.
-// This would ideally use generics, but this code is built with
-// the bootstrap compiler which need not support generics.
-// We use a pointer so that we can handle slices of interface type.
-func SliceCap(v any, c uint64) int {
+func SliceCapWithSize(size, c uint64) int {
 	if int64(c) < 0 || c != uint64(int(c)) {
 		return -1
 	}
-	typ := reflect.TypeOf(v)
-	if typ.Kind() != reflect.Ptr {
-		panic("SliceCap called with non-pointer type")
-	}
-	size := uint64(typ.Elem().Size())
 	if size > 0 && c > (1<<64-1)/size {
 		return -1
 	}
 	if c*size > chunk {
-		c = uint64(chunk / size)
+		c = chunk / size
 		if c == 0 {
 			c = 1
 		}
 	}
 	return int(c)
 }
+
+// SliceCap is like SliceCapWithSize but using generics.
+func SliceCap[E any](c uint64) int {
+	var v E
+	size := uint64(unsafe.Sizeof(v))
+	return SliceCapWithSize(size, c)
+}
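
A usage sketch for the generic form (hypothetical std-internal caller; Record and readRecord are made-up names): cap the up-front allocation and let append grow the slice, so an attacker-controlled count in a file header cannot force a huge allocation.

func readRecords(r io.Reader, count uint64) ([]Record, error) {
	c := saferio.SliceCap[Record](count)
	if c < 0 {
		return nil, errors.New("record count too large")
	}
	recs := make([]Record, 0, c) // bounded up-front allocation
	for i := uint64(0); i < count; i++ {
		rec, err := readRecord(r) // made-up decoder for one Record
		if err != nil {
			return nil, err
		}
		recs = append(recs, rec)
	}
	return recs, nil
}
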
diff --git a/src/internal/saferio/io_test.go b/src/internal/saferio/io_test.go
index 356c9eb..696356f 100644
--- a/src/internal/saferio/io_test.go
+++ b/src/internal/saferio/io_test.go
@@ -105,14 +105,14 @@
 
 func TestSliceCap(t *testing.T) {
 	t.Run("small", func(t *testing.T) {
-		c := SliceCap((*int)(nil), 10)
+		c := SliceCap[int](10)
 		if c != 10 {
 			t.Errorf("got capacity %d, want %d", c, 10)
 		}
 	})
 
 	t.Run("large", func(t *testing.T) {
-		c := SliceCap((*byte)(nil), 1<<30)
+		c := SliceCap[byte](1 << 30)
 		if c < 0 {
 			t.Error("SliceCap failed unexpectedly")
 		} else if c == 1<<30 {
@@ -121,14 +121,14 @@
 	})
 
 	t.Run("maxint", func(t *testing.T) {
-		c := SliceCap((*byte)(nil), 1<<63)
+		c := SliceCap[byte](1 << 63)
 		if c >= 0 {
 			t.Errorf("SliceCap returned %d, expected failure", c)
 		}
 	})
 
 	t.Run("overflow", func(t *testing.T) {
-		c := SliceCap((*int64)(nil), 1<<62)
+		c := SliceCap[int64](1 << 62)
 		if c >= 0 {
 			t.Errorf("SliceCap returned %d, expected failure", c)
 		}
diff --git a/src/internal/syscall/unix/asm_darwin.s b/src/internal/syscall/unix/asm_darwin.s
index 8662c28..10d16ce 100644
--- a/src/internal/syscall/unix/asm_darwin.s
+++ b/src/internal/syscall/unix/asm_darwin.s
@@ -4,7 +4,6 @@
 
 #include "textflag.h"
 
-TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0; JMP libc_getentropy(SB)
 TEXT ·libc_getaddrinfo_trampoline(SB),NOSPLIT,$0-0; JMP libc_getaddrinfo(SB)
 TEXT ·libc_freeaddrinfo_trampoline(SB),NOSPLIT,$0-0; JMP libc_freeaddrinfo(SB)
 TEXT ·libc_getnameinfo_trampoline(SB),NOSPLIT,$0-0; JMP libc_getnameinfo(SB)
diff --git a/src/internal/syscall/unix/at_fstatat.go b/src/internal/syscall/unix/at_fstatat.go
index 8f25fe9..25de336 100644
--- a/src/internal/syscall/unix/at_fstatat.go
+++ b/src/internal/syscall/unix/at_fstatat.go
@@ -24,5 +24,4 @@
 	}
 
 	return nil
-
 }
diff --git a/src/internal/syscall/unix/at_sysnum_dragonfly.go b/src/internal/syscall/unix/at_sysnum_dragonfly.go
index b7ed3f7..9ac1f91 100644
--- a/src/internal/syscall/unix/at_sysnum_dragonfly.go
+++ b/src/internal/syscall/unix/at_sysnum_dragonfly.go
@@ -10,7 +10,11 @@
 const openatTrap uintptr = syscall.SYS_OPENAT
 const fstatatTrap uintptr = syscall.SYS_FSTATAT
 
-const AT_REMOVEDIR = 0x2
-const AT_SYMLINK_NOFOLLOW = 0x1
+const (
+	AT_EACCESS          = 0x4
+	AT_FDCWD            = 0xfffafdcd
+	AT_REMOVEDIR        = 0x2
+	AT_SYMLINK_NOFOLLOW = 0x1
 
-const UTIME_OMIT = -0x1
+	UTIME_OMIT = -0x1
+)
diff --git a/src/internal/syscall/unix/at_sysnum_freebsd.go b/src/internal/syscall/unix/at_sysnum_freebsd.go
index 9cd5da6..f74961d 100644
--- a/src/internal/syscall/unix/at_sysnum_freebsd.go
+++ b/src/internal/syscall/unix/at_sysnum_freebsd.go
@@ -7,6 +7,8 @@
 import "syscall"
 
 const (
+	AT_EACCESS          = 0x100
+	AT_FDCWD            = -0x64
 	AT_REMOVEDIR        = 0x800
 	AT_SYMLINK_NOFOLLOW = 0x200
 
diff --git a/src/internal/syscall/unix/at_sysnum_netbsd.go b/src/internal/syscall/unix/at_sysnum_netbsd.go
index becc1bd..ffb1d2e 100644
--- a/src/internal/syscall/unix/at_sysnum_netbsd.go
+++ b/src/internal/syscall/unix/at_sysnum_netbsd.go
@@ -10,7 +10,11 @@
 const openatTrap uintptr = syscall.SYS_OPENAT
 const fstatatTrap uintptr = syscall.SYS_FSTATAT
 
-const AT_REMOVEDIR = 0x800
-const AT_SYMLINK_NOFOLLOW = 0x200
+const (
+	AT_EACCESS          = 0x100
+	AT_FDCWD            = -0x64
+	AT_REMOVEDIR        = 0x800
+	AT_SYMLINK_NOFOLLOW = 0x200
 
-const UTIME_OMIT = (1 << 30) - 2
+	UTIME_OMIT = (1 << 30) - 2
+)
diff --git a/src/internal/syscall/unix/eaccess_bsd.go b/src/internal/syscall/unix/eaccess_bsd.go
new file mode 100644
index 0000000..3411e3a
--- /dev/null
+++ b/src/internal/syscall/unix/eaccess_bsd.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || netbsd
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func faccessat(dirfd int, path string, mode uint32, flags int) error {
+	p, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	_, _, errno := syscall.Syscall6(syscall.SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(mode), uintptr(flags), 0, 0)
+	if errno != 0 {
+		return errno
+	}
+	return err
+}
+
+func Eaccess(path string, mode uint32) error {
+	return faccessat(AT_FDCWD, path, mode, AT_EACCESS)
+}
diff --git a/src/internal/syscall/unix/eaccess_other.go b/src/internal/syscall/unix/eaccess_other.go
index 23be118..19a2be5 100644
--- a/src/internal/syscall/unix/eaccess_other.go
+++ b/src/internal/syscall/unix/eaccess_other.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix && !linux
+//go:build unix && !dragonfly && !freebsd && !linux && !netbsd
 
 package unix
 
diff --git a/src/internal/syscall/unix/getentropy_darwin.s b/src/internal/syscall/unix/getentropy_darwin.s
new file mode 100644
index 0000000..f41e0fe
--- /dev/null
+++ b/src/internal/syscall/unix/getentropy_darwin.s
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin && !ios
+
+#include "textflag.h"
+
+TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0; JMP libc_getentropy(SB)
diff --git a/src/internal/syscall/unix/getentropy_netbsd.go b/src/internal/syscall/unix/getentropy_netbsd.go
new file mode 100644
index 0000000..02bac1b
--- /dev/null
+++ b/src/internal/syscall/unix/getentropy_netbsd.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build netbsd
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	_CTL_KERN = 1
+
+	_KERN_ARND = 81
+)
+
+func GetEntropy(p []byte) error {
+	mib := [2]uint32{_CTL_KERN, _KERN_ARND}
+	n := uintptr(len(p))
+	_, _, errno := syscall.Syscall6(
+		syscall.SYS___SYSCTL,
+		uintptr(unsafe.Pointer(&mib[0])),
+		uintptr(len(mib)),
+		uintptr(unsafe.Pointer(&p[0])), // olddata
+		uintptr(unsafe.Pointer(&n)),    // &oldlen
+		uintptr(unsafe.Pointer(nil)),   // newdata
+		0)                              // newlen
+	if errno != 0 {
+		return syscall.Errno(errno)
+	}
+	if n != uintptr(len(p)) {
+		return syscall.EINVAL
+	}
+	return nil
+}
diff --git a/src/internal/syscall/unix/getrandom_netbsd.go b/src/internal/syscall/unix/getrandom_netbsd.go
deleted file mode 100644
index c83e3b2..0000000
--- a/src/internal/syscall/unix/getrandom_netbsd.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package unix
-
-import (
-	"sync"
-	"sync/atomic"
-	"syscall"
-	"unsafe"
-)
-
-// NetBSD getrandom system call number.
-const getrandomTrap uintptr = 91
-
-var getrandomUnsupported atomic.Bool
-
-// GetRandomFlag is a flag supported by the getrandom system call.
-type GetRandomFlag uintptr
-
-// GetRandom calls the getrandom system call.
-func GetRandom(p []byte, flags GetRandomFlag) (n int, err error) {
-	if len(p) == 0 {
-		return 0, nil
-	}
-	if getrandomUnsupported.Load() {
-		return 0, syscall.ENOSYS
-	}
-	// getrandom(2) was added in NetBSD 10.0
-	if getOSRevision() < 1000000000 {
-		getrandomUnsupported.Store(true)
-		return 0, syscall.ENOSYS
-	}
-	r1, _, errno := syscall.Syscall(getrandomTrap,
-		uintptr(unsafe.Pointer(&p[0])),
-		uintptr(len(p)),
-		uintptr(flags))
-	if errno != 0 {
-		if errno == syscall.ENOSYS {
-			getrandomUnsupported.Store(true)
-		}
-		return 0, errno
-	}
-	return int(r1), nil
-}
-
-var (
-	osrevisionOnce sync.Once
-	osrevision     uint32
-)
-
-func getOSRevision() uint32 {
-	osrevisionOnce.Do(func() { osrevision, _ = syscall.SysctlUint32("kern.osrevision") })
-	return osrevision
-}
diff --git a/src/internal/syscall/unix/net_darwin.go b/src/internal/syscall/unix/net_darwin.go
index 5601b49..bbaa94b 100644
--- a/src/internal/syscall/unix/net_darwin.go
+++ b/src/internal/syscall/unix/net_darwin.go
@@ -19,6 +19,7 @@
 	EAI_AGAIN    = 2
 	EAI_NODATA   = 7
 	EAI_NONAME   = 8
+	EAI_SERVICE  = 9
 	EAI_SYSTEM   = 11
 	EAI_OVERFLOW = 14
 
diff --git a/src/internal/syscall/unix/pidfd_linux.go b/src/internal/syscall/unix/pidfd_linux.go
new file mode 100644
index 0000000..02cfaa0
--- /dev/null
+++ b/src/internal/syscall/unix/pidfd_linux.go
@@ -0,0 +1,15 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "syscall"
+
+func PidFDSendSignal(pidfd uintptr, s syscall.Signal) error {
+	_, _, errno := syscall.Syscall(pidfdSendSignalTrap, pidfd, uintptr(s), 0)
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
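
PidFDSendSignal issues the raw pidfd_send_signal system call (number 424 on most ports, added
to the sysnum files below). Outside the standard library the same operation is available
through golang.org/x/sys/unix; the sketch pairs it with pidfd_open and uses signal 0 as an
existence check, which is the usual way to avoid PID-reuse races. The program below is an
assumption-laden illustration, not code from the patch.

	// Sketch (assumes golang.org/x/sys/unix and a recent Linux kernel).
	package main

	import (
		"fmt"
		"os"

		"golang.org/x/sys/unix"
	)

	func main() {
		pidfd, err := unix.PidfdOpen(os.Getpid(), 0)
		if err != nil {
			fmt.Println("pidfd_open:", err)
			return
		}
		defer unix.Close(pidfd)
		// Signal 0 performs the permission and existence checks without
		// delivering a signal.
		fmt.Println("pidfd_send_signal:", unix.PidfdSendSignal(pidfd, 0, nil, 0))
	}
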
diff --git a/src/internal/syscall/unix/sysnum_linux_386.go b/src/internal/syscall/unix/sysnum_linux_386.go
index 2bda08c..9f750a1 100644
--- a/src/internal/syscall/unix/sysnum_linux_386.go
+++ b/src/internal/syscall/unix/sysnum_linux_386.go
@@ -5,6 +5,7 @@
 package unix
 
 const (
-	getrandomTrap     uintptr = 355
-	copyFileRangeTrap uintptr = 377
+	getrandomTrap       uintptr = 355
+	copyFileRangeTrap   uintptr = 377
+	pidfdSendSignalTrap uintptr = 424
 )
diff --git a/src/internal/syscall/unix/sysnum_linux_amd64.go b/src/internal/syscall/unix/sysnum_linux_amd64.go
index ae5239e..706898d 100644
--- a/src/internal/syscall/unix/sysnum_linux_amd64.go
+++ b/src/internal/syscall/unix/sysnum_linux_amd64.go
@@ -5,6 +5,7 @@
 package unix
 
 const (
-	getrandomTrap     uintptr = 318
-	copyFileRangeTrap uintptr = 326
+	getrandomTrap       uintptr = 318
+	copyFileRangeTrap   uintptr = 326
+	pidfdSendSignalTrap uintptr = 424
 )
diff --git a/src/internal/syscall/unix/sysnum_linux_arm.go b/src/internal/syscall/unix/sysnum_linux_arm.go
index acaec05..c00644b 100644
--- a/src/internal/syscall/unix/sysnum_linux_arm.go
+++ b/src/internal/syscall/unix/sysnum_linux_arm.go
@@ -5,6 +5,7 @@
 package unix
 
 const (
-	getrandomTrap     uintptr = 384
-	copyFileRangeTrap uintptr = 391
+	getrandomTrap       uintptr = 384
+	copyFileRangeTrap   uintptr = 391
+	pidfdSendSignalTrap uintptr = 424
 )
diff --git a/src/internal/syscall/unix/sysnum_linux_generic.go b/src/internal/syscall/unix/sysnum_linux_generic.go
index 8c132c6..bf25428 100644
--- a/src/internal/syscall/unix/sysnum_linux_generic.go
+++ b/src/internal/syscall/unix/sysnum_linux_generic.go
@@ -11,6 +11,7 @@
 // means only arm64 loong64 and riscv64 use the standard numbers.
 
 const (
-	getrandomTrap     uintptr = 278
-	copyFileRangeTrap uintptr = 285
+	getrandomTrap       uintptr = 278
+	copyFileRangeTrap   uintptr = 285
+	pidfdSendSignalTrap uintptr = 424
 )
diff --git a/src/internal/syscall/unix/sysnum_linux_mips64x.go b/src/internal/syscall/unix/sysnum_linux_mips64x.go
index bca526d..6a9e238 100644
--- a/src/internal/syscall/unix/sysnum_linux_mips64x.go
+++ b/src/internal/syscall/unix/sysnum_linux_mips64x.go
@@ -7,6 +7,7 @@
 package unix
 
 const (
-	getrandomTrap     uintptr = 5313
-	copyFileRangeTrap uintptr = 5320
+	getrandomTrap       uintptr = 5313
+	copyFileRangeTrap   uintptr = 5320
+	pidfdSendSignalTrap uintptr = 5424
 )
diff --git a/src/internal/syscall/unix/sysnum_linux_mipsx.go b/src/internal/syscall/unix/sysnum_linux_mipsx.go
index c86195e..22d38f1 100644
--- a/src/internal/syscall/unix/sysnum_linux_mipsx.go
+++ b/src/internal/syscall/unix/sysnum_linux_mipsx.go
@@ -7,6 +7,7 @@
 package unix
 
 const (
-	getrandomTrap     uintptr = 4353
-	copyFileRangeTrap uintptr = 4360
+	getrandomTrap       uintptr = 4353
+	copyFileRangeTrap   uintptr = 4360
+	pidfdSendSignalTrap uintptr = 4424
 )
diff --git a/src/internal/syscall/unix/sysnum_linux_ppc64x.go b/src/internal/syscall/unix/sysnum_linux_ppc64x.go
index a4dcf2b..945ec28 100644
--- a/src/internal/syscall/unix/sysnum_linux_ppc64x.go
+++ b/src/internal/syscall/unix/sysnum_linux_ppc64x.go
@@ -7,6 +7,7 @@
 package unix
 
 const (
-	getrandomTrap     uintptr = 359
-	copyFileRangeTrap uintptr = 379
+	getrandomTrap       uintptr = 359
+	copyFileRangeTrap   uintptr = 379
+	pidfdSendSignalTrap uintptr = 424
 )
diff --git a/src/internal/syscall/unix/sysnum_linux_s390x.go b/src/internal/syscall/unix/sysnum_linux_s390x.go
index bf2c01e..2c74343 100644
--- a/src/internal/syscall/unix/sysnum_linux_s390x.go
+++ b/src/internal/syscall/unix/sysnum_linux_s390x.go
@@ -5,6 +5,7 @@
 package unix
 
 const (
-	getrandomTrap     uintptr = 349
-	copyFileRangeTrap uintptr = 375
+	getrandomTrap       uintptr = 349
+	copyFileRangeTrap   uintptr = 375
+	pidfdSendSignalTrap uintptr = 424
 )
diff --git a/src/internal/syscall/windows/exec_windows_test.go b/src/internal/syscall/windows/exec_windows_test.go
index 3311da5..72550b5 100644
--- a/src/internal/syscall/windows/exec_windows_test.go
+++ b/src/internal/syscall/windows/exec_windows_test.go
@@ -29,7 +29,7 @@
 		return
 	}
 
-	cmd := exec.Command(os.Args[0], "-test.run=TestRunAtLowIntegrity", "--")
+	cmd := exec.Command(os.Args[0], "-test.run=^TestRunAtLowIntegrity$", "--")
 	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
 
 	token, err := getIntegrityLevelToken(sidWilLow)
diff --git a/src/internal/syscall/windows/registry/key.go b/src/internal/syscall/windows/registry/key.go
index ce6397f..b95fa8d 100644
--- a/src/internal/syscall/windows/registry/key.go
+++ b/src/internal/syscall/windows/registry/key.go
@@ -31,7 +31,7 @@
 
 const (
 	// Registry key security and access rights.
-	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
+	// See https://learn.microsoft.com/en-us/windows/win32/sysinfo/registry-key-security-and-access-rights
 	// for details.
 	ALL_ACCESS         = 0xf003f
 	CREATE_LINK        = 0x00020
@@ -98,7 +98,7 @@
 
 	names := make([]string, 0)
 	// Registry key size limit is 255 bytes and described there:
-	// https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
+	// https://learn.microsoft.com/en-us/windows/win32/sysinfo/registry-element-size-limits
 	buf := make([]uint16, 256) //plus extra room for terminating zero byte
 loopItems:
 	for i := uint32(0); ; i++ {
diff --git a/src/internal/syscall/windows/registry/registry_test.go b/src/internal/syscall/windows/registry/registry_test.go
index 278b0b4..afe7a5d 100644
--- a/src/internal/syscall/windows/registry/registry_test.go
+++ b/src/internal/syscall/windows/registry/registry_test.go
@@ -599,12 +599,6 @@
 }
 
 func TestGetMUIStringValue(t *testing.T) {
-	if err := registry.LoadRegLoadMUIString(); err != nil {
-		t.Skip("regLoadMUIString not supported; skipping")
-	}
-	if err := procGetDynamicTimeZoneInformation.Find(); err != nil {
-		t.Skipf("%s not supported; skipping", procGetDynamicTimeZoneInformation.Name)
-	}
 	var dtzi DynamicTimezoneinformation
 	if _, err := GetDynamicTimeZoneInformation(&dtzi); err != nil {
 		t.Fatal(err)
diff --git a/src/internal/syscall/windows/registry/syscall.go b/src/internal/syscall/windows/registry/syscall.go
index cb315ad..8e73091 100644
--- a/src/internal/syscall/windows/registry/syscall.go
+++ b/src/internal/syscall/windows/registry/syscall.go
@@ -17,10 +17,6 @@
 	_ERROR_NO_MORE_ITEMS syscall.Errno = 259
 )
 
-func LoadRegLoadMUIString() error {
-	return procRegLoadMUIStringW.Find()
-}
-
 //sys	regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
 //sys	regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
 //sys	regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
diff --git a/src/internal/syscall/windows/registry/value.go b/src/internal/syscall/windows/registry/value.go
index 7dfee03..67b1144 100644
--- a/src/internal/syscall/windows/registry/value.go
+++ b/src/internal/syscall/windows/registry/value.go
@@ -115,9 +115,6 @@
 // the specified value name associated with an open key k.
 // If the value name doesn't exist or the localized string value
 // can't be resolved, GetMUIStringValue returns ErrNotExist.
-// GetMUIStringValue panics if the system doesn't support
-// regLoadMUIString; use LoadRegLoadMUIString to check if
-// regLoadMUIString is supported before calling this function.
 func (k Key) GetMUIStringValue(name string) (string, error) {
 	pname, err := syscall.UTF16PtrFromString(name)
 	if err != nil {
@@ -244,7 +241,7 @@
 		if len(data) != 8 {
 			return 0, typ, errors.New("QWORD value is not 8 bytes long")
 		}
-		return uint64(*(*uint64)(unsafe.Pointer(&data[0]))), QWORD, nil
+		return *(*uint64)(unsafe.Pointer(&data[0])), QWORD, nil
 	default:
 		return 0, typ, ErrUnexpectedType
 	}
diff --git a/src/internal/syscall/windows/reparse_windows.go b/src/internal/syscall/windows/reparse_windows.go
index 6e11139..02f32c6 100644
--- a/src/internal/syscall/windows/reparse_windows.go
+++ b/src/internal/syscall/windows/reparse_windows.go
@@ -12,13 +12,14 @@
 const (
 	FSCTL_SET_REPARSE_POINT    = 0x000900A4
 	IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
+	IO_REPARSE_TAG_DEDUP       = 0x80000013
 
 	SYMLINK_FLAG_RELATIVE = 1
 )
 
 // These structures are described
-// in https://msdn.microsoft.com/en-us/library/cc232007.aspx
-// and https://msdn.microsoft.com/en-us/library/cc232006.aspx.
+// in https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-fscc/ca069dad-ed16-42aa-b057-b6b207f447cc
+// and https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-fscc/b41f1cbf-10df-4a47-98d4-1c52a833d913.
 
 type REPARSE_DATA_BUFFER struct {
 	ReparseTag        uint32
diff --git a/src/internal/syscall/windows/security_windows.go b/src/internal/syscall/windows/security_windows.go
index 4a2dfc0..c8c5cbe 100644
--- a/src/internal/syscall/windows/security_windows.go
+++ b/src/internal/syscall/windows/security_windows.go
@@ -126,3 +126,9 @@
 }
 
 //sys	NetUserGetLocalGroups(serverName *uint16, userName *uint16, level uint32, flags uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32) (neterr error) = netapi32.NetUserGetLocalGroups
+
+// GetSystemDirectory retrieves the path to the current location of the system
+// directory, which is typically, though not always, `C:\Windows\System32`.
+//
+//go:linkname GetSystemDirectory
+func GetSystemDirectory() string // Implemented in runtime package.
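
The declaration above has no Go body: the //go:linkname directive exposes the symbol so the
runtime, which already determines the system directory for its own DLL loading, can supply the
implementation. A hedged sketch of what the runtime-side half of such a pairing looks like;
every name below is hypothetical, not the actual runtime code.

	// Hypothetical runtime-side provider (illustrative names only).
	//
	//go:linkname windows_GetSystemDirectory internal/syscall/windows.GetSystemDirectory
	func windows_GetSystemDirectory() string {
		return cachedSystemDirectory // assumed: a string filled in during osinit
	}
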
diff --git a/src/internal/syscall/windows/symlink_windows.go b/src/internal/syscall/windows/symlink_windows.go
index b64d058..62e3f79 100644
--- a/src/internal/syscall/windows/symlink_windows.go
+++ b/src/internal/syscall/windows/symlink_windows.go
@@ -9,6 +9,8 @@
 const (
 	ERROR_INVALID_PARAMETER syscall.Errno = 87
 
+	FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000
+
 	// symlink support for CreateSymbolicLink() starting with Windows 10 (1703, v10.0.14972)
 	SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE = 0x2
 
diff --git a/src/internal/syscall/windows/syscall_windows.go b/src/internal/syscall/windows/syscall_windows.go
index 892b878..d10e30c 100644
--- a/src/internal/syscall/windows/syscall_windows.go
+++ b/src/internal/syscall/windows/syscall_windows.go
@@ -129,11 +129,22 @@
 }
 
 type FILE_BASIC_INFO struct {
-	CreationTime   syscall.Filetime
-	LastAccessTime syscall.Filetime
-	LastWriteTime  syscall.Filetime
-	ChangedTime    syscall.Filetime
+	CreationTime   int64
+	LastAccessTime int64
+	LastWriteTime  int64
+	ChangedTime    int64
 	FileAttributes uint32
+
+	// Pad out to 8-byte alignment.
+	//
+	// Without this padding, TestChmod fails due to an argument validation error
+	// in SetFileInformationByHandle on windows/386.
+	//
+	// https://learn.microsoft.com/en-us/cpp/build/reference/zp-struct-member-alignment?view=msvc-170
+	// says that “The C/C++ headers in the Windows SDK assume the platform's
+	// default alignment is used.” What we see here is padding rather than
+	// alignment, but maybe it is related.
+	_ uint32
 }
 
 const (
@@ -150,7 +161,7 @@
 //sys	GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW
 //sys	MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW
 //sys	GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) = kernel32.GetModuleFileNameW
-//sys	SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf uintptr, bufsize uint32) (err error) = kernel32.SetFileInformationByHandle
+//sys	SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf unsafe.Pointer, bufsize uint32) (err error) = kernel32.SetFileInformationByHandle
 //sys	VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQuery
 //sys	GetTempPath2(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPath2W
 
@@ -333,7 +344,11 @@
 //sys	MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
 //sys	GetCurrentThread() (pseudoHandle syscall.Handle, err error) = kernel32.GetCurrentThread
 
-const STYPE_DISKTREE = 0x00
+// Constants from lmshare.h
+const (
+	STYPE_DISKTREE  = 0x00
+	STYPE_TEMPORARY = 0x40000000
+)
 
 type SHARE_INFO_2 struct {
 	Netname     *uint16
@@ -361,10 +376,6 @@
 
 //sys	GetFinalPathNameByHandle(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW
 
-func LoadGetFinalPathNameByHandle() error {
-	return procGetFinalPathNameByHandleW.Find()
-}
-
 func ErrorLoadingGetTempPath2() error {
 	return procGetTempPath2W.Find()
 }
@@ -373,7 +384,62 @@
 //sys	DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock
 //sys	CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle syscall.Handle, err error) = kernel32.CreateEventW
 
-//sys	RtlGenRandom(buf []byte) (err error) = advapi32.SystemFunction036
+//sys	ProcessPrng(buf []byte) (err error) = bcryptprimitives.ProcessPrng
+
+type FILE_ID_BOTH_DIR_INFO struct {
+	NextEntryOffset uint32
+	FileIndex       uint32
+	CreationTime    syscall.Filetime
+	LastAccessTime  syscall.Filetime
+	LastWriteTime   syscall.Filetime
+	ChangeTime      syscall.Filetime
+	EndOfFile       uint64
+	AllocationSize  uint64
+	FileAttributes  uint32
+	FileNameLength  uint32
+	EaSize          uint32
+	ShortNameLength uint32
+	ShortName       [12]uint16
+	FileID          uint64
+	FileName        [1]uint16
+}
+
+type FILE_FULL_DIR_INFO struct {
+	NextEntryOffset uint32
+	FileIndex       uint32
+	CreationTime    syscall.Filetime
+	LastAccessTime  syscall.Filetime
+	LastWriteTime   syscall.Filetime
+	ChangeTime      syscall.Filetime
+	EndOfFile       uint64
+	AllocationSize  uint64
+	FileAttributes  uint32
+	FileNameLength  uint32
+	EaSize          uint32
+	FileName        [1]uint16
+}
+
+//sys	GetVolumeInformationByHandle(file syscall.Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW
+//sys	GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW
 
 //sys	RtlLookupFunctionEntry(pc uintptr, baseAddress *uintptr, table *byte) (ret uintptr) = kernel32.RtlLookupFunctionEntry
 //sys	RtlVirtualUnwind(handlerType uint32, baseAddress uintptr, pc uintptr, entry uintptr, ctxt uintptr, data *uintptr, frame *uintptr, ctxptrs *byte) (ret uintptr) = kernel32.RtlVirtualUnwind
+
+type SERVICE_STATUS struct {
+	ServiceType             uint32
+	CurrentState            uint32
+	ControlsAccepted        uint32
+	Win32ExitCode           uint32
+	ServiceSpecificExitCode uint32
+	CheckPoint              uint32
+	WaitHint                uint32
+}
+
+const (
+	SERVICE_RUNNING      = 4
+	SERVICE_QUERY_STATUS = 4
+)
+
+//sys	OpenService(mgr syscall.Handle, serviceName *uint16, access uint32) (handle syscall.Handle, err error) = advapi32.OpenServiceW
+//sys	QueryServiceStatus(hService syscall.Handle, lpServiceStatus *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus
+//sys	OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle syscall.Handle, err error) [failretval==0] = advapi32.OpenSCManagerW
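
Two related changes in this file are easy to miss: FILE_BASIC_INFO now stores its timestamps as
plain int64 values (100-nanosecond intervals, matching the Windows LARGE_INTEGER layout) with an
explicit trailing pad so the struct size is 8-byte aligned on 386, and SetFileInformationByHandle
now takes an unsafe.Pointer instead of a uintptr, so the buffer is visibly kept alive and tracked
across the call. A hedged sketch of a call site inside the standard library; FileBasicInfo is
assumed to be the matching information-class constant, h an open file handle, and zeroed
timestamps mean "leave unchanged".

	// Sketch: set only the attributes, leaving all timestamps untouched.
	var info windows.FILE_BASIC_INFO
	info.FileAttributes = syscall.FILE_ATTRIBUTE_READONLY
	err := windows.SetFileInformationByHandle(
		h, windows.FileBasicInfo,
		unsafe.Pointer(&info), uint32(unsafe.Sizeof(info)),
	)
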
diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go
index a5c246b..931f157 100644
--- a/src/internal/syscall/windows/zsyscall_windows.go
+++ b/src/internal/syscall/windows/zsyscall_windows.go
@@ -37,50 +37,56 @@
 }
 
 var (
-	modadvapi32 = syscall.NewLazyDLL(sysdll.Add("advapi32.dll"))
-	modiphlpapi = syscall.NewLazyDLL(sysdll.Add("iphlpapi.dll"))
-	modkernel32 = syscall.NewLazyDLL(sysdll.Add("kernel32.dll"))
-	modnetapi32 = syscall.NewLazyDLL(sysdll.Add("netapi32.dll"))
-	modpsapi    = syscall.NewLazyDLL(sysdll.Add("psapi.dll"))
-	moduserenv  = syscall.NewLazyDLL(sysdll.Add("userenv.dll"))
-	modws2_32   = syscall.NewLazyDLL(sysdll.Add("ws2_32.dll"))
+	modadvapi32         = syscall.NewLazyDLL(sysdll.Add("advapi32.dll"))
+	modbcryptprimitives = syscall.NewLazyDLL(sysdll.Add("bcryptprimitives.dll"))
+	modiphlpapi         = syscall.NewLazyDLL(sysdll.Add("iphlpapi.dll"))
+	modkernel32         = syscall.NewLazyDLL(sysdll.Add("kernel32.dll"))
+	modnetapi32         = syscall.NewLazyDLL(sysdll.Add("netapi32.dll"))
+	modpsapi            = syscall.NewLazyDLL(sysdll.Add("psapi.dll"))
+	moduserenv          = syscall.NewLazyDLL(sysdll.Add("userenv.dll"))
+	modws2_32           = syscall.NewLazyDLL(sysdll.Add("ws2_32.dll"))
 
-	procAdjustTokenPrivileges        = modadvapi32.NewProc("AdjustTokenPrivileges")
-	procDuplicateTokenEx             = modadvapi32.NewProc("DuplicateTokenEx")
-	procImpersonateSelf              = modadvapi32.NewProc("ImpersonateSelf")
-	procLookupPrivilegeValueW        = modadvapi32.NewProc("LookupPrivilegeValueW")
-	procOpenThreadToken              = modadvapi32.NewProc("OpenThreadToken")
-	procRevertToSelf                 = modadvapi32.NewProc("RevertToSelf")
-	procSetTokenInformation          = modadvapi32.NewProc("SetTokenInformation")
-	procSystemFunction036            = modadvapi32.NewProc("SystemFunction036")
-	procGetAdaptersAddresses         = modiphlpapi.NewProc("GetAdaptersAddresses")
-	procCreateEventW                 = modkernel32.NewProc("CreateEventW")
-	procGetACP                       = modkernel32.NewProc("GetACP")
-	procGetComputerNameExW           = modkernel32.NewProc("GetComputerNameExW")
-	procGetConsoleCP                 = modkernel32.NewProc("GetConsoleCP")
-	procGetCurrentThread             = modkernel32.NewProc("GetCurrentThread")
-	procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
-	procGetFinalPathNameByHandleW    = modkernel32.NewProc("GetFinalPathNameByHandleW")
-	procGetModuleFileNameW           = modkernel32.NewProc("GetModuleFileNameW")
-	procGetTempPath2W                = modkernel32.NewProc("GetTempPath2W")
-	procLockFileEx                   = modkernel32.NewProc("LockFileEx")
-	procModule32FirstW               = modkernel32.NewProc("Module32FirstW")
-	procModule32NextW                = modkernel32.NewProc("Module32NextW")
-	procMoveFileExW                  = modkernel32.NewProc("MoveFileExW")
-	procMultiByteToWideChar          = modkernel32.NewProc("MultiByteToWideChar")
-	procRtlLookupFunctionEntry       = modkernel32.NewProc("RtlLookupFunctionEntry")
-	procRtlVirtualUnwind             = modkernel32.NewProc("RtlVirtualUnwind")
-	procSetFileInformationByHandle   = modkernel32.NewProc("SetFileInformationByHandle")
-	procUnlockFileEx                 = modkernel32.NewProc("UnlockFileEx")
-	procVirtualQuery                 = modkernel32.NewProc("VirtualQuery")
-	procNetShareAdd                  = modnetapi32.NewProc("NetShareAdd")
-	procNetShareDel                  = modnetapi32.NewProc("NetShareDel")
-	procNetUserGetLocalGroups        = modnetapi32.NewProc("NetUserGetLocalGroups")
-	procGetProcessMemoryInfo         = modpsapi.NewProc("GetProcessMemoryInfo")
-	procCreateEnvironmentBlock       = moduserenv.NewProc("CreateEnvironmentBlock")
-	procDestroyEnvironmentBlock      = moduserenv.NewProc("DestroyEnvironmentBlock")
-	procGetProfilesDirectoryW        = moduserenv.NewProc("GetProfilesDirectoryW")
-	procWSASocketW                   = modws2_32.NewProc("WSASocketW")
+	procAdjustTokenPrivileges             = modadvapi32.NewProc("AdjustTokenPrivileges")
+	procDuplicateTokenEx                  = modadvapi32.NewProc("DuplicateTokenEx")
+	procImpersonateSelf                   = modadvapi32.NewProc("ImpersonateSelf")
+	procLookupPrivilegeValueW             = modadvapi32.NewProc("LookupPrivilegeValueW")
+	procOpenSCManagerW                    = modadvapi32.NewProc("OpenSCManagerW")
+	procOpenServiceW                      = modadvapi32.NewProc("OpenServiceW")
+	procOpenThreadToken                   = modadvapi32.NewProc("OpenThreadToken")
+	procQueryServiceStatus                = modadvapi32.NewProc("QueryServiceStatus")
+	procRevertToSelf                      = modadvapi32.NewProc("RevertToSelf")
+	procSetTokenInformation               = modadvapi32.NewProc("SetTokenInformation")
+	procProcessPrng                       = modbcryptprimitives.NewProc("ProcessPrng")
+	procGetAdaptersAddresses              = modiphlpapi.NewProc("GetAdaptersAddresses")
+	procCreateEventW                      = modkernel32.NewProc("CreateEventW")
+	procGetACP                            = modkernel32.NewProc("GetACP")
+	procGetComputerNameExW                = modkernel32.NewProc("GetComputerNameExW")
+	procGetConsoleCP                      = modkernel32.NewProc("GetConsoleCP")
+	procGetCurrentThread                  = modkernel32.NewProc("GetCurrentThread")
+	procGetFileInformationByHandleEx      = modkernel32.NewProc("GetFileInformationByHandleEx")
+	procGetFinalPathNameByHandleW         = modkernel32.NewProc("GetFinalPathNameByHandleW")
+	procGetModuleFileNameW                = modkernel32.NewProc("GetModuleFileNameW")
+	procGetTempPath2W                     = modkernel32.NewProc("GetTempPath2W")
+	procGetVolumeInformationByHandleW     = modkernel32.NewProc("GetVolumeInformationByHandleW")
+	procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW")
+	procLockFileEx                        = modkernel32.NewProc("LockFileEx")
+	procModule32FirstW                    = modkernel32.NewProc("Module32FirstW")
+	procModule32NextW                     = modkernel32.NewProc("Module32NextW")
+	procMoveFileExW                       = modkernel32.NewProc("MoveFileExW")
+	procMultiByteToWideChar               = modkernel32.NewProc("MultiByteToWideChar")
+	procRtlLookupFunctionEntry            = modkernel32.NewProc("RtlLookupFunctionEntry")
+	procRtlVirtualUnwind                  = modkernel32.NewProc("RtlVirtualUnwind")
+	procSetFileInformationByHandle        = modkernel32.NewProc("SetFileInformationByHandle")
+	procUnlockFileEx                      = modkernel32.NewProc("UnlockFileEx")
+	procVirtualQuery                      = modkernel32.NewProc("VirtualQuery")
+	procNetShareAdd                       = modnetapi32.NewProc("NetShareAdd")
+	procNetShareDel                       = modnetapi32.NewProc("NetShareDel")
+	procNetUserGetLocalGroups             = modnetapi32.NewProc("NetUserGetLocalGroups")
+	procGetProcessMemoryInfo              = modpsapi.NewProc("GetProcessMemoryInfo")
+	procCreateEnvironmentBlock            = moduserenv.NewProc("CreateEnvironmentBlock")
+	procDestroyEnvironmentBlock           = moduserenv.NewProc("DestroyEnvironmentBlock")
+	procGetProfilesDirectoryW             = moduserenv.NewProc("GetProfilesDirectoryW")
+	procWSASocketW                        = modws2_32.NewProc("WSASocketW")
 )
 
 func adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) {
@@ -120,6 +126,24 @@
 	return
 }
 
+func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle syscall.Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
+	handle = syscall.Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func OpenService(mgr syscall.Handle, serviceName *uint16, access uint32) (handle syscall.Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
+	handle = syscall.Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func OpenThreadToken(h syscall.Handle, access uint32, openasself bool, token *syscall.Token) (err error) {
 	var _p0 uint32
 	if openasself {
@@ -132,6 +156,14 @@
 	return
 }
 
+func QueryServiceStatus(hService syscall.Handle, lpServiceStatus *SERVICE_STATUS) (err error) {
+	r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(hService), uintptr(unsafe.Pointer(lpServiceStatus)), 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func RevertToSelf() (err error) {
 	r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
 	if r1 == 0 {
@@ -148,12 +180,12 @@
 	return
 }
 
-func RtlGenRandom(buf []byte) (err error) {
+func ProcessPrng(buf []byte) (err error) {
 	var _p0 *byte
 	if len(buf) > 0 {
 		_p0 = &buf[0]
 	}
-	r1, _, e1 := syscall.Syscall(procSystemFunction036.Addr(), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0)
+	r1, _, e1 := syscall.Syscall(procProcessPrng.Addr(), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0)
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -241,6 +273,22 @@
 	return
 }
 
+func GetVolumeInformationByHandle(file syscall.Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
+	r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) {
 	r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
 	if r1 == 0 {
@@ -294,7 +342,7 @@
 	return
 }
 
-func SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf uintptr, bufsize uint32) (err error) {
+func SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf unsafe.Pointer, bufsize uint32) (err error) {
 	r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(fileInformationClass), uintptr(buf), uintptr(bufsize), 0, 0)
 	if r1 == 0 {
 		err = errnoErr(e1)
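
The regenerated wrappers reflect switching the internal random source from advapi32's
RtlGenRandom (SystemFunction036) to ProcessPrng in bcryptprimitives.dll; the call shape is
unchanged, it simply fills a caller-provided buffer. A minimal usage sketch (windows here is
internal/syscall/windows, so this only illustrates the call shape):

	// Sketch: draw 32 bytes from the system CSPRNG.
	key := make([]byte, 32)
	if err := windows.ProcessPrng(key); err != nil {
		// Not expected once bcryptprimitives.dll is loaded, but the
		// generated wrapper still reports a zero return as an error.
		panic(err)
	}
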
diff --git a/src/internal/sysinfo/cpuinfo_linux.go b/src/internal/sysinfo/cpuinfo_linux.go
new file mode 100644
index 0000000..aff63b3
--- /dev/null
+++ b/src/internal/sysinfo/cpuinfo_linux.go
@@ -0,0 +1,77 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sysinfo
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"os"
+	"strings"
+)
+
+func readLinuxProcCPUInfo(buf []byte) error {
+	f, err := os.Open("/proc/cpuinfo")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	_, err = io.ReadFull(f, buf)
+	if err != nil && err != io.ErrUnexpectedEOF {
+		return err
+	}
+
+	return nil
+}
+
+func osCpuInfoName() string {
+	modelName := ""
+	cpuMHz := ""
+
+	// A 512-byte buffer is enough to hold the /proc/cpuinfo entry for CPU0.
+	buf := make([]byte, 512)
+	err := readLinuxProcCPUInfo(buf)
+	if err != nil {
+		return ""
+	}
+
+	scanner := bufio.NewScanner(bytes.NewReader(buf))
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "Model Name", "model name":
+			modelName = field[1]
+		case "CPU MHz", "cpu MHz":
+			cpuMHz = field[1]
+		}
+	}
+
+	if modelName == "" {
+		return ""
+	}
+
+	if cpuMHz == "" {
+		return modelName
+	}
+
+	// If the modelName field already contains the frequency information,
+	// the cpuMHz field is not needed.
+	// modelName field example:
+	//	Intel(R) Core(TM) i7-10700 CPU @ 2.90GHz
+	f := [...]string{"GHz", "MHz"}
+	for _, v := range f {
+		if strings.Contains(modelName, v) {
+			return modelName
+		}
+	}
+
+	return modelName + " @ " + cpuMHz + "MHz"
+}
diff --git a/src/internal/sysinfo/cpuinfo_stub.go b/src/internal/sysinfo/cpuinfo_stub.go
new file mode 100644
index 0000000..5dcfed1
--- /dev/null
+++ b/src/internal/sysinfo/cpuinfo_stub.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux
+
+package sysinfo
+
+func osCpuInfoName() string {
+	return ""
+}
diff --git a/src/internal/sysinfo/sysinfo.go b/src/internal/sysinfo/sysinfo.go
index 961be7a..6a29ad2 100644
--- a/src/internal/sysinfo/sysinfo.go
+++ b/src/internal/sysinfo/sysinfo.go
@@ -7,25 +7,29 @@
 package sysinfo
 
 import (
-	internalcpu "internal/cpu"
+	"internal/cpu"
 	"sync"
 )
 
-type cpuInfo struct {
+var cpuInfo struct {
 	once sync.Once
 	name string
 }
 
-var CPU cpuInfo
-
-func (cpu *cpuInfo) Name() string {
-	cpu.once.Do(func() {
+func CPUName() string {
+	cpuInfo.once.Do(func() {
 		// Try to get the information from internal/cpu.
-		if name := internalcpu.Name(); name != "" {
-			cpu.name = name
+		if name := cpu.Name(); name != "" {
+			cpuInfo.name = name
 			return
 		}
+
 		// TODO(martisch): use /proc/cpuinfo and /sys/devices/system/cpu/ on Linux as fallback.
+		if name := osCpuInfoName(); name != "" {
+			cpuInfo.name = name
+			return
+		}
 	})
-	return cpu.name
+
+	return cpuInfo.name
 }
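
The package API changes from a CPU variable with a Name method to a single CPUName function;
the sync.Once still guarantees the name is computed at most once, first from internal/cpu and
then, on Linux, from /proc/cpuinfo via the new osCpuInfoName fallback above. A hedged sketch of
a caller inside the standard library (w is assumed to be an io.Writer, in the spirit of how
benchmark output is labeled):

	// Sketch: print a CPU label, tolerating an unknown name.
	if name := sysinfo.CPUName(); name != "" {
		fmt.Fprintf(w, "cpu: %s\n", name)
	}
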
diff --git a/src/internal/testenv/exec.go b/src/internal/testenv/exec.go
index 50d3b0d..7f6ad5c 100644
--- a/src/internal/testenv/exec.go
+++ b/src/internal/testenv/exec.go
@@ -100,11 +100,14 @@
 // CleanCmdEnv will fill cmd.Env with the environment, excluding certain
 // variables that could modify the behavior of the Go tools such as
 // GODEBUG and GOTRACEBACK.
+//
+// If the caller wants to set cmd.Dir, set it before calling this function,
+// so PWD will be set correctly in the environment.
 func CleanCmdEnv(cmd *exec.Cmd) *exec.Cmd {
 	if cmd.Env != nil {
 		panic("environment already set")
 	}
-	for _, env := range os.Environ() {
+	for _, env := range cmd.Environ() {
 		// Exclude GODEBUG from the environment to prevent its output
 		// from breaking tests that are trying to parse other command output.
 		if strings.HasPrefix(env, "GODEBUG=") {
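
Switching from os.Environ to cmd.Environ matters because (*exec.Cmd).Environ, added in Go 1.19,
includes a PWD entry derived from cmd.Dir on Unix systems; that is why the new doc comment asks
callers to set cmd.Dir first. A short sketch of the intended order inside a test (the new
TestCleanCmdEnvPWD further below exercises exactly this):

	// Sketch: Dir must be set before CleanCmdEnv so PWD lands in cmd.Env.
	cmd := testenv.Command(t, testenv.GoToolPath(t), "env", "GOMOD")
	cmd.Dir = t.TempDir()
	cmd = testenv.CleanCmdEnv(cmd) // cmd.Environ() now contains PWD=cmd.Dir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("go env: %v\n%s", err, out)
	}
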
diff --git a/src/internal/testenv/testenv.go b/src/internal/testenv/testenv.go
index 31b58dd..5c80137 100644
--- a/src/internal/testenv/testenv.go
+++ b/src/internal/testenv/testenv.go
@@ -124,13 +124,13 @@
 	}
 }
 
-// HasGoRun reports whether the current system can run programs with “go run.”
+// HasGoRun reports whether the current system can run programs with “go run”.
 func HasGoRun() bool {
 	// For now, having go run and having go build are the same.
 	return HasGoBuild()
 }
 
-// MustHaveGoRun checks that the current system can run programs with “go run.”
+// MustHaveGoRun checks that the current system can run programs with “go run”.
 // If not, MustHaveGoRun calls t.Skip with an explanation.
 func MustHaveGoRun(t testing.TB) {
 	if !HasGoRun() {
diff --git a/src/internal/testenv/testenv_test.go b/src/internal/testenv/testenv_test.go
index d748e41..769db3a 100644
--- a/src/internal/testenv/testenv_test.go
+++ b/src/internal/testenv/testenv_test.go
@@ -78,7 +78,7 @@
 		// we will presumably find out about it when those tests fail.)
 		switch runtime.GOOS {
 		case "ios":
-			if strings.HasSuffix(b, "-corellium") {
+			if isCorelliumBuilder(b) {
 				// The corellium environment is self-hosting, so it should be able
 				// to build even though real "ios" devices can't exec.
 			} else {
@@ -89,7 +89,7 @@
 				return
 			}
 		case "android":
-			if strings.HasSuffix(b, "-emu") && platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, false) {
+			if isEmulatedBuilder(b) && platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, false) {
 				// As of 2023-05-02, the test environment on the emulated builders is
 				// missing a C linker.
 				t.Logf("HasGoBuild is false on %s", b)
@@ -97,7 +97,7 @@
 			}
 		}
 
-		if strings.HasSuffix(b, "-noopt") {
+		if strings.Contains(b, "-noopt") {
 			// The -noopt builder sets GO_GCFLAGS, which causes tests of 'go build' to
 			// be skipped.
 			t.Logf("HasGoBuild is false on %s", b)
@@ -153,7 +153,7 @@
 			t.Errorf("expected MustHaveExec to skip on %v", runtime.GOOS)
 		}
 	case "ios":
-		if b := testenv.Builder(); strings.HasSuffix(b, "-corellium") && !hasExec {
+		if b := testenv.Builder(); isCorelliumBuilder(b) && !hasExec {
 			// Most ios environments can't exec, but the corellium builder can.
 			t.Errorf("expected MustHaveExec not to skip on %v", b)
 		}
@@ -163,3 +163,46 @@
 		}
 	}
 }
+
+func TestCleanCmdEnvPWD(t *testing.T) {
+	// Test that CleanCmdEnv sets PWD if cmd.Dir is set.
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		t.Skipf("PWD is not used on %s", runtime.GOOS)
+	}
+	dir := t.TempDir()
+	cmd := testenv.Command(t, testenv.GoToolPath(t), "help")
+	cmd.Dir = dir
+	cmd = testenv.CleanCmdEnv(cmd)
+
+	for _, env := range cmd.Env {
+		if strings.HasPrefix(env, "PWD=") {
+			pwd := strings.TrimPrefix(env, "PWD=")
+			if pwd != dir {
+				t.Errorf("unexpected PWD: want %s, got %s", dir, pwd)
+			}
+			return
+		}
+	}
+	t.Error("PWD not set in cmd.Env")
+}
+
+func isCorelliumBuilder(builderName string) bool {
+	// Support both the old infra's builder names and the LUCI builder names.
+	// The former's names are ad-hoc so we could maintain this invariant on
+	// the builder side. The latter's names are structured, and "corellium" will
+	// appear as a "host" suffix after the GOOS and GOARCH; this suffix always begins
+	// with an underscore.
+	return strings.HasSuffix(builderName, "-corellium") || strings.Contains(builderName, "_corellium")
+}
+
+func isEmulatedBuilder(builderName string) bool {
+	// Support both the old infra's builder names and the LUCI builder names.
+	// The former's names are ad-hoc so we could maintain this invariant on
+	// the builder side. The latter's names are structured, and the signifier
+	// of emulation "emu" will appear as a "host" suffix after the GOOS and
+	// GOARCH because it modifies the run environment in such a way that it
+	// the target GOOS and GOARCH may not match the host. This suffix always
+	// begins with an underscore.
+	return strings.HasSuffix(builderName, "-emu") || strings.Contains(builderName, "_emu")
+}
diff --git a/src/internal/trace/gc.go b/src/internal/trace/gc.go
index 3bd284e..ca91969 100644
--- a/src/internal/trace/gc.go
+++ b/src/internal/trace/gc.go
@@ -6,6 +6,7 @@
 
 import (
 	"container/heap"
+	tracev2 "internal/trace/v2"
 	"math"
 	"sort"
 	"strings"
@@ -202,6 +203,255 @@
 	return out
 }
 
+// MutatorUtilizationV2 returns a set of mutator utilization functions
+// for the given slice of v2 trace events. Each function will
+// always end with 0 utilization. The bounds of each function are implicit
+// in the first and last event; outside of these bounds each function is
+// undefined.
+//
+// If the UtilPerProc flag is not given, this always returns a single
+// utilization function. Otherwise, it returns one function per P.
+func MutatorUtilizationV2(events []tracev2.Event, flags UtilFlags) [][]MutatorUtil {
+	// Set up a bunch of analysis state.
+	type perP struct {
+		// gc > 0 indicates that GC is active on this P.
+		gc int
+		// series the logical series number for this P. This
+		// is necessary because Ps may be removed and then
+		// re-added, and then the new P needs a new series.
+		series int
+	}
+	type procsCount struct {
+		// time at which procs changed.
+		time int64
+		// n is the number of procs at that point.
+		n int
+	}
+	out := [][]MutatorUtil{}
+	stw := 0
+	ps := []perP{}
+	inGC := make(map[tracev2.GoID]bool)
+	states := make(map[tracev2.GoID]tracev2.GoState)
+	bgMark := make(map[tracev2.GoID]bool)
+	procs := []procsCount{}
+	seenSync := false
+
+	// Helpers.
+	handleSTW := func(r tracev2.Range) bool {
+		return flags&UtilSTW != 0 && isGCSTW(r)
+	}
+	handleMarkAssist := func(r tracev2.Range) bool {
+		return flags&UtilAssist != 0 && isGCMarkAssist(r)
+	}
+	handleSweep := func(r tracev2.Range) bool {
+		return flags&UtilSweep != 0 && isGCSweep(r)
+	}
+
+	// Iterate through the trace, tracking mutator utilization.
+	var lastEv *tracev2.Event
+	for i := range events {
+		ev := &events[i]
+		lastEv = ev
+
+		// Process the event.
+		switch ev.Kind() {
+		case tracev2.EventSync:
+			seenSync = true
+		case tracev2.EventMetric:
+			m := ev.Metric()
+			if m.Name != "/sched/gomaxprocs:threads" {
+				break
+			}
+			gomaxprocs := int(m.Value.Uint64())
+			if len(ps) > gomaxprocs {
+				if flags&UtilPerProc != 0 {
+					// End each P's series.
+					for _, p := range ps[gomaxprocs:] {
+						out[p.series] = addUtil(out[p.series], MutatorUtil{int64(ev.Time()), 0})
+					}
+				}
+				ps = ps[:gomaxprocs]
+			}
+			for len(ps) < gomaxprocs {
+				// Start new P's series.
+				series := 0
+				if flags&UtilPerProc != 0 || len(out) == 0 {
+					series = len(out)
+					out = append(out, []MutatorUtil{{int64(ev.Time()), 1}})
+				}
+				ps = append(ps, perP{series: series})
+			}
+			if len(procs) == 0 || gomaxprocs != procs[len(procs)-1].n {
+				procs = append(procs, procsCount{time: int64(ev.Time()), n: gomaxprocs})
+			}
+		}
+		if len(ps) == 0 {
+			// We can't start doing any analysis until we see what GOMAXPROCS is.
+			// It will show up very early in the trace, but we need to be robust to
+			// something else being emitted beforehand.
+			continue
+		}
+
+		switch ev.Kind() {
+		case tracev2.EventRangeActive:
+			if seenSync {
+				// If we've seen a sync, then we can be sure we're not finding out about
+				// something late; we have complete information after that point, and these
+				// active events will just be redundant.
+				break
+			}
+			// This range is active back to the start of the trace. We're failing to account
+			// for this since we just found out about it now. Fix up the mutator utilization.
+			//
+			// N.B. A trace can't start during a STW, so we don't handle it here.
+			r := ev.Range()
+			switch {
+			case handleMarkAssist(r):
+				if !states[ev.Goroutine()].Executing() {
+					// If the goroutine isn't executing, then the fact that it was in mark
+					// assist doesn't actually count.
+					break
+				}
+				// This G has been in a mark assist *and running on its P* since the start
+				// of the trace.
+				fallthrough
+			case handleSweep(r):
+				// This P has been in sweep (or mark assist, from above) since the start of the trace.
+				//
+				// We don't need to do anything if UtilPerProc is set. If we get an event like
+				// this for a running P, it must show up the first time a P is mentioned. Therefore,
+				// this P won't actually have any MutatorUtils on its list yet.
+				//
+				// However, if UtilPerProc isn't set, then we probably have data from other procs
+				// and from previous events. We need to fix that up.
+				if flags&UtilPerProc != 0 {
+					break
+				}
+				// Subtract out 1/gomaxprocs mutator utilization for all time periods
+				// from the beginning of the trace until now.
+				mi, pi := 0, 0
+				for mi < len(out[0]) {
+					if pi < len(procs)-1 && procs[pi+1].time < out[0][mi].Time {
+						pi++
+						continue
+					}
+					out[0][mi].Util -= float64(1) / float64(procs[pi].n)
+					if out[0][mi].Util < 0 {
+						out[0][mi].Util = 0
+					}
+					mi++
+				}
+			}
+			// After accounting for the portion we missed, this just acts like the
+			// beginning of a new range.
+			fallthrough
+		case tracev2.EventRangeBegin:
+			r := ev.Range()
+			if handleSTW(r) {
+				stw++
+			} else if handleSweep(r) {
+				ps[ev.Proc()].gc++
+			} else if handleMarkAssist(r) {
+				ps[ev.Proc()].gc++
+				if g := r.Scope.Goroutine(); g != tracev2.NoGoroutine {
+					inGC[g] = true
+				}
+			}
+		case tracev2.EventRangeEnd:
+			r := ev.Range()
+			if handleSTW(r) {
+				stw--
+			} else if handleSweep(r) {
+				ps[ev.Proc()].gc--
+			} else if handleMarkAssist(r) {
+				ps[ev.Proc()].gc--
+				if g := r.Scope.Goroutine(); g != tracev2.NoGoroutine {
+					delete(inGC, g)
+				}
+			}
+		case tracev2.EventStateTransition:
+			st := ev.StateTransition()
+			if st.Resource.Kind != tracev2.ResourceGoroutine {
+				break
+			}
+			old, new := st.Goroutine()
+			g := st.Resource.Goroutine()
+			if inGC[g] || bgMark[g] {
+				if !old.Executing() && new.Executing() {
+					// Started running while doing GC things.
+					ps[ev.Proc()].gc++
+				} else if old.Executing() && !new.Executing() {
+					// Stopped running while doing GC things.
+					ps[ev.Proc()].gc--
+				}
+			}
+			states[g] = new
+		case tracev2.EventLabel:
+			l := ev.Label()
+			if flags&UtilBackground != 0 && strings.HasPrefix(l.Label, "GC ") && l.Label != "GC (idle)" {
+				// Background mark worker.
+				//
+				// If we're in per-proc mode, we don't
+				// count dedicated workers because
+				// they kick all of the goroutines off
+				// that P, so they don't directly
+				// contribute to goroutine latency.
+				if !(flags&UtilPerProc != 0 && l.Label == "GC (dedicated)") {
+					bgMark[ev.Goroutine()] = true
+					ps[ev.Proc()].gc++
+				}
+			}
+		}
+
+		if flags&UtilPerProc == 0 {
+			// Compute the current average utilization.
+			if len(ps) == 0 {
+				continue
+			}
+			gcPs := 0
+			if stw > 0 {
+				gcPs = len(ps)
+			} else {
+				for i := range ps {
+					if ps[i].gc > 0 {
+						gcPs++
+					}
+				}
+			}
+			mu := MutatorUtil{int64(ev.Time()), 1 - float64(gcPs)/float64(len(ps))}
+
+			// Record the utilization change. (Since
+			// len(ps) == len(out), we know len(out) > 0.)
+			out[0] = addUtil(out[0], mu)
+		} else {
+			// Check for per-P utilization changes.
+			for i := range ps {
+				p := &ps[i]
+				util := 1.0
+				if stw > 0 || p.gc > 0 {
+					util = 0.0
+				}
+				out[p.series] = addUtil(out[p.series], MutatorUtil{int64(ev.Time()), util})
+			}
+		}
+	}
+
+	// No events in the stream.
+	if lastEv == nil {
+		return nil
+	}
+
+	// Add final 0 utilization event to any remaining series. This
+	// is important to mark the end of the trace. The exact value
+	// shouldn't matter since no window should extend beyond this,
+	// but using 0 is symmetric with the start of the trace.
+	mu := MutatorUtil{int64(lastEv.Time()), 0}
+	for i := range ps {
+		out[ps[i].series] = addUtil(out[ps[i].series], mu)
+	}
+	return out
+}
+
 func addUtil(util []MutatorUtil, mu MutatorUtil) []MutatorUtil {
 	if len(util) > 0 {
 		if mu.Util == util[len(util)-1].Util {
@@ -824,3 +1074,15 @@
 	}
 	return 1<<63 - 1
 }
+
+func isGCSTW(r tracev2.Range) bool {
+	return strings.HasPrefix(r.Name, "stop-the-world") && strings.Contains(r.Name, "GC")
+}
+
+func isGCMarkAssist(r tracev2.Range) bool {
+	return r.Name == "GC mark assist"
+}
+
+func isGCSweep(r tracev2.Range) bool {
+	return r.Name == "GC incremental sweep"
+}
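
MutatorUtilizationV2 returns the same [][]MutatorUtil shape as the v1 MutatorUtilization, so its
output plugs straight into the existing MMU/MUD machinery. A hedged sketch of the whole pipeline
for a v2 trace, written as if inside this package and mirroring the new test below; these are
internal packages, so it only runs inside the Go tree (e.g. from cmd/trace), and the function
name and 10 ms window are assumptions.

	// Sketch: read v2 events, compute utilization, query the MMU curve.
	// (Assumes imports of io, time, and internal/trace/v2 as tracev2.)
	func worstWindow(f io.Reader) (float64, error) {
		r, err := tracev2.NewReader(f)
		if err != nil {
			return 0, err
		}
		var events []tracev2.Event
		for {
			ev, err := r.ReadEvent()
			if err == io.EOF {
				break
			}
			if err != nil {
				return 0, err
			}
			events = append(events, ev)
		}
		mu := MutatorUtilizationV2(events, UtilSTW|UtilBackground|UtilAssist)
		return NewMMUCurve(mu).MMU(10 * time.Millisecond), nil
	}
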
diff --git a/src/internal/trace/gc_test.go b/src/internal/trace/gc_test.go
index 9b9771e..4bbf160 100644
--- a/src/internal/trace/gc_test.go
+++ b/src/internal/trace/gc_test.go
@@ -6,6 +6,10 @@
 
 import (
 	"bytes"
+	"internal/trace/v2"
+	tracev2 "internal/trace/v2"
+	"internal/trace/v2/testtrace"
+	"io"
 	"math"
 	"os"
 	"testing"
@@ -83,46 +87,73 @@
 		// test input too big for all.bash
 		t.Skip("skipping in -short mode")
 	}
+	check := func(t *testing.T, mu [][]MutatorUtil) {
+		mmuCurve := NewMMUCurve(mu)
 
-	data, err := os.ReadFile("testdata/stress_1_10_good")
-	if err != nil {
-		t.Fatalf("failed to read input file: %v", err)
-	}
-	_, events, err := parse(bytes.NewReader(data), "")
-	if err != nil {
-		t.Fatalf("failed to parse trace: %s", err)
-	}
-	mu := MutatorUtilization(events.Events, UtilSTW|UtilBackground|UtilAssist)
-	mmuCurve := NewMMUCurve(mu)
-
-	// Test the optimized implementation against the "obviously
-	// correct" implementation.
-	for window := time.Nanosecond; window < 10*time.Second; window *= 10 {
-		want := mmuSlow(mu[0], window)
-		got := mmuCurve.MMU(window)
-		if !aeq(want, got) {
-			t.Errorf("want %f, got %f mutator utilization in window %s", want, got, window)
+		// Test the optimized implementation against the "obviously
+		// correct" implementation.
+		for window := time.Nanosecond; window < 10*time.Second; window *= 10 {
+			want := mmuSlow(mu[0], window)
+			got := mmuCurve.MMU(window)
+			if !aeq(want, got) {
+				t.Errorf("want %f, got %f mutator utilization in window %s", want, got, window)
+			}
 		}
-	}
 
-	// Test MUD with band optimization against MUD without band
-	// optimization. We don't have a simple testing implementation
-	// of MUDs (the simplest implementation is still quite
-	// complex), but this is still a pretty good test.
-	defer func(old int) { bandsPerSeries = old }(bandsPerSeries)
-	bandsPerSeries = 1
-	mmuCurve2 := NewMMUCurve(mu)
-	quantiles := []float64{0, 1 - .999, 1 - .99}
-	for window := time.Microsecond; window < time.Second; window *= 10 {
-		mud1 := mmuCurve.MUD(window, quantiles)
-		mud2 := mmuCurve2.MUD(window, quantiles)
-		for i := range mud1 {
-			if !aeq(mud1[i], mud2[i]) {
-				t.Errorf("for quantiles %v at window %v, want %v, got %v", quantiles, window, mud2, mud1)
-				break
+		// Test MUD with band optimization against MUD without band
+		// optimization. We don't have a simple testing implementation
+		// of MUDs (the simplest implementation is still quite
+		// complex), but this is still a pretty good test.
+		defer func(old int) { bandsPerSeries = old }(bandsPerSeries)
+		bandsPerSeries = 1
+		mmuCurve2 := NewMMUCurve(mu)
+		quantiles := []float64{0, 1 - .999, 1 - .99}
+		for window := time.Microsecond; window < time.Second; window *= 10 {
+			mud1 := mmuCurve.MUD(window, quantiles)
+			mud2 := mmuCurve2.MUD(window, quantiles)
+			for i := range mud1 {
+				if !aeq(mud1[i], mud2[i]) {
+					t.Errorf("for quantiles %v at window %v, want %v, got %v", quantiles, window, mud2, mud1)
+					break
+				}
 			}
 		}
 	}
+	t.Run("V1", func(t *testing.T) {
+		data, err := os.ReadFile("testdata/stress_1_10_good")
+		if err != nil {
+			t.Fatalf("failed to read input file: %v", err)
+		}
+		events, err := Parse(bytes.NewReader(data), "")
+		if err != nil {
+			t.Fatalf("failed to parse trace: %s", err)
+		}
+		check(t, MutatorUtilization(events.Events, UtilSTW|UtilBackground|UtilAssist))
+	})
+	t.Run("V2", func(t *testing.T) {
+		testPath := "v2/testdata/tests/go122-gc-stress.test"
+		r, _, err := testtrace.ParseFile(testPath)
+		if err != nil {
+			t.Fatalf("malformed test %s: bad trace file: %v", testPath, err)
+		}
+		var events []tracev2.Event
+		tr, err := trace.NewReader(r)
+		if err != nil {
+			t.Fatalf("malformed test %s: bad trace file: %v", testPath, err)
+		}
+		for {
+			ev, err := tr.ReadEvent()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				t.Fatalf("malformed test %s: bad trace file: %v", testPath, err)
+			}
+			events = append(events, ev)
+		}
+		// Pass the trace through MutatorUtilizationV2 and check it.
+		check(t, MutatorUtilizationV2(events, UtilSTW|UtilBackground|UtilAssist))
+	})
 }
 
 func BenchmarkMMU(b *testing.B) {
@@ -130,7 +161,7 @@
 	if err != nil {
 		b.Fatalf("failed to read input file: %v", err)
 	}
-	_, events, err := parse(bytes.NewReader(data), "")
+	events, err := Parse(bytes.NewReader(data), "")
 	if err != nil {
 		b.Fatalf("failed to parse trace: %s", err)
 	}
diff --git a/src/internal/trace/parser.go b/src/internal/trace/parser.go
index 67fa60b..3bbfbeb 100644
--- a/src/internal/trace/parser.go
+++ b/src/internal/trace/parser.go
@@ -136,17 +136,23 @@
 	sargs []string
 }
 
-// readTrace does wire-format parsing and verification.
-// It does not care about specific event types and argument meaning.
-func readTrace(r io.Reader) (ver int, events []rawEvent, strings map[uint64]string, err error) {
+func ReadVersion(r io.Reader) (ver int, off int, err error) {
 	// Read and validate trace header.
 	var buf [16]byte
-	off, err := io.ReadFull(r, buf[:])
+	off, err = io.ReadFull(r, buf[:])
 	if err != nil {
 		err = fmt.Errorf("failed to read header: read %v, err %v", off, err)
 		return
 	}
 	ver, err = parseHeader(buf[:])
+	return
+}
+
+// readTrace does wire-format parsing and verification.
+// It does not care about specific event types and argument meaning.
+func readTrace(r io.Reader) (ver int, events []rawEvent, strings map[uint64]string, err error) {
+	var off int
+	ver, off, err = ReadVersion(r)
 	if err != nil {
 		return
 	}
@@ -161,6 +167,7 @@
 	}
 
 	// Read events.
+	var buf [16]byte
 	strings = make(map[uint64]string)
 	for {
 		// Read event type and number of arguments (1 byte).
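
ReadVersion factors the 16-byte header sniff out of readTrace so a caller can learn the trace
format version without committing to this (v1) parser. A hedged sketch of how a tool might
branch on it, written as if inside this package; the 1022 threshold follows parseHeader's
convention of encoding "go 1.22" as 1022 and is an assumption here, not part of the patch. Note
that ReadVersion consumes the header, so the reader must be rewound before full parsing.

	// Sketch: pick a parser based on the trace header version.
	func isV2Trace(f io.Reader) (bool, error) {
		ver, _, err := ReadVersion(f)
		if err != nil {
			return false, err
		}
		return ver >= 1022, nil
	}
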
diff --git a/src/internal/trace/summary.go b/src/internal/trace/summary.go
new file mode 100644
index 0000000..b714e01
--- /dev/null
+++ b/src/internal/trace/summary.go
@@ -0,0 +1,667 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	tracev2 "internal/trace/v2"
+	"sort"
+	"time"
+)
+
+// Summary is the analysis result produced by the summarizer.
+type Summary struct {
+	Goroutines map[tracev2.GoID]*GoroutineSummary
+	Tasks      map[tracev2.TaskID]*UserTaskSummary
+}
+
+// GoroutineSummary contains statistics and execution details of a single goroutine.
+// (For v2 traces.)
+type GoroutineSummary struct {
+	ID           tracev2.GoID
+	Name         string       // A non-unique human-friendly identifier for the goroutine.
+	PC           uint64       // The first PC we saw for the entry function of the goroutine
+	CreationTime tracev2.Time // Timestamp of the first appearance in the trace.
+	StartTime    tracev2.Time // Timestamp of the first time it started running. 0 if the goroutine never ran.
+	EndTime      tracev2.Time // Timestamp of when the goroutine exited. 0 if the goroutine never exited.
+
+	// List of regions in the goroutine, sorted based on the start time.
+	Regions []*UserRegionSummary
+
+	// Statistics of execution time during the goroutine execution.
+	GoroutineExecStats
+
+	// goroutineSummary is state used just for computing this structure.
+	// It's dropped before being returned to the caller.
+	//
+	// More specifically, if it's nil, it indicates that this summary has
+	// already been finalized.
+	*goroutineSummary
+}
+
+// UserTaskSummary represents a task in the trace.
+type UserTaskSummary struct {
+	ID       tracev2.TaskID
+	Name     string
+	Parent   *UserTaskSummary // nil if the parent is unknown.
+	Children []*UserTaskSummary
+
+	// Task begin event. An EventTaskBegin event or nil.
+	Start *tracev2.Event
+
+	// Task end event. Normally an EventTaskEnd event or nil.
+	End *tracev2.Event
+
+	// Logs is a list of tracev2.EventLog events associated with the task.
+	Logs []*tracev2.Event
+
+	// List of regions in the task, sorted based on the start time.
+	Regions []*UserRegionSummary
+
+	// Goroutines is the set of goroutines associated with this task.
+	Goroutines map[tracev2.GoID]*GoroutineSummary
+}
+
+// Complete returns true if we have complete information about the task
+// from the trace: both a start and an end.
+func (s *UserTaskSummary) Complete() bool {
+	return s.Start != nil && s.End != nil
+}
+
+// Descendents returns a slice consisting of itself (always the first task returned),
+// and the transitive closure of all of its children.
+func (s *UserTaskSummary) Descendents() []*UserTaskSummary {
+	descendents := []*UserTaskSummary{s}
+	for _, child := range s.Children {
+		descendents = append(descendents, child.Descendents()...)
+	}
+	return descendents
+}
+
+// UserRegionSummary represents a region and goroutine execution stats
+// while the region was active. (For v2 traces.)
+type UserRegionSummary struct {
+	TaskID tracev2.TaskID
+	Name   string
+
+	// Region start event. Normally EventRegionBegin event or nil,
+	// but can be a state transition event from NotExist or Undetermined
+	// if the region is a synthetic region representing task inheritance
+	// from the parent goroutine.
+	Start *tracev2.Event
+
+	// Region end event. Normally EventRegionEnd event or nil,
+	// but can be a state transition event to NotExist if the goroutine
+	// terminated without explicitly ending the region.
+	End *tracev2.Event
+
+	GoroutineExecStats
+}
+
+// GoroutineExecStats contains statistics about a goroutine's execution
+// during a period of time.
+type GoroutineExecStats struct {
+	// These stats are all non-overlapping.
+	ExecTime          time.Duration
+	SchedWaitTime     time.Duration
+	BlockTimeByReason map[string]time.Duration
+	SyscallTime       time.Duration
+	SyscallBlockTime  time.Duration
+
+	// TotalTime is the duration of the goroutine's presence in the trace.
+	// Necessarily overlaps with other stats.
+	TotalTime time.Duration
+
+	// Total time the goroutine spent in certain ranges; may overlap
+	// with other stats.
+	RangeTime map[string]time.Duration
+}
+
+func (s GoroutineExecStats) NonOverlappingStats() map[string]time.Duration {
+	stats := map[string]time.Duration{
+		"Execution time":         s.ExecTime,
+		"Sched wait time":        s.SchedWaitTime,
+		"Syscall execution time": s.SyscallTime,
+		"Block time (syscall)":   s.SyscallBlockTime,
+		"Unknown time":           s.UnknownTime(),
+	}
+	for reason, dt := range s.BlockTimeByReason {
+		stats["Block time ("+reason+")"] += dt
+	}
+	// N.B. Don't include RangeTime or TotalTime; they overlap with these other
+	// stats.
+	return stats
+}
+
+// UnknownTime returns whatever isn't accounted for in TotalTime.
+func (s GoroutineExecStats) UnknownTime() time.Duration {
+	sum := s.ExecTime + s.SchedWaitTime + s.SyscallTime +
+		s.SyscallBlockTime
+	for _, dt := range s.BlockTimeByReason {
+		sum += dt
+	}
+	// N.B. Don't include range time. Ranges overlap with
+	// other stats, whereas these stats are non-overlapping.
+	if sum < s.TotalTime {
+		return s.TotalTime - sum
+	}
+	return 0
+}
+
+// sub returns the stats s-v.
+func (s GoroutineExecStats) sub(v GoroutineExecStats) (r GoroutineExecStats) {
+	r = s.clone()
+	r.ExecTime -= v.ExecTime
+	r.SchedWaitTime -= v.SchedWaitTime
+	for reason := range s.BlockTimeByReason {
+		r.BlockTimeByReason[reason] -= v.BlockTimeByReason[reason]
+	}
+	r.SyscallTime -= v.SyscallTime
+	r.SyscallBlockTime -= v.SyscallBlockTime
+	r.TotalTime -= v.TotalTime
+	for name := range s.RangeTime {
+		r.RangeTime[name] -= v.RangeTime[name]
+	}
+	return r
+}
+
+func (s GoroutineExecStats) clone() (r GoroutineExecStats) {
+	r = s
+	r.BlockTimeByReason = make(map[string]time.Duration)
+	for reason, dt := range s.BlockTimeByReason {
+		r.BlockTimeByReason[reason] = dt
+	}
+	r.RangeTime = make(map[string]time.Duration)
+	for name, dt := range s.RangeTime {
+		r.RangeTime[name] = dt
+	}
+	return r
+}
+
+// snapshotStat returns the snapshot of the goroutine execution statistics.
+// This is called as we process the ordered trace event stream. lastTs is used
+// to process pending statistics if this is called before any goroutine end event.
+func (g *GoroutineSummary) snapshotStat(lastTs tracev2.Time) (ret GoroutineExecStats) {
+	ret = g.GoroutineExecStats.clone()
+
+	if g.goroutineSummary == nil {
+		return ret // Already finalized; no pending state.
+	}
+
+	// Set the total time if necessary.
+	if g.TotalTime == 0 {
+		ret.TotalTime = lastTs.Sub(g.CreationTime)
+	}
+
+	// Add in time since lastTs.
+	if g.lastStartTime != 0 {
+		ret.ExecTime += lastTs.Sub(g.lastStartTime)
+	}
+	if g.lastRunnableTime != 0 {
+		ret.SchedWaitTime += lastTs.Sub(g.lastRunnableTime)
+	}
+	if g.lastBlockTime != 0 {
+		ret.BlockTimeByReason[g.lastBlockReason] += lastTs.Sub(g.lastBlockTime)
+	}
+	if g.lastSyscallTime != 0 {
+		ret.SyscallTime += lastTs.Sub(g.lastSyscallTime)
+	}
+	if g.lastSyscallBlockTime != 0 {
+		ret.SchedWaitTime += lastTs.Sub(g.lastSyscallBlockTime)
+	}
+	for name, ts := range g.lastRangeTime {
+		ret.RangeTime[name] += lastTs.Sub(ts)
+	}
+	return ret
+}
+
+// finalize is called when processing a goroutine end event or at
+// the end of trace processing. It finalizes the execution stats
+// and any active regions in the goroutine. When called at the end
+// of trace processing rather than for an end event, trigger is nil.
+func (g *GoroutineSummary) finalize(lastTs tracev2.Time, trigger *tracev2.Event) {
+	if trigger != nil {
+		g.EndTime = trigger.Time()
+	}
+	finalStat := g.snapshotStat(lastTs)
+
+	g.GoroutineExecStats = finalStat
+
+	// System goroutines are never part of regions, even though they
+	// "inherit" a task due to creation (EvGoCreate) from within a region.
+	// This may happen e.g. if the first GC is triggered within a region,
+	// starting the GC worker goroutines.
+	if !IsSystemGoroutine(g.Name) {
+		for _, s := range g.activeRegions {
+			s.End = trigger
+			s.GoroutineExecStats = finalStat.sub(s.GoroutineExecStats)
+			g.Regions = append(g.Regions, s)
+		}
+	}
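+	// Drop the transient per-goroutine analysis state; only the public
+	// summary fields remain populated after this point.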
+	*(g.goroutineSummary) = goroutineSummary{}
+}
+
+// goroutineSummary is a private part of GoroutineSummary that is required only during analysis.
+type goroutineSummary struct {
+	lastStartTime        tracev2.Time
+	lastRunnableTime     tracev2.Time
+	lastBlockTime        tracev2.Time
+	lastBlockReason      string
+	lastSyscallTime      tracev2.Time
+	lastSyscallBlockTime tracev2.Time
+	lastRangeTime        map[string]tracev2.Time
+	activeRegions        []*UserRegionSummary // stack of active regions
+}
+
+// Summarizer constructs per-goroutine time statistics for v2 traces.
+type Summarizer struct {
+	// gs contains the map of goroutine summaries we're building up to return to the caller.
+	gs map[tracev2.GoID]*GoroutineSummary
+
+	// tasks contains the map of task summaries we're building up to return to the caller.
+	tasks map[tracev2.TaskID]*UserTaskSummary
+
+	// syscallingP and syscallingG represent a binding between a P and G in a syscall.
+	// Used to correctly identify and clean up after syscalls (blocking or otherwise).
+	syscallingP map[tracev2.ProcID]tracev2.GoID
+	syscallingG map[tracev2.GoID]tracev2.ProcID
+
+	// rangesP is used for optimistic tracking of P-based ranges for goroutines.
+	//
+	// It's a best-effort mapping of an active range on a P to the goroutine we think
+	// is associated with it.
+	rangesP map[rangeP]tracev2.GoID
+
+	lastTs tracev2.Time // timestamp of the last event processed.
+	syncTs tracev2.Time // timestamp of the last sync event processed (or the first timestamp in the trace).
+}
+
+// NewSummarizer creates a new struct to build goroutine stats from a trace.
+func NewSummarizer() *Summarizer {
+	return &Summarizer{
+		gs:          make(map[tracev2.GoID]*GoroutineSummary),
+		tasks:       make(map[tracev2.TaskID]*UserTaskSummary),
+		syscallingP: make(map[tracev2.ProcID]tracev2.GoID),
+		syscallingG: make(map[tracev2.GoID]tracev2.ProcID),
+		rangesP:     make(map[rangeP]tracev2.GoID),
+	}
+}
+
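+// rangeP identifies a P-scoped range (for example, a GC sweep) by the P it
+// runs on and the range name.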
+type rangeP struct {
+	id   tracev2.ProcID
+	name string
+}
+
+// Event feeds a single event into the stats summarizer.
+func (s *Summarizer) Event(ev *tracev2.Event) {
+	if s.syncTs == 0 {
+		s.syncTs = ev.Time()
+	}
+	s.lastTs = ev.Time()
+
+	switch ev.Kind() {
+	// Record sync time for the RangeActive events.
+	case tracev2.EventSync:
+		s.syncTs = ev.Time()
+
+	// Handle state transitions.
+	case tracev2.EventStateTransition:
+		st := ev.StateTransition()
+		switch st.Resource.Kind {
+		// Handle goroutine transitions, which are the meat of this computation.
+		case tracev2.ResourceGoroutine:
+			id := st.Resource.Goroutine()
+			old, new := st.Goroutine()
+			if old == new {
+				// Skip these events; they're not telling us anything new.
+				break
+			}
+
+			// Handle transition out.
+			g := s.gs[id]
+			switch old {
+			case tracev2.GoUndetermined, tracev2.GoNotExist:
+				g = &GoroutineSummary{ID: id, goroutineSummary: &goroutineSummary{}}
+				// If we're coming out of GoUndetermined, then the creation time is the
+				// time of the last sync.
+				if old == tracev2.GoUndetermined {
+					g.CreationTime = s.syncTs
+				} else {
+					g.CreationTime = ev.Time()
+				}
+				// The goroutine is being created, or it's being named for the first time.
+				g.lastRangeTime = make(map[string]tracev2.Time)
+				g.BlockTimeByReason = make(map[string]time.Duration)
+				g.RangeTime = make(map[string]time.Duration)
+
+				// When a goroutine is newly created, inherit the task
+				// of the active region. To ease handling of this
+				// case, we create a fake region description with the
+				// task id. This isn't strictly necessary as this
+				// goroutine may not be associated with the task, but
+				// it can be convenient to see all children created
+				// during a region.
+				//
+				// N.B. ev.Goroutine() will always be NoGoroutine for the
+				// Undetermined case, so this will simply not fire.
+				if creatorG := s.gs[ev.Goroutine()]; creatorG != nil && len(creatorG.activeRegions) > 0 {
+					regions := creatorG.activeRegions
+					s := regions[len(regions)-1]
+					g.activeRegions = []*UserRegionSummary{{TaskID: s.TaskID, Start: ev}}
+				}
+				s.gs[g.ID] = g
+			case tracev2.GoRunning:
+				// Record execution time as we transition out of running
+				g.ExecTime += ev.Time().Sub(g.lastStartTime)
+				g.lastStartTime = 0
+			case tracev2.GoWaiting:
+				// Record block time as we transition out of waiting.
+				if g.lastBlockTime != 0 {
+					g.BlockTimeByReason[g.lastBlockReason] += ev.Time().Sub(g.lastBlockTime)
+					g.lastBlockTime = 0
+				}
+			case tracev2.GoRunnable:
+				// Record sched latency time as we transition out of runnable.
+				if g.lastRunnableTime != 0 {
+					g.SchedWaitTime += ev.Time().Sub(g.lastRunnableTime)
+					g.lastRunnableTime = 0
+				}
+			case tracev2.GoSyscall:
+				// Record syscall execution time and syscall block time as we transition out of syscall.
+				if g.lastSyscallTime != 0 {
+					if g.lastSyscallBlockTime != 0 {
+						g.SyscallBlockTime += ev.Time().Sub(g.lastSyscallBlockTime)
+						g.SyscallTime += g.lastSyscallBlockTime.Sub(g.lastSyscallTime)
+					} else {
+						g.SyscallTime += ev.Time().Sub(g.lastSyscallTime)
+					}
+					g.lastSyscallTime = 0
+					g.lastSyscallBlockTime = 0
+
+					// Clear the syscall map.
+					delete(s.syscallingP, s.syscallingG[id])
+					delete(s.syscallingG, id)
+				}
+			}
+
+			// The goroutine hasn't been identified yet. Take the transition stack
+			// and identify the goroutine by the root frame of that stack.
+			// This root frame will be identical for all transitions on this
+			// goroutine, because it represents its immutable start point.
+			if g.Name == "" {
+				stk := st.Stack
+				if stk != tracev2.NoStack {
+					var frame tracev2.StackFrame
+					var ok bool
+					stk.Frames(func(f tracev2.StackFrame) bool {
+						frame = f
+						ok = true
+						return true
+					})
+					if ok {
+						// NB: this PC won't actually be consistent for
+						// goroutines which existed at the start of the
+						// trace. The UI doesn't use it directly; this
+						// mainly serves as an indication that we
+						// actually saw a call stack for the goroutine.
+						g.PC = frame.PC
+						g.Name = frame.Func
+					}
+				}
+			}
+
+			// Handle transition in.
+			switch new {
+			case tracev2.GoRunning:
+				// We started running. Record it.
+				g.lastStartTime = ev.Time()
+				if g.StartTime == 0 {
+					g.StartTime = ev.Time()
+				}
+			case tracev2.GoRunnable:
+				g.lastRunnableTime = ev.Time()
+			case tracev2.GoWaiting:
+				if st.Reason != "forever" {
+					g.lastBlockTime = ev.Time()
+					g.lastBlockReason = st.Reason
+					break
+				}
+				// "Forever" is like goroutine death.
+				fallthrough
+			case tracev2.GoNotExist:
+				g.finalize(ev.Time(), ev)
+			case tracev2.GoSyscall:
+				s.syscallingP[ev.Proc()] = id
+				s.syscallingG[id] = ev.Proc()
+				g.lastSyscallTime = ev.Time()
+			}
+
+		// Handle procs to detect syscall blocking, which is identifiable as a
+		// proc going idle while the goroutine it was attached to is in a syscall.
+		case tracev2.ResourceProc:
+			id := st.Resource.Proc()
+			old, new := st.Proc()
+			if old != new && new == tracev2.ProcIdle {
+				if goid, ok := s.syscallingP[id]; ok {
+					g := s.gs[goid]
+					g.lastSyscallBlockTime = ev.Time()
+					delete(s.syscallingP, id)
+				}
+			}
+		}
+
+	// Handle ranges of all kinds.
+	case tracev2.EventRangeBegin, tracev2.EventRangeActive:
+		r := ev.Range()
+		var g *GoroutineSummary
+		switch r.Scope.Kind {
+		case tracev2.ResourceGoroutine:
+			// Simple goroutine range. We attribute the entire range regardless of
+			// goroutine stats. Lots of situations are still identifiable, e.g. a
+			// goroutine blocked often in mark assist will have both high mark assist
+			// and high block times. Those interested in a deeper view can look at the
+			// trace viewer.
+			g = s.gs[r.Scope.Goroutine()]
+		case tracev2.ResourceProc:
+			// N.B. These ranges are not actually bound to the goroutine, they're
+			// bound to the P. But if we happen to be on the P the whole time, let's
+			// try to attribute it to the goroutine. (e.g. GC sweeps are here.)
+			g = s.gs[ev.Goroutine()]
+			if g != nil {
+				s.rangesP[rangeP{id: r.Scope.Proc(), name: r.Name}] = ev.Goroutine()
+			}
+		}
+		if g == nil {
+			break
+		}
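+		// A RangeActive event means the range was already in progress at the
+		// last sync point, so attribute its time starting from the sync
+		// timestamp rather than from this event's timestamp.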
+		if ev.Kind() == tracev2.EventRangeActive {
+			if ts := g.lastRangeTime[r.Name]; ts != 0 {
+				g.RangeTime[r.Name] += s.syncTs.Sub(ts)
+			}
+			g.lastRangeTime[r.Name] = s.syncTs
+		} else {
+			g.lastRangeTime[r.Name] = ev.Time()
+		}
+	case tracev2.EventRangeEnd:
+		r := ev.Range()
+		var g *GoroutineSummary
+		switch r.Scope.Kind {
+		case tracev2.ResourceGoroutine:
+			g = s.gs[r.Scope.Goroutine()]
+		case tracev2.ResourceProc:
+			rp := rangeP{id: r.Scope.Proc(), name: r.Name}
+			if goid, ok := s.rangesP[rp]; ok {
+				if goid == ev.Goroutine() {
+					// As the comment in the RangeBegin case states, this is only OK
+					// if we finish on the same goroutine we started on.
+					g = s.gs[goid]
+				}
+				delete(s.rangesP, rp)
+			}
+		}
+		if g == nil {
+			break
+		}
+		ts := g.lastRangeTime[r.Name]
+		if ts == 0 {
+			break
+		}
+		g.RangeTime[r.Name] += ev.Time().Sub(ts)
+		delete(g.lastRangeTime, r.Name)
+
+	// Handle user-defined regions.
+	case tracev2.EventRegionBegin:
+		g := s.gs[ev.Goroutine()]
+		r := ev.Region()
+		region := &UserRegionSummary{
+			Name:               r.Type,
+			TaskID:             r.Task,
+			Start:              ev,
+			GoroutineExecStats: g.snapshotStat(ev.Time()),
+		}
+		g.activeRegions = append(g.activeRegions, region)
+		// Associate the region and current goroutine to the task.
+		task := s.getOrAddTask(r.Task)
+		task.Regions = append(task.Regions, region)
+		task.Goroutines[g.ID] = g
+	case tracev2.EventRegionEnd:
+		g := s.gs[ev.Goroutine()]
+		r := ev.Region()
+		var sd *UserRegionSummary
+		if regionStk := g.activeRegions; len(regionStk) > 0 {
+			// Pop the top region from the stack since that's what must have ended.
+			n := len(regionStk)
+			sd = regionStk[n-1]
+			regionStk = regionStk[:n-1]
+			g.activeRegions = regionStk
+			// N.B. No need to add the region to a task; the EventRegionBegin already handled it.
+		} else {
+			// This is an "end" without a start. Just fabricate the region now.
+			sd = &UserRegionSummary{Name: r.Type, TaskID: r.Task}
+			// Associate the region and current goroutine to the task.
+			task := s.getOrAddTask(r.Task)
+			task.Goroutines[g.ID] = g
+			task.Regions = append(task.Regions, sd)
+		}
+		sd.GoroutineExecStats = g.snapshotStat(ev.Time()).sub(sd.GoroutineExecStats)
+		sd.End = ev
+		g.Regions = append(g.Regions, sd)
+
+	// Handle tasks and logs.
+	case tracev2.EventTaskBegin, tracev2.EventTaskEnd:
+		// Initialize the task.
+		t := ev.Task()
+		task := s.getOrAddTask(t.ID)
+		task.Name = t.Type
+		task.Goroutines[ev.Goroutine()] = s.gs[ev.Goroutine()]
+		if ev.Kind() == tracev2.EventTaskBegin {
+			task.Start = ev
+		} else {
+			task.End = ev
+		}
+		// Initialize the parent, if one exists and it hasn't been done yet.
+		// We need to avoid doing it twice, otherwise the task could appear
+		// twice in the parent's Children list.
+		if t.Parent != tracev2.NoTask && task.Parent == nil {
+			parent := s.getOrAddTask(t.Parent)
+			task.Parent = parent
+			parent.Children = append(parent.Children, task)
+		}
+	case tracev2.EventLog:
+		log := ev.Log()
+		// Just add the log to the task. We'll create the task if it
+		// doesn't exist (it's just been mentioned now).
+		task := s.getOrAddTask(log.Task)
+		task.Goroutines[ev.Goroutine()] = s.gs[ev.Goroutine()]
+		task.Logs = append(task.Logs, ev)
+	}
+}
+
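+// getOrAddTask returns the summary for the task with the given ID, creating
+// it if this is the first time the task has been seen.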
+func (s *Summarizer) getOrAddTask(id tracev2.TaskID) *UserTaskSummary {
+	task := s.tasks[id]
+	if task == nil {
+		task = &UserTaskSummary{ID: id, Goroutines: make(map[tracev2.GoID]*GoroutineSummary)}
+		s.tasks[id] = task
+	}
+	return task
+}
+
+// Finalize indicates to the summarizer that we're done processing the trace.
+// It cleans up any remaining state and returns the full summary.
+func (s *Summarizer) Finalize() *Summary {
+	for _, g := range s.gs {
+		g.finalize(s.lastTs, nil)
+
+		// Sort based on region start time.
+		sort.Slice(g.Regions, func(i, j int) bool {
+			x := g.Regions[i].Start
+			y := g.Regions[j].Start
+			if x == nil {
+				return true
+			}
+			if y == nil {
+				return false
+			}
+			return x.Time() < y.Time()
+		})
+		g.goroutineSummary = nil
+	}
+	return &Summary{
+		Goroutines: s.gs,
+		Tasks:      s.tasks,
+	}
+}
+
+// RelatedGoroutinesV2 finds a set of goroutines related to goroutine goid for v2 traces.
+// The association is based on whether they have synchronized with each other in the Go
+// scheduler (one has unblocked another).
+func RelatedGoroutinesV2(events []tracev2.Event, goid tracev2.GoID) map[tracev2.GoID]struct{} {
+	// Process all the events, looking for transitions of goroutines
+	// out of GoWaiting. If there was an active goroutine when this
+	// happened, then we know that active goroutine unblocked another.
+	// Scribble all these down so we can process them.
+	type unblockEdge struct {
+		operator tracev2.GoID
+		operand  tracev2.GoID
+	}
+	var unblockEdges []unblockEdge
+	for _, ev := range events {
+		if ev.Goroutine() == tracev2.NoGoroutine {
+			continue
+		}
+		if ev.Kind() != tracev2.EventStateTransition {
+			continue
+		}
+		st := ev.StateTransition()
+		if st.Resource.Kind != tracev2.ResourceGoroutine {
+			continue
+		}
+		id := st.Resource.Goroutine()
+		old, new := st.Goroutine()
+		if old == new || old != tracev2.GoWaiting {
+			continue
+		}
+		unblockEdges = append(unblockEdges, unblockEdge{
+			operator: ev.Goroutine(),
+			operand:  id,
+		})
+	}
+	// Compute the transitive closure of depth 2 of goroutines that have unblocked each other
+	// (starting from goid).
+	gmap := make(map[tracev2.GoID]struct{})
+	gmap[goid] = struct{}{}
+	for i := 0; i < 2; i++ {
+		// Copy the map.
+		gmap1 := make(map[tracev2.GoID]struct{})
+		for g := range gmap {
+			gmap1[g] = struct{}{}
+		}
+		for _, edge := range unblockEdges {
+			if _, ok := gmap[edge.operand]; ok {
+				gmap1[edge.operator] = struct{}{}
+			}
+		}
+		gmap = gmap1
+	}
+	return gmap
+}
diff --git a/src/internal/trace/summary_test.go b/src/internal/trace/summary_test.go
new file mode 100644
index 0000000..9978b57
--- /dev/null
+++ b/src/internal/trace/summary_test.go
@@ -0,0 +1,436 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	tracev2 "internal/trace/v2"
+	"internal/trace/v2/testtrace"
+	"io"
+	"testing"
+)
+
+func TestSummarizeGoroutinesTrace(t *testing.T) {
+	summaries := summarizeTraceTest(t, "v2/testdata/tests/go122-gc-stress.test").Goroutines
+	var (
+		hasSchedWaitTime    bool
+		hasSyncBlockTime    bool
+		hasGCMarkAssistTime bool
+	)
+
+	assertContainsGoroutine(t, summaries, "runtime.gcBgMarkWorker")
+	assertContainsGoroutine(t, summaries, "main.main.func1")
+
+	for _, summary := range summaries {
+		basicGoroutineSummaryChecks(t, summary)
+		hasSchedWaitTime = hasSchedWaitTime || summary.SchedWaitTime > 0
+		if dt, ok := summary.BlockTimeByReason["sync"]; ok && dt > 0 {
+			hasSyncBlockTime = true
+		}
+		if dt, ok := summary.RangeTime["GC mark assist"]; ok && dt > 0 {
+			hasGCMarkAssistTime = true
+		}
+	}
+	if !hasSchedWaitTime {
+		t.Error("missing sched wait time")
+	}
+	if !hasSyncBlockTime {
+		t.Error("missing sync block time")
+	}
+	if !hasGCMarkAssistTime {
+		t.Error("missing GC mark assist time")
+	}
+}
+
+func TestSummarizeGoroutinesRegionsTrace(t *testing.T) {
+	summaries := summarizeTraceTest(t, "v2/testdata/tests/go122-annotations.test").Goroutines
+	type region struct {
+		startKind tracev2.EventKind
+		endKind   tracev2.EventKind
+	}
+	wantRegions := map[string]region{
+		// N.B. "pre-existing region" never even makes it into the trace.
+		//
+		// TODO(mknyszek): Add test case for end-without-a-start, which can happen at
+		// a generation split only.
+		"":                     {tracev2.EventStateTransition, tracev2.EventStateTransition}, // Task inheritance marker.
+		"task0 region":         {tracev2.EventRegionBegin, tracev2.EventBad},
+		"region0":              {tracev2.EventRegionBegin, tracev2.EventRegionEnd},
+		"region1":              {tracev2.EventRegionBegin, tracev2.EventRegionEnd},
+		"unended region":       {tracev2.EventRegionBegin, tracev2.EventStateTransition},
+		"post-existing region": {tracev2.EventRegionBegin, tracev2.EventBad},
+	}
+	for _, summary := range summaries {
+		basicGoroutineSummaryChecks(t, summary)
+		for _, region := range summary.Regions {
+			want, ok := wantRegions[region.Name]
+			if !ok {
+				continue
+			}
+			checkRegionEvents(t, want.startKind, want.endKind, summary.ID, region)
+			delete(wantRegions, region.Name)
+		}
+	}
+	if len(wantRegions) != 0 {
+		t.Errorf("failed to find regions: %#v", wantRegions)
+	}
+}
+
+func TestSummarizeTasksTrace(t *testing.T) {
+	summaries := summarizeTraceTest(t, "v2/testdata/tests/go122-annotations-stress.test").Tasks
+	type task struct {
+		name       string
+		parent     *tracev2.TaskID
+		children   []tracev2.TaskID
+		logs       []tracev2.Log
+		goroutines []tracev2.GoID
+	}
+	parent := func(id tracev2.TaskID) *tracev2.TaskID {
+		p := new(tracev2.TaskID)
+		*p = id
+		return p
+	}
+	wantTasks := map[tracev2.TaskID]task{
+		tracev2.BackgroundTask: {
+			// The background task (0) is never any task's parent.
+			logs: []tracev2.Log{
+				{Task: tracev2.BackgroundTask, Category: "log", Message: "before do"},
+				{Task: tracev2.BackgroundTask, Category: "log", Message: "before do"},
+			},
+			goroutines: []tracev2.GoID{1},
+		},
+		1: {
+			// This started before tracing started and has no parents.
+			// Task 2 is technically a child, but we lost that information.
+			children: []tracev2.TaskID{3, 7, 16},
+			logs: []tracev2.Log{
+				{Task: 1, Category: "log", Message: "before do"},
+				{Task: 1, Category: "log", Message: "before do"},
+			},
+			goroutines: []tracev2.GoID{1},
+		},
+		2: {
+			// This started before tracing started and its parent is technically (1), but that information was lost.
+			children: []tracev2.TaskID{8, 17},
+			logs: []tracev2.Log{
+				{Task: 2, Category: "log", Message: "before do"},
+				{Task: 2, Category: "log", Message: "before do"},
+			},
+			goroutines: []tracev2.GoID{1},
+		},
+		3: {
+			parent:   parent(1),
+			children: []tracev2.TaskID{10, 19},
+			logs: []tracev2.Log{
+				{Task: 3, Category: "log", Message: "before do"},
+				{Task: 3, Category: "log", Message: "before do"},
+			},
+			goroutines: []tracev2.GoID{1},
+		},
+		4: {
+			// Explicitly, no parent.
+			children: []tracev2.TaskID{12, 21},
+			logs: []tracev2.Log{
+				{Task: 4, Category: "log", Message: "before do"},
+				{Task: 4, Category: "log", Message: "before do"},
+			},
+			goroutines: []tracev2.GoID{1},
+		},
+		12: {
+			parent:   parent(4),
+			children: []tracev2.TaskID{13},
+			logs: []tracev2.Log{
+				// TODO(mknyszek): This is computed asynchronously in the trace,
+				// which makes regenerating this test very annoying, since it will
+				// likely break this test. Resolve this by making the order not matter.
+				{Task: 12, Category: "log2", Message: "do"},
+				{Task: 12, Category: "log", Message: "fanout region4"},
+				{Task: 12, Category: "log", Message: "fanout region0"},
+				{Task: 12, Category: "log", Message: "fanout region1"},
+				{Task: 12, Category: "log", Message: "fanout region2"},
+				{Task: 12, Category: "log", Message: "before do"},
+				{Task: 12, Category: "log", Message: "fanout region3"},
+			},
+			goroutines: []tracev2.GoID{1, 5, 6, 7, 8, 9},
+		},
+		13: {
+			// Explicitly, no children.
+			parent: parent(12),
+			logs: []tracev2.Log{
+				{Task: 13, Category: "log2", Message: "do"},
+			},
+			goroutines: []tracev2.GoID{7},
+		},
+	}
+	for id, summary := range summaries {
+		want, ok := wantTasks[id]
+		if !ok {
+			continue
+		}
+		if id != summary.ID {
+			t.Errorf("ambiguous task %d (or %d?): field likely set incorrectly", id, summary.ID)
+		}
+
+		// Check parent.
+		if want.parent != nil {
+			if summary.Parent == nil {
+				t.Errorf("expected parent %d for task %d without a parent", *want.parent, id)
+			} else if summary.Parent.ID != *want.parent {
+				t.Errorf("bad parent for task %d: want %d, got %d", id, *want.parent, summary.Parent.ID)
+			}
+		} else if summary.Parent != nil {
+			t.Errorf("unexpected parent %d for task %d", summary.Parent.ID, id)
+		}
+
+		// Check children.
+		gotChildren := make(map[tracev2.TaskID]struct{})
+		for _, child := range summary.Children {
+			gotChildren[child.ID] = struct{}{}
+		}
+		for _, wantChild := range want.children {
+			if _, ok := gotChildren[wantChild]; ok {
+				delete(gotChildren, wantChild)
+			} else {
+				t.Errorf("expected child task %d for task %d not found", wantChild, id)
+			}
+		}
+		if len(gotChildren) != 0 {
+			for child := range gotChildren {
+				t.Errorf("unexpected child task %d for task %d", child, id)
+			}
+		}
+
+		// Check logs.
+		if len(want.logs) != len(summary.Logs) {
+			t.Errorf("wanted %d logs for task %d, got %d logs instead", len(want.logs), id, len(summary.Logs))
+		} else {
+			for i := range want.logs {
+				if want.logs[i] != summary.Logs[i].Log() {
+					t.Errorf("log mismatch: want %#v, got %#v", want.logs[i], summary.Logs[i].Log())
+				}
+			}
+		}
+
+		// Check goroutines.
+		if len(want.goroutines) != len(summary.Goroutines) {
+			t.Errorf("wanted %d goroutines for task %d, got %d goroutines instead", len(want.goroutines), id, len(summary.Goroutines))
+		} else {
+			for _, goid := range want.goroutines {
+				g, ok := summary.Goroutines[goid]
+				if !ok {
+					t.Errorf("want goroutine %d for task %d, not found", goid, id)
+					continue
+				}
+				if g.ID != goid {
+					t.Errorf("goroutine summary for %d does not match task %d listing of %d", g.ID, id, goid)
+				}
+			}
+		}
+
+		// Mark the task as seen.
+		delete(wantTasks, id)
+	}
+	if len(wantTasks) != 0 {
+		t.Errorf("failed to find tasks: %#v", wantTasks)
+	}
+}
+
+func assertContainsGoroutine(t *testing.T, summaries map[tracev2.GoID]*GoroutineSummary, name string) {
+	for _, summary := range summaries {
+		if summary.Name == name {
+			return
+		}
+	}
+	t.Errorf("missing goroutine %s", name)
+}
+
+func basicGoroutineSummaryChecks(t *testing.T, summary *GoroutineSummary) {
+	if summary.ID == tracev2.NoGoroutine {
+		t.Error("summary found for no goroutine")
+		return
+	}
+	if (summary.StartTime != 0 && summary.CreationTime > summary.StartTime) ||
+		(summary.StartTime != 0 && summary.EndTime != 0 && summary.StartTime > summary.EndTime) {
+		t.Errorf("bad summary creation/start/end times for G %d: creation=%d start=%d end=%d", summary.ID, summary.CreationTime, summary.StartTime, summary.EndTime)
+	}
+	if (summary.PC != 0 && summary.Name == "") || (summary.PC == 0 && summary.Name != "") {
+		t.Errorf("bad name and/or PC for G %d: pc=0x%x name=%q", summary.ID, summary.PC, summary.Name)
+	}
+	basicGoroutineExecStatsChecks(t, &summary.GoroutineExecStats)
+	for _, region := range summary.Regions {
+		basicGoroutineExecStatsChecks(t, &region.GoroutineExecStats)
+	}
+}
+
+func summarizeTraceTest(t *testing.T, testPath string) *Summary {
+	trace, _, err := testtrace.ParseFile(testPath)
+	if err != nil {
+		t.Fatalf("malformed test %s: bad trace file: %v", testPath, err)
+	}
+	// Create the analysis state.
+	s := NewSummarizer()
+
+	// Create a reader.
+	r, err := tracev2.NewReader(trace)
+	if err != nil {
+		t.Fatalf("failed to create trace reader for %s: %v", testPath, err)
+	}
+	// Process the trace.
+	for {
+		ev, err := r.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatalf("failed to process trace %s: %v", testPath, err)
+		}
+		s.Event(&ev)
+	}
+	return s.Finalize()
+}
+
+func checkRegionEvents(t *testing.T, wantStart, wantEnd tracev2.EventKind, goid tracev2.GoID, region *UserRegionSummary) {
+	switch wantStart {
+	case tracev2.EventBad:
+		if region.Start != nil {
+			t.Errorf("expected nil region start event, got\n%s", region.Start.String())
+		}
+	case tracev2.EventStateTransition, tracev2.EventRegionBegin:
+		if region.Start == nil {
+			t.Error("expected non-nil region start event, got nil")
+		}
+		kind := region.Start.Kind()
+		if kind != wantStart {
+			t.Errorf("wanted region start event %s, got %s", wantStart, kind)
+		}
+		if kind == tracev2.EventRegionBegin {
+			if region.Start.Region().Type != region.Name {
+				t.Errorf("region name mismatch: event has %s, summary has %s", region.Start.Region().Type, region.Name)
+			}
+		} else {
+			st := region.Start.StateTransition()
+			if st.Resource.Kind != tracev2.ResourceGoroutine {
+				t.Errorf("found region start event for the wrong resource: %s", st.Resource)
+			}
+			if st.Resource.Goroutine() != goid {
+				t.Errorf("found region start event for the wrong resource: wanted goroutine %d, got %s", goid, st.Resource)
+			}
+			if old, _ := st.Goroutine(); old != tracev2.GoNotExist && old != tracev2.GoUndetermined {
+				t.Errorf("expected transition from GoNotExist or GoUndetermined, got transition from %s instead", old)
+			}
+		}
+	default:
+		t.Errorf("unexpected want start event type: %s", wantStart)
+	}
+
+	switch wantEnd {
+	case tracev2.EventBad:
+		if region.End != nil {
+			t.Errorf("expected nil region end event, got\n%s", region.End.String())
+		}
+	case tracev2.EventStateTransition, tracev2.EventRegionEnd:
+		if region.End == nil {
+			t.Error("expected non-nil region end event, got nil")
+		}
+		kind := region.End.Kind()
+		if kind != wantEnd {
+			t.Errorf("wanted region end event %s, got %s", wantEnd, kind)
+		}
+		if kind == tracev2.EventRegionEnd {
+			if region.End.Region().Type != region.Name {
+				t.Errorf("region name mismatch: event has %s, summary has %s", region.End.Region().Type, region.Name)
+			}
+		} else {
+			st := region.End.StateTransition()
+			if st.Resource.Kind != tracev2.ResourceGoroutine {
+				t.Errorf("found region end event for the wrong resource: %s", st.Resource)
+			}
+			if st.Resource.Goroutine() != goid {
+				t.Errorf("found region end event for the wrong resource: wanted goroutine %d, got %s", goid, st.Resource)
+			}
+			if _, new := st.Goroutine(); new != tracev2.GoNotExist {
+				t.Errorf("expected transition to GoNotExist, got transition to %s instead", new)
+			}
+		}
+	default:
+		t.Errorf("unexpected want end event type: %s", wantEnd)
+	}
+}
+
+func basicGoroutineExecStatsChecks(t *testing.T, stats *GoroutineExecStats) {
+	if stats.ExecTime < 0 {
+		t.Error("found negative ExecTime")
+	}
+	if stats.SchedWaitTime < 0 {
+		t.Error("found negative SchedWaitTime")
+	}
+	if stats.SyscallTime < 0 {
+		t.Error("found negative SyscallTime")
+	}
+	if stats.SyscallBlockTime < 0 {
+		t.Error("found negative SyscallBlockTime")
+	}
+	if stats.TotalTime < 0 {
+		t.Error("found negative TotalTime")
+	}
+	for reason, dt := range stats.BlockTimeByReason {
+		if dt < 0 {
+			t.Errorf("found negative BlockTimeByReason for %s", reason)
+		}
+	}
+	for name, dt := range stats.RangeTime {
+		if dt < 0 {
+			t.Errorf("found negative RangeTime for range %s", name)
+		}
+	}
+}
+
+func TestRelatedGoroutinesV2Trace(t *testing.T) {
+	testPath := "v2/testdata/tests/go122-gc-stress.test"
+	trace, _, err := testtrace.ParseFile(testPath)
+	if err != nil {
+		t.Fatalf("malformed test %s: bad trace file: %v", testPath, err)
+	}
+
+	// Create a reader.
+	r, err := tracev2.NewReader(trace)
+	if err != nil {
+		t.Fatalf("failed to create trace reader for %s: %v", testPath, err)
+	}
+
+	// Collect all the events.
+	var events []tracev2.Event
+	for {
+		ev, err := r.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatalf("failed to process trace %s: %v", testPath, err)
+		}
+		events = append(events, ev)
+	}
+
+	// Test the function.
+	targetg := tracev2.GoID(86)
+	got := RelatedGoroutinesV2(events, targetg)
+	want := map[tracev2.GoID]struct{}{
+		tracev2.GoID(86):  struct{}{}, // N.B. Result includes target.
+		tracev2.GoID(71):  struct{}{},
+		tracev2.GoID(25):  struct{}{},
+		tracev2.GoID(122): struct{}{},
+	}
+	for goid := range got {
+		if _, ok := want[goid]; ok {
+			delete(want, goid)
+		} else {
+			t.Errorf("unexpected goroutine %d found in related goroutines for %d in test %s", goid, targetg, testPath)
+		}
+	}
+	if len(want) != 0 {
+		for goid := range want {
+			t.Errorf("failed to find related goroutine %d for goroutine %d in test %s", goid, targetg, testPath)
+		}
+	}
+}
diff --git a/src/internal/trace/traceviewer/emitter.go b/src/internal/trace/traceviewer/emitter.go
new file mode 100644
index 0000000..c91c743
--- /dev/null
+++ b/src/internal/trace/traceviewer/emitter.go
@@ -0,0 +1,813 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceviewer
+
+import (
+	"encoding/json"
+	"fmt"
+	"internal/trace"
+	"internal/trace/traceviewer/format"
+	"io"
+	"strconv"
+	"time"
+)
+
+type TraceConsumer struct {
+	ConsumeTimeUnit    func(unit string)
+	ConsumeViewerEvent func(v *format.Event, required bool)
+	ConsumeViewerFrame func(key string, f format.Frame)
+	Flush              func()
+}
+
+// ViewerDataTraceConsumer returns a TraceConsumer that writes to w. The
+// startIdx and endIdx are used for splitting large traces. They refer to
+// indexes in the traceEvents output array, not the events in the trace input.
+func ViewerDataTraceConsumer(w io.Writer, startIdx, endIdx int64) TraceConsumer {
+	allFrames := make(map[string]format.Frame)
+	requiredFrames := make(map[string]format.Frame)
+	enc := json.NewEncoder(w)
+	written := 0
+	index := int64(-1)
+
+	io.WriteString(w, "{")
+	return TraceConsumer{
+		ConsumeTimeUnit: func(unit string) {
+			io.WriteString(w, `"displayTimeUnit":`)
+			enc.Encode(unit)
+			io.WriteString(w, ",")
+		},
+		ConsumeViewerEvent: func(v *format.Event, required bool) {
+			index++
+			if !required && (index < startIdx || index > endIdx) {
+				// not in the range. Skip!
+				return
+			}
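+			// Remember every stack frame this event references so that Flush
+			// can emit them in the "stackFrames" section.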
+			WalkStackFrames(allFrames, v.Stack, func(id int) {
+				s := strconv.Itoa(id)
+				requiredFrames[s] = allFrames[s]
+			})
+			WalkStackFrames(allFrames, v.EndStack, func(id int) {
+				s := strconv.Itoa(id)
+				requiredFrames[s] = allFrames[s]
+			})
+			if written == 0 {
+				io.WriteString(w, `"traceEvents": [`)
+			}
+			if written > 0 {
+				io.WriteString(w, ",")
+			}
+			enc.Encode(v)
+			// TODO(mknyszek): get rid of the extra \n inserted by enc.Encode.
+			// Same should be applied to splittingTraceConsumer.
+			written++
+		},
+		ConsumeViewerFrame: func(k string, v format.Frame) {
+			allFrames[k] = v
+		},
+		Flush: func() {
+			io.WriteString(w, `], "stackFrames":`)
+			enc.Encode(requiredFrames)
+			io.WriteString(w, `}`)
+		},
+	}
+}
+
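+// SplittingTraceConsumer returns a TraceConsumer that writes no output.
+// Instead, it measures how large each event (plus its stack frames) would be
+// once encoded and, on Flush, records split points in the returned splitter
+// so that a large trace can be served in chunks of roughly max bytes,
+// counting the required events that every chunk must include.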
+func SplittingTraceConsumer(max int) (*splitter, TraceConsumer) {
+	type eventSz struct {
+		Time   float64
+		Sz     int
+		Frames []int
+	}
+
+	var (
+		// data.Frames contains only the frames for required events.
+		data = format.Data{Frames: make(map[string]format.Frame)}
+
+		allFrames = make(map[string]format.Frame)
+
+		sizes []eventSz
+		cw    countingWriter
+	)
+
+	s := new(splitter)
+
+	return s, TraceConsumer{
+		ConsumeTimeUnit: func(unit string) {
+			data.TimeUnit = unit
+		},
+		ConsumeViewerEvent: func(v *format.Event, required bool) {
+			if required {
+				// Store required events inside data so flush
+				// can include them in the required part of the
+				// trace.
+				data.Events = append(data.Events, v)
+				WalkStackFrames(allFrames, v.Stack, func(id int) {
+					s := strconv.Itoa(id)
+					data.Frames[s] = allFrames[s]
+				})
+				WalkStackFrames(allFrames, v.EndStack, func(id int) {
+					s := strconv.Itoa(id)
+					data.Frames[s] = allFrames[s]
+				})
+				return
+			}
+			enc := json.NewEncoder(&cw)
+			enc.Encode(v)
+			size := eventSz{Time: v.Time, Sz: cw.size + 1} // +1 for ",".
+			// Add referenced stack frames. Their size is computed
+			// in flush, where we can dedup across events.
+			WalkStackFrames(allFrames, v.Stack, func(id int) {
+				size.Frames = append(size.Frames, id)
+			})
+			WalkStackFrames(allFrames, v.EndStack, func(id int) {
+				size.Frames = append(size.Frames, id) // This may add duplicates. We'll dedup later.
+			})
+			sizes = append(sizes, size)
+			cw.size = 0
+		},
+		ConsumeViewerFrame: func(k string, v format.Frame) {
+			allFrames[k] = v
+		},
+		Flush: func() {
+			// Calculate size of the mandatory part of the trace.
+			// This includes thread names and stack frames for
+			// required events.
+			cw.size = 0
+			enc := json.NewEncoder(&cw)
+			enc.Encode(data)
+			requiredSize := cw.size
+
+			// Then calculate size of each individual event and
+			// their stack frames, grouping them into ranges. We
+			// only include stack frames relevant to the events in
+			// the range to reduce overhead.
+
+			var (
+				start = 0
+
+				eventsSize = 0
+
+				frames     = make(map[string]format.Frame)
+				framesSize = 0
+			)
+			for i, ev := range sizes {
+				eventsSize += ev.Sz
+
+				// Add required stack frames. Note that they
+				// may already be in the map.
+				for _, id := range ev.Frames {
+					s := strconv.Itoa(id)
+					_, ok := frames[s]
+					if ok {
+						continue
+					}
+					f := allFrames[s]
+					frames[s] = f
+					framesSize += stackFrameEncodedSize(uint(id), f)
+				}
+
+				total := requiredSize + framesSize + eventsSize
+				if total < max {
+					continue
+				}
+
+				// Reached max size, commit this range and
+				// start a new range.
+				startTime := time.Duration(sizes[start].Time * 1000)
+				endTime := time.Duration(ev.Time * 1000)
+				s.Ranges = append(s.Ranges, Range{
+					Name:      fmt.Sprintf("%v-%v", startTime, endTime),
+					Start:     start,
+					End:       i + 1,
+					StartTime: int64(startTime),
+					EndTime:   int64(endTime),
+				})
+				start = i + 1
+				frames = make(map[string]format.Frame)
+				framesSize = 0
+				eventsSize = 0
+			}
+			if len(s.Ranges) <= 1 {
+				s.Ranges = nil
+				return
+			}
+
+			if end := len(sizes) - 1; start < end {
+				s.Ranges = append(s.Ranges, Range{
+					Name:      fmt.Sprintf("%v-%v", time.Duration(sizes[start].Time*1000), time.Duration(sizes[end].Time*1000)),
+					Start:     start,
+					End:       end,
+					StartTime: int64(sizes[start].Time * 1000),
+					EndTime:   int64(sizes[end].Time * 1000),
+				})
+			}
+		},
+	}
+}
+
+type splitter struct {
+	Ranges []Range
+}
+
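+// countingWriter is an io.Writer that discards its input and only records
+// how many bytes it was asked to write.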
+type countingWriter struct {
+	size int
+}
+
+func (cw *countingWriter) Write(data []byte) (int, error) {
+	cw.size += len(data)
+	return len(data), nil
+}
+
+func stackFrameEncodedSize(id uint, f format.Frame) int {
+	// We want to know the marginal size of traceviewer.Data.Frames for
+	// each event. Running full JSON encoding of the map for each event is
+	// far too slow.
+	//
+	// Since the format is fixed, we can easily compute the size without
+	// encoding.
+	//
+	// A single entry looks like one of the following:
+	//
+	//   "1":{"name":"main.main:30"},
+	//   "10":{"name":"pkg.NewSession:173","parent":9},
+	//
+	// The parent is omitted if 0. The trailing comma is omitted from the
+	// last entry, but we don't need that much precision.
+	const (
+		baseSize = len(`"`) + len(`":{"name":"`) + len(`"},`)
+
+		// Don't count the trailing quote on the name, as that is
+		// counted in baseSize.
+		parentBaseSize = len(`,"parent":`)
+	)
+
+	size := baseSize
+
+	size += len(f.Name)
+
+	// Bytes for id (always positive).
+	for id > 0 {
+		size += 1
+		id /= 10
+	}
+
+	if f.Parent > 0 {
+		size += parentBaseSize
+		// Bytes for parent (always positive).
+		for f.Parent > 0 {
+			size += 1
+			f.Parent /= 10
+		}
+	}
+
+	return size
+}
+
+// WalkStackFrames calls fn for id and all of its parent frames from allFrames.
+func WalkStackFrames(allFrames map[string]format.Frame, id int, fn func(id int)) {
+	for id != 0 {
+		f, ok := allFrames[strconv.Itoa(id)]
+		if !ok {
+			break
+		}
+		fn(id)
+		id = f.Parent
+	}
+}
+
+type Mode int
+
+const (
+	ModeGoroutineOriented Mode = 1 << iota
+	ModeTaskOriented
+	ModeThreadOriented // Mutually exclusive with ModeGoroutineOriented.
+)
+
+// NewEmitter returns a new Emitter that writes to c. The rangeStart and
+// rangeEnd args are used for splitting large traces.
+func NewEmitter(c TraceConsumer, rangeStart, rangeEnd time.Duration) *Emitter {
+	c.ConsumeTimeUnit("ns")
+
+	return &Emitter{
+		c:          c,
+		rangeStart: rangeStart,
+		rangeEnd:   rangeEnd,
+		frameTree:  frameNode{children: make(map[uint64]frameNode)},
+		resources:  make(map[uint64]string),
+		tasks:      make(map[uint64]task),
+	}
+}
+
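+// Emitter turns higher-level trace constructs (slices, instants, arrows,
+// counters, and metadata) into Chrome trace viewer events and hands them to
+// its TraceConsumer.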
+type Emitter struct {
+	c          TraceConsumer
+	rangeStart time.Duration
+	rangeEnd   time.Duration
+
+	heapStats, prevHeapStats     heapStats
+	gstates, prevGstates         [gStateCount]int64
+	threadStats, prevThreadStats [threadStateCount]int64
+	gomaxprocs                   uint64
+	frameTree                    frameNode
+	frameSeq                     int
+	arrowSeq                     uint64
+	filter                       func(uint64) bool
+	resourceType                 string
+	resources                    map[uint64]string
+	focusResource                uint64
+	tasks                        map[uint64]task
+	asyncSliceSeq                uint64
+}
+
+type task struct {
+	name      string
+	sortIndex int
+}
+
+func (e *Emitter) Gomaxprocs(v uint64) {
+	if v > e.gomaxprocs {
+		e.gomaxprocs = v
+	}
+}
+
+func (e *Emitter) Resource(id uint64, name string) {
+	if e.filter != nil && !e.filter(id) {
+		return
+	}
+	e.resources[id] = name
+}
+
+func (e *Emitter) SetResourceType(name string) {
+	e.resourceType = name
+}
+
+func (e *Emitter) SetResourceFilter(filter func(uint64) bool) {
+	e.filter = filter
+}
+
+func (e *Emitter) Task(id uint64, name string, sortIndex int) {
+	e.tasks[id] = task{name, sortIndex}
+}
+
+func (e *Emitter) Slice(s SliceEvent) {
+	if e.filter != nil && !e.filter(s.Resource) {
+		return
+	}
+	e.slice(s, format.ProcsSection, "")
+}
+
+func (e *Emitter) TaskSlice(s SliceEvent) {
+	e.slice(s, format.TasksSection, pickTaskColor(s.Resource))
+}
+
+func (e *Emitter) slice(s SliceEvent, sectionID uint64, cname string) {
+	if !e.tsWithinRange(s.Ts) && !e.tsWithinRange(s.Ts+s.Dur) {
+		return
+	}
+	e.OptionalEvent(&format.Event{
+		Name:     s.Name,
+		Phase:    "X",
+		Time:     viewerTime(s.Ts),
+		Dur:      viewerTime(s.Dur),
+		PID:      sectionID,
+		TID:      s.Resource,
+		Stack:    s.Stack,
+		EndStack: s.EndStack,
+		Arg:      s.Arg,
+		Cname:    cname,
+	})
+}
+
+type SliceEvent struct {
+	Name     string
+	Ts       time.Duration
+	Dur      time.Duration
+	Resource uint64
+	Stack    int
+	EndStack int
+	Arg      any
+}
+
+func (e *Emitter) AsyncSlice(s AsyncSliceEvent) {
+	if !e.tsWithinRange(s.Ts) && !e.tsWithinRange(s.Ts+s.Dur) {
+		return
+	}
+	if e.filter != nil && !e.filter(s.Resource) {
+		return
+	}
+	cname := ""
+	if s.TaskColorIndex != 0 {
+		cname = pickTaskColor(s.TaskColorIndex)
+	}
+	e.asyncSliceSeq++
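+	// Emit an async begin/end pair (phases "b" and "e") that share an ID so
+	// the viewer can associate the two halves of the slice.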
+	e.OptionalEvent(&format.Event{
+		Category: s.Category,
+		Name:     s.Name,
+		Phase:    "b",
+		Time:     viewerTime(s.Ts),
+		TID:      s.Resource,
+		ID:       e.asyncSliceSeq,
+		Scope:    s.Scope,
+		Stack:    s.Stack,
+		Cname:    cname,
+	})
+	e.OptionalEvent(&format.Event{
+		Category: s.Category,
+		Name:     s.Name,
+		Phase:    "e",
+		Time:     viewerTime(s.Ts + s.Dur),
+		TID:      s.Resource,
+		ID:       e.asyncSliceSeq,
+		Scope:    s.Scope,
+		Stack:    s.EndStack,
+		Arg:      s.Arg,
+		Cname:    cname,
+	})
+}
+
+type AsyncSliceEvent struct {
+	SliceEvent
+	Category       string
+	Scope          string
+	TaskColorIndex uint64 // Take on the same color as the task with this ID.
+}
+
+func (e *Emitter) Instant(i InstantEvent) {
+	if !e.tsWithinRange(i.Ts) {
+		return
+	}
+	if e.filter != nil && !e.filter(i.Resource) {
+		return
+	}
+	cname := ""
+	e.OptionalEvent(&format.Event{
+		Name:     i.Name,
+		Category: i.Category,
+		Phase:    "I",
+		Scope:    "t",
+		Time:     viewerTime(i.Ts),
+		PID:      format.ProcsSection,
+		TID:      i.Resource,
+		Stack:    i.Stack,
+		Cname:    cname,
+		Arg:      i.Arg,
+	})
+}
+
+type InstantEvent struct {
+	Ts       time.Duration
+	Name     string
+	Category string
+	Resource uint64
+	Stack    int
+	Arg      any
+}
+
+func (e *Emitter) Arrow(a ArrowEvent) {
+	if e.filter != nil && (!e.filter(a.FromResource) || !e.filter(a.ToResource)) {
+		return
+	}
+	e.arrow(a, format.ProcsSection)
+}
+
+func (e *Emitter) TaskArrow(a ArrowEvent) {
+	e.arrow(a, format.TasksSection)
+}
+
+func (e *Emitter) arrow(a ArrowEvent, sectionID uint64) {
+	if !e.tsWithinRange(a.Start) || !e.tsWithinRange(a.End) {
+		return
+	}
+	e.arrowSeq++
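+	// Flow events: phase "s" starts the arrow at the source resource and
+	// phase "t" steps it onto the destination resource; the shared ID ties
+	// the two together.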
+	e.OptionalEvent(&format.Event{
+		Name:  a.Name,
+		Phase: "s",
+		TID:   a.FromResource,
+		PID:   sectionID,
+		ID:    e.arrowSeq,
+		Time:  viewerTime(a.Start),
+		Stack: a.FromStack,
+	})
+	e.OptionalEvent(&format.Event{
+		Name:  a.Name,
+		Phase: "t",
+		TID:   a.ToResource,
+		PID:   sectionID,
+		ID:    e.arrowSeq,
+		Time:  viewerTime(a.End),
+	})
+}
+
+type ArrowEvent struct {
+	Name         string
+	Start        time.Duration
+	End          time.Duration
+	FromResource uint64
+	FromStack    int
+	ToResource   uint64
+}
+
+func (e *Emitter) Event(ev *format.Event) {
+	e.c.ConsumeViewerEvent(ev, true)
+}
+
+func (e *Emitter) HeapAlloc(ts time.Duration, v uint64) {
+	e.heapStats.heapAlloc = v
+	e.emitHeapCounters(ts)
+}
+
+func (e *Emitter) Focus(id uint64) {
+	e.focusResource = id
+}
+
+func (e *Emitter) GoroutineTransition(ts time.Duration, from, to GState) {
+	e.gstates[from]--
+	e.gstates[to]++
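+	// Only emit a counter event if the aggregate counts changed and the
+	// timestamp is within the requested range.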
+	if e.prevGstates == e.gstates {
+		return
+	}
+	if e.tsWithinRange(ts) {
+		e.OptionalEvent(&format.Event{
+			Name:  "Goroutines",
+			Phase: "C",
+			Time:  viewerTime(ts),
+			PID:   1,
+			Arg: &format.GoroutineCountersArg{
+				Running:   uint64(e.gstates[GRunning]),
+				Runnable:  uint64(e.gstates[GRunnable]),
+				GCWaiting: uint64(e.gstates[GWaitingGC]),
+			},
+		})
+	}
+	e.prevGstates = e.gstates
+}
+
+func (e *Emitter) IncThreadStateCount(ts time.Duration, state ThreadState, delta int64) {
+	e.threadStats[state] += delta
+	if e.prevThreadStats == e.threadStats {
+		return
+	}
+	if e.tsWithinRange(ts) {
+		e.OptionalEvent(&format.Event{
+			Name:  "Threads",
+			Phase: "C",
+			Time:  viewerTime(ts),
+			PID:   1,
+			Arg: &format.ThreadCountersArg{
+				Running:   int64(e.threadStats[ThreadStateRunning]),
+				InSyscall: int64(e.threadStats[ThreadStateInSyscall]),
+				// TODO(mknyszek): Why is InSyscallRuntime not included here?
+			},
+		})
+	}
+	e.prevThreadStats = e.threadStats
+}
+
+func (e *Emitter) HeapGoal(ts time.Duration, v uint64) {
+	// This cutoff at 1 PiB is a workaround for https://github.com/golang/go/issues/63864.
+	//
+	// TODO(mknyszek): Remove this once the problem has been fixed.
+	const PB = 1 << 50
+	if v > PB {
+		v = 0
+	}
+	e.heapStats.nextGC = v
+	e.emitHeapCounters(ts)
+}
+
+func (e *Emitter) emitHeapCounters(ts time.Duration) {
+	if e.prevHeapStats == e.heapStats {
+		return
+	}
+	diff := uint64(0)
+	if e.heapStats.nextGC > e.heapStats.heapAlloc {
+		diff = e.heapStats.nextGC - e.heapStats.heapAlloc
+	}
+	if e.tsWithinRange(ts) {
+		e.OptionalEvent(&format.Event{
+			Name:  "Heap",
+			Phase: "C",
+			Time:  viewerTime(ts),
+			PID:   1,
+			Arg:   &format.HeapCountersArg{Allocated: e.heapStats.heapAlloc, NextGC: diff},
+		})
+	}
+	e.prevHeapStats = e.heapStats
+}
+
+// Err returns an error if the emitter is in an invalid state.
+func (e *Emitter) Err() error {
+	if e.gstates[GRunnable] < 0 || e.gstates[GRunning] < 0 || e.threadStats[ThreadStateInSyscall] < 0 || e.threadStats[ThreadStateInSyscallRuntime] < 0 {
+		return fmt.Errorf(
+			"runnable=%d running=%d insyscall=%d insyscallRuntime=%d",
+			e.gstates[GRunnable],
+			e.gstates[GRunning],
+			e.threadStats[ThreadStateInSyscall],
+			e.threadStats[ThreadStateInSyscallRuntime],
+		)
+	}
+	return nil
+}
+
+func (e *Emitter) tsWithinRange(ts time.Duration) bool {
+	return e.rangeStart <= ts && ts <= e.rangeEnd
+}
+
+// OptionalEvent emits ev if it's within the time range of the consumer, i.e.
+// the selected trace split range.
+func (e *Emitter) OptionalEvent(ev *format.Event) {
+	e.c.ConsumeViewerEvent(ev, false)
+}
+
+func (e *Emitter) Flush() {
+	e.processMeta(format.StatsSection, "STATS", 0)
+
+	if len(e.tasks) != 0 {
+		e.processMeta(format.TasksSection, "TASKS", 1)
+	}
+	for id, task := range e.tasks {
+		e.threadMeta(format.TasksSection, id, task.name, task.sortIndex)
+	}
+
+	e.processMeta(format.ProcsSection, e.resourceType, 2)
+
+	e.threadMeta(format.ProcsSection, trace.GCP, "GC", -6)
+	e.threadMeta(format.ProcsSection, trace.NetpollP, "Network", -5)
+	e.threadMeta(format.ProcsSection, trace.TimerP, "Timers", -4)
+	e.threadMeta(format.ProcsSection, trace.SyscallP, "Syscalls", -3)
+
+	for id, name := range e.resources {
+		priority := int(id)
+		if e.focusResource != 0 && id == e.focusResource {
+			// Put the focus goroutine on top.
+			priority = -2
+		}
+		e.threadMeta(format.ProcsSection, id, name, priority)
+	}
+
+	e.c.Flush()
+}
+
+func (e *Emitter) threadMeta(sectionID, tid uint64, name string, priority int) {
+	e.Event(&format.Event{
+		Name:  "thread_name",
+		Phase: "M",
+		PID:   sectionID,
+		TID:   tid,
+		Arg:   &format.NameArg{Name: name},
+	})
+	e.Event(&format.Event{
+		Name:  "thread_sort_index",
+		Phase: "M",
+		PID:   sectionID,
+		TID:   tid,
+		Arg:   &format.SortIndexArg{Index: priority},
+	})
+}
+
+func (e *Emitter) processMeta(sectionID uint64, name string, priority int) {
+	e.Event(&format.Event{
+		Name:  "process_name",
+		Phase: "M",
+		PID:   sectionID,
+		Arg:   &format.NameArg{Name: name},
+	})
+	e.Event(&format.Event{
+		Name:  "process_sort_index",
+		Phase: "M",
+		PID:   sectionID,
+		Arg:   &format.SortIndexArg{Index: priority},
+	})
+}
+
+// Stack emits the given frames and returns a unique id for the stack. No
+// pointers to the given data are retained beyond the call to Stack.
+func (e *Emitter) Stack(stk []*trace.Frame) int {
+	return e.buildBranch(e.frameTree, stk)
+}
+
+// buildBranch builds one branch in the prefix tree rooted at e.frameTree.
+func (e *Emitter) buildBranch(parent frameNode, stk []*trace.Frame) int {
+	if len(stk) == 0 {
+		return parent.id
+	}
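+	// Consume frames from the end of the slice first so that stacks sharing
+	// a common suffix share nodes near the root of the prefix tree.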
+	last := len(stk) - 1
+	frame := stk[last]
+	stk = stk[:last]
+
+	node, ok := parent.children[frame.PC]
+	if !ok {
+		e.frameSeq++
+		node.id = e.frameSeq
+		node.children = make(map[uint64]frameNode)
+		parent.children[frame.PC] = node
+		e.c.ConsumeViewerFrame(strconv.Itoa(node.id), format.Frame{Name: fmt.Sprintf("%v:%v", frame.Fn, frame.Line), Parent: parent.id})
+	}
+	return e.buildBranch(node, stk)
+}
+
+type heapStats struct {
+	heapAlloc uint64
+	nextGC    uint64
+}
+
+func viewerTime(t time.Duration) float64 {
+	return float64(t) / float64(time.Microsecond)
+}
+
+type GState int
+
+const (
+	GDead GState = iota
+	GRunnable
+	GRunning
+	GWaiting
+	GWaitingGC
+
+	gStateCount
+)
+
+type ThreadState int
+
+const (
+	ThreadStateInSyscall ThreadState = iota
+	ThreadStateInSyscallRuntime
+	ThreadStateRunning
+
+	threadStateCount
+)
+
+type frameNode struct {
+	id       int
+	children map[uint64]frameNode
+}
+
+// Mapping from more reasonable color names to the reserved color names in
+// https://github.com/catapult-project/catapult/blob/master/tracing/tracing/base/color_scheme.html#L50
+// The Chrome trace viewer allows only those as cname values.
+const (
+	colorLightMauve     = "thread_state_uninterruptible" // 182, 125, 143
+	colorOrange         = "thread_state_iowait"          // 255, 140, 0
+	colorSeafoamGreen   = "thread_state_running"         // 126, 200, 148
+	colorVistaBlue      = "thread_state_runnable"        // 133, 160, 210
+	colorTan            = "thread_state_unknown"         // 199, 155, 125
+	colorIrisBlue       = "background_memory_dump"       // 0, 180, 180
+	colorMidnightBlue   = "light_memory_dump"            // 0, 0, 180
+	colorDeepMagenta    = "detailed_memory_dump"         // 180, 0, 180
+	colorBlue           = "vsync_highlight_color"        // 0, 0, 255
+	colorGrey           = "generic_work"                 // 125, 125, 125
+	colorGreen          = "good"                         // 0, 125, 0
+	colorDarkGoldenrod  = "bad"                          // 180, 125, 0
+	colorPeach          = "terrible"                     // 180, 0, 0
+	colorBlack          = "black"                        // 0, 0, 0
+	colorLightGrey      = "grey"                         // 221, 221, 221
+	colorWhite          = "white"                        // 255, 255, 255
+	colorYellow         = "yellow"                       // 255, 255, 0
+	colorOlive          = "olive"                        // 100, 100, 0
+	colorCornflowerBlue = "rail_response"                // 67, 135, 253
+	colorSunsetOrange   = "rail_animation"               // 244, 74, 63
+	colorTangerine      = "rail_idle"                    // 238, 142, 0
+	colorShamrockGreen  = "rail_load"                    // 13, 168, 97
+	colorGreenishYellow = "startup"                      // 230, 230, 0
+	colorDarkGrey       = "heap_dump_stack_frame"        // 128, 128, 128
+	colorTawny          = "heap_dump_child_node_arrow"   // 204, 102, 0
+	colorLemon          = "cq_build_running"             // 255, 255, 119
+	colorLime           = "cq_build_passed"              // 153, 238, 102
+	colorPink           = "cq_build_failed"              // 238, 136, 136
+	colorSilver         = "cq_build_abandoned"           // 187, 187, 187
+	colorManzGreen      = "cq_build_attempt_runnig"      // 222, 222, 75
+	colorKellyGreen     = "cq_build_attempt_passed"      // 108, 218, 35
+	colorAnotherGrey    = "cq_build_attempt_failed"      // 187, 187, 187
+)
+
+var colorForTask = []string{
+	colorLightMauve,
+	colorOrange,
+	colorSeafoamGreen,
+	colorVistaBlue,
+	colorTan,
+	colorMidnightBlue,
+	colorIrisBlue,
+	colorDeepMagenta,
+	colorGreen,
+	colorDarkGoldenrod,
+	colorPeach,
+	colorOlive,
+	colorCornflowerBlue,
+	colorSunsetOrange,
+	colorTangerine,
+	colorShamrockGreen,
+	colorTawny,
+	colorLemon,
+	colorLime,
+	colorPink,
+	colorSilver,
+	colorManzGreen,
+	colorKellyGreen,
+}
+
+func pickTaskColor(id uint64) string {
+	idx := id % uint64(len(colorForTask))
+	return colorForTask[idx]
+}
diff --git a/src/internal/trace/traceviewer/format/format.go b/src/internal/trace/traceviewer/format/format.go
new file mode 100644
index 0000000..83f3276
--- /dev/null
+++ b/src/internal/trace/traceviewer/format/format.go
@@ -0,0 +1,79 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package format provides definitions of the JSON data structures
+// used by the Chrome trace viewer.
+//
+// The official description of the format is in this file:
+// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview
+//
+// Note: This can't be part of the parent traceviewer package, because
+// go_bootstrap cannot depend on the cgo version of package net in ./make.bash.
+package format
+
+type Data struct {
+	Events   []*Event         `json:"traceEvents"`
+	Frames   map[string]Frame `json:"stackFrames"`
+	TimeUnit string           `json:"displayTimeUnit"`
+}
+
+type Event struct {
+	Name      string  `json:"name,omitempty"`
+	Phase     string  `json:"ph"`
+	Scope     string  `json:"s,omitempty"`
+	Time      float64 `json:"ts"`
+	Dur       float64 `json:"dur,omitempty"`
+	PID       uint64  `json:"pid"`
+	TID       uint64  `json:"tid"`
+	ID        uint64  `json:"id,omitempty"`
+	BindPoint string  `json:"bp,omitempty"`
+	Stack     int     `json:"sf,omitempty"`
+	EndStack  int     `json:"esf,omitempty"`
+	Arg       any     `json:"args,omitempty"`
+	Cname     string  `json:"cname,omitempty"`
+	Category  string  `json:"cat,omitempty"`
+}
+
+type Frame struct {
+	Name   string `json:"name"`
+	Parent int    `json:"parent,omitempty"`
+}
+
+type NameArg struct {
+	Name string `json:"name"`
+}
+
+type BlockedArg struct {
+	Blocked string `json:"blocked"`
+}
+
+type SortIndexArg struct {
+	Index int `json:"sort_index"`
+}
+
+type HeapCountersArg struct {
+	Allocated uint64
+	NextGC    uint64
+}
+
+const (
+	ProcsSection = 0 // where Goroutines or per-P timelines are presented.
+	StatsSection = 1 // where counters are presented.
+	TasksSection = 2 // where Task hierarchy & timeline is presented.
+)
+
+type GoroutineCountersArg struct {
+	Running   uint64
+	Runnable  uint64
+	GCWaiting uint64
+}
+
+type ThreadCountersArg struct {
+	Running   int64
+	InSyscall int64
+}
+
+type ThreadIDArg struct {
+	ThreadID uint64
+}
diff --git a/src/internal/trace/traceviewer/histogram.go b/src/internal/trace/traceviewer/histogram.go
new file mode 100644
index 0000000..d4c8749
--- /dev/null
+++ b/src/internal/trace/traceviewer/histogram.go
@@ -0,0 +1,86 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceviewer
+
+import (
+	"fmt"
+	"html/template"
+	"math"
+	"strings"
+	"time"
+)
+
+// TimeHistogram is a high-dynamic-range histogram for durations.
+type TimeHistogram struct {
+	Count                int
+	Buckets              []int
+	MinBucket, MaxBucket int
+}
+
+// Five buckets for every power of 10.
+var logDiv = math.Log(math.Pow(10, 1.0/5))
+
+// Add adds a single sample to the histogram.
+func (h *TimeHistogram) Add(d time.Duration) {
+	var bucket int
+	if d > 0 {
+		bucket = int(math.Log(float64(d)) / logDiv)
+	}
+	if len(h.Buckets) <= bucket {
+		h.Buckets = append(h.Buckets, make([]int, bucket-len(h.Buckets)+1)...)
+		h.Buckets = h.Buckets[:cap(h.Buckets)]
+	}
+	h.Buckets[bucket]++
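+	// Track the range of occupied buckets so ToHTML knows which rows to draw.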
+	if bucket < h.MinBucket || h.MaxBucket == 0 {
+		h.MinBucket = bucket
+	}
+	if bucket > h.MaxBucket {
+		h.MaxBucket = bucket
+	}
+	h.Count++
+}
+
+// BucketMin returns the minimum duration value for a provided bucket.
+func (h *TimeHistogram) BucketMin(bucket int) time.Duration {
+	return time.Duration(math.Exp(float64(bucket) * logDiv))
+}
+
+// ToHTML renders the histogram as HTML.
+func (h *TimeHistogram) ToHTML(urlmaker func(min, max time.Duration) string) template.HTML {
+	if h == nil || h.Count == 0 {
+		return template.HTML("")
+	}
+
+	const barWidth = 400
+
+	maxCount := 0
+	for _, count := range h.Buckets {
+		if count > maxCount {
+			maxCount = count
+		}
+	}
+
+	w := new(strings.Builder)
+	fmt.Fprintf(w, `<table>`)
+	for i := h.MinBucket; i <= h.MaxBucket; i++ {
+		// Tick label.
+		if h.Buckets[i] > 0 {
+			fmt.Fprintf(w, `<tr><td class="histoTime" align="right"><a href=%s>%s</a></td>`, urlmaker(h.BucketMin(i), h.BucketMin(i+1)), h.BucketMin(i))
+		} else {
+			fmt.Fprintf(w, `<tr><td class="histoTime" align="right">%s</td>`, h.BucketMin(i))
+		}
+		// Bucket bar.
+		width := h.Buckets[i] * barWidth / maxCount
+		fmt.Fprintf(w, `<td><div style="width:%dpx;background:blue;position:relative">&nbsp;</div></td>`, width)
+		// Bucket count.
+		fmt.Fprintf(w, `<td align="right"><div style="position:relative">%d</div></td>`, h.Buckets[i])
+		fmt.Fprintf(w, "</tr>\n")
+	}
+	// Final tick label.
+	fmt.Fprintf(w, `<tr><td align="right">%s</td></tr>`, h.BucketMin(h.MaxBucket+1))
+	fmt.Fprintf(w, `</table>`)
+	return template.HTML(w.String())
+}
diff --git a/src/internal/trace/traceviewer/http.go b/src/internal/trace/traceviewer/http.go
new file mode 100644
index 0000000..5258db0
--- /dev/null
+++ b/src/internal/trace/traceviewer/http.go
@@ -0,0 +1,422 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traceviewer
+
+import (
+	"embed"
+	"fmt"
+	"html/template"
+	"net/http"
+	"strings"
+)
+
+func MainHandler(views []View) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		if err := templMain.Execute(w, views); err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+	})
+}
+
+const CommonStyle = `
+/* See https://github.com/golang/pkgsite/blob/master/static/shared/typography/typography.css */
+body {
+  font-family:	-apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji';
+  font-size:	1rem;
+  line-height:	normal;
+  max-width:	9in;
+  margin:	1em;
+}
+h1 { font-size: 1.5rem; }
+h2 { font-size: 1.375rem; }
+h1,h2 {
+  font-weight: 600;
+  line-height: 1.25em;
+  word-break: break-word;
+}
+p  { color: grey85; font-size:85%; }
+code,
+pre,
+textarea.code {
+  font-family: SFMono-Regular, Consolas, 'Liberation Mono', Menlo, monospace;
+  font-size: 0.875rem;
+  line-height: 1.5em;
+}
+
+pre,
+textarea.code {
+  background-color: var(--color-background-accented);
+  border: var(--border);
+  border-radius: var(--border-radius);
+  color: var(--color-text);
+  overflow-x: auto;
+  padding: 0.625rem;
+  tab-size: 4;
+  white-space: pre;
+}
+`
+
+var templMain = template.Must(template.New("").Parse(`
+<html>
+<style>` + CommonStyle + `</style>
+<body>
+<h1>cmd/trace: the Go trace event viewer</h1>
+<p>
+  This web server provides various visualizations of an event log gathered during
+  the execution of a Go program that uses the <a href='https://pkg.go.dev/runtime/trace'>runtime/trace</a> package.
+</p>
+
+<h2>Event timelines for running goroutines</h2>
+{{range $i, $view := $}}
+{{if $view.Ranges}}
+{{if eq $i 0}}
+<p>
+  Large traces are split into multiple sections of equal data size
+  (not duration) to avoid overwhelming the visualizer.
+</p>
+{{end}}
+<ul>
+	{{range $index, $e := $view.Ranges}}
+		<li><a href="{{$view.URL $index}}">View trace by {{$view.Type}} ({{$e.Name}})</a></li>
+	{{end}}
+</ul>
+{{else}}
+<ul>
+	<li><a href="{{$view.URL -1}}">View trace by {{$view.Type}}</a></li>
+</ul>
+{{end}}
+{{end}}
+<p>
+  This view displays a series of timelines for a type of resource.
+  The "by proc" view consists of a timeline for each of the GOMAXPROCS
+  logical processors, showing which goroutine (if any) was running on that
+  logical processor at each moment.
+  The "by thread" view (if available) consists of a similar timeline for each
+  OS thread.
+
+  Each goroutine has an identifying number (e.g. G123), main function,
+  and color.
+
+  A colored bar represents an uninterrupted span of execution.
+
+  Execution of a goroutine may migrate from one logical processor to another,
+  causing a single colored bar to be horizontally continuous but
+  vertically displaced.
+</p>
+<p>
+  Clicking on a span reveals information about it, such as its
+  duration, its causal predecessors and successors, and the stack trace
+  at the final moment when it yielded the logical processor, for example
+  because it made a system call or tried to acquire a mutex.
+
+  Directly underneath each bar, a smaller bar or more commonly a fine
+  vertical line indicates an event occurring during its execution.
+  Some of these are related to garbage collection; most indicate that
+  a goroutine yielded its logical processor but then immediately resumed execution
+  on the same logical processor. Clicking on the event displays the stack trace
+  at the moment it occurred.
+</p>
+<p>
+  The causal relationships between spans of goroutine execution
+  can be displayed by clicking the Flow Events button at the top.
+</p>
+<p>
+  At the top ("STATS"), there are three additional timelines that
+  display statistical information.
+
+  "Goroutines" is a time series of the count of existing goroutines;
+  clicking on it displays their breakdown by state at that moment:
+  running, runnable, or waiting.
+
+  "Heap" is a time series of the amount of heap memory allocated (in orange)
+  and (in green) the allocation limit at which the next GC cycle will begin.
+
+  "Threads" shows the number of kernel threads in existence: there is
+  always one kernel thread per logical processor, and additional threads
+  are created for calls to non-Go code such as a system call or a
+  function written in C.
+</p>
+<p>
+  Above the event trace for the first logical processor are
+  traces for various runtime-internal events.
+
+  The "GC" bar shows when the garbage collector is running, and in which stage.
+  Garbage collection may temporarily affect all the logical processors
+  and the other metrics.
+
+  The "Network", "Timers", and "Syscalls" traces indicate events in
+  the runtime that cause goroutines to wake up.
+</p>
+<p>
+  The visualization allows you to navigate events at scales ranging from several
+  seconds to a handful of nanoseconds.
+
+  Consult the documentation for the Chromium <a href='https://www.chromium.org/developers/how-tos/trace-event-profiling-tool/'>Trace Event Profiling Tool</a>
+  for help navigating the view.
+</p>
+
+<ul>
+<li><a href="/goroutines">Goroutine analysis</a></li>
+</ul>
+<p>
+  This view displays information about each set of goroutines that
+  shares the same main function.
+
+  Clicking on a main function shows links to the four types of
+  blocking profile (see below) applied to that subset of goroutines.
+
+  It also shows a table of specific goroutine instances, with various
+  execution statistics and a link to the event timeline for each one.
+
+  The timeline displays only the selected goroutine and any others it
+  interacts with via block/unblock events. (The timeline is
+  goroutine-oriented rather than logical processor-oriented.)
+</p>
+
+<h2>Profiles</h2>
+<p>
+  Each link below displays a global profile in zoomable graph form as
+  produced by <a href='https://go.dev/blog/pprof'>pprof</a>'s "web" command.
+
+  In addition there is a link to download the profile for offline
+  analysis with pprof.
+
+  All four profiles represent causes of delay that prevent a goroutine
+  from running on a logical processor: because it was waiting for the network,
+  for a synchronization operation on a mutex or channel, for a system call,
+  or for a logical processor to become available.
+</p>
+<ul>
+<li><a href="/io">Network blocking profile</a> (<a href="/io?raw=1" download="io.profile">⬇</a>)</li>
+<li><a href="/block">Synchronization blocking profile</a> (<a href="/block?raw=1" download="block.profile">⬇</a>)</li>
+<li><a href="/syscall">Syscall profile</a> (<a href="/syscall?raw=1" download="syscall.profile">⬇</a>)</li>
+<li><a href="/sched">Scheduler latency profile</a> (<a href="/sched?raw=1" download="sched.profile">⬇</a>)</li>
+</ul>
+
+<h2>User-defined tasks and regions</h2>
+<p>
+  The trace API allows a target program to annotate a <a
+  href='https://pkg.go.dev/runtime/trace#Region'>region</a> of code
+  within a goroutine, such as a key function, so that its performance
+  can be analyzed.
+
+  <a href='https://pkg.go.dev/runtime/trace#Log'>Log events</a> may be
+  associated with a region to record progress and relevant values.
+
+  The API also allows annotation of higher-level
+  <a href='https://pkg.go.dev/runtime/trace#Task'>tasks</a>,
+  which may involve work across many goroutines.
+</p>
+<p>
+  The links below display, for each region and task, a histogram of its execution times.
+
+  Each histogram bucket contains a sample trace that records the
+  sequence of events such as goroutine creations, log events, and
+  subregion start/end times.
+
+  For each task, you can click through to a logical-processor or
+  goroutine-oriented view showing the tasks and regions on the
+  timeline.
+
+  Such information may help uncover which steps in a region are
+  unexpectedly slow, or reveal relationships between the data values
+  logged in a request and its running time.
+</p>
+<ul>
+<li><a href="/usertasks">User-defined tasks</a></li>
+<li><a href="/userregions">User-defined regions</a></li>
+</ul>
+
+<h2>Garbage collection metrics</h2>
+<ul>
+<li><a href="/mmu">Minimum mutator utilization</a></li>
+</ul>
+<p>
+  This chart indicates the maximum GC pause time (the largest x value
+  for which y is zero), and more generally, the fraction of time that
+  the processors are available to application goroutines ("mutators"),
+  for any time window of a specified size, in the worst case.
+</p>
+</body>
+</html>
+`))
+
+type View struct {
+	Type   ViewType
+	Ranges []Range
+}
+
+type ViewType string
+
+const (
+	ViewProc   ViewType = "proc"
+	ViewThread ViewType = "thread"
+)
+
+func (v View) URL(rangeIdx int) string {
+	if rangeIdx < 0 {
+		return fmt.Sprintf("/trace?view=%s", v.Type)
+	}
+	return v.Ranges[rangeIdx].URL(v.Type)
+}
+
+type Range struct {
+	Name      string
+	Start     int
+	End       int
+	StartTime int64
+	EndTime   int64
+}
+
+func (r Range) URL(viewType ViewType) string {
+	return fmt.Sprintf("/trace?view=%s&start=%d&end=%d", viewType, r.Start, r.End)
+}
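+
+// For example, Range{Name: "0-999", Start: 0, End: 999}.URL(ViewProc) yields
+// "/trace?view=proc&start=0&end=999", while View{Type: ViewThread}.URL(-1)
+// yields "/trace?view=thread".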
+
+func TraceHandler() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if err := r.ParseForm(); err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+		html := strings.ReplaceAll(templTrace, "{{PARAMS}}", r.Form.Encode())
+		w.Write([]byte(html))
+	})
+}
+
+// https://chromium.googlesource.com/catapult/+/9508452e18f130c98499cb4c4f1e1efaedee8962/tracing/docs/embedding-trace-viewer.md
+// This is almost verbatim copy of https://chromium-review.googlesource.com/c/catapult/+/2062938/2/tracing/bin/index.html
+var templTrace = `
+<html>
+<head>
+<script src="/static/webcomponents.min.js"></script>
+<script>
+'use strict';
+
+function onTraceViewerImportFail() {
+  document.addEventListener('DOMContentLoaded', function() {
+    document.body.textContent =
+    '/static/trace_viewer_full.html is missing. File a bug in https://golang.org/issue';
+  });
+}
+</script>
+
+<link rel="import" href="/static/trace_viewer_full.html"
+      onerror="onTraceViewerImportFail(event)">
+
+<style type="text/css">
+  html, body {
+    box-sizing: border-box;
+    overflow: hidden;
+    margin: 0px;
+    padding: 0;
+    width: 100%;
+    height: 100%;
+  }
+  #trace-viewer {
+    width: 100%;
+    height: 100%;
+  }
+  #trace-viewer:focus {
+    outline: none;
+  }
+</style>
+<script>
+'use strict';
+(function() {
+  var viewer;
+  var url;
+  var model;
+
+  function load() {
+    var req = new XMLHttpRequest();
+    var isBinary = /[.]gz$/.test(url) || /[.]zip$/.test(url);
+    req.overrideMimeType('text/plain; charset=x-user-defined');
+    req.open('GET', url, true);
+    if (isBinary)
+      req.responseType = 'arraybuffer';
+
+    req.onreadystatechange = function(event) {
+      if (req.readyState !== 4)
+        return;
+
+      window.setTimeout(function() {
+        if (req.status === 200)
+          onResult(isBinary ? req.response : req.responseText);
+        else
+          onResultFail(req.status);
+      }, 0);
+    };
+    req.send(null);
+  }
+
+  function onResultFail(err) {
+    var overlay = new tr.ui.b.Overlay();
+    overlay.textContent = err + ': ' + url + ' could not be loaded';
+    overlay.title = 'Failed to fetch data';
+    overlay.visible = true;
+  }
+
+  function onResult(result) {
+    model = new tr.Model();
+    var opts = new tr.importer.ImportOptions();
+    opts.shiftWorldToZero = false;
+    var i = new tr.importer.Import(model, opts);
+    var p = i.importTracesWithProgressDialog([result]);
+    p.then(onModelLoaded, onImportFail);
+  }
+
+  function onModelLoaded() {
+    viewer.model = model;
+    viewer.viewTitle = "trace";
+
+    if (!model || model.bounds.isEmpty)
+      return;
+    var sel = window.location.hash.substr(1);
+    if (sel === '')
+      return;
+    var parts = sel.split(':');
+    var range = new (tr.b.Range || tr.b.math.Range)();
+    range.addValue(parseFloat(parts[0]));
+    range.addValue(parseFloat(parts[1]));
+    viewer.trackView.viewport.interestRange.set(range);
+  }
+
+  function onImportFail(err) {
+    var overlay = new tr.ui.b.Overlay();
+    overlay.textContent = tr.b.normalizeException(err).message;
+    overlay.title = 'Import error';
+    overlay.visible = true;
+  }
+
+  document.addEventListener('WebComponentsReady', function() {
+    var container = document.createElement('track-view-container');
+    container.id = 'track_view_container';
+
+    viewer = document.createElement('tr-ui-timeline-view');
+    viewer.track_view_container = container;
+    Polymer.dom(viewer).appendChild(container);
+
+    viewer.id = 'trace-viewer';
+    viewer.globalMode = true;
+    Polymer.dom(document.body).appendChild(viewer);
+
+    url = '/jsontrace?{{PARAMS}}';
+    load();
+  });
+}());
+</script>
+</head>
+<body>
+</body>
+</html>
+`
+
+//go:embed static/trace_viewer_full.html static/webcomponents.min.js
+var staticContent embed.FS
+
+func StaticHandler() http.Handler {
+	return http.FileServer(http.FS(staticContent))
+}
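+
+// Illustrative wiring of the handlers above into a server; the mux, the view
+// list, and the /jsontrace endpoint are assumptions about the caller, not
+// part of this package:
+//
+//	mux := http.NewServeMux()
+//	mux.Handle("/", MainHandler([]View{{Type: ViewProc}}))
+//	mux.Handle("/trace", TraceHandler())
+//	mux.Handle("/static/", StaticHandler())
+//	// The page served by TraceHandler fetches its data from /jsontrace,
+//	// which the caller must provide separately.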
diff --git a/src/internal/trace/traceviewer/mmu.go b/src/internal/trace/traceviewer/mmu.go
new file mode 100644
index 0000000..0bc1233
--- /dev/null
+++ b/src/internal/trace/traceviewer/mmu.go
@@ -0,0 +1,414 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Minimum mutator utilization (MMU) graphing.
+
+// TODO:
+//
+// In worst window list, show break-down of GC utilization sources
+// (STW, assist, etc). Probably requires a different MutatorUtil
+// representation.
+//
+// When a window size is selected, show a second plot of the mutator
+// utilization distribution for that window size.
+//
+// Render plot progressively so rough outline is visible quickly even
+// for very complex MUTs. Start by computing just a few window sizes
+// and then add more window sizes.
+//
+// Consider using sampling to compute an approximate MUT. This would
+// work by sampling the mutator utilization at randomly selected
+// points in time in the trace to build an empirical distribution. We
+// could potentially put confidence intervals on these estimates and
+// render this progressively as we refine the distributions.
+
+package traceviewer
+
+import (
+	"encoding/json"
+	"fmt"
+	"internal/trace"
+	"log"
+	"math"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+type MutatorUtilFunc func(trace.UtilFlags) ([][]trace.MutatorUtil, error)
+
+func MMUHandlerFunc(ranges []Range, f MutatorUtilFunc) http.HandlerFunc {
+	mmu := &mmu{
+		cache:  make(map[trace.UtilFlags]*mmuCacheEntry),
+		f:      f,
+		ranges: ranges,
+	}
+	return func(w http.ResponseWriter, r *http.Request) {
+		switch r.FormValue("mode") {
+		case "plot":
+			mmu.HandlePlot(w, r)
+			return
+		case "details":
+			mmu.HandleDetails(w, r)
+			return
+		}
+		http.ServeContent(w, r, "", time.Time{}, strings.NewReader(templMMU))
+	}
+}
+
+var utilFlagNames = map[string]trace.UtilFlags{
+	"perProc":    trace.UtilPerProc,
+	"stw":        trace.UtilSTW,
+	"background": trace.UtilBackground,
+	"assist":     trace.UtilAssist,
+	"sweep":      trace.UtilSweep,
+}
+
+func requestUtilFlags(r *http.Request) trace.UtilFlags {
+	var flags trace.UtilFlags
+	for _, flagStr := range strings.Split(r.FormValue("flags"), "|") {
+		flags |= utilFlagNames[flagStr]
+	}
+	return flags
+}
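+
+// For example, a request with ?flags=stw|background|assist yields
+// trace.UtilSTW|trace.UtilBackground|trace.UtilAssist; names not present in
+// utilFlagNames contribute nothing to the result.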
+
+type mmuCacheEntry struct {
+	init     sync.Once
+	util     [][]trace.MutatorUtil
+	mmuCurve *trace.MMUCurve
+	err      error
+}
+
+type mmu struct {
+	mu     sync.Mutex
+	cache  map[trace.UtilFlags]*mmuCacheEntry
+	f      MutatorUtilFunc
+	ranges []Range
+}
+
+func (m *mmu) get(flags trace.UtilFlags) ([][]trace.MutatorUtil, *trace.MMUCurve, error) {
+	m.mu.Lock()
+	entry := m.cache[flags]
+	if entry == nil {
+		entry = new(mmuCacheEntry)
+		m.cache[flags] = entry
+	}
+	m.mu.Unlock()
+
+	entry.init.Do(func() {
+		util, err := m.f(flags)
+		if err != nil {
+			entry.err = err
+		} else {
+			entry.util = util
+			entry.mmuCurve = trace.NewMMUCurve(util)
+		}
+	})
+	return entry.util, entry.mmuCurve, entry.err
+}
+
+// HandlePlot serves the JSON data for the MMU plot.
+func (m *mmu) HandlePlot(w http.ResponseWriter, r *http.Request) {
+	mu, mmuCurve, err := m.get(requestUtilFlags(r))
+	if err != nil {
+		http.Error(w, fmt.Sprintf("failed to produce MMU data: %v", err), http.StatusInternalServerError)
+		return
+	}
+
+	var quantiles []float64
+	for _, flagStr := range strings.Split(r.FormValue("flags"), "|") {
+		if flagStr == "mut" {
+			quantiles = []float64{0, 1 - .999, 1 - .99, 1 - .95}
+			break
+		}
+	}
+
+	// Find a nice starting point for the plot.
+	xMin := time.Second
+	for xMin > 1 {
+		if mmu := mmuCurve.MMU(xMin); mmu < 0.0001 {
+			break
+		}
+		xMin /= 1000
+	}
+	// Cover six orders of magnitude.
+	xMax := xMin * 1e6
+	// But no more than the length of the trace.
+	minEvent, maxEvent := mu[0][0].Time, mu[0][len(mu[0])-1].Time
+	for _, mu1 := range mu[1:] {
+		if mu1[0].Time < minEvent {
+			minEvent = mu1[0].Time
+		}
+		if mu1[len(mu1)-1].Time > maxEvent {
+			maxEvent = mu1[len(mu1)-1].Time
+		}
+	}
+	if maxMax := time.Duration(maxEvent - minEvent); xMax > maxMax {
+		xMax = maxMax
+	}
+	// Compute MMU curve.
+	logMin, logMax := math.Log(float64(xMin)), math.Log(float64(xMax))
+	const samples = 100
+	plot := make([][]float64, samples)
+	for i := 0; i < samples; i++ {
+		window := time.Duration(math.Exp(float64(i)/(samples-1)*(logMax-logMin) + logMin))
+		if quantiles == nil {
+			plot[i] = make([]float64, 2)
+			plot[i][1] = mmuCurve.MMU(window)
+		} else {
+			plot[i] = make([]float64, 1+len(quantiles))
+			copy(plot[i][1:], mmuCurve.MUD(window, quantiles))
+		}
+		plot[i][0] = float64(window)
+	}
+
+	// Create JSON response.
+	err = json.NewEncoder(w).Encode(map[string]any{"xMin": int64(xMin), "xMax": int64(xMax), "quantiles": quantiles, "curve": plot})
+	if err != nil {
+		log.Printf("failed to serialize response: %v", err)
+		return
+	}
+}
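+
+// The response is a JSON object with keys "xMin", "xMax", "quantiles", and
+// "curve", where each curve entry is [window in nanoseconds, MMU] or, when
+// the "mut" flag is set, [window, MU at each requested quantile]. A sketch
+// with hypothetical values:
+//
+//	{"curve":[[1000,0.1],[1122,0.12]],"quantiles":null,"xMax":1000000000,"xMin":1000}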
+
+var templMMU = `<!doctype html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+    <script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
+    <script type="text/javascript">
+      google.charts.load('current', {'packages':['corechart']});
+      var chartsReady = false;
+      google.charts.setOnLoadCallback(function() { chartsReady = true; refreshChart(); });
+
+      var chart;
+      var curve;
+
+      function niceDuration(ns) {
+          if (ns < 1e3) { return ns + 'ns'; }
+          else if (ns < 1e6) { return ns / 1e3 + 'µs'; }
+          else if (ns < 1e9) { return ns / 1e6 + 'ms'; }
+          else { return ns / 1e9 + 's'; }
+      }
+
+      function niceQuantile(q) {
+        return 'p' + q*100;
+      }
+
+      function mmuFlags() {
+        var flags = "";
+        $("#options input").each(function(i, elt) {
+          if (elt.checked)
+            flags += "|" + elt.id;
+        });
+        return flags.substr(1);
+      }
+
+      function refreshChart() {
+        if (!chartsReady) return;
+        var container = $('#mmu_chart');
+        container.css('opacity', '.5');
+        refreshChart.count++;
+        var seq = refreshChart.count;
+        $.getJSON('?mode=plot&flags=' + mmuFlags())
+         .fail(function(xhr, status, error) {
+           alert('failed to load plot: ' + status);
+         })
+         .done(function(result) {
+           if (refreshChart.count === seq)
+             drawChart(result);
+         });
+      }
+      refreshChart.count = 0;
+
+      function drawChart(plotData) {
+        curve = plotData.curve;
+        var data = new google.visualization.DataTable();
+        data.addColumn('number', 'Window duration');
+        data.addColumn('number', 'Minimum mutator utilization');
+        if (plotData.quantiles) {
+          for (var i = 1; i < plotData.quantiles.length; i++) {
+            data.addColumn('number', niceQuantile(1 - plotData.quantiles[i]) + ' MU');
+          }
+        }
+        data.addRows(curve);
+        for (var i = 0; i < curve.length; i++) {
+          data.setFormattedValue(i, 0, niceDuration(curve[i][0]));
+        }
+
+        var options = {
+          chart: {
+            title: 'Minimum mutator utilization',
+          },
+          hAxis: {
+            title: 'Window duration',
+            scaleType: 'log',
+            ticks: [],
+          },
+          vAxis: {
+            title: 'Minimum mutator utilization',
+            minValue: 0.0,
+            maxValue: 1.0,
+          },
+          legend: { position: 'none' },
+          focusTarget: 'category',
+          width: 900,
+          height: 500,
+          chartArea: { width: '80%', height: '80%' },
+        };
+        for (var v = plotData.xMin; v <= plotData.xMax; v *= 10) {
+          options.hAxis.ticks.push({v:v, f:niceDuration(v)});
+        }
+        if (plotData.quantiles) {
+          options.vAxis.title = 'Mutator utilization';
+          options.legend.position = 'in';
+        }
+
+        var container = $('#mmu_chart');
+        container.empty();
+        container.css('opacity', '');
+        chart = new google.visualization.LineChart(container[0]);
+        chart.draw(data, options);
+
+        google.visualization.events.addListener(chart, 'select', selectHandler);
+        $('#details').empty();
+      }
+
+      function selectHandler() {
+        var items = chart.getSelection();
+        if (items.length === 0) {
+          return;
+        }
+        var details = $('#details');
+        details.empty();
+        var windowNS = curve[items[0].row][0];
+        var url = '?mode=details&window=' + windowNS + '&flags=' + mmuFlags();
+        $.getJSON(url)
+         .fail(function(xhr, status, error) {
+            details.text(status + ': ' + url + ' could not be loaded');
+         })
+         .done(function(worst) {
+            details.text('Lowest mutator utilization in ' + niceDuration(windowNS) + ' windows:');
+            for (var i = 0; i < worst.length; i++) {
+              details.append($('<br>'));
+              var text = worst[i].MutatorUtil.toFixed(3) + ' at time ' + niceDuration(worst[i].Time);
+              details.append($('<a/>').text(text).attr('href', worst[i].URL));
+            }
+         });
+      }
+
+      $.when($.ready).then(function() {
+        $("#options input").click(refreshChart);
+      });
+    </script>
+    <style>
+      .help {
+        display: inline-block;
+        position: relative;
+        width: 1em;
+        height: 1em;
+        border-radius: 50%;
+        color: #fff;
+        background: #555;
+        text-align: center;
+        cursor: help;
+      }
+      .help > span {
+        display: none;
+      }
+      .help:hover > span {
+        display: block;
+        position: absolute;
+        left: 1.1em;
+        top: 1.1em;
+        background: #555;
+        text-align: left;
+        width: 20em;
+        padding: 0.5em;
+        border-radius: 0.5em;
+        z-index: 5;
+      }
+    </style>
+  </head>
+  <body>
+    <div style="position: relative">
+      <div id="mmu_chart" style="width: 900px; height: 500px; display: inline-block; vertical-align: top">Loading plot...</div>
+      <div id="options" style="display: inline-block; vertical-align: top">
+        <p>
+          <b>View</b><br>
+          <input type="radio" name="view" id="system" checked><label for="system">System</label>
+          <span class="help">?<span>Consider whole system utilization. For example, if one of four procs is available to the mutator, mutator utilization will be 0.25. This is the standard definition of an MMU.</span></span><br>
+          <input type="radio" name="view" id="perProc"><label for="perProc">Per-goroutine</label>
+          <span class="help">?<span>Consider per-goroutine utilization. When even one goroutine is interrupted by GC, mutator utilization is 0.</span></span><br>
+        </p>
+        <p>
+          <b>Include</b><br>
+          <input type="checkbox" id="stw" checked><label for="stw">STW</label>
+          <span class="help">?<span>Stop-the-world stops all goroutines simultaneously.</span></span><br>
+          <input type="checkbox" id="background" checked><label for="background">Background workers</label>
+          <span class="help">?<span>Background workers are GC-specific goroutines. 25% of the CPU is dedicated to background workers during GC.</span></span><br>
+          <input type="checkbox" id="assist" checked><label for="assist">Mark assist</label>
+          <span class="help">?<span>Mark assists are performed by allocation to prevent the mutator from outpacing GC.</span></span><br>
+          <input type="checkbox" id="sweep"><label for="sweep">Sweep</label>
+          <span class="help">?<span>Sweep reclaims unused memory between GCs. (Enabling this may be very slow.).</span></span><br>
+        </p>
+        <p>
+          <b>Display</b><br>
+          <input type="checkbox" id="mut"><label for="mut">Show percentiles</label>
+          <span class="help">?<span>Display percentile mutator utilization in addition to minimum. E.g., p99 MU drops the worst 1% of windows.</span></span><br>
+        </p>
+      </div>
+    </div>
+    <div id="details">Select a point for details.</div>
+  </body>
+</html>
+`
+
+// HandleDetails serves details of an MMU graph at a particular window.
+func (m *mmu) HandleDetails(w http.ResponseWriter, r *http.Request) {
+	_, mmuCurve, err := m.get(requestUtilFlags(r))
+	if err != nil {
+		http.Error(w, fmt.Sprintf("failed to produce MMU data: %v", err), http.StatusInternalServerError)
+		return
+	}
+
+	windowStr := r.FormValue("window")
+	window, err := strconv.ParseUint(windowStr, 10, 64)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("failed to parse window parameter %q: %v", windowStr, err), http.StatusBadRequest)
+		return
+	}
+	worst := mmuCurve.Examples(time.Duration(window), 10)
+
+	// Construct a link for each window.
+	var links []linkedUtilWindow
+	for _, ui := range worst {
+		links = append(links, m.newLinkedUtilWindow(ui, time.Duration(window)))
+	}
+
+	err = json.NewEncoder(w).Encode(links)
+	if err != nil {
+		log.Printf("failed to serialize trace: %v", err)
+		return
+	}
+}
+
+type linkedUtilWindow struct {
+	trace.UtilWindow
+	URL string
+}
+
+func (m *mmu) newLinkedUtilWindow(ui trace.UtilWindow, window time.Duration) linkedUtilWindow {
+	// Find the range containing this window.
+	var r Range
+	for _, r = range m.ranges {
+		if r.EndTime > ui.Time {
+			break
+		}
+	}
+	return linkedUtilWindow{ui, fmt.Sprintf("%s#%v:%v", r.URL(ViewProc), float64(ui.Time)/1e6, float64(ui.Time+int64(window))/1e6)}
+}
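+
+// The fragment after "#" encodes the window in milliseconds (start:end). For
+// example, a 10ms window beginning at ui.Time = 1.5e9 ns produces a URL
+// ending in "#1500:1510", which the trace viewer page (templTrace in http.go)
+// parses in onModelLoaded to set its interest range.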
diff --git a/src/internal/trace/traceviewer/pprof.go b/src/internal/trace/traceviewer/pprof.go
new file mode 100644
index 0000000..1377b3c
--- /dev/null
+++ b/src/internal/trace/traceviewer/pprof.go
@@ -0,0 +1,150 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Serving of pprof-like profiles.
+
+package traceviewer
+
+import (
+	"bufio"
+	"fmt"
+	"internal/profile"
+	"internal/trace"
+	"net/http"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"time"
+)
+
+type ProfileFunc func(r *http.Request) ([]ProfileRecord, error)
+
+// SVGProfileHandlerFunc serves a pprof-like profile generated by f as SVG.
+func SVGProfileHandlerFunc(f ProfileFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		if r.FormValue("raw") != "" {
+			w.Header().Set("Content-Type", "application/octet-stream")
+
+			failf := func(s string, args ...any) {
+				w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+				w.Header().Set("X-Go-Pprof", "1")
+				http.Error(w, fmt.Sprintf(s, args...), http.StatusInternalServerError)
+			}
+			records, err := f(r)
+			if err != nil {
+				failf("failed to get records: %v", err)
+				return
+			}
+			if err := BuildProfile(records).Write(w); err != nil {
+				failf("failed to write profile: %v", err)
+				return
+			}
+			return
+		}
+
+		blockf, err := os.CreateTemp("", "block")
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to create temp file: %v", err), http.StatusInternalServerError)
+			return
+		}
+		defer func() {
+			blockf.Close()
+			os.Remove(blockf.Name())
+		}()
+		records, err := f(r)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to generate profile: %v", err), http.StatusInternalServerError)
+		}
+		blockb := bufio.NewWriter(blockf)
+		if err := BuildProfile(records).Write(blockb); err != nil {
+			http.Error(w, fmt.Sprintf("failed to write profile: %v", err), http.StatusInternalServerError)
+			return
+		}
+		if err := blockb.Flush(); err != nil {
+			http.Error(w, fmt.Sprintf("failed to flush temp file: %v", err), http.StatusInternalServerError)
+			return
+		}
+		if err := blockf.Close(); err != nil {
+			http.Error(w, fmt.Sprintf("failed to close temp file: %v", err), http.StatusInternalServerError)
+			return
+		}
+		svgFilename := blockf.Name() + ".svg"
+		if output, err := exec.Command(goCmd(), "tool", "pprof", "-svg", "-output", svgFilename, blockf.Name()).CombinedOutput(); err != nil {
+			http.Error(w, fmt.Sprintf("failed to execute go tool pprof: %v\n%s", err, output), http.StatusInternalServerError)
+			return
+		}
+		defer os.Remove(svgFilename)
+		w.Header().Set("Content-Type", "image/svg+xml")
+		http.ServeFile(w, r, svgFilename)
+	}
+}
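+
+// Illustrative registration; mux and collectIOProfile are hypothetical and
+// stand in for whatever the caller provides:
+//
+//	mux.HandleFunc("/io", SVGProfileHandlerFunc(collectIOProfile))
+//
+// A request with ?raw=1 downloads the binary pprof profile; otherwise the
+// handler shells out to "go tool pprof -svg" and serves the rendered SVG.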
+
+type ProfileRecord struct {
+	Stack []*trace.Frame
+	Count uint64
+	Time  time.Duration
+}
+
+func BuildProfile(prof []ProfileRecord) *profile.Profile {
+	p := &profile.Profile{
+		PeriodType: &profile.ValueType{Type: "trace", Unit: "count"},
+		Period:     1,
+		SampleType: []*profile.ValueType{
+			{Type: "contentions", Unit: "count"},
+			{Type: "delay", Unit: "nanoseconds"},
+		},
+	}
+	locs := make(map[uint64]*profile.Location)
+	funcs := make(map[string]*profile.Function)
+	for _, rec := range prof {
+		var sloc []*profile.Location
+		for _, frame := range rec.Stack {
+			loc := locs[frame.PC]
+			if loc == nil {
+				fn := funcs[frame.File+frame.Fn]
+				if fn == nil {
+					fn = &profile.Function{
+						ID:         uint64(len(p.Function) + 1),
+						Name:       frame.Fn,
+						SystemName: frame.Fn,
+						Filename:   frame.File,
+					}
+					p.Function = append(p.Function, fn)
+					funcs[frame.File+frame.Fn] = fn
+				}
+				loc = &profile.Location{
+					ID:      uint64(len(p.Location) + 1),
+					Address: frame.PC,
+					Line: []profile.Line{
+						{
+							Function: fn,
+							Line:     int64(frame.Line),
+						},
+					},
+				}
+				p.Location = append(p.Location, loc)
+				locs[frame.PC] = loc
+			}
+			sloc = append(sloc, loc)
+		}
+		p.Sample = append(p.Sample, &profile.Sample{
+			Value:    []int64{int64(rec.Count), int64(rec.Time)},
+			Location: sloc,
+		})
+	}
+	return p
+}
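+
+// Illustrative use; the frame and counts below are hypothetical:
+//
+//	rec := ProfileRecord{
+//		Stack: []*trace.Frame{{PC: 0x1234, Fn: "main.work", File: "main.go", Line: 42}},
+//		Count: 3,
+//		Time:  5 * time.Millisecond,
+//	}
+//	p := BuildProfile([]ProfileRecord{rec})
+//	// p contains one sample with values [3, 5000000] (contention count,
+//	// delay in nanoseconds) attributed to main.work.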
+
+func goCmd() string {
+	var exeSuffix string
+	if runtime.GOOS == "windows" {
+		exeSuffix = ".exe"
+	}
+	path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix)
+	if _, err := os.Stat(path); err == nil {
+		return path
+	}
+	return "go"
+}
diff --git a/src/cmd/trace/static/README.md b/src/internal/trace/traceviewer/static/README.md
similarity index 100%
rename from src/cmd/trace/static/README.md
rename to src/internal/trace/traceviewer/static/README.md
diff --git a/src/cmd/trace/static/trace_viewer_full.html b/src/internal/trace/traceviewer/static/trace_viewer_full.html
similarity index 100%
rename from src/cmd/trace/static/trace_viewer_full.html
rename to src/internal/trace/traceviewer/static/trace_viewer_full.html
diff --git a/src/cmd/trace/static/webcomponents.min.js b/src/internal/trace/traceviewer/static/webcomponents.min.js
similarity index 100%
rename from src/cmd/trace/static/webcomponents.min.js
rename to src/internal/trace/traceviewer/static/webcomponents.min.js
diff --git a/src/internal/trace/v2/base.go b/src/internal/trace/v2/base.go
new file mode 100644
index 0000000..57e5802
--- /dev/null
+++ b/src/internal/trace/v2/base.go
@@ -0,0 +1,261 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains data types that all implementations of the trace format
+// parser need to provide to the rest of the package.
+
+package trace
+
+import (
+	"fmt"
+	"math"
+	"strings"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/event/go122"
+	"internal/trace/v2/version"
+)
+
+// maxArgs is the maximum number of arguments for "plain" events,
+// i.e. anything that could reasonably be represented as a baseEvent.
+const maxArgs = 5
+
+// baseEvent is the basic unprocessed event. This serves as a common
+// fundamental data structure across parser implementations.
+type baseEvent struct {
+	typ  event.Type
+	time Time
+	args [maxArgs - 1]uint64
+}
+
+// extra returns a slice representing extra available space in args
+// that the parser can use to pass data up into Event.
+func (e *baseEvent) extra(v version.Version) []uint64 {
+	switch v {
+	case version.Go122:
+		return e.args[len(go122.Specs()[e.typ].Args)-1:]
+	}
+	panic(fmt.Sprintf("unsupported version: go 1.%d", v))
+}
+
+// evTable contains the per-generation data necessary to
+// interpret an individual event.
+type evTable struct {
+	freq    frequency
+	strings dataTable[stringID, string]
+	stacks  dataTable[stackID, stack]
+
+	// extraStrings are strings that get generated during
+	// parsing but haven't come directly from the trace, so
+	// they don't appear in strings.
+	extraStrings   []string
+	extraStringIDs map[string]extraStringID
+	nextExtra      extraStringID
+}
+
+// addExtraString adds an extra string to the evTable and returns
+// a unique ID for the string in the table.
+func (t *evTable) addExtraString(s string) extraStringID {
+	if s == "" {
+		return 0
+	}
+	if t.extraStringIDs == nil {
+		t.extraStringIDs = make(map[string]extraStringID)
+	}
+	if id, ok := t.extraStringIDs[s]; ok {
+		return id
+	}
+	t.nextExtra++
+	id := t.nextExtra
+	t.extraStrings = append(t.extraStrings, s)
+	t.extraStringIDs[s] = id
+	return id
+}
+
+// getExtraString returns the extra string for the provided ID.
+// The ID must have been produced by addExtraString for this evTable.
+func (t *evTable) getExtraString(id extraStringID) string {
+	if id == 0 {
+		return ""
+	}
+	return t.extraStrings[id-1]
+}
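+
+// Together, these form a round trip: getExtraString(addExtraString(s)) == s
+// for any non-empty s, while the empty string maps to ID 0 and back.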
+
+// dataTable is a mapping from EIs to Es.
+type dataTable[EI ~uint64, E any] struct {
+	present []uint8
+	dense   []E
+	sparse  map[EI]E
+}
+
+// insert tries to add a mapping from id to data.
+//
+// Returns an error if a mapping for id already exists, regardless
+// of whether or not data is the same in content. This should be used
+// for validation during parsing.
+func (d *dataTable[EI, E]) insert(id EI, data E) error {
+	if d.sparse == nil {
+		d.sparse = make(map[EI]E)
+	}
+	if existing, ok := d.get(id); ok {
+		return fmt.Errorf("multiple %Ts with the same ID: id=%d, new=%v, existing=%v", data, id, data, existing)
+	}
+	d.sparse[id] = data
+	return nil
+}
+
+// compactify attempts to compact sparse into dense.
+//
+// This is intended to be called only once after insertions are done.
+func (d *dataTable[EI, E]) compactify() {
+	if d.sparse == nil || len(d.dense) != 0 {
+		// Already compactified.
+		return
+	}
+	// Find the range of IDs.
+	maxID := EI(0)
+	minID := ^EI(0)
+	for id := range d.sparse {
+		if id > maxID {
+			maxID = id
+		}
+		if id < minID {
+			minID = id
+		}
+	}
+	if maxID >= math.MaxInt {
+		// We can't create a slice big enough to hold maxID elements
+		return
+	}
+	// We're willing to waste at most 2x memory.
+	if int(maxID-minID) > max(len(d.sparse), 2*len(d.sparse)) {
+		return
+	}
+	if int(minID) > len(d.sparse) {
+		return
+	}
+	size := int(maxID) + 1
+	d.present = make([]uint8, (size+7)/8)
+	d.dense = make([]E, size)
+	for id, data := range d.sparse {
+		d.dense[id] = data
+		d.present[id/8] |= uint8(1) << (id % 8)
+	}
+	d.sparse = nil
+}
+
+// get returns the E for id or false if it doesn't
+// exist. This should be used for validation during parsing.
+func (d *dataTable[EI, E]) get(id EI) (E, bool) {
+	if id == 0 {
+		return *new(E), true
+	}
+	if uint64(id) < uint64(len(d.dense)) {
+		if d.present[id/8]&(uint8(1)<<(id%8)) != 0 {
+			return d.dense[id], true
+		}
+	} else if d.sparse != nil {
+		if data, ok := d.sparse[id]; ok {
+			return data, true
+		}
+	}
+	return *new(E), false
+}
+
+// forEach iterates over all ID/value pairs in the data table.
+func (d *dataTable[EI, E]) forEach(yield func(EI, E) bool) bool {
+	for id, value := range d.dense {
+		if d.present[id/8]&(uint8(1)<<(id%8)) == 0 {
+			continue
+		}
+		if !yield(EI(id), value) {
+			return false
+		}
+	}
+	if d.sparse == nil {
+		return true
+	}
+	for id, value := range d.sparse {
+		if !yield(id, value) {
+			return false
+		}
+	}
+	return true
+}
+
+// mustGet returns the E for id or panics if it fails.
+//
+// This should only be used if id has already been validated.
+func (d *dataTable[EI, E]) mustGet(id EI) E {
+	data, ok := d.get(id)
+	if !ok {
+		panic(fmt.Sprintf("expected id %d in %T table", id, data))
+	}
+	return data
+}
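+
+// Illustrative flow (IDs and values are hypothetical):
+//
+//	var table dataTable[stringID, string]
+//	_ = table.insert(1, "foo") // held in the sparse map during parsing
+//	_ = table.insert(2, "bar")
+//	table.compactify()         // IDs are small and dense, so move to a slice
+//	s, ok := table.get(2)      // "bar", true
+//	_ = table.mustGet(1)       // "foo"; would panic for an absent ID
+//	_, _ = s, ok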
+
+// frequency is nanoseconds per timestamp unit.
+type frequency float64
+
+// mul multiplies an unprocessed timestamp to produce a time in nanoseconds.
+func (f frequency) mul(t timestamp) Time {
+	return Time(float64(t) * float64(f))
+}
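+
+// For example, frequency(64) means each timestamp unit is 64ns, so
+// frequency(64).mul(timestamp(10)) == Time(640).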
+
+// stringID is an index into the string table for a generation.
+type stringID uint64
+
+// extraStringID is an index into the extra string table for a generation.
+type extraStringID uint64
+
+// stackID is an index into the stack table for a generation.
+type stackID uint64
+
+// cpuSample represents a CPU profiling sample captured by the trace.
+type cpuSample struct {
+	schedCtx
+	time  Time
+	stack stackID
+}
+
+// asEvent produces a complete Event from a cpuSample. It needs
+// the evTable from the generation that created it.
+//
+// We don't just store it as an Event in generation to minimize
+// the amount of pointer data floating around.
+func (s cpuSample) asEvent(table *evTable) Event {
+	// TODO(mknyszek): This is go122-specific, but shouldn't be.
+	// Generalize this in the future.
+	e := Event{
+		table: table,
+		ctx:   s.schedCtx,
+		base: baseEvent{
+			typ:  go122.EvCPUSample,
+			time: s.time,
+		},
+	}
+	e.base.args[0] = uint64(s.stack)
+	return e
+}
+
+// stack represents a goroutine stack sample.
+type stack struct {
+	frames []frame
+}
+
+func (s stack) String() string {
+	var sb strings.Builder
+	for _, frame := range s.frames {
+		fmt.Fprintf(&sb, "\t%#v\n", frame)
+	}
+	return sb.String()
+}
+
+// frame represents a single stack frame.
+type frame struct {
+	pc     uint64
+	funcID stringID
+	fileID stringID
+	line   uint64
+}
diff --git a/src/internal/trace/v2/batch.go b/src/internal/trace/v2/batch.go
new file mode 100644
index 0000000..899eb0f
--- /dev/null
+++ b/src/internal/trace/v2/batch.go
@@ -0,0 +1,97 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/event/go122"
+)
+
+// timestamp is an unprocessed timestamp.
+type timestamp uint64
+
+// batch represents a batch of trace events.
+// It is unparsed except for its header.
+type batch struct {
+	m    ThreadID
+	time timestamp
+	data []byte
+}
+
+func (b *batch) isStringsBatch() bool {
+	return len(b.data) > 0 && event.Type(b.data[0]) == go122.EvStrings
+}
+
+func (b *batch) isStacksBatch() bool {
+	return len(b.data) > 0 && event.Type(b.data[0]) == go122.EvStacks
+}
+
+func (b *batch) isCPUSamplesBatch() bool {
+	return len(b.data) > 0 && event.Type(b.data[0]) == go122.EvCPUSamples
+}
+
+func (b *batch) isFreqBatch() bool {
+	return len(b.data) > 0 && event.Type(b.data[0]) == go122.EvFrequency
+}
+
+// readBatch reads the next full batch from r.
+func readBatch(r *bufio.Reader) (batch, uint64, error) {
+	// Read batch header byte.
+	b, err := r.ReadByte()
+	if err != nil {
+		return batch{}, 0, err
+	}
+	if typ := event.Type(b); typ != go122.EvEventBatch {
+		return batch{}, 0, fmt.Errorf("expected batch event (%s), got %s", go122.EventString(go122.EvEventBatch), go122.EventString(typ))
+	}
+
+	// Read the batch header: gen (generation), thread (M) ID, base timestamp
+	// for the batch.
+	gen, err := binary.ReadUvarint(r)
+	if err != nil {
+		return batch{}, gen, fmt.Errorf("error reading batch gen: %w", err)
+	}
+	m, err := binary.ReadUvarint(r)
+	if err != nil {
+		return batch{}, gen, fmt.Errorf("error reading batch M ID: %w", err)
+	}
+	ts, err := binary.ReadUvarint(r)
+	if err != nil {
+		return batch{}, gen, fmt.Errorf("error reading batch timestamp: %w", err)
+	}
+
+	// Read in the size of the batch to follow.
+	size, err := binary.ReadUvarint(r)
+	if err != nil {
+		return batch{}, gen, fmt.Errorf("error reading batch size: %w", err)
+	}
+	if size > go122.MaxBatchSize {
+		return batch{}, gen, fmt.Errorf("invalid batch size %d, maximum is %d", size, go122.MaxBatchSize)
+	}
+
+	// Copy out the batch for later processing.
+	var data bytes.Buffer
+	data.Grow(int(size))
+	n, err := io.CopyN(&data, r, int64(size))
+	if n != int64(size) {
+		return batch{}, gen, fmt.Errorf("failed to read full batch: read %d but wanted %d", n, size)
+	}
+	if err != nil {
+		return batch{}, gen, fmt.Errorf("copying batch data: %w", err)
+	}
+
+	// Return the batch.
+	return batch{
+		m:    ThreadID(m),
+		time: timestamp(ts),
+		data: data.Bytes(),
+	}, gen, nil
+}
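+
+// On the wire, the batch read above is laid out as a type byte followed by
+// uvarints and raw data:
+//
+//	[EvEventBatch][gen][M ID][base timestamp][size][size bytes of event data]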
diff --git a/src/internal/trace/v2/batchcursor.go b/src/internal/trace/v2/batchcursor.go
new file mode 100644
index 0000000..8dc34fd
--- /dev/null
+++ b/src/internal/trace/v2/batchcursor.go
@@ -0,0 +1,174 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"cmp"
+	"encoding/binary"
+	"fmt"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/event/go122"
+)
+
+type batchCursor struct {
+	m       ThreadID
+	lastTs  Time
+	idx     int       // next index into []batch
+	dataOff int       // next index into batch.data
+	ev      baseEvent // last read event
+}
+
+func (b *batchCursor) nextEvent(batches []batch, freq frequency) (ok bool, err error) {
+	// Batches should generally always have at least one event,
+	// but let's be defensive about that and accept empty batches.
+	for b.idx < len(batches) && len(batches[b.idx].data) == b.dataOff {
+		b.idx++
+		b.dataOff = 0
+		b.lastTs = 0
+	}
+	// Have we reached the end of the batches?
+	if b.idx == len(batches) {
+		return false, nil
+	}
+	// Initialize lastTs if it hasn't been yet.
+	if b.lastTs == 0 {
+		b.lastTs = freq.mul(batches[b.idx].time)
+	}
+	// Read an event out.
+	n, tsdiff, err := readTimedBaseEvent(batches[b.idx].data[b.dataOff:], &b.ev)
+	if err != nil {
+		return false, err
+	}
+	// Complete the timestamp from the cursor's last timestamp.
+	b.ev.time = freq.mul(tsdiff) + b.lastTs
+
+	// Move the cursor's timestamp forward.
+	b.lastTs = b.ev.time
+
+	// Move the cursor forward.
+	b.dataOff += n
+	return true, nil
+}
+
+func (b *batchCursor) compare(a *batchCursor) int {
+	return cmp.Compare(b.ev.time, a.ev.time)
+}
+
+// readTimedBaseEvent reads out the raw event data from b
+// into e. It does not try to interpret the arguments
+// but it does validate that the event is a regular
+// event with a timestamp (vs. a structural event).
+//
+// It requires that the event it's reading be timed, which must
+// be the case for every event in a plain EventBatch.
+func readTimedBaseEvent(b []byte, e *baseEvent) (int, timestamp, error) {
+	// Get the event type.
+	typ := event.Type(b[0])
+	specs := go122.Specs()
+	if int(typ) >= len(specs) {
+		return 0, 0, fmt.Errorf("found invalid event type: %v", typ)
+	}
+	e.typ = typ
+
+	// Get spec.
+	spec := &specs[typ]
+	if len(spec.Args) == 0 || !spec.IsTimedEvent {
+		return 0, 0, fmt.Errorf("found event without a timestamp: type=%v", typ)
+	}
+	n := 1
+
+	// Read timestamp diff.
+	ts, nb := binary.Uvarint(b[n:])
+	if nb <= 0 {
+		return 0, 0, fmt.Errorf("found invalid uvarint for timestamp")
+	}
+	n += nb
+
+	// Read the rest of the arguments.
+	for i := 0; i < len(spec.Args)-1; i++ {
+		arg, nb := binary.Uvarint(b[n:])
+		if nb <= 0 {
+			return 0, 0, fmt.Errorf("found invalid uvarint")
+		}
+		e.args[i] = arg
+		n += nb
+	}
+	return n, timestamp(ts), nil
+}
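+
+// The encoding consumed above is one event-type byte, a uvarint timestamp
+// delta relative to the previous event in the batch (or the batch's base
+// timestamp for the first event), and then one uvarint per remaining
+// argument declared in the event's spec.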
+
+func heapInsert(heap []*batchCursor, bc *batchCursor) []*batchCursor {
+	// Add the cursor to the end of the heap.
+	heap = append(heap, bc)
+
+	// Sift the new entry up to the right place.
+	heapSiftUp(heap, len(heap)-1)
+	return heap
+}
+
+func heapUpdate(heap []*batchCursor, i int) {
+	// Try to sift up.
+	if heapSiftUp(heap, i) != i {
+		return
+	}
+	// Try to sift down, if sifting up failed.
+	heapSiftDown(heap, i)
+}
+
+func heapRemove(heap []*batchCursor, i int) []*batchCursor {
+	// Sift index i up to the root, ignoring actual values.
+	for i > 0 {
+		heap[(i-1)/2], heap[i] = heap[i], heap[(i-1)/2]
+		i = (i - 1) / 2
+	}
+	// Swap the root with the last element, then remove it.
+	heap[0], heap[len(heap)-1] = heap[len(heap)-1], heap[0]
+	heap = heap[:len(heap)-1]
+	// Sift the root down.
+	heapSiftDown(heap, 0)
+	return heap
+}
+
+func heapSiftUp(heap []*batchCursor, i int) int {
+	for i > 0 && heap[(i-1)/2].ev.time > heap[i].ev.time {
+		heap[(i-1)/2], heap[i] = heap[i], heap[(i-1)/2]
+		i = (i - 1) / 2
+	}
+	return i
+}
+
+func heapSiftDown(heap []*batchCursor, i int) int {
+	for {
+		m := min3(heap, i, 2*i+1, 2*i+2)
+		if m == i {
+			// Heap invariant already applies.
+			break
+		}
+		heap[i], heap[m] = heap[m], heap[i]
+		i = m
+	}
+	return i
+}
+
+func min3(b []*batchCursor, i0, i1, i2 int) int {
+	minIdx := i0
+	minT := maxTime
+	if i0 < len(b) {
+		minT = b[i0].ev.time
+	}
+	if i1 < len(b) {
+		if t := b[i1].ev.time; t < minT {
+			minT = t
+			minIdx = i1
+		}
+	}
+	if i2 < len(b) {
+		if t := b[i2].ev.time; t < minT {
+			minT = t
+			minIdx = i2
+		}
+	}
+	return minIdx
+}
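+
+// Illustrative use of the heap helpers above; the cursors are keyed by the
+// timestamp of their last-read event (values hypothetical):
+//
+//	var h []*batchCursor
+//	h = heapInsert(h, &batchCursor{ev: baseEvent{time: 30}})
+//	h = heapInsert(h, &batchCursor{ev: baseEvent{time: 10}})
+//	h = heapInsert(h, &batchCursor{ev: baseEvent{time: 20}})
+//	// h[0] now holds the cursor with the smallest time (10).
+//	h[0].ev.time = 40 // after advancing that cursor, restore the invariant:
+//	heapUpdate(h, 0)
+//	h = heapRemove(h, 0) // pop the new minimum (20)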
diff --git a/src/internal/trace/v2/batchcursor_test.go b/src/internal/trace/v2/batchcursor_test.go
new file mode 100644
index 0000000..69731e5
--- /dev/null
+++ b/src/internal/trace/v2/batchcursor_test.go
@@ -0,0 +1,126 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"slices"
+)
+
+func TestHeap(t *testing.T) {
+	var heap []*batchCursor
+
+	// Insert a bunch of values into the heap.
+	checkHeap(t, heap)
+	heap = heapInsert(heap, makeBatchCursor(5))
+	checkHeap(t, heap)
+	for i := int64(-20); i < 20; i++ {
+		heap = heapInsert(heap, makeBatchCursor(i))
+		checkHeap(t, heap)
+	}
+
+	// Update an element in the middle to be the new minimum.
+	for i := range heap {
+		if heap[i].ev.time == 5 {
+			heap[i].ev.time = -21
+			heapUpdate(heap, i)
+			break
+		}
+	}
+	checkHeap(t, heap)
+	if heap[0].ev.time != -21 {
+		t.Fatalf("heap update failed, expected %d as heap min: %s", -21, heapDebugString(heap))
+	}
+
+	// Update the minimum element to be smaller. There should be no change.
+	heap[0].ev.time = -22
+	heapUpdate(heap, 0)
+	checkHeap(t, heap)
+	if heap[0].ev.time != -22 {
+		t.Fatalf("heap update failed, expected %d as heap min: %s", -22, heapDebugString(heap))
+	}
+
+	// Update the last element to be larger. There should be no change.
+	heap[len(heap)-1].ev.time = 21
+	heapUpdate(heap, len(heap)-1)
+	checkHeap(t, heap)
+	if heap[len(heap)-1].ev.time != 21 {
+		t.Fatalf("heap update failed, expected %d as heap min: %s", 21, heapDebugString(heap))
+	}
+
+	// Update the last element to be smaller.
+	heap[len(heap)-1].ev.time = 7
+	heapUpdate(heap, len(heap)-1)
+	checkHeap(t, heap)
+	if heap[len(heap)-1].ev.time == 21 {
+		t.Fatalf("heap update failed, unexpected %d as heap min: %s", 21, heapDebugString(heap))
+	}
+
+	// Remove an element in the middle.
+	for i := range heap {
+		if heap[i].ev.time == 5 {
+			heap = heapRemove(heap, i)
+			break
+		}
+	}
+	checkHeap(t, heap)
+	for i := range heap {
+		if heap[i].ev.time == 5 {
+			t.Fatalf("failed to remove heap elem with time %d: %s", 5, heapDebugString(heap))
+		}
+	}
+
+	// Remove tail.
+	heap = heapRemove(heap, len(heap)-1)
+	checkHeap(t, heap)
+
+	// Remove from the head, and make sure the result is sorted.
+	l := len(heap)
+	var removed []*batchCursor
+	for i := 0; i < l; i++ {
+		removed = append(removed, heap[0])
+		heap = heapRemove(heap, 0)
+		checkHeap(t, heap)
+	}
+	if !slices.IsSortedFunc(removed, (*batchCursor).compare) {
+		t.Fatalf("heap elements not removed in sorted order, got: %s", heapDebugString(removed))
+	}
+}
+
+func makeBatchCursor(v int64) *batchCursor {
+	return &batchCursor{ev: baseEvent{time: Time(v)}}
+}
+
+func heapDebugString(heap []*batchCursor) string {
+	var sb strings.Builder
+	fmt.Fprintf(&sb, "[")
+	for i := range heap {
+		if i != 0 {
+			fmt.Fprintf(&sb, ", ")
+		}
+		fmt.Fprintf(&sb, "%d", heap[i].ev.time)
+	}
+	fmt.Fprintf(&sb, "]")
+	return sb.String()
+}
+
+func checkHeap(t *testing.T, heap []*batchCursor) {
+	t.Helper()
+
+	for i := range heap {
+		if i == 0 {
+			continue
+		}
+		if heap[(i-1)/2].compare(heap[i]) > 0 {
+			t.Errorf("heap invariant not maintained between index %d and parent %d: %s", i, i/2, heapDebugString(heap))
+		}
+	}
+	if t.Failed() {
+		t.FailNow()
+	}
+}
diff --git a/src/internal/trace/v2/event.go b/src/internal/trace/v2/event.go
new file mode 100644
index 0000000..763313c
--- /dev/null
+++ b/src/internal/trace/v2/event.go
@@ -0,0 +1,780 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"fmt"
+	"math"
+	"strings"
+	"time"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/event/go122"
+	"internal/trace/v2/version"
+)
+
+// EventKind indicates the kind of event this is.
+//
+// Use this information to obtain a more specific event that
+// allows access to more detailed information.
+type EventKind uint16
+
+const (
+	EventBad EventKind = iota
+
+	// EventSync is an event that indicates a global synchronization
+	// point in the trace. At the point of a sync event, the
+	// trace reader can be certain that all resources (e.g. threads,
+	// goroutines) that have existed until that point have been enumerated.
+	EventSync
+
+	// EventMetric is an event that represents the value of a metric at
+	// a particular point in time.
+	EventMetric
+
+	// EventLabel attaches a label to a resource.
+	EventLabel
+
+	// EventStackSample represents an execution sample, indicating what a
+	// thread/proc/goroutine was doing at a particular point in time via
+	// its backtrace.
+	//
+	// Note: Samples should be considered a close approximation of
+	// what a thread/proc/goroutine was executing at a given point in time.
+	// These events may slightly contradict the situation StateTransitions
+	// describe, so they should only be treated as a best-effort annotation.
+	EventStackSample
+
+	// EventRangeBegin and EventRangeEnd are a pair of generic events representing
+	// a special range of time. Ranges are named and scoped to some resource
+	// (identified via ResourceKind). A range that has begun but has not ended
+	// is considered active.
+	//
+	// EventRangeBegin and EventRangeEnd will share the same name, and an End will always
+	// follow a Begin on the same instance of the resource. The associated
+	// resource ID can be obtained from the Event. ResourceNone indicates the
+	// range is globally scoped. That is, any goroutine/proc/thread can start or
+	// stop, but only one such range may be active at any given time.
+	//
+	// EventRangeActive is like EventRangeBegin, but indicates that the range was
+	// already active. In this case, the resource referenced may not be in the current
+	// context.
+	EventRangeBegin
+	EventRangeActive
+	EventRangeEnd
+
+	// EventTaskBegin and EventTaskEnd are a pair of events representing a runtime/trace.Task.
+	EventTaskBegin
+	EventTaskEnd
+
+	// EventRegionBegin and EventRegionEnd are a pair of events representing a runtime/trace.Region.
+	EventRegionBegin
+	EventRegionEnd
+
+	// EventLog represents a runtime/trace.Log call.
+	EventLog
+
+	// EventStateTransition represents a transition in the state of some resource.
+	EventStateTransition
+)
+
+// String returns a string form of the EventKind.
+func (e EventKind) String() string {
+	if int(e) >= len(eventKindStrings) {
+		return eventKindStrings[0]
+	}
+	return eventKindStrings[e]
+}
+
+var eventKindStrings = [...]string{
+	EventBad:             "Bad",
+	EventSync:            "Sync",
+	EventMetric:          "Metric",
+	EventLabel:           "Label",
+	EventStackSample:     "StackSample",
+	EventRangeBegin:      "RangeBegin",
+	EventRangeActive:     "RangeActive",
+	EventRangeEnd:        "RangeEnd",
+	EventTaskBegin:       "TaskBegin",
+	EventTaskEnd:         "TaskEnd",
+	EventRegionBegin:     "RegionBegin",
+	EventRegionEnd:       "RegionEnd",
+	EventLog:             "Log",
+	EventStateTransition: "StateTransition",
+}
+
+const maxTime = Time(math.MaxInt64)
+
+// Time is a timestamp in nanoseconds.
+//
+// It corresponds to the monotonic clock on the platform on which the
+// trace was taken, so it is possible to correlate it with timestamps
+// from other traces taken on the same machine using the same clock
+// (i.e. no reboots in between).
+//
+// The actual absolute value of the timestamp is only meaningful in
+// relation to other timestamps from the same clock.
+//
+// BUG: Timestamps coming from traces on Windows platforms are
+// only comparable with timestamps from the same trace. Timestamps
+// across traces cannot be compared, because the system clock is
+// not used as of Go 1.22.
+//
+// BUG: Traces produced by Go versions 1.21 and earlier cannot be
+// compared with timestamps from other traces taken on the same
+// machine. This is because the system clock was not used at all
+// to collect those timestamps.
+type Time int64
+
+// Sub subtracts t0 from t, returning the duration in nanoseconds.
+func (t Time) Sub(t0 Time) time.Duration {
+	return time.Duration(int64(t) - int64(t0))
+}
+
+// Metric provides details about a Metric event.
+type Metric struct {
+	// Name is the name of the sampled metric.
+	//
+	// Names follow the same convention as metric names in the
+	// runtime/metrics package, meaning they include the unit.
+	// Names that match with the runtime/metrics package represent
+	// the same quantity. Note that this corresponds to the
+	// runtime/metrics package for the Go version this trace was
+	// collected for.
+	Name string
+
+	// Value is the sampled value of the metric.
+	//
+	// The Value's Kind is tied to the name of the metric, and so is
+	// guaranteed to be the same for metric samples for the same metric.
+	Value Value
+}
+
+// Label provides details about a Label event.
+type Label struct {
+	// Label is the label applied to some resource.
+	Label string
+
+	// Resource is the resource to which this label should be applied.
+	Resource ResourceID
+}
+
+// Range provides details about a Range event.
+type Range struct {
+	// Name is a human-readable name for the range.
+	//
+	// This name can be used to identify the end of the range for the resource
+	// it's scoped to, because only one of each type of range may be active on
+	// a particular resource. The relevant resource should be obtained from the
+	// Event that produced these details. The corresponding RangeEnd will have
+	// an identical name.
+	Name string
+
+	// Scope is the resource that the range is scoped to.
+	//
+	// For example, a ResourceGoroutine scope means that the same goroutine
+	// must have a start and end for the range, and that goroutine can only
+	// have one range of a particular name active at any given time. The
+	// ID that this range is scoped to may be obtained via Event.Goroutine.
+	//
+	// The ResourceNone scope means that the range is globally scoped. As a
+	// result, any goroutine/proc/thread may start or end the range, and only
+	// one such named range may be active globally at any given time.
+	//
+	// For RangeBegin and RangeEnd events, this will always reference some
+	// resource ID in the current execution context. For RangeActive events,
+	// this may reference a resource not in the current context. Prefer Scope
+	// over the current execution context.
+	Scope ResourceID
+}
+
+// RangeAttribute provides an attribute of a completed Range.
+type RangeAttribute struct {
+	// Name is the human-readable name of the attribute.
+	Name string
+
+	// Value is the value of the attribute.
+	Value Value
+}
+
+// TaskID is the internal ID of a task used to disambiguate tasks (even if they
+// are of the same type).
+type TaskID uint64
+
+const (
+	// NoTask indicates the lack of a task.
+	NoTask = TaskID(^uint64(0))
+
+	// BackgroundTask is the global task that events are attached to if there was
+	// no other task in the context at the point the event was emitted.
+	BackgroundTask = TaskID(0)
+)
+
+// Task provides details about a Task event.
+type Task struct {
+	// ID is a unique identifier for the task.
+	//
+	// This can be used to associate the beginning of a task with its end.
+	ID TaskID
+
+	// Parent is the ID of the parent task.
+	Parent TaskID
+
+	// Type is the taskType that was passed to runtime/trace.NewTask.
+	//
+	// May be "" if a task's TaskBegin event isn't present in the trace.
+	Type string
+}
+
+// Region provides details about a Region event.
+type Region struct {
+	// Task is the ID of the task this region is associated with.
+	Task TaskID
+
+	// Type is the regionType that was passed to runtime/trace.StartRegion or runtime/trace.WithRegion.
+	Type string
+}
+
+// Log provides details about a Log event.
+type Log struct {
+	// Task is the ID of the task this log message is associated with.
+	Task TaskID
+
+	// Category is the category that was passed to runtime/trace.Log or runtime/trace.Logf.
+	Category string
+
+	// Message is the message that was passed to runtime/trace.Log or runtime/trace.Logf.
+	Message string
+}
+
+// Stack represents a stack. It's really a handle to a stack and it's trivially comparable.
+//
+// If two Stacks are equal then their Frames are guaranteed to be identical. If they are not
+// equal, however, their Frames may still be equal.
+type Stack struct {
+	table *evTable
+	id    stackID
+}
+
+// Frames is an iterator over the frames in a Stack.
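+//
+// A minimal usage sketch (stk is assumed to be the result of Event.Stack):
+//
+//	stk.Frames(func(f StackFrame) bool {
+//		fmt.Printf("%s\n\t%s:%d\n", f.Func, f.File, f.Line)
+//		return true // keep iterating
+//	})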
+func (s Stack) Frames(yield func(f StackFrame) bool) bool {
+	if s.id == 0 {
+		return true
+	}
+	stk := s.table.stacks.mustGet(s.id)
+	for _, f := range stk.frames {
+		sf := StackFrame{
+			PC:   f.pc,
+			Func: s.table.strings.mustGet(f.funcID),
+			File: s.table.strings.mustGet(f.fileID),
+			Line: f.line,
+		}
+		if !yield(sf) {
+			return false
+		}
+	}
+	return true
+}
+
+// NoStack is a sentinel value that can be compared against any Stack value, indicating
+// a lack of a stack trace.
+var NoStack = Stack{}
+
+// StackFrame represents a single frame of a stack.
+type StackFrame struct {
+	// PC is the program counter of the function call if this
+	// is not a leaf frame. If it's a leaf frame, it's the point
+	// at which the stack trace was taken.
+	PC uint64
+
+	// Func is the name of the function this frame maps to.
+	Func string
+
+	// File is the file which contains the source code of Func.
+	File string
+
+	// Line is the line number within File which maps to PC.
+	Line uint64
+}
+
+// Event represents a single event in the trace.
+type Event struct {
+	table *evTable
+	ctx   schedCtx
+	base  baseEvent
+}
+
+// Kind returns the kind of event that this is.
+func (e Event) Kind() EventKind {
+	return go122Type2Kind[e.base.typ]
+}
+
+// Time returns the timestamp of the event.
+func (e Event) Time() Time {
+	return e.base.time
+}
+
+// Goroutine returns the ID of the goroutine that was executing when
+// this event happened. It describes part of the execution context
+// for this event.
+//
+// Note that for goroutine state transitions this always refers to the
+// state before the transition. For example, if a goroutine is just
+// starting to run on this thread and/or proc, then this will return
+// NoGoroutine. In this case, the goroutine starting to run can be
+// found at Event.StateTransition().Resource.
+func (e Event) Goroutine() GoID {
+	return e.ctx.G
+}
+
+// Proc returns the ID of the proc this event pertains to.
+//
+// Note that for proc state transitions this always refers to the
+// state before the transition. For example, if a proc is just
+// starting to run on this thread, then this will return NoProc.
+func (e Event) Proc() ProcID {
+	return e.ctx.P
+}
+
+// Thread returns the ID of the thread this event pertains to.
+//
+// Note that for thread state transitions this always refers to the
+// state before the transition. For example, if a thread is just
+// starting to run, then this will return NoThread.
+//
+// Note: tracking thread state is not currently supported, so this
+// will always return a valid thread ID. However, thread state transitions
+// may be tracked in the future, and callers must be robust to this
+// possibility.
+func (e Event) Thread() ThreadID {
+	return e.ctx.M
+}
+
+// Stack returns a handle to a stack associated with the event.
+//
+// This represents a stack trace at the current moment in time for
+// the current execution context.
+func (e Event) Stack() Stack {
+	if e.base.typ == evSync {
+		return NoStack
+	}
+	if e.base.typ == go122.EvCPUSample {
+		return Stack{table: e.table, id: stackID(e.base.args[0])}
+	}
+	spec := go122.Specs()[e.base.typ]
+	if len(spec.StackIDs) == 0 {
+		return NoStack
+	}
+	// The stack for the main execution context is always the
+	// first stack listed in StackIDs. Subtract one from this
+	// because we've peeled away the timestamp argument.
+	id := stackID(e.base.args[spec.StackIDs[0]-1])
+	if id == 0 {
+		return NoStack
+	}
+	return Stack{table: e.table, id: id}
+}
+
+// Metric returns details about a Metric event.
+//
+// Panics if Kind != EventMetric.
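+//
+// A brief sketch of consuming the result (ev is assumed to be an Event):
+//
+//	if ev.Kind() == EventMetric {
+//		m := ev.Metric()
+//		// m.Name follows runtime/metrics naming, e.g. "/gc/heap/goal:bytes",
+//		// and m.Value carries the sampled value.
+//	}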
+func (e Event) Metric() Metric {
+	if e.Kind() != EventMetric {
+		panic("Metric called on non-Metric event")
+	}
+	var m Metric
+	switch e.base.typ {
+	case go122.EvProcsChange:
+		m.Name = "/sched/gomaxprocs:threads"
+		m.Value = Value{kind: ValueUint64, scalar: e.base.args[0]}
+	case go122.EvHeapAlloc:
+		m.Name = "/memory/classes/heap/objects:bytes"
+		m.Value = Value{kind: ValueUint64, scalar: e.base.args[0]}
+	case go122.EvHeapGoal:
+		m.Name = "/gc/heap/goal:bytes"
+		m.Value = Value{kind: ValueUint64, scalar: e.base.args[0]}
+	default:
+		panic(fmt.Sprintf("internal error: unexpected event type for Metric kind: %s", go122.EventString(e.base.typ)))
+	}
+	return m
+}
+
+// Label returns details about a Label event.
+//
+// Panics if Kind != EventLabel.
+func (e Event) Label() Label {
+	if e.Kind() != EventLabel {
+		panic("Label called on non-Label event")
+	}
+	if e.base.typ != go122.EvGoLabel {
+		panic(fmt.Sprintf("internal error: unexpected event type for Label kind: %s", go122.EventString(e.base.typ)))
+	}
+	return Label{
+		Label:    e.table.strings.mustGet(stringID(e.base.args[0])),
+		Resource: ResourceID{Kind: ResourceGoroutine, id: int64(e.ctx.G)},
+	}
+}
+
+// Range returns details about an EventRangeBegin, EventRangeActive, or EventRangeEnd event.
+//
+// Panics if Kind != EventRangeBegin, Kind != EventRangeActive, and Kind != EventRangeEnd.
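+//
+// As an illustrative sketch (ev is assumed to be an Event):
+//
+//	if k := ev.Kind(); k == EventRangeBegin || k == EventRangeActive || k == EventRangeEnd {
+//		r := ev.Range()
+//		_ = r.Name  // e.g. "GC concurrent mark phase"
+//		_ = r.Scope // the resource the range is scoped to
+//	}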
+func (e Event) Range() Range {
+	if kind := e.Kind(); kind != EventRangeBegin && kind != EventRangeActive && kind != EventRangeEnd {
+		panic("Range called on non-Range event")
+	}
+	var r Range
+	switch e.base.typ {
+	case go122.EvSTWBegin, go122.EvSTWEnd:
+		// N.B. ordering.advance smuggles in the STW reason as e.base.args[0]
+		// for go122.EvSTWEnd (it's already there for Begin).
+		r.Name = "stop-the-world (" + e.table.strings.mustGet(stringID(e.base.args[0])) + ")"
+		r.Scope = ResourceID{Kind: ResourceGoroutine, id: int64(e.Goroutine())}
+	case go122.EvGCBegin, go122.EvGCActive, go122.EvGCEnd:
+		r.Name = "GC concurrent mark phase"
+		r.Scope = ResourceID{Kind: ResourceNone}
+	case go122.EvGCSweepBegin, go122.EvGCSweepActive, go122.EvGCSweepEnd:
+		r.Name = "GC incremental sweep"
+		r.Scope = ResourceID{Kind: ResourceProc}
+		if e.base.typ == go122.EvGCSweepActive {
+			r.Scope.id = int64(e.base.args[0])
+		} else {
+			r.Scope.id = int64(e.Proc())
+		}
+	case go122.EvGCMarkAssistBegin, go122.EvGCMarkAssistActive, go122.EvGCMarkAssistEnd:
+		r.Name = "GC mark assist"
+		r.Scope = ResourceID{Kind: ResourceGoroutine}
+		if e.base.typ == go122.EvGCMarkAssistActive {
+			r.Scope.id = int64(e.base.args[0])
+		} else {
+			r.Scope.id = int64(e.Goroutine())
+		}
+	default:
+		panic(fmt.Sprintf("internal error: unexpected event type for Range kind: %s", go122.EventString(e.base.typ)))
+	}
+	return r
+}
+
+// RangeAttributes returns attributes for a completed range.
+//
+// Panics if Kind != EventRangeEnd.
+func (e Event) RangeAttributes() []RangeAttribute {
+	if e.Kind() != EventRangeEnd {
+		panic("Range called on non-Range event")
+	}
+	if e.base.typ != go122.EvGCSweepEnd {
+		return nil
+	}
+	return []RangeAttribute{
+		{
+			Name:  "bytes swept",
+			Value: Value{kind: ValueUint64, scalar: e.base.args[0]},
+		},
+		{
+			Name:  "bytes reclaimed",
+			Value: Value{kind: ValueUint64, scalar: e.base.args[1]},
+		},
+	}
+}
+
+// Task returns details about a TaskBegin or TaskEnd event.
+//
+// Panics if Kind != EventTaskBegin and Kind != EventTaskEnd.
+func (e Event) Task() Task {
+	if kind := e.Kind(); kind != EventTaskBegin && kind != EventTaskEnd {
+		panic("Task called on non-Task event")
+	}
+	parentID := NoTask
+	var typ string
+	switch e.base.typ {
+	case go122.EvUserTaskBegin:
+		parentID = TaskID(e.base.args[1])
+		typ = e.table.strings.mustGet(stringID(e.base.args[2]))
+	case go122.EvUserTaskEnd:
+		parentID = TaskID(e.base.extra(version.Go122)[0])
+		typ = e.table.getExtraString(extraStringID(e.base.extra(version.Go122)[1]))
+	default:
+		panic(fmt.Sprintf("internal error: unexpected event type for Task kind: %s", go122.EventString(e.base.typ)))
+	}
+	return Task{
+		ID:     TaskID(e.base.args[0]),
+		Parent: parentID,
+		Type:   typ,
+	}
+}
+
+// Region returns details about a RegionBegin or RegionEnd event.
+//
+// Panics if Kind != EventRegionBegin and Kind != EventRegionEnd.
+func (e Event) Region() Region {
+	if kind := e.Kind(); kind != EventRegionBegin && kind != EventRegionEnd {
+		panic("Region called on non-Region event")
+	}
+	if e.base.typ != go122.EvUserRegionBegin && e.base.typ != go122.EvUserRegionEnd {
+		panic(fmt.Sprintf("internal error: unexpected event type for Region kind: %s", go122.EventString(e.base.typ)))
+	}
+	return Region{
+		Task: TaskID(e.base.args[0]),
+		Type: e.table.strings.mustGet(stringID(e.base.args[1])),
+	}
+}
+
+// Log returns details about a Log event.
+//
+// Panics if Kind != EventLog.
+func (e Event) Log() Log {
+	if e.Kind() != EventLog {
+		panic("Log called on non-Log event")
+	}
+	if e.base.typ != go122.EvUserLog {
+		panic(fmt.Sprintf("internal error: unexpected event type for Log kind: %s", go122.EventString(e.base.typ)))
+	}
+	return Log{
+		Task:     TaskID(e.base.args[0]),
+		Category: e.table.strings.mustGet(stringID(e.base.args[1])),
+		Message:  e.table.strings.mustGet(stringID(e.base.args[2])),
+	}
+}
+
+// StateTransition returns details about a StateTransition event.
+//
+// Panics if Kind != EventStateTransition.
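+//
+// A hedged sketch of spotting goroutine creation (ev is assumed to be an Event):
+//
+//	if ev.Kind() == EventStateTransition {
+//		st := ev.StateTransition()
+//		if st.Resource.Kind == ResourceGoroutine {
+//			if from, to := st.Goroutine(); from == GoNotExist && to == GoRunnable {
+//				// st.Resource.Goroutine() identifies the newly created goroutine.
+//			}
+//		}
+//	}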
+func (e Event) StateTransition() StateTransition {
+	if e.Kind() != EventStateTransition {
+		panic("StateTransition called on non-StateTransition event")
+	}
+	var s StateTransition
+	switch e.base.typ {
+	case go122.EvProcStart:
+		s = procStateTransition(ProcID(e.base.args[0]), ProcIdle, ProcRunning)
+	case go122.EvProcStop:
+		s = procStateTransition(e.ctx.P, ProcRunning, ProcIdle)
+	case go122.EvProcSteal:
+		// N.B. ordering.advance populates e.base.extra.
+		beforeState := ProcRunning
+		if go122.ProcStatus(e.base.extra(version.Go122)[0]) == go122.ProcSyscallAbandoned {
+			// We've lost information because this ProcSteal advanced on a
+			// SyscallAbandoned state. Treat the P as idle because ProcStatus
+			// treats SyscallAbandoned as Idle. Otherwise we'll have an invalid
+			// transition.
+			beforeState = ProcIdle
+		}
+		s = procStateTransition(ProcID(e.base.args[0]), beforeState, ProcIdle)
+	case go122.EvProcStatus:
+		// N.B. ordering.advance populates e.base.extra.
+		s = procStateTransition(ProcID(e.base.args[0]), ProcState(e.base.extra(version.Go122)[0]), go122ProcStatus2ProcState[e.base.args[1]])
+	case go122.EvGoCreate:
+		s = goStateTransition(GoID(e.base.args[0]), GoNotExist, GoRunnable)
+		s.Stack = Stack{table: e.table, id: stackID(e.base.args[1])}
+	case go122.EvGoCreateSyscall:
+		s = goStateTransition(GoID(e.base.args[0]), GoNotExist, GoSyscall)
+	case go122.EvGoStart:
+		s = goStateTransition(GoID(e.base.args[0]), GoRunnable, GoRunning)
+	case go122.EvGoDestroy:
+		s = goStateTransition(e.ctx.G, GoRunning, GoNotExist)
+		s.Stack = e.Stack() // This event references the resource the event happened on.
+	case go122.EvGoDestroySyscall:
+		s = goStateTransition(e.ctx.G, GoSyscall, GoNotExist)
+	case go122.EvGoStop:
+		s = goStateTransition(e.ctx.G, GoRunning, GoRunnable)
+		s.Reason = e.table.strings.mustGet(stringID(e.base.args[0]))
+		s.Stack = e.Stack() // This event references the resource the event happened on.
+	case go122.EvGoBlock:
+		s = goStateTransition(e.ctx.G, GoRunning, GoWaiting)
+		s.Reason = e.table.strings.mustGet(stringID(e.base.args[0]))
+		s.Stack = e.Stack() // This event references the resource the event happened on.
+	case go122.EvGoUnblock:
+		s = goStateTransition(GoID(e.base.args[0]), GoWaiting, GoRunnable)
+	case go122.EvGoSyscallBegin:
+		s = goStateTransition(e.ctx.G, GoRunning, GoSyscall)
+		s.Stack = e.Stack() // This event references the resource the event happened on.
+	case go122.EvGoSyscallEnd:
+		s = goStateTransition(e.ctx.G, GoSyscall, GoRunning)
+		s.Stack = e.Stack() // This event references the resource the event happened on.
+	case go122.EvGoSyscallEndBlocked:
+		s = goStateTransition(e.ctx.G, GoSyscall, GoRunnable)
+		s.Stack = e.Stack() // This event references the resource the event happened on.
+	case go122.EvGoStatus:
+		// N.B. ordering.advance populates e.base.extra.
+		s = goStateTransition(GoID(e.base.args[0]), GoState(e.base.extra(version.Go122)[0]), go122GoStatus2GoState[e.base.args[2]])
+	default:
+		panic(fmt.Sprintf("internal error: unexpected event type for StateTransition kind: %s", go122.EventString(e.base.typ)))
+	}
+	return s
+}
+
+const evSync = ^event.Type(0)
+
+var go122Type2Kind = [...]EventKind{
+	go122.EvCPUSample:           EventStackSample,
+	go122.EvProcsChange:         EventMetric,
+	go122.EvProcStart:           EventStateTransition,
+	go122.EvProcStop:            EventStateTransition,
+	go122.EvProcSteal:           EventStateTransition,
+	go122.EvProcStatus:          EventStateTransition,
+	go122.EvGoCreate:            EventStateTransition,
+	go122.EvGoCreateSyscall:     EventStateTransition,
+	go122.EvGoStart:             EventStateTransition,
+	go122.EvGoDestroy:           EventStateTransition,
+	go122.EvGoDestroySyscall:    EventStateTransition,
+	go122.EvGoStop:              EventStateTransition,
+	go122.EvGoBlock:             EventStateTransition,
+	go122.EvGoUnblock:           EventStateTransition,
+	go122.EvGoSyscallBegin:      EventStateTransition,
+	go122.EvGoSyscallEnd:        EventStateTransition,
+	go122.EvGoSyscallEndBlocked: EventStateTransition,
+	go122.EvGoStatus:            EventStateTransition,
+	go122.EvSTWBegin:            EventRangeBegin,
+	go122.EvSTWEnd:              EventRangeEnd,
+	go122.EvGCActive:            EventRangeActive,
+	go122.EvGCBegin:             EventRangeBegin,
+	go122.EvGCEnd:               EventRangeEnd,
+	go122.EvGCSweepActive:       EventRangeActive,
+	go122.EvGCSweepBegin:        EventRangeBegin,
+	go122.EvGCSweepEnd:          EventRangeEnd,
+	go122.EvGCMarkAssistActive:  EventRangeActive,
+	go122.EvGCMarkAssistBegin:   EventRangeBegin,
+	go122.EvGCMarkAssistEnd:     EventRangeEnd,
+	go122.EvHeapAlloc:           EventMetric,
+	go122.EvHeapGoal:            EventMetric,
+	go122.EvGoLabel:             EventLabel,
+	go122.EvUserTaskBegin:       EventTaskBegin,
+	go122.EvUserTaskEnd:         EventTaskEnd,
+	go122.EvUserRegionBegin:     EventRegionBegin,
+	go122.EvUserRegionEnd:       EventRegionEnd,
+	go122.EvUserLog:             EventLog,
+	evSync:                      EventSync,
+}
+
+var go122GoStatus2GoState = [...]GoState{
+	go122.GoRunnable: GoRunnable,
+	go122.GoRunning:  GoRunning,
+	go122.GoWaiting:  GoWaiting,
+	go122.GoSyscall:  GoSyscall,
+}
+
+var go122ProcStatus2ProcState = [...]ProcState{
+	go122.ProcRunning:          ProcRunning,
+	go122.ProcIdle:             ProcIdle,
+	go122.ProcSyscall:          ProcRunning,
+	go122.ProcSyscallAbandoned: ProcIdle,
+}
+
+// String returns the event as a human-readable string.
+//
+// The format of the string is intended for debugging and is subject to change.
+func (e Event) String() string {
+	var sb strings.Builder
+	fmt.Fprintf(&sb, "M=%d P=%d G=%d", e.Thread(), e.Proc(), e.Goroutine())
+	fmt.Fprintf(&sb, " %s Time=%d", e.Kind(), e.Time())
+	// Kind-specific fields.
+	switch kind := e.Kind(); kind {
+	case EventMetric:
+		m := e.Metric()
+		fmt.Fprintf(&sb, " Name=%q Value=%s", m.Name, valueAsString(m.Value))
+	case EventLabel:
+		l := e.Label()
+		fmt.Fprintf(&sb, " Label=%q Resource=%s", l.Label, l.Resource)
+	case EventRangeBegin, EventRangeActive, EventRangeEnd:
+		r := e.Range()
+		fmt.Fprintf(&sb, " Name=%q Scope=%s", r.Name, r.Scope)
+		if kind == EventRangeEnd {
+			fmt.Fprintf(&sb, " Attributes=[")
+			for i, attr := range e.RangeAttributes() {
+				if i != 0 {
+					fmt.Fprintf(&sb, " ")
+				}
+				fmt.Fprintf(&sb, "%q=%s", attr.Name, valueAsString(attr.Value))
+			}
+			fmt.Fprintf(&sb, "]")
+		}
+	case EventTaskBegin, EventTaskEnd:
+		t := e.Task()
+		fmt.Fprintf(&sb, " ID=%d Parent=%d Type=%q", t.ID, t.Parent, t.Type)
+	case EventRegionBegin, EventRegionEnd:
+		r := e.Region()
+		fmt.Fprintf(&sb, " Task=%d Type=%q", r.Task, r.Type)
+	case EventLog:
+		l := e.Log()
+		fmt.Fprintf(&sb, " Task=%d Category=%q Message=%q", l.Task, l.Category, l.Message)
+	case EventStateTransition:
+		s := e.StateTransition()
+		fmt.Fprintf(&sb, " Resource=%s Reason=%q", s.Resource, s.Reason)
+		switch s.Resource.Kind {
+		case ResourceGoroutine:
+			id := s.Resource.Goroutine()
+			old, new := s.Goroutine()
+			fmt.Fprintf(&sb, " GoID=%d %s->%s", id, old, new)
+		case ResourceProc:
+			id := s.Resource.Proc()
+			old, new := s.Proc()
+			fmt.Fprintf(&sb, " ProcID=%d %s->%s", id, old, new)
+		}
+		if s.Stack != NoStack {
+			fmt.Fprintln(&sb)
+			fmt.Fprintln(&sb, "TransitionStack=")
+			s.Stack.Frames(func(f StackFrame) bool {
+				fmt.Fprintf(&sb, "\t%s @ 0x%x\n", f.Func, f.PC)
+				fmt.Fprintf(&sb, "\t\t%s:%d\n", f.File, f.Line)
+				return true
+			})
+		}
+	}
+	if stk := e.Stack(); stk != NoStack {
+		fmt.Fprintln(&sb)
+		fmt.Fprintln(&sb, "Stack=")
+		stk.Frames(func(f StackFrame) bool {
+			fmt.Fprintf(&sb, "\t%s @ 0x%x\n", f.Func, f.PC)
+			fmt.Fprintf(&sb, "\t\t%s:%d\n", f.File, f.Line)
+			return true
+		})
+	}
+	return sb.String()
+}
+
+// validateTableIDs checks to make sure lookups in e.table
+// will work.
+func (e Event) validateTableIDs() error {
+	if e.base.typ == evSync {
+		return nil
+	}
+	spec := go122.Specs()[e.base.typ]
+
+	// Check stacks.
+	for _, i := range spec.StackIDs {
+		id := stackID(e.base.args[i-1])
+		_, ok := e.table.stacks.get(id)
+		if !ok {
+			return fmt.Errorf("found invalid stack ID %d for event %s", id, spec.Name)
+		}
+	}
+	// N.B. Strings referenced by stack frames are validated
+	// early on, when reading the stacks in to begin with.
+
+	// Check strings.
+	for _, i := range spec.StringIDs {
+		id := stringID(e.base.args[i-1])
+		_, ok := e.table.strings.get(id)
+		if !ok {
+			return fmt.Errorf("found invalid string ID %d for event %s", id, spec.Name)
+		}
+	}
+	return nil
+}
+
+func syncEvent(table *evTable, ts Time) Event {
+	return Event{
+		table: table,
+		ctx: schedCtx{
+			G: NoGoroutine,
+			P: NoProc,
+			M: NoThread,
+		},
+		base: baseEvent{
+			typ:  evSync,
+			time: ts,
+		},
+	}
+}
diff --git a/src/internal/trace/v2/event/event.go b/src/internal/trace/v2/event/event.go
new file mode 100644
index 0000000..111dde6
--- /dev/null
+++ b/src/internal/trace/v2/event/event.go
@@ -0,0 +1,89 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package event
+
+// Type is the common in-memory representation of the low-level event type.
+type Type uint8
+
+// Spec is a specification for a trace event. It contains sufficient information
+// to perform basic parsing of any trace event for any version of Go.
+type Spec struct {
+	// Name is the human-readable name of the trace event.
+	Name string
+
+	// Args contains the names of each trace event's argument.
+	// Its length determines the number of arguments an event has.
+	//
+	// Argument names follow a certain structure, and this structure
+	// is relied on by the testing framework to type-check arguments.
+	// The structure is:
+	//
+	//     (?P<name>[A-Za-z]+_)?(?P<type>[A-Za-z]+)
+	//
+	// In sum, it's an optional name followed by a type. If the name
+	// is present, it is separated from the type with an underscore.
+	// The valid argument types and the Go types they map to are listed
+	// in the ArgTypes variable.
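+	//
+	// For example, "reason_string" names an argument "reason" of type
+	// "string" (a string ID), while a bare "stack" is an unnamed
+	// stack-ID argument.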
+	Args []string
+
+	// StartEv indicates the event type of the corresponding "start"
+	// event, if this event is the "end" of a pair of events that
+	// represent a time range.
+	StartEv Type
+
+	// IsTimedEvent indicates whether this is an event that both
+	// appears in the main event stream and is surfaced to the
+	// trace reader.
+	//
+	// Events that are not "timed" are considered "structural"
+	// since they either need significant reinterpretation or
+	// otherwise aren't actually surfaced by the trace reader.
+	IsTimedEvent bool
+
+	// HasData is true if the event has a trailer consisting of a
+	// varint length followed by unencoded bytes of some data.
+	HasData bool
+
+	// StringIDs indicates which of the arguments are string IDs.
+	StringIDs []int
+
+	// StackIDs indicates which of the arguments are stack IDs.
+	//
+	// The list is not sorted. The first index always refers to
+	// the main stack for the current execution context of the event.
+	StackIDs []int
+
+	// IsStack indicates that the event represents a complete
+	// stack trace. Specifically, it means that after the arguments
+	// there's a varint length, followed by 4*length varints. Each
+	// group of 4 represents the PC, file ID, func ID, and line number
+	// in that order.
+	IsStack bool
+}
+
+// ArgTypes is a list of valid argument types for use in Args.
+//
+// See the documentation of Args for more details.
+var ArgTypes = [...]string{
+	"seq",     // sequence number
+	"pstatus", // P status
+	"gstatus", // G status
+	"g",       // trace.GoID
+	"m",       // trace.ThreadID
+	"p",       // trace.ProcID
+	"string",  // string ID
+	"stack",   // stack ID
+	"value",   // uint64
+	"task",    // trace.TaskID
+}
+
+// Names is a helper that produces a mapping of event names to event types.
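+//
+// A small usage sketch (specs is assumed to be some version's spec table,
+// such as the result of a Specs function for that version):
+//
+//	types := Names(specs)
+//	goStart := types["GoStart"] // the Type for the "GoStart" event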
+func Names(specs []Spec) map[string]Type {
+	nameToType := make(map[string]Type)
+	for i, spec := range specs {
+		nameToType[spec.Name] = Type(byte(i))
+	}
+	return nameToType
+}
diff --git a/src/internal/trace/v2/event/go122/event.go b/src/internal/trace/v2/event/go122/event.go
new file mode 100644
index 0000000..be7ce4c
--- /dev/null
+++ b/src/internal/trace/v2/event/go122/event.go
@@ -0,0 +1,388 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package go122
+
+import (
+	"fmt"
+	"internal/trace/v2/event"
+)
+
+const (
+	EvNone event.Type = iota // unused
+
+	// Structural events.
+	EvEventBatch // start of per-M batch of events [generation, M ID, timestamp, batch length]
+	EvStacks     // start of a section of the stack table [...EvStack]
+	EvStack      // stack table entry [ID, ...{PC, func string ID, file string ID, line #}]
+	EvStrings    // start of a section of the string dictionary [...EvString]
+	EvString     // string dictionary entry [ID, length, string]
+	EvCPUSamples // start of a section of CPU samples [...EvCPUSample]
+	EvCPUSample  // CPU profiling sample [timestamp, M ID, P ID, goroutine ID, stack ID]
+	EvFrequency  // timestamp units per sec [freq]
+
+	// Procs.
+	EvProcsChange // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack ID]
+	EvProcStart   // start of P [timestamp, P ID, P seq]
+	EvProcStop    // stop of P [timestamp]
+	EvProcSteal   // P was stolen [timestamp, P ID, P seq, M ID]
+	EvProcStatus  // P status at the start of a generation [timestamp, P ID, status]
+
+	// Goroutines.
+	EvGoCreate            // goroutine creation [timestamp, new goroutine ID, new stack ID, stack ID]
+	EvGoCreateSyscall     // goroutine appears in syscall (cgo callback) [timestamp, new goroutine ID]
+	EvGoStart             // goroutine starts running [timestamp, goroutine ID, goroutine seq]
+	EvGoDestroy           // goroutine ends [timestamp]
+	EvGoDestroySyscall    // goroutine ends in syscall (cgo callback) [timestamp]
+	EvGoStop              // goroutine yields its time, but is runnable [timestamp, reason, stack ID]
+	EvGoBlock             // goroutine blocks [timestamp, reason, stack ID]
+	EvGoUnblock           // goroutine is unblocked [timestamp, goroutine ID, goroutine seq, stack ID]
+	EvGoSyscallBegin      // syscall enter [timestamp, P seq, stack ID]
+	EvGoSyscallEnd        // syscall exit [timestamp]
+	EvGoSyscallEndBlocked // syscall exit and it blocked at some point [timestamp]
+	EvGoStatus            // goroutine status at the start of a generation [timestamp, goroutine ID, status]
+
+	// STW.
+	EvSTWBegin // STW start [timestamp, kind]
+	EvSTWEnd   // STW done [timestamp]
+
+	// GC events.
+	EvGCActive           // GC active [timestamp, seq]
+	EvGCBegin            // GC start [timestamp, seq, stack ID]
+	EvGCEnd              // GC done [timestamp, seq]
+	EvGCSweepActive      // GC sweep active [timestamp, P ID]
+	EvGCSweepBegin       // GC sweep start [timestamp, stack ID]
+	EvGCSweepEnd         // GC sweep done [timestamp, swept bytes, reclaimed bytes]
+	EvGCMarkAssistActive // GC mark assist active [timestamp, goroutine ID]
+	EvGCMarkAssistBegin  // GC mark assist start [timestamp, stack ID]
+	EvGCMarkAssistEnd    // GC mark assist done [timestamp]
+	EvHeapAlloc          // gcController.heapLive change [timestamp, heap alloc in bytes]
+	EvHeapGoal           // gcController.heapGoal() change [timestamp, heap goal in bytes]
+
+	// Annotations.
+	EvGoLabel         // apply string label to current running goroutine [timestamp, label string ID]
+	EvUserTaskBegin   // trace.NewTask [timestamp, internal task ID, internal parent task ID, name string ID, stack ID]
+	EvUserTaskEnd     // end of a task [timestamp, internal task ID, stack ID]
+	EvUserRegionBegin // trace.{Start,With}Region [timestamp, internal task ID, name string ID, stack ID]
+	EvUserRegionEnd   // trace.{End,With}Region [timestamp, internal task ID, name string ID, stack ID]
+	EvUserLog         // trace.Log [timestamp, internal task ID, key string ID, stack, value string ID]
+)
+
+// EventString returns the name of a Go 1.22 event.
+func EventString(typ event.Type) string {
+	if int(typ) < len(specs) {
+		return specs[typ].Name
+	}
+	return fmt.Sprintf("Invalid(%d)", typ)
+}
+
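+// Specs returns the specifications for all Go 1.22 trace event types.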
+func Specs() []event.Spec {
+	return specs[:]
+}
+
+var specs = [...]event.Spec{
+	// "Structural" Events.
+	EvEventBatch: event.Spec{
+		Name: "EventBatch",
+		Args: []string{"gen", "m", "time", "size"},
+	},
+	EvStacks: event.Spec{
+		Name: "Stacks",
+	},
+	EvStack: event.Spec{
+		Name:    "Stack",
+		Args:    []string{"id", "nframes"},
+		IsStack: true,
+	},
+	EvStrings: event.Spec{
+		Name: "Strings",
+	},
+	EvString: event.Spec{
+		Name:    "String",
+		Args:    []string{"id"},
+		HasData: true,
+	},
+	EvCPUSamples: event.Spec{
+		Name: "CPUSamples",
+	},
+	EvCPUSample: event.Spec{
+		Name: "CPUSample",
+		Args: []string{"time", "p", "g", "m", "stack"},
+		// N.B. There's clearly a timestamp here, but these Events
+		// are special in that they don't appear in the regular
+		// M streams.
+	},
+	EvFrequency: event.Spec{
+		Name: "Frequency",
+		Args: []string{"freq"},
+	},
+
+	// "Timed" Events.
+	EvProcsChange: event.Spec{
+		Name:         "ProcsChange",
+		Args:         []string{"dt", "procs_value", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{2},
+	},
+	EvProcStart: event.Spec{
+		Name:         "ProcStart",
+		Args:         []string{"dt", "p", "p_seq"},
+		IsTimedEvent: true,
+	},
+	EvProcStop: event.Spec{
+		Name:         "ProcStop",
+		Args:         []string{"dt"},
+		IsTimedEvent: true,
+	},
+	EvProcSteal: event.Spec{
+		Name:         "ProcSteal",
+		Args:         []string{"dt", "p", "p_seq", "m"},
+		IsTimedEvent: true,
+	},
+	EvProcStatus: event.Spec{
+		Name:         "ProcStatus",
+		Args:         []string{"dt", "p", "pstatus"},
+		IsTimedEvent: true,
+	},
+	EvGoCreate: event.Spec{
+		Name:         "GoCreate",
+		Args:         []string{"dt", "new_g", "new_stack", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{3, 2},
+	},
+	EvGoCreateSyscall: event.Spec{
+		Name:         "GoCreateSyscall",
+		Args:         []string{"dt", "new_g"},
+		IsTimedEvent: true,
+	},
+	EvGoStart: event.Spec{
+		Name:         "GoStart",
+		Args:         []string{"dt", "g", "g_seq"},
+		IsTimedEvent: true,
+	},
+	EvGoDestroy: event.Spec{
+		Name:         "GoDestroy",
+		Args:         []string{"dt"},
+		IsTimedEvent: true,
+	},
+	EvGoDestroySyscall: event.Spec{
+		Name:         "GoDestroySyscall",
+		Args:         []string{"dt"},
+		IsTimedEvent: true,
+	},
+	EvGoStop: event.Spec{
+		Name:         "GoStop",
+		Args:         []string{"dt", "reason_string", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{2},
+		StringIDs:    []int{1},
+	},
+	EvGoBlock: event.Spec{
+		Name:         "GoBlock",
+		Args:         []string{"dt", "reason_string", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{2},
+		StringIDs:    []int{1},
+	},
+	EvGoUnblock: event.Spec{
+		Name:         "GoUnblock",
+		Args:         []string{"dt", "g", "g_seq", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{3},
+	},
+	EvGoSyscallBegin: event.Spec{
+		Name:         "GoSyscallBegin",
+		Args:         []string{"dt", "p_seq", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{2},
+	},
+	EvGoSyscallEnd: event.Spec{
+		Name:         "GoSyscallEnd",
+		Args:         []string{"dt"},
+		StartEv:      EvGoSyscallBegin,
+		IsTimedEvent: true,
+	},
+	EvGoSyscallEndBlocked: event.Spec{
+		Name:         "GoSyscallEndBlocked",
+		Args:         []string{"dt"},
+		StartEv:      EvGoSyscallBegin,
+		IsTimedEvent: true,
+	},
+	EvGoStatus: event.Spec{
+		Name:         "GoStatus",
+		Args:         []string{"dt", "g", "m", "gstatus"},
+		IsTimedEvent: true,
+	},
+	EvSTWBegin: event.Spec{
+		Name:         "STWBegin",
+		Args:         []string{"dt", "kind_string", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{2},
+		StringIDs:    []int{1},
+	},
+	EvSTWEnd: event.Spec{
+		Name:         "STWEnd",
+		Args:         []string{"dt"},
+		StartEv:      EvSTWBegin,
+		IsTimedEvent: true,
+	},
+	EvGCActive: event.Spec{
+		Name:         "GCActive",
+		Args:         []string{"dt", "gc_seq"},
+		IsTimedEvent: true,
+		StartEv:      EvGCBegin,
+	},
+	EvGCBegin: event.Spec{
+		Name:         "GCBegin",
+		Args:         []string{"dt", "gc_seq", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{2},
+	},
+	EvGCEnd: event.Spec{
+		Name:         "GCEnd",
+		Args:         []string{"dt", "gc_seq"},
+		StartEv:      EvGCBegin,
+		IsTimedEvent: true,
+	},
+	EvGCSweepActive: event.Spec{
+		Name:         "GCSweepActive",
+		Args:         []string{"dt", "p"},
+		StartEv:      EvGCSweepBegin,
+		IsTimedEvent: true,
+	},
+	EvGCSweepBegin: event.Spec{
+		Name:         "GCSweepBegin",
+		Args:         []string{"dt", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{1},
+	},
+	EvGCSweepEnd: event.Spec{
+		Name:         "GCSweepEnd",
+		Args:         []string{"dt", "swept_value", "reclaimed_value"},
+		StartEv:      EvGCSweepBegin,
+		IsTimedEvent: true,
+	},
+	EvGCMarkAssistActive: event.Spec{
+		Name:         "GCMarkAssistActive",
+		Args:         []string{"dt", "g"},
+		StartEv:      EvGCMarkAssistBegin,
+		IsTimedEvent: true,
+	},
+	EvGCMarkAssistBegin: event.Spec{
+		Name:         "GCMarkAssistBegin",
+		Args:         []string{"dt", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{1},
+	},
+	EvGCMarkAssistEnd: event.Spec{
+		Name:         "GCMarkAssistEnd",
+		Args:         []string{"dt"},
+		StartEv:      EvGCMarkAssistBegin,
+		IsTimedEvent: true,
+	},
+	EvHeapAlloc: event.Spec{
+		Name:         "HeapAlloc",
+		Args:         []string{"dt", "heapalloc_value"},
+		IsTimedEvent: true,
+	},
+	EvHeapGoal: event.Spec{
+		Name:         "HeapGoal",
+		Args:         []string{"dt", "heapgoal_value"},
+		IsTimedEvent: true,
+	},
+	EvGoLabel: event.Spec{
+		Name:         "GoLabel",
+		Args:         []string{"dt", "label_string"},
+		IsTimedEvent: true,
+		StringIDs:    []int{1},
+	},
+	EvUserTaskBegin: event.Spec{
+		Name:         "UserTaskBegin",
+		Args:         []string{"dt", "task", "parent_task", "name_string", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{4},
+		StringIDs:    []int{3},
+	},
+	EvUserTaskEnd: event.Spec{
+		Name:         "UserTaskEnd",
+		Args:         []string{"dt", "task", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{2},
+	},
+	EvUserRegionBegin: event.Spec{
+		Name:         "UserRegionBegin",
+		Args:         []string{"dt", "task", "name_string", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{3},
+		StringIDs:    []int{2},
+	},
+	EvUserRegionEnd: event.Spec{
+		Name:         "UserRegionEnd",
+		Args:         []string{"dt", "task", "name_string", "stack"},
+		StartEv:      EvUserRegionBegin,
+		IsTimedEvent: true,
+		StackIDs:     []int{3},
+		StringIDs:    []int{2},
+	},
+	EvUserLog: event.Spec{
+		Name:         "UserLog",
+		Args:         []string{"dt", "task", "key_string", "value_string", "stack"},
+		IsTimedEvent: true,
+		StackIDs:     []int{4},
+		StringIDs:    []int{2, 3},
+	},
+}
+
+type GoStatus uint8
+
+const (
+	GoBad GoStatus = iota
+	GoRunnable
+	GoRunning
+	GoSyscall
+	GoWaiting
+)
+
+func (s GoStatus) String() string {
+	switch s {
+	case GoRunnable:
+		return "Runnable"
+	case GoRunning:
+		return "Running"
+	case GoSyscall:
+		return "Syscall"
+	case GoWaiting:
+		return "Waiting"
+	}
+	return "Bad"
+}
+
+type ProcStatus uint8
+
+const (
+	ProcBad ProcStatus = iota
+	ProcRunning
+	ProcIdle
+	ProcSyscall
+	ProcSyscallAbandoned
+)
+
+func (s ProcStatus) String() string {
+	switch s {
+	case ProcRunning:
+		return "Running"
+	case ProcIdle:
+		return "Idle"
+	case ProcSyscall:
+		return "Syscall"
+	}
+	return "Bad"
+}
+
+const (
+	// Various format-specific constants.
+	MaxBatchSize      = 64 << 10
+	MaxFramesPerStack = 128
+	MaxStringSize     = 1 << 10
+)
diff --git a/src/internal/trace/v2/event/requirements.go b/src/internal/trace/v2/event/requirements.go
new file mode 100644
index 0000000..c5adf2e
--- /dev/null
+++ b/src/internal/trace/v2/event/requirements.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package event
+
+// SchedReqs is a set of constraints on what the scheduling
+// context must look like.
+type SchedReqs struct {
+	Thread    Constraint
+	Proc      Constraint
+	Goroutine Constraint
+}
+
+// Constraint represents a presence requirement.
+type Constraint uint8
+
+const (
+	MustNotHave Constraint = iota
+	MayHave
+	MustHave
+)
+
+// UserGoReqs is a common requirement among events that are running
+// or are close to running user code.
+var UserGoReqs = SchedReqs{Thread: MustHave, Proc: MustHave, Goroutine: MustHave}
diff --git a/src/internal/trace/v2/event_test.go b/src/internal/trace/v2/event_test.go
new file mode 100644
index 0000000..c81a451
--- /dev/null
+++ b/src/internal/trace/v2/event_test.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import "testing"
+
+func TestPanicEvent(t *testing.T) {
+	// Use a sync event for this because it doesn't have any extra metadata.
+	ev := syncEvent(nil, 0)
+
+	mustPanic(t, func() {
+		_ = ev.Range()
+	})
+	mustPanic(t, func() {
+		_ = ev.Metric()
+	})
+	mustPanic(t, func() {
+		_ = ev.Log()
+	})
+	mustPanic(t, func() {
+		_ = ev.Task()
+	})
+	mustPanic(t, func() {
+		_ = ev.Region()
+	})
+	mustPanic(t, func() {
+		_ = ev.Label()
+	})
+	mustPanic(t, func() {
+		_ = ev.RangeAttributes()
+	})
+}
+
+func mustPanic(t *testing.T, f func()) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fatal("failed to panic")
+		}
+	}()
+	f()
+}
diff --git a/src/internal/trace/v2/generation.go b/src/internal/trace/v2/generation.go
new file mode 100644
index 0000000..4cdf76e
--- /dev/null
+++ b/src/internal/trace/v2/generation.go
@@ -0,0 +1,399 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"bufio"
+	"bytes"
+	"cmp"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"slices"
+	"strings"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/event/go122"
+)
+
+// generation contains all the trace data for a single
+// trace generation. It is purely data: it does not
+// track any parse state nor does it contain a cursor
+// into the generation.
+type generation struct {
+	gen        uint64
+	batches    map[ThreadID][]batch
+	cpuSamples []cpuSample
+	*evTable
+}
+
+// spilledBatch represents a batch that was read out for the next generation,
+// while reading the previous one. It's passed on when parsing the next
+// generation.
+type spilledBatch struct {
+	gen uint64
+	*batch
+}
+
+// readGeneration buffers and decodes the structural elements of a trace generation
+// out of r. spill is the first batch of the new generation (already buffered and
+// parsed from reading the last generation). Returns the generation and the first
+// batch read of the next generation, if any.
+func readGeneration(r *bufio.Reader, spill *spilledBatch) (*generation, *spilledBatch, error) {
+	g := &generation{
+		evTable: new(evTable),
+		batches: make(map[ThreadID][]batch),
+	}
+	// Process the spilled batch.
+	if spill != nil {
+		g.gen = spill.gen
+		if err := processBatch(g, *spill.batch); err != nil {
+			return nil, nil, err
+		}
+		spill = nil
+	}
+	// Read batches one at a time until we either hit EOF or
+	// the next generation.
+	for {
+		b, gen, err := readBatch(r)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		if gen == 0 {
+			// 0 is a sentinel used by the runtime, so we'll never see it.
+			return nil, nil, fmt.Errorf("invalid generation number %d", gen)
+		}
+		if g.gen == 0 {
+			// Initialize gen.
+			g.gen = gen
+		}
+		if gen == g.gen+1 { // TODO: advance this the same way the runtime does.
+			spill = &spilledBatch{gen: gen, batch: &b}
+			break
+		}
+		if gen != g.gen {
+			// N.B. Fail as fast as possible if we see this. At first it
+			// may seem prudent to be fault-tolerant and assume we have a
+			// complete generation, parsing and returning that first. However,
+			// if the batches are mixed across generations then it's likely
+			// we won't be able to parse this generation correctly at all.
+			// Rather than return a cryptic error in that case, indicate the
+			// problem as soon as we see it.
+			return nil, nil, fmt.Errorf("generations out of order")
+		}
+		if err := processBatch(g, b); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// Check some invariants.
+	if g.freq == 0 {
+		return nil, nil, fmt.Errorf("no frequency event found")
+	}
+	// N.B. Trust that the batch order is correct. We can't validate the batch order
+	// by timestamp because the timestamps could just be plain wrong. The source of
+	// truth is the order things appear in the trace and the partial order sequence
+	// numbers on certain events. If it turns out the batch order is actually incorrect
+	// we'll very likely fail to advance a partial order from the frontier.
+
+	// Compactify stacks and strings for better lookup performance later.
+	g.stacks.compactify()
+	g.strings.compactify()
+
+	// Validate stacks.
+	if err := validateStackStrings(&g.stacks, &g.strings); err != nil {
+		return nil, nil, err
+	}
+
+	// Fix up the CPU sample timestamps, now that we have freq.
+	for i := range g.cpuSamples {
+		s := &g.cpuSamples[i]
+		s.time = g.freq.mul(timestamp(s.time))
+	}
+	// Sort the CPU samples.
+	slices.SortFunc(g.cpuSamples, func(a, b cpuSample) int {
+		return cmp.Compare(a.time, b.time)
+	})
+	return g, spill, nil
+}
+
+// processBatch adds the batch to the generation.
+func processBatch(g *generation, b batch) error {
+	switch {
+	case b.isStringsBatch():
+		if err := addStrings(&g.strings, b); err != nil {
+			return err
+		}
+	case b.isStacksBatch():
+		if err := addStacks(&g.stacks, b); err != nil {
+			return err
+		}
+	case b.isCPUSamplesBatch():
+		samples, err := addCPUSamples(g.cpuSamples, b)
+		if err != nil {
+			return err
+		}
+		g.cpuSamples = samples
+	case b.isFreqBatch():
+		freq, err := parseFreq(b)
+		if err != nil {
+			return err
+		}
+		if g.freq != 0 {
+			return fmt.Errorf("found multiple frequency events")
+		}
+		g.freq = freq
+	default:
+		g.batches[b.m] = append(g.batches[b.m], b)
+	}
+	return nil
+}
+
+// validateStackStrings makes sure all the string references in
+// the stack table are present in the string table.
+func validateStackStrings(stacks *dataTable[stackID, stack], strings *dataTable[stringID, string]) error {
+	var err error
+	stacks.forEach(func(id stackID, stk stack) bool {
+		for _, frame := range stk.frames {
+			_, ok := strings.get(frame.funcID)
+			if !ok {
+				err = fmt.Errorf("found invalid func string ID %d for stack %d", frame.funcID, id)
+				return false
+			}
+			_, ok = strings.get(frame.fileID)
+			if !ok {
+				err = fmt.Errorf("found invalid file string ID %d for stack %d", frame.fileID, id)
+				return false
+			}
+		}
+		return true
+	})
+	return err
+}
+
+// addStrings takes a batch whose first byte is an EvStrings event
+// (indicating that the batch contains only strings) and adds each
+// string contained therein to the provided strings map.
+func addStrings(stringTable *dataTable[stringID, string], b batch) error {
+	if !b.isStringsBatch() {
+		return fmt.Errorf("internal error: addStrings called on non-string batch")
+	}
+	r := bytes.NewReader(b.data)
+	hdr, err := r.ReadByte() // Consume the EvStrings byte.
+	if err != nil || event.Type(hdr) != go122.EvStrings {
+		return fmt.Errorf("missing strings batch header")
+	}
+
+	var sb strings.Builder
+	for r.Len() != 0 {
+		// Read the header.
+		ev, err := r.ReadByte()
+		if err != nil {
+			return err
+		}
+		if event.Type(ev) != go122.EvString {
+			return fmt.Errorf("expected string event, got %d", ev)
+		}
+
+		// Read the string's ID.
+		id, err := binary.ReadUvarint(r)
+		if err != nil {
+			return err
+		}
+
+		// Read the string's length.
+		len, err := binary.ReadUvarint(r)
+		if err != nil {
+			return err
+		}
+		if len > go122.MaxStringSize {
+			return fmt.Errorf("invalid string size %d, maximum is %d", len, go122.MaxStringSize)
+		}
+
+		// Copy out the string.
+		n, err := io.CopyN(&sb, r, int64(len))
+		if n != int64(len) {
+			return fmt.Errorf("failed to read full string: read %d but wanted %d", n, len)
+		}
+		if err != nil {
+			return fmt.Errorf("copying string data: %w", err)
+		}
+
+		// Add the string to the map.
+		s := sb.String()
+		sb.Reset()
+		if err := stringTable.insert(stringID(id), s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// addStacks takes a batch whose first byte is an EvStacks event
+// (indicating that the batch contains only stacks) and adds each
+// stack contained therein to the provided stacks map.
+func addStacks(stackTable *dataTable[stackID, stack], b batch) error {
+	if !b.isStacksBatch() {
+		return fmt.Errorf("internal error: addStacks called on non-stacks batch")
+	}
+	r := bytes.NewReader(b.data)
+	hdr, err := r.ReadByte() // Consume the EvStacks byte.
+	if err != nil || event.Type(hdr) != go122.EvStacks {
+		return fmt.Errorf("missing stacks batch header")
+	}
+
+	for r.Len() != 0 {
+		// Read the header.
+		ev, err := r.ReadByte()
+		if err != nil {
+			return err
+		}
+		if event.Type(ev) != go122.EvStack {
+			return fmt.Errorf("expected stack event, got %d", ev)
+		}
+
+		// Read the stack's ID.
+		id, err := binary.ReadUvarint(r)
+		if err != nil {
+			return err
+		}
+
+		// Read how many frames are in each stack.
+		nFrames, err := binary.ReadUvarint(r)
+		if err != nil {
+			return err
+		}
+		if nFrames > go122.MaxFramesPerStack {
+			return fmt.Errorf("invalid stack size %d, maximum is %d", nFrames, go122.MaxFramesPerStack)
+		}
+
+		// Each frame consists of 4 fields: pc, funcID (string), fileID (string), line.
+		frames := make([]frame, 0, nFrames)
+		for i := uint64(0); i < nFrames; i++ {
+			// Read the frame data.
+			pc, err := binary.ReadUvarint(r)
+			if err != nil {
+				return fmt.Errorf("reading frame %d's PC for stack %d: %w", i+1, id, err)
+			}
+			funcID, err := binary.ReadUvarint(r)
+			if err != nil {
+				return fmt.Errorf("reading frame %d's funcID for stack %d: %w", i+1, id, err)
+			}
+			fileID, err := binary.ReadUvarint(r)
+			if err != nil {
+				return fmt.Errorf("reading frame %d's fileID for stack %d: %w", i+1, id, err)
+			}
+			line, err := binary.ReadUvarint(r)
+			if err != nil {
+				return fmt.Errorf("reading frame %d's line for stack %d: %w", i+1, id, err)
+			}
+			frames = append(frames, frame{
+				pc:     pc,
+				funcID: stringID(funcID),
+				fileID: stringID(fileID),
+				line:   line,
+			})
+		}
+
+		// Add the stack to the map.
+		if err := stackTable.insert(stackID(id), stack{frames: frames}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// addCPUSamples takes a batch whose first byte is an EvCPUSamples event
+// (indicating that the batch contains only CPU samples) and adds each
+// sample contained therein to the provided samples list.
+func addCPUSamples(samples []cpuSample, b batch) ([]cpuSample, error) {
+	if !b.isCPUSamplesBatch() {
+		return nil, fmt.Errorf("internal error: addStrings called on non-string batch")
+	}
+	r := bytes.NewReader(b.data)
+	hdr, err := r.ReadByte() // Consume the EvCPUSamples byte.
+	if err != nil || event.Type(hdr) != go122.EvCPUSamples {
+		return nil, fmt.Errorf("missing CPU samples batch header")
+	}
+
+	for r.Len() != 0 {
+		// Read the header.
+		ev, err := r.ReadByte()
+		if err != nil {
+			return nil, err
+		}
+		if event.Type(ev) != go122.EvCPUSample {
+			return nil, fmt.Errorf("expected CPU sample event, got %d", ev)
+		}
+
+		// Read the sample's timestamp.
+		ts, err := binary.ReadUvarint(r)
+		if err != nil {
+			return nil, err
+		}
+
+		// Read the sample's M.
+		m, err := binary.ReadUvarint(r)
+		if err != nil {
+			return nil, err
+		}
+		mid := ThreadID(m)
+
+		// Read the sample's P.
+		p, err := binary.ReadUvarint(r)
+		if err != nil {
+			return nil, err
+		}
+		pid := ProcID(p)
+
+		// Read the sample's G.
+		g, err := binary.ReadUvarint(r)
+		if err != nil {
+			return nil, err
+		}
+		goid := GoID(g)
+		if g == 0 {
+			goid = NoGoroutine
+		}
+
+		// Read the sample's stack.
+		s, err := binary.ReadUvarint(r)
+		if err != nil {
+			return nil, err
+		}
+
+		// Add the sample to the slice.
+		samples = append(samples, cpuSample{
+			schedCtx: schedCtx{
+				M: mid,
+				P: pid,
+				G: goid,
+			},
+			time:  Time(ts), // N.B. this is really a "timestamp," not a Time.
+			stack: stackID(s),
+		})
+	}
+	return samples, nil
+}
+
+// parseFreq parses out a lone EvFrequency from a batch.
+func parseFreq(b batch) (frequency, error) {
+	if !b.isFreqBatch() {
+		return 0, fmt.Errorf("internal error: parseFreq called on non-frequency batch")
+	}
+	r := bytes.NewReader(b.data)
+	r.ReadByte() // Consume the EvFrequency byte.
+
+	// Read the frequency. It'll come out as timestamp units per second.
+	f, err := binary.ReadUvarint(r)
+	if err != nil {
+		return 0, err
+	}
+	// Convert to nanoseconds per timestamp unit.
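+	// For example, a frequency of 15625000 units per second corresponds to
+	// 1e9/15625000 = 64 nanoseconds per unit.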
+	return frequency(1.0 / (float64(f) / 1e9)), nil
+}
diff --git a/src/internal/trace/v2/internal/testgen/go122/trace.go b/src/internal/trace/v2/internal/testgen/go122/trace.go
new file mode 100644
index 0000000..42bb403
--- /dev/null
+++ b/src/internal/trace/v2/internal/testgen/go122/trace.go
@@ -0,0 +1,401 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testkit
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"os"
+	"regexp"
+	"strings"
+
+	"internal/trace/v2"
+	"internal/trace/v2/event"
+	"internal/trace/v2/event/go122"
+	"internal/trace/v2/raw"
+	"internal/trace/v2/version"
+	"internal/txtar"
+)
+
+func Main(f func(*Trace)) {
+	// Create an output file.
+	out, err := os.Create(os.Args[1])
+	if err != nil {
+		panic(err.Error())
+	}
+	defer out.Close()
+
+	// Create a new trace.
+	trace := NewTrace()
+
+	// Call the generator.
+	f(trace)
+
+	// Write out the generator's state.
+	if _, err := out.Write(trace.Generate()); err != nil {
+		panic(err.Error())
+	}
+}
+
+// Trace represents an execution trace for testing.
+//
+// It does a little bit of work to ensure that the produced trace is valid,
+// just for convenience. It mainly tracks batches and batch sizes (so they're
+// trivially correct), tracks strings and stacks, and makes sure emitted string
+// and stack batches are valid. That last part can be controlled by a few options.
+//
+// Otherwise, it performs no validation on the trace at all.
+type Trace struct {
+	// Trace data state.
+	ver             version.Version
+	names           map[string]event.Type
+	specs           []event.Spec
+	events          []raw.Event
+	gens            []*Generation
+	validTimestamps bool
+
+	// Expectation state.
+	bad      bool
+	badMatch *regexp.Regexp
+}
+
+// NewTrace creates a new trace.
+func NewTrace() *Trace {
+	ver := version.Go122
+	return &Trace{
+		names:           event.Names(ver.Specs()),
+		specs:           ver.Specs(),
+		validTimestamps: true,
+	}
+}
+
+// ExpectFailure writes down that the trace should be broken. The caller
+// must provide a pattern matching the expected error produced by the parser.
+func (t *Trace) ExpectFailure(pattern string) {
+	t.bad = true
+	t.badMatch = regexp.MustCompile(pattern)
+}
+
+// ExpectSuccess writes down that the trace should successfully parse.
+func (t *Trace) ExpectSuccess() {
+	t.bad = false
+}
+
+// RawEvent emits an event into the trace. typ must correspond to one
+// of the event types in the Specs() result for the version that was
+// passed to this trace.
+func (t *Trace) RawEvent(typ event.Type, data []byte, args ...uint64) {
+	t.events = append(t.events, t.createEvent(typ, data, args...))
+}
+
+// DisableTimestamps makes the timestamps for all events generated after
+// this call zero. Raw events are exempted from this because the caller
+// has to pass their own timestamp into those events anyway.
+func (t *Trace) DisableTimestamps() {
+	t.validTimestamps = false
+}
+
+// Generation creates a new trace generation.
+//
+// This provides more structure than Event, making it easier to create
+// complex traces that are mostly or completely correct.
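+//
+// A hedged sketch of how a test generator might use it (t is assumed to be a
+// *Trace passed to a Main callback; the event name and argument values here
+// are purely illustrative):
+//
+//	g := t.Generation(1)
+//	b := g.Batch(trace.ThreadID(0), 0)
+//	b.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)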
+func (t *Trace) Generation(gen uint64) *Generation {
+	g := &Generation{
+		trace:   t,
+		gen:     gen,
+		strings: make(map[string]uint64),
+		stacks:  make(map[stack]uint64),
+	}
+	t.gens = append(t.gens, g)
+	return g
+}
+
+// Generate creates a test file for the trace.
+func (t *Trace) Generate() []byte {
+	// Trace file contents.
+	var buf bytes.Buffer
+	tw, err := raw.NewTextWriter(&buf, version.Go122)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	// Write raw top-level events.
+	for _, e := range t.events {
+		tw.WriteEvent(e)
+	}
+
+	// Write generations.
+	for _, g := range t.gens {
+		g.writeEventsTo(tw)
+	}
+
+	// Expectation file contents.
+	expect := []byte("SUCCESS\n")
+	if t.bad {
+		expect = []byte(fmt.Sprintf("FAILURE %q\n", t.badMatch))
+	}
+
+	// Create the test file's contents.
+	return txtar.Format(&txtar.Archive{
+		Files: []txtar.File{
+			{Name: "expect", Data: expect},
+			{Name: "trace", Data: buf.Bytes()},
+		},
+	})
+}
+
+func (t *Trace) createEvent(ev event.Type, data []byte, args ...uint64) raw.Event {
+	spec := t.specs[ev]
+	if ev != go122.EvStack {
+		if arity := len(spec.Args); len(args) != arity {
+			panic(fmt.Sprintf("expected %d args for %s, got %d", arity, spec.Name, len(args)))
+		}
+	}
+	return raw.Event{
+		Version: version.Go122,
+		Ev:      ev,
+		Args:    args,
+		Data:    data,
+	}
+}
+
+type stack struct {
+	stk [32]trace.StackFrame
+	len int
+}
+
+var (
+	NoString = ""
+	NoStack  = []trace.StackFrame{}
+)
+
+// Generation represents a single generation in the trace.
+type Generation struct {
+	trace   *Trace
+	gen     uint64
+	batches []*Batch
+	strings map[string]uint64
+	stacks  map[stack]uint64
+
+	// Options applied when Trace.Generate is called.
+	ignoreStringBatchSizeLimit bool
+	ignoreStackBatchSizeLimit  bool
+}
+
+// Batch starts a new event batch in the trace data.
+//
+// This is a convenience function for generating correct batches.
+func (g *Generation) Batch(thread trace.ThreadID, time Time) *Batch {
+	if !g.trace.validTimestamps {
+		time = 0
+	}
+	b := &Batch{
+		gen:       g,
+		thread:    thread,
+		timestamp: time,
+	}
+	g.batches = append(g.batches, b)
+	return b
+}
+
+// String registers a string with the trace.
+//
+// This is a convenience function for easily adding correct
+// strings to traces.
+func (g *Generation) String(s string) uint64 {
+	if len(s) == 0 {
+		return 0
+	}
+	if id, ok := g.strings[s]; ok {
+		return id
+	}
+	id := uint64(len(g.strings) + 1)
+	g.strings[s] = id
+	return id
+}
+
+// Stack registers a stack with the trace.
+//
+// This is a convenience function for easily adding correct
+// stacks to traces.
+func (g *Generation) Stack(stk []trace.StackFrame) uint64 {
+	if len(stk) == 0 {
+		return 0
+	}
+	if len(stk) > 32 {
+		panic("stack too big for test")
+	}
+	var stkc stack
+	copy(stkc.stk[:], stk)
+	stkc.len = len(stk)
+	if id, ok := g.stacks[stkc]; ok {
+		return id
+	}
+	id := uint64(len(g.stacks) + 1)
+	g.stacks[stkc] = id
+	return id
+}
+
+// writeEventsTo emits event batches in the generation to tw.
+func (g *Generation) writeEventsTo(tw *raw.TextWriter) {
+	// Write event batches for the generation.
+	for _, b := range g.batches {
+		b.writeEventsTo(tw)
+	}
+
+	// Write frequency.
+	b := g.newStructuralBatch()
+	b.RawEvent(go122.EvFrequency, nil, 15625000)
+	b.writeEventsTo(tw)
+
+	// Write stacks.
+	b = g.newStructuralBatch()
+	b.RawEvent(go122.EvStacks, nil)
+	for stk, id := range g.stacks {
+		stk := stk.stk[:stk.len]
+		args := []uint64{id}
+		for _, f := range stk {
+			args = append(args, f.PC, g.String(f.Func), g.String(f.File), f.Line)
+		}
+		b.RawEvent(go122.EvStack, nil, args...)
+
+		// Flush the batch if necessary.
+		if !g.ignoreStackBatchSizeLimit && b.size > go122.MaxBatchSize/2 {
+			b.writeEventsTo(tw)
+			b = g.newStructuralBatch()
+		}
+	}
+	b.writeEventsTo(tw)
+
+	// Write strings.
+	b = g.newStructuralBatch()
+	b.RawEvent(go122.EvStrings, nil)
+	for s, id := range g.strings {
+		b.RawEvent(go122.EvString, []byte(s), id)
+
+		// Flush the batch if necessary.
+		if !g.ignoreStringBatchSizeLimit && b.size > go122.MaxBatchSize/2 {
+			b.writeEventsTo(tw)
+			b = g.newStructuralBatch()
+		}
+	}
+	b.writeEventsTo(tw)
+}
+
+func (g *Generation) newStructuralBatch() *Batch {
+	return &Batch{gen: g, thread: trace.NoThread}
+}
+
+// Batch represents an event batch.
+type Batch struct {
+	gen       *Generation
+	thread    trace.ThreadID
+	timestamp Time
+	size      uint64
+	events    []raw.Event
+}
+
+// Event emits an event into a batch. name must correspond to one
+// of the names in Specs() result for the version that was passed to
+// this trace. Callers must omit the timestamp delta.
+func (b *Batch) Event(name string, args ...any) {
+	ev, ok := b.gen.trace.names[name]
+	if !ok {
+		panic(fmt.Sprintf("invalid or unknown event %s", name))
+	}
+	var uintArgs []uint64
+	argOff := 0
+	if b.gen.trace.specs[ev].IsTimedEvent {
+		if b.gen.trace.validTimestamps {
+			uintArgs = []uint64{1}
+		} else {
+			uintArgs = []uint64{0}
+		}
+		argOff = 1
+	}
+	spec := b.gen.trace.specs[ev]
+	if arity := len(spec.Args) - argOff; len(args) != arity {
+		panic(fmt.Sprintf("expected %d args for %s, got %d", arity, spec.Name, len(args)))
+	}
+	for i, arg := range args {
+		uintArgs = append(uintArgs, b.uintArgFor(arg, spec.Args[i+argOff]))
+	}
+	b.RawEvent(ev, nil, uintArgs...)
+}
+
+func (b *Batch) uintArgFor(arg any, argSpec string) uint64 {
+	components := strings.SplitN(argSpec, "_", 2)
+	typStr := components[0]
+	if len(components) == 2 {
+		typStr = components[1]
+	}
+	var u uint64
+	switch typStr {
+	case "value":
+		u = arg.(uint64)
+	case "stack":
+		u = b.gen.Stack(arg.([]trace.StackFrame))
+	case "seq":
+		u = uint64(arg.(Seq))
+	case "pstatus":
+		u = uint64(arg.(go122.ProcStatus))
+	case "gstatus":
+		u = uint64(arg.(go122.GoStatus))
+	case "g":
+		u = uint64(arg.(trace.GoID))
+	case "m":
+		u = uint64(arg.(trace.ThreadID))
+	case "p":
+		u = uint64(arg.(trace.ProcID))
+	case "string":
+		u = b.gen.String(arg.(string))
+	case "task":
+		u = uint64(arg.(trace.TaskID))
+	default:
+		panic(fmt.Sprintf("unsupported arg type %q for spec %q", typStr, argSpec))
+	}
+	return u
+}
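+
+// For example (illustrative, not part of the upstream change): an argument
+// spec such as "new_g" selects the "g" case above, because the suffix after
+// '_' names the type, so the caller passes a trace.GoID; a spec with no
+// underscore, such as "stack", is used as-is and expects a []trace.StackFrame.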
+
+// RawEvent emits an event into a batch, bypassing the name lookup that Event
+// performs. typ must be a valid event type for the version that was passed to
+// this trace, and args are passed through as-is (including the timestamp delta
+// for timed events).
+func (b *Batch) RawEvent(typ event.Type, data []byte, args ...uint64) {
+	ev := b.gen.trace.createEvent(typ, data, args...)
+
+	// Compute the size of the event and add it to the batch.
+	b.size += 1 // One byte for the event header.
+	var buf [binary.MaxVarintLen64]byte
+	for _, arg := range args {
+		b.size += uint64(binary.PutUvarint(buf[:], arg))
+	}
+	if len(data) != 0 {
+		b.size += uint64(binary.PutUvarint(buf[:], uint64(len(data))))
+		b.size += uint64(len(data))
+	}
+
+	// Add the event.
+	b.events = append(b.events, ev)
+}
+
+// writeEventsTo emits events in the batch, including the batch header, to tw.
+func (b *Batch) writeEventsTo(tw *raw.TextWriter) {
+	tw.WriteEvent(raw.Event{
+		Version: version.Go122,
+		Ev:      go122.EvEventBatch,
+		Args:    []uint64{b.gen.gen, uint64(b.thread), uint64(b.timestamp), b.size},
+	})
+	for _, e := range b.events {
+		tw.WriteEvent(e)
+	}
+}
+
+// Seq represents a sequence counter.
+type Seq uint64
+
+// Time represents a low-level trace timestamp (which does not necessarily
+// correspond to nanoseconds, like trace.Time does).
+type Time uint64
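+
+// buildMinimalTrace is a hedged sketch (not part of the upstream change) of
+// how the helpers above compose: one generation, one batch on thread 0, a
+// single raw event, and the final txtar-formatted test file. The specific
+// event and its argument are illustrative only; a realistic test would use
+// Batch.Event with the go122 event names.
+func buildMinimalTrace(t *Trace) []byte {
+	g := t.Generation(1)
+	b := g.Batch(trace.ThreadID(0), Time(0))
+	b.RawEvent(go122.EvFrequency, nil, 15625000)
+	return t.Generate()
+}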
diff --git a/src/internal/trace/v2/mkexp.bash b/src/internal/trace/v2/mkexp.bash
new file mode 100755
index 0000000..8a73719
--- /dev/null
+++ b/src/internal/trace/v2/mkexp.bash
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+# Copyright 2023 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This script copies this directory to golang.org/x/exp/trace.
+# Just point it at a golang.org/x/exp checkout.
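+#
+# For example (hypothetical path, shown for illustration only):
+#
+#   ./mkexp.bash "$HOME/src/golang.org/x/exp"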
+
+set -e
+if [ ! -f mkexp.bash ]; then
+	echo 'mkexp.bash must be run from $GOROOT/src/internal/trace/v2' 1>&2
+	exit 1
+fi
+
+if [ "$#" -ne 1 ]; then
+	echo 'mkexp.bash expects one argument: a path to a golang.org/x/exp git checkout' 1>&2
+	exit 1
+fi
+
+# Copy.
+mkdir -p $1/trace
+cp -r ./* $1/trace
+
+# Cleanup.
+
+# Delete mkexp.bash.
+rm $1/trace/mkexp.bash
+
+# Move tools to cmd. Can't be cmd here because dist will try to build them.
+mv $1/trace/tools $1/trace/cmd
+
+# Make some packages internal.
+mv $1/trace/raw $1/trace/internal/raw
+mv $1/trace/event $1/trace/internal/event
+mv $1/trace/version $1/trace/internal/version
+mv $1/trace/testtrace $1/trace/internal/testtrace
+
+# Move the debug commands out of testdata.
+mv $1/trace/testdata/cmd $1/trace/cmd
+
+# Fix up import paths.
+find $1/trace -name '*.go' | xargs -- sed -i 's/internal\/trace\/v2/golang.org\/x\/exp\/trace/'
+find $1/trace -name '*.go' | xargs -- sed -i 's/golang.org\/x\/exp\/trace\/raw/golang.org\/x\/exp\/trace\/internal\/raw/'
+find $1/trace -name '*.go' | xargs -- sed -i 's/golang.org\/x\/exp\/trace\/event/golang.org\/x\/exp\/trace\/internal\/event/'
+find $1/trace -name '*.go' | xargs -- sed -i 's/golang.org\/x\/exp\/trace\/event\/go122/golang.org\/x\/exp\/trace\/internal\/event\/go122/'
+find $1/trace -name '*.go' | xargs -- sed -i 's/golang.org\/x\/exp\/trace\/version/golang.org\/x\/exp\/trace\/internal\/version/'
+find $1/trace -name '*.go' | xargs -- sed -i 's/golang.org\/x\/exp\/trace\/testtrace/golang.org\/x\/exp\/trace\/internal\/testtrace/'
+
+# Format the files.
+find $1/trace -name '*.go' | xargs -- gofmt -w -s
diff --git a/src/internal/trace/v2/order.go b/src/internal/trace/v2/order.go
new file mode 100644
index 0000000..cedb297
--- /dev/null
+++ b/src/internal/trace/v2/order.go
@@ -0,0 +1,1094 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"fmt"
+	"strings"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/event/go122"
+	"internal/trace/v2/version"
+)
+
+// ordering emulates Go scheduler state for both validation and
+// for putting events in the right order.
+type ordering struct {
+	gStates     map[GoID]*gState
+	pStates     map[ProcID]*pState // TODO: The keys are dense, so this can be a slice.
+	mStates     map[ThreadID]*mState
+	activeTasks map[TaskID]taskState
+	gcSeq       uint64
+	gcState     gcState
+	initialGen  uint64
+
+	// Some events like GoDestroySyscall produce two events instead of one.
+	// extraEvent is this extra space. advance must not be called unless
+	// the extraEvent has been consumed with consumeExtraEvent.
+	//
+	// TODO(mknyszek): Replace this with a more formal queue.
+	extraEvent Event
+}
+
+// consumeExtraEvent consumes the extra event.
+func (o *ordering) consumeExtraEvent() Event {
+	if o.extraEvent.Kind() == EventBad {
+		return Event{}
+	}
+	r := o.extraEvent
+	o.extraEvent = Event{}
+	return r
+}
+
+// advance checks if it's valid to proceed with ev which came from thread m.
+//
+// Returns the schedCtx at the point of the event, whether it's OK to advance
+// with this event, and any error encountered in validation.
+//
+// It assumes the gen value passed to it is monotonically increasing across calls.
+//
+// If any error is returned, then the trace is broken and trace parsing must cease.
+// If it's not valid to advance with ev, but no error was encountered, the caller
+// should attempt to advance with other candidate events from other threads. If the
+// caller runs out of candidates, the trace is invalid.
+func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) (schedCtx, bool, error) {
+	if o.initialGen == 0 {
+		// Set the initial gen if necessary.
+		o.initialGen = gen
+	}
+
+	var curCtx, newCtx schedCtx
+	curCtx.M = m
+	newCtx.M = m
+
+	if m == NoThread {
+		curCtx.P = NoProc
+		curCtx.G = NoGoroutine
+		newCtx = curCtx
+	} else {
+		// Pull out or create the mState for this event.
+		ms, ok := o.mStates[m]
+		if !ok {
+			ms = &mState{
+				g: NoGoroutine,
+				p: NoProc,
+			}
+			o.mStates[m] = ms
+		}
+		curCtx.P = ms.p
+		curCtx.G = ms.g
+		newCtx = curCtx
+		defer func() {
+			// Update the mState for this event.
+			ms.p = newCtx.P
+			ms.g = newCtx.G
+		}()
+	}
+
+	switch typ := ev.typ; typ {
+	// Handle procs.
+	case go122.EvProcStatus:
+		pid := ProcID(ev.args[0])
+		status := go122.ProcStatus(ev.args[1])
+		if int(status) >= len(go122ProcStatus2ProcState) {
+			return curCtx, false, fmt.Errorf("invalid status for proc %d: %d", pid, status)
+		}
+		oldState := go122ProcStatus2ProcState[status]
+		if s, ok := o.pStates[pid]; ok {
+			if status == go122.ProcSyscallAbandoned && s.status == go122.ProcSyscall {
+				// ProcSyscallAbandoned is a special case of ProcSyscall. It indicates a
+				// potential loss of information, but if we're already in ProcSyscall,
+				// we haven't lost the relevant information. Promote the status and advance.
+				oldState = ProcRunning
+				ev.args[1] = uint64(go122.ProcSyscall)
+			} else if status == go122.ProcSyscallAbandoned && s.status == go122.ProcSyscallAbandoned {
+				// If we're passing through ProcSyscallAbandoned, then there's no promotion
+				// to do. We've lost the M that this P is associated with. However it got there,
+				// it's going to appear as idle in the API, so pass through as idle.
+				oldState = ProcIdle
+				ev.args[1] = uint64(go122.ProcSyscallAbandoned)
+			} else if s.status != status {
+				return curCtx, false, fmt.Errorf("inconsistent status for proc %d: old %v vs. new %v", pid, s.status, status)
+			}
+			s.seq = makeSeq(gen, 0) // Reset seq.
+		} else {
+			o.pStates[pid] = &pState{id: pid, status: status, seq: makeSeq(gen, 0)}
+			if gen == o.initialGen {
+				oldState = ProcUndetermined
+			} else {
+				oldState = ProcNotExist
+			}
+		}
+		ev.extra(version.Go122)[0] = uint64(oldState) // Smuggle in the old state for StateTransition.
+
+		// Bind the proc to the new context, if it's running.
+		if status == go122.ProcRunning || status == go122.ProcSyscall {
+			newCtx.P = pid
+		}
+		// If we're advancing through ProcSyscallAbandoned *but* oldState is running then we've
+		// promoted it to ProcSyscall. However, because it's ProcSyscallAbandoned, we know this
+		// P is about to get stolen and its status very likely isn't being emitted by the same
+		// thread it was bound to. Since this status is Running -> Running and Running is binding,
+		// we need to make sure we emit it in the right context: the context to which it is bound.
+		// Find it, and set our current context to it.
+		if status == go122.ProcSyscallAbandoned && oldState == ProcRunning {
+			// N.B. This is slow but it should be fairly rare.
+			found := false
+			for mid, ms := range o.mStates {
+				if ms.p == pid {
+					curCtx.M = mid
+					curCtx.P = pid
+					curCtx.G = ms.g
+					found = true
+				}
+			}
+			if !found {
+				return curCtx, false, fmt.Errorf("failed to find sched context for proc %d that's about to be stolen", pid)
+			}
+		}
+		return curCtx, true, nil
+	case go122.EvProcStart:
+		pid := ProcID(ev.args[0])
+		seq := makeSeq(gen, ev.args[1])
+
+		// Try to advance. We might fail here due to sequencing, because the P hasn't
+		// had a status emitted, or because we already have a P and we're in a syscall,
+		// and we haven't observed that it was stolen from us yet.
+		state, ok := o.pStates[pid]
+		if !ok || state.status != go122.ProcIdle || !seq.succeeds(state.seq) || curCtx.P != NoProc {
+			// We can't make an inference as to whether this is bad. We could just be seeing
+			// a ProcStart on a different M before the proc's state was emitted, or before we
+			// got to the right point in the trace.
+			//
+			// Note that we also don't advance here if we have a P and we're in a syscall.
+			return curCtx, false, nil
+		}
+		// We can advance this P. Check some invariants.
+		//
+		// We might have a goroutine if a goroutine is exiting a syscall.
+		reqs := event.SchedReqs{Thread: event.MustHave, Proc: event.MustNotHave, Goroutine: event.MayHave}
+		if err := validateCtx(curCtx, reqs); err != nil {
+			return curCtx, false, err
+		}
+		state.status = go122.ProcRunning
+		state.seq = seq
+		newCtx.P = pid
+		return curCtx, true, nil
+	case go122.EvProcStop:
+		// We must be able to advance this P.
+		//
+		// There are 2 ways a P can stop: ProcStop and ProcSteal. ProcStop is used when the P
+		// is stopped by the same M that started it, while ProcSteal is used when another M
+		// steals the P by stopping it from a distance.
+		//
+		// Since a P is bound to an M, and we're stopping on the same M we started, it must
+		// always be possible to advance the current M's P from a ProcStop. This is also why
+		// ProcStop doesn't need a sequence number.
+		state, ok := o.pStates[curCtx.P]
+		if !ok {
+			return curCtx, false, fmt.Errorf("event %s for proc (%v) that doesn't exist", go122.EventString(typ), curCtx.P)
+		}
+		if state.status != go122.ProcRunning && state.status != go122.ProcSyscall {
+			return curCtx, false, fmt.Errorf("%s event for proc that's not %s or %s", go122.EventString(typ), go122.ProcRunning, go122.ProcSyscall)
+		}
+		reqs := event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MayHave}
+		if err := validateCtx(curCtx, reqs); err != nil {
+			return curCtx, false, err
+		}
+		state.status = go122.ProcIdle
+		newCtx.P = NoProc
+		return curCtx, true, nil
+	case go122.EvProcSteal:
+		pid := ProcID(ev.args[0])
+		seq := makeSeq(gen, ev.args[1])
+		state, ok := o.pStates[pid]
+		if !ok || (state.status != go122.ProcSyscall && state.status != go122.ProcSyscallAbandoned) || !seq.succeeds(state.seq) {
+			// We can't make an inference as to whether this is bad. We could just be seeing
+			// a ProcStart on a different M before the proc's state was emitted, or before we
+			// got to the right point in the trace.
+			return curCtx, false, nil
+		}
+		// We can advance this P. Check some invariants.
+		reqs := event.SchedReqs{Thread: event.MustHave, Proc: event.MayHave, Goroutine: event.MayHave}
+		if err := validateCtx(curCtx, reqs); err != nil {
+			return curCtx, false, err
+		}
+		// Smuggle in the P state that let us advance so we can surface information to the event.
+		// Specifically, we need to make sure that the event is interpreted not as a transition of
+		// ProcRunning -> ProcIdle but ProcIdle -> ProcIdle instead.
+		//
+		// ProcRunning is binding, but we may be running with a P on the current M and we can't
+		// bind another P. This P is about to go ProcIdle anyway.
+		oldStatus := state.status
+		ev.extra(version.Go122)[0] = uint64(oldStatus)
+
+		// Update the P's status and sequence number.
+		state.status = go122.ProcIdle
+		state.seq = seq
+
+		// If we've lost information then don't try to do anything with the M.
+		// It may have moved on and we can't be sure.
+		if oldStatus == go122.ProcSyscallAbandoned {
+			return curCtx, true, nil
+		}
+
+		// Validate that the M we're stealing from is what we expect.
+		mid := ThreadID(ev.args[2]) // The M we're stealing from.
+
+		if mid == curCtx.M {
+			// We're stealing from ourselves. This behaves like a ProcStop.
+			if curCtx.P != pid {
+				return curCtx, false, fmt.Errorf("tried to self-steal proc %d (thread %d), but got proc %d instead", pid, mid, curCtx.P)
+			}
+			newCtx.P = NoProc
+			return curCtx, true, nil
+		}
+
+		// We're stealing from some other M.
+		mState, ok := o.mStates[mid]
+		if !ok {
+			return curCtx, false, fmt.Errorf("stole proc from non-existent thread %d", mid)
+		}
+
+		// Make sure we're actually stealing the right P.
+		if mState.p != pid {
+			return curCtx, false, fmt.Errorf("tried to steal proc %d from thread %d, but got proc %d instead", pid, mid, mState.p)
+		}
+
+		// Tell the M it has no P so it can proceed.
+		//
+		// This is safe because we know the P was in a syscall and
+		// the other M must be trying to get out of the syscall.
+		// GoSyscallEndBlocked cannot advance until the corresponding
+		// M loses its P.
+		mState.p = NoProc
+		return curCtx, true, nil
+
+	// Handle goroutines.
+	case go122.EvGoStatus:
+		gid := GoID(ev.args[0])
+		mid := ThreadID(ev.args[1])
+		status := go122.GoStatus(ev.args[2])
+
+		if int(status) >= len(go122GoStatus2GoState) {
+			return curCtx, false, fmt.Errorf("invalid status for goroutine %d: %d", gid, status)
+		}
+		oldState := go122GoStatus2GoState[status]
+		if s, ok := o.gStates[gid]; ok {
+			if s.status != status {
+				return curCtx, false, fmt.Errorf("inconsistent status for goroutine %d: old %v vs. new %v", gid, s.status, status)
+			}
+			s.seq = makeSeq(gen, 0) // Reset seq.
+		} else if gen == o.initialGen {
+			// Set the state.
+			o.gStates[gid] = &gState{id: gid, status: status, seq: makeSeq(gen, 0)}
+			oldState = GoUndetermined
+		} else {
+			return curCtx, false, fmt.Errorf("found goroutine status for new goroutine after the first generation: id=%v status=%v", gid, status)
+		}
+		ev.extra(version.Go122)[0] = uint64(oldState) // Smuggle in the old state for StateTransition.
+
+		switch status {
+		case go122.GoRunning:
+			// Bind the goroutine to the new context, since it's running.
+			newCtx.G = gid
+		case go122.GoSyscall:
+			if mid == NoThread {
+				return curCtx, false, fmt.Errorf("found goroutine %d in syscall without a thread", gid)
+			}
+			// Is the syscall on this thread? If so, bind it to the context.
+			// Otherwise, we're talking about a G sitting in a syscall on an M.
+			// Validate the named M.
+			if mid == curCtx.M {
+				if gen != o.initialGen && curCtx.G != gid {
+					// If this isn't the first generation, we *must* have seen this
+					// binding occur already. Even if the G was blocked in a syscall
+					// for multiple generations since trace start, we would have seen
+					// a previous GoStatus event that bound the goroutine to an M.
+					return curCtx, false, fmt.Errorf("inconsistent thread for syscalling goroutine %d: thread has goroutine %d", gid, curCtx.G)
+				}
+				newCtx.G = gid
+				break
+			}
+			// Now we're talking about a thread and goroutine that have been
+			// blocked on a syscall for the entire generation. This case must
+			// not have a P; the runtime makes sure that all Ps are traced at
+			// the beginning of a generation, which involves taking a P back
+			// from every thread.
+			ms, ok := o.mStates[mid]
+			if ok {
+				// This M has been seen. That means we must have seen this
+				// goroutine go into a syscall on this thread at some point.
+				if ms.g != gid {
+					// But the G on the M doesn't match. Something's wrong.
+					return curCtx, false, fmt.Errorf("inconsistent thread for syscalling goroutine %d: thread has goroutine %d", gid, ms.g)
+				}
+				// This case is just a Syscall->Syscall event, which needs to
+				// appear as having the G currently bound to this M.
+				curCtx.G = ms.g
+			} else if !ok {
+				// The M hasn't been seen yet. That means this goroutine
+				// has just been sitting in a syscall on this M. Create
+				// a state for it.
+				o.mStates[mid] = &mState{g: gid, p: NoProc}
+				// Don't set curCtx.G in this case because this event is the
+				// binding event (and curCtx represents the "before" state).
+			}
+			// Update the current context to the M we're talking about.
+			curCtx.M = mid
+		}
+		return curCtx, true, nil
+	case go122.EvGoCreate:
+		// Goroutines must be created on a running P, but may or may not be created
+		// by a running goroutine.
+		reqs := event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MayHave}
+		if err := validateCtx(curCtx, reqs); err != nil {
+			return curCtx, false, err
+		}
+		// If we have a goroutine, it must be running.
+		if state, ok := o.gStates[curCtx.G]; ok && state.status != go122.GoRunning {
+			return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(typ), GoRunning)
+		}
+		// This goroutine created another. Add a state for it.
+		newgid := GoID(ev.args[0])
+		if _, ok := o.gStates[newgid]; ok {
+			return curCtx, false, fmt.Errorf("tried to create goroutine (%v) that already exists", newgid)
+		}
+		o.gStates[newgid] = &gState{id: newgid, status: go122.GoRunnable, seq: makeSeq(gen, 0)}
+		return curCtx, true, nil
+	case go122.EvGoDestroy, go122.EvGoStop, go122.EvGoBlock:
+		// These are goroutine events that all require an active running
+		// goroutine on some thread. They must *always* be advance-able,
+		// since running goroutines are bound to their M.
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		state, ok := o.gStates[curCtx.G]
+		if !ok {
+			return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(typ), curCtx.G)
+		}
+		if state.status != go122.GoRunning {
+			return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(typ), GoRunning)
+		}
+		// Handle each case slightly differently; we just group them together
+		// because they have shared preconditions.
+		switch typ {
+		case go122.EvGoDestroy:
+			// This goroutine is exiting itself.
+			delete(o.gStates, curCtx.G)
+			newCtx.G = NoGoroutine
+		case go122.EvGoStop:
+			// Goroutine stopped (yielded). It's runnable but not running on this M.
+			state.status = go122.GoRunnable
+			newCtx.G = NoGoroutine
+		case go122.EvGoBlock:
+			// Goroutine blocked. It's waiting now and not running on this M.
+			state.status = go122.GoWaiting
+			newCtx.G = NoGoroutine
+		}
+		return curCtx, true, nil
+	case go122.EvGoStart:
+		gid := GoID(ev.args[0])
+		seq := makeSeq(gen, ev.args[1])
+		state, ok := o.gStates[gid]
+		if !ok || state.status != go122.GoRunnable || !seq.succeeds(state.seq) {
+			// We can't make an inference as to whether this is bad. We could just be seeing
+			// a GoStart on a different M before the goroutine was created, before it had its
+			// state emitted, or before we got to the right point in the trace yet.
+			return curCtx, false, nil
+		}
+		// We can advance this goroutine. Check some invariants.
+		reqs := event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MustNotHave}
+		if err := validateCtx(curCtx, reqs); err != nil {
+			return curCtx, false, err
+		}
+		state.status = go122.GoRunning
+		state.seq = seq
+		newCtx.G = gid
+		return curCtx, true, nil
+	case go122.EvGoUnblock:
+		// N.B. These both reference the goroutine to unblock, not the current goroutine.
+		gid := GoID(ev.args[0])
+		seq := makeSeq(gen, ev.args[1])
+		state, ok := o.gStates[gid]
+		if !ok || state.status != go122.GoWaiting || !seq.succeeds(state.seq) {
+			// We can't make an inference as to whether this is bad. We could just be seeing
+			// a GoUnblock on a different M before the goroutine was created and blocked itself,
+			// before it had its state emitted, or before we got to the right point in the trace yet.
+			return curCtx, false, nil
+		}
+		state.status = go122.GoRunnable
+		state.seq = seq
+		// N.B. No context to validate. Basically anything can unblock
+		// a goroutine (e.g. sysmon).
+		return curCtx, true, nil
+	case go122.EvGoSyscallBegin:
+		// Entering a syscall requires an active running goroutine with a
+	// proc on some thread. It is always advanceable.
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		state, ok := o.gStates[curCtx.G]
+		if !ok {
+			return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(typ), curCtx.G)
+		}
+		if state.status != go122.GoRunning {
+			return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(typ), GoRunning)
+		}
+		// Goroutine entered a syscall. It's still running on this P and M.
+		state.status = go122.GoSyscall
+		pState, ok := o.pStates[curCtx.P]
+		if !ok {
+			return curCtx, false, fmt.Errorf("uninitialized proc %d found during %s", curCtx.P, go122.EventString(typ))
+		}
+		pState.status = go122.ProcSyscall
+		// Validate the P sequence number on the event and advance it.
+		//
+		// We have a P sequence number for what is supposed to be a goroutine event
+		// so that we can correctly model P stealing. Without this sequence number here,
+		// the syscall from which a ProcSteal event is stealing can be ambiguous in the
+		// face of broken timestamps. See the go122-syscall-steal-proc-ambiguous test for
+		// more details.
+		//
+		// Note that because this sequence number only exists as a tool for disambiguation,
+		// we can enforce that we have the right sequence number at this point; we don't need
+		// to back off and see if any other events will advance. This is a running P.
+		pSeq := makeSeq(gen, ev.args[0])
+		if !pSeq.succeeds(pState.seq) {
+			return curCtx, false, fmt.Errorf("failed to advance %s: can't make sequence: %s -> %s", go122.EventString(typ), pState.seq, pSeq)
+		}
+		pState.seq = pSeq
+		return curCtx, true, nil
+	case go122.EvGoSyscallEnd:
+		// This event is always advanceable because it happens on the same
+		// thread that EvGoSyscallBegin happened on, and the goroutine can't leave
+		// that thread until it's done.
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		state, ok := o.gStates[curCtx.G]
+		if !ok {
+			return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(typ), curCtx.G)
+		}
+		if state.status != go122.GoSyscall {
+			return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(typ), GoRunning)
+		}
+		state.status = go122.GoRunning
+
+		// Transfer the P back to running from syscall.
+		pState, ok := o.pStates[curCtx.P]
+		if !ok {
+			return curCtx, false, fmt.Errorf("uninitialized proc %d found during %s", curCtx.P, go122.EventString(typ))
+		}
+		if pState.status != go122.ProcSyscall {
+			return curCtx, false, fmt.Errorf("expected proc %d in state %v, but got %v instead", curCtx.P, go122.ProcSyscall, pState.status)
+		}
+		pState.status = go122.ProcRunning
+		return curCtx, true, nil
+	case go122.EvGoSyscallEndBlocked:
+		// This event becomes advanceable when its P is not in a syscall state
+		// (lack of a P altogether is also acceptable for advancing).
+		// The transfer out of ProcSyscall can happen either voluntarily via
+		// ProcStop or involuntarily via ProcSteal. We may also acquire a new P
+		// before we get here (after the transfer out) but that's OK: that new
+		// P won't be in the ProcSyscall state anymore.
+		//
+		// Basically: while we have a preemptible P, don't advance, because we
+		// *know* from the event that we're going to lose it at some point during
+		// the syscall. We shouldn't advance until that happens.
+		if curCtx.P != NoProc {
+			pState, ok := o.pStates[curCtx.P]
+			if !ok {
+				return curCtx, false, fmt.Errorf("uninitialized proc %d found during %s", curCtx.P, go122.EventString(typ))
+			}
+			if pState.status == go122.ProcSyscall {
+				return curCtx, false, nil
+			}
+		}
+		// As mentioned above, we may have a P here if we ProcStart
+		// before this event.
+		if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MayHave, Goroutine: event.MustHave}); err != nil {
+			return curCtx, false, err
+		}
+		state, ok := o.gStates[curCtx.G]
+		if !ok {
+			return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(typ), curCtx.G)
+		}
+		if state.status != go122.GoSyscall {
+			return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(typ), GoRunning)
+		}
+		newCtx.G = NoGoroutine
+		state.status = go122.GoRunnable
+		return curCtx, true, nil
+	case go122.EvGoCreateSyscall:
+		// This event indicates that a goroutine is effectively
+		// being created out of a cgo callback. Such a goroutine
+		// is 'created' in the syscall state.
+		if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MayHave, Goroutine: event.MustNotHave}); err != nil {
+			return curCtx, false, err
+		}
+		// This goroutine is effectively being created. Add a state for it.
+		newgid := GoID(ev.args[0])
+		if _, ok := o.gStates[newgid]; ok {
+			return curCtx, false, fmt.Errorf("tried to create goroutine (%v) in syscall that already exists", newgid)
+		}
+		o.gStates[newgid] = &gState{id: newgid, status: go122.GoSyscall, seq: makeSeq(gen, 0)}
+		// Goroutine is executing. Bind it to the context.
+		newCtx.G = newgid
+		return curCtx, true, nil
+	case go122.EvGoDestroySyscall:
+		// This event indicates that a goroutine created for a
+		// cgo callback is disappearing, either because the callback
+		// is ending or because the C thread that called it is being destroyed.
+		//
+		// Also, treat this as if we lost our P too.
+		// The thread ID may be reused by the platform and we'll get
+		// really confused if we later try to steal the P this thread is
+		// running with. The new M with the same ID could even try to
+		// steal back this P from itself!
+		//
+		// The runtime is careful to make sure that any GoCreateSyscall
+		// event will enter the runtime emitting events for reacquiring a P.
+		//
+		// Note: we might have a P here. The P might not be released
+		// eagerly by the runtime, and it might get stolen back later
+		// (or never again, if the program is going to exit).
+		if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MayHave, Goroutine: event.MustHave}); err != nil {
+			return curCtx, false, err
+		}
+		// Check to make sure the goroutine exists in the right state.
+		state, ok := o.gStates[curCtx.G]
+		if !ok {
+			return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(typ), curCtx.G)
+		}
+		if state.status != go122.GoSyscall {
+			return curCtx, false, fmt.Errorf("%s event for goroutine that's not %v", go122.EventString(typ), GoSyscall)
+		}
+		// This goroutine is exiting itself.
+		delete(o.gStates, curCtx.G)
+		newCtx.G = NoGoroutine
+
+		// If we have a proc, then we're dissociating from it now. See the comment at the top of the case.
+		if curCtx.P != NoProc {
+			pState, ok := o.pStates[curCtx.P]
+			if !ok {
+				return curCtx, false, fmt.Errorf("found invalid proc %d during %s", curCtx.P, go122.EventString(typ))
+			}
+			if pState.status != go122.ProcSyscall {
+				return curCtx, false, fmt.Errorf("proc %d in unexpected state %s during %s", curCtx.P, pState.status, go122.EventString(typ))
+			}
+			// See the go122-create-syscall-reuse-thread-id test case for more details.
+			pState.status = go122.ProcSyscallAbandoned
+			newCtx.P = NoProc
+
+			// Queue an extra self-ProcSteal event.
+			o.extraEvent = Event{
+				table: evt,
+				ctx:   curCtx,
+				base: baseEvent{
+					typ:  go122.EvProcSteal,
+					time: ev.time,
+				},
+			}
+			o.extraEvent.base.args[0] = uint64(curCtx.P)
+			o.extraEvent.base.extra(version.Go122)[0] = uint64(go122.ProcSyscall)
+		}
+		return curCtx, true, nil
+
+	// Handle tasks. Tasks are interesting because:
+	// - There's no Begin event required to reference a task.
+	// - End for a particular task ID can appear multiple times.
+	// As a result, there's very little to validate. The only
+	// thing we have to be sure of is that a task didn't begin
+	// after it had already begun. Task IDs are allowed to be
+	// reused, so we don't care about a Begin after an End.
+	case go122.EvUserTaskBegin:
+		id := TaskID(ev.args[0])
+		if _, ok := o.activeTasks[id]; ok {
+			return curCtx, false, fmt.Errorf("task ID conflict: %d", id)
+		}
+		// Get the parent ID, but don't validate it. There's no guarantee
+		// we actually have information on whether it's active.
+		parentID := TaskID(ev.args[1])
+		if parentID == BackgroundTask {
+			// Note: a value of 0 here actually means no parent, *not* the
+			// background task. Automatic background task attachment only
+			// applies to regions.
+			parentID = NoTask
+			ev.args[1] = uint64(NoTask)
+		}
+
+		// Validate the name and record it. We'll need to pass it through to
+		// EvUserTaskEnd.
+		nameID := stringID(ev.args[2])
+		name, ok := evt.strings.get(nameID)
+		if !ok {
+			return curCtx, false, fmt.Errorf("invalid string ID %v for %v event", nameID, typ)
+		}
+		o.activeTasks[id] = taskState{name: name, parentID: parentID}
+		return curCtx, true, validateCtx(curCtx, event.UserGoReqs)
+	case go122.EvUserTaskEnd:
+		id := TaskID(ev.args[0])
+		if ts, ok := o.activeTasks[id]; ok {
+			// Smuggle the task info. This may happen in a different generation,
+			// which may not have the name in its string table. Add it to the extra
+			// strings table so we can look it up later.
+			ev.extra(version.Go122)[0] = uint64(ts.parentID)
+			ev.extra(version.Go122)[1] = uint64(evt.addExtraString(ts.name))
+			delete(o.activeTasks, id)
+		} else {
+			// Explicitly clear the task info.
+			ev.extra(version.Go122)[0] = uint64(NoTask)
+			ev.extra(version.Go122)[1] = uint64(evt.addExtraString(""))
+		}
+		return curCtx, true, validateCtx(curCtx, event.UserGoReqs)
+
+	// Handle user regions.
+	case go122.EvUserRegionBegin:
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		tid := TaskID(ev.args[0])
+		nameID := stringID(ev.args[1])
+		name, ok := evt.strings.get(nameID)
+		if !ok {
+			return curCtx, false, fmt.Errorf("invalid string ID %v for %v event", nameID, typ)
+		}
+		gState, ok := o.gStates[curCtx.G]
+		if !ok {
+			return curCtx, false, fmt.Errorf("encountered EvUserRegionBegin without known state for current goroutine %d", curCtx.G)
+		}
+		if err := gState.beginRegion(userRegion{tid, name}); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+	case go122.EvUserRegionEnd:
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		tid := TaskID(ev.args[0])
+		nameID := stringID(ev.args[1])
+		name, ok := evt.strings.get(nameID)
+		if !ok {
+			return curCtx, false, fmt.Errorf("invalid string ID %v for %v event", nameID, typ)
+		}
+		gState, ok := o.gStates[curCtx.G]
+		if !ok {
+			return curCtx, false, fmt.Errorf("encountered EvUserRegionEnd without known state for current goroutine %d", curCtx.G)
+		}
+		if err := gState.endRegion(userRegion{tid, name}); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+
+	// Handle the GC mark phase.
+	//
+	// We have sequence numbers for both start and end because they
+	// can happen on completely different threads. We want an explicit
+	// partial order edge between start and end here, otherwise we're
+	// relying entirely on timestamps to make sure we don't advance a
+	// GCEnd for a _different_ GC cycle if timestamps are wildly broken.
+	case go122.EvGCActive:
+		seq := ev.args[0]
+		if gen == o.initialGen {
+			if o.gcState != gcUndetermined {
+				return curCtx, false, fmt.Errorf("GCActive in the first generation isn't first GC event")
+			}
+			o.gcSeq = seq
+			o.gcState = gcRunning
+			return curCtx, true, nil
+		}
+		if seq != o.gcSeq+1 {
+			// This is not the right GC cycle.
+			return curCtx, false, nil
+		}
+		if o.gcState != gcRunning {
+			return curCtx, false, fmt.Errorf("encountered GCActive while GC was not in progress")
+		}
+		o.gcSeq = seq
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+	case go122.EvGCBegin:
+		seq := ev.args[0]
+		if o.gcState == gcUndetermined {
+			o.gcSeq = seq
+			o.gcState = gcRunning
+			return curCtx, true, nil
+		}
+		if seq != o.gcSeq+1 {
+			// This is not the right GC cycle.
+			return curCtx, false, nil
+		}
+		if o.gcState == gcRunning {
+			return curCtx, false, fmt.Errorf("encountered GCBegin while GC was already in progress")
+		}
+		o.gcSeq = seq
+		o.gcState = gcRunning
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+	case go122.EvGCEnd:
+		seq := ev.args[0]
+		if seq != o.gcSeq+1 {
+			// This is not the right GC cycle.
+			return curCtx, false, nil
+		}
+		if o.gcState == gcNotRunning {
+			return curCtx, false, fmt.Errorf("encountered GCEnd when GC was not in progress")
+		}
+		if o.gcState == gcUndetermined {
+			return curCtx, false, fmt.Errorf("encountered GCEnd when GC was in an undetermined state")
+		}
+		o.gcSeq = seq
+		o.gcState = gcNotRunning
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+
+	// Handle simple instantaneous events that require a G.
+	case go122.EvGoLabel, go122.EvProcsChange, go122.EvUserLog:
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+
+	// Handle allocation states, which don't require a G.
+	case go122.EvHeapAlloc, go122.EvHeapGoal:
+		if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MayHave}); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+
+	// Handle sweep, which is bound to a P and doesn't require a G.
+	case go122.EvGCSweepBegin:
+		if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MayHave}); err != nil {
+			return curCtx, false, err
+		}
+		if err := o.pStates[curCtx.P].beginRange(makeRangeType(typ, 0)); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+	case go122.EvGCSweepActive:
+		pid := ProcID(ev.args[0])
+		// N.B. In practice Ps can't block while they're sweeping, so this can only
+		// ever reference curCtx.P. However, be lenient about this like we are with
+		// GCMarkAssistActive; there's no reason the runtime couldn't change to block
+		// in the middle of a sweep.
+		pState, ok := o.pStates[pid]
+		if !ok {
+			return curCtx, false, fmt.Errorf("encountered GCSweepActive for unknown proc %d", pid)
+		}
+		if err := pState.activeRange(makeRangeType(typ, 0), gen == o.initialGen); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+	case go122.EvGCSweepEnd:
+		if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MayHave}); err != nil {
+			return curCtx, false, err
+		}
+		_, err := o.pStates[curCtx.P].endRange(typ)
+		if err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+
+	// Handle special goroutine-bound event ranges.
+	case go122.EvSTWBegin, go122.EvGCMarkAssistBegin:
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		desc := stringID(0)
+		if typ == go122.EvSTWBegin {
+			desc = stringID(ev.args[0])
+		}
+		gState, ok := o.gStates[curCtx.G]
+		if !ok {
+			return curCtx, false, fmt.Errorf("encountered event of type %d without known state for current goroutine %d", typ, curCtx.G)
+		}
+		if err := gState.beginRange(makeRangeType(typ, desc)); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+	case go122.EvGCMarkAssistActive:
+		gid := GoID(ev.args[0])
+		// N.B. Like GoStatus, this can happen at any time, because it can
+		// reference a non-running goroutine. Don't check anything about the
+		// current scheduler context.
+		gState, ok := o.gStates[gid]
+		if !ok {
+			return curCtx, false, fmt.Errorf("uninitialized goroutine %d found during %s", gid, go122.EventString(typ))
+		}
+		if err := gState.activeRange(makeRangeType(typ, 0), gen == o.initialGen); err != nil {
+			return curCtx, false, err
+		}
+		return curCtx, true, nil
+	case go122.EvSTWEnd, go122.EvGCMarkAssistEnd:
+		if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+			return curCtx, false, err
+		}
+		gState, ok := o.gStates[curCtx.G]
+		if !ok {
+			return curCtx, false, fmt.Errorf("encountered event of type %d without known state for current goroutine %d", typ, curCtx.G)
+		}
+		desc, err := gState.endRange(typ)
+		if err != nil {
+			return curCtx, false, err
+		}
+		if typ == go122.EvSTWEnd {
+			// Smuggle the kind into the event.
+			// Don't use ev.extra here so we have symmetry with STWBegin.
+			ev.args[0] = uint64(desc)
+		}
+		return curCtx, true, nil
+	}
+	return curCtx, false, fmt.Errorf("bad event type found while ordering: %v", ev.typ)
+}
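+
+// advanceAll is a hedged sketch (not part of the upstream change) of the
+// caller contract documented on advance: an error is fatal, a false result
+// means "try another candidate", and running out of candidates means the
+// trace is invalid. The per-thread candidate map is an assumption made
+// purely for illustration.
+func (o *ordering) advanceAll(byThread map[ThreadID]*baseEvent, evt *evTable, gen uint64) error {
+	for len(byThread) > 0 {
+		advanced := false
+		for m, ev := range byThread {
+			_, ok, err := o.advance(ev, evt, m, gen)
+			if err != nil {
+				return err // Broken trace; parsing must stop.
+			}
+			if !ok {
+				continue // Not ready yet; try another candidate.
+			}
+			o.consumeExtraEvent() // Drain any queued extra event before advancing again.
+			delete(byThread, m)
+			advanced = true
+		}
+		if !advanced {
+			return fmt.Errorf("no candidate event can advance; the trace is invalid")
+		}
+	}
+	return nil
+}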
+
+// schedCtx represents the scheduling resources associated with an event.
+type schedCtx struct {
+	G GoID
+	P ProcID
+	M ThreadID
+}
+
+// validateCtx ensures that ctx conforms to some reqs, returning an error if
+// it doesn't.
+func validateCtx(ctx schedCtx, reqs event.SchedReqs) error {
+	// Check thread requirements.
+	if reqs.Thread == event.MustHave && ctx.M == NoThread {
+		return fmt.Errorf("expected a thread but didn't have one")
+	} else if reqs.Thread == event.MustNotHave && ctx.M != NoThread {
+		return fmt.Errorf("expected no thread but had one")
+	}
+
+	// Check proc requirements.
+	if reqs.Proc == event.MustHave && ctx.P == NoProc {
+		return fmt.Errorf("expected a proc but didn't have one")
+	} else if reqs.Proc == event.MustNotHave && ctx.P != NoProc {
+		return fmt.Errorf("expected no proc but had one")
+	}
+
+	// Check goroutine requirements.
+	if reqs.Goroutine == event.MustHave && ctx.G == NoGoroutine {
+		return fmt.Errorf("expected a goroutine but didn't have one")
+	} else if reqs.Goroutine == event.MustNotHave && ctx.G != NoGoroutine {
+		return fmt.Errorf("expected no goroutine but had one")
+	}
+	return nil
+}
+
+// gcState is a trinary variable for the current state of the GC.
+//
+// The third state besides "enabled" and "disabled" is "undetermined."
+type gcState uint8
+
+const (
+	gcUndetermined gcState = iota
+	gcNotRunning
+	gcRunning
+)
+
+// String returns a human-readable string for the GC state.
+func (s gcState) String() string {
+	switch s {
+	case gcUndetermined:
+		return "Undetermined"
+	case gcNotRunning:
+		return "NotRunning"
+	case gcRunning:
+		return "Running"
+	}
+	return "Bad"
+}
+
+// userRegion represents a unique user region when attached to some gState.
+type userRegion struct {
+	// name must be a resolved string because the string ID for the same
+	// string may change across generations, but we care about checking
+	// the value itself.
+	taskID TaskID
+	name   string
+}
+
+// rangeType is a way to classify special ranges of time.
+//
+// These typically correspond 1:1 with "Begin" events, but
+// they may have an optional subtype that describes the range
+// in more detail.
+type rangeType struct {
+	typ  event.Type // "Begin" event.
+	desc stringID   // Optional subtype.
+}
+
+// makeRangeType constructs a new rangeType.
+func makeRangeType(typ event.Type, desc stringID) rangeType {
+	if styp := go122.Specs()[typ].StartEv; styp != go122.EvNone {
+		typ = styp
+	}
+	return rangeType{typ, desc}
+}
+
+// gState is the state of a goroutine at a point in the trace.
+type gState struct {
+	id     GoID
+	status go122.GoStatus
+	seq    seqCounter
+
+	// regions are the active user regions for this goroutine.
+	regions []userRegion
+
+	// rangeState is the state of special time ranges bound to this goroutine.
+	rangeState
+}
+
+// beginRegion starts a user region on the goroutine.
+func (s *gState) beginRegion(r userRegion) error {
+	s.regions = append(s.regions, r)
+	return nil
+}
+
+// endRegion ends a user region on the goroutine.
+func (s *gState) endRegion(r userRegion) error {
+	if len(s.regions) == 0 {
+		// We do not know about regions that began before tracing started.
+		return nil
+	}
+	if next := s.regions[len(s.regions)-1]; next != r {
+		return fmt.Errorf("misuse of region in goroutine %v: region end %v when the inner-most active region start event is %v", s.id, r, next)
+	}
+	s.regions = s.regions[:len(s.regions)-1]
+	return nil
+}
+
+// pState is the state of a proc at a point in the trace.
+type pState struct {
+	id     ProcID
+	status go122.ProcStatus
+	seq    seqCounter
+
+	// rangeState is the state of special time ranges bound to this proc.
+	rangeState
+}
+
+// mState is the state of a thread at a point in the trace.
+type mState struct {
+	g GoID   // Goroutine bound to this M. (The goroutine's state is Executing.)
+	p ProcID // Proc bound to this M. (The proc's state is Executing.)
+}
+
+// rangeState represents the state of special time ranges.
+type rangeState struct {
+	// inFlight contains the rangeTypes of any ranges bound to a resource.
+	inFlight []rangeType
+}
+
+// beginRange begins a special range in time on the goroutine.
+//
+// Returns an error if the range is already in progress.
+func (s *rangeState) beginRange(typ rangeType) error {
+	if s.hasRange(typ) {
+		return fmt.Errorf("discovered event already in-flight for when starting event %v", go122.Specs()[typ.typ].Name)
+	}
+	s.inFlight = append(s.inFlight, typ)
+	return nil
+}
+
+// activeRange marks a special range in time on the goroutine as active in the
+// initial generation, or confirms that it is indeed active in later generations.
+func (s *rangeState) activeRange(typ rangeType, isInitialGen bool) error {
+	if isInitialGen {
+		if s.hasRange(typ) {
+			return fmt.Errorf("found named active range already in first gen: %v", typ)
+		}
+		s.inFlight = append(s.inFlight, typ)
+	} else if !s.hasRange(typ) {
+		return fmt.Errorf("resource is missing active range: %v %v", go122.Specs()[typ.typ].Name, s.inFlight)
+	}
+	return nil
+}
+
+// hasRange returns true if a special time range on the goroutine is in progress.
+func (s *rangeState) hasRange(typ rangeType) bool {
+	for _, ftyp := range s.inFlight {
+		if ftyp == typ {
+			return true
+		}
+	}
+	return false
+}
+
+// endRange ends a special range in time on the goroutine.
+//
+// This must line up with the start event type of the range the goroutine is currently in.
+func (s *rangeState) endRange(typ event.Type) (stringID, error) {
+	st := go122.Specs()[typ].StartEv
+	idx := -1
+	for i, r := range s.inFlight {
+		if r.typ == st {
+			idx = i
+			break
+		}
+	}
+	if idx < 0 {
+		return 0, fmt.Errorf("tried to end event %v, but not in-flight", go122.Specs()[st].Name)
+	}
+	// Swap remove.
+	desc := s.inFlight[idx].desc
+	s.inFlight[idx], s.inFlight[len(s.inFlight)-1] = s.inFlight[len(s.inFlight)-1], s.inFlight[idx]
+	s.inFlight = s.inFlight[:len(s.inFlight)-1]
+	return desc, nil
+}
+
+// seqCounter represents a global sequence counter for a resource.
+type seqCounter struct {
+	gen uint64 // The generation for the local sequence counter seq.
+	seq uint64 // The sequence number local to the generation.
+}
+
+// makeSeq creates a new seqCounter.
+func makeSeq(gen, seq uint64) seqCounter {
+	return seqCounter{gen: gen, seq: seq}
+}
+
+// succeeds returns true if a is the immediate successor of b.
+func (a seqCounter) succeeds(b seqCounter) bool {
+	return a.gen == b.gen && a.seq == b.seq+1
+}
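+
+// For example (illustrative values only), makeSeq(3, 8).succeeds(makeSeq(3, 7))
+// is true, while sequence numbers from different generations never succeed one
+// another.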
+
+// String returns a debug string representation of the seqCounter.
+func (c seqCounter) String() string {
+	return fmt.Sprintf("%d (gen=%d)", c.seq, c.gen)
+}
+
+func dumpOrdering(order *ordering) string {
+	var sb strings.Builder
+	for id, state := range order.gStates {
+		fmt.Fprintf(&sb, "G %d [status=%s seq=%s]\n", id, state.status, state.seq)
+	}
+	fmt.Fprintln(&sb)
+	for id, state := range order.pStates {
+		fmt.Fprintf(&sb, "P %d [status=%s seq=%s]\n", id, state.status, state.seq)
+	}
+	fmt.Fprintln(&sb)
+	for id, state := range order.mStates {
+		fmt.Fprintf(&sb, "M %d [g=%d p=%d]\n", id, state.g, state.p)
+	}
+	fmt.Fprintln(&sb)
+	fmt.Fprintf(&sb, "GC %d %s\n", order.gcSeq, order.gcState)
+	return sb.String()
+}
+
+// taskState represents an active task.
+type taskState struct {
+	// name is the type of the active task.
+	name string
+
+	// parentID is the parent ID of the active task.
+	parentID TaskID
+}
diff --git a/src/internal/trace/v2/raw/doc.go b/src/internal/trace/v2/raw/doc.go
new file mode 100644
index 0000000..5348737
--- /dev/null
+++ b/src/internal/trace/v2/raw/doc.go
@@ -0,0 +1,66 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package raw provides an interface to interpret and emit Go execution traces.
+It can interpret and emit execution traces in its wire format as well as a
+bespoke but simple text format.
+
+The readers and writers in this package perform no validation on or ordering of
+the input, and so are generally unsuitable for analysis. However, they're very
+useful for testing and debugging the tracer in the runtime and more sophisticated
+trace parsers.
+
+# Text format specification
+
+The trace text format produced and consumed by this package is a line-oriented
+format.
+
+The first line in each text trace is the header line.
+
+	Trace Go1.XX
+
+Following that is a series of event lines. Each event begins with an
+event name, followed by zero or more named unsigned integer arguments.
+Names are separated from their integer values by an '=' sign. Names can
+consist of any UTF-8 character except '='.
+
+For example:
+
+	EventName arg1=23 arg2=55 arg3=53
+
+Any amount of whitespace is allowed to separate each token. Whitespace
+is identified via unicode.IsSpace.
+
+Some events have additional data on following lines. There are two such
+special cases.
+
+The first special case consists of events with trailing byte-oriented data.
+The trailer begins on the line following the event. That line consists
+of a single argument 'data' and a Go-quoted string representing the byte data
+within. Note: an explicit argument for the length is elided, because it's
+just the length of the unquoted string.
+
+For example:
+
+	String id=5
+		data="hello world\x00"
+
+These events are identified in their spec by the HasData flag.
+
+The second special case consists of stack events. These events are identified
+by the IsStack flag. These events also have a trailing unsigned integer argument
+describing the number of stack frame descriptors that follow. Each stack frame
+descriptor is on its own line following the event, consisting of four unsigned
+integer arguments: the PC, an integer describing the function name, an integer
+describing the file name, and the line in that file the function was at when
+the stack trace was taken.
+
+For example:
+
+	Stack id=5 n=2
+		pc=1241251 func=3 file=6 line=124
+		pc=7534345 func=6 file=3 line=64
+*/
+package raw
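+
+// A hedged sketch (not part of the upstream change) of how this package's
+// readers and writers are intended to be combined, assuming the Reader and
+// TextWriter types defined elsewhere in the package:
+//
+//	r, _ := NewReader(wireTrace)            // wire format in
+//	w, _ := NewTextWriter(out, r.Version()) // text format out
+//	for {
+//		ev, err := r.ReadEvent()
+//		if err == io.EOF {
+//			break
+//		}
+//		w.WriteEvent(ev)
+//	}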
diff --git a/src/internal/trace/v2/raw/event.go b/src/internal/trace/v2/raw/event.go
new file mode 100644
index 0000000..6f09f1f
--- /dev/null
+++ b/src/internal/trace/v2/raw/event.go
@@ -0,0 +1,60 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package raw
+
+import (
+	"strconv"
+	"strings"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/version"
+)
+
+// Event is a simple representation of a trace event.
+//
+// Note that this covers much more than just timestamped events;
+// it also represents parts of the trace format's framing, left
+// uninterpreted.
+type Event struct {
+	Version version.Version
+	Ev      event.Type
+	Args    []uint64
+	Data    []byte
+}
+
+// String returns the canonical string representation of the event.
+//
+// This format is the same format that is parsed by the TextReader
+// and emitted by the TextWriter.
+func (e *Event) String() string {
+	spec := e.Version.Specs()[e.Ev]
+
+	var s strings.Builder
+	s.WriteString(spec.Name)
+	for i := range spec.Args {
+		s.WriteString(" ")
+		s.WriteString(spec.Args[i])
+		s.WriteString("=")
+		s.WriteString(strconv.FormatUint(e.Args[i], 10))
+	}
+	if spec.IsStack {
+		frames := e.Args[len(spec.Args):]
+		for i := 0; i < len(frames); i++ {
+			if i%4 == 0 {
+				s.WriteString("\n\t")
+			} else {
+				s.WriteString(" ")
+			}
+			s.WriteString(frameFields[i%4])
+			s.WriteString("=")
+			s.WriteString(strconv.FormatUint(frames[i], 10))
+		}
+	}
+	if e.Data != nil {
+		s.WriteString("\n\tdata=")
+		s.WriteString(strconv.Quote(string(e.Data)))
+	}
+	return s.String()
+}
diff --git a/src/internal/trace/v2/raw/reader.go b/src/internal/trace/v2/raw/reader.go
new file mode 100644
index 0000000..fdcd47f
--- /dev/null
+++ b/src/internal/trace/v2/raw/reader.go
@@ -0,0 +1,110 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package raw
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/version"
+)
+
+// Reader parses trace bytes with only very basic validation
+// into an event stream.
+type Reader struct {
+	r     *bufio.Reader
+	v     version.Version
+	specs []event.Spec
+}
+
+// NewReader creates a new reader for the trace wire format.
+func NewReader(r io.Reader) (*Reader, error) {
+	br := bufio.NewReader(r)
+	v, err := version.ReadHeader(br)
+	if err != nil {
+		return nil, err
+	}
+	return &Reader{r: br, v: v, specs: v.Specs()}, nil
+}
+
+// Version returns the version of the trace that we're reading.
+func (r *Reader) Version() version.Version {
+	return r.v
+}
+
+// ReadEvent reads and returns the next trace event in the byte stream.
+func (r *Reader) ReadEvent() (Event, error) {
+	evb, err := r.r.ReadByte()
+	if err == io.EOF {
+		return Event{}, io.EOF
+	}
+	if err != nil {
+		return Event{}, err
+	}
+	if int(evb) >= len(r.specs) || evb == 0 {
+		return Event{}, fmt.Errorf("invalid event type: %d", evb)
+	}
+	ev := event.Type(evb)
+	spec := r.specs[ev]
+	args, err := r.readArgs(len(spec.Args))
+	if err != nil {
+		return Event{}, err
+	}
+	if spec.IsStack {
+		len := int(args[1])
+		for i := 0; i < len; i++ {
+			// Each stack frame has four args: pc, func ID, file ID, line number.
+			frame, err := r.readArgs(4)
+			if err != nil {
+				return Event{}, err
+			}
+			args = append(args, frame...)
+		}
+	}
+	var data []byte
+	if spec.HasData {
+		data, err = r.readData()
+		if err != nil {
+			return Event{}, err
+		}
+	}
+	return Event{
+		Version: r.v,
+		Ev:      ev,
+		Args:    args,
+		Data:    data,
+	}, nil
+}
+
+func (r *Reader) readArgs(n int) ([]uint64, error) {
+	var args []uint64
+	for i := 0; i < n; i++ {
+		val, err := binary.ReadUvarint(r.r)
+		if err != nil {
+			return nil, err
+		}
+		args = append(args, val)
+	}
+	return args, nil
+}
+
+func (r *Reader) readData() ([]byte, error) {
+	len, err := binary.ReadUvarint(r.r)
+	if err != nil {
+		return nil, err
+	}
+	var data []byte
+	for i := 0; i < int(len); i++ {
+		b, err := r.r.ReadByte()
+		if err != nil {
+			return nil, err
+		}
+		data = append(data, b)
+	}
+	return data, nil
+}
diff --git a/src/internal/trace/v2/raw/textreader.go b/src/internal/trace/v2/raw/textreader.go
new file mode 100644
index 0000000..07785f3
--- /dev/null
+++ b/src/internal/trace/v2/raw/textreader.go
@@ -0,0 +1,217 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package raw
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/version"
+)
+
+// TextReader parses a text format trace with only very basic validation
+// into an event stream.
+type TextReader struct {
+	v     version.Version
+	specs []event.Spec
+	names map[string]event.Type
+	s     *bufio.Scanner
+}
+
+// NewTextReader creates a new reader for the trace text format.
+func NewTextReader(r io.Reader) (*TextReader, error) {
+	tr := &TextReader{s: bufio.NewScanner(r)}
+	line, err := tr.nextLine()
+	if err != nil {
+		return nil, err
+	}
+	trace, line := readToken(line)
+	if trace != "Trace" {
+		return nil, fmt.Errorf("failed to parse header")
+	}
+	gover, line := readToken(line)
+	if !strings.HasPrefix(gover, "Go1.") {
+		return nil, fmt.Errorf("failed to parse header Go version")
+	}
+	rawv, err := strconv.ParseUint(gover[len("Go1."):], 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse header Go version: %v", err)
+	}
+	v := version.Version(rawv)
+	if !v.Valid() {
+		return nil, fmt.Errorf("unknown or unsupported Go version 1.%d", v)
+	}
+	tr.v = v
+	tr.specs = v.Specs()
+	tr.names = event.Names(tr.specs)
+	for _, r := range line {
+		if !unicode.IsSpace(r) {
+			return nil, fmt.Errorf("encountered unexpected non-space at the end of the header: %q", line)
+		}
+	}
+	return tr, nil
+}
+
+// Version returns the version of the trace that we're reading.
+func (r *TextReader) Version() version.Version {
+	return r.v
+}
+
+// ReadEvent reads and returns the next trace event in the text stream.
+func (r *TextReader) ReadEvent() (Event, error) {
+	line, err := r.nextLine()
+	if err != nil {
+		return Event{}, err
+	}
+	evStr, line := readToken(line)
+	ev, ok := r.names[evStr]
+	if !ok {
+		return Event{}, fmt.Errorf("unidentified event: %s", evStr)
+	}
+	spec := r.specs[ev]
+	args, err := readArgs(line, spec.Args)
+	if err != nil {
+		return Event{}, fmt.Errorf("reading args for %s: %v", evStr, err)
+	}
+	if spec.IsStack {
+		len := int(args[1])
+		for i := 0; i < len; i++ {
+			line, err := r.nextLine()
+			if err == io.EOF {
+				return Event{}, fmt.Errorf("unexpected EOF while reading stack: args=%v", args)
+			}
+			if err != nil {
+				return Event{}, err
+			}
+			frame, err := readArgs(line, frameFields)
+			if err != nil {
+				return Event{}, err
+			}
+			args = append(args, frame...)
+		}
+	}
+	var data []byte
+	if spec.HasData {
+		line, err := r.nextLine()
+		if err == io.EOF {
+			return Event{}, fmt.Errorf("unexpected EOF while reading data for %s: args=%v", evStr, args)
+		}
+		if err != nil {
+			return Event{}, err
+		}
+		data, err = readData(line)
+		if err != nil {
+			return Event{}, err
+		}
+	}
+	return Event{
+		Version: r.v,
+		Ev:      ev,
+		Args:    args,
+		Data:    data,
+	}, nil
+}
+
+func (r *TextReader) nextLine() (string, error) {
+	for {
+		if !r.s.Scan() {
+			if err := r.s.Err(); err != nil {
+				return "", err
+			}
+			return "", io.EOF
+		}
+		txt := r.s.Text()
+		tok, _ := readToken(txt)
+		if tok == "" {
+			continue // Empty line or comment.
+		}
+		return txt, nil
+	}
+}
+
+var frameFields = []string{"pc", "func", "file", "line"}
+
+func readArgs(s string, names []string) ([]uint64, error) {
+	var args []uint64
+	for _, name := range names {
+		arg, value, rest, err := readArg(s)
+		if err != nil {
+			return nil, err
+		}
+		if arg != name {
+			return nil, fmt.Errorf("expected argument %q, but got %q", name, arg)
+		}
+		args = append(args, value)
+		s = rest
+	}
+	for _, r := range s {
+		if !unicode.IsSpace(r) {
+			return nil, fmt.Errorf("encountered unexpected non-space at the end of an event: %q", s)
+		}
+	}
+	return args, nil
+}
+
+func readArg(s string) (arg string, value uint64, rest string, err error) {
+	var tok string
+	tok, rest = readToken(s)
+	if len(tok) == 0 {
+		return "", 0, s, fmt.Errorf("no argument")
+	}
+	parts := strings.SplitN(tok, "=", 2)
+	if len(parts) < 2 {
+		return "", 0, s, fmt.Errorf("malformed argument: %q", tok)
+	}
+	arg = parts[0]
+	value, err = strconv.ParseUint(parts[1], 10, 64)
+	if err != nil {
+		return arg, value, s, fmt.Errorf("failed to parse argument value %q for arg %q", parts[1], parts[0])
+	}
+	return
+}
+
+func readToken(s string) (token, rest string) {
+	tkStart := -1
+	for i, r := range s {
+		if r == '#' {
+			return "", ""
+		}
+		if !unicode.IsSpace(r) {
+			tkStart = i
+			break
+		}
+	}
+	if tkStart < 0 {
+		return "", ""
+	}
+	tkEnd := -1
+	for i, r := range s[tkStart:] {
+		if unicode.IsSpace(r) || r == '#' {
+			tkEnd = i + tkStart
+			break
+		}
+	}
+	if tkEnd < 0 {
+		return s[tkStart:], ""
+	}
+	return s[tkStart:tkEnd], s[tkEnd:]
+}
+
+func readData(line string) ([]byte, error) {
+	parts := strings.SplitN(line, "=", 2)
+	if len(parts) < 2 || strings.TrimSpace(parts[0]) != "data" {
+		return nil, fmt.Errorf("malformed data: %q", line)
+	}
+	data, err := strconv.Unquote(strings.TrimSpace(parts[1]))
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse data: %q: %v", line, err)
+	}
+	return []byte(data), nil
+}
diff --git a/src/internal/trace/v2/raw/textwriter.go b/src/internal/trace/v2/raw/textwriter.go
new file mode 100644
index 0000000..367a80b
--- /dev/null
+++ b/src/internal/trace/v2/raw/textwriter.go
@@ -0,0 +1,39 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package raw
+
+import (
+	"fmt"
+	"io"
+
+	"internal/trace/v2/version"
+)
+
+// TextWriter emits the text format of a trace.
+type TextWriter struct {
+	w io.Writer
+	v version.Version
+}
+
+// NewTextWriter creates a new writer for the trace text format.
+func NewTextWriter(w io.Writer, v version.Version) (*TextWriter, error) {
+	_, err := fmt.Fprintf(w, "Trace Go1.%d\n", v)
+	if err != nil {
+		return nil, err
+	}
+	return &TextWriter{w: w, v: v}, nil
+}
+
+// WriteEvent writes a single event to the stream.
+func (w *TextWriter) WriteEvent(e Event) error {
+	// Check version.
+	if e.Version != w.v {
+		return fmt.Errorf("mismatched version between writer (go 1.%d) and event (go 1.%d)", w.v, e.Version)
+	}
+
+	// Write event.
+	_, err := fmt.Fprintln(w.w, e.String())
+	return err
+}
diff --git a/src/internal/trace/v2/raw/writer.go b/src/internal/trace/v2/raw/writer.go
new file mode 100644
index 0000000..80596eb
--- /dev/null
+++ b/src/internal/trace/v2/raw/writer.go
@@ -0,0 +1,75 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package raw
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/version"
+)
+
+// Writer emits the wire format of a trace.
+//
+// It may not produce a trace that is byte-for-byte identical to what the
+// runtime produces, because it may omit the extra padding in the LEB128
+// encoding that the runtime adds but that isn't necessary when the data
+// is known up-front.
+type Writer struct {
+	w     io.Writer
+	buf   []byte
+	v     version.Version
+	specs []event.Spec
+}
+
+// NewWriter creates a new byte format writer.
+func NewWriter(w io.Writer, v version.Version) (*Writer, error) {
+	_, err := version.WriteHeader(w, v)
+	return &Writer{w: w, v: v, specs: v.Specs()}, err
+}
+
+// WriteEvent writes a single event to the trace wire format stream.
+func (w *Writer) WriteEvent(e Event) error {
+	// Check version.
+	if e.Version != w.v {
+		return fmt.Errorf("mismatched version between writer (go 1.%d) and event (go 1.%d)", w.v, e.Version)
+	}
+
+	// Write event header byte.
+	w.buf = append(w.buf, uint8(e.Ev))
+
+	// Write out all arguments.
+	spec := w.specs[e.Ev]
+	for _, arg := range e.Args[:len(spec.Args)] {
+		w.buf = binary.AppendUvarint(w.buf, arg)
+	}
+	if spec.IsStack {
+		frameArgs := e.Args[len(spec.Args):]
+		for i := 0; i < len(frameArgs); i++ {
+			w.buf = binary.AppendUvarint(w.buf, frameArgs[i])
+		}
+	}
+
+	// Write out the length of the data.
+	if spec.HasData {
+		w.buf = binary.AppendUvarint(w.buf, uint64(len(e.Data)))
+	}
+
+	// Write out varint events.
+	_, err := w.w.Write(w.buf)
+	w.buf = w.buf[:0]
+	if err != nil {
+		return err
+	}
+
+	// Write out data.
+	if spec.HasData {
+		_, err := w.w.Write(e.Data)
+		return err
+	}
+	return nil
+}
diff --git a/src/internal/trace/v2/reader.go b/src/internal/trace/v2/reader.go
new file mode 100644
index 0000000..824ca23
--- /dev/null
+++ b/src/internal/trace/v2/reader.go
@@ -0,0 +1,198 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"slices"
+	"strings"
+
+	"internal/trace/v2/event/go122"
+	"internal/trace/v2/version"
+)
+
+// Reader reads a byte stream, validates it, and produces trace events.
+type Reader struct {
+	r           *bufio.Reader
+	lastTs      Time
+	gen         *generation
+	spill       *spilledBatch
+	frontier    []*batchCursor
+	cpuSamples  []cpuSample
+	order       ordering
+	emittedSync bool
+}
+
+// NewReader creates a new trace reader.
+func NewReader(r io.Reader) (*Reader, error) {
+	br := bufio.NewReader(r)
+	v, err := version.ReadHeader(br)
+	if err != nil {
+		return nil, err
+	}
+	if v != version.Go122 {
+		return nil, fmt.Errorf("unknown or unsupported version go 1.%d", v)
+	}
+	return &Reader{
+		r: br,
+		order: ordering{
+			mStates:     make(map[ThreadID]*mState),
+			pStates:     make(map[ProcID]*pState),
+			gStates:     make(map[GoID]*gState),
+			activeTasks: make(map[TaskID]taskState),
+		},
+		// Don't emit a sync event when we first go to emit events.
+		emittedSync: true,
+	}, nil
+}
+
+// ReadEvent reads a single event from the stream.
+//
+// If the stream has been exhausted, it returns an invalid
+// event and io.EOF.
+func (r *Reader) ReadEvent() (e Event, err error) {
+	// Go 1.22+ trace parsing algorithm.
+	//
+	// (1) Read in all the batches for the next generation from the stream.
+	//   (a) Use the size field in the header to quickly find all batches.
+	// (2) Parse out the strings, stacks, CPU samples, and timestamp conversion data.
+	// (3) Group each event batch by M, sorted by timestamp. (batchCursor contains the groups.)
+	// (4) Organize batchCursors in a min-heap, ordered by the timestamp of the next event for each M.
+	// (5) Try to advance the next event for the M at the top of the min-heap.
+	//   (a) On success, select that M.
+	//   (b) On failure, sort the min-heap and try to advance other Ms. Select the first M that advances.
+	//   (c) If there's nothing left to advance, goto (1).
+	// (6) Select the latest event for the selected M and get it ready to be returned.
+	// (7) Read the next event for the selected M and update the min-heap.
+	// (8) Return the selected event, goto (5) on the next call.
+
+	// Set us up to track the last timestamp and fix up
+	// the timestamp of any event that comes through.
+	defer func() {
+		if err != nil {
+			return
+		}
+		if err = e.validateTableIDs(); err != nil {
+			return
+		}
+		if e.base.time <= r.lastTs {
+			e.base.time = r.lastTs + 1
+		}
+		r.lastTs = e.base.time
+	}()
+
+	// Consume any extra events produced during parsing.
+	if ev := r.order.consumeExtraEvent(); ev.Kind() != EventBad {
+		return ev, nil
+	}
+
+	// Check if we need to refresh the generation.
+	if len(r.frontier) == 0 && len(r.cpuSamples) == 0 {
+		if !r.emittedSync {
+			r.emittedSync = true
+			return syncEvent(r.gen.evTable, r.lastTs), nil
+		}
+		if r.gen != nil && r.spill == nil {
+			// If we have a generation from the last read,
+			// and there's nothing left in the frontier, and
+			// there's no spilled batch, indicating that there's
+			// no further generation, it means we're done.
+			// Return io.EOF.
+			return Event{}, io.EOF
+		}
+		// Read the next generation.
+		r.gen, r.spill, err = readGeneration(r.r, r.spill)
+		if err != nil {
+			return Event{}, err
+		}
+
+		// Reset CPU samples cursor.
+		r.cpuSamples = r.gen.cpuSamples
+
+		// Reset frontier.
+		for m, batches := range r.gen.batches {
+			bc := &batchCursor{m: m}
+			ok, err := bc.nextEvent(batches, r.gen.freq)
+			if err != nil {
+				return Event{}, err
+			}
+			if !ok {
+				// Turns out there aren't actually any events in these batches.
+				continue
+			}
+			r.frontier = heapInsert(r.frontier, bc)
+		}
+
+		// Reset emittedSync.
+		r.emittedSync = false
+	}
+	refresh := func(i int) error {
+		bc := r.frontier[i]
+
+		// Refresh the cursor's event.
+		ok, err := bc.nextEvent(r.gen.batches[bc.m], r.gen.freq)
+		if err != nil {
+			return err
+		}
+		if ok {
+			// If we successfully refreshed, update the heap.
+			heapUpdate(r.frontier, i)
+		} else {
+			// There's nothing else to read. Delete this cursor from the frontier.
+			r.frontier = heapRemove(r.frontier, i)
+		}
+		return nil
+	}
+	// Inject a CPU sample if it comes next.
+	if len(r.cpuSamples) != 0 {
+		if len(r.frontier) == 0 || r.cpuSamples[0].time < r.frontier[0].ev.time {
+			e := r.cpuSamples[0].asEvent(r.gen.evTable)
+			r.cpuSamples = r.cpuSamples[1:]
+			return e, nil
+		}
+	}
+	// Try to advance the head of the frontier, which should have the minimum timestamp.
+	// This should be by far the most common case.
+	if len(r.frontier) == 0 {
+		return Event{}, fmt.Errorf("broken trace: frontier is empty:\n[gen=%d]\n\n%s\n%s\n", r.gen.gen, dumpFrontier(r.frontier), dumpOrdering(&r.order))
+	}
+	bc := r.frontier[0]
+	if ctx, ok, err := r.order.advance(&bc.ev, r.gen.evTable, bc.m, r.gen.gen); err != nil {
+		return Event{}, err
+	} else if ok {
+		e := Event{table: r.gen.evTable, ctx: ctx, base: bc.ev}
+		return e, refresh(0)
+	}
+	// Sort the min-heap. A sorted min-heap is still a min-heap,
+	// but now we can iterate over the rest and try to advance in
+	// order. This path should be rare.
+	slices.SortFunc(r.frontier, (*batchCursor).compare)
+	// Try to advance the rest of the frontier, in timestamp order.
+	for i := 1; i < len(r.frontier); i++ {
+		bc := r.frontier[i]
+		if ctx, ok, err := r.order.advance(&bc.ev, r.gen.evTable, bc.m, r.gen.gen); err != nil {
+			return Event{}, err
+		} else if ok {
+			e := Event{table: r.gen.evTable, ctx: ctx, base: bc.ev}
+			return e, refresh(i)
+		}
+	}
+	return Event{}, fmt.Errorf("broken trace: failed to advance: frontier:\n[gen=%d]\n\n%s\n%s\n", r.gen.gen, dumpFrontier(r.frontier), dumpOrdering(&r.order))
+}
+
+func dumpFrontier(frontier []*batchCursor) string {
+	var sb strings.Builder
+	for _, bc := range frontier {
+		spec := go122.Specs()[bc.ev.typ]
+		fmt.Fprintf(&sb, "M %d [%s time=%d", bc.m, spec.Name, bc.ev.time)
+		for i, arg := range spec.Args[1:] {
+			fmt.Fprintf(&sb, " %s=%d", arg, bc.ev.args[i])
+		}
+		fmt.Fprintf(&sb, "]\n")
+	}
+	return sb.String()
+}
diff --git a/src/internal/trace/v2/reader_test.go b/src/internal/trace/v2/reader_test.go
new file mode 100644
index 0000000..393e1c8
--- /dev/null
+++ b/src/internal/trace/v2/reader_test.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace_test
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"internal/trace/v2"
+	"internal/trace/v2/raw"
+	"internal/trace/v2/testtrace"
+	"internal/trace/v2/version"
+)
+
+var (
+	logEvents  = flag.Bool("log-events", false, "whether to log high-level events; significantly slows down tests")
+	dumpTraces = flag.Bool("dump-traces", false, "dump traces even on success")
+)
+
+func TestReaderGolden(t *testing.T) {
+	matches, err := filepath.Glob("./testdata/tests/*.test")
+	if err != nil {
+		t.Fatalf("failed to glob for tests: %v", err)
+	}
+	for _, testPath := range matches {
+		testPath := testPath
+		testName, err := filepath.Rel("./testdata", testPath)
+		if err != nil {
+			t.Fatalf("failed to relativize testdata path: %v", err)
+		}
+		t.Run(testName, func(t *testing.T) {
+			tr, exp, err := testtrace.ParseFile(testPath)
+			if err != nil {
+				t.Fatalf("failed to parse test file at %s: %v", testPath, err)
+			}
+			testReader(t, tr, exp)
+		})
+	}
+}
+
+func FuzzReader(f *testing.F) {
+	// Currently disabled because the parser doesn't do much validation and most
+	// getters can be made to panic. Turn this on once the parser is meant to
+	// reject invalid traces.
+	const testGetters = false
+
+	f.Fuzz(func(t *testing.T, b []byte) {
+		r, err := trace.NewReader(bytes.NewReader(b))
+		if err != nil {
+			return
+		}
+		for {
+			ev, err := r.ReadEvent()
+			if err != nil {
+				break
+			}
+
+			if !testGetters {
+				continue
+			}
+			// Make sure getters don't do anything that panics
+			switch ev.Kind() {
+			case trace.EventLabel:
+				ev.Label()
+			case trace.EventLog:
+				ev.Log()
+			case trace.EventMetric:
+				ev.Metric()
+			case trace.EventRangeActive, trace.EventRangeBegin:
+				ev.Range()
+			case trace.EventRangeEnd:
+				ev.Range()
+				ev.RangeAttributes()
+			case trace.EventStateTransition:
+				ev.StateTransition()
+			case trace.EventRegionBegin, trace.EventRegionEnd:
+				ev.Region()
+			case trace.EventTaskBegin, trace.EventTaskEnd:
+				ev.Task()
+			case trace.EventSync:
+			case trace.EventStackSample:
+			case trace.EventBad:
+			}
+		}
+	})
+}
+
+func testReader(t *testing.T, tr io.Reader, exp *testtrace.Expectation) {
+	r, err := trace.NewReader(tr)
+	if err != nil {
+		if err := exp.Check(err); err != nil {
+			t.Error(err)
+		}
+		return
+	}
+	v := testtrace.NewValidator()
+	for {
+		ev, err := r.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			if err := exp.Check(err); err != nil {
+				t.Error(err)
+			}
+			return
+		}
+		if *logEvents {
+			t.Log(ev.String())
+		}
+		if err := v.Event(ev); err != nil {
+			t.Error(err)
+		}
+	}
+	if err := exp.Check(nil); err != nil {
+		t.Error(err)
+	}
+}
+
+func dumpTraceToText(t *testing.T, b []byte) string {
+	t.Helper()
+
+	br, err := raw.NewReader(bytes.NewReader(b))
+	if err != nil {
+		t.Fatalf("dumping trace: %v", err)
+	}
+	var sb strings.Builder
+	tw, err := raw.NewTextWriter(&sb, version.Go122)
+	if err != nil {
+		t.Fatalf("dumping trace: %v", err)
+	}
+	for {
+		ev, err := br.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatalf("dumping trace: %v", err)
+		}
+		if err := tw.WriteEvent(ev); err != nil {
+			t.Fatalf("dumping trace: %v", err)
+		}
+	}
+	return sb.String()
+}
+
+func dumpTraceToFile(t *testing.T, testName string, stress bool, b []byte) string {
+	t.Helper()
+
+	desc := "default"
+	if stress {
+		desc = "stress"
+	}
+	name := fmt.Sprintf("%s.%s.trace.", testName, desc)
+	f, err := os.CreateTemp("", name)
+	if err != nil {
+		t.Fatalf("creating temp file: %v", err)
+	}
+	defer f.Close()
+	if _, err := io.Copy(f, bytes.NewReader(b)); err != nil {
+		t.Fatalf("writing trace dump to %q: %v", f.Name(), err)
+	}
+	return f.Name()
+}
diff --git a/src/internal/trace/v2/resources.go b/src/internal/trace/v2/resources.go
new file mode 100644
index 0000000..f49696f
--- /dev/null
+++ b/src/internal/trace/v2/resources.go
@@ -0,0 +1,274 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import "fmt"
+
+// ThreadID is the runtime-internal M structure's ID. This is unique
+// for each OS thread.
+type ThreadID int64
+
+// NoThread indicates that the relevant events don't correspond to any
+// thread in particular.
+const NoThread = ThreadID(-1)
+
+// ProcID is the runtime-internal P structure's id field. This is unique
+// for each P.
+type ProcID int64
+
+// NoProc indicates that the relevant events don't correspond to any
+// P in particular.
+const NoProc = ProcID(-1)
+
+// GoID is the runtime-internal G structure's goid field. This is unique
+// for each goroutine.
+type GoID int64
+
+// NoGoroutine indicates that the relevant events don't correspond to any
+// goroutine in particular.
+const NoGoroutine = GoID(-1)
+
+// GoState represents the state of a goroutine.
+//
+// New GoStates may be added in the future. Users of this type must be robust
+// to that possibility.
+type GoState uint8
+
+const (
+	GoUndetermined GoState = iota // No information is known about the goroutine.
+	GoNotExist                    // Goroutine does not exist.
+	GoRunnable                    // Goroutine is runnable but not running.
+	GoRunning                     // Goroutine is running.
+	GoWaiting                     // Goroutine is waiting on something to happen.
+	GoSyscall                     // Goroutine is in a system call.
+)
+
+// Executing returns true if the state indicates that the goroutine is executing
+// and bound to its thread.
+func (s GoState) Executing() bool {
+	return s == GoRunning || s == GoSyscall
+}
+
+// String returns a human-readable representation of a GoState.
+//
+// The format of the returned string is for debugging purposes and is subject to change.
+func (s GoState) String() string {
+	switch s {
+	case GoUndetermined:
+		return "Undetermined"
+	case GoNotExist:
+		return "NotExist"
+	case GoRunnable:
+		return "Runnable"
+	case GoRunning:
+		return "Running"
+	case GoWaiting:
+		return "Waiting"
+	case GoSyscall:
+		return "Syscall"
+	}
+	return "Bad"
+}
+
+// ProcState represents the state of a proc.
+//
+// New ProcStates may be added in the future. Users of this type must be robust
+// to that possibility.
+type ProcState uint8
+
+const (
+	ProcUndetermined ProcState = iota // No information is known about the proc.
+	ProcNotExist                      // Proc does not exist.
+	ProcRunning                       // Proc is running.
+	ProcIdle                          // Proc is idle.
+)
+
+// Executing returns true if the state indicates that the proc is executing
+// and bound to its thread.
+func (s ProcState) Executing() bool {
+	return s == ProcRunning
+}
+
+// String returns a human-readable representation of a ProcState.
+//
+// The format of the returned string is for debugging purposes and is subject to change.
+func (s ProcState) String() string {
+	switch s {
+	case ProcUndetermined:
+		return "Undetermined"
+	case ProcNotExist:
+		return "NotExist"
+	case ProcRunning:
+		return "Running"
+	case ProcIdle:
+		return "Idle"
+	}
+	return "Bad"
+}
+
+// ResourceKind indicates a kind of resource that has a state machine.
+//
+// New ResourceKinds may be added in the future. Users of this type must be robust
+// to that possibility.
+type ResourceKind uint8
+
+const (
+	ResourceNone      ResourceKind = iota // No resource.
+	ResourceGoroutine                     // Goroutine.
+	ResourceProc                          // Proc.
+	ResourceThread                        // Thread.
+)
+
+// String returns a human-readable representation of a ResourceKind.
+//
+// The format of the returned string is for debugging purposes and is subject to change.
+func (r ResourceKind) String() string {
+	switch r {
+	case ResourceNone:
+		return "None"
+	case ResourceGoroutine:
+		return "Goroutine"
+	case ResourceProc:
+		return "Proc"
+	case ResourceThread:
+		return "Thread"
+	}
+	return "Bad"
+}
+
+// ResourceID represents a generic resource ID.
+type ResourceID struct {
+	// Kind is the kind of resource this ID is for.
+	Kind ResourceKind
+	id   int64
+}
+
+// MakeResourceID creates a general resource ID from a specific resource's ID.
+func MakeResourceID[T interface{ GoID | ProcID | ThreadID }](id T) ResourceID {
+	var rd ResourceID
+	var a any = id
+	switch a.(type) {
+	case GoID:
+		rd.Kind = ResourceGoroutine
+	case ProcID:
+		rd.Kind = ResourceProc
+	case ThreadID:
+		rd.Kind = ResourceThread
+	}
+	rd.id = int64(id)
+	return rd
+}
+
+// Goroutine obtains a GoID from the resource ID.
+//
+// r.Kind must be ResourceGoroutine or this function will panic.
+func (r ResourceID) Goroutine() GoID {
+	if r.Kind != ResourceGoroutine {
+		panic(fmt.Sprintf("attempted to get GoID from %s resource ID", r.Kind))
+	}
+	return GoID(r.id)
+}
+
+// Proc obtains a ProcID from the resource ID.
+//
+// r.Kind must be ResourceProc or this function will panic.
+func (r ResourceID) Proc() ProcID {
+	if r.Kind != ResourceProc {
+		panic(fmt.Sprintf("attempted to get ProcID from %s resource ID", r.Kind))
+	}
+	return ProcID(r.id)
+}
+
+// Thread obtains a ThreadID from the resource ID.
+//
+// r.Kind must be ResourceThread or this function will panic.
+func (r ResourceID) Thread() ThreadID {
+	if r.Kind != ResourceThread {
+		panic(fmt.Sprintf("attempted to get ThreadID from %s resource ID", r.Kind))
+	}
+	return ThreadID(r.id)
+}
+
+// String returns a human-readable string representation of the ResourceID.
+//
+// This representation is subject to change and is intended primarily for debugging.
+func (r ResourceID) String() string {
+	if r.Kind == ResourceNone {
+		return r.Kind.String()
+	}
+	return fmt.Sprintf("%s(%d)", r.Kind, r.id)
+}
+
+// StateTransition provides details about a StateTransition event.
+type StateTransition struct {
+	// Resource is the resource this state transition is for.
+	Resource ResourceID
+
+	// Reason is a human-readable reason for the state transition.
+	Reason string
+
+	// Stack is the stack trace of the resource making the state transition.
+	//
+	// This is distinct from the result (Event).Stack because it pertains to
+	// the transitioning resource, not any of the ones executing the event
+	// this StateTransition came from.
+	//
+	// An example of this difference is the NotExist -> Runnable transition for
+	// goroutines, which indicates goroutine creation. In this particular case,
+	// a Stack here would refer to the starting stack of the new goroutine, and
+	// an (Event).Stack would refer to the stack trace of whoever created the
+	// goroutine.
+	Stack Stack
+
+	// The actual transition data. Stored in a neutral form so that
+	// we don't need fields for every kind of resource.
+	id       int64
+	oldState uint8
+	newState uint8
+}
+
+func goStateTransition(id GoID, from, to GoState) StateTransition {
+	return StateTransition{
+		Resource: ResourceID{Kind: ResourceGoroutine, id: int64(id)},
+		oldState: uint8(from),
+		newState: uint8(to),
+	}
+}
+
+func procStateTransition(id ProcID, from, to ProcState) StateTransition {
+	return StateTransition{
+		Resource: ResourceID{Kind: ResourceProc, id: int64(id)},
+		oldState: uint8(from),
+		newState: uint8(to),
+	}
+}
+
+// Goroutine returns the state transition for a goroutine.
+//
+// Transitions to and from states that are Executing are special in that
+// they change the future execution context. In other words, future events
+// on the same thread will feature the same goroutine until it stops running.
+//
+// Panics if d.Resource.Kind is not ResourceGoroutine.
+func (d StateTransition) Goroutine() (from, to GoState) {
+	if d.Resource.Kind != ResourceGoroutine {
+		panic("Goroutine called on non-Goroutine state transition")
+	}
+	return GoState(d.oldState), GoState(d.newState)
+}
+
+// Proc returns the state transition for a proc.
+//
+// Transitions to and from states that are Executing are special in that
+// they change the future execution context. In other words, future events
+// on the same thread will feature the same proc until it stops running.
+//
+// Panics if d.Resource.Kind is not ResourceProc.
+func (d StateTransition) Proc() (from, to ProcState) {
+	if d.Resource.Kind != ResourceProc {
+		panic("Proc called on non-Proc state transition")
+	}
+	return ProcState(d.oldState), ProcState(d.newState)
+}
diff --git a/src/internal/trace/v2/testdata/README.md b/src/internal/trace/v2/testdata/README.md
new file mode 100644
index 0000000..0fae9ca
--- /dev/null
+++ b/src/internal/trace/v2/testdata/README.md
@@ -0,0 +1,38 @@
+# Trace test data
+
+## Trace golden tests
+
+Trace tests can be generated by running
+
+```
+go generate .
+```
+
+with the relevant toolchain in this directory.
+
+This will put the tests into a `tests` directory where the trace reader
+tests will find them.
+
+A subset of tests can be regenerated by specifying a regexp pattern for
+the names of tests to generate in the `GOTRACETEST` environment
+variable.
+Test names are defined as the name of the `.go` file that generates the
+trace, but with the `.go` extension removed.
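+
+For example, to regenerate only the proc-stealing tests, something like the
+following should work (the pattern is illustrative; any regexp matching test
+names does):
+
+```
+GOTRACETEST='go122-syscall-steal-proc.*' go generate .
+```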
+
+## Trace test programs
+
+The trace test programs in the `testprog` directory generate traces to
+stdout.
+Otherwise they're just normal programs.
+
+## Trace debug commands
+
+The `cmd` directory contains helpful tools for debugging traces.
+
+* `gotraceraw` parses traces without validation.
+  It can produce a text version of the trace wire format, or convert
+  the text format back into bytes.
+* `gotracevalidate` parses traces and validates them.
+  It performs more rigorous checks than the parser does on its own,
+  which helps for debugging the parser as well.
+  In fact, it performs the exact same checks that the tests do.
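+
+For example, assuming a raw trace saved as `trace.bin` (the file names are
+illustrative) and that the commands are run from this directory inside the
+Go source tree:
+
+```
+# Convert a binary trace to the text format and back.
+go run ./cmd/gotraceraw bytes2text < trace.bin > trace.txt
+go run ./cmd/gotraceraw text2bytes < trace.txt > trace2.bin
+
+# Validate a binary trace.
+go run ./cmd/gotracevalidate < trace.bin
+```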
diff --git a/src/internal/trace/v2/testdata/cmd/gotraceraw/main.go b/src/internal/trace/v2/testdata/cmd/gotraceraw/main.go
new file mode 100644
index 0000000..a0d595d
--- /dev/null
+++ b/src/internal/trace/v2/testdata/cmd/gotraceraw/main.go
@@ -0,0 +1,88 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"os"
+
+	"internal/trace/v2/raw"
+	"internal/trace/v2/version"
+)
+
+func init() {
+	flag.Usage = func() {
+		fmt.Fprintf(flag.CommandLine.Output(), "Usage: %s [mode]\n", os.Args[0])
+		fmt.Fprintf(flag.CommandLine.Output(), "\n")
+		fmt.Fprintf(flag.CommandLine.Output(), "Supported modes:")
+		fmt.Fprintf(flag.CommandLine.Output(), "\n")
+		fmt.Fprintf(flag.CommandLine.Output(), "* text2bytes - converts a text format trace to bytes\n")
+		fmt.Fprintf(flag.CommandLine.Output(), "* bytes2text - converts a byte format trace to text\n")
+		fmt.Fprintf(flag.CommandLine.Output(), "\n")
+		flag.PrintDefaults()
+	}
+	log.SetFlags(0)
+}
+
+func main() {
+	flag.Parse()
+	if narg := flag.NArg(); narg != 1 {
+		log.Fatal("expected exactly one positional argument: the mode to operate in; see -h output")
+	}
+
+	r := os.Stdin
+	w := os.Stdout
+
+	var tr traceReader
+	var tw traceWriter
+	var err error
+
+	switch flag.Arg(0) {
+	case "text2bytes":
+		tr, err = raw.NewTextReader(r)
+		if err != nil {
+			log.Fatal(err)
+		}
+		tw, err = raw.NewWriter(w, tr.Version())
+		if err != nil {
+			log.Fatal(err)
+		}
+	case "bytes2text":
+		tr, err = raw.NewReader(r)
+		if err != nil {
+			log.Fatal(err)
+		}
+		tw, err = raw.NewTextWriter(w, tr.Version())
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	for {
+		ev, err := tr.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			log.Fatal(err)
+			break
+		}
+		if err := tw.WriteEvent(ev); err != nil {
+			log.Fatal(err)
+			break
+		}
+	}
+}
+
+type traceReader interface {
+	Version() version.Version
+	ReadEvent() (raw.Event, error)
+}
+
+type traceWriter interface {
+	WriteEvent(raw.Event) error
+}
diff --git a/src/internal/trace/v2/testdata/cmd/gotracevalidate/main.go b/src/internal/trace/v2/testdata/cmd/gotracevalidate/main.go
new file mode 100644
index 0000000..944d19f
--- /dev/null
+++ b/src/internal/trace/v2/testdata/cmd/gotracevalidate/main.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"os"
+
+	"internal/trace/v2"
+	"internal/trace/v2/testtrace"
+)
+
+func init() {
+	flag.Usage = func() {
+		fmt.Fprintf(flag.CommandLine.Output(), "Usage: %s\n", os.Args[0])
+		fmt.Fprintf(flag.CommandLine.Output(), "\n")
+		fmt.Fprintf(flag.CommandLine.Output(), "Accepts a trace at stdin and validates it.\n")
+		flag.PrintDefaults()
+	}
+	log.SetFlags(0)
+}
+
+var logEvents = flag.Bool("log-events", false, "whether to log events")
+
+func main() {
+	flag.Parse()
+
+	r, err := trace.NewReader(os.Stdin)
+	if err != nil {
+		log.Fatal(err)
+	}
+	v := testtrace.NewValidator()
+	for {
+		ev, err := r.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			log.Fatal(err)
+		}
+		if *logEvents {
+			log.Println(ev.String())
+		}
+		if err := v.Event(ev); err != nil {
+			log.Fatal(err)
+		}
+	}
+}
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/0cb1786dee0f090b b/src/internal/trace/v2/testdata/fuzz/FuzzReader/0cb1786dee0f090b
new file mode 100644
index 0000000..326ebe1
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/0cb1786dee0f090b
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00\x190000\x01\x0100\x88\x00\b0000000")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/1e45307d5b2ec36d b/src/internal/trace/v2/testdata/fuzz/FuzzReader/1e45307d5b2ec36d
new file mode 100644
index 0000000..406af9c
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/1e45307d5b2ec36d
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01000\x85\x00\b0001")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b05796f9b2fc48d b/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b05796f9b2fc48d
new file mode 100644
index 0000000..50fdccd
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b05796f9b2fc48d
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00-0000\x01\x0100\x88\x00\b0000000")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b9be9aebe08d511 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b9be9aebe08d511
new file mode 100644
index 0000000..6bcb99a
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b9be9aebe08d511
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00\x0f00\x120\x01\x0100\x88\x00\b0000000")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/344331b314da0b08 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/344331b314da0b08
new file mode 100644
index 0000000..de6e469
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/344331b314da0b08
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00\b0000\x01\x01\xff00\xb8\x00\x1900\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x04\x1900\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x04\x1900\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x04\x1901\xff\xff\xff\xff\xff\xff\xff\xff0\x800")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/365d7b5b633b3f97 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/365d7b5b633b3f97
new file mode 100644
index 0000000..8dc370f
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/365d7b5b633b3f97
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x0100\x8c0\x85\x00\b0000")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/4d9ddc909984e871 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/4d9ddc909984e871
new file mode 100644
index 0000000..040b2a4
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/4d9ddc909984e871
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x11\r\xa700\x01\x19000\x02$000000\x01\x0100\x05\b0000\x01\x0110\x11\r\xa700\x01\x19 00\x02\x110 0000")
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/56f073e57903588c b/src/internal/trace/v2/testdata/fuzz/FuzzReader/56f073e57903588c
new file mode 100644
index 0000000..d34fe3f
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/56f073e57903588c
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00\x1f0000\x01\x0100\x88\x00\b0000000")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/9d6ee7d3ddf8d566 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/9d6ee7d3ddf8d566
new file mode 100644
index 0000000..5677261
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/9d6ee7d3ddf8d566
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x11\r\xa700\x01\x19000\x02#000000\x01\x0100\x05\b0000\x01\x0110\x11\r\xa700\x01\x19 00\x02\x110 0000")
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/aeb749b6bc317b66 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/aeb749b6bc317b66
new file mode 100644
index 0000000..f93b5a9
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/aeb749b6bc317b66
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01000\x85\x00\b0000")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/closing-unknown-region b/src/internal/trace/v2/testdata/fuzz/FuzzReader/closing-unknown-region
new file mode 100644
index 0000000..7433214
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/closing-unknown-region
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x87ߕ\xb4\x99\xb2\x06\x05\b\xa8ֹ\a\x01\x01\xf6\x9f\n\x9fÕ\xb4\x99\xb2\x06\x11\r\xa7\x02\x00\x01\x19\x05\x01\xf6\x9f\n\x02+\x04\x01\x00\x00")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/d478e18d2d6756b7 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/d478e18d2d6756b7
new file mode 100644
index 0000000..3e5fda8
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/d478e18d2d6756b7
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00\"0000\x01\x0100\x88\x00\b0000000")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/d91203cd397aa0bc b/src/internal/trace/v2/testdata/fuzz/FuzzReader/d91203cd397aa0bc
new file mode 100644
index 0000000..d24b94a
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/d91203cd397aa0bc
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01001\x85\x00\b0000")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/invalid-proc-state b/src/internal/trace/v2/testdata/fuzz/FuzzReader/invalid-proc-state
new file mode 100644
index 0000000..e5d3258
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/invalid-proc-state
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x87ߕ\xb4\x99\xb2\x06\x05\b\xa8ֹ\a\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x94镴\x99\xb2\x06\x05\r\xa7\x02\x00E")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/large-id b/src/internal/trace/v2/testdata/fuzz/FuzzReader/large-id
new file mode 100644
index 0000000..0fb6273
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/large-id
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x87ߕ\xb4\x99\xb2\x06\x05\b\xa8ֹ\a\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x94镴\x99\xb2\x06\f\x02\x03\xff\xff\xff\xff\xff\xff\xff\x9f\x1d\x00")
\ No newline at end of file
diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/malformed-timestamp b/src/internal/trace/v2/testdata/fuzz/FuzzReader/malformed-timestamp
new file mode 100644
index 0000000..850ca50
--- /dev/null
+++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/malformed-timestamp
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("go 1.22 trace\x00\x00\x00\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x87ߕ\xb4\x99\xb2\x06\x05\b\xa8ֹ\a\x01\x01\xfa\x9f\n\xa5ѕ\xb4\x99\xb2\x06\x0e\n\x97\x96\x96\x96\x96\x96\x96\x96\x96\x96\x01\x01\x01")
diff --git a/src/internal/trace/v2/testdata/generate.go b/src/internal/trace/v2/testdata/generate.go
new file mode 100644
index 0000000..c0658b2
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generate.go
@@ -0,0 +1,6 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mktests.go
+package testdata
diff --git a/src/internal/trace/v2/testdata/generators/go122-confuse-seq-across-generations.go b/src/internal/trace/v2/testdata/generators/go122-confuse-seq-across-generations.go
new file mode 100644
index 0000000..f618c41
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-confuse-seq-across-generations.go
@@ -0,0 +1,62 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Regression test for an issue found in development.
+//
+// The core of the issue is that if generation counters
+// aren't considered as part of sequence numbers, then
+// it's possible to accidentally advance without a
+// GoStatus event.
+//
+// The situation is one in which it just so happens that
+// an event on the frontier for a following generation
+// has a sequence number exactly one higher than the last
+// sequence number for e.g. a goroutine in the previous
+// generation. The parser should wait to find a GoStatus
+// event before advancing into the next generation at all.
+// It turns out this situation is pretty rare; the GoStatus
+// event almost always shows up first in practice. But it
+// can and did happen.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g1 := t.Generation(1)
+
+	// A running goroutine blocks.
+	b10 := g1.Batch(trace.ThreadID(0), 0)
+	b10.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b10.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning)
+	b10.Event("GoStop", "whatever", testgen.NoStack)
+
+	// The running goroutine gets unblocked.
+	b11 := g1.Batch(trace.ThreadID(1), 0)
+	b11.Event("ProcStatus", trace.ProcID(1), go122.ProcRunning)
+	b11.Event("GoStart", trace.GoID(1), testgen.Seq(1))
+	b11.Event("GoStop", "whatever", testgen.NoStack)
+
+	g2 := t.Generation(2)
+
+	// Start running the goroutine, but later.
+	b21 := g2.Batch(trace.ThreadID(1), 3)
+	b21.Event("ProcStatus", trace.ProcID(1), go122.ProcRunning)
+	b21.Event("GoStart", trace.GoID(1), testgen.Seq(2))
+
+	// The goroutine starts running, then stops, then starts again.
+	b20 := g2.Batch(trace.ThreadID(0), 5)
+	b20.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b20.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunnable)
+	b20.Event("GoStart", trace.GoID(1), testgen.Seq(1))
+	b20.Event("GoStop", "whatever", testgen.NoStack)
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-create-syscall-reuse-thread-id.go b/src/internal/trace/v2/testdata/generators/go122-create-syscall-reuse-thread-id.go
new file mode 100644
index 0000000..107cce2
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-create-syscall-reuse-thread-id.go
@@ -0,0 +1,61 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests a G being created from within a syscall.
+//
+// Specifically, it tests a scenario wherein a C
+// thread is calling into Go, creating a goroutine in
+// a syscall (in the tracer's model). The system is free
+// to reuse thread IDs, so first a thread ID is used to
+// call into Go, and is then used for a Go-created thread.
+//
+// This is a regression test. The trace parser didn't correctly
+// model GoDestroySyscall as dropping its P (even if the runtime
+// did). It turns out this is actually fine if all the threads
+// in the trace have unique IDs, since the P just stays associated
+// with an eternally dead thread, and it's stolen by some other
+// thread later. But if thread IDs are reused, then the tracer
+// gets confused when trying to advance events on the new thread.
+// The now-dead thread which exited on a GoDestroySyscall still has
+// its P associated and this transfers to the newly-live thread
+// in the parser's state because they share a thread ID.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g := t.Generation(1)
+
+	// A C thread calls into Go and acquires a P. It returns
+	// back to C, destroying the G.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("GoCreateSyscall", trace.GoID(4))
+	b0.Event("GoSyscallEndBlocked")
+	b0.Event("ProcStatus", trace.ProcID(0), go122.ProcIdle)
+	b0.Event("ProcStart", trace.ProcID(0), testgen.Seq(1))
+	b0.Event("GoStatus", trace.GoID(4), trace.NoThread, go122.GoRunnable)
+	b0.Event("GoStart", trace.GoID(4), testgen.Seq(1))
+	b0.Event("GoSyscallBegin", testgen.Seq(2), testgen.NoStack)
+	b0.Event("GoDestroySyscall")
+
+	// A new Go-created thread with the same ID appears and
+	// starts running, then tries to steal the P from the
+	// first thread. The stealing is interesting because if
+	// the parser handles GoDestroySyscall wrong, then we
+	// potentially have a self-steal here that doesn't make
+	// sense.
+	b1 := g.Batch(trace.ThreadID(0), 0)
+	b1.Event("ProcStatus", trace.ProcID(1), go122.ProcIdle)
+	b1.Event("ProcStart", trace.ProcID(1), testgen.Seq(1))
+	b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(3), trace.ThreadID(0))
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-create-syscall-with-p.go b/src/internal/trace/v2/testdata/generators/go122-create-syscall-with-p.go
new file mode 100644
index 0000000..4cb1c4a
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-create-syscall-with-p.go
@@ -0,0 +1,52 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests a G being created from within a syscall.
+//
+// Specifically, it tests a scenario wherein a C
+// thread is calling into Go, creating a goroutine in
+// a syscall (in the tracer's model). Because the actual
+// m can be reused, it's possible for that m to have never
+// had its P (in _Psyscall) stolen if the runtime doesn't
+// model the scenario correctly. Make sure we reject such
+// traces.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	t.ExpectFailure(".*expected a proc but didn't have one.*")
+
+	g := t.Generation(1)
+
+	// A C thread calls into Go and acquires a P. It returns
+	// back to C, destroying the G. It then comes back to Go
+	// on the same thread and again returns to C.
+	//
+	// Note: on pthread platforms this can't happen on the
+	// same thread because the m is stashed in TLS between
+	// calls into Go, until the thread dies. This is still
+	// possible on other platforms, however.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("GoCreateSyscall", trace.GoID(4))
+	b0.Event("ProcStatus", trace.ProcID(0), go122.ProcIdle)
+	b0.Event("ProcStart", trace.ProcID(0), testgen.Seq(1))
+	b0.Event("GoSyscallEndBlocked")
+	b0.Event("GoStart", trace.GoID(4), testgen.Seq(1))
+	b0.Event("GoSyscallBegin", testgen.Seq(2), testgen.NoStack)
+	b0.Event("GoDestroySyscall")
+	b0.Event("GoCreateSyscall", trace.GoID(4))
+	b0.Event("GoSyscallEnd")
+	b0.Event("GoSyscallBegin", testgen.Seq(3), testgen.NoStack)
+	b0.Event("GoDestroySyscall")
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-go-create-without-running-g.go b/src/internal/trace/v2/testdata/generators/go122-go-create-without-running-g.go
new file mode 100644
index 0000000..b693245
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-go-create-without-running-g.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Regression test for an issue found in development.
+//
+// GoCreate events can happen on bare Ps in a variety of situations, and
+// an earlier version of the parser assumed this wasn't possible. At
+// the time of writing, one such example is goroutines created by expiring
+// timers.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g1 := t.Generation(1)
+
+	// A goroutine gets created on a running P, then starts running.
+	b0 := g1.Batch(trace.ThreadID(0), 0)
+	b0.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b0.Event("GoCreate", trace.GoID(5), testgen.NoStack, testgen.NoStack)
+	b0.Event("GoStart", trace.GoID(5), testgen.Seq(1))
+	b0.Event("GoStop", "whatever", testgen.NoStack)
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-ambiguous.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-ambiguous.go
new file mode 100644
index 0000000..349a575
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-ambiguous.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing.
+//
+// Specifically, it tests a scenario wherein, without a
+// P sequence number on GoSyscallBegin, the syscall that
+// a ProcSteal applies to is ambiguous. This only happens in
+// practice when the events aren't already properly ordered
+// by timestamp, since the ProcSteal won't be seen until after
+// the correct GoSyscallBegin appears on the frontier.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	t.DisableTimestamps()
+
+	g := t.Generation(1)
+
+	// One goroutine does a syscall without blocking, then another one where
+	// its P gets stolen.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning)
+	b0.Event("GoSyscallBegin", testgen.Seq(1), testgen.NoStack)
+	b0.Event("GoSyscallEnd")
+	b0.Event("GoSyscallBegin", testgen.Seq(2), testgen.NoStack)
+	b0.Event("GoSyscallEndBlocked")
+
+	// A running goroutine steals proc 0.
+	b1 := g.Batch(trace.ThreadID(1), 0)
+	b1.Event("ProcStatus", trace.ProcID(2), go122.ProcRunning)
+	b1.Event("GoStatus", trace.GoID(2), trace.ThreadID(1), go122.GoRunning)
+	b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(3), trace.ThreadID(0))
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary-bare-m.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary-bare-m.go
new file mode 100644
index 0000000..f4c9f6e
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary-bare-m.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing at a generation boundary.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g := t.Generation(1)
+
+	// One goroutine is exiting with a syscall. It already
+	// acquired a new P.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("ProcStatus", trace.ProcID(1), go122.ProcRunning)
+	b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoSyscall)
+	b0.Event("GoSyscallEndBlocked")
+
+	// A bare M stole the goroutine's P at the generation boundary.
+	b1 := g.Batch(trace.ThreadID(1), 0)
+	b1.Event("ProcStatus", trace.ProcID(0), go122.ProcSyscallAbandoned)
+	b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(1), trace.ThreadID(0))
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc-bare-m.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc-bare-m.go
new file mode 100644
index 0000000..e6023ba
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc-bare-m.go
@@ -0,0 +1,34 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing at a generation boundary.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g := t.Generation(1)
+
+	// One goroutine is exiting with a syscall. It already
+	// acquired a new P.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoSyscall)
+	b0.Event("ProcStatus", trace.ProcID(1), go122.ProcIdle)
+	b0.Event("ProcStart", trace.ProcID(1), testgen.Seq(1))
+	b0.Event("GoSyscallEndBlocked")
+
+	// A bare M stole the goroutine's P at the generation boundary.
+	b1 := g.Batch(trace.ThreadID(1), 0)
+	b1.Event("ProcStatus", trace.ProcID(0), go122.ProcSyscallAbandoned)
+	b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(1), trace.ThreadID(0))
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc.go
new file mode 100644
index 0000000..2232dca
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc.go
@@ -0,0 +1,36 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing at a generation boundary.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g := t.Generation(1)
+
+	// One goroutine is exiting with a syscall. It already
+	// acquired a new P.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoSyscall)
+	b0.Event("ProcStatus", trace.ProcID(1), go122.ProcIdle)
+	b0.Event("ProcStart", trace.ProcID(1), testgen.Seq(1))
+	b0.Event("GoSyscallEndBlocked")
+
+	// A running goroutine stole P0 at the generation boundary.
+	b1 := g.Batch(trace.ThreadID(1), 0)
+	b1.Event("ProcStatus", trace.ProcID(2), go122.ProcRunning)
+	b1.Event("GoStatus", trace.GoID(2), trace.ThreadID(1), go122.GoRunning)
+	b1.Event("ProcStatus", trace.ProcID(0), go122.ProcSyscallAbandoned)
+	b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(1), trace.ThreadID(0))
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary.go
new file mode 100644
index 0000000..710827a
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-gen-boundary.go
@@ -0,0 +1,35 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing at a generation boundary.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g := t.Generation(1)
+
+	// One goroutine is exiting with a syscall. It already
+	// acquired a new P.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("ProcStatus", trace.ProcID(1), go122.ProcRunning)
+	b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoSyscall)
+	b0.Event("GoSyscallEndBlocked")
+
+	// A running goroutine stole P0 at the generation boundary.
+	b1 := g.Batch(trace.ThreadID(1), 0)
+	b1.Event("ProcStatus", trace.ProcID(2), go122.ProcRunning)
+	b1.Event("GoStatus", trace.GoID(2), trace.ThreadID(1), go122.GoRunning)
+	b1.Event("ProcStatus", trace.ProcID(0), go122.ProcSyscallAbandoned)
+	b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(1), trace.ThreadID(0))
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-reacquire-new-proc-bare-m.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-reacquire-new-proc-bare-m.go
new file mode 100644
index 0000000..24e5cb2
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-reacquire-new-proc-bare-m.go
@@ -0,0 +1,34 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g := t.Generation(1)
+
+	// One goroutine enters a syscall, grabs a P, and starts running.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("ProcStatus", trace.ProcID(1), go122.ProcIdle)
+	b0.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning)
+	b0.Event("GoSyscallBegin", testgen.Seq(1), testgen.NoStack)
+	b0.Event("ProcStart", trace.ProcID(1), testgen.Seq(1))
+	b0.Event("GoSyscallEndBlocked")
+
+	// A bare M steals the goroutine's P.
+	b1 := g.Batch(trace.ThreadID(1), 0)
+	b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(2), trace.ThreadID(0))
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-reacquire-new-proc.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-reacquire-new-proc.go
new file mode 100644
index 0000000..2caefe8
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-reacquire-new-proc.go
@@ -0,0 +1,36 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g := t.Generation(1)
+
+	// One goroutine enters a syscall, grabs a P, and starts running.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("ProcStatus", trace.ProcID(1), go122.ProcIdle)
+	b0.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning)
+	b0.Event("GoSyscallBegin", testgen.Seq(1), testgen.NoStack)
+	b0.Event("ProcStart", trace.ProcID(1), testgen.Seq(1))
+	b0.Event("GoSyscallEndBlocked")
+
+	// A running goroutine steals proc 0.
+	b1 := g.Batch(trace.ThreadID(1), 0)
+	b1.Event("ProcStatus", trace.ProcID(2), go122.ProcRunning)
+	b1.Event("GoStatus", trace.GoID(2), trace.ThreadID(1), go122.GoRunning)
+	b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(2), trace.ThreadID(0))
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-self.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-self.go
new file mode 100644
index 0000000..dd94734
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-self.go
@@ -0,0 +1,37 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing.
+//
+// Specifically, it tests a scenario where a thread 'steals'
+// a P from itself. It's just a ProcStop with extra steps when
+// it happens on the same P.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	t.DisableTimestamps()
+
+	g := t.Generation(1)
+
+	// A goroutine executes a syscall and steals its own P, then starts running
+	// on that P.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning)
+	b0.Event("GoSyscallBegin", testgen.Seq(1), testgen.NoStack)
+	b0.Event("ProcSteal", trace.ProcID(0), testgen.Seq(2), trace.ThreadID(0))
+	b0.Event("ProcStart", trace.ProcID(0), testgen.Seq(3))
+	b0.Event("GoSyscallEndBlocked")
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-simple-bare-m.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-simple-bare-m.go
new file mode 100644
index 0000000..630eba8
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-simple-bare-m.go
@@ -0,0 +1,32 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g := t.Generation(1)
+
+	// One goroutine enters a syscall.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning)
+	b0.Event("GoSyscallBegin", testgen.Seq(1), testgen.NoStack)
+	b0.Event("GoSyscallEndBlocked")
+
+	// A bare M steals the goroutine's P.
+	b1 := g.Batch(trace.ThreadID(1), 0)
+	b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(2), trace.ThreadID(0))
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-simple.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-simple.go
new file mode 100644
index 0000000..54b43f4
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-simple.go
@@ -0,0 +1,34 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g := t.Generation(1)
+
+	// One goroutine enters a syscall.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning)
+	b0.Event("GoSyscallBegin", testgen.Seq(1), testgen.NoStack)
+	b0.Event("GoSyscallEndBlocked")
+
+	// A running goroutine steals proc 0.
+	b1 := g.Batch(trace.ThreadID(1), 0)
+	b1.Event("ProcStatus", trace.ProcID(2), go122.ProcRunning)
+	b1.Event("GoStatus", trace.GoID(2), trace.ThreadID(1), go122.GoRunning)
+	b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(2), trace.ThreadID(0))
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-sitting-in-syscall.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-sitting-in-syscall.go
new file mode 100644
index 0000000..870f8f6
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-sitting-in-syscall.go
@@ -0,0 +1,32 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests syscall P stealing from a goroutine and thread
+// that have been in a syscall the entire generation.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g := t.Generation(1)
+
+	// Steal proc from a goroutine that's been blocked
+	// in a syscall the entire generation.
+	b0 := g.Batch(trace.ThreadID(0), 0)
+	b0.Event("ProcStatus", trace.ProcID(0), go122.ProcSyscallAbandoned)
+	b0.Event("ProcSteal", trace.ProcID(0), testgen.Seq(1), trace.ThreadID(1))
+
+	// Status event for a goroutine blocked in a syscall for the entire generation.
+	bz := g.Batch(trace.NoThread, 0)
+	bz.Event("GoStatus", trace.GoID(1), trace.ThreadID(1), go122.GoSyscall)
+}
diff --git a/src/internal/trace/v2/testdata/generators/go122-task-across-generations.go b/src/internal/trace/v2/testdata/generators/go122-task-across-generations.go
new file mode 100644
index 0000000..06ef96e
--- /dev/null
+++ b/src/internal/trace/v2/testdata/generators/go122-task-across-generations.go
@@ -0,0 +1,41 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Regression test for an issue found in development.
+//
+// The issue is that EvUserTaskEnd events don't carry the
+// task type with them, so the parser needs to track that
+// information. But if the parser just tracks the string ID
+// and not the string itself, that string ID may not be valid
+// for use in future generations.
+
+package main
+
+import (
+	"internal/trace/v2"
+	"internal/trace/v2/event/go122"
+	testgen "internal/trace/v2/internal/testgen/go122"
+)
+
+func main() {
+	testgen.Main(gen)
+}
+
+func gen(t *testgen.Trace) {
+	g1 := t.Generation(1)
+
+	// A running goroutine emits a task begin.
+	b1 := g1.Batch(trace.ThreadID(0), 0)
+	b1.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b1.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning)
+	b1.Event("UserTaskBegin", trace.TaskID(2), trace.TaskID(0) /* 0 means no parent, not background */, "my task", testgen.NoStack)
+
+	g2 := t.Generation(2)
+
+	// That same goroutine emits a task end in the following generation.
+	b2 := g2.Batch(trace.ThreadID(0), 5)
+	b2.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning)
+	b2.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning)
+	b2.Event("UserTaskEnd", trace.TaskID(2), testgen.NoStack)
+}
diff --git a/src/internal/trace/v2/testdata/mktests.go b/src/internal/trace/v2/testdata/mktests.go
new file mode 100644
index 0000000..96cbbe4
--- /dev/null
+++ b/src/internal/trace/v2/testdata/mktests.go
@@ -0,0 +1,162 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"internal/trace/v2/raw"
+	"internal/trace/v2/version"
+	"internal/txtar"
+	"io"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+)
+
+func main() {
+	log.SetFlags(0)
+	ctx, err := newContext()
+	if err != nil {
+		log.Fatal(err)
+	}
+	if err := ctx.runGenerators(); err != nil {
+		log.Fatal(err)
+	}
+	if err := ctx.runTestProg("./testprog/annotations.go"); err != nil {
+		log.Fatal(err)
+	}
+	if err := ctx.runTestProg("./testprog/annotations-stress.go"); err != nil {
+		log.Fatal(err)
+	}
+}
+
+type context struct {
+	testNames map[string]struct{}
+	filter    *regexp.Regexp
+}
+
+func newContext() (*context, error) {
+	var filter *regexp.Regexp
+	var err error
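+	// GOTRACETEST optionally holds a regexp; when set, only matching tests are (re)generated.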
+	if pattern := os.Getenv("GOTRACETEST"); pattern != "" {
+		filter, err = regexp.Compile(pattern)
+		if err != nil {
+			return nil, fmt.Errorf("compiling regexp %q for GOTRACETEST: %v", pattern, err)
+		}
+	}
+	return &context{
+		testNames: make(map[string]struct{}),
+		filter:    filter,
+	}, nil
+}
+
+func (ctx *context) register(testName string) (skip bool, err error) {
+	if _, ok := ctx.testNames[testName]; ok {
+		return true, fmt.Errorf("duplicate test %s found", testName)
+	}
+	// Record the name so duplicate registrations are detected.
+	ctx.testNames[testName] = struct{}{}
+	if ctx.filter != nil {
+		return !ctx.filter.MatchString(testName), nil
+	}
+	return false, nil
+}
+
+func (ctx *context) runGenerators() error {
+	generators, err := filepath.Glob("./generators/*.go")
+	if err != nil {
+		return fmt.Errorf("reading generators: %v", err)
+	}
+	genroot := "./tests"
+
+	if err := os.MkdirAll(genroot, 0777); err != nil {
+		return fmt.Errorf("creating generated root: %v", err)
+	}
+	for _, path := range generators {
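+		// Derive the test name from the generator's file name, minus its extension.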
+		name := filepath.Base(path)
+		name = name[:len(name)-len(filepath.Ext(name))]
+
+		// Skip if we have a pattern and this test doesn't match.
+		skip, err := ctx.register(name)
+		if err != nil {
+			return err
+		}
+		if skip {
+			continue
+		}
+
+		fmt.Fprintf(os.Stderr, "generating %s... ", name)
+
+		// Get the test path.
+		testPath := filepath.Join(genroot, fmt.Sprintf("%s.test", name))
+
+		// Run generator.
+		cmd := exec.Command("go", "run", path, testPath)
+		if out, err := cmd.CombinedOutput(); err != nil {
+			return fmt.Errorf("running generator %s: %v:\n%s", name, err, out)
+		}
+		fmt.Fprintln(os.Stderr)
+	}
+	return nil
+}
+
+func (ctx *context) runTestProg(progPath string) error {
+	name := filepath.Base(progPath)
+	name = name[:len(name)-len(filepath.Ext(name))]
+	name = fmt.Sprintf("go1%d-%s", version.Current, name)
+
+	// Skip if we have a pattern and this test doesn't match.
+	skip, err := ctx.register(name)
+	if err != nil {
+		return err
+	}
+	if skip {
+		return nil
+	}
+
+	// Create command.
+	var trace, stderr bytes.Buffer
+	cmd := exec.Command("go", "run", progPath)
+	// TODO(mknyszek): Remove if goexperiment.Exectracer2 becomes the default.
+	cmd.Env = append(os.Environ(), "GOEXPERIMENT=exectracer2")
+	cmd.Stdout = &trace
+	cmd.Stderr = &stderr
+
+	// Run trace program; the trace will appear in stdout.
+	fmt.Fprintf(os.Stderr, "running trace program %s...\n", name)
+	if err := cmd.Run(); err != nil {
+		log.Fatalf("running trace program: %v:\n%s", err, stderr.String())
+	}
+
+	// Write out the trace.
+	var textTrace bytes.Buffer
+	r, err := raw.NewReader(&trace)
+	if err != nil {
+		log.Fatalf("reading trace: %v", err)
+	}
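+	// Convert the raw binary trace into the text format stored in .test files.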
+	w, err := raw.NewTextWriter(&textTrace, version.Current)
+	if err != nil {
+		log.Fatalf("creating trace writer: %v", err)
+	}
+	for {
+		ev, err := r.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			log.Fatalf("reading trace: %v", err)
+		}
+		if err := w.WriteEvent(ev); err != nil {
+			log.Fatalf("writing trace: %v", err)
+		}
+	}
+	testData := txtar.Format(&txtar.Archive{
+		Files: []txtar.File{
+			{Name: "expect", Data: []byte("SUCCESS")},
+			{Name: "trace", Data: textTrace.Bytes()},
+		},
+	})
+	return os.WriteFile(fmt.Sprintf("./tests/%s.test", name), testData, 0o664)
+}
diff --git a/src/internal/trace/v2/testdata/testprog/annotations-stress.go b/src/internal/trace/v2/testdata/testprog/annotations-stress.go
new file mode 100644
index 0000000..511d6ed
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/annotations-stress.go
@@ -0,0 +1,84 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests user tasks, regions, and logging.
+
+//go:build ignore
+
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"os"
+	"runtime/trace"
+	"time"
+)
+
+func main() {
+	baseCtx := context.Background()
+
+	// Create a task that starts and ends entirely outside of the trace.
+	ctx0, t0 := trace.NewTask(baseCtx, "parent")
+
+	// Create a task that starts before the trace and ends during the trace.
+	ctx1, t1 := trace.NewTask(ctx0, "type1")
+
+	// Start tracing.
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+	t1.End()
+
+	// Create a task that starts during the trace and ends after.
+	ctx2, t2 := trace.NewTask(ctx0, "type2")
+
+	// Create a task that starts and ends during the trace.
+	ctx3, t3 := trace.NewTask(baseCtx, "type3")
+
+	// Generate some events.
+	for i := 0; i < 2; i++ {
+		do(baseCtx, 4)
+		do(ctx0, 2)
+		do(ctx1, 3)
+		do(ctx2, 6)
+		do(ctx3, 5)
+	}
+
+	// Finish up tasks according to their lifetime relative to the trace.
+	t3.End()
+	trace.Stop()
+	t2.End()
+	t0.End()
+}
+
+func do(ctx context.Context, k int) {
+	trace.Log(ctx, "log", "before do")
+
+	var t *trace.Task
+	ctx, t = trace.NewTask(ctx, "do")
+	defer t.End()
+
+	trace.Log(ctx, "log2", "do")
+
+	// Create a region and spawn more tasks and more workers.
+	trace.WithRegion(ctx, "fanout", func() {
+		for i := 0; i < k; i++ {
+			go func(i int) {
+				trace.WithRegion(ctx, fmt.Sprintf("region%d", i), func() {
+					trace.Logf(ctx, "log", "fanout region%d", i)
+					if i == 2 {
+						do(ctx, 0)
+						return
+					}
+				})
+			}(i)
+		}
+	})
+
+	// Sleep to let things happen, but also increase the chance that we
+	// advance a generation.
+	time.Sleep(10 * time.Millisecond)
+}
diff --git a/src/internal/trace/v2/testdata/testprog/annotations.go b/src/internal/trace/v2/testdata/testprog/annotations.go
new file mode 100644
index 0000000..2507bc4
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/annotations.go
@@ -0,0 +1,60 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests user tasks, regions, and logging.
+
+//go:build ignore
+
+package main
+
+import (
+	"context"
+	"log"
+	"os"
+	"runtime/trace"
+	"sync"
+)
+
+func main() {
+	bgctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Create a pre-existing region. This won't end up in the trace.
+	preExistingRegion := trace.StartRegion(bgctx, "pre-existing region")
+
+	// Start tracing.
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+
+	// Beginning of traced execution.
+	var wg sync.WaitGroup
+	ctx, task := trace.NewTask(bgctx, "task0") // EvUserTaskCreate("task0")
+	trace.StartRegion(ctx, "task0 region")
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		defer task.End() // EvUserTaskEnd("task0")
+
+		trace.StartRegion(ctx, "unended region")
+
+		trace.WithRegion(ctx, "region0", func() {
+			// EvUserRegionBegin("region0", start)
+			trace.WithRegion(ctx, "region1", func() {
+				trace.Log(ctx, "key0", "0123456789abcdef") // EvUserLog("task0", "key0", "0....f")
+			})
+			// EvUserRegionEnd("region0", end)
+		})
+	}()
+	wg.Wait()
+
+	preExistingRegion.End()
+	postExistingRegion := trace.StartRegion(bgctx, "post-existing region")
+
+	// End of traced execution.
+	trace.Stop()
+
+	postExistingRegion.End()
+}
diff --git a/src/internal/trace/v2/testdata/testprog/cgo-callback.go b/src/internal/trace/v2/testdata/testprog/cgo-callback.go
new file mode 100644
index 0000000..d636500
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/cgo-callback.go
@@ -0,0 +1,80 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests cgo callbacks.
+
+//go:build ignore
+
+package main
+
+/*
+#include <pthread.h>
+
+void go_callback();
+void go_callback2();
+
+static void *thr(void *arg) {
+    go_callback();
+    return 0;
+}
+
+static void foo() {
+    pthread_t th;
+    pthread_attr_t attr;
+    pthread_attr_init(&attr);
+    pthread_attr_setstacksize(&attr, 256 << 10);
+    pthread_create(&th, &attr, thr, 0);
+    pthread_join(th, 0);
+}
+
+static void bar() {
+    go_callback2();
+}
+*/
+import "C"
+
+import (
+	"log"
+	"os"
+	"runtime"
+	"runtime/trace"
+)
+
+//export go_callback
+func go_callback() {
+	// Do another call into C, just to test that path too.
+	C.bar()
+}
+
+//export go_callback2
+func go_callback2() {
+	runtime.GC()
+}
+
+func main() {
+	// Start tracing.
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+
+	// Do a whole bunch of cgocallbacks.
+	const n = 10
+	done := make(chan bool)
+	for i := 0; i < n; i++ {
+		go func() {
+			C.foo()
+			done <- true
+		}()
+	}
+	for i := 0; i < n; i++ {
+		<-done
+	}
+
+	// Do something to steal back any Ps from the Ms, just
+	// for coverage.
+	runtime.GC()
+
+	// End of traced execution.
+	trace.Stop()
+}
diff --git a/src/internal/trace/v2/testdata/testprog/cpu-profile.go b/src/internal/trace/v2/testdata/testprog/cpu-profile.go
new file mode 100644
index 0000000..293a2ac
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/cpu-profile.go
@@ -0,0 +1,137 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests CPU profiling.
+
+//go:build ignore
+
+package main
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"internal/profile"
+	"log"
+	"os"
+	"runtime"
+	"runtime/pprof"
+	"runtime/trace"
+	"strings"
+	"time"
+)
+
+func main() {
+	cpuBuf := new(bytes.Buffer)
+	if err := pprof.StartCPUProfile(cpuBuf); err != nil {
+		log.Fatalf("failed to start CPU profile: %v", err)
+	}
+
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+
+	dur := 100 * time.Millisecond
+	func() {
+		// Create a region in the execution trace. Set and clear goroutine
+		// labels fully within that region, so we know that any CPU profile
+		// sample with the label must also be eligible for inclusion in the
+		// execution trace.
+		ctx := context.Background()
+		defer trace.StartRegion(ctx, "cpuHogger").End()
+		pprof.Do(ctx, pprof.Labels("tracing", "on"), func(ctx context.Context) {
+			cpuHogger(cpuHog1, &salt1, dur)
+		})
+		// Be sure the execution trace's view, when filtered to this goroutine
+		// via the explicit goroutine ID in each event, gets many more samples
+		// than the CPU profiler when filtered to this goroutine via labels.
+		cpuHogger(cpuHog1, &salt1, dur)
+	}()
+
+	trace.Stop()
+	pprof.StopCPUProfile()
+
+	// Summarize the CPU profile to stderr so the test can check against it.
+
+	prof, err := profile.Parse(cpuBuf)
+	if err != nil {
+		log.Fatalf("failed to parse CPU profile: %v", err)
+	}
+	// Examine the CPU profiler's view. Filter it to only include samples from
+	// the single test goroutine. Use labels to execute that filter: they should
+	// apply to all work done while that goroutine is getg().m.curg, and they
+	// should apply to no other goroutines.
+	pprofStacks := make(map[string]int)
+	for _, s := range prof.Sample {
+		if s.Label["tracing"] != nil {
+			var fns []string
+			var leaf string
+			for _, loc := range s.Location {
+				for _, line := range loc.Line {
+					fns = append(fns, fmt.Sprintf("%s:%d", line.Function.Name, line.Line))
+					leaf = line.Function.Name
+				}
+			}
+			// runtime.sigprof synthesizes call stacks when "normal traceback is
+			// impossible or has failed", using particular placeholder functions
+			// to represent common failure cases. Look for those functions in
+			// the leaf position as a sign that the call stack and its
+			// symbolization are more complex than this test can handle.
+			//
+			// TODO: Make the symbolization done by the execution tracer and CPU
+			// profiler match up even in these harder cases. See #53378.
+			switch leaf {
+			case "runtime._System", "runtime._GC", "runtime._ExternalCode", "runtime._VDSO":
+				continue
+			}
+			stack := strings.Join(fns, "|")
+			samples := int(s.Value[0])
+			pprofStacks[stack] += samples
+		}
+	}
+	for stack, samples := range pprofStacks {
+		fmt.Fprintf(os.Stderr, "%s\t%d\n", stack, samples)
+	}
+}
+
+func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
+	// We only need to get one 100 Hz clock tick, so we've got
+	// a large safety buffer.
+	// But do at least 500 iterations (which should take about 100ms),
+	// otherwise TestCPUProfileMultithreaded can fail if only one
+	// thread is scheduled during the testing period.
+	t0 := time.Now()
+	accum := *y
+	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
+		accum = f(accum)
+	}
+	*y = accum
+}
+
+var (
+	salt1 = 0
+)
+
+// The actual CPU hogging function.
+// Must not call other functions nor access heap/globals in the loop,
+// otherwise under race detector the samples will be in the race runtime.
+func cpuHog1(x int) int {
+	return cpuHog0(x, 1e5)
+}
+
+func cpuHog0(x, n int) int {
+	foo := x
+	for i := 0; i < n; i++ {
+		if i%1000 == 0 {
+			// Spend time in mcall, stored as gp.m.curg, with g0 running
+			runtime.Gosched()
+		}
+		if foo > 0 {
+			foo *= foo
+		} else {
+			foo *= foo + 1
+		}
+	}
+	return foo
+}
diff --git a/src/internal/trace/v2/testdata/testprog/futile-wakeup.go b/src/internal/trace/v2/testdata/testprog/futile-wakeup.go
new file mode 100644
index 0000000..cc48981
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/futile-wakeup.go
@@ -0,0 +1,84 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests to make sure the runtime doesn't generate futile wakeups. For example,
+// it makes sure that a block on a channel send that unblocks briefly only to
+// immediately go back to sleep (in such a way that doesn't reveal any useful
+// information, and is purely an artifact of the runtime implementation) doesn't
+// make it into the trace.
+
+//go:build ignore
+
+package main
+
+import (
+	"context"
+	"log"
+	"os"
+	"runtime"
+	"runtime/trace"
+	"sync"
+)
+
+func main() {
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
+	c0 := make(chan int, 1)
+	c1 := make(chan int, 1)
+	c2 := make(chan int, 1)
+	const procs = 2
+	var done sync.WaitGroup
+	done.Add(4 * procs)
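+	// Each iteration starts a send/receive pair on c0 and a pair of goroutines selecting on c1/c2.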
+	for p := 0; p < procs; p++ {
+		const iters = 1e3
+		go func() {
+			trace.WithRegion(context.Background(), "special", func() {
+				for i := 0; i < iters; i++ {
+					runtime.Gosched()
+					c0 <- 0
+				}
+				done.Done()
+			})
+		}()
+		go func() {
+			trace.WithRegion(context.Background(), "special", func() {
+				for i := 0; i < iters; i++ {
+					runtime.Gosched()
+					<-c0
+				}
+				done.Done()
+			})
+		}()
+		go func() {
+			trace.WithRegion(context.Background(), "special", func() {
+				for i := 0; i < iters; i++ {
+					runtime.Gosched()
+					select {
+					case c1 <- 0:
+					case c2 <- 0:
+					}
+				}
+				done.Done()
+			})
+		}()
+		go func() {
+			trace.WithRegion(context.Background(), "special", func() {
+				for i := 0; i < iters; i++ {
+					runtime.Gosched()
+					select {
+					case <-c1:
+					case <-c2:
+					}
+				}
+				done.Done()
+			})
+		}()
+	}
+	done.Wait()
+
+	trace.Stop()
+}
diff --git a/src/internal/trace/v2/testdata/testprog/gc-stress.go b/src/internal/trace/v2/testdata/testprog/gc-stress.go
new file mode 100644
index 0000000..70d3a24
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/gc-stress.go
@@ -0,0 +1,76 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests a GC-heavy program. This is useful for shaking out
+// all sorts of corner cases about GC-related ranges.
+
+//go:build ignore
+
+package main
+
+import (
+	"log"
+	"math/rand"
+	"os"
+	"runtime"
+	"runtime/trace"
+	"time"
+)
+
+type node struct {
+	children [4]*node
+	data     [128]byte
+}
+
+func makeTree(depth int) *node {
+	if depth == 0 {
+		return new(node)
+	}
+	return &node{
+		children: [4]*node{
+			makeTree(depth - 1),
+			makeTree(depth - 1),
+			makeTree(depth - 1),
+			makeTree(depth - 1),
+		},
+	}
+}
+
+var trees [16]*node
+var ballast *[16]*[8192]*node
+var sink []byte
+
+func main() {
+	for i := range trees {
+		trees[i] = makeTree(6)
+	}
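+	// Build a large, pointer-dense ballast so the GC always has plenty of live heap to scan.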
+	ballast = new([16]*[8192]*node)
+	for i := range ballast {
+		ballast[i] = new([8192]*node)
+		for j := range ballast[i] {
+			ballast[i][j] = &node{
+				data: [128]byte{1, 2, 3, 4},
+			}
+		}
+	}
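+	// Allocate continuously on every P to keep triggering GC cycles.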
+	for i := 0; i < runtime.GOMAXPROCS(-1); i++ {
+		go func() {
+			for {
+				sink = make([]byte, rand.Intn(32<<10))
+			}
+		}()
+	}
+	// Increase the chance that we end up starting and stopping
+	// mid-GC by only starting to trace after a few milliseconds.
+	time.Sleep(5 * time.Millisecond)
+
+	// Start tracing.
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+	defer trace.Stop()
+
+	// Let the tracing happen for a bit.
+	time.Sleep(400 * time.Millisecond)
+}
diff --git a/src/internal/trace/v2/testdata/testprog/gomaxprocs.go b/src/internal/trace/v2/testdata/testprog/gomaxprocs.go
new file mode 100644
index 0000000..2651207
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/gomaxprocs.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests increasing and decreasing GOMAXPROCS to try and
+// catch issues with stale proc state.
+
+//go:build ignore
+
+package main
+
+import (
+	"log"
+	"os"
+	"runtime"
+	"runtime/trace"
+	"time"
+)
+
+func main() {
+	// Start a goroutine that calls runtime.GC to try and
+	// introduce some interesting events in between the
+	// GOMAXPROCS calls.
+	go func() {
+		for {
+			runtime.GC()
+			time.Sleep(1 * time.Millisecond)
+		}
+	}()
+
+	// Start tracing.
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+	// Run GOMAXPROCS a bunch of times, up and down.
+	for i := 1; i <= 16; i *= 2 {
+		runtime.GOMAXPROCS(i)
+		time.Sleep(1 * time.Millisecond)
+	}
+	for i := 16; i >= 1; i /= 2 {
+		runtime.GOMAXPROCS(i)
+		time.Sleep(1 * time.Millisecond)
+	}
+	// Stop tracing.
+	trace.Stop()
+}
diff --git a/src/internal/trace/v2/testdata/testprog/many-start-stop.go b/src/internal/trace/v2/testdata/testprog/many-start-stop.go
new file mode 100644
index 0000000..2d5d063
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/many-start-stop.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests simply starting and stopping tracing multiple times.
+//
+// This is useful for finding bugs in trace state reset.
+
+//go:build ignore
+
+package main
+
+import (
+	"bytes"
+	"log"
+	"os"
+	"runtime"
+	"runtime/trace"
+)
+
+func main() {
+	// Trace a few times.
+	for i := 0; i < 10; i++ {
+		var buf bytes.Buffer
+		if err := trace.Start(&buf); err != nil {
+			log.Fatalf("failed to start tracing: %v", err)
+		}
+		runtime.GC()
+		trace.Stop()
+	}
+
+	// Start tracing again, this time writing out the result.
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+	runtime.GC()
+	trace.Stop()
+}
diff --git a/src/internal/trace/v2/testdata/testprog/stacks.go b/src/internal/trace/v2/testdata/testprog/stacks.go
new file mode 100644
index 0000000..e64bc86
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/stacks.go
@@ -0,0 +1,129 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests stack symbolization.
+
+//go:build ignore
+
+package main
+
+import (
+	"log"
+	"net"
+	"os"
+	"runtime"
+	"runtime/trace"
+	"sync"
+	"time"
+)
+
+func main() {
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+	defer trace.Stop() // in case of early return
+
+	// Now we will do a bunch of things for which we verify stacks later.
+	// It is impossible to ensure that a goroutine has actually blocked
+	// on a channel, in a select or otherwise. So we kick off goroutines
+	// that need to block first in the hope that while we are executing
+	// the rest of the test, they will block.
+	go func() { // func1
+		select {}
+	}()
+	go func() { // func2
+		var c chan int
+		c <- 0
+	}()
+	go func() { // func3
+		var c chan int
+		<-c
+	}()
+	done1 := make(chan bool)
+	go func() { // func4
+		<-done1
+	}()
+	done2 := make(chan bool)
+	go func() { // func5
+		done2 <- true
+	}()
+	c1 := make(chan int)
+	c2 := make(chan int)
+	go func() { // func6
+		select {
+		case <-c1:
+		case <-c2:
+		}
+	}()
+	var mu sync.Mutex
+	mu.Lock()
+	go func() { // func7
+		mu.Lock()
+		mu.Unlock()
+	}()
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() { // func8
+		wg.Wait()
+	}()
+	cv := sync.NewCond(&sync.Mutex{})
+	go func() { // func9
+		cv.L.Lock()
+		cv.Wait()
+		cv.L.Unlock()
+	}()
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		log.Fatalf("failed to listen: %v", err)
+	}
+	go func() { // func10
+		c, err := ln.Accept()
+		if err != nil {
+			log.Printf("failed to accept: %v", err)
+			return
+		}
+		c.Close()
+	}()
+	rp, wp, err := os.Pipe()
+	if err != nil {
+		log.Fatalf("failed to create a pipe: %v", err)
+	}
+	defer rp.Close()
+	defer wp.Close()
+	pipeReadDone := make(chan bool)
+	go func() { // func11
+		var data [1]byte
+		rp.Read(data[:])
+		pipeReadDone <- true
+	}()
+
+	time.Sleep(100 * time.Millisecond)
+	runtime.GC()
+	runtime.Gosched()
+	time.Sleep(100 * time.Millisecond) // the last chance for the goroutines above to block
+	done1 <- true
+	<-done2
+	select {
+	case c1 <- 0:
+	case c2 <- 0:
+	}
+	mu.Unlock()
+	wg.Done()
+	cv.Signal()
+	c, err := net.Dial("tcp", ln.Addr().String())
+	if err != nil {
+		log.Fatalf("failed to dial: %v", err)
+	}
+	c.Close()
+	var data [1]byte
+	wp.Write(data[:])
+	<-pipeReadDone
+
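+	// Change GOMAXPROCS so its call stack is captured in the trace as well.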
+	oldGoMaxProcs := runtime.GOMAXPROCS(0)
+	runtime.GOMAXPROCS(oldGoMaxProcs + 1)
+
+	trace.Stop()
+
+	runtime.GOMAXPROCS(oldGoMaxProcs)
+}
diff --git a/src/internal/trace/v2/testdata/testprog/stress-start-stop.go b/src/internal/trace/v2/testdata/testprog/stress-start-stop.go
new file mode 100644
index 0000000..72c1c59
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/stress-start-stop.go
@@ -0,0 +1,166 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests many interesting cases (network, syscalls, a little GC, busy goroutines,
+// blocked goroutines, LockOSThread, pipes, and GOMAXPROCS).
+
+//go:build ignore
+
+package main
+
+import (
+	"bytes"
+	"io"
+	"log"
+	"net"
+	"os"
+	"runtime"
+	"runtime/trace"
+	"sync"
+	"time"
+)
+
+func main() {
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
+	outerDone := make(chan bool)
+
+	go func() {
+		defer func() {
+			outerDone <- true
+		}()
+
+		var wg sync.WaitGroup
+		done := make(chan bool)
+
+		wg.Add(1)
+		go func() {
+			<-done
+			wg.Done()
+		}()
+
+		rp, wp, err := os.Pipe()
+		if err != nil {
+			log.Fatalf("failed to create pipe: %v", err)
+			return
+		}
+		defer func() {
+			rp.Close()
+			wp.Close()
+		}()
+		wg.Add(1)
+		go func() {
+			var tmp [1]byte
+			rp.Read(tmp[:])
+			<-done
+			wg.Done()
+		}()
+		time.Sleep(time.Millisecond)
+
+		go func() {
+			runtime.LockOSThread()
+			for {
+				select {
+				case <-done:
+					return
+				default:
+					runtime.Gosched()
+				}
+			}
+		}()
+
+		runtime.GC()
+		// Trigger GC from malloc.
+		n := 512
+		for i := 0; i < n; i++ {
+			_ = make([]byte, 1<<20)
+		}
+
+		// Create a bunch of busy goroutines to load all Ps.
+		for p := 0; p < 10; p++ {
+			wg.Add(1)
+			go func() {
+				// Do something useful.
+				tmp := make([]byte, 1<<16)
+				for i := range tmp {
+					tmp[i]++
+				}
+				_ = tmp
+				<-done
+				wg.Done()
+			}()
+		}
+
+		// Block in syscall.
+		wg.Add(1)
+		go func() {
+			var tmp [1]byte
+			rp.Read(tmp[:])
+			<-done
+			wg.Done()
+		}()
+
+		runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+
+		// Test timers.
+		timerDone := make(chan bool)
+		go func() {
+			time.Sleep(time.Millisecond)
+			timerDone <- true
+		}()
+		<-timerDone
+
+		// A bit of network.
+		ln, err := net.Listen("tcp", "127.0.0.1:0")
+		if err != nil {
+			log.Fatalf("listen failed: %v", err)
+			return
+		}
+		defer ln.Close()
+		go func() {
+			c, err := ln.Accept()
+			if err != nil {
+				return
+			}
+			time.Sleep(time.Millisecond)
+			var buf [1]byte
+			c.Write(buf[:])
+			c.Close()
+		}()
+		c, err := net.Dial("tcp", ln.Addr().String())
+		if err != nil {
+			log.Fatalf("dial failed: %v", err)
+			return
+		}
+		var tmp [1]byte
+		c.Read(tmp[:])
+		c.Close()
+
+		go func() {
+			runtime.Gosched()
+			select {}
+		}()
+
+		// Unblock helper goroutines and wait for them to finish.
+		wp.Write(tmp[:])
+		wp.Write(tmp[:])
+		close(done)
+		wg.Wait()
+	}()
+
+	const iters = 5
+	for i := 0; i < iters; i++ {
+		var w io.Writer
+		if i == iters-1 {
+			w = os.Stdout
+		} else {
+			w = new(bytes.Buffer)
+		}
+		if err := trace.Start(w); err != nil {
+			log.Fatalf("failed to start tracing: %v", err)
+		}
+		time.Sleep(time.Millisecond)
+		trace.Stop()
+	}
+	<-outerDone
+}
diff --git a/src/internal/trace/v2/testdata/testprog/stress.go b/src/internal/trace/v2/testdata/testprog/stress.go
new file mode 100644
index 0000000..99696d1
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/stress.go
@@ -0,0 +1,146 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests many interesting cases (network, syscalls, a little GC, busy goroutines,
+// blocked goroutines, LockOSThread, pipes, and GOMAXPROCS).
+
+//go:build ignore
+
+package main
+
+import (
+	"log"
+	"net"
+	"os"
+	"runtime"
+	"runtime/trace"
+	"sync"
+	"time"
+)
+
+func main() {
+	var wg sync.WaitGroup
+	done := make(chan bool)
+
+	// Create a goroutine blocked before tracing.
+	wg.Add(1)
+	go func() {
+		<-done
+		wg.Done()
+	}()
+
+	// Create a goroutine blocked in syscall before tracing.
+	rp, wp, err := os.Pipe()
+	if err != nil {
+		log.Fatalf("failed to create pipe: %v", err)
+	}
+	defer func() {
+		rp.Close()
+		wp.Close()
+	}()
+	wg.Add(1)
+	go func() {
+		var tmp [1]byte
+		rp.Read(tmp[:])
+		<-done
+		wg.Done()
+	}()
+	time.Sleep(time.Millisecond) // give the goroutine above time to block
+
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+	defer trace.Stop()
+
+	procs := runtime.GOMAXPROCS(10)
+	time.Sleep(50 * time.Millisecond) // test proc stop/start events
+
+	go func() {
+		runtime.LockOSThread()
+		for {
+			select {
+			case <-done:
+				return
+			default:
+				runtime.Gosched()
+			}
+		}
+	}()
+
+	runtime.GC()
+	// Trigger GC from malloc.
+	n := 512
+	for i := 0; i < n; i++ {
+		_ = make([]byte, 1<<20)
+	}
+
+	// Create a bunch of busy goroutines to load all Ps.
+	for p := 0; p < 10; p++ {
+		wg.Add(1)
+		go func() {
+			// Do something useful.
+			tmp := make([]byte, 1<<16)
+			for i := range tmp {
+				tmp[i]++
+			}
+			_ = tmp
+			<-done
+			wg.Done()
+		}()
+	}
+
+	// Block in syscall.
+	wg.Add(1)
+	go func() {
+		var tmp [1]byte
+		rp.Read(tmp[:])
+		<-done
+		wg.Done()
+	}()
+
+	// Test timers.
+	timerDone := make(chan bool)
+	go func() {
+		time.Sleep(time.Millisecond)
+		timerDone <- true
+	}()
+	<-timerDone
+
+	// A bit of network.
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		log.Fatalf("listen failed: %v", err)
+	}
+	defer ln.Close()
+	go func() {
+		c, err := ln.Accept()
+		if err != nil {
+			return
+		}
+		time.Sleep(time.Millisecond)
+		var buf [1]byte
+		c.Write(buf[:])
+		c.Close()
+	}()
+	c, err := net.Dial("tcp", ln.Addr().String())
+	if err != nil {
+		log.Fatalf("dial failed: %v", err)
+	}
+	var tmp [1]byte
+	c.Read(tmp[:])
+	c.Close()
+
+	go func() {
+		runtime.Gosched()
+		select {}
+	}()
+
+	// Unblock helper goroutines and wait for them to finish.
+	wp.Write(tmp[:])
+	wp.Write(tmp[:])
+	close(done)
+	wg.Wait()
+
+	runtime.GOMAXPROCS(procs)
+}
diff --git a/src/internal/trace/v2/testdata/testprog/wait-on-pipe.go b/src/internal/trace/v2/testdata/testprog/wait-on-pipe.go
new file mode 100644
index 0000000..912f5dd
--- /dev/null
+++ b/src/internal/trace/v2/testdata/testprog/wait-on-pipe.go
@@ -0,0 +1,66 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests a goroutine sitting blocked in a syscall for
+// an entire generation. This is a regression test for
+// #65196.
+
+//go:build ignore
+
+package main
+
+import (
+	"log"
+	"os"
+	"runtime/trace"
+	"syscall"
+	"time"
+)
+
+func main() {
+	// Create a pipe to block on.
+	var p [2]int
+	if err := syscall.Pipe(p[:]); err != nil {
+		log.Fatalf("failed to create pipe: %v", err)
+	}
+	rfd, wfd := p[0], p[1]
+
+	// Create a goroutine that blocks on the pipe.
+	done := make(chan struct{})
+	go func() {
+		var data [1]byte
+		_, err := syscall.Read(rfd, data[:])
+		if err != nil {
+			log.Fatalf("failed to read from pipe: %v", err)
+		}
+		done <- struct{}{}
+	}()
+
+	// Give the goroutine ample chance to block on the pipe.
+	time.Sleep(10 * time.Millisecond)
+
+	// Start tracing.
+	if err := trace.Start(os.Stdout); err != nil {
+		log.Fatalf("failed to start tracing: %v", err)
+	}
+
+	// This isn't enough to have a full generation pass by default,
+	// but it is generally enough in stress mode.
+	time.Sleep(100 * time.Millisecond)
+
+	// Write to the pipe to unblock it.
+	if _, err := syscall.Write(wfd, []byte{10}); err != nil {
+		log.Fatalf("failed to write to pipe: %v", err)
+	}
+
+	// Wait for the goroutine to unblock and start running.
+	// This is helpful to catch incorrect information written
+	// down for the syscall-blocked goroutine, since it'll start
+	// executing, and that execution information will be
+	// inconsistent.
+	<-done
+
+	// Stop tracing.
+	trace.Stop()
+}
diff --git a/src/internal/trace/v2/testdata/tests/go122-annotations-stress.test b/src/internal/trace/v2/testdata/tests/go122-annotations-stress.test
new file mode 100644
index 0000000..8da8c0f
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-annotations-stress.test
@@ -0,0 +1,1179 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=18446744073709551615 time=2753926854385 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=1986497 time=2753925247434 size=1430
+ProcStart dt=336 p=2 p_seq=1
+GoStart dt=191 g=19 g_seq=1
+HeapAlloc dt=389 heapalloc_value=1622016
+HeapAlloc dt=4453 heapalloc_value=1662976
+GoBlock dt=572 reason_string=12 stack=29
+ProcStop dt=26
+ProcStart dt=160734 p=2 p_seq=2
+ProcStop dt=21
+ProcStart dt=159292 p=0 p_seq=7
+GoStart dt=299 g=49 g_seq=1
+UserRegionBegin dt=183 task=8 name_string=33 stack=26
+UserLog dt=26 task=8 key_string=24 value_string=49 stack=27
+UserRegionEnd dt=8 task=8 name_string=33 stack=28
+GoDestroy dt=3
+GoStart dt=20 g=50 g_seq=1
+UserRegionBegin dt=40 task=8 name_string=35 stack=26
+UserLog dt=9 task=8 key_string=24 value_string=50 stack=27
+UserRegionEnd dt=2 task=8 name_string=35 stack=28
+GoDestroy dt=1
+ProcStop dt=18
+ProcStart dt=141801 p=4 p_seq=5
+ProcStop dt=18
+ProcStart dt=16860 p=4 p_seq=6
+GoUnblock dt=53 g=1 g_seq=5 stack=0
+GoUnblock dt=9 g=51 g_seq=3 stack=0
+GoStart dt=162 g=51 g_seq=4
+UserTaskEnd dt=35 task=9 stack=36
+UserRegionEnd dt=16 task=8 name_string=31 stack=28
+GoDestroy dt=2
+GoStart dt=20 g=1 g_seq=6
+UserTaskEnd dt=14 task=8 stack=54
+UserLog dt=26 task=3 key_string=24 value_string=51 stack=55
+UserTaskBegin dt=14 task=10 parent_task=3 name_string=26 stack=56
+UserLog dt=42 task=10 key_string=27 value_string=52 stack=57
+UserRegionBegin dt=12 task=10 name_string=29 stack=58
+GoCreate dt=36 new_g=35 new_stack=17 stack=59
+GoCreate dt=11 new_g=36 new_stack=17 stack=59
+GoCreate dt=18 new_g=37 new_stack=17 stack=59
+GoCreate dt=10 new_g=38 new_stack=17 stack=59
+GoCreate dt=6 new_g=39 new_stack=17 stack=59
+GoCreate dt=8 new_g=40 new_stack=17 stack=59
+UserRegionEnd dt=7 task=10 name_string=29 stack=60
+GoBlock dt=9 reason_string=19 stack=61
+GoStart dt=15 g=40 g_seq=1
+UserRegionBegin dt=110 task=10 name_string=53 stack=26
+UserLog dt=16 task=10 key_string=24 value_string=54 stack=27
+UserRegionEnd dt=2 task=10 name_string=53 stack=28
+GoDestroy dt=2
+GoStart dt=6 g=38 g_seq=1
+UserRegionBegin dt=31 task=10 name_string=30 stack=26
+UserLog dt=5 task=10 key_string=24 value_string=55 stack=27
+UserRegionEnd dt=2 task=10 name_string=30 stack=28
+GoDestroy dt=1
+GoStart dt=2 g=39 g_seq=1
+UserRegionBegin dt=23 task=10 name_string=56 stack=26
+UserLog dt=6 task=10 key_string=24 value_string=57 stack=27
+UserRegionEnd dt=1 task=10 name_string=56 stack=28
+GoDestroy dt=1
+GoStart dt=8 g=35 g_seq=1
+UserRegionBegin dt=17 task=10 name_string=33 stack=26
+UserLog dt=4 task=10 key_string=24 value_string=58 stack=27
+UserRegionEnd dt=2 task=10 name_string=33 stack=28
+GoDestroy dt=1
+GoStart dt=3 g=36 g_seq=1
+UserRegionBegin dt=19 task=10 name_string=35 stack=26
+UserLog dt=4 task=10 key_string=24 value_string=59 stack=27
+UserRegionEnd dt=2 task=10 name_string=35 stack=28
+GoDestroy dt=1
+ProcStop dt=11
+ProcStart dt=142205 p=0 p_seq=9
+ProcStop dt=19
+ProcStart dt=16811 p=0 p_seq=10
+GoUnblock dt=26 g=1 g_seq=7 stack=0
+GoStart dt=201 g=1 g_seq=8
+UserTaskEnd dt=24 task=10 stack=62
+UserLog dt=18 task=4 key_string=24 value_string=63 stack=63
+UserTaskBegin dt=11 task=12 parent_task=4 name_string=26 stack=64
+UserLog dt=21 task=12 key_string=27 value_string=64 stack=65
+UserRegionBegin dt=7 task=12 name_string=29 stack=66
+GoCreate dt=33 new_g=5 new_stack=17 stack=67
+GoCreate dt=12 new_g=6 new_stack=17 stack=67
+GoCreate dt=9 new_g=7 new_stack=17 stack=67
+GoCreate dt=8 new_g=8 new_stack=17 stack=67
+GoCreate dt=19 new_g=9 new_stack=17 stack=67
+UserRegionEnd dt=14 task=12 name_string=29 stack=68
+GoBlock dt=11 reason_string=19 stack=69
+GoStart dt=13 g=9 g_seq=1
+UserRegionBegin dt=70 task=12 name_string=56 stack=26
+UserLog dt=11 task=12 key_string=24 value_string=65 stack=27
+UserRegionEnd dt=3 task=12 name_string=56 stack=28
+GoDestroy dt=2
+GoStart dt=7 g=5 g_seq=1
+UserRegionBegin dt=24 task=12 name_string=33 stack=26
+UserLog dt=5 task=12 key_string=24 value_string=66 stack=27
+UserRegionEnd dt=2 task=12 name_string=33 stack=28
+GoDestroy dt=2
+GoStart dt=8 g=6 g_seq=1
+UserRegionBegin dt=15 task=12 name_string=35 stack=26
+UserLog dt=7 task=12 key_string=24 value_string=67 stack=27
+UserRegionEnd dt=2 task=12 name_string=35 stack=28
+GoDestroy dt=1
+GoStart dt=2 g=7 g_seq=1
+UserRegionBegin dt=13 task=12 name_string=31 stack=26
+UserLog dt=5 task=12 key_string=24 value_string=68 stack=27
+UserLog dt=6 task=12 key_string=24 value_string=69 stack=30
+UserTaskBegin dt=5 task=13 parent_task=12 name_string=26 stack=31
+UserLog dt=7 task=13 key_string=27 value_string=70 stack=32
+UserRegionBegin dt=4 task=13 name_string=29 stack=33
+UserRegionEnd dt=6 task=13 name_string=29 stack=34
+GoBlock dt=18 reason_string=19 stack=35
+GoStart dt=12 g=8 g_seq=1
+UserRegionBegin dt=22 task=12 name_string=30 stack=26
+UserLog dt=5 task=12 key_string=24 value_string=71 stack=27
+UserRegionEnd dt=2 task=12 name_string=30 stack=28
+GoDestroy dt=1
+ProcStop dt=20
+ProcStart dt=141838 p=4 p_seq=8
+ProcStop dt=16
+ProcStart dt=17652 p=4 p_seq=9
+GoUnblock dt=48 g=1 g_seq=9 stack=0
+GoUnblock dt=8 g=7 g_seq=2 stack=0
+GoStart dt=271 g=7 g_seq=3
+UserTaskEnd dt=25 task=13 stack=36
+UserRegionEnd dt=15 task=12 name_string=31 stack=28
+GoDestroy dt=4
+GoStart dt=19 g=1 g_seq=10
+UserTaskEnd dt=19 task=12 stack=70
+UserLog dt=21 task=0 key_string=24 value_string=72 stack=13
+UserTaskBegin dt=19 task=14 parent_task=0 name_string=26 stack=14
+UserLog dt=37 task=14 key_string=27 value_string=73 stack=15
+UserRegionBegin dt=6 task=14 name_string=29 stack=16
+GoCreate dt=28 new_g=41 new_stack=17 stack=18
+GoCreate dt=14 new_g=42 new_stack=17 stack=18
+GoCreate dt=12 new_g=43 new_stack=17 stack=18
+GoCreate dt=10 new_g=44 new_stack=17 stack=18
+UserRegionEnd dt=5 task=14 name_string=29 stack=19
+GoBlock dt=9 reason_string=19 stack=20
+GoStart dt=16 g=44 g_seq=1
+UserRegionBegin dt=107 task=14 name_string=30 stack=26
+UserLog dt=16 task=14 key_string=24 value_string=74 stack=27
+UserRegionEnd dt=3 task=14 name_string=30 stack=28
+GoDestroy dt=2
+GoStart dt=7 g=41 g_seq=1
+UserRegionBegin dt=30 task=14 name_string=33 stack=26
+UserLog dt=7 task=14 key_string=24 value_string=75 stack=27
+UserRegionEnd dt=2 task=14 name_string=33 stack=28
+GoDestroy dt=2
+GoStart dt=7 g=42 g_seq=1
+UserRegionBegin dt=27 task=14 name_string=35 stack=26
+UserLog dt=7 task=14 key_string=24 value_string=76 stack=27
+UserRegionEnd dt=2 task=14 name_string=35 stack=28
+GoDestroy dt=2
+ProcStop dt=28
+ProcStart dt=141923 p=0 p_seq=12
+ProcStop dt=19
+ProcStart dt=16780 p=0 p_seq=13
+GoUnblock dt=22 g=43 g_seq=2 stack=0
+GoStart dt=162 g=43 g_seq=3
+UserTaskEnd dt=16 task=15 stack=36
+UserRegionEnd dt=12 task=14 name_string=31 stack=28
+GoDestroy dt=2
+ProcStop dt=8
+ProcStart dt=1532 p=2 p_seq=9
+ProcStop dt=12
+ProcStart dt=141906 p=4 p_seq=11
+ProcStop dt=16
+ProcStart dt=16784 p=4 p_seq=12
+GoUnblock dt=20 g=1 g_seq=13 stack=0
+GoStart dt=191 g=1 g_seq=14
+UserTaskEnd dt=15 task=16 stack=45
+UserLog dt=17 task=2 key_string=24 value_string=84 stack=46
+UserTaskBegin dt=8 task=17 parent_task=2 name_string=26 stack=47
+UserLog dt=20 task=17 key_string=27 value_string=85 stack=48
+UserRegionBegin dt=6 task=17 name_string=29 stack=49
+GoCreate dt=28 new_g=45 new_stack=17 stack=50
+GoCreate dt=9 new_g=46 new_stack=17 stack=50
+GoCreate dt=10 new_g=47 new_stack=17 stack=50
+UserRegionEnd dt=5 task=17 name_string=29 stack=51
+GoBlock dt=6 reason_string=19 stack=52
+GoStart dt=10 g=47 g_seq=1
+UserRegionBegin dt=69 task=17 name_string=31 stack=26
+UserLog dt=11 task=17 key_string=24 value_string=86 stack=27
+UserLog dt=7 task=17 key_string=24 value_string=87 stack=30
+UserTaskBegin dt=5 task=18 parent_task=17 name_string=26 stack=31
+UserLog dt=7 task=18 key_string=27 value_string=88 stack=32
+UserRegionBegin dt=5 task=18 name_string=29 stack=33
+UserRegionEnd dt=4 task=18 name_string=29 stack=34
+HeapAlloc dt=35 heapalloc_value=1818624
+GoBlock dt=14 reason_string=19 stack=35
+HeapAlloc dt=11 heapalloc_value=1826816
+GoStart dt=10 g=45 g_seq=1
+UserRegionBegin dt=29 task=17 name_string=33 stack=26
+UserLog dt=9 task=17 key_string=24 value_string=89 stack=27
+UserRegionEnd dt=3 task=17 name_string=33 stack=28
+GoDestroy dt=1
+GoStart dt=5 g=46 g_seq=1
+UserRegionBegin dt=15 task=17 name_string=35 stack=26
+UserLog dt=8 task=17 key_string=24 value_string=90 stack=27
+UserRegionEnd dt=2 task=17 name_string=35 stack=28
+GoDestroy dt=1
+ProcStop dt=3
+ProcStart dt=141981 p=0 p_seq=16
+ProcStop dt=19
+ProcStart dt=17153 p=0 p_seq=17
+GoUnblock dt=44 g=1 g_seq=15 stack=0
+GoUnblock dt=11 g=47 g_seq=2 stack=0
+GoStart dt=215 g=47 g_seq=3
+UserTaskEnd dt=22 task=18 stack=36
+UserRegionEnd dt=9 task=17 name_string=31 stack=28
+GoDestroy dt=3
+GoStart dt=19 g=1 g_seq=16
+UserTaskEnd dt=13 task=17 stack=54
+UserLog dt=18 task=3 key_string=24 value_string=91 stack=55
+UserTaskBegin dt=7 task=19 parent_task=3 name_string=26 stack=56
+UserLog dt=27 task=19 key_string=27 value_string=92 stack=57
+UserRegionBegin dt=8 task=19 name_string=29 stack=58
+GoCreate dt=30 new_g=10 new_stack=17 stack=59
+GoCreate dt=9 new_g=11 new_stack=17 stack=59
+GoCreate dt=11 new_g=12 new_stack=17 stack=59
+GoCreate dt=7 new_g=13 new_stack=17 stack=59
+GoCreate dt=7 new_g=14 new_stack=17 stack=59
+GoCreate dt=9 new_g=15 new_stack=17 stack=59
+UserRegionEnd dt=5 task=19 name_string=29 stack=60
+GoBlock dt=7 reason_string=19 stack=61
+GoStart dt=17 g=15 g_seq=1
+UserRegionBegin dt=61 task=19 name_string=53 stack=26
+UserLog dt=10 task=19 key_string=24 value_string=93 stack=27
+UserRegionEnd dt=3 task=19 name_string=53 stack=28
+GoDestroy dt=1
+GoStart dt=4 g=10 g_seq=1
+UserRegionBegin dt=26 task=19 name_string=33 stack=26
+UserLog dt=7 task=19 key_string=24 value_string=94 stack=27
+UserRegionEnd dt=2 task=19 name_string=33 stack=28
+GoDestroy dt=1
+GoStart dt=4 g=11 g_seq=1
+UserRegionBegin dt=20 task=19 name_string=35 stack=26
+UserLog dt=5 task=19 key_string=24 value_string=95 stack=27
+UserRegionEnd dt=2 task=19 name_string=35 stack=28
+GoDestroy dt=1
+GoStart dt=7 g=12 g_seq=1
+UserRegionBegin dt=14 task=19 name_string=31 stack=26
+UserLog dt=4 task=19 key_string=24 value_string=96 stack=27
+UserLog dt=4 task=19 key_string=24 value_string=97 stack=30
+UserTaskBegin dt=7 task=20 parent_task=19 name_string=26 stack=31
+UserLog dt=5 task=20 key_string=27 value_string=98 stack=32
+UserRegionBegin dt=4 task=20 name_string=29 stack=33
+UserRegionEnd dt=5 task=20 name_string=29 stack=34
+GoBlock dt=9 reason_string=19 stack=35
+GoStart dt=9 g=14 g_seq=1
+UserRegionBegin dt=28 task=19 name_string=56 stack=26
+UserLog dt=7 task=19 key_string=24 value_string=99 stack=27
+UserRegionEnd dt=2 task=19 name_string=56 stack=28
+GoDestroy dt=2
+ProcStop dt=17
+ProcStart dt=141933 p=2 p_seq=11
+ProcStop dt=13
+ProcStart dt=16744 p=2 p_seq=12
+GoUnblock dt=29 g=1 g_seq=17 stack=0
+GoUnblock dt=7 g=12 g_seq=2 stack=0
+GoStart dt=172 g=12 g_seq=3
+UserTaskEnd dt=15 task=20 stack=36
+UserRegionEnd dt=8 task=19 name_string=31 stack=28
+GoDestroy dt=2
+GoStart dt=11 g=1 g_seq=18
+UserTaskEnd dt=14 task=19 stack=62
+UserLog dt=16 task=4 key_string=24 value_string=101 stack=63
+UserTaskBegin dt=6 task=21 parent_task=4 name_string=26 stack=64
+UserLog dt=25 task=21 key_string=27 value_string=102 stack=65
+UserRegionBegin dt=7 task=21 name_string=29 stack=66
+GoCreate dt=23 new_g=54 new_stack=17 stack=67
+GoCreate dt=8 new_g=55 new_stack=17 stack=67
+GoCreate dt=17 new_g=56 new_stack=17 stack=67
+GoCreate dt=8 new_g=57 new_stack=17 stack=67
+GoCreate dt=7 new_g=58 new_stack=17 stack=67
+UserRegionEnd dt=4 task=21 name_string=29 stack=68
+GoBlock dt=9 reason_string=19 stack=69
+GoStart dt=7 g=58 g_seq=1
+UserRegionBegin dt=46 task=21 name_string=56 stack=26
+UserLog dt=8 task=21 key_string=24 value_string=103 stack=27
+UserRegionEnd dt=4 task=21 name_string=56 stack=28
+GoDestroy dt=1
+GoStart dt=3 g=54 g_seq=1
+UserRegionBegin dt=19 task=21 name_string=33 stack=26
+UserLog dt=7 task=21 key_string=24 value_string=104 stack=27
+UserRegionEnd dt=2 task=21 name_string=33 stack=28
+GoDestroy dt=1
+GoStart dt=2 g=55 g_seq=1
+UserRegionBegin dt=17 task=21 name_string=35 stack=26
+UserLog dt=4 task=21 key_string=24 value_string=105 stack=27
+UserRegionEnd dt=2 task=21 name_string=35 stack=28
+GoDestroy dt=1
+GoStart dt=5 g=56 g_seq=1
+UserRegionBegin dt=16 task=21 name_string=31 stack=26
+UserLog dt=4 task=21 key_string=24 value_string=106 stack=27
+UserLog dt=3 task=21 key_string=24 value_string=107 stack=30
+UserTaskBegin dt=4 task=22 parent_task=21 name_string=26 stack=31
+UserLog dt=6 task=22 key_string=27 value_string=108 stack=32
+UserRegionBegin dt=4 task=22 name_string=29 stack=33
+UserRegionEnd dt=7 task=22 name_string=29 stack=34
+GoBlock dt=14 reason_string=19 stack=35
+GoStart dt=3 g=57 g_seq=1
+UserRegionBegin dt=22 task=21 name_string=30 stack=26
+UserLog dt=6 task=21 key_string=24 value_string=109 stack=27
+UserRegionEnd dt=2 task=21 name_string=30 stack=28
+GoDestroy dt=2
+ProcStop dt=10
+ProcStart dt=128031 p=4 p_seq=15
+ProcStop dt=16
+ProcStart dt=33758 p=2 p_seq=15
+ProcStop dt=18
+EventBatch gen=1 m=1986496 time=2753925246280 size=267
+ProcStart dt=549 p=0 p_seq=1
+GoStart dt=211 g=18 g_seq=1
+GoBlock dt=3533 reason_string=12 stack=21
+GoStart dt=41 g=21 g_seq=1
+GoBlock dt=150 reason_string=10 stack=22
+GoStart dt=93 g=20 g_seq=1
+GoSyscallBegin dt=51 p_seq=2 stack=23
+GoSyscallEnd dt=400
+GoBlock dt=582 reason_string=15 stack=25
+GoStart dt=26 g=23 g_seq=1
+HeapAlloc dt=50 heapalloc_value=1646592
+UserRegionBegin dt=2921 task=5 name_string=31 stack=26
+UserLog dt=28 task=5 key_string=24 value_string=37 stack=27
+UserLog dt=13 task=5 key_string=24 value_string=38 stack=30
+UserTaskBegin dt=15 task=6 parent_task=5 name_string=26 stack=31
+HeapAlloc dt=26 heapalloc_value=1687552
+UserLog dt=14 task=6 key_string=27 value_string=39 stack=32
+UserRegionBegin dt=9 task=6 name_string=29 stack=33
+UserRegionEnd dt=6 task=6 name_string=29 stack=34
+GoBlock dt=15 reason_string=19 stack=35
+ProcStop dt=30
+ProcStart dt=156949 p=4 p_seq=2
+GoUnblock dt=46 g=1 g_seq=1 stack=0
+GoStart dt=253 g=1 g_seq=2
+UserTaskEnd dt=27 task=5 stack=37
+UserLog dt=23 task=1 key_string=24 value_string=40 stack=38
+UserTaskBegin dt=14 task=7 parent_task=1 name_string=26 stack=39
+HeapAlloc dt=596 heapalloc_value=1695744
+HeapAlloc dt=18 heapalloc_value=1703936
+UserLog dt=17 task=7 key_string=27 value_string=41 stack=40
+UserRegionBegin dt=14 task=7 name_string=29 stack=41
+HeapAlloc dt=10 heapalloc_value=1712128
+HeapAlloc dt=17 heapalloc_value=1720320
+GoCreate dt=44 new_g=33 new_stack=17 stack=42
+GoCreate dt=175 new_g=34 new_stack=17 stack=42
+UserRegionEnd dt=50 task=7 name_string=29 stack=43
+GoBlock dt=9 reason_string=19 stack=44
+HeapAlloc dt=16 heapalloc_value=1728512
+GoStart dt=239 g=34 g_seq=1
+HeapAlloc dt=21 heapalloc_value=1736704
+UserRegionBegin dt=92 task=7 name_string=35 stack=26
+UserLog dt=15 task=7 key_string=24 value_string=42 stack=27
+UserRegionEnd dt=4 task=7 name_string=35 stack=28
+GoDestroy dt=2
+ProcStop dt=21
+ProcStart dt=800974 p=4 p_seq=10
+ProcStop dt=39
+ProcStart dt=158775 p=0 p_seq=15
+ProcStop dt=24
+ProcStart dt=159722 p=4 p_seq=13
+GoStart dt=254 g=13 g_seq=1
+UserRegionBegin dt=239 task=19 name_string=30 stack=26
+UserLog dt=23 task=19 key_string=24 value_string=100 stack=27
+UserRegionEnd dt=6 task=19 name_string=30 stack=28
+GoDestroy dt=7
+ProcStop dt=22
+EventBatch gen=1 m=1986495 time=2753925251756 size=320
+ProcStart dt=705 p=4 p_seq=1
+ProcStop dt=1279
+ProcStart dt=158975 p=0 p_seq=5
+ProcStop dt=23
+ProcStart dt=792 p=0 p_seq=6
+GoStart dt=187 g=33 g_seq=1
+UserRegionBegin dt=244 task=7 name_string=33 stack=26
+UserLog dt=32 task=7 key_string=24 value_string=43 stack=27
+UserRegionEnd dt=7 task=7 name_string=33 stack=28
+GoDestroy dt=5
+ProcStop dt=24
+ProcStart dt=160255 p=4 p_seq=4
+ProcStop dt=27
+ProcStart dt=159067 p=2 p_seq=5
+GoStart dt=222 g=37 g_seq=1
+UserRegionBegin dt=114 task=10 name_string=31 stack=26
+UserLog dt=16 task=10 key_string=24 value_string=60 stack=27
+UserLog dt=8 task=10 key_string=24 value_string=61 stack=30
+UserTaskBegin dt=8 task=11 parent_task=10 name_string=26 stack=31
+UserLog dt=19 task=11 key_string=27 value_string=62 stack=32
+UserRegionBegin dt=6 task=11 name_string=29 stack=33
+UserRegionEnd dt=7 task=11 name_string=29 stack=34
+GoBlock dt=15 reason_string=19 stack=35
+ProcStop dt=11
+ProcStart dt=160101 p=4 p_seq=7
+ProcStop dt=21
+ProcStart dt=159647 p=2 p_seq=7
+GoStart dt=277 g=43 g_seq=1
+UserRegionBegin dt=126 task=14 name_string=31 stack=26
+UserLog dt=21 task=14 key_string=24 value_string=77 stack=27
+UserLog dt=9 task=14 key_string=24 value_string=78 stack=30
+UserTaskBegin dt=8 task=15 parent_task=14 name_string=26 stack=31
+UserLog dt=17 task=15 key_string=27 value_string=79 stack=32
+UserRegionBegin dt=6 task=15 name_string=29 stack=33
+UserRegionEnd dt=8 task=15 name_string=29 stack=34
+GoBlock dt=23 reason_string=19 stack=35
+ProcStop dt=17
+ProcStart dt=159706 p=0 p_seq=14
+GoStart dt=229 g=52 g_seq=1
+UserRegionBegin dt=103 task=16 name_string=33 stack=26
+UserLog dt=20 task=16 key_string=24 value_string=83 stack=27
+UserRegionEnd dt=4 task=16 name_string=33 stack=28
+GoDestroy dt=3
+ProcStop dt=17
+ProcStart dt=319699 p=2 p_seq=10
+ProcStop dt=20
+ProcStart dt=158728 p=4 p_seq=14
+ProcStop dt=17
+ProcStart dt=110606 p=2 p_seq=13
+ProcStop dt=10
+ProcStart dt=16732 p=2 p_seq=14
+GoUnblock dt=45 g=18 g_seq=2 stack=0
+GoStart dt=184 g=18 g_seq=3
+GoBlock dt=114 reason_string=12 stack=21
+ProcStop dt=8
+ProcStart dt=16779 p=4 p_seq=16
+ProcStop dt=11
+ProcStart dt=16790 p=4 p_seq=17
+GoUnblock dt=23 g=1 g_seq=19 stack=0
+GoUnblock dt=8 g=56 g_seq=2 stack=0
+GoStart dt=142 g=56 g_seq=3
+UserTaskEnd dt=14 task=22 stack=36
+UserRegionEnd dt=8 task=21 name_string=31 stack=28
+GoDestroy dt=5
+GoStart dt=18 g=1 g_seq=20
+UserTaskEnd dt=17 task=21 stack=70
+UserTaskEnd dt=12 task=4 stack=71
+HeapAlloc dt=802 heapalloc_value=1835008
+HeapAlloc dt=41 heapalloc_value=1843200
+HeapAlloc dt=13 heapalloc_value=1851392
+EventBatch gen=1 m=1986494 time=2753925248778 size=47
+ProcStart dt=390 p=3 p_seq=1
+GoStart dt=1718 g=22 g_seq=1
+HeapAlloc dt=1807 heapalloc_value=1654784
+HeapAlloc dt=406 heapalloc_value=1671168
+HeapAlloc dt=15 heapalloc_value=1679360
+UserRegionBegin dt=49 task=5 name_string=35 stack=26
+UserLog dt=30 task=5 key_string=24 value_string=36 stack=27
+UserRegionEnd dt=5 task=5 name_string=35 stack=28
+GoDestroy dt=5
+ProcStop dt=42
+EventBatch gen=1 m=1986492 time=2753925244400 size=582
+ProcStatus dt=67 p=1 pstatus=1
+GoStatus dt=4 g=1 m=1986492 gstatus=2
+ProcsChange dt=220 procs_value=8 stack=1
+STWBegin dt=127 kind_string=21 stack=2
+HeapGoal dt=3 heapgoal_value=4194304
+ProcStatus dt=2 p=0 pstatus=2
+ProcStatus dt=2 p=2 pstatus=2
+ProcStatus dt=1 p=3 pstatus=2
+ProcStatus dt=1 p=4 pstatus=2
+ProcStatus dt=1 p=5 pstatus=2
+ProcStatus dt=1 p=6 pstatus=2
+ProcStatus dt=1 p=7 pstatus=2
+ProcsChange dt=353 procs_value=8 stack=3
+STWEnd dt=277
+HeapAlloc dt=243 heapalloc_value=1605632
+HeapAlloc dt=24 heapalloc_value=1613824
+GoCreate dt=209 new_g=18 new_stack=4 stack=5
+GoCreate dt=561 new_g=19 new_stack=6 stack=7
+GoCreate dt=25 new_g=20 new_stack=8 stack=9
+UserTaskEnd dt=309 task=2 stack=10
+UserTaskBegin dt=26 task=3 parent_task=1 name_string=22 stack=11
+UserTaskBegin dt=918 task=4 parent_task=0 name_string=23 stack=12
+UserLog dt=461 task=0 key_string=24 value_string=25 stack=13
+UserTaskBegin dt=420 task=5 parent_task=0 name_string=26 stack=14
+UserLog dt=673 task=5 key_string=27 value_string=28 stack=15
+UserRegionBegin dt=15 task=5 name_string=29 stack=16
+HeapAlloc dt=51 heapalloc_value=1630208
+GoCreate dt=24 new_g=21 new_stack=17 stack=18
+GoCreate dt=17 new_g=22 new_stack=17 stack=18
+GoCreate dt=10 new_g=23 new_stack=17 stack=18
+GoCreate dt=9 new_g=24 new_stack=17 stack=18
+UserRegionEnd dt=549 task=5 name_string=29 stack=19
+GoBlock dt=14 reason_string=19 stack=20
+GoStart dt=378 g=24 g_seq=1
+HeapAlloc dt=65 heapalloc_value=1638400
+GoUnblock dt=559 g=21 g_seq=2 stack=24
+UserRegionBegin dt=1498 task=5 name_string=30 stack=26
+UserLog dt=35 task=5 key_string=24 value_string=32 stack=27
+UserRegionEnd dt=8 task=5 name_string=30 stack=28
+GoDestroy dt=5
+GoStart dt=24 g=21 g_seq=3
+UserRegionBegin dt=60 task=5 name_string=33 stack=26
+UserLog dt=7 task=5 key_string=24 value_string=34 stack=27
+UserRegionEnd dt=2 task=5 name_string=33 stack=28
+GoDestroy dt=2
+ProcStop dt=34
+ProcStart dt=141874 p=0 p_seq=3
+ProcStop dt=21
+ProcStart dt=16770 p=0 p_seq=4
+GoUnblock dt=29 g=23 g_seq=2 stack=0
+GoStart dt=176 g=23 g_seq=3
+UserTaskEnd dt=19 task=6 stack=36
+UserRegionEnd dt=14 task=5 name_string=31 stack=28
+GoDestroy dt=2
+ProcStop dt=12
+ProcStart dt=2251 p=4 p_seq=3
+ProcStop dt=22
+ProcStart dt=141952 p=2 p_seq=3
+ProcStop dt=27
+ProcStart dt=16789 p=2 p_seq=4
+GoUnblock dt=35 g=1 g_seq=3 stack=0
+GoStart dt=214 g=1 g_seq=4
+UserTaskEnd dt=26 task=7 stack=45
+UserLog dt=27 task=2 key_string=24 value_string=44 stack=46
+UserTaskBegin dt=10 task=8 parent_task=2 name_string=26 stack=47
+HeapAlloc dt=52 heapalloc_value=1744896
+HeapAlloc dt=22 heapalloc_value=1753088
+UserLog dt=13 task=8 key_string=27 value_string=45 stack=48
+UserRegionBegin dt=11 task=8 name_string=29 stack=49
+HeapAlloc dt=7 heapalloc_value=1761280
+HeapAlloc dt=18 heapalloc_value=1769472
+GoCreate dt=52 new_g=49 new_stack=17 stack=50
+GoCreate dt=12 new_g=50 new_stack=17 stack=50
+HeapAlloc dt=11 heapalloc_value=1777664
+GoCreate dt=9 new_g=51 new_stack=17 stack=50
+UserRegionEnd dt=9 task=8 name_string=29 stack=51
+GoBlock dt=11 reason_string=19 stack=52
+HeapAlloc dt=12 heapalloc_value=1785856
+GoStart dt=14 g=51 g_seq=1
+HeapAlloc dt=18 heapalloc_value=1794048
+UserRegionBegin dt=95 task=8 name_string=31 stack=26
+UserLog dt=22 task=8 key_string=24 value_string=46 stack=27
+UserLog dt=8 task=8 key_string=24 value_string=47 stack=30
+UserTaskBegin dt=5 task=9 parent_task=8 name_string=26 stack=31
+UserLog dt=7 task=9 key_string=27 value_string=48 stack=32
+UserRegionBegin dt=4 task=9 name_string=29 stack=33
+UserRegionEnd dt=7 task=9 name_string=29 stack=34
+HeapAlloc dt=11 heapalloc_value=1802240
+GoStop dt=674 reason_string=16 stack=53
+GoStart dt=12 g=51 g_seq=2
+GoBlock dt=8 reason_string=19 stack=35
+HeapAlloc dt=16 heapalloc_value=1810432
+ProcStop dt=8
+ProcStart dt=159907 p=0 p_seq=8
+ProcStop dt=25
+ProcStart dt=159186 p=2 p_seq=6
+GoUnblock dt=22 g=37 g_seq=2 stack=0
+GoStart dt=217 g=37 g_seq=3
+UserTaskEnd dt=19 task=11 stack=36
+UserRegionEnd dt=15 task=10 name_string=31 stack=28
+GoDestroy dt=5
+ProcStop dt=16
+ProcStart dt=160988 p=0 p_seq=11
+ProcStop dt=29
+ProcStart dt=158554 p=2 p_seq=8
+GoUnblock dt=38 g=1 g_seq=11 stack=0
+GoStart dt=240 g=1 g_seq=12
+UserTaskEnd dt=25 task=14 stack=37
+UserLog dt=23 task=1 key_string=24 value_string=80 stack=38
+UserTaskBegin dt=11 task=16 parent_task=1 name_string=26 stack=39
+UserLog dt=36 task=16 key_string=27 value_string=81 stack=40
+UserRegionBegin dt=13 task=16 name_string=29 stack=41
+GoCreate dt=39 new_g=52 new_stack=17 stack=42
+GoCreate dt=23 new_g=53 new_stack=17 stack=42
+UserRegionEnd dt=11 task=16 name_string=29 stack=43
+GoBlock dt=9 reason_string=19 stack=44
+GoStart dt=244 g=53 g_seq=1
+UserRegionBegin dt=101 task=16 name_string=35 stack=26
+UserLog dt=17 task=16 key_string=24 value_string=82 stack=27
+UserRegionEnd dt=4 task=16 name_string=35 stack=28
+GoDestroy dt=3
+ProcStop dt=28
+EventBatch gen=1 m=18446744073709551615 time=2753926855140 size=56
+GoStatus dt=74 g=2 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=3 m=18446744073709551615 gstatus=4
+GoStatus dt=1 g=4 m=18446744073709551615 gstatus=4
+GoStatus dt=1 g=17 m=18446744073709551615 gstatus=4
+EventBatch gen=1 m=18446744073709551615 time=2753926855560 size=1759
+Stacks
+Stack id=45 nframes=3
+	pc=4804964 func=110 file=111 line=80
+	pc=4804052 func=112 file=113 line=84
+	pc=4803566 func=114 file=113 line=44
+Stack id=22 nframes=7
+	pc=4633935 func=115 file=116 line=90
+	pc=4633896 func=117 file=118 line=223
+	pc=4633765 func=119 file=118 line=216
+	pc=4633083 func=120 file=118 line=131
+	pc=4764601 func=121 file=122 line=152
+	pc=4765335 func=123 file=122 line=238
+	pc=4804612 func=124 file=113 line=70
+Stack id=9 nframes=2
+	pc=4802543 func=125 file=126 line=128
+	pc=4803332 func=114 file=113 line=30
+Stack id=71 nframes=2
+	pc=4803671 func=110 file=111 line=80
+	pc=4803666 func=114 file=113 line=51
+Stack id=10 nframes=2
+	pc=4803415 func=110 file=111 line=80
+	pc=4803410 func=114 file=113 line=33
+Stack id=18 nframes=4
+	pc=4804196 func=127 file=113 line=69
+	pc=4802140 func=128 file=111 line=141
+	pc=4804022 func=112 file=113 line=67
+	pc=4803543 func=114 file=113 line=43
+Stack id=37 nframes=3
+	pc=4804964 func=110 file=111 line=80
+	pc=4804052 func=112 file=113 line=84
+	pc=4803543 func=114 file=113 line=43
+Stack id=31 nframes=4
+	pc=4803865 func=112 file=113 line=61
+	pc=4804890 func=129 file=113 line=73
+	pc=4802140 func=128 file=111 line=141
+	pc=4804691 func=124 file=113 line=70
+Stack id=55 nframes=2
+	pc=4803832 func=112 file=113 line=58
+	pc=4803609 func=114 file=113 line=46
+Stack id=47 nframes=2
+	pc=4803865 func=112 file=113 line=61
+	pc=4803589 func=114 file=113 line=45
+Stack id=38 nframes=2
+	pc=4803832 func=112 file=113 line=58
+	pc=4803566 func=114 file=113 line=44
+Stack id=56 nframes=2
+	pc=4803865 func=112 file=113 line=61
+	pc=4803609 func=114 file=113 line=46
+Stack id=33 nframes=4
+	pc=4804022 func=112 file=113 line=67
+	pc=4804890 func=129 file=113 line=73
+	pc=4802140 func=128 file=111 line=141
+	pc=4804691 func=124 file=113 line=70
+Stack id=44 nframes=3
+	pc=4599892 func=130 file=131 line=195
+	pc=4804036 func=112 file=113 line=83
+	pc=4803566 func=114 file=113 line=44
+Stack id=3 nframes=4
+	pc=4421707 func=132 file=133 line=1382
+	pc=4533555 func=134 file=135 line=255
+	pc=4802469 func=125 file=126 line=125
+	pc=4803332 func=114 file=113 line=30
+Stack id=6 nframes=1
+	pc=4539520 func=136 file=135 line=868
+Stack id=58 nframes=2
+	pc=4804022 func=112 file=113 line=67
+	pc=4803609 func=114 file=113 line=46
+Stack id=64 nframes=2
+	pc=4803865 func=112 file=113 line=61
+	pc=4803629 func=114 file=113 line=47
+Stack id=62 nframes=3
+	pc=4804964 func=110 file=111 line=80
+	pc=4804052 func=112 file=113 line=84
+	pc=4803609 func=114 file=113 line=46
+Stack id=34 nframes=4
+	pc=4804022 func=112 file=113 line=67
+	pc=4804890 func=129 file=113 line=73
+	pc=4802140 func=128 file=111 line=141
+	pc=4804691 func=124 file=113 line=70
+Stack id=30 nframes=4
+	pc=4803832 func=112 file=113 line=58
+	pc=4804890 func=129 file=113 line=73
+	pc=4802140 func=128 file=111 line=141
+	pc=4804691 func=124 file=113 line=70
+Stack id=32 nframes=4
+	pc=4803943 func=112 file=113 line=64
+	pc=4804890 func=129 file=113 line=73
+	pc=4802140 func=128 file=111 line=141
+	pc=4804691 func=124 file=113 line=70
+Stack id=26 nframes=1
+	pc=4804691 func=124 file=113 line=70
+Stack id=46 nframes=2
+	pc=4803832 func=112 file=113 line=58
+	pc=4803589 func=114 file=113 line=45
+Stack id=50 nframes=4
+	pc=4804196 func=127 file=113 line=69
+	pc=4802140 func=128 file=111 line=141
+	pc=4804022 func=112 file=113 line=67
+	pc=4803589 func=114 file=113 line=45
+Stack id=59 nframes=4
+	pc=4804196 func=127 file=113 line=69
+	pc=4802140 func=128 file=111 line=141
+	pc=4804022 func=112 file=113 line=67
+	pc=4803609 func=114 file=113 line=46
+Stack id=7 nframes=4
+	pc=4539492 func=137 file=135 line=868
+	pc=4533572 func=134 file=135 line=258
+	pc=4802469 func=125 file=126 line=125
+	pc=4803332 func=114 file=113 line=30
+Stack id=17 nframes=1
+	pc=4804512 func=124 file=113 line=69
+Stack id=57 nframes=2
+	pc=4803943 func=112 file=113 line=64
+	pc=4803609 func=114 file=113 line=46
+Stack id=41 nframes=2
+	pc=4804022 func=112 file=113 line=67
+	pc=4803566 func=114 file=113 line=44
+Stack id=63 nframes=2
+	pc=4803832 func=112 file=113 line=58
+	pc=4803629 func=114 file=113 line=47
+Stack id=60 nframes=2
+	pc=4804022 func=112 file=113 line=67
+	pc=4803609 func=114 file=113 line=46
+Stack id=5 nframes=4
+	pc=4542549 func=138 file=139 line=42
+	pc=4533560 func=134 file=135 line=257
+	pc=4802469 func=125 file=126 line=125
+	pc=4803332 func=114 file=113 line=30
+Stack id=40 nframes=2
+	pc=4803943 func=112 file=113 line=64
+	pc=4803566 func=114 file=113 line=44
+Stack id=21 nframes=3
+	pc=4217905 func=140 file=141 line=442
+	pc=4539946 func=142 file=135 line=928
+	pc=4542714 func=143 file=139 line=54
+Stack id=2 nframes=3
+	pc=4533284 func=134 file=135 line=238
+	pc=4802469 func=125 file=126 line=125
+	pc=4803332 func=114 file=113 line=30
+Stack id=53 nframes=6
+	pc=4247492 func=144 file=145 line=1374
+	pc=4599676 func=130 file=131 line=186
+	pc=4804036 func=112 file=113 line=83
+	pc=4804890 func=129 file=113 line=73
+	pc=4802140 func=128 file=111 line=141
+	pc=4804691 func=124 file=113 line=70
+Stack id=20 nframes=3
+	pc=4599892 func=130 file=131 line=195
+	pc=4804036 func=112 file=113 line=83
+	pc=4803543 func=114 file=113 line=43
+Stack id=70 nframes=3
+	pc=4804964 func=110 file=111 line=80
+	pc=4804052 func=112 file=113 line=84
+	pc=4803629 func=114 file=113 line=47
+Stack id=15 nframes=2
+	pc=4803943 func=112 file=113 line=64
+	pc=4803543 func=114 file=113 line=43
+Stack id=65 nframes=2
+	pc=4803943 func=112 file=113 line=64
+	pc=4803629 func=114 file=113 line=47
+Stack id=28 nframes=1
+	pc=4804691 func=124 file=113 line=70
+Stack id=48 nframes=2
+	pc=4803943 func=112 file=113 line=64
+	pc=4803589 func=114 file=113 line=45
+Stack id=61 nframes=3
+	pc=4599892 func=130 file=131 line=195
+	pc=4804036 func=112 file=113 line=83
+	pc=4803609 func=114 file=113 line=46
+Stack id=13 nframes=2
+	pc=4803832 func=112 file=113 line=58
+	pc=4803543 func=114 file=113 line=43
+Stack id=29 nframes=3
+	pc=4217905 func=140 file=141 line=442
+	pc=4539946 func=142 file=135 line=928
+	pc=4539559 func=136 file=135 line=871
+Stack id=51 nframes=2
+	pc=4804022 func=112 file=113 line=67
+	pc=4803589 func=114 file=113 line=45
+Stack id=42 nframes=4
+	pc=4804196 func=127 file=113 line=69
+	pc=4802140 func=128 file=111 line=141
+	pc=4804022 func=112 file=113 line=67
+	pc=4803566 func=114 file=113 line=44
+Stack id=14 nframes=2
+	pc=4803865 func=112 file=113 line=61
+	pc=4803543 func=114 file=113 line=43
+Stack id=39 nframes=2
+	pc=4803865 func=112 file=113 line=61
+	pc=4803566 func=114 file=113 line=44
+Stack id=49 nframes=2
+	pc=4804022 func=112 file=113 line=67
+	pc=4803589 func=114 file=113 line=45
+Stack id=52 nframes=3
+	pc=4599892 func=130 file=131 line=195
+	pc=4804036 func=112 file=113 line=83
+	pc=4803589 func=114 file=113 line=45
+Stack id=24 nframes=7
+	pc=4634510 func=146 file=116 line=223
+	pc=4634311 func=117 file=118 line=240
+	pc=4633765 func=119 file=118 line=216
+	pc=4633083 func=120 file=118 line=131
+	pc=4764601 func=121 file=122 line=152
+	pc=4765335 func=123 file=122 line=238
+	pc=4804612 func=124 file=113 line=70
+Stack id=43 nframes=2
+	pc=4804022 func=112 file=113 line=67
+	pc=4803566 func=114 file=113 line=44
+Stack id=19 nframes=2
+	pc=4804022 func=112 file=113 line=67
+	pc=4803543 func=114 file=113 line=43
+Stack id=69 nframes=3
+	pc=4599892 func=130 file=131 line=195
+	pc=4804036 func=112 file=113 line=83
+	pc=4803629 func=114 file=113 line=47
+Stack id=16 nframes=2
+	pc=4804022 func=112 file=113 line=67
+	pc=4803543 func=114 file=113 line=43
+Stack id=54 nframes=3
+	pc=4804964 func=110 file=111 line=80
+	pc=4804052 func=112 file=113 line=84
+	pc=4803589 func=114 file=113 line=45
+Stack id=35 nframes=5
+	pc=4599892 func=130 file=131 line=195
+	pc=4804036 func=112 file=113 line=83
+	pc=4804890 func=129 file=113 line=73
+	pc=4802140 func=128 file=111 line=141
+	pc=4804691 func=124 file=113 line=70
+Stack id=27 nframes=3
+	pc=4804862 func=129 file=113 line=71
+	pc=4802140 func=128 file=111 line=141
+	pc=4804691 func=124 file=113 line=70
+Stack id=4 nframes=1
+	pc=4542656 func=143 file=139 line=42
+Stack id=8 nframes=1
+	pc=4802720 func=147 file=126 line=128
+Stack id=66 nframes=2
+	pc=4804022 func=112 file=113 line=67
+	pc=4803629 func=114 file=113 line=47
+Stack id=1 nframes=4
+	pc=4548715 func=148 file=149 line=255
+	pc=4533263 func=134 file=135 line=237
+	pc=4802469 func=125 file=126 line=125
+	pc=4803332 func=114 file=113 line=30
+Stack id=67 nframes=4
+	pc=4804196 func=127 file=113 line=69
+	pc=4802140 func=128 file=111 line=141
+	pc=4804022 func=112 file=113 line=67
+	pc=4803629 func=114 file=113 line=47
+Stack id=23 nframes=7
+	pc=4641050 func=150 file=151 line=964
+	pc=4751591 func=152 file=153 line=209
+	pc=4751583 func=154 file=155 line=736
+	pc=4751136 func=156 file=155 line=380
+	pc=4753008 func=157 file=158 line=46
+	pc=4753000 func=159 file=160 line=183
+	pc=4802778 func=147 file=126 line=134
+Stack id=11 nframes=1
+	pc=4803445 func=114 file=113 line=36
+Stack id=68 nframes=2
+	pc=4804022 func=112 file=113 line=67
+	pc=4803629 func=114 file=113 line=47
+Stack id=36 nframes=5
+	pc=4804964 func=110 file=111 line=80
+	pc=4804052 func=112 file=113 line=84
+	pc=4804890 func=129 file=113 line=73
+	pc=4802140 func=128 file=111 line=141
+	pc=4804691 func=124 file=113 line=70
+Stack id=12 nframes=1
+	pc=4803492 func=114 file=113 line=39
+Stack id=25 nframes=1
+	pc=4802788 func=147 file=126 line=130
+EventBatch gen=1 m=18446744073709551615 time=2753925243266 size=3466
+Strings
+String id=1
+	data="Not worker"
+String id=2
+	data="GC (dedicated)"
+String id=3
+	data="GC (fractional)"
+String id=4
+	data="GC (idle)"
+String id=5
+	data="unspecified"
+String id=6
+	data="forever"
+String id=7
+	data="network"
+String id=8
+	data="select"
+String id=9
+	data="sync.(*Cond).Wait"
+String id=10
+	data="sync"
+String id=11
+	data="chan send"
+String id=12
+	data="chan receive"
+String id=13
+	data="GC mark assist wait for work"
+String id=14
+	data="GC background sweeper wait"
+String id=15
+	data="system goroutine wait"
+String id=16
+	data="preempted"
+String id=17
+	data="wait for debug call"
+String id=18
+	data="wait until GC ends"
+String id=19
+	data="sleep"
+String id=20
+	data="runtime.Gosched"
+String id=21
+	data="start trace"
+String id=22
+	data="type2"
+String id=23
+	data="type3"
+String id=24
+	data="log"
+String id=25
+	data="before do"
+String id=26
+	data="do"
+String id=27
+	data="log2"
+String id=28
+	data="do"
+String id=29
+	data="fanout"
+String id=30
+	data="region3"
+String id=31
+	data="region2"
+String id=32
+	data="fanout region3"
+String id=33
+	data="region0"
+String id=34
+	data="fanout region0"
+String id=35
+	data="region1"
+String id=36
+	data="fanout region1"
+String id=37
+	data="fanout region2"
+String id=38
+	data="before do"
+String id=39
+	data="do"
+String id=40
+	data="before do"
+String id=41
+	data="do"
+String id=42
+	data="fanout region1"
+String id=43
+	data="fanout region0"
+String id=44
+	data="before do"
+String id=45
+	data="do"
+String id=46
+	data="fanout region2"
+String id=47
+	data="before do"
+String id=48
+	data="do"
+String id=49
+	data="fanout region0"
+String id=50
+	data="fanout region1"
+String id=51
+	data="before do"
+String id=52
+	data="do"
+String id=53
+	data="region5"
+String id=54
+	data="fanout region5"
+String id=55
+	data="fanout region3"
+String id=56
+	data="region4"
+String id=57
+	data="fanout region4"
+String id=58
+	data="fanout region0"
+String id=59
+	data="fanout region1"
+String id=60
+	data="fanout region2"
+String id=61
+	data="before do"
+String id=62
+	data="do"
+String id=63
+	data="before do"
+String id=64
+	data="do"
+String id=65
+	data="fanout region4"
+String id=66
+	data="fanout region0"
+String id=67
+	data="fanout region1"
+String id=68
+	data="fanout region2"
+String id=69
+	data="before do"
+String id=70
+	data="do"
+String id=71
+	data="fanout region3"
+String id=72
+	data="before do"
+String id=73
+	data="do"
+String id=74
+	data="fanout region3"
+String id=75
+	data="fanout region0"
+String id=76
+	data="fanout region1"
+String id=77
+	data="fanout region2"
+String id=78
+	data="before do"
+String id=79
+	data="do"
+String id=80
+	data="before do"
+String id=81
+	data="do"
+String id=82
+	data="fanout region1"
+String id=83
+	data="fanout region0"
+String id=84
+	data="before do"
+String id=85
+	data="do"
+String id=86
+	data="fanout region2"
+String id=87
+	data="before do"
+String id=88
+	data="do"
+String id=89
+	data="fanout region0"
+String id=90
+	data="fanout region1"
+String id=91
+	data="before do"
+String id=92
+	data="do"
+String id=93
+	data="fanout region5"
+String id=94
+	data="fanout region0"
+String id=95
+	data="fanout region1"
+String id=96
+	data="fanout region2"
+String id=97
+	data="before do"
+String id=98
+	data="do"
+String id=99
+	data="fanout region4"
+String id=100
+	data="fanout region3"
+String id=101
+	data="before do"
+String id=102
+	data="do"
+String id=103
+	data="fanout region4"
+String id=104
+	data="fanout region0"
+String id=105
+	data="fanout region1"
+String id=106
+	data="fanout region2"
+String id=107
+	data="before do"
+String id=108
+	data="do"
+String id=109
+	data="fanout region3"
+String id=110
+	data="runtime/trace.(*Task).End"
+String id=111
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace/annotation.go"
+String id=112
+	data="main.do"
+String id=113
+	data="/usr/local/google/home/mknyszek/work/go-1/src/internal/trace/v2/testdata/testprog/annotations-stress.go"
+String id=114
+	data="main.main"
+String id=115
+	data="sync.(*Mutex).Lock"
+String id=116
+	data="/usr/local/google/home/mknyszek/work/go-1/src/sync/mutex.go"
+String id=117
+	data="sync.(*Pool).pinSlow"
+String id=118
+	data="/usr/local/google/home/mknyszek/work/go-1/src/sync/pool.go"
+String id=119
+	data="sync.(*Pool).pin"
+String id=120
+	data="sync.(*Pool).Get"
+String id=121
+	data="fmt.newPrinter"
+String id=122
+	data="/usr/local/google/home/mknyszek/work/go-1/src/fmt/print.go"
+String id=123
+	data="fmt.Sprintf"
+String id=124
+	data="main.do.func1.1"
+String id=125
+	data="runtime/trace.Start"
+String id=126
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace/trace.go"
+String id=127
+	data="main.do.func1"
+String id=128
+	data="runtime/trace.WithRegion"
+String id=129
+	data="main.do.func1.1.1"
+String id=130
+	data="time.Sleep"
+String id=131
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/time.go"
+String id=132
+	data="runtime.startTheWorld"
+String id=133
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/proc.go"
+String id=134
+	data="runtime.StartTrace"
+String id=135
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2.go"
+String id=136
+	data="runtime.(*traceAdvancerState).start.func1"
+String id=137
+	data="runtime.(*traceAdvancerState).start"
+String id=138
+	data="runtime.traceStartReadCPU"
+String id=139
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2cpu.go"
+String id=140
+	data="runtime.chanrecv1"
+String id=141
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/chan.go"
+String id=142
+	data="runtime.(*wakeableSleep).sleep"
+String id=143
+	data="runtime.traceStartReadCPU.func1"
+String id=144
+	data="runtime.newobject"
+String id=145
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/malloc.go"
+String id=146
+	data="sync.(*Mutex).Unlock"
+String id=147
+	data="runtime/trace.Start.func1"
+String id=148
+	data="runtime.traceLocker.Gomaxprocs"
+String id=149
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2runtime.go"
+String id=150
+	data="syscall.write"
+String id=151
+	data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/zsyscall_linux_amd64.go"
+String id=152
+	data="syscall.Write"
+String id=153
+	data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/syscall_unix.go"
+String id=154
+	data="internal/poll.ignoringEINTRIO"
+String id=155
+	data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_unix.go"
+String id=156
+	data="internal/poll.(*FD).Write"
+String id=157
+	data="os.(*File).write"
+String id=158
+	data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_posix.go"
+String id=159
+	data="os.(*File).Write"
+String id=160
+	data="/usr/local/google/home/mknyszek/work/go-1/src/os/file.go"
diff --git a/src/internal/trace/v2/testdata/tests/go122-annotations.test b/src/internal/trace/v2/testdata/tests/go122-annotations.test
new file mode 100644
index 0000000..e468673
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-annotations.test
@@ -0,0 +1,299 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=18446744073709551615 time=28113086279559 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=167930 time=28113086277797 size=41
+ProcStart dt=505 p=1 p_seq=1
+GoStart dt=303 g=7 g_seq=1
+HeapAlloc dt=646 heapalloc_value=1892352
+HeapAlloc dt=149 heapalloc_value=1900544
+GoBlock dt=146 reason_string=12 stack=24
+GoStart dt=14 g=6 g_seq=1
+HeapAlloc dt=16 heapalloc_value=1908736
+GoBlock dt=347 reason_string=12 stack=25
+EventBatch gen=1 m=167928 time=28113086279032 size=10
+ProcStart dt=451 p=2 p_seq=1
+GoStart dt=188 g=8 g_seq=1
+EventBatch gen=1 m=167926 time=28113086275999 size=324
+ProcStatus dt=295 p=0 pstatus=1
+GoStatus dt=5 g=1 m=167926 gstatus=2
+ProcsChange dt=405 procs_value=48 stack=1
+STWBegin dt=65 kind_string=21 stack=2
+HeapGoal dt=2 heapgoal_value=4194304
+ProcStatus dt=4 p=1 pstatus=2
+ProcStatus dt=1 p=2 pstatus=2
+ProcStatus dt=1 p=3 pstatus=2
+ProcStatus dt=1 p=4 pstatus=2
+ProcStatus dt=1 p=5 pstatus=2
+ProcStatus dt=1 p=6 pstatus=2
+ProcStatus dt=1 p=7 pstatus=2
+ProcStatus dt=1 p=8 pstatus=2
+ProcStatus dt=1 p=9 pstatus=2
+ProcStatus dt=1 p=10 pstatus=2
+ProcStatus dt=1 p=11 pstatus=2
+ProcStatus dt=1 p=12 pstatus=2
+ProcStatus dt=1 p=13 pstatus=2
+ProcStatus dt=1 p=14 pstatus=2
+ProcStatus dt=1 p=15 pstatus=2
+ProcStatus dt=1 p=16 pstatus=2
+ProcStatus dt=1 p=17 pstatus=2
+ProcStatus dt=1 p=18 pstatus=2
+ProcStatus dt=1 p=19 pstatus=2
+ProcStatus dt=1 p=20 pstatus=2
+ProcStatus dt=1 p=21 pstatus=2
+ProcStatus dt=1 p=22 pstatus=2
+ProcStatus dt=1 p=23 pstatus=2
+ProcStatus dt=1 p=24 pstatus=2
+ProcStatus dt=1 p=25 pstatus=2
+ProcStatus dt=1 p=26 pstatus=2
+ProcStatus dt=1 p=27 pstatus=2
+ProcStatus dt=1 p=28 pstatus=2
+ProcStatus dt=1 p=29 pstatus=2
+ProcStatus dt=1 p=30 pstatus=2
+ProcStatus dt=1 p=31 pstatus=2
+ProcStatus dt=1 p=32 pstatus=2
+ProcStatus dt=1 p=33 pstatus=2
+ProcStatus dt=1 p=34 pstatus=2
+ProcStatus dt=1 p=35 pstatus=2
+ProcStatus dt=1 p=36 pstatus=2
+ProcStatus dt=1 p=37 pstatus=2
+ProcStatus dt=1 p=38 pstatus=2
+ProcStatus dt=1 p=39 pstatus=2
+ProcStatus dt=1 p=40 pstatus=2
+ProcStatus dt=1 p=41 pstatus=2
+ProcStatus dt=1 p=42 pstatus=2
+ProcStatus dt=1 p=43 pstatus=2
+ProcStatus dt=1 p=44 pstatus=2
+ProcStatus dt=1 p=45 pstatus=2
+ProcStatus dt=1 p=46 pstatus=2
+ProcStatus dt=1 p=47 pstatus=2
+ProcsChange dt=1 procs_value=48 stack=3
+STWEnd dt=184
+GoCreate dt=252 new_g=6 new_stack=4 stack=5
+GoCreate dt=78 new_g=7 new_stack=6 stack=7
+GoCreate dt=73 new_g=8 new_stack=8 stack=9
+UserTaskBegin dt=71 task=1 parent_task=0 name_string=22 stack=10
+UserRegionBegin dt=535 task=1 name_string=23 stack=11
+HeapAlloc dt=26 heapalloc_value=1884160
+GoCreate dt=8 new_g=9 new_stack=12 stack=13
+GoBlock dt=249 reason_string=10 stack=14
+GoStart dt=8 g=9 g_seq=1
+UserRegionBegin dt=286 task=1 name_string=24 stack=15
+UserRegionBegin dt=244 task=1 name_string=25 stack=16
+UserRegionBegin dt=6 task=1 name_string=26 stack=17
+UserLog dt=6 task=1 key_string=27 value_string=28 stack=18
+UserRegionEnd dt=4 task=1 name_string=26 stack=19
+UserRegionEnd dt=315 task=1 name_string=25 stack=20
+UserTaskEnd dt=5 task=1 stack=21
+GoUnblock dt=11 g=1 g_seq=1 stack=22
+GoDestroy dt=6
+GoStart dt=10 g=1 g_seq=2
+UserRegionBegin dt=278 task=0 name_string=29 stack=23
+EventBatch gen=1 m=18446744073709551615 time=28113086280061 size=57
+GoStatus dt=318 g=2 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=3 m=18446744073709551615 gstatus=4
+GoStatus dt=1 g=4 m=18446744073709551615 gstatus=4
+GoStatus dt=1 g=5 m=18446744073709551615 gstatus=4
+EventBatch gen=1 m=18446744073709551615 time=28113086280852 size=488
+Stacks
+Stack id=17 nframes=3
+	pc=4816080 func=30 file=31 line=45
+	pc=4813660 func=32 file=33 line=141
+	pc=4815908 func=34 file=31 line=43
+Stack id=8 nframes=1
+	pc=4814528 func=35 file=36 line=128
+Stack id=9 nframes=2
+	pc=4814351 func=37 file=36 line=128
+	pc=4815228 func=38 file=31 line=27
+Stack id=24 nframes=3
+	pc=4217457 func=39 file=40 line=442
+	pc=4544973 func=41 file=42 line=918
+	pc=4544806 func=43 file=42 line=871
+Stack id=7 nframes=4
+	pc=4544740 func=44 file=42 line=868
+	pc=4538792 func=45 file=42 line=258
+	pc=4814277 func=37 file=36 line=125
+	pc=4815228 func=38 file=31 line=27
+Stack id=22 nframes=3
+	pc=4642148 func=46 file=47 line=81
+	pc=4816326 func=48 file=47 line=87
+	pc=4815941 func=34 file=31 line=50
+Stack id=11 nframes=1
+	pc=4815364 func=38 file=31 line=34
+Stack id=5 nframes=4
+	pc=4547349 func=49 file=50 line=42
+	pc=4538780 func=45 file=42 line=257
+	pc=4814277 func=37 file=36 line=125
+	pc=4815228 func=38 file=31 line=27
+Stack id=23 nframes=1
+	pc=4815568 func=38 file=31 line=54
+Stack id=3 nframes=4
+	pc=4421860 func=51 file=52 line=1360
+	pc=4538775 func=45 file=42 line=255
+	pc=4814277 func=37 file=36 line=125
+	pc=4815228 func=38 file=31 line=27
+Stack id=21 nframes=2
+	pc=4816228 func=53 file=33 line=80
+	pc=4815926 func=34 file=31 line=50
+Stack id=1 nframes=4
+	pc=4553515 func=54 file=55 line=255
+	pc=4538503 func=45 file=42 line=237
+	pc=4814277 func=37 file=36 line=125
+	pc=4815228 func=38 file=31 line=27
+Stack id=12 nframes=1
+	pc=4815680 func=34 file=31 line=37
+Stack id=6 nframes=1
+	pc=4544768 func=43 file=42 line=868
+Stack id=2 nframes=3
+	pc=4538523 func=45 file=42 line=238
+	pc=4814277 func=37 file=36 line=125
+	pc=4815228 func=38 file=31 line=27
+Stack id=13 nframes=1
+	pc=4815492 func=38 file=31 line=37
+Stack id=4 nframes=1
+	pc=4547456 func=56 file=50 line=42
+Stack id=14 nframes=2
+	pc=4642407 func=57 file=47 line=116
+	pc=4815502 func=38 file=31 line=51
+Stack id=18 nframes=5
+	pc=4816147 func=58 file=31 line=46
+	pc=4813660 func=32 file=33 line=141
+	pc=4816080 func=30 file=31 line=45
+	pc=4813660 func=32 file=33 line=141
+	pc=4815908 func=34 file=31 line=43
+Stack id=20 nframes=1
+	pc=4815908 func=34 file=31 line=43
+Stack id=25 nframes=3
+	pc=4217457 func=39 file=40 line=442
+	pc=4544973 func=41 file=42 line=918
+	pc=4547514 func=56 file=50 line=54
+Stack id=16 nframes=1
+	pc=4815908 func=34 file=31 line=43
+Stack id=15 nframes=1
+	pc=4815838 func=34 file=31 line=41
+Stack id=19 nframes=3
+	pc=4816080 func=30 file=31 line=45
+	pc=4813660 func=32 file=33 line=141
+	pc=4815908 func=34 file=31 line=43
+Stack id=10 nframes=1
+	pc=4815332 func=38 file=31 line=33
+EventBatch gen=1 m=18446744073709551615 time=28113086274600 size=1620
+Strings
+String id=1
+	data="Not worker"
+String id=2
+	data="GC (dedicated)"
+String id=3
+	data="GC (fractional)"
+String id=4
+	data="GC (idle)"
+String id=5
+	data="unspecified"
+String id=6
+	data="forever"
+String id=7
+	data="network"
+String id=8
+	data="select"
+String id=9
+	data="sync.(*Cond).Wait"
+String id=10
+	data="sync"
+String id=11
+	data="chan send"
+String id=12
+	data="chan receive"
+String id=13
+	data="GC mark assist wait for work"
+String id=14
+	data="GC background sweeper wait"
+String id=15
+	data="system goroutine wait"
+String id=16
+	data="preempted"
+String id=17
+	data="wait for debug call"
+String id=18
+	data="wait until GC ends"
+String id=19
+	data="sleep"
+String id=20
+	data="runtime.Gosched"
+String id=21
+	data="start trace"
+String id=22
+	data="task0"
+String id=23
+	data="task0 region"
+String id=24
+	data="unended region"
+String id=25
+	data="region0"
+String id=26
+	data="region1"
+String id=27
+	data="key0"
+String id=28
+	data="0123456789abcdef"
+String id=29
+	data="post-existing region"
+String id=30
+	data="main.main.func1.1"
+String id=31
+	data="/usr/local/google/home/mknyszek/work/go-1/src/internal/trace/v2/testdata/testprog/annotations.go"
+String id=32
+	data="runtime/trace.WithRegion"
+String id=33
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace/annotation.go"
+String id=34
+	data="main.main.func1"
+String id=35
+	data="runtime/trace.Start.func1"
+String id=36
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace/trace.go"
+String id=37
+	data="runtime/trace.Start"
+String id=38
+	data="main.main"
+String id=39
+	data="runtime.chanrecv1"
+String id=40
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/chan.go"
+String id=41
+	data="runtime.(*wakeableSleep).sleep"
+String id=42
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2.go"
+String id=43
+	data="runtime.(*traceAdvancerState).start.func1"
+String id=44
+	data="runtime.(*traceAdvancerState).start"
+String id=45
+	data="runtime.StartTrace"
+String id=46
+	data="sync.(*WaitGroup).Add"
+String id=47
+	data="/usr/local/google/home/mknyszek/work/go-1/src/sync/waitgroup.go"
+String id=48
+	data="sync.(*WaitGroup).Done"
+String id=49
+	data="runtime.traceStartReadCPU"
+String id=50
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2cpu.go"
+String id=51
+	data="runtime.startTheWorld"
+String id=52
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/proc.go"
+String id=53
+	data="runtime/trace.(*Task).End"
+String id=54
+	data="runtime.traceLocker.Gomaxprocs"
+String id=55
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2runtime.go"
+String id=56
+	data="runtime.traceStartReadCPU.func1"
+String id=57
+	data="sync.(*WaitGroup).Wait"
+String id=58
+	data="main.main.func1.1.1"
diff --git a/src/internal/trace/v2/testdata/tests/go122-confuse-seq-across-generations.test b/src/internal/trace/v2/testdata/tests/go122-confuse-seq-across-generations.test
new file mode 100644
index 0000000..c0d6f0d
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-confuse-seq-across-generations.test
@@ -0,0 +1,36 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=13
+ProcStatus dt=1 p=0 pstatus=1
+GoStatus dt=1 g=1 m=0 gstatus=2
+GoStop dt=1 reason_string=1 stack=0
+EventBatch gen=1 m=1 time=0 size=12
+ProcStatus dt=1 p=1 pstatus=1
+GoStart dt=1 g=1 g_seq=1
+GoStop dt=1 reason_string=1 stack=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=12
+Strings
+String id=1
+	data="whatever"
+EventBatch gen=2 m=1 time=3 size=8
+ProcStatus dt=1 p=1 pstatus=1
+GoStart dt=1 g=1 g_seq=2
+EventBatch gen=2 m=0 time=5 size=17
+ProcStatus dt=1 p=0 pstatus=1
+GoStatus dt=1 g=1 m=0 gstatus=1
+GoStart dt=1 g=1 g_seq=1
+GoStop dt=1 reason_string=1 stack=0
+EventBatch gen=2 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=2 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=2 m=18446744073709551615 time=0 size=12
+Strings
+String id=1
+	data="whatever"
diff --git a/src/internal/trace/v2/testdata/tests/go122-create-syscall-reuse-thread-id.test b/src/internal/trace/v2/testdata/tests/go122-create-syscall-reuse-thread-id.test
new file mode 100644
index 0000000..1820738
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-create-syscall-reuse-thread-id.test
@@ -0,0 +1,23 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=37
+GoCreateSyscall dt=1 new_g=4
+GoSyscallEndBlocked dt=1
+ProcStatus dt=1 p=0 pstatus=2
+ProcStart dt=1 p=0 p_seq=1
+GoStatus dt=1 g=4 m=18446744073709551615 gstatus=1
+GoStart dt=1 g=4 g_seq=1
+GoSyscallBegin dt=1 p_seq=2 stack=0
+GoDestroySyscall dt=1
+EventBatch gen=1 m=0 time=0 size=13
+ProcStatus dt=1 p=1 pstatus=2
+ProcStart dt=1 p=1 p_seq=1
+ProcSteal dt=1 p=0 p_seq=3 m=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-create-syscall-with-p.test b/src/internal/trace/v2/testdata/tests/go122-create-syscall-with-p.test
new file mode 100644
index 0000000..9b329b8
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-create-syscall-with-p.test
@@ -0,0 +1,22 @@
+-- expect --
+FAILURE ".*expected a proc but didn't have one.*"
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=34
+GoCreateSyscall dt=1 new_g=4
+ProcStatus dt=1 p=0 pstatus=2
+ProcStart dt=1 p=0 p_seq=1
+GoSyscallEndBlocked dt=1
+GoStart dt=1 g=4 g_seq=1
+GoSyscallBegin dt=1 p_seq=2 stack=0
+GoDestroySyscall dt=1
+GoCreateSyscall dt=1 new_g=4
+GoSyscallEnd dt=1
+GoSyscallBegin dt=1 p_seq=3 stack=0
+GoDestroySyscall dt=1
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-gc-stress.test b/src/internal/trace/v2/testdata/tests/go122-gc-stress.test
new file mode 100644
index 0000000..d5e7266
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-gc-stress.test
@@ -0,0 +1,4207 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=3 m=18446744073709551615 time=28114950954550 size=5
+Frequency freq=15625000
+EventBatch gen=3 m=169438 time=28114950899454 size=615
+ProcStatus dt=2 p=47 pstatus=1
+GoStatus dt=1 g=111 m=169438 gstatus=2
+GCMarkAssistActive dt=1 g=111
+GCMarkAssistEnd dt=1
+HeapAlloc dt=38 heapalloc_value=191159744
+HeapAlloc dt=134 heapalloc_value=191192512
+GCMarkAssistBegin dt=60 stack=3
+GoStop dt=2288 reason_string=20 stack=9
+GoStart dt=15 g=111 g_seq=1
+GCMarkAssistEnd dt=1860
+HeapAlloc dt=46 heapalloc_value=191585728
+GCMarkAssistBegin dt=35 stack=3
+GoBlock dt=32 reason_string=13 stack=11
+GoUnblock dt=14 g=57 g_seq=5 stack=0
+GoStart dt=9 g=57 g_seq=6
+GoLabel dt=3 label_string=2
+GoBlock dt=2925 reason_string=15 stack=5
+GoUnblock dt=12 g=57 g_seq=7 stack=0
+GoStart dt=5 g=57 g_seq=8
+GoLabel dt=1 label_string=2
+GoBlock dt=391 reason_string=15 stack=5
+GoUnblock dt=15 g=57 g_seq=9 stack=0
+GoStart dt=7 g=57 g_seq=10
+GoLabel dt=1 label_string=2
+GoBlock dt=307 reason_string=15 stack=5
+GoUnblock dt=7 g=57 g_seq=11 stack=0
+GoStart dt=3 g=57 g_seq=12
+GoLabel dt=2 label_string=2
+GoBlock dt=1049 reason_string=15 stack=5
+GoUnblock dt=23 g=58 g_seq=7 stack=0
+GoStart dt=8 g=58 g_seq=8
+GoLabel dt=1 label_string=2
+GoBlock dt=1126 reason_string=15 stack=5
+GoUnblock dt=12 g=53 g_seq=3 stack=0
+GoStart dt=5 g=53 g_seq=4
+GoLabel dt=1 label_string=2
+GoBlock dt=1751 reason_string=15 stack=5
+GoUnblock dt=12 g=53 g_seq=5 stack=0
+GoStart dt=6 g=53 g_seq=6
+GoLabel dt=3 label_string=2
+GoBlock dt=119 reason_string=15 stack=5
+GoStart dt=15 g=88 g_seq=4
+GoBlock dt=50 reason_string=13 stack=11
+GoUnblock dt=1212 g=54 g_seq=15 stack=0
+GoStart dt=6 g=54 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=2984 reason_string=15 stack=5
+GoUnblock dt=2696 g=52 g_seq=21 stack=0
+GoStart dt=3 g=52 g_seq=22
+GoLabel dt=1 label_string=4
+GoBlock dt=2013 reason_string=15 stack=5
+GoStart dt=18 g=98 g_seq=6
+GCMarkAssistEnd dt=6
+HeapAlloc dt=44 heapalloc_value=192003520
+GCMarkAssistBegin dt=54 stack=3
+GoBlock dt=481 reason_string=13 stack=11
+GoUnblock dt=51 g=14 g_seq=17 stack=0
+GoStart dt=4 g=14 g_seq=18
+GoLabel dt=1 label_string=4
+GoBlock dt=3954 reason_string=15 stack=5
+GoUnblock dt=59 g=57 g_seq=41 stack=0
+GoStart dt=8 g=57 g_seq=42
+GoLabel dt=1 label_string=4
+GoBlock dt=63 reason_string=15 stack=5
+GoUnblock dt=11 g=57 g_seq=43 stack=0
+GoStart dt=4 g=57 g_seq=44
+GoLabel dt=1 label_string=2
+GoBlock dt=3186 reason_string=15 stack=5
+GoUnblock dt=11 g=57 g_seq=45 stack=0
+GoStart dt=3 g=57 g_seq=46
+GoLabel dt=1 label_string=2
+GoBlock dt=9 reason_string=15 stack=5
+ProcStop dt=60
+ProcStart dt=50 p=47 p_seq=1
+GoUnblock dt=9 g=22 g_seq=33 stack=0
+GoStart dt=4 g=22 g_seq=34
+GoLabel dt=1 label_string=4
+GoBlock dt=97 reason_string=15 stack=5
+GoUnblock dt=9 g=22 g_seq=35 stack=0
+GoStart dt=5 g=22 g_seq=36
+GoLabel dt=1 label_string=2
+GoBlock dt=2605 reason_string=15 stack=5
+GoUnblock dt=10 g=22 g_seq=37 stack=0
+GoStart dt=4 g=22 g_seq=38
+GoLabel dt=1 label_string=2
+GoBlock dt=13 reason_string=15 stack=5
+ProcStop dt=37
+ProcStart dt=582 p=47 p_seq=2
+GoStart dt=504 g=97 g_seq=4
+GCMarkAssistEnd dt=5
+GCMarkAssistBegin dt=34 stack=3
+GoBlock dt=279 reason_string=13 stack=11
+ProcStop dt=30
+ProcStart dt=3780 p=47 p_seq=3
+GoUnblock dt=9 g=71 g_seq=25 stack=0
+GoStart dt=128 g=71 g_seq=26
+GoLabel dt=1 label_string=2
+GoBlock dt=210 reason_string=15 stack=5
+GoUnblock dt=8 g=71 g_seq=27 stack=0
+GoStart dt=1 g=71 g_seq=28
+GoLabel dt=1 label_string=2
+GoBlock dt=1627 reason_string=15 stack=5
+GoStart dt=27 g=105 g_seq=6
+GCMarkAssistEnd dt=3
+HeapAlloc dt=44 heapalloc_value=192477912
+GCMarkAssistBegin dt=77 stack=3
+GoStop dt=873 reason_string=20 stack=9
+GoUnblock dt=12 g=23 g_seq=47 stack=0
+GoStart dt=3 g=23 g_seq=48
+GoLabel dt=1 label_string=2
+GoBlock dt=36 reason_string=15 stack=5
+GoUnblock dt=6 g=23 g_seq=49 stack=0
+GoStart dt=1 g=23 g_seq=50
+GoLabel dt=1 label_string=2
+GoBlock dt=9 reason_string=15 stack=5
+GoUnblock dt=8 g=23 g_seq=51 stack=0
+GoStart dt=3 g=23 g_seq=52
+GoLabel dt=1 label_string=2
+GoBlock dt=15 reason_string=15 stack=5
+GoStart dt=10 g=105 g_seq=7
+GoStop dt=16 reason_string=20 stack=9
+GoUnblock dt=7 g=23 g_seq=53 stack=0
+GoStart dt=3 g=23 g_seq=54
+GoLabel dt=1 label_string=2
+GoBlock dt=10 reason_string=15 stack=5
+GoUnblock dt=12 g=23 g_seq=55 stack=0
+GoStart dt=3 g=23 g_seq=56
+GoLabel dt=1 label_string=2
+GoBlock dt=9 reason_string=15 stack=5
+GoUnblock dt=4 g=23 g_seq=57 stack=0
+GoStart dt=1 g=23 g_seq=58
+GoLabel dt=1 label_string=2
+GoBlock dt=4554 reason_string=15 stack=5
+GoStart dt=14 g=105 g_seq=10
+GCMarkAssistEnd dt=5
+HeapAlloc dt=65 heapalloc_value=193682136
+GCMarkAssistBegin dt=16 stack=3
+GoBlock dt=44 reason_string=13 stack=11
+GoStart dt=15 g=83 g_seq=8
+HeapAlloc dt=221 heapalloc_value=194173656
+HeapAlloc dt=1927 heapalloc_value=195443416
+GoStop dt=5838 reason_string=16 stack=6
+GoStart dt=51 g=83 g_seq=9
+GCMarkAssistBegin dt=12 stack=3
+GoBlock dt=35 reason_string=10 stack=18
+GoStart dt=70 g=87 g_seq=6
+GCMarkAssistBegin dt=14 stack=3
+GoBlock dt=35 reason_string=13 stack=11
+ProcStop dt=77
+EventBatch gen=3 m=169436 time=28114950894898 size=160
+ProcStatus dt=2 p=34 pstatus=1
+GoStatus dt=3 g=107 m=169436 gstatus=2
+GCMarkAssistBegin dt=15 stack=3
+GoBlock dt=4050 reason_string=13 stack=11
+GoStatus dt=20 g=23 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=23 g_seq=1 stack=0
+GoStart dt=8 g=23 g_seq=2
+GoLabel dt=1 label_string=2
+GoUnblock dt=2316 g=81 g_seq=1 stack=12
+GoBlock dt=626 reason_string=15 stack=5
+GoUnblock dt=9 g=23 g_seq=3 stack=0
+GoStart dt=9 g=23 g_seq=4
+GoLabel dt=1 label_string=2
+GoBlock dt=3975 reason_string=15 stack=5
+GoUnblock dt=35 g=23 g_seq=5 stack=0
+GoStart dt=6 g=23 g_seq=6
+GoLabel dt=1 label_string=2
+GoBlock dt=142 reason_string=15 stack=5
+GoUnblock dt=9 g=23 g_seq=7 stack=0
+GoStart dt=4 g=23 g_seq=8
+GoLabel dt=1 label_string=2
+GoBlock dt=3815 reason_string=15 stack=5
+GoUnblock dt=10 g=23 g_seq=9 stack=0
+GoStart dt=6 g=23 g_seq=10
+GoLabel dt=1 label_string=2
+GoBlock dt=3560 reason_string=15 stack=5
+GoUnblock dt=8 g=23 g_seq=11 stack=0
+GoStart dt=4 g=23 g_seq=12
+GoLabel dt=3 label_string=2
+GoBlock dt=2781 reason_string=15 stack=5
+GoUnblock dt=13 g=23 g_seq=13 stack=0
+GoStart dt=4 g=23 g_seq=14
+GoLabel dt=1 label_string=2
+GoBlock dt=1277 reason_string=15 stack=5
+ProcStop dt=16
+EventBatch gen=3 m=169435 time=28114950897148 size=522
+ProcStatus dt=2 p=24 pstatus=1
+GoStatus dt=2 g=122 m=169435 gstatus=2
+GCMarkAssistActive dt=1 g=122
+GCMarkAssistEnd dt=3
+HeapAlloc dt=24 heapalloc_value=190602688
+GCMarkAssistBegin dt=95 stack=3
+GCMarkAssistEnd dt=4651
+GCMarkAssistBegin dt=50 stack=3
+GoBlock dt=2931 reason_string=13 stack=11
+ProcStop dt=1401
+ProcStart dt=18 p=24 p_seq=1
+GoUnblock dt=3524 g=28 g_seq=5 stack=0
+GoStart dt=10 g=28 g_seq=6
+GoLabel dt=1 label_string=4
+GoBlock dt=42 reason_string=15 stack=5
+GoUnblock dt=1162 g=24 g_seq=11 stack=0
+GoStart dt=7 g=24 g_seq=12
+GoLabel dt=1 label_string=4
+GoBlock dt=3050 reason_string=15 stack=5
+GoUnblock dt=5301 g=67 g_seq=15 stack=0
+GoStart dt=4 g=67 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=40 reason_string=15 stack=5
+ProcStop dt=64
+ProcStart dt=841 p=24 p_seq=2
+GoStatus dt=58 g=16 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=16 g_seq=1 stack=0
+GoStart dt=273 g=16 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=139 reason_string=15 stack=5
+ProcStop dt=52
+ProcStart dt=97 p=24 p_seq=3
+GoUnblock dt=5 g=16 g_seq=3 stack=0
+GoStart dt=2 g=16 g_seq=4
+GoLabel dt=1 label_string=4
+GoBlock dt=471 reason_string=15 stack=5
+GoUnblock dt=58 g=16 g_seq=5 stack=0
+GoStart dt=6 g=16 g_seq=6
+GoLabel dt=3 label_string=4
+GoBlock dt=912 reason_string=15 stack=5
+GoUnblock dt=9 g=16 g_seq=7 stack=0
+GoStart dt=6 g=16 g_seq=8
+GoLabel dt=1 label_string=2
+GoUnblock dt=6571 g=113 g_seq=5 stack=12
+GoBlock dt=22 reason_string=15 stack=5
+ProcStop dt=73
+ProcStart dt=22914 p=30 p_seq=16
+GoStart dt=342 g=117 g_seq=4
+GCMarkAssistEnd dt=8
+HeapAlloc dt=67 heapalloc_value=196467152
+GoStop dt=5253 reason_string=16 stack=6
+GoStart dt=44 g=128 g_seq=7
+GCMarkAssistBegin dt=21 stack=3
+GoBlock dt=37 reason_string=10 stack=18
+GoStart dt=7 g=130 g_seq=5
+GoBlock dt=182 reason_string=10 stack=20
+ProcStop dt=81
+ProcStart dt=8287 p=2 p_seq=2
+GoStart dt=164 g=82 g_seq=11
+GCMarkAssistEnd dt=8
+HeapAlloc dt=169 heapalloc_value=104038048
+HeapAlloc dt=135 heapalloc_value=104189856
+HeapAlloc dt=126 heapalloc_value=104287136
+HeapAlloc dt=24 heapalloc_value=104308256
+HeapAlloc dt=28 heapalloc_value=104313888
+HeapAlloc dt=14 heapalloc_value=104399904
+GCSweepBegin dt=43 stack=28
+GCSweepEnd dt=8 swept_value=8192 reclaimed_value=8192
+HeapAlloc dt=4 heapalloc_value=104473632
+HeapAlloc dt=58 heapalloc_value=104510496
+HeapAlloc dt=22 heapalloc_value=104534432
+HeapAlloc dt=51 heapalloc_value=104654624
+GCSweepBegin dt=146 stack=28
+GCSweepEnd dt=8 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=4 heapalloc_value=104878624
+HeapAlloc dt=42 heapalloc_value=105007648
+HeapAlloc dt=29 heapalloc_value=105077280
+HeapAlloc dt=36 heapalloc_value=105105952
+HeapAlloc dt=44 heapalloc_value=105242784
+HeapAlloc dt=58 heapalloc_value=105431200
+HeapAlloc dt=128 heapalloc_value=105593760
+HeapAlloc dt=199 heapalloc_value=106209440
+GCSweepBegin dt=155 stack=28
+GCSweepEnd dt=13 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=3 heapalloc_value=106666272
+HeapAlloc dt=77 heapalloc_value=106901152
+HeapAlloc dt=64 heapalloc_value=107211808
+HeapAlloc dt=133 heapalloc_value=107661088
+HeapAlloc dt=34 heapalloc_value=107722528
+HeapAlloc dt=108 heapalloc_value=108207392
+GCSweepBegin dt=202 stack=28
+GCSweepEnd dt=13 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=3 heapalloc_value=108742816
+HeapAlloc dt=112 heapalloc_value=109093664
+HeapAlloc dt=207 heapalloc_value=109913120
+HeapAlloc dt=271 heapalloc_value=110834560
+HeapAlloc dt=212 heapalloc_value=111566720
+HeapAlloc dt=148 heapalloc_value=112190720
+HeapAlloc dt=74 heapalloc_value=112528128
+HeapAlloc dt=143 heapalloc_value=113050240
+HeapAlloc dt=19 heapalloc_value=113194368
+HeapAlloc dt=135 heapalloc_value=113615232
+GCSweepBegin dt=251 stack=27
+EventBatch gen=3 m=169434 time=28114950909315 size=660
+ProcStatus dt=2 p=7 pstatus=1
+GoStatus dt=2 g=71 m=169434 gstatus=2
+GoBlock dt=6 reason_string=15 stack=5
+GoUnblock dt=2633 g=53 g_seq=7 stack=0
+GoStart dt=7 g=53 g_seq=8
+GoLabel dt=3 label_string=4
+GoBlock dt=127 reason_string=15 stack=5
+GoUnblock dt=1358 g=52 g_seq=15 stack=0
+GoStart dt=7 g=52 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=27 reason_string=15 stack=5
+GoStart dt=1150 g=93 g_seq=4
+GCMarkAssistEnd dt=7
+HeapAlloc dt=39 heapalloc_value=191897024
+GCMarkAssistBegin dt=27 stack=3
+GoStop dt=894 reason_string=20 stack=9
+GoStart dt=13 g=93 g_seq=5
+GoBlock dt=150 reason_string=13 stack=11
+ProcStop dt=57
+ProcStart dt=14 p=7 p_seq=1
+ProcStop dt=4205
+ProcStart dt=18 p=7 p_seq=2
+GoUnblock dt=4 g=22 g_seq=17 stack=0
+GoStart dt=172 g=22 g_seq=18
+GoLabel dt=1 label_string=4
+GoBlock dt=1298 reason_string=15 stack=5
+GoUnblock dt=12 g=22 g_seq=19 stack=0
+GoStart dt=7 g=22 g_seq=20
+GoLabel dt=1 label_string=2
+GoBlock dt=108 reason_string=15 stack=5
+GoUnblock dt=9 g=22 g_seq=21 stack=0
+GoStart dt=4 g=22 g_seq=22
+GoLabel dt=1 label_string=2
+GoBlock dt=309 reason_string=15 stack=5
+GoUnblock dt=19 g=57 g_seq=35 stack=0
+GoStart dt=6 g=57 g_seq=36
+GoLabel dt=1 label_string=2
+GoBlock dt=26 reason_string=15 stack=5
+GoUnblock dt=12 g=30 g_seq=15 stack=0
+GoStart dt=4 g=30 g_seq=16
+GoLabel dt=1 label_string=2
+GoBlock dt=410 reason_string=15 stack=5
+GoUnblock dt=2384 g=23 g_seq=37 stack=0
+GoStart dt=7 g=23 g_seq=38
+GoLabel dt=1 label_string=4
+GoBlock dt=119 reason_string=15 stack=5
+GoUnblock dt=58 g=25 g_seq=21 stack=0
+GoStart dt=4 g=25 g_seq=22
+GoLabel dt=1 label_string=4
+GoBlock dt=1875 reason_string=15 stack=5
+GoUnblock dt=53 g=29 g_seq=15 stack=0
+GoStart dt=3 g=29 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=133 reason_string=15 stack=5
+GoUnblock dt=51 g=25 g_seq=25 stack=0
+GoStart dt=5 g=25 g_seq=26
+GoLabel dt=1 label_string=4
+GoBlock dt=14 reason_string=15 stack=5
+GoUnblock dt=42 g=25 g_seq=27 stack=0
+GoStart dt=3 g=25 g_seq=28
+GoLabel dt=1 label_string=4
+GoBlock dt=56 reason_string=15 stack=5
+GoUnblock dt=1741 g=24 g_seq=41 stack=0
+GoStart dt=4 g=24 g_seq=42
+GoLabel dt=3 label_string=2
+GoBlock dt=15 reason_string=15 stack=5
+GoUnblock dt=26 g=25 g_seq=31 stack=0
+GoStart dt=4 g=25 g_seq=32
+GoLabel dt=1 label_string=2
+GoBlock dt=2653 reason_string=15 stack=5
+GoUnblock dt=10 g=25 g_seq=33 stack=0
+GoStart dt=6 g=25 g_seq=34
+GoLabel dt=1 label_string=2
+GoBlock dt=151 reason_string=15 stack=5
+GoUnblock dt=37 g=25 g_seq=35 stack=0
+GoStart dt=3 g=25 g_seq=36
+GoLabel dt=1 label_string=4
+GoBlock dt=12 reason_string=15 stack=5
+GoUnblock dt=8 g=25 g_seq=37 stack=0
+GoStart dt=3 g=25 g_seq=38
+GoLabel dt=1 label_string=2
+GoBlock dt=1197 reason_string=15 stack=5
+GoUnblock dt=38 g=22 g_seq=43 stack=0
+GoStart dt=7 g=22 g_seq=44
+GoLabel dt=1 label_string=4
+GoBlock dt=16 reason_string=15 stack=5
+ProcStop dt=28
+ProcStart dt=2728 p=7 p_seq=3
+GoUnblock dt=10 g=25 g_seq=39 stack=0
+GoStart dt=162 g=25 g_seq=40
+GoLabel dt=2 label_string=2
+GoBlock dt=36 reason_string=15 stack=5
+GoUnblock dt=10 g=25 g_seq=41 stack=0
+GoStart dt=4 g=25 g_seq=42
+GoLabel dt=1 label_string=2
+GoBlock dt=19 reason_string=15 stack=5
+GoUnblock dt=7 g=25 g_seq=43 stack=0
+GoStart dt=1 g=25 g_seq=44
+GoLabel dt=1 label_string=2
+GoUnblock dt=616 g=81 g_seq=6 stack=12
+GoBlock dt=1549 reason_string=15 stack=5
+GoStart dt=12 g=112 g_seq=5
+GoBlock dt=22 reason_string=13 stack=11
+GoStart dt=8 g=90 g_seq=4
+GCMarkAssistEnd dt=3
+HeapAlloc dt=2613 heapalloc_value=192625368
+GoStop dt=48 reason_string=16 stack=6
+GoUnblock dt=13 g=54 g_seq=35 stack=0
+GoStart dt=4 g=54 g_seq=36
+GoLabel dt=1 label_string=2
+GoBlock dt=269 reason_string=15 stack=5
+GoUnblock dt=6 g=54 g_seq=37 stack=0
+GoStart dt=5 g=54 g_seq=38
+GoLabel dt=1 label_string=2
+GoBlock dt=856 reason_string=15 stack=5
+GoUnblock dt=23 g=52 g_seq=61 stack=0
+GoStart dt=4 g=52 g_seq=62
+GoLabel dt=1 label_string=2
+GoBlock dt=33 reason_string=15 stack=5
+GoUnblock dt=13 g=52 g_seq=63 stack=0
+GoStart dt=2 g=52 g_seq=64
+GoLabel dt=1 label_string=2
+GoBlock dt=38 reason_string=15 stack=5
+GoUnblock dt=17 g=52 g_seq=65 stack=0
+GoStart dt=3 g=52 g_seq=66
+GoLabel dt=1 label_string=2
+GoBlock dt=37 reason_string=15 stack=5
+GoUnblock dt=11 g=52 g_seq=67 stack=0
+GoStart dt=4 g=52 g_seq=68
+GoLabel dt=1 label_string=2
+GoBlock dt=2457 reason_string=15 stack=5
+GoUnblock dt=11 g=52 g_seq=69 stack=0
+GoStart dt=4 g=52 g_seq=70
+GoLabel dt=1 label_string=2
+GoBlock dt=9 reason_string=15 stack=5
+GoStart dt=23 g=114 g_seq=4
+GCMarkAssistEnd dt=457
+HeapAlloc dt=223 heapalloc_value=194968280
+GoStop dt=6900 reason_string=16 stack=4
+GoStart dt=24 g=114 g_seq=5
+GCMarkAssistBegin dt=86 stack=3
+GoBlock dt=43 reason_string=10 stack=18
+ProcStop dt=49
+ProcStart dt=475 p=7 p_seq=4
+ProcStop dt=40
+ProcStart dt=1388 p=7 p_seq=5
+GoUnblock dt=9 g=131 g_seq=8 stack=0
+GoStart dt=169 g=131 g_seq=9
+GoSyscallBegin dt=24 p_seq=6 stack=7
+GoSyscallEnd dt=184
+GoBlock dt=11 reason_string=15 stack=2
+ProcStop dt=42
+ProcStart dt=18109 p=16 p_seq=2
+GoStart dt=176 g=91 g_seq=4
+GCMarkAssistEnd dt=8
+HeapAlloc dt=22 heapalloc_value=114837120
+HeapAlloc dt=88 heapalloc_value=114853504
+GCSweepBegin dt=145 stack=27
+EventBatch gen=3 m=169433 time=28114950897465 size=806
+ProcStatus dt=2 p=2 pstatus=1
+GoStatus dt=1 g=24 m=169433 gstatus=2
+GoBlock dt=9 reason_string=15 stack=5
+GoUnblock dt=19 g=24 g_seq=1 stack=0
+GoStart dt=5 g=24 g_seq=2
+GoLabel dt=2 label_string=2
+GoBlock dt=4044 reason_string=15 stack=5
+GoUnblock dt=17 g=24 g_seq=3 stack=0
+GoStart dt=4 g=24 g_seq=4
+GoLabel dt=1 label_string=2
+GoBlock dt=4262 reason_string=15 stack=5
+GoUnblock dt=19 g=28 g_seq=3 stack=0
+GoStart dt=4 g=28 g_seq=4
+GoLabel dt=1 label_string=2
+GoBlock dt=461 reason_string=15 stack=5
+GoUnblock dt=4544 g=72 g_seq=15 stack=0
+GoStart dt=9 g=72 g_seq=16
+GoLabel dt=3 label_string=4
+GoBlock dt=32 reason_string=15 stack=5
+GoUnblock dt=9 g=72 g_seq=17 stack=0
+GoStart dt=2 g=72 g_seq=18
+GoLabel dt=2 label_string=2
+GoBlock dt=13 reason_string=15 stack=5
+GoUnblock dt=3 g=72 g_seq=19 stack=0
+GoStart dt=1 g=72 g_seq=20
+GoLabel dt=1 label_string=2
+GoBlock dt=237 reason_string=15 stack=5
+GoUnblock dt=8 g=72 g_seq=21 stack=0
+GoStart dt=3 g=72 g_seq=22
+GoLabel dt=1 label_string=2
+GoBlock dt=151 reason_string=15 stack=5
+GoUnblock dt=11 g=72 g_seq=23 stack=0
+GoStart dt=6 g=72 g_seq=24
+GoLabel dt=1 label_string=2
+GoBlock dt=3418 reason_string=15 stack=5
+ProcStop dt=1573
+ProcStart dt=17 p=2 p_seq=1
+ProcStop dt=1102
+ProcStart dt=21668 p=19 p_seq=4
+GoUnblock dt=16 g=51 g_seq=47 stack=0
+GoStart dt=7 g=51 g_seq=48
+GoLabel dt=1 label_string=2
+GoBlock dt=60 reason_string=15 stack=5
+GoUnblock dt=6 g=51 g_seq=49 stack=0
+GoStart dt=1 g=51 g_seq=50
+GoLabel dt=3 label_string=2
+GoBlock dt=5166 reason_string=15 stack=5
+GoStart dt=18 g=106 g_seq=5
+GCMarkAssistEnd dt=10
+HeapAlloc dt=56 heapalloc_value=193452760
+GCMarkAssistBegin dt=116 stack=3
+GCMarkAssistEnd dt=58
+HeapAlloc dt=47 heapalloc_value=193714904
+GoStop dt=54 reason_string=16 stack=6
+GoUnblock dt=18 g=54 g_seq=41 stack=0
+GoStart dt=4 g=54 g_seq=42
+GoLabel dt=2 label_string=2
+GoUnblock dt=16 g=105 g_seq=11 stack=12
+GoBlock dt=21 reason_string=15 stack=5
+GoStart dt=8 g=105 g_seq=12
+GCMarkAssistEnd dt=7
+HeapAlloc dt=33 heapalloc_value=193919704
+GCMarkAssistBegin dt=13 stack=3
+GCMarkAssistEnd dt=91
+HeapAlloc dt=173 heapalloc_value=194378456
+GCMarkAssistBegin dt=26 stack=3
+GoBlock dt=37 reason_string=13 stack=11
+GoStart dt=33 g=104 g_seq=2
+GCMarkAssistEnd dt=5
+HeapAlloc dt=81 heapalloc_value=194673368
+GoStop dt=2248 reason_string=16 stack=6
+GoStart dt=2855 g=104 g_seq=3
+GCMarkAssistBegin dt=16 stack=3
+GoBlock dt=27 reason_string=10 stack=18
+GoStart dt=16 g=103 g_seq=5
+GCMarkAssistEnd dt=6
+HeapAlloc dt=6180 heapalloc_value=196655568
+GoStop dt=14 reason_string=16 stack=6
+GoStart dt=146 g=102 g_seq=5
+GCMarkAssistBegin dt=10 stack=3
+HeapAlloc dt=38 heapalloc_value=196663760
+GoBlock dt=16 reason_string=10 stack=18
+ProcStop dt=41
+ProcStart dt=1317 p=19 p_seq=5
+ProcStop dt=24
+ProcStart dt=2117 p=0 p_seq=5
+GoStart dt=5190 g=115 g_seq=10
+GCMarkAssistEnd dt=6
+GCSweepBegin dt=22 stack=27
+GCSweepEnd dt=727 swept_value=71303168 reclaimed_value=1302272
+HeapAlloc dt=37 heapalloc_value=103898784
+HeapAlloc dt=200 heapalloc_value=103947936
+HeapAlloc dt=63 heapalloc_value=103960224
+HeapAlloc dt=27 heapalloc_value=103997088
+HeapAlloc dt=65 heapalloc_value=104103584
+HeapAlloc dt=87 heapalloc_value=104132512
+HeapAlloc dt=63 heapalloc_value=104255392
+HeapAlloc dt=87 heapalloc_value=104267680
+HeapAlloc dt=73 heapalloc_value=104379424
+HeapAlloc dt=79 heapalloc_value=104494112
+GCSweepBegin dt=40 stack=28
+GCSweepEnd dt=7 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=8 heapalloc_value=104526880
+HeapAlloc dt=27 heapalloc_value=104589088
+HeapAlloc dt=42 heapalloc_value=104711968
+HeapAlloc dt=83 heapalloc_value=104821280
+GCSweepBegin dt=21 stack=28
+GCSweepEnd dt=4 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=2 heapalloc_value=104854048
+HeapAlloc dt=105 heapalloc_value=105064992
+GCSweepBegin dt=94 stack=28
+GCSweepEnd dt=9 swept_value=8192 reclaimed_value=8192
+HeapAlloc dt=4 heapalloc_value=105250976
+GCSweepBegin dt=29 stack=28
+GCSweepEnd dt=10 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=4 heapalloc_value=105447584
+HeapAlloc dt=30 heapalloc_value=105476256
+HeapAlloc dt=57 heapalloc_value=105566368
+GCSweepBegin dt=74 stack=28
+GCSweepEnd dt=5 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=3 heapalloc_value=105741216
+HeapAlloc dt=77 heapalloc_value=105921440
+HeapAlloc dt=76 heapalloc_value=106143904
+HeapAlloc dt=50 heapalloc_value=106274976
+HeapAlloc dt=113 heapalloc_value=106633504
+HeapAlloc dt=110 heapalloc_value=107036320
+HeapAlloc dt=95 heapalloc_value=107351072
+HeapAlloc dt=80 heapalloc_value=107702048
+GCSweepBegin dt=78 stack=28
+GCSweepEnd dt=6 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=2 heapalloc_value=107835936
+HeapAlloc dt=39 heapalloc_value=107904288
+HeapAlloc dt=82 heapalloc_value=108390432
+HeapAlloc dt=230 heapalloc_value=108955808
+HeapAlloc dt=126 heapalloc_value=109421344
+GCSweepBegin dt=131 stack=28
+GCSweepEnd dt=5 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=3 heapalloc_value=109929504
+GCSweepBegin dt=29 stack=28
+GCSweepEnd dt=4 swept_value=8192 reclaimed_value=8192
+HeapAlloc dt=3 heapalloc_value=110038816
+HeapAlloc dt=28 heapalloc_value=110109472
+HeapAlloc dt=93 heapalloc_value=110412672
+HeapAlloc dt=33 heapalloc_value=110547840
+HeapAlloc dt=123 heapalloc_value=111070848
+GCSweepBegin dt=155 stack=28
+GCSweepEnd dt=10 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=3 heapalloc_value=111648640
+GCSweepBegin dt=61 stack=28
+GCSweepEnd dt=8 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=3 heapalloc_value=111996800
+GCSweepBegin dt=37 stack=28
+GCSweepEnd dt=5 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=8 heapalloc_value=112149760
+HeapAlloc dt=32 heapalloc_value=112342272
+GCSweepBegin dt=75 stack=28
+GCSweepEnd dt=7 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=5 heapalloc_value=112601856
+HeapAlloc dt=61 heapalloc_value=112923264
+HeapAlloc dt=90 heapalloc_value=113262720
+HeapAlloc dt=88 heapalloc_value=113522304
+HeapAlloc dt=119 heapalloc_value=113967488
+HeapAlloc dt=59 heapalloc_value=114201216
+GCSweepBegin dt=130 stack=27
+EventBatch gen=3 m=169431 time=28114950897743 size=407
+ProcStatus dt=2 p=11 pstatus=1
+GoStatus dt=4 g=51 m=169431 gstatus=2
+GoBlock dt=6 reason_string=15 stack=5
+GoUnblock dt=13 g=51 g_seq=1 stack=0
+GoStart dt=6 g=51 g_seq=2
+GoLabel dt=1 label_string=2
+GoBlock dt=4143 reason_string=15 stack=5
+GoStatus dt=1425 g=28 m=18446744073709551615 gstatus=4
+GoUnblock dt=2 g=28 g_seq=1 stack=0
+GoStart dt=7 g=28 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=1758 reason_string=15 stack=5
+GoUnblock dt=3904 g=25 g_seq=9 stack=0
+GoStart dt=9 g=25 g_seq=10
+GoLabel dt=1 label_string=4
+GoBlock dt=41 reason_string=15 stack=5
+ProcStop dt=1189
+ProcStart dt=16 p=11 p_seq=1
+GoUnblock dt=1157 g=57 g_seq=21 stack=0
+GoStart dt=6 g=57 g_seq=22
+GoLabel dt=1 label_string=4
+GoBlock dt=25 reason_string=15 stack=5
+GoUnblock dt=1614 g=52 g_seq=13 stack=0
+GoStart dt=11 g=52 g_seq=14
+GoLabel dt=4 label_string=4
+GoBlock dt=86 reason_string=15 stack=5
+GoUnblock dt=4771 g=22 g_seq=11 stack=0
+GoStart dt=12 g=22 g_seq=12
+GoLabel dt=1 label_string=4
+GoBlock dt=1413 reason_string=15 stack=5
+GoUnblock dt=10 g=22 g_seq=13 stack=0
+GoStart dt=4 g=22 g_seq=14
+GoLabel dt=1 label_string=2
+GoBlock dt=39 reason_string=15 stack=5
+ProcStop dt=67
+ProcStart dt=2286 p=11 p_seq=2
+ProcStop dt=95
+ProcStart dt=53 p=0 p_seq=2
+GoUnblock dt=9 g=57 g_seq=33 stack=0
+GoStart dt=8 g=57 g_seq=34
+GoLabel dt=1 label_string=4
+GoBlock dt=37 reason_string=15 stack=5
+GoUnblock dt=20 g=22 g_seq=23 stack=0
+GoStart dt=3 g=22 g_seq=24
+GoLabel dt=1 label_string=2
+GoBlock dt=1036 reason_string=15 stack=5
+GoUnblock dt=11 g=22 g_seq=25 stack=0
+GoStart dt=6 g=22 g_seq=26
+GoLabel dt=1 label_string=2
+GoBlock dt=2130 reason_string=15 stack=5
+GoUnblock dt=11 g=22 g_seq=27 stack=0
+GoStart dt=7 g=22 g_seq=28
+GoLabel dt=2 label_string=2
+GoBlock dt=1227 reason_string=15 stack=5
+GoUnblock dt=12 g=22 g_seq=29 stack=0
+GoStart dt=6 g=22 g_seq=30
+GoLabel dt=1 label_string=2
+GoBlock dt=31 reason_string=15 stack=5
+GoUnblock dt=7 g=22 g_seq=31 stack=0
+GoStart dt=2 g=22 g_seq=32
+GoLabel dt=1 label_string=2
+GoBlock dt=2282 reason_string=15 stack=5
+GoUnblock dt=71 g=29 g_seq=33 stack=0
+GoStart dt=4 g=29 g_seq=34
+GoLabel dt=1 label_string=4
+GoBlock dt=1234 reason_string=15 stack=5
+GoUnblock dt=8 g=29 g_seq=35 stack=0
+GoStart dt=8 g=29 g_seq=36
+GoLabel dt=1 label_string=2
+GoBlock dt=18 reason_string=15 stack=5
+ProcStop dt=49
+ProcStart dt=10623 p=11 p_seq=5
+ProcStop dt=54
+ProcStart dt=686 p=11 p_seq=6
+GoStart dt=185 g=127 g_seq=5
+GCMarkAssistBegin dt=71 stack=3
+GoStop dt=67 reason_string=20 stack=9
+GoUnblock dt=15 g=53 g_seq=47 stack=0
+GoStart dt=3 g=53 g_seq=48
+GoLabel dt=1 label_string=2
+GoUnblock dt=661 g=121 g_seq=10 stack=12
+GoUnblock dt=7 g=88 g_seq=5 stack=12
+GoUnblock dt=8 g=87 g_seq=4 stack=12
+GoUnblock dt=2751 g=94 g_seq=10 stack=12
+GoUnblock dt=8 g=106 g_seq=7 stack=12
+GoUnblock dt=8 g=98 g_seq=9 stack=12
+GoBlock dt=18 reason_string=15 stack=5
+GoStart dt=17 g=87 g_seq=5
+GCMarkAssistEnd dt=5
+HeapAlloc dt=202 heapalloc_value=194796248
+GoStop dt=7327 reason_string=16 stack=6
+GoStart dt=68 g=84 g_seq=8
+GCMarkAssistBegin dt=16 stack=3
+GoBlock dt=29 reason_string=13 stack=11
+ProcStop dt=88
+EventBatch gen=3 m=169428 time=28114950899204 size=756
+ProcStatus dt=2 p=31 pstatus=1
+GoStatus dt=5 g=104 m=169428 gstatus=2
+GCMarkAssistActive dt=1 g=104
+GCMarkAssistEnd dt=2
+HeapAlloc dt=37 heapalloc_value=191110592
+GCMarkAssistBegin dt=21 stack=3
+GoBlock dt=2670 reason_string=13 stack=11
+GoStatus dt=1400 g=22 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=22 g_seq=1 stack=0
+GoStart dt=7 g=22 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=43 reason_string=15 stack=5
+GoUnblock dt=2567 g=70 g_seq=3 stack=0
+GoStart dt=9 g=70 g_seq=4
+GoLabel dt=1 label_string=4
+GoBlock dt=329 reason_string=15 stack=5
+GoUnblock dt=97 g=70 g_seq=5 stack=0
+GoStart dt=5 g=70 g_seq=6
+GoLabel dt=3 label_string=2
+GoUnblock dt=1728 g=84 g_seq=3 stack=12
+GoBlock dt=3527 reason_string=15 stack=5
+GoStart dt=4132 g=114 g_seq=2
+GoStatus dt=28 g=115 m=18446744073709551615 gstatus=4
+GoUnblock dt=8 g=115 g_seq=1 stack=10
+GCMarkAssistBegin dt=18 stack=3
+GoBlock dt=196 reason_string=13 stack=11
+GoStart dt=14 g=115 g_seq=2
+GoStatus dt=18 g=102 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=102 g_seq=1 stack=10
+GCMarkAssistBegin dt=13 stack=3
+GoBlock dt=371 reason_string=13 stack=11
+GoUnblock dt=9 g=30 g_seq=11 stack=0
+GoStart dt=6 g=30 g_seq=12
+GoLabel dt=1 label_string=2
+GoBlock dt=5520 reason_string=15 stack=5
+GoUnblock dt=8 g=30 g_seq=13 stack=0
+GoStart dt=4 g=30 g_seq=14
+GoLabel dt=1 label_string=2
+GoBlock dt=28 reason_string=15 stack=5
+GoUnblock dt=10 g=57 g_seq=37 stack=0
+GoStart dt=3 g=57 g_seq=38
+GoLabel dt=1 label_string=2
+GoBlock dt=157 reason_string=15 stack=5
+GoUnblock dt=7 g=57 g_seq=39 stack=0
+GoStart dt=4 g=57 g_seq=40
+GoLabel dt=1 label_string=2
+GoBlock dt=140 reason_string=15 stack=5
+GoUnblock dt=10 g=53 g_seq=25 stack=0
+GoStart dt=3 g=53 g_seq=26
+GoLabel dt=1 label_string=2
+GoBlock dt=90 reason_string=15 stack=5
+GoUnblock dt=62 g=53 g_seq=27 stack=0
+GoStart dt=4 g=53 g_seq=28
+GoLabel dt=1 label_string=4
+GoBlock dt=11 reason_string=15 stack=5
+GoUnblock dt=46 g=53 g_seq=29 stack=0
+GoStart dt=7 g=53 g_seq=30
+GoLabel dt=1 label_string=4
+GoBlock dt=51 reason_string=15 stack=5
+ProcStop dt=2236
+ProcStart dt=966 p=35 p_seq=2
+GoStart dt=19 g=81 g_seq=5
+GCMarkAssistEnd dt=7
+HeapAlloc dt=67 heapalloc_value=192133920
+GCMarkAssistBegin dt=46 stack=3
+GoBlock dt=32 reason_string=13 stack=11
+ProcStop dt=57
+ProcStart dt=15 p=35 p_seq=3
+GoUnblock dt=2 g=69 g_seq=23 stack=0
+GoStart dt=2 g=69 g_seq=24
+GoLabel dt=1 label_string=4
+GoBlock dt=224 reason_string=15 stack=5
+GoUnblock dt=52 g=69 g_seq=25 stack=0
+GoStart dt=3 g=69 g_seq=26
+GoLabel dt=1 label_string=4
+GoBlock dt=289 reason_string=15 stack=5
+GoStart dt=23 g=118 g_seq=2
+GCMarkAssistEnd dt=7
+HeapAlloc dt=21 heapalloc_value=192207648
+GCMarkAssistBegin dt=103 stack=3
+GoBlock dt=18 reason_string=13 stack=11
+GoUnblock dt=48 g=29 g_seq=13 stack=0
+GoStart dt=1 g=29 g_seq=14
+GoLabel dt=1 label_string=4
+GoBlock dt=19 reason_string=15 stack=5
+GoUnblock dt=44 g=25 g_seq=23 stack=0
+GoStart dt=6 g=25 g_seq=24
+GoLabel dt=1 label_string=4
+GoBlock dt=144 reason_string=15 stack=5
+GoUnblock dt=49 g=29 g_seq=17 stack=0
+GoStart dt=1 g=29 g_seq=18
+GoLabel dt=1 label_string=4
+GoBlock dt=777 reason_string=15 stack=5
+GoUnblock dt=56 g=52 g_seq=31 stack=0
+GoStart dt=3 g=52 g_seq=32
+GoLabel dt=1 label_string=4
+GoBlock dt=21 reason_string=15 stack=5
+GoUnblock dt=27 g=51 g_seq=33 stack=0
+GoStart dt=5 g=51 g_seq=34
+GoLabel dt=1 label_string=2
+GoBlock dt=12 reason_string=15 stack=5
+GoUnblock dt=13 g=51 g_seq=35 stack=0
+GoStart dt=4 g=51 g_seq=36
+GoLabel dt=1 label_string=2
+GoBlock dt=226 reason_string=15 stack=5
+GoUnblock dt=7 g=51 g_seq=37 stack=0
+GoStart dt=4 g=51 g_seq=38
+GoLabel dt=1 label_string=2
+GoBlock dt=3928 reason_string=15 stack=5
+GoUnblock dt=14 g=51 g_seq=39 stack=0
+GoStart dt=3 g=51 g_seq=40
+GoLabel dt=3 label_string=2
+GoBlock dt=214 reason_string=15 stack=5
+GoUnblock dt=5 g=51 g_seq=41 stack=0
+GoStart dt=1 g=51 g_seq=42
+GoLabel dt=1 label_string=2
+GoBlock dt=305 reason_string=15 stack=5
+GoUnblock dt=8 g=51 g_seq=43 stack=0
+GoStart dt=5 g=51 g_seq=44
+GoLabel dt=1 label_string=2
+GoBlock dt=9 reason_string=15 stack=5
+ProcStop dt=47
+ProcStart dt=5058 p=35 p_seq=4
+GoUnblock dt=20 g=52 g_seq=51 stack=0
+GoStart dt=188 g=52 g_seq=52
+GoLabel dt=1 label_string=2
+GoBlock dt=33 reason_string=15 stack=5
+GoUnblock dt=9 g=52 g_seq=53 stack=0
+GoStart dt=4 g=52 g_seq=54
+GoLabel dt=1 label_string=2
+GoBlock dt=12 reason_string=15 stack=5
+GoStart dt=14 g=126 g_seq=3
+GCMarkAssistEnd dt=7
+HeapAlloc dt=2068 heapalloc_value=192592600
+GoStop dt=31 reason_string=16 stack=4
+GoUnblock dt=10 g=30 g_seq=39 stack=0
+GoStart dt=4 g=30 g_seq=40
+GoLabel dt=1 label_string=2
+GoBlock dt=54 reason_string=15 stack=5
+GoStart dt=708 g=121 g_seq=9
+GoBlock dt=49 reason_string=13 stack=11
+GoStart dt=18 g=90 g_seq=5
+GCMarkAssistBegin dt=12 stack=3
+GoBlock dt=39 reason_string=13 stack=11
+GoUnblock dt=15 g=71 g_seq=31 stack=0
+GoStart dt=4 g=71 g_seq=32
+GoLabel dt=2 label_string=2
+GoBlock dt=1101 reason_string=15 stack=5
+GoUnblock dt=13 g=71 g_seq=33 stack=0
+GoStart dt=4 g=71 g_seq=34
+GoLabel dt=1 label_string=2
+GoBlock dt=27 reason_string=15 stack=5
+GoUnblock dt=18 g=14 g_seq=54 stack=0
+GoStart dt=4 g=14 g_seq=55
+GoLabel dt=2 label_string=2
+GoUnblock dt=2171 g=94 g_seq=8 stack=12
+GoBlock dt=28 reason_string=15 stack=5
+GoStart dt=11 g=94 g_seq=9
+GCMarkAssistEnd dt=6
+HeapAlloc dt=42 heapalloc_value=193665752
+GCMarkAssistBegin dt=100 stack=3
+GoBlock dt=30 reason_string=13 stack=11
+GoStart dt=13 g=106 g_seq=6
+HeapAlloc dt=99 heapalloc_value=193977048
+GCMarkAssistBegin dt=21 stack=3
+GoBlock dt=30 reason_string=13 stack=11
+GoStart dt=16 g=92 g_seq=4
+GCMarkAssistEnd dt=6
+HeapAlloc dt=884 heapalloc_value=195205848
+GoStop dt=3270 reason_string=16 stack=4
+GoStart dt=29 g=97 g_seq=6
+GCMarkAssistEnd dt=6
+HeapAlloc dt=42 heapalloc_value=195795408
+GCMarkAssistBegin dt=3026 stack=3
+GoBlock dt=85 reason_string=10 stack=18
+ProcStop dt=99
+EventBatch gen=3 m=169426 time=28114950897488 size=116
+ProcStatus dt=1 p=32 pstatus=1
+GoStatus dt=2 g=90 m=169426 gstatus=2
+GCMarkAssistActive dt=1 g=90
+GCMarkAssistEnd dt=3
+HeapAlloc dt=47 heapalloc_value=190627264
+GCMarkAssistBegin dt=51 stack=3
+GoBlock dt=2393 reason_string=13 stack=11
+GoStart dt=1449 g=125 g_seq=2
+GoStatus dt=26 g=127 m=18446744073709551615 gstatus=4
+GoUnblock dt=16 g=127 g_seq=1 stack=10
+GCMarkAssistBegin dt=17 stack=3
+GoStop dt=6909 reason_string=20 stack=9
+GoStart dt=20 g=125 g_seq=3
+GoBlock dt=2101 reason_string=13 stack=11
+GoUnblock dt=2575 g=71 g_seq=11 stack=0
+GoStart dt=8 g=71 g_seq=12
+GoLabel dt=3 label_string=4
+GoBlock dt=44 reason_string=15 stack=5
+GoStart dt=20 g=82 g_seq=7
+GoBlock dt=367 reason_string=13 stack=11
+GoUnblock dt=11 g=22 g_seq=9 stack=0
+GoStart dt=4 g=22 g_seq=10
+GoLabel dt=1 label_string=2
+GoBlock dt=3492 reason_string=15 stack=5
+ProcStop dt=9
+EventBatch gen=3 m=169425 time=28114950900302 size=349
+ProcStatus dt=2 p=10 pstatus=1
+GoStatus dt=2 g=70 m=169425 gstatus=2
+GoBlock dt=8 reason_string=15 stack=5
+GoUnblock dt=15 g=70 g_seq=1 stack=0
+GoStart dt=5 g=70 g_seq=2
+GoLabel dt=1 label_string=2
+GoBlock dt=5604 reason_string=15 stack=5
+GoUnblock dt=33 g=29 g_seq=3 stack=0
+GoStart dt=5 g=29 g_seq=4
+GoLabel dt=1 label_string=2
+GoBlock dt=1191 reason_string=15 stack=5
+GoUnblock dt=9 g=29 g_seq=5 stack=0
+GoStart dt=5 g=29 g_seq=6
+GoLabel dt=2 label_string=2
+GoBlock dt=1935 reason_string=15 stack=5
+GoUnblock dt=15 g=51 g_seq=11 stack=0
+GoStart dt=5 g=51 g_seq=12
+GoLabel dt=1 label_string=2
+GoBlock dt=307 reason_string=15 stack=5
+ProcStop dt=4189
+ProcStart dt=15 p=10 p_seq=1
+GoUnblock dt=10 g=69 g_seq=17 stack=0
+GoStart dt=4 g=69 g_seq=18
+GoLabel dt=1 label_string=2
+GoUnblock dt=780 g=93 g_seq=3 stack=12
+GoBlock dt=6076 reason_string=15 stack=5
+GoUnblock dt=11 g=69 g_seq=19 stack=0
+GoStart dt=4 g=69 g_seq=20
+GoLabel dt=3 label_string=2
+GoUnblock dt=93 g=98 g_seq=5 stack=12
+GoBlock dt=5034 reason_string=15 stack=5
+GoUnblock dt=14 g=58 g_seq=25 stack=0
+GoStart dt=5 g=58 g_seq=26
+GoLabel dt=2 label_string=2
+GoBlock dt=1253 reason_string=15 stack=5
+GoUnblock dt=6 g=58 g_seq=27 stack=0
+GoStart dt=4 g=58 g_seq=28
+GoLabel dt=1 label_string=2
+GoBlock dt=1031 reason_string=15 stack=5
+GoUnblock dt=6 g=58 g_seq=29 stack=0
+GoStart dt=4 g=58 g_seq=30
+GoLabel dt=1 label_string=2
+GoBlock dt=17 reason_string=15 stack=5
+GoUnblock dt=24 g=52 g_seq=33 stack=0
+GoStart dt=6 g=52 g_seq=34
+GoLabel dt=1 label_string=2
+GoBlock dt=321 reason_string=15 stack=5
+GoUnblock dt=75 g=29 g_seq=27 stack=0
+GoStart dt=4 g=29 g_seq=28
+GoLabel dt=1 label_string=4
+GoBlock dt=248 reason_string=15 stack=5
+GoUnblock dt=61 g=57 g_seq=47 stack=0
+GoStart dt=4 g=57 g_seq=48
+GoLabel dt=1 label_string=4
+GoBlock dt=794 reason_string=15 stack=5
+ProcStop dt=41
+ProcStart dt=15678 p=21 p_seq=3
+GoStart dt=22 g=88 g_seq=6
+GCMarkAssistEnd dt=9
+HeapAlloc dt=84 heapalloc_value=194730712
+GoStop dt=2177 reason_string=16 stack=6
+GoStart dt=2495 g=88 g_seq=7
+GCMarkAssistBegin dt=19 stack=3
+GoBlock dt=37 reason_string=10 stack=18
+GoStart dt=15 g=126 g_seq=6
+GCMarkAssistEnd dt=5
+HeapAlloc dt=27 heapalloc_value=196114896
+GCMarkAssistBegin dt=18 stack=3
+GoBlock dt=30 reason_string=10 stack=18
+GoStart dt=15 g=98 g_seq=10
+GCMarkAssistEnd dt=6
+HeapAlloc dt=48 heapalloc_value=196155856
+GoStop dt=6168 reason_string=16 stack=6
+GoStart dt=156 g=98 g_seq=11
+GCMarkAssistBegin dt=9 stack=3
+GoBlock dt=27 reason_string=10 stack=18
+GoStart dt=27 g=94 g_seq=12
+GCMarkAssistBegin dt=13 stack=3
+GoBlock dt=35 reason_string=10 stack=18
+ProcStop dt=55
+ProcStart dt=14725 p=13 p_seq=1
+GoStart dt=171 g=112 g_seq=9
+GCMarkAssistEnd dt=6
+GCSweepBegin dt=222 stack=27
+EventBatch gen=3 m=169424 time=28114950894869 size=176
+ProcStatus dt=1 p=23 pstatus=3
+GoStatus dt=2 g=131 m=169424 gstatus=3
+GoSyscallEnd dt=1
+GoBlock dt=64 reason_string=15 stack=2
+GoUnblock dt=2260 g=67 g_seq=1 stack=0
+GoStart dt=6 g=67 g_seq=2
+GoLabel dt=2 label_string=4
+GoBlock dt=530 reason_string=15 stack=5
+GoUnblock dt=12 g=69 g_seq=3 stack=0
+GoStart dt=5 g=69 g_seq=4
+GoLabel dt=1 label_string=2
+GoUnblock dt=2455 g=90 g_seq=1 stack=12
+GoBlock dt=20 reason_string=15 stack=5
+GoUnblock dt=16 g=69 g_seq=5 stack=0
+GoStart dt=7 g=69 g_seq=6
+GoLabel dt=1 label_string=2
+GoUnblock dt=493 g=120 g_seq=2 stack=12
+GoBlock dt=1777 reason_string=15 stack=5
+GoUnblock dt=832 g=69 g_seq=7 stack=0
+GoStart dt=9 g=69 g_seq=8
+GoLabel dt=2 label_string=2
+GoBlock dt=5425 reason_string=15 stack=5
+GoUnblock dt=15 g=69 g_seq=9 stack=0
+GoStart dt=4 g=69 g_seq=10
+GoLabel dt=1 label_string=2
+GoBlock dt=1370 reason_string=15 stack=5
+ProcStop dt=2533
+ProcStart dt=11 p=23 p_seq=1
+GoUnblock dt=3354 g=54 g_seq=17 stack=0
+GoStart dt=7 g=54 g_seq=18
+GoLabel dt=1 label_string=4
+GoBlock dt=29 reason_string=15 stack=5
+GoUnblock dt=25 g=54 g_seq=19 stack=0
+GoStart dt=5 g=54 g_seq=20
+GoLabel dt=2 label_string=2
+GoBlock dt=232 reason_string=15 stack=5
+GoUnblock dt=11 g=54 g_seq=21 stack=0
+GoStart dt=7 g=54 g_seq=22
+GoLabel dt=1 label_string=2
+GoBlock dt=293 reason_string=15 stack=5
+ProcStop dt=11
+EventBatch gen=3 m=169423 time=28114950894865 size=525
+ProcStatus dt=2 p=30 pstatus=1
+GoStatus dt=4 g=87 m=169423 gstatus=2
+GCMarkAssistActive dt=1 g=87
+GCMarkAssistEnd dt=2
+HeapAlloc dt=98 heapalloc_value=189963712
+GoStop dt=56 reason_string=16 stack=6
+GoUnblock dt=20 g=131 g_seq=1 stack=0
+GoStart dt=7 g=131 g_seq=2
+GoSyscallBegin dt=39 p_seq=1 stack=7
+GoSyscallEnd dt=304
+GoSyscallBegin dt=19 p_seq=2 stack=7
+GoSyscallEnd dt=59
+GoSyscallBegin dt=15 p_seq=3 stack=7
+GoSyscallEnd dt=52
+GoSyscallBegin dt=11 p_seq=4 stack=7
+GoSyscallEnd dt=50
+GoSyscallBegin dt=8 p_seq=5 stack=7
+GoSyscallEnd dt=48
+GoSyscallBegin dt=10 p_seq=6 stack=7
+GoSyscallEnd dt=54
+GoSyscallBegin dt=13 p_seq=7 stack=7
+GoSyscallEnd dt=51
+GoSyscallBegin dt=12 p_seq=8 stack=7
+GoSyscallEnd dt=49
+GoSyscallBegin dt=16 p_seq=9 stack=7
+GoSyscallEnd dt=245
+GoSyscallBegin dt=12 p_seq=10 stack=7
+GoSyscallEnd dt=49
+GoSyscallBegin dt=10 p_seq=11 stack=7
+GoSyscallEnd dt=49
+GoSyscallBegin dt=10 p_seq=12 stack=7
+GoSyscallEnd dt=48
+GoSyscallBegin dt=6 p_seq=13 stack=7
+GoSyscallEnd dt=52
+GoStop dt=24 reason_string=16 stack=8
+GoUnblock dt=9 g=14 g_seq=1 stack=0
+GoStart dt=5 g=14 g_seq=2
+GoLabel dt=1 label_string=2
+GoUnblock dt=2948 g=107 g_seq=1 stack=12
+GoBlock dt=2891 reason_string=15 stack=5
+GoUnblock dt=11 g=14 g_seq=3 stack=0
+GoStart dt=5 g=14 g_seq=4
+GoLabel dt=1 label_string=2
+GoBlock dt=1138 reason_string=15 stack=5
+GoUnblock dt=22 g=51 g_seq=5 stack=0
+GoStart dt=5 g=51 g_seq=6
+GoLabel dt=1 label_string=2
+GoUnblock dt=451 g=82 g_seq=3 stack=12
+GoBlock dt=460 reason_string=15 stack=5
+GoUnblock dt=4052 g=54 g_seq=5 stack=0
+GoStart dt=11 g=54 g_seq=6
+GoLabel dt=1 label_string=4
+GoBlock dt=72 reason_string=15 stack=5
+GoUnblock dt=1333 g=57 g_seq=15 stack=0
+GoStart dt=8 g=57 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=283 reason_string=15 stack=5
+GoUnblock dt=1185 g=57 g_seq=19 stack=0
+GoStart dt=7 g=57 g_seq=20
+GoLabel dt=1 label_string=4
+GoBlock dt=134 reason_string=15 stack=5
+GoUnblock dt=1144 g=53 g_seq=11 stack=0
+GoStart dt=6 g=53 g_seq=12
+GoLabel dt=1 label_string=4
+GoBlock dt=372 reason_string=15 stack=5
+GoUnblock dt=16 g=53 g_seq=13 stack=0
+GoStart dt=7 g=53 g_seq=14
+GoLabel dt=1 label_string=2
+GoBlock dt=8581 reason_string=15 stack=5
+ProcStop dt=76
+ProcStart dt=22 p=30 p_seq=14
+GoUnblock dt=3 g=72 g_seq=31 stack=0
+GoStart dt=7 g=72 g_seq=32
+GoLabel dt=1 label_string=4
+GoBlock dt=46 reason_string=15 stack=5
+GoUnblock dt=63 g=23 g_seq=31 stack=0
+GoStart dt=7 g=23 g_seq=32
+GoLabel dt=1 label_string=4
+GoBlock dt=34 reason_string=15 stack=5
+GoUnblock dt=14 g=23 g_seq=33 stack=0
+GoStart dt=4 g=23 g_seq=34
+GoLabel dt=2 label_string=2
+GoBlock dt=47 reason_string=15 stack=5
+GoUnblock dt=74 g=53 g_seq=19 stack=0
+GoStart dt=6 g=53 g_seq=20
+GoLabel dt=1 label_string=4
+GoBlock dt=154 reason_string=15 stack=5
+GoStatus dt=91 g=56 m=18446744073709551615 gstatus=4
+GoUnblock dt=2 g=56 g_seq=1 stack=0
+GoStart dt=43 g=56 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=45 reason_string=15 stack=5
+GoUnblock dt=65 g=53 g_seq=23 stack=0
+GoStart dt=8 g=53 g_seq=24
+GoLabel dt=1 label_string=4
+GoBlock dt=16 reason_string=15 stack=5
+ProcStop dt=2526
+ProcStart dt=208 p=30 p_seq=15
+GoUnblock dt=8 g=53 g_seq=37 stack=0
+GoStart dt=5 g=53 g_seq=38
+GoLabel dt=1 label_string=4
+GoBlock dt=694 reason_string=15 stack=5
+GoUnblock dt=14 g=53 g_seq=39 stack=0
+GoStart dt=4 g=53 g_seq=40
+GoLabel dt=3 label_string=2
+GoBlock dt=336 reason_string=15 stack=5
+GoUnblock dt=52 g=53 g_seq=41 stack=0
+GoStart dt=4 g=53 g_seq=42
+GoLabel dt=1 label_string=4
+GoUnblock dt=449 g=118 g_seq=1 stack=12
+GoBlock dt=17 reason_string=15 stack=5
+GoUnblock dt=65 g=24 g_seq=31 stack=0
+GoStart dt=6 g=24 g_seq=32
+GoLabel dt=2 label_string=4
+GoBlock dt=56 reason_string=15 stack=5
+GoUnblock dt=54 g=24 g_seq=33 stack=0
+GoStart dt=5 g=24 g_seq=34
+GoLabel dt=1 label_string=4
+GoBlock dt=489 reason_string=15 stack=5
+GoUnblock dt=9 g=24 g_seq=35 stack=0
+GoStart dt=4 g=24 g_seq=36
+GoLabel dt=1 label_string=2
+GoBlock dt=1307 reason_string=15 stack=5
+ProcStop dt=84
+ProcStart dt=23944 p=15 p_seq=1
+GoStart dt=174 g=108 g_seq=3
+GCMarkAssistBegin dt=25 stack=3
+GoBlock dt=59 reason_string=10 stack=18
+ProcStop dt=71
+EventBatch gen=3 m=169421 time=28114950900230 size=330
+ProcStatus dt=1 p=33 pstatus=1
+GoStatus dt=5 g=81 m=169421 gstatus=2
+GCMarkAssistActive dt=3 g=81
+GoBlock dt=7 reason_string=13 stack=11
+GoStatus dt=1543 g=57 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=57 g_seq=1 stack=0
+GoStart dt=10 g=57 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=123 reason_string=15 stack=5
+GoStatus dt=1345 g=58 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=58 g_seq=1 stack=0
+GoStart dt=5 g=58 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=154 reason_string=15 stack=5
+GoUnblock dt=5938 g=54 g_seq=7 stack=0
+GoStart dt=7 g=54 g_seq=8
+GoLabel dt=1 label_string=4
+GoBlock dt=93 reason_string=15 stack=5
+GoStart dt=1331 g=97 g_seq=2
+GoStatus dt=26 g=93 m=18446744073709551615 gstatus=4
+GoUnblock dt=6 g=93 g_seq=1 stack=10
+GCMarkAssistBegin dt=18 stack=3
+GCMarkAssistEnd dt=1894
+HeapAlloc dt=57 heapalloc_value=191872448
+GCMarkAssistBegin dt=26 stack=3
+GoBlock dt=46 reason_string=13 stack=11
+GoUnblock dt=2442 g=52 g_seq=19 stack=0
+GoStart dt=14 g=52 g_seq=20
+GoLabel dt=1 label_string=4
+GoBlock dt=767 reason_string=15 stack=5
+ProcStop dt=2248
+ProcStart dt=24 p=33 p_seq=1
+GoUnblock dt=8 g=72 g_seq=27 stack=0
+GoStart dt=7 g=72 g_seq=28
+GoLabel dt=1 label_string=4
+GoUnblock dt=172 g=119 g_seq=3 stack=12
+GoBlock dt=1629 reason_string=15 stack=5
+GoUnblock dt=71 g=28 g_seq=9 stack=0
+GoStart dt=7 g=28 g_seq=10
+GoLabel dt=1 label_string=4
+GoBlock dt=276 reason_string=15 stack=5
+GoUnblock dt=72 g=28 g_seq=11 stack=0
+GoStart dt=4 g=28 g_seq=12
+GoLabel dt=1 label_string=4
+GoBlock dt=2016 reason_string=15 stack=5
+GoUnblock dt=16 g=28 g_seq=13 stack=0
+GoStart dt=7 g=28 g_seq=14
+GoLabel dt=1 label_string=2
+GoBlock dt=6712 reason_string=15 stack=5
+ProcStop dt=63
+ProcStart dt=20808 p=14 p_seq=1
+GoStart dt=205 g=89 g_seq=7
+GCMarkAssistEnd dt=10
+HeapAlloc dt=64 heapalloc_value=196245968
+GoStop dt=6073 reason_string=16 stack=6
+GoStart dt=21 g=89 g_seq=8
+GCMarkAssistBegin dt=15 stack=3
+GoBlock dt=38 reason_string=10 stack=18
+ProcStop dt=129
+ProcStart dt=13557 p=11 p_seq=7
+GoStart dt=202 g=116 g_seq=12
+GCMarkAssistEnd dt=10
+GCSweepBegin dt=25 stack=28
+GCSweepEnd dt=12 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=3 heapalloc_value=114760576
+GCSweepBegin dt=70 stack=28
+GCSweepEnd dt=5 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=1 heapalloc_value=114785152
+GCSweepBegin dt=353 stack=27
+EventBatch gen=3 m=169420 time=28114950896337 size=112
+ProcStatus dt=2 p=17 pstatus=1
+GoStatus dt=1 g=84 m=169420 gstatus=2
+GCMarkAssistActive dt=1 g=84
+GCMarkAssistEnd dt=3
+HeapAlloc dt=20 heapalloc_value=190365120
+GCMarkAssistBegin dt=42 stack=3
+GoStop dt=861 reason_string=20 stack=9
+GoStart dt=142 g=126 g_seq=1
+GoBlock dt=2538 reason_string=13 stack=11
+GoUnblock dt=1653 g=30 g_seq=1 stack=0
+GoStart dt=7 g=30 g_seq=2
+GoLabel dt=2 label_string=4
+GoBlock dt=6064 reason_string=15 stack=5
+GoUnblock dt=1633 g=25 g_seq=11 stack=0
+GoStart dt=8 g=25 g_seq=12
+GoLabel dt=1 label_string=4
+GoBlock dt=4927 reason_string=15 stack=5
+GoUnblock dt=3569 g=67 g_seq=11 stack=0
+GoStart dt=7 g=67 g_seq=12
+GoLabel dt=1 label_string=4
+GoBlock dt=1289 reason_string=15 stack=5
+GoUnblock dt=73 g=67 g_seq=13 stack=0
+GoStart dt=4 g=67 g_seq=14
+GoLabel dt=1 label_string=4
+GoBlock dt=46 reason_string=15 stack=5
+ProcStop dt=52
+EventBatch gen=3 m=169419 time=28114950898971 size=132
+ProcStatus dt=2 p=13 pstatus=1
+GoStatus dt=2 g=30 m=169419 gstatus=2
+GoBlock dt=7 reason_string=15 stack=5
+GoUnblock dt=2697 g=72 g_seq=1 stack=0
+GoStart dt=8 g=72 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=969 reason_string=15 stack=5
+GoStart dt=2978 g=95 g_seq=2
+GoStatus dt=32 g=88 m=18446744073709551615 gstatus=4
+GoUnblock dt=7 g=88 g_seq=1 stack=10
+GCMarkAssistBegin dt=18 stack=3
+GoStop dt=1258 reason_string=20 stack=9
+GoStart dt=17 g=88 g_seq=2
+GoStatus dt=27 g=113 m=18446744073709551615 gstatus=4
+GoUnblock dt=5 g=113 g_seq=1 stack=10
+GCMarkAssistBegin dt=12 stack=3
+GoStop dt=1797 reason_string=20 stack=9
+GoStart dt=18 g=88 g_seq=3
+GoStop dt=2883 reason_string=20 stack=9
+GoUnblock dt=14 g=70 g_seq=7 stack=0
+GoStart dt=5 g=70 g_seq=8
+GoLabel dt=3 label_string=2
+GoBlock dt=5294 reason_string=15 stack=5
+GoStart dt=14 g=123 g_seq=5
+GoBlock dt=18 reason_string=13 stack=11
+ProcStop dt=16
+EventBatch gen=3 m=169418 time=28114950895095 size=398
+ProcStatus dt=1 p=35 pstatus=2
+ProcStart dt=2 p=35 p_seq=1
+GoStart dt=38 g=87 g_seq=1
+HeapAlloc dt=103 heapalloc_value=190086592
+GCMarkAssistBegin dt=64 stack=3
+GCMarkAssistEnd dt=3228
+HeapAlloc dt=76 heapalloc_value=190995904
+GCMarkAssistBegin dt=149 stack=3
+GoBlock dt=3823 reason_string=13 stack=11
+GoStart dt=1406 g=82 g_seq=4
+GCMarkAssistEnd dt=12
+HeapAlloc dt=82 heapalloc_value=191618496
+GCMarkAssistBegin dt=75 stack=3
+GoStop dt=4342 reason_string=20 stack=9
+GoStart dt=17 g=82 g_seq=5
+GoStop dt=987 reason_string=20 stack=9
+GoStart dt=26 g=82 g_seq=6
+GoStop dt=3601 reason_string=20 stack=9
+GoUnblock dt=14 g=58 g_seq=17 stack=0
+GoStart dt=3 g=58 g_seq=18
+GoLabel dt=3 label_string=2
+GoBlock dt=1524 reason_string=15 stack=5
+GoUnblock dt=23 g=25 g_seq=15 stack=0
+GoStart dt=8 g=25 g_seq=16
+GoLabel dt=3 label_string=2
+GoBlock dt=7942 reason_string=15 stack=5
+ProcStop dt=2920
+ProcStart dt=69 p=31 p_seq=1
+GoUnblock dt=7 g=67 g_seq=25 stack=0
+GoStart dt=5 g=67 g_seq=26
+GoLabel dt=1 label_string=4
+GoBlock dt=1990 reason_string=15 stack=5
+GoUnblock dt=110 g=67 g_seq=29 stack=0
+GoStart dt=6 g=67 g_seq=30
+GoLabel dt=1 label_string=4
+GoBlock dt=39 reason_string=15 stack=5
+GoUnblock dt=64 g=52 g_seq=29 stack=0
+GoStart dt=1 g=52 g_seq=30
+GoLabel dt=1 label_string=4
+GoBlock dt=40 reason_string=15 stack=5
+GoUnblock dt=72 g=29 g_seq=19 stack=0
+GoStart dt=7 g=29 g_seq=20
+GoLabel dt=1 label_string=4
+GoBlock dt=65 reason_string=15 stack=5
+GoUnblock dt=1007 g=23 g_seq=43 stack=0
+GoStart dt=8 g=23 g_seq=44
+GoLabel dt=1 label_string=4
+GoBlock dt=1633 reason_string=15 stack=5
+GoUnblock dt=7 g=23 g_seq=45 stack=0
+GoStart dt=160 g=23 g_seq=46
+GoLabel dt=1 label_string=2
+GoBlock dt=16 reason_string=15 stack=5
+ProcStop dt=31
+ProcStart dt=8279 p=26 p_seq=2
+GoStart dt=216 g=103 g_seq=3
+GCMarkAssistBegin dt=19 stack=3
+GoBlock dt=41 reason_string=13 stack=11
+GoUnblock dt=11 g=25 g_seq=47 stack=0
+GoStart dt=3 g=25 g_seq=48
+GoLabel dt=1 label_string=2
+GoBlock dt=1274 reason_string=15 stack=5
+ProcStop dt=46
+ProcStart dt=1294 p=26 p_seq=3
+GoStart dt=164 g=127 g_seq=6
+GoStop dt=45 reason_string=20 stack=9
+GoStart dt=17 g=127 g_seq=7
+GCMarkAssistEnd dt=1921
+GoStop dt=49 reason_string=16 stack=17
+GoUnblock dt=17 g=22 g_seq=59 stack=0
+GoStart dt=8 g=22 g_seq=60
+GoLabel dt=1 label_string=2
+GoBlock dt=75 reason_string=15 stack=5
+GoStart dt=44 g=83 g_seq=7
+GCMarkAssistEnd dt=4
+GCMarkAssistBegin dt=50 stack=3
+GCMarkAssistEnd dt=27
+HeapAlloc dt=55 heapalloc_value=193551064
+GoStop dt=47 reason_string=16 stack=4
+GoStart dt=30 g=84 g_seq=7
+HeapAlloc dt=82 heapalloc_value=193772248
+HeapAlloc dt=291 heapalloc_value=194239192
+HeapAlloc dt=198 heapalloc_value=194493144
+HeapAlloc dt=7678 heapalloc_value=196524496
+GoStop dt=18 reason_string=16 stack=6
+ProcStop dt=218
+ProcStart dt=2082 p=26 p_seq=4
+ProcStop dt=68
+ProcStart dt=16818 p=14 p_seq=2
+GoStart dt=242 g=118 g_seq=7
+GCMarkAssistEnd dt=8
+GCSweepBegin dt=32 stack=28
+GCSweepEnd dt=11 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=3 heapalloc_value=114809728
+GCSweepBegin dt=279 stack=27
+EventBatch gen=3 m=169417 time=28114950894785 size=650
+ProcStatus dt=2 p=18 pstatus=1
+GoStatus dt=2 g=120 m=169417 gstatus=2
+GCMarkAssistActive dt=1 g=120
+GCMarkAssistEnd dt=2
+HeapAlloc dt=26 heapalloc_value=189767104
+GoStop dt=131 reason_string=16 stack=4
+GoStart dt=59 g=120 g_seq=1
+HeapAlloc dt=138 heapalloc_value=190045632
+GCMarkAssistBegin dt=31 stack=3
+GCMarkAssistEnd dt=3339
+HeapAlloc dt=63 heapalloc_value=190938560
+GCMarkAssistBegin dt=150 stack=3
+GoBlock dt=270 reason_string=13 stack=11
+GoUnblock dt=4058 g=57 g_seq=3 stack=0
+GoStart dt=7 g=57 g_seq=4
+GoLabel dt=1 label_string=4
+GoBlock dt=902 reason_string=15 stack=5
+GoUnblock dt=4049 g=30 g_seq=3 stack=0
+GoStart dt=8 g=30 g_seq=4
+GoLabel dt=1 label_string=4
+GoBlock dt=521 reason_string=15 stack=5
+GoUnblock dt=1581 g=57 g_seq=17 stack=0
+GoStart dt=11 g=57 g_seq=18
+GoLabel dt=3 label_string=4
+GoBlock dt=37 reason_string=15 stack=5
+GoUnblock dt=14 g=69 g_seq=11 stack=0
+GoStart dt=4 g=69 g_seq=12
+GoLabel dt=3 label_string=2
+GoBlock dt=543 reason_string=15 stack=5
+GoUnblock dt=10 g=69 g_seq=13 stack=0
+GoStart dt=6 g=69 g_seq=14
+GoLabel dt=1 label_string=2
+GoBlock dt=1813 reason_string=15 stack=5
+ProcStop dt=2875
+ProcStart dt=28 p=18 p_seq=1
+GoUnblock dt=1296 g=54 g_seq=23 stack=0
+GoStart dt=7 g=54 g_seq=24
+GoLabel dt=1 label_string=4
+GoBlock dt=1516 reason_string=15 stack=5
+GoUnblock dt=7 g=54 g_seq=25 stack=0
+GoStart dt=5 g=54 g_seq=26
+GoLabel dt=1 label_string=2
+GoBlock dt=6851 reason_string=15 stack=5
+GoUnblock dt=71 g=72 g_seq=41 stack=0
+GoStart dt=5 g=72 g_seq=42
+GoLabel dt=1 label_string=4
+GoBlock dt=21 reason_string=15 stack=5
+GoUnblock dt=5 g=72 g_seq=43 stack=0
+GoStart dt=3 g=72 g_seq=44
+GoLabel dt=1 label_string=2
+GoBlock dt=62 reason_string=15 stack=5
+GoUnblock dt=19 g=72 g_seq=45 stack=0
+GoStart dt=4 g=72 g_seq=46
+GoLabel dt=1 label_string=2
+GoBlock dt=3727 reason_string=15 stack=5
+ProcStop dt=69
+ProcStart dt=21 p=18 p_seq=2
+GoUnblock dt=10 g=70 g_seq=15 stack=0
+GoStart dt=3 g=70 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=14 reason_string=15 stack=5
+GoUnblock dt=49 g=70 g_seq=17 stack=0
+GoStart dt=3 g=70 g_seq=18
+GoLabel dt=1 label_string=4
+GoBlock dt=2314 reason_string=15 stack=5
+ProcStop dt=46
+ProcStart dt=1398 p=18 p_seq=3
+ProcStop dt=38
+ProcStart dt=4183 p=18 p_seq=4
+GoStart dt=183 g=96 g_seq=4
+GCMarkAssistEnd dt=9
+HeapAlloc dt=36 heapalloc_value=192305952
+GCMarkAssistBegin dt=28 stack=3
+GoBlock dt=1320 reason_string=13 stack=11
+GoUnblock dt=15 g=25 g_seq=45 stack=0
+GoStart dt=7 g=25 g_seq=46
+GoLabel dt=1 label_string=2
+GoBlock dt=600 reason_string=15 stack=5
+GoStart dt=13 g=89 g_seq=5
+GCMarkAssistBegin dt=11 stack=3
+GoBlock dt=112 reason_string=13 stack=11
+GoStart dt=14 g=121 g_seq=8
+GoStop dt=1488 reason_string=20 stack=9
+GoUnblock dt=16 g=25 g_seq=49 stack=0
+GoStart dt=7 g=25 g_seq=50
+GoLabel dt=2 label_string=2
+GoUnblock dt=1803 g=115 g_seq=6 stack=12
+GoUnblock dt=5 g=93 g_seq=6 stack=12
+GoUnblock dt=6 g=85 g_seq=5 stack=12
+GoUnblock dt=3 g=104 g_seq=1 stack=12
+GoUnblock dt=6 g=108 g_seq=1 stack=12
+GoUnblock dt=4 g=120 g_seq=4 stack=12
+GoUnblock dt=4 g=126 g_seq=5 stack=12
+GoUnblock dt=7 g=114 g_seq=3 stack=12
+GoUnblock dt=5 g=86 g_seq=4 stack=12
+GoUnblock dt=4 g=110 g_seq=3 stack=12
+GoBlock dt=14 reason_string=15 stack=5
+GoUnblock dt=7 g=25 g_seq=51 stack=0
+GoStart dt=1 g=25 g_seq=52
+GoLabel dt=1 label_string=2
+GoBlock dt=12 reason_string=15 stack=5
+GoStart dt=8 g=115 g_seq=7
+GCMarkAssistEnd dt=6
+HeapAlloc dt=32 heapalloc_value=192838360
+GCMarkAssistBegin dt=55 stack=3
+GoStop dt=1501 reason_string=20 stack=9
+GoUnblock dt=18 g=51 g_seq=51 stack=0
+GoStart dt=6 g=51 g_seq=52
+GoLabel dt=1 label_string=2
+GoUnblock dt=1117 g=105 g_seq=13 stack=12
+GoBlock dt=8 reason_string=15 stack=5
+GoStart dt=8 g=105 g_seq=14
+GCMarkAssistEnd dt=6
+GCMarkAssistBegin dt=27 stack=3
+GoBlock dt=13 reason_string=13 stack=11
+GoStart dt=7 g=86 g_seq=5
+GCMarkAssistEnd dt=6
+HeapAlloc dt=12 heapalloc_value=195148504
+GCMarkAssistBegin dt=65 stack=3
+HeapAlloc dt=22 heapalloc_value=195214040
+GoBlock dt=17 reason_string=10 stack=18
+GoStart dt=2881 g=124 g_seq=5
+HeapAlloc dt=35 heapalloc_value=195517144
+HeapAlloc dt=15 heapalloc_value=195566296
+HeapAlloc dt=61 heapalloc_value=195574224
+HeapAlloc dt=16 heapalloc_value=195631568
+GCMarkAssistBegin dt=23 stack=3
+GoBlock dt=34 reason_string=10 stack=18
+GoStart dt=14 g=90 g_seq=7
+GCMarkAssistEnd dt=5
+HeapAlloc dt=4 heapalloc_value=195697104
+GCMarkAssistBegin dt=58 stack=3
+GoBlock dt=15 reason_string=10 stack=18
+GoStart dt=10 g=96 g_seq=6
+GCMarkAssistEnd dt=3
+GCMarkAssistBegin dt=37 stack=3
+GoBlock dt=16 reason_string=13 stack=11
+GoStart dt=14 g=92 g_seq=5
+GCMarkAssistBegin dt=75 stack=3
+GoBlock dt=25 reason_string=10 stack=18
+GoStart dt=9 g=119 g_seq=6
+GCMarkAssistEnd dt=5
+HeapAlloc dt=20 heapalloc_value=195926480
+GCMarkAssistBegin dt=19 stack=3
+GoBlock dt=14 reason_string=10 stack=18
+GoStart dt=9 g=85 g_seq=6
+GCMarkAssistEnd dt=3
+HeapAlloc dt=38 heapalloc_value=195983824
+GoStop dt=5763 reason_string=16 stack=6
+GoStart dt=15 g=85 g_seq=7
+GCMarkAssistBegin dt=6 stack=3
+GoBlock dt=21 reason_string=10 stack=18
+ProcStop dt=46
+ProcStart dt=17429 p=15 p_seq=2
+GoStart dt=180 g=3 g_seq=2
+EventBatch gen=3 m=169416 time=28114950894874 size=516
+ProcStatus dt=2 p=26 pstatus=1
+GoStatus dt=1 g=98 m=169416 gstatus=2
+GCMarkAssistActive dt=1 g=98
+GCMarkAssistEnd dt=2
+HeapAlloc dt=136 heapalloc_value=190004672
+GoStop dt=29 reason_string=16 stack=4
+GoStart dt=29 g=86 g_seq=1
+GCMarkAssistBegin dt=75 stack=3
+GCMarkAssistEnd dt=8456
+HeapAlloc dt=48 heapalloc_value=191569344
+GoStop dt=32 reason_string=16 stack=4
+GoStart dt=19 g=86 g_seq=2
+GCMarkAssistBegin dt=73 stack=3
+GoStop dt=324 reason_string=20 stack=9
+GoStart dt=11 g=116 g_seq=3
+GoStop dt=270 reason_string=20 stack=9
+GoUnblock dt=14 g=51 g_seq=7 stack=0
+GoStart dt=4 g=51 g_seq=8
+GoLabel dt=3 label_string=2
+GoBlock dt=4139 reason_string=15 stack=5
+GoUnblock dt=9 g=51 g_seq=9 stack=0
+GoStart dt=6 g=51 g_seq=10
+GoLabel dt=1 label_string=2
+GoBlock dt=564 reason_string=15 stack=5
+GoUnblock dt=12 g=29 g_seq=7 stack=0
+GoStart dt=4 g=29 g_seq=8
+GoLabel dt=1 label_string=2
+GoBlock dt=13475 reason_string=15 stack=5
+ProcStop dt=61
+ProcStart dt=17 p=26 p_seq=1
+GoUnblock dt=6 g=53 g_seq=31 stack=0
+GoStart dt=3 g=53 g_seq=32
+GoLabel dt=1 label_string=4
+GoBlock dt=18 reason_string=15 stack=5
+GoUnblock dt=6 g=53 g_seq=33 stack=0
+GoStart dt=4 g=53 g_seq=34
+GoLabel dt=1 label_string=2
+GoBlock dt=2291 reason_string=15 stack=5
+GoUnblock dt=8 g=53 g_seq=35 stack=0
+GoStart dt=4 g=53 g_seq=36
+GoLabel dt=1 label_string=2
+GoBlock dt=32 reason_string=15 stack=5
+GoUnblock dt=68 g=29 g_seq=9 stack=0
+GoStart dt=4 g=29 g_seq=10
+GoLabel dt=1 label_string=4
+GoBlock dt=796 reason_string=15 stack=5
+GoUnblock dt=60 g=29 g_seq=11 stack=0
+GoStart dt=4 g=29 g_seq=12
+GoLabel dt=1 label_string=4
+GoBlock dt=643 reason_string=15 stack=5
+GoUnblock dt=61 g=53 g_seq=43 stack=0
+GoStart dt=4 g=53 g_seq=44
+GoLabel dt=1 label_string=4
+GoBlock dt=3485 reason_string=15 stack=5
+GoUnblock dt=10 g=53 g_seq=45 stack=0
+GoStart dt=5 g=53 g_seq=46
+GoLabel dt=1 label_string=2
+GoBlock dt=14 reason_string=15 stack=5
+ProcStop dt=38
+ProcStart dt=9443 p=0 p_seq=3
+GoStart dt=226 g=115 g_seq=5
+GoBlock dt=168 reason_string=13 stack=11
+GoUnblock dt=11 g=30 g_seq=37 stack=0
+GoStart dt=6 g=30 g_seq=38
+GoLabel dt=1 label_string=2
+GoBlock dt=67 reason_string=15 stack=5
+ProcStop dt=46
+ProcStart dt=1842 p=25 p_seq=6
+GoUnblock dt=12 g=29 g_seq=37 stack=0
+GoStart dt=5 g=29 g_seq=38
+GoLabel dt=1 label_string=2
+GoBlock dt=2260 reason_string=15 stack=5
+GoUnblock dt=16 g=29 g_seq=39 stack=0
+GoStart dt=4 g=29 g_seq=40
+GoLabel dt=1 label_string=2
+GoBlock dt=19 reason_string=15 stack=5
+GoStart dt=34 g=99 g_seq=4
+GCMarkAssistEnd dt=7
+HeapAlloc dt=55 heapalloc_value=193501912
+GoStop dt=29 reason_string=16 stack=4
+GoUnblock dt=18 g=29 g_seq=41 stack=0
+GoStart dt=7 g=29 g_seq=42
+GoLabel dt=1 label_string=2
+GoBlock dt=10 reason_string=15 stack=5
+GoUnblock dt=14 g=29 g_seq=43 stack=0
+GoStart dt=4 g=29 g_seq=44
+GoLabel dt=1 label_string=2
+GoBlock dt=40 reason_string=15 stack=5
+GoStart dt=16 g=111 g_seq=6
+GoBlock dt=37 reason_string=13 stack=11
+GoStart dt=13 g=125 g_seq=6
+GCMarkAssistBegin dt=13 stack=3
+GoBlock dt=34 reason_string=13 stack=11
+GoStart dt=23 g=115 g_seq=8
+GoBlock dt=61 reason_string=13 stack=11
+GoStart dt=27 g=120 g_seq=5
+GCMarkAssistEnd dt=12
+HeapAlloc dt=82 heapalloc_value=194067160
+GoStop dt=22 reason_string=16 stack=4
+GoStart dt=10 g=93 g_seq=7
+GCMarkAssistEnd dt=4
+HeapAlloc dt=663 heapalloc_value=194992856
+GCMarkAssistBegin dt=23 stack=3
+GoBlock dt=12 reason_string=13 stack=11
+GoStart dt=11 g=99 g_seq=7
+GCMarkAssistEnd dt=5
+HeapAlloc dt=4741 heapalloc_value=196180432
+GoStop dt=10 reason_string=16 stack=6
+GoStart dt=19 g=99 g_seq=8
+GCMarkAssistBegin dt=8 stack=3
+GoBlock dt=18 reason_string=10 stack=18
+GoStart dt=9 g=100 g_seq=4
+GCMarkAssistEnd dt=5
+HeapAlloc dt=101 heapalloc_value=196295120
+GoStop dt=6074 reason_string=16 stack=6
+GoStart dt=49 g=100 g_seq=5
+GCMarkAssistBegin dt=10 stack=3
+GoBlock dt=32 reason_string=13 stack=11
+ProcStop dt=67
+ProcStart dt=12947 p=10 p_seq=3
+GoStart dt=200 g=86 g_seq=7
+GoUnblock dt=38 g=124 g_seq=6 stack=30
+GCMarkAssistEnd dt=5
+HeapAlloc dt=90 heapalloc_value=113809792
+HeapAlloc dt=112 heapalloc_value=114160256
+GCSweepBegin dt=694 stack=31
+EventBatch gen=3 m=169415 time=28114950903030 size=633
+ProcStatus dt=1 p=28 pstatus=1
+GoStatus dt=3 g=91 m=169415 gstatus=2
+GCMarkAssistActive dt=1 g=91
+GCMarkAssistEnd dt=2
+HeapAlloc dt=29 heapalloc_value=191479232
+GCMarkAssistBegin dt=84 stack=3
+GoBlock dt=82 reason_string=13 stack=11
+GoStart dt=4920 g=113 g_seq=2
+GoStatus dt=31 g=123 m=18446744073709551615 gstatus=4
+GoUnblock dt=10 g=123 g_seq=1 stack=10
+GCMarkAssistBegin dt=14 stack=3
+GoStop dt=1855 reason_string=20 stack=9
+GoStart dt=15 g=113 g_seq=3
+GoStop dt=352 reason_string=20 stack=9
+GoStart dt=13 g=113 g_seq=4
+GoBlock dt=261 reason_string=13 stack=11
+GoUnblock dt=3404 g=52 g_seq=17 stack=0
+GoStart dt=7 g=52 g_seq=18
+GoLabel dt=1 label_string=4
+GoBlock dt=1025 reason_string=15 stack=5
+GoUnblock dt=4703 g=67 g_seq=17 stack=0
+GoStart dt=8 g=67 g_seq=18
+GoLabel dt=1 label_string=4
+GoBlock dt=1418 reason_string=15 stack=5
+GoUnblock dt=72 g=23 g_seq=29 stack=0
+GoStart dt=4 g=23 g_seq=30
+GoLabel dt=1 label_string=4
+GoBlock dt=307 reason_string=15 stack=5
+GoUnblock dt=85 g=72 g_seq=33 stack=0
+GoStart dt=5 g=72 g_seq=34
+GoLabel dt=3 label_string=4
+GoBlock dt=30 reason_string=15 stack=5
+GoStatus dt=168 g=68 m=18446744073709551615 gstatus=4
+GoUnblock dt=2 g=68 g_seq=1 stack=0
+GoStart dt=64 g=68 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=55 reason_string=15 stack=5
+GoUnblock dt=10 g=68 g_seq=3 stack=0
+GoStart dt=3 g=68 g_seq=4
+GoLabel dt=2 label_string=2
+GoBlock dt=327 reason_string=15 stack=5
+ProcStop dt=80
+ProcStart dt=25 p=28 p_seq=1
+GoUnblock dt=7 g=30 g_seq=17 stack=0
+GoStart dt=4 g=30 g_seq=18
+GoLabel dt=3 label_string=4
+GoBlock dt=2630 reason_string=15 stack=5
+GoUnblock dt=28 g=72 g_seq=39 stack=0
+GoStart dt=12 g=72 g_seq=40
+GoLabel dt=1 label_string=2
+GoBlock dt=21 reason_string=15 stack=5
+GoUnblock dt=77 g=30 g_seq=19 stack=0
+GoStart dt=10 g=30 g_seq=20
+GoLabel dt=1 label_string=4
+GoBlock dt=3781 reason_string=15 stack=5
+GoUnblock dt=15 g=30 g_seq=21 stack=0
+GoStart dt=5 g=30 g_seq=22
+GoLabel dt=1 label_string=2
+GoBlock dt=2537 reason_string=15 stack=5
+GoUnblock dt=55 g=30 g_seq=23 stack=0
+GoStart dt=5 g=30 g_seq=24
+GoLabel dt=1 label_string=4
+GoBlock dt=478 reason_string=15 stack=5
+GoUnblock dt=8 g=30 g_seq=25 stack=0
+GoStart dt=4 g=30 g_seq=26
+GoLabel dt=1 label_string=2
+GoBlock dt=1039 reason_string=15 stack=5
+GoStart dt=26 g=14 g_seq=37
+GoBlock dt=1631 reason_string=15 stack=5
+GoUnblock dt=22 g=52 g_seq=43 stack=0
+GoStart dt=8 g=52 g_seq=44
+GoLabel dt=3 label_string=2
+GoBlock dt=21 reason_string=15 stack=5
+GoUnblock dt=9 g=52 g_seq=45 stack=0
+GoStart dt=3 g=52 g_seq=46
+GoLabel dt=1 label_string=2
+GoBlock dt=17 reason_string=15 stack=5
+GoUnblock dt=6 g=52 g_seq=47 stack=0
+GoStart dt=3 g=52 g_seq=48
+GoLabel dt=1 label_string=2
+GoUnblock dt=217 g=112 g_seq=3 stack=12
+GoBlock dt=298 reason_string=15 stack=5
+GoUnblock dt=8 g=52 g_seq=49 stack=0
+GoStart dt=3 g=52 g_seq=50
+GoLabel dt=1 label_string=2
+GoBlock dt=1919 reason_string=15 stack=5
+GoStart dt=16 g=121 g_seq=6
+GCMarkAssistEnd dt=6
+HeapAlloc dt=1354 heapalloc_value=192363224
+GoStop dt=25 reason_string=16 stack=4
+GoStart dt=16 g=121 g_seq=7
+GCMarkAssistBegin dt=74 stack=3
+GoStop dt=496 reason_string=20 stack=9
+GoUnblock dt=11 g=52 g_seq=55 stack=0
+GoStart dt=4 g=52 g_seq=56
+GoLabel dt=1 label_string=2
+GoUnblock dt=1666 g=94 g_seq=5 stack=12
+GoBlock dt=18 reason_string=15 stack=5
+GoUnblock dt=18 g=30 g_seq=41 stack=0
+GoStart dt=4 g=30 g_seq=42
+GoLabel dt=1 label_string=2
+GoUnblock dt=1362 g=84 g_seq=5 stack=12
+GoUnblock dt=6 g=125 g_seq=4 stack=12
+GoUnblock dt=5 g=118 g_seq=3 stack=12
+GoBlock dt=9 reason_string=15 stack=5
+GoUnblock dt=10 g=30 g_seq=43 stack=0
+GoStart dt=3 g=30 g_seq=44
+GoLabel dt=1 label_string=2
+GoBlock dt=9 reason_string=15 stack=5
+GoStart dt=6 g=84 g_seq=6
+GCMarkAssistEnd dt=5
+HeapAlloc dt=24 heapalloc_value=192748248
+GCMarkAssistBegin dt=83 stack=3
+GCMarkAssistEnd dt=1516
+HeapAlloc dt=28 heapalloc_value=193231576
+GoStop dt=27 reason_string=16 stack=6
+GoUnblock dt=14 g=22 g_seq=57 stack=0
+GoStart dt=3 g=22 g_seq=58
+GoLabel dt=1 label_string=2
+GoUnblock dt=16 g=81 g_seq=8 stack=12
+GoBlock dt=10 reason_string=15 stack=5
+GoStart dt=11 g=125 g_seq=5
+GCMarkAssistEnd dt=5
+HeapAlloc dt=16 heapalloc_value=193354456
+GoStop dt=95 reason_string=16 stack=6
+GoUnblock dt=34 g=22 g_seq=61 stack=0
+GoStart dt=1 g=22 g_seq=62
+GoLabel dt=1 label_string=2
+GoUnblock dt=1090 g=99 g_seq=6 stack=12
+GoBlock dt=10 reason_string=15 stack=5
+GoStart dt=8 g=81 g_seq=9
+GCMarkAssistEnd dt=5
+HeapAlloc dt=3528 heapalloc_value=195729872
+GoStop dt=10 reason_string=16 stack=6
+GoStart dt=17 g=81 g_seq=10
+GCMarkAssistBegin dt=9 stack=3
+GoBlock dt=34 reason_string=10 stack=18
+GoStart dt=20 g=121 g_seq=11
+GCMarkAssistEnd dt=4
+HeapAlloc dt=44 heapalloc_value=195852752
+GoStop dt=7425 reason_string=16 stack=6
+ProcStop dt=134
+ProcStart dt=14156 p=12 p_seq=2
+GoStart dt=200 g=84 g_seq=10
+GCMarkAssistEnd dt=6
+GCSweepBegin dt=35 stack=27
+EventBatch gen=3 m=169414 time=28114950903409 size=415
+ProcStatus dt=1 p=19 pstatus=1
+GoStatus dt=1 g=54 m=169414 gstatus=2
+GoBlock dt=7 reason_string=15 stack=5
+GoUnblock dt=2586 g=25 g_seq=5 stack=0
+GoStart dt=8 g=25 g_seq=6
+GoLabel dt=1 label_string=4
+GoBlock dt=2605 reason_string=15 stack=5
+GoUnblock dt=1216 g=71 g_seq=3 stack=0
+GoStart dt=7 g=71 g_seq=4
+GoLabel dt=1 label_string=4
+GoBlock dt=672 reason_string=15 stack=5
+GoUnblock dt=7231 g=23 g_seq=15 stack=0
+GoStart dt=8 g=23 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=1212 reason_string=15 stack=5
+GoUnblock dt=11 g=23 g_seq=17 stack=0
+GoStart dt=7 g=23 g_seq=18
+GoLabel dt=3 label_string=2
+GoBlock dt=82 reason_string=15 stack=5
+GoUnblock dt=9 g=23 g_seq=19 stack=0
+GoStart dt=6 g=23 g_seq=20
+GoLabel dt=1 label_string=2
+GoBlock dt=162 reason_string=15 stack=5
+ProcStop dt=99
+ProcStart dt=3257 p=19 p_seq=1
+GoUnblock dt=13 g=68 g_seq=5 stack=0
+GoStart dt=10 g=68 g_seq=6
+GoLabel dt=1 label_string=2
+GoBlock dt=43 reason_string=15 stack=5
+GoUnblock dt=12 g=68 g_seq=7 stack=0
+GoStart dt=2 g=68 g_seq=8
+GoLabel dt=1 label_string=2
+GoBlock dt=133 reason_string=15 stack=5
+GoUnblock dt=23 g=58 g_seq=23 stack=0
+GoStart dt=6 g=58 g_seq=24
+GoLabel dt=1 label_string=2
+GoBlock dt=2822 reason_string=15 stack=5
+GoUnblock dt=11 g=69 g_seq=21 stack=0
+GoStart dt=7 g=69 g_seq=22
+GoLabel dt=2 label_string=2
+GoBlock dt=25 reason_string=15 stack=5
+GoUnblock dt=2937 g=58 g_seq=31 stack=0
+GoStart dt=6 g=58 g_seq=32
+GoLabel dt=1 label_string=4
+GoBlock dt=20 reason_string=15 stack=5
+ProcStop dt=60
+ProcStart dt=31 p=19 p_seq=2
+GoUnblock dt=9 g=56 g_seq=7 stack=0
+GoStart dt=6 g=56 g_seq=8
+GoLabel dt=3 label_string=4
+GoBlock dt=949 reason_string=15 stack=5
+ProcStop dt=41
+ProcStart dt=433 p=19 p_seq=3
+ProcStop dt=43
+ProcStart dt=9942 p=11 p_seq=4
+ProcStop dt=50
+ProcStart dt=2351 p=22 p_seq=6
+GoUnblock dt=15 g=30 g_seq=45 stack=0
+GoStart dt=205 g=30 g_seq=46
+GoLabel dt=1 label_string=2
+GoUnblock dt=145 g=113 g_seq=7 stack=12
+GoBlock dt=21 reason_string=15 stack=5
+GoStart dt=10 g=113 g_seq=8
+GCMarkAssistEnd dt=8
+HeapAlloc dt=48 heapalloc_value=192895704
+GCMarkAssistBegin dt=118 stack=3
+GCMarkAssistEnd dt=272
+HeapAlloc dt=20 heapalloc_value=192936664
+HeapAlloc dt=89 heapalloc_value=192953048
+HeapAlloc dt=41 heapalloc_value=192994008
+HeapAlloc dt=92 heapalloc_value=193059544
+HeapAlloc dt=102 heapalloc_value=193108696
+HeapAlloc dt=94 heapalloc_value=193133272
+HeapAlloc dt=42 heapalloc_value=193141464
+HeapAlloc dt=31 heapalloc_value=193207000
+GCMarkAssistBegin dt=142 stack=3
+GoBlock dt=114 reason_string=13 stack=11
+GoStart dt=179 g=109 g_seq=5
+GCMarkAssistEnd dt=8
+GCMarkAssistBegin dt=54 stack=3
+GCMarkAssistEnd dt=720
+HeapAlloc dt=23 heapalloc_value=194427608
+HeapAlloc dt=456 heapalloc_value=195001048
+GCMarkAssistBegin dt=18 stack=3
+GoBlock dt=22 reason_string=13 stack=11
+GoStart dt=23 g=113 g_seq=10
+GCMarkAssistEnd dt=3
+HeapAlloc dt=54 heapalloc_value=195099352
+GoStop dt=6390 reason_string=16 stack=6
+GoStart dt=23 g=113 g_seq=11
+GCMarkAssistBegin dt=6 stack=3
+GoBlock dt=21 reason_string=10 stack=18
+GoStart dt=33 g=101 g_seq=6
+GCMarkAssistEnd dt=6
+HeapAlloc dt=29 heapalloc_value=196409808
+GCMarkAssistBegin dt=22 stack=3
+GoBlock dt=52 reason_string=10 stack=18
+ProcStop dt=102
+EventBatch gen=3 m=169413 time=28114950897164 size=752
+ProcStatus dt=1 p=0 pstatus=1
+GoStatus dt=6 g=67 m=169413 gstatus=2
+GoBlock dt=11 reason_string=15 stack=5
+GoUnblock dt=18 g=25 g_seq=1 stack=0
+GoStart dt=7 g=25 g_seq=2
+GoLabel dt=1 label_string=2
+GoBlock dt=1315 reason_string=15 stack=5
+GoUnblock dt=11 g=25 g_seq=3 stack=0
+GoStart dt=6 g=25 g_seq=4
+GoLabel dt=1 label_string=2
+GoUnblock dt=4173 g=106 g_seq=1 stack=12
+GoBlock dt=1258 reason_string=15 stack=5
+GoUnblock dt=4804 g=30 g_seq=5 stack=0
+GoStart dt=7 g=30 g_seq=6
+GoLabel dt=1 label_string=4
+GoBlock dt=541 reason_string=15 stack=5
+GoUnblock dt=30 g=30 g_seq=7 stack=0
+GoStart dt=6 g=30 g_seq=8
+GoLabel dt=3 label_string=2
+GoBlock dt=3873 reason_string=15 stack=5
+GoUnblock dt=10 g=30 g_seq=9 stack=0
+GoStart dt=5 g=30 g_seq=10
+GoLabel dt=3 label_string=2
+GoBlock dt=3107 reason_string=15 stack=5
+GoUnblock dt=3672 g=14 g_seq=15 stack=0
+GoStart dt=6 g=14 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=442 reason_string=15 stack=5
+GoStart dt=32 g=83 g_seq=4
+GCMarkAssistEnd dt=7
+HeapAlloc dt=49 heapalloc_value=191962560
+GCMarkAssistBegin dt=108 stack=3
+GoStop dt=885 reason_string=20 stack=9
+GoStart dt=14 g=83 g_seq=5
+GoBlock dt=21 reason_string=13 stack=11
+ProcStop dt=93
+ProcStart dt=38 p=0 p_seq=1
+GoUnblock dt=7 g=53 g_seq=17 stack=0
+GoStart dt=2 g=53 g_seq=18
+GoLabel dt=1 label_string=4
+GoBlock dt=31 reason_string=15 stack=5
+ProcStop dt=89
+ProcStart dt=45 p=11 p_seq=3
+GoUnblock dt=6 g=23 g_seq=35 stack=0
+GoStart dt=14 g=23 g_seq=36
+GoLabel dt=3 label_string=4
+GoBlock dt=2881 reason_string=15 stack=5
+GoUnblock dt=72 g=25 g_seq=17 stack=0
+GoStart dt=6 g=25 g_seq=18
+GoLabel dt=1 label_string=4
+GoBlock dt=19 reason_string=15 stack=5
+GoUnblock dt=58 g=25 g_seq=19 stack=0
+GoStart dt=3 g=25 g_seq=20
+GoLabel dt=1 label_string=4
+GoBlock dt=13 reason_string=15 stack=5
+GoStart dt=16 g=94 g_seq=4
+GoBlock dt=356 reason_string=13 stack=11
+GoUnblock dt=80 g=52 g_seq=27 stack=0
+GoStart dt=9 g=52 g_seq=28
+GoLabel dt=1 label_string=4
+GoBlock dt=2325 reason_string=15 stack=5
+GoUnblock dt=57 g=67 g_seq=31 stack=0
+GoStart dt=4 g=67 g_seq=32
+GoLabel dt=1 label_string=4
+GoBlock dt=2043 reason_string=15 stack=5
+GoUnblock dt=9 g=67 g_seq=33 stack=0
+GoStart dt=171 g=67 g_seq=34
+GoLabel dt=5 label_string=2
+GoBlock dt=21 reason_string=15 stack=5
+ProcStop dt=60
+ProcStart dt=1735 p=25 p_seq=4
+GoUnblock dt=61 g=22 g_seq=39 stack=0
+GoStart dt=178 g=22 g_seq=40
+GoLabel dt=1 label_string=4
+GoBlock dt=66 reason_string=15 stack=5
+GoUnblock dt=8 g=22 g_seq=41 stack=0
+GoStart dt=4 g=22 g_seq=42
+GoLabel dt=1 label_string=2
+GoBlock dt=975 reason_string=15 stack=5
+ProcStop dt=1192
+ProcStart dt=347 p=25 p_seq=5
+GoUnblock dt=11 g=131 g_seq=6 stack=0
+GoStart dt=145 g=131 g_seq=7
+GoBlock dt=21 reason_string=15 stack=2
+GoUnblock dt=30 g=14 g_seq=38 stack=0
+GoStart dt=4 g=14 g_seq=39
+GoLabel dt=1 label_string=2
+GoBlock dt=65 reason_string=15 stack=5
+GoStart dt=26 g=130 g_seq=1
+ProcStatus dt=380 p=38 pstatus=2
+ProcStatus dt=4 p=39 pstatus=2
+ProcStatus dt=4 p=40 pstatus=2
+ProcStatus dt=3 p=41 pstatus=2
+ProcStatus dt=5 p=42 pstatus=2
+ProcStatus dt=5 p=43 pstatus=2
+ProcStatus dt=2 p=44 pstatus=2
+ProcStatus dt=3 p=45 pstatus=2
+ProcStatus dt=4 p=46 pstatus=2
+GoStop dt=1488 reason_string=16 stack=15
+GoUnblock dt=17 g=51 g_seq=45 stack=0
+GoStart dt=3 g=51 g_seq=46
+GoLabel dt=3 label_string=2
+GoBlock dt=1337 reason_string=15 stack=5
+GoStart dt=13 g=81 g_seq=7
+GCMarkAssistEnd dt=6
+GCMarkAssistBegin dt=31 stack=3
+GoBlock dt=20 reason_string=13 stack=11
+GoStart dt=5 g=130 g_seq=2
+HeapAlloc dt=98 heapalloc_value=192314072
+GoBlock dt=348 reason_string=12 stack=16
+GoStart dt=31 g=103 g_seq=2
+GCMarkAssistEnd dt=7
+HeapAlloc dt=53 heapalloc_value=192428760
+GoStop dt=173 reason_string=16 stack=6
+GoUnblock dt=18 g=71 g_seq=29 stack=0
+GoStart dt=4 g=71 g_seq=30
+GoLabel dt=3 label_string=2
+GoBlock dt=1289 reason_string=15 stack=5
+GoStart dt=17 g=126 g_seq=4
+GCMarkAssistBegin dt=125 stack=3
+GoBlock dt=23 reason_string=13 stack=11
+ProcStop dt=76
+ProcStart dt=2523 p=0 p_seq=4
+GoUnblock dt=16 g=30 g_seq=47 stack=0
+GoStart dt=196 g=30 g_seq=48
+GoLabel dt=2 label_string=2
+GoUnblock dt=1834 g=125 g_seq=7 stack=12
+GoBlock dt=17 reason_string=15 stack=5
+GoStart dt=14 g=125 g_seq=8
+GCMarkAssistEnd dt=5
+HeapAlloc dt=69 heapalloc_value=194566872
+GoStop dt=2253 reason_string=16 stack=6
+GoStart dt=2080 g=125 g_seq=9
+GCMarkAssistBegin dt=14 stack=3
+GoBlock dt=41 reason_string=10 stack=18
+GoStart dt=13 g=106 g_seq=8
+GCMarkAssistEnd dt=6
+HeapAlloc dt=53 heapalloc_value=196106704
+GoStop dt=6900 reason_string=16 stack=6
+GoStart dt=57 g=121 g_seq=12
+GCMarkAssistBegin dt=16 stack=3
+GoBlock dt=47 reason_string=10 stack=18
+ProcStop dt=83
+ProcStart dt=11930 p=7 p_seq=7
+GoStart dt=191 g=96 g_seq=8
+GCMarkAssistEnd dt=10
+HeapAlloc dt=59 heapalloc_value=109727392
+HeapAlloc dt=159 heapalloc_value=110336128
+HeapAlloc dt=109 heapalloc_value=110662528
+GCSweepBegin dt=144 stack=28
+GCSweepEnd dt=18 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=3 heapalloc_value=111288448
+GCSweepBegin dt=49 stack=28
+GCSweepEnd dt=14 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=5 heapalloc_value=111591296
+HeapAlloc dt=65 heapalloc_value=111888256
+HeapAlloc dt=228 heapalloc_value=112797056
+HeapAlloc dt=134 heapalloc_value=113322880
+HeapAlloc dt=83 heapalloc_value=113549696
+GCSweepBegin dt=35 stack=28
+GCSweepEnd dt=16 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=3 heapalloc_value=113842560
+HeapAlloc dt=75 heapalloc_value=114080128
+HeapAlloc dt=64 heapalloc_value=114307712
+HeapAlloc dt=134 heapalloc_value=114580608
+HeapAlloc dt=77 heapalloc_value=114670464
+GCSweepBegin dt=33 stack=28
+GCSweepEnd dt=6 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=3 heapalloc_value=114727808
+GCSweepBegin dt=90 stack=27
+EventBatch gen=3 m=169412 time=28114950898429 size=583
+ProcStatus dt=1 p=36 pstatus=2
+ProcStart dt=2 p=36 p_seq=1
+GoStart dt=401 g=83 g_seq=2
+GoBlock dt=1477 reason_string=13 stack=11
+GoStart dt=1208 g=81 g_seq=2
+GCMarkAssistEnd dt=9
+HeapAlloc dt=57 heapalloc_value=191348160
+GoStop dt=42 reason_string=16 stack=4
+GoStart dt=25 g=81 g_seq=3
+GCMarkAssistBegin dt=394 stack=3
+GoBlock dt=1177 reason_string=13 stack=11
+GoStart dt=28 g=106 g_seq=2
+GCMarkAssistEnd dt=10
+HeapAlloc dt=52 heapalloc_value=191503808
+GCMarkAssistBegin dt=52 stack=3
+GoStop dt=60 reason_string=20 stack=9
+GoUnblock dt=73 g=58 g_seq=3 stack=0
+GoStart dt=6 g=58 g_seq=4
+GoLabel dt=3 label_string=4
+GoBlock dt=2860 reason_string=15 stack=5
+GoUnblock dt=3777 g=24 g_seq=9 stack=0
+GoStart dt=6 g=24 g_seq=10
+GoLabel dt=1 label_string=4
+GoBlock dt=41 reason_string=15 stack=5
+GoUnblock dt=1167 g=71 g_seq=9 stack=0
+GoStart dt=7 g=71 g_seq=10
+GoLabel dt=1 label_string=4
+GoBlock dt=1396 reason_string=15 stack=5
+GoUnblock dt=1371 g=57 g_seq=23 stack=0
+GoStart dt=7 g=57 g_seq=24
+GoLabel dt=1 label_string=4
+GoBlock dt=584 reason_string=15 stack=5
+GoUnblock dt=4657 g=23 g_seq=23 stack=0
+GoStart dt=7 g=23 g_seq=24
+GoLabel dt=1 label_string=4
+GoBlock dt=40 reason_string=15 stack=5
+ProcStop dt=82
+ProcStart dt=1505 p=36 p_seq=2
+ProcStop dt=74
+ProcStart dt=19 p=36 p_seq=3
+GoUnblock dt=7 g=23 g_seq=27 stack=0
+GoStart dt=7 g=23 g_seq=28
+GoLabel dt=1 label_string=4
+GoBlock dt=122 reason_string=15 stack=5
+GoUnblock dt=58 g=52 g_seq=25 stack=0
+GoStart dt=6 g=52 g_seq=26
+GoLabel dt=1 label_string=4
+GoBlock dt=4034 reason_string=15 stack=5
+GoUnblock dt=75 g=14 g_seq=19 stack=0
+GoStart dt=6 g=14 g_seq=20
+GoLabel dt=1 label_string=4
+GoBlock dt=2059 reason_string=15 stack=5
+GoUnblock dt=63 g=14 g_seq=21 stack=0
+GoStart dt=4 g=14 g_seq=22
+GoLabel dt=1 label_string=4
+GoBlock dt=56 reason_string=15 stack=5
+ProcStop dt=49
+ProcStart dt=20 p=36 p_seq=4
+GoUnblock dt=6 g=67 g_seq=27 stack=0
+GoStart dt=2 g=67 g_seq=28
+GoLabel dt=1 label_string=4
+GoBlock dt=13 reason_string=15 stack=5
+ProcStop dt=1721
+ProcStart dt=20316 p=36 p_seq=5
+GoStart dt=197 g=94 g_seq=11
+GCMarkAssistEnd dt=7
+HeapAlloc dt=6672 heapalloc_value=196598224
+GoStop dt=15 reason_string=16 stack=6
+GoStart dt=54 g=106 g_seq=9
+GCMarkAssistBegin dt=16 stack=3
+GoBlock dt=32 reason_string=10 stack=18
+GoStart dt=41 g=103 g_seq=6
+GCMarkAssistBegin dt=15 stack=3
+GoBlock dt=84 reason_string=10 stack=18
+ProcStop dt=43
+ProcStart dt=10888 p=5 p_seq=1
+GoStart dt=189 g=120 g_seq=8
+GCMarkAssistEnd dt=7
+HeapAlloc dt=54 heapalloc_value=106433440
+HeapAlloc dt=94 heapalloc_value=106861728
+GCSweepBegin dt=92 stack=28
+GCSweepEnd dt=13 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=4 heapalloc_value=107301920
+HeapAlloc dt=65 heapalloc_value=107394848
+GCSweepBegin dt=32 stack=28
+GCSweepEnd dt=11 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=2 heapalloc_value=107616032
+HeapAlloc dt=60 heapalloc_value=107763488
+HeapAlloc dt=78 heapalloc_value=107953440
+HeapAlloc dt=65 heapalloc_value=108333088
+GCSweepBegin dt=38 stack=28
+GCSweepEnd dt=5 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=1 heapalloc_value=108423200
+GCSweepBegin dt=80 stack=28
+GCSweepEnd dt=9 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=3 heapalloc_value=108682656
+GCSweepBegin dt=61 stack=28
+GCSweepEnd dt=10 swept_value=8192 reclaimed_value=8192
+HeapAlloc dt=4 heapalloc_value=108816544
+HeapAlloc dt=32 heapalloc_value=108994080
+HeapAlloc dt=50 heapalloc_value=109290272
+HeapAlloc dt=112 heapalloc_value=109566240
+HeapAlloc dt=104 heapalloc_value=109973280
+GCSweepBegin dt=66 stack=29
+GCSweepEnd dt=17 swept_value=8192 reclaimed_value=0
+HeapAlloc dt=3 heapalloc_value=110183040
+HeapAlloc dt=86 heapalloc_value=110506880
+HeapAlloc dt=149 heapalloc_value=111151232
+HeapAlloc dt=24 heapalloc_value=111272064
+HeapAlloc dt=53 heapalloc_value=111368064
+HeapAlloc dt=68 heapalloc_value=111632256
+HeapAlloc dt=103 heapalloc_value=112078720
+GCSweepBegin dt=120 stack=28
+GCSweepEnd dt=7 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=3 heapalloc_value=112585472
+HeapAlloc dt=34 heapalloc_value=112616832
+HeapAlloc dt=39 heapalloc_value=112882304
+HeapAlloc dt=141 heapalloc_value=113391232
+HeapAlloc dt=80 heapalloc_value=113664384
+HeapAlloc dt=152 heapalloc_value=114242176
+HeapAlloc dt=104 heapalloc_value=114415616
+HeapAlloc dt=38 heapalloc_value=114527360
+HeapAlloc dt=28 heapalloc_value=114592896
+GCSweepBegin dt=227 stack=27
+EventBatch gen=3 m=169411 time=28114950895719 size=370
+ProcStatus dt=1 p=21 pstatus=1
+GoStatus dt=5 g=85 m=169411 gstatus=2
+GCMarkAssistActive dt=1 g=85
+GCMarkAssistEnd dt=3
+HeapAlloc dt=44 heapalloc_value=190299584
+GoStop dt=38 reason_string=16 stack=4
+GoStart dt=20 g=85 g_seq=1
+GCMarkAssistBegin dt=119 stack=3
+GoStop dt=4468 reason_string=20 stack=9
+GoStart dt=15 g=85 g_seq=2
+GoStop dt=1589 reason_string=20 stack=9
+GoStart dt=8 g=85 g_seq=3
+GCMarkAssistEnd dt=2892
+HeapAlloc dt=33 heapalloc_value=191733184
+GCMarkAssistBegin dt=98 stack=3
+GoStop dt=2309 reason_string=20 stack=9
+GoStart dt=10 g=95 g_seq=3
+GoBlock dt=153 reason_string=13 stack=11
+GoStart dt=5 g=85 g_seq=4
+GoBlock dt=18 reason_string=13 stack=11
+GoUnblock dt=3925 g=58 g_seq=13 stack=0
+GoStart dt=8 g=58 g_seq=14
+GoLabel dt=3 label_string=4
+GoBlock dt=106 reason_string=15 stack=5
+ProcStop dt=1275
+ProcStart dt=21 p=21 p_seq=1
+ProcStop dt=1335
+ProcStart dt=14 p=21 p_seq=2
+GoUnblock dt=1349 g=14 g_seq=9 stack=0
+GoStart dt=8 g=14 g_seq=10
+GoLabel dt=1 label_string=4
+GoBlock dt=255 reason_string=15 stack=5
+GoUnblock dt=2226 g=70 g_seq=9 stack=0
+GoStart dt=8 g=70 g_seq=10
+GoLabel dt=1 label_string=4
+GoBlock dt=398 reason_string=15 stack=5
+GoUnblock dt=8 g=70 g_seq=11 stack=0
+GoStart dt=6 g=70 g_seq=12
+GoLabel dt=1 label_string=2
+GoBlock dt=8210 reason_string=15 stack=5
+GoUnblock dt=12 g=70 g_seq=13 stack=0
+GoStart dt=5 g=70 g_seq=14
+GoLabel dt=2 label_string=2
+GoBlock dt=2354 reason_string=15 stack=5
+GoUnblock dt=93 g=72 g_seq=47 stack=0
+GoStart dt=9 g=72 g_seq=48
+GoLabel dt=1 label_string=4
+GoBlock dt=27 reason_string=15 stack=5
+GoUnblock dt=220 g=72 g_seq=49 stack=0
+GoStart dt=7 g=72 g_seq=50
+GoLabel dt=1 label_string=2
+GoBlock dt=20 reason_string=15 stack=5
+ProcStop dt=61
+ProcStart dt=16474 p=33 p_seq=2
+GoStart dt=3475 g=107 g_seq=4
+GCMarkAssistEnd dt=9
+HeapAlloc dt=52 heapalloc_value=196041168
+GoStop dt=5585 reason_string=16 stack=6
+GoStart dt=15 g=107 g_seq=5
+GCMarkAssistBegin dt=91 stack=3
+GoBlock dt=34 reason_string=10 stack=18
+ProcStop dt=55
+ProcStart dt=1514 p=33 p_seq=3
+ProcStop dt=41
+ProcStart dt=12390 p=8 p_seq=1
+GoStart dt=166 g=100 g_seq=7
+GCMarkAssistEnd dt=5
+HeapAlloc dt=51 heapalloc_value=111353984
+GCSweepBegin dt=133 stack=28
+GCSweepEnd dt=18 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=3 heapalloc_value=112029568
+HeapAlloc dt=68 heapalloc_value=112301312
+HeapAlloc dt=120 heapalloc_value=112739712
+HeapAlloc dt=116 heapalloc_value=113221760
+HeapAlloc dt=53 heapalloc_value=113380224
+HeapAlloc dt=115 heapalloc_value=113768832
+HeapAlloc dt=66 heapalloc_value=114026880
+HeapAlloc dt=127 heapalloc_value=114403328
+GCSweepBegin dt=47 stack=28
+GCSweepEnd dt=10 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=3 heapalloc_value=114503936
+HeapAlloc dt=67 heapalloc_value=114651264
+GCSweepBegin dt=299 stack=27
+EventBatch gen=3 m=169409 time=28114950894853 size=224
+ProcStatus dt=2 p=29 pstatus=1
+GoStatus dt=3 g=126 m=169409 gstatus=2
+HeapAlloc dt=3 heapalloc_value=189824448
+GCMarkAssistBegin dt=163 stack=3
+GoStop dt=1609 reason_string=20 stack=9
+GoStart dt=26 g=98 g_seq=2
+GCMarkAssistBegin dt=17 stack=3
+GCMarkAssistEnd dt=7751
+HeapAlloc dt=77 heapalloc_value=191675840
+GoStop dt=39 reason_string=16 stack=6
+GoStart dt=20 g=116 g_seq=4
+GoBlock dt=302 reason_string=13 stack=11
+GoUnblock dt=4886 g=51 g_seq=13 stack=0
+GoStart dt=8 g=51 g_seq=14
+GoLabel dt=1 label_string=4
+GoBlock dt=2058 reason_string=15 stack=5
+GoUnblock dt=11 g=51 g_seq=15 stack=0
+GoStart dt=6 g=51 g_seq=16
+GoLabel dt=3 label_string=2
+GoBlock dt=2936 reason_string=15 stack=5
+GoUnblock dt=35 g=58 g_seq=21 stack=0
+GoStart dt=6 g=58 g_seq=22
+GoLabel dt=3 label_string=2
+GoBlock dt=7995 reason_string=15 stack=5
+GoUnblock dt=20 g=68 g_seq=9 stack=0
+GoStart dt=6 g=68 g_seq=10
+GoLabel dt=3 label_string=2
+GoBlock dt=92 reason_string=15 stack=5
+GoUnblock dt=8 g=68 g_seq=11 stack=0
+GoStart dt=1 g=68 g_seq=12
+GoLabel dt=1 label_string=2
+GoBlock dt=7039 reason_string=15 stack=5
+ProcStop dt=54
+ProcStart dt=14204 p=3 p_seq=1
+GoStart dt=213 g=94 g_seq=7
+GCMarkAssistBegin dt=29 stack=3
+GoBlock dt=62 reason_string=13 stack=11
+GoStart dt=20 g=124 g_seq=4
+GCMarkAssistEnd dt=6
+GCMarkAssistBegin dt=38 stack=3
+GCMarkAssistEnd dt=98
+HeapAlloc dt=118 heapalloc_value=193911512
+HeapAlloc dt=123 heapalloc_value=194116312
+HeapAlloc dt=352 heapalloc_value=194616024
+GoStop dt=3095 reason_string=16 stack=6
+GoStart dt=26 g=110 g_seq=4
+GCMarkAssistEnd dt=6
+HeapAlloc dt=30 heapalloc_value=195508952
+GoStop dt=4300 reason_string=16 stack=6
+GoStart dt=65 g=110 g_seq=5
+GCMarkAssistBegin dt=10 stack=3
+GoBlock dt=46 reason_string=10 stack=18
+ProcStop dt=124
+EventBatch gen=3 m=169408 time=28114950896863 size=856
+ProcStatus dt=1 p=22 pstatus=1
+GoStatus dt=2 g=105 m=169408 gstatus=2
+GCMarkAssistActive dt=1 g=105
+GCMarkAssistEnd dt=2
+HeapAlloc dt=22 heapalloc_value=190512576
+HeapAlloc dt=94 heapalloc_value=190537152
+GCMarkAssistBegin dt=18 stack=3
+GCMarkAssistEnd dt=1243
+HeapAlloc dt=34 heapalloc_value=190741952
+GCMarkAssistBegin dt=36 stack=3
+GCMarkAssistEnd dt=4423
+HeapAlloc dt=22 heapalloc_value=191413696
+GoStop dt=23 reason_string=16 stack=4
+GoStart dt=15 g=105 g_seq=1
+GCMarkAssistBegin dt=57 stack=3
+GoStop dt=662 reason_string=20 stack=9
+GoStart dt=12 g=105 g_seq=2
+GoStop dt=4139 reason_string=20 stack=9
+GoStart dt=11 g=105 g_seq=3
+GoStop dt=4306 reason_string=20 stack=9
+GoStart dt=15 g=105 g_seq=4
+GoBlock dt=21 reason_string=13 stack=11
+GoUnblock dt=2669 g=58 g_seq=19 stack=0
+GoStart dt=5 g=58 g_seq=20
+GoLabel dt=1 label_string=4
+GoBlock dt=90 reason_string=15 stack=5
+GoUnblock dt=28 g=51 g_seq=17 stack=0
+GoStart dt=5 g=51 g_seq=18
+GoLabel dt=1 label_string=2
+GoBlock dt=5245 reason_string=15 stack=5
+GoUnblock dt=68 g=51 g_seq=19 stack=0
+GoStart dt=8 g=51 g_seq=20
+GoLabel dt=1 label_string=4
+GoBlock dt=14 reason_string=15 stack=5
+GoUnblock dt=6 g=51 g_seq=21 stack=0
+GoStart dt=1 g=51 g_seq=22
+GoLabel dt=1 label_string=2
+GoBlock dt=7035 reason_string=15 stack=5
+GoUnblock dt=13 g=51 g_seq=23 stack=0
+GoStart dt=4 g=51 g_seq=24
+GoLabel dt=2 label_string=2
+GoUnblock dt=188 g=116 g_seq=5 stack=12
+GoBlock dt=65 reason_string=15 stack=5
+GoUnblock dt=9 g=51 g_seq=25 stack=0
+GoStart dt=2 g=51 g_seq=26
+GoLabel dt=1 label_string=2
+GoBlock dt=170 reason_string=15 stack=5
+GoUnblock dt=15 g=51 g_seq=27 stack=0
+GoStart dt=6 g=51 g_seq=28
+GoLabel dt=1 label_string=2
+GoBlock dt=33 reason_string=15 stack=5
+GoUnblock dt=7 g=51 g_seq=29 stack=0
+GoStart dt=6 g=51 g_seq=30
+GoLabel dt=1 label_string=2
+GoBlock dt=159 reason_string=15 stack=5
+GoUnblock dt=8 g=51 g_seq=31 stack=0
+GoStart dt=3 g=51 g_seq=32
+GoLabel dt=1 label_string=2
+GoBlock dt=124 reason_string=15 stack=5
+ProcStop dt=79
+ProcStart dt=18 p=22 p_seq=1
+GoUnblock dt=4 g=29 g_seq=21 stack=0
+GoStart dt=4 g=29 g_seq=22
+GoLabel dt=1 label_string=4
+GoBlock dt=28 reason_string=15 stack=5
+ProcStop dt=45
+ProcStart dt=12 p=22 p_seq=2
+GoUnblock dt=2 g=29 g_seq=23 stack=0
+GoStart dt=1 g=29 g_seq=24
+GoLabel dt=1 label_string=4
+GoBlock dt=19 reason_string=15 stack=5
+GoUnblock dt=45 g=29 g_seq=25 stack=0
+GoStart dt=1 g=29 g_seq=26
+GoLabel dt=1 label_string=4
+GoBlock dt=151 reason_string=15 stack=5
+GoUnblock dt=14 g=52 g_seq=35 stack=0
+GoStart dt=6 g=52 g_seq=36
+GoLabel dt=1 label_string=2
+GoBlock dt=13 reason_string=15 stack=5
+GoUnblock dt=4 g=52 g_seq=37 stack=0
+GoStart dt=3 g=52 g_seq=38
+GoLabel dt=1 label_string=2
+GoBlock dt=127 reason_string=15 stack=5
+GoUnblock dt=7 g=52 g_seq=39 stack=0
+GoStart dt=1 g=52 g_seq=40
+GoLabel dt=1 label_string=2
+GoBlock dt=11 reason_string=15 stack=5
+GoUnblock dt=6 g=52 g_seq=41 stack=0
+GoStart dt=2 g=52 g_seq=42
+GoLabel dt=1 label_string=2
+GoBlock dt=4594 reason_string=15 stack=5
+ProcStop dt=42
+ProcStart dt=1703 p=27 p_seq=42
+GoUnblock dt=17 g=22 g_seq=45 stack=0
+GoStart dt=283 g=22 g_seq=46
+GoLabel dt=2 label_string=2
+GoUnblock dt=103 g=96 g_seq=3 stack=12
+GoUnblock dt=95 g=121 g_seq=5 stack=12
+GoUnblock dt=5 g=126 g_seq=2 stack=12
+GoUnblock dt=529 g=115 g_seq=3 stack=12
+GoBlock dt=552 reason_string=15 stack=5
+GoUnblock dt=31 g=22 g_seq=47 stack=0
+GoStart dt=4 g=22 g_seq=48
+GoLabel dt=1 label_string=2
+GoUnblock dt=763 g=90 g_seq=3 stack=12
+GoBlock dt=39 reason_string=15 stack=5
+GoUnblock dt=12 g=22 g_seq=49 stack=0
+GoStart dt=4 g=22 g_seq=50
+GoLabel dt=1 label_string=2
+GoBlock dt=806 reason_string=15 stack=5
+GoStart dt=18 g=115 g_seq=4
+GCMarkAssistEnd dt=8
+HeapAlloc dt=834 heapalloc_value=192494296
+GCMarkAssistBegin dt=33 stack=3
+GoStop dt=622 reason_string=20 stack=9
+GoUnblock dt=15 g=14 g_seq=44 stack=0
+GoStart dt=5 g=14 g_seq=45
+GoLabel dt=1 label_string=2
+GoBlock dt=1768 reason_string=15 stack=5
+GoUnblock dt=11 g=14 g_seq=46 stack=0
+GoStart dt=4 g=14 g_seq=47
+GoLabel dt=1 label_string=2
+GoBlock dt=20 reason_string=15 stack=5
+GoUnblock dt=10 g=14 g_seq=48 stack=0
+GoStart dt=636 g=14 g_seq=49
+GoLabel dt=1 label_string=2
+GoBlock dt=55 reason_string=15 stack=5
+GoUnblock dt=18 g=14 g_seq=50 stack=0
+GoStart dt=3 g=14 g_seq=51
+GoLabel dt=1 label_string=2
+GoBlock dt=46 reason_string=15 stack=5
+GoUnblock dt=15 g=14 g_seq=52 stack=0
+GoStart dt=4 g=14 g_seq=53
+GoLabel dt=1 label_string=2
+GoBlock dt=26 reason_string=15 stack=5
+GoUnblock dt=29 g=70 g_seq=23 stack=0
+GoStart dt=5 g=70 g_seq=24
+GoLabel dt=1 label_string=2
+GoBlock dt=15 reason_string=15 stack=5
+GoStart dt=30 g=94 g_seq=6
+GCMarkAssistEnd dt=5
+HeapAlloc dt=37 heapalloc_value=192699096
+GoStop dt=34 reason_string=16 stack=6
+GoUnblock dt=9 g=70 g_seq=25 stack=0
+GoStart dt=3 g=70 g_seq=26
+GoLabel dt=1 label_string=2
+GoUnblock dt=190 g=98 g_seq=7 stack=12
+GoUnblock dt=6 g=91 g_seq=1 stack=12
+GoUnblock dt=7 g=123 g_seq=6 stack=12
+GoUnblock dt=5 g=100 g_seq=3 stack=12
+GoUnblock dt=3 g=102 g_seq=3 stack=12
+GoUnblock dt=3 g=103 g_seq=4 stack=12
+GoUnblock dt=5 g=117 g_seq=3 stack=12
+GoBlock dt=45 reason_string=15 stack=5
+GoUnblock dt=8 g=70 g_seq=27 stack=0
+GoStart dt=1 g=70 g_seq=28
+GoLabel dt=1 label_string=2
+GoUnblock dt=1939 g=111 g_seq=7 stack=12
+GoUnblock dt=10 g=101 g_seq=5 stack=12
+GoBlock dt=23 reason_string=15 stack=5
+GoStart dt=15 g=98 g_seq=8
+GCMarkAssistEnd dt=8
+HeapAlloc dt=57 heapalloc_value=193960664
+GCMarkAssistBegin dt=83 stack=3
+GoBlock dt=26 reason_string=13 stack=11
+GoStart dt=7 g=91 g_seq=2
+GCMarkAssistEnd dt=6
+HeapAlloc dt=47 heapalloc_value=194296536
+GCMarkAssistBegin dt=103 stack=3
+GoBlock dt=118 reason_string=13 stack=11
+GoStart dt=20 g=123 g_seq=7
+GCMarkAssistEnd dt=4
+HeapAlloc dt=448 heapalloc_value=195058392
+GoStop dt=6487 reason_string=16 stack=6
+GoStart dt=27 g=123 g_seq=8
+GCMarkAssistBegin dt=10 stack=3
+GoBlock dt=32 reason_string=10 stack=18
+ProcStop dt=78
+ProcStart dt=16845 p=9 p_seq=1
+GoStart dt=21 g=127 g_seq=10
+GCMarkAssistEnd dt=11
+GCSweepBegin dt=37 stack=28
+GCSweepEnd dt=17 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=7 heapalloc_value=110613376
+HeapAlloc dt=77 heapalloc_value=110956160
+HeapAlloc dt=127 heapalloc_value=111501184
+HeapAlloc dt=150 heapalloc_value=112133376
+HeapAlloc dt=103 heapalloc_value=112487168
+HeapAlloc dt=158 heapalloc_value=113166976
+GCSweepBegin dt=50 stack=28
+GCSweepEnd dt=32 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=6 heapalloc_value=113407616
+HeapAlloc dt=173 heapalloc_value=114067840
+HeapAlloc dt=153 heapalloc_value=114430208
+GCSweepBegin dt=35 stack=28
+GCSweepEnd dt=4 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=4 heapalloc_value=114551936
+GCSweepBegin dt=1034 stack=27
+EventBatch gen=3 m=169407 time=28114950901555 size=528
+ProcStatus dt=2 p=4 pstatus=1
+GoStatus dt=1 g=72 m=169407 gstatus=2
+GoBlock dt=7 reason_string=15 stack=5
+GoUnblock dt=1446 g=72 g_seq=3 stack=0
+GoStart dt=9 g=72 g_seq=4
+GoLabel dt=1 label_string=4
+GoBlock dt=394 reason_string=15 stack=5
+GoStart dt=26 g=106 g_seq=3
+GoBlock dt=149 reason_string=13 stack=11
+GoUnblock dt=2557 g=72 g_seq=5 stack=0
+GoStart dt=8 g=72 g_seq=6
+GoLabel dt=1 label_string=4
+GoBlock dt=44 reason_string=15 stack=5
+GoUnblock dt=13 g=72 g_seq=7 stack=0
+GoStart dt=6 g=72 g_seq=8
+GoLabel dt=5 label_string=2
+GoBlock dt=1622 reason_string=15 stack=5
+GoUnblock dt=9 g=72 g_seq=9 stack=0
+GoStart dt=6 g=72 g_seq=10
+GoLabel dt=1 label_string=2
+GoUnblock dt=165 g=87 g_seq=2 stack=12
+GoBlock dt=854 reason_string=15 stack=5
+GoUnblock dt=9 g=72 g_seq=11 stack=0
+GoStart dt=4 g=72 g_seq=12
+GoLabel dt=1 label_string=2
+GoBlock dt=398 reason_string=15 stack=5
+GoUnblock dt=20 g=72 g_seq=13 stack=0
+GoStart dt=5 g=72 g_seq=14
+GoLabel dt=1 label_string=2
+GoBlock dt=1475 reason_string=15 stack=5
+GoStart dt=1158 g=93 g_seq=2
+GoStatus dt=24 g=94 m=18446744073709551615 gstatus=4
+GoUnblock dt=5 g=94 g_seq=1 stack=10
+GCMarkAssistBegin dt=19 stack=3
+GoBlock dt=235 reason_string=13 stack=11
+GoStart dt=9 g=94 g_seq=2
+GoStatus dt=18 g=100 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=100 g_seq=1 stack=10
+GCMarkAssistBegin dt=16 stack=3
+GoStop dt=7669 reason_string=20 stack=9
+GoStart dt=9 g=94 g_seq=3
+GoStop dt=5028 reason_string=20 stack=9
+GoUnblock dt=76 g=23 g_seq=39 stack=0
+GoStart dt=4 g=23 g_seq=40
+GoLabel dt=1 label_string=4
+GoBlock dt=464 reason_string=15 stack=5
+GoUnblock dt=67 g=23 g_seq=41 stack=0
+GoStart dt=151 g=23 g_seq=42
+GoLabel dt=2 label_string=4
+GoBlock dt=3280 reason_string=15 stack=5
+GoStart dt=35 g=113 g_seq=6
+GCMarkAssistEnd dt=7
+GCMarkAssistBegin dt=65 stack=3
+GoBlock dt=63 reason_string=13 stack=11
+ProcStop dt=162
+ProcStart dt=22113 p=24 p_seq=4
+GoStart dt=228 g=111 g_seq=8
+GCMarkAssistEnd dt=11
+HeapAlloc dt=64 heapalloc_value=196401616
+GoStop dt=6120 reason_string=16 stack=6
+GoStart dt=26 g=111 g_seq=9
+GCMarkAssistBegin dt=15 stack=3
+GoBlock dt=35 reason_string=10 stack=18
+ProcStop dt=128
+ProcStart dt=7783 p=1 p_seq=3
+GoStart dt=191 g=87 g_seq=8
+GCMarkAssistEnd dt=9
+GCSweepBegin dt=33 stack=28
+GCSweepEnd dt=16 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=4 heapalloc_value=103833248
+GCSweepBegin dt=56 stack=27
+GCSweepEnd dt=1508 swept_value=4194304 reclaimed_value=3072000
+HeapAlloc dt=33 heapalloc_value=105692064
+HeapAlloc dt=115 heapalloc_value=105976736
+HeapAlloc dt=44 heapalloc_value=106034080
+HeapAlloc dt=109 heapalloc_value=106332320
+HeapAlloc dt=95 heapalloc_value=106715424
+HeapAlloc dt=80 heapalloc_value=106958496
+HeapAlloc dt=97 heapalloc_value=107330592
+HeapAlloc dt=56 heapalloc_value=107460384
+HeapAlloc dt=117 heapalloc_value=107811360
+HeapAlloc dt=62 heapalloc_value=108141856
+HeapAlloc dt=115 heapalloc_value=108472352
+HeapAlloc dt=103 heapalloc_value=108710048
+GCSweepBegin dt=51 stack=28
+GCSweepEnd dt=11 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=4 heapalloc_value=108832928
+HeapAlloc dt=51 heapalloc_value=109134624
+HeapAlloc dt=100 heapalloc_value=109470496
+HeapAlloc dt=98 heapalloc_value=109831200
+HeapAlloc dt=69 heapalloc_value=110087968
+HeapAlloc dt=117 heapalloc_value=110388096
+HeapAlloc dt=150 heapalloc_value=111005312
+HeapAlloc dt=140 heapalloc_value=111509376
+HeapAlloc dt=55 heapalloc_value=111773568
+HeapAlloc dt=105 heapalloc_value=112162048
+GCSweepBegin dt=85 stack=28
+GCSweepEnd dt=8 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=3 heapalloc_value=112560896
+HeapAlloc dt=68 heapalloc_value=112816768
+HeapAlloc dt=47 heapalloc_value=112988800
+HeapAlloc dt=122 heapalloc_value=113464960
+HeapAlloc dt=150 heapalloc_value=114008448
+GCSweepBegin dt=885 stack=27
+EventBatch gen=3 m=169406 time=28114950897134 size=117
+ProcStatus dt=3 p=6 pstatus=1
+GoStatus dt=5 g=52 m=169406 gstatus=2
+GoBlock dt=14 reason_string=15 stack=5
+GoUnblock dt=16 g=52 g_seq=1 stack=0
+GoStart dt=5 g=52 g_seq=2
+GoLabel dt=1 label_string=2
+GoBlock dt=3752 reason_string=15 stack=5
+GoUnblock dt=21 g=52 g_seq=3 stack=0
+GoStart dt=7 g=52 g_seq=4
+GoLabel dt=1 label_string=2
+GoBlock dt=4444 reason_string=15 stack=5
+GoUnblock dt=12 g=52 g_seq=5 stack=0
+GoStart dt=7 g=52 g_seq=6
+GoLabel dt=1 label_string=2
+GoBlock dt=5071 reason_string=15 stack=5
+GoUnblock dt=15 g=52 g_seq=7 stack=0
+GoStart dt=6 g=52 g_seq=8
+GoLabel dt=2 label_string=2
+GoBlock dt=2302 reason_string=15 stack=5
+GoUnblock dt=14 g=52 g_seq=9 stack=0
+GoStart dt=6 g=52 g_seq=10
+GoLabel dt=1 label_string=2
+GoBlock dt=32 reason_string=15 stack=5
+GoUnblock dt=9 g=52 g_seq=11 stack=0
+GoStart dt=6 g=52 g_seq=12
+GoLabel dt=1 label_string=2
+GoBlock dt=22 reason_string=15 stack=5
+ProcStop dt=35
+EventBatch gen=3 m=169405 time=28114950903578 size=119
+ProcStatus dt=2 p=15 pstatus=1
+GoStatus dt=4 g=53 m=169405 gstatus=2
+GoBlock dt=8 reason_string=15 stack=5
+GoUnblock dt=5238 g=25 g_seq=7 stack=0
+GoStart dt=7 g=25 g_seq=8
+GoLabel dt=1 label_string=4
+GoBlock dt=49 reason_string=15 stack=5
+GoUnblock dt=1111 g=58 g_seq=11 stack=0
+GoStart dt=6 g=58 g_seq=12
+GoLabel dt=1 label_string=4
+GoBlock dt=158 reason_string=15 stack=5
+GoStart dt=3143 g=100 g_seq=2
+GoStatus dt=20 g=109 m=18446744073709551615 gstatus=4
+GoUnblock dt=7 g=109 g_seq=1 stack=10
+GCMarkAssistBegin dt=17 stack=3
+GoBlock dt=2307 reason_string=13 stack=11
+GoUnblock dt=2192 g=14 g_seq=13 stack=0
+GoStart dt=4 g=14 g_seq=14
+GoLabel dt=1 label_string=4
+GoBlock dt=1366 reason_string=15 stack=5
+GoUnblock dt=68 g=23 g_seq=21 stack=0
+GoStart dt=4 g=23 g_seq=22
+GoLabel dt=1 label_string=4
+GoBlock dt=21 reason_string=15 stack=5
+ProcStop dt=3159
+EventBatch gen=3 m=169404 time=28114950896316 size=116
+ProcStatus dt=1 p=5 pstatus=1
+GoStatus dt=2 g=14 m=169404 gstatus=2
+GoBlock dt=5 reason_string=15 stack=5
+GoUnblock dt=1436 g=67 g_seq=3 stack=0
+GoStart dt=217 g=67 g_seq=4
+GoLabel dt=3 label_string=4
+GoBlock dt=1945 reason_string=15 stack=5
+GoStart dt=23 g=121 g_seq=3
+GoStop dt=570 reason_string=20 stack=9
+GoStart dt=14 g=121 g_seq=4
+GoBlock dt=1389 reason_string=13 stack=11
+GoUnblock dt=13 g=51 g_seq=3 stack=0
+GoStart dt=7 g=51 g_seq=4
+GoLabel dt=1 label_string=2
+GoBlock dt=1439 reason_string=15 stack=5
+GoUnblock dt=17 g=14 g_seq=5 stack=0
+GoStart dt=5 g=14 g_seq=6
+GoLabel dt=2 label_string=2
+GoBlock dt=11474 reason_string=15 stack=5
+GoStart dt=4166 g=109 g_seq=3
+GoBlock dt=39 reason_string=13 stack=11
+GoStart dt=20 g=119 g_seq=4
+GCMarkAssistEnd dt=7
+HeapAlloc dt=68 heapalloc_value=191921600
+GCMarkAssistBegin dt=69 stack=3
+GoBlock dt=23 reason_string=13 stack=11
+ProcStop dt=59
+EventBatch gen=3 m=169402 time=28114950895074 size=135
+ProcStatus dt=2 p=9 pstatus=1
+GoStatus dt=2 g=25 m=169402 gstatus=2
+GoBlock dt=14 reason_string=15 stack=5
+GoStart dt=54 g=98 g_seq=1
+GCMarkAssistBegin dt=99 stack=3
+GCMarkAssistEnd dt=1187
+HeapAlloc dt=68 heapalloc_value=190463424
+GoStop dt=53 reason_string=16 stack=6
+GoStart dt=10 g=82 g_seq=1
+GCMarkAssistBegin dt=82 stack=3
+GoStop dt=2699 reason_string=20 stack=9
+GoStart dt=13 g=107 g_seq=2
+GCMarkAssistEnd dt=7
+GCMarkAssistBegin dt=49 stack=3
+GoBlock dt=852 reason_string=13 stack=11
+GoStart dt=29 g=90 g_seq=2
+GCMarkAssistEnd dt=3
+HeapAlloc dt=36 heapalloc_value=191233472
+GCMarkAssistBegin dt=825 stack=3
+GoBlock dt=392 reason_string=13 stack=11
+GoUnblock dt=21 g=67 g_seq=5 stack=0
+GoStart dt=5 g=67 g_seq=6
+GoLabel dt=1 label_string=2
+GoBlock dt=8638 reason_string=15 stack=5
+GoUnblock dt=9 g=67 g_seq=7 stack=0
+GoStart dt=4 g=67 g_seq=8
+GoLabel dt=1 label_string=2
+GoBlock dt=145 reason_string=15 stack=5
+GoUnblock dt=14 g=67 g_seq=9 stack=0
+GoStart dt=5 g=67 g_seq=10
+GoLabel dt=1 label_string=2
+GoBlock dt=7067 reason_string=15 stack=5
+ProcStop dt=23
+EventBatch gen=3 m=169401 time=28114950894770 size=505
+ProcStatus dt=1 p=8 pstatus=1
+GoStatus dt=1 g=130 m=169401 gstatus=2
+ProcsChange dt=124 procs_value=48 stack=1
+GCActive dt=3 gc_seq=4
+HeapAlloc dt=600 heapalloc_value=190152128
+HeapAlloc dt=16 heapalloc_value=190160320
+HeapAlloc dt=11095 heapalloc_value=191741376
+HeapAlloc dt=179 heapalloc_value=191749568
+HeapAlloc dt=14244 heapalloc_value=192011712
+HeapAlloc dt=292 heapalloc_value=192019904
+HeapAlloc dt=244 heapalloc_value=192028096
+HeapAlloc dt=3225 heapalloc_value=192036288
+HeapAlloc dt=39 heapalloc_value=192044192
+HeapAlloc dt=60 heapalloc_value=192052000
+HeapAlloc dt=462 heapalloc_value=192060192
+HeapAlloc dt=85 heapalloc_value=192068384
+HeapAlloc dt=341 heapalloc_value=192076576
+HeapAlloc dt=314 heapalloc_value=192142112
+GoStop dt=8367 reason_string=16 stack=14
+GoUnblock dt=274 g=30 g_seq=27 stack=0
+GoStart dt=6 g=30 g_seq=28
+GoLabel dt=1 label_string=2
+GoBlock dt=312 reason_string=15 stack=5
+GoUnblock dt=403 g=30 g_seq=29 stack=0
+GoStart dt=4 g=30 g_seq=30
+GoLabel dt=1 label_string=2
+GoBlock dt=773 reason_string=15 stack=5
+GoUnblock dt=7 g=30 g_seq=31 stack=0
+GoStart dt=3 g=30 g_seq=32
+GoLabel dt=1 label_string=2
+GoBlock dt=8 reason_string=15 stack=5
+GoStart dt=14 g=112 g_seq=4
+GCMarkAssistEnd dt=6
+HeapAlloc dt=45 heapalloc_value=192297760
+GCMarkAssistBegin dt=107 stack=3
+GoStop dt=897 reason_string=20 stack=9
+GoUnblock dt=15 g=70 g_seq=19 stack=0
+GoStart dt=5 g=70 g_seq=20
+GoLabel dt=1 label_string=2
+GoUnblock dt=1479 g=105 g_seq=5 stack=12
+GoBlock dt=2280 reason_string=15 stack=5
+GoUnblock dt=12 g=70 g_seq=21 stack=0
+GoStart dt=5 g=70 g_seq=22
+GoLabel dt=2 label_string=2
+GoBlock dt=1253 reason_string=15 stack=5
+GoUnblock dt=23 g=71 g_seq=35 stack=0
+GoStart dt=8 g=71 g_seq=36
+GoLabel dt=2 label_string=2
+GoBlock dt=26 reason_string=15 stack=5
+GoUnblock dt=6 g=71 g_seq=37 stack=0
+GoStart dt=3 g=71 g_seq=38
+GoLabel dt=1 label_string=2
+GoBlock dt=9 reason_string=15 stack=5
+GoUnblock dt=3 g=71 g_seq=39 stack=0
+GoStart dt=2 g=71 g_seq=40
+GoLabel dt=1 label_string=2
+GoBlock dt=21 reason_string=15 stack=5
+GoUnblock dt=3 g=71 g_seq=41 stack=0
+GoStart dt=1 g=71 g_seq=42
+GoLabel dt=1 label_string=2
+GoUnblock dt=82 g=109 g_seq=4 stack=12
+GoUnblock dt=6 g=106 g_seq=4 stack=12
+GoUnblock dt=103 g=111 g_seq=4 stack=12
+GoUnblock dt=5 g=112 g_seq=6 stack=12
+GoUnblock dt=6 g=96 g_seq=5 stack=12
+GoUnblock dt=4 g=119 g_seq=5 stack=12
+GoUnblock dt=6 g=122 g_seq=1 stack=12
+GoUnblock dt=11 g=97 g_seq=5 stack=12
+GoUnblock dt=4 g=107 g_seq=3 stack=12
+GoUnblock dt=106 g=92 g_seq=3 stack=12
+GoUnblock dt=4 g=116 g_seq=9 stack=12
+GoUnblock dt=5 g=82 g_seq=8 stack=12
+GoBlock dt=9 reason_string=15 stack=5
+GoStart dt=12 g=111 g_seq=5
+GCMarkAssistEnd dt=5
+HeapAlloc dt=22 heapalloc_value=192797400
+GCMarkAssistBegin dt=75 stack=3
+GoStop dt=22 reason_string=20 stack=9
+GoUnblock dt=11 g=25 g_seq=53 stack=0
+GoStart dt=4 g=25 g_seq=54
+GoLabel dt=1 label_string=2
+GoUnblock dt=1354 g=95 g_seq=4 stack=12
+GoUnblock dt=9 g=90 g_seq=6 stack=12
+GoUnblock dt=6 g=113 g_seq=9 stack=12
+GoUnblock dt=3 g=89 g_seq=6 stack=12
+GoBlock dt=30 reason_string=15 stack=5
+GoStart dt=10 g=112 g_seq=7
+GCMarkAssistEnd dt=5
+GCMarkAssistBegin dt=28 stack=3
+GoBlock dt=587 reason_string=13 stack=11
+GoStart dt=6 g=116 g_seq=10
+GCMarkAssistEnd dt=5
+HeapAlloc dt=54 heapalloc_value=194337496
+GCMarkAssistBegin dt=51 stack=3
+GoBlock dt=21 reason_string=13 stack=11
+GoStart dt=8 g=82 g_seq=9
+GCMarkAssistEnd dt=6
+HeapAlloc dt=63 heapalloc_value=194525912
+GCMarkAssistBegin dt=51 stack=3
+GoBlock dt=45 reason_string=13 stack=11
+GoStart dt=22 g=95 g_seq=5
+GCMarkAssistEnd dt=6
+HeapAlloc dt=1508 heapalloc_value=195394264
+GoStop dt=6034 reason_string=16 stack=6
+GoStart dt=48 g=95 g_seq=6
+GCMarkAssistBegin dt=18 stack=3
+GoBlock dt=48 reason_string=10 stack=18
+ProcStop dt=85
+ProcStart dt=20619 p=17 p_seq=1
+GoStart dt=1507 g=130 g_seq=7
+EventBatch gen=3 m=169400 time=28114950894819 size=671
+ProcStatus dt=1 p=12 pstatus=1
+GoStatus dt=2 g=112 m=169400 gstatus=2
+GCMarkAssistBegin dt=120 stack=3
+GCMarkAssistEnd dt=3298
+HeapAlloc dt=41 heapalloc_value=190758336
+GCMarkAssistBegin dt=29 stack=3
+GoStop dt=2271 reason_string=20 stack=9
+GoStart dt=14 g=112 g_seq=1
+GoStop dt=569 reason_string=20 stack=9
+GoUnblock dt=2436 g=54 g_seq=1 stack=0
+GoStart dt=18 g=54 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=31 reason_string=15 stack=5
+GoUnblock dt=5090 g=57 g_seq=13 stack=0
+GoStart dt=6 g=57 g_seq=14
+GoLabel dt=1 label_string=4
+GoBlock dt=734 reason_string=15 stack=5
+GoUnblock dt=4144 g=71 g_seq=15 stack=0
+GoStart dt=5 g=71 g_seq=16
+GoLabel dt=1 label_string=4
+GoUnblock dt=415 g=111 g_seq=2 stack=12
+GoBlock dt=5674 reason_string=15 stack=5
+GoUnblock dt=9 g=71 g_seq=17 stack=0
+GoStart dt=5 g=71 g_seq=18
+GoLabel dt=1 label_string=2
+GoUnblock dt=693 g=83 g_seq=3 stack=12
+GoBlock dt=4708 reason_string=15 stack=5
+GoUnblock dt=14 g=71 g_seq=19 stack=0
+GoStart dt=6 g=71 g_seq=20
+GoLabel dt=3 label_string=2
+GoBlock dt=1294 reason_string=15 stack=5
+GoUnblock dt=11 g=71 g_seq=21 stack=0
+GoStart dt=4 g=71 g_seq=22
+GoLabel dt=1 label_string=2
+GoBlock dt=2434 reason_string=15 stack=5
+GoUnblock dt=8 g=71 g_seq=23 stack=0
+GoStart dt=3 g=71 g_seq=24
+GoLabel dt=1 label_string=2
+GoBlock dt=4227 reason_string=15 stack=5
+ProcStop dt=41
+ProcStart dt=3260 p=12 p_seq=1
+GoUnblock dt=16 g=30 g_seq=33 stack=0
+GoStart dt=143 g=30 g_seq=34
+GoLabel dt=1 label_string=2
+GoUnblock dt=553 g=89 g_seq=3 stack=12
+GoUnblock dt=971 g=127 g_seq=3 stack=12
+GoBlock dt=39 reason_string=15 stack=5
+GoStart dt=21 g=89 g_seq=4
+GCMarkAssistEnd dt=10
+HeapAlloc dt=1100 heapalloc_value=192510680
+GoStop dt=24 reason_string=16 stack=6
+GoUnblock dt=12 g=22 g_seq=51 stack=0
+GoStart dt=5 g=22 g_seq=52
+GoLabel dt=3 label_string=2
+GoBlock dt=1678 reason_string=15 stack=5
+GoUnblock dt=13 g=22 g_seq=53 stack=0
+GoStart dt=277 g=22 g_seq=54
+GoLabel dt=3 label_string=2
+GoBlock dt=960 reason_string=15 stack=5
+GoUnblock dt=8 g=22 g_seq=55 stack=0
+GoStart dt=4 g=22 g_seq=56
+GoLabel dt=1 label_string=2
+GoUnblock dt=583 g=99 g_seq=3 stack=12
+GoUnblock dt=5 g=83 g_seq=6 stack=12
+GoUnblock dt=5 g=124 g_seq=3 stack=12
+GoUnblock dt=6 g=105 g_seq=9 stack=12
+GoUnblock dt=1280 g=128 g_seq=3 stack=12
+GoUnblock dt=8 g=101 g_seq=3 stack=12
+GoBlock dt=7 reason_string=15 stack=5
+GoStart dt=11 g=128 g_seq=4
+GCMarkAssistEnd dt=7
+HeapAlloc dt=38 heapalloc_value=193297112
+GCMarkAssistBegin dt=118 stack=3
+GCMarkAssistEnd dt=44
+HeapAlloc dt=21 heapalloc_value=193403608
+GoStop dt=87 reason_string=16 stack=6
+GoStart dt=15 g=101 g_seq=4
+GCMarkAssistEnd dt=5
+HeapAlloc dt=58 heapalloc_value=193608408
+GCMarkAssistBegin dt=92 stack=3
+GoBlock dt=22 reason_string=13 stack=11
+GoStart dt=10 g=128 g_seq=5
+HeapAlloc dt=34 heapalloc_value=193829592
+HeapAlloc dt=166 heapalloc_value=194026200
+HeapAlloc dt=236 heapalloc_value=194419416
+HeapAlloc dt=885 heapalloc_value=195279576
+GoStop dt=6734 reason_string=16 stack=6
+GoUnblock dt=1628 g=130 g_seq=3 stack=0
+GoStart dt=136 g=130 g_seq=4
+HeapAlloc dt=62 heapalloc_value=196532688
+HeapAlloc dt=28 heapalloc_value=196540880
+HeapAlloc dt=22 heapalloc_value=196549072
+HeapAlloc dt=26 heapalloc_value=196557264
+HeapAlloc dt=38 heapalloc_value=196565456
+HeapAlloc dt=51 heapalloc_value=196573648
+GoStop dt=3032 reason_string=16 stack=19
+GoStart dt=10 g=117 g_seq=5
+GCMarkAssistBegin dt=16 stack=3
+GoBlock dt=51 reason_string=10 stack=18
+ProcStop dt=29
+ProcStart dt=9381 p=4 p_seq=2
+GoStart dt=190 g=105 g_seq=16
+GCMarkAssistEnd dt=4
+HeapAlloc dt=76 heapalloc_value=105214112
+HeapAlloc dt=103 heapalloc_value=105517216
+HeapAlloc dt=84 heapalloc_value=105642912
+HeapAlloc dt=85 heapalloc_value=105864096
+GCSweepBegin dt=188 stack=28
+GCSweepEnd dt=17 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=2 heapalloc_value=106376096
+HeapAlloc dt=43 heapalloc_value=106518816
+HeapAlloc dt=43 heapalloc_value=106756384
+HeapAlloc dt=82 heapalloc_value=106978976
+HeapAlloc dt=42 heapalloc_value=107091616
+GCSweepBegin dt=23 stack=28
+GCSweepEnd dt=8 swept_value=8192 reclaimed_value=8192
+HeapAlloc dt=3 heapalloc_value=107310112
+HeapAlloc dt=35 heapalloc_value=107372960
+HeapAlloc dt=65 heapalloc_value=107583264
+HeapAlloc dt=141 heapalloc_value=108018976
+HeapAlloc dt=161 heapalloc_value=108567968
+GCSweepBegin dt=85 stack=28
+GCSweepEnd dt=9 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=4 heapalloc_value=108808352
+HeapAlloc dt=90 heapalloc_value=109241120
+HeapAlloc dt=139 heapalloc_value=109623584
+HeapAlloc dt=162 heapalloc_value=110175008
+HeapAlloc dt=164 heapalloc_value=110769024
+HeapAlloc dt=246 heapalloc_value=111705984
+HeapAlloc dt=187 heapalloc_value=112446208
+HeapAlloc dt=161 heapalloc_value=113148544
+HeapAlloc dt=295 heapalloc_value=114145664
+GCSweepBegin dt=159 stack=28
+GCSweepEnd dt=5 swept_value=8192 reclaimed_value=8192
+HeapAlloc dt=7 heapalloc_value=114588800
+GCSweepBegin dt=48 stack=27
+EventBatch gen=3 m=169398 time=28114950899192 size=165
+ProcStatus dt=1 p=37 pstatus=2
+ProcStart dt=2 p=37 p_seq=1
+GoStatus dt=3261 g=29 m=18446744073709551615 gstatus=4
+GoUnblock dt=6 g=29 g_seq=1 stack=0
+GoStart dt=10 g=29 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=1840 reason_string=15 stack=5
+GoStart dt=16 g=86 g_seq=3
+GoBlock dt=1090 reason_string=13 stack=11
+ProcStop dt=1389
+ProcStart dt=16 p=37 p_seq=2
+GoStart dt=1537 g=84 g_seq=4
+GCMarkAssistEnd dt=7
+HeapAlloc dt=55 heapalloc_value=191847872
+GCMarkAssistBegin dt=85 stack=3
+GoBlock dt=249 reason_string=13 stack=11
+GoUnblock dt=1134 g=58 g_seq=9 stack=0
+GoStart dt=7 g=58 g_seq=10
+GoLabel dt=1 label_string=4
+GoBlock dt=27 reason_string=15 stack=5
+GoUnblock dt=2190 g=53 g_seq=9 stack=0
+GoStart dt=8 g=53 g_seq=10
+GoLabel dt=1 label_string=4
+GoBlock dt=21 reason_string=15 stack=5
+GoUnblock dt=2156 g=25 g_seq=13 stack=0
+GoStart dt=4 g=25 g_seq=14
+GoLabel dt=1 label_string=4
+GoBlock dt=20 reason_string=15 stack=5
+GoUnblock dt=1089 g=14 g_seq=7 stack=0
+GoStart dt=4 g=14 g_seq=8
+GoLabel dt=1 label_string=4
+GoBlock dt=107 reason_string=15 stack=5
+GoUnblock dt=1081 g=24 g_seq=15 stack=0
+GoStart dt=6 g=24 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=19 reason_string=15 stack=5
+ProcStop dt=1075
+EventBatch gen=3 m=169397 time=28114950897533 size=734
+ProcStatus dt=1 p=25 pstatus=1
+GoStatus dt=2 g=118 m=169397 gstatus=2
+GCMarkAssistActive dt=1 g=118
+GCMarkAssistEnd dt=2
+HeapAlloc dt=37 heapalloc_value=190684608
+GCMarkAssistBegin dt=79 stack=3
+GoBlock dt=1327 reason_string=13 stack=11
+ProcStop dt=4643
+ProcStart dt=23 p=25 p_seq=1
+GoUnblock dt=20 g=53 g_seq=1 stack=0
+GoStart dt=9 g=53 g_seq=2
+GoLabel dt=1 label_string=2
+GoBlock dt=2529 reason_string=15 stack=5
+GoStart dt=3244 g=123 g_seq=2
+GoStatus dt=30 g=97 m=18446744073709551615 gstatus=4
+GoUnblock dt=13 g=97 g_seq=1 stack=10
+GCMarkAssistBegin dt=20 stack=3
+GoStop dt=1976 reason_string=20 stack=9
+GoStart dt=15 g=123 g_seq=3
+GoStop dt=2654 reason_string=20 stack=9
+GoStart dt=12 g=123 g_seq=4
+GoStop dt=2704 reason_string=20 stack=9
+GoUnblock dt=9 g=24 g_seq=17 stack=0
+GoStart dt=4 g=24 g_seq=18
+GoLabel dt=1 label_string=2
+GoBlock dt=4029 reason_string=15 stack=5
+GoUnblock dt=14 g=24 g_seq=19 stack=0
+GoStart dt=4 g=24 g_seq=20
+GoLabel dt=1 label_string=2
+GoBlock dt=534 reason_string=15 stack=5
+GoUnblock dt=4 g=24 g_seq=21 stack=0
+GoStart dt=4 g=24 g_seq=22
+GoLabel dt=1 label_string=2
+GoBlock dt=250 reason_string=15 stack=5
+GoUnblock dt=12 g=24 g_seq=23 stack=0
+GoStart dt=4 g=24 g_seq=24
+GoLabel dt=1 label_string=2
+GoBlock dt=22 reason_string=15 stack=5
+ProcStop dt=71
+ProcStart dt=244 p=25 p_seq=2
+ProcStop dt=54
+ProcStart dt=25 p=25 p_seq=3
+GoUnblock dt=8 g=53 g_seq=21 stack=0
+GoStart dt=7 g=53 g_seq=22
+GoLabel dt=1 label_string=4
+GoBlock dt=86 reason_string=15 stack=5
+GoUnblock dt=59 g=56 g_seq=3 stack=0
+GoStart dt=4 g=56 g_seq=4
+GoLabel dt=1 label_string=4
+GoBlock dt=6219 reason_string=15 stack=5
+GoUnblock dt=52 g=56 g_seq=5 stack=0
+GoStart dt=4 g=56 g_seq=6
+GoLabel dt=1 label_string=4
+GoBlock dt=98 reason_string=15 stack=5
+GoUnblock dt=61 g=14 g_seq=27 stack=0
+GoStart dt=4 g=14 g_seq=28
+GoLabel dt=1 label_string=4
+GoBlock dt=32 reason_string=15 stack=5
+GoUnblock dt=13 g=14 g_seq=29 stack=0
+GoStart dt=5 g=14 g_seq=30
+GoLabel dt=1 label_string=2
+GoBlock dt=2423 reason_string=15 stack=5
+ProcStop dt=36
+ProcStart dt=7135 p=31 p_seq=2
+GoStart dt=228 g=127 g_seq=4
+GCMarkAssistEnd dt=9
+HeapAlloc dt=2440 heapalloc_value=192666328
+GoStop dt=28 reason_string=16 stack=4
+GoUnblock dt=19 g=52 g_seq=57 stack=0
+GoStart dt=6 g=52 g_seq=58
+GoLabel dt=1 label_string=2
+GoBlock dt=1072 reason_string=15 stack=5
+GoUnblock dt=16 g=52 g_seq=59 stack=0
+GoStart dt=6 g=52 g_seq=60
+GoLabel dt=1 label_string=2
+GoBlock dt=19 reason_string=15 stack=5
+GoUnblock dt=17 g=54 g_seq=39 stack=0
+GoStart dt=4 g=54 g_seq=40
+GoLabel dt=1 label_string=2
+GoBlock dt=2352 reason_string=15 stack=5
+GoStart dt=22 g=127 g_seq=8
+GCMarkAssistBegin dt=127 stack=3
+GoBlock dt=42 reason_string=13 stack=11
+GoStart dt=766 g=122 g_seq=2
+GCMarkAssistEnd dt=2
+HeapAlloc dt=19 heapalloc_value=194902744
+GCMarkAssistBegin dt=66 stack=3
+STWBegin dt=12586 kind_string=21 stack=21
+GoUnblock dt=699 g=91 g_seq=3 stack=22
+GoUnblock dt=5 g=127 g_seq=9 stack=22
+GoUnblock dt=3 g=112 g_seq=8 stack=22
+GoUnblock dt=4 g=82 g_seq=10 stack=22
+GoUnblock dt=3 g=116 g_seq=11 stack=22
+GoUnblock dt=3 g=93 g_seq=8 stack=22
+GoUnblock dt=4 g=109 g_seq=6 stack=22
+GoUnblock dt=5 g=115 g_seq=9 stack=22
+GoUnblock dt=7 g=120 g_seq=7 stack=22
+GoUnblock dt=7 g=105 g_seq=15 stack=22
+GoUnblock dt=6 g=96 g_seq=7 stack=22
+GoUnblock dt=3 g=118 g_seq=6 stack=22
+GoUnblock dt=4 g=87 g_seq=7 stack=22
+GoUnblock dt=4 g=84 g_seq=9 stack=22
+GoUnblock dt=6 g=100 g_seq=6 stack=22
+GoUnblock dt=29 g=86 g_seq=6 stack=23
+HeapAlloc dt=53 heapalloc_value=103773088
+GoStatus dt=10 g=3 m=18446744073709551615 gstatus=4
+GoUnblock dt=7 g=3 g_seq=1 stack=24
+GCEnd dt=3 gc_seq=5
+HeapGoal dt=6 heapgoal_value=207987496
+ProcsChange dt=45 procs_value=48 stack=25
+STWEnd dt=399
+GoUnblock dt=5992 g=130 g_seq=6 stack=26
+GCMarkAssistEnd dt=9
+HeapAlloc dt=11 heapalloc_value=103816864
+GCSweepBegin dt=79 stack=27
+GCSweepEnd dt=1631 swept_value=8388608 reclaimed_value=3260416
+HeapAlloc dt=14 heapalloc_value=104810272
+HeapAlloc dt=104 heapalloc_value=105001504
+HeapAlloc dt=107 heapalloc_value=105164960
+HeapAlloc dt=55 heapalloc_value=105308320
+HeapAlloc dt=200 heapalloc_value=105798560
+HeapAlloc dt=119 heapalloc_value=106091424
+HeapAlloc dt=118 heapalloc_value=106359712
+HeapAlloc dt=47 heapalloc_value=106488096
+HeapAlloc dt=44 heapalloc_value=106763424
+HeapAlloc dt=26 heapalloc_value=106820768
+HeapAlloc dt=106 heapalloc_value=107277344
+HeapAlloc dt=131 heapalloc_value=107656992
+HeapAlloc dt=71 heapalloc_value=107790880
+GCSweepBegin dt=42 stack=28
+GCSweepEnd dt=6 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=3 heapalloc_value=107860512
+HeapAlloc dt=71 heapalloc_value=108305696
+HeapAlloc dt=113 heapalloc_value=108608928
+HeapAlloc dt=129 heapalloc_value=108890272
+HeapAlloc dt=147 heapalloc_value=109508896
+HeapAlloc dt=88 heapalloc_value=109776544
+HeapAlloc dt=140 heapalloc_value=110286976
+HeapAlloc dt=151 heapalloc_value=110900096
+HeapAlloc dt=152 heapalloc_value=111433600
+HeapAlloc dt=136 heapalloc_value=111931264
+HeapAlloc dt=67 heapalloc_value=112248064
+HeapAlloc dt=209 heapalloc_value=113046144
+HeapAlloc dt=213 heapalloc_value=113949056
+HeapAlloc dt=236 heapalloc_value=114471168
+HeapAlloc dt=90 heapalloc_value=114663552
+GCSweepBegin dt=45 stack=28
+GCSweepEnd dt=10 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=3 heapalloc_value=114703232
+GCSweepBegin dt=54 stack=27
+EventBatch gen=3 m=169396 time=28114950894859 size=148
+ProcStatus dt=2 p=1 pstatus=1
+GoStatus dt=4 g=86 m=169396 gstatus=2
+GCMarkAssistActive dt=2 g=86
+GCMarkAssistEnd dt=2
+HeapAlloc dt=42 heapalloc_value=189889984
+GoStop dt=32 reason_string=16 stack=4
+GoUnblock dt=117 g=69 g_seq=1 stack=0
+GoStart dt=6 g=69 g_seq=2
+GoLabel dt=1 label_string=2
+GoBlock dt=2672 reason_string=15 stack=5
+GoStart dt=16 g=84 g_seq=1
+GoStop dt=2565 reason_string=20 stack=9
+GoStart dt=17 g=84 g_seq=2
+GoBlock dt=886 reason_string=13 stack=11
+ProcStop dt=2581
+ProcStart dt=17 p=1 p_seq=1
+ProcStop dt=4400
+ProcStart dt=16 p=1 p_seq=2
+GoStart dt=24 g=87 g_seq=3
+GCMarkAssistEnd dt=8
+HeapAlloc dt=70 heapalloc_value=191782336
+GCMarkAssistBegin dt=85 stack=3
+GoBlock dt=1055 reason_string=13 stack=11
+GoUnblock dt=20 g=54 g_seq=9 stack=0
+GoStart dt=7 g=54 g_seq=10
+GoLabel dt=3 label_string=2
+GoBlock dt=230 reason_string=15 stack=5
+GoUnblock dt=12 g=54 g_seq=11 stack=0
+GoStart dt=6 g=54 g_seq=12
+GoLabel dt=1 label_string=2
+GoBlock dt=1754 reason_string=15 stack=5
+GoUnblock dt=12 g=54 g_seq=13 stack=0
+GoStart dt=8 g=54 g_seq=14
+GoLabel dt=3 label_string=2
+GoBlock dt=1379 reason_string=15 stack=5
+ProcStop dt=15
+EventBatch gen=3 m=169395 time=28114950898507 size=532
+ProcStatus dt=2 p=14 pstatus=1
+GoStatus dt=2 g=103 m=169395 gstatus=2
+GCMarkAssistActive dt=1 g=103
+GCMarkAssistEnd dt=3
+HeapAlloc dt=40 heapalloc_value=190873024
+HeapAlloc dt=75 heapalloc_value=191036864
+GCMarkAssistBegin dt=65 stack=3
+GoBlock dt=6142 reason_string=13 stack=11
+GoStart dt=19 g=98 g_seq=3
+GCMarkAssistBegin dt=20 stack=3
+GoStop dt=1738 reason_string=20 stack=9
+GoStart dt=16 g=98 g_seq=4
+GoBlock dt=2102 reason_string=13 stack=11
+GoUnblock dt=2317 g=71 g_seq=5 stack=0
+GoStart dt=5 g=71 g_seq=6
+GoLabel dt=2 label_string=4
+GoBlock dt=128 reason_string=15 stack=5
+GoUnblock dt=2283 g=71 g_seq=13 stack=0
+GoStart dt=7 g=71 g_seq=14
+GoLabel dt=1 label_string=4
+GoBlock dt=97 reason_string=15 stack=5
+GoUnblock dt=1168 g=24 g_seq=13 stack=0
+GoStart dt=7 g=24 g_seq=14
+GoLabel dt=1 label_string=4
+GoBlock dt=1399 reason_string=15 stack=5
+GoUnblock dt=3752 g=23 g_seq=25 stack=0
+GoStart dt=6 g=23 g_seq=26
+GoLabel dt=3 label_string=4
+GoBlock dt=1167 reason_string=15 stack=5
+GoUnblock dt=99 g=52 g_seq=23 stack=0
+GoStart dt=35 g=52 g_seq=24
+GoLabel dt=1 label_string=4
+GoBlock dt=47 reason_string=15 stack=5
+GoUnblock dt=81 g=67 g_seq=19 stack=0
+GoStart dt=8 g=67 g_seq=20
+GoLabel dt=3 label_string=4
+GoBlock dt=3975 reason_string=15 stack=5
+GoUnblock dt=18 g=67 g_seq=21 stack=0
+GoStart dt=6 g=67 g_seq=22
+GoLabel dt=1 label_string=2
+GoBlock dt=80 reason_string=15 stack=5
+GoUnblock dt=18 g=67 g_seq=23 stack=0
+GoStart dt=6 g=67 g_seq=24
+GoLabel dt=1 label_string=2
+GoBlock dt=22 reason_string=15 stack=5
+GoUnblock dt=3174 g=14 g_seq=23 stack=0
+GoStart dt=7 g=14 g_seq=24
+GoLabel dt=1 label_string=4
+GoBlock dt=22 reason_string=15 stack=5
+GoUnblock dt=9 g=14 g_seq=25 stack=0
+GoStart dt=2 g=14 g_seq=26
+GoLabel dt=1 label_string=2
+GoBlock dt=13 reason_string=15 stack=5
+GoUnblock dt=65 g=29 g_seq=29 stack=0
+GoStart dt=8 g=29 g_seq=30
+GoLabel dt=1 label_string=4
+GoBlock dt=18 reason_string=15 stack=5
+GoUnblock dt=13 g=29 g_seq=31 stack=0
+GoStart dt=6 g=29 g_seq=32
+GoLabel dt=2 label_string=2
+GoBlock dt=21 reason_string=15 stack=5
+GoUnblock dt=19 g=24 g_seq=37 stack=0
+GoStart dt=4 g=24 g_seq=38
+GoLabel dt=2 label_string=2
+GoBlock dt=33 reason_string=15 stack=5
+GoUnblock dt=8 g=24 g_seq=39 stack=0
+GoStart dt=3 g=24 g_seq=40
+GoLabel dt=1 label_string=2
+GoBlock dt=32 reason_string=15 stack=5
+GoUnblock dt=80 g=25 g_seq=29 stack=0
+GoStart dt=9 g=25 g_seq=30
+GoLabel dt=1 label_string=4
+GoBlock dt=20 reason_string=15 stack=5
+GoUnblock dt=27 g=24 g_seq=43 stack=0
+GoStart dt=6 g=24 g_seq=44
+GoLabel dt=1 label_string=2
+GoBlock dt=185 reason_string=15 stack=5
+GoUnblock dt=9 g=24 g_seq=45 stack=0
+GoStart dt=6 g=24 g_seq=46
+GoLabel dt=3 label_string=2
+GoBlock dt=10 reason_string=15 stack=5
+GoUnblock dt=6 g=24 g_seq=47 stack=0
+GoStart dt=1 g=24 g_seq=48
+GoLabel dt=1 label_string=2
+GoBlock dt=41 reason_string=15 stack=5
+ProcStop dt=59
+ProcStart dt=21430 p=4 p_seq=1
+GoStart dt=238 g=102 g_seq=4
+GCMarkAssistEnd dt=10
+HeapAlloc dt=38 heapalloc_value=196352464
+GoStop dt=5526 reason_string=16 stack=6
+ProcStop dt=240
+ProcStart dt=11401 p=6 p_seq=1
+GoStart dt=196 g=109 g_seq=7
+GCMarkAssistEnd dt=5
+HeapAlloc dt=54 heapalloc_value=108264736
+HeapAlloc dt=117 heapalloc_value=108527008
+HeapAlloc dt=77 heapalloc_value=108783776
+HeapAlloc dt=90 heapalloc_value=109036320
+HeapAlloc dt=77 heapalloc_value=109355808
+HeapAlloc dt=106 heapalloc_value=109678240
+HeapAlloc dt=70 heapalloc_value=110030624
+HeapAlloc dt=90 heapalloc_value=110205056
+HeapAlloc dt=51 heapalloc_value=110347136
+HeapAlloc dt=63 heapalloc_value=110588800
+HeapAlloc dt=69 heapalloc_value=110912384
+HeapAlloc dt=42 heapalloc_value=111111808
+HeapAlloc dt=105 heapalloc_value=111452032
+HeapAlloc dt=89 heapalloc_value=111822720
+HeapAlloc dt=106 heapalloc_value=112260352
+HeapAlloc dt=55 heapalloc_value=112397056
+HeapAlloc dt=62 heapalloc_value=112682368
+HeapAlloc dt=137 heapalloc_value=113281920
+GCSweepBegin dt=50 stack=28
+GCSweepEnd dt=8 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=4 heapalloc_value=113424000
+HeapAlloc dt=92 heapalloc_value=113908096
+GCSweepBegin dt=145 stack=31
+EventBatch gen=3 m=169394 time=28114950898962 size=373
+ProcStatus dt=1 p=20 pstatus=1
+GoStatus dt=4 g=108 m=169394 gstatus=2
+GCMarkAssistActive dt=1 g=108
+GCMarkAssistEnd dt=2
+HeapAlloc dt=25 heapalloc_value=191102400
+GCMarkAssistBegin dt=104 stack=3
+GCMarkAssistEnd dt=2445
+HeapAlloc dt=47 heapalloc_value=191372736
+GCMarkAssistBegin dt=11 stack=3
+GoBlock dt=1789 reason_string=13 stack=11
+GoUnblock dt=19 g=22 g_seq=3 stack=0
+GoStart dt=7 g=22 g_seq=4
+GoLabel dt=1 label_string=2
+GoBlock dt=3342 reason_string=15 stack=5
+GoUnblock dt=2752 g=71 g_seq=1 stack=0
+GoStart dt=7 g=71 g_seq=2
+GoLabel dt=1 label_string=4
+GoBlock dt=269 reason_string=15 stack=5
+GoStart dt=4308 g=111 g_seq=3
+GCMarkAssistEnd dt=7
+HeapAlloc dt=58 heapalloc_value=191888832
+GCMarkAssistBegin dt=42 stack=3
+GoBlock dt=148 reason_string=13 stack=11
+GoUnblock dt=1120 g=72 g_seq=25 stack=0
+GoStart dt=5 g=72 g_seq=26
+GoLabel dt=1 label_string=4
+GoBlock dt=640 reason_string=15 stack=5
+GoStart dt=1105 g=102 g_seq=2
+GoStatus dt=19 g=117 m=18446744073709551615 gstatus=4
+GoUnblock dt=4 g=117 g_seq=1 stack=10
+GCMarkAssistBegin dt=13 stack=3
+GoBlock dt=32 reason_string=13 stack=11
+GoStart dt=8 g=117 g_seq=2
+GoStatus dt=19 g=128 m=18446744073709551615 gstatus=4
+GoUnblock dt=2 g=128 g_seq=1 stack=10
+GCMarkAssistBegin dt=5 stack=3
+GoBlock dt=15 reason_string=13 stack=11
+GoStart dt=5 g=128 g_seq=2
+GoStatus dt=12 g=92 m=18446744073709551615 gstatus=4
+GoUnblock dt=1 g=92 g_seq=1 stack=10
+GCMarkAssistBegin dt=9 stack=3
+GoBlock dt=14 reason_string=13 stack=11
+GoStart dt=7 g=92 g_seq=2
+GoStatus dt=17 g=101 m=18446744073709551615 gstatus=4
+GoUnblock dt=1 g=101 g_seq=1 stack=10
+GCMarkAssistBegin dt=7 stack=3
+GoBlock dt=10 reason_string=13 stack=11
+GoStart dt=5 g=101 g_seq=2
+GoStatus dt=11 g=99 m=18446744073709551615 gstatus=4
+GoUnblock dt=1 g=99 g_seq=1 stack=10
+GCMarkAssistBegin dt=8 stack=3
+GoBlock dt=15 reason_string=13 stack=11
+GoStart dt=6 g=99 g_seq=2
+GoStatus dt=11 g=89 m=18446744073709551615 gstatus=4
+GoUnblock dt=1 g=89 g_seq=1 stack=10
+GCMarkAssistBegin dt=10 stack=3
+GoBlock dt=15 reason_string=13 stack=11
+GoStart dt=4 g=89 g_seq=2
+GoStatus dt=11 g=124 m=18446744073709551615 gstatus=4
+GoUnblock dt=2 g=124 g_seq=1 stack=10
+GCMarkAssistBegin dt=8 stack=3
+GoBlock dt=34 reason_string=13 stack=11
+GoStart dt=5 g=124 g_seq=2
+GoStatus dt=10 g=96 m=18446744073709551615 gstatus=4
+GoUnblock dt=1 g=96 g_seq=1 stack=10
+GCMarkAssistBegin dt=4 stack=3
+GoBlock dt=14 reason_string=13 stack=11
+GoStart dt=4 g=96 g_seq=2
+GCMarkAssistBegin dt=8 stack=3
+GoBlock dt=22 reason_string=13 stack=11
+ProcStop dt=16
+EventBatch gen=3 m=169393 time=28114950894837 size=271
+ProcStatus dt=2 p=16 pstatus=1
+GoStatus dt=2 g=69 m=169393 gstatus=2
+GoBlock dt=122 reason_string=15 stack=5
+GoStatus dt=2224 g=83 m=169393 gstatus=1
+GoStart dt=1 g=83 g_seq=1
+GoStatus dt=33 g=121 m=18446744073709551615 gstatus=4
+GoUnblock dt=10 g=121 g_seq=1 stack=10
+GCMarkAssistBegin dt=16 stack=3
+GoStop dt=620 reason_string=20 stack=9
+GoStart dt=11 g=121 g_seq=2
+GoStatus dt=18 g=110 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=110 g_seq=1 stack=10
+GCMarkAssistBegin dt=12 stack=3
+GoStop dt=1840 reason_string=20 stack=9
+GoStart dt=16 g=110 g_seq=2
+GoStatus dt=19 g=125 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=125 g_seq=1 stack=10
+GCMarkAssistBegin dt=10 stack=3
+GoBlock dt=1799 reason_string=13 stack=11
+GoStart dt=1317 g=127 g_seq=2
+GoStatus dt=21 g=116 m=18446744073709551615 gstatus=4
+GoUnblock dt=9 g=116 g_seq=1 stack=10
+GCMarkAssistBegin dt=16 stack=3
+GoBlock dt=473 reason_string=13 stack=11
+GoStart dt=28 g=116 g_seq=2
+GoStatus dt=14 g=119 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=119 g_seq=1 stack=10
+GCMarkAssistBegin dt=12 stack=3
+GoStop dt=570 reason_string=20 stack=9
+GoStart dt=24 g=119 g_seq=2
+GoStatus dt=18 g=95 m=18446744073709551615 gstatus=4
+GoUnblock dt=3 g=95 g_seq=1 stack=10
+GCMarkAssistBegin dt=11 stack=3
+GoBlock dt=5206 reason_string=13 stack=11
+ProcStop dt=2547
+ProcStart dt=26 p=16 p_seq=1
+GoUnblock dt=87 g=58 g_seq=15 stack=0
+GoStart dt=8 g=58 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=579 reason_string=15 stack=5
+GoUnblock dt=23 g=69 g_seq=15 stack=0
+GoStart dt=5 g=69 g_seq=16
+GoLabel dt=1 label_string=2
+GoBlock dt=1028 reason_string=15 stack=5
+GoUnblock dt=2356 g=14 g_seq=11 stack=0
+GoStart dt=6 g=14 g_seq=12
+GoLabel dt=1 label_string=4
+GoBlock dt=1282 reason_string=15 stack=5
+ProcStop dt=8
+EventBatch gen=3 m=169392 time=28114950898262 size=651
+ProcStatus dt=1 p=3 pstatus=1
+GoStatus dt=1 g=106 m=169392 gstatus=2
+GCMarkAssistActive dt=1 g=106
+GCMarkAssistEnd dt=3
+HeapAlloc dt=34 heapalloc_value=190807488
+HeapAlloc dt=125 heapalloc_value=190832064
+GCMarkAssistBegin dt=46 stack=3
+GoBlock dt=1002 reason_string=13 stack=11
+GoStart dt=28 g=82 g_seq=2
+GoBlock dt=1446 reason_string=13 stack=11
+GoStart dt=34 g=120 g_seq=3
+GCMarkAssistEnd dt=2
+HeapAlloc dt=32 heapalloc_value=191282624
+GCMarkAssistBegin dt=115 stack=3
+GoBlock dt=25 reason_string=13 stack=11
+GoStart dt=17 g=112 g_seq=2
+GoBlock dt=2074 reason_string=13 stack=11
+GoUnblock dt=2604 g=24 g_seq=5 stack=0
+GoStart dt=7 g=24 g_seq=6
+GoLabel dt=2 label_string=4
+GoBlock dt=278 reason_string=15 stack=5
+GoUnblock dt=2267 g=58 g_seq=5 stack=0
+GoStart dt=9 g=58 g_seq=6
+GoLabel dt=1 label_string=4
+GoBlock dt=316 reason_string=15 stack=5
+GoUnblock dt=1167 g=24 g_seq=7 stack=0
+GoStart dt=6 g=24 g_seq=8
+GoLabel dt=1 label_string=4
+GoBlock dt=171 reason_string=15 stack=5
+GoUnblock dt=1155 g=71 g_seq=7 stack=0
+GoStart dt=6 g=71 g_seq=8
+GoLabel dt=1 label_string=4
+GoBlock dt=32 reason_string=15 stack=5
+GoStart dt=3316 g=109 g_seq=2
+GoStatus dt=28 g=114 m=18446744073709551615 gstatus=4
+GoUnblock dt=8 g=114 g_seq=1 stack=10
+GCMarkAssistBegin dt=18 stack=3
+GoStop dt=3860 reason_string=20 stack=9
+GoUnblock dt=14 g=57 g_seq=31 stack=0
+GoStart dt=5 g=57 g_seq=32
+GoLabel dt=3 label_string=2
+GoBlock dt=3324 reason_string=15 stack=5
+GoUnblock dt=97 g=24 g_seq=25 stack=0
+GoStart dt=6 g=24 g_seq=26
+GoLabel dt=1 label_string=4
+GoBlock dt=1146 reason_string=15 stack=5
+GoUnblock dt=73 g=24 g_seq=27 stack=0
+GoStart dt=4 g=24 g_seq=28
+GoLabel dt=1 label_string=4
+GoUnblock dt=2655 g=81 g_seq=4 stack=12
+GoBlock dt=402 reason_string=15 stack=5
+GoUnblock dt=9 g=24 g_seq=29 stack=0
+GoStart dt=7 g=24 g_seq=30
+GoLabel dt=1 label_string=2
+GoBlock dt=492 reason_string=15 stack=5
+GoUnblock dt=21 g=69 g_seq=27 stack=0
+GoStart dt=6 g=69 g_seq=28
+GoLabel dt=1 label_string=2
+GoBlock dt=20 reason_string=15 stack=5
+GoUnblock dt=11 g=69 g_seq=29 stack=0
+GoStart dt=3 g=69 g_seq=30
+GoLabel dt=1 label_string=2
+GoBlock dt=459 reason_string=15 stack=5
+GoStart dt=168 g=116 g_seq=6
+GCMarkAssistEnd dt=8
+HeapAlloc dt=61 heapalloc_value=192232224
+GCMarkAssistBegin dt=39 stack=3
+GoBlock dt=2360 reason_string=13 stack=11
+ProcStop dt=53
+ProcStart dt=14760 p=10 p_seq=2
+GoStart dt=211 g=99 g_seq=5
+GCMarkAssistBegin dt=93 stack=3
+GoBlock dt=33 reason_string=13 stack=11
+GoStart dt=9 g=120 g_seq=6
+GCMarkAssistBegin dt=78 stack=3
+GoBlock dt=102 reason_string=13 stack=11
+GoStart dt=31 g=108 g_seq=2
+GCMarkAssistEnd dt=6
+HeapAlloc dt=307 heapalloc_value=194853592
+GoStop dt=7166 reason_string=16 stack=6
+GoStart dt=86 g=128 g_seq=6
+HeapAlloc dt=4873 heapalloc_value=196688336
+GoStop dt=12 reason_string=16 stack=6
+ProcStop dt=395
+ProcStart dt=8670 p=3 p_seq=2
+GoStart dt=193 g=93 g_seq=9
+GCMarkAssistEnd dt=7
+HeapAlloc dt=78 heapalloc_value=104465440
+HeapAlloc dt=122 heapalloc_value=104583584
+HeapAlloc dt=92 heapalloc_value=104769312
+HeapAlloc dt=127 heapalloc_value=104935968
+GCSweepBegin dt=109 stack=28
+GCSweepEnd dt=9 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=2 heapalloc_value=105138720
+HeapAlloc dt=77 heapalloc_value=105373856
+GCSweepBegin dt=157 stack=28
+GCSweepEnd dt=8 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=3 heapalloc_value=105708448
+GCSweepBegin dt=56 stack=28
+GCSweepEnd dt=11 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=4 heapalloc_value=105880480
+GCSweepBegin dt=48 stack=28
+GCSweepEnd dt=10 swept_value=32768 reclaimed_value=32768
+HeapAlloc dt=4 heapalloc_value=106124192
+GCSweepBegin dt=79 stack=28
+GCSweepEnd dt=7 swept_value=8192 reclaimed_value=8192
+HeapAlloc dt=2 heapalloc_value=106283168
+HeapAlloc dt=98 heapalloc_value=106567968
+HeapAlloc dt=116 heapalloc_value=107070496
+HeapAlloc dt=30 heapalloc_value=107146272
+HeapAlloc dt=105 heapalloc_value=107517728
+HeapAlloc dt=169 heapalloc_value=108084512
+HeapAlloc dt=187 heapalloc_value=108649888
+HeapAlloc dt=158 heapalloc_value=109200160
+HeapAlloc dt=200 heapalloc_value=109872160
+GCSweepBegin dt=116 stack=28
+GCSweepEnd dt=9 swept_value=24576 reclaimed_value=24576
+HeapAlloc dt=3 heapalloc_value=110229632
+HeapAlloc dt=54 heapalloc_value=110441344
+HeapAlloc dt=76 heapalloc_value=110711680
+HeapAlloc dt=100 heapalloc_value=111216768
+HeapAlloc dt=156 heapalloc_value=111708032
+HeapAlloc dt=55 heapalloc_value=111972224
+HeapAlloc dt=122 heapalloc_value=112391424
+HeapAlloc dt=160 heapalloc_value=113099392
+HeapAlloc dt=191 heapalloc_value=113713536
+HeapAlloc dt=158 heapalloc_value=114362368
+GCSweepBegin dt=88 stack=28
+GCSweepEnd dt=14 swept_value=16384 reclaimed_value=16384
+HeapAlloc dt=9 heapalloc_value=114520320
+HeapAlloc dt=56 heapalloc_value=114636672
+GCSweepBegin dt=180 stack=27
+EventBatch gen=3 m=169390 time=28114950895313 size=834
+ProcStatus dt=1 p=27 pstatus=1
+GoStatus dt=3 g=82 m=169390 gstatus=2
+GCMarkAssistActive dt=1 g=82
+GCMarkAssistEnd dt=2
+HeapAlloc dt=28 heapalloc_value=190143936
+HeapAlloc dt=270 heapalloc_value=190201280
+HeapAlloc dt=96 heapalloc_value=190209472
+HeapAlloc dt=29 heapalloc_value=190258624
+HeapAlloc dt=107 heapalloc_value=190356928
+GCMarkAssistBegin dt=57 stack=3
+GCMarkAssistEnd dt=502
+HeapAlloc dt=27 heapalloc_value=190430656
+GoStop dt=26 reason_string=16 stack=4
+GoStart dt=12 g=131 g_seq=3
+GoSyscallBegin dt=17 p_seq=1 stack=7
+GoSyscallEnd dt=205
+GoSyscallBegin dt=19 p_seq=2 stack=7
+GoSyscallEnd dt=2580
+GoSyscallBegin dt=16 p_seq=3 stack=7
+GoSyscallEnd dt=71
+GoSyscallBegin dt=15 p_seq=4 stack=7
+GoSyscallEnd dt=72
+GoSyscallBegin dt=25 p_seq=5 stack=7
+GoSyscallEnd dt=76
+GoSyscallBegin dt=12 p_seq=6 stack=7
+GoSyscallEnd dt=69
+GoSyscallBegin dt=11 p_seq=7 stack=7
+GoSyscallEnd dt=62
+GoSyscallBegin dt=13 p_seq=8 stack=7
+GoSyscallEnd dt=67
+GoSyscallBegin dt=16 p_seq=9 stack=7
+GoSyscallEnd dt=64
+GoSyscallBegin dt=12 p_seq=10 stack=7
+GoSyscallEnd dt=65
+GoSyscallBegin dt=14 p_seq=11 stack=7
+GoSyscallEnd dt=226
+GoSyscallBegin dt=14 p_seq=12 stack=7
+GoSyscallEnd dt=69
+GoSyscallBegin dt=17 p_seq=13 stack=7
+GoSyscallEnd dt=72
+GoSyscallBegin dt=15 p_seq=14 stack=7
+GoSyscallEnd dt=66
+GoSyscallBegin dt=18 p_seq=15 stack=7
+GoSyscallEnd dt=63
+GoSyscallBegin dt=13 p_seq=16 stack=7
+GoSyscallEnd dt=69
+GoSyscallBegin dt=17 p_seq=17 stack=7
+GoSyscallEnd dt=66
+GoSyscallBegin dt=109 p_seq=18 stack=7
+GoSyscallEnd dt=73
+GoSyscallBegin dt=13 p_seq=19 stack=7
+GoSyscallEnd dt=68
+GoSyscallBegin dt=16 p_seq=20 stack=7
+GoSyscallEnd dt=63
+GoSyscallBegin dt=15 p_seq=21 stack=7
+GoSyscallEnd dt=82
+GoSyscallBegin dt=11 p_seq=22 stack=7
+GoSyscallEnd dt=177
+GoSyscallBegin dt=14 p_seq=23 stack=7
+GoSyscallEnd dt=62
+GoSyscallBegin dt=13 p_seq=24 stack=7
+GoSyscallEnd dt=90
+GoSyscallBegin dt=11 p_seq=25 stack=7
+GoSyscallEnd dt=69
+GoSyscallBegin dt=13 p_seq=26 stack=7
+GoSyscallEnd dt=65
+GoSyscallBegin dt=15 p_seq=27 stack=7
+GoSyscallEnd dt=72
+GoSyscallBegin dt=15 p_seq=28 stack=7
+GoSyscallEnd dt=73
+GoSyscallBegin dt=18 p_seq=29 stack=7
+GoSyscallEnd dt=80
+GoSyscallBegin dt=21 p_seq=30 stack=7
+GoSyscallEnd dt=72
+GoSyscallBegin dt=17 p_seq=31 stack=7
+GoSyscallEnd dt=67
+GoSyscallBegin dt=12 p_seq=32 stack=7
+GoSyscallEnd dt=171
+GoSyscallBegin dt=16 p_seq=33 stack=7
+GoSyscallEnd dt=76
+GoSyscallBegin dt=18 p_seq=34 stack=7
+GoSyscallEnd dt=78
+GoSyscallBegin dt=13 p_seq=35 stack=7
+GoSyscallEnd dt=77
+GoSyscallBegin dt=20 p_seq=36 stack=7
+GoSyscallEnd dt=77
+GoBlock dt=16 reason_string=15 stack=2
+GoUnblock dt=1400 g=54 g_seq=3 stack=0
+GoStart dt=8 g=54 g_seq=4
+GoLabel dt=1 label_string=4
+GoBlock dt=2659 reason_string=15 stack=5
+GoUnblock dt=13 g=22 g_seq=5 stack=0
+GoStart dt=5 g=22 g_seq=6
+GoLabel dt=1 label_string=2
+GoBlock dt=2498 reason_string=15 stack=5
+GoUnblock dt=10 g=22 g_seq=7 stack=0
+GoStart dt=7 g=22 g_seq=8
+GoLabel dt=2 label_string=2
+GoBlock dt=4213 reason_string=15 stack=5
+GoUnblock dt=1324 g=57 g_seq=25 stack=0
+GoStart dt=11 g=57 g_seq=26
+GoLabel dt=1 label_string=4
+GoBlock dt=256 reason_string=15 stack=5
+GoUnblock dt=8 g=57 g_seq=27 stack=0
+GoStart dt=5 g=57 g_seq=28
+GoLabel dt=1 label_string=2
+GoBlock dt=485 reason_string=15 stack=5
+GoUnblock dt=8 g=57 g_seq=29 stack=0
+GoStart dt=6 g=57 g_seq=30
+GoLabel dt=3 label_string=2
+GoBlock dt=504 reason_string=15 stack=5
+ProcStop dt=3771
+ProcStart dt=29 p=27 p_seq=37
+GoUnblock dt=9 g=22 g_seq=15 stack=0
+GoStart dt=5 g=22 g_seq=16
+GoLabel dt=1 label_string=4
+GoBlock dt=123 reason_string=15 stack=5
+GoUnblock dt=19 g=28 g_seq=7 stack=0
+GoStart dt=2 g=28 g_seq=8
+GoLabel dt=1 label_string=2
+GoBlock dt=67 reason_string=15 stack=5
+GoUnblock dt=73 g=72 g_seq=29 stack=0
+GoStart dt=8 g=72 g_seq=30
+GoLabel dt=1 label_string=4
+GoBlock dt=1357 reason_string=15 stack=5
+GoUnblock dt=71 g=53 g_seq=15 stack=0
+GoStart dt=5 g=53 g_seq=16
+GoLabel dt=2 label_string=4
+GoBlock dt=53 reason_string=15 stack=5
+ProcStop dt=61
+ProcStart dt=29 p=27 p_seq=38
+GoUnblock dt=4 g=72 g_seq=35 stack=0
+GoStart dt=4 g=72 g_seq=36
+GoLabel dt=1 label_string=4
+GoBlock dt=775 reason_string=15 stack=5
+GoUnblock dt=11 g=72 g_seq=37 stack=0
+GoStart dt=5 g=72 g_seq=38
+GoLabel dt=3 label_string=2
+GoBlock dt=2553 reason_string=15 stack=5
+GoUnblock dt=23 g=54 g_seq=27 stack=0
+GoStart dt=7 g=54 g_seq=28
+GoLabel dt=1 label_string=2
+GoBlock dt=5185 reason_string=15 stack=5
+ProcStop dt=46
+ProcStart dt=1102 p=27 p_seq=39
+GoUnblock dt=17 g=14 g_seq=31 stack=0
+GoStart dt=191 g=14 g_seq=32
+GoLabel dt=5 label_string=2
+GoBlock dt=26 reason_string=15 stack=5
+GoUnblock dt=7 g=14 g_seq=33 stack=0
+GoStart dt=2 g=14 g_seq=34
+GoLabel dt=1 label_string=2
+GoBlock dt=81 reason_string=15 stack=5
+GoUnblock dt=11 g=14 g_seq=35 stack=0
+GoStart dt=6 g=14 g_seq=36
+GoLabel dt=1 label_string=2
+GoUnblock dt=257 g=97 g_seq=3 stack=12
+GoStop dt=1123 reason_string=16 stack=13
+GoUnblock dt=612 g=131 g_seq=4 stack=0
+GoStart dt=5 g=131 g_seq=5
+GoSyscallBegin dt=23 p_seq=40 stack=7
+GoSyscallEnd dt=200
+GoSyscallBegin dt=13 p_seq=41 stack=7
+GoSyscallEnd dt=179
+GoBlock dt=6 reason_string=15 stack=2
+ProcStop dt=31
+ProcStart dt=1232 p=22 p_seq=3
+GoUnblock dt=16 g=14 g_seq=40 stack=0
+GoStart dt=157 g=14 g_seq=41
+GoLabel dt=2 label_string=2
+GoUnblock dt=343 g=103 g_seq=1 stack=12
+GoBlock dt=2805 reason_string=15 stack=5
+ProcStop dt=68
+ProcStart dt=17 p=22 p_seq=4
+GoUnblock dt=3 g=14 g_seq=42 stack=0
+GoStart dt=4 g=14 g_seq=43
+GoLabel dt=1 label_string=4
+GoUnblock dt=609 g=116 g_seq=7 stack=12
+GoBlock dt=9 reason_string=15 stack=5
+GoStart dt=10 g=116 g_seq=8
+GCMarkAssistEnd dt=7
+HeapAlloc dt=60 heapalloc_value=192527064
+GCMarkAssistBegin dt=41 stack=3
+GoBlock dt=47 reason_string=13 stack=11
+GoUnblock dt=13 g=30 g_seq=35 stack=0
+GoStart dt=4 g=30 g_seq=36
+GoLabel dt=2 label_string=2
+GoBlock dt=266 reason_string=15 stack=5
+GoStart dt=16 g=105 g_seq=8
+GoBlock dt=18 reason_string=13 stack=11
+GoUnblock dt=55 g=54 g_seq=29 stack=0
+GoStart dt=8 g=54 g_seq=30
+GoLabel dt=1 label_string=4
+GoBlock dt=13 reason_string=15 stack=5
+GoUnblock dt=10 g=54 g_seq=31 stack=0
+GoStart dt=1 g=54 g_seq=32
+GoLabel dt=1 label_string=2
+GoBlock dt=46 reason_string=15 stack=5
+ProcStop dt=57
+ProcStart dt=14 p=22 p_seq=5
+GoUnblock dt=4 g=54 g_seq=33 stack=0
+GoStart dt=159 g=54 g_seq=34
+GoLabel dt=1 label_string=4
+GoBlock dt=8 reason_string=15 stack=5
+ProcStop dt=32
+ProcStart dt=3156 p=29 p_seq=1
+GoUnblock dt=15 g=71 g_seq=43 stack=0
+GoStart dt=165 g=71 g_seq=44
+GoLabel dt=1 label_string=2
+GoBlock dt=1463 reason_string=15 stack=5
+GoStart dt=22 g=118 g_seq=4
+GCMarkAssistEnd dt=6
+HeapAlloc dt=903 heapalloc_value=195328728
+GoStop dt=6525 reason_string=16 stack=6
+GoStart dt=46 g=118 g_seq=5
+GCMarkAssistBegin dt=12 stack=3
+GoBlock dt=31 reason_string=13 stack=11
+ProcStop dt=194
+EventBatch gen=3 m=18446744073709551615 time=28114950975784 size=435
+GoStatus dt=1 g=1 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=2 m=18446744073709551615 gstatus=4
+GoStatus dt=6 g=4 m=18446744073709551615 gstatus=4
+GoStatus dt=5 g=5 m=18446744073709551615 gstatus=4
+GoStatus dt=4 g=6 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=7 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=17 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=33 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=8 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=9 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=10 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=18 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=11 m=18446744073709551615 gstatus=4
+GoStatus dt=4 g=34 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=19 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=12 m=18446744073709551615 gstatus=4
+GoStatus dt=2 g=20 m=18446744073709551615 gstatus=4
+GoStatus dt=4 g=35 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=13 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=21 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=36 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=49 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=50 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=15 m=18446744073709551615 gstatus=4
+GoStatus dt=4 g=65 m=18446744073709551615 gstatus=4
+GoStatus dt=2 g=66 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=26 m=18446744073709551615 gstatus=4
+GoStatus dt=4 g=55 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=27 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=37 m=18446744073709551615 gstatus=4
+GoStatus dt=3 g=129 m=18446744073709551615 gstatus=4
+EventBatch gen=3 m=18446744073709551615 time=28114950976078 size=1132
+Stacks
+Stack id=20 nframes=2
+	pc=4540421 func=22 file=23 line=363
+	pc=4546157 func=24 file=23 line=874
+Stack id=21 nframes=5
+	pc=4312466 func=25 file=26 line=564
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=18 nframes=6
+	pc=4296626 func=34 file=35 line=807
+	pc=4312466 func=25 file=26 line=564
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=26 nframes=7
+	pc=4300939 func=36 file=35 line=1196
+	pc=4297301 func=34 file=35 line=926
+	pc=4312466 func=25 file=26 line=564
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=7 nframes=7
+	pc=4709082 func=37 file=38 line=964
+	pc=4738119 func=39 file=40 line=209
+	pc=4738111 func=41 file=42 line=736
+	pc=4737664 func=43 file=42 line=380
+	pc=4739536 func=44 file=45 line=46
+	pc=4739528 func=46 file=47 line=183
+	pc=4803162 func=48 file=49 line=134
+Stack id=10 nframes=4
+	pc=4295522 func=50 file=35 line=627
+	pc=4246870 func=29 file=28 line=1288
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=29 nframes=8
+	pc=4556437 func=51 file=52 line=352
+	pc=4341796 func=53 file=54 line=521
+	pc=4279859 func=55 file=56 line=127
+	pc=4277746 func=57 file=58 line=182
+	pc=4244580 func=59 file=28 line=944
+	pc=4245653 func=29 file=28 line=1116
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=14 nframes=1
+	pc=4546157 func=24 file=23 line=874
+Stack id=17 nframes=1
+	pc=0 func=0 file=0 line=0
+Stack id=19 nframes=2
+	pc=4540420 func=22 file=23 line=353
+	pc=4546157 func=24 file=23 line=874
+Stack id=13 nframes=1
+	pc=0 func=0 file=0 line=0
+Stack id=5 nframes=2
+	pc=4418893 func=60 file=61 line=402
+	pc=4301860 func=62 file=35 line=1297
+Stack id=25 nframes=7
+	pc=4298957 func=36 file=35 line=1087
+	pc=4297301 func=34 file=35 line=926
+	pc=4312466 func=25 file=26 line=564
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=4 nframes=2
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=30 nframes=6
+	pc=4297308 func=34 file=35 line=817
+	pc=4312466 func=25 file=26 line=564
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=11 nframes=6
+	pc=4314276 func=63 file=26 line=749
+	pc=4312530 func=25 file=26 line=589
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=6 nframes=2
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=15 nframes=1
+	pc=4546157 func=24 file=23 line=874
+Stack id=8 nframes=1
+	pc=0 func=0 file=0 line=0
+Stack id=12 nframes=2
+	pc=4614055 func=64 file=65 line=474
+	pc=4302129 func=62 file=35 line=1357
+Stack id=3 nframes=6
+	pc=4556897 func=66 file=52 line=378
+	pc=4312252 func=25 file=26 line=536
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=9 nframes=5
+	pc=4312495 func=25 file=26 line=576
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=24 nframes=8
+	pc=4614055 func=64 file=65 line=474
+	pc=4298031 func=36 file=35 line=964
+	pc=4297301 func=34 file=35 line=926
+	pc=4312466 func=25 file=26 line=564
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=23 nframes=6
+	pc=4297239 func=34 file=35 line=914
+	pc=4312466 func=25 file=26 line=564
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=2 nframes=1
+	pc=4803172 func=48 file=49 line=130
+Stack id=28 nframes=8
+	pc=4556437 func=51 file=52 line=352
+	pc=4341796 func=53 file=54 line=521
+	pc=4280028 func=55 file=56 line=147
+	pc=4277746 func=57 file=58 line=182
+	pc=4244580 func=59 file=28 line=944
+	pc=4246070 func=29 file=28 line=1145
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=27 nframes=5
+	pc=4353658 func=67 file=68 line=958
+	pc=4278148 func=69 file=58 line=234
+	pc=4246244 func=29 file=28 line=1160
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=16 nframes=3
+	pc=4217457 func=70 file=71 line=442
+	pc=4546317 func=72 file=23 line=918
+	pc=4546150 func=24 file=23 line=871
+Stack id=31 nframes=8
+	pc=4353658 func=67 file=68 line=958
+	pc=4280657 func=73 file=56 line=254
+	pc=4280247 func=55 file=56 line=170
+	pc=4277746 func=57 file=58 line=182
+	pc=4244580 func=59 file=28 line=944
+	pc=4246070 func=29 file=28 line=1145
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+Stack id=1 nframes=3
+	pc=4554859 func=74 file=52 line=255
+	pc=4540633 func=22 file=23 line=391
+	pc=4546157 func=24 file=23 line=874
+Stack id=22 nframes=10
+	pc=4558967 func=75 file=76 line=166
+	pc=4558898 func=77 file=52 line=445
+	pc=4447453 func=78 file=61 line=3712
+	pc=4314041 func=79 file=26 line=714
+	pc=4297238 func=34 file=35 line=909
+	pc=4312466 func=25 file=26 line=564
+	pc=4247187 func=27 file=28 line=1333
+	pc=4245160 func=29 file=28 line=1021
+	pc=4502184 func=30 file=31 line=103
+	pc=4804475 func=32 file=33 line=60
+EventBatch gen=3 m=18446744073709551615 time=28114950894688 size=2762
+Strings
+String id=1
+	data="Not worker"
+String id=2
+	data="GC (dedicated)"
+String id=3
+	data="GC (fractional)"
+String id=4
+	data="GC (idle)"
+String id=5
+	data="unspecified"
+String id=6
+	data="forever"
+String id=7
+	data="network"
+String id=8
+	data="select"
+String id=9
+	data="sync.(*Cond).Wait"
+String id=10
+	data="sync"
+String id=11
+	data="chan send"
+String id=12
+	data="chan receive"
+String id=13
+	data="GC mark assist wait for work"
+String id=14
+	data="GC background sweeper wait"
+String id=15
+	data="system goroutine wait"
+String id=16
+	data="preempted"
+String id=17
+	data="wait for debug call"
+String id=18
+	data="wait until GC ends"
+String id=19
+	data="sleep"
+String id=20
+	data="runtime.Gosched"
+String id=21
+	data="GC mark termination"
+String id=22
+	data="runtime.traceAdvance"
+String id=23
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2.go"
+String id=24
+	data="runtime.(*traceAdvancerState).start.func1"
+String id=25
+	data="runtime.gcAssistAlloc"
+String id=26
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcmark.go"
+String id=27
+	data="runtime.deductAssistCredit"
+String id=28
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/malloc.go"
+String id=29
+	data="runtime.mallocgc"
+String id=30
+	data="runtime.makeslice"
+String id=31
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/slice.go"
+String id=32
+	data="main.main.func1"
+String id=33
+	data="/usr/local/google/home/mknyszek/work/go-1/src/internal/trace/v2/testdata/testprog/gc-stress.go"
+String id=34
+	data="runtime.gcMarkDone"
+String id=35
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgc.go"
+String id=36
+	data="runtime.gcMarkTermination"
+String id=37
+	data="syscall.write"
+String id=38
+	data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/zsyscall_linux_amd64.go"
+String id=39
+	data="syscall.Write"
+String id=40
+	data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/syscall_unix.go"
+String id=41
+	data="internal/poll.ignoringEINTRIO"
+String id=42
+	data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_unix.go"
+String id=43
+	data="internal/poll.(*FD).Write"
+String id=44
+	data="os.(*File).write"
+String id=45
+	data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_posix.go"
+String id=46
+	data="os.(*File).Write"
+String id=47
+	data="/usr/local/google/home/mknyszek/work/go-1/src/os/file.go"
+String id=48
+	data="runtime/trace.Start.func1"
+String id=49
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace/trace.go"
+String id=50
+	data="runtime.gcStart"
+String id=51
+	data="runtime.traceLocker.GCSweepSpan"
+String id=52
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2runtime.go"
+String id=53
+	data="runtime.(*sweepLocked).sweep"
+String id=54
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcsweep.go"
+String id=55
+	data="runtime.(*mcentral).cacheSpan"
+String id=56
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mcentral.go"
+String id=57
+	data="runtime.(*mcache).refill"
+String id=58
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mcache.go"
+String id=59
+	data="runtime.(*mcache).nextFree"
+String id=60
+	data="runtime.gopark"
+String id=61
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/proc.go"
+String id=62
+	data="runtime.gcBgMarkWorker"
+String id=63
+	data="runtime.gcParkAssist"
+String id=64
+	data="runtime.systemstack_switch"
+String id=65
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/asm_amd64.s"
+String id=66
+	data="runtime.traceLocker.GCMarkAssistStart"
+String id=67
+	data="runtime.(*mheap).alloc"
+String id=68
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mheap.go"
+String id=69
+	data="runtime.(*mcache).allocLarge"
+String id=70
+	data="runtime.chanrecv1"
+String id=71
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/chan.go"
+String id=72
+	data="runtime.(*wakeableSleep).sleep"
+String id=73
+	data="runtime.(*mcentral).grow"
+String id=74
+	data="runtime.traceLocker.Gomaxprocs"
+String id=75
+	data="runtime.traceLocker.stack"
+String id=76
+	data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2event.go"
+String id=77
+	data="runtime.traceLocker.GoUnpark"
+String id=78
+	data="runtime.injectglist"
+String id=79
+	data="runtime.gcWakeAllAssists"
diff --git a/src/internal/trace/v2/testdata/tests/go122-go-create-without-running-g.test b/src/internal/trace/v2/testdata/tests/go122-go-create-without-running-g.test
new file mode 100644
index 0000000..494c444
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-go-create-without-running-g.test
@@ -0,0 +1,17 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=17
+ProcStatus dt=1 p=0 pstatus=1
+GoCreate dt=1 new_g=5 new_stack=0 stack=0
+GoStart dt=1 g=5 g_seq=1
+GoStop dt=1 reason_string=1 stack=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=12
+Strings
+String id=1
+	data="whatever"
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-ambiguous.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-ambiguous.test
new file mode 100644
index 0000000..0d88af4
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-ambiguous.test
@@ -0,0 +1,21 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=21
+ProcStatus dt=0 p=0 pstatus=1
+GoStatus dt=0 g=1 m=0 gstatus=2
+GoSyscallBegin dt=0 p_seq=1 stack=0
+GoSyscallEnd dt=0
+GoSyscallBegin dt=0 p_seq=2 stack=0
+GoSyscallEndBlocked dt=0
+EventBatch gen=1 m=1 time=0 size=14
+ProcStatus dt=0 p=2 pstatus=1
+GoStatus dt=0 g=2 m=1 gstatus=2
+ProcSteal dt=0 p=0 p_seq=3 m=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary-bare-m.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary-bare-m.test
new file mode 100644
index 0000000..bbfc9cc
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary-bare-m.test
@@ -0,0 +1,17 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=11
+ProcStatus dt=1 p=1 pstatus=1
+GoStatus dt=1 g=1 m=0 gstatus=3
+GoSyscallEndBlocked dt=1
+EventBatch gen=1 m=1 time=0 size=9
+ProcStatus dt=1 p=0 pstatus=4
+ProcSteal dt=1 p=0 p_seq=1 m=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc-bare-m.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc-bare-m.test
new file mode 100644
index 0000000..8e29132
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc-bare-m.test
@@ -0,0 +1,18 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=15
+GoStatus dt=1 g=1 m=0 gstatus=3
+ProcStatus dt=1 p=1 pstatus=2
+ProcStart dt=1 p=1 p_seq=1
+GoSyscallEndBlocked dt=1
+EventBatch gen=1 m=1 time=0 size=9
+ProcStatus dt=1 p=0 pstatus=4
+ProcSteal dt=1 p=0 p_seq=1 m=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc.test
new file mode 100644
index 0000000..3b26e8f
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary-reacquire-new-proc.test
@@ -0,0 +1,20 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=15
+GoStatus dt=1 g=1 m=0 gstatus=3
+ProcStatus dt=1 p=1 pstatus=2
+ProcStart dt=1 p=1 p_seq=1
+GoSyscallEndBlocked dt=1
+EventBatch gen=1 m=1 time=0 size=18
+ProcStatus dt=1 p=2 pstatus=1
+GoStatus dt=1 g=2 m=1 gstatus=2
+ProcStatus dt=1 p=0 pstatus=4
+ProcSteal dt=1 p=0 p_seq=1 m=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary.test
new file mode 100644
index 0000000..133d8a5
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-gen-boundary.test
@@ -0,0 +1,19 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=11
+ProcStatus dt=1 p=1 pstatus=1
+GoStatus dt=1 g=1 m=0 gstatus=3
+GoSyscallEndBlocked dt=1
+EventBatch gen=1 m=1 time=0 size=18
+ProcStatus dt=1 p=2 pstatus=1
+GoStatus dt=1 g=2 m=1 gstatus=2
+ProcStatus dt=1 p=0 pstatus=4
+ProcSteal dt=1 p=0 p_seq=1 m=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-reacquire-new-proc-bare-m.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-reacquire-new-proc-bare-m.test
new file mode 100644
index 0000000..fa68c82
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-reacquire-new-proc-bare-m.test
@@ -0,0 +1,19 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=23
+ProcStatus dt=1 p=1 pstatus=2
+ProcStatus dt=1 p=0 pstatus=1
+GoStatus dt=1 g=1 m=0 gstatus=2
+GoSyscallBegin dt=1 p_seq=1 stack=0
+ProcStart dt=1 p=1 p_seq=1
+GoSyscallEndBlocked dt=1
+EventBatch gen=1 m=1 time=0 size=5
+ProcSteal dt=1 p=0 p_seq=2 m=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-reacquire-new-proc.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-reacquire-new-proc.test
new file mode 100644
index 0000000..85c19fc
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-reacquire-new-proc.test
@@ -0,0 +1,21 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=23
+ProcStatus dt=1 p=1 pstatus=2
+ProcStatus dt=1 p=0 pstatus=1
+GoStatus dt=1 g=1 m=0 gstatus=2
+GoSyscallBegin dt=1 p_seq=1 stack=0
+ProcStart dt=1 p=1 p_seq=1
+GoSyscallEndBlocked dt=1
+EventBatch gen=1 m=1 time=0 size=14
+ProcStatus dt=1 p=2 pstatus=1
+GoStatus dt=1 g=2 m=1 gstatus=2
+ProcSteal dt=1 p=0 p_seq=2 m=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-self.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-self.test
new file mode 100644
index 0000000..6484eb6
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-self.test
@@ -0,0 +1,17 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=24
+ProcStatus dt=0 p=0 pstatus=1
+GoStatus dt=0 g=1 m=0 gstatus=2
+GoSyscallBegin dt=0 p_seq=1 stack=0
+ProcSteal dt=0 p=0 p_seq=2 m=0
+ProcStart dt=0 p=0 p_seq=3
+GoSyscallEndBlocked dt=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-simple-bare-m.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-simple-bare-m.test
new file mode 100644
index 0000000..d338722
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-simple-bare-m.test
@@ -0,0 +1,17 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=15
+ProcStatus dt=1 p=0 pstatus=1
+GoStatus dt=1 g=1 m=0 gstatus=2
+GoSyscallBegin dt=1 p_seq=1 stack=0
+GoSyscallEndBlocked dt=1
+EventBatch gen=1 m=1 time=0 size=5
+ProcSteal dt=1 p=0 p_seq=2 m=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-simple.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-simple.test
new file mode 100644
index 0000000..a1f9db4
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-simple.test
@@ -0,0 +1,19 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=15
+ProcStatus dt=1 p=0 pstatus=1
+GoStatus dt=1 g=1 m=0 gstatus=2
+GoSyscallBegin dt=1 p_seq=1 stack=0
+GoSyscallEndBlocked dt=1
+EventBatch gen=1 m=1 time=0 size=14
+ProcStatus dt=1 p=2 pstatus=1
+GoStatus dt=1 g=2 m=1 gstatus=2
+ProcSteal dt=1 p=0 p_seq=2 m=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-sitting-in-syscall.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-sitting-in-syscall.test
new file mode 100644
index 0000000..58c41c5
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-sitting-in-syscall.test
@@ -0,0 +1,15 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=9
+ProcStatus dt=1 p=0 pstatus=4
+ProcSteal dt=1 p=0 p_seq=1 m=1
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+GoStatus dt=1 g=1 m=1 gstatus=3
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testdata/tests/go122-task-across-generations.test b/src/internal/trace/v2/testdata/tests/go122-task-across-generations.test
new file mode 100644
index 0000000..0b8abd7
--- /dev/null
+++ b/src/internal/trace/v2/testdata/tests/go122-task-across-generations.test
@@ -0,0 +1,26 @@
+-- expect --
+SUCCESS
+-- trace --
+Trace Go1.22
+EventBatch gen=1 m=0 time=0 size=15
+ProcStatus dt=1 p=0 pstatus=1
+GoStatus dt=1 g=1 m=0 gstatus=2
+UserTaskBegin dt=1 task=2 parent_task=0 name_string=1 stack=0
+EventBatch gen=1 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=1 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=1 m=18446744073709551615 time=0 size=11
+Strings
+String id=1
+	data="my task"
+EventBatch gen=2 m=0 time=5 size=13
+ProcStatus dt=1 p=0 pstatus=1
+GoStatus dt=1 g=1 m=0 gstatus=2
+UserTaskEnd dt=1 task=2 stack=0
+EventBatch gen=2 m=18446744073709551615 time=0 size=5
+Frequency freq=15625000
+EventBatch gen=2 m=18446744073709551615 time=0 size=1
+Stacks
+EventBatch gen=2 m=18446744073709551615 time=0 size=1
+Strings
diff --git a/src/internal/trace/v2/testtrace/expectation.go b/src/internal/trace/v2/testtrace/expectation.go
new file mode 100644
index 0000000..3e5394a
--- /dev/null
+++ b/src/internal/trace/v2/testtrace/expectation.go
@@ -0,0 +1,81 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testtrace
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Expectation represents the expected result of some operation.
+type Expectation struct {
+	failure      bool
+	errorMatcher *regexp.Regexp
+}
+
+// ExpectSuccess returns an Expectation that trivially expects success.
+func ExpectSuccess() *Expectation {
+	return new(Expectation)
+}
+
+// Check validates whether err conforms to the expectation. Returns
+// an error if it does not conform.
+//
+// Conformance means that if failure is true, then err must be non-nil.
+// If err is non-nil, then it must match errorMatcher.
+func (e *Expectation) Check(err error) error {
+	if !e.failure && err != nil {
+		return fmt.Errorf("unexpected error while reading the trace: %v", err)
+	}
+	if e.failure && err == nil {
+		return fmt.Errorf("expected error while reading the trace: want something matching %q, got none", e.errorMatcher)
+	}
+	if e.failure && err != nil && !e.errorMatcher.MatchString(err.Error()) {
+		return fmt.Errorf("unexpected error while reading the trace: want something matching %q, got %s", e.errorMatcher, err.Error())
+	}
+	return nil
+}
+
+// ParseExpectation parses the serialized form of an Expectation.
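+//
+// The serialized form is a short text header. A minimal sketch of the two
+// accepted shapes (the FAILURE pattern shown here is hypothetical):
+//
+//	SUCCESS
+//
+// or
+//
+//	FAILURE "regexp matching the expected error"
+//
+// The FAILURE argument must be a Go-quoted regular expression; it is
+// unquoted and compiled by parseMatcher below.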
+func ParseExpectation(data []byte) (*Expectation, error) {
+	exp := new(Expectation)
+	s := bufio.NewScanner(bytes.NewReader(data))
+	if s.Scan() {
+		c := strings.SplitN(s.Text(), " ", 2)
+		switch c[0] {
+		case "SUCCESS":
+		case "FAILURE":
+			exp.failure = true
+			if len(c) != 2 {
+				return exp, fmt.Errorf("bad header line for FAILURE: %q", s.Text())
+			}
+			matcher, err := parseMatcher(c[1])
+			if err != nil {
+				return exp, err
+			}
+			exp.errorMatcher = matcher
+		default:
+			return exp, fmt.Errorf("bad header line: %q", s.Text())
+		}
+		return exp, nil
+	}
+	return exp, s.Err()
+}
+
+func parseMatcher(quoted string) (*regexp.Regexp, error) {
+	pattern, err := strconv.Unquote(quoted)
+	if err != nil {
+		return nil, fmt.Errorf("malformed pattern: not correctly quoted: %s: %v", quoted, err)
+	}
+	matcher, err := regexp.Compile(pattern)
+	if err != nil {
+		return nil, fmt.Errorf("malformed pattern: not a valid regexp: %s: %v", pattern, err)
+	}
+	return matcher, nil
+}
diff --git a/src/internal/trace/v2/testtrace/format.go b/src/internal/trace/v2/testtrace/format.go
new file mode 100644
index 0000000..2e2e975
--- /dev/null
+++ b/src/internal/trace/v2/testtrace/format.go
@@ -0,0 +1,56 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testtrace
+
+import (
+	"bytes"
+	"fmt"
+	"internal/trace/v2/raw"
+	"internal/txtar"
+	"io"
+)
+
+// ParseFile parses a test file generated by the testgen package.
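+//
+// The file is a txtar archive containing exactly two sections, "expect" and
+// "trace", in that order. A minimal sketch of the layout (the trace body is
+// illustrative only):
+//
+//	-- expect --
+//	SUCCESS
+//	-- trace --
+//	Trace Go1.22
+//	...
+//
+// The trace section is round-tripped through the raw text reader and byte
+// writer, and the resulting byte stream is returned alongside the parsed
+// Expectation.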
+func ParseFile(testPath string) (io.Reader, *Expectation, error) {
+	ar, err := txtar.ParseFile(testPath)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to read test file for %s: %v", testPath, err)
+	}
+	if len(ar.Files) != 2 {
+		return nil, nil, fmt.Errorf("malformed test %s: wrong number of files", testPath)
+	}
+	if ar.Files[0].Name != "expect" {
+		return nil, nil, fmt.Errorf("malformed test %s: bad filename %s", testPath, ar.Files[0].Name)
+	}
+	if ar.Files[1].Name != "trace" {
+		return nil, nil, fmt.Errorf("malformed test %s: bad filename %s", testPath, ar.Files[1].Name)
+	}
+	tr, err := raw.NewTextReader(bytes.NewReader(ar.Files[1].Data))
+	if err != nil {
+		return nil, nil, fmt.Errorf("malformed test %s: bad trace file: %v", testPath, err)
+	}
+	var buf bytes.Buffer
+	tw, err := raw.NewWriter(&buf, tr.Version())
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to create trace byte writer: %v", err)
+	}
+	for {
+		ev, err := tr.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, nil, fmt.Errorf("malformed test %s: bad trace file: %v", testPath, err)
+		}
+		if err := tw.WriteEvent(ev); err != nil {
+			return nil, nil, fmt.Errorf("internal error during %s: failed to write trace bytes: %v", testPath, err)
+		}
+	}
+	exp, err := ParseExpectation(ar.Files[0].Data)
+	if err != nil {
+		return nil, nil, fmt.Errorf("internal error during %s: failed to parse expectation %q: %v", testPath, string(ar.Files[0].Data), err)
+	}
+	return &buf, exp, nil
+}
diff --git a/src/internal/trace/v2/testtrace/validation.go b/src/internal/trace/v2/testtrace/validation.go
new file mode 100644
index 0000000..021c778
--- /dev/null
+++ b/src/internal/trace/v2/testtrace/validation.go
@@ -0,0 +1,361 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testtrace
+
+import (
+	"errors"
+	"fmt"
+	"internal/trace/v2"
+	"slices"
+	"strings"
+)
+
+// Validator is a type used for validating a stream of trace.Events.
+type Validator struct {
+	lastTs   trace.Time
+	gs       map[trace.GoID]*goState
+	ps       map[trace.ProcID]*procState
+	ms       map[trace.ThreadID]*schedContext
+	ranges   map[trace.ResourceID][]string
+	tasks    map[trace.TaskID]string
+	seenSync bool
+}
+
+type schedContext struct {
+	M trace.ThreadID
+	P trace.ProcID
+	G trace.GoID
+}
+
+type goState struct {
+	state   trace.GoState
+	binding *schedContext
+}
+
+type procState struct {
+	state   trace.ProcState
+	binding *schedContext
+}
+
+// NewValidator creates a new Validator.
+func NewValidator() *Validator {
+	return &Validator{
+		gs:     make(map[trace.GoID]*goState),
+		ps:     make(map[trace.ProcID]*procState),
+		ms:     make(map[trace.ThreadID]*schedContext),
+		ranges: make(map[trace.ResourceID][]string),
+		tasks:  make(map[trace.TaskID]string),
+	}
+}
+
+// Event validates ev as the next event in a stream of trace.Events.
+//
+// Returns an error if validation fails.
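+//
+// A minimal usage sketch, assuming r is a trace.Reader over the stream being
+// validated:
+//
+//	v := NewValidator()
+//	for {
+//		ev, err := r.ReadEvent()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			// Handle the read error.
+//			break
+//		}
+//		if err := v.Event(ev); err != nil {
+//			// Report the validation error.
+//		}
+//	}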
+func (v *Validator) Event(ev trace.Event) error {
+	e := new(errAccumulator)
+
+	// Validate timestamp order.
+	if v.lastTs != 0 {
+		if ev.Time() <= v.lastTs {
+			e.Errorf("timestamp out-of-order for %+v", ev)
+		} else {
+			v.lastTs = ev.Time()
+		}
+	} else {
+		v.lastTs = ev.Time()
+	}
+
+	// Validate event stack.
+	checkStack(e, ev.Stack())
+
+	switch ev.Kind() {
+	case trace.EventSync:
+		// Just record that we've seen a Sync at some point.
+		v.seenSync = true
+	case trace.EventMetric:
+		m := ev.Metric()
+		if !strings.Contains(m.Name, ":") {
+			// Should have a ":" as per runtime/metrics convention.
+			e.Errorf("invalid metric name %q", m.Name)
+		}
+		// Make sure the value is OK.
+		if m.Value.Kind() == trace.ValueBad {
+			e.Errorf("invalid value")
+		}
+		switch m.Value.Kind() {
+		case trace.ValueUint64:
+			// Just make sure it doesn't panic.
+			_ = m.Value.Uint64()
+		}
+	case trace.EventLabel:
+		l := ev.Label()
+
+		// Check label.
+		if l.Label == "" {
+			e.Errorf("invalid label %q", l.Label)
+		}
+
+		// Check label resource.
+		if l.Resource.Kind == trace.ResourceNone {
+			e.Errorf("label resource none")
+		}
+		switch l.Resource.Kind {
+		case trace.ResourceGoroutine:
+			id := l.Resource.Goroutine()
+			if _, ok := v.gs[id]; !ok {
+				e.Errorf("label for invalid goroutine %d", id)
+			}
+		case trace.ResourceProc:
+			id := l.Resource.Proc()
+			if _, ok := v.ps[id]; !ok {
+				e.Errorf("label for invalid proc %d", id)
+			}
+		case trace.ResourceThread:
+			id := l.Resource.Thread()
+			if _, ok := v.ms[id]; !ok {
+				e.Errorf("label for invalid thread %d", id)
+			}
+		}
+	case trace.EventStackSample:
+		// Not much to check here. It's basically a sched context and a stack.
+		// The sched context is also not guaranteed to align with other events.
+		// We already checked the stack above.
+	case trace.EventStateTransition:
+		// Validate state transitions.
+		//
+		// TODO(mknyszek): A lot of logic is duplicated between goroutines and procs.
+		// The two are intentionally handled identically; from the perspective of the
+		// API, resources all have the same general properties. Consider making this
+		// code generic over resources and implementing validation just once.
+		tr := ev.StateTransition()
+		checkStack(e, tr.Stack)
+		switch tr.Resource.Kind {
+		case trace.ResourceGoroutine:
+			// Basic state transition validation.
+			id := tr.Resource.Goroutine()
+			old, new := tr.Goroutine()
+			if new == trace.GoUndetermined {
+				e.Errorf("transition to undetermined state for goroutine %d", id)
+			}
+			if v.seenSync && old == trace.GoUndetermined {
+				e.Errorf("undetermined goroutine %d after first global sync", id)
+			}
+			if new == trace.GoNotExist && v.hasAnyRange(trace.MakeResourceID(id)) {
+				e.Errorf("goroutine %d died with active ranges", id)
+			}
+			state, ok := v.gs[id]
+			if ok {
+				if old != state.state {
+					e.Errorf("bad old state for goroutine %d: got %s, want %s", id, old, state.state)
+				}
+				state.state = new
+			} else {
+				if old != trace.GoUndetermined && old != trace.GoNotExist {
+					e.Errorf("bad old state for unregistered goroutine %d: %s", id, old)
+				}
+				state = &goState{state: new}
+				v.gs[id] = state
+			}
+			// Validate sched context.
+			if new.Executing() {
+				ctx := v.getOrCreateThread(e, ev.Thread())
+				if ctx != nil {
+					if ctx.G != trace.NoGoroutine && ctx.G != id {
+						e.Errorf("tried to run goroutine %d when one was already executing (%d) on thread %d", id, ctx.G, ev.Thread())
+					}
+					ctx.G = id
+					state.binding = ctx
+				}
+			} else if old.Executing() && !new.Executing() {
+				if tr.Stack != ev.Stack() {
+					// This is a case where the transition is happening to a goroutine that is also executing, so
+					// these two stacks should always match.
+					e.Errorf("StateTransition.Stack doesn't match Event.Stack")
+				}
+				ctx := state.binding
+				if ctx != nil {
+					if ctx.G != id {
+						e.Errorf("tried to stop goroutine %d when it wasn't currently executing (currently executing %d) on thread %d", id, ctx.G, ev.Thread())
+					}
+					ctx.G = trace.NoGoroutine
+					state.binding = nil
+				} else {
+					e.Errorf("stopping goroutine %d not bound to any active context", id)
+				}
+			}
+		case trace.ResourceProc:
+			// Basic state transition validation.
+			id := tr.Resource.Proc()
+			old, new := tr.Proc()
+			if new == trace.ProcUndetermined {
+				e.Errorf("transition to undetermined state for proc %d", id)
+			}
+			if v.seenSync && old == trace.ProcUndetermined {
+				e.Errorf("undetermined proc %d after first global sync", id)
+			}
+			if new == trace.ProcNotExist && v.hasAnyRange(trace.MakeResourceID(id)) {
+				e.Errorf("proc %d died with active ranges", id)
+			}
+			state, ok := v.ps[id]
+			if ok {
+				if old != state.state {
+					e.Errorf("bad old state for proc %d: got %s, want %s", id, old, state.state)
+				}
+				state.state = new
+			} else {
+				if old != trace.ProcUndetermined && old != trace.ProcNotExist {
+					e.Errorf("bad old state for unregistered proc %d: %s", id, old)
+				}
+				state = &procState{state: new}
+				v.ps[id] = state
+			}
+			// Validate sched context.
+			if new.Executing() {
+				ctx := v.getOrCreateThread(e, ev.Thread())
+				if ctx != nil {
+					if ctx.P != trace.NoProc && ctx.P != id {
+						e.Errorf("tried to run proc %d when one was already executing (%d) on thread %d", id, ctx.P, ev.Thread())
+					}
+					ctx.P = id
+					state.binding = ctx
+				}
+			} else if old.Executing() && !new.Executing() {
+				ctx := state.binding
+				if ctx != nil {
+					if ctx.P != id {
+						e.Errorf("tried to stop proc %d when it wasn't currently executing (currently executing %d) on thread %d", id, ctx.P, ctx.M)
+					}
+					ctx.P = trace.NoProc
+					state.binding = nil
+				} else {
+					e.Errorf("stopping proc %d not bound to any active context", id)
+				}
+			}
+		}
+	case trace.EventRangeBegin, trace.EventRangeActive, trace.EventRangeEnd:
+		// Validate ranges.
+		r := ev.Range()
+		switch ev.Kind() {
+		case trace.EventRangeBegin:
+			if v.hasRange(r.Scope, r.Name) {
+				e.Errorf("already active range %q on %v begun again", r.Name, r.Scope)
+			}
+			v.addRange(r.Scope, r.Name)
+		case trace.EventRangeActive:
+			if !v.hasRange(r.Scope, r.Name) {
+				v.addRange(r.Scope, r.Name)
+			}
+		case trace.EventRangeEnd:
+			if !v.hasRange(r.Scope, r.Name) {
+				e.Errorf("inactive range %q on %v ended", r.Name, r.Scope)
+			}
+			v.deleteRange(r.Scope, r.Name)
+		}
+	case trace.EventTaskBegin:
+		// Validate task begin.
+		t := ev.Task()
+		if t.ID == trace.NoTask || t.ID == trace.BackgroundTask {
+			// The background task should never have an event emitted for it.
+			e.Errorf("found invalid task ID for task of type %s", t.Type)
+		}
+		if t.Parent == trace.BackgroundTask {
+			// It's not possible for a task to be a subtask of the background task.
+			e.Errorf("found background task as the parent for task of type %s", t.Type)
+		}
+		// N.B. Don't check the task type. Empty string is a valid task type.
+		v.tasks[t.ID] = t.Type
+	case trace.EventTaskEnd:
+		// Validate task end.
+		// We can see a task end without a begin, so ignore a task without information.
+		// Instead, if we've seen the task begin, just make sure the task end lines up.
+		t := ev.Task()
+		if typ, ok := v.tasks[t.ID]; ok {
+			if t.Type != typ {
+				e.Errorf("task end type %q doesn't match task start type %q for task %d", t.Type, typ, t.ID)
+			}
+			delete(v.tasks, t.ID)
+		}
+	case trace.EventLog:
+		// There's really not much here to check, except that we can
+		// generate a Log. The category and message are entirely user-created,
+		// so we can't make any assumptions as to what they are. We also
+		// can't validate the task, because proving the task's existence is very
+		// much best-effort.
+		_ = ev.Log()
+	}
+	return e.Errors()
+}
+
+func (v *Validator) hasRange(r trace.ResourceID, name string) bool {
+	ranges, ok := v.ranges[r]
+	return ok && slices.Contains(ranges, name)
+}
+
+func (v *Validator) addRange(r trace.ResourceID, name string) {
+	ranges := v.ranges[r]
+	ranges = append(ranges, name)
+	v.ranges[r] = ranges
+}
+
+func (v *Validator) hasAnyRange(r trace.ResourceID) bool {
+	ranges, ok := v.ranges[r]
+	return ok && len(ranges) != 0
+}
+
+func (v *Validator) deleteRange(r trace.ResourceID, name string) {
+	ranges, ok := v.ranges[r]
+	if !ok {
+		return
+	}
+	i := slices.Index(ranges, name)
+	if i < 0 {
+		return
+	}
+	v.ranges[r] = slices.Delete(ranges, i, i+1)
+}
+
+func (v *Validator) getOrCreateThread(e *errAccumulator, m trace.ThreadID) *schedContext {
+	if m == trace.NoThread {
+		e.Errorf("must have thread, but thread ID is none")
+		return nil
+	}
+	s, ok := v.ms[m]
+	if !ok {
+		s = &schedContext{M: m, P: trace.NoProc, G: trace.NoGoroutine}
+		v.ms[m] = s
+		return s
+	}
+	return s
+}
+
+func checkStack(e *errAccumulator, stk trace.Stack) {
+	// Check for non-empty values, but we also check for crashes due to incorrect validation.
+	i := 0
+	stk.Frames(func(f trace.StackFrame) bool {
+		if i == 0 {
+			// Allow for one fully zero stack; only check the frames after it.
+			//
+			// TODO(mknyszek): Investigate why that happens.
+			i++
+			return true
+		}
+		if f.Func == "" || f.File == "" || f.PC == 0 || f.Line == 0 {
+			e.Errorf("invalid stack frame %#v: missing information", f)
+		}
+		i++
+		return true
+	})
+}
+
+type errAccumulator struct {
+	errs []error
+}
+
+func (e *errAccumulator) Errorf(f string, args ...any) {
+	e.errs = append(e.errs, fmt.Errorf(f, args...))
+}
+
+func (e *errAccumulator) Errors() error {
+	return errors.Join(e.errs...)
+}
diff --git a/src/internal/trace/v2/trace_test.go b/src/internal/trace/v2/trace_test.go
new file mode 100644
index 0000000..7cc7508
--- /dev/null
+++ b/src/internal/trace/v2/trace_test.go
@@ -0,0 +1,606 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace_test
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"internal/testenv"
+	"internal/trace/v2"
+	"internal/trace/v2/testtrace"
+	"io"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"testing"
+)
+
+func TestTraceAnnotations(t *testing.T) {
+	testTraceProg(t, "annotations.go", func(t *testing.T, tb, _ []byte, _ bool) {
+		type evDesc struct {
+			kind trace.EventKind
+			task trace.TaskID
+			args []string
+		}
+		want := []evDesc{
+			{trace.EventTaskBegin, trace.TaskID(1), []string{"task0"}},
+			{trace.EventRegionBegin, trace.TaskID(1), []string{"region0"}},
+			{trace.EventRegionBegin, trace.TaskID(1), []string{"region1"}},
+			{trace.EventLog, trace.TaskID(1), []string{"key0", "0123456789abcdef"}},
+			{trace.EventRegionEnd, trace.TaskID(1), []string{"region1"}},
+			{trace.EventRegionEnd, trace.TaskID(1), []string{"region0"}},
+			{trace.EventTaskEnd, trace.TaskID(1), []string{"task0"}},
+			//  Currently, pre-existing region is not recorded to avoid allocations.
+			{trace.EventRegionBegin, trace.BackgroundTask, []string{"post-existing region"}},
+		}
+		r, err := trace.NewReader(bytes.NewReader(tb))
+		if err != nil {
+			t.Error(err)
+		}
+		for {
+			ev, err := r.ReadEvent()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				t.Fatal(err)
+			}
+			for i, wantEv := range want {
+				if wantEv.kind != ev.Kind() {
+					continue
+				}
+				match := false
+				switch ev.Kind() {
+				case trace.EventTaskBegin, trace.EventTaskEnd:
+					task := ev.Task()
+					match = task.ID == wantEv.task && task.Type == wantEv.args[0]
+				case trace.EventRegionBegin, trace.EventRegionEnd:
+					reg := ev.Region()
+					match = reg.Task == wantEv.task && reg.Type == wantEv.args[0]
+				case trace.EventLog:
+					log := ev.Log()
+					match = log.Task == wantEv.task && log.Category == wantEv.args[0] && log.Message == wantEv.args[1]
+				}
+				if match {
+					want[i] = want[len(want)-1]
+					want = want[:len(want)-1]
+					break
+				}
+			}
+		}
+		if len(want) != 0 {
+			for _, ev := range want {
+				t.Errorf("no match for %s TaskID=%d Args=%#v", ev.kind, ev.task, ev.args)
+			}
+		}
+	})
+}
+
+func TestTraceAnnotationsStress(t *testing.T) {
+	testTraceProg(t, "annotations-stress.go", nil)
+}
+
+func TestTraceCgoCallback(t *testing.T) {
+	testenv.MustHaveCGO(t)
+
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		t.Skipf("cgo callback test requires pthreads and is not supported on %s", runtime.GOOS)
+	}
+	testTraceProg(t, "cgo-callback.go", nil)
+}
+
+func TestTraceCPUProfile(t *testing.T) {
+	testTraceProg(t, "cpu-profile.go", func(t *testing.T, tb, stderr []byte, _ bool) {
+		// Parse stderr which has a CPU profile summary, if everything went well.
+		// (If it didn't, we shouldn't even make it here.)
+		scanner := bufio.NewScanner(bytes.NewReader(stderr))
+		pprofSamples := 0
+		pprofStacks := make(map[string]int)
+		for scanner.Scan() {
+			var stack string
+			var samples int
+			_, err := fmt.Sscanf(scanner.Text(), "%s\t%d", &stack, &samples)
+			if err != nil {
+				t.Fatalf("failed to parse CPU profile summary in stderr: %s\n\tfull:\n%s", scanner.Text(), stderr)
+			}
+			pprofStacks[stack] = samples
+			pprofSamples += samples
+		}
+		if err := scanner.Err(); err != nil {
+			t.Fatalf("failed to parse CPU profile summary in stderr: %v", err)
+		}
+		if pprofSamples == 0 {
+			t.Skip("CPU profile did not include any samples while tracing was active")
+		}
+
+		// Examine the execution tracer's view of the CPU profile samples. Filter it
+		// to only include samples from the single test goroutine. Use the goroutine
+		// ID that was recorded in the events: that should reflect getg().m.curg,
+		// same as the profiler's labels (even when the M is using its g0 stack).
+		totalTraceSamples := 0
+		traceSamples := 0
+		traceStacks := make(map[string]int)
+		r, err := trace.NewReader(bytes.NewReader(tb))
+		if err != nil {
+			t.Error(err)
+		}
+		var hogRegion *trace.Event
+		var hogRegionClosed bool
+		for {
+			ev, err := r.ReadEvent()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				t.Fatal(err)
+			}
+			if ev.Kind() == trace.EventRegionBegin && ev.Region().Type == "cpuHogger" {
+				hogRegion = &ev
+			}
+			if ev.Kind() == trace.EventStackSample {
+				totalTraceSamples++
+				if hogRegion != nil && ev.Goroutine() == hogRegion.Goroutine() {
+					traceSamples++
+					var fns []string
+					ev.Stack().Frames(func(frame trace.StackFrame) bool {
+						if frame.Func != "runtime.goexit" {
+							fns = append(fns, fmt.Sprintf("%s:%d", frame.Func, frame.Line))
+						}
+						return true
+					})
+					stack := strings.Join(fns, "|")
+					traceStacks[stack]++
+				}
+			}
+			if ev.Kind() == trace.EventRegionEnd && ev.Region().Type == "cpuHogger" {
+				hogRegionClosed = true
+			}
+		}
+		if hogRegion == nil {
+			t.Fatalf("execution trace did not identify cpuHogger goroutine")
+		} else if !hogRegionClosed {
+			t.Fatalf("execution trace did not close cpuHogger region")
+		}
+
+		// The execution trace may drop CPU profile samples if the profiling buffer
+		// overflows. Based on the size of profBufWordCount, that takes a bit over
+		// 1900 CPU samples or 19 thread-seconds at a 100 Hz sample rate. If we've
+		// hit that case, then we definitely have at least one full buffer's worth
+		// of CPU samples, so we'll call that success.
+		overflowed := totalTraceSamples >= 1900
+		if traceSamples < pprofSamples {
+			t.Logf("execution trace did not include all CPU profile samples; %d in profile, %d in trace", pprofSamples, traceSamples)
+			if !overflowed {
+				t.Fail()
+			}
+		}
+
+		for stack, traceSamples := range traceStacks {
+			pprofSamples := pprofStacks[stack]
+			delete(pprofStacks, stack)
+			if traceSamples < pprofSamples {
+				t.Logf("execution trace did not include all CPU profile samples for stack %q; %d in profile, %d in trace",
+					stack, pprofSamples, traceSamples)
+				if !overflowed {
+					t.Fail()
+				}
+			}
+		}
+		for stack, pprofSamples := range pprofStacks {
+			t.Logf("CPU profile included %d samples at stack %q not present in execution trace", pprofSamples, stack)
+			if !overflowed {
+				t.Fail()
+			}
+		}
+
+		if t.Failed() {
+			t.Logf("execution trace CPU samples:")
+			for stack, samples := range traceStacks {
+				t.Logf("%d: %q", samples, stack)
+			}
+			t.Logf("CPU profile:\n%s", stderr)
+		}
+	})
+}
+
+func TestTraceFutileWakeup(t *testing.T) {
+	testTraceProg(t, "futile-wakeup.go", func(t *testing.T, tb, _ []byte, _ bool) {
+		// Check to make sure that no goroutine in the "special" trace region
+		// ends up blocking, unblocking, then immediately blocking again.
+		//
+		// The goroutines are careful to call runtime.Gosched in between blocking,
+		// so there should never be a clean block/unblock on the goroutine unless
+		// the runtime was generating extraneous events.
+		const (
+			entered = iota
+			blocked
+			runnable
+			running
+		)
+		gs := make(map[trace.GoID]int)
+		seenSpecialGoroutines := false
+		r, err := trace.NewReader(bytes.NewReader(tb))
+		if err != nil {
+			t.Error(err)
+		}
+		for {
+			ev, err := r.ReadEvent()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				t.Fatal(err)
+			}
+			// Only track goroutines in the special region we control, so runtime
+			// goroutines don't interfere (it's totally valid in traces for a
+			// goroutine to block, run, and block again; that's not what we care about).
+			if ev.Kind() == trace.EventRegionBegin && ev.Region().Type == "special" {
+				seenSpecialGoroutines = true
+				gs[ev.Goroutine()] = entered
+			}
+			if ev.Kind() == trace.EventRegionEnd && ev.Region().Type == "special" {
+				delete(gs, ev.Goroutine())
+			}
+			// Track state transitions for goroutines we care about.
+			//
+			// The goroutines we care about will advance through the state machine
+			// of entered -> blocked -> runnable -> running. If in the running state
+			// we block, then we have a futile wakeup. Because of the runtime.Gosched
+			// on these specially marked goroutines, we should end up back in runnable
+			// first. If at any point we go to a different state, switch back to entered
+			// and wait for the next time the goroutine blocks.
+			if ev.Kind() != trace.EventStateTransition {
+				continue
+			}
+			st := ev.StateTransition()
+			if st.Resource.Kind != trace.ResourceGoroutine {
+				continue
+			}
+			id := st.Resource.Goroutine()
+			state, ok := gs[id]
+			if !ok {
+				continue
+			}
+			_, new := st.Goroutine()
+			switch state {
+			case entered:
+				if new == trace.GoWaiting {
+					state = blocked
+				} else {
+					state = entered
+				}
+			case blocked:
+				if new == trace.GoRunnable {
+					state = runnable
+				} else {
+					state = entered
+				}
+			case runnable:
+				if new == trace.GoRunning {
+					state = running
+				} else {
+					state = entered
+				}
+			case running:
+				if new == trace.GoWaiting {
+					t.Fatalf("found futile wakeup on goroutine %d", id)
+				} else {
+					state = entered
+				}
+			}
+			gs[id] = state
+		}
+		if !seenSpecialGoroutines {
+			t.Fatal("did not see a goroutine in the 'special' region")
+		}
+	})
+}
+
+func TestTraceGCStress(t *testing.T) {
+	testTraceProg(t, "gc-stress.go", nil)
+}
+
+func TestTraceGOMAXPROCS(t *testing.T) {
+	testTraceProg(t, "gomaxprocs.go", nil)
+}
+
+func TestTraceStacks(t *testing.T) {
+	testTraceProg(t, "stacks.go", func(t *testing.T, tb, _ []byte, stress bool) {
+		type frame struct {
+			fn   string
+			line int
+		}
+		type evDesc struct {
+			kind   trace.EventKind
+			match  string
+			frames []frame
+		}
+		// mainLine is the line number of `func main()` in testprog/stacks.go.
+		const mainLine = 21
+		want := []evDesc{
+			{trace.EventStateTransition, "Goroutine Running->Runnable", []frame{
+				{"main.main", mainLine + 82},
+			}},
+			{trace.EventStateTransition, "Goroutine NotExist->Runnable", []frame{
+				{"main.main", mainLine + 11},
+			}},
+			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+				{"runtime.block", 0},
+				{"main.main.func1", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+				{"runtime.chansend1", 0},
+				{"main.main.func2", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+				{"runtime.chanrecv1", 0},
+				{"main.main.func3", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+				{"runtime.chanrecv1", 0},
+				{"main.main.func4", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
+				{"runtime.chansend1", 0},
+				{"main.main", mainLine + 84},
+			}},
+			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+				{"runtime.chansend1", 0},
+				{"main.main.func5", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
+				{"runtime.chanrecv1", 0},
+				{"main.main", mainLine + 85},
+			}},
+			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+				{"runtime.selectgo", 0},
+				{"main.main.func6", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
+				{"runtime.selectgo", 0},
+				{"main.main", mainLine + 86},
+			}},
+			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+				{"sync.(*Mutex).Lock", 0},
+				{"main.main.func7", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
+				{"sync.(*Mutex).Unlock", 0},
+				{"main.main", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+				{"sync.(*WaitGroup).Wait", 0},
+				{"main.main.func8", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
+				{"sync.(*WaitGroup).Add", 0},
+				{"sync.(*WaitGroup).Done", 0},
+				{"main.main", mainLine + 91},
+			}},
+			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+				{"sync.(*Cond).Wait", 0},
+				{"main.main.func9", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Waiting->Runnable", []frame{
+				{"sync.(*Cond).Signal", 0},
+				{"main.main", 0},
+			}},
+			{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+				{"time.Sleep", 0},
+				{"main.main", 0},
+			}},
+			{trace.EventMetric, "/sched/gomaxprocs:threads", []frame{
+				{"runtime.startTheWorld", 0}, // this is when the current gomaxprocs is logged.
+				{"runtime.startTheWorldGC", 0},
+				{"runtime.GOMAXPROCS", 0},
+				{"main.main", 0},
+			}},
+		}
+		if !stress {
+			// Only check for this stack if !stress because traceAdvance alone could
+			// allocate enough memory to trigger a GC if called frequently enough.
+			// This might cause the runtime.GC call we're trying to match against to
+			// coalesce with an active GC triggered by traceAdvance. In that case
+			// we won't have an EventRangeBegin event that matches the stack trace we're
+			// looking for, since runtime.GC will not have triggered the GC.
+			gcEv := evDesc{trace.EventRangeBegin, "GC concurrent mark phase", []frame{
+				{"runtime.GC", 0},
+				{"main.main", 0},
+			}}
+			want = append(want, gcEv)
+		}
+		if runtime.GOOS != "windows" && runtime.GOOS != "plan9" {
+			want = append(want, []evDesc{
+				{trace.EventStateTransition, "Goroutine Running->Waiting", []frame{
+					{"internal/poll.(*FD).Accept", 0},
+					{"net.(*netFD).accept", 0},
+					{"net.(*TCPListener).accept", 0},
+					{"net.(*TCPListener).Accept", 0},
+					{"main.main.func10", 0},
+				}},
+				{trace.EventStateTransition, "Goroutine Running->Syscall", []frame{
+					{"syscall.read", 0},
+					{"syscall.Read", 0},
+					{"internal/poll.ignoringEINTRIO", 0},
+					{"internal/poll.(*FD).Read", 0},
+					{"os.(*File).read", 0},
+					{"os.(*File).Read", 0},
+					{"main.main.func11", 0},
+				}},
+			}...)
+		}
+		stackMatches := func(stk trace.Stack, frames []frame) bool {
+			i := 0
+			match := true
+			stk.Frames(func(f trace.StackFrame) bool {
+				if f.Func != frames[i].fn {
+					match = false
+					return false
+				}
+				if line := uint64(frames[i].line); line != 0 && line != f.Line {
+					match = false
+					return false
+				}
+				i++
+				return true
+			})
+			return match
+		}
+		r, err := trace.NewReader(bytes.NewReader(tb))
+		if err != nil {
+			t.Error(err)
+		}
+		for {
+			ev, err := r.ReadEvent()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				t.Fatal(err)
+			}
+			for i, wantEv := range want {
+				if wantEv.kind != ev.Kind() {
+					continue
+				}
+				match := false
+				switch ev.Kind() {
+				case trace.EventStateTransition:
+					st := ev.StateTransition()
+					str := ""
+					switch st.Resource.Kind {
+					case trace.ResourceGoroutine:
+						old, new := st.Goroutine()
+						str = fmt.Sprintf("%s %s->%s", st.Resource.Kind, old, new)
+					}
+					match = str == wantEv.match
+				case trace.EventRangeBegin:
+					rng := ev.Range()
+					match = rng.Name == wantEv.match
+				case trace.EventMetric:
+					metric := ev.Metric()
+					match = metric.Name == wantEv.match
+				}
+				match = match && stackMatches(ev.Stack(), wantEv.frames)
+				if match {
+					want[i] = want[len(want)-1]
+					want = want[:len(want)-1]
+					break
+				}
+			}
+		}
+		if len(want) != 0 {
+			for _, ev := range want {
+				t.Errorf("no match for %s Match=%s Stack=%#v", ev.kind, ev.match, ev.frames)
+			}
+		}
+	})
+}
+
+func TestTraceStress(t *testing.T) {
+	switch runtime.GOOS {
+	case "js", "wasip1":
+		t.Skip("no os.Pipe on " + runtime.GOOS)
+	}
+	testTraceProg(t, "stress.go", nil)
+}
+
+func TestTraceStressStartStop(t *testing.T) {
+	switch runtime.GOOS {
+	case "js", "wasip1":
+		t.Skip("no os.Pipe on " + runtime.GOOS)
+	}
+	testTraceProg(t, "stress-start-stop.go", nil)
+}
+
+func TestTraceManyStartStop(t *testing.T) {
+	testTraceProg(t, "many-start-stop.go", nil)
+}
+
+func TestTraceWaitOnPipe(t *testing.T) {
+	switch runtime.GOOS {
+	case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris":
+		testTraceProg(t, "wait-on-pipe.go", nil)
+		return
+	}
+	t.Skip("no applicable syscall.Pipe on " + runtime.GOOS)
+}
+
+func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace, stderr []byte, stress bool)) {
+	testenv.MustHaveGoRun(t)
+
+	// Check if we're on a builder.
+	onBuilder := testenv.Builder() != ""
+	onOldBuilder := !strings.Contains(testenv.Builder(), "gotip") && !strings.Contains(testenv.Builder(), "go1")
+
+	testPath := filepath.Join("./testdata/testprog", progName)
+	testName := progName
+	runTest := func(t *testing.T, stress bool) {
+		// Run the program and capture the trace, which is always written to stdout.
+		cmd := testenv.Command(t, testenv.GoToolPath(t), "run", testPath)
+		cmd.Env = append(os.Environ(), "GOEXPERIMENT=exectracer2")
+		if stress {
+			// Advance a generation constantly.
+			cmd.Env = append(cmd.Env, "GODEBUG=traceadvanceperiod=0")
+		}
+		// Capture stdout and stderr.
+		//
+		// The protocol for these programs is that stdout contains the trace data
+		// and stderr is an expectation in string format.
+		var traceBuf, errBuf bytes.Buffer
+		cmd.Stdout = &traceBuf
+		cmd.Stderr = &errBuf
+		// Run the program.
+		if err := cmd.Run(); err != nil {
+			if errBuf.Len() != 0 {
+				t.Logf("stderr: %s", string(errBuf.Bytes()))
+			}
+			t.Fatal(err)
+		}
+		tb := traceBuf.Bytes()
+
+		// Test the trace and the parser.
+		testReader(t, bytes.NewReader(tb), testtrace.ExpectSuccess())
+
+		// Run some extra validation.
+		if !t.Failed() && extra != nil {
+			extra(t, tb, errBuf.Bytes(), stress)
+		}
+
+		// Dump some more information on failure.
+		if t.Failed() && onBuilder {
+			// Dump directly to the test log on the builder, since this
+			// data is critical for debugging and this is the only way
+			// we can currently make sure it's retained.
+			t.Log("found bad trace; dumping to test log...")
+			s := dumpTraceToText(t, tb)
+			if onOldBuilder && len(s) > 1<<20+512<<10 {
+				// The old build infrastructure truncates logs at ~2 MiB.
+				// Let's assume we're the only failure and give ourselves
+				// up to 1.5 MiB to dump the trace.
+				//
+				// TODO(mknyszek): Remove this when we've migrated off of
+				// the old infrastructure.
+				t.Logf("text trace too large to dump (%d bytes)", len(s))
+			} else {
+				t.Log(s)
+			}
+		} else if t.Failed() || *dumpTraces {
+			// We asked to dump the trace or failed. Write the trace to a file.
+			t.Logf("wrote trace to file: %s", dumpTraceToFile(t, testName, stress, tb))
+		}
+	}
+	t.Run("Default", func(t *testing.T) {
+		runTest(t, false)
+	})
+	t.Run("Stress", func(t *testing.T) {
+		if testing.Short() {
+			t.Skip("skipping trace reader stress tests in short mode")
+		}
+		runTest(t, true)
+	})
+}
diff --git a/src/internal/trace/v2/value.go b/src/internal/trace/v2/value.go
new file mode 100644
index 0000000..bd2cba7
--- /dev/null
+++ b/src/internal/trace/v2/value.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import "fmt"
+
+// Value is a dynamically-typed value obtained from a trace.
+type Value struct {
+	kind   ValueKind
+	scalar uint64
+}
+
+// ValueKind is the type of a dynamically-typed value from a trace.
+type ValueKind uint8
+
+const (
+	ValueBad ValueKind = iota
+	ValueUint64
+)
+
+// Kind returns the ValueKind of the value.
+//
+// It represents the underlying structure of the value.
+//
+// New ValueKinds may be added in the future. Users of this type must be robust
+// to that possibility.
+func (v Value) Kind() ValueKind {
+	return v.kind
+}
+
+// Uint64 returns the uint64 value for a Value of Kind ValueUint64.
+//
+// Panics if this Value's Kind is not ValueUint64.
+func (v Value) Uint64() uint64 {
+	if v.kind != ValueUint64 {
+		panic("Uint64 called on Value of a different Kind")
+	}
+	return v.scalar
+}
+
+// valueAsString produces a debug string for a Value.
+//
+// This isn't just Value.String because we may want to use that to store
+// string values in the future.
+func valueAsString(v Value) string {
+	switch v.Kind() {
+	case ValueUint64:
+		return fmt.Sprintf("Uint64(%d)", v.scalar)
+	}
+	return "Bad"
+}
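A brief usage note on the Value API above: callers are expected to check Kind
before calling a kind-specific accessor, since Uint64 panics for any other
kind. A minimal, hypothetical consumer (not part of this change) might look
like this:

	// metricString formats a trace.Value defensively, tolerating value
	// kinds added in future trace versions.
	func metricString(v trace.Value) string {
		switch v.Kind() {
		case trace.ValueUint64:
			// Safe: Kind was checked, so Uint64 will not panic.
			return fmt.Sprintf("%d", v.Uint64())
		default:
			return "<unsupported value kind>"
		}
	}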
diff --git a/src/internal/trace/v2/version/version.go b/src/internal/trace/v2/version/version.go
new file mode 100644
index 0000000..28189f8
--- /dev/null
+++ b/src/internal/trace/v2/version/version.go
@@ -0,0 +1,57 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package version
+
+import (
+	"fmt"
+	"io"
+
+	"internal/trace/v2/event"
+	"internal/trace/v2/event/go122"
+)
+
+// Version represents the version of a trace file.
+type Version uint32
+
+const (
+	Go122   Version = 22
+	Current         = Go122
+)
+
+var versions = map[Version][]event.Spec{
+	Go122: go122.Specs(),
+}
+
+// Specs returns the set of event.Specs for this version.
+func (v Version) Specs() []event.Spec {
+	return versions[v]
+}
+
+func (v Version) Valid() bool {
+	_, ok := versions[v]
+	return ok
+}
+
+// headerFmt is the format of the header of all Go execution traces.
+const headerFmt = "go 1.%d trace\x00\x00\x00"
+
+// ReadHeader reads the version of the trace out of the trace file's
+// header, which must be readable from r.
+func ReadHeader(r io.Reader) (Version, error) {
+	var v Version
+	_, err := fmt.Fscanf(r, headerFmt, &v)
+	if err != nil {
+		return v, fmt.Errorf("bad file format: not a Go execution trace?")
+	}
+	if !v.Valid() {
+		return v, fmt.Errorf("unknown or unsupported trace version go 1.%d", v)
+	}
+	return v, nil
+}
+
+// WriteHeader writes a header for a trace version v to w.
+func WriteHeader(w io.Writer, v Version) (int, error) {
+	return fmt.Fprintf(w, headerFmt, v)
+}
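For concreteness, with Go122 = 22 the header produced by WriteHeader is the
ASCII bytes "go 1.22 trace" followed by three NUL bytes, which ReadHeader
parses back using headerFmt. A small round-trip sketch (illustrative only;
the package is internal, so this assumes code living inside the Go tree):

	var buf bytes.Buffer
	if _, err := version.WriteHeader(&buf, version.Go122); err != nil {
		log.Fatal(err)
	}
	v, err := version.ReadHeader(&buf) // consumes "go 1.22 trace\x00\x00\x00"
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v == version.Go122) // true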
diff --git a/src/internal/types/errors/codes.go b/src/internal/types/errors/codes.go
index 62358c7..cae688f 100644
--- a/src/internal/types/errors/codes.go
+++ b/src/internal/types/errors/codes.go
@@ -1004,12 +1004,12 @@
 	//  }
 	InvalidIterVar
 
-	// InvalidRangeExpr occurs when the type of a range expression is not array,
-	// slice, string, map, or channel.
+	// InvalidRangeExpr occurs when the type of a range expression is not
+	// a valid type for use with a range loop.
 	//
 	// Example:
-	//  func f(i int) {
-	//  	for j := range i {
+	//  func f(f float64) {
+	//  	for j := range f {
 	//  		println(j)
 	//  	}
 	//  }
diff --git a/src/internal/types/testdata/check/builtins0.go b/src/internal/types/testdata/check/builtins0.go
index ed4769e..12d8fbf 100644
--- a/src/internal/types/testdata/check/builtins0.go
+++ b/src/internal/types/testdata/check/builtins0.go
@@ -792,16 +792,16 @@
 type S3 struct { // offset
 	a int64  //  0
 	b int32  //  8
-}                // 12
+}                // 16
 
 type S4 struct { // offset
 	S3       //  0
 	int32    // 12
-}                // 16
+}                // 24
 
 type S5 struct {   // offset
 	a [3]int32 //  0
-	b int32    // 12
+	b int32    // 16
 }                  // 16
 
 func (S2) m() {}
@@ -936,16 +936,16 @@
 	assert(unsafe.Sizeof(y2) == 8)
 
 	var y3 S3
-	assert(unsafe.Sizeof(y3) == 12)
+	assert(unsafe.Sizeof(y3) == 16)
 
 	var y4 S4
-	assert(unsafe.Sizeof(y4) == 16)
+	assert(unsafe.Sizeof(y4) == 24)
 
 	var y5 S5
 	assert(unsafe.Sizeof(y5) == 16)
 
 	var a3 [10]S3
-	assert(unsafe.Sizeof(a3) == 156)
+	assert(unsafe.Sizeof(a3) == 160)
 
 	// test case for issue 5670
 	type T struct {
diff --git a/src/internal/types/testdata/check/const1.go b/src/internal/types/testdata/check/const1.go
index c912801..80dde1a 100644
--- a/src/internal/types/testdata/check/const1.go
+++ b/src/internal/types/testdata/check/const1.go
@@ -75,10 +75,10 @@
 	_ int8 = maxInt8 /* ERROR "overflows" */ + 1
 	_ int8 = smallestFloat64 /* ERROR "truncated" */
 
-	_ = int8(minInt8 /* ERROR "cannot convert" */ - 1)
+	_ = int8(minInt8 /* ERROR "overflows" */ - 1)
 	_ = int8(minInt8)
 	_ = int8(maxInt8)
-	_ = int8(maxInt8 /* ERROR "cannot convert" */ + 1)
+	_ = int8(maxInt8 /* ERROR "overflows" */ + 1)
 	_ = int8(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
@@ -89,10 +89,10 @@
 	_ int16 = maxInt16 /* ERROR "overflows" */ + 1
 	_ int16 = smallestFloat64 /* ERROR "truncated" */
 
-	_ = int16(minInt16 /* ERROR "cannot convert" */ - 1)
+	_ = int16(minInt16 /* ERROR "overflows" */ - 1)
 	_ = int16(minInt16)
 	_ = int16(maxInt16)
-	_ = int16(maxInt16 /* ERROR "cannot convert" */ + 1)
+	_ = int16(maxInt16 /* ERROR "overflows" */ + 1)
 	_ = int16(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
@@ -103,10 +103,10 @@
 	_ int32 = maxInt32 /* ERROR "overflows" */ + 1
 	_ int32 = smallestFloat64 /* ERROR "truncated" */
 
-	_ = int32(minInt32 /* ERROR "cannot convert" */ - 1)
+	_ = int32(minInt32 /* ERROR "overflows" */ - 1)
 	_ = int32(minInt32)
 	_ = int32(maxInt32)
-	_ = int32(maxInt32 /* ERROR "cannot convert" */ + 1)
+	_ = int32(maxInt32 /* ERROR "overflows" */ + 1)
 	_ = int32(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
@@ -117,10 +117,10 @@
 	_ int64 = maxInt64 /* ERROR "overflows" */ + 1
 	_ int64 = smallestFloat64 /* ERROR "truncated" */
 
-	_ = int64(minInt64 /* ERROR "cannot convert" */ - 1)
+	_ = int64(minInt64 /* ERROR "overflows" */ - 1)
 	_ = int64(minInt64)
 	_ = int64(maxInt64)
-	_ = int64(maxInt64 /* ERROR "cannot convert" */ + 1)
+	_ = int64(maxInt64 /* ERROR "overflows" */ + 1)
 	_ = int64(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
@@ -131,10 +131,10 @@
 	_ int = maxInt /* ERROR "overflows" */ + 1
 	_ int = smallestFloat64 /* ERROR "truncated" */
 
-	_ = int(minInt /* ERROR "cannot convert" */ - 1)
+	_ = int(minInt /* ERROR "overflows" */ - 1)
 	_ = int(minInt)
 	_ = int(maxInt)
-	_ = int(maxInt /* ERROR "cannot convert" */ + 1)
+	_ = int(maxInt /* ERROR "overflows" */ + 1)
 	_ = int(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
@@ -145,10 +145,10 @@
 	_ uint8 = maxUint8 /* ERROR "overflows" */ + 1
 	_ uint8 = smallestFloat64 /* ERROR "truncated" */
 
-	_ = uint8(0 /* ERROR "cannot convert" */ - 1)
+	_ = uint8(0 /* ERROR "overflows" */ - 1)
 	_ = uint8(0)
 	_ = uint8(maxUint8)
-	_ = uint8(maxUint8 /* ERROR "cannot convert" */ + 1)
+	_ = uint8(maxUint8 /* ERROR "overflows" */ + 1)
 	_ = uint8(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
@@ -159,10 +159,10 @@
 	_ uint16 = maxUint16 /* ERROR "overflows" */ + 1
 	_ uint16 = smallestFloat64 /* ERROR "truncated" */
 
-	_ = uint16(0 /* ERROR "cannot convert" */ - 1)
+	_ = uint16(0 /* ERROR "overflows" */ - 1)
 	_ = uint16(0)
 	_ = uint16(maxUint16)
-	_ = uint16(maxUint16 /* ERROR "cannot convert" */ + 1)
+	_ = uint16(maxUint16 /* ERROR "overflows" */ + 1)
 	_ = uint16(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
@@ -173,10 +173,10 @@
 	_ uint32 = maxUint32 /* ERROR "overflows" */ + 1
 	_ uint32 = smallestFloat64 /* ERROR "truncated" */
 
-	_ = uint32(0 /* ERROR "cannot convert" */ - 1)
+	_ = uint32(0 /* ERROR "overflows" */ - 1)
 	_ = uint32(0)
 	_ = uint32(maxUint32)
-	_ = uint32(maxUint32 /* ERROR "cannot convert" */ + 1)
+	_ = uint32(maxUint32 /* ERROR "overflows" */ + 1)
 	_ = uint32(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
@@ -187,10 +187,10 @@
 	_ uint64 = maxUint64 /* ERROR "overflows" */ + 1
 	_ uint64 = smallestFloat64 /* ERROR "truncated" */
 
-	_ = uint64(0 /* ERROR "cannot convert" */ - 1)
+	_ = uint64(0 /* ERROR "overflows" */ - 1)
 	_ = uint64(0)
 	_ = uint64(maxUint64)
-	_ = uint64(maxUint64 /* ERROR "cannot convert" */ + 1)
+	_ = uint64(maxUint64 /* ERROR "overflows" */ + 1)
 	_ = uint64(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
@@ -201,10 +201,10 @@
 	_ uint = maxUint /* ERROR "overflows" */ + 1
 	_ uint = smallestFloat64 /* ERROR "truncated" */
 
-	_ = uint(0 /* ERROR "cannot convert" */ - 1)
+	_ = uint(0 /* ERROR "overflows" */ - 1)
 	_ = uint(0)
 	_ = uint(maxUint)
-	_ = uint(maxUint /* ERROR "cannot convert" */ + 1)
+	_ = uint(maxUint /* ERROR "overflows" */ + 1)
 	_ = uint(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
@@ -215,10 +215,10 @@
 	_ uintptr = maxUintptr /* ERROR "overflows" */ + 1
 	_ uintptr = smallestFloat64 /* ERROR "truncated" */
 
-	_ = uintptr(0 /* ERROR "cannot convert" */ - 1)
+	_ = uintptr(0 /* ERROR "overflows" */ - 1)
 	_ = uintptr(0)
 	_ = uintptr(maxUintptr)
-	_ = uintptr(maxUintptr /* ERROR "cannot convert" */ + 1)
+	_ = uintptr(maxUintptr /* ERROR "overflows" */ + 1)
 	_ = uintptr(smallestFloat64 /* ERROR "cannot convert" */)
 )
 
diff --git a/src/internal/types/testdata/check/constdecl.go b/src/internal/types/testdata/check/constdecl.go
index e7b871b..9ace419 100644
--- a/src/internal/types/testdata/check/constdecl.go
+++ b/src/internal/types/testdata/check/constdecl.go
@@ -125,7 +125,7 @@
 	ok = byte(iota + 253)
 	bad
 	barn
-	bard // ERROR "cannot convert"
+	bard // ERROR "overflows"
 )
 
 const (
diff --git a/src/internal/types/testdata/check/cycles5.go b/src/internal/types/testdata/check/cycles5.go
index a863aa8..a614505 100644
--- a/src/internal/types/testdata/check/cycles5.go
+++ b/src/internal/types/testdata/check/cycles5.go
@@ -1,3 +1,5 @@
+// -gotypesalias=0
+
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/src/internal/types/testdata/check/cycles5a.go b/src/internal/types/testdata/check/cycles5a.go
new file mode 100644
index 0000000..ed5853e
--- /dev/null
+++ b/src/internal/types/testdata/check/cycles5a.go
@@ -0,0 +1,202 @@
+// -gotypesalias=1
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+// test case from issue #18395
+
+type (
+	A interface { B }
+	B interface { C }
+	C interface { D; F() A }
+	D interface { G() B }
+)
+
+var _ = A(nil).G // G must be found
+
+
+// test case from issue #21804
+
+type sourceBridge interface {
+	listVersions() ([]Version, error)
+}
+
+type Constraint interface {
+	copyTo(*ConstraintMsg)
+}
+
+type ConstraintMsg struct{}
+
+func (m *ConstraintMsg) asUnpairedVersion() UnpairedVersion {
+	return nil
+}
+
+type Version interface {
+	Constraint
+}
+
+type UnpairedVersion interface {
+	Version
+}
+
+var _ Constraint = UnpairedVersion(nil)
+
+
+// derived test case from issue #21804
+
+type (
+	_ interface{ m(B1) }
+	A1 interface{ a(D1) }
+	B1 interface{ A1 }
+	C1 interface{ B1 }
+	D1 interface{ C1 }
+)
+
+var _ A1 = C1(nil)
+
+
+// derived test case from issue #22701
+
+func F(x I4) interface{} {
+	return x.Method()
+}
+
+type Unused interface {
+	RefersToI1(a I1)
+}
+
+type I1 interface {
+	I2
+	I3
+}
+
+type I2 interface {
+	RefersToI4() I4
+}
+
+type I3 interface {
+	Method() interface{}
+}
+
+type I4 interface {
+	I1
+}
+
+
+// check embedding of error interface
+
+type Error interface{ error }
+
+var err Error
+var _ = err.Error()
+
+
+// more esoteric cases
+
+type (
+	T1 interface { T2 }
+	T2 /* ERROR "invalid recursive type" */ T2
+)
+
+type (
+	T3 interface { T4 }
+	T4 /* ERROR "invalid recursive type" */ T5
+	T5 = T6
+	T6 = T7
+	T7 = T4
+)
+
+
+// arbitrary code may appear inside an interface
+
+const n = unsafe.Sizeof(func(){})
+
+type I interface {
+	m([unsafe.Sizeof(func() { I.m(nil, [n]byte{}) })]byte)
+}
+
+
+// test cases for various alias cycles
+
+type T10 /* ERROR "invalid recursive type" */ = *T10                 // issue #25141
+type T11 /* ERROR "invalid recursive type" */ = interface{ f(T11) }  // issue #23139
+
+// issue #18640
+type (
+	aa = bb
+	bb struct {
+		*aa
+	}
+)
+
+type (
+	a struct{ *b }
+	b = c
+	c struct{ *b }
+)
+
+// issue #24939
+type (
+	_ interface {
+		M(P)
+	}
+
+	M interface {
+		F() P
+	}
+
+	P = interface {
+		I() M
+	}
+)
+
+// issue #8699
+type T12 /* ERROR "invalid recursive type" */ [len(a12)]int
+var a12 = makeArray()
+func makeArray() (res T12) { return }
+
+// issue #20770
+var r /* ERROR "invalid cycle in declaration of r" */ = newReader()
+func newReader() r
+
+// variations of the theme of #8699 and #20770
+var arr /* ERROR "cycle" */ = f()
+func f() [len(arr)]int
+
+// issue #25790
+func ff(ff /* ERROR "not a type" */ )
+func gg((gg /* ERROR "not a type" */ ))
+
+type T13 /* ERROR "invalid recursive type T13" */ [len(b13)]int
+var b13 T13
+
+func g1() [unsafe.Sizeof(g1)]int
+func g2() [unsafe.Sizeof(x2)]int
+var x2 = g2
+
+// verify that we get the correct sizes for the functions above
+// (note: assert is statically evaluated in go/types test mode)
+func init() {
+	assert(unsafe.Sizeof(g1) == 8)
+	assert(unsafe.Sizeof(x2) == 8)
+}
+
+func h() [h /* ERROR "no value" */ ()[0]]int { panic(0) }
+
+var c14 /* ERROR "cycle" */ T14
+type T14 [uintptr(unsafe.Sizeof(&c14))]byte
+
+// issue #34333
+type T15 /* ERROR "invalid recursive type T15" */ struct {
+	f func() T16
+	b T16
+}
+
+type T16 struct {
+	T15
+}
\ No newline at end of file
diff --git a/src/internal/types/testdata/check/decls4.go b/src/internal/types/testdata/check/decls4.go
index c47a68d..7c06390 100644
--- a/src/internal/types/testdata/check/decls4.go
+++ b/src/internal/types/testdata/check/decls4.go
@@ -59,7 +59,7 @@
 )
 
 // alias receiver types
-func (Ai /* ERROR "cannot define new methods on non-local type int" */) m1() {}
+func (Ai /* ERRORx "cannot define new methods on non-local type (int|Ai)" */) m1() {}
 func (T0) m1() {}
 func (A0) m1 /* ERROR "already declared" */ () {}
 func (A0) m2 () {}
@@ -115,8 +115,8 @@
 	B2 = int
 )
 
-func (B0 /* ERROR "cannot define new methods on non-local type int" */ ) m() {}
-func (B1 /* ERROR "cannot define new methods on non-local type int" */ ) n() {}
+func (B0 /* ERRORx "cannot define new methods on non-local type (int|B)" */ ) m() {}
+func (B1 /* ERRORx "cannot define new methods on non-local type (int|B)" */ ) n() {}
 
 // cycles
 type (
diff --git a/src/internal/types/testdata/check/expr0.go b/src/internal/types/testdata/check/expr0.go
index eba991e..26dc589 100644
--- a/src/internal/types/testdata/check/expr0.go
+++ b/src/internal/types/testdata/check/expr0.go
@@ -28,7 +28,7 @@
 
 	// byte
 	_ = byte(0)
-	_ = byte(- /* ERROR "cannot convert" */ 1)
+	_ = byte(- /* ERROR "overflows" */ 1)
 	_ = - /* ERROR "-byte(1) (constant -1 of type byte) overflows byte" */ byte(1) // test for issue 11367
 	_ = byte /* ERROR "overflows byte" */ (0) - byte(1)
 	_ = ~ /* ERROR "cannot use ~ outside of interface or type constraint (use ^ for bitwise complement)" */ byte(0)
diff --git a/src/internal/types/testdata/check/issues0.go b/src/internal/types/testdata/check/issues0.go
index 6039df9..2f4d266 100644
--- a/src/internal/types/testdata/check/issues0.go
+++ b/src/internal/types/testdata/check/issues0.go
@@ -282,7 +282,7 @@
 }
 
 type issue25301c interface {
-	notE // ERROR "non-interface type struct{}"
+	notE // ERRORx "non-interface type (struct{}|notE)"
 }
 
 type notE = struct{}
diff --git a/src/internal/types/testdata/check/stmt0.go b/src/internal/types/testdata/check/stmt0.go
index 5232285..b61f1c7 100644
--- a/src/internal/types/testdata/check/stmt0.go
+++ b/src/internal/types/testdata/check/stmt0.go
@@ -805,7 +805,6 @@
 
 func rangeloops1() {
 	var (
-		x int
 		a [10]float32
 		b []string
 		p *[10]complex128
@@ -815,11 +814,12 @@
 		c chan int
 		sc chan<- int
 		rc <-chan int
+		xs struct{}
 	)
 
-	for range x /* ERROR "cannot range over" */ {}
-	for _ = range x /* ERROR "cannot range over" */ {}
-	for i := range x /* ERROR "cannot range over" */ {}
+	for range xs /* ERROR "cannot range over" */ {}
+	for _ = range xs /* ERROR "cannot range over" */ {}
+	for i := range xs /* ERROR "cannot range over" */ { _ = i }
 
 	for range a {}
 	for i := range a {
@@ -953,10 +953,10 @@
 	for y /* ERROR "declared and not used" */ := range "" {
 		_ = "" /* ERROR "mismatched types untyped string and untyped int" */ + 1
 	}
-	for range 1 /* ERROR "cannot range over 1" */ {
+	for range 1.5 /* ERROR "cannot range over 1.5" */ {
 		_ = "" /* ERROR "mismatched types untyped string and untyped int" */ + 1
 	}
-	for y := range 1 /* ERROR "cannot range over 1" */ {
+	for y := range 1.5 /* ERROR "cannot range over 1.5" */ {
 		_ = "" /* ERROR "mismatched types untyped string and untyped int" */ + 1
 	}
 }
diff --git a/src/internal/types/testdata/check/typeinference.go b/src/internal/types/testdata/check/typeinference.go
index 0478d93..8dac938 100644
--- a/src/internal/types/testdata/check/typeinference.go
+++ b/src/internal/types/testdata/check/typeinference.go
@@ -8,8 +8,9 @@
 
 // basic inference
 type Tb[P ~*Q, Q any] int
+
 func _() {
-	var x Tb /* ERROR "got 1 arguments" */ [*int]
+	var x Tb /* ERROR "not enough type arguments for type Tb: have 1, want 2" */ [*int]
 	var y Tb[*int, int]
 	x = y /* ERRORx `cannot use y .* in assignment` */
 	_ = x
@@ -17,8 +18,9 @@
 
 // recursive inference
 type Tr[A any, B *C, C *D, D *A] int
+
 func _() {
-	var x Tr /* ERROR "got 1 arguments" */ [string]
+	var x Tr /* ERROR "not enough type arguments for type Tr: have 1, want 4" */ [string]
 	var y Tr[string, ***string, **string, *string]
 	var z Tr[int, ***int, **int, *int]
 	x = y /* ERRORx `cannot use y .* in assignment` */
@@ -28,22 +30,30 @@
 
 // other patterns of inference
 type To0[A any, B []A] int
-type To1[A any, B struct{a A}] int
+type To1[A any, B struct{ a A }] int
 type To2[A any, B [][]A] int
 type To3[A any, B [3]*A] int
-type To4[A any, B any, C struct{a A; b B}] int
+type To4[A any, B any, C struct {
+	a A
+	b B
+}] int
+
 func _() {
-	var _ To0 /* ERROR "got 1 arguments" */ [int]
-	var _ To1 /* ERROR "got 1 arguments" */ [int]
-	var _ To2 /* ERROR "got 1 arguments" */ [int]
-	var _ To3 /* ERROR "got 1 arguments" */ [int]
-	var _ To4 /* ERROR "got 2 arguments" */ [int, string]
+	var _ To0 /* ERROR "not enough type arguments for type To0: have 1, want 2" */ [int]
+	var _ To1 /* ERROR "not enough type arguments for type To1: have 1, want 2" */ [int]
+	var _ To2 /* ERROR "not enough type arguments for type To2: have 1, want 2" */ [int]
+	var _ To3 /* ERROR "not enough type arguments for type To3: have 1, want 2" */ [int]
+	var _ To4 /* ERROR "not enough type arguments for type To4: have 2, want 3" */ [int, string]
 }
 
 // failed inference
 type Tf0[A, B any] int
-type Tf1[A any, B ~struct{a A; c C}, C any] int
+type Tf1[A any, B ~struct {
+	a A
+	c C
+}, C any] int
+
 func _() {
-	var _ Tf0 /* ERROR "got 1 arguments but 2 type parameters" */ [int]
-	var _ Tf1 /* ERROR "got 1 arguments but 3 type parameters" */ [int]
+	var _ Tf0 /* ERROR "not enough type arguments for type Tf0: have 1, want 2" */ [int]
+	var _ Tf1 /* ERROR "not enough type arguments for type Tf1: have 1, want 3" */ [int]
 }
diff --git a/src/internal/types/testdata/check/typeinst0.go b/src/internal/types/testdata/check/typeinst0.go
index bbcdaec..155f1ef 100644
--- a/src/internal/types/testdata/check/typeinst0.go
+++ b/src/internal/types/testdata/check/typeinst0.go
@@ -42,7 +42,7 @@
 // TODO(gri) better error messages
 type _ T1[] // ERROR "expected type argument list"
 type _ T1[x /* ERROR "not a type" */ ]
-type _ T1 /* ERROR "got 2 arguments but 1 type parameters" */ [int, float32]
+type _ T1 /* ERROR "too many type arguments for type T1: have 2, want 1" */ [int, float32]
 
 var _ T2[int] = T2[int]{}
 
diff --git a/src/internal/types/testdata/examples/inference2.go b/src/internal/types/testdata/examples/inference2.go
index 6097c2b..91f9df1 100644
--- a/src/internal/types/testdata/examples/inference2.go
+++ b/src/internal/types/testdata/examples/inference2.go
@@ -27,9 +27,9 @@
 	_  func(int) int = f3[int]
 
 	v6 func(int, int)     = f4
-	v7 func(int, string)  = f4 // ERROR "type func(int, string) of variable in assignment does not match inferred type func(int, int) for func(P, P)"
+	v7 func(int, string)  = f4 // ERROR "inferred type func(int, int) for func(P, P) does not match type func(int, string) of v7"
 	v8 func(int) []int    = f5
-	v9 func(string) []int = f5 // ERROR "type func(string) []int of variable in assignment does not match inferred type func(string) []string for func(P) []P"
+	v9 func(string) []int = f5 // ERROR "inferred type func(string) []string for func(P) []P does not match type func(string) []int of v9"
 
 	_, _ func(int) = f1, f1
 	_, _ func(int) = f1, f2 // ERROR "cannot infer P"
@@ -49,9 +49,13 @@
 	v5 = f3[int]
 
 	v6 = f4
-	v7 = f4 // ERROR "type func(int, string) of variable in assignment does not match inferred type func(int, int) for func(P, P)"
+	v7 = f4 // ERROR "inferred type func(int, int) for func(P, P) does not match type func(int, string) of v7"
 	v8 = f5
-	v9 = f5 // ERROR "type func(string) []int of variable in assignment does not match inferred type func(string) []string for func(P) []P"
+	v9 = f5 // ERROR "inferred type func(string) []string for func(P) []P does not match type func(string) []int of v9"
+
+	// non-trivial LHS
+	var a [2]func(string) []int
+	a[0] = f5 // ERROR "inferred type func(string) []string for func(P) []P does not match type func(string) []int of a[0]"
 }
 
 // Return statements
@@ -62,11 +66,11 @@
 
 func _() func(int, int) { return f4 }
 func _() func(int, string) {
-	return f4 /* ERROR "type func(int, string) of variable in assignment does not match inferred type func(int, int) for func(P, P)" */
+	return f4 /* ERROR "inferred type func(int, int) for func(P, P) does not match type func(int, string) of result variable" */
 }
 func _() func(int) []int { return f5 }
 func _() func(string) []int {
-	return f5 /* ERROR "type func(string) []int of variable in assignment does not match inferred type func(string) []string for func(P) []P" */
+	return f5 /* ERROR "inferred type func(string) []string for func(P) []P does not match type func(string) []int of result variable" */
 }
 
 func _() (_, _ func(int)) { return f1, f1 }
diff --git a/src/internal/types/testdata/fixedbugs/issue25838.go b/src/internal/types/testdata/fixedbugs/issue25838.go
index adbd138..b0ea98e 100644
--- a/src/internal/types/testdata/fixedbugs/issue25838.go
+++ b/src/internal/types/testdata/fixedbugs/issue25838.go
@@ -24,3 +24,17 @@
 	P = *T
 	T P
 )
+
+func newA(c funcAlias) A {
+	return A{c: c}
+}
+
+type B struct {
+	a *A
+}
+
+type A struct {
+	c funcAlias
+}
+
+type funcAlias = func(B)
diff --git a/src/internal/types/testdata/fixedbugs/issue28251.go b/src/internal/types/testdata/fixedbugs/issue28251.go
index 77fd369..71e727e 100644
--- a/src/internal/types/testdata/fixedbugs/issue28251.go
+++ b/src/internal/types/testdata/fixedbugs/issue28251.go
@@ -60,6 +60,6 @@
         T11 = T
 )
 
-func (T9 /* ERROR "invalid receiver type **T" */ ) m9() {}
+func (T9 /* ERRORx `invalid receiver type (\*\*T|T9)` */ ) m9() {}
 func _() { (T{}).m9 /* ERROR "has no field or method m9" */ () }
 func _() { (&T{}).m9 /* ERROR "has no field or method m9" */ () }
diff --git a/src/internal/types/testdata/fixedbugs/issue46461.go b/src/internal/types/testdata/fixedbugs/issue46461.go
index ae70048..e823013 100644
--- a/src/internal/types/testdata/fixedbugs/issue46461.go
+++ b/src/internal/types/testdata/fixedbugs/issue46461.go
@@ -1,3 +1,5 @@
+// -gotypesalias=0
+
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/src/internal/types/testdata/fixedbugs/issue46461a.go b/src/internal/types/testdata/fixedbugs/issue46461a.go
new file mode 100644
index 0000000..e4b8e1a
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue46461a.go
@@ -0,0 +1,23 @@
+// -gotypesalias=1
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// test case 1
+type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U] }] int
+
+type X int
+
+func (X) M() T[X] { return 0 }
+
+// test case 2
+type A /* ERROR "invalid recursive type" */ [T interface{ A[T] }] interface{}
+
+// test case 3
+// TODO(gri) should report error only once
+type A2 /* ERROR "invalid recursive type" */ /* ERROR "invalid recursive type" */ [U interface{ A2[U] }] interface{ M() A2[U] }
+
+type I interface{ A2[I]; M() A2[I] }
diff --git a/src/internal/types/testdata/fixedbugs/issue47968.go b/src/internal/types/testdata/fixedbugs/issue47968.go
index c516eee..83a1786 100644
--- a/src/internal/types/testdata/fixedbugs/issue47968.go
+++ b/src/internal/types/testdata/fixedbugs/issue47968.go
@@ -14,8 +14,8 @@
 
 type A2 = T[int]
 
-func (A2 /* ERROR "cannot define new methods on instantiated type T[int]" */) m3()   {}
-func (_ /* ERROR "cannot define new methods on instantiated type T[int]" */ A2) m4() {}
+func (A2 /* ERRORx `cannot define new methods on instantiated type (T\[int\]|A2)` */) m3()   {}
+func (_ /* ERRORx `cannot define new methods on instantiated type (T\[int\]|A2)` */ A2) m4() {}
 
 func (T[int]) m5()                                     {} // int is the type parameter name, not an instantiation
 func (T[* /* ERROR "must be an identifier" */ int]) m6() {} // syntax error
diff --git a/src/internal/types/testdata/fixedbugs/issue49541.go b/src/internal/types/testdata/fixedbugs/issue49541.go
index da37311..665ed1d 100644
--- a/src/internal/types/testdata/fixedbugs/issue49541.go
+++ b/src/internal/types/testdata/fixedbugs/issue49541.go
@@ -13,7 +13,7 @@
 // TODO(gri): with type-type inference enabled we should only report one error
 // below. See issue #50588.
 
-func _[A any](s S /* ERROR "got 1 arguments but 2 type parameters" */ [A]) {
+func _[A any](s S /* ERROR "not enough type arguments for type S: have 1, want 2" */ [A]) {
 	// we should see no follow-on errors below
 	s.f = 1
 	s.m()
@@ -22,7 +22,7 @@
 // another test case from the issue
 
 func _() {
-	X /* ERROR "cannot infer Q" */ (Interface[*F /* ERROR "got 1 arguments but 2 type parameters" */ [string]](Impl{}))
+	X /* ERROR "cannot infer Q" */ (Interface[*F /* ERROR "not enough type arguments for type F: have 1, want 2" */ [string]](Impl{}))
 }
 
 func X[Q Qer](fs Interface[Q]) {
diff --git a/src/internal/types/testdata/fixedbugs/issue50729b.go b/src/internal/types/testdata/fixedbugs/issue50729b.go
new file mode 100644
index 0000000..bc1f440
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50729b.go
@@ -0,0 +1,15 @@
+// -gotypesalias=1
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type d[T any] struct{}
+type (
+	b d[a]
+)
+
+type a = func(c)
+type c struct{ a }
diff --git a/src/internal/types/testdata/fixedbugs/issue50779.go b/src/internal/types/testdata/fixedbugs/issue50779.go
index 09ddf53..59c0f2d 100644
--- a/src/internal/types/testdata/fixedbugs/issue50779.go
+++ b/src/internal/types/testdata/fixedbugs/issue50779.go
@@ -1,3 +1,5 @@
+// -gotypesalias=0
+
 // Copyright 2022 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/src/internal/types/testdata/fixedbugs/issue50779a.go b/src/internal/types/testdata/fixedbugs/issue50779a.go
new file mode 100644
index 0000000..d0e9905
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50779a.go
@@ -0,0 +1,25 @@
+// -gotypesalias=1
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type AC interface {
+	C
+}
+
+type ST []int
+
+type R[S any, P any] struct{}
+
+type SR = R[SS, ST]
+
+type SS interface {
+	NSR(any) *SR
+}
+
+type C interface {
+	NSR(any) *SR
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50929.go b/src/internal/types/testdata/fixedbugs/issue50929.go
index 64c7cd6..a665e22 100644
--- a/src/internal/types/testdata/fixedbugs/issue50929.go
+++ b/src/internal/types/testdata/fixedbugs/issue50929.go
@@ -16,7 +16,7 @@
 
 func _() {
 	// TODO(gri) only report one error below (issue #50932)
-	var x F /* ERROR "got 1 arguments but 2 type parameters" */ [int]
+	var x F /* ERROR "not enough type arguments for type F: have 1, want 2" */ [int]
 	G(x /* ERROR "does not match" */)
 }
 
@@ -46,9 +46,9 @@
 	fmt.Println(c)
 }
 
-func MMD[Rc RC /* ERROR "got 1 arguments" */ [RG], RG any, G any]() M /* ERROR "got 2 arguments" */ [Rc, RG] {
+func MMD[Rc RC /* ERROR "not enough type arguments for type RC: have 1, want 2" */ [RG], RG any, G any]() M /* ERROR "not enough type arguments for type" */ [Rc, RG] {
 
-	var nFn NFn /* ERROR "got 2 arguments" */ [Rc, RG]
+	var nFn NFn /* ERROR "not enough type arguments for type NFn: have 2, want 3" */ [Rc, RG]
 
 	var empty Rc
 	switch any(empty).(type) {
@@ -58,11 +58,11 @@
 		nFn = NSG /* ERROR "cannot use NSG[G]" */ [G]
 	}
 
-	return M /* ERROR "got 2 arguments" */ [Rc, RG]{
+	return M /* ERROR "not enough type arguments for type M: have 2, want 3" */ [Rc, RG]{
 		Fn: func(rc Rc) {
-			NC(nFn /* ERROR "does not match" */ )
+			NC(nFn /* ERROR "does not match" */)
 		},
 	}
 
-	return M /* ERROR "got 2 arguments" */ [Rc, RG]{}
+	return M /* ERROR "not enough type arguments for type M: have 2, want 3" */ [Rc, RG]{}
 }
diff --git a/src/internal/types/testdata/fixedbugs/issue51232.go b/src/internal/types/testdata/fixedbugs/issue51232.go
index 27693a3..c5832d2 100644
--- a/src/internal/types/testdata/fixedbugs/issue51232.go
+++ b/src/internal/types/testdata/fixedbugs/issue51232.go
@@ -11,20 +11,20 @@
 type Fn[RCT RC[RG], RG any] func(RCT)
 
 type F[RCT RC[RG], RG any] interface {
-	Fn() Fn /* ERROR "got 1 arguments" */ [RCT]
+	Fn() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT]
 }
 
 type concreteF[RCT RC[RG], RG any] struct {
-	makeFn func() Fn /* ERROR "got 1 arguments" */ [RCT]
+	makeFn func() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT]
 }
 
-func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR "got 1 arguments" */ [RCT] {
+func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT] {
 	return c.makeFn()
 }
 
-func NewConcrete[RCT RC[RG], RG any](Rc RCT) F /* ERROR "got 1 arguments" */ [RCT] {
+func NewConcrete[RCT RC[RG], RG any](Rc RCT) F /* ERROR "not enough type arguments for type F: have 1, want 2" */ [RCT] {
 	// TODO(rfindley): eliminate the duplicate error below.
-	return & /* ERRORx `cannot use .* as F\[RCT\]` */ concreteF /* ERROR "got 1 arguments" */ [RCT]{
+	return & /* ERRORx `cannot use .* as F\[RCT\]` */ concreteF /* ERROR "not enough type arguments for type concreteF: have 1, want 2" */ [RCT]{
 		makeFn: nil,
 	}
 }
diff --git a/src/internal/types/testdata/fixedbugs/issue51233.go b/src/internal/types/testdata/fixedbugs/issue51233.go
index e2f97fc..d96d3d1 100644
--- a/src/internal/types/testdata/fixedbugs/issue51233.go
+++ b/src/internal/types/testdata/fixedbugs/issue51233.go
@@ -12,16 +12,16 @@
 
 type Fn[RCT RC[RG], RG any] func(RCT)
 
-type FFn[RCT RC[RG], RG any] func() Fn /* ERROR "got 1 arguments" */ [RCT]
+type FFn[RCT RC[RG], RG any] func() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT]
 
 type F[RCT RC[RG], RG any] interface {
-	Fn() Fn /* ERROR "got 1 arguments" */ [RCT]
+	Fn() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT]
 }
 
 type concreteF[RCT RC[RG], RG any] struct {
-	makeFn FFn /* ERROR "got 1 arguments" */ [RCT]
+	makeFn FFn /* ERROR "not enough type arguments for type FFn: have 1, want 2" */ [RCT]
 }
 
-func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR "got 1 arguments" */ [RCT] {
+func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT] {
 	return c.makeFn()
 }
diff --git a/src/internal/types/testdata/fixedbugs/issue51339.go b/src/internal/types/testdata/fixedbugs/issue51339.go
index 65c2134..fd10daa 100644
--- a/src/internal/types/testdata/fixedbugs/issue51339.go
+++ b/src/internal/types/testdata/fixedbugs/issue51339.go
@@ -9,10 +9,12 @@
 
 type T[P any, B *P] struct{}
 
-func (T /* ERROR "cannot use generic type" */ ) m0() {}
+func (T /* ERROR "cannot use generic type" */) m0() {}
 
 // TODO(rfindley): eliminate the duplicate errors here.
-func (/* ERROR "got 1 type parameter, but receiver base type declares 2" */ T /* ERROR "got 1 arguments but 2 type parameters" */ [_]) m1() {}
+func ( /* ERROR "got 1 type parameter, but receiver base type declares 2" */ T /* ERROR "not enough type arguments for type" */ [_]) m1() {
+}
 func (T[_, _]) m2() {}
+
 // TODO(gri) this error is unfortunate (issue #51343)
-func (T /* ERROR "got 3 arguments but 2 type parameters" */ [_, _, _]) m3() {}
+func (T /* ERROR "too many type arguments for type" */ [_, _, _]) m3() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue60434.go b/src/internal/types/testdata/fixedbugs/issue60434.go
new file mode 100644
index 0000000..e1d7652
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60434.go
@@ -0,0 +1,17 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that there are no type inference errors
+// if function arguments are invalid.
+
+package p
+
+func f[S any](S) {}
+
+var s struct{ x int }
+
+func _() {
+	f(s.y /* ERROR "s.y undefined" */)
+	f(1 /* ERROR "cannot convert 1" */ / s)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue60688.go b/src/internal/types/testdata/fixedbugs/issue60688.go
index 38d90ee..61b9f91 100644
--- a/src/internal/types/testdata/fixedbugs/issue60688.go
+++ b/src/internal/types/testdata/fixedbugs/issue60688.go
@@ -13,4 +13,4 @@
 // be identical to match).
 // The result is an error from type inference, rather than an
 // error from an assignment mismatch.
-var f func(int, String) = g // ERROR "type func(int, String) of variable in assignment does not match inferred type func(int, string) for func(P, string)"
+var f func(int, String) = g // ERROR "inferred type func(int, string) for func(P, string) does not match type func(int, String) of f"
diff --git a/src/internal/types/testdata/fixedbugs/issue60747.go b/src/internal/types/testdata/fixedbugs/issue60747.go
new file mode 100644
index 0000000..6587a4e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60747.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P any](P) P { panic(0) }
+
+var v func(string) int = f // ERROR "inferred type func(string) string for func(P) P does not match type func(string) int of v"
+
+func _() func(string) int {
+	return f // ERROR "inferred type func(string) string for func(P) P does not match type func(string) int of result variable"
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue61685.go b/src/internal/types/testdata/fixedbugs/issue61685.go
new file mode 100644
index 0000000..b88b222
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue61685.go
@@ -0,0 +1,15 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[T any](x any) {
+	f /* ERROR "T (type I[T]) does not satisfy I[T] (wrong type for method m)" */ (x.(I[T]))
+}
+
+func f[T I[T]](T) {}
+
+type I[T any] interface {
+	m(T)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue61822.go b/src/internal/types/testdata/fixedbugs/issue61822.go
new file mode 100644
index 0000000..0a91ebb
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue61822.go
@@ -0,0 +1,19 @@
+// -lang=go1.19
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+
+package p
+
+type I[P any] interface {
+	~string | ~int
+	Error() P
+}
+
+func _[P I[string]]() {
+	var x P
+	var _ error = x
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue63563.go b/src/internal/types/testdata/fixedbugs/issue63563.go
new file mode 100644
index 0000000..b813485
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue63563.go
@@ -0,0 +1,37 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var (
+	_ = int8(1 /* ERROR "constant 255 overflows int8" */ <<8 - 1)
+	_ = int16(1 /* ERROR "constant 65535 overflows int16" */ <<16 - 1)
+	_ = int32(1 /* ERROR "constant 4294967295 overflows int32" */ <<32 - 1)
+	_ = int64(1 /* ERROR "constant 18446744073709551615 overflows int64" */ <<64 - 1)
+
+	_ = uint8(1 /* ERROR "constant 256 overflows uint8" */ << 8)
+	_ = uint16(1 /* ERROR "constant 65536 overflows uint16" */ << 16)
+	_ = uint32(1 /* ERROR "constant 4294967296 overflows uint32" */ << 32)
+	_ = uint64(1 /* ERROR "constant 18446744073709551616 overflows uint64" */ << 64)
+)
+
+func _[P int8 | uint8]() {
+	_ = P(0)
+	_ = P(1 /* ERROR "constant 255 overflows int8 (in P)" */ <<8 - 1)
+}
+
+func _[P int16 | uint16]() {
+	_ = P(0)
+	_ = P(1 /* ERROR "constant 65535 overflows int16 (in P)" */ <<16 - 1)
+}
+
+func _[P int32 | uint32]() {
+	_ = P(0)
+	_ = P(1 /* ERROR "constant 4294967295 overflows int32 (in P)" */ <<32 - 1)
+}
+
+func _[P int64 | uint64]() {
+	_ = P(0)
+	_ = P(1 /* ERROR "constant 18446744073709551615 overflows int64 (in P)" */ <<64 - 1)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue64406.go b/src/internal/types/testdata/fixedbugs/issue64406.go
new file mode 100644
index 0000000..54b959d
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue64406.go
@@ -0,0 +1,23 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue64406
+
+import (
+	"unsafe"
+)
+
+func sliceData[E any, S ~[]E](s S) *E {
+	return unsafe.SliceData(s)
+}
+
+func slice[E any, S ~*E](s S) []E {
+	return unsafe.Slice(s, 0)
+}
+
+func f() {
+	s := []uint32{0}
+	_ = sliceData(s)
+	_ = slice(&s)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue64704.go b/src/internal/types/testdata/fixedbugs/issue64704.go
new file mode 100644
index 0000000..c8e9056
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue64704.go
@@ -0,0 +1,12 @@
+// -lang=go1.21
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+	for range 10 /* ERROR "cannot range over 10 (untyped int constant): requires go1.22 or later" */ {
+	}
+}
diff --git a/src/internal/types/testdata/spec/assignability.go b/src/internal/types/testdata/spec/assignability.go
index 6670870..2ce9a4a 100644
--- a/src/internal/types/testdata/spec/assignability.go
+++ b/src/internal/types/testdata/spec/assignability.go
@@ -37,7 +37,7 @@
 // and at least one of V or T is not a named type."
 // (here a named type is a type with a name)
 func _[TP1, TP2 Interface](X1 TP1, X2 TP2) {
-	b = B // ERRORx `cannot use B .* as int value`
+	b = B // ERRORx `cannot use B .* as (int|_Basic.*) value`
 	a = A
 	l = L
 	s = S
@@ -134,7 +134,7 @@
 		_ TP0 = C // ERRORx `cannot use C .* as TP0 value`
 		_ TP1 = c
 		_ TP1 = C // ERRORx `cannot use C .* as TP1 value`
-		_ TP2 = c // ERRORx `.* cannot assign chan int to chan byte`
+		_ TP2 = c // ERRORx `.* cannot assign (chan int|_Chan.*) to chan byte`
 	)
 }
 
@@ -148,7 +148,7 @@
 	I = X0
 	c = X1
 	C = X1 // ERRORx `cannot use X1 .* as Chan value`
-	c = X2 // ERRORx `.* cannot assign chan byte \(in TP2\) to chan int`
+	c = X2 // ERRORx `.* cannot assign chan byte \(in TP2\) to (chan int|_Chan.*)`
 }
 
 // "x is the predeclared identifier nil and T is a pointer, function, slice, map, channel, or interface type"
diff --git a/src/internal/types/testdata/spec/range.go b/src/internal/types/testdata/spec/range.go
new file mode 100644
index 0000000..4ae270d
--- /dev/null
+++ b/src/internal/types/testdata/spec/range.go
@@ -0,0 +1,157 @@
+// -goexperiment=rangefunc
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type MyInt int32
+type MyBool bool
+type MyString string
+type MyFunc1 func(func(int) bool)
+type MyFunc2 func(int) bool
+type MyFunc3 func(MyFunc2)
+
+type T struct{}
+
+func (*T) PM() {}
+func (T) M()   {}
+
+func f1()                             {}
+func f2(func())                       {}
+func f4(func(int) bool)               {}
+func f5(func(int, string) bool)       {}
+func f7(func(int) MyBool)             {}
+func f8(func(MyInt, MyString) MyBool) {}
+
+func test() {
+	// TODO: Would be nice to test 'for range T.M' and 'for range (*T).PM' directly,
+	// but there is no gofmt-friendly way to write the error pattern in the right place.
+	m1 := T.M
+	for range m1 /* ERROR "cannot range over m1 (variable of type func(T)): func must be func(yield func(...) bool): argument is not func" */ {
+	}
+	m2 := (*T).PM
+	for range m2 /* ERROR "cannot range over m2 (variable of type func(*T)): func must be func(yield func(...) bool): argument is not func" */ {
+	}
+	for range f1 /* ERROR "cannot range over f1 (value of type func()): func must be func(yield func(...) bool): wrong argument count" */ {
+	}
+	for range f2 /* ERROR "cannot range over f2 (value of type func(func())): func must be func(yield func(...) bool): yield func does not return bool" */ {
+	}
+	for range f4 /* ERROR "range over f4 (value of type func(func(int) bool)) must have one iteration variable" */ {
+	}
+	for _ = range f4 {
+	}
+	for _, _ = range f5 {
+	}
+	for _ = range f7 {
+	}
+	for _, _ = range f8 {
+	}
+	for range 1 {
+	}
+	for range uint8(1) {
+	}
+	for range int64(1) {
+	}
+	for range MyInt(1) {
+	}
+	for range 'x' {
+	}
+	for range 1.0 /* ERROR "cannot range over 1.0 (untyped float constant 1)" */ {
+	}
+	for _ = range MyFunc1(nil) {
+	}
+	for _ = range MyFunc3(nil) {
+	}
+	for _ = range (func(MyFunc2))(nil) {
+	}
+
+	var i int
+	var s string
+	var mi MyInt
+	var ms MyString
+	for i := range f4 {
+		_ = i
+	}
+	for i = range f4 {
+		_ = i
+	}
+	for i, s := range f5 {
+		_, _ = i, s
+	}
+	for i, s = range f5 {
+		_, _ = i, s
+	}
+	for i, _ := range f5 {
+		_ = i
+	}
+	for i, _ = range f5 {
+		_ = i
+	}
+	for i := range f7 {
+		_ = i
+	}
+	for i = range f7 {
+		_ = i
+	}
+	for mi, _ := range f8 {
+		_ = mi
+	}
+	for mi, _ = range f8 {
+		_ = mi
+	}
+	for mi, ms := range f8 {
+		_, _ = mi, ms
+	}
+	for i /* ERROR "cannot use i (value of type MyInt) as int value in assignment" */, s /* ERROR "cannot use s (value of type MyString) as string value in assignment" */ = range f8 {
+		_, _ = mi, ms
+	}
+	for mi, ms := range f8 {
+		i, s = mi /* ERROR "cannot use mi (variable of type MyInt) as int value in assignment" */, ms /* ERROR "cannot use ms (variable of type MyString) as string value in assignment" */
+	}
+	for mi, ms = range f8 {
+		_, _ = mi, ms
+	}
+
+	for i := range 10 {
+		_ = i
+	}
+	for i = range 10 {
+		_ = i
+	}
+	for i, j /* ERROR "range over 10 (untyped int constant) permits only one iteration variable" */ := range 10 {
+		_, _ = i, j
+	}
+	for mi := range MyInt(10) {
+		_ = mi
+	}
+	for mi = range MyInt(10) {
+		_ = mi
+	}
+}
+
+func _[T int | string](x T) {
+	for range x /* ERROR "cannot range over x (variable of type T constrained by int | string): no core type" */ {
+	}
+}
+
+func _[T int | int64](x T) {
+	for range x /* ERROR "cannot range over x (variable of type T constrained by int | int64): no core type" */ {
+	}
+}
+
+func _[T ~int](x T) {
+	for range x { // ok
+	}
+}
+
+func _[T any](x func(func(T) bool)) {
+	for _ = range x { // ok
+	}
+}
+
+func _[T ~func(func(int) bool)](x T) {
+	for _ = range x { // ok
+	}
+}
diff --git a/src/internal/types/testdata/spec/range_int.go b/src/internal/types/testdata/spec/range_int.go
new file mode 100644
index 0000000..7f722e2
--- /dev/null
+++ b/src/internal/types/testdata/spec/range_int.go
@@ -0,0 +1,131 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a subset of the tests in range.go for range over integers,
+// with extra tests, and without the need for -goexperiment=rangefunc.
+
+package p
+
+// test framework assumes 64-bit int/uint sizes by default
+const (
+	maxInt  = 1<<63 - 1
+	maxUint = 1<<64 - 1
+)
+
+type MyInt int32
+
+func _() {
+	for range -1 {
+	}
+	for range 0 {
+	}
+	for range 1 {
+	}
+	for range uint8(1) {
+	}
+	for range int64(1) {
+	}
+	for range MyInt(1) {
+	}
+	for range 'x' {
+	}
+	for range 1.0 /* ERROR "cannot range over 1.0 (untyped float constant 1)" */ {
+	}
+
+	var i int
+	var mi MyInt
+	for i := range 10 {
+		_ = i
+	}
+	for i = range 10 {
+		_ = i
+	}
+	for i, j /* ERROR "range over 10 (untyped int constant) permits only one iteration variable" */ := range 10 {
+		_, _ = i, j
+	}
+	for i = range MyInt /* ERROR "cannot use MyInt(10) (constant 10 of type MyInt) as int value in range clause" */ (10) {
+		_ = i
+	}
+	for mi := range MyInt(10) {
+		_ = mi
+	}
+	for mi = range MyInt(10) {
+		_ = mi
+	}
+}
+
+func _[T int | string](x T) {
+	for range x /* ERROR "cannot range over x (variable of type T constrained by int | string): no core type" */ {
+	}
+}
+
+func _[T int | int64](x T) {
+	for range x /* ERROR "cannot range over x (variable of type T constrained by int | int64): no core type" */ {
+	}
+}
+
+func _[T ~int](x T) {
+	for range x { // ok
+	}
+}
+
+func issue65133() {
+	for range maxInt {
+	}
+	for range maxInt /* ERROR "cannot use maxInt + 1 (untyped int constant 9223372036854775808) as int value in range clause (overflows)" */ + 1 {
+	}
+	for range maxUint /* ERROR "cannot use maxUint (untyped int constant 18446744073709551615) as int value in range clause (overflows)" */ {
+	}
+
+	for i := range maxInt {
+		_ = i
+	}
+	for i := range maxInt /* ERROR "cannot use maxInt + 1 (untyped int constant 9223372036854775808) as int value in range clause (overflows)" */ + 1 {
+		_ = i
+	}
+	for i := range maxUint /* ERROR "cannot use maxUint (untyped int constant 18446744073709551615) as int value in range clause (overflows)" */ {
+		_ = i
+	}
+
+	var i int
+	_ = i
+	for i = range maxInt {
+	}
+	for i = range maxInt /* ERROR "cannot use maxInt + 1 (untyped int constant 9223372036854775808) as int value in range clause (overflows)" */ + 1 {
+	}
+	for i = range maxUint /* ERROR "cannot use maxUint (untyped int constant 18446744073709551615) as int value in range clause (overflows)" */ {
+	}
+
+	var j uint
+	_ = j
+	for j = range maxInt {
+	}
+	for j = range maxInt + 1 {
+	}
+	for j = range maxUint {
+	}
+	for j = range maxUint /* ERROR "cannot use maxUint + 1 (untyped int constant 18446744073709551616) as uint value in range clause (overflows)" */ + 1 {
+	}
+
+	for range 256 {
+	}
+	for _ = range 256 {
+	}
+	for i = range 256 {
+	}
+	for i := range 256 {
+		_ = i
+	}
+
+	var u8 uint8
+	_ = u8
+	for u8 = range - /* ERROR "cannot use -1 (untyped int constant) as uint8 value in range clause (overflows)" */ 1 {
+	}
+	for u8 = range 0 {
+	}
+	for u8 = range 255 {
+	}
+	for u8 = range 256 /* ERROR "cannot use 256 (untyped int constant) as uint8 value in range clause (overflows)" */ {
+	}
+}
diff --git a/src/internal/xcoff/file.go b/src/internal/xcoff/file.go
index 9135822..12f78cc 100644
--- a/src/internal/xcoff/file.go
+++ b/src/internal/xcoff/file.go
@@ -225,7 +225,7 @@
 	if _, err := sr.Seek(int64(hdrsz)+int64(opthdr), io.SeekStart); err != nil {
 		return nil, err
 	}
-	c := saferio.SliceCap((**Section)(nil), uint64(nscns))
+	c := saferio.SliceCap[*Section](uint64(nscns))
 	if c < 0 {
 		return nil, fmt.Errorf("too many XCOFF sections (%d)", nscns)
 	}
@@ -399,7 +399,7 @@
 		if sect.Relptr == 0 {
 			continue
 		}
-		c := saferio.SliceCap((*Reloc)(nil), uint64(sect.Nreloc))
+		c := saferio.SliceCap[Reloc](uint64(sect.Nreloc))
 		if c < 0 {
 			return nil, fmt.Errorf("too many relocs (%d) for section %d", sect.Nreloc, sectNum)
 		}
diff --git a/src/internal/zstd/block.go b/src/internal/zstd/block.go
index bd3040c..11a99cd 100644
--- a/src/internal/zstd/block.go
+++ b/src/internal/zstd/block.go
@@ -50,10 +50,9 @@
 		if off < len(data) {
 			return r.makeError(off, "extraneous data after no sequences")
 		}
-		if len(litbuf) == 0 {
-			return r.makeError(off, "no sequences and no literals")
-		}
+
 		r.buffer = append(r.buffer, litbuf...)
+
 		return nil
 	}
 
@@ -374,9 +373,7 @@
 		}
 	}
 
-	if len(litbuf) > 0 {
-		r.buffer = append(r.buffer, litbuf...)
-	}
+	r.buffer = append(r.buffer, litbuf...)
 
 	if rbr.cnt != 0 {
 		return r.makeError(off, "extraneous data after sequences")
@@ -391,46 +388,38 @@
 		return rbr.makeError("invalid zero offset")
 	}
 
+	// Offset may point into the buffer or the window and
+	// match may extend past the end of the initial buffer.
+	// |--r.window--|--r.buffer--|
+	//        |<-----offset------|
+	//        |------match----------->|
+	bufferOffset := uint32(0)
 	lenBlock := uint32(len(r.buffer))
 	if lenBlock < offset {
-		lenWindow := uint32(len(r.window))
-		windowOffset := offset - lenBlock
-		if windowOffset > lenWindow {
+		lenWindow := r.window.len()
+		copy := offset - lenBlock
+		if copy > lenWindow {
 			return rbr.makeError("offset past window")
 		}
-		from := lenWindow - windowOffset
-		if from+match <= lenWindow {
-			r.buffer = append(r.buffer, r.window[from:from+match]...)
-			return nil
+		windowOffset := lenWindow - copy
+		if copy > match {
+			copy = match
 		}
-		r.buffer = append(r.buffer, r.window[from:]...)
-		copied := lenWindow - from
-		offset -= copied
-		match -= copied
-
-		if offset == 0 && match > 0 {
-			return rbr.makeError("invalid offset")
-		}
-	}
-
-	from := lenBlock - offset
-	if offset >= match {
-		r.buffer = append(r.buffer, r.buffer[from:from+match]...)
-		return nil
+		r.buffer = r.window.appendTo(r.buffer, windowOffset, windowOffset+copy)
+		match -= copy
+	} else {
+		bufferOffset = lenBlock - offset
 	}
 
 	// We are being asked to copy data that we are adding to the
 	// buffer in the same copy.
 	for match > 0 {
-		var copy uint32
-		if offset >= match {
+		copy := uint32(len(r.buffer)) - bufferOffset
+		if copy > match {
 			copy = match
-		} else {
-			copy = offset
 		}
-		r.buffer = append(r.buffer, r.buffer[from:from+copy]...)
+		r.buffer = append(r.buffer, r.buffer[bufferOffset:bufferOffset+copy]...)
 		match -= copy
-		from += copy
 	}
 	return nil
 }
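The rewritten copy logic above splits a back-reference into a window part and
a buffer part, then relies on the final loop to handle matches longer than the
offset (an overlapping, LZ77-style copy that repeats recently written bytes).
A tiny standalone sketch of that overlap rule, assuming the offset already
lies within the buffer (the window case is handled separately in the real
decoder):

	// appendMatch copies `match` bytes starting `offset` bytes back from
	// the end of buf, repeating bytes whenever match > offset.
	func appendMatch(buf []byte, offset, match int) []byte {
		from := len(buf) - offset
		for match > 0 {
			n := len(buf) - from // bytes available to copy so far
			if n > match {
				n = match
			}
			buf = append(buf, buf[from:from+n]...)
			match -= n
		}
		return buf
	}

For example, appendMatch([]byte("ab"), 2, 6) returns "abababab": the two-byte
pattern is repeated until the six-byte match is satisfied.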
diff --git a/src/internal/zstd/fse.go b/src/internal/zstd/fse.go
index ea661d4..f03a792 100644
--- a/src/internal/zstd/fse.go
+++ b/src/internal/zstd/fse.go
@@ -208,7 +208,7 @@
 // We use these for literal/match/length values.
 // Those require mapping the symbol to a baseline value,
 // and then reading zero or more bits and adding the value to the baseline.
-// Rather than looking thees up in separate tables,
+// Rather than looking these up in separate tables,
 // we convert the FSE table to an FSE baseline table.
 type fseBaselineEntry struct {
 	baseline uint32 // baseline for value that this entry represents
diff --git a/src/internal/zstd/fuzz_test.go b/src/internal/zstd/fuzz_test.go
index bb6f0a9..4b5c996 100644
--- a/src/internal/zstd/fuzz_test.go
+++ b/src/internal/zstd/fuzz_test.go
@@ -22,6 +22,9 @@
 	"(\xb5/\xfd\x1002000$\x05\x0010\xcc0\xa8100000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
 	"(\xb5/\xfd\x1002000$\x05\x0000\xcc0\xa8100d\x0000001000000000000000000000000000000000000000000000000000000000000000000000000\x000000000000000000000000000000000000000000000000000000000000000000000000000000",
 	"(\xb5/\xfd001\x00\x0000000000000000000",
+	"(\xb5/\xfd00\xec\x00\x00&@\x05\x05A7002\x02\x00\x02\x00\x02\x0000000000000000",
+	"(\xb5/\xfd00\xec\x00\x00V@\x05\x0517002\x02\x00\x02\x00\x02\x0000000000000000",
+	"\x50\x2a\x4d\x18\x02\x00\x00\x00",
 }
 
 // This is a simple fuzzer to see if the decompressor panics.
@@ -43,9 +46,7 @@
 // explore the space of decompressor behavior, since it can't see
 // what the compressor is doing. But it's better than nothing.
 func FuzzDecompressor(f *testing.F) {
-	if _, err := os.Stat("/usr/bin/zstd"); err != nil {
-		f.Skip("skipping because /usr/bin/zstd does not exist")
-	}
+	zstd := findZstd(f)
 
 	for _, test := range tests {
 		f.Add([]byte(test.uncompressed))
@@ -61,7 +62,7 @@
 	f.Add(bigData(f))
 
 	f.Fuzz(func(t *testing.T, b []byte) {
-		cmd := exec.Command("/usr/bin/zstd", "-z")
+		cmd := exec.Command(zstd, "-z")
 		cmd.Stdin = bytes.NewReader(b)
 		var compressed bytes.Buffer
 		cmd.Stdout = &compressed
@@ -84,9 +85,7 @@
 // Fuzz test to check that if we can decompress some data,
 // so can zstd, and that we get the same result.
 func FuzzReverse(f *testing.F) {
-	if _, err := os.Stat("/usr/bin/zstd"); err != nil {
-		f.Skip("skipping because /usr/bin/zstd does not exist")
-	}
+	zstd := findZstd(f)
 
 	for _, test := range tests {
 		f.Add([]byte(test.compressed))
@@ -100,7 +99,7 @@
 		r := NewReader(bytes.NewReader(b))
 		goExp, goErr := io.ReadAll(r)
 
-		cmd := exec.Command("/usr/bin/zstd", "-d")
+		cmd := exec.Command(zstd, "-d")
 		cmd.Stdin = bytes.NewReader(b)
 		var uncompressed bytes.Buffer
 		cmd.Stdout = &uncompressed
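
The fuzzers now locate the external zstd binary with a findZstd helper (defined in zstd_test.go later in this change) instead of hard-coding /usr/bin/zstd. The same look-up-and-skip pattern can be reused in other test packages; a hedged sketch, where findTool and the package name are placeholders:

	package mypkg // in a _test.go file

	import (
		"os/exec"
		"testing"
	)

	// findTool returns the path of an external binary the test depends on,
	// skipping the test when the binary is not installed on the host.
	func findTool(t testing.TB, name string) string {
		path, err := exec.LookPath(name)
		if err != nil {
			t.Skipf("skipping because %s not found: %v", name, err)
		}
		return path
	}

	func TestWithExternalTool(t *testing.T) {
		zstd := findTool(t, "zstd")
		_ = zstd // run exec.Command(zstd, ...) against the test input here
	}
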
diff --git a/src/internal/zstd/literals.go b/src/internal/zstd/literals.go
index b46d668..11ef859 100644
--- a/src/internal/zstd/literals.go
+++ b/src/internal/zstd/literals.go
@@ -214,6 +214,14 @@
 	if totalStreamsSize < 6 {
 		return nil, r.makeError(off, "total streams size too small for jump table")
 	}
+	// RFC 3.1.1.3.1.6.
+	// "The decompressed size of each stream is equal to (Regenerated_Size+3)/4,
+	// except for the last stream, which may be up to 3 bytes smaller,
+	// to reach a total decompressed size as specified in Regenerated_Size."
+	regeneratedStreamSize := (regeneratedSize + 3) / 4
+	if regeneratedSize < regeneratedStreamSize*3 {
+		return nil, r.makeError(off, "regenerated size too small to decode streams")
+	}
 
 	streamSize1 := binary.LittleEndian.Uint16(data[off:])
 	streamSize2 := binary.LittleEndian.Uint16(data[off+2:])
@@ -262,8 +270,6 @@
 		return nil, err
 	}
 
-	regeneratedStreamSize := (regeneratedSize + 3) / 4
-
 	out1 := len(outbuf)
 	out2 := out1 + regeneratedStreamSize
 	out3 := out2 + regeneratedStreamSize
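
The new check enforces RFC 8878 3.1.1.3.1.6: with four Huffman-coded streams, each of the first three regenerates (Regenerated_Size+3)/4 bytes, so Regenerated_Size must be at least three times that or the split is impossible. A small arithmetic sketch of the same rule (streamSizes is an illustrative helper, not part of the package):

	package main

	import (
		"errors"
		"fmt"
	)

	// streamSizes splits a four-stream literals section's regenerated size into
	// the size of the first three streams and the size of the last one,
	// following RFC 8878 3.1.1.3.1.6.
	func streamSizes(regeneratedSize int) (first3, last int, err error) {
		streamSize := (regeneratedSize + 3) / 4
		if regeneratedSize < streamSize*3 {
			return 0, 0, errors.New("regenerated size too small to decode streams")
		}
		return streamSize, regeneratedSize - 3*streamSize, nil
	}

	func main() {
		for _, n := range []int{40, 41, 5} {
			first3, last, err := streamSizes(n)
			fmt.Println(n, first3, last, err) // 40 10 10 <nil>; 41 11 8 <nil>; 5 errors
		}
	}
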
diff --git a/src/internal/zstd/testdata/1890a371.gettysburg.txt-100x.zst b/src/internal/zstd/testdata/1890a371.gettysburg.txt-100x.zst
new file mode 100644
index 0000000..afb4a27
--- /dev/null
+++ b/src/internal/zstd/testdata/1890a371.gettysburg.txt-100x.zst
Binary files differ
diff --git a/src/internal/zstd/testdata/README b/src/internal/zstd/testdata/README
new file mode 100644
index 0000000..1a6dbb3
--- /dev/null
+++ b/src/internal/zstd/testdata/README
@@ -0,0 +1,10 @@
+This directory holds files for testing zstd.NewReader.
+
+Each one is a Zstandard compressed file named as hash.arbitrary-name.zst,
+where hash is the first eight hexadecimal digits of the SHA256 hash
+of the expected uncompressed content:
+
+	zstd -d < 1890a371.gettysburg.txt-100x.zst | sha256sum | head -c 8
+	1890a371
+
+The test uses the hash value to verify the decompression result.
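
For reference, the shell pipeline above can also be reproduced in Go when preparing a new sample, to compute the eight-hex-digit prefix from the expected uncompressed bytes (a standalone sketch, not part of the test suite):

	package main

	import (
		"crypto/sha256"
		"fmt"
		"os"
	)

	// Prints the first eight hex digits of the SHA256 of a file's contents,
	// which is used as the prefix of the corresponding testdata file name.
	func main() {
		if len(os.Args) != 2 {
			fmt.Fprintln(os.Stderr, "usage: hashname <file>")
			os.Exit(2)
		}
		data, err := os.ReadFile(os.Args[1])
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		sum := sha256.Sum256(data)
		fmt.Printf("%x\n", sum[:4]) // 4 bytes == 8 hex digits
	}
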
diff --git a/src/internal/zstd/testdata/f2a8e35c.helloworld-11000x.zst b/src/internal/zstd/testdata/f2a8e35c.helloworld-11000x.zst
new file mode 100644
index 0000000..87a8aca
--- /dev/null
+++ b/src/internal/zstd/testdata/f2a8e35c.helloworld-11000x.zst
Binary files differ
diff --git a/src/internal/zstd/testdata/fcf30b99.zero-dictionary-ids.zst b/src/internal/zstd/testdata/fcf30b99.zero-dictionary-ids.zst
new file mode 100644
index 0000000..1be89e8
--- /dev/null
+++ b/src/internal/zstd/testdata/fcf30b99.zero-dictionary-ids.zst
Binary files differ
diff --git a/src/internal/zstd/window.go b/src/internal/zstd/window.go
new file mode 100644
index 0000000..f9c5f04
--- /dev/null
+++ b/src/internal/zstd/window.go
@@ -0,0 +1,90 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+// window stores up to size bytes of data.
+// It is implemented as a circular buffer:
+// sequential save calls append to the data slice until
+// its length reaches the configured size; after that,
+// save calls overwrite previously saved data at off
+// and update off so that it always points at
+// the oldest stored byte.
+type window struct {
+	size int
+	data []byte
+	off  int
+}
+
+// reset clears stored data and configures window size.
+func (w *window) reset(size int) {
+	w.data = w.data[:0]
+	w.off = 0
+	w.size = size
+}
+
+// len returns the number of stored bytes.
+func (w *window) len() uint32 {
+	return uint32(len(w.data))
+}
+
+// save stores up to the last size bytes of buf.
+func (w *window) save(buf []byte) {
+	if w.size == 0 {
+		return
+	}
+	if len(buf) == 0 {
+		return
+	}
+
+	if len(buf) >= w.size {
+		from := len(buf) - w.size
+		w.data = append(w.data[:0], buf[from:]...)
+		w.off = 0
+		return
+	}
+
+	// Update off to point to the oldest remaining byte.
+	free := w.size - len(w.data)
+	if free == 0 {
+		n := copy(w.data[w.off:], buf)
+		if n == len(buf) {
+			w.off += n
+		} else {
+			w.off = copy(w.data, buf[n:])
+		}
+	} else {
+		if free >= len(buf) {
+			w.data = append(w.data, buf...)
+		} else {
+			w.data = append(w.data, buf[:free]...)
+			w.off = copy(w.data, buf[free:])
+		}
+	}
+}
+
+// appendTo appends the stored bytes between indices from and to to buf.
+// Index from must be less than or equal to index to, and to must be less than or equal to w.len().
+func (w *window) appendTo(buf []byte, from, to uint32) []byte {
+	dataLen := uint32(len(w.data))
+	from += uint32(w.off)
+	to += uint32(w.off)
+
+	wrap := false
+	if from > dataLen {
+		from -= dataLen
+		wrap = !wrap
+	}
+	if to > dataLen {
+		to -= dataLen
+		wrap = !wrap
+	}
+
+	if wrap {
+		buf = append(buf, w.data[from:]...)
+		return append(buf, w.data[:to]...)
+	} else {
+		return append(buf, w.data[from:to]...)
+	}
+}
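
The window type is internal to the package, but the wrap-around read in appendTo is easy to illustrate on its own. A hedged sketch with an illustrative ring type whose backing slice is already full and whose off marks the oldest byte:

	package main

	import "fmt"

	// ring models the circular window above: data holds the bytes and off is
	// the index of the oldest byte, so logical index i lives at
	// data[(off+i)%len(data)].
	type ring struct {
		data []byte
		off  int
	}

	// appendTo appends the logical bytes in [from, to) to buf, splitting the
	// copy in two when the range wraps past the end of the underlying slice.
	func (r *ring) appendTo(buf []byte, from, to int) []byte {
		n := len(r.data)
		from += r.off
		to += r.off
		wrap := false
		if from > n {
			from -= n
			wrap = !wrap
		}
		if to > n {
			to -= n
			wrap = !wrap
		}
		if wrap {
			buf = append(buf, r.data[from:]...)
			return append(buf, r.data[:to]...)
		}
		return append(buf, r.data[from:to]...)
	}

	func main() {
		r := &ring{data: []byte("efcd"), off: 2}  // logical contents: "cdef"
		fmt.Printf("%s\n", r.appendTo(nil, 0, 4)) // cdef
		fmt.Printf("%s\n", r.appendTo(nil, 1, 3)) // de
	}
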
diff --git a/src/internal/zstd/window_test.go b/src/internal/zstd/window_test.go
new file mode 100644
index 0000000..afa2eef
--- /dev/null
+++ b/src/internal/zstd/window_test.go
@@ -0,0 +1,72 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+)
+
+func makeSequence(start, n int) (seq []byte) {
+	for i := 0; i < n; i++ {
+		seq = append(seq, byte(start+i))
+	}
+	return
+}
+
+func TestWindow(t *testing.T) {
+	for size := 0; size <= 3; size++ {
+		for i := 0; i <= 2*size; i++ {
+			a := makeSequence('a', i)
+			for j := 0; j <= 2*size; j++ {
+				b := makeSequence('a'+i, j)
+				for k := 0; k <= 2*size; k++ {
+					c := makeSequence('a'+i+j, k)
+
+					t.Run(fmt.Sprintf("%d-%d-%d-%d", size, i, j, k), func(t *testing.T) {
+						testWindow(t, size, a, b, c)
+					})
+				}
+			}
+		}
+	}
+}
+
+// testWindow tests window by saving three sequences of bytes to it.
+// The third sequence tests a read offset that can become non-zero only after the second save.
+func testWindow(t *testing.T, size int, a, b, c []byte) {
+	var w window
+	w.reset(size)
+
+	w.save(a)
+	w.save(b)
+	w.save(c)
+
+	var tail []byte
+	tail = append(tail, a...)
+	tail = append(tail, b...)
+	tail = append(tail, c...)
+
+	if len(tail) > size {
+		tail = tail[len(tail)-size:]
+	}
+
+	if w.len() != uint32(len(tail)) {
+		t.Errorf("wrong data length: got: %d, want: %d", w.len(), len(tail))
+	}
+
+	var from, to uint32
+	for from = 0; from <= uint32(len(tail)); from++ {
+		for to = from; to <= uint32(len(tail)); to++ {
+			got := w.appendTo(nil, from, to)
+			want := tail[from:to]
+
+			if !bytes.Equal(got, want) {
+				t.Errorf("wrong data at [%d:%d]: got %q, want %q", from, to, got, want)
+			}
+		}
+	}
+}
diff --git a/src/internal/zstd/xxhash_test.go b/src/internal/zstd/xxhash_test.go
index 646cee8..68ca558 100644
--- a/src/internal/zstd/xxhash_test.go
+++ b/src/internal/zstd/xxhash_test.go
@@ -42,7 +42,11 @@
 		t.Skip("skipping expensive test in short mode")
 	}
 
-	data := bigData(t)
+	data, err := os.ReadFile("../../testdata/Isaac.Newton-Opticks.txt")
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	var xh xxhash64
 	xh.reset()
 	i := 0
@@ -63,10 +67,16 @@
 	}
 }
 
-func FuzzXXHash(f *testing.F) {
-	if _, err := os.Stat("/usr/bin/xxhsum"); err != nil {
-		f.Skip("skipping because /usr/bin/xxhsum does not exist")
+func findXxhsum(t testing.TB) string {
+	xxhsum, err := exec.LookPath("xxhsum")
+	if err != nil {
+		t.Skip("skipping because xxhsum not found")
 	}
+	return xxhsum
+}
+
+func FuzzXXHash(f *testing.F) {
+	xxhsum := findXxhsum(f)
 
 	for _, test := range xxHashTests {
 		f.Add([]byte(test.data))
@@ -80,7 +90,7 @@
 	f.Add(bigData(f))
 
 	f.Fuzz(func(t *testing.T, b []byte) {
-		cmd := exec.Command("/usr/bin/xxhsum", "-H64")
+		cmd := exec.Command(xxhsum, "-H64")
 		cmd.Stdin = bytes.NewReader(b)
 		var hhsumHash bytes.Buffer
 		cmd.Stdout = &hhsumHash
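
Both FuzzXXHash and the zstd fuzzers above drive an external binary by wiring a bytes.Reader and a bytes.Buffer to the command's stdin and stdout. A generic sketch of that exec.Command pattern (runFilter is an illustrative name, and the example assumes sha256sum is installed):

	package main

	import (
		"bytes"
		"fmt"
		"os/exec"
	)

	// runFilter feeds input to an external command's stdin and returns its stdout.
	func runFilter(path string, input []byte, args ...string) ([]byte, error) {
		cmd := exec.Command(path, args...)
		cmd.Stdin = bytes.NewReader(input)
		var out, stderr bytes.Buffer
		cmd.Stdout = &out
		cmd.Stderr = &stderr
		if err := cmd.Run(); err != nil {
			return nil, fmt.Errorf("%s: %v: %s", path, err, stderr.Bytes())
		}
		return out.Bytes(), nil
	}

	func main() {
		// Hash some bytes with an external tool, if it is installed.
		out, err := runFilter("sha256sum", []byte("hello\n"))
		fmt.Printf("%s%v\n", out, err)
	}
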
diff --git a/src/internal/zstd/zstd.go b/src/internal/zstd/zstd.go
index a860789..0230076 100644
--- a/src/internal/zstd/zstd.go
+++ b/src/internal/zstd/zstd.go
@@ -59,8 +59,7 @@
 	huffmanTableBits int
 
 	// The window for back references.
-	windowSize int    // maximum required window size
-	window     []byte // window data
+	window window
 
 	// A buffer available to hold a compressed block.
 	compressedBuf []byte
@@ -105,14 +104,13 @@
 	r.frameSizeUnknown = false
 	r.remainingFrameSize = 0
 	r.blockOffset = 0
-	// buffer
+	r.buffer = r.buffer[:0]
 	r.off = 0
 	// repeatedOffset1
 	// repeatedOffset2
 	// repeatedOffset3
 	// huffmanTable
 	// huffmanTableBits
-	// windowSize
 	// window
 	// compressedBuf
 	// literals
@@ -171,7 +169,7 @@
 
 	// Read magic number. RFC 3.1.1.
 	if _, err := io.ReadFull(r.r, r.scratch[:4]); err != nil {
-		// We require that the stream contain at least one frame.
+		// We require that the stream contains at least one frame.
 		if err == io.EOF && !r.readOneFrame {
 			err = io.ErrUnexpectedEOF
 		}
@@ -185,6 +183,7 @@
 			if err := r.skipFrame(); err != nil {
 				return err
 			}
+			r.readOneFrame = true
 			goto retry
 		}
 
@@ -222,13 +221,15 @@
 		r.checksum.reset()
 	}
 
-	if descriptor&3 != 0 {
-		return r.makeError(relativeOffset, "dictionaries are not supported")
+	// Dictionary_ID_Flag. RFC 3.1.1.1.1.6.
+	dictionaryIdSize := 0
+	if dictIdFlag := descriptor & 3; dictIdFlag != 0 {
+		dictionaryIdSize = 1 << (dictIdFlag - 1)
 	}
 
 	relativeOffset++
 
-	headerSize := windowDescriptorSize + fcsFieldSize
+	headerSize := windowDescriptorSize + dictionaryIdSize + fcsFieldSize
 
 	if _, err := io.ReadFull(r.r, r.scratch[:headerSize]); err != nil {
 		return r.wrapNonEOFError(relativeOffset, err)
@@ -236,11 +237,8 @@
 
 	// Figure out the maximum amount of data we need to retain
 	// for backreferences.
-
-	if singleSegment {
-		// No window required, as all the data is in a single buffer.
-		r.windowSize = 0
-	} else {
+	var windowSize int
+	if !singleSegment {
 		// Window descriptor. RFC 3.1.1.1.2.
 		windowDescriptor := r.scratch[0]
 		exponent := uint64(windowDescriptor >> 3)
@@ -248,25 +246,29 @@
 		windowLog := exponent + 10
 		windowBase := uint64(1) << windowLog
 		windowAdd := (windowBase / 8) * mantissa
-		windowSize := windowBase + windowAdd
+		windowSize = int(windowBase + windowAdd)
 
 		// Default zstd sets limits on the window size.
 		if fuzzing && (windowLog > 31 || windowSize > 1<<27) {
 			return r.makeError(relativeOffset, "windowSize too large")
 		}
-
-		// RFC 8878 permits us to set an 8M max on window size.
-		if windowSize > 8<<20 {
-			windowSize = 8 << 20
-		}
-
-		r.windowSize = int(windowSize)
 	}
 
-	// Frame_Content_Size. RFC 3.1.1.4.
+	// Dictionary_ID. RFC 3.1.1.1.3.
+	if dictionaryIdSize != 0 {
+		dictionaryId := r.scratch[windowDescriptorSize : windowDescriptorSize+dictionaryIdSize]
+		// Allow only zero Dictionary ID.
+		for _, b := range dictionaryId {
+			if b != 0 {
+				return r.makeError(relativeOffset, "dictionaries are not supported")
+			}
+		}
+	}
+
+	// Frame_Content_Size. RFC 3.1.1.1.4.
 	r.frameSizeUnknown = false
 	r.remainingFrameSize = 0
-	fb := r.scratch[windowDescriptorSize:]
+	fb := r.scratch[windowDescriptorSize+dictionaryIdSize:]
 	switch fcsFieldSize {
 	case 0:
 		r.frameSizeUnknown = true
@@ -282,6 +284,18 @@
 		panic("unreachable")
 	}
 
+	// RFC 3.1.1.1.2.
+	// When Single_Segment_Flag is set, Window_Descriptor is not present.
+	// In this case, Window_Size is Frame_Content_Size.
+	if singleSegment {
+		windowSize = int(r.remainingFrameSize)
+	}
+
+	// RFC 8878 3.1.1.1.1.2. permits us to set an 8M max on window size.
+	if windowSize > 8<<20 {
+		windowSize = 8 << 20
+	}
+
 	relativeOffset += headerSize
 
 	r.sawFrameHeader = true
@@ -293,7 +307,7 @@
 	r.repeatedOffset2 = 4
 	r.repeatedOffset3 = 8
 	r.huffmanTableBits = 0
-	r.window = r.window[:0]
+	r.window.reset(windowSize)
 	r.seqTables[0] = nil
 	r.seqTables[1] = nil
 	r.seqTables[2] = nil
@@ -312,12 +326,34 @@
 	relativeOffset += 4
 
 	size := binary.LittleEndian.Uint32(r.scratch[:4])
+	if size == 0 {
+		r.blockOffset += int64(relativeOffset)
+		return nil
+	}
 
 	if seeker, ok := r.r.(io.Seeker); ok {
-		if _, err := seeker.Seek(int64(size), io.SeekCurrent); err != nil {
-			return err
+		r.blockOffset += int64(relativeOffset)
+		// Implementations of Seeker do not always detect invalid offsets,
+		// so check that the new offset is valid by comparing to the end.
+		prev, err := seeker.Seek(0, io.SeekCurrent)
+		if err != nil {
+			return r.wrapError(0, err)
 		}
-		r.blockOffset += int64(relativeOffset) + int64(size)
+		end, err := seeker.Seek(0, io.SeekEnd)
+		if err != nil {
+			return r.wrapError(0, err)
+		}
+		if prev > end-int64(size) {
+			r.blockOffset += end - prev
+			return r.makeEOFError(0)
+		}
+
+		// The new offset is valid, so seek to it.
+		_, err = seeker.Seek(prev+int64(size), io.SeekStart)
+		if err != nil {
+			return r.wrapError(0, err)
+		}
+		r.blockOffset += int64(size)
 		return nil
 	}
 
@@ -368,7 +404,7 @@
 	// Maximum block size is smaller of window size and 128K.
 	// We don't record the window size for a single segment frame,
 	// so just use 128K. RFC 3.1.1.2.3, 3.1.1.2.4.
-	if blockSize > 128<<10 || (r.windowSize > 0 && blockSize > r.windowSize) {
+	if blockSize > 128<<10 || (r.window.size > 0 && blockSize > r.window.size) {
 		return r.makeError(relativeOffset, "block size too large")
 	}
 
@@ -414,7 +450,7 @@
 	}
 
 	if !lastBlock {
-		r.saveWindow(r.buffer)
+		r.window.save(r.buffer)
 	} else {
 		if !r.frameSizeUnknown && r.remainingFrameSize != 0 {
 			return r.makeError(relativeOffset, "not enough uncompressed bytes for frame")
@@ -449,28 +485,6 @@
 	r.buffer = r.buffer[:size]
 }
 
-// saveWindow saves bytes in the backreference window.
-// TODO: use a circular buffer for less data movement.
-func (r *Reader) saveWindow(buf []byte) {
-	if r.windowSize == 0 {
-		return
-	}
-
-	if len(buf) >= r.windowSize {
-		from := len(buf) - r.windowSize
-		r.window = append(r.window[:0], buf[from:]...)
-		return
-	}
-
-	keep := r.windowSize - len(buf) // must be positive
-	if keep < len(r.window) {
-		remove := len(r.window) - keep
-		copy(r.window[:], r.window[remove:])
-	}
-
-	r.window = append(r.window, buf...)
-}
-
 // zstdError is an error while decompressing.
 type zstdError struct {
 	offset int64
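
The skippable-frame path now checks the skip length against the end of the stream before seeking, because io.Seeker implementations are not required to reject offsets past EOF. A standalone sketch of the same guard (skipN is an illustrative helper):

	package main

	import (
		"fmt"
		"io"
		"strings"
	)

	// skipN advances r by n bytes, returning io.ErrUnexpectedEOF if fewer than
	// n bytes remain, instead of silently seeking past the end of the data.
	func skipN(r io.ReadSeeker, n int64) error {
		cur, err := r.Seek(0, io.SeekCurrent)
		if err != nil {
			return err
		}
		end, err := r.Seek(0, io.SeekEnd)
		if err != nil {
			return err
		}
		if cur > end-n {
			return io.ErrUnexpectedEOF
		}
		_, err = r.Seek(cur+n, io.SeekStart)
		return err
	}

	func main() {
		r := strings.NewReader("0123456789")
		fmt.Println(skipN(r, 4))  // <nil>
		fmt.Println(skipN(r, 20)) // unexpected EOF
	}
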
diff --git a/src/internal/zstd/zstd_test.go b/src/internal/zstd/zstd_test.go
index bc75e0f..f2a2e1b 100644
--- a/src/internal/zstd/zstd_test.go
+++ b/src/internal/zstd/zstd_test.go
@@ -6,12 +6,14 @@
 
 import (
 	"bytes"
+	"crypto/sha256"
 	"fmt"
 	"internal/race"
 	"internal/testenv"
 	"io"
 	"os"
 	"os/exec"
+	"path/filepath"
 	"strings"
 	"sync"
 	"testing"
@@ -90,6 +92,22 @@
 		"0\x00\x00\x00\x00\x000\x00\x00\x00\x00\x001\x00\x00\x00\x00\x000000",
 		"(\xb5/\xfd\x04X\x8d\x00\x00P0\x000\x001\x000000\x03T\x02\x00\x01\x01m\xf9\xb7G",
 	},
+	{
+		"empty block",
+		"",
+		"\x28\xb5\x2f\xfd\x00\x00\x15\x00\x00\x00\x00",
+	},
+	{
+		"single skippable frame",
+		"",
+		"\x50\x2a\x4d\x18\x00\x00\x00\x00",
+	},
+	{
+		"two skippable frames",
+		"",
+		"\x50\x2a\x4d\x18\x00\x00\x00\x00" +
+			"\x50\x2a\x4d\x18\x00\x00\x00\x00",
+	},
 }
 
 func TestSamples(t *testing.T) {
@@ -109,16 +127,39 @@
 	}
 }
 
+func TestReset(t *testing.T) {
+	input := strings.NewReader("")
+	r := NewReader(input)
+	for _, test := range tests {
+		test := test
+		t.Run(test.name, func(t *testing.T) {
+			input.Reset(test.compressed)
+			r.Reset(input)
+			got, err := io.ReadAll(r)
+			if err != nil {
+				t.Fatal(err)
+			}
+			gotstr := string(got)
+			if gotstr != test.uncompressed {
+				t.Errorf("got %q want %q", gotstr, test.uncompressed)
+			}
+		})
+	}
+}
+
 var (
 	bigDataOnce  sync.Once
 	bigDataBytes []byte
 	bigDataErr   error
 )
 
-// bigData returns the contents of our large test file.
+// bigData returns the contents of our large test file repeated multiple times.
 func bigData(t testing.TB) []byte {
 	bigDataOnce.Do(func() {
 		bigDataBytes, bigDataErr = os.ReadFile("../../testdata/Isaac.Newton-Opticks.txt")
+		if bigDataErr == nil {
+			bigDataBytes = bytes.Repeat(bigDataBytes, 20)
+		}
 	})
 	if bigDataErr != nil {
 		t.Fatal(bigDataErr)
@@ -126,10 +167,17 @@
 	return bigDataBytes
 }
 
+func findZstd(t testing.TB) string {
+	zstd, err := exec.LookPath("zstd")
+	if err != nil {
+		t.Skip("skipping because zstd not found")
+	}
+	return zstd
+}
+
 var (
 	zstdBigOnce  sync.Once
 	zstdBigBytes []byte
-	zstdBigSkip  bool
 	zstdBigErr   error
 )
 
@@ -139,13 +187,10 @@
 func zstdBigData(t testing.TB) []byte {
 	input := bigData(t)
 
-	zstdBigOnce.Do(func() {
-		if _, err := os.Stat("/usr/bin/zstd"); err != nil {
-			zstdBigSkip = true
-			return
-		}
+	zstd := findZstd(t)
 
-		cmd := exec.Command("/usr/bin/zstd", "-z")
+	zstdBigOnce.Do(func() {
+		cmd := exec.Command(zstd, "-z")
 		cmd.Stdin = bytes.NewReader(input)
 		var compressed bytes.Buffer
 		cmd.Stdout = &compressed
@@ -157,9 +202,6 @@
 
 		zstdBigBytes = compressed.Bytes()
 	})
-	if zstdBigSkip {
-		t.Skip("skipping because /usr/bin/zstd does not exist")
-	}
 	if zstdBigErr != nil {
 		t.Fatal(zstdBigErr)
 	}
@@ -176,7 +218,7 @@
 	data := bigData(t)
 	compressed := zstdBigData(t)
 
-	t.Logf("/usr/bin/zstd compressed %d bytes to %d", len(data), len(compressed))
+	t.Logf("zstd compressed %d bytes to %d", len(data), len(compressed))
 
 	r := NewReader(bytes.NewReader(compressed))
 	got, err := io.ReadAll(r)
@@ -229,6 +271,50 @@
 	}
 }
 
+func TestFileSamples(t *testing.T) {
+	samples, err := os.ReadDir("testdata")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, sample := range samples {
+		name := sample.Name()
+		if !strings.HasSuffix(name, ".zst") {
+			continue
+		}
+
+		t.Run(name, func(t *testing.T) {
+			f, err := os.Open(filepath.Join("testdata", name))
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			r := NewReader(f)
+			h := sha256.New()
+			if _, err := io.Copy(h, r); err != nil {
+				t.Fatal(err)
+			}
+			got := fmt.Sprintf("%x", h.Sum(nil))[:8]
+
+			want, _, _ := strings.Cut(name, ".")
+			if got != want {
+				t.Errorf("Wrong uncompressed content hash: got %s, want %s", got, want)
+			}
+		})
+	}
+}
+
+func TestReaderBad(t *testing.T) {
+	for i, s := range badStrings {
+		t.Run(fmt.Sprintf("badStrings#%d", i), func(t *testing.T) {
+			_, err := io.Copy(io.Discard, NewReader(strings.NewReader(s)))
+			if err == nil {
+				t.Error("expected error")
+			}
+		})
+	}
+}
+
 func BenchmarkLarge(b *testing.B) {
 	b.StopTimer()
 	b.ReportAllocs()
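
TestReset above reuses a single input reader and a single decompressor across all samples via their Reset methods. The same reuse pattern is shown here with strings.Reader and bufio.Reader purely as stand-ins for any reader that has a Reset method:

	package main

	import (
		"bufio"
		"fmt"
		"io"
		"strings"
	)

	func main() {
		inputs := []string{"first sample", "second sample"}

		src := strings.NewReader("")
		br := bufio.NewReader(src)
		for _, s := range inputs {
			src.Reset(s)  // point the underlying reader at the next sample
			br.Reset(src) // drop buffered state from the previous iteration
			data, err := io.ReadAll(br)
			fmt.Printf("%q %v\n", data, err)
		}
	}
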
diff --git a/src/io/fs/format.go b/src/io/fs/format.go
index f490341..60b40df 100644
--- a/src/io/fs/format.go
+++ b/src/io/fs/format.go
@@ -9,7 +9,7 @@
 )
 
 // FormatFileInfo returns a formatted version of info for human readability.
-// Implementations of FileInfo can call this from a String method.
+// Implementations of [FileInfo] can call this from a String method.
 // The output for a file named "hello.go", 100 bytes, mode 0o644, created
 // January 1, 1970 at noon is
 //
@@ -52,7 +52,7 @@
 }
 
 // FormatDirEntry returns a formatted version of dir for human readability.
-// Implementations of DirEntry can call this from a String method.
+// Implementations of [DirEntry] can call this from a String method.
 // The outputs for a directory named subdir and a file named hello.go are:
 //
 //	d subdir/
diff --git a/src/io/fs/fs.go b/src/io/fs/fs.go
index 4ce4d1a..6891d75 100644
--- a/src/io/fs/fs.go
+++ b/src/io/fs/fs.go
@@ -5,6 +5,9 @@
 // Package fs defines basic interfaces to a file system.
 // A file system can be provided by the host operating system
 // but also by other packages.
+//
+// See the [testing/fstest] package for support with testing
+// implementations of file systems.
 package fs
 
 import (
@@ -17,7 +20,10 @@
 //
 // The FS interface is the minimum implementation required of the file system.
 // A file system may implement additional interfaces,
-// such as ReadFileFS, to provide additional or optimized functionality.
+// such as [ReadFileFS], to provide additional or optimized functionality.
+//
+// [testing/fstest.TestFS] may be used to test implementations of an FS for
+// correctness.
 type FS interface {
 	// Open opens the named file.
 	//
@@ -43,7 +49,7 @@
 // Note that paths are slash-separated on all systems, even Windows.
 // Paths containing other characters such as backslash and colon
 // are accepted as valid, but those characters must never be
-// interpreted by an FS implementation as path element separators.
+// interpreted by an [FS] implementation as path element separators.
 func ValidPath(name string) bool {
 	if !utf8.ValidString(name) {
 		return false
@@ -73,8 +79,8 @@
 
 // A File provides access to a single file.
 // The File interface is the minimum implementation required of the file.
-// Directory files should also implement ReadDirFile.
-// A file may implement io.ReaderAt or io.Seeker as optimizations.
+// Directory files should also implement [ReadDirFile].
+// A file may implement [io.ReaderAt] or [io.Seeker] as optimizations.
 type File interface {
 	Stat() (FileInfo, error)
 	Read([]byte) (int, error)
@@ -82,7 +88,7 @@
 }
 
 // A DirEntry is an entry read from a directory
-// (using the ReadDir function or a ReadDirFile's ReadDir method).
+// (using the [ReadDir] function or a [ReadDirFile]'s ReadDir method).
 type DirEntry interface {
 	// Name returns the name of the file (or subdirectory) described by the entry.
 	// This name is only the final element of the path (the base name), not the entire path.
@@ -132,7 +138,7 @@
 
 // Generic file system errors.
 // Errors returned by file systems can be tested against these errors
-// using errors.Is.
+// using [errors.Is].
 var (
 	ErrInvalid    = errInvalid()    // "invalid argument"
 	ErrPermission = errPermission() // "permission denied"
@@ -147,7 +153,7 @@
 func errNotExist() error   { return oserror.ErrNotExist }
 func errClosed() error     { return oserror.ErrClosed }
 
-// A FileInfo describes a file and is returned by Stat.
+// A FileInfo describes a file and is returned by [Stat].
 type FileInfo interface {
 	Name() string       // base name of the file
 	Size() int64        // length in bytes for regular files; system-dependent for others
@@ -161,10 +167,10 @@
 // The bits have the same definition on all systems, so that
 // information about files can be moved from one system
 // to another portably. Not all bits apply to all systems.
-// The only required bit is ModeDir for directories.
+// The only required bit is [ModeDir] for directories.
 type FileMode uint32
 
-// The defined file mode bits are the most significant bits of the FileMode.
+// The defined file mode bits are the most significant bits of the [FileMode].
 // The nine least-significant bits are the standard Unix rwxrwxrwx permissions.
 // The values of these bits should be considered part of the public API and
 // may be used in wire protocols or disk representations: they must not be
@@ -219,7 +225,7 @@
 }
 
 // IsDir reports whether m describes a directory.
-// That is, it tests for the ModeDir bit being set in m.
+// That is, it tests for the [ModeDir] bit being set in m.
 func (m FileMode) IsDir() bool {
 	return m&ModeDir != 0
 }
@@ -230,12 +236,12 @@
 	return m&ModeType == 0
 }
 
-// Perm returns the Unix permission bits in m (m & ModePerm).
+// Perm returns the Unix permission bits in m (m & [ModePerm]).
 func (m FileMode) Perm() FileMode {
 	return m & ModePerm
 }
 
-// Type returns type bits in m (m & ModeType).
+// Type returns type bits in m (m & [ModeType]).
 func (m FileMode) Type() FileMode {
 	return m & ModeType
 }
diff --git a/src/io/fs/glob.go b/src/io/fs/glob.go
index 0e529cd..db17156 100644
--- a/src/io/fs/glob.go
+++ b/src/io/fs/glob.go
@@ -20,15 +20,15 @@
 
 // Glob returns the names of all files matching pattern or nil
 // if there is no matching file. The syntax of patterns is the same
-// as in path.Match. The pattern may describe hierarchical names such as
+// as in [path.Match]. The pattern may describe hierarchical names such as
 // usr/*/bin/ed.
 //
 // Glob ignores file system errors such as I/O errors reading directories.
-// The only possible returned error is path.ErrBadPattern, reporting that
+// The only possible returned error is [path.ErrBadPattern], reporting that
 // the pattern is malformed.
 //
-// If fs implements GlobFS, Glob calls fs.Glob.
-// Otherwise, Glob uses ReadDir to traverse the directory tree
+// If fs implements [GlobFS], Glob calls fs.Glob.
+// Otherwise, Glob uses [ReadDir] to traverse the directory tree
 // and look for matches for the pattern.
 func Glob(fsys FS, pattern string) (matches []string, err error) {
 	return globWithLimit(fsys, pattern, 0)
diff --git a/src/io/fs/readdir.go b/src/io/fs/readdir.go
index 42aca49..22ced48 100644
--- a/src/io/fs/readdir.go
+++ b/src/io/fs/readdir.go
@@ -10,7 +10,7 @@
 )
 
 // ReadDirFS is the interface implemented by a file system
-// that provides an optimized implementation of ReadDir.
+// that provides an optimized implementation of [ReadDir].
 type ReadDirFS interface {
 	FS
 
@@ -22,7 +22,7 @@
 // ReadDir reads the named directory
 // and returns a list of directory entries sorted by filename.
 //
-// If fs implements ReadDirFS, ReadDir calls fs.ReadDir.
+// If fs implements [ReadDirFS], ReadDir calls fs.ReadDir.
 // Otherwise ReadDir calls fs.Open and uses ReadDir and Close
 // on the returned file.
 func ReadDir(fsys FS, name string) ([]DirEntry, error) {
@@ -71,7 +71,7 @@
 	return FormatDirEntry(di)
 }
 
-// FileInfoToDirEntry returns a DirEntry that returns information from info.
+// FileInfoToDirEntry returns a [DirEntry] that returns information from info.
 // If info is nil, FileInfoToDirEntry returns nil.
 func FileInfoToDirEntry(info FileInfo) DirEntry {
 	if info == nil {
diff --git a/src/io/fs/readdir_test.go b/src/io/fs/readdir_test.go
index a2b2c12..4c409ae 100644
--- a/src/io/fs/readdir_test.go
+++ b/src/io/fs/readdir_test.go
@@ -5,6 +5,7 @@
 package fs_test
 
 import (
+	"errors"
 	. "io/fs"
 	"os"
 	"testing"
@@ -91,3 +92,20 @@
 		})
 	}
 }
+
+func errorPath(err error) string {
+	var perr *PathError
+	if !errors.As(err, &perr) {
+		return ""
+	}
+	return perr.Path
+}
+
+func TestReadDirPath(t *testing.T) {
+	fsys := os.DirFS(t.TempDir())
+	_, err1 := ReadDir(fsys, "non-existent")
+	_, err2 := ReadDir(struct{ FS }{fsys}, "non-existent")
+	if s1, s2 := errorPath(err1), errorPath(err2); s1 != s2 {
+		t.Fatalf("s1: %s != s2: %s", s1, s2)
+	}
+}
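
The errorPath helper digs the path out of a wrapped *PathError with errors.As so the two ReadDir code paths can be compared. The same extraction works anywhere an fs error needs inspecting; a short sketch (pathOf is an illustrative name):

	package main

	import (
		"errors"
		"fmt"
		"io/fs"
		"os"
	)

	// pathOf returns the Path field of a wrapped *fs.PathError,
	// or "" if err does not wrap one.
	func pathOf(err error) string {
		var perr *fs.PathError
		if errors.As(err, &perr) {
			return perr.Path
		}
		return ""
	}

	func main() {
		_, err := os.Open("does-not-exist")
		fmt.Println(pathOf(err), errors.Is(err, fs.ErrNotExist)) // does-not-exist true
	}
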
diff --git a/src/io/fs/readfile.go b/src/io/fs/readfile.go
index d3c181c..41ca5bf 100644
--- a/src/io/fs/readfile.go
+++ b/src/io/fs/readfile.go
@@ -7,7 +7,7 @@
 import "io"
 
 // ReadFileFS is the interface implemented by a file system
-// that provides an optimized implementation of ReadFile.
+// that provides an optimized implementation of [ReadFile].
 type ReadFileFS interface {
 	FS
 
@@ -22,13 +22,13 @@
 }
 
 // ReadFile reads the named file from the file system fs and returns its contents.
-// A successful call returns a nil error, not io.EOF.
+// A successful call returns a nil error, not [io.EOF].
 // (Because ReadFile reads the whole file, the expected EOF
 // from the final Read is not treated as an error to be reported.)
 //
-// If fs implements ReadFileFS, ReadFile calls fs.ReadFile.
+// If fs implements [ReadFileFS], ReadFile calls fs.ReadFile.
 // Otherwise ReadFile calls fs.Open and uses Read and Close
-// on the returned file.
+// on the returned [File].
 func ReadFile(fsys FS, name string) ([]byte, error) {
 	if fsys, ok := fsys.(ReadFileFS); ok {
 		return fsys.ReadFile(name)
diff --git a/src/io/fs/readfile_test.go b/src/io/fs/readfile_test.go
index 07219c1..3c521f6 100644
--- a/src/io/fs/readfile_test.go
+++ b/src/io/fs/readfile_test.go
@@ -6,6 +6,7 @@
 
 import (
 	. "io/fs"
+	"os"
 	"testing"
 	"testing/fstest"
 	"time"
@@ -57,3 +58,12 @@
 		t.Fatalf(`ReadFile(sub(.), "hello.txt") = %q, %v, want %q, nil`, data, err, "hello, world")
 	}
 }
+
+func TestReadFilePath(t *testing.T) {
+	fsys := os.DirFS(t.TempDir())
+	_, err1 := ReadFile(fsys, "non-existent")
+	_, err2 := ReadFile(struct{ FS }{fsys}, "non-existent")
+	if s1, s2 := errorPath(err1), errorPath(err2); s1 != s2 {
+		t.Fatalf("s1: %s != s2: %s", s1, s2)
+	}
+}
diff --git a/src/io/fs/stat.go b/src/io/fs/stat.go
index 735a6e3..bbb91c2 100644
--- a/src/io/fs/stat.go
+++ b/src/io/fs/stat.go
@@ -13,10 +13,10 @@
 	Stat(name string) (FileInfo, error)
 }
 
-// Stat returns a FileInfo describing the named file from the file system.
+// Stat returns a [FileInfo] describing the named file from the file system.
 //
-// If fs implements StatFS, Stat calls fs.Stat.
-// Otherwise, Stat opens the file to stat it.
+// If fs implements [StatFS], Stat calls fs.Stat.
+// Otherwise, Stat opens the [File] to stat it.
 func Stat(fsys FS, name string) (FileInfo, error) {
 	if fsys, ok := fsys.(StatFS); ok {
 		return fsys.Stat(name)
diff --git a/src/io/fs/sub.go b/src/io/fs/sub.go
index ae20e03..9999e63 100644
--- a/src/io/fs/sub.go
+++ b/src/io/fs/sub.go
@@ -17,19 +17,19 @@
 	Sub(dir string) (FS, error)
 }
 
-// Sub returns an FS corresponding to the subtree rooted at fsys's dir.
+// Sub returns an [FS] corresponding to the subtree rooted at fsys's dir.
 //
 // If dir is ".", Sub returns fsys unchanged.
-// Otherwise, if fs implements SubFS, Sub returns fsys.Sub(dir).
-// Otherwise, Sub returns a new FS implementation sub that,
+// Otherwise, if fs implements [SubFS], Sub returns fsys.Sub(dir).
+// Otherwise, Sub returns a new [FS] implementation sub that,
 // in effect, implements sub.Open(name) as fsys.Open(path.Join(dir, name)).
 // The implementation also translates calls to ReadDir, ReadFile, and Glob appropriately.
 //
 // Note that Sub(os.DirFS("/"), "prefix") is equivalent to os.DirFS("/prefix")
 // and that neither of them guarantees to avoid operating system
-// accesses outside "/prefix", because the implementation of os.DirFS
+// accesses outside "/prefix", because the implementation of [os.DirFS]
 // does not check for symbolic links inside "/prefix" that point to
-// other directories. That is, os.DirFS is not a general substitute for a
+// other directories. That is, [os.DirFS] is not a general substitute for a
 // chroot-style security mechanism, and Sub does not change that fact.
 func Sub(fsys FS, dir string) (FS, error) {
 	if !ValidPath(dir) {
diff --git a/src/io/fs/walk.go b/src/io/fs/walk.go
index baf559e..2e8a8db 100644
--- a/src/io/fs/walk.go
+++ b/src/io/fs/walk.go
@@ -9,60 +9,60 @@
 	"path"
 )
 
-// SkipDir is used as a return value from WalkDirFuncs to indicate that
+// SkipDir is used as a return value from [WalkDirFunc] to indicate that
 // the directory named in the call is to be skipped. It is not returned
 // as an error by any function.
 var SkipDir = errors.New("skip this directory")
 
-// SkipAll is used as a return value from WalkDirFuncs to indicate that
+// SkipAll is used as a return value from [WalkDirFunc] to indicate that
 // all remaining files and directories are to be skipped. It is not returned
 // as an error by any function.
 var SkipAll = errors.New("skip everything and stop the walk")
 
-// WalkDirFunc is the type of the function called by WalkDir to visit
+// WalkDirFunc is the type of the function called by [WalkDir] to visit
 // each file or directory.
 //
-// The path argument contains the argument to WalkDir as a prefix.
+// The path argument contains the argument to [WalkDir] as a prefix.
 // That is, if WalkDir is called with root argument "dir" and finds a file
 // named "a" in that directory, the walk function will be called with
 // argument "dir/a".
 //
-// The d argument is the fs.DirEntry for the named path.
+// The d argument is the [DirEntry] for the named path.
 //
-// The error result returned by the function controls how WalkDir
-// continues. If the function returns the special value SkipDir, WalkDir
+// The error result returned by the function controls how [WalkDir]
+// continues. If the function returns the special value [SkipDir], WalkDir
 // skips the current directory (path if d.IsDir() is true, otherwise
 // path's parent directory). If the function returns the special value
-// SkipAll, WalkDir skips all remaining files and directories. Otherwise,
+// [SkipAll], WalkDir skips all remaining files and directories. Otherwise,
 // if the function returns a non-nil error, WalkDir stops entirely and
 // returns that error.
 //
 // The err argument reports an error related to path, signaling that
-// WalkDir will not walk into that directory. The function can decide how
+// [WalkDir] will not walk into that directory. The function can decide how
 // to handle that error; as described earlier, returning the error will
 // cause WalkDir to stop walking the entire tree.
 //
-// WalkDir calls the function with a non-nil err argument in two cases.
+// [WalkDir] calls the function with a non-nil err argument in two cases.
 //
-// First, if the initial fs.Stat on the root directory fails, WalkDir
+// First, if the initial [Stat] on the root directory fails, WalkDir
 // calls the function with path set to root, d set to nil, and err set to
-// the error from fs.Stat.
+// the error from [fs.Stat].
 //
-// Second, if a directory's ReadDir method fails, WalkDir calls the
+// Second, if a directory's ReadDir method (see [ReadDirFile]) fails, WalkDir calls the
 // function with path set to the directory's path, d set to an
-// fs.DirEntry describing the directory, and err set to the error from
+// [DirEntry] describing the directory, and err set to the error from
 // ReadDir. In this second case, the function is called twice with the
 // path of the directory: the first call is before the directory read is
 // attempted and has err set to nil, giving the function a chance to
-// return SkipDir or SkipAll and avoid the ReadDir entirely. The second call
+// return [SkipDir] or [SkipAll] and avoid the ReadDir entirely. The second call
 // is after a failed ReadDir and reports the error from ReadDir.
 // (If ReadDir succeeds, there is no second call.)
 //
-// The differences between WalkDirFunc compared to filepath.WalkFunc are:
+// The differences between WalkDirFunc compared to [path/filepath.WalkFunc] are:
 //
-//   - The second argument has type fs.DirEntry instead of fs.FileInfo.
-//   - The function is called before reading a directory, to allow SkipDir
-//     or SkipAll to bypass the directory read entirely or skip all remaining
+//   - The second argument has type [DirEntry] instead of [FileInfo].
+//   - The function is called before reading a directory, to allow [SkipDir]
+//     or [SkipAll] to bypass the directory read entirely or skip all remaining
 //     files and directories respectively.
 //   - If a directory read fails, the function is called a second time
 //     for that directory to report the error.
@@ -106,7 +106,7 @@
 // directory in the tree, including root.
 //
 // All errors that arise visiting files and directories are filtered by fn:
-// see the fs.WalkDirFunc documentation for details.
+// see the [fs.WalkDirFunc] documentation for details.
 //
 // The files are walked in lexical order, which makes the output deterministic
 // but requires WalkDir to read an entire directory into memory before proceeding
@@ -119,23 +119,10 @@
 	if err != nil {
 		err = fn(root, nil, err)
 	} else {
-		err = walkDir(fsys, root, &statDirEntry{info}, fn)
+		err = walkDir(fsys, root, FileInfoToDirEntry(info), fn)
 	}
 	if err == SkipDir || err == SkipAll {
 		return nil
 	}
 	return err
 }
-
-type statDirEntry struct {
-	info FileInfo
-}
-
-func (d *statDirEntry) Name() string            { return d.info.Name() }
-func (d *statDirEntry) IsDir() bool             { return d.info.IsDir() }
-func (d *statDirEntry) Type() FileMode          { return d.info.Mode().Type() }
-func (d *statDirEntry) Info() (FileInfo, error) { return d.info, nil }
-
-func (d *statDirEntry) String() string {
-	return FormatDirEntry(d)
-}
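
WalkDir now wraps the root's FileInfo with the exported FileInfoToDirEntry instead of the removed statDirEntry type. Callers holding a FileInfo but needing a DirEntry can use the same helper; a short sketch (the printed output is only an example):

	package main

	import (
		"fmt"
		"io/fs"
		"os"
	)

	func main() {
		info, err := os.Stat(".")
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		var de fs.DirEntry = fs.FileInfoToDirEntry(info)
		fmt.Println(de.Name(), de.IsDir(), de.Type()) // e.g. ". true d---------"
	}
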
diff --git a/src/io/io.go b/src/io/io.go
index 01f36e0..7f16e18 100644
--- a/src/io/io.go
+++ b/src/io/io.go
@@ -39,7 +39,7 @@
 // because callers will test for EOF using ==.)
 // Functions should return EOF only to signal a graceful end of input.
 // If the EOF occurs unexpectedly in a structured data stream,
-// the appropriate error is either ErrUnexpectedEOF or some other error
+// the appropriate error is either [ErrUnexpectedEOF] or some other error
 // giving more detail.
 var EOF = errors.New("EOF")
 
@@ -47,9 +47,9 @@
 // middle of reading a fixed-size block or data structure.
 var ErrUnexpectedEOF = errors.New("unexpected EOF")
 
-// ErrNoProgress is returned by some clients of a Reader when
+// ErrNoProgress is returned by some clients of a [Reader] when
 // many calls to Read have failed to return any data or error,
-// usually the sign of a broken Reader implementation.
+// usually the sign of a broken [Reader] implementation.
 var ErrNoProgress = errors.New("multiple Read calls return no data or error")
 
 // Reader is the interface that wraps the basic Read method.
@@ -112,9 +112,9 @@
 //
 // Seek sets the offset for the next Read or Write to offset,
 // interpreted according to whence:
-// SeekStart means relative to the start of the file,
-// SeekCurrent means relative to the current offset, and
-// SeekEnd means relative to the end
+// [SeekStart] means relative to the start of the file,
+// [SeekCurrent] means relative to the current offset, and
+// [SeekEnd] means relative to the end
 // (for example, offset = -2 specifies the penultimate byte of the file).
 // Seek returns the new offset relative to the start of the
 // file or an error, if any.
@@ -185,7 +185,7 @@
 // The return value n is the number of bytes read.
 // Any error except EOF encountered during the read is also returned.
 //
-// The Copy function uses ReaderFrom if available.
+// The [Copy] function uses [ReaderFrom] if available.
 type ReaderFrom interface {
 	ReadFrom(r Reader) (n int64, err error)
 }
@@ -257,7 +257,7 @@
 // byte was consumed, and the returned byte value is undefined.
 //
 // ReadByte provides an efficient interface for byte-at-time
-// processing. A Reader that does not implement  ByteReader
+// processing. A [Reader] that does not implement  ByteReader
 // can be wrapped using bufio.NewReader to add this method.
 type ByteReader interface {
 	ReadByte() (byte, error)
@@ -269,7 +269,7 @@
 // UnreadByte causes the next call to ReadByte to return the last byte read.
 // If the last operation was not a successful call to ReadByte, UnreadByte may
 // return an error, unread the last byte read (or the byte prior to the
-// last-unread byte), or (in implementations that support the Seeker interface)
+// last-unread byte), or (in implementations that support the [Seeker] interface)
 // seek to one byte before the current offset.
 type ByteScanner interface {
 	ByteReader
@@ -296,7 +296,7 @@
 // UnreadRune causes the next call to ReadRune to return the last rune read.
 // If the last operation was not a successful call to ReadRune, UnreadRune may
 // return an error, unread the last rune read (or the rune prior to the
-// last-unread rune), or (in implementations that support the Seeker interface)
+// last-unread rune), or (in implementations that support the [Seeker] interface)
 // seek to the start of the rune before the current offset.
 type RuneScanner interface {
 	RuneReader
@@ -309,8 +309,8 @@
 }
 
 // WriteString writes the contents of the string s to w, which accepts a slice of bytes.
-// If w implements StringWriter, its WriteString method is invoked directly.
-// Otherwise, w.Write is called exactly once.
+// If w implements [StringWriter], [StringWriter.WriteString] is invoked directly.
+// Otherwise, [Writer.Write] is called exactly once.
 func WriteString(w Writer, s string) (n int, err error) {
 	if sw, ok := w.(StringWriter); ok {
 		return sw.WriteString(s)
@@ -322,8 +322,8 @@
 // It returns the number of bytes copied and an error if fewer bytes were read.
 // The error is EOF only if no bytes were read.
 // If an EOF happens after reading fewer than min bytes,
-// ReadAtLeast returns ErrUnexpectedEOF.
-// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer.
+// ReadAtLeast returns [ErrUnexpectedEOF].
+// If min is greater than the length of buf, ReadAtLeast returns [ErrShortBuffer].
 // On return, n >= min if and only if err == nil.
 // If r returns an error having read at least min bytes, the error is dropped.
 func ReadAtLeast(r Reader, buf []byte, min int) (n int, err error) {
@@ -347,7 +347,7 @@
 // It returns the number of bytes copied and an error if fewer bytes were read.
 // The error is EOF only if no bytes were read.
 // If an EOF happens after reading some but not all the bytes,
-// ReadFull returns ErrUnexpectedEOF.
+// ReadFull returns [ErrUnexpectedEOF].
 // On return, n == len(buf) if and only if err == nil.
 // If r returns an error having read at least len(buf) bytes, the error is dropped.
 func ReadFull(r Reader, buf []byte) (n int, err error) {
@@ -359,8 +359,7 @@
 // error encountered while copying.
 // On return, written == n if and only if err == nil.
 //
-// If dst implements the ReaderFrom interface,
-// the copy is implemented using it.
+// If dst implements [ReaderFrom], the copy is implemented using it.
 func CopyN(dst Writer, src Reader, n int64) (written int64, err error) {
 	written, err = Copy(dst, LimitReader(src, n))
 	if written == n {
@@ -381,9 +380,9 @@
 // Because Copy is defined to read from src until EOF, it does
 // not treat an EOF from Read as an error to be reported.
 //
-// If src implements the WriterTo interface,
+// If src implements [WriterTo],
 // the copy is implemented by calling src.WriteTo(dst).
-// Otherwise, if dst implements the ReaderFrom interface,
+// Otherwise, if dst implements [ReaderFrom],
 // the copy is implemented by calling dst.ReadFrom(src).
 func Copy(dst Writer, src Reader) (written int64, err error) {
 	return copyBuffer(dst, src, nil)
@@ -394,7 +393,7 @@
 // temporary one. If buf is nil, one is allocated; otherwise if it has
 // zero length, CopyBuffer panics.
 //
-// If either src implements WriterTo or dst implements ReaderFrom,
+// If either src implements [WriterTo] or dst implements [ReaderFrom],
 // buf will not be used to perform the copy.
 func CopyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error) {
 	if buf != nil && len(buf) == 0 {
@@ -482,7 +481,7 @@
 	return
 }
 
-// NewSectionReader returns a SectionReader that reads from r
+// NewSectionReader returns a [SectionReader] that reads from r
 // starting at offset off and stops with EOF after n bytes.
 func NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader {
 	var remaining int64
@@ -494,16 +493,17 @@
 		// Assume we can read up to an offset of 1<<63 - 1.
 		remaining = maxint64
 	}
-	return &SectionReader{r, off, off, remaining}
+	return &SectionReader{r, off, off, remaining, n}
 }
 
 // SectionReader implements Read, Seek, and ReadAt on a section
-// of an underlying ReaderAt.
+// of an underlying [ReaderAt].
 type SectionReader struct {
-	r     ReaderAt
-	base  int64
+	r     ReaderAt // constant after creation
+	base  int64    // constant after creation
 	off   int64
-	limit int64
+	limit int64 // constant after creation
+	n     int64 // constant after creation
 }
 
 func (s *SectionReader) Read(p []byte) (n int, err error) {
@@ -540,7 +540,7 @@
 }
 
 func (s *SectionReader) ReadAt(p []byte, off int64) (n int, err error) {
-	if off < 0 || off >= s.limit-s.base {
+	if off < 0 || off >= s.Size() {
 		return 0, EOF
 	}
 	off += s.base
@@ -558,6 +558,14 @@
 // Size returns the size of the section in bytes.
 func (s *SectionReader) Size() int64 { return s.limit - s.base }
 
+// Outer returns the underlying [ReaderAt] and offsets for the section.
+//
+// The returned values are the same that were passed to [NewSectionReader]
+// when the [SectionReader] was created.
+func (s *SectionReader) Outer() (r ReaderAt, off int64, n int64) {
+	return s.r, s.base, s.n
+}
+
 // An OffsetWriter maps writes at offset base to offset base+off in the underlying writer.
 type OffsetWriter struct {
 	w    WriterAt
@@ -565,7 +573,7 @@
 	off  int64 // the current offset
 }
 
-// NewOffsetWriter returns an OffsetWriter that writes to w
+// NewOffsetWriter returns an [OffsetWriter] that writes to w
 // starting at offset off.
 func NewOffsetWriter(w WriterAt, off int64) *OffsetWriter {
 	return &OffsetWriter{w, off, off}
@@ -602,7 +610,7 @@
 	return offset - o.base, nil
 }
 
-// TeeReader returns a Reader that writes to w what it reads from r.
+// TeeReader returns a [Reader] that writes to w what it reads from r.
 // All reads from r performed through it are matched with
 // corresponding writes to w. There is no internal buffering -
 // the write must complete before the read completes.
@@ -626,7 +634,7 @@
 	return
 }
 
-// Discard is a Writer on which all Write calls succeed
+// Discard is a [Writer] on which all Write calls succeed
 // without doing anything.
 var Discard Writer = discard{}
 
@@ -667,9 +675,9 @@
 	}
 }
 
-// NopCloser returns a ReadCloser with a no-op Close method wrapping
-// the provided Reader r.
-// If r implements WriterTo, the returned ReadCloser will implement WriterTo
+// NopCloser returns a [ReadCloser] with a no-op Close method wrapping
+// the provided [Reader] r.
+// If r implements [WriterTo], the returned [ReadCloser] will implement [WriterTo]
 // by forwarding calls to r.
 func NopCloser(r Reader) ReadCloser {
 	if _, ok := r.(WriterTo); ok {
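
Outer is new here: it returns exactly the ReaderAt, offset, and length that were passed to NewSectionReader, which helps code that needs to re-slice or recreate a section. A brief usage sketch:

	package main

	import (
		"fmt"
		"io"
		"strings"
	)

	func main() {
		r := strings.NewReader("0123456789")
		s := io.NewSectionReader(r, 2, 5) // reads "23456"

		buf := make([]byte, 5)
		n, _ := s.Read(buf)
		fmt.Printf("%s\n", buf[:n]) // 23456

		outer, off, length := s.Outer()
		fmt.Println(outer == r, off, length) // true 2 5
	}
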
diff --git a/src/io/io_test.go b/src/io/io_test.go
index c09b5e3..9491ffa 100644
--- a/src/io/io_test.go
+++ b/src/io/io_test.go
@@ -384,6 +384,9 @@
 		if n, err := s.ReadAt(buf, int64(tt.at)); n != len(tt.exp) || string(buf[:n]) != tt.exp || err != tt.err {
 			t.Fatalf("%d: ReadAt(%d) = %q, %v; expected %q, %v", i, tt.at, buf[:n], err, tt.exp, tt.err)
 		}
+		if _r, off, n := s.Outer(); _r != r || off != int64(tt.off) || n != int64(tt.n) {
+			t.Fatalf("%d: Outer() = %v, %d, %d; expected %v, %d, %d", i, _r, off, n, r, tt.off, tt.n)
+		}
 	}
 }
 
@@ -445,6 +448,9 @@
 	if n != 0 || err != EOF {
 		t.Errorf("Read = %v, %v, want 0, EOF", n, err)
 	}
+	if _r, off, n := sr.Outer(); _r != r || off != 3 || n != maxint64 {
+		t.Fatalf("Outer = %v, %d, %d; expected %v, %d, %d", _r, off, n, r, 3, int64(maxint64))
+	}
 }
 
 // largeWriter returns an invalid count that is larger than the number
diff --git a/src/io/ioutil/tempfile.go b/src/io/ioutil/tempfile.go
index 5360d96..47b2e40 100644
--- a/src/io/ioutil/tempfile.go
+++ b/src/io/ioutil/tempfile.go
@@ -9,12 +9,12 @@
 )
 
 // TempFile creates a new temporary file in the directory dir,
-// opens the file for reading and writing, and returns the resulting *os.File.
+// opens the file for reading and writing, and returns the resulting *[os.File].
 // The filename is generated by taking pattern and adding a random
 // string to the end. If pattern includes a "*", the random string
 // replaces the last "*".
 // If dir is the empty string, TempFile uses the default directory
-// for temporary files (see os.TempDir).
+// for temporary files (see [os.TempDir]).
 // Multiple programs calling TempFile simultaneously
 // will not choose the same file. The caller can use f.Name()
 // to find the pathname of the file. It is the caller's responsibility
@@ -30,7 +30,7 @@
 // random string to the end. If pattern includes a "*", the random string
 // replaces the last "*". TempDir returns the name of the new directory.
 // If dir is the empty string, TempDir uses the
-// default directory for temporary files (see os.TempDir).
+// default directory for temporary files (see [os.TempDir]).
 // Multiple programs calling TempDir simultaneously
 // will not choose the same directory. It is the caller's responsibility
 // to remove the directory when no longer needed.
diff --git a/src/io/pipe.go b/src/io/pipe.go
index 2724e3f..f34cf25 100644
--- a/src/io/pipe.go
+++ b/src/io/pipe.go
@@ -123,9 +123,7 @@
 }
 
 // A PipeReader is the read half of a pipe.
-type PipeReader struct {
-	p *pipe
-}
+type PipeReader struct{ pipe }
 
 // Read implements the standard Read interface:
 // it reads data from the pipe, blocking until a writer
@@ -133,11 +131,11 @@
 // If the write end is closed with an error, that error is
 // returned as err; otherwise err is EOF.
 func (r *PipeReader) Read(data []byte) (n int, err error) {
-	return r.p.read(data)
+	return r.pipe.read(data)
 }
 
 // Close closes the reader; subsequent writes to the
-// write half of the pipe will return the error ErrClosedPipe.
+// write half of the pipe will return the error [ErrClosedPipe].
 func (r *PipeReader) Close() error {
 	return r.CloseWithError(nil)
 }
@@ -148,21 +146,19 @@
 // CloseWithError never overwrites the previous error if it exists
 // and always returns nil.
 func (r *PipeReader) CloseWithError(err error) error {
-	return r.p.closeRead(err)
+	return r.pipe.closeRead(err)
 }
 
 // A PipeWriter is the write half of a pipe.
-type PipeWriter struct {
-	p *pipe
-}
+type PipeWriter struct{ r PipeReader }
 
 // Write implements the standard Write interface:
 // it writes data to the pipe, blocking until one or more readers
 // have consumed all the data or the read end is closed.
 // If the read end is closed with an error, that err is
-// returned as err; otherwise err is ErrClosedPipe.
+// returned as err; otherwise err is [ErrClosedPipe].
 func (w *PipeWriter) Write(data []byte) (n int, err error) {
-	return w.p.write(data)
+	return w.r.pipe.write(data)
 }
 
 // Close closes the writer; subsequent reads from the
@@ -178,17 +174,17 @@
 // CloseWithError never overwrites the previous error if it exists
 // and always returns nil.
 func (w *PipeWriter) CloseWithError(err error) error {
-	return w.p.closeWrite(err)
+	return w.r.pipe.closeWrite(err)
 }
 
 // Pipe creates a synchronous in-memory pipe.
-// It can be used to connect code expecting an io.Reader
-// with code expecting an io.Writer.
+// It can be used to connect code expecting an [io.Reader]
+// with code expecting an [io.Writer].
 //
 // Reads and Writes on the pipe are matched one to one
 // except when multiple Reads are needed to consume a single Write.
-// That is, each Write to the PipeWriter blocks until it has satisfied
-// one or more Reads from the PipeReader that fully consume
+// That is, each Write to the [PipeWriter] blocks until it has satisfied
+// one or more Reads from the [PipeReader] that fully consume
 // the written data.
 // The data is copied directly from the Write to the corresponding
 // Read (or Reads); there is no internal buffering.
@@ -197,10 +193,10 @@
 // Parallel calls to Read and parallel calls to Write are also safe:
 // the individual calls will be gated sequentially.
 func Pipe() (*PipeReader, *PipeWriter) {
-	p := &pipe{
+	pw := &PipeWriter{r: PipeReader{pipe: pipe{
 		wrCh: make(chan []byte),
 		rdCh: make(chan int),
 		done: make(chan struct{}),
-	}
-	return &PipeReader{p}, &PipeWriter{p}
+	}}}
+	return &pw.r, pw
 }
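
The pipe halves now share one allocation (PipeWriter embeds a PipeReader, which embeds the pipe), but the public behavior is unchanged. Basic usage, for reference:

	package main

	import (
		"fmt"
		"io"
	)

	func main() {
		pr, pw := io.Pipe()

		go func() {
			// Writes block until the reader has consumed the data.
			io.WriteString(pw, "hello over a pipe")
			pw.Close() // unblocks the reader with io.EOF
		}()

		data, err := io.ReadAll(pr)
		fmt.Printf("%q %v\n", data, err) // "hello over a pipe" <nil>
	}
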
diff --git a/src/iter/iter.go b/src/iter/iter.go
new file mode 100644
index 0000000..40e4770
--- /dev/null
+++ b/src/iter/iter.go
@@ -0,0 +1,169 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.rangefunc
+
+// Package iter provides basic definitions and operations
+// related to iteration in Go.
+//
+// This package is experimental and can only be imported
+// when building with GOEXPERIMENT=rangefunc.
+package iter
+
+import (
+	"internal/race"
+	"unsafe"
+)
+
+// Seq is an iterator over sequences of individual values.
+// When called as seq(yield), seq calls yield(v) for each value v in the sequence,
+// stopping early if yield returns false.
+type Seq[V any] func(yield func(V) bool)
+
+// Seq2 is an iterator over sequences of pairs of values, most commonly key-value pairs.
+// When called as seq(yield), seq calls yield(k, v) for each pair (k, v) in the sequence,
+// stopping early if yield returns false.
+type Seq2[K, V any] func(yield func(K, V) bool)
+
+type coro struct{}
+
+//go:linkname newcoro runtime.newcoro
+func newcoro(func(*coro)) *coro
+
+//go:linkname coroswitch runtime.coroswitch
+func coroswitch(*coro)
+
+// Pull converts the “push-style” iterator sequence seq
+// into a “pull-style” iterator accessed by the two functions
+// next and stop.
+//
+// Next returns the next value in the sequence
+// and a boolean indicating whether the value is valid.
+// When the sequence is over, next returns the zero V and false.
+// It is valid to call next after reaching the end of the sequence
+// or after calling stop. These calls will continue
+// to return the zero V and false.
+//
+// Stop ends the iteration. It must be called when the caller is
+// no longer interested in next values and next has not yet
+// signaled that the sequence is over (with a false boolean return).
+// It is valid to call stop multiple times and when next has
+// already returned false.
+//
+// It is an error to call next or stop from multiple goroutines
+// simultaneously.
+func Pull[V any](seq Seq[V]) (next func() (V, bool), stop func()) {
+	var (
+		v     V
+		ok    bool
+		done  bool
+		racer int
+	)
+	c := newcoro(func(c *coro) {
+		race.Acquire(unsafe.Pointer(&racer))
+		yield := func(v1 V) bool {
+			if done {
+				return false
+			}
+			v, ok = v1, true
+			race.Release(unsafe.Pointer(&racer))
+			coroswitch(c)
+			race.Acquire(unsafe.Pointer(&racer))
+			return !done
+		}
+		seq(yield)
+		var v0 V
+		v, ok = v0, false
+		done = true
+		race.Release(unsafe.Pointer(&racer))
+	})
+	next = func() (v1 V, ok1 bool) {
+		race.Write(unsafe.Pointer(&racer)) // detect races
+		if done {
+			return
+		}
+		race.Release(unsafe.Pointer(&racer))
+		coroswitch(c)
+		race.Acquire(unsafe.Pointer(&racer))
+		return v, ok
+	}
+	stop = func() {
+		race.Write(unsafe.Pointer(&racer)) // detect races
+		if !done {
+			done = true
+			race.Release(unsafe.Pointer(&racer))
+			coroswitch(c)
+			race.Acquire(unsafe.Pointer(&racer))
+		}
+	}
+	return next, stop
+}
+
+// Pull2 converts the “push-style” iterator sequence seq
+// into a “pull-style” iterator accessed by the two functions
+// next and stop.
+//
+// Next returns the next pair in the sequence
+// and a boolean indicating whether the pair is valid.
+// When the sequence is over, next returns a pair of zero values and false.
+// It is valid to call next after reaching the end of the sequence
+// or after calling stop. These calls will continue
+// to return a pair of zero values and false.
+//
+// Stop ends the iteration. It must be called when the caller is
+// no longer interested in next values and next has not yet
+// signaled that the sequence is over (with a false boolean return).
+// It is valid to call stop multiple times and when next has
+// already returned false.
+//
+// It is an error to call next or stop from multiple goroutines
+// simultaneously.
+func Pull2[K, V any](seq Seq2[K, V]) (next func() (K, V, bool), stop func()) {
+	var (
+		k     K
+		v     V
+		ok    bool
+		done  bool
+		racer int
+	)
+	c := newcoro(func(c *coro) {
+		race.Acquire(unsafe.Pointer(&racer))
+		yield := func(k1 K, v1 V) bool {
+			if done {
+				return false
+			}
+			k, v, ok = k1, v1, true
+			race.Release(unsafe.Pointer(&racer))
+			coroswitch(c)
+			race.Acquire(unsafe.Pointer(&racer))
+			return !done
+		}
+		seq(yield)
+		var k0 K
+		var v0 V
+		k, v, ok = k0, v0, false
+		done = true
+		race.Release(unsafe.Pointer(&racer))
+	})
+	next = func() (k1 K, v1 V, ok1 bool) {
+		race.Write(unsafe.Pointer(&racer)) // detect races
+		if done {
+			return
+		}
+		race.Release(unsafe.Pointer(&racer))
+		coroswitch(c)
+		race.Acquire(unsafe.Pointer(&racer))
+		return k, v, ok
+	}
+	stop = func() {
+		race.Write(unsafe.Pointer(&racer)) // detect races
+		if !done {
+			done = true
+			race.Release(unsafe.Pointer(&racer))
+			coroswitch(c)
+			race.Acquire(unsafe.Pointer(&racer))
+		}
+	}
+	return next, stop
+}
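As a rough usage sketch of the Pull API documented above (assuming GOEXPERIMENT=rangefunc is enabled and that fmt and iter are imported; the sequence itself is illustrative):

	var seq iter.Seq[int] = func(yield func(int) bool) {
		for i := 0; i < 3; i++ {
			if !yield(i) {
				return
			}
		}
	}
	next, stop := iter.Pull(seq)
	defer stop() // release the paused iteration even if the loop exits early
	for v, ok := next(); ok; v, ok = next() {
		fmt.Println(v) // prints 0, 1, 2
	}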
diff --git a/src/iter/pull_test.go b/src/iter/pull_test.go
new file mode 100644
index 0000000..38e0ee9
--- /dev/null
+++ b/src/iter/pull_test.go
@@ -0,0 +1,118 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.rangefunc
+
+package iter
+
+import (
+	"fmt"
+	"runtime"
+	"testing"
+)
+
+func count(n int) Seq[int] {
+	return func(yield func(int) bool) {
+		for i := range n {
+			if !yield(i) {
+				break
+			}
+		}
+	}
+}
+
+func squares(n int) Seq2[int, int64] {
+	return func(yield func(int, int64) bool) {
+		for i := range n {
+			if !yield(i, int64(i)*int64(i)) {
+				break
+			}
+		}
+	}
+}
+
+func TestPull(t *testing.T) {
+
+	for end := 0; end <= 3; end++ {
+		t.Run(fmt.Sprint(end), func(t *testing.T) {
+			ng := runtime.NumGoroutine()
+			wantNG := func(want int) {
+				if xg := runtime.NumGoroutine() - ng; xg != want {
+					t.Helper()
+					t.Errorf("have %d extra goroutines, want %d", xg, want)
+				}
+			}
+			wantNG(0)
+			next, stop := Pull(count(3))
+			wantNG(1)
+			for i := range end {
+				v, ok := next()
+				if v != i || ok != true {
+					t.Fatalf("next() = %d, %v, want %d, %v", v, ok, i, true)
+				}
+				wantNG(1)
+			}
+			wantNG(1)
+			if end < 3 {
+				stop()
+				wantNG(0)
+			}
+			for range 2 {
+				v, ok := next()
+				if v != 0 || ok != false {
+					t.Fatalf("next() = %d, %v, want %d, %v", v, ok, 0, false)
+				}
+				wantNG(0)
+			}
+			wantNG(0)
+
+			stop()
+			stop()
+			stop()
+			wantNG(0)
+		})
+	}
+}
+
+func TestPull2(t *testing.T) {
+	for end := 0; end <= 3; end++ {
+		t.Run(fmt.Sprint(end), func(t *testing.T) {
+			ng := runtime.NumGoroutine()
+			wantNG := func(want int) {
+				if xg := runtime.NumGoroutine() - ng; xg != want {
+					t.Helper()
+					t.Errorf("have %d extra goroutines, want %d", xg, want)
+				}
+			}
+			wantNG(0)
+			next, stop := Pull2(squares(3))
+			wantNG(1)
+			for i := range end {
+				k, v, ok := next()
+				if k != i || v != int64(i*i) || ok != true {
+					t.Fatalf("next() = %d, %d, %v, want %d, %d, %v", k, v, ok, i, i*i, true)
+				}
+				wantNG(1)
+			}
+			wantNG(1)
+			if end < 3 {
+				stop()
+				wantNG(0)
+			}
+			for range 2 {
+				k, v, ok := next()
+				if v != 0 || ok != false {
+					t.Fatalf("next() = %d, %d, %v, want %d, %d, %v", k, v, ok, 0, 0, false)
+				}
+				wantNG(0)
+			}
+			wantNG(0)
+
+			stop()
+			stop()
+			stop()
+			wantNG(0)
+		})
+	}
+}
diff --git a/src/log/log.go b/src/log/log.go
index 9d5440e..d4c9c13 100644
--- a/src/log/log.go
+++ b/src/log/log.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package log implements a simple logging package. It defines a type, Logger,
+// Package log implements a simple logging package. It defines a type, [Logger],
 // with methods for formatting output. It also has a predefined 'standard'
 // Logger accessible through helper functions Print[f|ln], Fatal[f|ln], and
 // Panic[f|ln], which are easier to use than creating a Logger manually.
@@ -10,7 +10,7 @@
 // of each logged message.
 // Every log message is output on a separate line: if the message being
 // printed does not end in a newline, the logger will add one.
-// The Fatal functions call os.Exit(1) after writing the log message.
+// The Fatal functions call [os.Exit](1) after writing the log message.
 // The Panic functions call panic after writing the log message.
 package log
 
@@ -25,7 +25,7 @@
 	"time"
 )
 
-// These flags define which text to prefix to each log entry generated by the Logger.
+// These flags define which text to prefix to each log entry generated by the [Logger].
 // Bits are or'ed together to control what's printed.
 // With the exception of the Lmsgprefix flag, there is no
 // control over the order they appear (the order listed here)
@@ -51,7 +51,7 @@
 )
 
 // A Logger represents an active logging object that generates lines of
-// output to an io.Writer. Each logging operation makes a single call to
+// output to an [io.Writer]. Each logging operation makes a single call to
 // the Writer's Write method. A Logger can be used simultaneously from
 // multiple goroutines; it guarantees to serialize access to the Writer.
 type Logger struct {
@@ -63,10 +63,10 @@
 	isDiscard atomic.Bool
 }
 
-// New creates a new Logger. The out variable sets the
+// New creates a new [Logger]. The out variable sets the
 // destination to which log data will be written.
 // The prefix appears at the beginning of each generated log line, or
-// after the log header if the Lmsgprefix flag is provided.
+// after the log header if the [Lmsgprefix] flag is provided.
 // The flag argument defines the logging properties.
 func New(out io.Writer, prefix string, flag int) *Logger {
 	l := new(Logger)
@@ -255,7 +255,7 @@
 }
 
 // Print calls l.Output to print to the logger.
-// Arguments are handled in the manner of fmt.Print.
+// Arguments are handled in the manner of [fmt.Print].
 func (l *Logger) Print(v ...any) {
 	l.output(0, 2, func(b []byte) []byte {
 		return fmt.Append(b, v...)
@@ -263,7 +263,7 @@
 }
 
 // Printf calls l.Output to print to the logger.
-// Arguments are handled in the manner of fmt.Printf.
+// Arguments are handled in the manner of [fmt.Printf].
 func (l *Logger) Printf(format string, v ...any) {
 	l.output(0, 2, func(b []byte) []byte {
 		return fmt.Appendf(b, format, v...)
@@ -271,26 +271,26 @@
 }
 
 // Println calls l.Output to print to the logger.
-// Arguments are handled in the manner of fmt.Println.
+// Arguments are handled in the manner of [fmt.Println].
 func (l *Logger) Println(v ...any) {
 	l.output(0, 2, func(b []byte) []byte {
 		return fmt.Appendln(b, v...)
 	})
 }
 
-// Fatal is equivalent to l.Print() followed by a call to os.Exit(1).
+// Fatal is equivalent to l.Print() followed by a call to [os.Exit](1).
 func (l *Logger) Fatal(v ...any) {
 	l.Output(2, fmt.Sprint(v...))
 	os.Exit(1)
 }
 
-// Fatalf is equivalent to l.Printf() followed by a call to os.Exit(1).
+// Fatalf is equivalent to l.Printf() followed by a call to [os.Exit](1).
 func (l *Logger) Fatalf(format string, v ...any) {
 	l.Output(2, fmt.Sprintf(format, v...))
 	os.Exit(1)
 }
 
-// Fatalln is equivalent to l.Println() followed by a call to os.Exit(1).
+// Fatalln is equivalent to l.Println() followed by a call to [os.Exit](1).
 func (l *Logger) Fatalln(v ...any) {
 	l.Output(2, fmt.Sprintln(v...))
 	os.Exit(1)
@@ -318,13 +318,13 @@
 }
 
 // Flags returns the output flags for the logger.
-// The flag bits are Ldate, Ltime, and so on.
+// The flag bits are [Ldate], [Ltime], and so on.
 func (l *Logger) Flags() int {
 	return int(l.flag.Load())
 }
 
 // SetFlags sets the output flags for the logger.
-// The flag bits are Ldate, Ltime, and so on.
+// The flag bits are [Ldate], [Ltime], and so on.
 func (l *Logger) SetFlags(flag int) {
 	l.flag.Store(int32(flag))
 }
@@ -355,13 +355,13 @@
 }
 
 // Flags returns the output flags for the standard logger.
-// The flag bits are Ldate, Ltime, and so on.
+// The flag bits are [Ldate], [Ltime], and so on.
 func Flags() int {
 	return std.Flags()
 }
 
 // SetFlags sets the output flags for the standard logger.
-// The flag bits are Ldate, Ltime, and so on.
+// The flag bits are [Ldate], [Ltime], and so on.
 func SetFlags(flag int) {
 	std.SetFlags(flag)
 }
@@ -384,7 +384,7 @@
 // These functions write to the standard logger.
 
 // Print calls Output to print to the standard logger.
-// Arguments are handled in the manner of fmt.Print.
+// Arguments are handled in the manner of [fmt.Print].
 func Print(v ...any) {
 	std.output(0, 2, func(b []byte) []byte {
 		return fmt.Append(b, v...)
@@ -392,7 +392,7 @@
 }
 
 // Printf calls Output to print to the standard logger.
-// Arguments are handled in the manner of fmt.Printf.
+// Arguments are handled in the manner of [fmt.Printf].
 func Printf(format string, v ...any) {
 	std.output(0, 2, func(b []byte) []byte {
 		return fmt.Appendf(b, format, v...)
@@ -400,46 +400,46 @@
 }
 
 // Println calls Output to print to the standard logger.
-// Arguments are handled in the manner of fmt.Println.
+// Arguments are handled in the manner of [fmt.Println].
 func Println(v ...any) {
 	std.output(0, 2, func(b []byte) []byte {
 		return fmt.Appendln(b, v...)
 	})
 }
 
-// Fatal is equivalent to Print() followed by a call to os.Exit(1).
+// Fatal is equivalent to [Print] followed by a call to [os.Exit](1).
 func Fatal(v ...any) {
 	std.Output(2, fmt.Sprint(v...))
 	os.Exit(1)
 }
 
-// Fatalf is equivalent to Printf() followed by a call to os.Exit(1).
+// Fatalf is equivalent to [Printf] followed by a call to [os.Exit](1).
 func Fatalf(format string, v ...any) {
 	std.Output(2, fmt.Sprintf(format, v...))
 	os.Exit(1)
 }
 
-// Fatalln is equivalent to Println() followed by a call to os.Exit(1).
+// Fatalln is equivalent to [Println] followed by a call to [os.Exit](1).
 func Fatalln(v ...any) {
 	std.Output(2, fmt.Sprintln(v...))
 	os.Exit(1)
 }
 
-// Panic is equivalent to Print() followed by a call to panic().
+// Panic is equivalent to [Print] followed by a call to panic().
 func Panic(v ...any) {
 	s := fmt.Sprint(v...)
 	std.Output(2, s)
 	panic(s)
 }
 
-// Panicf is equivalent to Printf() followed by a call to panic().
+// Panicf is equivalent to [Printf] followed by a call to panic().
 func Panicf(format string, v ...any) {
 	s := fmt.Sprintf(format, v...)
 	std.Output(2, s)
 	panic(s)
 }
 
-// Panicln is equivalent to Println() followed by a call to panic().
+// Panicln is equivalent to [Println] followed by a call to panic().
 func Panicln(v ...any) {
 	s := fmt.Sprintln(v...)
 	std.Output(2, s)
@@ -451,7 +451,7 @@
 // Logger. A newline is appended if the last character of s is not
 // already a newline. Calldepth is the count of the number of
 // frames to skip when computing the file name and line number
-// if Llongfile or Lshortfile is set; a value of 1 will print the details
+// if [Llongfile] or [Lshortfile] is set; a value of 1 will print the details
 // for the caller of Output.
 func Output(calldepth int, s string) error {
 	return std.Output(calldepth+1, s) // +1 for this frame.
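A hedged sketch of how the calldepth parameter described above is commonly used when wrapping the standard logger (logHere is an illustrative name, not part of the package):

	log.SetFlags(log.Lshortfile)

	// With calldepth 1 the reported file:line would be logHere itself;
	// calldepth 2 attributes the message to logHere's caller.
	logHere := func(msg string) {
		log.Output(2, msg)
	}
	logHere("hello") // prints something like "main.go:14: hello"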
diff --git a/src/log/slog/attr.go b/src/log/slog/attr.go
index 90e343b..2f45946 100644
--- a/src/log/slog/attr.go
+++ b/src/log/slog/attr.go
@@ -46,18 +46,18 @@
 	return Attr{key, BoolValue(v)}
 }
 
-// Time returns an Attr for a time.Time.
+// Time returns an Attr for a [time.Time].
 // It discards the monotonic portion.
 func Time(key string, v time.Time) Attr {
 	return Attr{key, TimeValue(v)}
 }
 
-// Duration returns an Attr for a time.Duration.
+// Duration returns an Attr for a [time.Duration].
 func Duration(key string, v time.Duration) Attr {
 	return Attr{key, DurationValue(v)}
 }
 
-// Group returns an Attr for a Group Value.
+// Group returns an Attr for a Group [Value].
 // The first argument is the key; the remaining arguments
 // are converted to Attrs as in [Logger.Log].
 //
diff --git a/src/log/slog/doc.go b/src/log/slog/doc.go
index 088df61..0015593 100644
--- a/src/log/slog/doc.go
+++ b/src/log/slog/doc.go
@@ -41,7 +41,7 @@
 	2022/11/08 15:28:26 INFO hello count=3
 
 For more control over the output format, create a logger with a different handler.
-This statement uses [New] to create a new logger with a TextHandler
+This statement uses [New] to create a new logger with a [TextHandler]
 that writes structured records in text form to standard error:
 
 	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
@@ -210,7 +210,7 @@
 
 is the most efficient way to achieve the same output as
 
-	slog.Info("hello", "count", 3)
+	slog.InfoContext(ctx, "hello", "count", 3)
 
 # Customizing a type's logging behavior
 
@@ -222,7 +222,7 @@
 
 A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve]
 method handles these cases carefully, avoiding infinite loops and unbounded recursion.
-Handler authors and others may wish to use Value.Resolve instead of calling LogValue directly.
+Handler authors and others may wish to use [Value.Resolve] instead of calling LogValue directly.
 
 # Wrapping output methods
 
@@ -231,8 +231,8 @@
 incorrect source information for functions that wrap slog. For instance, if you
 define this function in file mylog.go:
 
-	func Infof(format string, args ...any) {
-	    slog.Default().Info(fmt.Sprintf(format, args...))
+	func Infof(logger *slog.Logger, format string, args ...any) {
+	    logger.Info(fmt.Sprintf(format, args...))
 	}
 
 and you call it like this in main.go:
diff --git a/src/log/slog/example_log_level_test.go b/src/log/slog/example_log_level_test.go
new file mode 100644
index 0000000..ca8db416
--- /dev/null
+++ b/src/log/slog/example_log_level_test.go
@@ -0,0 +1,58 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slog_test
+
+import (
+	"log"
+	"log/slog"
+	"log/slog/internal/slogtest"
+	"os"
+)
+
+// This example shows how to use slog.SetLogLoggerLevel to change the minimal level
+// of the slog package's internal default handler before calling slog.SetDefault.
+func ExampleSetLogLoggerLevel_log() {
+	defer log.SetFlags(log.Flags()) // revert changes after the example
+	log.SetFlags(0)
+	defer log.SetOutput(log.Writer()) // revert changes after the example
+	log.SetOutput(os.Stdout)
+
+	// Default logging level is slog.LevelInfo.
+	log.Print("log debug") // log debug
+	slog.Debug("debug")    // no output
+	slog.Info("info")      // INFO info
+
+	// Set the default logging level to slog.LevelDebug.
+	currentLogLevel := slog.SetLogLoggerLevel(slog.LevelDebug)
+	defer slog.SetLogLoggerLevel(currentLogLevel) // revert changes after the example
+
+	log.Print("log debug") // log debug
+	slog.Debug("debug")    // DEBUG debug
+	slog.Info("info")      // INFO info
+
+	// Output:
+	// log debug
+	// INFO info
+	// log debug
+	// DEBUG debug
+	// INFO info
+}
+
+// This example shows how to use slog.SetLogLoggerLevel to change the minimal level
+// of the internal writer that bridges the log package to the custom slog handler
+// after calling slog.SetDefault.
+func ExampleSetLogLoggerLevel_slog() {
+	// Set the default logging level to slog.LevelError.
+	currentLogLevel := slog.SetLogLoggerLevel(slog.LevelError)
+	defer slog.SetLogLoggerLevel(currentLogLevel) // revert changes after the example
+
+	defer slog.SetDefault(slog.Default()) // revert changes after the example
+	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ReplaceAttr: slogtest.RemoveTime})))
+
+	log.Print("error") // level=ERROR msg=error
+
+	// Output:
+	// level=ERROR msg=error
+}
diff --git a/src/log/slog/example_test.go b/src/log/slog/example_test.go
index a677456..b03cc01 100644
--- a/src/log/slog/example_test.go
+++ b/src/log/slog/example_test.go
@@ -6,7 +6,6 @@
 
 import (
 	"log/slog"
-	"log/slog/internal/slogtest"
 	"net/http"
 	"os"
 	"time"
@@ -16,7 +15,16 @@
 	r, _ := http.NewRequest("GET", "localhost", nil)
 	// ...
 
-	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ReplaceAttr: slogtest.RemoveTime}))
+	logger := slog.New(
+		slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
+			ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
+				if a.Key == slog.TimeKey && len(groups) == 0 {
+					return slog.Attr{}
+				}
+				return a
+			},
+		}),
+	)
 	logger.Info("finished",
 		slog.Group("req",
 			slog.String("method", r.Method),
diff --git a/src/log/slog/handler.go b/src/log/slog/handler.go
index a73983c..2ff85b5 100644
--- a/src/log/slog/handler.go
+++ b/src/log/slog/handler.go
@@ -9,13 +9,14 @@
 	"fmt"
 	"io"
 	"log/slog/internal/buffer"
+	"reflect"
 	"slices"
 	"strconv"
 	"sync"
 	"time"
 )
 
-// A Handler handles log records produced by a Logger..
+// A Handler handles log records produced by a Logger.
 //
 // A typical handler may print log records to standard error,
 // or write them to a file or database, or perhaps augment them
@@ -75,11 +76,11 @@
 	// A Handler should treat WithGroup as starting a Group of Attrs that ends
 	// at the end of the log event. That is,
 	//
-	//     logger.WithGroup("s").LogAttrs(level, msg, slog.Int("a", 1), slog.Int("b", 2))
+	//     logger.WithGroup("s").LogAttrs(ctx, level, msg, slog.Int("a", 1), slog.Int("b", 2))
 	//
 	// should behave like
 	//
-	//     logger.LogAttrs(level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2)))
+	//     logger.LogAttrs(ctx, level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2)))
 	//
 	// If the name is empty, WithGroup returns the receiver.
 	WithGroup(name string) Handler
@@ -99,7 +100,7 @@
 }
 
 func (*defaultHandler) Enabled(_ context.Context, l Level) bool {
-	return l >= LevelInfo
+	return l >= logLoggerLevel.Level()
 }
 
 // Collect the level, attributes and message in a string and
@@ -124,7 +125,7 @@
 	return &defaultHandler{h.ch.withGroup(name), h.output}
 }
 
-// HandlerOptions are options for a TextHandler or JSONHandler.
+// HandlerOptions are options for a [TextHandler] or [JSONHandler].
 // A zero HandlerOptions consists entirely of default values.
 type HandlerOptions struct {
 	// AddSource causes the handler to compute the source code position
@@ -178,7 +179,7 @@
 	// message of the log call. The associated value is a string.
 	MessageKey = "msg"
 	// SourceKey is the key used by the built-in handlers for the source file
-	// and line of the log call. The associated value is a string.
+	// and line of the log call. The associated value is a *[Source].
 	SourceKey = "source"
 )
 
@@ -232,18 +233,24 @@
 	state := h2.newHandleState((*buffer.Buffer)(&h2.preformattedAttrs), false, "")
 	defer state.free()
 	state.prefix.WriteString(h.groupPrefix)
-	if len(h2.preformattedAttrs) > 0 {
+	if pfa := h2.preformattedAttrs; len(pfa) > 0 {
 		state.sep = h.attrSep()
+		if h2.json && pfa[len(pfa)-1] == '{' {
+			state.sep = ""
+		}
 	}
+	// Remember the position in the buffer, in case all attrs are empty.
+	pos := state.buf.Len()
 	state.openGroups()
-	for _, a := range as {
-		state.appendAttr(a)
+	if !state.appendAttrs(as) {
+		state.buf.SetLen(pos)
+	} else {
+		// Remember the new prefix for later keys.
+		h2.groupPrefix = state.prefix.String()
+		// Remember how many opened groups are in preformattedAttrs,
+		// so we don't open them again when we handle a Record.
+		h2.nOpenGroups = len(h2.groups)
 	}
-	// Remember the new prefix for later keys.
-	h2.groupPrefix = state.prefix.String()
-	// Remember how many opened groups are in preformattedAttrs,
-	// so we don't open them again when we handle a Record.
-	h2.nOpenGroups = len(h2.groups)
 	return h2
 }
 
@@ -309,10 +316,13 @@
 
 func (s *handleState) appendNonBuiltIns(r Record) {
 	// preformatted Attrs
-	if len(s.h.preformattedAttrs) > 0 {
+	if pfa := s.h.preformattedAttrs; len(pfa) > 0 {
 		s.buf.WriteString(s.sep)
-		s.buf.Write(s.h.preformattedAttrs)
+		s.buf.Write(pfa)
 		s.sep = s.h.attrSep()
+		if s.h.json && pfa[len(pfa)-1] == '{' {
+			s.sep = ""
+		}
 	}
 	// Attrs in Record -- unlike the built-in ones, they are in groups started
 	// from WithGroup.
@@ -320,12 +330,24 @@
 	nOpenGroups := s.h.nOpenGroups
 	if r.NumAttrs() > 0 {
 		s.prefix.WriteString(s.h.groupPrefix)
+		// The group may turn out to be empty even though it has attrs (for
+		// example, ReplaceAttr may delete all the attrs).
+		// So remember where we are in the buffer, to restore the position
+		// later if necessary.
+		pos := s.buf.Len()
 		s.openGroups()
 		nOpenGroups = len(s.h.groups)
+		empty := true
 		r.Attrs(func(a Attr) bool {
-			s.appendAttr(a)
+			if s.appendAttr(a) {
+				empty = false
+			}
 			return true
 		})
+		if empty {
+			s.buf.SetLen(pos)
+			nOpenGroups = s.h.nOpenGroups
+		}
 	}
 	if s.h.json {
 		// Close all open groups.
@@ -427,23 +449,36 @@
 	}
 }
 
-// appendAttr appends the Attr's key and value using app.
+// appendAttrs appends the slice of Attrs.
+// It reports whether something was appended.
+func (s *handleState) appendAttrs(as []Attr) bool {
+	nonEmpty := false
+	for _, a := range as {
+		if s.appendAttr(a) {
+			nonEmpty = true
+		}
+	}
+	return nonEmpty
+}
+
+// appendAttr appends the Attr's key and value.
 // It handles replacement and checking for an empty key.
-// after replacement).
-func (s *handleState) appendAttr(a Attr) {
+// It reports whether something was appended.
+func (s *handleState) appendAttr(a Attr) bool {
+	a.Value = a.Value.Resolve()
 	if rep := s.h.opts.ReplaceAttr; rep != nil && a.Value.Kind() != KindGroup {
 		var gs []string
 		if s.groups != nil {
 			gs = *s.groups
 		}
-		// Resolve before calling ReplaceAttr, so the user doesn't have to.
-		a.Value = a.Value.Resolve()
+		// a.Value is resolved before calling ReplaceAttr, so the user doesn't have to.
 		a = rep(gs, a)
+		// The ReplaceAttr function may return an unresolved Attr.
+		a.Value = a.Value.Resolve()
 	}
-	a.Value = a.Value.Resolve()
 	// Elide empty Attrs.
 	if a.isEmpty() {
-		return
+		return false
 	}
 	// Special case: Source.
 	if v := a.Value; v.Kind() == KindAny {
@@ -459,12 +494,18 @@
 		attrs := a.Value.Group()
 		// Output only non-empty groups.
 		if len(attrs) > 0 {
+			// The group may turn out to be empty even though it has attrs (for
+			// example, ReplaceAttr may delete all the attrs).
+			// So remember where we are in the buffer, to restore the position
+			// later if necessary.
+			pos := s.buf.Len()
 			// Inline a group with an empty key.
 			if a.Key != "" {
 				s.openGroup(a.Key)
 			}
-			for _, aa := range attrs {
-				s.appendAttr(aa)
+			if !s.appendAttrs(attrs) {
+				s.buf.SetLen(pos)
+				return false
 			}
 			if a.Key != "" {
 				s.closeGroup(a.Key)
@@ -474,6 +515,7 @@
 		s.appendKey(a.Key)
 		s.appendValue(a.Value)
 	}
+	return true
 }
 
 func (s *handleState) appendError(err error) {
@@ -512,6 +554,23 @@
 }
 
 func (s *handleState) appendValue(v Value) {
+	defer func() {
+		if r := recover(); r != nil {
+			// If it panics with a nil pointer, the most likely cause is
+			// an encoding.TextMarshaler or error value that fails to guard against nil,
+			// in which case "<nil>" seems to be a reasonable choice.
+			//
+			// Adapted from the code in fmt/print.go.
+			if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() {
+				s.appendString("<nil>")
+				return
+			}
+
+			// Otherwise just print the original panic message.
+			s.appendString(fmt.Sprintf("!PANIC: %v", r))
+		}
+	}()
+
 	var err error
 	if s.h.json {
 		err = appendJSONValue(s, v)
@@ -527,41 +586,19 @@
 	if s.h.json {
 		appendJSONTime(s, t)
 	} else {
-		writeTimeRFC3339Millis(s.buf, t)
+		*s.buf = appendRFC3339Millis(*s.buf, t)
 	}
 }
 
-// This takes half the time of Time.AppendFormat.
-func writeTimeRFC3339Millis(buf *buffer.Buffer, t time.Time) {
-	year, month, day := t.Date()
-	buf.WritePosIntWidth(year, 4)
-	buf.WriteByte('-')
-	buf.WritePosIntWidth(int(month), 2)
-	buf.WriteByte('-')
-	buf.WritePosIntWidth(day, 2)
-	buf.WriteByte('T')
-	hour, min, sec := t.Clock()
-	buf.WritePosIntWidth(hour, 2)
-	buf.WriteByte(':')
-	buf.WritePosIntWidth(min, 2)
-	buf.WriteByte(':')
-	buf.WritePosIntWidth(sec, 2)
-	ns := t.Nanosecond()
-	buf.WriteByte('.')
-	buf.WritePosIntWidth(ns/1e6, 3)
-	_, offsetSeconds := t.Zone()
-	if offsetSeconds == 0 {
-		buf.WriteByte('Z')
-	} else {
-		offsetMinutes := offsetSeconds / 60
-		if offsetMinutes < 0 {
-			buf.WriteByte('-')
-			offsetMinutes = -offsetMinutes
-		} else {
-			buf.WriteByte('+')
-		}
-		buf.WritePosIntWidth(offsetMinutes/60, 2)
-		buf.WriteByte(':')
-		buf.WritePosIntWidth(offsetMinutes%60, 2)
-	}
+func appendRFC3339Millis(b []byte, t time.Time) []byte {
+	// Format according to time.RFC3339Nano since it is highly optimized,
+	// but truncate it to use millisecond resolution.
+	// Unfortunately, that format trims trailing 0s, so add 1/10 millisecond
+	// to guarantee that there are exactly 4 digits after the period.
+	const prefixLen = len("2006-01-02T15:04:05.000")
+	n := len(b)
+	t = t.Truncate(time.Millisecond).Add(time.Millisecond / 10)
+	b = t.AppendFormat(b, time.RFC3339Nano)
+	b = append(b[:n+prefixLen], b[n+prefixLen+1:]...) // drop the 4th digit
+	return b
 }
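A minimal sketch of the user-visible effect of the grouped-attribute handling above (handler options and key names are illustrative; os and log/slog are assumed to be imported): when ReplaceAttr elides every member of a group, the built-in handlers no longer emit the group key at all.

	h := slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
		ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
			if a.Key == "password" {
				return slog.Attr{} // drop sensitive attrs entirely
			}
			return a
		},
	})
	// "auth" has no attrs left after replacement, so no "auth" key appears in the output.
	slog.New(h).Info("login", slog.Group("auth", slog.String("password", "hunter2")))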
diff --git a/src/log/slog/handler_test.go b/src/log/slog/handler_test.go
index 4f10ee5..8ce3452 100644
--- a/src/log/slog/handler_test.go
+++ b/src/log/slog/handler_test.go
@@ -11,7 +11,6 @@
 	"context"
 	"encoding/json"
 	"io"
-	"log/slog/internal/buffer"
 	"path/filepath"
 	"slices"
 	"strconv"
@@ -436,6 +435,99 @@
 			wantText: `time.mins=3 time.secs=2 msg=message`,
 			wantJSON: `{"time":{"mins":3,"secs":2},"msg":"message"}`,
 		},
+		{
+			name:     "replace empty",
+			replace:  func([]string, Attr) Attr { return Attr{} },
+			attrs:    []Attr{Group("g", Int("a", 1))},
+			wantText: "",
+			wantJSON: `{}`,
+		},
+		{
+			name: "replace empty 1",
+			with: func(h Handler) Handler {
+				return h.WithGroup("g").WithAttrs([]Attr{Int("a", 1)})
+			},
+			replace:  func([]string, Attr) Attr { return Attr{} },
+			attrs:    []Attr{Group("h", Int("b", 2))},
+			wantText: "",
+			wantJSON: `{}`,
+		},
+		{
+			name: "replace empty 2",
+			with: func(h Handler) Handler {
+				return h.WithGroup("g").WithAttrs([]Attr{Int("a", 1)}).WithGroup("h").WithAttrs([]Attr{Int("b", 2)})
+			},
+			replace:  func([]string, Attr) Attr { return Attr{} },
+			attrs:    []Attr{Group("i", Int("c", 3))},
+			wantText: "",
+			wantJSON: `{}`,
+		},
+		{
+			name:     "replace empty 3",
+			with:     func(h Handler) Handler { return h.WithGroup("g") },
+			replace:  func([]string, Attr) Attr { return Attr{} },
+			attrs:    []Attr{Int("a", 1)},
+			wantText: "",
+			wantJSON: `{}`,
+		},
+		{
+			name: "replace empty inline",
+			with: func(h Handler) Handler {
+				return h.WithGroup("g").WithAttrs([]Attr{Int("a", 1)}).WithGroup("h").WithAttrs([]Attr{Int("b", 2)})
+			},
+			replace:  func([]string, Attr) Attr { return Attr{} },
+			attrs:    []Attr{Group("", Int("c", 3))},
+			wantText: "",
+			wantJSON: `{}`,
+		},
+		{
+			name: "replace partial empty attrs 1",
+			with: func(h Handler) Handler {
+				return h.WithGroup("g").WithAttrs([]Attr{Int("a", 1)}).WithGroup("h").WithAttrs([]Attr{Int("b", 2)})
+			},
+			replace: func(groups []string, attr Attr) Attr {
+				return removeKeys(TimeKey, LevelKey, MessageKey, "a")(groups, attr)
+			},
+			attrs:    []Attr{Group("i", Int("c", 3))},
+			wantText: "g.h.b=2 g.h.i.c=3",
+			wantJSON: `{"g":{"h":{"b":2,"i":{"c":3}}}}`,
+		},
+		{
+			name: "replace partial empty attrs 2",
+			with: func(h Handler) Handler {
+				return h.WithGroup("g").WithAttrs([]Attr{Int("a", 1)}).WithAttrs([]Attr{Int("n", 4)}).WithGroup("h").WithAttrs([]Attr{Int("b", 2)})
+			},
+			replace: func(groups []string, attr Attr) Attr {
+				return removeKeys(TimeKey, LevelKey, MessageKey, "a", "b")(groups, attr)
+			},
+			attrs:    []Attr{Group("i", Int("c", 3))},
+			wantText: "g.n=4 g.h.i.c=3",
+			wantJSON: `{"g":{"n":4,"h":{"i":{"c":3}}}}`,
+		},
+		{
+			name: "replace partial empty attrs 3",
+			with: func(h Handler) Handler {
+				return h.WithGroup("g").WithAttrs([]Attr{Int("x", 0)}).WithAttrs([]Attr{Int("a", 1)}).WithAttrs([]Attr{Int("n", 4)}).WithGroup("h").WithAttrs([]Attr{Int("b", 2)})
+			},
+			replace: func(groups []string, attr Attr) Attr {
+				return removeKeys(TimeKey, LevelKey, MessageKey, "a", "c")(groups, attr)
+			},
+			attrs:    []Attr{Group("i", Int("c", 3))},
+			wantText: "g.x=0 g.n=4 g.h.b=2",
+			wantJSON: `{"g":{"x":0,"n":4,"h":{"b":2}}}`,
+		},
+		{
+			name: "replace resolved group",
+			replace: func(groups []string, a Attr) Attr {
+				if a.Value.Kind() == KindGroup {
+					return Attr{"bad", IntValue(1)}
+				}
+				return removeKeys(TimeKey, LevelKey, MessageKey)(groups, a)
+			},
+			attrs:    []Attr{Any("name", logValueName{"Perry", "Platypus"})},
+			wantText: "name.first=Perry name.last=Platypus",
+			wantJSON: `{"name":{"first":"Perry","last":"Platypus"}}`,
+		},
 	} {
 		r := NewRecord(testTime, LevelInfo, "message", callerPC(2))
 		line := strconv.Itoa(r.source().Line)
@@ -603,11 +695,8 @@
 		time.Date(2000, 1, 2, 3, 4, 5, 400, time.Local),
 		time.Date(2000, 11, 12, 3, 4, 500, 5e7, time.UTC),
 	} {
+		got := string(appendRFC3339Millis(nil, tm))
 		want := tm.Format(rfc3339Millis)
-		buf := buffer.New()
-		defer buf.Free()
-		writeTimeRFC3339Millis(buf, tm)
-		got := buf.String()
 		if got != want {
 			t.Errorf("got %s, want %s", got, want)
 		}
@@ -615,12 +704,10 @@
 }
 
 func BenchmarkWriteTime(b *testing.B) {
-	buf := buffer.New()
-	defer buf.Free()
 	tm := time.Date(2022, 3, 4, 5, 6, 7, 823456789, time.Local)
 	b.ResetTimer()
+	var buf []byte
 	for i := 0; i < b.N; i++ {
-		writeTimeRFC3339Millis(buf, tm)
-		buf.Reset()
+		buf = appendRFC3339Millis(buf[:0], tm)
 	}
 }
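For reference, a standalone sketch of the millisecond-formatting trick used by appendRFC3339Millis, which the benchmark above exercises (the fixed date is only an illustration; fmt and time are assumed to be imported):

	t := time.Date(2009, 11, 10, 23, 0, 0, 123456789, time.UTC)
	// Truncate to milliseconds, then add 0.1ms so RFC3339Nano always produces
	// exactly four sub-second digits (it would otherwise trim trailing zeros).
	t = t.Truncate(time.Millisecond).Add(time.Millisecond / 10)
	b := t.AppendFormat(nil, time.RFC3339Nano) // "2009-11-10T23:00:00.1231Z"
	const prefixLen = len("2006-01-02T15:04:05.000")
	b = append(b[:prefixLen], b[prefixLen+1:]...) // drop the extra fourth digit
	fmt.Println(string(b)) // "2009-11-10T23:00:00.123Z"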
diff --git a/src/log/slog/internal/benchmarks/handlers_test.go b/src/log/slog/internal/benchmarks/handlers_test.go
index 6c00c80..0e0fbf1 100644
--- a/src/log/slog/internal/benchmarks/handlers_test.go
+++ b/src/log/slog/internal/benchmarks/handlers_test.go
@@ -1,3 +1,7 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package benchmarks
 
 import (
diff --git a/src/log/slog/internal/buffer/buffer.go b/src/log/slog/internal/buffer/buffer.go
index c4fcefd..310ec37 100644
--- a/src/log/slog/internal/buffer/buffer.go
+++ b/src/log/slog/internal/buffer/buffer.go
@@ -32,7 +32,7 @@
 }
 
 func (b *Buffer) Reset() {
-	*b = (*b)[:0]
+	b.SetLen(0)
 }
 
 func (b *Buffer) Write(p []byte) (int, error) {
@@ -50,35 +50,14 @@
 	return nil
 }
 
-func (b *Buffer) WritePosInt(i int) {
-	b.WritePosIntWidth(i, 0)
-}
-
-// WritePosIntWidth writes non-negative integer i to the buffer, padded on the left
-// by zeroes to the given width. Use a width of 0 to omit padding.
-func (b *Buffer) WritePosIntWidth(i, width int) {
-	// Cheap integer to fixed-width decimal ASCII.
-	// Copied from log/log.go.
-
-	if i < 0 {
-		panic("negative int")
-	}
-
-	// Assemble decimal in reverse order.
-	var bb [20]byte
-	bp := len(bb) - 1
-	for i >= 10 || width > 1 {
-		width--
-		q := i / 10
-		bb[bp] = byte('0' + i - q*10)
-		bp--
-		i = q
-	}
-	// i < 10
-	bb[bp] = byte('0' + i)
-	b.Write(bb[bp:])
-}
-
 func (b *Buffer) String() string {
 	return string(*b)
 }
+
+func (b *Buffer) Len() int {
+	return len(*b)
+}
+
+func (b *Buffer) SetLen(n int) {
+	*b = (*b)[:n]
+}
diff --git a/src/log/slog/internal/buffer/buffer_test.go b/src/log/slog/internal/buffer/buffer_test.go
index 40b1d1f..06f8284 100644
--- a/src/log/slog/internal/buffer/buffer_test.go
+++ b/src/log/slog/internal/buffer/buffer_test.go
@@ -16,10 +16,9 @@
 	b.WriteString("hello")
 	b.WriteByte(',')
 	b.Write([]byte(" world"))
-	b.WritePosIntWidth(17, 4)
 
 	got := b.String()
-	want := "hello, world0017"
+	want := "hello, world"
 	if got != want {
 		t.Errorf("got %q, want %q", got, want)
 	}
diff --git a/src/log/slog/json_handler.go b/src/log/slog/json_handler.go
index 1c51ab0..da3eae1 100644
--- a/src/log/slog/json_handler.go
+++ b/src/log/slog/json_handler.go
@@ -18,13 +18,13 @@
 	"unicode/utf8"
 )
 
-// JSONHandler is a Handler that writes Records to an io.Writer as
+// JSONHandler is a [Handler] that writes Records to an [io.Writer] as
 // line-delimited JSON objects.
 type JSONHandler struct {
 	*commonHandler
 }
 
-// NewJSONHandler creates a JSONHandler that writes to w,
+// NewJSONHandler creates a [JSONHandler] that writes to w,
 // using the given options.
 // If opts is nil, the default options are used.
 func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler {
@@ -47,7 +47,7 @@
 	return h.commonHandler.enabled(level)
 }
 
-// WithAttrs returns a new JSONHandler whose attributes consists
+// WithAttrs returns a new [JSONHandler] whose attributes consist
 // of h's attributes followed by attrs.
 func (h *JSONHandler) WithAttrs(attrs []Attr) Handler {
 	return &JSONHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
@@ -57,7 +57,7 @@
 	return &JSONHandler{commonHandler: h.commonHandler.withGroup(name)}
 }
 
-// Handle formats its argument Record as a JSON object on a single line.
+// Handle formats its argument [Record] as a JSON object on a single line.
 //
 // If the Record's time is zero, the time is omitted.
 // Otherwise, the key is "time"
@@ -81,7 +81,7 @@
 // First, an Attr whose Value is of type error is formatted as a string, by
 // calling its Error method. Only errors in Attrs receive this special treatment,
 // not errors embedded in structs, slices, maps or other data structures that
-// are processed by the encoding/json package.
+// are processed by the [encoding/json] package.
 //
 // Second, an encoding failure does not cause Handle to return an error.
 // Instead, the error message is formatted as a string.
@@ -226,7 +226,7 @@
 	return buf
 }
 
-var hex = "0123456789abcdef"
+const hex = "0123456789abcdef"
 
 // Copied from encoding/json/tables.go.
 //
diff --git a/src/log/slog/level.go b/src/log/slog/level.go
index cd1213a..7cddf4c 100644
--- a/src/log/slog/level.go
+++ b/src/log/slog/level.go
@@ -16,6 +16,8 @@
 // The higher the level, the more important or severe the event.
 type Level int
 
+// Names for common levels.
+//
 // Level numbers are inherently arbitrary,
 // but we picked them to satisfy three constraints.
 // Any system can map them to another numbering scheme if it wishes.
@@ -38,8 +40,6 @@
 // Level range. OpenTelemetry also has the names TRACE and FATAL, which slog
 // does not. But those OpenTelemetry levels can still be represented as slog
 // Levels by using the appropriate integers.
-//
-// Names for common levels.
 const (
 	LevelDebug Level = -4
 	LevelInfo  Level = 0
@@ -146,14 +146,14 @@
 }
 
 // Level returns the receiver.
-// It implements Leveler.
+// It implements [Leveler].
 func (l Level) Level() Level { return l }
 
-// A LevelVar is a Level variable, to allow a Handler level to change
+// A LevelVar is a [Level] variable, to allow a [Handler] level to change
 // dynamically.
-// It implements Leveler as well as a Set method,
+// It implements [Leveler] as well as a Set method,
 // and it is safe for use by multiple goroutines.
-// The zero LevelVar corresponds to LevelInfo.
+// The zero LevelVar corresponds to [LevelInfo].
 type LevelVar struct {
 	val atomic.Int64
 }
@@ -189,12 +189,12 @@
 	return nil
 }
 
-// A Leveler provides a Level value.
+// A Leveler provides a [Level] value.
 //
 // As Level itself implements Leveler, clients typically supply
-// a Level value wherever a Leveler is needed, such as in HandlerOptions.
+// a Level value wherever a Leveler is needed, such as in [HandlerOptions].
 // Clients who need to vary the level dynamically can provide a more complex
-// Leveler implementation such as *LevelVar.
+// Leveler implementation such as *[LevelVar].
 type Leveler interface {
 	Level() Level
 }
diff --git a/src/log/slog/logger.go b/src/log/slog/logger.go
index a068085..10aa6a2 100644
--- a/src/log/slog/logger.go
+++ b/src/log/slog/logger.go
@@ -14,18 +14,50 @@
 	"time"
 )
 
-var defaultLogger atomic.Value
+var defaultLogger atomic.Pointer[Logger]
+
+var logLoggerLevel LevelVar
+
+// SetLogLoggerLevel controls the level for the bridge to the [log] package.
+//
+// Before [SetDefault] is called, slog top-level logging functions call the default [log.Logger].
+// In that mode, SetLogLoggerLevel sets the minimum level for those calls.
+// By default, the minimum level is Info, so calls to [Debug]
+// (as well as top-level logging calls at lower levels)
+// will not be passed to the log.Logger. After calling
+//
+//	slog.SetLogLoggerLevel(slog.LevelDebug)
+//
+// calls to [Debug] will be passed to the log.Logger.
+//
+// After [SetDefault] is called, calls to the default [log.Logger] are passed to the
+// slog default handler. In that mode,
+// SetLogLoggerLevel sets the level at which those calls are logged.
+// That is, after calling
+//
+//	slog.SetLogLoggerLevel(slog.LevelDebug)
+//
+// a call to [log.Printf] will result in output at level [LevelDebug].
+//
+// SetLogLoggerLevel returns the previous value.
+func SetLogLoggerLevel(level Level) (oldLevel Level) {
+	oldLevel = logLoggerLevel.Level()
+	logLoggerLevel.Set(level)
+	return
+}
 
 func init() {
 	defaultLogger.Store(New(newDefaultHandler(loginternal.DefaultOutput)))
 }
 
-// Default returns the default Logger.
-func Default() *Logger { return defaultLogger.Load().(*Logger) }
+// Default returns the default [Logger].
+func Default() *Logger { return defaultLogger.Load() }
 
-// SetDefault makes l the default Logger.
+// SetDefault makes l the default [Logger], which is used by
+// the top-level functions [Info], [Debug] and so on.
 // After this call, output from the log package's default Logger
-// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler.
+// (as with [log.Print], etc.) will be logged using l's Handler,
+// at a level controlled by [SetLogLoggerLevel].
 func SetDefault(l *Logger) {
 	defaultLogger.Store(l)
 	// If the default's handler is a defaultHandler, then don't use a handleWriter,
@@ -36,7 +68,7 @@
 	// See TestSetDefault.
 	if _, ok := l.Handler().(*defaultHandler); !ok {
 		capturePC := log.Flags()&(log.Lshortfile|log.Llongfile) != 0
-		log.SetOutput(&handlerWriter{l.Handler(), LevelInfo, capturePC})
+		log.SetOutput(&handlerWriter{l.Handler(), &logLoggerLevel, capturePC})
 		log.SetFlags(0) // we want just the log message, no time or location
 	}
 }
@@ -45,12 +77,13 @@
 // It is used to link the default log.Logger to the default slog.Logger.
 type handlerWriter struct {
 	h         Handler
-	level     Level
+	level     Leveler
 	capturePC bool
 }
 
 func (w *handlerWriter) Write(buf []byte) (int, error) {
-	if !w.h.Enabled(context.Background(), w.level) {
+	level := w.level.Level()
+	if !w.h.Enabled(context.Background(), level) {
 		return 0, nil
 	}
 	var pc uintptr
@@ -66,13 +99,13 @@
 	if len(buf) > 0 && buf[len(buf)-1] == '\n' {
 		buf = buf[:len(buf)-1]
 	}
-	r := NewRecord(time.Now(), w.level, string(buf), pc)
+	r := NewRecord(time.Now(), level, string(buf), pc)
 	return origLen, w.h.Handle(context.Background(), r)
 }
 
 // A Logger records structured information about each call to its
 // Log, Debug, Info, Warn, and Error methods.
-// For each call, it creates a Record and passes it to a Handler.
+// For each call, it creates a [Record] and passes it to a [Handler].
 //
 // To create a new Logger, call [New] or a Logger method
 // that begins "With".
@@ -113,7 +146,6 @@
 	c := l.clone()
 	c.handler = l.handler.WithGroup(name)
 	return c
-
 }
 
 // New creates a new Logger with the given non-nil Handler.
@@ -124,7 +156,7 @@
 	return &Logger{handler: h}
 }
 
-// With calls Logger.With on the default logger.
+// With calls [Logger.With] on the default logger.
 func With(args ...any) *Logger {
 	return Default().With(args...)
 }
@@ -137,7 +169,7 @@
 	return l.Handler().Enabled(ctx, level)
 }
 
-// NewLogLogger returns a new log.Logger such that each call to its Output method
+// NewLogLogger returns a new [log.Logger] such that each call to its Output method
 // dispatches a Record to the specified handler. The logger acts as a bridge from
 // the older log API to newer structured logging handlers.
 func NewLogLogger(h Handler, level Level) *log.Logger {
@@ -163,42 +195,42 @@
 	l.logAttrs(ctx, level, msg, attrs...)
 }
 
-// Debug logs at LevelDebug.
+// Debug logs at [LevelDebug].
 func (l *Logger) Debug(msg string, args ...any) {
 	l.log(context.Background(), LevelDebug, msg, args...)
 }
 
-// DebugContext logs at LevelDebug with the given context.
+// DebugContext logs at [LevelDebug] with the given context.
 func (l *Logger) DebugContext(ctx context.Context, msg string, args ...any) {
 	l.log(ctx, LevelDebug, msg, args...)
 }
 
-// Info logs at LevelInfo.
+// Info logs at [LevelInfo].
 func (l *Logger) Info(msg string, args ...any) {
 	l.log(context.Background(), LevelInfo, msg, args...)
 }
 
-// InfoContext logs at LevelInfo with the given context.
+// InfoContext logs at [LevelInfo] with the given context.
 func (l *Logger) InfoContext(ctx context.Context, msg string, args ...any) {
 	l.log(ctx, LevelInfo, msg, args...)
 }
 
-// Warn logs at LevelWarn.
+// Warn logs at [LevelWarn].
 func (l *Logger) Warn(msg string, args ...any) {
 	l.log(context.Background(), LevelWarn, msg, args...)
 }
 
-// WarnContext logs at LevelWarn with the given context.
+// WarnContext logs at [LevelWarn] with the given context.
 func (l *Logger) WarnContext(ctx context.Context, msg string, args ...any) {
 	l.log(ctx, LevelWarn, msg, args...)
 }
 
-// Error logs at LevelError.
+// Error logs at [LevelError].
 func (l *Logger) Error(msg string, args ...any) {
 	l.log(context.Background(), LevelError, msg, args...)
 }
 
-// ErrorContext logs at LevelError with the given context.
+// ErrorContext logs at [LevelError] with the given context.
 func (l *Logger) ErrorContext(ctx context.Context, msg string, args ...any) {
 	l.log(ctx, LevelError, msg, args...)
 }
@@ -245,52 +277,52 @@
 	_ = l.Handler().Handle(ctx, r)
 }
 
-// Debug calls Logger.Debug on the default logger.
+// Debug calls [Logger.Debug] on the default logger.
 func Debug(msg string, args ...any) {
 	Default().log(context.Background(), LevelDebug, msg, args...)
 }
 
-// DebugContext calls Logger.DebugContext on the default logger.
+// DebugContext calls [Logger.DebugContext] on the default logger.
 func DebugContext(ctx context.Context, msg string, args ...any) {
 	Default().log(ctx, LevelDebug, msg, args...)
 }
 
-// Info calls Logger.Info on the default logger.
+// Info calls [Logger.Info] on the default logger.
 func Info(msg string, args ...any) {
 	Default().log(context.Background(), LevelInfo, msg, args...)
 }
 
-// InfoContext calls Logger.InfoContext on the default logger.
+// InfoContext calls [Logger.InfoContext] on the default logger.
 func InfoContext(ctx context.Context, msg string, args ...any) {
 	Default().log(ctx, LevelInfo, msg, args...)
 }
 
-// Warn calls Logger.Warn on the default logger.
+// Warn calls [Logger.Warn] on the default logger.
 func Warn(msg string, args ...any) {
 	Default().log(context.Background(), LevelWarn, msg, args...)
 }
 
-// WarnContext calls Logger.WarnContext on the default logger.
+// WarnContext calls [Logger.WarnContext] on the default logger.
 func WarnContext(ctx context.Context, msg string, args ...any) {
 	Default().log(ctx, LevelWarn, msg, args...)
 }
 
-// Error calls Logger.Error on the default logger.
+// Error calls [Logger.Error] on the default logger.
 func Error(msg string, args ...any) {
 	Default().log(context.Background(), LevelError, msg, args...)
 }
 
-// ErrorContext calls Logger.ErrorContext on the default logger.
+// ErrorContext calls [Logger.ErrorContext] on the default logger.
 func ErrorContext(ctx context.Context, msg string, args ...any) {
 	Default().log(ctx, LevelError, msg, args...)
 }
 
-// Log calls Logger.Log on the default logger.
+// Log calls [Logger.Log] on the default logger.
 func Log(ctx context.Context, level Level, msg string, args ...any) {
 	Default().log(ctx, level, msg, args...)
 }
 
-// LogAttrs calls Logger.LogAttrs on the default logger.
+// LogAttrs calls [Logger.LogAttrs] on the default logger.
 func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
 	Default().logAttrs(ctx, level, msg, attrs...)
 }
diff --git a/src/log/slog/logger_test.go b/src/log/slog/logger_test.go
index 2f5b319..bb1c8a1 100644
--- a/src/log/slog/logger_test.go
+++ b/src/log/slog/logger_test.go
@@ -22,7 +22,13 @@
 	"time"
 )
 
-const timeRE = `\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}(Z|[+-]\d{2}:\d{2})`
+// textTimeRE is a regexp to match log timestamps for the Text handler.
+// This is RFC3339Nano with a fixed 3-digit sub-second precision.
+const textTimeRE = `\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}(Z|[+-]\d{2}:\d{2})`
+
+// jsonTimeRE is a regexp to match log timestamps for the JSON handler.
+// This is RFC3339Nano with an arbitrary sub-second precision.
+const jsonTimeRE = `\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|[+-]\d{2}:\d{2})`
 
 func TestLogTextHandler(t *testing.T) {
 	ctx := context.Background()
@@ -33,7 +39,7 @@
 	check := func(want string) {
 		t.Helper()
 		if want != "" {
-			want = "time=" + timeRE + " " + want
+			want = "time=" + textTimeRE + " " + want
 		}
 		checkLogOutput(t, buf.String(), want)
 		buf.Reset()
@@ -72,9 +78,13 @@
 	// tests might change the default logger using SetDefault. Also ensure we
 	// restore the default logger at the end of the test.
 	currentLogger := Default()
+	currentLogWriter := log.Writer()
+	currentLogFlags := log.Flags()
 	SetDefault(New(newDefaultHandler(loginternal.DefaultOutput)))
 	t.Cleanup(func() {
 		SetDefault(currentLogger)
+		log.SetOutput(currentLogWriter)
+		log.SetFlags(currentLogFlags)
 	})
 
 	// The default slog.Logger's handler uses the log package's default output.
@@ -83,6 +93,13 @@
 	Info("msg", "a", 1)
 	checkLogOutput(t, logbuf.String(), `logger_test.go:\d+: INFO msg a=1`)
 	logbuf.Reset()
+	Info("msg", "p", nil)
+	checkLogOutput(t, logbuf.String(), `logger_test.go:\d+: INFO msg p=<nil>`)
+	logbuf.Reset()
+	var r *regexp.Regexp
+	Info("msg", "r", r)
+	checkLogOutput(t, logbuf.String(), `logger_test.go:\d+: INFO msg r=<nil>`)
+	logbuf.Reset()
 	Warn("msg", "b", 2)
 	checkLogOutput(t, logbuf.String(), `logger_test.go:\d+: WARN msg b=2`)
 	logbuf.Reset()
@@ -107,7 +124,7 @@
 	// log.Logger's output goes through the handler.
 	SetDefault(New(NewTextHandler(&slogbuf, &HandlerOptions{AddSource: true})))
 	log.Print("msg2")
-	checkLogOutput(t, slogbuf.String(), "time="+timeRE+` level=INFO source=.*logger_test.go:\d{3}"? msg=msg2`)
+	checkLogOutput(t, slogbuf.String(), "time="+textTimeRE+` level=INFO source=.*logger_test.go:\d{3}"? msg=msg2`)
 
 	// The default log.Logger always outputs at Info level.
 	slogbuf.Reset()
@@ -174,6 +191,7 @@
 		}
 	}
 
+	defer SetDefault(Default()) // restore
 	logger := New(h)
 	SetDefault(logger)
 
@@ -265,7 +283,7 @@
 		s := "abc"
 		i := 2000
 		d := time.Second
-		wantAllocs(t, 11, func() {
+		wantAllocs(t, 10, func() {
 			dl.Info("hello",
 				"n", i, "s", s, "d", d,
 				"n", i, "s", s, "d", d,
@@ -346,6 +364,71 @@
 	}
 }
 
+// Test defaultHandler minimum level without calling slog.SetDefault.
+func TestLogLoggerLevelForDefaultHandler(t *testing.T) {
+	// Revert any changes to the default logger, flags, and level of log and slog.
+	currentLogLoggerLevel := logLoggerLevel.Level()
+	currentLogWriter := log.Writer()
+	currentLogFlags := log.Flags()
+	t.Cleanup(func() {
+		logLoggerLevel.Set(currentLogLoggerLevel)
+		log.SetOutput(currentLogWriter)
+		log.SetFlags(currentLogFlags)
+	})
+
+	var logBuf bytes.Buffer
+	log.SetOutput(&logBuf)
+	log.SetFlags(0)
+
+	for _, test := range []struct {
+		logLevel Level
+		logFn    func(string, ...any)
+		want     string
+	}{
+		{LevelDebug, Debug, "DEBUG a"},
+		{LevelDebug, Info, "INFO a"},
+		{LevelInfo, Debug, ""},
+		{LevelInfo, Info, "INFO a"},
+	} {
+		SetLogLoggerLevel(test.logLevel)
+		test.logFn("a")
+		checkLogOutput(t, logBuf.String(), test.want)
+		logBuf.Reset()
+	}
+}
+
+// Test handlerWriter minimum level by calling slog.SetDefault.
+func TestLogLoggerLevelForHandlerWriter(t *testing.T) {
+	removeTime := func(_ []string, a Attr) Attr {
+		if a.Key == TimeKey {
+			return Attr{}
+		}
+		return a
+	}
+
+	// Revert any changes to the default logger. This is important because other
+	// tests might change the default logger using SetDefault. Also ensure we
+	// restore the default logger at the end of the test.
+	currentLogger := Default()
+	currentLogLoggerLevel := logLoggerLevel.Level()
+	currentLogWriter := log.Writer()
+	currentFlags := log.Flags()
+	t.Cleanup(func() {
+		SetDefault(currentLogger)
+		logLoggerLevel.Set(currentLogLoggerLevel)
+		log.SetOutput(currentLogWriter)
+		log.SetFlags(currentFlags)
+	})
+
+	var logBuf bytes.Buffer
+	log.SetOutput(&logBuf)
+	log.SetFlags(0)
+	SetLogLoggerLevel(LevelError)
+	SetDefault(New(NewTextHandler(&logBuf, &HandlerOptions{ReplaceAttr: removeTime})))
+	log.Print("error")
+	checkLogOutput(t, logBuf.String(), `level=ERROR msg=error`)
+}
+
 func TestLoggerError(t *testing.T) {
 	var buf bytes.Buffer
 
@@ -370,7 +453,7 @@
 	h := NewTextHandler(&buf, nil)
 	ll := NewLogLogger(h, LevelWarn)
 	ll.Print("hello")
-	checkLogOutput(t, buf.String(), "time="+timeRE+` level=WARN msg=hello`)
+	checkLogOutput(t, buf.String(), "time="+textTimeRE+` level=WARN msg=hello`)
 }
 
 func TestLoggerNoOps(t *testing.T) {
@@ -571,3 +654,64 @@
 		t.Errorf("got %d allocs, want %d", got, want)
 	}
 }
+
+// panicTextAndJsonMarshaler is a type that panics in MarshalText and MarshalJSON.
+type panicTextAndJsonMarshaler struct {
+	msg any
+}
+
+func (p panicTextAndJsonMarshaler) MarshalText() ([]byte, error) {
+	panic(p.msg)
+}
+
+func (p panicTextAndJsonMarshaler) MarshalJSON() ([]byte, error) {
+	panic(p.msg)
+}
+
+func TestPanics(t *testing.T) {
+	// Revert any changes to the default logger. This is important because other
+	// tests might change the default logger using SetDefault. Also ensure we
+	// restore the default logger at the end of the test.
+	currentLogger := Default()
+	currentLogWriter := log.Writer()
+	currentLogFlags := log.Flags()
+	t.Cleanup(func() {
+		SetDefault(currentLogger)
+		log.SetOutput(currentLogWriter)
+		log.SetFlags(currentLogFlags)
+	})
+
+	var logBuf bytes.Buffer
+	log.SetOutput(&logBuf)
+	log.SetFlags(log.Lshortfile &^ log.LstdFlags)
+
+	SetDefault(New(newDefaultHandler(loginternal.DefaultOutput)))
+	for _, pt := range []struct {
+		in  any
+		out string
+	}{
+		{(*panicTextAndJsonMarshaler)(nil), `logger_test.go:\d+: INFO msg p=<nil>`},
+		{panicTextAndJsonMarshaler{io.ErrUnexpectedEOF}, `logger_test.go:\d+: INFO msg p="!PANIC: unexpected EOF"`},
+		{panicTextAndJsonMarshaler{"panicking"}, `logger_test.go:\d+: INFO msg p="!PANIC: panicking"`},
+		{panicTextAndJsonMarshaler{42}, `logger_test.go:\d+: INFO msg p="!PANIC: 42"`},
+	} {
+		Info("msg", "p", pt.in)
+		checkLogOutput(t, logBuf.String(), pt.out)
+		logBuf.Reset()
+	}
+
+	SetDefault(New(NewJSONHandler(&logBuf, nil)))
+	for _, pt := range []struct {
+		in  any
+		out string
+	}{
+		{(*panicTextAndJsonMarshaler)(nil), `{"time":"` + jsonTimeRE + `","level":"INFO","msg":"msg","p":null}`},
+		{panicTextAndJsonMarshaler{io.ErrUnexpectedEOF}, `{"time":"` + jsonTimeRE + `","level":"INFO","msg":"msg","p":"!PANIC: unexpected EOF"}`},
+		{panicTextAndJsonMarshaler{"panicking"}, `{"time":"` + jsonTimeRE + `","level":"INFO","msg":"msg","p":"!PANIC: panicking"}`},
+		{panicTextAndJsonMarshaler{42}, `{"time":"` + jsonTimeRE + `","level":"INFO","msg":"msg","p":"!PANIC: 42"}`},
+	} {
+		Info("msg", "p", pt.in)
+		checkLogOutput(t, logBuf.String(), pt.out)
+		logBuf.Reset()
+	}
+}
diff --git a/src/log/slog/record.go b/src/log/slog/record.go
index 67b76f3..97c8701 100644
--- a/src/log/slog/record.go
+++ b/src/log/slog/record.go
@@ -50,7 +50,7 @@
 	back []Attr
 }
 
-// NewRecord creates a Record from the given arguments.
+// NewRecord creates a [Record] from the given arguments.
 // Use [Record.AddAttrs] to add attributes to the Record.
 //
 // NewRecord is intended for logging APIs that want to support a [Handler] as
@@ -72,12 +72,12 @@
 	return r
 }
 
-// NumAttrs returns the number of attributes in the Record.
+// NumAttrs returns the number of attributes in the [Record].
 func (r Record) NumAttrs() int {
 	return r.nFront + len(r.back)
 }
 
-// Attrs calls f on each Attr in the Record.
+// Attrs calls f on each Attr in the [Record].
 // Iteration stops if f returns false.
 func (r Record) Attrs(f func(Attr) bool) {
 	for i := 0; i < r.nFront; i++ {
@@ -92,7 +92,7 @@
 	}
 }
 
-// AddAttrs appends the given Attrs to the Record's list of Attrs.
+// AddAttrs appends the given Attrs to the [Record]'s list of Attrs.
 // It omits empty groups.
 func (r *Record) AddAttrs(attrs ...Attr) {
 	var i int
@@ -109,7 +109,9 @@
 	if cap(r.back) > len(r.back) {
 		end := r.back[:len(r.back)+1][len(r.back)]
 		if !end.isEmpty() {
-			panic("copies of a slog.Record were both modified")
+			// Don't panic; copy and muddle through.
+			r.back = slices.Clip(r.back)
+			r.back = append(r.back, String("!BUG", "AddAttrs unsafely called on copy of Record made without using Record.Clone"))
 		}
 	}
 	ne := countEmptyGroups(attrs[i:])
@@ -122,7 +124,7 @@
 }
 
 // Add converts the args to Attrs as described in [Logger.Log],
-// then appends the Attrs to the Record's list of Attrs.
+// then appends the Attrs to the [Record]'s list of Attrs.
 // It omits empty groups.
 func (r *Record) Add(args ...any) {
 	var a Attr
@@ -136,12 +138,11 @@
 			r.nFront++
 		} else {
 			if r.back == nil {
-				r.back = make([]Attr, 0, countAttrs(args))
+				r.back = make([]Attr, 0, countAttrs(args)+1)
 			}
 			r.back = append(r.back, a)
 		}
 	}
-
 }
 
 // countAttrs returns the number of Attrs that would be created from args.
@@ -192,7 +193,7 @@
 	Line int    `json:"line"`
 }
 
-// attrs returns the non-zero fields of s as a slice of attrs.
+// group returns the non-zero fields of s as a slice of attrs.
 // It is similar to a LogValue method, but we don't want Source
 // to implement LogValuer because it would be resolved before
 // the ReplaceAttr function was called.
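As the AddAttrs fallback above suggests, the supported way to add attributes to a copy of a Record is to clone it first; a brief sketch (the record contents are illustrative, and time and log/slog are assumed to be imported):

	r := slog.NewRecord(time.Now(), slog.LevelInfo, "message", 0)
	r2 := r.Clone()               // r2 shares no state with r
	r2.AddAttrs(slog.Int("a", 1)) // safe: r is unaffected and no "!BUG" attr is added
	_ = r2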
diff --git a/src/log/slog/record_test.go b/src/log/slog/record_test.go
index 15d9330..931ab66 100644
--- a/src/log/slog/record_test.go
+++ b/src/log/slog/record_test.go
@@ -96,12 +96,15 @@
 	r1.back = b
 	// Make a copy that shares state.
 	r2 := r1
-	// Adding to both should panic.
+	// Adding to both should insert a special Attr in the second.
+	r1AttrsBefore := attrsSlice(r1)
 	r1.AddAttrs(Int("p", 0))
-	if !panics(func() { r2.AddAttrs(Int("p", 1)) }) {
-		t.Error("expected panic")
-	}
+	r2.AddAttrs(Int("p", 1))
+	check(r1, append(slices.Clip(r1AttrsBefore), Int("p", 0)))
 	r1Attrs := attrsSlice(r1)
+	check(r2, append(slices.Clip(r1AttrsBefore),
+		String("!BUG", "AddAttrs unsafely called on copy of Record made without using Record.Clone"), Int("p", 1)))
+
 	// Adding to a clone is fine.
 	r2 = r1.Clone()
 	check(r2, r1Attrs)
diff --git a/src/log/slog/text_handler.go b/src/log/slog/text_handler.go
index 58edb2f6..6819e63 100644
--- a/src/log/slog/text_handler.go
+++ b/src/log/slog/text_handler.go
@@ -16,13 +16,13 @@
 	"unicode/utf8"
 )
 
-// TextHandler is a Handler that writes Records to an io.Writer as a
+// TextHandler is a [Handler] that writes Records to an [io.Writer] as a
 // sequence of key=value pairs separated by spaces and followed by a newline.
 type TextHandler struct {
 	*commonHandler
 }
 
-// NewTextHandler creates a TextHandler that writes to w,
+// NewTextHandler creates a [TextHandler] that writes to w,
 // using the given options.
 // If opts is nil, the default options are used.
 func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler {
@@ -45,7 +45,7 @@
 	return h.commonHandler.enabled(level)
 }
 
-// WithAttrs returns a new TextHandler whose attributes consists
+// WithAttrs returns a new [TextHandler] whose attributes consist
 // of h's attributes followed by attrs.
 func (h *TextHandler) WithAttrs(attrs []Attr) Handler {
 	return &TextHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
@@ -55,7 +55,7 @@
 	return &TextHandler{commonHandler: h.commonHandler.withGroup(name)}
 }
 
-// Handle formats its argument Record as a single line of space-separated
+// Handle formats its argument [Record] as a single line of space-separated
 // key=value items.
 //
 // If the Record's time is zero, the time is omitted.
@@ -75,7 +75,7 @@
 // [HandlerOptions.ReplaceAttr].
 //
 // If a value implements [encoding.TextMarshaler], the result of MarshalText is
-// written. Otherwise, the result of fmt.Sprint is written.
+// written. Otherwise, the result of [fmt.Sprint] is written.
 //
 // Keys and values are quoted with [strconv.Quote] if they contain Unicode space
 // characters, non-printing characters, '"' or '='.
diff --git a/src/log/slog/value.go b/src/log/slog/value.go
index 224848f..d278d9b 100644
--- a/src/log/slog/value.go
+++ b/src/log/slog/value.go
@@ -40,7 +40,7 @@
 	groupptr  *Attr // used in Value.any when the Value is a []Attr
 )
 
-// Kind is the kind of a Value.
+// Kind is the kind of a [Value].
 type Kind int
 
 // The following list is sorted alphabetically, but it's also important that
@@ -105,32 +105,32 @@
 
 //////////////// Constructors
 
-// StringValue returns a new Value for a string.
+// StringValue returns a new [Value] for a string.
 func StringValue(value string) Value {
 	return Value{num: uint64(len(value)), any: stringptr(unsafe.StringData(value))}
 }
 
-// IntValue returns a Value for an int.
+// IntValue returns a [Value] for an int.
 func IntValue(v int) Value {
 	return Int64Value(int64(v))
 }
 
-// Int64Value returns a Value for an int64.
+// Int64Value returns a [Value] for an int64.
 func Int64Value(v int64) Value {
 	return Value{num: uint64(v), any: KindInt64}
 }
 
-// Uint64Value returns a Value for a uint64.
+// Uint64Value returns a [Value] for a uint64.
 func Uint64Value(v uint64) Value {
 	return Value{num: v, any: KindUint64}
 }
 
-// Float64Value returns a Value for a floating-point number.
+// Float64Value returns a [Value] for a floating-point number.
 func Float64Value(v float64) Value {
 	return Value{num: math.Float64bits(v), any: KindFloat64}
 }
 
-// BoolValue returns a Value for a bool.
+// BoolValue returns a [Value] for a bool.
 func BoolValue(v bool) Value {
 	u := uint64(0)
 	if v {
@@ -143,7 +143,7 @@
 // Values. (No user-provided value has this type.)
 type timeLocation *time.Location
 
-// TimeValue returns a Value for a time.Time.
+// TimeValue returns a [Value] for a [time.Time].
 // It discards the monotonic portion.
 func TimeValue(v time.Time) Value {
 	if v.IsZero() {
@@ -156,12 +156,12 @@
 	return Value{num: uint64(v.UnixNano()), any: timeLocation(v.Location())}
 }
 
-// DurationValue returns a Value for a time.Duration.
+// DurationValue returns a [Value] for a [time.Duration].
 func DurationValue(v time.Duration) Value {
 	return Value{num: uint64(v.Nanoseconds()), any: KindDuration}
 }
 
-// GroupValue returns a new Value for a list of Attrs.
+// GroupValue returns a new [Value] for a list of Attrs.
 // The caller must not subsequently mutate the argument slice.
 func GroupValue(as ...Attr) Value {
 	// Remove empty groups.
@@ -190,21 +190,21 @@
 	return n
 }
 
-// AnyValue returns a Value for the supplied value.
+// AnyValue returns a [Value] for the supplied value.
 //
 // If the supplied value is of type Value, it is returned
 // unmodified.
 //
 // Given a value of one of Go's predeclared string, bool, or
 // (non-complex) numeric types, AnyValue returns a Value of kind
-// String, Bool, Uint64, Int64, or Float64. The width of the
-// original numeric type is not preserved.
+// [KindString], [KindBool], [KindUint64], [KindInt64], or [KindFloat64].
+// The width of the original numeric type is not preserved.
 //
-// Given a time.Time or time.Duration value, AnyValue returns a Value of kind
-// KindTime or KindDuration. The monotonic time is not preserved.
+// Given a [time.Time] or [time.Duration] value, AnyValue returns a Value of kind
+// [KindTime] or [KindDuration]. The monotonic time is not preserved.
 //
 // For nil, or values of all other types, including named types whose
-// underlying type is numeric, AnyValue returns a value of kind KindAny.
+// underlying type is numeric, AnyValue returns a value of kind [KindAny].
 func AnyValue(v any) Value {
 	switch v := v.(type) {
 	case string:
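A small illustrative sketch (not part of the patch) of the kind mapping described in the AnyValue doc comment above; the values are arbitrary.

	package main

	import (
		"fmt"
		"log/slog"
		"time"
	)

	func main() {
		fmt.Println(slog.AnyValue("hi").Kind() == slog.KindString)          // true
		fmt.Println(slog.AnyValue(int32(7)).Kind() == slog.KindInt64)       // true: width not preserved
		fmt.Println(slog.AnyValue(time.Second).Kind() == slog.KindDuration) // true
		type ID int
		fmt.Println(slog.AnyValue(ID(1)).Kind() == slog.KindAny)            // true: named numeric type
	}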
@@ -285,7 +285,7 @@
 	}
 }
 
-// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
+// String returns Value's value as a string, formatted like [fmt.Sprint]. Unlike
 // the methods Int64, Float64, and so on, which panic if v is of the
 // wrong kind, String never panics.
 func (v Value) String() string {
@@ -327,22 +327,22 @@
 	return v.bool()
 }
 
-func (a Value) bool() bool {
-	return a.num == 1
+func (v Value) bool() bool {
+	return v.num == 1
 }
 
-// Duration returns v's value as a time.Duration. It panics
+// Duration returns v's value as a [time.Duration]. It panics
 // if v is not a time.Duration.
-func (a Value) Duration() time.Duration {
-	if g, w := a.Kind(), KindDuration; g != w {
+func (v Value) Duration() time.Duration {
+	if g, w := v.Kind(), KindDuration; g != w {
 		panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
 	}
 
-	return a.duration()
+	return v.duration()
 }
 
-func (a Value) duration() time.Duration {
-	return time.Duration(int64(a.num))
+func (v Value) duration() time.Duration {
+	return time.Duration(int64(v.num))
 }
 
 // Float64 returns v's value as a float64. It panics
@@ -355,11 +355,11 @@
 	return v.float()
 }
 
-func (a Value) float() float64 {
-	return math.Float64frombits(a.num)
+func (v Value) float() float64 {
+	return math.Float64frombits(v.num)
 }
 
-// Time returns v's value as a time.Time. It panics
+// Time returns v's value as a [time.Time]. It panics
 // if v is not a time.Time.
 func (v Value) Time() time.Time {
 	if g, w := v.Kind(), KindTime; g != w {
@@ -383,7 +383,7 @@
 }
 
 // Group returns v's value as a []Attr.
-// It panics if v's Kind is not KindGroup.
+// It panics if v's [Kind] is not [KindGroup].
 func (v Value) Group() []Attr {
 	if sp, ok := v.any.(groupptr); ok {
 		return unsafe.Slice((*Attr)(sp), v.num)
@@ -470,13 +470,13 @@
 
 const maxLogValues = 100
 
-// Resolve repeatedly calls LogValue on v while it implements LogValuer,
+// Resolve repeatedly calls LogValue on v while it implements [LogValuer],
 // and returns the result.
 // If v resolves to a group, the group's attributes' values are not recursively
 // resolved.
 // If the number of LogValue calls exceeds a threshold, a Value containing an
 // error is returned.
-// Resolve's return value is guaranteed not to be of Kind KindLogValuer.
+// Resolve's return value is guaranteed not to be of Kind [KindLogValuer].
 func (v Value) Resolve() (rv Value) {
 	orig := v
 	defer func() {
diff --git a/src/log/syslog/syslog.go b/src/log/syslog/syslog.go
index 03e5263..362dd95 100644
--- a/src/log/syslog/syslog.go
+++ b/src/log/syslog/syslog.go
@@ -18,9 +18,9 @@
 )
 
 // The Priority is a combination of the syslog facility and
-// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity
-// message from the FTP facility. The default severity is LOG_EMERG;
-// the default facility is LOG_KERN.
+// severity. For example, [LOG_ALERT] | [LOG_FTP] sends an alert severity
+// message from the FTP facility. The default severity is [LOG_EMERG];
+// the default facility is [LOG_KERN].
 type Priority int
 
 const severityMask = 0x07
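For illustration (not part of the patch): combining a facility and a severity as the Priority doc comment above describes. This sketch assumes a Unix system with a reachable syslog daemon; the "ftpd" tag and message are made up.

	package main

	import (
		"log"
		"log/syslog"
	)

	func main() {
		// LOG_ALERT|LOG_FTP: alert-severity messages attributed to the FTP facility.
		w, err := syslog.New(syslog.LOG_ALERT|syslog.LOG_FTP, "ftpd")
		if err != nil {
			log.Fatal(err)
		}
		defer w.Close()
		w.Alert("transfer subsystem unavailable")
	}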
@@ -103,7 +103,7 @@
 // New establishes a new connection to the system log daemon. Each
 // write to the returned writer sends a log message with the given
 // priority (a combination of the syslog facility and severity) and
-// prefix tag. If tag is empty, the os.Args[0] is used.
+// prefix tag. If tag is empty, the [os.Args][0] is used.
 func New(priority Priority, tag string) (*Writer, error) {
 	return Dial("", "", priority, tag)
 }
@@ -111,7 +111,7 @@
 // Dial establishes a connection to a log daemon by connecting to
 // address raddr on the specified network. Each write to the returned
 // writer sends a log message with the facility and severity
-// (from priority) and tag. If tag is empty, the os.Args[0] is used.
+// (from priority) and tag. If tag is empty, the [os.Args][0] is used.
 // If network is empty, Dial will connect to the local syslog server.
 // Otherwise, see the documentation for net.Dial for valid values
 // of network and raddr.
@@ -191,56 +191,56 @@
 	return nil
 }
 
-// Emerg logs a message with severity LOG_EMERG, ignoring the severity
+// Emerg logs a message with severity [LOG_EMERG], ignoring the severity
 // passed to New.
 func (w *Writer) Emerg(m string) error {
 	_, err := w.writeAndRetry(LOG_EMERG, m)
 	return err
 }
 
-// Alert logs a message with severity LOG_ALERT, ignoring the severity
+// Alert logs a message with severity [LOG_ALERT], ignoring the severity
 // passed to New.
 func (w *Writer) Alert(m string) error {
 	_, err := w.writeAndRetry(LOG_ALERT, m)
 	return err
 }
 
-// Crit logs a message with severity LOG_CRIT, ignoring the severity
+// Crit logs a message with severity [LOG_CRIT], ignoring the severity
 // passed to New.
 func (w *Writer) Crit(m string) error {
 	_, err := w.writeAndRetry(LOG_CRIT, m)
 	return err
 }
 
-// Err logs a message with severity LOG_ERR, ignoring the severity
+// Err logs a message with severity [LOG_ERR], ignoring the severity
 // passed to New.
 func (w *Writer) Err(m string) error {
 	_, err := w.writeAndRetry(LOG_ERR, m)
 	return err
 }
 
-// Warning logs a message with severity LOG_WARNING, ignoring the
+// Warning logs a message with severity [LOG_WARNING], ignoring the
 // severity passed to New.
 func (w *Writer) Warning(m string) error {
 	_, err := w.writeAndRetry(LOG_WARNING, m)
 	return err
 }
 
-// Notice logs a message with severity LOG_NOTICE, ignoring the
+// Notice logs a message with severity [LOG_NOTICE], ignoring the
 // severity passed to New.
 func (w *Writer) Notice(m string) error {
 	_, err := w.writeAndRetry(LOG_NOTICE, m)
 	return err
 }
 
-// Info logs a message with severity LOG_INFO, ignoring the severity
+// Info logs a message with severity [LOG_INFO], ignoring the severity
 // passed to New.
 func (w *Writer) Info(m string) error {
 	_, err := w.writeAndRetry(LOG_INFO, m)
 	return err
 }
 
-// Debug logs a message with severity LOG_DEBUG, ignoring the severity
+// Debug logs a message with severity [LOG_DEBUG], ignoring the severity
 // passed to New.
 func (w *Writer) Debug(m string) error {
 	_, err := w.writeAndRetry(LOG_DEBUG, m)
@@ -305,10 +305,10 @@
 	return n.conn.Close()
 }
 
-// NewLogger creates a log.Logger whose output is written to the
+// NewLogger creates a [log.Logger] whose output is written to the
 // system log service with the specified priority, a combination of
 // the syslog facility and severity. The logFlag argument is the flag
-// set passed through to log.New to create the Logger.
+// set passed through to [log.New] to create the Logger.
 func NewLogger(p Priority, logFlag int) (*log.Logger, error) {
 	s, err := New(p, "")
 	if err != nil {
diff --git a/src/make.bash b/src/make.bash
index 755b3b0..76ad516 100755
--- a/src/make.bash
+++ b/src/make.bash
@@ -67,23 +67,23 @@
 # timing information to this file. Useful for profiling where the
 # time goes when these scripts run.
 #
-# GOROOT_BOOTSTRAP: A working Go tree >= Go 1.17.13 for bootstrap.
+# GOROOT_BOOTSTRAP: A working Go tree >= Go 1.20.6 for bootstrap.
 # If $GOROOT_BOOTSTRAP/bin/go is missing, $(go env GOROOT) is
-# tried for all "go" in $PATH. By default, one of $HOME/go1.17.13,
-# $HOME/sdk/go1.17.13, or $HOME/go1.4, whichever exists, in that order.
+# tried for all "go" in $PATH. By default, one of $HOME/go1.20.6,
+# $HOME/sdk/go1.20.6, or $HOME/go1.4, whichever exists, in that order.
 # We still check $HOME/go1.4 to allow for build scripts that still hard-code
 # that name even though they put newer Go toolchains there.
 
-bootgo=1.17.13
+bootgo=1.20.6
 
 set -e
 
-if [ ! -f run.bash ]; then
+if [[ ! -f run.bash ]]; then
 	echo 'make.bash must be run from $GOROOT/src' 1>&2
 	exit 1
 fi
 
-if [ "$GOBUILDTIMELOGFILE" != "" ]; then
+if [[ "$GOBUILDTIMELOGFILE" != "" ]]; then
 	echo $(LC_TIME=C date) start make.bash >"$GOBUILDTIMELOGFILE"
 fi
 
@@ -114,7 +114,7 @@
 # so loop through the possible selinux mount points.
 for se_mount in /selinux /sys/fs/selinux
 do
-	if [ -d $se_mount -a -f $se_mount/booleans/allow_execstack -a -x /usr/sbin/selinuxenabled ] && /usr/sbin/selinuxenabled; then
+	if [[ -d $se_mount && -f $se_mount/booleans/allow_execstack && -x /usr/sbin/selinuxenabled ]] && /usr/sbin/selinuxenabled; then
 		if ! cat $se_mount/booleans/allow_execstack | grep -c '^1 1$' >> /dev/null ; then
 			echo "WARNING: the default SELinux policy on, at least, Fedora 12 breaks "
 			echo "Go. You can enable the features that Go needs via the following "
@@ -134,7 +134,7 @@
 # Test for debian/kFreeBSD.
 # cmd/dist will detect kFreeBSD as freebsd/$GOARCH, but we need to
 # disable cgo manually.
-if [ "$(uname -s)" = "GNU/kFreeBSD" ]; then
+if [[ "$(uname -s)" == "GNU/kFreeBSD" ]]; then
 	export CGO_ENABLED=0
 fi
 
@@ -145,17 +145,17 @@
 
 verbose=false
 vflag=""
-if [ "$1" = "-v" ]; then
+if [[ "$1" == "-v" ]]; then
 	verbose=true
 	vflag=-v
 	shift
 fi
 
 goroot_bootstrap_set=${GOROOT_BOOTSTRAP+"true"}
-if [ -z "$GOROOT_BOOTSTRAP" ]; then
+if [[ -z "$GOROOT_BOOTSTRAP" ]]; then
 	GOROOT_BOOTSTRAP="$HOME/go1.4"
 	for d in sdk/go$bootgo go$bootgo; do
-		if [ -d "$HOME/$d" ]; then
+		if [[ -d "$HOME/$d" ]]; then
 			GOROOT_BOOTSTRAP="$HOME/$d"
 		fi
 	done
@@ -168,10 +168,10 @@
 
 export GOROOT="$(cd .. && pwd)"
 IFS=$'\n'; for go_exe in $(type -ap go); do
-	if [ ! -x "$GOROOT_BOOTSTRAP/bin/go" ]; then
+	if [[ ! -x "$GOROOT_BOOTSTRAP/bin/go" ]]; then
 		goroot=$(GOROOT= nogoenv "$go_exe" env GOROOT)
-		if [ "$goroot" != "$GOROOT" ]; then
-			if [ "$goroot_bootstrap_set" = "true" ]; then
+		if [[ "$goroot" != "$GOROOT" ]]; then
+			if [[ "$goroot_bootstrap_set" == "true" ]]; then
 				printf 'WARNING: %s does not exist, found %s from env\n' "$GOROOT_BOOTSTRAP/bin/go" "$go_exe" >&2
 				printf 'WARNING: set %s as GOROOT_BOOTSTRAP\n' "$goroot" >&2
 			fi
@@ -179,7 +179,7 @@
 		fi
 	fi
 done; unset IFS
-if [ ! -x "$GOROOT_BOOTSTRAP/bin/go" ]; then
+if [[ ! -x "$GOROOT_BOOTSTRAP/bin/go" ]]; then
 	echo "ERROR: Cannot find $GOROOT_BOOTSTRAP/bin/go." >&2
 	echo "Set \$GOROOT_BOOTSTRAP to a working Go tree >= Go $bootgo." >&2
 	exit 1
@@ -192,7 +192,7 @@
 if $verbose; then
 	echo cmd/dist
 fi
-if [ "$GOROOT_BOOTSTRAP" = "$GOROOT" ]; then
+if [[ "$GOROOT_BOOTSTRAP" == "$GOROOT" ]]; then
 	echo "ERROR: \$GOROOT_BOOTSTRAP must not be set to \$GOROOT" >&2
 	echo "Set \$GOROOT_BOOTSTRAP to a working Go tree >= Go $bootgo." >&2
 	exit 1
@@ -202,7 +202,7 @@
 
 # -e doesn't propagate out of eval, so check success by hand.
 eval $(./cmd/dist/dist env -p || echo FAIL=true)
-if [ "$FAIL" = true ]; then
+if [[ "$FAIL" == true ]]; then
 	exit 1
 fi
 
@@ -210,10 +210,10 @@
 	echo
 fi
 
-if [ "$1" = "--dist-tool" ]; then
+if [[ "$1" == "--dist-tool" ]]; then
 	# Stop after building dist tool.
 	mkdir -p "$GOTOOLDIR"
-	if [ "$2" != "" ]; then
+	if [[ "$2" != "" ]]; then
 		cp cmd/dist/dist "$2"
 	fi
 	mv cmd/dist/dist "$GOTOOLDIR"/dist
diff --git a/src/make.bat b/src/make.bat
index 3b861cb..d9f9b6c 100644
--- a/src/make.bat
+++ b/src/make.bat
@@ -77,7 +77,10 @@
 if not "x%GOROOT_BOOTSTRAP%"=="x" goto bootstrapset

 for /f "tokens=*" %%g in ('where go 2^>nul') do (

 	if "x%GOROOT_BOOTSTRAP%"=="x" (

+		setlocal

+		call :nogoenv

 		for /f "tokens=*" %%i in ('"%%g" env GOROOT 2^>nul') do (

+			endlocal

 			if /I not "%%i"=="%GOROOT_TEMP%" (

 				set GOROOT_BOOTSTRAP=%%i

 			)

@@ -85,7 +88,7 @@
 	)

 )

 

-set bootgo=1.17.13

+set bootgo=1.20.6

 if "x%GOROOT_BOOTSTRAP%"=="x" if exist "%HOMEDRIVE%%HOMEPATH%\go%bootgo%" set GOROOT_BOOTSTRAP=%HOMEDRIVE%%HOMEPATH%\go%bootgo%

 if "x%GOROOT_BOOTSTRAP%"=="x" if exist "%HOMEDRIVE%%HOMEPATH%\sdk\go%bootgo%" set GOROOT_BOOTSTRAP=%HOMEDRIVE%%HOMEPATH%\sdk\go%bootgo%

 if "x%GOROOT_BOOTSTRAP%"=="x" set GOROOT_BOOTSTRAP=%HOMEDRIVE%%HOMEPATH%\Go1.4

@@ -96,18 +99,13 @@
 set GOROOT_TEMP=

 

 setlocal

-set GOOS=

-set GOARCH=

-set GOEXPERIMENT=

+call :nogoenv

 for /f "tokens=*" %%g IN ('"%GOROOT_BOOTSTRAP%\bin\go" version') do (set GOROOT_BOOTSTRAP_VERSION=%%g)

 set GOROOT_BOOTSTRAP_VERSION=%GOROOT_BOOTSTRAP_VERSION:go version =%

 echo Building Go cmd/dist using %GOROOT_BOOTSTRAP%. (%GOROOT_BOOTSTRAP_VERSION%)

 if x%vflag==x-v echo cmd/dist

 set GOROOT=%GOROOT_BOOTSTRAP%

 set GOBIN=

-set GO111MODULE=off

-set GOENV=off

-set GOFLAGS=

 "%GOROOT_BOOTSTRAP%\bin\go.exe" build -o cmd\dist\dist.exe .\cmd\dist

 endlocal

 if errorlevel 1 goto fail

@@ -158,7 +156,7 @@
 .\cmd\dist\dist.exe bootstrap -a %vflag% %bootstrapflags%

 if errorlevel 1 goto fail

 del .\cmd\dist\dist.exe

-goto end

+goto :eof

 

 :: DO NOT ADD ANY NEW CODE HERE.

 :: The bootstrap+del above are the final step of make.bat.

@@ -169,7 +167,16 @@
 :copydist

 mkdir "%GOTOOLDIR%" 2>NUL

 copy cmd\dist\dist.exe "%GOTOOLDIR%\"

-goto end

+goto :eof

+

+:nogoenv

+set GO111MODULE=off

+set GOENV=off

+set GOOS=

+set GOARCH=

+set GOEXPERIMENT=

+set GOFLAGS=

+goto :eof

 

 :bootstrapfail

 echo ERROR: Cannot find %GOROOT_BOOTSTRAP%\bin\go.exe

@@ -178,5 +185,3 @@
 :fail

 set GOBUILDFAIL=1

 if x%GOBUILDEXIT%==x1 exit %GOBUILDFAIL%

-

-:end

diff --git a/src/make.rc b/src/make.rc
index 834c1f1..b10be7d 100755
--- a/src/make.rc
+++ b/src/make.rc
@@ -51,7 +51,7 @@
 	GO111MODULE=off GOENV=off GOOS=() GOARCH=() GOEXPERIMENT=() GOFLAGS=() $*
 }
 
-bootgo = 1.17.13
+bootgo = 1.20.6
 GOROOT = `{cd .. && pwd}
 goroot_bootstrap_set = 'true'
 if(! ~ $#GOROOT_BOOTSTRAP 1){
diff --git a/src/maps/example_test.go b/src/maps/example_test.go
index 779c66d..3d6b7d1 100644
--- a/src/maps/example_test.go
+++ b/src/maps/example_test.go
@@ -10,6 +10,72 @@
 	"strings"
 )
 
+func ExampleClone() {
+	m1 := map[string]int{
+		"key": 1,
+	}
+	m2 := maps.Clone(m1)
+	m2["key"] = 100
+	fmt.Println(m1["key"])
+	fmt.Println(m2["key"])
+
+	m3 := map[string][]int{
+		"key": {1, 2, 3},
+	}
+	m4 := maps.Clone(m3)
+	fmt.Println(m4["key"][0])
+	m4["key"][0] = 100
+	fmt.Println(m3["key"][0])
+	fmt.Println(m4["key"][0])
+
+	// Output:
+	// 1
+	// 100
+	// 1
+	// 100
+	// 100
+}
+
+func ExampleCopy() {
+	m1 := map[string]int{
+		"one": 1,
+		"two": 2,
+	}
+	m2 := map[string]int{
+		"one": 10,
+	}
+
+	maps.Copy(m2, m1)
+	fmt.Println("m2 is:", m2)
+
+	m2["one"] = 100
+	fmt.Println("m1 is:", m1)
+	fmt.Println("m2 is:", m2)
+
+	m3 := map[string][]int{
+		"one": {1, 2, 3},
+		"two": {4, 5, 6},
+	}
+	m4 := map[string][]int{
+		"one": {7, 8, 9},
+	}
+
+	maps.Copy(m4, m3)
+	fmt.Println("m4 is:", m4)
+
+	m4["one"][0] = 100
+	fmt.Println("m3 is:", m3)
+	fmt.Println("m4 is:", m4)
+
+	// Output:
+	// m2 is: map[one:1 two:2]
+	// m1 is: map[one:1 two:2]
+	// m2 is: map[one:100 two:2]
+	// m4 is: map[one:[1 2 3] two:[4 5 6]]
+	// m3 is: map[one:[100 2 3] two:[4 5 6]]
+	// m4 is: map[one:[100 2 3] two:[4 5 6]]
+}
+
 func ExampleDeleteFunc() {
 	m := map[string]int{
 		"one":   1,
@@ -25,6 +91,30 @@
 	// map[four:4 two:2]
 }
 
+func ExampleEqual() {
+	m1 := map[int]string{
+		1:    "one",
+		10:   "Ten",
+		1000: "THOUSAND",
+	}
+	m2 := map[int]string{
+		1:    "one",
+		10:   "Ten",
+		1000: "THOUSAND",
+	}
+	m3 := map[int]string{
+		1:    "one",
+		10:   "ten",
+		1000: "thousand",
+	}
+
+	fmt.Println(maps.Equal(m1, m2))
+	fmt.Println(maps.Equal(m1, m3))
+	// Output:
+	// true
+	// false
+}
+
 func ExampleEqualFunc() {
 	m1 := map[int]string{
 		1:    "one",
diff --git a/src/maps/maps_test.go b/src/maps/maps_test.go
index 5e3f9ca..fa30fe8 100644
--- a/src/maps/maps_test.go
+++ b/src/maps/maps_test.go
@@ -182,3 +182,61 @@
 		}
 	}
 }
+
+func TestCloneLarge(t *testing.T) {
+	// See issue 64474.
+	type K [17]float64 // > 128 bytes
+	type V [17]float64
+
+	var zero float64
+	negZero := -zero
+
+	for tst := 0; tst < 3; tst++ {
+		// Initialize m with a key and value.
+		m := map[K]V{}
+		var k1 K
+		var v1 V
+		m[k1] = v1
+
+		switch tst {
+		case 0: // nothing, just a 1-entry map
+		case 1:
+			// Add more entries to make it 2 buckets
+			// 1 entry already
+			// 7 more fill up 1 bucket
+			// 1 more to grow to 2 buckets
+			for i := 0; i < 7+1; i++ {
+				m[K{float64(i) + 1}] = V{}
+			}
+		case 2:
+			// Capture the map mid-grow
+			// 1 entry already
+			// 7 more fill up 1 bucket
+			// 5 more (13 total) fill up 2 buckets
+			// 13 more (26 total) fill up 4 buckets
+			// 1 more to start the 4->8 bucket grow
+			for i := 0; i < 7+5+13+1; i++ {
+				m[K{float64(i) + 1}] = V{}
+			}
+		}
+
+		// Clone m, which should freeze the map's contents.
+		c := Clone(m)
+
+		// Update m with new key and value.
+		k2, v2 := k1, v1
+		k2[0] = negZero
+		v2[0] = 1.0
+		m[k2] = v2
+
+		// Make sure c still has its old key and value.
+		for k, v := range c {
+			if math.Signbit(k[0]) {
+				t.Errorf("tst%d: sign bit of key changed; got %v want %v", tst, k, k1)
+			}
+			if v != v1 {
+				t.Errorf("tst%d: value changed; got %v want %v", tst, v, v1)
+			}
+		}
+	}
+}
diff --git a/src/math/big/arith_386.s b/src/math/big/arith_386.s
index 8cf4665..90f6a8c 100644
--- a/src/math/big/arith_386.s
+++ b/src/math/big/arith_386.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go
-// +build !math_big_pure_go
 
 #include "textflag.h"
 
diff --git a/src/math/big/arith_amd64.go b/src/math/big/arith_amd64.go
index 89108fe..3db7258 100644
--- a/src/math/big/arith_amd64.go
+++ b/src/math/big/arith_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go
-// +build !math_big_pure_go
 
 package big
 
diff --git a/src/math/big/arith_amd64.s b/src/math/big/arith_amd64.s
index b1e914c..a5b65b1 100644
--- a/src/math/big/arith_amd64.s
+++ b/src/math/big/arith_amd64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go
-// +build !math_big_pure_go
 
 #include "textflag.h"
 
diff --git a/src/math/big/arith_arm.s b/src/math/big/arith_arm.s
index 10054bd..ece3a96 100644
--- a/src/math/big/arith_arm.s
+++ b/src/math/big/arith_arm.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go
-// +build !math_big_pure_go
 
 #include "textflag.h"
 
diff --git a/src/math/big/arith_arm64.s b/src/math/big/arith_arm64.s
index addf2d6..204006e 100644
--- a/src/math/big/arith_arm64.s
+++ b/src/math/big/arith_arm64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go
-// +build !math_big_pure_go
 
 #include "textflag.h"
 
diff --git a/src/math/big/arith_decl.go b/src/math/big/arith_decl.go
index 9b254f2..f14f8d6 100644
--- a/src/math/big/arith_decl.go
+++ b/src/math/big/arith_decl.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go
-// +build !math_big_pure_go
 
 package big
 
diff --git a/src/math/big/arith_decl_pure.go b/src/math/big/arith_decl_pure.go
index 75f3ed2..4d7bbc8 100644
--- a/src/math/big/arith_decl_pure.go
+++ b/src/math/big/arith_decl_pure.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build math_big_pure_go
-// +build math_big_pure_go
 
 package big
 
diff --git a/src/math/big/arith_decl_s390x.go b/src/math/big/arith_decl_s390x.go
index 4193f32..6539166 100644
--- a/src/math/big/arith_decl_s390x.go
+++ b/src/math/big/arith_decl_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go
-// +build !math_big_pure_go
 
 package big
 
diff --git a/src/math/big/arith_loong64.s b/src/math/big/arith_loong64.s
index 0ae3031..847e312 100644
--- a/src/math/big/arith_loong64.s
+++ b/src/math/big/arith_loong64.s
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !math_big_pure_go,loong64
+//go:build !math_big_pure_go && loong64
 
 #include "textflag.h"
 
diff --git a/src/math/big/arith_mips64x.s b/src/math/big/arith_mips64x.s
index 3ee6e27..393a3ef 100644
--- a/src/math/big/arith_mips64x.s
+++ b/src/math/big/arith_mips64x.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go && (mips64 || mips64le)
-// +build !math_big_pure_go
-// +build mips64 mips64le
 
 #include "textflag.h"
 
diff --git a/src/math/big/arith_mipsx.s b/src/math/big/arith_mipsx.s
index b1d3282..cdb4bbc 100644
--- a/src/math/big/arith_mipsx.s
+++ b/src/math/big/arith_mipsx.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go && (mips || mipsle)
-// +build !math_big_pure_go
-// +build mips mipsle
 
 #include "textflag.h"
 
diff --git a/src/math/big/arith_ppc64x.s b/src/math/big/arith_ppc64x.s
index 0613f5c..9512a12 100644
--- a/src/math/big/arith_ppc64x.s
+++ b/src/math/big/arith_ppc64x.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go && (ppc64 || ppc64le)
-// +build !math_big_pure_go
-// +build ppc64 ppc64le
 
 #include "textflag.h"
 
diff --git a/src/math/big/arith_riscv64.s b/src/math/big/arith_riscv64.s
index cb9ac18..bad3249 100644
--- a/src/math/big/arith_riscv64.s
+++ b/src/math/big/arith_riscv64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go && riscv64
-// +build !math_big_pure_go,riscv64
 
 #include "textflag.h"
 
diff --git a/src/math/big/arith_s390x.s b/src/math/big/arith_s390x.s
index aa6590e..01a7bb2 100644
--- a/src/math/big/arith_s390x.s
+++ b/src/math/big/arith_s390x.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go
-// +build !math_big_pure_go
 
 #include "textflag.h"
 
diff --git a/src/math/big/arith_s390x_test.go b/src/math/big/arith_s390x_test.go
index 8375ddb..0b91cc1 100644
--- a/src/math/big/arith_s390x_test.go
+++ b/src/math/big/arith_s390x_test.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build s390x && !math_big_pure_go
-// +build s390x,!math_big_pure_go
 
 package big
 
@@ -15,7 +14,7 @@
 // vector-capable machine
 
 func TestFunVVnovec(t *testing.T) {
-	if hasVX == true {
+	if hasVX {
 		for _, a := range sumVV {
 			arg := a
 			testFunVV(t, "addVV_novec", addVV_novec, arg)
diff --git a/src/math/big/arith_wasm.s b/src/math/big/arith_wasm.s
index 93eb16d..fd51031 100644
--- a/src/math/big/arith_wasm.s
+++ b/src/math/big/arith_wasm.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !math_big_pure_go
-// +build !math_big_pure_go
 
 #include "textflag.h"
 
diff --git a/src/math/big/calibrate_test.go b/src/math/big/calibrate_test.go
index 4fa663f..d85833a 100644
--- a/src/math/big/calibrate_test.go
+++ b/src/math/big/calibrate_test.go
@@ -15,7 +15,7 @@
 // Calculates lower and upper thresholds for when basicSqr
 // is faster than standard multiplication.
 
-// Usage: go test -run=TestCalibrate -v -calibrate
+// Usage: go test -run='^TestCalibrate$' -v -calibrate
 
 package big
 
diff --git a/src/math/big/doc.go b/src/math/big/doc.go
index fee5a65..2038546 100644
--- a/src/math/big/doc.go
+++ b/src/math/big/doc.go
@@ -10,7 +10,7 @@
 	Rat    rational numbers
 	Float  floating-point numbers
 
-The zero value for an Int, Rat, or Float correspond to 0. Thus, new
+The zero value for an [Int], [Rat], or [Float] corresponds to 0. Thus, new
 values can be declared in the usual ways and denote 0 without further
 initialization:
 
@@ -23,9 +23,9 @@
 
 	func NewT(v V) *T
 
-For instance, NewInt(x) returns an *Int set to the value of the int64
-argument x, NewRat(a, b) returns a *Rat set to the fraction a/b where
-a and b are int64 values, and NewFloat(f) returns a *Float initialized
+For instance, [NewInt](x) returns an *[Int] set to the value of the int64
+argument x, [NewRat](a, b) returns a *[Rat] set to the fraction a/b where
+a and b are int64 values, and [NewFloat](f) returns a *[Float] initialized
 to the float64 argument f. More flexibility is provided with explicit
 setters, for instance:
 
@@ -42,7 +42,7 @@
 	func (z *T) Binary(x, y *T) *T    // z = x binary y
 	func (x *T) Pred() P              // p = pred(x)
 
-with T one of Int, Rat, or Float. For unary and binary operations, the
+with T one of [Int], [Rat], or [Float]. For unary and binary operations, the
 result is the receiver (usually named z in that case; see below); if it
 is one of the operands x or y it may be safely overwritten (and its memory
 reused).
@@ -81,18 +81,18 @@
 enable simple call chaining.
 
 Methods which don't require a result value to be passed in (for instance,
-Int.Sign), simply return the result. In this case, the receiver is typically
+[Int.Sign]), simply return the result. In this case, the receiver is typically
 the first operand, named x:
 
 	func (x *Int) Sign() int
 
 Various methods support conversions between strings and corresponding
-numeric values, and vice versa: *Int, *Rat, and *Float values implement
+numeric values, and vice versa: *[Int], *[Rat], and *[Float] values implement
 the Stringer interface for a (default) string representation of the value,
 but also provide SetString methods to initialize a value from a string in
 a variety of supported formats (see the respective SetString documentation).
 
-Finally, *Int, *Rat, and *Float satisfy [fmt.Scanner] for scanning
-and (except for *Rat) the Formatter interface for formatted printing.
+Finally, *[Int], *[Rat], and *[Float] satisfy [fmt.Scanner] for scanning
+and (except for *[Rat]) the Formatter interface for formatted printing.
 */
 package big
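Not part of the patch: a brief sketch of the constructor/setter pattern the package comment describes; the numbers are arbitrary.

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		i := big.NewInt(42)    // *Int from an int64
		r := big.NewRat(1, 3)  // *Rat from two int64s
		f := big.NewFloat(1.5) // *Float from a float64

		// Setters (re)initialize an existing value, e.g. from a string,
		// and arithmetic methods write their result into the receiver z.
		z := new(big.Int)
		z.SetString("123456789012345678901234567890", 10)
		z.Add(z, i)

		fmt.Println(i, r, f, z)
	}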
diff --git a/src/math/big/float.go b/src/math/big/float.go
index 2f0635a..1c97ec9 100644
--- a/src/math/big/float.go
+++ b/src/math/big/float.go
@@ -36,7 +36,7 @@
 //
 // Unless specified otherwise, all operations (including setters) that
 // specify a *Float variable for the result (usually via the receiver
-// with the exception of MantExp), round the numeric result according
+// with the exception of [Float.MantExp]), round the numeric result according
 // to the precision and rounding mode of the result variable.
 //
 // If the provided result precision is 0 (see below), it is set to the
@@ -47,20 +47,20 @@
 // their mode is the zero value for RoundingMode (ToNearestEven).
 //
 // By setting the desired precision to 24 or 53 and using matching rounding
-// mode (typically ToNearestEven), Float operations produce the same results
+// mode (typically [ToNearestEven]), Float operations produce the same results
 // as the corresponding float32 or float64 IEEE-754 arithmetic for operands
 // that correspond to normal (i.e., not denormal) float32 or float64 numbers.
 // Exponent underflow and overflow lead to a 0 or an Infinity for different
 // values than IEEE-754 because Float exponents have a much larger range.
 //
 // The zero (uninitialized) value for a Float is ready to use and represents
-// the number +0.0 exactly, with precision 0 and rounding mode ToNearestEven.
+// the number +0.0 exactly, with precision 0 and rounding mode [ToNearestEven].
 //
 // Operations always take pointer arguments (*Float) rather
 // than Float values, and each unique Float value requires
 // its own unique *Float pointer. To "copy" a Float value,
 // an existing (or newly allocated) Float must be set to
-// a new value using the Float.Set method; shallow copies
+// a new value using the [Float.Set] method; shallow copies
 // of Floats are not supported and may lead to errors.
 type Float struct {
 	prec uint32
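For illustration only (not in the patch): a sketch of the precision-matching behavior the Float doc comment describes, assuming normal, in-range values as that comment requires.

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// With precision 53 and the default ToNearestEven rounding,
		// Float arithmetic on normal values matches float64 arithmetic.
		x := new(big.Float).SetPrec(53).SetFloat64(0.1)
		y := new(big.Float).SetPrec(53).SetFloat64(0.2)
		sum := new(big.Float).SetPrec(53).Add(x, y)

		f, acc := sum.Float64()
		fmt.Println(f == 0.1+0.2, acc) // true Exact
	}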
@@ -72,7 +72,7 @@
 	exp  int32
 }
 
-// An ErrNaN panic is raised by a Float operation that would lead to
+// An ErrNaN panic is raised by a [Float] operation that would lead to
 // a NaN under IEEE-754 rules. An ErrNaN implements the error interface.
 type ErrNaN struct {
 	msg string
@@ -82,9 +82,9 @@
 	return err.msg
 }
 
-// NewFloat allocates and returns a new Float set to x,
-// with precision 53 and rounding mode ToNearestEven.
-// NewFloat panics with ErrNaN if x is a NaN.
+// NewFloat allocates and returns a new [Float] set to x,
+// with precision 53 and rounding mode [ToNearestEven].
+// NewFloat panics with [ErrNaN] if x is a NaN.
 func NewFloat(x float64) *Float {
 	if math.IsNaN(x) {
 		panic(ErrNaN{"NewFloat(NaN)"})
@@ -126,9 +126,9 @@
 	inf
 )
 
-// RoundingMode determines how a Float value is rounded to the
-// desired precision. Rounding may change the Float value; the
-// rounding error is described by the Float's Accuracy.
+// RoundingMode determines how a [Float] value is rounded to the
+// desired precision. Rounding may change the [Float] value; the
+// rounding error is described by the [Float]'s [Accuracy].
 type RoundingMode byte
 
 // These constants define supported rounding modes.
@@ -144,10 +144,10 @@
 //go:generate stringer -type=RoundingMode
 
 // Accuracy describes the rounding error produced by the most recent
-// operation that generated a Float value, relative to the exact value.
+// operation that generated a [Float] value, relative to the exact value.
 type Accuracy int8
 
-// Constants describing the Accuracy of a Float.
+// Constants describing the [Accuracy] of a [Float].
 const (
 	Below Accuracy = -1
 	Exact Accuracy = 0
@@ -160,7 +160,7 @@
 // value of z. Rounding occurs according to z's rounding mode if the mantissa
 // cannot be represented in prec bits without loss of precision.
 // SetPrec(0) maps all finite values to ±0; infinite values remain unchanged.
-// If prec > MaxPrec, it is set to MaxPrec.
+// If prec > [MaxPrec], it is set to [MaxPrec].
 func (z *Float) SetPrec(prec uint) *Float {
 	z.acc = Exact // optimistically assume no rounding is needed
 
@@ -196,7 +196,7 @@
 
 // SetMode sets z's rounding mode to mode and returns an exact z.
 // z remains unchanged otherwise.
-// z.SetMode(z.Mode()) is a cheap way to set z's accuracy to Exact.
+// z.SetMode(z.Mode()) is a cheap way to set z's accuracy to [Exact].
 func (z *Float) SetMode(mode RoundingMode) *Float {
 	z.mode = mode
 	z.acc = Exact
@@ -302,9 +302,9 @@
 
 // SetMantExp sets z to mant × 2**exp and returns z.
 // The result z has the same precision and rounding mode
-// as mant. SetMantExp is an inverse of MantExp but does
+// as mant. SetMantExp is an inverse of [Float.MantExp] but does
 // not require 0.5 <= |mant| < 1.0. Specifically, for a
-// given x of type *Float, SetMantExp relates to MantExp
+// given x of type *[Float], SetMantExp relates to [Float.MantExp]
 // as follows:
 //
 //	mant := new(Float)
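Not part of the patch: a small sketch of the MantExp/SetMantExp round trip referenced above; 12.375 is an arbitrary example value.

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		x := big.NewFloat(12.375)

		mant := new(big.Float)
		exp := x.MantExp(mant) // 12.375 == 0.7734375 × 2**4

		z := new(big.Float).SetMantExp(mant, exp)
		fmt.Println(mant, exp, z.Cmp(x) == 0) // 0.7734375 4 true
	}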
@@ -548,7 +548,7 @@
 
 // SetFloat64 sets z to the (possibly rounded) value of x and returns z.
 // If z's precision is 0, it is changed to 53 (and rounding will have
-// no effect). SetFloat64 panics with ErrNaN if x is a NaN.
+// no effect). SetFloat64 panics with [ErrNaN] if x is a NaN.
 func (z *Float) SetFloat64(x float64) *Float {
 	if z.prec == 0 {
 		z.prec = 53
@@ -637,7 +637,7 @@
 // SetInf sets z to the infinite Float -Inf if signbit is
 // set, or +Inf if signbit is not set, and returns z. The
 // precision of z is unchanged and the result is always
-// Exact.
+// [Exact].
 func (z *Float) SetInf(signbit bool) *Float {
 	z.acc = Exact
 	z.form = inf
@@ -734,10 +734,10 @@
 }
 
 // Uint64 returns the unsigned integer resulting from truncating x
-// towards zero. If 0 <= x <= math.MaxUint64, the result is Exact
-// if x is an integer and Below otherwise.
-// The result is (0, Above) for x < 0, and (math.MaxUint64, Below)
-// for x > math.MaxUint64.
+// towards zero. If 0 <= x <= math.MaxUint64, the result is [Exact]
+// if x is an integer and [Below] otherwise.
+// The result is (0, [Above]) for x < 0, and ([math.MaxUint64], [Below])
+// for x > [math.MaxUint64].
 func (x *Float) Uint64() (uint64, Accuracy) {
 	if debugFloat {
 		x.validate()
@@ -779,10 +779,10 @@
 }
 
 // Int64 returns the integer resulting from truncating x towards zero.
-// If math.MinInt64 <= x <= math.MaxInt64, the result is Exact if x is
-// an integer, and Above (x < 0) or Below (x > 0) otherwise.
-// The result is (math.MinInt64, Above) for x < math.MinInt64,
-// and (math.MaxInt64, Below) for x > math.MaxInt64.
+// If [math.MinInt64] <= x <= [math.MaxInt64], the result is [Exact] if x is
+// an integer, and [Above] (x < 0) or [Below] (x > 0) otherwise.
+// The result is ([math.MinInt64], [Above]) for x < [math.MinInt64],
+// and ([math.MaxInt64], [Below]) for x > [math.MaxInt64].
 func (x *Float) Int64() (int64, Accuracy) {
 	if debugFloat {
 		x.validate()
@@ -834,10 +834,10 @@
 }
 
 // Float32 returns the float32 value nearest to x. If x is too small to be
-// represented by a float32 (|x| < math.SmallestNonzeroFloat32), the result
-// is (0, Below) or (-0, Above), respectively, depending on the sign of x.
-// If x is too large to be represented by a float32 (|x| > math.MaxFloat32),
-// the result is (+Inf, Above) or (-Inf, Below), depending on the sign of x.
+// represented by a float32 (|x| < [math.SmallestNonzeroFloat32]), the result
+// is (0, [Below]) or (-0, [Above]), respectively, depending on the sign of x.
+// If x is too large to be represented by a float32 (|x| > [math.MaxFloat32]),
+// the result is (+Inf, [Above]) or (-Inf, [Below]), depending on the sign of x.
 func (x *Float) Float32() (float32, Accuracy) {
 	if debugFloat {
 		x.validate()
@@ -954,10 +954,10 @@
 }
 
 // Float64 returns the float64 value nearest to x. If x is too small to be
-// represented by a float64 (|x| < math.SmallestNonzeroFloat64), the result
-// is (0, Below) or (-0, Above), respectively, depending on the sign of x.
-// If x is too large to be represented by a float64 (|x| > math.MaxFloat64),
-// the result is (+Inf, Above) or (-Inf, Below), depending on the sign of x.
+// represented by a float64 (|x| < [math.SmallestNonzeroFloat64]), the result
+// is (0, [Below]) or (-0, [Above]), respectively, depending on the sign of x.
+// If x is too large to be represented by a float64 (|x| > [math.MaxFloat64]),
+// the result is (+Inf, [Above]) or (-Inf, [Below]), depending on the sign of x.
 func (x *Float) Float64() (float64, Accuracy) {
 	if debugFloat {
 		x.validate()
@@ -1075,10 +1075,10 @@
 
 // Int returns the result of truncating x towards zero;
 // or nil if x is an infinity.
-// The result is Exact if x.IsInt(); otherwise it is Below
-// for x > 0, and Above for x < 0.
-// If a non-nil *Int argument z is provided, Int stores
-// the result in z instead of allocating a new Int.
+// The result is [Exact] if x.IsInt(); otherwise it is [Below]
+// for x > 0, and [Above] for x < 0.
+// If a non-nil *[Int] argument z is provided, [Int] stores
+// the result in z instead of allocating a new [Int].
 func (x *Float) Int(z *Int) (*Int, Accuracy) {
 	if debugFloat {
 		x.validate()
@@ -1132,9 +1132,9 @@
 
 // Rat returns the rational number corresponding to x;
 // or nil if x is an infinity.
-// The result is Exact if x is not an Inf.
-// If a non-nil *Rat argument z is provided, Rat stores
-// the result in z instead of allocating a new Rat.
+// The result is [Exact] if x is not an Inf.
+// If a non-nil *[Rat] argument z is provided, [Rat] stores
+// the result in z instead of allocating a new [Rat].
 func (x *Float) Rat(z *Rat) (*Rat, Accuracy) {
 	if debugFloat {
 		x.validate()
@@ -1444,7 +1444,7 @@
 // it is changed to the larger of x's or y's precision before the operation.
 // Rounding is performed according to z's precision and rounding mode; and
 // z's accuracy reports the result error relative to the exact (not rounded)
-// result. Add panics with ErrNaN if x and y are infinities with opposite
+// result. Add panics with [ErrNaN] if x and y are infinities with opposite
 // signs. The value of z is undefined in that case.
 func (z *Float) Add(x, y *Float) *Float {
 	if debugFloat {
@@ -1517,8 +1517,8 @@
 }
 
 // Sub sets z to the rounded difference x-y and returns z.
-// Precision, rounding, and accuracy reporting are as for Add.
-// Sub panics with ErrNaN if x and y are infinities with equal
+// Precision, rounding, and accuracy reporting are as for [Float.Add].
+// Sub panics with [ErrNaN] if x and y are infinities with equal
 // signs. The value of z is undefined in that case.
 func (z *Float) Sub(x, y *Float) *Float {
 	if debugFloat {
@@ -1584,8 +1584,8 @@
 }
 
 // Mul sets z to the rounded product x*y and returns z.
-// Precision, rounding, and accuracy reporting are as for Add.
-// Mul panics with ErrNaN if one operand is zero and the other
+// Precision, rounding, and accuracy reporting are as for [Float.Add].
+// Mul panics with [ErrNaN] if one operand is zero and the other
 // operand an infinity. The value of z is undefined in that case.
 func (z *Float) Mul(x, y *Float) *Float {
 	if debugFloat {
@@ -1629,8 +1629,8 @@
 }
 
 // Quo sets z to the rounded quotient x/y and returns z.
-// Precision, rounding, and accuracy reporting are as for Add.
-// Quo panics with ErrNaN if both operands are zero or infinities.
+// Precision, rounding, and accuracy reporting are as for [Float.Add].
+// Quo panics with [ErrNaN] if both operands are zero or infinities.
 // The value of z is undefined in that case.
 func (z *Float) Quo(x, y *Float) *Float {
 	if debugFloat {
diff --git a/src/math/big/float_test.go b/src/math/big/float_test.go
index 7d6bf03..bb045a0 100644
--- a/src/math/big/float_test.go
+++ b/src/math/big/float_test.go
@@ -194,13 +194,11 @@
 func alike32(x, y float32) bool {
 	// we can ignore NaNs
 	return x == y && math.Signbit(float64(x)) == math.Signbit(float64(y))
-
 }
 
 func alike64(x, y float64) bool {
 	// we can ignore NaNs
 	return x == y && math.Signbit(x) == math.Signbit(y)
-
 }
 
 func TestFloatMantExp(t *testing.T) {
diff --git a/src/math/big/floatconv.go b/src/math/big/floatconv.go
index 6501185..d8c69b8 100644
--- a/src/math/big/floatconv.go
+++ b/src/math/big/floatconv.go
@@ -16,7 +16,7 @@
 
 // SetString sets z to the value of s and returns z and a boolean indicating
 // success. s must be a floating-point number of the same format as accepted
-// by Parse, with base argument 0. The entire string (not just a prefix) must
+// by [Float.Parse], with base argument 0. The entire string (not just a prefix) must
 // be valid for success. If the operation failed, the value of z is undefined
 // but the returned value is nil.
 func (z *Float) SetString(s string) (*Float, bool) {
@@ -290,9 +290,9 @@
 
 var _ fmt.Scanner = (*Float)(nil) // *Float must implement fmt.Scanner
 
-// Scan is a support routine for fmt.Scanner; it sets z to the value of
+// Scan is a support routine for [fmt.Scanner]; it sets z to the value of
 // the scanned number. It accepts formats whose verbs are supported by
-// fmt.Scan for floating point values, which are:
+// [fmt.Scan] for floating point values, which are:
 // 'b' (binary), 'e', 'E', 'f', 'F', 'g' and 'G'.
 // Scan doesn't handle ±Inf.
 func (z *Float) Scan(s fmt.ScanState, ch rune) error {
diff --git a/src/math/big/floatmarsh.go b/src/math/big/floatmarsh.go
index 2a78c69..8a908ce 100644
--- a/src/math/big/floatmarsh.go
+++ b/src/math/big/floatmarsh.go
@@ -15,8 +15,8 @@
 // Gob codec version. Permits backward-compatible changes to the encoding.
 const floatGobVersion byte = 1
 
-// GobEncode implements the gob.GobEncoder interface.
-// The Float value and all its attributes (precision,
+// GobEncode implements the [encoding/gob.GobEncoder] interface.
+// The [Float] value and all its attributes (precision,
 // rounding mode, accuracy) are marshaled.
 func (x *Float) GobEncode() ([]byte, error) {
 	if x == nil {
@@ -58,7 +58,7 @@
 	return buf, nil
 }
 
-// GobDecode implements the gob.GobDecoder interface.
+// GobDecode implements the [encoding/gob.GobDecoder] interface.
 // The result is rounded per the precision and rounding mode of
 // z unless z's precision is 0, in which case z is set exactly
 // to the decoded value.
@@ -106,8 +106,8 @@
 	return nil
 }
 
-// MarshalText implements the encoding.TextMarshaler interface.
-// Only the Float value is marshaled (in full precision), other
+// MarshalText implements the [encoding.TextMarshaler] interface.
+// Only the [Float] value is marshaled (in full precision), other
 // attributes such as precision or accuracy are ignored.
 func (x *Float) MarshalText() (text []byte, err error) {
 	if x == nil {
@@ -117,7 +117,7 @@
 	return x.Append(buf, 'g', -1), nil
 }
 
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
 // The result is rounded per the precision and rounding mode of z.
 // If z's precision is 0, it is changed to 64 before rounding takes
 // effect.
diff --git a/src/math/big/ftoa.go b/src/math/big/ftoa.go
index 5506e6e..f7a4345 100644
--- a/src/math/big/ftoa.go
+++ b/src/math/big/ftoa.go
@@ -53,7 +53,7 @@
 }
 
 // String formats x like x.Text('g', 10).
-// (String must be called explicitly, Float.Format does not support %s verb.)
+// (String must be called explicitly, [Float.Format] does not support %s verb.)
 func (x *Float) String() string {
 	return x.Text('g', 10)
 }
@@ -444,16 +444,9 @@
 	return strconv.AppendInt(buf, int64(x.exp), 10)
 }
 
-func min(x, y int) int {
-	if x < y {
-		return x
-	}
-	return y
-}
-
 var _ fmt.Formatter = &floatZero // *Float must implement fmt.Formatter
 
-// Format implements fmt.Formatter. It accepts all the regular
+// Format implements [fmt.Formatter]. It accepts all the regular
 // formats for floating-point numbers ('b', 'e', 'E', 'f', 'F',
 // 'g', 'G', 'x') as well as 'p' and 'v'. See (*Float).Text for the
 // interpretation of 'p'. The 'v' format is handled like 'g'.
diff --git a/src/math/big/int.go b/src/math/big/int.go
index 2cc3d7b..b79b459 100644
--- a/src/math/big/int.go
+++ b/src/math/big/int.go
@@ -20,7 +20,7 @@
 // than Int values, and each unique Int value requires
 // its own unique *Int pointer. To "copy" an Int value,
 // an existing (or newly allocated) Int must be set to
-// a new value using the Int.Set method; shallow copies
+// a new value using the [Int.Set] method; shallow copies
 // of Ints are not supported and may lead to errors.
 //
 // Note that methods may leak the Int's value through timing side-channels.
@@ -74,7 +74,7 @@
 	return z
 }
 
-// NewInt allocates and returns a new Int set to x.
+// NewInt allocates and returns a new [Int] set to x.
 func NewInt(x int64) *Int {
 	// This code is arranged to be inlineable and produce
 	// zero allocations when inlined. See issue 29951.
@@ -102,9 +102,9 @@
 }
 
 // Bits provides raw (unchecked but fast) access to x by returning its
-// absolute value as a little-endian Word slice. The result and x share
+// absolute value as a little-endian [Word] slice. The result and x share
 // the same underlying array.
-// Bits is intended to support implementation of missing low-level Int
+// Bits is intended to support implementation of missing low-level [Int]
 // functionality outside this package; it should be avoided otherwise.
 func (x *Int) Bits() []Word {
 	// This function is used in cryptographic operations. It must not leak
@@ -114,9 +114,9 @@
 }
 
 // SetBits provides raw (unchecked but fast) access to z by setting its
-// value to abs, interpreted as a little-endian Word slice, and returning
+// value to abs, interpreted as a little-endian [Word] slice, and returning
 // z. The result and abs share the same underlying array.
-// SetBits is intended to support implementation of missing low-level Int
+// SetBits is intended to support implementation of missing low-level [Int]
 // functionality outside this package; it should be avoided otherwise.
 func (z *Int) SetBits(abs []Word) *Int {
 	z.abs = nat(abs).norm()
@@ -263,7 +263,7 @@
 
 // Quo sets z to the quotient x/y for y != 0 and returns z.
 // If y == 0, a division-by-zero run-time panic occurs.
-// Quo implements truncated division (like Go); see QuoRem for more details.
+// Quo implements truncated division (like Go); see [Int.QuoRem] for more details.
 func (z *Int) Quo(x, y *Int) *Int {
 	z.abs, _ = z.abs.div(nil, x.abs, y.abs)
 	z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
@@ -272,7 +272,7 @@
 
 // Rem sets z to the remainder x%y for y != 0 and returns z.
 // If y == 0, a division-by-zero run-time panic occurs.
-// Rem implements truncated modulus (like Go); see QuoRem for more details.
+// Rem implements truncated modulus (like Go); see [Int.QuoRem] for more details.
 func (z *Int) Rem(x, y *Int) *Int {
 	_, z.abs = nat(nil).div(z.abs, x.abs, y.abs)
 	z.neg = len(z.abs) > 0 && x.neg // 0 has no sign
@@ -298,7 +298,7 @@
 
 // Div sets z to the quotient x/y for y != 0 and returns z.
 // If y == 0, a division-by-zero run-time panic occurs.
-// Div implements Euclidean division (unlike Go); see DivMod for more details.
+// Div implements Euclidean division (unlike Go); see [Int.DivMod] for more details.
 func (z *Int) Div(x, y *Int) *Int {
 	y_neg := y.neg // z may be an alias for y
 	var r Int
@@ -315,7 +315,7 @@
 
 // Mod sets z to the modulus x%y for y != 0 and returns z.
 // If y == 0, a division-by-zero run-time panic occurs.
-// Mod implements Euclidean modulus (unlike Go); see DivMod for more details.
+// Mod implements Euclidean modulus (unlike Go); see [Int.DivMod] for more details.
 func (z *Int) Mod(x, y *Int) *Int {
 	y0 := y // save y
 	if z == y || alias(z.abs, y.abs) {
@@ -346,7 +346,7 @@
 // div and mod”. ACM Transactions on Programming Languages and
 // Systems (TOPLAS), 14(2):127-144, New York, NY, USA, 4/1992.
 // ACM press.)
-// See QuoRem for T-division and modulus (like Go).
+// See [Int.QuoRem] for T-division and modulus (like Go).
 func (z *Int) DivMod(x, y, m *Int) (*Int, *Int) {
 	y0 := y // save y
 	if z == y || alias(z.abs, y.abs) {
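For illustration (not part of the patch), the difference between truncated (Quo/Rem) and Euclidean (Div/Mod) division that these doc comments point to, using -7 and 3 as arbitrary operands.

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		x, y := big.NewInt(-7), big.NewInt(3)

		// Truncated division, like Go's / and %:
		q := new(big.Int).Quo(x, y) // -2
		r := new(big.Int).Rem(x, y) // -1 (same sign as x)

		// Euclidean division: the modulus is always in [0, |y|):
		d := new(big.Int).Div(x, y) // -3
		m := new(big.Int).Mod(x, y) // 2

		fmt.Println(q, r, d, m)
	}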
@@ -475,7 +475,7 @@
 // (not just a prefix) must be valid for success. If SetString fails,
 // the value of z is undefined but the returned value is nil.
 //
-// The base argument must be 0 or a value between 2 and MaxBase.
+// The base argument must be 0 or a value between 2 and [MaxBase].
 // For base 0, the number prefix determines the actual base: A prefix of
 // “0b” or “0B” selects base 2, “0”, “0o” or “0O” selects base 8,
 // and “0x” or “0X” selects base 16. Otherwise, the selected base is 10
@@ -519,7 +519,7 @@
 
 // Bytes returns the absolute value of x as a big-endian byte slice.
 //
-// To use a fixed length slice, or a preallocated one, use FillBytes.
+// To use a fixed length slice, or a preallocated one, use [Int.FillBytes].
 func (x *Int) Bytes() []byte {
 	// This function is used in cryptographic operations. It must not leak
 	// anything but the Int's sign and bit size through side-channels. Any
@@ -881,8 +881,8 @@
 
 // Rand sets z to a pseudo-random number in [0, n) and returns z.
 //
-// As this uses the math/rand package, it must not be used for
-// security-sensitive work. Use crypto/rand.Int instead.
+// As this uses the [math/rand] package, it must not be used for
+// security-sensitive work. Use [crypto/rand.Int] instead.
 func (z *Int) Rand(rnd *rand.Rand, n *Int) *Int {
 	// z.neg is not modified before the if check, because z and n might alias.
 	if n.neg || len(n.abs) == 0 {
diff --git a/src/math/big/int_test.go b/src/math/big/int_test.go
index cb964a4..088bce0 100644
--- a/src/math/big/int_test.go
+++ b/src/math/big/int_test.go
@@ -200,12 +200,22 @@
 			"638952175999932299156089414639761565182862536979208272237582" +
 			"511852109168640000000000000000000000", // -99!
 	},
+
+	// overflow situations
+	{math.MaxInt64 - 0, math.MaxInt64, "9223372036854775807"},
+	{math.MaxInt64 - 1, math.MaxInt64, "85070591730234615838173535747377725442"},
+	{math.MaxInt64 - 2, math.MaxInt64, "784637716923335094969050127519550606919189611815754530810"},
+	{math.MaxInt64 - 3, math.MaxInt64, "7237005577332262206126809393809643289012107973151163787181513908099760521240"},
 }
 
 func TestMulRangeZ(t *testing.T) {
 	var tmp Int
 	// test entirely positive ranges
 	for i, r := range mulRangesN {
+		// skip mulRangesN entries that overflow int64
+		if int64(r.a) < 0 || int64(r.b) < 0 {
+			continue
+		}
 		prod := tmp.MulRange(int64(r.a), int64(r.b)).String()
 		if prod != r.prod {
 			t.Errorf("#%da: got %s; want %s", i, prod, r.prod)
diff --git a/src/math/big/intconv.go b/src/math/big/intconv.go
index 04e8c24..51e75ff 100644
--- a/src/math/big/intconv.go
+++ b/src/math/big/intconv.go
@@ -52,7 +52,7 @@
 
 var _ fmt.Formatter = intOne // *Int must implement fmt.Formatter
 
-// Format implements fmt.Formatter. It accepts the formats
+// Format implements [fmt.Formatter]. It accepts the formats
 // 'b' (binary), 'o' (octal with 0 prefix), 'O' (octal with 0o prefix),
 // 'd' (decimal), 'x' (lowercase hexadecimal), and
 // 'X' (uppercase hexadecimal).
@@ -230,7 +230,7 @@
 
 var _ fmt.Scanner = intOne // *Int must implement fmt.Scanner
 
-// Scan is a support routine for fmt.Scanner; it sets z to the value of
+// Scan is a support routine for [fmt.Scanner]; it sets z to the value of
 // the scanned number. It accepts the formats 'b' (binary), 'o' (octal),
 // 'd' (decimal), 'x' (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
 func (z *Int) Scan(s fmt.ScanState, ch rune) error {
diff --git a/src/math/big/intmarsh.go b/src/math/big/intmarsh.go
index ce429ff..56eeefb 100644
--- a/src/math/big/intmarsh.go
+++ b/src/math/big/intmarsh.go
@@ -14,7 +14,7 @@
 // Gob codec version. Permits backward-compatible changes to the encoding.
 const intGobVersion byte = 1
 
-// GobEncode implements the gob.GobEncoder interface.
+// GobEncode implements the [encoding/gob.GobEncoder] interface.
 func (x *Int) GobEncode() ([]byte, error) {
 	if x == nil {
 		return nil, nil
@@ -29,7 +29,7 @@
 	return buf[i:], nil
 }
 
-// GobDecode implements the gob.GobDecoder interface.
+// GobDecode implements the [encoding/gob.GobDecoder] interface.
 func (z *Int) GobDecode(buf []byte) error {
 	if len(buf) == 0 {
 		// Other side sent a nil or default value.
@@ -45,7 +45,7 @@
 	return nil
 }
 
-// MarshalText implements the encoding.TextMarshaler interface.
+// MarshalText implements the [encoding.TextMarshaler] interface.
 func (x *Int) MarshalText() (text []byte, err error) {
 	if x == nil {
 		return []byte("<nil>"), nil
@@ -53,7 +53,7 @@
 	return x.abs.itoa(x.neg, 10), nil
 }
 
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
 func (z *Int) UnmarshalText(text []byte) error {
 	if _, ok := z.setFromScanner(bytes.NewReader(text), 0); !ok {
 		return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Int", text)
@@ -65,7 +65,7 @@
 // (programs that explicitly look for these two methods). JSON works
 // fine with the TextMarshaler only.
 
-// MarshalJSON implements the json.Marshaler interface.
+// MarshalJSON implements the [encoding/json.Marshaler] interface.
 func (x *Int) MarshalJSON() ([]byte, error) {
 	if x == nil {
 		return []byte("null"), nil
@@ -73,7 +73,7 @@
 	return x.abs.itoa(x.neg, 10), nil
 }
 
-// UnmarshalJSON implements the json.Unmarshaler interface.
+// UnmarshalJSON implements the [encoding/json.Unmarshaler] interface.
 func (z *Int) UnmarshalJSON(text []byte) error {
 	// Ignore null, like in the main JSON package.
 	if string(text) == "null" {
diff --git a/src/math/big/nat.go b/src/math/big/nat.go
index 90ce6d1..ecb7d36 100644
--- a/src/math/big/nat.go
+++ b/src/math/big/nat.go
@@ -389,13 +389,6 @@
 	}
 }
 
-func max(x, y int) int {
-	if x > y {
-		return x
-	}
-	return y
-}
-
 // karatsubaLen computes an approximation to the maximum k <= n such that
 // k = p<<i for a number p <= threshold and an i >= 0. Thus, the
 // result is the largest number that can be divided repeatedly by 2 before
@@ -631,7 +624,7 @@
 	case a+1 == b:
 		return z.mul(nat(nil).setUint64(a), nat(nil).setUint64(b))
 	}
-	m := (a + b) / 2
+	m := a + (b-a)/2 // avoid overflow
 	return z.mul(nat(nil).mulRange(a, m), nat(nil).mulRange(m+1, b))
 }
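Not part of the patch: a tiny sketch of why the midpoint is rewritten above, shown with plain uint64 arithmetic (mulRange's a and b are uint64); the naive form wraps around near MaxUint64.

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		a, b := uint64(math.MaxUint64-1), uint64(math.MaxUint64)

		fmt.Println((a + b) / 2) // 9223372036854775806 — the sum wrapped around
		fmt.Println(a + (b-a)/2) // 18446744073709551614 — the correct midpoint
	}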
 
diff --git a/src/math/big/nat_test.go b/src/math/big/nat_test.go
index b84a7be..4722548 100644
--- a/src/math/big/nat_test.go
+++ b/src/math/big/nat_test.go
@@ -6,6 +6,7 @@
 
 import (
 	"fmt"
+	"math"
 	"runtime"
 	"strings"
 	"testing"
@@ -155,6 +156,10 @@
 			"638952175999932299156089414639761565182862536979208272237582" +
 			"51185210916864000000000000000000000000", // 100!
 	},
+	{math.MaxUint64 - 0, math.MaxUint64, "18446744073709551615"},
+	{math.MaxUint64 - 1, math.MaxUint64, "340282366920938463408034375210639556610"},
+	{math.MaxUint64 - 2, math.MaxUint64, "6277101735386680761794095221682035635525021984684230311930"},
+	{math.MaxUint64 - 3, math.MaxUint64, "115792089237316195360799967654821100226821973275796746098729803619699194331160"},
 }
 
 func TestMulRangeN(t *testing.T) {
diff --git a/src/math/big/rat.go b/src/math/big/rat.go
index 700a643..cb32b78 100644
--- a/src/math/big/rat.go
+++ b/src/math/big/rat.go
@@ -18,7 +18,7 @@
 // than Rat values, and each unique Rat value requires
 // its own unique *Rat pointer. To "copy" a Rat value,
 // an existing (or newly allocated) Rat must be set to
-// a new value using the Rat.Set method; shallow copies
+// a new value using the [Rat.Set] method; shallow copies
 // of Rats are not supported and may lead to errors.
 type Rat struct {
 	// To make zero values for Rat work w/o initialization,
@@ -29,7 +29,7 @@
 	a, b Int
 }
 
-// NewRat creates a new Rat with numerator a and denominator b.
+// NewRat creates a new [Rat] with numerator a and denominator b.
 func NewRat(a, b int64) *Rat {
 	return new(Rat).SetFrac64(a, b)
 }
@@ -411,8 +411,8 @@
 
 // Denom returns the denominator of x; it is always > 0.
 // The result is a reference to x's denominator, unless
-// x is an uninitialized (zero value) Rat, in which case
-// the result is a new Int of value 1. (To initialize x,
+// x is an uninitialized (zero value) [Rat], in which case
+// the result is a new [Int] of value 1. (To initialize x,
 // any operation that sets x will do, including x.Set(x).)
 // If the result is a reference to x's denominator it
 // may change if a new value is assigned to x, and vice versa.
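Editor's note: the Denom doc above distinguishes the uninitialized zero value (a fresh Int of value 1) from an initialized Rat (a reference into x). A tiny illustrative sketch of the two cases, without mutating the returned value:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	var zero big.Rat       // uninitialized zero value Rat
	fmt.Println(zero.Denom()) // 1, a newly allocated Int

	x := big.NewRat(2, 6)  // normalized to 1/3
	fmt.Println(x.Denom()) // 3, a reference to x's denominator
}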
diff --git a/src/math/big/ratconv.go b/src/math/big/ratconv.go
index 8537a67..dd99aec 100644
--- a/src/math/big/ratconv.go
+++ b/src/math/big/ratconv.go
@@ -378,3 +378,82 @@
 
 	return string(buf)
 }
+
+// Note: FloatPrec (below) is in this file rather than rat.go because
+//       its results are relevant for decimal representation/printing.
+
+// FloatPrec returns the number n of non-repeating digits immediately
+// following the decimal point of the decimal representation of x.
+// The boolean result indicates whether a decimal representation of x
+// with that many fractional digits is exact or rounded.
+//
+// Examples:
+//
+//	x      n    exact    decimal representation with n fractional digits
+//	0      0    true     0
+//	1      0    true     1
+//	1/2    1    true     0.5
+//	1/3    0    false    0       (0.333... rounded)
+//	1/4    2    true     0.25
+//	1/6    1    false    0.2     (0.166... rounded)
+func (x *Rat) FloatPrec() (n int, exact bool) {
+	// Determine q and largest p2, p5 such that d = q·2^p2·5^p5.
+	// The results n, exact are:
+	//
+	//     n = max(p2, p5)
+	//     exact = q == 1
+	//
+	// For details see:
+	// https://en.wikipedia.org/wiki/Repeating_decimal#Reciprocals_of_integers_not_coprime_to_10
+	d := x.Denom().abs // d >= 1
+
+	// Determine p2 by counting factors of 2.
+	// p2 corresponds to the trailing zero bits in d.
+	// Do this first to reduce q as much as possible.
+	var q nat
+	p2 := d.trailingZeroBits()
+	q = q.shr(d, p2)
+
+	// Determine p5 by counting factors of 5.
+	// Build a table starting with an initial power of 5,
+	// and use repeated squaring until the factor doesn't
+	// divide q anymore. Then use the table to determine
+	// the power of 5 in q.
+	const fp = 13        // f == 5^fp
+	var tab []nat        // tab[i] == (5^fp)^(2^i) == 5^(fp·2^i)
+	f := nat{1220703125} // == 5^fp (must fit into a uint32 Word)
+	var t, r nat         // temporaries
+	for {
+		if _, r = t.div(r, q, f); len(r) != 0 {
+			break // f doesn't divide q evenly
+		}
+		tab = append(tab, f)
+		f = nat(nil).sqr(f) // nat(nil) to ensure a new f for each table entry
+	}
+
+	// Factor q using the table entries, if any.
+	// We start with the largest factor f = tab[len(tab)-1]
+	// that evenly divides q. It does so at most once because
+	// otherwise f·f would also divide q. That can't be true
+	// because f·f is the next higher table entry, contradicting
+	// how f was chosen in the first place.
+	// The same reasoning applies to the subsequent factors.
+	var p5 uint
+	for i := len(tab) - 1; i >= 0; i-- {
+		if t, r = t.div(r, q, tab[i]); len(r) == 0 {
+			p5 += fp * (1 << i) // tab[i] == 5^(fp·2^i)
+			q = q.set(t)
+		}
+	}
+
+	// If fp != 1, we may still have multiples of 5 left.
+	for {
+		if t, r = t.div(r, q, natFive); len(r) != 0 {
+			break
+		}
+		p5++
+		q = q.set(t)
+	}
+
+	return int(max(p2, p5)), q.cmp(natOne) == 0
+}
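Editor's note: a short sketch of how the new Rat.FloatPrec pairs with Rat.FloatString, mirroring the table in the doc comment above (the chosen rationals are examples only):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	for _, r := range []*big.Rat{
		big.NewRat(1, 4), // exact with 2 fractional digits
		big.NewRat(1, 6), // 0.1(6), rounded after 1 digit
	} {
		n, exact := r.FloatPrec()
		fmt.Println(r, n, exact, r.FloatString(n))
		// Output: 1/4 2 true 0.25
		//         1/6 1 false 0.2
	}
}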
diff --git a/src/math/big/ratconv_test.go b/src/math/big/ratconv_test.go
index 45a3560..93e89ad 100644
--- a/src/math/big/ratconv_test.go
+++ b/src/math/big/ratconv_test.go
@@ -624,3 +624,142 @@
 		}
 	}
 }
+func TestFloatPrec(t *testing.T) {
+	var tests = []struct {
+		f    string
+		prec int
+		ok   bool
+		fdec string
+	}{
+		// examples from the issue #50489
+		{"10/100", 1, true, "0.1"},
+		{"3/100", 2, true, "0.03"},
+		{"10", 0, true, "10"},
+
+		// more examples
+		{"zero", 0, true, "0"},      // test uninitialized zero value for Rat
+		{"0", 0, true, "0"},         // 0
+		{"1", 0, true, "1"},         // 1
+		{"1/2", 1, true, "0.5"},     // 0.5
+		{"1/3", 0, false, "0"},      // 0.(3)
+		{"1/4", 2, true, "0.25"},    // 0.25
+		{"1/5", 1, true, "0.2"},     // 0.2
+		{"1/6", 1, false, "0.2"},    // 0.1(6)
+		{"1/7", 0, false, "0"},      // 0.(142857)
+		{"1/8", 3, true, "0.125"},   // 0.125
+		{"1/9", 0, false, "0"},      // 0.(1)
+		{"1/10", 1, true, "0.1"},    // 0.1
+		{"1/11", 0, false, "0"},     // 0.(09)
+		{"1/12", 2, false, "0.08"},  // 0.08(3)
+		{"1/13", 0, false, "0"},     // 0.(076923)
+		{"1/14", 1, false, "0.1"},   // 0.0(714285)
+		{"1/15", 1, false, "0.1"},   // 0.0(6)
+		{"1/16", 4, true, "0.0625"}, // 0.0625
+
+		{"10/2", 0, true, "5"},                    // 5
+		{"10/3", 0, false, "3"},                   // 3.(3)
+		{"10/6", 0, false, "2"},                   // 1.(6)
+		{"1/10000000", 7, true, "0.0000001"},      // 0.0000001
+		{"1/3125", 5, true, "0.00032"},            // "0.00032"
+		{"1/1024", 10, true, "0.0009765625"},      // 0.0009765625
+		{"1/304000", 7, false, "0.0000033"},       // 0.0000032(894736842105263157)
+		{"1/48828125", 11, true, "0.00000002048"}, // 0.00000002048
+	}
+
+	for _, test := range tests {
+		var f Rat
+
+		// check uninitialized zero value
+		if test.f != "zero" {
+			_, ok := f.SetString(test.f)
+			if !ok {
+				t.Fatalf("invalid test case: f = %s", test.f)
+			}
+		}
+
+		// results for f and -f must be the same
+		fdec := test.fdec
+		for i := 0; i < 2; i++ {
+			prec, ok := f.FloatPrec()
+			if prec != test.prec || ok != test.ok {
+				t.Errorf("%s: FloatPrec(%s): got prec, ok = %d, %v; want %d, %v", test.f, &f, prec, ok, test.prec, test.ok)
+			}
+			s := f.FloatString(test.prec)
+			if s != fdec {
+				t.Errorf("%s: FloatString(%s, %d): got %s; want %s", test.f, &f, prec, s, fdec)
+			}
+			// proceed with -f but don't add a "-" before a "0"
+			if f.Sign() > 0 {
+				f.Neg(&f)
+				fdec = "-" + fdec
+			}
+		}
+	}
+}
+
+func BenchmarkFloatPrecExact(b *testing.B) {
+	for _, n := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} {
+		// d := 5^n
+		d := NewInt(5)
+		p := NewInt(int64(n))
+		d.Exp(d, p, nil)
+
+		// r := 1/d
+		var r Rat
+		r.SetFrac(NewInt(1), d)
+
+		b.Run(fmt.Sprint(n), func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				prec, ok := r.FloatPrec()
+				if prec != n || !ok {
+					b.Fatalf("got exact, ok = %d, %v; want %d, %v", prec, ok, uint64(n), true)
+				}
+			}
+		})
+	}
+}
+
+func BenchmarkFloatPrecMixed(b *testing.B) {
+	for _, n := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} {
+		// d := (3·5·7·11)^n
+		d := NewInt(3 * 5 * 7 * 11)
+		p := NewInt(int64(n))
+		d.Exp(d, p, nil)
+
+		// r := 1/d
+		var r Rat
+		r.SetFrac(NewInt(1), d)
+
+		b.Run(fmt.Sprint(n), func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				prec, ok := r.FloatPrec()
+				if prec != n || ok {
+					b.Fatalf("got exact, ok = %d, %v; want %d, %v", prec, ok, uint64(n), false)
+				}
+			}
+		})
+	}
+}
+
+func BenchmarkFloatPrecInexact(b *testing.B) {
+	for _, n := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} {
+		// d := 5^n + 1
+		d := NewInt(5)
+		p := NewInt(int64(n))
+		d.Exp(d, p, nil)
+		d.Add(d, NewInt(1))
+
+		// r := 1/d
+		var r Rat
+		r.SetFrac(NewInt(1), d)
+
+		b.Run(fmt.Sprint(n), func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				_, ok := r.FloatPrec()
+				if ok {
+					b.Fatalf("got unexpected ok")
+				}
+			}
+		})
+	}
+}
diff --git a/src/math/big/ratmarsh.go b/src/math/big/ratmarsh.go
index b69c59d..033fb44 100644
--- a/src/math/big/ratmarsh.go
+++ b/src/math/big/ratmarsh.go
@@ -16,7 +16,7 @@
 // Gob codec version. Permits backward-compatible changes to the encoding.
 const ratGobVersion byte = 1
 
-// GobEncode implements the gob.GobEncoder interface.
+// GobEncode implements the [encoding/gob.GobEncoder] interface.
 func (x *Rat) GobEncode() ([]byte, error) {
 	if x == nil {
 		return nil, nil
@@ -39,7 +39,7 @@
 	return buf[j:], nil
 }
 
-// GobDecode implements the gob.GobDecoder interface.
+// GobDecode implements the [encoding/gob.GobDecoder] interface.
 func (z *Rat) GobDecode(buf []byte) error {
 	if len(buf) == 0 {
 		// Other side sent a nil or default value.
@@ -68,7 +68,7 @@
 	return nil
 }
 
-// MarshalText implements the encoding.TextMarshaler interface.
+// MarshalText implements the [encoding.TextMarshaler] interface.
 func (x *Rat) MarshalText() (text []byte, err error) {
 	if x.IsInt() {
 		return x.a.MarshalText()
@@ -76,7 +76,7 @@
 	return x.marshal(), nil
 }
 
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
 func (z *Rat) UnmarshalText(text []byte) error {
 	// TODO(gri): get rid of the []byte/string conversion
 	if _, ok := z.SetString(string(text)); !ok {
diff --git a/src/math/bits/bits.go b/src/math/bits/bits.go
index c1c7b79..235d63e 100644
--- a/src/math/bits/bits.go
+++ b/src/math/bits/bits.go
@@ -21,7 +21,7 @@
 
 // --- LeadingZeros ---
 
-// LeadingZeros returns the number of leading zero bits in x; the result is UintSize for x == 0.
+// LeadingZeros returns the number of leading zero bits in x; the result is [UintSize] for x == 0.
 func LeadingZeros(x uint) int { return UintSize - Len(x) }
 
 // LeadingZeros8 returns the number of leading zero bits in x; the result is 8 for x == 0.
@@ -55,7 +55,7 @@
 	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
 }
 
-// TrailingZeros returns the number of trailing zero bits in x; the result is UintSize for x == 0.
+// TrailingZeros returns the number of trailing zero bits in x; the result is [UintSize] for x == 0.
 func TrailingZeros(x uint) int {
 	if UintSize == 32 {
 		return TrailingZeros32(uint32(x))
@@ -169,7 +169,7 @@
 
 // --- RotateLeft ---
 
-// RotateLeft returns the value of x rotated left by (k mod UintSize) bits.
+// RotateLeft returns the value of x rotated left by (k mod [UintSize]) bits.
 // To rotate x right by k bits, call RotateLeft(x, -k).
 //
 // This function's execution time does not depend on the inputs.
@@ -578,14 +578,14 @@
 }
 
 // Rem32 returns the remainder of (hi, lo) divided by y. Rem32 panics
-// for y == 0 (division by zero) but, unlike Div32, it doesn't panic
+// for y == 0 (division by zero) but, unlike [Div32], it doesn't panic
 // on a quotient overflow.
 func Rem32(hi, lo, y uint32) uint32 {
 	return uint32((uint64(hi)<<32 | uint64(lo)) % uint64(y))
 }
 
 // Rem64 returns the remainder of (hi, lo) divided by y. Rem64 panics
-// for y == 0 (division by zero) but, unlike Div64, it doesn't panic
+// for y == 0 (division by zero) but, unlike [Div64], it doesn't panic
 // on a quotient overflow.
 func Rem64(hi, lo, y uint64) uint64 {
 	// We scale down hi so that hi < y, then use Div64 to compute the
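Editor's note: the Rem64 doc above hinges on the quotient-overflow case: Div64 panics when hi >= y because the 128-bit quotient no longer fits in 64 bits, while Rem64 still returns a well-defined remainder. A minimal sketch (values are illustrative):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	hi, lo := uint64(2), uint64(1) // represents hi·2^64 + lo

	// hi >= y, so bits.Div64(hi, lo, 2) would panic (quotient overflow),
	// but Rem64 still yields the remainder of (2·2^64 + 1) mod 2.
	fmt.Println(bits.Rem64(hi, lo, 2)) // 1
}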
diff --git a/src/math/bits/bits_errors.go b/src/math/bits/bits_errors.go
index 61cb5c9..353d2f6 100644
--- a/src/math/bits/bits_errors.go
+++ b/src/math/bits/bits_errors.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !compiler_bootstrap
-// +build !compiler_bootstrap
 
 package bits
 
diff --git a/src/math/bits/bits_errors_bootstrap.go b/src/math/bits/bits_errors_bootstrap.go
index 4d610d3..6b14e41 100644
--- a/src/math/bits/bits_errors_bootstrap.go
+++ b/src/math/bits/bits_errors_bootstrap.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build compiler_bootstrap
-// +build compiler_bootstrap
 
 // This version used only for bootstrap (on this path we want
 // to avoid use of go:linkname as applied to variables).
diff --git a/src/math/bits/make_examples.go b/src/math/bits/make_examples.go
index 92e9aab..4bd7f58 100644
--- a/src/math/bits/make_examples.go
+++ b/src/math/bits/make_examples.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ignore
-// +build ignore
 
 // This program generates example_test.go.
 
diff --git a/src/math/bits/make_tables.go b/src/math/bits/make_tables.go
index 867025e..d067361 100644
--- a/src/math/bits/make_tables.go
+++ b/src/math/bits/make_tables.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ignore
-// +build ignore
 
 // This program generates bits_tables.go.
 
diff --git a/src/math/cmplx/pow.go b/src/math/cmplx/pow.go
index 666bba2..434f80f 100644
--- a/src/math/cmplx/pow.go
+++ b/src/math/cmplx/pow.go
@@ -43,7 +43,7 @@
 //    IEEE      -10,+10     30000       9.4e-15     1.5e-15
 
 // Pow returns x**y, the base-x exponential of y.
-// For generalized compatibility with math.Pow:
+// For generalized compatibility with [math.Pow]:
 //
 //	Pow(0, ±0) returns 1+0i
 //	Pow(0, c) for real(c)<0 returns Inf+0i if imag(c) is zero, otherwise Inf+Inf i.
diff --git a/src/math/erfinv.go b/src/math/erfinv.go
index eed0feb..8e630f9 100644
--- a/src/math/erfinv.go
+++ b/src/math/erfinv.go
@@ -116,7 +116,7 @@
 	return ans
 }
 
-// Erfcinv returns the inverse of Erfc(x).
+// Erfcinv returns the inverse of [Erfc](x).
 //
 // Special cases are:
 //
diff --git a/src/math/exp.go b/src/math/exp.go
index 760795f..050e0ee 100644
--- a/src/math/exp.go
+++ b/src/math/exp.go
@@ -138,7 +138,7 @@
 
 // Exp2 returns 2**x, the base-2 exponential of x.
 //
-// Special cases are the same as Exp.
+// Special cases are the same as [Exp].
 func Exp2(x float64) float64 {
 	if haveArchExp2 {
 		return archExp2(x)
diff --git a/src/math/expm1.go b/src/math/expm1.go
index ff1c82f..f8e45d9 100644
--- a/src/math/expm1.go
+++ b/src/math/expm1.go
@@ -114,7 +114,7 @@
 //
 
 // Expm1 returns e**x - 1, the base-e exponential of x minus 1.
-// It is more accurate than Exp(x) - 1 when x is near zero.
+// It is more accurate than [Exp](x) - 1 when x is near zero.
 //
 // Special cases are:
 //
diff --git a/src/math/floor_ppc64x.s b/src/math/floor_ppc64x.s
index 584c27e..e9c5d49 100644
--- a/src/math/floor_ppc64x.s
+++ b/src/math/floor_ppc64x.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ppc64 || ppc64le
-// +build ppc64 ppc64le
 
 #include "textflag.h"
 
diff --git a/src/math/huge_test.go b/src/math/huge_test.go
index 568b0c8..2eadb7f 100644
--- a/src/math/huge_test.go
+++ b/src/math/huge_test.go
@@ -2,11 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Disabled for s390x because it uses assembly routines that are not
-// accurate for huge arguments.
-
-//go:build !s390x
-
 package math_test
 
 import (
diff --git a/src/math/hypot.go b/src/math/hypot.go
index 6ae70c1..03c3602 100644
--- a/src/math/hypot.go
+++ b/src/math/hypot.go
@@ -8,7 +8,7 @@
 	Hypot -- sqrt(p*p + q*q), but overflows only if the result does.
 */
 
-// Hypot returns Sqrt(p*p + q*q), taking care to avoid
+// Hypot returns [Sqrt](p*p + q*q), taking care to avoid
 // unnecessary overflow and underflow.
 //
 // Special cases are:
diff --git a/src/math/ldexp.go b/src/math/ldexp.go
index df365c0..fad099d 100644
--- a/src/math/ldexp.go
+++ b/src/math/ldexp.go
@@ -4,7 +4,7 @@
 
 package math
 
-// Ldexp is the inverse of Frexp.
+// Ldexp is the inverse of [Frexp].
 // It returns frac × 2**exp.
 //
 // Special cases are:
diff --git a/src/math/lgamma.go b/src/math/lgamma.go
index 4058ad6..5a7ea2a 100644
--- a/src/math/lgamma.go
+++ b/src/math/lgamma.go
@@ -163,7 +163,7 @@
 	-1.63092934096575273989e-03, // 0xBF5AB89D0B9E43E4
 }
 
-// Lgamma returns the natural logarithm and sign (-1 or +1) of Gamma(x).
+// Lgamma returns the natural logarithm and sign (-1 or +1) of [Gamma](x).
 //
 // Special cases are:
 //
diff --git a/src/math/log10.go b/src/math/log10.go
index e6916a5..02c3a75 100644
--- a/src/math/log10.go
+++ b/src/math/log10.go
@@ -5,7 +5,7 @@
 package math
 
 // Log10 returns the decimal logarithm of x.
-// The special cases are the same as for Log.
+// The special cases are the same as for [Log].
 func Log10(x float64) float64 {
 	if haveArchLog10 {
 		return archLog10(x)
@@ -18,7 +18,7 @@
 }
 
 // Log2 returns the binary logarithm of x.
-// The special cases are the same as for Log.
+// The special cases are the same as for [Log].
 func Log2(x float64) float64 {
 	if haveArchLog2 {
 		return archLog2(x)
diff --git a/src/math/log1p.go b/src/math/log1p.go
index 3a7b385..bfcb813 100644
--- a/src/math/log1p.go
+++ b/src/math/log1p.go
@@ -84,7 +84,7 @@
 //       See HP-15C Advanced Functions Handbook, p.193.
 
 // Log1p returns the natural logarithm of 1 plus its argument x.
-// It is more accurate than Log(1 + x) when x is near zero.
+// It is more accurate than [Log](1 + x) when x is near zero.
 //
 // Special cases are:
 //
diff --git a/src/math/modf_ppc64x.s b/src/math/modf_ppc64x.s
index 1303067..410b523 100644
--- a/src/math/modf_ppc64x.s
+++ b/src/math/modf_ppc64x.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ppc64 || ppc64le
-// +build ppc64 ppc64le
 
 #include "textflag.h"
 
diff --git a/src/math/rand/exp.go b/src/math/rand/exp.go
index c1162c1..55d7d7d 100644
--- a/src/math/rand/exp.go
+++ b/src/math/rand/exp.go
@@ -21,7 +21,7 @@
 )
 
 // ExpFloat64 returns an exponentially distributed float64 in the range
-// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (0, +[math.MaxFloat64]] with an exponential distribution whose rate parameter
 // (lambda) is 1 and whose mean is 1/lambda (1).
 // To produce a distribution with a different rate parameter,
 // callers can adjust the output using:
diff --git a/src/math/rand/normal.go b/src/math/rand/normal.go
index 6654479..4d441d4 100644
--- a/src/math/rand/normal.go
+++ b/src/math/rand/normal.go
@@ -28,7 +28,7 @@
 }
 
 // NormFloat64 returns a normally distributed float64 in
-// the range -math.MaxFloat64 through +math.MaxFloat64 inclusive,
+// the range -[math.MaxFloat64] through +[math.MaxFloat64] inclusive,
 // with standard normal distribution (mean = 0, stddev = 1).
 // To produce a different normal distribution, callers can
 // adjust the output using:
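Editor's note: the NormFloat64 doc above ends with the usual scale-and-shift adjustment. A minimal sketch of producing a non-standard normal distribution; desiredMean and desiredStdDev are illustrative names, not part of the API:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const desiredMean, desiredStdDev = 10.0, 2.0
	// Scale then shift the standard normal sample, as the doc comment suggests.
	sample := rand.NormFloat64()*desiredStdDev + desiredMean
	fmt.Println(sample)
}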
diff --git a/src/math/rand/rand.go b/src/math/rand/rand.go
index cc1f95c..a8ed9c0 100644
--- a/src/math/rand/rand.go
+++ b/src/math/rand/rand.go
@@ -33,10 +33,10 @@
 	Seed(seed int64)
 }
 
-// A Source64 is a Source that can also generate
+// A Source64 is a [Source] that can also generate
 // uniformly-distributed pseudo-random uint64 values in
 // the range [0, 1<<64) directly.
-// If a Rand r's underlying Source s implements Source64,
+// If a [Rand] r's underlying [Source] s implements Source64,
 // then r.Uint64 returns the result of one call to s.Uint64
 // instead of making two calls to s.Int63.
 type Source64 interface {
@@ -44,10 +44,10 @@
 	Uint64() uint64
 }
 
-// NewSource returns a new pseudo-random Source seeded with the given value.
-// Unlike the default Source used by top-level functions, this source is not
+// NewSource returns a new pseudo-random [Source] seeded with the given value.
+// Unlike the default [Source] used by top-level functions, this source is not
 // safe for concurrent use by multiple goroutines.
-// The returned Source implements Source64.
+// The returned [Source] implements [Source64].
 func NewSource(seed int64) Source {
 	return newSource(seed)
 }
@@ -73,7 +73,7 @@
 	readPos int8
 }
 
-// New returns a new Rand that uses random values from src
+// New returns a new [Rand] that uses random values from src
 // to generate other random values.
 func New(src Source) *Rand {
 	s64, _ := src.(Source64)
@@ -81,7 +81,7 @@
 }
 
 // Seed uses the provided seed value to initialize the generator to a deterministic state.
-// Seed should not be called concurrently with any other Rand method.
+// Seed should not be called concurrently with any other [Rand] method.
 func (r *Rand) Seed(seed int64) {
 	if lk, ok := r.src.(*lockedSource); ok {
 		lk.seedPos(seed, &r.readPos)
@@ -273,7 +273,7 @@
 	switch src := r.src.(type) {
 	case *lockedSource:
 		return src.read(p, &r.readVal, &r.readPos)
-	case *fastSource:
+	case *runtimeSource:
 		return src.read(p, &r.readVal, &r.readPos)
 	}
 	return read(p, r.src, &r.readVal, &r.readPos)
@@ -328,8 +328,8 @@
 		r.Seed(1)
 	} else {
 		r = &Rand{
-			src: &fastSource{},
-			s64: &fastSource{},
+			src: &runtimeSource{},
+			s64: &runtimeSource{},
 		}
 	}
 
@@ -346,29 +346,29 @@
 	return r
 }
 
-//go:linkname fastrand64
-func fastrand64() uint64
+//go:linkname runtime_rand runtime.rand
+func runtime_rand() uint64
 
-// fastSource is an implementation of Source64 that uses the runtime
+// runtimeSource is an implementation of Source64 that uses the runtime
 // fastrand functions.
-type fastSource struct {
+type runtimeSource struct {
 	// The mutex is used to avoid race conditions in Read.
 	mu sync.Mutex
 }
 
-func (*fastSource) Int63() int64 {
-	return int64(fastrand64() & rngMask)
+func (*runtimeSource) Int63() int64 {
+	return int64(runtime_rand() & rngMask)
 }
 
-func (*fastSource) Seed(int64) {
-	panic("internal error: call to fastSource.Seed")
+func (*runtimeSource) Seed(int64) {
+	panic("internal error: call to runtimeSource.Seed")
 }
 
-func (*fastSource) Uint64() uint64 {
-	return fastrand64()
+func (*runtimeSource) Uint64() uint64 {
+	return runtime_rand()
 }
 
-func (fs *fastSource) read(p []byte, readVal *int64, readPos *int8) (n int, err error) {
+func (fs *runtimeSource) read(p []byte, readVal *int64, readPos *int8) (n int, err error) {
 	fs.mu.Lock()
 	n, err = read(p, fs, readVal, readPos)
 	fs.mu.Unlock()
@@ -378,7 +378,7 @@
 // Seed uses the provided seed value to initialize the default Source to a
 // deterministic state. Seed values that have the same remainder when
 // divided by 2³¹-1 generate the same pseudo-random sequence.
-// Seed, unlike the Rand.Seed method, is safe for concurrent use.
+// Seed, unlike the [Rand.Seed] method, is safe for concurrent use.
 //
 // If Seed is not called, the generator is seeded randomly at program startup.
 //
@@ -405,7 +405,7 @@
 	// Otherwise either
 	// 1) orig == nil, which is the normal case when Seed is the first
 	// top-level function to be called, or
-	// 2) orig is already a fastSource, in which case we need to change
+	// 2) orig is already a runtimeSource, in which case we need to change
 	// to a lockedSource.
 	// Either way we do the same thing.
 
@@ -419,67 +419,67 @@
 }
 
 // Int63 returns a non-negative pseudo-random 63-bit integer as an int64
-// from the default Source.
+// from the default [Source].
 func Int63() int64 { return globalRand().Int63() }
 
 // Uint32 returns a pseudo-random 32-bit value as a uint32
-// from the default Source.
+// from the default [Source].
 func Uint32() uint32 { return globalRand().Uint32() }
 
 // Uint64 returns a pseudo-random 64-bit value as a uint64
-// from the default Source.
+// from the default [Source].
 func Uint64() uint64 { return globalRand().Uint64() }
 
 // Int31 returns a non-negative pseudo-random 31-bit integer as an int32
-// from the default Source.
+// from the default [Source].
 func Int31() int32 { return globalRand().Int31() }
 
-// Int returns a non-negative pseudo-random int from the default Source.
+// Int returns a non-negative pseudo-random int from the default [Source].
 func Int() int { return globalRand().Int() }
 
 // Int63n returns, as an int64, a non-negative pseudo-random number in the half-open interval [0,n)
-// from the default Source.
+// from the default [Source].
 // It panics if n <= 0.
 func Int63n(n int64) int64 { return globalRand().Int63n(n) }
 
 // Int31n returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n)
-// from the default Source.
+// from the default [Source].
 // It panics if n <= 0.
 func Int31n(n int32) int32 { return globalRand().Int31n(n) }
 
 // Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n)
-// from the default Source.
+// from the default [Source].
 // It panics if n <= 0.
 func Intn(n int) int { return globalRand().Intn(n) }
 
 // Float64 returns, as a float64, a pseudo-random number in the half-open interval [0.0,1.0)
-// from the default Source.
+// from the default [Source].
 func Float64() float64 { return globalRand().Float64() }
 
 // Float32 returns, as a float32, a pseudo-random number in the half-open interval [0.0,1.0)
-// from the default Source.
+// from the default [Source].
 func Float32() float32 { return globalRand().Float32() }
 
 // Perm returns, as a slice of n ints, a pseudo-random permutation of the integers
-// in the half-open interval [0,n) from the default Source.
+// in the half-open interval [0,n) from the default [Source].
 func Perm(n int) []int { return globalRand().Perm(n) }
 
-// Shuffle pseudo-randomizes the order of elements using the default Source.
+// Shuffle pseudo-randomizes the order of elements using the default [Source].
 // n is the number of elements. Shuffle panics if n < 0.
 // swap swaps the elements with indexes i and j.
 func Shuffle(n int, swap func(i, j int)) { globalRand().Shuffle(n, swap) }
 
-// Read generates len(p) random bytes from the default Source and
+// Read generates len(p) random bytes from the default [Source] and
 // writes them into p. It always returns len(p) and a nil error.
-// Read, unlike the Rand.Read method, is safe for concurrent use.
+// Read, unlike the [Rand.Read] method, is safe for concurrent use.
 //
-// Deprecated: For almost all use cases, crypto/rand.Read is more appropriate.
+// Deprecated: For almost all use cases, [crypto/rand.Read] is more appropriate.
 func Read(p []byte) (n int, err error) { return globalRand().Read(p) }
 
 // NormFloat64 returns a normally distributed float64 in the range
-// [-math.MaxFloat64, +math.MaxFloat64] with
+// [-[math.MaxFloat64], +[math.MaxFloat64]] with
 // standard normal distribution (mean = 0, stddev = 1)
-// from the default Source.
+// from the default [Source].
 // To produce a different normal distribution, callers can
 // adjust the output using:
 //
@@ -487,8 +487,8 @@
 func NormFloat64() float64 { return globalRand().NormFloat64() }
 
 // ExpFloat64 returns an exponentially distributed float64 in the range
-// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
-// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source.
+// (0, +[math.MaxFloat64]] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1) from the default [Source].
 // To produce a distribution with a different rate parameter,
 // callers can adjust the output using:
 //
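Editor's note: with the rename above, the top-level math/rand functions draw from the runtime generator (runtimeSource, backed by runtime.rand) unless Seed is called, in which case the package switches to a lockedSource. A small sketch contrasting the auto-seeded globals with an explicitly seeded, non-concurrent-safe Rand:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Auto-seeded global source; different values across program runs.
	fmt.Println(rand.Int63())

	// Deterministic stream; the Source returned by NewSource is not safe
	// for concurrent use by multiple goroutines.
	r := rand.New(rand.NewSource(1))
	fmt.Println(r.Int63()) // same value on every run
}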
diff --git a/src/math/rand/rand_test.go b/src/math/rand/rand_test.go
index 7eba1dc..9f074fe 100644
--- a/src/math/rand/rand_test.go
+++ b/src/math/rand/rand_test.go
@@ -14,6 +14,7 @@
 	. "math/rand"
 	"os"
 	"runtime"
+	"strings"
 	"sync"
 	"testing"
 	"testing/iotest"
@@ -33,13 +34,6 @@
 	maxError    float64
 }
 
-func max(a, b float64) float64 {
-	if a > b {
-		return a
-	}
-	return b
-}
-
 func nearEqual(a, b, closeEnough, maxError float64) bool {
 	absDiff := math.Abs(a - b)
 	if absDiff < closeEnough { // Necessary when one value is zero and one value is close to zero.
@@ -338,7 +332,7 @@
 func hasSlowFloatingPoint() bool {
 	switch runtime.GOARCH {
 	case "arm":
-		return os.Getenv("GOARM") == "5"
+		return os.Getenv("GOARM") == "5" || strings.HasSuffix(os.Getenv("GOARM"), ",softfloat")
 	case "mips", "mipsle", "mips64", "mips64le":
 		// Be conservative and assume that all mips boards
 		// have emulated floating point.
diff --git a/src/math/rand/v2/auto_test.go b/src/math/rand/v2/auto_test.go
new file mode 100644
index 0000000..f689733
--- /dev/null
+++ b/src/math/rand/v2/auto_test.go
@@ -0,0 +1,40 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand_test
+
+import (
+	. "math/rand/v2"
+	"testing"
+)
+
+// This test is first, in its own file with an alphabetically early name,
+// to try to make sure that it runs early. It has the best chance of
+// detecting deterministic seeding if it's the first test that runs.
+
+func TestAuto(t *testing.T) {
+	// Pull out 10 int64s from the global source
+	// and then check that they don't appear in that
+	// order in the deterministic seeded result.
+	var out []int64
+	for i := 0; i < 10; i++ {
+		out = append(out, Int64())
+	}
+
+	// Look for out in seeded output.
+	// Strictly speaking, we should look for them in order,
+	// but this is good enough and not significantly more
+	// likely to have a false positive.
+	r := New(NewPCG(1, 0))
+	found := 0
+	for i := 0; i < 1000; i++ {
+		x := r.Int64()
+		if x == out[found] {
+			found++
+			if found == len(out) {
+				t.Fatalf("found unseeded output in Seed(1) output")
+			}
+		}
+	}
+}
diff --git a/src/math/rand/v2/chacha8.go b/src/math/rand/v2/chacha8.go
new file mode 100644
index 0000000..6b9aa72
--- /dev/null
+++ b/src/math/rand/v2/chacha8.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import "internal/chacha8rand"
+
+// A ChaCha8 is a ChaCha8-based cryptographically strong
+// random number generator.
+type ChaCha8 struct {
+	state chacha8rand.State
+}
+
+// NewChaCha8 returns a new ChaCha8 seeded with the given seed.
+func NewChaCha8(seed [32]byte) *ChaCha8 {
+	c := new(ChaCha8)
+	c.state.Init(seed)
+	return c
+}
+
+// Seed resets the ChaCha8 to behave the same way as NewChaCha8(seed).
+func (c *ChaCha8) Seed(seed [32]byte) {
+	c.state.Init(seed)
+}
+
+// Uint64 returns a uniformly distributed random uint64 value.
+func (c *ChaCha8) Uint64() uint64 {
+	for {
+		x, ok := c.state.Next()
+		if ok {
+			return x
+		}
+		c.state.Refill()
+	}
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (c *ChaCha8) UnmarshalBinary(data []byte) error {
+	return chacha8rand.Unmarshal(&c.state, data)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (c *ChaCha8) MarshalBinary() ([]byte, error) {
+	return chacha8rand.Marshal(&c.state), nil
+}
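Editor's note: a short usage sketch for the new ChaCha8 generator in math/rand/v2. Since ChaCha8 has a Uint64 method it also satisfies the v2 Source interface and can back a *rand.Rand; the seed string matches the golden test data below:

package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	seed := [32]byte([]byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ123456"))

	c := rand.NewChaCha8(seed)
	fmt.Printf("%#x\n", c.Uint64()) // start of the golden stream in the test below

	// ChaCha8 implements Source (Uint64), so it can drive a Rand directly.
	r := rand.New(rand.NewChaCha8(seed))
	fmt.Println(r.IntN(10))
}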
diff --git a/src/math/rand/v2/chacha8_test.go b/src/math/rand/v2/chacha8_test.go
new file mode 100644
index 0000000..2c55b47
--- /dev/null
+++ b/src/math/rand/v2/chacha8_test.go
@@ -0,0 +1,531 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand_test
+
+import (
+	. "math/rand/v2"
+	"testing"
+)
+
+func TestChaCha8(t *testing.T) {
+	p := NewChaCha8(chacha8seed)
+	for i, x := range chacha8output {
+		if u := p.Uint64(); u != x {
+			t.Errorf("ChaCha8 #%d = %#x, want %#x", i, u, x)
+		}
+	}
+
+	p.Seed(chacha8seed)
+	for i, x := range chacha8output {
+		if u := p.Uint64(); u != x {
+			t.Errorf("ChaCha8 #%d = %#x, want %#x", i, u, x)
+		}
+	}
+}
+
+func TestChaCha8Marshal(t *testing.T) {
+	p := NewChaCha8(chacha8seed)
+	for i, x := range chacha8output {
+		enc, err := p.MarshalBinary()
+		if err != nil {
+			t.Fatalf("#%d: MarshalBinary: %v", i, err)
+		}
+		if string(enc) != chacha8marshal[i] {
+			t.Fatalf("#%d: MarshalBinary=%q, want %q", i, enc, chacha8marshal[i])
+		}
+		*p = ChaCha8{}
+		if err := p.UnmarshalBinary(enc); err != nil {
+			t.Fatalf("#%d: UnmarshalBinary: %v", i, err)
+		}
+		if u := p.Uint64(); u != x {
+			t.Errorf("ChaCha8 #%d = %#x, want %#x", i, u, x)
+		}
+	}
+}
+
+func BenchmarkChaCha8(b *testing.B) {
+	p := NewChaCha8([32]byte{1, 2, 3, 4, 5})
+	var t uint64
+	for n := b.N; n > 0; n-- {
+		t += p.Uint64()
+	}
+	Sink = t
+}
+
+// Golden output test to make sure algorithm never changes,
+// so that its use in math/rand/v2 stays stable.
+
+var chacha8seed = [32]byte([]byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ123456"))
+
+var chacha8output = []uint64{
+	0xb773b6063d4616a5, 0x1160af22a66abc3c, 0x8c2599d9418d287c, 0x7ee07e037edc5cd6,
+	0xcfaa9ee02d1c16ad, 0x0e090eef8febea79, 0x3c82d271128b5b3e, 0x9c5addc11252a34f,
+	0xdf79bb617d6ceea6, 0x36d553591f9d736a, 0xeef0d14e181ee01f, 0x089bfc760ae58436,
+	0xd9e52b59cc2ad268, 0xeb2fb4444b1b8aba, 0x4f95c8a692c46661, 0xc3c6323217cae62c,
+	0x91ebb4367f4e2e7e, 0x784cf2c6a0ec9bc6, 0x5c34ec5c34eabe20, 0x4f0a8f515570daa8,
+	0xfc35dcb4113d6bf2, 0x5b0da44c645554bc, 0x6d963da3db21d9e1, 0xeeaefc3150e500f3,
+	0x2d37923dda3750a5, 0x380d7a626d4bc8b0, 0xeeaf68ede3d7ee49, 0xf4356695883b717c,
+	0x846a9021392495a4, 0x8e8510549630a61b, 0x18dc02545dbae493, 0x0f8f9ff0a65a3d43,
+	0xccf065f7190ff080, 0xfd76d1aa39673330, 0x95d232936cba6433, 0x6c7456d1070cbd17,
+	0x462acfdaff8c6562, 0x5bafab866d34fc6a, 0x0c862f78030a2988, 0xd39a83e407c3163d,
+	0xc00a2b7b45f22ebf, 0x564307c62466b1a9, 0x257e0424b0c072d4, 0x6fb55e99496c28fe,
+	0xae9873a88f5cd4e0, 0x4657362ac60d3773, 0x1c83f91ecdf23e8e, 0x6fdc0792c15387c0,
+	0x36dad2a30dfd2b5c, 0xa4b593290595bdb7, 0x4de18934e4cc02c5, 0xcdc0d604f015e3a7,
+	0xfba0dbf69ad80321, 0x60e8bea3d139de87, 0xd18a4d851ef48756, 0x6366447c2215f34a,
+	0x05682e97d3d007ee, 0x4c0e8978c6d54ab2, 0xcf1e9f6a6712edc2, 0x061439414c80cfd3,
+	0xd1a8b6e2745c0ead, 0x31a7918d45c410e8, 0xabcc61ad90216eec, 0x4040d92d2032a71a,
+	0x3cd2f66ffb40cd68, 0xdcd051c07295857a, 0xeab55cbcd9ab527e, 0x18471dce781bdaac,
+	0xf7f08cd144dc7252, 0x5804e0b13d7f40d1, 0x5cb1a446e4b2d35b, 0xe6d4a728d2138a06,
+	0x05223e40ca60dad8, 0x2d61ec3206ac6a68, 0xab692356874c17b8, 0xc30954417676de1c,
+	0x4f1ace3732225624, 0xfba9510813988338, 0x997f200f52752e11, 0x1116aaafe86221fa,
+	0x07ce3b5cb2a13519, 0x2956bc72bc458314, 0x4188b7926140eb78, 0x56ca6dbfd4adea4d,
+	0x7fe3c22349340ce5, 0x35c08f9c37675f8a, 0x11e1c7fbef5ed521, 0x98adc8464ec1bc75,
+	0xd163b2c73d1203f8, 0x8c761ee043a2f3f3, 0x24b99d6accecd7b7, 0x793e31aa112f0370,
+	0x8e87dc2a19285139, 0x4247ae04f7096e25, 0x514f3122926fe20f, 0xdc6fb3f045d2a7e9,
+	0x15cb30cecdd18eba, 0xcbc7fdecf6900274, 0x3fb5c696dc8ba021, 0xd1664417c8d274e6,
+	0x05f7e445ea457278, 0xf920bbca1b9db657, 0x0c1950b4da22cb99, 0xf875baf1af09e292,
+	0xbed3d7b84250f838, 0xf198e8080fd74160, 0xc9eda51d9b7ea703, 0xf709ef55439bf8f6,
+	0xd20c74feebf116fc, 0x305668eb146d7546, 0x829af3ec10d89787, 0x15b8f9697b551dbc,
+	0xfc823c6c8e64b8c9, 0x345585e8183b40bc, 0x674b4171d6581368, 0x1234d81cd670e9f7,
+	0x0e505210d8a55e19, 0xe8258d69eeeca0dc, 0x05d4c452e8baf67e, 0xe8dbe30116a45599,
+	0x1cf08ce1b1176f00, 0xccf7d0a4b81ecb49, 0x303fea136b2c430e, 0x861d6c139c06c871,
+	0x5f41df72e05e0487, 0x25bd7e1e1ae26b1d, 0xbe9f4004d662a41d, 0x65bf58d483188546,
+	0xd1b27cff69db13cc, 0x01a6663372c1bb36, 0x578dd7577b727f4d, 0x19c78f066c083cf6,
+	0xdbe014d4f9c391bb, 0x97fbb2dd1d13ffb3, 0x31c91e0af9ef8d4f, 0x094dfc98402a43ba,
+	0x069bd61bea37b752, 0x5b72d762e8d986ca, 0x72ee31865904bc85, 0xd1f5fdc5cd36c33e,
+	0xba9b4980a8947cad, 0xece8f05eac49ab43, 0x65fe1184abae38e7, 0x2d7cb9dea5d31452,
+	0xcc71489476e467e3, 0x4c03a258a578c68c, 0x00efdf9ecb0fd8fc, 0x9924cad471e2666d,
+	0x87f8668318f765e9, 0xcb4dc57c1b55f5d8, 0xd373835a86604859, 0xe526568b5540e482,
+	0x1f39040f08586fec, 0xb764f3f00293f8e6, 0x049443a2f6bd50a8, 0x76fec88697d3941a,
+	0x3efb70d039bae7a2, 0xe2f4611368eca8a8, 0x7c007a96e01d2425, 0xbbcce5768e69c5bf,
+	0x784fb4985c42aac3, 0xf72b5091aa223874, 0x3630333fb1e62e07, 0x8e7319ebdebbb8de,
+	0x2a3982bca959fa00, 0xb2b98b9f964ba9b3, 0xf7e31014adb71951, 0xebd0fca3703acc82,
+	0xec654e2a2fe6419a, 0xb326132d55a52e2c, 0x2248c57f44502978, 0x32710c2f342daf16,
+	0x0517b47b5acb2bec, 0x4c7a718fca270937, 0xd69142bed0bcc541, 0xe40ebcb8ff52ce88,
+	0x3e44a2dbc9f828d4, 0xc74c2f4f8f873f58, 0x3dbf648eb799e45b, 0x33f22475ee0e86f8,
+	0x1eb4f9ee16d47f65, 0x40f8d2b8712744e3, 0xb886b4da3cb14572, 0x2086326fbdd6f64d,
+	0xcc3de5907dd882b9, 0xa2e8b49a5ee909df, 0xdbfb8e7823964c10, 0x70dd6089ef0df8d5,
+	0x30141663cdd9c99f, 0x04b805325c240365, 0x7483d80314ac12d6, 0x2b271cb91aa7f5f9,
+	0x97e2245362abddf0, 0x5a84f614232a9fab, 0xf71125fcda4b7fa2, 0x1ca5a61d74b27267,
+	0x38cc6a9b3adbcb45, 0xdde1bb85dc653e39, 0xe9d0c8fa64f89fd4, 0x02c5fb1ecd2b4188,
+	0xf2bd137bca5756e5, 0xadefe25d121be155, 0x56cd1c3c5d893a8e, 0x4c50d337beb65bb9,
+	0x918c5151675cf567, 0xaba649ffcfb56a1e, 0x20c74ab26a2247cd, 0x71166bac853c08da,
+	0xb07befe2e584fc5d, 0xda45ff2a588dbf32, 0xdb98b03c4d75095e, 0x60285ae1aaa65a4c,
+	0xf93b686a263140b8, 0xde469752ee1c180e, 0xcec232dc04129aae, 0xeb916baa1835ea04,
+	0xd49c21c8b64388ff, 0x72a82d9658864888, 0x003348ef7eac66a8, 0x7f6f67e655b209eb,
+	0x532ffb0b7a941b25, 0xd940ade6128deede, 0xdf24f2a1af89fe23, 0x95aa3b4988195ae0,
+	0x3da649404f94be4a, 0x692dad132c3f7e27, 0x40aee76ecaaa9eb8, 0x1294a01e09655024,
+	0x6df797abdba4e4f5, 0xea2fb6024c1d7032, 0x5f4e0492295489fc, 0x57972914ea22e06a,
+	0x9a8137d133aad473, 0xa2e6dd6ae7cdf2f3, 0x9f42644f18086647, 0x16d03301c170bd3e,
+	0x908c416fa546656d, 0xe081503be22e123e, 0x077cf09116c4cc72, 0xcbd25cd264b7f229,
+	0x3db2f468ec594031, 0x46c00e734c9badd5, 0xd0ec0ac72075d861, 0x3037cb3cf80b7630,
+	0x574c3d7b3a2721c6, 0xae99906a0076824b, 0xb175a5418b532e70, 0xd8b3e251ee231ddd,
+	0xb433eec25dca1966, 0x530f30dc5cff9a93, 0x9ff03d98b53cd335, 0xafc4225076558cdf,
+	0xef81d3a28284402a, 0x110bdbf51c110a28, 0x9ae1b255d027e8f6, 0x7de3e0aa24688332,
+	0xe483c3ecd2067ee2, 0xf829328b276137e6, 0xa413ccad57562cad, 0xe6118e8b496acb1f,
+	0x8288dca6da5ec01f, 0xa53777dc88c17255, 0x8a00f1e0d5716eda, 0x618e6f47b7a720a8,
+	0x9e3907b0c692a841, 0x978b42ca963f34f3, 0x75e4b0cd98a7d7ef, 0xde4dbd6e0b5f4752,
+	0x0252e4153f34493f, 0x50f0e7d803734ef9, 0x237766a38ed167ee, 0x4124414001ee39a0,
+	0xd08df643e535bb21, 0x34f575b5a9a80b74, 0x2c343af87297f755, 0xcd8b6d99d821f7cb,
+	0xe376fd7256fc48ae, 0xe1b06e7334352885, 0xfa87b26f86c169eb, 0x36c1604665a971de,
+	0xdba147c2239c8e80, 0x6b208e69fc7f0e24, 0x8795395b6f2b60c3, 0x05dabee9194907f4,
+	0xb98175142f5ed902, 0x5e1701e2021ddc81, 0x0875aba2755eed08, 0x778d83289251de95,
+	0x3bfbe46a039ecb31, 0xb24704fce4cbd7f9, 0x6985ffe9a7c91e3d, 0xc8efb13df249dabb,
+	0xb1037e64b0f4c9f6, 0x55f69fd197d6b7c3, 0x672589d71d68a90c, 0xbebdb8224f50a77e,
+	0x3f589f80007374a7, 0xd307f4635954182a, 0xcff5850c10d4fd90, 0xc6da02dfb6408e15,
+	0x93daeef1e2b1a485, 0x65d833208aeea625, 0xe2b13fa13ed3b5fa, 0x67053538130fb68e,
+	0xc1042f6598218fa9, 0xee5badca749b8a2e, 0x6d22a3f947dae37d, 0xb62c6d1657f4dbaf,
+	0x6e007de69704c20b, 0x1af2b913fc3841d8, 0xdc0e47348e2e8e22, 0x9b1ddef1cf958b22,
+	0x632ed6b0233066b8, 0xddd02d3311bed8f2, 0xf147cfe1834656e9, 0x399aaa49d511597a,
+	0x6b14886979ec0309, 0x64fc4ac36b5afb97, 0xb82f78e07f7cf081, 0x10925c9a323d0e1b,
+	0xf451c79ee13c63f6, 0x7c2fc180317876c7, 0x35a12bd9eecb7d22, 0x335654a539621f90,
+	0xcc32a3f35db581f0, 0xc60748a80b2369cb, 0x7c4dd3b08591156b, 0xac1ced4b6de22291,
+	0xa32cfa2df134def5, 0x627108918dea2a53, 0x0555b1608fcb4ff4, 0x143ee7ac43aaa33c,
+	0xdae90ce7cf4fc218, 0x4d68fc2582bcf4b5, 0x37094e1849135d71, 0xf7857e09f3d49fd8,
+	0x007538c503768be7, 0xedf648ba2f6be601, 0xaa347664dd72513e, 0xbe63893c6ef23b86,
+	0x130b85710605af97, 0xdd765c6b1ef6ab56, 0xf3249a629a97dc6b, 0x2a114f9020fab8e5,
+	0x5a69e027cfc6ad08, 0x3c4ccb36f1a5e050, 0x2e9e7d596834f0a5, 0x2430be6858fce789,
+	0xe90b862f2466e597, 0x895e2884f159a9ec, 0x26ab8fa4902fcb57, 0xa6efff5c54e1fa50,
+	0x333ac4e5811a8255, 0xa58d515f02498611, 0xfe5a09dcb25c6ef4, 0x03898988ab5f5818,
+	0x289ff6242af6c617, 0x3d9dd59fd381ea23, 0x52d7d93d8a8aae51, 0xc76a123d511f786f,
+	0xf68901edaf00c46c, 0x8c630871b590de80, 0x05209c308991e091, 0x1f809f99b4788177,
+	0x11170c2eb6c19fd8, 0x44433c779062ba58, 0xc0acb51af1874c45, 0x9f2e134284809fa1,
+	0xedb523bd15c619fa, 0x02d97fd53ecc23c0, 0xacaf05a34462374c, 0xddd9c6d34bffa11f,
+}
+
+var chacha8marshal = []string{
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x00ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x01ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x02ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x03ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x04ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x05ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x06ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\aABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\bABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\tABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\nABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\vABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\fABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\rABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x0eABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x0fABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x10ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x11ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x12ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x13ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x14ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x15ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x16ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x17ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x18ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x19ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1aABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1bABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1cABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1dABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1eABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1fABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00 ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00!ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\"ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00#ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00$ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00%ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00&ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00'ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00(ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00)ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00*ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00+ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00,ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00-ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00.ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00/ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x000ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x001ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x002ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x003ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x004ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x005ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x006ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x007ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x008ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x009ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00:ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00;ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00<ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00=ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00>ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00?ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00@ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00AABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00BABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00CABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00DABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00EABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00FABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00GABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00HABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00IABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00JABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00KABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00LABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00MABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00NABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00OABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00PABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00QABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00RABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00SABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00TABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00UABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00VABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00WABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00XABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00YABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00ZABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00[ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\\ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00]ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00^ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00_ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00`ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00aABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00bABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00cABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00dABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00eABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00fABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00gABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00hABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00iABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00jABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00kABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00lABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00mABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00nABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00oABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00pABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00qABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00rABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00sABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00tABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00uABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00vABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00wABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00xABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00yABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00zABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00{ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00|ABCDEFGHIJKLMNOPQRSTUVWXYZ123456",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x01>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x02>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x03>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x04>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x05>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x06>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\a>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\b>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\t>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\n>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\v>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\f>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\r>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x0e>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x0f>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x10>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x11>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x12>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x13>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x14>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x15>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x16>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x17>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x18>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x19>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1a>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1b>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1c>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1d>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1e>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1f>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00 >\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00!>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\">\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00#>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00$>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00%>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00&>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00'>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00(>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00)>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00*>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00+>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00,>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00->\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00.>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00/>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x000>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x001>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x002>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x003>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x004>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x005>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x006>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x007>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x008>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x009>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00:>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00;>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00<>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00=>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00>>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00?>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00@>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00A>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00B>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00C>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00D>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00E>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00F>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00G>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00H>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00I>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00J>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00K>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00L>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00M>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00N>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00O>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00P>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00Q>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00R>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00S>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00T>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00U>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00V>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00W>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00X>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00Y>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00Z>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00[>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\\>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00]>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00^>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00_>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00`>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00a>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00b>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00c>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00d>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00e>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00f>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00g>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00h>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00i>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00j>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00k>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00l>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00m>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00n>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00o>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00p>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00q>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00r>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00s>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00t>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00u>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00v>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00w>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00x>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00y>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00z>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00{>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00|>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x01K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x02K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x03K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x04K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x05K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x06K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\aK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\bK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\tK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\nK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\vK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\fK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\rK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x0eK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x0fK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x10K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x11K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x12K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x13K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x14K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x15K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x16K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x17K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x18K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x19K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1aK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1bK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1cK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1dK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1eK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1fK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00 K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00!K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\"K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00#K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00$K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00%K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00&K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00'K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00(K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00)K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00*K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00+K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00,K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00-K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00.K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00/K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x000K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x001K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x002K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x003K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x004K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x005K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x006K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x007K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x008K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x009K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00:K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00;K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00<K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00=K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00>K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00?K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00@K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00AK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00BK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00CK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00DK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00EK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00FK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00GK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00HK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00IK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00JK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00KK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00LK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00MK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00NK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00OK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00PK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00QK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00RK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00SK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00TK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00UK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00VK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00WK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00XK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00YK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00ZK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00[K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00\\K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00]K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00^K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00_K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00`K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00aK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00bK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00cK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00dK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00eK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00fK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00gK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00hK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00iK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00jK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00kK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00lK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00mK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00nK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00oK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00pK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00qK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00rK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00sK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00tK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00uK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00vK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00wK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00xK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00yK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00zK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+	"chacha8:\x00\x00\x00\x00\x00\x00\x00{K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk",
+}
diff --git a/src/math/rand/v2/example_test.go b/src/math/rand/v2/example_test.go
new file mode 100644
index 0000000..070b0ad0
--- /dev/null
+++ b/src/math/rand/v2/example_test.go
@@ -0,0 +1,142 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand_test
+
+import (
+	"fmt"
+	"math/rand/v2"
+	"os"
+	"strings"
+	"text/tabwriter"
+	"time"
+)
+
+// These tests serve as an example but also make sure we don't change
+// the output of the random number generator when given a fixed seed.
+
+func Example() {
+	answers := []string{
+		"It is certain",
+		"It is decidedly so",
+		"Without a doubt",
+		"Yes definitely",
+		"You may rely on it",
+		"As I see it yes",
+		"Most likely",
+		"Outlook good",
+		"Yes",
+		"Signs point to yes",
+		"Reply hazy try again",
+		"Ask again later",
+		"Better not tell you now",
+		"Cannot predict now",
+		"Concentrate and ask again",
+		"Don't count on it",
+		"My reply is no",
+		"My sources say no",
+		"Outlook not so good",
+		"Very doubtful",
+	}
+	fmt.Println("Magic 8-Ball says:", answers[rand.IntN(len(answers))])
+}
+
+// This example shows the use of each of the methods on a *Rand.
+// The use of the global functions is the same, without the receiver.
+func Example_rand() {
+	// Create and seed the generator.
+	// Typically a non-fixed seed should be used, such as Uint64(), Uint64().
+	// Using a fixed seed will produce the same output on every run.
+	r := rand.New(rand.NewPCG(1, 2))
+
+	// The tabwriter here helps us generate aligned output.
+	w := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0)
+	defer w.Flush()
+	show := func(name string, v1, v2, v3 any) {
+		fmt.Fprintf(w, "%s\t%v\t%v\t%v\n", name, v1, v2, v3)
+	}
+
+	// Float32 and Float64 values are in [0, 1).
+	show("Float32", r.Float32(), r.Float32(), r.Float32())
+	show("Float64", r.Float64(), r.Float64(), r.Float64())
+
+	// ExpFloat64 values have an average of 1 but decay exponentially.
+	show("ExpFloat64", r.ExpFloat64(), r.ExpFloat64(), r.ExpFloat64())
+
+	// NormFloat64 values have an average of 0 and a standard deviation of 1.
+	show("NormFloat64", r.NormFloat64(), r.NormFloat64(), r.NormFloat64())
+
+	// Int32, Int64, and Uint32 generate values of the given width.
+	// The Int method (not shown) is like either Int32 or Int64
+	// depending on the size of 'int'.
+	show("Int32", r.Int32(), r.Int32(), r.Int32())
+	show("Int64", r.Int64(), r.Int64(), r.Int64())
+	show("Uint32", r.Uint32(), r.Uint32(), r.Uint32())
+
+	// IntN, Int32N, and Int64N limit their output to be < n.
+	// They do so more carefully than using r.Int()%n.
+	show("IntN(10)", r.IntN(10), r.IntN(10), r.IntN(10))
+	show("Int32N(10)", r.Int32N(10), r.Int32N(10), r.Int32N(10))
+	show("Int64N(10)", r.Int64N(10), r.Int64N(10), r.Int64N(10))
+
+	// Perm generates a random permutation of the numbers [0, n).
+	show("Perm", r.Perm(5), r.Perm(5), r.Perm(5))
+	// Output:
+	// Float32     0.95955694          0.8076733            0.8135684
+	// Float64     0.4297927436037299  0.797802349388613    0.3883664855410056
+	// ExpFloat64  0.43463410545541104 0.5513632046504593   0.7426404617374481
+	// NormFloat64 -0.9303318111676635 -0.04750789419852852 0.22248301107582735
+	// Int32       2020777787          260808523            851126509
+	// Int64       5231057920893523323 4257872588489500903  158397175702351138
+	// Uint32      314478343           1418758728           208955345
+	// IntN(10)    6                   2                    0
+	// Int32N(10)  3                   7                    7
+	// Int64N(10)  8                   9                    4
+	// Perm        [0 3 1 4 2]         [4 1 2 0 3]          [4 3 2 0 1]
+}
+
+func ExamplePerm() {
+	for _, value := range rand.Perm(3) {
+		fmt.Println(value)
+	}
+
+	// Unordered output: 1
+	// 2
+	// 0
+}
+
+func ExampleN() {
+	// Print an int64 in the half-open interval [0, 100).
+	fmt.Println(rand.N(int64(100)))
+
+	// Sleep for a random duration between 0 and 100 milliseconds.
+	time.Sleep(rand.N(100 * time.Millisecond))
+}
+
+func ExampleShuffle() {
+	words := strings.Fields("ink runs from the corners of my mouth")
+	rand.Shuffle(len(words), func(i, j int) {
+		words[i], words[j] = words[j], words[i]
+	})
+	fmt.Println(words)
+}
+
+func ExampleShuffle_slicesInUnison() {
+	numbers := []byte("12345")
+	letters := []byte("ABCDE")
+	// Shuffle numbers, swapping corresponding entries in letters at the same time.
+	rand.Shuffle(len(numbers), func(i, j int) {
+		numbers[i], numbers[j] = numbers[j], numbers[i]
+		letters[i], letters[j] = letters[j], letters[i]
+	})
+	for i := range numbers {
+		fmt.Printf("%c: %c\n", letters[i], numbers[i])
+	}
+}
+
+func ExampleIntN() {
+	fmt.Println(rand.IntN(100))
+	fmt.Println(rand.IntN(100))
+	fmt.Println(rand.IntN(100))
+}
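Illustration (not part of the upstream patch): the comment in Example_rand above recommends seeding from Uint64(), Uint64() when a reproducible stream is not needed. A minimal standalone sketch of that advice, using only APIs present in this package:

	package main

	import (
		"fmt"
		"math/rand/v2"
	)

	func main() {
		// Seed a private generator from the package-level source, per the
		// "Typically a non-fixed seed should be used" comment above.
		r := rand.New(rand.NewPCG(rand.Uint64(), rand.Uint64()))
		fmt.Println(r.IntN(100))
	}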
diff --git a/src/math/rand/v2/exp.go b/src/math/rand/v2/exp.go
new file mode 100644
index 0000000..ed7f727
--- /dev/null
+++ b/src/math/rand/v2/exp.go
@@ -0,0 +1,222 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+	"math"
+)
+
+/*
+ * Exponential distribution
+ *
+ * See "The Ziggurat Method for Generating Random Variables"
+ * (Marsaglia & Tsang, 2000)
+ * https://www.jstatsoft.org/v05/i08/paper [pdf]
+ */
+
+const (
+	re = 7.69711747013104972
+)
+
+// ExpFloat64 returns an exponentially distributed float64 in the range
+// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1).
+// To produce a distribution with a different rate parameter,
+// callers can adjust the output using:
+//
+//	sample = ExpFloat64() / desiredRateParameter
+func (r *Rand) ExpFloat64() float64 {
+	for {
+		u := r.Uint64()
+		j := uint32(u)
+		i := uint8(u >> 32)
+		x := float64(j) * float64(we[i])
+		if j < ke[i] {
+			return x
+		}
+		if i == 0 {
+			return re - math.Log(r.Float64())
+		}
+		if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) {
+			return x
+		}
+	}
+}
+
+var ke = [256]uint32{
+	0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990,
+	0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8,
+	0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78,
+	0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651,
+	0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca,
+	0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8,
+	0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea,
+	0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba,
+	0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed,
+	0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662,
+	0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3,
+	0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace,
+	0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6,
+	0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7,
+	0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415,
+	0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4,
+	0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36,
+	0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46,
+	0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac,
+	0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245,
+	0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52,
+	0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06,
+	0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0,
+	0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9,
+	0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76,
+	0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516,
+	0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289,
+	0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed,
+	0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb,
+	0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e,
+	0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a,
+	0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1,
+	0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b,
+	0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621,
+	0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d,
+	0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3,
+	0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73,
+	0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88,
+	0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a,
+	0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb,
+	0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176,
+	0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be,
+	0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192,
+	0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed,
+	0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936,
+	0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b,
+	0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4,
+	0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1,
+	0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482,
+	0xfa839276, 0xfa263b32, 0xf9b72d1c, 0xf930a1a2, 0xf889f023,
+	0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d,
+	0xe6da6ecf,
+}
+var we = [256]float32{
+	2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11,
+	3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11,
+	5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11,
+	7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11,
+	9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10,
+	1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10,
+	1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10,
+	1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10,
+	1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10,
+	1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10,
+	1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10,
+	1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10,
+	1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10,
+	1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10,
+	2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10,
+	2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10,
+	2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10,
+	2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10,
+	2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10,
+	2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10,
+	2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10,
+	2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10,
+	2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10,
+	2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10,
+	3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10,
+	3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10,
+	3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10,
+	3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10,
+	3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10,
+	3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10,
+	3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10,
+	3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10,
+	3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10,
+	4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10,
+	4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10,
+	4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10,
+	4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10,
+	4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10,
+	4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10,
+	4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10,
+	4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10,
+	5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10,
+	5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10,
+	5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10,
+	5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10,
+	5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10,
+	5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10,
+	6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10,
+	6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10,
+	6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10,
+	6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10,
+	6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10,
+	7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10,
+	7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10,
+	7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10,
+	8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10,
+	8.401528e-10, 8.496445e-10, 8.594247e-10, 8.6951274e-10,
+	8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10,
+	9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10,
+	9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09,
+	1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09,
+	1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09,
+	1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09,
+	1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09,
+}
+var fe = [256]float32{
+	1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933,
+	0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686,
+	0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665,
+	0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967,
+	0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896,
+	0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092,
+	0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386,
+	0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495,
+	0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752,
+	0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325,
+	0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955,
+	0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694,
+	0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218,
+	0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763,
+	0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044,
+	0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796,
+	0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408,
+	0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928,
+	0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393,
+	0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625,
+	0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107,
+	0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878,
+	0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438,
+	0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682,
+	0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852,
+	0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479,
+	0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354,
+	0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494,
+	0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119,
+	0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624,
+	0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574,
+	0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672,
+	0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763,
+	0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816,
+	0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919,
+	0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274,
+	0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195,
+	0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106,
+	0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434,
+	0.062193416, 0.060783047, 0.059384305, 0.057997175,
+	0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236,
+	0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623,
+	0.043502413, 0.042254124, 0.041017443, 0.039792392,
+	0.038578995, 0.037377283, 0.036187284, 0.035009038,
+	0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566,
+	0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421,
+	0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867,
+	0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392,
+	0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414,
+	0.008780315, 0.007963077, 0.0071633533, 0.006381906,
+	0.0056196423, 0.0048776558, 0.004157295, 0.0034602648,
+	0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693,
+	0.00045413437,
+}
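Illustration (not part of the upstream patch): the ExpFloat64 doc comment above leaves the rate adjustment to the caller. A minimal sketch of that formula, with lambda = 2 as an arbitrary placeholder value:

	package main

	import (
		"fmt"
		"math/rand/v2"
	)

	func main() {
		const lambda = 2.0 // placeholder rate; the samples then have mean 1/lambda
		r := rand.New(rand.NewPCG(1, 2))
		for i := 0; i < 3; i++ {
			// ExpFloat64 is a rate-1 exponential; divide to get rate lambda.
			fmt.Println(r.ExpFloat64() / lambda)
		}
	}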
diff --git a/src/math/rand/v2/export_test.go b/src/math/rand/v2/export_test.go
new file mode 100644
index 0000000..16ecb20
--- /dev/null
+++ b/src/math/rand/v2/export_test.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+func GetNormalDistributionParameters() (float64, [128]uint32, [128]float32, [128]float32) {
+	return rn, kn, wn, fn
+}
+
+func GetExponentialDistributionParameters() (float64, [256]uint32, [256]float32, [256]float32) {
+	return re, ke, we, fe
+}
diff --git a/src/math/rand/v2/normal.go b/src/math/rand/v2/normal.go
new file mode 100644
index 0000000..ea1ae40
--- /dev/null
+++ b/src/math/rand/v2/normal.go
@@ -0,0 +1,157 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+	"math"
+)
+
+/*
+ * Normal distribution
+ *
+ * See "The Ziggurat Method for Generating Random Variables"
+ * (Marsaglia & Tsang, 2000)
+ * http://www.jstatsoft.org/v05/i08/paper [pdf]
+ */
+
+const (
+	rn = 3.442619855899
+)
+
+func absInt32(i int32) uint32 {
+	if i < 0 {
+		return uint32(-i)
+	}
+	return uint32(i)
+}
+
+// NormFloat64 returns a normally distributed float64 in
+// the range -math.MaxFloat64 through +math.MaxFloat64 inclusive,
+// with standard normal distribution (mean = 0, stddev = 1).
+// To produce a different normal distribution, callers can
+// adjust the output using:
+//
+//	sample = NormFloat64() * desiredStdDev + desiredMean
+func (r *Rand) NormFloat64() float64 {
+	for {
+		u := r.Uint64()
+		j := int32(u) // Possibly negative
+		i := u >> 32 & 0x7F
+		x := float64(j) * float64(wn[i])
+		if absInt32(j) < kn[i] {
+			// This case should be hit better than 99% of the time.
+			return x
+		}
+
+		if i == 0 {
+			// This extra work is only required for the base strip.
+			for {
+				x = -math.Log(r.Float64()) * (1.0 / rn)
+				y := -math.Log(r.Float64())
+				if y+y >= x*x {
+					break
+				}
+			}
+			if j > 0 {
+				return rn + x
+			}
+			return -rn - x
+		}
+		if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) {
+			return x
+		}
+	}
+}
+
+var kn = [128]uint32{
+	0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2,
+	0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d,
+	0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7,
+	0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883,
+	0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30,
+	0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa,
+	0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d,
+	0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18,
+	0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924,
+	0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a,
+	0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4,
+	0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62,
+	0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e,
+	0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473,
+	0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd,
+	0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568,
+	0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08,
+	0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc,
+	0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94,
+	0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb,
+	0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075,
+	0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba,
+	0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded,
+	0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72,
+	0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a,
+	0x7ba90bdc, 0x7a722176, 0x77d664e5,
+}
+var wn = [128]float32{
+	1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10,
+	2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10,
+	2.9073963e-10, 3.042997e-10, 3.1699796e-10, 3.289802e-10,
+	3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10,
+	3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10,
+	4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10,
+	4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10,
+	4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10,
+	5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10,
+	5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10,
+	5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10,
+	5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10,
+	6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10,
+	6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10,
+	6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10,
+	6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10,
+	7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10,
+	7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10,
+	7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10,
+	7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10,
+	8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10,
+	8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10,
+	8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10,
+	9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10,
+	9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10,
+	9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09,
+	1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09,
+	1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09,
+	1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09,
+	1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09,
+	1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09,
+	1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09,
+}
+var fn = [128]float32{
+	1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303,
+	0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177,
+	0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569,
+	0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277,
+	0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434,
+	0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903,
+	0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055,
+	0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766,
+	0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872,
+	0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642,
+	0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446,
+	0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685,
+	0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484,
+	0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807,
+	0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239,
+	0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877,
+	0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499,
+	0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037,
+	0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265,
+	0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602,
+	0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467,
+	0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058,
+	0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863,
+	0.040742867, 0.03688439, 0.033087887, 0.029356318,
+	0.025693292, 0.022103304, 0.018592102, 0.015167298,
+	0.011839478, 0.008624485, 0.005548995, 0.0026696292,
+}
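Illustration (not part of the upstream patch): a minimal sketch of the scale-and-shift adjustment described in the NormFloat64 doc comment above; the mean and standard deviation values are arbitrary placeholders:

	package main

	import (
		"fmt"
		"math/rand/v2"
	)

	func main() {
		const (
			mean   = 10.0 // placeholder desiredMean
			stddev = 2.5  // placeholder desiredStdDev
		)
		r := rand.New(rand.NewPCG(1, 2))
		for i := 0; i < 3; i++ {
			// NormFloat64 is standard normal; scale and shift per the doc comment.
			fmt.Println(r.NormFloat64()*stddev + mean)
		}
	}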
diff --git a/src/math/rand/v2/pcg.go b/src/math/rand/v2/pcg.go
new file mode 100644
index 0000000..77708d7
--- /dev/null
+++ b/src/math/rand/v2/pcg.go
@@ -0,0 +1,121 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+	"errors"
+	"math/bits"
+)
+
+// https://numpy.org/devdocs/reference/random/upgrading-pcg64.html
+// https://github.com/imneme/pcg-cpp/commit/871d0494ee9c9a7b7c43f753e3d8ca47c26f8005
+
+// A PCG is a PCG generator with 128 bits of internal state.
+// A zero PCG is equivalent to NewPCG(0, 0).
+type PCG struct {
+	hi uint64
+	lo uint64
+}
+
+// NewPCG returns a new PCG seeded with the given values.
+func NewPCG(seed1, seed2 uint64) *PCG {
+	return &PCG{seed1, seed2}
+}
+
+// Seed resets the PCG to behave the same way as NewPCG(seed1, seed2).
+func (p *PCG) Seed(seed1, seed2 uint64) {
+	p.hi = seed1
+	p.lo = seed2
+}
+
+// binary.bigEndian.Uint64, copied to avoid dependency
+func beUint64(b []byte) uint64 {
+	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+	return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+		uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+// binary.bigEndian.PutUint64, copied to avoid dependency
+func bePutUint64(b []byte, v uint64) {
+	_ = b[7] // early bounds check to guarantee safety of writes below
+	b[0] = byte(v >> 56)
+	b[1] = byte(v >> 48)
+	b[2] = byte(v >> 40)
+	b[3] = byte(v >> 32)
+	b[4] = byte(v >> 24)
+	b[5] = byte(v >> 16)
+	b[6] = byte(v >> 8)
+	b[7] = byte(v)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (p *PCG) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 20)
+	copy(b, "pcg:")
+	bePutUint64(b[4:], p.hi)
+	bePutUint64(b[4+8:], p.lo)
+	return b, nil
+}
+
+var errUnmarshalPCG = errors.New("invalid PCG encoding")
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (p *PCG) UnmarshalBinary(data []byte) error {
+	if len(data) != 20 || string(data[:4]) != "pcg:" {
+		return errUnmarshalPCG
+	}
+	p.hi = beUint64(data[4:])
+	p.lo = beUint64(data[4+8:])
+	return nil
+}
+
+func (p *PCG) next() (hi, lo uint64) {
+	// https://github.com/imneme/pcg-cpp/blob/428802d1a5/include/pcg_random.hpp#L161
+	//
+	// Numpy's PCG multiplies by the 64-bit value cheapMul
+	// instead of the 128-bit value used here and in the official PCG code.
+	// This does not seem worthwhile, at least for Go: not having any high
+	// bits in the multiplier reduces the effect of low bits on the highest bits,
+	// and it only saves 1 multiply out of 3.
+	// (On 32-bit systems, it saves 1 out of 6, since Mul64 is doing 4.)
+	const (
+		mulHi = 2549297995355413924
+		mulLo = 4865540595714422341
+		incHi = 6364136223846793005
+		incLo = 1442695040888963407
+	)
+
+	// state = state * mul + inc
+	hi, lo = bits.Mul64(p.lo, mulLo)
+	hi += p.hi*mulLo + p.lo*mulHi
+	lo, c := bits.Add64(lo, incLo, 0)
+	hi, _ = bits.Add64(hi, incHi, c)
+	p.lo = lo
+	p.hi = hi
+	return hi, lo
+}
+
+// Uint64 returns a uniformly-distributed random uint64 value.
+func (p *PCG) Uint64() uint64 {
+	hi, lo := p.next()
+
+	// XSL-RR would be
+	//	hi, lo := p.next()
+	//	return bits.RotateLeft64(lo^hi, -int(hi>>58))
+	// but Numpy uses DXSM and O'Neill suggests doing the same.
+	// See https://github.com/golang/go/issues/21835#issuecomment-739065688
+	// and following comments.
+
+	// DXSM "double xorshift multiply"
+	// https://github.com/imneme/pcg-cpp/blob/428802d1a5/include/pcg_random.hpp#L1015
+
+	// https://github.com/imneme/pcg-cpp/blob/428802d1a5/include/pcg_random.hpp#L176
+	const cheapMul = 0xda942042e4dd58b5
+	hi ^= hi >> 32
+	hi *= cheapMul
+	hi ^= hi >> 48
+	hi *= (lo | 1)
+	return hi
+}
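Illustration (not part of the upstream patch): a minimal sketch of saving and restoring PCG state through the MarshalBinary and UnmarshalBinary methods defined above; the test file below performs the same round trip:

	package main

	import (
		"fmt"
		"math/rand/v2"
	)

	func main() {
		p := rand.NewPCG(1, 2)
		p.Uint64() // advance the state

		// Snapshot the 20-byte "pcg:" encoding produced by MarshalBinary.
		state, err := p.MarshalBinary()
		if err != nil {
			panic(err)
		}

		var q rand.PCG
		if err := q.UnmarshalBinary(state); err != nil {
			panic(err)
		}

		// Both generators now continue from the same state.
		fmt.Println(p.Uint64() == q.Uint64()) // true
	}

Only the NewPCG, Uint64, MarshalBinary, and UnmarshalBinary methods defined in this file are assumed.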
diff --git a/src/math/rand/v2/pcg_test.go b/src/math/rand/v2/pcg_test.go
new file mode 100644
index 0000000..db866c8
--- /dev/null
+++ b/src/math/rand/v2/pcg_test.go
@@ -0,0 +1,79 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand_test
+
+import (
+	. "math/rand/v2"
+	"testing"
+)
+
+func BenchmarkPCG_DXSM(b *testing.B) {
+	var p PCG
+	var t uint64
+	for n := b.N; n > 0; n-- {
+		t += p.Uint64()
+	}
+	Sink = t
+}
+
+func TestPCGMarshal(t *testing.T) {
+	var p PCG
+	const (
+		seed1 = 0x123456789abcdef0
+		seed2 = 0xfedcba9876543210
+		want  = "pcg:\x12\x34\x56\x78\x9a\xbc\xde\xf0\xfe\xdc\xba\x98\x76\x54\x32\x10"
+	)
+	p.Seed(seed1, seed2)
+	data, err := p.MarshalBinary()
+	if string(data) != want || err != nil {
+		t.Errorf("MarshalBinary() = %q, %v, want %q, nil", data, err, want)
+	}
+
+	q := PCG{}
+	if err := q.UnmarshalBinary([]byte(want)); err != nil {
+		t.Fatalf("UnmarshalBinary(): %v", err)
+	}
+	if q != p {
+		t.Fatalf("after round trip, q = %#x, but p = %#x", q, p)
+	}
+
+	qu := q.Uint64()
+	pu := p.Uint64()
+	if qu != pu {
+		t.Errorf("after round trip, q.Uint64() = %#x, but p.Uint64() = %#x", qu, pu)
+	}
+}
+
+func TestPCG(t *testing.T) {
+	p := NewPCG(1, 2)
+	want := []uint64{
+		0xc4f5a58656eef510,
+		0x9dcec3ad077dec6c,
+		0xc8d04605312f8088,
+		0xcbedc0dcb63ac19a,
+		0x3bf98798cae97950,
+		0xa8c6d7f8d485abc,
+		0x7ffa3780429cd279,
+		0x730ad2626b1c2f8e,
+		0x21ff2330f4a0ad99,
+		0x2f0901a1947094b0,
+		0xa9735a3cfbe36cef,
+		0x71ddb0a01a12c84a,
+		0xf0e53e77a78453bb,
+		0x1f173e9663be1e9d,
+		0x657651da3ac4115e,
+		0xc8987376b65a157b,
+		0xbb17008f5fca28e7,
+		0x8232bd645f29ed22,
+		0x12be8f07ad14c539,
+		0x54908a48e8e4736e,
+	}
+
+	for i, x := range want {
+		if u := p.Uint64(); u != x {
+			t.Errorf("PCG #%d = %#x, want %#x", i, u, x)
+		}
+	}
+}
diff --git a/src/math/rand/v2/race_test.go b/src/math/rand/v2/race_test.go
new file mode 100644
index 0000000..5ab7a21
--- /dev/null
+++ b/src/math/rand/v2/race_test.go
@@ -0,0 +1,44 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand_test
+
+import (
+	. "math/rand/v2"
+	"sync"
+	"testing"
+)
+
+// TestConcurrent exercises the rand API concurrently, triggering situations
+// where the race detector is likely to detect issues.
+func TestConcurrent(t *testing.T) {
+	const (
+		numRoutines = 10
+		numCycles   = 10
+	)
+	var wg sync.WaitGroup
+	defer wg.Wait()
+	wg.Add(numRoutines)
+	for i := 0; i < numRoutines; i++ {
+		go func(i int) {
+			defer wg.Done()
+			var seed int64
+			for j := 0; j < numCycles; j++ {
+				seed += int64(ExpFloat64())
+				seed += int64(Float32())
+				seed += int64(Float64())
+				seed += int64(IntN(Int()))
+				seed += int64(Int32N(Int32()))
+				seed += int64(Int64N(Int64()))
+				seed += int64(NormFloat64())
+				seed += int64(Uint32())
+				seed += int64(Uint64())
+				for _, p := range Perm(10) {
+					seed += int64(p)
+				}
+			}
+			_ = seed
+		}(i)
+	}
+}
diff --git a/src/math/rand/v2/rand.go b/src/math/rand/v2/rand.go
new file mode 100644
index 0000000..f490408
--- /dev/null
+++ b/src/math/rand/v2/rand.go
@@ -0,0 +1,363 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rand implements pseudo-random number generators suitable for tasks
+// such as simulation, but it should not be used for security-sensitive work.
+//
+// Random numbers are generated by a [Source], usually wrapped in a [Rand].
+// Both types should be used by a single goroutine at a time: sharing among
+// multiple goroutines requires some kind of synchronization.
+//
+// Top-level functions, such as [Float64] and [Int],
+// are safe for concurrent use by multiple goroutines.
+//
+// This package's outputs might be easily predictable regardless of how it's
+// seeded. For random numbers suitable for security-sensitive work, see the
+// crypto/rand package.
+package rand
+
+import (
+	"math/bits"
+	_ "unsafe" // for go:linkname
+)
+
+// A Source is a source of uniformly-distributed
+// pseudo-random uint64 values in the range [0, 1<<64).
+//
+// A Source is not safe for concurrent use by multiple goroutines.
+type Source interface {
+	Uint64() uint64
+}
+
+// A Rand is a source of random numbers.
+type Rand struct {
+	src Source
+}
+
+// New returns a new Rand that uses random values from src
+// to generate other random values.
+func New(src Source) *Rand {
+	return &Rand{src: src}
+}
+
+// Int64 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *Rand) Int64() int64 { return int64(r.src.Uint64() &^ (1 << 63)) }
+
+// Uint32 returns a pseudo-random 32-bit value as a uint32.
+func (r *Rand) Uint32() uint32 { return uint32(r.src.Uint64() >> 32) }
+
+// Uint64 returns a pseudo-random 64-bit value as a uint64.
+func (r *Rand) Uint64() uint64 { return r.src.Uint64() }
+
+// Int32 returns a non-negative pseudo-random 31-bit integer as an int32.
+func (r *Rand) Int32() int32 { return int32(r.src.Uint64() >> 33) }
+
+// Int returns a non-negative pseudo-random int.
+func (r *Rand) Int() int { return int(uint(r.src.Uint64()) << 1 >> 1) }
+
+// Int64N returns, as an int64, a non-negative pseudo-random number in the half-open interval [0,n).
+// It panics if n <= 0.
+func (r *Rand) Int64N(n int64) int64 {
+	if n <= 0 {
+		panic("invalid argument to Int64N")
+	}
+	return int64(r.uint64n(uint64(n)))
+}
+
+// Uint64N returns, as a uint64, a non-negative pseudo-random number in the half-open interval [0,n).
+// It panics if n == 0.
+func (r *Rand) Uint64N(n uint64) uint64 {
+	if n == 0 {
+		panic("invalid argument to Uint64N")
+	}
+	return r.uint64n(n)
+}
+
+// uint64n is the no-bounds-checks version of Uint64N.
+func (r *Rand) uint64n(n uint64) uint64 {
+	if is32bit && uint64(uint32(n)) == n {
+		return uint64(r.uint32n(uint32(n)))
+	}
+	if n&(n-1) == 0 { // n is power of two, can mask
+		return r.Uint64() & (n - 1)
+	}
+
+	// Suppose we have a uint64 x uniform in the range [0,2⁶⁴)
+	// and want to reduce it to the range [0,n) preserving exact uniformity.
+	// We can simulate a scaling arbitrary precision x * (n/2⁶⁴) by
+	// the high bits of a double-width multiply of x*n, meaning (x*n)/2⁶⁴.
+	// Since there are 2⁶⁴ possible inputs x and only n possible outputs,
+	// the output is necessarily biased if n does not divide 2⁶⁴.
+	// In general (x*n)/2⁶⁴ = k for x*n in [k*2⁶⁴,(k+1)*2⁶⁴).
+	// There are either floor(2⁶⁴/n) or ceil(2⁶⁴/n) possible products
+	// in that range, depending on k.
+	// But suppose we reject the sample and try again when
+	// x*n is in [k*2⁶⁴, k*2⁶⁴+(2⁶⁴%n)), meaning rejecting fewer than n possible
+	// outcomes out of the 2⁶⁴.
+	// Now there are exactly floor(2⁶⁴/n) possible ways to produce
+	// each output value k, so we've restored uniformity.
+	// To get valid uint64 math, 2⁶⁴ % n = (2⁶⁴ - n) % n = -n % n,
+	// so the direct implementation of this algorithm would be:
+	//
+	//	hi, lo := bits.Mul64(r.Uint64(), n)
+	//	thresh := -n % n
+	//	for lo < thresh {
+	//		hi, lo = bits.Mul64(r.Uint64(), n)
+	//	}
+	//
+	// That still leaves an expensive 64-bit division that we would rather avoid.
+	// We know that thresh < n, and n is usually much less than 2⁶⁴, so we can
+	// avoid the last four lines unless lo < n.
+	//
+	// See also:
+	// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction
+	// https://lemire.me/blog/2016/06/30/fast-random-shuffling
+	hi, lo := bits.Mul64(r.Uint64(), n)
+	if lo < n {
+		thresh := -n % n
+		for lo < thresh {
+			hi, lo = bits.Mul64(r.Uint64(), n)
+		}
+	}
+	return hi
+}
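To make the comment above easier to follow, here is a minimal standalone sketch of the same multiply-and-reject reduction, outside this package; the helper name reduce and the caller-supplied rng parameter are illustrative only.

package main

import (
	"fmt"
	"math/bits"
	"math/rand/v2"
)

// reduce maps a uniform uint64 stream onto [0, n) without bias, using the
// widening multiply plus the rare rejection step described above.
// Assumes n > 0 and that rng returns uniformly distributed uint64 values.
func reduce(rng func() uint64, n uint64) uint64 {
	hi, lo := bits.Mul64(rng(), n)
	if lo < n { // only then can lo be under the rejection threshold
		thresh := -n % n // equals (1<<64) % n in uint64 arithmetic
		for lo < thresh {
			hi, lo = bits.Mul64(rng(), n)
		}
	}
	return hi
}

func main() {
	fmt.Println(reduce(rand.Uint64, 6)) // a fair roll in [0, 6)
}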
+
+// uint32n is an identical computation to uint64n
+// but optimized for 32-bit systems.
+func (r *Rand) uint32n(n uint32) uint32 {
+	if n&(n-1) == 0 { // n is power of two, can mask
+		return uint32(r.Uint64()) & (n - 1)
+	}
+	// On 64-bit systems we still use the uint64 code below because
+	// the probability of a random uint64 lo being < a uint32 n is near zero,
+	// meaning the unbiasing loop almost never runs.
+	// On 32-bit systems, here we need to implement that same logic in 32-bit math,
+	// both to preserve the exact output sequence observed on 64-bit machines
+	// and to preserve the optimization that the unbiasing loop almost never runs.
+	//
+	// We want to compute
+	// 	hi, lo := bits.Mul64(r.Uint64(), n)
+	// In terms of 32-bit halves, this is:
+	// 	x1:x0 := r.Uint64()
+	// 	0:hi, lo1:lo0 := bits.Mul64(x1:x0, 0:n)
+	// Writing out the multiplication in terms of bits.Mul32 allows
+	// using direct hardware instructions and avoiding
+	// the computations involving these zeros.
+	x := r.Uint64()
+	lo1a, lo0 := bits.Mul32(uint32(x), n)
+	hi, lo1b := bits.Mul32(uint32(x>>32), n)
+	lo1, c := bits.Add32(lo1a, lo1b, 0)
+	hi += c
+	if lo1 == 0 && lo0 < uint32(n) {
+		n64 := uint64(n)
+		thresh := uint32(-n64 % n64)
+		for lo1 == 0 && lo0 < thresh {
+			x := r.Uint64()
+			lo1a, lo0 = bits.Mul32(uint32(x), n)
+			hi, lo1b = bits.Mul32(uint32(x>>32), n)
+			lo1, c = bits.Add32(lo1a, lo1b, 0)
+			hi += c
+		}
+	}
+	return hi
+}
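As a sanity check on the 32-bit decomposition above (again a standalone sketch, not part of the patch), the two bits.Mul32 products plus the carry recombine into exactly the hi word that bits.Mul64 would have produced:

package main

import (
	"fmt"
	"math/bits"
	"math/rand/v2"
)

func main() {
	x := rand.Uint64()
	n := uint32(1000003) // arbitrary non-power-of-two modulus

	hi64, _ := bits.Mul64(x, uint64(n))

	// The same computation written out in 32-bit halves, as in uint32n.
	lo1a, _ := bits.Mul32(uint32(x), n)
	hi32, lo1b := bits.Mul32(uint32(x>>32), n)
	_, c := bits.Add32(lo1a, lo1b, 0)
	hi32 += c

	fmt.Println(hi64 == uint64(hi32)) // true
}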
+
+// Int32N returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n).
+// It panics if n <= 0.
+func (r *Rand) Int32N(n int32) int32 {
+	if n <= 0 {
+		panic("invalid argument to Int32N")
+	}
+	return int32(r.uint64n(uint64(n)))
+}
+
+// Uint32N returns, as a uint32, a non-negative pseudo-random number in the half-open interval [0,n).
+// It panics if n == 0.
+func (r *Rand) Uint32N(n uint32) uint32 {
+	if n == 0 {
+		panic("invalid argument to Uint32N")
+	}
+	return uint32(r.uint64n(uint64(n)))
+}
+
+const is32bit = ^uint(0)>>32 == 0
+
+// IntN returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n).
+// It panics if n <= 0.
+func (r *Rand) IntN(n int) int {
+	if n <= 0 {
+		panic("invalid argument to IntN")
+	}
+	return int(r.uint64n(uint64(n)))
+}
+
+// UintN returns, as a uint, a non-negative pseudo-random number in the half-open interval [0,n).
+// It panics if n == 0.
+func (r *Rand) UintN(n uint) uint {
+	if n == 0 {
+		panic("invalid argument to UintN")
+	}
+	return uint(r.uint64n(uint64(n)))
+}
+
+// Float64 returns, as a float64, a pseudo-random number in the half-open interval [0.0,1.0).
+func (r *Rand) Float64() float64 {
+	// There are exactly 1<<53 float64s in [0,1). Use Intn(1<<53) / (1<<53).
+	return float64(r.Uint64()<<11>>11) / (1 << 53)
+}
+
+// Float32 returns, as a float32, a pseudo-random number in the half-open interval [0.0,1.0).
+func (r *Rand) Float32() float32 {
+	// There are exactly 1<<24 float32s in [0,1). Use Intn(1<<24) / (1<<24).
+	return float32(r.Uint32()<<8>>8) / (1 << 24)
+}
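Since Float64 and Float32 only promise values in [0,1), callers scale and shift to other intervals themselves; a small standalone sketch (the helper name uniform is invented here, not part of the package):

package main

import (
	"fmt"
	"math/rand/v2"
)

// uniform returns a pseudo-random float64 in the half-open interval [lo, hi).
func uniform(lo, hi float64) float64 {
	return lo + rand.Float64()*(hi-lo)
}

func main() {
	fmt.Println(uniform(-2, 2)) // some value in [-2, 2)
}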
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers
+// in the half-open interval [0,n).
+func (r *Rand) Perm(n int) []int {
+	p := make([]int, n)
+	for i := range p {
+		p[i] = i
+	}
+	r.Shuffle(len(p), func(i, j int) { p[i], p[j] = p[j], p[i] })
+	return p
+}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func (r *Rand) Shuffle(n int, swap func(i, j int)) {
+	if n < 0 {
+		panic("invalid argument to Shuffle")
+	}
+
+	// Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
+	// Shuffle really ought not be called with n that doesn't fit in 32 bits.
+	// Not only will it take a very long time, but with 2³¹! possible permutations,
+	// there's no way that any PRNG can have a big enough internal state to
+	// generate even a minuscule percentage of the possible permutations.
+	// Nevertheless, the right API signature accepts an int n, so handle it as best we can.
+	for i := n - 1; i > 0; i-- {
+		j := int(r.uint64n(uint64(i + 1)))
+		swap(i, j)
+	}
+}
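A short usage sketch for Shuffle, permuting a caller-owned slice in place via the swap callback (standalone, not part of the patch):

package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	words := []string{"ant", "bee", "cat", "dog", "elk"}
	rand.Shuffle(len(words), func(i, j int) {
		words[i], words[j] = words[j], words[i]
	})
	fmt.Println(words) // the same five strings in a pseudo-random order
}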
+
+/*
+ * Top-level convenience functions
+ */
+
+// globalRand is the source of random numbers for the top-level
+// convenience functions.
+var globalRand = &Rand{src: &runtimeSource{}}
+
+//go:linkname runtime_rand runtime.rand
+func runtime_rand() uint64
+
+// runtimeSource is a Source that uses the runtime fastrand functions.
+type runtimeSource struct{}
+
+func (*runtimeSource) Uint64() uint64 {
+	return runtime_rand()
+}
+
+// Int64 returns a non-negative pseudo-random 63-bit integer as an int64
+// from the default Source.
+func Int64() int64 { return globalRand.Int64() }
+
+// Uint32 returns a pseudo-random 32-bit value as a uint32
+// from the default Source.
+func Uint32() uint32 { return globalRand.Uint32() }
+
+// Uint64N returns, as a uint64, a pseudo-random number in the half-open interval [0,n)
+// from the default Source.
+// It panics if n == 0.
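PLACEHOLDER-NOT-USED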
+func Uint64N(n uint64) uint64 { return globalRand.Uint64N(n) }
+
+// Uint32N returns, as a uint32, a pseudo-random number in the half-open interval [0,n)
+// from the default Source.
+// It panics if n == 0.
+func Uint32N(n uint32) uint32 { return globalRand.Uint32N(n) }
+
+// Uint64 returns a pseudo-random 64-bit value as a uint64
+// from the default Source.
+func Uint64() uint64 { return globalRand.Uint64() }
+
+// Int32 returns a non-negative pseudo-random 31-bit integer as an int32
+// from the default Source.
+func Int32() int32 { return globalRand.Int32() }
+
+// Int returns a non-negative pseudo-random int from the default Source.
+func Int() int { return globalRand.Int() }
+
+// Int64N returns, as an int64, a pseudo-random number in the half-open interval [0,n)
+// from the default Source.
+// It panics if n <= 0.
+func Int64N(n int64) int64 { return globalRand.Int64N(n) }
+
+// Int32N returns, as an int32, a pseudo-random number in the half-open interval [0,n)
+// from the default Source.
+// It panics if n <= 0.
+func Int32N(n int32) int32 { return globalRand.Int32N(n) }
+
+// IntN returns, as an int, a pseudo-random number in the half-open interval [0,n)
+// from the default Source.
+// It panics if n <= 0.
+func IntN(n int) int { return globalRand.IntN(n) }
+
+// UintN returns, as a uint, a pseudo-random number in the half-open interval [0,n)
+// from the default Source.
+// It panics if n == 0.
+func UintN(n uint) uint { return globalRand.UintN(n) }
+
+// N returns a pseudo-random number in the half-open interval [0,n) from the default Source.
+// The type parameter Int can be any integer type.
+// It panics if n <= 0.
+func N[Int intType](n Int) Int {
+	if n <= 0 {
+		panic("invalid argument to N")
+	}
+	return Int(globalRand.uint64n(uint64(n)))
+}
+
+type intType interface {
+	~int | ~int8 | ~int16 | ~int32 | ~int64 |
+		~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
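Because the type parameter of N covers every defined integer type, it also works directly with named types such as time.Duration; a brief standalone sketch:

package main

import (
	"fmt"
	"math/rand/v2"
	"time"
)

func main() {
	fmt.Println(rand.N(100))             // int in [0, 100)
	fmt.Println(rand.N(int64(1e9)))      // int64 in [0, 1e9)
	fmt.Println(rand.N(5 * time.Second)) // time.Duration in [0, 5s)
}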
+
+// Float64 returns, as a float64, a pseudo-random number in the half-open interval [0.0,1.0)
+// from the default Source.
+func Float64() float64 { return globalRand.Float64() }
+
+// Float32 returns, as a float32, a pseudo-random number in the half-open interval [0.0,1.0)
+// from the default Source.
+func Float32() float32 { return globalRand.Float32() }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers
+// in the half-open interval [0,n) from the default Source.
+func Perm(n int) []int { return globalRand.Perm(n) }
+
+// Shuffle pseudo-randomizes the order of elements using the default Source.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { globalRand.Shuffle(n, swap) }
+
+// NormFloat64 returns a normally distributed float64 in the range
+// [-math.MaxFloat64, +math.MaxFloat64] with
+// standard normal distribution (mean = 0, stddev = 1)
+// from the default Source.
+// To produce a different normal distribution, callers can
+// adjust the output using:
+//
+//	sample = NormFloat64() * desiredStdDev + desiredMean
+func NormFloat64() float64 { return globalRand.NormFloat64() }
+
+// ExpFloat64 returns an exponentially distributed float64 in the range
+// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source.
+// To produce a distribution with a different rate parameter,
+// callers can adjust the output using:
+//
+//	sample = ExpFloat64() / desiredRateParameter
+func ExpFloat64() float64 { return globalRand.ExpFloat64() }
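A brief standalone sketch of the two adjustment formulas in the doc comments above, assuming a caller wants mean 10 and standard deviation 2 for the normal sample and rate 0.5 for the exponential one:

package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	const (
		desiredMean   = 10.0
		desiredStdDev = 2.0
		desiredRate   = 0.5 // lambda; the exponential's mean is 1/lambda = 2
	)
	normal := rand.NormFloat64()*desiredStdDev + desiredMean
	exponential := rand.ExpFloat64() / desiredRate
	fmt.Println(normal, exponential)
}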
diff --git a/src/math/rand/v2/rand_test.go b/src/math/rand/v2/rand_test.go
new file mode 100644
index 0000000..c4b53fa
--- /dev/null
+++ b/src/math/rand/v2/rand_test.go
@@ -0,0 +1,787 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand_test
+
+import (
+	"errors"
+	"fmt"
+	"internal/testenv"
+	"math"
+	. "math/rand/v2"
+	"os"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"testing"
+)
+
+const (
+	numTestSamples = 10000
+)
+
+var rn, kn, wn, fn = GetNormalDistributionParameters()
+var re, ke, we, fe = GetExponentialDistributionParameters()
+
+type statsResults struct {
+	mean        float64
+	stddev      float64
+	closeEnough float64
+	maxError    float64
+}
+
+func max(a, b float64) float64 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func nearEqual(a, b, closeEnough, maxError float64) bool {
+	absDiff := math.Abs(a - b)
+	if absDiff < closeEnough { // Necessary when one value is zero and one value is close to zero.
+		return true
+	}
+	return absDiff/max(math.Abs(a), math.Abs(b)) < maxError
+}
+
+var testSeeds = []uint64{1, 1754801282, 1698661970, 1550503961}
+
+// checkSimilarDistribution returns success if the mean and stddev of the
+// two statsResults are similar.
+func (this *statsResults) checkSimilarDistribution(expected *statsResults) error {
+	if !nearEqual(this.mean, expected.mean, expected.closeEnough, expected.maxError) {
+		s := fmt.Sprintf("mean %v != %v (allowed error %v, %v)", this.mean, expected.mean, expected.closeEnough, expected.maxError)
+		fmt.Println(s)
+		return errors.New(s)
+	}
+	if !nearEqual(this.stddev, expected.stddev, expected.closeEnough, expected.maxError) {
+		s := fmt.Sprintf("stddev %v != %v (allowed error %v, %v)", this.stddev, expected.stddev, expected.closeEnough, expected.maxError)
+		fmt.Println(s)
+		return errors.New(s)
+	}
+	return nil
+}
+
+func getStatsResults(samples []float64) *statsResults {
+	res := new(statsResults)
+	var sum, squaresum float64
+	for _, s := range samples {
+		sum += s
+		squaresum += s * s
+	}
+	res.mean = sum / float64(len(samples))
+	res.stddev = math.Sqrt(squaresum/float64(len(samples)) - res.mean*res.mean)
+	return res
+}
+
+func checkSampleDistribution(t *testing.T, samples []float64, expected *statsResults) {
+	t.Helper()
+	actual := getStatsResults(samples)
+	err := actual.checkSimilarDistribution(expected)
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func checkSampleSliceDistributions(t *testing.T, samples []float64, nslices int, expected *statsResults) {
+	t.Helper()
+	chunk := len(samples) / nslices
+	for i := 0; i < nslices; i++ {
+		low := i * chunk
+		var high int
+		if i == nslices-1 {
+			high = len(samples) - 1
+		} else {
+			high = (i + 1) * chunk
+		}
+		checkSampleDistribution(t, samples[low:high], expected)
+	}
+}
+
+//
+// Normal distribution tests
+//
+
+func generateNormalSamples(nsamples int, mean, stddev float64, seed uint64) []float64 {
+	r := New(NewPCG(seed, seed))
+	samples := make([]float64, nsamples)
+	for i := range samples {
+		samples[i] = r.NormFloat64()*stddev + mean
+	}
+	return samples
+}
+
+func testNormalDistribution(t *testing.T, nsamples int, mean, stddev float64, seed uint64) {
+	//fmt.Printf("testing nsamples=%v mean=%v stddev=%v seed=%v\n", nsamples, mean, stddev, seed);
+
+	samples := generateNormalSamples(nsamples, mean, stddev, seed)
+	errorScale := max(1.0, stddev) // Error scales with stddev
+	expected := &statsResults{mean, stddev, 0.10 * errorScale, 0.08 * errorScale}
+
+	// Make sure that the entire set matches the expected distribution.
+	checkSampleDistribution(t, samples, expected)
+
+	// Make sure that each half of the set matches the expected distribution.
+	checkSampleSliceDistributions(t, samples, 2, expected)
+
+	// Make sure that each 7th of the set matches the expected distribution.
+	checkSampleSliceDistributions(t, samples, 7, expected)
+}
+
+// Actual tests
+
+func TestStandardNormalValues(t *testing.T) {
+	for _, seed := range testSeeds {
+		testNormalDistribution(t, numTestSamples, 0, 1, seed)
+	}
+}
+
+func TestNonStandardNormalValues(t *testing.T) {
+	sdmax := 1000.0
+	mmax := 1000.0
+	if testing.Short() {
+		sdmax = 5
+		mmax = 5
+	}
+	for sd := 0.5; sd < sdmax; sd *= 2 {
+		for m := 0.5; m < mmax; m *= 2 {
+			for _, seed := range testSeeds {
+				testNormalDistribution(t, numTestSamples, m, sd, seed)
+				if testing.Short() {
+					break
+				}
+			}
+		}
+	}
+}
+
+//
+// Exponential distribution tests
+//
+
+func generateExponentialSamples(nsamples int, rate float64, seed uint64) []float64 {
+	r := New(NewPCG(seed, seed))
+	samples := make([]float64, nsamples)
+	for i := range samples {
+		samples[i] = r.ExpFloat64() / rate
+	}
+	return samples
+}
+
+func testExponentialDistribution(t *testing.T, nsamples int, rate float64, seed uint64) {
+	//fmt.Printf("testing nsamples=%v rate=%v seed=%v\n", nsamples, rate, seed);
+
+	mean := 1 / rate
+	stddev := mean
+
+	samples := generateExponentialSamples(nsamples, rate, seed)
+	errorScale := max(1.0, 1/rate) // Error scales with the inverse of the rate
+	expected := &statsResults{mean, stddev, 0.10 * errorScale, 0.20 * errorScale}
+
+	// Make sure that the entire set matches the expected distribution.
+	checkSampleDistribution(t, samples, expected)
+
+	// Make sure that each half of the set matches the expected distribution.
+	checkSampleSliceDistributions(t, samples, 2, expected)
+
+	// Make sure that each 7th of the set matches the expected distribution.
+	checkSampleSliceDistributions(t, samples, 7, expected)
+}
+
+// Actual tests
+
+func TestStandardExponentialValues(t *testing.T) {
+	for _, seed := range testSeeds {
+		testExponentialDistribution(t, numTestSamples, 1, seed)
+	}
+}
+
+func TestNonStandardExponentialValues(t *testing.T) {
+	for rate := 0.05; rate < 10; rate *= 2 {
+		for _, seed := range testSeeds {
+			testExponentialDistribution(t, numTestSamples, rate, seed)
+			if testing.Short() {
+				break
+			}
+		}
+	}
+}
+
+//
+// Table generation tests
+//
+
+func initNorm() (testKn []uint32, testWn, testFn []float32) {
+	const m1 = 1 << 31
+	var (
+		dn float64 = rn
+		tn         = dn
+		vn float64 = 9.91256303526217e-3
+	)
+
+	testKn = make([]uint32, 128)
+	testWn = make([]float32, 128)
+	testFn = make([]float32, 128)
+
+	q := vn / math.Exp(-0.5*dn*dn)
+	testKn[0] = uint32((dn / q) * m1)
+	testKn[1] = 0
+	testWn[0] = float32(q / m1)
+	testWn[127] = float32(dn / m1)
+	testFn[0] = 1.0
+	testFn[127] = float32(math.Exp(-0.5 * dn * dn))
+	for i := 126; i >= 1; i-- {
+		dn = math.Sqrt(-2.0 * math.Log(vn/dn+math.Exp(-0.5*dn*dn)))
+		testKn[i+1] = uint32((dn / tn) * m1)
+		tn = dn
+		testFn[i] = float32(math.Exp(-0.5 * dn * dn))
+		testWn[i] = float32(dn / m1)
+	}
+	return
+}
+
+func initExp() (testKe []uint32, testWe, testFe []float32) {
+	const m2 = 1 << 32
+	var (
+		de float64 = re
+		te         = de
+		ve float64 = 3.9496598225815571993e-3
+	)
+
+	testKe = make([]uint32, 256)
+	testWe = make([]float32, 256)
+	testFe = make([]float32, 256)
+
+	q := ve / math.Exp(-de)
+	testKe[0] = uint32((de / q) * m2)
+	testKe[1] = 0
+	testWe[0] = float32(q / m2)
+	testWe[255] = float32(de / m2)
+	testFe[0] = 1.0
+	testFe[255] = float32(math.Exp(-de))
+	for i := 254; i >= 1; i-- {
+		de = -math.Log(ve/de + math.Exp(-de))
+		testKe[i+1] = uint32((de / te) * m2)
+		te = de
+		testFe[i] = float32(math.Exp(-de))
+		testWe[i] = float32(de / m2)
+	}
+	return
+}
+
+// compareUint32Slices returns the first index where the two slices
+// disagree, or <0 if the lengths are the same and all elements
+// are identical.
+func compareUint32Slices(s1, s2 []uint32) int {
+	if len(s1) != len(s2) {
+		if len(s1) > len(s2) {
+			return len(s2) + 1
+		}
+		return len(s1) + 1
+	}
+	for i := range s1 {
+		if s1[i] != s2[i] {
+			return i
+		}
+	}
+	return -1
+}
+
+// compareFloat32Slices returns the first index where the two slices
+// disagree, or <0 if the lengths are the same and all elements
+// are identical.
+func compareFloat32Slices(s1, s2 []float32) int {
+	if len(s1) != len(s2) {
+		if len(s1) > len(s2) {
+			return len(s2) + 1
+		}
+		return len(s1) + 1
+	}
+	for i := range s1 {
+		if !nearEqual(float64(s1[i]), float64(s2[i]), 0, 1e-7) {
+			return i
+		}
+	}
+	return -1
+}
+
+func TestNormTables(t *testing.T) {
+	testKn, testWn, testFn := initNorm()
+	if i := compareUint32Slices(kn[0:], testKn); i >= 0 {
+		t.Errorf("kn disagrees at index %v; %v != %v", i, kn[i], testKn[i])
+	}
+	if i := compareFloat32Slices(wn[0:], testWn); i >= 0 {
+		t.Errorf("wn disagrees at index %v; %v != %v", i, wn[i], testWn[i])
+	}
+	if i := compareFloat32Slices(fn[0:], testFn); i >= 0 {
+		t.Errorf("fn disagrees at index %v; %v != %v", i, fn[i], testFn[i])
+	}
+}
+
+func TestExpTables(t *testing.T) {
+	testKe, testWe, testFe := initExp()
+	if i := compareUint32Slices(ke[0:], testKe); i >= 0 {
+		t.Errorf("ke disagrees at index %v; %v != %v", i, ke[i], testKe[i])
+	}
+	if i := compareFloat32Slices(we[0:], testWe); i >= 0 {
+		t.Errorf("we disagrees at index %v; %v != %v", i, we[i], testWe[i])
+	}
+	if i := compareFloat32Slices(fe[0:], testFe); i >= 0 {
+		t.Errorf("fe disagrees at index %v; %v != %v", i, fe[i], testFe[i])
+	}
+}
+
+func hasSlowFloatingPoint() bool {
+	switch runtime.GOARCH {
+	case "arm":
+		return os.Getenv("GOARM") == "5"
+	case "mips", "mipsle", "mips64", "mips64le":
+		// Be conservative and assume that all mips boards
+		// have emulated floating point.
+		// TODO: detect what it actually has.
+		return true
+	}
+	return false
+}
+
+func TestFloat32(t *testing.T) {
+	// For issue 6721, the problem came after 7533753 calls, so check 10e6.
+	num := int(10e6)
+	// But do the full amount only on builders (not locally).
+	// But ARM5 floating point emulation is slow (Issue 10749), so
+	// do less for that builder:
+	if testing.Short() && (testenv.Builder() == "" || hasSlowFloatingPoint()) {
+		num /= 100 // 1.72 seconds instead of 172 seconds
+	}
+
+	r := testRand()
+	for ct := 0; ct < num; ct++ {
+		f := r.Float32()
+		if f >= 1 {
+			t.Fatal("Float32() should be in range [0,1). ct:", ct, "f:", f)
+		}
+	}
+}
+
+func TestShuffleSmall(t *testing.T) {
+	// Check that Shuffle allows n=0 and n=1, but that swap is never called for them.
+	r := testRand()
+	for n := 0; n <= 1; n++ {
+		r.Shuffle(n, func(i, j int) { t.Fatalf("swap called, n=%d i=%d j=%d", n, i, j) })
+	}
+}
+
+// encodePerm converts from a permuted slice of length n, such as Perm generates, to an int in [0, n!).
+// See https://en.wikipedia.org/wiki/Lehmer_code.
+// encodePerm modifies the input slice.
+func encodePerm(s []int) int {
+	// Convert to Lehmer code.
+	for i, x := range s {
+		r := s[i+1:]
+		for j, y := range r {
+			if y > x {
+				r[j]--
+			}
+		}
+	}
+	// Convert to int in [0, n!).
+	m := 0
+	fact := 1
+	for i := len(s) - 1; i >= 0; i-- {
+		m += s[i] * fact
+		fact *= len(s) - i
+	}
+	return m
+}
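To see the Lehmer-code mapping on a concrete input, here is a standalone copy of the routine (renamed lehmerIndex purely for illustration): [2, 0, 1] is the fifth permutation of three elements in lexicographic order, so it encodes to 4.

package main

import "fmt"

// lehmerIndex is a copy of encodePerm above; like it, it modifies its input.
func lehmerIndex(s []int) int {
	// Convert to Lehmer code.
	for i, x := range s {
		r := s[i+1:]
		for j, y := range r {
			if y > x {
				r[j]--
			}
		}
	}
	// Convert to an int in [0, n!).
	m := 0
	fact := 1
	for i := len(s) - 1; i >= 0; i-- {
		m += s[i] * fact
		fact *= len(s) - i
	}
	return m
}

func main() {
	fmt.Println(lehmerIndex([]int{2, 0, 1})) // 4
}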
+
+// TestUniformFactorial tests several ways of generating a uniform value in [0, n!).
+func TestUniformFactorial(t *testing.T) {
+	r := New(NewPCG(1, 2))
+	top := 6
+	if testing.Short() {
+		top = 3
+	}
+	for n := 3; n <= top; n++ {
+		t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) {
+			// Calculate n!.
+			nfact := 1
+			for i := 2; i <= n; i++ {
+				nfact *= i
+			}
+
+			// Test a few different ways to generate a uniform distribution.
+			p := make([]int, n) // re-usable slice for Shuffle generator
+			tests := [...]struct {
+				name string
+				fn   func() int
+			}{
+				{name: "Int32N", fn: func() int { return int(r.Int32N(int32(nfact))) }},
+				{name: "Perm", fn: func() int { return encodePerm(r.Perm(n)) }},
+				{name: "Shuffle", fn: func() int {
+					// Generate permutation using Shuffle.
+					for i := range p {
+						p[i] = i
+					}
+					r.Shuffle(n, func(i, j int) { p[i], p[j] = p[j], p[i] })
+					return encodePerm(p)
+				}},
+			}
+
+			for _, test := range tests {
+				t.Run(test.name, func(t *testing.T) {
+					// Gather chi-squared values and check that they follow
+					// the expected normal distribution given n!-1 degrees of freedom.
+					// See https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test and
+					// https://www.johndcook.com/Beautiful_Testing_ch10.pdf.
+					nsamples := 10 * nfact
+					if nsamples < 1000 {
+						nsamples = 1000
+					}
+					samples := make([]float64, nsamples)
+					for i := range samples {
+						// Generate some uniformly distributed values and count their occurrences.
+						const iters = 1000
+						counts := make([]int, nfact)
+						for i := 0; i < iters; i++ {
+							counts[test.fn()]++
+						}
+						// Calculate chi-squared and add to samples.
+						want := iters / float64(nfact)
+						var χ2 float64
+						for _, have := range counts {
+							err := float64(have) - want
+							χ2 += err * err
+						}
+						χ2 /= want
+						samples[i] = χ2
+					}
+
+					// Check that our samples approximate the appropriate normal distribution.
+					dof := float64(nfact - 1)
+					expected := &statsResults{mean: dof, stddev: math.Sqrt(2 * dof)}
+					errorScale := max(1.0, expected.stddev)
+					expected.closeEnough = 0.10 * errorScale
+					expected.maxError = 0.08 // TODO: What is the right value here? See issue 21211.
+					checkSampleDistribution(t, samples, expected)
+				})
+			}
+		})
+	}
+}
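The chi-squared statistic the test relies on can be computed in isolation; a minimal standalone sketch over IntN(6), where for a uniform source the result should hover near the 5 degrees of freedom (the bucket and iteration counts are arbitrary choices for the example):

package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	const (
		buckets = 6
		iters   = 60000
	)
	counts := make([]int, buckets)
	for i := 0; i < iters; i++ {
		counts[rand.IntN(buckets)]++
	}
	want := float64(iters) / buckets
	var chi2 float64
	for _, have := range counts {
		diff := float64(have) - want
		chi2 += diff * diff
	}
	chi2 /= want
	fmt.Printf("chi-squared = %.2f (dof = %d)\n", chi2, buckets-1)
}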
+
+// Benchmarks
+
+var Sink uint64
+
+func testRand() *Rand {
+	return New(NewPCG(1, 2))
+}
+
+func BenchmarkSourceUint64(b *testing.B) {
+	s := NewPCG(1, 2)
+	var t uint64
+	for n := b.N; n > 0; n-- {
+		t += s.Uint64()
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkGlobalInt64(b *testing.B) {
+	var t int64
+	for n := b.N; n > 0; n-- {
+		t += Int64()
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkGlobalInt64Parallel(b *testing.B) {
+	b.RunParallel(func(pb *testing.PB) {
+		var t int64
+		for pb.Next() {
+			t += Int64()
+		}
+		atomic.AddUint64(&Sink, uint64(t))
+	})
+}
+
+func BenchmarkGlobalUint64(b *testing.B) {
+	var t uint64
+	for n := b.N; n > 0; n-- {
+		t += Uint64()
+	}
+	Sink = t
+}
+
+func BenchmarkGlobalUint64Parallel(b *testing.B) {
+	b.RunParallel(func(pb *testing.PB) {
+		var t uint64
+		for pb.Next() {
+			t += Uint64()
+		}
+		atomic.AddUint64(&Sink, t)
+	})
+}
+
+func BenchmarkInt64(b *testing.B) {
+	r := testRand()
+	var t int64
+	for n := b.N; n > 0; n-- {
+		t += r.Int64()
+	}
+	Sink = uint64(t)
+}
+
+var AlwaysFalse = false
+
+func keep[T int | uint | int32 | uint32 | int64 | uint64](x T) T {
+	if AlwaysFalse {
+		return -x
+	}
+	return x
+}
+
+func BenchmarkUint64(b *testing.B) {
+	r := testRand()
+	var t uint64
+	for n := b.N; n > 0; n-- {
+		t += r.Uint64()
+	}
+	Sink = t
+}
+
+func BenchmarkGlobalIntN1000(b *testing.B) {
+	var t int
+	arg := keep(1000)
+	for n := b.N; n > 0; n-- {
+		t += IntN(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkIntN1000(b *testing.B) {
+	r := testRand()
+	var t int
+	arg := keep(1000)
+	for n := b.N; n > 0; n-- {
+		t += r.IntN(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt64N1000(b *testing.B) {
+	r := testRand()
+	var t int64
+	arg := keep(int64(1000))
+	for n := b.N; n > 0; n-- {
+		t += r.Int64N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt64N1e8(b *testing.B) {
+	r := testRand()
+	var t int64
+	arg := keep(int64(1e8))
+	for n := b.N; n > 0; n-- {
+		t += r.Int64N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt64N1e9(b *testing.B) {
+	r := testRand()
+	var t int64
+	arg := keep(int64(1e9))
+	for n := b.N; n > 0; n-- {
+		t += r.Int64N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt64N2e9(b *testing.B) {
+	r := testRand()
+	var t int64
+	arg := keep(int64(2e9))
+	for n := b.N; n > 0; n-- {
+		t += r.Int64N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt64N1e18(b *testing.B) {
+	r := testRand()
+	var t int64
+	arg := keep(int64(1e18))
+	for n := b.N; n > 0; n-- {
+		t += r.Int64N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt64N2e18(b *testing.B) {
+	r := testRand()
+	var t int64
+	arg := keep(int64(2e18))
+	for n := b.N; n > 0; n-- {
+		t += r.Int64N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt64N4e18(b *testing.B) {
+	r := testRand()
+	var t int64
+	arg := keep(int64(4e18))
+	for n := b.N; n > 0; n-- {
+		t += r.Int64N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt32N1000(b *testing.B) {
+	r := testRand()
+	var t int32
+	arg := keep(int32(1000))
+	for n := b.N; n > 0; n-- {
+		t += r.Int32N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt32N1e8(b *testing.B) {
+	r := testRand()
+	var t int32
+	arg := keep(int32(1e8))
+	for n := b.N; n > 0; n-- {
+		t += r.Int32N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt32N1e9(b *testing.B) {
+	r := testRand()
+	var t int32
+	arg := keep(int32(1e9))
+	for n := b.N; n > 0; n-- {
+		t += r.Int32N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkInt32N2e9(b *testing.B) {
+	r := testRand()
+	var t int32
+	arg := keep(int32(2e9))
+	for n := b.N; n > 0; n-- {
+		t += r.Int32N(arg)
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkFloat32(b *testing.B) {
+	r := testRand()
+	var t float32
+	for n := b.N; n > 0; n-- {
+		t += r.Float32()
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkFloat64(b *testing.B) {
+	r := testRand()
+	var t float64
+	for n := b.N; n > 0; n-- {
+		t += r.Float64()
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkExpFloat64(b *testing.B) {
+	r := testRand()
+	var t float64
+	for n := b.N; n > 0; n-- {
+		t += r.ExpFloat64()
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkNormFloat64(b *testing.B) {
+	r := testRand()
+	var t float64
+	for n := b.N; n > 0; n-- {
+		t += r.NormFloat64()
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkPerm3(b *testing.B) {
+	r := testRand()
+	var t int
+	for n := b.N; n > 0; n-- {
+		t += r.Perm(3)[0]
+	}
+	Sink = uint64(t)
+
+}
+
+func BenchmarkPerm30(b *testing.B) {
+	r := testRand()
+	var t int
+	for n := b.N; n > 0; n-- {
+		t += r.Perm(30)[0]
+	}
+	Sink = uint64(t)
+}
+
+func BenchmarkPerm30ViaShuffle(b *testing.B) {
+	r := testRand()
+	var t int
+	for n := b.N; n > 0; n-- {
+		p := make([]int, 30)
+		for i := range p {
+			p[i] = i
+		}
+		r.Shuffle(30, func(i, j int) { p[i], p[j] = p[j], p[i] })
+		t += p[0]
+	}
+	Sink = uint64(t)
+}
+
+// BenchmarkShuffleOverhead uses a minimal swap function
+// to measure just the shuffling overhead.
+func BenchmarkShuffleOverhead(b *testing.B) {
+	r := testRand()
+	for n := b.N; n > 0; n-- {
+		r.Shuffle(30, func(i, j int) {
+			if i < 0 || i >= 30 || j < 0 || j >= 30 {
+				b.Fatalf("bad swap(%d, %d)", i, j)
+			}
+		})
+	}
+}
+
+func BenchmarkConcurrent(b *testing.B) {
+	const goroutines = 4
+	var wg sync.WaitGroup
+	wg.Add(goroutines)
+	for i := 0; i < goroutines; i++ {
+		go func() {
+			defer wg.Done()
+			for n := b.N; n > 0; n-- {
+				Int64()
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+func TestN(t *testing.T) {
+	for i := 0; i < 1000; i++ {
+		v := N(10)
+		if v < 0 || v >= 10 {
+			t.Fatalf("N(10) returned %d", v)
+		}
+	}
+}
diff --git a/src/math/rand/v2/regress_test.go b/src/math/rand/v2/regress_test.go
new file mode 100644
index 0000000..c85d584
--- /dev/null
+++ b/src/math/rand/v2/regress_test.go
@@ -0,0 +1,563 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that random number sequences generated by a specific seed
+// do not change from version to version.
+//
+// Do NOT make changes to the golden outputs. If bugs need to be fixed
+// in the underlying code, find ways to fix them that do not affect the
+// outputs.
+
+package rand_test
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/format"
+	"io"
+	. "math/rand/v2"
+	"os"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+var update = flag.Bool("update", false, "update golden results for regression test")
+
+func TestRegress(t *testing.T) {
+	var int32s = []int32{1, 10, 32, 1 << 20, 1<<20 + 1, 1000000000, 1 << 30, 1<<31 - 2, 1<<31 - 1}
+	var uint32s = []uint32{1, 10, 32, 1 << 20, 1<<20 + 1, 1000000000, 1 << 30, 1<<31 - 2, 1<<31 - 1, 1<<32 - 2, 1<<32 - 1}
+	var int64s = []int64{1, 10, 32, 1 << 20, 1<<20 + 1, 1000000000, 1 << 30, 1<<31 - 2, 1<<31 - 1, 1000000000000000000, 1 << 60, 1<<63 - 2, 1<<63 - 1}
+	var uint64s = []uint64{1, 10, 32, 1 << 20, 1<<20 + 1, 1000000000, 1 << 30, 1<<31 - 2, 1<<31 - 1, 1000000000000000000, 1 << 60, 1<<63 - 2, 1<<63 - 1, 1<<64 - 2, 1<<64 - 1}
+	var permSizes = []int{0, 1, 5, 8, 9, 10, 16}
+
+	n := reflect.TypeOf(New(NewPCG(1, 2))).NumMethod()
+	p := 0
+	var buf bytes.Buffer
+	if *update {
+		fmt.Fprintf(&buf, "var regressGolden = []any{\n")
+	}
+	for i := 0; i < n; i++ {
+		if *update && i > 0 {
+			fmt.Fprintf(&buf, "\n")
+		}
+		r := New(NewPCG(1, 2))
+		rv := reflect.ValueOf(r)
+		m := rv.Type().Method(i)
+		mv := rv.Method(i)
+		mt := mv.Type()
+		if mt.NumOut() == 0 {
+			continue
+		}
+		for repeat := 0; repeat < 20; repeat++ {
+			var args []reflect.Value
+			var argstr string
+			if mt.NumIn() == 1 {
+				var x any
+				switch mt.In(0).Kind() {
+				default:
+					t.Fatalf("unexpected argument type for r.%s", m.Name)
+
+				case reflect.Int:
+					if m.Name == "Perm" {
+						x = permSizes[repeat%len(permSizes)]
+						break
+					}
+					big := int64s[repeat%len(int64s)]
+					if int64(int(big)) != big {
+						// On 32-bit machine.
+						// Consume an Int64 like on a 64-bit machine,
+						// to keep the golden data the same on different architectures.
+						r.Int64N(big)
+						if *update {
+							t.Fatalf("must run -update on 64-bit machine")
+						}
+						p++
+						continue
+					}
+					x = int(big)
+
+				case reflect.Uint:
+					big := uint64s[repeat%len(uint64s)]
+					if uint64(uint(big)) != big {
+						r.Uint64N(big) // what would happen on 64-bit machine, to keep stream in sync
+						if *update {
+							t.Fatalf("must run -update on 64-bit machine")
+						}
+						p++
+						continue
+					}
+					x = uint(big)
+
+				case reflect.Int32:
+					x = int32s[repeat%len(int32s)]
+
+				case reflect.Int64:
+					x = int64s[repeat%len(int64s)]
+
+				case reflect.Uint32:
+					x = uint32s[repeat%len(uint32s)]
+
+				case reflect.Uint64:
+					x = uint64s[repeat%len(uint64s)]
+				}
+				argstr = fmt.Sprint(x)
+				args = append(args, reflect.ValueOf(x))
+			}
+
+			var out any
+			out = mv.Call(args)[0].Interface()
+			if m.Name == "Int" || m.Name == "IntN" {
+				out = int64(out.(int))
+			}
+			if m.Name == "Uint" || m.Name == "UintN" {
+				out = uint64(out.(uint))
+			}
+			if *update {
+				var val string
+				big := int64(1 << 60)
+				if int64(int(big)) != big && (m.Name == "Int" || m.Name == "IntN") {
+					// 32-bit machine cannot print 64-bit results
+					val = "truncated"
+				} else if reflect.TypeOf(out).Kind() == reflect.Slice {
+					val = fmt.Sprintf("%#v", out)
+				} else {
+					val = fmt.Sprintf("%T(%v)", out, out)
+				}
+				fmt.Fprintf(&buf, "\t%s, // %s(%s)\n", val, m.Name, argstr)
+			} else if p >= len(regressGolden) {
+				t.Errorf("r.%s(%s) = %v, missing golden value", m.Name, argstr, out)
+			} else {
+				want := regressGolden[p]
+				if m.Name == "Int" {
+					want = int64(int(uint(want.(int64)) << 1 >> 1))
+				}
+				if !reflect.DeepEqual(out, want) {
+					t.Errorf("r.%s(%s) = %v, want %v", m.Name, argstr, out, want)
+				}
+			}
+			p++
+		}
+	}
+	if *update {
+		replace(t, "regress_test.go", buf.Bytes())
+	}
+}
+
+func TestUpdateExample(t *testing.T) {
+	if !*update {
+		t.Skip("-update not given")
+	}
+
+	oldStdout := os.Stdout
+	defer func() {
+		os.Stdout = oldStdout
+	}()
+
+	r, w, err := os.Pipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Close()
+	defer w.Close()
+
+	go func() {
+		os.Stdout = w
+		Example_rand()
+		os.Stdout = oldStdout
+		w.Close()
+	}()
+	out, err := io.ReadAll(r)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "\t// Output:\n")
+	for _, line := range strings.Split(string(out), "\n") {
+		if line != "" {
+			fmt.Fprintf(&buf, "\t// %s\n", line)
+		}
+	}
+
+	replace(t, "example_test.go", buf.Bytes())
+
+	// Exit so that Example_rand cannot fail.
+	fmt.Printf("UPDATED; ignore non-zero exit status\n")
+	os.Exit(1)
+}
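The stdout-capture trick used by TestUpdateExample can be factored into a small standalone helper; captureStdout is an invented name, and the approach only suits modest amounts of output since the pipe is read back after the function returns:

package main

import (
	"fmt"
	"io"
	"os"
)

// captureStdout runs f with os.Stdout pointed at a pipe and returns what it
// printed. Suitable for small outputs that fit in the pipe buffer.
func captureStdout(f func()) (string, error) {
	old := os.Stdout
	r, w, err := os.Pipe()
	if err != nil {
		return "", err
	}
	os.Stdout = w
	f()
	w.Close()
	os.Stdout = old
	out, err := io.ReadAll(r)
	r.Close()
	return string(out), err
}

func main() {
	out, err := captureStdout(func() { fmt.Println("hello") })
	fmt.Printf("captured %q, err = %v\n", out, err)
}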
+
+// replace substitutes the definition text from new into the content of file.
+// The text in new is of the form
+//
+//	var whatever = T{
+//		...
+//	}
+//
+// Replace searches file for an exact match for the text of the first line,
+// finds the closing brace, and then substitutes new for what used to be in the file.
+// This lets us update the regressGolden table during go test -update.
+func replace(t *testing.T, file string, new []byte) {
+	first, _, _ := bytes.Cut(new, []byte("\n"))
+	first = append(append([]byte("\n"), first...), '\n')
+	data, err := os.ReadFile(file)
+	if err != nil {
+		t.Fatal(err)
+	}
+	i := bytes.Index(data, first)
+	if i < 0 {
+		t.Fatalf("cannot find %q in %s", first, file)
+	}
+	j := bytes.Index(data[i+1:], []byte("\n}\n"))
+	if j < 0 {
+		t.Fatalf("cannot find end in %s", file)
+	}
+	data = append(append(data[:i+1:i+1], new...), data[i+1+j+1:]...)
+	data, err = format.Source(data)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := os.WriteFile(file, data, 0666); err != nil {
+		t.Fatal(err)
+	}
+}
+
+var regressGolden = []any{
+	float64(0.5931317151369719),   // ExpFloat64()
+	float64(0.0680034588807843),   // ExpFloat64()
+	float64(0.036496967459790364), // ExpFloat64()
+	float64(2.460335459645379),    // ExpFloat64()
+	float64(1.5792300208419903),   // ExpFloat64()
+	float64(0.9149501499404387),   // ExpFloat64()
+	float64(0.43463410545541104),  // ExpFloat64()
+	float64(0.5513632046504593),   // ExpFloat64()
+	float64(0.7426404617374481),   // ExpFloat64()
+	float64(1.2334925132631804),   // ExpFloat64()
+	float64(0.892529142200442),    // ExpFloat64()
+	float64(0.21508763681487764),  // ExpFloat64()
+	float64(1.0208588200798545),   // ExpFloat64()
+	float64(0.7650739736831382),   // ExpFloat64()
+	float64(0.7772788529257701),   // ExpFloat64()
+	float64(1.102732861281323),    // ExpFloat64()
+	float64(0.6982243043885805),   // ExpFloat64()
+	float64(0.4981788638202421),   // ExpFloat64()
+	float64(0.15806532306947937),  // ExpFloat64()
+	float64(0.9419163802459202),   // ExpFloat64()
+
+	float32(0.95955694),  // Float32()
+	float32(0.8076733),   // Float32()
+	float32(0.8135684),   // Float32()
+	float32(0.92872405),  // Float32()
+	float32(0.97472525),  // Float32()
+	float32(0.5485458),   // Float32()
+	float32(0.97740936),  // Float32()
+	float32(0.042272687), // Float32()
+	float32(0.99663067),  // Float32()
+	float32(0.035181105), // Float32()
+	float32(0.45059562),  // Float32()
+	float32(0.86597633),  // Float32()
+	float32(0.8954844),   // Float32()
+	float32(0.090798736), // Float32()
+	float32(0.46218646),  // Float32()
+	float32(0.5955118),   // Float32()
+	float32(0.08985227),  // Float32()
+	float32(0.19820237),  // Float32()
+	float32(0.7443699),   // Float32()
+	float32(0.56461),     // Float32()
+
+	float64(0.6764556596678251),  // Float64()
+	float64(0.4613862177205994),  // Float64()
+	float64(0.5085473976760264),  // Float64()
+	float64(0.4297927436037299),  // Float64()
+	float64(0.797802349388613),   // Float64()
+	float64(0.3883664855410056),  // Float64()
+	float64(0.8192750264193612),  // Float64()
+	float64(0.3381816951746133),  // Float64()
+	float64(0.9730458047755973),  // Float64()
+	float64(0.281449117585586),   // Float64()
+	float64(0.6047654075331631),  // Float64()
+	float64(0.9278107175107462),  // Float64()
+	float64(0.16387541502137226), // Float64()
+	float64(0.7263900707339023),  // Float64()
+	float64(0.6974917552729882),  // Float64()
+	float64(0.7640946923790318),  // Float64()
+	float64(0.7188183661358182),  // Float64()
+	float64(0.5856191500346635),  // Float64()
+	float64(0.9549597149363428),  // Float64()
+	float64(0.5168804691962643),  // Float64()
+
+	int64(4969059760275911952), // Int()
+	int64(2147869220224756844), // Int()
+	int64(5246770554000605320), // Int()
+	int64(5471241176507662746), // Int()
+	int64(4321634407747778896), // Int()
+	int64(760102831717374652),  // Int()
+	int64(9221744211007427193), // Int()
+	int64(8289669384274456462), // Int()
+	int64(2449715415482412441), // Int()
+	int64(3389241988064777392), // Int()
+	int64(2986830195847294191), // Int()
+	int64(8204908297817606218), // Int()
+	int64(8134976985547166651), // Int()
+	int64(2240328155279531677), // Int()
+	int64(7311121042813227358), // Int()
+	int64(5231057920893523323), // Int()
+	int64(4257872588489500903), // Int()
+	int64(158397175702351138),  // Int()
+	int64(1350674201389090105), // Int()
+	int64(6093522341581845358), // Int()
+
+	int32(1652216515), // Int32()
+	int32(1323786710), // Int32()
+	int32(1684546306), // Int32()
+	int32(1710678126), // Int32()
+	int32(503104460),  // Int32()
+	int32(88487615),   // Int32()
+	int32(1073552320), // Int32()
+	int32(965044529),  // Int32()
+	int32(285184408),  // Int32()
+	int32(394559696),  // Int32()
+	int32(1421454622), // Int32()
+	int32(955177040),  // Int32()
+	int32(2020777787), // Int32()
+	int32(260808523),  // Int32()
+	int32(851126509),  // Int32()
+	int32(1682717115), // Int32()
+	int32(1569423431), // Int32()
+	int32(1092181682), // Int32()
+	int32(157239171),  // Int32()
+	int32(709379364),  // Int32()
+
+	int32(0),          // Int32N(1)
+	int32(6),          // Int32N(10)
+	int32(8),          // Int32N(32)
+	int32(704922),     // Int32N(1048576)
+	int32(245656),     // Int32N(1048577)
+	int32(41205257),   // Int32N(1000000000)
+	int32(43831929),   // Int32N(1073741824)
+	int32(965044528),  // Int32N(2147483646)
+	int32(285184408),  // Int32N(2147483647)
+	int32(0),          // Int32N(1)
+	int32(6),          // Int32N(10)
+	int32(10),         // Int32N(32)
+	int32(283579),     // Int32N(1048576)
+	int32(127348),     // Int32N(1048577)
+	int32(396336665),  // Int32N(1000000000)
+	int32(911873403),  // Int32N(1073741824)
+	int32(1569423430), // Int32N(2147483646)
+	int32(1092181681), // Int32N(2147483647)
+	int32(0),          // Int32N(1)
+	int32(3),          // Int32N(10)
+
+	int64(4969059760275911952), // Int64()
+	int64(2147869220224756844), // Int64()
+	int64(5246770554000605320), // Int64()
+	int64(5471241176507662746), // Int64()
+	int64(4321634407747778896), // Int64()
+	int64(760102831717374652),  // Int64()
+	int64(9221744211007427193), // Int64()
+	int64(8289669384274456462), // Int64()
+	int64(2449715415482412441), // Int64()
+	int64(3389241988064777392), // Int64()
+	int64(2986830195847294191), // Int64()
+	int64(8204908297817606218), // Int64()
+	int64(8134976985547166651), // Int64()
+	int64(2240328155279531677), // Int64()
+	int64(7311121042813227358), // Int64()
+	int64(5231057920893523323), // Int64()
+	int64(4257872588489500903), // Int64()
+	int64(158397175702351138),  // Int64()
+	int64(1350674201389090105), // Int64()
+	int64(6093522341581845358), // Int64()
+
+	int64(0),                   // Int64N(1)
+	int64(6),                   // Int64N(10)
+	int64(8),                   // Int64N(32)
+	int64(704922),              // Int64N(1048576)
+	int64(245656),              // Int64N(1048577)
+	int64(41205257),            // Int64N(1000000000)
+	int64(43831929),            // Int64N(1073741824)
+	int64(965044528),           // Int64N(2147483646)
+	int64(285184408),           // Int64N(2147483647)
+	int64(183731176326946086),  // Int64N(1000000000000000000)
+	int64(680987186633600239),  // Int64N(1152921504606846976)
+	int64(4102454148908803108), // Int64N(9223372036854775806)
+	int64(8679174511200971228), // Int64N(9223372036854775807)
+	int64(0),                   // Int64N(1)
+	int64(3),                   // Int64N(10)
+	int64(27),                  // Int64N(32)
+	int64(665831),              // Int64N(1048576)
+	int64(533292),              // Int64N(1048577)
+	int64(73220195),            // Int64N(1000000000)
+	int64(686060398),           // Int64N(1073741824)
+
+	int64(0),                   // IntN(1)
+	int64(6),                   // IntN(10)
+	int64(8),                   // IntN(32)
+	int64(704922),              // IntN(1048576)
+	int64(245656),              // IntN(1048577)
+	int64(41205257),            // IntN(1000000000)
+	int64(43831929),            // IntN(1073741824)
+	int64(965044528),           // IntN(2147483646)
+	int64(285184408),           // IntN(2147483647)
+	int64(183731176326946086),  // IntN(1000000000000000000)
+	int64(680987186633600239),  // IntN(1152921504606846976)
+	int64(4102454148908803108), // IntN(9223372036854775806)
+	int64(8679174511200971228), // IntN(9223372036854775807)
+	int64(0),                   // IntN(1)
+	int64(3),                   // IntN(10)
+	int64(27),                  // IntN(32)
+	int64(665831),              // IntN(1048576)
+	int64(533292),              // IntN(1048577)
+	int64(73220195),            // IntN(1000000000)
+	int64(686060398),           // IntN(1073741824)
+
+	float64(0.37944549835531083),  // NormFloat64()
+	float64(0.07473804659119399),  // NormFloat64()
+	float64(0.20006841200604142),  // NormFloat64()
+	float64(-1.1253144115495104),  // NormFloat64()
+	float64(-0.4005883316435388),  // NormFloat64()
+	float64(-3.0853771402394736),  // NormFloat64()
+	float64(1.932330243076978),    // NormFloat64()
+	float64(1.726131393719264),    // NormFloat64()
+	float64(-0.11707238034168332), // NormFloat64()
+	float64(-0.9303318111676635),  // NormFloat64()
+	float64(-0.04750789419852852), // NormFloat64()
+	float64(0.22248301107582735),  // NormFloat64()
+	float64(-1.83630520614272),    // NormFloat64()
+	float64(0.7259521217919809),   // NormFloat64()
+	float64(0.8806882871913041),   // NormFloat64()
+	float64(-1.5022903484270484),  // NormFloat64()
+	float64(0.5972577266810571),   // NormFloat64()
+	float64(1.5631937339973658),   // NormFloat64()
+	float64(-0.3841235370075905),  // NormFloat64()
+	float64(-0.2967295854430667),  // NormFloat64()
+
+	[]int{},                             // Perm(0)
+	[]int{0},                            // Perm(1)
+	[]int{1, 4, 2, 0, 3},                // Perm(5)
+	[]int{4, 3, 6, 1, 5, 2, 7, 0},       // Perm(8)
+	[]int{6, 5, 1, 8, 7, 2, 0, 3, 4},    // Perm(9)
+	[]int{9, 4, 2, 5, 6, 8, 1, 7, 0, 3}, // Perm(10)
+	[]int{5, 9, 3, 1, 4, 2, 10, 7, 15, 11, 0, 14, 13, 8, 6, 12}, // Perm(16)
+	[]int{},                             // Perm(0)
+	[]int{0},                            // Perm(1)
+	[]int{4, 2, 1, 3, 0},                // Perm(5)
+	[]int{0, 2, 3, 1, 5, 4, 6, 7},       // Perm(8)
+	[]int{2, 0, 8, 3, 4, 7, 6, 5, 1},    // Perm(9)
+	[]int{0, 6, 5, 3, 8, 4, 1, 2, 9, 7}, // Perm(10)
+	[]int{9, 14, 4, 11, 13, 8, 0, 6, 2, 12, 3, 7, 1, 10, 5, 15}, // Perm(16)
+	[]int{},                             // Perm(0)
+	[]int{0},                            // Perm(1)
+	[]int{2, 4, 0, 3, 1},                // Perm(5)
+	[]int{3, 2, 1, 0, 7, 5, 4, 6},       // Perm(8)
+	[]int{1, 3, 4, 5, 0, 2, 7, 8, 6},    // Perm(9)
+	[]int{1, 8, 4, 7, 2, 6, 5, 9, 0, 3}, // Perm(10)
+
+	uint32(3304433030), // Uint32()
+	uint32(2647573421), // Uint32()
+	uint32(3369092613), // Uint32()
+	uint32(3421356252), // Uint32()
+	uint32(1006208920), // Uint32()
+	uint32(176975231),  // Uint32()
+	uint32(2147104640), // Uint32()
+	uint32(1930089058), // Uint32()
+	uint32(570368816),  // Uint32()
+	uint32(789119393),  // Uint32()
+	uint32(2842909244), // Uint32()
+	uint32(1910354080), // Uint32()
+	uint32(4041555575), // Uint32()
+	uint32(521617046),  // Uint32()
+	uint32(1702253018), // Uint32()
+	uint32(3365434230), // Uint32()
+	uint32(3138846863), // Uint32()
+	uint32(2184363364), // Uint32()
+	uint32(314478343),  // Uint32()
+	uint32(1418758728), // Uint32()
+
+	uint32(0),          // Uint32N(1)
+	uint32(6),          // Uint32N(10)
+	uint32(8),          // Uint32N(32)
+	uint32(704922),     // Uint32N(1048576)
+	uint32(245656),     // Uint32N(1048577)
+	uint32(41205257),   // Uint32N(1000000000)
+	uint32(43831929),   // Uint32N(1073741824)
+	uint32(965044528),  // Uint32N(2147483646)
+	uint32(285184408),  // Uint32N(2147483647)
+	uint32(789119393),  // Uint32N(4294967294)
+	uint32(2842909244), // Uint32N(4294967295)
+	uint32(0),          // Uint32N(1)
+	uint32(9),          // Uint32N(10)
+	uint32(29),         // Uint32N(32)
+	uint32(266590),     // Uint32N(1048576)
+	uint32(821640),     // Uint32N(1048577)
+	uint32(730819735),  // Uint32N(1000000000)
+	uint32(522841378),  // Uint32N(1073741824)
+	uint32(157239171),  // Uint32N(2147483646)
+	uint32(709379364),  // Uint32N(2147483647)
+
+	uint64(14192431797130687760), // Uint64()
+	uint64(11371241257079532652), // Uint64()
+	uint64(14470142590855381128), // Uint64()
+	uint64(14694613213362438554), // Uint64()
+	uint64(4321634407747778896),  // Uint64()
+	uint64(760102831717374652),   // Uint64()
+	uint64(9221744211007427193),  // Uint64()
+	uint64(8289669384274456462),  // Uint64()
+	uint64(2449715415482412441),  // Uint64()
+	uint64(3389241988064777392),  // Uint64()
+	uint64(12210202232702069999), // Uint64()
+	uint64(8204908297817606218),  // Uint64()
+	uint64(17358349022401942459), // Uint64()
+	uint64(2240328155279531677),  // Uint64()
+	uint64(7311121042813227358),  // Uint64()
+	uint64(14454429957748299131), // Uint64()
+	uint64(13481244625344276711), // Uint64()
+	uint64(9381769212557126946),  // Uint64()
+	uint64(1350674201389090105),  // Uint64()
+	uint64(6093522341581845358),  // Uint64()
+
+	uint64(0),                   // Uint64N(1)
+	uint64(6),                   // Uint64N(10)
+	uint64(8),                   // Uint64N(32)
+	uint64(704922),              // Uint64N(1048576)
+	uint64(245656),              // Uint64N(1048577)
+	uint64(41205257),            // Uint64N(1000000000)
+	uint64(43831929),            // Uint64N(1073741824)
+	uint64(965044528),           // Uint64N(2147483646)
+	uint64(285184408),           // Uint64N(2147483647)
+	uint64(183731176326946086),  // Uint64N(1000000000000000000)
+	uint64(680987186633600239),  // Uint64N(1152921504606846976)
+	uint64(4102454148908803108), // Uint64N(9223372036854775806)
+	uint64(8679174511200971228), // Uint64N(9223372036854775807)
+	uint64(2240328155279531676), // Uint64N(18446744073709551614)
+	uint64(7311121042813227357), // Uint64N(18446744073709551615)
+	uint64(0),                   // Uint64N(1)
+	uint64(7),                   // Uint64N(10)
+	uint64(2),                   // Uint64N(32)
+	uint64(312633),              // Uint64N(1048576)
+	uint64(346376),              // Uint64N(1048577)
+
+	uint64(0),                   // UintN(1)
+	uint64(6),                   // UintN(10)
+	uint64(8),                   // UintN(32)
+	uint64(704922),              // UintN(1048576)
+	uint64(245656),              // UintN(1048577)
+	uint64(41205257),            // UintN(1000000000)
+	uint64(43831929),            // UintN(1073741824)
+	uint64(965044528),           // UintN(2147483646)
+	uint64(285184408),           // UintN(2147483647)
+	uint64(183731176326946086),  // UintN(1000000000000000000)
+	uint64(680987186633600239),  // UintN(1152921504606846976)
+	uint64(4102454148908803108), // UintN(9223372036854775806)
+	uint64(8679174511200971228), // UintN(9223372036854775807)
+	uint64(2240328155279531676), // UintN(18446744073709551614)
+	uint64(7311121042813227357), // UintN(18446744073709551615)
+	uint64(0),                   // UintN(1)
+	uint64(7),                   // UintN(10)
+	uint64(2),                   // UintN(32)
+	uint64(312633),              // UintN(1048576)
+	uint64(346376),              // UintN(1048577)
+}
diff --git a/src/math/rand/v2/zipf.go b/src/math/rand/v2/zipf.go
new file mode 100644
index 0000000..f04c814
--- /dev/null
+++ b/src/math/rand/v2/zipf.go
@@ -0,0 +1,77 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// W.Hormann, G.Derflinger:
+// "Rejection-Inversion to Generate Variates
+// from Monotone Discrete Distributions"
+// http://eeyore.wu-wien.ac.at/papers/96-04-04.wh-der.ps.gz
+
+package rand
+
+import "math"
+
+// A Zipf generates Zipf distributed variates.
+type Zipf struct {
+	r            *Rand
+	imax         float64
+	v            float64
+	q            float64
+	s            float64
+	oneminusQ    float64
+	oneminusQinv float64
+	hxm          float64
+	hx0minusHxm  float64
+}
+
+func (z *Zipf) h(x float64) float64 {
+	return math.Exp(z.oneminusQ*math.Log(z.v+x)) * z.oneminusQinv
+}
+
+func (z *Zipf) hinv(x float64) float64 {
+	return math.Exp(z.oneminusQinv*math.Log(z.oneminusQ*x)) - z.v
+}
+
+// NewZipf returns a Zipf variate generator.
+// The generator generates values k ∈ [0, imax]
+// such that P(k) is proportional to (v + k) ** (-s).
+// Requirements: s > 1 and v >= 1.
+func NewZipf(r *Rand, s float64, v float64, imax uint64) *Zipf {
+	z := new(Zipf)
+	if s <= 1.0 || v < 1 {
+		return nil
+	}
+	z.r = r
+	z.imax = float64(imax)
+	z.v = v
+	z.q = s
+	z.oneminusQ = 1.0 - z.q
+	z.oneminusQinv = 1.0 / z.oneminusQ
+	z.hxm = z.h(z.imax + 0.5)
+	z.hx0minusHxm = z.h(0.5) - math.Exp(math.Log(z.v)*(-z.q)) - z.hxm
+	z.s = 1 - z.hinv(z.h(1.5)-math.Exp(-z.q*math.Log(z.v+1.0)))
+	return z
+}
+
+// Uint64 returns a value drawn from the Zipf distribution described
+// by the Zipf object.
+func (z *Zipf) Uint64() uint64 {
+	if z == nil {
+		panic("rand: nil Zipf")
+	}
+	k := 0.0
+
+	for {
+		r := z.r.Float64() // r on [0,1]
+		ur := z.hxm + r*z.hx0minusHxm
+		x := z.hinv(ur)
+		k = math.Floor(x + 0.5)
+		if k-x <= z.s {
+			break
+		}
+		if ur >= z.h(k+0.5)-math.Exp(-math.Log(k+z.v)*z.q) {
+			break
+		}
+	}
+	return uint64(k)
+}
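A short usage sketch for the generator above (standalone; seeds and parameters chosen arbitrarily): with s = 1.5, v = 1, imax = 9, small values of k dominate, and the bucket counts fall off roughly as (1 + k)^-1.5.

package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	z := rand.NewZipf(rand.New(rand.NewPCG(1, 2)), 1.5, 1, 9)
	counts := make([]int, 10) // k ranges over [0, 9]
	for i := 0; i < 10000; i++ {
		counts[z.Uint64()]++
	}
	fmt.Println(counts)
}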
diff --git a/src/math/rand/zipf.go b/src/math/rand/zipf.go
index f04c814..83c8e33 100644
--- a/src/math/rand/zipf.go
+++ b/src/math/rand/zipf.go
@@ -32,7 +32,7 @@
 	return math.Exp(z.oneminusQinv*math.Log(z.oneminusQ*x)) - z.v
 }
 
-// NewZipf returns a Zipf variate generator.
+// NewZipf returns a [Zipf] variate generator.
 // The generator generates values k ∈ [0, imax]
 // such that P(k) is proportional to (v + k) ** (-s).
 // Requirements: s > 1 and v >= 1.
@@ -53,8 +53,8 @@
 	return z
 }
 
-// Uint64 returns a value drawn from the Zipf distribution described
-// by the Zipf object.
+// Uint64 returns a value drawn from the [Zipf] distribution described
+// by the [Zipf] object.
 func (z *Zipf) Uint64() uint64 {
 	if z == nil {
 		panic("rand: nil Zipf")
diff --git a/src/math/sin_s390x.s b/src/math/sin_s390x.s
index 7eb2206..79d564b 100644
--- a/src/math/sin_s390x.s
+++ b/src/math/sin_s390x.s
@@ -73,6 +73,10 @@
 	BLTU    L17
 	FMOVD   F0, F5
 L2:
+	MOVD    $sincosxlim<>+0(SB), R1
+	FMOVD   0(R1), F1
+	FCMPU   F5, F1
+	BGT     L16
 	MOVD    $sincoss7<>+0(SB), R1
 	FMOVD   0(R1), F4
 	MOVD    $sincoss6<>+0(SB), R1
@@ -205,6 +209,8 @@
 	RET
 
 
+L16:
+	BR     ·sin(SB)		//tail call
 sinIsZero:
 	FMOVD   F0, ret+8(FP)
 	RET
@@ -223,6 +229,10 @@
 	BLTU    L35
 	FMOVD   F0, F1
 L21:
+	MOVD    $sincosxlim<>+0(SB), R1
+	FMOVD   0(R1), F2
+	FCMPU   F1, F2
+	BGT     L30
 	MOVD    $sincosc7<>+0(SB), R1
 	FMOVD   0(R1), F4
 	MOVD    $sincosc6<>+0(SB), R1
@@ -354,3 +364,6 @@
 	FNEG    F0, F0
 	FMOVD   F0, ret+8(FP)
 	RET
+
+L30:
+	BR     ·cos(SB)		//tail call
diff --git a/src/math/tan_s390x.s b/src/math/tan_s390x.s
index 8226760..6a4c449 100644
--- a/src/math/tan_s390x.s
+++ b/src/math/tan_s390x.s
@@ -49,10 +49,9 @@
 	FMOVD	F0, F2
 L2:
 	MOVD	$·tanxlim<>+0(SB), R1
-	WORD	$0xED201000	//cdb	%f2,0(%r1)
-	BYTE	$0x00
-	BYTE	$0x19
-	BGE	L11
+	FMOVD	0(R1), F1
+	FCMPU	F2, F1
+	BGT	L9
 	BVS	L11
 	MOVD	$·tanxadd<>+0(SB), R1
 	FMOVD	88(R5), F6
@@ -105,6 +104,8 @@
 L10:
 	WORD	$0xB3130020	//lcdbr	%f2,%f0
 	BR	L2
+L9:
+	BR	·tan(SB)
 atanIsZero:
 	FMOVD	F0, ret+8(FP)
 	RET
diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
index d422729..bfa9f68 100644
--- a/src/mime/multipart/formdata_test.go
+++ b/src/mime/multipart/formdata_test.go
@@ -452,6 +452,48 @@
 	}
 }
 
+func TestReadFormEndlessHeaderLine(t *testing.T) {
+	for _, test := range []struct {
+		name   string
+		prefix string
+	}{{
+		name:   "name",
+		prefix: "X-",
+	}, {
+		name:   "value",
+		prefix: "X-Header: ",
+	}, {
+		name:   "continuation",
+		prefix: "X-Header: foo\r\n  ",
+	}} {
+		t.Run(test.name, func(t *testing.T) {
+			const eol = "\r\n"
+			s := `--boundary` + eol
+			s += `Content-Disposition: form-data; name="a"` + eol
+			s += `Content-Type: text/plain` + eol
+			s += test.prefix
+			fr := io.MultiReader(
+				strings.NewReader(s),
+				neverendingReader('X'),
+			)
+			r := NewReader(fr, "boundary")
+			_, err := r.ReadForm(1 << 20)
+			if err != ErrMessageTooLarge {
+				t.Fatalf("ReadForm(1 << 20): %v, want ErrMessageTooLarge", err)
+			}
+		})
+	}
+}
+
+type neverendingReader byte
+
+func (r neverendingReader) Read(p []byte) (n int, err error) {
+	for i := range p {
+		p[i] = byte(r)
+	}
+	return len(p), nil
+}
+
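The endless-reader pattern in this test is handy on its own; a standalone sketch (repeatReader is an invented name), clipped with io.LimitReader so it terminates:

package main

import (
	"fmt"
	"io"
)

// repeatReader yields the same byte forever, like neverendingReader above.
type repeatReader byte

func (r repeatReader) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = byte(r)
	}
	return len(p), nil
}

func main() {
	data, err := io.ReadAll(io.LimitReader(repeatReader('X'), 16))
	fmt.Printf("%q %v\n", data, err)
}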
 func BenchmarkReadForm(b *testing.B) {
 	for _, test := range []struct {
 		name string
diff --git a/src/net/cgo_stub.go b/src/net/cgo_stub.go
index b26b11a..a4f6b4b 100644
--- a/src/net/cgo_stub.go
+++ b/src/net/cgo_stub.go
@@ -9,7 +9,7 @@
 //   (Darwin always provides the cgo functions, in cgo_unix_syscall.go)
 // - on wasip1, where cgo is never available
 
-//go:build (netgo && unix) || (unix && !cgo && !darwin) || wasip1
+//go:build (netgo && unix) || (unix && !cgo && !darwin) || js || wasip1
 
 package net
 
diff --git a/src/net/cgo_unix.go b/src/net/cgo_unix.go
index f10f3ea..7ed5daa 100644
--- a/src/net/cgo_unix.go
+++ b/src/net/cgo_unix.go
@@ -80,7 +80,7 @@
 func cgoLookupPort(ctx context.Context, network, service string) (port int, err error) {
 	var hints _C_struct_addrinfo
 	switch network {
-	case "": // no hints
+	case "ip": // no hints
 	case "tcp", "tcp4", "tcp6":
 		*_C_ai_socktype(&hints) = _C_SOCK_STREAM
 		*_C_ai_protocol(&hints) = _C_IPPROTO_TCP
@@ -120,6 +120,8 @@
 			if err == nil { // see golang.org/issue/6232
 				err = syscall.EMFILE
 			}
+		case _C_EAI_SERVICE, _C_EAI_NONAME: // Darwin returns EAI_NONAME.
+			return 0, &DNSError{Err: "unknown port", Name: network + "/" + service, IsNotFound: true}
 		default:
 			err = addrinfoErrno(gerrno)
 			isTemporary = addrinfoErrno(gerrno).Temporary()
@@ -140,7 +142,7 @@
 			return int(p[0])<<8 | int(p[1]), nil
 		}
 	}
-	return 0, &DNSError{Err: "unknown port", Name: network + "/" + service}
+	return 0, &DNSError{Err: "unknown port", Name: network + "/" + service, IsNotFound: true}
 }
 
 func cgoLookupHostIP(network, name string) (addrs []IPAddr, err error) {
@@ -317,8 +319,15 @@
 	acquireThread()
 	defer releaseThread()
 
-	state := (*_C_struct___res_state)(_C_malloc(unsafe.Sizeof(_C_struct___res_state{})))
-	defer _C_free(unsafe.Pointer(state))
+	resStateSize := unsafe.Sizeof(_C_struct___res_state{})
+	var state *_C_struct___res_state
+	if resStateSize > 0 {
+		mem := _C_malloc(resStateSize)
+		defer _C_free(mem)
+		memSlice := unsafe.Slice((*byte)(mem), resStateSize)
+		clear(memSlice)
+		state = (*_C_struct___res_state)(unsafe.Pointer(&memSlice[0]))
+	}
 	if err := _C_res_ninit(state); err != nil {
 		return nil, errors.New("res_ninit failure: " + err.Error())
 	}
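
The res_ninit change above replaces a bare C.malloc with an allocation that is explicitly zeroed through unsafe.Slice and the clear builtin before being reinterpreted as the C resolver-state struct. A small sketch of that zero-then-cast idiom; resState and the Go-allocated backing array are stand-ins, since the real code takes its memory from C.malloc:

package main

import (
	"fmt"
	"unsafe"
)

// resState is a placeholder for a C struct whose fields must start out zeroed.
type resState struct {
	retrans, retry int32
	options        uint64
}

func main() {
	size := unsafe.Sizeof(resState{})
	// The real code obtains this memory from C.malloc; a uint64-backed Go
	// allocation stands in here and keeps the cast below properly aligned.
	backing := make([]uint64, (size+7)/8)
	mem := unsafe.Pointer(&backing[0])

	// View the raw allocation as bytes, zero it, then reinterpret it.
	buf := unsafe.Slice((*byte)(mem), size)
	clear(buf) // clear is a Go 1.21+ builtin; the patch uses it to zero the C allocation
	state := (*resState)(unsafe.Pointer(&buf[0]))

	fmt.Println(state.retrans, state.retry, state.options) // 0 0 0
}
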
diff --git a/src/net/cgo_unix_cgo.go b/src/net/cgo_unix_cgo.go
index d11f3e3..7c609ed 100644
--- a/src/net/cgo_unix_cgo.go
+++ b/src/net/cgo_unix_cgo.go
@@ -37,6 +37,7 @@
 	_C_EAI_AGAIN    = C.EAI_AGAIN
 	_C_EAI_NODATA   = C.EAI_NODATA
 	_C_EAI_NONAME   = C.EAI_NONAME
+	_C_EAI_SERVICE  = C.EAI_SERVICE
 	_C_EAI_OVERFLOW = C.EAI_OVERFLOW
 	_C_EAI_SYSTEM   = C.EAI_SYSTEM
 	_C_IPPROTO_TCP  = C.IPPROTO_TCP
@@ -55,7 +56,6 @@
 	_C_struct_sockaddr = C.struct_sockaddr
 )
 
-func _C_GoString(p *_C_char) string      { return C.GoString(p) }
 func _C_malloc(n uintptr) unsafe.Pointer { return C.malloc(C.size_t(n)) }
 func _C_free(p unsafe.Pointer)           { C.free(p) }
 
diff --git a/src/net/cgo_unix_syscall.go b/src/net/cgo_unix_syscall.go
index 2eb8df1..ac9aaa7 100644
--- a/src/net/cgo_unix_syscall.go
+++ b/src/net/cgo_unix_syscall.go
@@ -19,6 +19,7 @@
 	_C_AF_UNSPEC    = syscall.AF_UNSPEC
 	_C_EAI_AGAIN    = unix.EAI_AGAIN
 	_C_EAI_NONAME   = unix.EAI_NONAME
+	_C_EAI_SERVICE  = unix.EAI_SERVICE
 	_C_EAI_NODATA   = unix.EAI_NODATA
 	_C_EAI_OVERFLOW = unix.EAI_OVERFLOW
 	_C_EAI_SYSTEM   = unix.EAI_SYSTEM
@@ -39,10 +40,6 @@
 	_C_struct_sockaddr    = syscall.RawSockaddr
 )
 
-func _C_GoString(p *_C_char) string {
-	return unix.GoString(p)
-}
-
 func _C_free(p unsafe.Pointer) { runtime.KeepAlive(p) }
 
 func _C_malloc(n uintptr) unsafe.Pointer {
diff --git a/src/net/conf.go b/src/net/conf.go
index 77cc635..15d73cf 100644
--- a/src/net/conf.go
+++ b/src/net/conf.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js
-
 package net
 
 import (
@@ -153,7 +151,7 @@
 	}
 }
 
-// goosPreferCgo reports whether the GOOS value passed in prefers
+// goosPrefersCgo reports whether the GOOS value passed in prefers
 // the cgo resolver.
 func goosPrefersCgo() bool {
 	switch runtime.GOOS {
@@ -185,7 +183,24 @@
 // required to use the go resolver. The provided Resolver is optional.
 // This will report true if the cgo resolver is not available.
 func (c *conf) mustUseGoResolver(r *Resolver) bool {
-	return c.netGo || r.preferGo() || !cgoAvailable
+	if !cgoAvailable {
+		return true
+	}
+
+	if runtime.GOOS == "plan9" {
+		// TODO(bradfitz): for now we only permit use of the PreferGo
+		// implementation when there's a non-nil Resolver with a
+		// non-nil Dialer. This is a sign that the code is trying
+		// to use their DNS-speaking net.Conn (such as an in-memory
+		// DNS cache) and they don't want to actually hit the network.
+		// Once we add support for looking up the default DNS servers
+		// from plan9, though, then we can relax this.
+		if r == nil || r.Dial == nil {
+			return false
+		}
+	}
+
+	return c.netGo || r.preferGo()
 }
 
 // addrLookupOrder determines which strategy to use to resolve addresses.
@@ -221,16 +236,7 @@
 		// Go resolver was explicitly requested
 		// or cgo resolver is not available.
 		// Figure out the order below.
-		switch c.goos {
-		case "windows":
-			// TODO(bradfitz): implement files-based
-			// lookup on Windows too? I guess /etc/hosts
-			// kinda exists on Windows. But for now, only
-			// do DNS.
-			fallbackOrder = hostLookupDNS
-		default:
-			fallbackOrder = hostLookupFilesDNS
-		}
+		fallbackOrder = hostLookupFilesDNS
 		canUseCgo = false
 	} else if c.netCgo {
 		// Cgo resolver was explicitly requested.
@@ -516,7 +522,7 @@
 	return stringsEqualFold(h, "_gateway")
 }
 
-// isOutbound reports whether h should be considered a "outbound"
+// isOutbound reports whether h should be considered an "outbound"
 // name for the myhostname NSS module.
 func isOutbound(h string) bool {
 	return stringsEqualFold(h, "_outbound")
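
The plan9 branch in mustUseGoResolver only honors the Go resolver when the caller supplies a Resolver with its own Dial hook, i.e. brings its own DNS-speaking transport (such as an in-memory DNS cache). A hedged sketch of such a caller; 192.0.2.53 is a documentation address standing in for a real DNS server:

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	r := &net.Resolver{
		PreferGo: true,
		// Dial supplies the caller's own DNS transport: the Go resolver
		// sends its queries over whatever Conn this function returns.
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			d := net.Dialer{Timeout: 2 * time.Second}
			// 192.0.2.53:53 is a documentation address used as a placeholder.
			return d.DialContext(ctx, network, "192.0.2.53:53")
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	addrs, err := r.LookupIPAddr(ctx, "example.com")
	fmt.Println(addrs, err)
}
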
diff --git a/src/net/conn_test.go b/src/net/conn_test.go
index 4f391b0..d1e1e7b 100644
--- a/src/net/conn_test.go
+++ b/src/net/conn_test.go
@@ -2,10 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// This file implements API tests across platforms and will never have a build
-// tag.
-
-//go:build !js && !wasip1
+// This file implements API tests across platforms and should never have a build
+// constraint.
 
 package net
 
@@ -21,44 +19,46 @@
 
 func TestConnAndListener(t *testing.T) {
 	for i, network := range []string{"tcp", "unix", "unixpacket"} {
-		if !testableNetwork(network) {
-			t.Logf("skipping %s test", network)
-			continue
-		}
+		i, network := i, network
+		t.Run(network, func(t *testing.T) {
+			if !testableNetwork(network) {
+				t.Skipf("skipping %s test", network)
+			}
 
-		ls := newLocalServer(t, network)
-		defer ls.teardown()
-		ch := make(chan error, 1)
-		handler := func(ls *localServer, ln Listener) { ls.transponder(ln, ch) }
-		if err := ls.buildup(handler); err != nil {
-			t.Fatal(err)
-		}
-		if ls.Listener.Addr().Network() != network {
-			t.Fatalf("got %s; want %s", ls.Listener.Addr().Network(), network)
-		}
+			ls := newLocalServer(t, network)
+			defer ls.teardown()
+			ch := make(chan error, 1)
+			handler := func(ls *localServer, ln Listener) { ls.transponder(ln, ch) }
+			if err := ls.buildup(handler); err != nil {
+				t.Fatal(err)
+			}
+			if ls.Listener.Addr().Network() != network {
+				t.Fatalf("got %s; want %s", ls.Listener.Addr().Network(), network)
+			}
 
-		c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer c.Close()
-		if c.LocalAddr().Network() != network || c.RemoteAddr().Network() != network {
-			t.Fatalf("got %s->%s; want %s->%s", c.LocalAddr().Network(), c.RemoteAddr().Network(), network, network)
-		}
-		c.SetDeadline(time.Now().Add(someTimeout))
-		c.SetReadDeadline(time.Now().Add(someTimeout))
-		c.SetWriteDeadline(time.Now().Add(someTimeout))
+			c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer c.Close()
+			if c.LocalAddr().Network() != network || c.RemoteAddr().Network() != network {
+				t.Fatalf("got %s->%s; want %s->%s", c.LocalAddr().Network(), c.RemoteAddr().Network(), network, network)
+			}
+			c.SetDeadline(time.Now().Add(someTimeout))
+			c.SetReadDeadline(time.Now().Add(someTimeout))
+			c.SetWriteDeadline(time.Now().Add(someTimeout))
 
-		if _, err := c.Write([]byte("CONN AND LISTENER TEST")); err != nil {
-			t.Fatal(err)
-		}
-		rb := make([]byte, 128)
-		if _, err := c.Read(rb); err != nil {
-			t.Fatal(err)
-		}
+			if _, err := c.Write([]byte("CONN AND LISTENER TEST")); err != nil {
+				t.Fatal(err)
+			}
+			rb := make([]byte, 128)
+			if _, err := c.Read(rb); err != nil {
+				t.Fatal(err)
+			}
 
-		for err := range ch {
-			t.Errorf("#%d: %v", i, err)
-		}
+			for err := range ch {
+				t.Errorf("#%d: %v", i, err)
+			}
+		})
 	}
 }
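
The rewritten TestConnAndListener wraps each network in t.Run, so an unavailable network shows up as a skipped subtest rather than a silent continue. The shape of that conversion, reduced to its essentials (available stands in for testableNetwork; this would live in a _test.go file):

package example

import "testing"

func available(network string) bool { return true } // stand-in for testableNetwork

func TestPerNetwork(t *testing.T) {
	for i, network := range []string{"tcp", "unix", "unixpacket"} {
		i, network := i, network // capture for the closure (pre-Go 1.22 loop semantics)
		t.Run(network, func(t *testing.T) {
			if !available(network) {
				t.Skipf("skipping %s test", network)
			}
			_ = i // per-network setup, dial, and assertions go here
		})
	}
}
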
diff --git a/src/net/dial.go b/src/net/dial.go
index 79bc495..a6565c3 100644
--- a/src/net/dial.go
+++ b/src/net/dial.go
@@ -6,6 +6,7 @@
 
 import (
 	"context"
+	"internal/bytealg"
 	"internal/godebug"
 	"internal/nettrace"
 	"syscall"
@@ -64,7 +65,7 @@
 //
 // The zero value for each field is equivalent to dialing
 // without that option. Dialing with the zero value of Dialer
-// is therefore equivalent to just calling the Dial function.
+// is therefore equivalent to just calling the [Dial] function.
 //
 // It is safe to call Dialer's methods concurrently.
 type Dialer struct {
@@ -226,7 +227,7 @@
 }
 
 func parseNetwork(ctx context.Context, network string, needsProto bool) (afnet string, proto int, err error) {
-	i := last(network, ':')
+	i := bytealg.LastIndexByteString(network, ':')
 	if i < 0 { // no colon
 		switch network {
 		case "tcp", "tcp4", "tcp6":
@@ -337,7 +338,7 @@
 	return d.mptcpStatus.get()
 }
 
-// SetMultipathTCP directs the Dial methods to use, or not use, MPTCP,
+// SetMultipathTCP directs the [Dial] methods to use, or not use, MPTCP,
 // if supported by the operating system. This method overrides the
 // system default and the GODEBUG=multipathtcp=... setting if any.
 //
@@ -362,7 +363,7 @@
 // brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80".
 // The zone specifies the scope of the literal IPv6 address as defined
 // in RFC 4007.
-// The functions JoinHostPort and SplitHostPort manipulate a pair of
+// The functions [JoinHostPort] and [SplitHostPort] manipulate a pair of
 // host and port in this form.
 // When using TCP, and the host resolves to multiple IP addresses,
 // Dial will try each IP address in order until one succeeds.
@@ -400,7 +401,7 @@
 	return d.Dial(network, address)
 }
 
-// DialTimeout acts like Dial but takes a timeout.
+// DialTimeout acts like [Dial] but takes a timeout.
 //
 // The timeout includes name resolution, if required.
 // When using TCP, and the host in the address parameter resolves to
@@ -427,8 +428,8 @@
 // See func Dial for a description of the network and address
 // parameters.
 //
-// Dial uses context.Background internally; to specify the context, use
-// DialContext.
+// Dial uses [context.Background] internally; to specify the context, use
+// [Dialer.DialContext].
 func (d *Dialer) Dial(network, address string) (Conn, error) {
 	return d.DialContext(context.Background(), network, address)
 }
@@ -449,7 +450,7 @@
 // the connect to each single address will be given 15 seconds to complete
 // before trying the next one.
 //
-// See func Dial for a description of the network and address
+// See func [Dial] for a description of the network and address
 // parameters.
 func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn, error) {
 	if ctx == nil {
@@ -457,6 +458,7 @@
 	}
 	deadline := d.deadline(ctx, time.Now())
 	if !deadline.IsZero() {
+		testHookStepTime()
 		if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
 			subCtx, cancel := context.WithDeadline(ctx, deadline)
 			defer cancel()
@@ -698,7 +700,7 @@
 	return lc.mptcpStatus.get()
 }
 
-// SetMultipathTCP directs the Listen method to use, or not use, MPTCP,
+// SetMultipathTCP directs the [Listen] method to use, or not use, MPTCP,
 // if supported by the operating system. This method overrides the
 // system default and the GODEBUG=multipathtcp=... setting if any.
 //
@@ -793,14 +795,14 @@
 // addresses.
 // If the port in the address parameter is empty or "0", as in
 // "127.0.0.1:" or "[::1]:0", a port number is automatically chosen.
-// The Addr method of Listener can be used to discover the chosen
+// The [Addr] method of [Listener] can be used to discover the chosen
 // port.
 //
-// See func Dial for a description of the network and address
+// See func [Dial] for a description of the network and address
 // parameters.
 //
 // Listen uses context.Background internally; to specify the context, use
-// ListenConfig.Listen.
+// [ListenConfig.Listen].
 func Listen(network, address string) (Listener, error) {
 	var lc ListenConfig
 	return lc.Listen(context.Background(), network, address)
@@ -823,14 +825,14 @@
 // addresses.
 // If the port in the address parameter is empty or "0", as in
 // "127.0.0.1:" or "[::1]:0", a port number is automatically chosen.
-// The LocalAddr method of PacketConn can be used to discover the
+// The LocalAddr method of [PacketConn] can be used to discover the
 // chosen port.
 //
-// See func Dial for a description of the network and address
+// See func [Dial] for a description of the network and address
 // parameters.
 //
 // ListenPacket uses context.Background internally; to specify the context, use
-// ListenConfig.ListenPacket.
+// [ListenConfig.ListenPacket].
 func ListenPacket(network, address string) (PacketConn, error) {
 	var lc ListenConfig
 	return lc.ListenPacket(context.Background(), network, address)
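
DialContext, shown above, narrows the effective deadline to whichever of the Dialer's Timeout and the context's deadline expires first. A small usage sketch; 192.0.2.1 is a documentation address that is expected not to answer:

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	d := net.Dialer{Timeout: 3 * time.Second}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// The dial is bounded by min(3s, 10s) = 3s: the Dialer's deadline is
	// earlier, so DialContext derives a sub-context with that deadline.
	conn, err := d.DialContext(ctx, "tcp", "192.0.2.1:80")
	if err != nil {
		fmt.Println("dial failed (expected for a placeholder address):", err)
		return
	}
	conn.Close()
}
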
diff --git a/src/net/dial_test.go b/src/net/dial_test.go
index ca9f0da..1d0832e 100644
--- a/src/net/dial_test.go
+++ b/src/net/dial_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -784,6 +782,7 @@
 					"connection refused",
 					"unreachable",
 					"no route to host",
+					"invalid argument",
 				}
 				e := err.Error()
 				for _, ignore := range ignorable {
@@ -982,6 +981,8 @@
 	switch runtime.GOOS {
 	case "plan9":
 		t.Skipf("not supported on %s", runtime.GOOS)
+	case "js", "wasip1":
+		t.Skipf("skipping: fake net does not support Dialer.Control")
 	}
 
 	t.Run("StreamDial", func(t *testing.T) {
@@ -1025,40 +1026,52 @@
 	switch runtime.GOOS {
 	case "plan9":
 		t.Skipf("%s does not have full support of socktest", runtime.GOOS)
+	case "js", "wasip1":
+		t.Skipf("skipping: fake net does not support Dialer.ControlContext")
 	}
 	t.Run("StreamDial", func(t *testing.T) {
 		for i, network := range []string{"tcp", "tcp4", "tcp6", "unix", "unixpacket"} {
-			if !testableNetwork(network) {
-				continue
-			}
-			ln := newLocalListener(t, network)
-			defer ln.Close()
-			var id int
-			d := Dialer{ControlContext: func(ctx context.Context, network string, address string, c syscall.RawConn) error {
-				id = ctx.Value("id").(int)
-				return controlOnConnSetup(network, address, c)
-			}}
-			c, err := d.DialContext(context.WithValue(context.Background(), "id", i+1), network, ln.Addr().String())
-			if err != nil {
-				t.Error(err)
-				continue
-			}
-			if id != i+1 {
-				t.Errorf("got id %d, want %d", id, i+1)
-			}
-			c.Close()
+			t.Run(network, func(t *testing.T) {
+				if !testableNetwork(network) {
+					t.Skipf("skipping: %s not available", network)
+				}
+
+				ln := newLocalListener(t, network)
+				defer ln.Close()
+				var id int
+				d := Dialer{ControlContext: func(ctx context.Context, network string, address string, c syscall.RawConn) error {
+					id = ctx.Value("id").(int)
+					return controlOnConnSetup(network, address, c)
+				}}
+				c, err := d.DialContext(context.WithValue(context.Background(), "id", i+1), network, ln.Addr().String())
+				if err != nil {
+					t.Fatal(err)
+				}
+				if id != i+1 {
+					t.Errorf("got id %d, want %d", id, i+1)
+				}
+				c.Close()
+			})
 		}
 	})
 }
 
 // mustHaveExternalNetwork is like testenv.MustHaveExternalNetwork
-// except that it won't skip testing on non-mobile builders.
+// except that on non-Linux, non-mobile builders it permits the test to
+// run in -short mode.
 func mustHaveExternalNetwork(t *testing.T) {
 	t.Helper()
+	definitelyHasLongtestBuilder := runtime.GOOS == "linux"
 	mobile := runtime.GOOS == "android" || runtime.GOOS == "ios"
-	if testenv.Builder() == "" || mobile {
-		testenv.MustHaveExternalNetwork(t)
+	fake := runtime.GOOS == "js" || runtime.GOOS == "wasip1"
+	if testenv.Builder() != "" && !definitelyHasLongtestBuilder && !mobile && !fake {
+		// On a non-Linux, non-mobile builder (e.g., freebsd-amd64-13_0).
+		//
+		// Don't skip testing because otherwise the test may never run on
+		// any builder if this port doesn't also have a -longtest builder.
+		return
 	}
+	testenv.MustHaveExternalNetwork(t)
 }
 
 type contextWithNonZeroDeadline struct {
diff --git a/src/net/dnsclient.go b/src/net/dnsclient.go
index b609dbd..204620b 100644
--- a/src/net/dnsclient.go
+++ b/src/net/dnsclient.go
@@ -8,15 +8,17 @@
 	"internal/bytealg"
 	"internal/itoa"
 	"sort"
+	_ "unsafe" // for go:linkname
 
 	"golang.org/x/net/dns/dnsmessage"
 )
 
 // provided by runtime
-func fastrandu() uint
+//go:linkname runtime_rand runtime.rand
+func runtime_rand() uint64
 
 func randInt() int {
-	return int(fastrandu() >> 1) // clear sign bit
+	return int(uint(runtime_rand()) >> 1) // clear sign bit
 }
 
 func randIntn(n int) int {
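
randInt above obtains its randomness from runtime.rand via go:linkname and clears the sign bit by shifting the unsigned value right before converting to int. The same sign-bit trick, with an ordinary math/rand/v2 value standing in for the runtime-linked source:

package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	v := rand.Uint64()     // stands in for the value obtained from runtime.rand via go:linkname
	n := int(uint(v) >> 1) // shift out the top bit so the result is never negative
	fmt.Println(n >= 0)    // always true
}
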
diff --git a/src/net/dnsclient_unix.go b/src/net/dnsclient_unix.go
index dab5144..c291d5e 100644
--- a/src/net/dnsclient_unix.go
+++ b/src/net/dnsclient_unix.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js
-
 // DNS client: see RFC 1035.
 // Has to be linked into package net for Dial.
 
@@ -17,6 +15,7 @@
 import (
 	"context"
 	"errors"
+	"internal/bytealg"
 	"internal/itoa"
 	"io"
 	"os"
@@ -205,7 +204,9 @@
 
 // checkHeader performs basic sanity checks on the header.
 func checkHeader(p *dnsmessage.Parser, h dnsmessage.Header) error {
-	if h.RCode == dnsmessage.RCodeNameError {
+	rcode := extractExtendedRCode(*p, h)
+
+	if rcode == dnsmessage.RCodeNameError {
 		return errNoSuchHost
 	}
 
@@ -216,17 +217,17 @@
 
 	// libresolv continues to the next server when it receives
 	// an invalid referral response. See golang.org/issue/15434.
-	if h.RCode == dnsmessage.RCodeSuccess && !h.Authoritative && !h.RecursionAvailable && err == dnsmessage.ErrSectionDone {
+	if rcode == dnsmessage.RCodeSuccess && !h.Authoritative && !h.RecursionAvailable && err == dnsmessage.ErrSectionDone {
 		return errLameReferral
 	}
 
-	if h.RCode != dnsmessage.RCodeSuccess && h.RCode != dnsmessage.RCodeNameError {
+	if rcode != dnsmessage.RCodeSuccess && rcode != dnsmessage.RCodeNameError {
 		// None of the error codes make sense
 		// for the query we sent. If we didn't get
 		// a name error and we didn't get success,
 		// the server is behaving incorrectly or
 		// having temporary trouble.
-		if h.RCode == dnsmessage.RCodeServerFailure {
+		if rcode == dnsmessage.RCodeServerFailure {
 			return errServerTemporarilyMisbehaving
 		}
 		return errServerMisbehaving
@@ -253,6 +254,23 @@
 	}
 }
 
+// extractExtendedRCode extracts the extended RCode from the OPT resource (EDNS(0)).
+// If an OPT record is not found, the RCode from the hdr is returned.
+func extractExtendedRCode(p dnsmessage.Parser, hdr dnsmessage.Header) dnsmessage.RCode {
+	p.SkipAllAnswers()
+	p.SkipAllAuthorities()
+	for {
+		ahdr, err := p.AdditionalHeader()
+		if err != nil {
+			return hdr.RCode
+		}
+		if ahdr.Type == dnsmessage.TypeOPT {
+			return ahdr.ExtendedRCode(hdr.RCode)
+		}
+		p.SkipAdditional()
+	}
+}
+
 // Do a lookup for a single name, which must be rooted
 // (otherwise answer will not find the answers).
 func (r *Resolver) tryOneName(ctx context.Context, cfg *dnsConfig, name string, qtype dnsmessage.Type) (dnsmessage.Parser, string, error) {
@@ -479,10 +497,6 @@
 
 // nameList returns a list of names for sequential DNS queries.
 func (conf *dnsConfig) nameList(name string) []string {
-	if avoidDNS(name) {
-		return nil
-	}
-
 	// Check name length (see isDomainName).
 	l := len(name)
 	rooted := l > 0 && name[l-1] == '.'
@@ -492,27 +506,31 @@
 
 	// If name is rooted (trailing dot), try only that name.
 	if rooted {
+		if avoidDNS(name) {
+			return nil
+		}
 		return []string{name}
 	}
 
-	hasNdots := count(name, '.') >= conf.ndots
+	hasNdots := bytealg.CountString(name, '.') >= conf.ndots
 	name += "."
 	l++
 
 	// Build list of search choices.
 	names := make([]string, 0, 1+len(conf.search))
 	// If name has enough dots, try unsuffixed first.
-	if hasNdots {
+	if hasNdots && !avoidDNS(name) {
 		names = append(names, name)
 	}
 	// Try suffixes that are not too long (see isDomainName).
 	for _, suffix := range conf.search {
-		if l+len(suffix) <= 254 {
-			names = append(names, name+suffix)
+		fqdn := name + suffix
+		if !avoidDNS(fqdn) && len(fqdn) <= 254 {
+			names = append(names, fqdn)
 		}
 	}
 	// Try unsuffixed, if not tried first above.
-	if !hasNdots {
+	if !hasNdots && !avoidDNS(name) {
 		names = append(names, name)
 	}
 	return names
@@ -586,8 +604,7 @@
 
 // goLookupIP is the native Go implementation of LookupIP.
 // The libc versions are in cgo_*.go.
-func (r *Resolver) goLookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) {
-	order, conf := systemConf().hostLookupOrder(r, host)
+func (r *Resolver) goLookupIP(ctx context.Context, network, host string, order hostLookupOrder, conf *dnsConfig) (addrs []IPAddr, err error) {
 	addrs, _, err = r.goLookupIPCNAMEOrder(ctx, network, host, order, conf)
 	return
 }
@@ -699,7 +716,7 @@
 				h, err := result.p.AnswerHeader()
 				if err != nil && err != dnsmessage.ErrSectionDone {
 					lastErr = &DNSError{
-						Err:    "cannot marshal DNS message",
+						Err:    errCannotUnmarshalDNSMessage.Error(),
 						Name:   name,
 						Server: result.server,
 					}
@@ -712,7 +729,7 @@
 					a, err := result.p.AResource()
 					if err != nil {
 						lastErr = &DNSError{
-							Err:    "cannot marshal DNS message",
+							Err:    errCannotUnmarshalDNSMessage.Error(),
 							Name:   name,
 							Server: result.server,
 						}
@@ -727,7 +744,7 @@
 					aaaa, err := result.p.AAAAResource()
 					if err != nil {
 						lastErr = &DNSError{
-							Err:    "cannot marshal DNS message",
+							Err:    errCannotUnmarshalDNSMessage.Error(),
 							Name:   name,
 							Server: result.server,
 						}
@@ -742,7 +759,7 @@
 					c, err := result.p.CNAMEResource()
 					if err != nil {
 						lastErr = &DNSError{
-							Err:    "cannot marshal DNS message",
+							Err:    errCannotUnmarshalDNSMessage.Error(),
 							Name:   name,
 							Server: result.server,
 						}
@@ -755,7 +772,7 @@
 				default:
 					if err := result.p.SkipAnswer(); err != nil {
 						lastErr = &DNSError{
-							Err:    "cannot marshal DNS message",
+							Err:    errCannotUnmarshalDNSMessage.Error(),
 							Name:   name,
 							Server: result.server,
 						}
@@ -847,7 +864,7 @@
 		}
 		if err != nil {
 			return nil, &DNSError{
-				Err:    "cannot marshal DNS message",
+				Err:    errCannotUnmarshalDNSMessage.Error(),
 				Name:   addr,
 				Server: server,
 			}
@@ -856,7 +873,7 @@
 			err := p.SkipAnswer()
 			if err != nil {
 				return nil, &DNSError{
-					Err:    "cannot marshal DNS message",
+					Err:    errCannotUnmarshalDNSMessage.Error(),
 					Name:   addr,
 					Server: server,
 				}
@@ -866,7 +883,7 @@
 		ptr, err := p.PTRResource()
 		if err != nil {
 			return nil, &DNSError{
-				Err:    "cannot marshal DNS message",
+				Err:    errCannotUnmarshalDNSMessage.Error(),
 				Name:   addr,
 				Server: server,
 			}
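
checkHeader now folds the EDNS(0) extended RCode bits, carried in the OPT record of the additional section, into the header's 4-bit RCode before classifying the response. A hedged round-trip sketch using golang.org/x/net/dns/dnsmessage; the 1232-byte UDP payload size and the 1<<10 bit are arbitrary values chosen for illustration:

package main

import (
	"fmt"

	"golang.org/x/net/dns/dnsmessage"
)

func main() {
	// An RCode with bits above the 4-bit header field; EDNS(0) carries the
	// upper bits in the OPT resource header.
	extended := dnsmessage.RCode(1 << 10)

	var opt dnsmessage.ResourceHeader
	if err := opt.SetEDNS0(1232, extended, false); err != nil {
		panic(err)
	}
	msg := dnsmessage.Message{
		Header:      dnsmessage.Header{Response: true, RCode: extended & 0xF},
		Additionals: []dnsmessage.Resource{{Header: opt, Body: &dnsmessage.OPTResource{}}},
	}
	packed, err := msg.Pack()
	if err != nil {
		panic(err)
	}

	// Receiving side: recover the full RCode the way extractExtendedRCode does.
	var p dnsmessage.Parser
	hdr, err := p.Start(packed)
	if err != nil {
		panic(err)
	}
	p.SkipAllQuestions()
	p.SkipAllAnswers()
	p.SkipAllAuthorities()
	for {
		ahdr, err := p.AdditionalHeader()
		if err != nil {
			fmt.Println("no OPT record; rcode =", hdr.RCode)
			return
		}
		if ahdr.Type == dnsmessage.TypeOPT {
			fmt.Println("extended rcode =", ahdr.ExtendedRCode(hdr.RCode)) // 1024
			return
		}
		p.SkipAdditional()
	}
}
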
diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go
index 8d435a5..0da3630 100644
--- a/src/net/dnsclient_unix_test.go
+++ b/src/net/dnsclient_unix_test.go
@@ -10,12 +10,12 @@
 	"context"
 	"errors"
 	"fmt"
-	"internal/testenv"
 	"os"
 	"path"
 	"path/filepath"
 	"reflect"
 	"runtime"
+	"slices"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -191,6 +191,19 @@
 	}
 }
 
+func TestNameListAvoidDNS(t *testing.T) {
+	c := &dnsConfig{search: []string{"go.dev.", "onion."}}
+	got := c.nameList("www")
+	if !slices.Equal(got, []string{"www.", "www.go.dev."}) {
+		t.Fatalf(`nameList("www") = %v, want "www.", "www.go.dev."`, got)
+	}
+
+	got = c.nameList("www.onion")
+	if !slices.Equal(got, []string{"www.onion.go.dev."}) {
+		t.Fatalf(`nameList("www.onion") = %v, want "www.onion.go.dev."`, got)
+	}
+}
+
 var fakeDNSServerSuccessful = fakeDNSServer{rh: func(_, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) {
 	r := dnsmessage.Message{
 		Header: dnsmessage.Header{
@@ -221,7 +234,7 @@
 func TestLookupTorOnion(t *testing.T) {
 	defer dnsWaitGroup.Wait()
 	r := Resolver{PreferGo: true, Dial: fakeDNSServerSuccessful.DialContext}
-	addrs, err := r.LookupIPAddr(context.Background(), "foo.onion")
+	addrs, err := r.LookupIPAddr(context.Background(), "foo.onion.")
 	if err != nil {
 		t.Fatalf("lookup = %v; want nil", err)
 	}
@@ -606,8 +619,8 @@
 		t.Fatal(err)
 	}
 	// Redirect host file lookups.
-	defer func(orig string) { testHookHostsPath = orig }(testHookHostsPath)
-	testHookHostsPath = "testdata/hosts"
+	defer func(orig string) { hostsFilePath = orig }(hostsFilePath)
+	hostsFilePath = "testdata/hosts"
 
 	for _, order := range []hostLookupOrder{hostLookupFilesDNS, hostLookupDNSFiles} {
 		name := fmt.Sprintf("order %v", order)
@@ -1953,8 +1966,8 @@
 	DefaultResolver = &r
 	defer func() { DefaultResolver = originalDefault }()
 	// Redirect host file lookups.
-	defer func(orig string) { testHookHostsPath = orig }(testHookHostsPath)
-	testHookHostsPath = "testdata/hosts"
+	defer func(orig string) { hostsFilePath = orig }(hostsFilePath)
+	hostsFilePath = "testdata/hosts"
 
 	tests := []struct {
 		name string
@@ -2173,8 +2186,8 @@
 }
 
 func TestGoLookupIPCNAMEOrderHostsAliasesFilesOnlyMode(t *testing.T) {
-	defer func(orig string) { testHookHostsPath = orig }(testHookHostsPath)
-	testHookHostsPath = "testdata/aliases"
+	defer func(orig string) { hostsFilePath = orig }(hostsFilePath)
+	hostsFilePath = "testdata/aliases"
 	mode := hostLookupFiles
 
 	for _, v := range lookupStaticHostAliasesTest {
@@ -2183,8 +2196,8 @@
 }
 
 func TestGoLookupIPCNAMEOrderHostsAliasesFilesDNSMode(t *testing.T) {
-	defer func(orig string) { testHookHostsPath = orig }(testHookHostsPath)
-	testHookHostsPath = "testdata/aliases"
+	defer func(orig string) { hostsFilePath = orig }(hostsFilePath)
+	hostsFilePath = "testdata/aliases"
 	mode := hostLookupFilesDNS
 
 	for _, v := range lookupStaticHostAliasesTest {
@@ -2200,11 +2213,8 @@
 }
 
 func TestGoLookupIPCNAMEOrderHostsAliasesDNSFilesMode(t *testing.T) {
-	if testenv.Builder() == "" {
-		t.Skip("Makes assumptions about local networks and (re)naming that aren't always true")
-	}
-	defer func(orig string) { testHookHostsPath = orig }(testHookHostsPath)
-	testHookHostsPath = "testdata/aliases"
+	defer func(orig string) { hostsFilePath = orig }(hostsFilePath)
+	hostsFilePath = "testdata/aliases"
 	mode := hostLookupDNSFiles
 
 	for _, v := range goLookupIPCNAMEOrderDNSFilesModeTests {
@@ -2213,9 +2223,29 @@
 }
 
 func testGoLookupIPCNAMEOrderHostsAliases(t *testing.T, mode hostLookupOrder, lookup, lookupRes string) {
+	fake := fakeDNSServer{
+		rh: func(_, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) {
+			var answers []dnsmessage.Resource
+
+			if mode != hostLookupDNSFiles {
+				t.Fatal("received unexpected DNS query")
+			}
+
+			return dnsmessage.Message{
+				Header: dnsmessage.Header{
+					ID:       q.Header.ID,
+					Response: true,
+				},
+				Questions: []dnsmessage.Question{q.Questions[0]},
+				Answers:   answers,
+			}, nil
+		},
+	}
+
+	r := Resolver{PreferGo: true, Dial: fake.DialContext}
 	ins := []string{lookup, absDomainName(lookup), strings.ToLower(lookup), strings.ToUpper(lookup)}
 	for _, in := range ins {
-		_, res, err := goResolver.goLookupIPCNAMEOrder(context.Background(), "ip", in, mode, nil)
+		_, res, err := r.goLookupIPCNAMEOrder(context.Background(), "ip", in, mode, nil)
 		if err != nil {
 			t.Errorf("expected err == nil, but got error: %v", err)
 		}
@@ -2511,7 +2541,7 @@
 }
 
 func TestLookupOrderFilesNoSuchHost(t *testing.T) {
-	defer func(orig string) { testHookHostsPath = orig }(testHookHostsPath)
+	defer func(orig string) { hostsFilePath = orig }(hostsFilePath)
 	if runtime.GOOS != "openbsd" {
 		defer setSystemNSS(getSystemNSS(), 0)
 		setSystemNSS(nssStr(t, "hosts: files"), time.Hour)
@@ -2538,7 +2568,7 @@
 	if err := os.WriteFile(tmpFile, []byte{}, 0660); err != nil {
 		t.Fatal(err)
 	}
-	testHookHostsPath = tmpFile
+	hostsFilePath = tmpFile
 
 	const testName = "test.invalid"
 
@@ -2598,3 +2628,34 @@
 		}
 	}
 }
+
+func TestExtendedRCode(t *testing.T) {
+	fake := fakeDNSServer{
+		rh: func(_, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) {
+			fraudSuccessCode := dnsmessage.RCodeSuccess | 1<<10
+
+			var edns0Hdr dnsmessage.ResourceHeader
+			edns0Hdr.SetEDNS0(maxDNSPacketSize, fraudSuccessCode, false)
+
+			return dnsmessage.Message{
+				Header: dnsmessage.Header{
+					ID:       q.Header.ID,
+					Response: true,
+					RCode:    fraudSuccessCode,
+				},
+				Questions: []dnsmessage.Question{q.Questions[0]},
+				Additionals: []dnsmessage.Resource{{
+					Header: edns0Hdr,
+					Body:   &dnsmessage.OPTResource{},
+				}},
+			}, nil
+		},
+	}
+
+	r := &Resolver{PreferGo: true, Dial: fake.DialContext}
+	_, _, err := r.tryOneName(context.Background(), getSystemDNSConfig(), "go.dev.", dnsmessage.TypeA)
+	var dnsErr *DNSError
+	if !(errors.As(err, &dnsErr) && dnsErr.Err == errServerMisbehaving.Error()) {
+		t.Fatalf("r.tryOneName(): unexpected error: %v", err)
+	}
+}
diff --git a/src/net/dnsconfig_unix.go b/src/net/dnsconfig_unix.go
index 69b3004..b0a3182 100644
--- a/src/net/dnsconfig_unix.go
+++ b/src/net/dnsconfig_unix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !windows
+//go:build !windows
 
 // Read system DNS config from /etc/resolv.conf
 
diff --git a/src/net/dnsname_test.go b/src/net/dnsname_test.go
index 4a5f01a..601a33a 100644
--- a/src/net/dnsname_test.go
+++ b/src/net/dnsname_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
diff --git a/src/net/error_plan9_test.go b/src/net/error_plan9_test.go
index 1270af1..f86c96c 100644
--- a/src/net/error_plan9_test.go
+++ b/src/net/error_plan9_test.go
@@ -7,7 +7,6 @@
 import "syscall"
 
 var (
-	errTimedout       = syscall.ETIMEDOUT
 	errOpNotSupported = syscall.EPLAN9
 
 	abortedConnRequestErrors []error
diff --git a/src/net/error_posix.go b/src/net/error_posix.go
index c8dc069..84f8044 100644
--- a/src/net/error_posix.go
+++ b/src/net/error_posix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix || (js && wasm) || wasip1 || windows
+//go:build unix || js || wasip1 || windows
 
 package net
 
diff --git a/src/net/error_test.go b/src/net/error_test.go
index 4538765..f82e863 100644
--- a/src/net/error_test.go
+++ b/src/net/error_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -157,32 +155,33 @@
 
 	d := Dialer{Timeout: someTimeout}
 	for i, tt := range dialErrorTests {
-		c, err := d.Dial(tt.network, tt.address)
-		if err == nil {
-			t.Errorf("#%d: should fail; %s:%s->%s", i, c.LocalAddr().Network(), c.LocalAddr(), c.RemoteAddr())
-			c.Close()
-			continue
-		}
-		if tt.network == "tcp" || tt.network == "udp" {
-			nerr := err
-			if op, ok := nerr.(*OpError); ok {
-				nerr = op.Err
+		i, tt := i, tt
+		t.Run(fmt.Sprint(i), func(t *testing.T) {
+			c, err := d.Dial(tt.network, tt.address)
+			if err == nil {
+				t.Errorf("should fail; %s:%s->%s", c.LocalAddr().Network(), c.LocalAddr(), c.RemoteAddr())
+				c.Close()
+				return
 			}
-			if sys, ok := nerr.(*os.SyscallError); ok {
-				nerr = sys.Err
+			if tt.network == "tcp" || tt.network == "udp" {
+				nerr := err
+				if op, ok := nerr.(*OpError); ok {
+					nerr = op.Err
+				}
+				if sys, ok := nerr.(*os.SyscallError); ok {
+					nerr = sys.Err
+				}
+				if nerr == errOpNotSupported {
+					t.Fatalf("should fail without %v; %s:%s->", nerr, tt.network, tt.address)
+				}
 			}
-			if nerr == errOpNotSupported {
-				t.Errorf("#%d: should fail without %v; %s:%s->", i, nerr, tt.network, tt.address)
-				continue
+			if c != nil {
+				t.Errorf("Dial returned non-nil interface %T(%v) with err != nil", c, c)
 			}
-		}
-		if c != nil {
-			t.Errorf("Dial returned non-nil interface %T(%v) with err != nil", c, c)
-		}
-		if err = parseDialError(err); err != nil {
-			t.Errorf("#%d: %v", i, err)
-			continue
-		}
+			if err = parseDialError(err); err != nil {
+				t.Error(err)
+			}
+		})
 	}
 }
 
@@ -208,10 +207,11 @@
 			t.Errorf("%s: should fail", network)
 			continue
 		}
-		if err = parseDialError(err); err != nil {
+		if err := parseDialError(err); err != nil {
 			t.Errorf("%s: %v", network, err)
 			continue
 		}
+		t.Logf("%s: error as expected: %v", network, err)
 	}
 }
 
@@ -220,6 +220,7 @@
 	case "plan9":
 		t.Skipf("not supported on %s", runtime.GOOS)
 	}
+
 	if !supportsIPv4() || !supportsIPv6() {
 		t.Skip("both IPv4 and IPv6 are required")
 	}
@@ -236,38 +237,42 @@
 		// control name resolution.
 		{"tcp6", "", &TCPAddr{IP: IP{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}}},
 	} {
-		var err error
-		var c Conn
-		var op string
-		if tt.lit != "" {
-			c, err = Dial(tt.network, JoinHostPort(tt.lit, "0"))
-			op = fmt.Sprintf("Dial(%q, %q)", tt.network, JoinHostPort(tt.lit, "0"))
-		} else {
-			c, err = DialTCP(tt.network, nil, tt.addr)
-			op = fmt.Sprintf("DialTCP(%q, %q)", tt.network, tt.addr)
+		desc := tt.lit
+		if desc == "" {
+			desc = tt.addr.String()
 		}
-		if err == nil {
-			c.Close()
-			t.Errorf("%s succeeded, want error", op)
-			continue
-		}
-		if perr := parseDialError(err); perr != nil {
-			t.Errorf("%s: %v", op, perr)
-			continue
-		}
-		operr := err.(*OpError).Err
-		aerr, ok := operr.(*AddrError)
-		if !ok {
-			t.Errorf("%s: %v is %T, want *AddrError", op, err, operr)
-			continue
-		}
-		want := tt.lit
-		if tt.lit == "" {
-			want = tt.addr.IP.String()
-		}
-		if aerr.Addr != want {
-			t.Errorf("%s: %v, error Addr=%q, want %q", op, err, aerr.Addr, want)
-		}
+		t.Run(fmt.Sprintf("%s/%s", tt.network, desc), func(t *testing.T) {
+			var err error
+			var c Conn
+			var op string
+			if tt.lit != "" {
+				c, err = Dial(tt.network, JoinHostPort(tt.lit, "0"))
+				op = fmt.Sprintf("Dial(%q, %q)", tt.network, JoinHostPort(tt.lit, "0"))
+			} else {
+				c, err = DialTCP(tt.network, nil, tt.addr)
+				op = fmt.Sprintf("DialTCP(%q, %q)", tt.network, tt.addr)
+			}
+			t.Logf("%s: %v", op, err)
+			if err == nil {
+				c.Close()
+				t.Fatalf("%s succeeded, want error", op)
+			}
+			if perr := parseDialError(err); perr != nil {
+				t.Fatal(perr)
+			}
+			operr := err.(*OpError).Err
+			aerr, ok := operr.(*AddrError)
+			if !ok {
+				t.Fatalf("OpError.Err is %T, want *AddrError", operr)
+			}
+			want := tt.lit
+			if tt.lit == "" {
+				want = tt.addr.IP.String()
+			}
+			if aerr.Addr != want {
+				t.Errorf("error Addr=%q, want %q", aerr.Addr, want)
+			}
+		})
 	}
 }
 
@@ -305,32 +310,32 @@
 	defer sw.Set(socktest.FilterListen, nil)
 
 	for i, tt := range listenErrorTests {
-		ln, err := Listen(tt.network, tt.address)
-		if err == nil {
-			t.Errorf("#%d: should fail; %s:%s->", i, ln.Addr().Network(), ln.Addr())
-			ln.Close()
-			continue
-		}
-		if tt.network == "tcp" {
-			nerr := err
-			if op, ok := nerr.(*OpError); ok {
-				nerr = op.Err
+		t.Run(fmt.Sprintf("%s_%s", tt.network, tt.address), func(t *testing.T) {
+			ln, err := Listen(tt.network, tt.address)
+			if err == nil {
+				t.Errorf("#%d: should fail; %s:%s->", i, ln.Addr().Network(), ln.Addr())
+				ln.Close()
+				return
 			}
-			if sys, ok := nerr.(*os.SyscallError); ok {
-				nerr = sys.Err
+			if tt.network == "tcp" {
+				nerr := err
+				if op, ok := nerr.(*OpError); ok {
+					nerr = op.Err
+				}
+				if sys, ok := nerr.(*os.SyscallError); ok {
+					nerr = sys.Err
+				}
+				if nerr == errOpNotSupported {
+					t.Fatalf("#%d: should fail without %v; %s:%s->", i, nerr, tt.network, tt.address)
+				}
 			}
-			if nerr == errOpNotSupported {
-				t.Errorf("#%d: should fail without %v; %s:%s->", i, nerr, tt.network, tt.address)
-				continue
+			if ln != nil {
+				t.Errorf("Listen returned non-nil interface %T(%v) with err != nil", ln, ln)
 			}
-		}
-		if ln != nil {
-			t.Errorf("Listen returned non-nil interface %T(%v) with err != nil", ln, ln)
-		}
-		if err = parseDialError(err); err != nil {
-			t.Errorf("#%d: %v", i, err)
-			continue
-		}
+			if err = parseDialError(err); err != nil {
+				t.Errorf("#%d: %v", i, err)
+			}
+		})
 	}
 }
 
@@ -361,19 +366,20 @@
 	}
 
 	for i, tt := range listenPacketErrorTests {
-		c, err := ListenPacket(tt.network, tt.address)
-		if err == nil {
-			t.Errorf("#%d: should fail; %s:%s->", i, c.LocalAddr().Network(), c.LocalAddr())
-			c.Close()
-			continue
-		}
-		if c != nil {
-			t.Errorf("ListenPacket returned non-nil interface %T(%v) with err != nil", c, c)
-		}
-		if err = parseDialError(err); err != nil {
-			t.Errorf("#%d: %v", i, err)
-			continue
-		}
+		t.Run(fmt.Sprintf("%s_%s", tt.network, tt.address), func(t *testing.T) {
+			c, err := ListenPacket(tt.network, tt.address)
+			if err == nil {
+				t.Errorf("#%d: should fail; %s:%s->", i, c.LocalAddr().Network(), c.LocalAddr())
+				c.Close()
+				return
+			}
+			if c != nil {
+				t.Errorf("ListenPacket returned non-nil interface %T(%v) with err != nil", c, c)
+			}
+			if err = parseDialError(err); err != nil {
+				t.Errorf("#%d: %v", i, err)
+			}
+		})
 	}
 }
 
@@ -557,49 +563,57 @@
 }
 
 func TestCloseError(t *testing.T) {
-	ln := newLocalListener(t, "tcp")
-	defer ln.Close()
-	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer c.Close()
+	t.Run("tcp", func(t *testing.T) {
+		ln := newLocalListener(t, "tcp")
+		defer ln.Close()
+		c, err := Dial(ln.Addr().Network(), ln.Addr().String())
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer c.Close()
 
-	for i := 0; i < 3; i++ {
-		err = c.(*TCPConn).CloseRead()
-		if perr := parseCloseError(err, true); perr != nil {
-			t.Errorf("#%d: %v", i, perr)
+		for i := 0; i < 3; i++ {
+			err = c.(*TCPConn).CloseRead()
+			if perr := parseCloseError(err, true); perr != nil {
+				t.Errorf("#%d: %v", i, perr)
+			}
 		}
-	}
-	for i := 0; i < 3; i++ {
-		err = c.(*TCPConn).CloseWrite()
-		if perr := parseCloseError(err, true); perr != nil {
-			t.Errorf("#%d: %v", i, perr)
+		for i := 0; i < 3; i++ {
+			err = c.(*TCPConn).CloseWrite()
+			if perr := parseCloseError(err, true); perr != nil {
+				t.Errorf("#%d: %v", i, perr)
+			}
 		}
-	}
-	for i := 0; i < 3; i++ {
-		err = c.Close()
-		if perr := parseCloseError(err, false); perr != nil {
-			t.Errorf("#%d: %v", i, perr)
+		for i := 0; i < 3; i++ {
+			err = c.Close()
+			if perr := parseCloseError(err, false); perr != nil {
+				t.Errorf("#%d: %v", i, perr)
+			}
+			err = ln.Close()
+			if perr := parseCloseError(err, false); perr != nil {
+				t.Errorf("#%d: %v", i, perr)
+			}
 		}
-		err = ln.Close()
-		if perr := parseCloseError(err, false); perr != nil {
-			t.Errorf("#%d: %v", i, perr)
-		}
-	}
+	})
 
-	pc, err := ListenPacket("udp", "127.0.0.1:0")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer pc.Close()
-
-	for i := 0; i < 3; i++ {
-		err = pc.Close()
-		if perr := parseCloseError(err, false); perr != nil {
-			t.Errorf("#%d: %v", i, perr)
+	t.Run("udp", func(t *testing.T) {
+		if !testableNetwork("udp") {
+			t.Skipf("skipping: udp not available")
 		}
-	}
+
+		pc, err := ListenPacket("udp", "127.0.0.1:0")
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer pc.Close()
+
+		for i := 0; i < 3; i++ {
+			err = pc.Close()
+			if perr := parseCloseError(err, false); perr != nil {
+				t.Errorf("#%d: %v", i, perr)
+			}
+		}
+	})
 }
 
 // parseAcceptError parses nestedErr and reports whether it is a valid
diff --git a/src/net/error_unix_test.go b/src/net/error_unix_test.go
index 291a723..963ba21 100644
--- a/src/net/error_unix_test.go
+++ b/src/net/error_unix_test.go
@@ -13,7 +13,6 @@
 )
 
 var (
-	errTimedout       = syscall.ETIMEDOUT
 	errOpNotSupported = syscall.EOPNOTSUPP
 
 	abortedConnRequestErrors = []error{syscall.ECONNABORTED} // see accept in fd_unix.go
diff --git a/src/net/error_windows_test.go b/src/net/error_windows_test.go
index 25825f9..7847af0 100644
--- a/src/net/error_windows_test.go
+++ b/src/net/error_windows_test.go
@@ -10,7 +10,6 @@
 )
 
 var (
-	errTimedout       = syscall.ETIMEDOUT
 	errOpNotSupported = syscall.EOPNOTSUPP
 
 	abortedConnRequestErrors = []error{syscall.ERROR_NETNAME_DELETED, syscall.WSAECONNRESET} // see accept in fd_windows.go
diff --git a/src/net/external_test.go b/src/net/external_test.go
index 0709b9d..38788ef 100644
--- a/src/net/external_test.go
+++ b/src/net/external_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
diff --git a/src/net/fd_fake.go b/src/net/fd_fake.go
new file mode 100644
index 0000000..ae567ac
--- /dev/null
+++ b/src/net/fd_fake.go
@@ -0,0 +1,169 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js || wasip1
+
+package net
+
+import (
+	"internal/poll"
+	"runtime"
+	"time"
+)
+
+const (
+	readSyscallName  = "fd_read"
+	writeSyscallName = "fd_write"
+)
+
+// Network file descriptor.
+type netFD struct {
+	pfd poll.FD
+
+	// immutable until Close
+	family      int
+	sotype      int
+	isConnected bool // handshake completed or use of association with peer
+	net         string
+	laddr       Addr
+	raddr       Addr
+
+	// The only networking available in WASI preview 1 is the ability to
+	// sock_accept on a pre-opened socket, and then fd_read, fd_write,
+	// fd_close, and sock_shutdown on the resulting connection. We
+	// intercept applicable netFD calls on this instance, and then pass
+	// the remainder of the netFD calls to fakeNetFD.
+	*fakeNetFD
+}
+
+func newFD(net string, sysfd int) *netFD {
+	return newPollFD(net, poll.FD{
+		Sysfd:         sysfd,
+		IsStream:      true,
+		ZeroReadIsEOF: true,
+	})
+}
+
+func newPollFD(net string, pfd poll.FD) *netFD {
+	var laddr Addr
+	var raddr Addr
+	// WASI preview 1 does not have functions like getsockname/getpeername,
+	// so we cannot get access to the underlying IP address used by connections.
+	//
+	// However, listeners created by FileListener are of type *TCPListener,
+	// which can be asserted by a Go program. The (*TCPListener).Addr method
+	// documents that the returned value will be of type *TCPAddr; we satisfy
+	// the documented behavior by creating addresses of the expected type here.
+	switch net {
+	case "tcp":
+		laddr = new(TCPAddr)
+		raddr = new(TCPAddr)
+	case "udp":
+		laddr = new(UDPAddr)
+		raddr = new(UDPAddr)
+	default:
+		laddr = unknownAddr{}
+		raddr = unknownAddr{}
+	}
+	return &netFD{
+		pfd:   pfd,
+		net:   net,
+		laddr: laddr,
+		raddr: raddr,
+	}
+}
+
+func (fd *netFD) init() error {
+	return fd.pfd.Init(fd.net, true)
+}
+
+func (fd *netFD) name() string {
+	return "unknown"
+}
+
+func (fd *netFD) accept() (netfd *netFD, err error) {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.accept(fd.laddr)
+	}
+	d, _, errcall, err := fd.pfd.Accept()
+	if err != nil {
+		if errcall != "" {
+			err = wrapSyscallError(errcall, err)
+		}
+		return nil, err
+	}
+	netfd = newFD("tcp", d)
+	if err = netfd.init(); err != nil {
+		netfd.Close()
+		return nil, err
+	}
+	return netfd, nil
+}
+
+func (fd *netFD) setAddr(laddr, raddr Addr) {
+	fd.laddr = laddr
+	fd.raddr = raddr
+	runtime.SetFinalizer(fd, (*netFD).Close)
+}
+
+func (fd *netFD) Close() error {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.Close()
+	}
+	runtime.SetFinalizer(fd, nil)
+	return fd.pfd.Close()
+}
+
+func (fd *netFD) shutdown(how int) error {
+	if fd.fakeNetFD != nil {
+		return nil
+	}
+	err := fd.pfd.Shutdown(how)
+	runtime.KeepAlive(fd)
+	return wrapSyscallError("shutdown", err)
+}
+
+func (fd *netFD) Read(p []byte) (n int, err error) {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.Read(p)
+	}
+	n, err = fd.pfd.Read(p)
+	runtime.KeepAlive(fd)
+	return n, wrapSyscallError(readSyscallName, err)
+}
+
+func (fd *netFD) Write(p []byte) (nn int, err error) {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.Write(p)
+	}
+	nn, err = fd.pfd.Write(p)
+	runtime.KeepAlive(fd)
+	return nn, wrapSyscallError(writeSyscallName, err)
+}
+
+func (fd *netFD) SetDeadline(t time.Time) error {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.SetDeadline(t)
+	}
+	return fd.pfd.SetDeadline(t)
+}
+
+func (fd *netFD) SetReadDeadline(t time.Time) error {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.SetReadDeadline(t)
+	}
+	return fd.pfd.SetReadDeadline(t)
+}
+
+func (fd *netFD) SetWriteDeadline(t time.Time) error {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.SetWriteDeadline(t)
+	}
+	return fd.pfd.SetWriteDeadline(t)
+}
+
+type unknownAddr struct{}
+
+func (unknownAddr) Network() string { return "unknown" }
+func (unknownAddr) String() string  { return "unknown" }
diff --git a/src/net/fd_js.go b/src/net/fd_js.go
new file mode 100644
index 0000000..0fce036
--- /dev/null
+++ b/src/net/fd_js.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fake networking for js/wasm. It is intended to allow tests of other packages to pass.
+
+//go:build js
+
+package net
+
+import (
+	"os"
+	"syscall"
+)
+
+func (fd *netFD) closeRead() error {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.closeRead()
+	}
+	return os.NewSyscallError("closeRead", syscall.ENOTSUP)
+}
+
+func (fd *netFD) closeWrite() error {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.closeWrite()
+	}
+	return os.NewSyscallError("closeRead", syscall.ENOTSUP)
+}
diff --git a/src/net/fd_wasip1.go b/src/net/fd_wasip1.go
index 74d0b0b..d50effc 100644
--- a/src/net/fd_wasip1.go
+++ b/src/net/fd_wasip1.go
@@ -7,124 +7,9 @@
 package net
 
 import (
-	"internal/poll"
-	"runtime"
 	"syscall"
-	"time"
 )
 
-const (
-	readSyscallName  = "fd_read"
-	writeSyscallName = "fd_write"
-)
-
-// Network file descriptor.
-type netFD struct {
-	pfd poll.FD
-
-	// immutable until Close
-	family      int
-	sotype      int
-	isConnected bool // handshake completed or use of association with peer
-	net         string
-	laddr       Addr
-	raddr       Addr
-
-	// The only networking available in WASI preview 1 is the ability to
-	// sock_accept on an pre-opened socket, and then fd_read, fd_write,
-	// fd_close, and sock_shutdown on the resulting connection. We
-	// intercept applicable netFD calls on this instance, and then pass
-	// the remainder of the netFD calls to fakeNetFD.
-	*fakeNetFD
-}
-
-func newFD(net string, sysfd int) *netFD {
-	return newPollFD(net, poll.FD{
-		Sysfd:         sysfd,
-		IsStream:      true,
-		ZeroReadIsEOF: true,
-	})
-}
-
-func newPollFD(net string, pfd poll.FD) *netFD {
-	var laddr Addr
-	var raddr Addr
-	// WASI preview 1 does not have functions like getsockname/getpeername,
-	// so we cannot get access to the underlying IP address used by connections.
-	//
-	// However, listeners created by FileListener are of type *TCPListener,
-	// which can be asserted by a Go program. The (*TCPListener).Addr method
-	// documents that the returned value will be of type *TCPAddr, we satisfy
-	// the documented behavior by creating addresses of the expected type here.
-	switch net {
-	case "tcp":
-		laddr = new(TCPAddr)
-		raddr = new(TCPAddr)
-	case "udp":
-		laddr = new(UDPAddr)
-		raddr = new(UDPAddr)
-	default:
-		laddr = unknownAddr{}
-		raddr = unknownAddr{}
-	}
-	return &netFD{
-		pfd:   pfd,
-		net:   net,
-		laddr: laddr,
-		raddr: raddr,
-	}
-}
-
-func (fd *netFD) init() error {
-	return fd.pfd.Init(fd.net, true)
-}
-
-func (fd *netFD) name() string {
-	return "unknown"
-}
-
-func (fd *netFD) accept() (netfd *netFD, err error) {
-	if fd.fakeNetFD != nil {
-		return fd.fakeNetFD.accept()
-	}
-	d, _, errcall, err := fd.pfd.Accept()
-	if err != nil {
-		if errcall != "" {
-			err = wrapSyscallError(errcall, err)
-		}
-		return nil, err
-	}
-	netfd = newFD("tcp", d)
-	if err = netfd.init(); err != nil {
-		netfd.Close()
-		return nil, err
-	}
-	return netfd, nil
-}
-
-func (fd *netFD) setAddr(laddr, raddr Addr) {
-	fd.laddr = laddr
-	fd.raddr = raddr
-	runtime.SetFinalizer(fd, (*netFD).Close)
-}
-
-func (fd *netFD) Close() error {
-	if fd.fakeNetFD != nil {
-		return fd.fakeNetFD.Close()
-	}
-	runtime.SetFinalizer(fd, nil)
-	return fd.pfd.Close()
-}
-
-func (fd *netFD) shutdown(how int) error {
-	if fd.fakeNetFD != nil {
-		return nil
-	}
-	err := fd.pfd.Shutdown(how)
-	runtime.KeepAlive(fd)
-	return wrapSyscallError("shutdown", err)
-}
-
 func (fd *netFD) closeRead() error {
 	if fd.fakeNetFD != nil {
 		return fd.fakeNetFD.closeRead()
@@ -138,47 +23,3 @@
 	}
 	return fd.shutdown(syscall.SHUT_WR)
 }
-
-func (fd *netFD) Read(p []byte) (n int, err error) {
-	if fd.fakeNetFD != nil {
-		return fd.fakeNetFD.Read(p)
-	}
-	n, err = fd.pfd.Read(p)
-	runtime.KeepAlive(fd)
-	return n, wrapSyscallError(readSyscallName, err)
-}
-
-func (fd *netFD) Write(p []byte) (nn int, err error) {
-	if fd.fakeNetFD != nil {
-		return fd.fakeNetFD.Write(p)
-	}
-	nn, err = fd.pfd.Write(p)
-	runtime.KeepAlive(fd)
-	return nn, wrapSyscallError(writeSyscallName, err)
-}
-
-func (fd *netFD) SetDeadline(t time.Time) error {
-	if fd.fakeNetFD != nil {
-		return fd.fakeNetFD.SetDeadline(t)
-	}
-	return fd.pfd.SetDeadline(t)
-}
-
-func (fd *netFD) SetReadDeadline(t time.Time) error {
-	if fd.fakeNetFD != nil {
-		return fd.fakeNetFD.SetReadDeadline(t)
-	}
-	return fd.pfd.SetReadDeadline(t)
-}
-
-func (fd *netFD) SetWriteDeadline(t time.Time) error {
-	if fd.fakeNetFD != nil {
-		return fd.fakeNetFD.SetWriteDeadline(t)
-	}
-	return fd.pfd.SetWriteDeadline(t)
-}
-
-type unknownAddr struct{}
-
-func (unknownAddr) Network() string { return "unknown" }
-func (unknownAddr) String() string  { return "unknown" }
diff --git a/src/net/fd_windows.go b/src/net/fd_windows.go
index eeb994df..45a10cf 100644
--- a/src/net/fd_windows.go
+++ b/src/net/fd_windows.go
@@ -64,10 +64,38 @@
 	if err := fd.init(); err != nil {
 		return nil, err
 	}
-	if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() {
-		fd.pfd.SetWriteDeadline(deadline)
+
+	if ctx.Done() != nil {
+		// Propagate the Context's deadline and cancellation.
+		// If the context is already done, or if it has a nonzero deadline,
+		// ensure that that is applied before the call to ConnectEx begins
+		// so that we don't return spurious connections.
 		defer fd.pfd.SetWriteDeadline(noDeadline)
+
+		if ctx.Err() != nil {
+			fd.pfd.SetWriteDeadline(aLongTimeAgo)
+		} else {
+			if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() {
+				fd.pfd.SetWriteDeadline(deadline)
+			}
+
+			done := make(chan struct{})
+			stop := context.AfterFunc(ctx, func() {
+				// Force the runtime's poller to immediately give
+				// up waiting for writability.
+				fd.pfd.SetWriteDeadline(aLongTimeAgo)
+				close(done)
+			})
+			defer func() {
+				if !stop() {
+					// Wait for the call to SetWriteDeadline to complete so that we can
+					// reset the deadline if everything else succeeded.
+					<-done
+				}
+			}()
+		}
 	}
+
 	if !canUseConnectEx(fd.net) {
 		err := connectFunc(fd.pfd.Sysfd, ra)
 		return nil, os.NewSyscallError("connect", err)
@@ -113,22 +141,6 @@
 		_ = fd.pfd.WSAIoctl(windows.SIO_TCP_INITIAL_RTO, (*byte)(unsafe.Pointer(&params)), uint32(unsafe.Sizeof(params)), nil, 0, &out, nil, 0)
 	}
 
-	// Wait for the goroutine converting context.Done into a write timeout
-	// to exist, otherwise our caller might cancel the context and
-	// cause fd.setWriteDeadline(aLongTimeAgo) to cancel a successful dial.
-	done := make(chan bool) // must be unbuffered
-	defer func() { done <- true }()
-	go func() {
-		select {
-		case <-ctx.Done():
-			// Force the runtime's poller to immediately give
-			// up waiting for writability.
-			fd.pfd.SetWriteDeadline(aLongTimeAgo)
-			<-done
-		case <-done:
-		}
-	}()
-
 	// Call ConnectEx API.
 	if err := fd.pfd.ConnectEx(ra); err != nil {
 		select {
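
The rewritten connect path replaces the helper goroutine with context.AfterFunc: cancellation pushes the write deadline into the past, and the stop/done pair ensures a late callback cannot race with the deferred deadline reset. The same pattern applied to an ordinary net.Conn, as a hedged sketch (aLongTimeAgo here is a local stand-in for the net package's internal sentinel):

package main

import (
	"context"
	"net"
	"time"
)

// aLongTimeAgo stands in for the net package's internal sentinel that forces
// pending I/O to fail immediately.
var aLongTimeAgo = time.Unix(1, 0)

// readWithContext interrupts a blocking Read when ctx is canceled.
func readWithContext(ctx context.Context, c net.Conn, p []byte) (int, error) {
	if ctx.Done() != nil {
		defer c.SetReadDeadline(time.Time{}) // reset the deadline on the way out

		done := make(chan struct{})
		stop := context.AfterFunc(ctx, func() {
			c.SetReadDeadline(aLongTimeAgo) // force a pending Read to give up
			close(done)
		})
		defer func() {
			if !stop() {
				// The callback already started: wait for its SetReadDeadline
				// so it cannot clobber the deferred reset above.
				<-done
			}
		}()
	}
	return c.Read(p)
}

func main() {
	_ = readWithContext // usage sketch only; a real program passes an established Conn
}
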
diff --git a/src/net/file_stub.go b/src/net/file_stub.go
index 91df926..6fd3eec 100644
--- a/src/net/file_stub.go
+++ b/src/net/file_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build js && wasm
+//go:build js
 
 package net
 
diff --git a/src/net/file_test.go b/src/net/file_test.go
index 53cd3c1..c517af5 100644
--- a/src/net/file_test.go
+++ b/src/net/file_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -31,7 +29,7 @@
 
 func TestFileConn(t *testing.T) {
 	switch runtime.GOOS {
-	case "plan9", "windows":
+	case "plan9", "windows", "js", "wasip1":
 		t.Skipf("not supported on %s", runtime.GOOS)
 	}
 
@@ -132,7 +130,7 @@
 
 func TestFileListener(t *testing.T) {
 	switch runtime.GOOS {
-	case "plan9", "windows":
+	case "plan9", "windows", "js", "wasip1":
 		t.Skipf("not supported on %s", runtime.GOOS)
 	}
 
@@ -224,7 +222,7 @@
 
 func TestFilePacketConn(t *testing.T) {
 	switch runtime.GOOS {
-	case "plan9", "windows":
+	case "plan9", "windows", "js", "wasip1":
 		t.Skipf("not supported on %s", runtime.GOOS)
 	}
 
@@ -291,7 +289,7 @@
 // Issue 24483.
 func TestFileCloseRace(t *testing.T) {
 	switch runtime.GOOS {
-	case "plan9", "windows":
+	case "plan9", "windows", "js", "wasip1":
 		t.Skipf("not supported on %s", runtime.GOOS)
 	}
 	if !testableNetwork("tcp") {
diff --git a/src/net/hook.go b/src/net/hook.go
index ea71803..eded34d 100644
--- a/src/net/hook.go
+++ b/src/net/hook.go
@@ -13,8 +13,7 @@
 	// if non-nil, overrides dialTCP.
 	testHookDialTCP func(ctx context.Context, net string, laddr, raddr *TCPAddr) (*TCPConn, error)
 
-	testHookHostsPath = "/etc/hosts"
-	testHookLookupIP  = func(
+	testHookLookupIP = func(
 		ctx context.Context,
 		fn func(context.Context, string, string) ([]IPAddr, error),
 		network string,
@@ -23,4 +22,10 @@
 		return fn(ctx, network, host)
 	}
 	testHookSetKeepAlive = func(time.Duration) {}
+
+	// testHookStepTime sleeps until time has moved forward by a nonzero amount.
+	// This helps to avoid flakes in timeout tests by ensuring that an implausibly
+	// short deadline (such as 1ns in the future) is always expired by the time
+	// a relevant system call occurs.
+	testHookStepTime = func() {}
 )
diff --git a/src/net/hook_plan9.go b/src/net/hook_plan9.go
index e053348..6020d32 100644
--- a/src/net/hook_plan9.go
+++ b/src/net/hook_plan9.go
@@ -4,6 +4,6 @@
 
 package net
 
-import "time"
-
-var testHookDialChannel = func() { time.Sleep(time.Millisecond) } // see golang.org/issue/5349
+var (
+	hostsFilePath = "/etc/hosts"
+)
diff --git a/src/net/hook_unix.go b/src/net/hook_unix.go
index 4e20f59..69b3755 100644
--- a/src/net/hook_unix.go
+++ b/src/net/hook_unix.go
@@ -2,16 +2,17 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix || (js && wasm) || wasip1
+//go:build unix || js || wasip1
 
 package net
 
 import "syscall"
 
 var (
-	testHookDialChannel  = func() {} // for golang.org/issue/5349
 	testHookCanceledDial = func() {} // for golang.org/issue/16523
 
+	hostsFilePath = "/etc/hosts"
+
 	// Placeholders for socket system calls.
 	socketFunc        func(int, int, int) (int, error)  = syscall.Socket
 	connectFunc       func(int, syscall.Sockaddr) error = syscall.Connect
diff --git a/src/net/hook_windows.go b/src/net/hook_windows.go
index ab8656c..f7c5b5a 100644
--- a/src/net/hook_windows.go
+++ b/src/net/hook_windows.go
@@ -7,14 +7,12 @@
 import (
 	"internal/syscall/windows"
 	"syscall"
-	"time"
 )
 
 var (
-	testHookDialChannel = func() { time.Sleep(time.Millisecond) } // see golang.org/issue/5349
+	hostsFilePath = windows.GetSystemDirectory() + "/Drivers/etc/hosts"
 
 	// Placeholders for socket system calls.
-	socketFunc    func(int, int, int) (syscall.Handle, error)                                                 = syscall.Socket
 	wsaSocketFunc func(int32, int32, int32, *syscall.WSAProtocolInfo, uint32, uint32) (syscall.Handle, error) = windows.WSASocket
 	connectFunc   func(syscall.Handle, syscall.Sockaddr) error                                                = syscall.Connect
 	listenFunc    func(syscall.Handle, int) error                                                             = syscall.Listen
diff --git a/src/net/hosts.go b/src/net/hosts.go
index 56e6674..73e6fcc 100644
--- a/src/net/hosts.go
+++ b/src/net/hosts.go
@@ -51,7 +51,7 @@
 
 func readHosts() {
 	now := time.Now()
-	hp := testHookHostsPath
+	hp := hostsFilePath
 
 	if now.Before(hosts.expire) && hosts.path == hp && len(hosts.byName) > 0 {
 		return
diff --git a/src/net/hosts_test.go b/src/net/hosts_test.go
index b3f189e..5f22920 100644
--- a/src/net/hosts_test.go
+++ b/src/net/hosts_test.go
@@ -59,10 +59,10 @@
 }
 
 func TestLookupStaticHost(t *testing.T) {
-	defer func(orig string) { testHookHostsPath = orig }(testHookHostsPath)
+	defer func(orig string) { hostsFilePath = orig }(hostsFilePath)
 
 	for _, tt := range lookupStaticHostTests {
-		testHookHostsPath = tt.name
+		hostsFilePath = tt.name
 		for _, ent := range tt.ents {
 			testStaticHost(t, tt.name, ent)
 		}
@@ -128,10 +128,10 @@
 }
 
 func TestLookupStaticAddr(t *testing.T) {
-	defer func(orig string) { testHookHostsPath = orig }(testHookHostsPath)
+	defer func(orig string) { hostsFilePath = orig }(hostsFilePath)
 
 	for _, tt := range lookupStaticAddrTests {
-		testHookHostsPath = tt.name
+		hostsFilePath = tt.name
 		for _, ent := range tt.ents {
 			testStaticAddr(t, tt.name, ent)
 		}
@@ -151,27 +151,27 @@
 func TestHostCacheModification(t *testing.T) {
 	// Ensure that programs can't modify the internals of the host cache.
 	// See https://golang.org/issues/14212.
-	defer func(orig string) { testHookHostsPath = orig }(testHookHostsPath)
+	defer func(orig string) { hostsFilePath = orig }(hostsFilePath)
 
-	testHookHostsPath = "testdata/ipv4-hosts"
+	hostsFilePath = "testdata/ipv4-hosts"
 	ent := staticHostEntry{"localhost", []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}}
-	testStaticHost(t, testHookHostsPath, ent)
+	testStaticHost(t, hostsFilePath, ent)
 	// Modify the addresses returned by lookupStaticHost.
 	addrs, _ := lookupStaticHost(ent.in)
 	for i := range addrs {
 		addrs[i] += "junk"
 	}
-	testStaticHost(t, testHookHostsPath, ent)
+	testStaticHost(t, hostsFilePath, ent)
 
-	testHookHostsPath = "testdata/ipv6-hosts"
+	hostsFilePath = "testdata/ipv6-hosts"
 	ent = staticHostEntry{"::1", []string{"localhost"}}
-	testStaticAddr(t, testHookHostsPath, ent)
+	testStaticAddr(t, hostsFilePath, ent)
 	// Modify the hosts returned by lookupStaticAddr.
 	hosts := lookupStaticAddr(ent.in)
 	for i := range hosts {
 		hosts[i] += "junk"
 	}
-	testStaticAddr(t, testHookHostsPath, ent)
+	testStaticAddr(t, hostsFilePath, ent)
 }
 
 var lookupStaticHostAliasesTest = []struct {
@@ -195,9 +195,9 @@
 }
 
 func TestLookupStaticHostAliases(t *testing.T) {
-	defer func(orig string) { testHookHostsPath = orig }(testHookHostsPath)
+	defer func(orig string) { hostsFilePath = orig }(hostsFilePath)
 
-	testHookHostsPath = "testdata/aliases"
+	hostsFilePath = "testdata/aliases"
 	for _, ent := range lookupStaticHostAliasesTest {
 		testLookupStaticHostAliases(t, ent.lookup, absDomainName(ent.res))
 	}
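
The tests above all repeat the same save-and-restore dance around hostsFilePath. A helper that bundles the pattern with t.Cleanup could look like this (the helper name is an assumption for illustration; the tests in this change inline the logic instead):

	func setHostsFilePath(t *testing.T, path string) {
		t.Helper()
		orig := hostsFilePath
		t.Cleanup(func() { hostsFilePath = orig }) // restore after the test
		hostsFilePath = path
	}
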
diff --git a/src/net/http/cgi/cgi_main.go b/src/net/http/cgi/cgi_main.go
new file mode 100644
index 0000000..8997d66
--- /dev/null
+++ b/src/net/http/cgi/cgi_main.go
@@ -0,0 +1,145 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgi
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"time"
+)
+
+func cgiMain() {
+	switch path.Join(os.Getenv("SCRIPT_NAME"), os.Getenv("PATH_INFO")) {
+	case "/bar", "/test.cgi", "/myscript/bar", "/test.cgi/extrapath":
+		testCGI()
+		return
+	}
+	childCGIProcess()
+}
+
+// testCGI is a CGI program translated from the package's former Perl test
+// script; it provides the responses that the test cases in host_test expect.
+func testCGI() {
+	req, err := Request()
+	if err != nil {
+		panic(err)
+	}
+
+	err = req.ParseForm()
+	if err != nil {
+		panic(err)
+	}
+
+	params := req.Form
+	if params.Get("loc") != "" {
+		fmt.Printf("Location: %s\r\n\r\n", params.Get("loc"))
+		return
+	}
+
+	fmt.Printf("Content-Type: text/html\r\n")
+	fmt.Printf("X-CGI-Pid: %d\r\n", os.Getpid())
+	fmt.Printf("X-Test-Header: X-Test-Value\r\n")
+	fmt.Printf("\r\n")
+
+	if params.Get("writestderr") != "" {
+		fmt.Fprintf(os.Stderr, "Hello, stderr!\n")
+	}
+
+	if params.Get("bigresponse") != "" {
+		// 17 MB, for OS X: golang.org/issue/4958
+		line := strings.Repeat("A", 1024)
+		for i := 0; i < 17*1024; i++ {
+			fmt.Printf("%s\r\n", line)
+		}
+		return
+	}
+
+	fmt.Printf("test=Hello CGI\r\n")
+
+	keys := make([]string, 0, len(params))
+	for k := range params {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, key := range keys {
+		fmt.Printf("param-%s=%s\r\n", key, params.Get(key))
+	}
+
+	envs := envMap(os.Environ())
+	keys = make([]string, 0, len(envs))
+	for k := range envs {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, key := range keys {
+		fmt.Printf("env-%s=%s\r\n", key, envs[key])
+	}
+
+	cwd, _ := os.Getwd()
+	fmt.Printf("cwd=%s\r\n", cwd)
+}
+
+type neverEnding byte
+
+func (b neverEnding) Read(p []byte) (n int, err error) {
+	for i := range p {
+		p[i] = byte(b)
+	}
+	return len(p), nil
+}
+
+// childCGIProcess is the CGI child process exercised by integration_test.
+func childCGIProcess() {
+	if os.Getenv("REQUEST_METHOD") == "" {
+		// Not in a CGI environment; skipping test.
+		return
+	}
+	switch os.Getenv("REQUEST_URI") {
+	case "/immediate-disconnect":
+		os.Exit(0)
+	case "/no-content-type":
+		fmt.Printf("Content-Length: 6\n\nHello\n")
+		os.Exit(0)
+	case "/empty-headers":
+		fmt.Printf("\nHello")
+		os.Exit(0)
+	}
+	Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+		if req.FormValue("nil-request-body") == "1" {
+			fmt.Fprintf(rw, "nil-request-body=%v\n", req.Body == nil)
+			return
+		}
+		rw.Header().Set("X-Test-Header", "X-Test-Value")
+		req.ParseForm()
+		if req.FormValue("no-body") == "1" {
+			return
+		}
+		if eb, ok := req.Form["exact-body"]; ok {
+			io.WriteString(rw, eb[0])
+			return
+		}
+		if req.FormValue("write-forever") == "1" {
+			io.Copy(rw, neverEnding('a'))
+			for {
+				time.Sleep(5 * time.Second) // hang forever, until killed
+			}
+		}
+		fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
+		for k, vv := range req.Form {
+			for _, v := range vv {
+				fmt.Fprintf(rw, "param-%s=%s\n", k, v)
+			}
+		}
+		for _, kv := range os.Environ() {
+			fmt.Fprintf(rw, "env-%s\n", kv)
+		}
+	}))
+	os.Exit(0)
+}
diff --git a/src/net/http/cgi/child.go b/src/net/http/cgi/child.go
index 1411f0b..e29fe20 100644
--- a/src/net/http/cgi/child.go
+++ b/src/net/http/cgi/child.go
@@ -46,7 +46,7 @@
 	return m
 }
 
-// RequestFromMap creates an http.Request from CGI variables.
+// RequestFromMap creates an [http.Request] from CGI variables.
 // The returned Request's Body field is not populated.
 func RequestFromMap(params map[string]string) (*http.Request, error) {
 	r := new(http.Request)
@@ -138,10 +138,10 @@
 	return r, nil
 }
 
-// Serve executes the provided Handler on the currently active CGI
+// Serve executes the provided [Handler] on the currently active CGI
 // request, if any. If there's no current CGI environment
 // an error is returned. The provided handler may be nil to use
-// http.DefaultServeMux.
+// [http.DefaultServeMux].
 func Serve(handler http.Handler) error {
 	req, err := Request()
 	if err != nil {
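
Serve, as documented above, runs a handler against the CGI request described by the process environment. A minimal standalone CGI program built on it might look like this (program layout is illustrative; only documented API is used):

	package main

	import (
		"fmt"
		"net/http"
		"net/http/cgi"
		"os"
	)

	func main() {
		err := cgi.Serve(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", "text/plain")
			fmt.Fprintf(w, "Hello from %s\n", r.URL.Path)
		}))
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
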
diff --git a/src/net/http/cgi/host.go b/src/net/http/cgi/host.go
index 073952a..ef222ab 100644
--- a/src/net/http/cgi/host.go
+++ b/src/net/http/cgi/host.go
@@ -115,23 +115,19 @@
 }
 
 func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
-	root := h.Root
-	if root == "" {
-		root = "/"
-	}
-
 	if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" {
 		rw.WriteHeader(http.StatusBadRequest)
 		rw.Write([]byte("Chunked request bodies are not supported by CGI."))
 		return
 	}
 
-	pathInfo := req.URL.Path
-	if root != "/" && strings.HasPrefix(pathInfo, root) {
-		pathInfo = pathInfo[len(root):]
-	}
+	root := strings.TrimRight(h.Root, "/")
+	pathInfo := strings.TrimPrefix(req.URL.Path, root)
 
 	port := "80"
+	if req.TLS != nil {
+		port = "443"
+	}
 	if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 {
 		port = matches[1]
 	}
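
The rewritten root handling above is easier to follow with concrete inputs; a small self-contained sketch of the same TrimRight/TrimPrefix combination (standalone, for illustration only):

	package main

	import (
		"fmt"
		"strings"
	)

	// pathInfo mirrors the two lines in ServeHTTP: strip trailing slashes from
	// Root, then remove that prefix from the request path.
	func pathInfo(root, urlPath string) string {
		root = strings.TrimRight(root, "/") // "/myscript//" -> "/myscript", "" -> ""
		return strings.TrimPrefix(urlPath, root)
	}

	func main() {
		fmt.Println(pathInfo("/myscript//", "/myscript/bar")) // "/bar"
		fmt.Println(pathInfo("", "/bar"))                     // "/bar" (empty Root)
	}
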
diff --git a/src/net/http/cgi/host_test.go b/src/net/http/cgi/host_test.go
index 860e9b3..f29395f 100644
--- a/src/net/http/cgi/host_test.go
+++ b/src/net/http/cgi/host_test.go
@@ -9,21 +9,33 @@
 import (
 	"bufio"
 	"fmt"
+	"internal/testenv"
 	"io"
 	"net"
 	"net/http"
 	"net/http/httptest"
 	"os"
-	"os/exec"
 	"path/filepath"
 	"reflect"
+	"regexp"
 	"runtime"
-	"strconv"
 	"strings"
 	"testing"
 	"time"
 )
 
+// TestMain executes the test binary as the cgi server if
+// SERVER_SOFTWARE is set, and runs the tests otherwise.
+func TestMain(m *testing.M) {
+	// SERVER_SOFTWARE is set by the CGI host when it invokes the child process.
+	if os.Getenv("SERVER_SOFTWARE") != "" {
+		cgiMain()
+		os.Exit(0)
+	}
+
+	os.Exit(m.Run())
+}
+
 func newRequest(httpreq string) *http.Request {
 	buf := bufio.NewReader(strings.NewReader(httpreq))
 	req, err := http.ReadRequest(buf)
@@ -88,24 +100,10 @@
 	}
 }
 
-var cgiTested, cgiWorks bool
-
-func check(t *testing.T) {
-	if !cgiTested {
-		cgiTested = true
-		cgiWorks = exec.Command("./testdata/test.cgi").Run() == nil
-	}
-	if !cgiWorks {
-		// No Perl on Windows, needed by test.cgi
-		// TODO: make the child process be Go, not Perl.
-		t.Skip("Skipping test: test.cgi failed.")
-	}
-}
-
 func TestCGIBasicGet(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 		Root: "/test.cgi",
 	}
 	expectedMap := map[string]string{
@@ -121,7 +119,7 @@
 		"env-REMOTE_PORT":       "1234",
 		"env-REQUEST_METHOD":    "GET",
 		"env-REQUEST_URI":       "/test.cgi?foo=bar&a=b",
-		"env-SCRIPT_FILENAME":   "testdata/test.cgi",
+		"env-SCRIPT_FILENAME":   os.Args[0],
 		"env-SCRIPT_NAME":       "/test.cgi",
 		"env-SERVER_NAME":       "example.com",
 		"env-SERVER_PORT":       "80",
@@ -138,9 +136,9 @@
 }
 
 func TestCGIEnvIPv6(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 		Root: "/test.cgi",
 	}
 	expectedMap := map[string]string{
@@ -156,7 +154,7 @@
 		"env-REMOTE_PORT":       "12345",
 		"env-REQUEST_METHOD":    "GET",
 		"env-REQUEST_URI":       "/test.cgi?foo=bar&a=b",
-		"env-SCRIPT_FILENAME":   "testdata/test.cgi",
+		"env-SCRIPT_FILENAME":   os.Args[0],
 		"env-SCRIPT_NAME":       "/test.cgi",
 		"env-SERVER_NAME":       "example.com",
 		"env-SERVER_PORT":       "80",
@@ -171,27 +169,27 @@
 }
 
 func TestCGIBasicGetAbsPath(t *testing.T) {
-	check(t)
-	pwd, err := os.Getwd()
+	absPath, err := filepath.Abs(os.Args[0])
 	if err != nil {
-		t.Fatalf("getwd error: %v", err)
+		t.Fatal(err)
 	}
+	testenv.MustHaveExec(t)
 	h := &Handler{
-		Path: pwd + "/testdata/test.cgi",
+		Path: absPath,
 		Root: "/test.cgi",
 	}
 	expectedMap := map[string]string{
 		"env-REQUEST_URI":     "/test.cgi?foo=bar&a=b",
-		"env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi",
+		"env-SCRIPT_FILENAME": absPath,
 		"env-SCRIPT_NAME":     "/test.cgi",
 	}
 	runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
 }
 
 func TestPathInfo(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 		Root: "/test.cgi",
 	}
 	expectedMap := map[string]string{
@@ -199,36 +197,36 @@
 		"env-PATH_INFO":       "/extrapath",
 		"env-QUERY_STRING":    "a=b",
 		"env-REQUEST_URI":     "/test.cgi/extrapath?a=b",
-		"env-SCRIPT_FILENAME": "testdata/test.cgi",
+		"env-SCRIPT_FILENAME": os.Args[0],
 		"env-SCRIPT_NAME":     "/test.cgi",
 	}
 	runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
 }
 
 func TestPathInfoDirRoot(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	h := &Handler{
-		Path: "testdata/test.cgi",
-		Root: "/myscript/",
+		Path: os.Args[0],
+		Root: "/myscript//",
 	}
 	expectedMap := map[string]string{
-		"env-PATH_INFO":       "bar",
+		"env-PATH_INFO":       "/bar",
 		"env-QUERY_STRING":    "a=b",
 		"env-REQUEST_URI":     "/myscript/bar?a=b",
-		"env-SCRIPT_FILENAME": "testdata/test.cgi",
-		"env-SCRIPT_NAME":     "/myscript/",
+		"env-SCRIPT_FILENAME": os.Args[0],
+		"env-SCRIPT_NAME":     "/myscript",
 	}
 	runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
 }
 
 func TestDupHeaders(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 	}
 	expectedMap := map[string]string{
 		"env-REQUEST_URI":     "/myscript/bar?a=b",
-		"env-SCRIPT_FILENAME": "testdata/test.cgi",
+		"env-SCRIPT_FILENAME": os.Args[0],
 		"env-HTTP_COOKIE":     "nom=NOM; yum=YUM",
 		"env-HTTP_X_FOO":      "val1, val2",
 	}
@@ -245,13 +243,13 @@
 // Verify we don't set the HTTP_PROXY environment variable.
 // Hope nobody was depending on it. It's not a known header, though.
 func TestDropProxyHeader(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 	}
 	expectedMap := map[string]string{
 		"env-REQUEST_URI":     "/myscript/bar?a=b",
-		"env-SCRIPT_FILENAME": "testdata/test.cgi",
+		"env-SCRIPT_FILENAME": os.Args[0],
 		"env-HTTP_X_FOO":      "a",
 	}
 	runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+
@@ -267,23 +265,23 @@
 }
 
 func TestPathInfoNoRoot(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 		Root: "",
 	}
 	expectedMap := map[string]string{
 		"env-PATH_INFO":       "/bar",
 		"env-QUERY_STRING":    "a=b",
 		"env-REQUEST_URI":     "/bar?a=b",
-		"env-SCRIPT_FILENAME": "testdata/test.cgi",
-		"env-SCRIPT_NAME":     "/",
+		"env-SCRIPT_FILENAME": os.Args[0],
+		"env-SCRIPT_NAME":     "",
 	}
 	runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
 }
 
 func TestCGIBasicPost(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	postReq := `POST /test.cgi?a=b HTTP/1.0
 Host: example.com
 Content-Type: application/x-www-form-urlencoded
@@ -291,7 +289,7 @@
 
 postfoo=postbar`
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 		Root: "/test.cgi",
 	}
 	expectedMap := map[string]string{
@@ -310,7 +308,7 @@
 
 // The CGI spec doesn't allow chunked requests.
 func TestCGIPostChunked(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	postReq := `POST /test.cgi?a=b HTTP/1.1
 Host: example.com
 Content-Type: application/x-www-form-urlencoded
@@ -319,7 +317,7 @@
 ` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("")
 
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 		Root: "/test.cgi",
 	}
 	expectedMap := map[string]string{}
@@ -331,9 +329,9 @@
 }
 
 func TestRedirect(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 		Root: "/test.cgi",
 	}
 	rec := runCgiTest(t, h, "GET /test.cgi?loc=http://foo.com/ HTTP/1.0\nHost: example.com\n\n", nil)
@@ -346,13 +344,13 @@
 }
 
 func TestInternalRedirect(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	baseHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
 		fmt.Fprintf(rw, "basepath=%s\n", req.URL.Path)
 		fmt.Fprintf(rw, "remoteaddr=%s\n", req.RemoteAddr)
 	})
 	h := &Handler{
-		Path:                "testdata/test.cgi",
+		Path:                os.Args[0],
 		Root:                "/test.cgi",
 		PathLocationHandler: baseHandler,
 	}
@@ -365,13 +363,14 @@
 
 // TestCopyError tests that we kill the process if there's an error copying
 // its output. (for example, from the client having gone away)
+//
+// If we fail to do so, the test will time out (and dump its goroutines) with a
+// call to [Handler.ServeHTTP] blocked on a deferred call to [exec.Cmd.Wait].
 func TestCopyError(t *testing.T) {
-	check(t)
-	if runtime.GOOS == "windows" {
-		t.Skipf("skipping test on %q", runtime.GOOS)
-	}
+	testenv.MustHaveExec(t)
+
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 		Root: "/test.cgi",
 	}
 	ts := httptest.NewServer(h)
@@ -392,48 +391,50 @@
 		t.Fatalf("ReadResponse: %v", err)
 	}
 
-	pidstr := res.Header.Get("X-CGI-Pid")
-	if pidstr == "" {
-		t.Fatalf("expected an X-CGI-Pid header in response")
-	}
-	pid, err := strconv.Atoi(pidstr)
-	if err != nil {
-		t.Fatalf("invalid X-CGI-Pid value")
-	}
-
 	var buf [5000]byte
 	n, err := io.ReadFull(res.Body, buf[:])
 	if err != nil {
 		t.Fatalf("ReadFull: %d bytes, %v", n, err)
 	}
 
-	childRunning := func() bool {
-		return isProcessRunning(pid)
-	}
-
-	if !childRunning() {
-		t.Fatalf("pre-conn.Close, expected child to be running")
+	if !handlerRunning() {
+		t.Fatalf("pre-conn.Close, expected handler to still be running")
 	}
 	conn.Close()
+	closed := time.Now()
 
-	tries := 0
-	for tries < 25 && childRunning() {
-		time.Sleep(50 * time.Millisecond * time.Duration(tries))
-		tries++
-	}
-	if childRunning() {
-		t.Fatalf("post-conn.Close, expected child to be gone")
+	nextSleep := 1 * time.Millisecond
+	for {
+		time.Sleep(nextSleep)
+		nextSleep *= 2
+		if !handlerRunning() {
+			break
+		}
+		t.Logf("handler still running %v after conn.Close", time.Since(closed))
 	}
 }
 
-func TestDirUnix(t *testing.T) {
-	check(t)
-	if runtime.GOOS == "windows" {
-		t.Skipf("skipping test on %q", runtime.GOOS)
+// handlerRunning reports whether any goroutine is currently running
+// [Handler.ServeHTTP].
+func handlerRunning() bool {
+	r := regexp.MustCompile(`net/http/cgi\.\(\*Handler\)\.ServeHTTP`)
+	buf := make([]byte, 64<<10)
+	for {
+		n := runtime.Stack(buf, true)
+		if n < len(buf) {
+			return r.Match(buf[:n])
+		}
+		// Buffer wasn't large enough for a full goroutine dump.
+		// Resize it and try again.
+		buf = make([]byte, 2*len(buf))
 	}
+}
+
+func TestDir(t *testing.T) {
+	testenv.MustHaveExec(t)
 	cwd, _ := os.Getwd()
 	h := &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 		Root: "/test.cgi",
 		Dir:  cwd,
 	}
@@ -443,9 +444,9 @@
 	runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
 
 	cwd, _ = os.Getwd()
-	cwd = filepath.Join(cwd, "testdata")
+	cwd, _ = filepath.Split(os.Args[0])
 	h = &Handler{
-		Path: "testdata/test.cgi",
+		Path: os.Args[0],
 		Root: "/test.cgi",
 	}
 	expectedMap = map[string]string{
@@ -454,75 +455,15 @@
 	runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
 }
 
-func findPerl(t *testing.T) string {
-	t.Helper()
-	perl, err := exec.LookPath("perl")
-	if err != nil {
-		t.Skip("Skipping test: perl not found.")
-	}
-	perl, _ = filepath.Abs(perl)
-
-	cmd := exec.Command(perl, "-e", "print 123")
-	cmd.Env = []string{"PATH=/garbage"}
-	out, err := cmd.Output()
-	if err != nil || string(out) != "123" {
-		t.Skipf("Skipping test: %s is not functional", perl)
-	}
-	return perl
-}
-
-func TestDirWindows(t *testing.T) {
-	if runtime.GOOS != "windows" {
-		t.Skip("Skipping windows specific test.")
-	}
-
-	cgifile, _ := filepath.Abs("testdata/test.cgi")
-
-	perl := findPerl(t)
-
-	cwd, _ := os.Getwd()
-	h := &Handler{
-		Path: perl,
-		Root: "/test.cgi",
-		Dir:  cwd,
-		Args: []string{cgifile},
-		Env:  []string{"SCRIPT_FILENAME=" + cgifile},
-	}
-	expectedMap := map[string]string{
-		"cwd": cwd,
-	}
-	runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-
-	// If not specify Dir on windows, working directory should be
-	// base directory of perl.
-	cwd, _ = filepath.Split(perl)
-	if cwd != "" && cwd[len(cwd)-1] == filepath.Separator {
-		cwd = cwd[:len(cwd)-1]
-	}
-	h = &Handler{
-		Path: perl,
-		Root: "/test.cgi",
-		Args: []string{cgifile},
-		Env:  []string{"SCRIPT_FILENAME=" + cgifile},
-	}
-	expectedMap = map[string]string{
-		"cwd": cwd,
-	}
-	runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
-}
-
 func TestEnvOverride(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	cgifile, _ := filepath.Abs("testdata/test.cgi")
 
-	perl := findPerl(t)
-
 	cwd, _ := os.Getwd()
 	h := &Handler{
-		Path: perl,
+		Path: os.Args[0],
 		Root: "/test.cgi",
 		Dir:  cwd,
-		Args: []string{cgifile},
 		Env: []string{
 			"SCRIPT_FILENAME=" + cgifile,
 			"REQUEST_URI=/foo/bar",
@@ -538,10 +479,10 @@
 }
 
 func TestHandlerStderr(t *testing.T) {
-	check(t)
+	testenv.MustHaveExec(t)
 	var stderr strings.Builder
 	h := &Handler{
-		Path:   "testdata/test.cgi",
+		Path:   os.Args[0],
 		Root:   "/test.cgi",
 		Stderr: &stderr,
 	}
diff --git a/src/net/http/cgi/integration_test.go b/src/net/http/cgi/integration_test.go
index ef2eaf7..68f908e 100644
--- a/src/net/http/cgi/integration_test.go
+++ b/src/net/http/cgi/integration_test.go
@@ -20,7 +20,6 @@
 	"os"
 	"strings"
 	"testing"
-	"time"
 )
 
 // This test is a CGI host (testing host.go) that runs its own binary
@@ -31,7 +30,6 @@
 	h := &Handler{
 		Path: os.Args[0],
 		Root: "/test.go",
-		Args: []string{"-test.run=TestBeChildCGIProcess"},
 	}
 	expectedMap := map[string]string{
 		"test":                  "Hello CGI-in-CGI",
@@ -98,9 +96,8 @@
 	h := &Handler{
 		Path: os.Args[0],
 		Root: "/test.go",
-		Args: []string{"-test.run=TestBeChildCGIProcess"},
 	}
-	req, _ := http.NewRequest("GET", "http://example.com/test.cgi?write-forever=1", nil)
+	req, _ := http.NewRequest("GET", "http://example.com/test.go?write-forever=1", nil)
 	rec := httptest.NewRecorder()
 	var out bytes.Buffer
 	const writeLen = 50 << 10
@@ -120,7 +117,6 @@
 	h := &Handler{
 		Path: os.Args[0],
 		Root: "/test.go",
-		Args: []string{"-test.run=TestBeChildCGIProcess"},
 	}
 	expectedMap := map[string]string{
 		"_body": "",
@@ -139,7 +135,6 @@
 	h := &Handler{
 		Path: os.Args[0],
 		Root: "/test.go",
-		Args: []string{"-test.run=TestBeChildCGIProcess"},
 	}
 	expectedMap := map[string]string{
 		"nil-request-body": "false",
@@ -154,7 +149,6 @@
 	h := &Handler{
 		Path: os.Args[0],
 		Root: "/test.go",
-		Args: []string{"-test.run=TestBeChildCGIProcess"},
 	}
 	var tests = []struct {
 		name   string
@@ -202,7 +196,6 @@
 	h := &Handler{
 		Path: os.Args[0],
 		Root: "/test.go",
-		Args: []string{"-test.run=TestBeChildCGIProcess"},
 	}
 	expectedMap := map[string]string{
 		"_body": "",
@@ -212,61 +205,3 @@
 		t.Errorf("Got code %d; want 500", replay.Code)
 	}
 }
-
-type neverEnding byte
-
-func (b neverEnding) Read(p []byte) (n int, err error) {
-	for i := range p {
-		p[i] = byte(b)
-	}
-	return len(p), nil
-}
-
-// Note: not actually a test.
-func TestBeChildCGIProcess(t *testing.T) {
-	if os.Getenv("REQUEST_METHOD") == "" {
-		// Not in a CGI environment; skipping test.
-		return
-	}
-	switch os.Getenv("REQUEST_URI") {
-	case "/immediate-disconnect":
-		os.Exit(0)
-	case "/no-content-type":
-		fmt.Printf("Content-Length: 6\n\nHello\n")
-		os.Exit(0)
-	case "/empty-headers":
-		fmt.Printf("\nHello")
-		os.Exit(0)
-	}
-	Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
-		if req.FormValue("nil-request-body") == "1" {
-			fmt.Fprintf(rw, "nil-request-body=%v\n", req.Body == nil)
-			return
-		}
-		rw.Header().Set("X-Test-Header", "X-Test-Value")
-		req.ParseForm()
-		if req.FormValue("no-body") == "1" {
-			return
-		}
-		if eb, ok := req.Form["exact-body"]; ok {
-			io.WriteString(rw, eb[0])
-			return
-		}
-		if req.FormValue("write-forever") == "1" {
-			io.Copy(rw, neverEnding('a'))
-			for {
-				time.Sleep(5 * time.Second) // hang forever, until killed
-			}
-		}
-		fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
-		for k, vv := range req.Form {
-			for _, v := range vv {
-				fmt.Fprintf(rw, "param-%s=%s\n", k, v)
-			}
-		}
-		for _, kv := range os.Environ() {
-			fmt.Fprintf(rw, "env-%s\n", kv)
-		}
-	}))
-	os.Exit(0)
-}
diff --git a/src/net/http/cgi/plan9_test.go b/src/net/http/cgi/plan9_test.go
deleted file mode 100644
index b7ace3f..0000000
--- a/src/net/http/cgi/plan9_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build plan9
-
-package cgi
-
-import (
-	"os"
-	"strconv"
-)
-
-func isProcessRunning(pid int) bool {
-	_, err := os.Stat("/proc/" + strconv.Itoa(pid))
-	return err == nil
-}
diff --git a/src/net/http/cgi/posix_test.go b/src/net/http/cgi/posix_test.go
deleted file mode 100644
index 49b9470..0000000
--- a/src/net/http/cgi/posix_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !plan9
-
-package cgi
-
-import (
-	"os"
-	"syscall"
-)
-
-func isProcessRunning(pid int) bool {
-	p, err := os.FindProcess(pid)
-	if err != nil {
-		return false
-	}
-	return p.Signal(syscall.Signal(0)) == nil
-}
diff --git a/src/net/http/cgi/testdata/test.cgi b/src/net/http/cgi/testdata/test.cgi
deleted file mode 100755
index 667fce2..0000000
--- a/src/net/http/cgi/testdata/test.cgi
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/perl
-# Copyright 2011 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-#
-# Test script run as a child process under cgi_test.go
-
-use strict;
-use Cwd;
-
-binmode STDOUT;
-
-my $q = MiniCGI->new;
-my $params = $q->Vars;
-
-if ($params->{"loc"}) {
-    print "Location: $params->{loc}\r\n\r\n";
-    exit(0);
-}
-
-print "Content-Type: text/html\r\n";
-print "X-CGI-Pid: $$\r\n";
-print "X-Test-Header: X-Test-Value\r\n";
-print "\r\n";
-
-if ($params->{"writestderr"}) {
-    print STDERR "Hello, stderr!\n";
-}
-
-if ($params->{"bigresponse"}) {
-    # 17 MB, for OS X: golang.org/issue/4958
-    for (1..(17 * 1024)) {
-        print "A" x 1024, "\r\n";
-    }
-    exit 0;
-}
-
-print "test=Hello CGI\r\n";
-
-foreach my $k (sort keys %$params) {
-    print "param-$k=$params->{$k}\r\n";
-}
-
-foreach my $k (sort keys %ENV) {
-    my $clean_env = $ENV{$k};
-    $clean_env =~ s/[\n\r]//g;
-    print "env-$k=$clean_env\r\n";
-}
-
-# NOTE: msys perl returns /c/go/src/... not C:\go\....
-my $dir = getcwd();
-if ($^O eq 'MSWin32' || $^O eq 'msys' || $^O eq 'cygwin') {
-    if ($dir =~ /^.:/) {
-        $dir =~ s!/!\\!g;
-    } else {
-        my $cmd = $ENV{'COMSPEC'} || 'c:\\windows\\system32\\cmd.exe';
-        $cmd =~ s!\\!/!g;
-        $dir = `$cmd /c cd`;
-        chomp $dir;
-    }
-}
-print "cwd=$dir\r\n";
-
-# A minimal version of CGI.pm, for people without the perl-modules
-# package installed.  (CGI.pm used to be part of the Perl core, but
-# some distros now bundle perl-base and perl-modules separately...)
-package MiniCGI;
-
-sub new {
-    my $class = shift;
-    return bless {}, $class;
-}
-
-sub Vars {
-    my $self = shift;
-    my $pairs;
-    if ($ENV{CONTENT_LENGTH}) {
-        $pairs = do { local $/; <STDIN> };
-    } else {
-        $pairs = $ENV{QUERY_STRING};
-    }
-    my $vars = {};
-    foreach my $kv (split(/&/, $pairs)) {
-        my ($k, $v) = split(/=/, $kv, 2);
-        $vars->{_urldecode($k)} = _urldecode($v);
-    }
-    return $vars;
-}
-
-sub _urldecode {
-    my $v = shift;
-    $v =~ tr/+/ /;
-    $v =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
-    return $v;
-}
diff --git a/src/net/http/client.go b/src/net/http/client.go
index 2cab53a..8fc348f 100644
--- a/src/net/http/client.go
+++ b/src/net/http/client.go
@@ -27,34 +27,33 @@
 	"time"
 )
 
-// A Client is an HTTP client. Its zero value (DefaultClient) is a
-// usable client that uses DefaultTransport.
+// A Client is an HTTP client. Its zero value ([DefaultClient]) is a
+// usable client that uses [DefaultTransport].
 //
-// The Client's Transport typically has internal state (cached TCP
+// The [Client.Transport] typically has internal state (cached TCP
 // connections), so Clients should be reused instead of created as
 // needed. Clients are safe for concurrent use by multiple goroutines.
 //
-// A Client is higher-level than a RoundTripper (such as Transport)
+// A Client is higher-level than a [RoundTripper] (such as [Transport])
 // and additionally handles HTTP details such as cookies and
 // redirects.
 //
 // When following redirects, the Client will forward all headers set on the
-// initial Request except:
+// initial [Request] except:
 //
-// • when forwarding sensitive headers like "Authorization",
-// "WWW-Authenticate", and "Cookie" to untrusted targets.
-// These headers will be ignored when following a redirect to a domain
-// that is not a subdomain match or exact match of the initial domain.
-// For example, a redirect from "foo.com" to either "foo.com" or "sub.foo.com"
-// will forward the sensitive headers, but a redirect to "bar.com" will not.
-//
-// • when forwarding the "Cookie" header with a non-nil cookie Jar.
-// Since each redirect may mutate the state of the cookie jar,
-// a redirect may possibly alter a cookie set in the initial request.
-// When forwarding the "Cookie" header, any mutated cookies will be omitted,
-// with the expectation that the Jar will insert those mutated cookies
-// with the updated values (assuming the origin matches).
-// If Jar is nil, the initial cookies are forwarded without change.
+//   - when forwarding sensitive headers like "Authorization",
+//     "WWW-Authenticate", and "Cookie" to untrusted targets.
+//     These headers will be ignored when following a redirect to a domain
+//     that is not a subdomain match or exact match of the initial domain.
+//     For example, a redirect from "foo.com" to either "foo.com" or "sub.foo.com"
+//     will forward the sensitive headers, but a redirect to "bar.com" will not.
+//   - when forwarding the "Cookie" header with a non-nil cookie Jar.
+//     Since each redirect may mutate the state of the cookie jar,
+//     a redirect may possibly alter a cookie set in the initial request.
+//     When forwarding the "Cookie" header, any mutated cookies will be omitted,
+//     with the expectation that the Jar will insert those mutated cookies
+//     with the updated values (assuming the origin matches).
+//     If Jar is nil, the initial cookies are forwarded without change.
 type Client struct {
 	// Transport specifies the mechanism by which individual
 	// HTTP requests are made.
@@ -106,11 +105,11 @@
 	Timeout time.Duration
 }
 
-// DefaultClient is the default Client and is used by Get, Head, and Post.
+// DefaultClient is the default [Client] and is used by [Get], [Head], and [Post].
 var DefaultClient = &Client{}
 
 // RoundTripper is an interface representing the ability to execute a
-// single HTTP transaction, obtaining the Response for a given Request.
+// single HTTP transaction, obtaining the [Response] for a given [Request].
 //
 // A RoundTripper must be safe for concurrent use by multiple
 // goroutines.
@@ -440,7 +439,7 @@
 //
 // An error is returned if there were too many redirects or if there
 // was an HTTP protocol error. A non-2xx response doesn't cause an
-// error. Any returned error will be of type *url.Error. The url.Error
+// error. Any returned error will be of type [*url.Error]. The url.Error
 // value's Timeout method will report true if the request timed out.
 //
 // When err is nil, resp always contains a non-nil resp.Body.
@@ -448,10 +447,10 @@
 //
 // Get is a wrapper around DefaultClient.Get.
 //
-// To make a request with custom headers, use NewRequest and
+// To make a request with custom headers, use [NewRequest] and
 // DefaultClient.Do.
 //
-// To make a request with a specified context.Context, use NewRequestWithContext
+// To make a request with a specified context.Context, use [NewRequestWithContext]
 // and DefaultClient.Do.
 func Get(url string) (resp *Response, err error) {
 	return DefaultClient.Get(url)
@@ -459,7 +458,7 @@
 
 // Get issues a GET to the specified URL. If the response is one of the
 // following redirect codes, Get follows the redirect after calling the
-// Client's CheckRedirect function:
+// [Client.CheckRedirect] function:
 //
 //	301 (Moved Permanently)
 //	302 (Found)
@@ -467,18 +466,18 @@
 //	307 (Temporary Redirect)
 //	308 (Permanent Redirect)
 //
-// An error is returned if the Client's CheckRedirect function fails
+// An error is returned if the [Client.CheckRedirect] function fails
 // or if there was an HTTP protocol error. A non-2xx response doesn't
-// cause an error. Any returned error will be of type *url.Error. The
+// cause an error. Any returned error will be of type [*url.Error]. The
 // url.Error value's Timeout method will report true if the request
 // timed out.
 //
 // When err is nil, resp always contains a non-nil resp.Body.
 // Caller should close resp.Body when done reading from it.
 //
-// To make a request with custom headers, use NewRequest and Client.Do.
+// To make a request with custom headers, use [NewRequest] and [Client.Do].
 //
-// To make a request with a specified context.Context, use NewRequestWithContext
+// To make a request with a specified context.Context, use [NewRequestWithContext]
 // and Client.Do.
 func (c *Client) Get(url string) (resp *Response, err error) {
 	req, err := NewRequest("GET", url, nil)
@@ -559,20 +558,21 @@
 // connectivity problem). A non-2xx status code doesn't cause an
 // error.
 //
-// If the returned error is nil, the Response will contain a non-nil
+// If the returned error is nil, the [Response] will contain a non-nil
 // Body which the user is expected to close. If the Body is not both
-// read to EOF and closed, the Client's underlying RoundTripper
-// (typically Transport) may not be able to re-use a persistent TCP
+// read to EOF and closed, the [Client]'s underlying [RoundTripper]
+// (typically [Transport]) may not be able to re-use a persistent TCP
 // connection to the server for a subsequent "keep-alive" request.
 //
 // The request Body, if non-nil, will be closed by the underlying
-// Transport, even on errors.
+// Transport, even on errors. The Body may be closed asynchronously after
+// Do returns.
 //
 // On error, any Response can be ignored. A non-nil Response with a
 // non-nil error only occurs when CheckRedirect fails, and even then
-// the returned Response.Body is already closed.
+// the returned [Response.Body] is already closed.
 //
-// Generally Get, Post, or PostForm will be used instead of Do.
+// Generally [Get], [Post], or [PostForm] will be used instead of Do.
 //
 // If the server replies with a redirect, the Client first uses the
 // CheckRedirect function to determine whether the redirect should be
@@ -580,11 +580,11 @@
 // subsequent requests to use HTTP method GET
 // (or HEAD if the original request was HEAD), with no body.
 // A 307 or 308 redirect preserves the original HTTP method and body,
-// provided that the Request.GetBody function is defined.
-// The NewRequest function automatically sets GetBody for common
+// provided that the [Request.GetBody] function is defined.
+// The [NewRequest] function automatically sets GetBody for common
 // standard library body types.
 //
-// Any returned error will be of type *url.Error. The url.Error
+// Any returned error will be of type [*url.Error]. The url.Error
 // value's Timeout method will report true if the request timed out.
 func (c *Client) Do(req *Request) (*Response, error) {
 	return c.do(req)
@@ -818,17 +818,17 @@
 //
 // Caller should close resp.Body when done reading from it.
 //
-// If the provided body is an io.Closer, it is closed after the
+// If the provided body is an [io.Closer], it is closed after the
 // request.
 //
 // Post is a wrapper around DefaultClient.Post.
 //
-// To set custom headers, use NewRequest and DefaultClient.Do.
+// To set custom headers, use [NewRequest] and DefaultClient.Do.
 //
-// See the Client.Do method documentation for details on how redirects
+// See the [Client.Do] method documentation for details on how redirects
 // are handled.
 //
-// To make a request with a specified context.Context, use NewRequestWithContext
+// To make a request with a specified context.Context, use [NewRequestWithContext]
 // and DefaultClient.Do.
 func Post(url, contentType string, body io.Reader) (resp *Response, err error) {
 	return DefaultClient.Post(url, contentType, body)
@@ -838,13 +838,13 @@
 //
 // Caller should close resp.Body when done reading from it.
 //
-// If the provided body is an io.Closer, it is closed after the
+// If the provided body is an [io.Closer], it is closed after the
 // request.
 //
-// To set custom headers, use NewRequest and Client.Do.
+// To set custom headers, use [NewRequest] and [Client.Do].
 //
-// To make a request with a specified context.Context, use NewRequestWithContext
-// and Client.Do.
+// To make a request with a specified context.Context, use [NewRequestWithContext]
+// and [Client.Do].
 //
 // See the Client.Do method documentation for details on how redirects
 // are handled.
@@ -861,17 +861,17 @@
 // values URL-encoded as the request body.
 //
 // The Content-Type header is set to application/x-www-form-urlencoded.
-// To set other headers, use NewRequest and DefaultClient.Do.
+// To set other headers, use [NewRequest] and DefaultClient.Do.
 //
 // When err is nil, resp always contains a non-nil resp.Body.
 // Caller should close resp.Body when done reading from it.
 //
 // PostForm is a wrapper around DefaultClient.PostForm.
 //
-// See the Client.Do method documentation for details on how redirects
+// See the [Client.Do] method documentation for details on how redirects
 // are handled.
 //
-// To make a request with a specified context.Context, use NewRequestWithContext
+// To make a request with a specified [context.Context], use [NewRequestWithContext]
 // and DefaultClient.Do.
 func PostForm(url string, data url.Values) (resp *Response, err error) {
 	return DefaultClient.PostForm(url, data)
@@ -881,7 +881,7 @@
 // with data's keys and values URL-encoded as the request body.
 //
 // The Content-Type header is set to application/x-www-form-urlencoded.
-// To set other headers, use NewRequest and Client.Do.
+// To set other headers, use [NewRequest] and [Client.Do].
 //
 // When err is nil, resp always contains a non-nil resp.Body.
 // Caller should close resp.Body when done reading from it.
@@ -889,7 +889,7 @@
 // See the Client.Do method documentation for details on how redirects
 // are handled.
 //
-// To make a request with a specified context.Context, use NewRequestWithContext
+// To make a request with a specified context.Context, use [NewRequestWithContext]
 // and Client.Do.
 func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) {
 	return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
@@ -907,7 +907,7 @@
 //
 // Head is a wrapper around DefaultClient.Head.
 //
-// To make a request with a specified context.Context, use NewRequestWithContext
+// To make a request with a specified [context.Context], use [NewRequestWithContext]
 // and DefaultClient.Do.
 func Head(url string) (resp *Response, err error) {
 	return DefaultClient.Head(url)
@@ -915,7 +915,7 @@
 
 // Head issues a HEAD to the specified URL. If the response is one of the
 // following redirect codes, Head follows the redirect after calling the
-// Client's CheckRedirect function:
+// [Client.CheckRedirect] function:
 //
 //	301 (Moved Permanently)
 //	302 (Found)
@@ -923,8 +923,8 @@
 //	307 (Temporary Redirect)
 //	308 (Permanent Redirect)
 //
-// To make a request with a specified context.Context, use NewRequestWithContext
-// and Client.Do.
+// To make a request with a specified [context.Context], use [NewRequestWithContext]
+// and [Client.Do].
 func (c *Client) Head(url string) (resp *Response, err error) {
 	req, err := NewRequest("HEAD", url, nil)
 	if err != nil {
@@ -933,12 +933,12 @@
 	return c.Do(req)
 }
 
-// CloseIdleConnections closes any connections on its Transport which
+// CloseIdleConnections closes any connections on its [Transport] which
 // were previously connected from previous requests but are now
 // sitting idle in a "keep-alive" state. It does not interrupt any
 // connections currently in use.
 //
-// If the Client's Transport does not have a CloseIdleConnections method
+// If [Client.Transport] does not have a CloseIdleConnections method
 // then this method does nothing.
 func (c *Client) CloseIdleConnections() {
 	type closeIdler interface {
@@ -1014,6 +1014,12 @@
 	if sub == parent {
 		return true
 	}
+	// If sub contains a :, it's probably an IPv6 address (and is definitely not a hostname).
+	// Don't check the suffix in this case, to avoid matching the contents of an IPv6 zone.
+	// For example, "::1%.www.example.com" is not a subdomain of "www.example.com".
+	if strings.ContainsAny(sub, ":%") {
+		return false
+	}
 	// If sub is "foo.example.com" and parent is "example.com",
 	// that means sub must end in "."+parent.
 	// Do it without allocating.
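
The new ContainsAny guard above stops an IPv6 zone such as "::1%.www.example.com" from passing the subdomain test that decides whether sensitive headers are forwarded on redirect. Restated as a standalone function (a simplified sketch assuming the strings package; the real check stays unexported in this file and avoids the allocation):

	// isSubdomainOf reports whether sub equals parent or ends in "."+parent,
	// rejecting anything containing ':' or '%' since that cannot be a hostname.
	func isSubdomainOf(sub, parent string) bool {
		if sub == parent {
			return true
		}
		if strings.ContainsAny(sub, ":%") {
			return false // likely an IPv6 literal or zone
		}
		return strings.HasSuffix(sub, "."+parent)
	}
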
diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go
index 0fe555a..e2a1cbb 100644
--- a/src/net/http/client_test.go
+++ b/src/net/http/client_test.go
@@ -60,13 +60,6 @@
 	}
 }
 
-type chanWriter chan string
-
-func (w chanWriter) Write(p []byte) (n int, err error) {
-	w <- string(p)
-	return len(p), nil
-}
-
 func TestClient(t *testing.T) { run(t, testClient) }
 func testClient(t *testing.T, mode testMode) {
 	ts := newClientServerTest(t, mode, robotsTxtHandler).ts
@@ -827,12 +820,12 @@
 	run(t, testClientInsecureTransport, []testMode{https1Mode, http2Mode})
 }
 func testClientInsecureTransport(t *testing.T, mode testMode) {
-	ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
+	cst := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
 		w.Write([]byte("Hello"))
-	})).ts
-	errc := make(chanWriter, 10) // but only expecting 1
-	ts.Config.ErrorLog = log.New(errc, "", 0)
-	defer ts.Close()
+	}))
+	ts := cst.ts
+	errLog := new(strings.Builder)
+	ts.Config.ErrorLog = log.New(errLog, "", 0)
 
 	// TODO(bradfitz): add tests for skipping hostname checks too?
 	// would require a new cert for testing, and probably
@@ -851,15 +844,10 @@
 		}
 	}
 
-	select {
-	case v := <-errc:
-		if !strings.Contains(v, "TLS handshake error") {
-			t.Errorf("expected an error log message containing 'TLS handshake error'; got %q", v)
-		}
-	case <-time.After(5 * time.Second):
-		t.Errorf("timeout waiting for logged error")
+	cst.close()
+	if !strings.Contains(errLog.String(), "TLS handshake error") {
+		t.Errorf("expected an error log message containing 'TLS handshake error'; got %q", errLog)
 	}
-
 }
 
 func TestClientErrorWithRequestURI(t *testing.T) {
@@ -897,9 +885,10 @@
 	run(t, testClientWithIncorrectTLSServerName, []testMode{https1Mode, http2Mode})
 }
 func testClientWithIncorrectTLSServerName(t *testing.T, mode testMode) {
-	ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {})).ts
-	errc := make(chanWriter, 10) // but only expecting 1
-	ts.Config.ErrorLog = log.New(errc, "", 0)
+	cst := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {}))
+	ts := cst.ts
+	errLog := new(strings.Builder)
+	ts.Config.ErrorLog = log.New(errLog, "", 0)
 
 	c := ts.Client()
 	c.Transport.(*Transport).TLSClientConfig.ServerName = "badserver"
@@ -910,13 +899,10 @@
 	if !strings.Contains(err.Error(), "127.0.0.1") || !strings.Contains(err.Error(), "badserver") {
 		t.Errorf("wanted error mentioning 127.0.0.1 and badserver; got error: %v", err)
 	}
-	select {
-	case v := <-errc:
-		if !strings.Contains(v, "TLS handshake error") {
-			t.Errorf("expected an error log message containing 'TLS handshake error'; got %q", v)
-		}
-	case <-time.After(5 * time.Second):
-		t.Errorf("timeout waiting for logged error")
+
+	cst.close()
+	if !strings.Contains(errLog.String(), "TLS handshake error") {
+		t.Errorf("expected an error log message containing 'TLS handshake error'; got %q", errLog)
 	}
 }
 
@@ -960,7 +946,7 @@
 
 	c := ts.Client()
 	tr := c.Transport.(*Transport)
-	tr.TLSClientConfig.CipherSuites = []uint16{tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA}
+	tr.TLSClientConfig.CipherSuites = []uint16{tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA}
 	tr.TLSClientConfig.MaxVersion = tls.VersionTLS12 // to get to pick the cipher suite
 	tr.Dial = func(netw, addr string) (net.Conn, error) {
 		return net.Dial(netw, ts.Listener.Addr().String())
@@ -973,7 +959,7 @@
 	if res.TLS == nil {
 		t.Fatal("Response didn't set TLS Connection State.")
 	}
-	if got, want := res.TLS.CipherSuite, tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA; got != want {
+	if got, want := res.TLS.CipherSuite, tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA; got != want {
 		t.Errorf("TLS Cipher Suite = %d; want %d", got, want)
 	}
 }
@@ -1725,6 +1711,7 @@
 		{"authorization", "http://foo.com/", "https://foo.com/", true},
 		{"authorization", "http://foo.com:1234/", "http://foo.com:4321/", true},
 		{"www-authenticate", "http://foo.com/", "http://bar.com/", false},
+		{"authorization", "http://foo.com/", "http://[::1%25.foo.com]/", false},
 
 		// But subdomains should work:
 		{"www-authenticate", "http://foo.com/", "http://foo.com/", true},
diff --git a/src/net/http/clientserver_test.go b/src/net/http/clientserver_test.go
index 5832153..32948f3 100644
--- a/src/net/http/clientserver_test.go
+++ b/src/net/http/clientserver_test.go
@@ -1172,16 +1172,12 @@
 			t.Fatal(err)
 		}
 	})()
-	timeout := time.NewTimer(5 * time.Second)
-	defer timeout.Stop()
 	for {
 		select {
 		case <-didGC:
 			return
-		case <-time.After(100 * time.Millisecond):
+		case <-time.After(1 * time.Millisecond):
 			runtime.GC()
-		case <-timeout.C:
-			t.Fatal("never saw GC of request")
 		}
 	}
 }
diff --git a/src/net/http/cookie.go b/src/net/http/cookie.go
index 912fde6..c22897f 100644
--- a/src/net/http/cookie.go
+++ b/src/net/http/cookie.go
@@ -163,7 +163,7 @@
 	return cookies
 }
 
-// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.
+// SetCookie adds a Set-Cookie header to the provided [ResponseWriter]'s headers.
 // The provided cookie must have a valid Name. Invalid cookies may be
 // silently dropped.
 func SetCookie(w ResponseWriter, cookie *Cookie) {
@@ -172,7 +172,7 @@
 	}
 }
 
-// String returns the serialization of the cookie for use in a Cookie
+// String returns the serialization of the cookie for use in a [Cookie]
 // header (if only Name and Value are set) or a Set-Cookie response
 // header (if other fields are set).
 // If c is nil or c.Name is invalid, the empty string is returned.
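
SetCookie and Cookie.String, documented above, are the two ways a cookie leaves a server; a minimal handler-side usage sketch (handler name and cookie values are placeholders):

	func loginHandler(w http.ResponseWriter, r *http.Request) {
		http.SetCookie(w, &http.Cookie{
			Name:     "session",
			Value:    "opaque-token", // placeholder
			Path:     "/",
			HttpOnly: true,
		})
		w.WriteHeader(http.StatusNoContent)
	}
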
diff --git a/src/net/http/cookiejar/jar.go b/src/net/http/cookiejar/jar.go
index 273b54c..e7f5ddd 100644
--- a/src/net/http/cookiejar/jar.go
+++ b/src/net/http/cookiejar/jar.go
@@ -73,7 +73,7 @@
 	nextSeqNum uint64
 }
 
-// New returns a new cookie jar. A nil *Options is equivalent to a zero
+// New returns a new cookie jar. A nil [*Options] is equivalent to a zero
 // Options.
 func New(o *Options) (*Jar, error) {
 	jar := &Jar{
@@ -151,7 +151,7 @@
 	return len(s) > len(suffix) && s[len(s)-len(suffix)-1] == '.' && s[len(s)-len(suffix):] == suffix
 }
 
-// Cookies implements the Cookies method of the http.CookieJar interface.
+// Cookies implements the Cookies method of the [http.CookieJar] interface.
 //
 // It returns an empty slice if the URL's scheme is not HTTP or HTTPS.
 func (j *Jar) Cookies(u *url.URL) (cookies []*http.Cookie) {
@@ -226,7 +226,7 @@
 	return cookies
 }
 
-// SetCookies implements the SetCookies method of the http.CookieJar interface.
+// SetCookies implements the SetCookies method of the [http.CookieJar] interface.
 //
 // It does nothing if the URL's scheme is not HTTP or HTTPS.
 func (j *Jar) SetCookies(u *url.URL, cookies []*http.Cookie) {
@@ -362,6 +362,13 @@
 
 // isIP reports whether host is an IP address.
 func isIP(host string) bool {
+	if strings.ContainsAny(host, ":%") {
+		// Probable IPv6 address.
+		// Hostnames can't contain : or %, so this is definitely not a valid host.
+		// Treating it as an IP is the more conservative option, and avoids the risk
+	// of interpreting ::1%.www.example.com as a subdomain of www.example.com.
+		return true
+	}
 	return net.ParseIP(host) != nil
 }
 
@@ -440,7 +447,6 @@
 var (
 	errIllegalDomain   = errors.New("cookiejar: illegal cookie domain attribute")
 	errMalformedDomain = errors.New("cookiejar: malformed cookie domain attribute")
-	errNoHostname      = errors.New("cookiejar: no host name available (IP only)")
 )
 
 // endOfTime is the time when session (non-persistent) cookies expire.
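
A Jar is normally consumed through http.Client rather than called directly; a short usage sketch (assumes the log, net/http, and net/http/cookiejar imports):

	jar, err := cookiejar.New(nil) // nil *Options is equivalent to a zero Options
	if err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Jar: jar}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	// Cookies set by the server are stored in jar and resent on later
	// requests whose URLs match.
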
diff --git a/src/net/http/cookiejar/jar_test.go b/src/net/http/cookiejar/jar_test.go
index 56d0695..251f7c1 100644
--- a/src/net/http/cookiejar/jar_test.go
+++ b/src/net/http/cookiejar/jar_test.go
@@ -252,6 +252,7 @@
 	"127.0.0.1":            true,
 	"1.2.3.4":              true,
 	"2001:4860:0:2001::68": true,
+	"::1%zone":             true,
 	"example.com":          false,
 	"1.1.1.300":            false,
 	"www.foo.bar.net":      false,
@@ -629,6 +630,15 @@
 			{"http://www.host.test:1234/", "a=1"},
 		},
 	},
+	{
+		"IPv6 zone is not treated as a host.",
+		"https://example.com/",
+		[]string{"a=1"},
+		"a=1",
+		[]query{
+			{"https://[::1%25.example.com]:80/", ""},
+		},
+	},
 }
 
 func TestBasics(t *testing.T) {
diff --git a/src/net/http/doc.go b/src/net/http/doc.go
index d9e6aaf..f7ad3ae 100644
--- a/src/net/http/doc.go
+++ b/src/net/http/doc.go
@@ -5,7 +5,7 @@
 /*
 Package http provides HTTP client and server implementations.
 
-Get, Head, Post, and PostForm make HTTP (or HTTPS) requests:
+[Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests:
 
 	resp, err := http.Get("http://example.com/")
 	...
@@ -27,7 +27,7 @@
 # Clients and Transports
 
 For control over HTTP client headers, redirect policy, and other
-settings, create a Client:
+settings, create a [Client]:
 
 	client := &http.Client{
 		CheckRedirect: redirectPolicyFunc,
@@ -43,7 +43,7 @@
 	// ...
 
 For control over proxies, TLS configuration, keep-alives,
-compression, and other settings, create a Transport:
+compression, and other settings, create a [Transport]:
 
 	tr := &http.Transport{
 		MaxIdleConns:       10,
@@ -59,8 +59,8 @@
 # Servers
 
 ListenAndServe starts an HTTP server with a given address and handler.
-The handler is usually nil, which means to use DefaultServeMux.
-Handle and HandleFunc add handlers to DefaultServeMux:
+The handler is usually nil, which means to use [DefaultServeMux].
+[Handle] and [HandleFunc] add handlers to [DefaultServeMux]:
 
 	http.Handle("/foo", fooHandler)
 
@@ -86,8 +86,8 @@
 
 Starting with Go 1.6, the http package has transparent support for the
 HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2
-can do so by setting Transport.TLSNextProto (for clients) or
-Server.TLSNextProto (for servers) to a non-nil, empty
+can do so by setting [Transport.TLSNextProto] (for clients) or
+[Server.TLSNextProto] (for servers) to a non-nil, empty
 map. Alternatively, the following GODEBUG settings are
 currently supported:
 
@@ -98,7 +98,7 @@
 
 Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug
 
-The http package's Transport and Server both automatically enable
+The http package's [Transport] and [Server] both automatically enable
 HTTP/2 support for simple configurations. To enable HTTP/2 for more
 complex configurations, to use lower-level HTTP/2 features, or to use
 a newer version of Go's http2 package, import "golang.org/x/net/http2"
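
Concretely, the TLSNextProto knobs referenced above are set like this to opt out of HTTP/2 (a sketch using only documented fields; assumes crypto/tls and net/http imports):

	// Client side: a non-nil, empty map disables HTTP/2 on this Transport.
	tr := &http.Transport{
		TLSNextProto: map[string]func(authority string, c *tls.Conn) http.RoundTripper{},
	}
	client := &http.Client{Transport: tr}

	// Server side: the same idea with the server's map type.
	srv := &http.Server{
		Addr:         ":8443",
		TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){},
	}
	_, _ = client, srv
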
diff --git a/src/net/http/export_test.go b/src/net/http/export_test.go
index 5d198f3..7e6d3d8 100644
--- a/src/net/http/export_test.go
+++ b/src/net/http/export_test.go
@@ -315,3 +315,21 @@
 	}
 	return nil, false
 }
+
+func init() {
+	// Set the default rstAvoidanceDelay to the minimum possible value to shake
+	// out tests that unexpectedly depend on it. Such tests should use
+	// runTimeSensitiveTest and SetRSTAvoidanceDelay to explicitly raise the delay
+	// if needed.
+	rstAvoidanceDelay = 1 * time.Nanosecond
+}
+
+// SetRSTAvoidanceDelay sets how long we are willing to wait between calling
+// CloseWrite on a connection and fully closing the connection.
+func SetRSTAvoidanceDelay(t *testing.T, d time.Duration) {
+	prevDelay := rstAvoidanceDelay
+	t.Cleanup(func() {
+		rstAvoidanceDelay = prevDelay
+	})
+	rstAvoidanceDelay = d
+}
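
SetRSTAvoidanceDelay is intended to be combined with a retrying wrapper like the runTimeSensitiveTest mentioned in the comment; a hedged sketch of such a wrapper (the helper below is an illustration, not the package's actual implementation):

	func retryWithDelays(t *testing.T, delays []time.Duration, body func(d time.Duration) error) {
		t.Helper()
		var err error
		for _, d := range delays {
			SetRSTAvoidanceDelay(t, d)
			if err = body(d); err == nil {
				return
			}
			t.Logf("attempt with rstAvoidanceDelay=%v failed: %v", d, err)
		}
		t.Fatalf("all attempts failed; last error: %v", err)
	}
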
diff --git a/src/net/http/fcgi/child.go b/src/net/http/fcgi/child.go
index dc82bf7..7665e7d 100644
--- a/src/net/http/fcgi/child.go
+++ b/src/net/http/fcgi/child.go
@@ -335,7 +335,7 @@
 // goroutine for each. The goroutine reads requests and then calls handler
 // to reply to them.
 // If l is nil, Serve accepts connections from os.Stdin.
-// If handler is nil, http.DefaultServeMux is used.
+// If handler is nil, [http.DefaultServeMux] is used.
 func Serve(l net.Listener, handler http.Handler) error {
 	if l == nil {
 		var err error
diff --git a/src/net/http/filetransport.go b/src/net/http/filetransport.go
index 94684b0..7384b22 100644
--- a/src/net/http/filetransport.go
+++ b/src/net/http/filetransport.go
@@ -7,6 +7,7 @@
 import (
 	"fmt"
 	"io"
+	"io/fs"
 )
 
 // fileTransport implements RoundTripper for the 'file' protocol.
@@ -14,13 +15,13 @@
 	fh fileHandler
 }
 
-// NewFileTransport returns a new RoundTripper, serving the provided
-// FileSystem. The returned RoundTripper ignores the URL host in its
+// NewFileTransport returns a new [RoundTripper], serving the provided
+// [FileSystem]. The returned RoundTripper ignores the URL host in its
 // incoming requests, as well as most other properties of the
 // request.
 //
 // The typical use case for NewFileTransport is to register the "file"
-// protocol with a Transport, as in:
+// protocol with a [Transport], as in:
 //
 //	t := &http.Transport{}
 //	t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
@@ -31,6 +32,24 @@
 	return fileTransport{fileHandler{fs}}
 }
 
+// NewFileTransportFS returns a new [RoundTripper], serving the provided
+// file system fsys. The returned RoundTripper ignores the URL host in its
+// incoming requests, as well as most other properties of the
+// request.
+//
+// The typical use case for NewFileTransportFS is to register the "file"
+// protocol with a [Transport], as in:
+//
+//	fsys := os.DirFS("/")
+//	t := &http.Transport{}
+//	t.RegisterProtocol("file", http.NewFileTransportFS(fsys))
+//	c := &http.Client{Transport: t}
+//	res, err := c.Get("file:///etc/passwd")
+//	...
+func NewFileTransportFS(fsys fs.FS) RoundTripper {
+	return NewFileTransport(FS(fsys))
+}
+
 func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {
 	// We start ServeHTTP in a goroutine, which may take a long
 	// time if the file is large. The newPopulateResponseWriter
diff --git a/src/net/http/filetransport_test.go b/src/net/http/filetransport_test.go
index 77fc8ee..b3e3301 100644
--- a/src/net/http/filetransport_test.go
+++ b/src/net/http/filetransport_test.go
@@ -9,6 +9,7 @@
 	"os"
 	"path/filepath"
 	"testing"
+	"testing/fstest"
 )
 
 func checker(t *testing.T) func(string, error) {
@@ -62,3 +63,44 @@
 	}
 	res.Body.Close()
 }
+
+func TestFileTransportFS(t *testing.T) {
+	check := checker(t)
+
+	fsys := fstest.MapFS{
+		"index.html": {Data: []byte("index.html says hello")},
+	}
+
+	tr := &Transport{}
+	tr.RegisterProtocol("file", NewFileTransportFS(fsys))
+	c := &Client{Transport: tr}
+
+	for fname, mfile := range fsys {
+		urlstr := "file:///" + fname
+		res, err := c.Get(urlstr)
+		check("Get "+urlstr, err)
+		if res.StatusCode != 200 {
+			t.Errorf("for %s, StatusCode = %d, want 200", urlstr, res.StatusCode)
+		}
+		if res.ContentLength != -1 {
+			t.Errorf("for %s, ContentLength = %d, want -1", urlstr, res.ContentLength)
+		}
+		if res.Body == nil {
+			t.Fatalf("for %s, nil Body", urlstr)
+		}
+		slurp, err := io.ReadAll(res.Body)
+		res.Body.Close()
+		check("ReadAll "+urlstr, err)
+		if string(slurp) != string(mfile.Data) {
+			t.Errorf("for %s, got content %q, want %q", urlstr, string(slurp), "Bar")
+		}
+	}
+
+	const badURL = "file://../no-exist.txt"
+	res, err := c.Get(badURL)
+	check("Get "+badURL, err)
+	if res.StatusCode != 404 {
+		t.Errorf("for %s, StatusCode = %d, want 404", badURL, res.StatusCode)
+	}
+	res.Body.Close()
+}
diff --git a/src/net/http/fs.go b/src/net/http/fs.go
index 41e0b43..af7511a 100644
--- a/src/net/http/fs.go
+++ b/src/net/http/fs.go
@@ -25,12 +25,12 @@
 	"time"
 )
 
-// A Dir implements FileSystem using the native file system restricted to a
+// A Dir implements [FileSystem] using the native file system restricted to a
 // specific directory tree.
 //
-// While the FileSystem.Open method takes '/'-separated paths, a Dir's string
+// While the [FileSystem.Open] method takes '/'-separated paths, a Dir's string
 // value is a filename on the native file system, not a URL, so it is separated
-// by filepath.Separator, which isn't necessarily '/'.
+// by [filepath.Separator], which isn't necessarily '/'.
 //
 // Note that Dir could expose sensitive files and directories. Dir will follow
 // symlinks pointing out of the directory tree, which can be especially dangerous
@@ -67,7 +67,7 @@
 	return originalErr
 }
 
-// Open implements FileSystem using os.Open, opening files for reading rooted
+// Open implements [FileSystem] using [os.Open], opening files for reading rooted
 // and relative to the directory d.
 func (d Dir) Open(name string) (File, error) {
 	path, err := safefilepath.FromFS(path.Clean("/" + name))
@@ -89,18 +89,18 @@
 // A FileSystem implements access to a collection of named files.
 // The elements in a file path are separated by slash ('/', U+002F)
 // characters, regardless of host operating system convention.
-// See the FileServer function to convert a FileSystem to a Handler.
+// See the [FileServer] function to convert a FileSystem to a [Handler].
 //
-// This interface predates the fs.FS interface, which can be used instead:
-// the FS adapter function converts an fs.FS to a FileSystem.
+// This interface predates the [fs.FS] interface, which can be used instead:
+// the [FS] adapter function converts an fs.FS to a FileSystem.
 type FileSystem interface {
 	Open(name string) (File, error)
 }
 
-// A File is returned by a FileSystem's Open method and can be
-// served by the FileServer implementation.
+// A File is returned by a [FileSystem]'s Open method and can be
+// served by the [FileServer] implementation.
 //
-// The methods should behave the same as those on an *os.File.
+// The methods should behave the same as those on an [*os.File].
 type File interface {
 	io.Closer
 	io.Reader
@@ -167,7 +167,7 @@
 }
 
 // ServeContent replies to the request using the content in the
-// provided ReadSeeker. The main benefit of ServeContent over io.Copy
+// provided ReadSeeker. The main benefit of ServeContent over [io.Copy]
 // is that it handles Range requests properly, sets the MIME type, and
 // handles If-Match, If-Unmodified-Since, If-None-Match, If-Modified-Since,
 // and If-Range requests.
@@ -175,7 +175,7 @@
 // If the response's Content-Type header is not set, ServeContent
 // first tries to deduce the type from name's file extension and,
 // if that fails, falls back to reading the first block of the content
-// and passing it to DetectContentType.
+// and passing it to [DetectContentType].
 // The name is otherwise unused; in particular it can be empty and is
 // never sent in the response.
 //
@@ -190,7 +190,7 @@
 // If the caller has set w's ETag header formatted per RFC 7232, section 2.3,
 // ServeContent uses it to handle requests using If-Match, If-None-Match, or If-Range.
 //
-// Note that *os.File implements the io.ReadSeeker interface.
+// Note that [*os.File] implements the [io.ReadSeeker] interface.
 func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) {
 	sizeFunc := func() (int64, error) {
 		size, err := content.Seek(0, io.SeekEnd)
@@ -343,10 +343,35 @@
 	}
 
 	w.Header().Set("Accept-Ranges", "bytes")
-	if w.Header().Get("Content-Encoding") == "" {
+
+	// We should be able to unconditionally set the Content-Length here.
+	//
+	// However, there is a pattern observed in the wild that this breaks:
+	// The user wraps the ResponseWriter in one which gzips data written to it,
+	// and sets "Content-Encoding: gzip".
+	//
+	// The user shouldn't be doing this; the serveContent path here depends
+	// on serving seekable data with a known length. If you want to compress
+	// on the fly, then you shouldn't be using ServeFile/ServeContent, or
+	// you should compress the entire file up-front and provide a seekable
+	// view of the compressed data.
+	//
+	// However, since we've observed this pattern in the wild, and since
+	// setting Content-Length here breaks code that mostly-works today,
+	// skip setting Content-Length if the user set Content-Encoding.
+	//
+	// If this is a range request, always set Content-Length.
+	// If the user isn't changing the bytes sent in the ResponseWriter,
+	// the Content-Length will be correct.
+	// If the user is changing the bytes sent, then the range request wasn't
+	// going to work properly anyway and we aren't worse off.
+	//
+	// A possible future improvement on this might be to look at the type
+	// of the ResponseWriter, and always set Content-Length if it's one
+	// that we recognize.
+	if len(ranges) > 0 || w.Header().Get("Content-Encoding") == "" {
 		w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
 	}
-
 	w.WriteHeader(code)
 
 	if r.Method != "HEAD" {
@@ -716,13 +741,13 @@
 //
 // As a precaution, ServeFile will reject requests where r.URL.Path
 // contains a ".." path element; this protects against callers who
-// might unsafely use filepath.Join on r.URL.Path without sanitizing
+// might unsafely use [filepath.Join] on r.URL.Path without sanitizing
 // it and then use that filepath.Join result as the name argument.
 //
 // As another special case, ServeFile redirects any request where r.URL.Path
 // ends in "/index.html" to the same path, without the final
 // "index.html". To avoid such redirects either modify the path or
-// use ServeContent.
+// use [ServeContent].
 //
 // Outside of those two special cases, ServeFile does not use
 // r.URL.Path for selecting the file or directory to serve; only the
@@ -741,6 +766,40 @@
 	serveFile(w, r, Dir(dir), file, false)
 }
 
+// ServeFileFS replies to the request with the contents
+// of the named file or directory from the file system fsys.
+//
+// If the provided file or directory name is a relative path, it is
+// interpreted relative to the current directory and may ascend to
+// parent directories. If the provided name is constructed from user
+// input, it should be sanitized before calling [ServeFileFS].
+//
+// As a precaution, ServeFileFS will reject requests where r.URL.Path
+// contains a ".." path element; this protects against callers who
+// might unsafely use [filepath.Join] on r.URL.Path without sanitizing
+// it and then use that filepath.Join result as the name argument.
+//
+// As another special case, ServeFileFS redirects any request where r.URL.Path
+// ends in "/index.html" to the same path, without the final
+// "index.html". To avoid such redirects either modify the path or
+// use [ServeContent].
+//
+// Outside of those two special cases, ServeFileFS does not use
+// r.URL.Path for selecting the file or directory to serve; only the
+// file or directory provided in the name argument is used.
+func ServeFileFS(w ResponseWriter, r *Request, fsys fs.FS, name string) {
+	if containsDotDot(r.URL.Path) {
+		// Too many programs use r.URL.Path to construct the argument to
+		// serveFile. Reject the request under the assumption that it happened
+		// here and ".." may not be wanted.
+		// Note that name might not contain "..", for example if code (still
+		// incorrectly) used filepath.Join(myDir, r.URL.Path).
+		Error(w, "invalid URL path", StatusBadRequest)
+		return
+	}
+	serveFile(w, r, FS(fsys), name, false)
+}
+
 func containsDotDot(v string) bool {
 	if !strings.Contains(v, "..") {
 		return false
@@ -831,9 +890,9 @@
 	return list, nil
 }
 
-// FS converts fsys to a FileSystem implementation,
-// for use with FileServer and NewFileTransport.
-// The files provided by fsys must implement io.Seeker.
+// FS converts fsys to a [FileSystem] implementation,
+// for use with [FileServer] and [NewFileTransport].
+// The files provided by fsys must implement [io.Seeker].
 func FS(fsys fs.FS) FileSystem {
 	return ioFS{fsys}
 }
@@ -846,17 +905,27 @@
 // "index.html".
 //
 // To use the operating system's file system implementation,
-// use http.Dir:
+// use [http.Dir]:
 //
 //	http.Handle("/", http.FileServer(http.Dir("/tmp")))
 //
-// To use an fs.FS implementation, use http.FS to convert it:
-//
-//	http.Handle("/", http.FileServer(http.FS(fsys)))
+// To use an [fs.FS] implementation, use [http.FileServerFS] instead.
 func FileServer(root FileSystem) Handler {
 	return &fileHandler{root}
 }
 
+// FileServerFS returns a handler that serves HTTP requests
+// with the contents of the file system fsys.
+//
+// As a special case, the returned file server redirects any request
+// ending in "/index.html" to the same path, without the final
+// "index.html".
+//
+//	http.Handle("/", http.FileServerFS(fsys))
+func FileServerFS(root fs.FS) Handler {
+	return FileServer(FS(root))
+}
+
 func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) {
 	upath := r.URL.Path
 	if !strings.HasPrefix(upath, "/") {
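
A minimal usage sketch of the two new helpers introduced above, FileServerFS and ServeFileFS, assuming an application that embeds a hypothetical "static" directory containing an index.html; the listen address is likewise illustrative and not part of the patch.

	package main

	import (
		"embed"
		"log"
		"net/http"
	)

	// The embedded "static" directory and its index.html are placeholders;
	// substitute whatever assets the application actually embeds.
	//
	//go:embed static
	var staticFS embed.FS

	func main() {
		// Requests under /static/ map directly onto the embedded tree,
		// e.g. /static/index.html opens "static/index.html" in staticFS.
		http.Handle("/static/", http.FileServerFS(staticFS))

		// Serve one specific embedded file for every other path.
		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			http.ServeFileFS(w, r, staticFS, "static/index.html")
		})

		log.Fatal(http.ListenAndServe(":8080", nil))
	}
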
diff --git a/src/net/http/fs_test.go b/src/net/http/fs_test.go
index 3fb9e01..861e70c 100644
--- a/src/net/http/fs_test.go
+++ b/src/net/http/fs_test.go
@@ -7,13 +7,16 @@
 import (
 	"bufio"
 	"bytes"
+	"compress/gzip"
 	"errors"
 	"fmt"
+	"internal/testenv"
 	"io"
 	"io/fs"
 	"mime"
 	"mime/multipart"
 	"net"
+	"net/http"
 	. "net/http"
 	"net/http/httptest"
 	"net/url"
@@ -26,6 +29,7 @@
 	"runtime"
 	"strings"
 	"testing"
+	"testing/fstest"
 	"time"
 )
 
@@ -1265,7 +1269,7 @@
 	defer ln.Close()
 
 	// Attempt to run strace, and skip on failure - this test requires SYS_PTRACE.
-	if err := exec.Command("strace", "-f", "-q", os.Args[0], "-test.run=^$").Run(); err != nil {
+	if err := testenv.Command(t, "strace", "-f", "-q", os.Args[0], "-test.run=^$").Run(); err != nil {
 		t.Skipf("skipping; failed to run strace: %v", err)
 	}
 
@@ -1278,7 +1282,7 @@
 	defer os.Remove(filepath)
 
 	var buf strings.Builder
-	child := exec.Command("strace", "-f", "-q", os.Args[0], "-test.run=TestLinuxSendfileChild")
+	child := testenv.Command(t, "strace", "-f", "-q", os.Args[0], "-test.run=^TestLinuxSendfileChild$")
 	child.ExtraFiles = append(child.ExtraFiles, lnf)
 	child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
 	child.Stdout = &buf
@@ -1559,3 +1563,108 @@
 		}
 	}
 }
+
+func TestFileServerFS(t *testing.T) {
+	filename := "index.html"
+	contents := []byte("index.html says hello")
+	fsys := fstest.MapFS{
+		filename: {Data: contents},
+	}
+	ts := newClientServerTest(t, http1Mode, FileServerFS(fsys)).ts
+	defer ts.Close()
+
+	res, err := ts.Client().Get(ts.URL + "/" + filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b, err := io.ReadAll(res.Body)
+	if err != nil {
+		t.Fatal("reading Body:", err)
+	}
+	if s := string(b); s != string(contents) {
+		t.Errorf("for path %q got %q, want %q", filename, s, contents)
+	}
+	res.Body.Close()
+}
+
+func TestServeFileFS(t *testing.T) {
+	filename := "index.html"
+	contents := []byte("index.html says hello")
+	fsys := fstest.MapFS{
+		filename: {Data: contents},
+	}
+	ts := newClientServerTest(t, http1Mode, HandlerFunc(func(w ResponseWriter, r *Request) {
+		ServeFileFS(w, r, fsys, filename)
+	})).ts
+	defer ts.Close()
+
+	res, err := ts.Client().Get(ts.URL + "/" + filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b, err := io.ReadAll(res.Body)
+	if err != nil {
+		t.Fatal("reading Body:", err)
+	}
+	if s := string(b); s != string(contents) {
+		t.Errorf("for path %q got %q, want %q", filename, s, contents)
+	}
+	res.Body.Close()
+}
+
+func TestServeFileZippingResponseWriter(t *testing.T) {
+	// This test exercises a pattern which is incorrect,
+	// but has been observed enough in the world that we don't want to break it.
+	//
+	// The server is setting "Content-Encoding: gzip",
+	// wrapping the ResponseWriter in an implementation which gzips data written to it,
+	// and passing this ResponseWriter to ServeFile.
+	//
+	// This means ServeFile cannot properly set a Content-Length header, because it
+	// doesn't know what content it is going to send--the ResponseWriter is modifying
+	// the bytes sent.
+	//
+	// Range requests are always going to be broken in this scenario,
+	// but verify that we can serve non-range requests correctly.
+	filename := "index.html"
+	contents := []byte("contents will be sent with Content-Encoding: gzip")
+	fsys := fstest.MapFS{
+		filename: {Data: contents},
+	}
+	ts := newClientServerTest(t, http1Mode, HandlerFunc(func(w ResponseWriter, r *Request) {
+		w.Header().Set("Content-Encoding", "gzip")
+		gzw := gzip.NewWriter(w)
+		defer gzw.Close()
+		ServeFileFS(gzipResponseWriter{w: gzw, ResponseWriter: w}, r, fsys, filename)
+	})).ts
+	defer ts.Close()
+
+	res, err := ts.Client().Get(ts.URL + "/" + filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b, err := io.ReadAll(res.Body)
+	if err != nil {
+		t.Fatal("reading Body:", err)
+	}
+	if s := string(b); s != string(contents) {
+		t.Errorf("for path %q got %q, want %q", filename, s, contents)
+	}
+	res.Body.Close()
+}
+
+type gzipResponseWriter struct {
+	ResponseWriter
+	w *gzip.Writer
+}
+
+func (grw gzipResponseWriter) Write(b []byte) (int, error) {
+	return grw.w.Write(b)
+}
+
+func (grw gzipResponseWriter) Flush() {
+	grw.w.Flush()
+	if fw, ok := grw.ResponseWriter.(http.Flusher); ok {
+		fw.Flush()
+	}
+}
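
The comment in serveContent above recommends compressing up-front and serving a seekable view of the compressed data instead of wrapping the ResponseWriter. A rough sketch of that shape, with a hypothetical file name, content type, and listen address, and with Accept-Encoding negotiation deliberately left out:

	package main

	import (
		"bytes"
		"compress/gzip"
		"log"
		"net/http"
		"os"
		"time"
	)

	// gzipHandler compresses path once at startup and serves the compressed
	// bytes with ServeContent; range and conditional requests keep working
	// because bytes.Reader is seekable.
	func gzipHandler(path string) http.HandlerFunc {
		raw, err := os.ReadFile(path)
		if err != nil {
			log.Fatal(err)
		}
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		if _, err := zw.Write(raw); err != nil {
			log.Fatal(err)
		}
		if err := zw.Close(); err != nil {
			log.Fatal(err)
		}
		compressed := buf.Bytes()

		return func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Encoding", "gzip")
			w.Header().Set("Content-Type", "text/html; charset=utf-8")
			http.ServeContent(w, r, path, time.Time{}, bytes.NewReader(compressed))
		}
	}

	func main() {
		// "index.html" here is a placeholder file name.
		http.HandleFunc("/", gzipHandler("index.html"))
		log.Fatal(http.ListenAndServe(":8080", nil))
	}
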
diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
index dd59e1f..ac41144 100644
--- a/src/net/http/h2_bundle.go
+++ b/src/net/http/h2_bundle.go
@@ -1,5 +1,4 @@
 //go:build !nethttpomithttp2
-// +build !nethttpomithttp2
 
 // Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
 //   $ bundle -o=h2_bundle.go -prefix=http2 -tags=!nethttpomithttp2 golang.org/x/net/http2
@@ -33,6 +32,7 @@
 	"io/fs"
 	"log"
 	"math"
+	"math/bits"
 	mathrand "math/rand"
 	"net"
 	"net/http/httptrace"
@@ -1041,41 +1041,44 @@
 // TODO: Benchmark to determine if the pools are necessary. The GC may have
 // improved enough that we can instead allocate chunks like this:
 // make([]byte, max(16<<10, expectedBytesRemaining))
-var (
-	http2dataChunkSizeClasses = []int{
-		1 << 10,
-		2 << 10,
-		4 << 10,
-		8 << 10,
-		16 << 10,
-	}
-	http2dataChunkPools = [...]sync.Pool{
-		{New: func() interface{} { return make([]byte, 1<<10) }},
-		{New: func() interface{} { return make([]byte, 2<<10) }},
-		{New: func() interface{} { return make([]byte, 4<<10) }},
-		{New: func() interface{} { return make([]byte, 8<<10) }},
-		{New: func() interface{} { return make([]byte, 16<<10) }},
-	}
-)
+var http2dataChunkPools = [...]sync.Pool{
+	{New: func() interface{} { return new([1 << 10]byte) }},
+	{New: func() interface{} { return new([2 << 10]byte) }},
+	{New: func() interface{} { return new([4 << 10]byte) }},
+	{New: func() interface{} { return new([8 << 10]byte) }},
+	{New: func() interface{} { return new([16 << 10]byte) }},
+}
 
 func http2getDataBufferChunk(size int64) []byte {
-	i := 0
-	for ; i < len(http2dataChunkSizeClasses)-1; i++ {
-		if size <= int64(http2dataChunkSizeClasses[i]) {
-			break
-		}
+	switch {
+	case size <= 1<<10:
+		return http2dataChunkPools[0].Get().(*[1 << 10]byte)[:]
+	case size <= 2<<10:
+		return http2dataChunkPools[1].Get().(*[2 << 10]byte)[:]
+	case size <= 4<<10:
+		return http2dataChunkPools[2].Get().(*[4 << 10]byte)[:]
+	case size <= 8<<10:
+		return http2dataChunkPools[3].Get().(*[8 << 10]byte)[:]
+	default:
+		return http2dataChunkPools[4].Get().(*[16 << 10]byte)[:]
 	}
-	return http2dataChunkPools[i].Get().([]byte)
 }
 
 func http2putDataBufferChunk(p []byte) {
-	for i, n := range http2dataChunkSizeClasses {
-		if len(p) == n {
-			http2dataChunkPools[i].Put(p)
-			return
-		}
+	switch len(p) {
+	case 1 << 10:
+		http2dataChunkPools[0].Put((*[1 << 10]byte)(p))
+	case 2 << 10:
+		http2dataChunkPools[1].Put((*[2 << 10]byte)(p))
+	case 4 << 10:
+		http2dataChunkPools[2].Put((*[4 << 10]byte)(p))
+	case 8 << 10:
+		http2dataChunkPools[3].Put((*[8 << 10]byte)(p))
+	case 16 << 10:
+		http2dataChunkPools[4].Put((*[16 << 10]byte)(p))
+	default:
+		panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
 	}
-	panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
 }
 
 // dataBuffer is an io.ReadWriter backed by a list of data chunks.
@@ -3058,41 +3061,6 @@
 	return buf.String()
 }
 
-func http2traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
-	return trace != nil && trace.WroteHeaderField != nil
-}
-
-func http2traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
-	if trace != nil && trace.WroteHeaderField != nil {
-		trace.WroteHeaderField(k, []string{v})
-	}
-}
-
-func http2traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
-	if trace != nil {
-		return trace.Got1xxResponse
-	}
-	return nil
-}
-
-// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
-// connection.
-func (t *http2Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
-	dialer := &tls.Dialer{
-		Config: cfg,
-	}
-	cn, err := dialer.DialContext(ctx, network, addr)
-	if err != nil {
-		return nil, err
-	}
-	tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
-	return tlsCn, nil
-}
-
-func http2tlsUnderlyingConn(tc *tls.Conn) net.Conn {
-	return tc.NetConn()
-}
-
 var http2DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
 
 type http2goroutineLock uint64
@@ -4831,14 +4799,6 @@
 	}
 }
 
-func (sc *http2serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
-	select {
-	case <-sc.doneServing:
-	case <-sharedCh:
-		close(privateCh)
-	}
-}
-
 type http2serverMessage int
 
 // Message values sent to serveMsgCh.
@@ -5722,9 +5682,11 @@
 // onReadTimeout is run on its own goroutine (from time.AfterFunc)
 // when the stream's ReadTimeout has fired.
 func (st *http2stream) onReadTimeout() {
-	// Wrap the ErrDeadlineExceeded to avoid callers depending on us
-	// returning the bare error.
-	st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+	if st.body != nil {
+		// Wrap the ErrDeadlineExceeded to avoid callers depending on us
+		// returning the bare error.
+		st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+	}
 }
 
 // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
@@ -5842,9 +5804,7 @@
 	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout != 0 {
 		sc.conn.SetReadDeadline(time.Time{})
-		if st.body != nil {
-			st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
-		}
+		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
 	}
 
 	return sc.scheduleHandler(id, rw, req, handler)
@@ -6374,7 +6334,6 @@
 	wroteHeader   bool     // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
 	sentHeader    bool     // have we sent the header frame?
 	handlerDone   bool     // handler has finished
-	dirty         bool     // a Write failed; don't reuse this responseWriterState
 
 	sentContentLen int64 // non-zero if handler set a Content-Length header
 	wroteBytes     int64
@@ -6494,7 +6453,6 @@
 			date:          date,
 		})
 		if err != nil {
-			rws.dirty = true
 			return 0, err
 		}
 		if endStream {
@@ -6515,7 +6473,6 @@
 	if len(p) > 0 || endStream {
 		// only send a 0 byte DATA frame if we're ending the stream.
 		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
-			rws.dirty = true
 			return 0, err
 		}
 	}
@@ -6527,9 +6484,6 @@
 			trailers:  rws.trailers,
 			endStream: true,
 		})
-		if err != nil {
-			rws.dirty = true
-		}
 		return len(p), err
 	}
 	return len(p), nil
@@ -6745,14 +6699,12 @@
 			h.Del("Transfer-Encoding")
 		}
 
-		if rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
+		rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
 			streamID:    rws.stream.id,
 			httpResCode: code,
 			h:           h,
 			endStream:   rws.handlerDone && !rws.hasTrailers(),
-		}) != nil {
-			rws.dirty = true
-		}
+		})
 
 		return
 	}
@@ -6817,19 +6769,10 @@
 
 func (w *http2responseWriter) handlerDone() {
 	rws := w.rws
-	dirty := rws.dirty
 	rws.handlerDone = true
 	w.Flush()
 	w.rws = nil
-	if !dirty {
-		// Only recycle the pool if all prior Write calls to
-		// the serverConn goroutine completed successfully. If
-		// they returned earlier due to resets from the peer
-		// there might still be write goroutines outstanding
-		// from the serverConn referencing the rws memory. See
-		// issue 20704.
-		http2responseWriterStatePool.Put(rws)
-	}
+	http2responseWriterStatePool.Put(rws)
 }
 
 // Push errors.
@@ -7374,8 +7317,7 @@
 // HTTP/2 server.
 type http2ClientConn struct {
 	t             *http2Transport
-	tconn         net.Conn // usually *tls.Conn, except specialized impls
-	tconnClosed   bool
+	tconn         net.Conn             // usually *tls.Conn, except specialized impls
 	tlsState      *tls.ConnectionState // nil only for specialized impls
 	reused        uint32               // whether conn is being reused; atomic
 	singleUse     bool                 // whether being used for a single http.Request
@@ -8103,7 +8045,7 @@
 	if !ok {
 		return
 	}
-	if nc := http2tlsUnderlyingConn(tc); nc != nil {
+	if nc := tc.NetConn(); nc != nil {
 		nc.Close()
 	}
 }
@@ -8765,7 +8707,28 @@
 	return int(n) // doesn't truncate; max is 512K
 }
 
-var http2bufPool sync.Pool // of *[]byte
+// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
+// streaming requests using small frame sizes occupy large buffers initially allocated for prior
+// requests needing big buffers. The size ranges are as follows:
+// (0 KB, 16 KB], (16 KB, 32 KB], (32 KB, 64 KB], (64 KB, 128 KB], (128 KB, 256 KB],
+// (256 KB, 512 KB], (512 KB, infinity)
+// In practice, the maximum scratch buffer size should not exceed 512 KB due to
+// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
+// It exists mainly as a safety measure, for potential future increases in max buffer size.
+var http2bufPools [7]sync.Pool // of *[]byte
+
+func http2bufPoolIndex(size int) int {
+	if size <= 16384 {
+		return 0
+	}
+	size -= 1
+	bits := bits.Len(uint(size))
+	index := bits - 14
+	if index >= len(http2bufPools) {
+		return len(http2bufPools) - 1
+	}
+	return index
+}
 
 func (cs *http2clientStream) writeRequestBody(req *Request) (err error) {
 	cc := cs.cc
@@ -8783,12 +8746,13 @@
 	// Scratch buffer for reading into & writing from.
 	scratchLen := cs.frameScratchBufferLen(maxFrameSize)
 	var buf []byte
-	if bp, ok := http2bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
-		defer http2bufPool.Put(bp)
+	index := http2bufPoolIndex(scratchLen)
+	if bp, ok := http2bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
+		defer http2bufPools[index].Put(bp)
 		buf = *bp
 	} else {
 		buf = make([]byte, scratchLen)
-		defer http2bufPool.Put(&buf)
+		defer http2bufPools[index].Put(&buf)
 	}
 
 	var sawEOF bool
@@ -10269,6 +10233,37 @@
 	}
 }
 
+func http2traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
+	return trace != nil && trace.WroteHeaderField != nil
+}
+
+func http2traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
+	if trace != nil && trace.WroteHeaderField != nil {
+		trace.WroteHeaderField(k, []string{v})
+	}
+}
+
+func http2traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
+	if trace != nil {
+		return trace.Got1xxResponse
+	}
+	return nil
+}
+
+// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
+// connection.
+func (t *http2Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
+	dialer := &tls.Dialer{
+		Config: cfg,
+	}
+	cn, err := dialer.DialContext(ctx, network, addr)
+	if err != nil {
+		return nil, err
+	}
+	tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
+	return tlsCn, nil
+}
+
 // writeFramer is implemented by any type that is used to write frames.
 type http2writeFramer interface {
 	writeFrame(http2writeContext) error
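
To make the new scratch-buffer size classes concrete, the following standalone sketch mirrors the selection logic of http2bufPoolIndex (an illustrative copy, not the bundled function itself) and prints the pool chosen for a few scratch lengths:

	package main

	import (
		"fmt"
		"math/bits"
	)

	// bufPoolIndex mirrors the selection added above: sizes up to 16 KB use
	// pool 0, each doubling of the size moves to the next pool, and anything
	// above 512 KB falls into the last ("infinity") pool.
	func bufPoolIndex(size int) int {
		if size <= 16384 {
			return 0
		}
		size -= 1
		index := bits.Len(uint(size)) - 14
		if index >= 7 { // seven pools, as in the http2bufPools declaration
			return 6
		}
		return index
	}

	func main() {
		for _, size := range []int{16 << 10, 16<<10 + 1, 32 << 10, 64 << 10, 512 << 10, 1 << 20} {
			fmt.Printf("scratch %7d bytes -> pool %d\n", size, bufPoolIndex(size))
		}
		// Expected: 16384->0, 16385->1, 32768->1, 65536->2, 524288->5, 1048576->6
	}
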
diff --git a/src/net/http/h2_error.go b/src/net/http/h2_error.go
index 0391d31..2c0b21e 100644
--- a/src/net/http/h2_error.go
+++ b/src/net/http/h2_error.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !nethttpomithttp2
-// +build !nethttpomithttp2
 
 package http
 
diff --git a/src/net/http/h2_error_test.go b/src/net/http/h2_error_test.go
index 0d85e2f..5e40068 100644
--- a/src/net/http/h2_error_test.go
+++ b/src/net/http/h2_error_test.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !nethttpomithttp2
-// +build !nethttpomithttp2
 
 package http
 
diff --git a/src/net/http/header.go b/src/net/http/header.go
index e0b342c..9d0f3a1 100644
--- a/src/net/http/header.go
+++ b/src/net/http/header.go
@@ -20,13 +20,13 @@
 // A Header represents the key-value pairs in an HTTP header.
 //
 // The keys should be in canonical form, as returned by
-// CanonicalHeaderKey.
+// [CanonicalHeaderKey].
 type Header map[string][]string
 
 // Add adds the key, value pair to the header.
 // It appends to any existing values associated with key.
 // The key is case insensitive; it is canonicalized by
-// CanonicalHeaderKey.
+// [CanonicalHeaderKey].
 func (h Header) Add(key, value string) {
 	textproto.MIMEHeader(h).Add(key, value)
 }
@@ -34,7 +34,7 @@
 // Set sets the header entries associated with key to the
 // single element value. It replaces any existing values
 // associated with key. The key is case insensitive; it is
-// canonicalized by textproto.CanonicalMIMEHeaderKey.
+// canonicalized by [textproto.CanonicalMIMEHeaderKey].
 // To use non-canonical keys, assign to the map directly.
 func (h Header) Set(key, value string) {
 	textproto.MIMEHeader(h).Set(key, value)
@@ -42,7 +42,7 @@
 
 // Get gets the first value associated with the given key. If
 // there are no values associated with the key, Get returns "".
-// It is case insensitive; textproto.CanonicalMIMEHeaderKey is
+// It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is
 // used to canonicalize the provided key. Get assumes that all
 // keys are stored in canonical form. To use non-canonical keys,
 // access the map directly.
@@ -51,7 +51,7 @@
 }
 
 // Values returns all values associated with the given key.
-// It is case insensitive; textproto.CanonicalMIMEHeaderKey is
+// It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is
 // used to canonicalize the provided key. To use non-canonical
 // keys, access the map directly.
 // The returned slice is not a copy.
@@ -76,7 +76,7 @@
 
 // Del deletes the values associated with key.
 // The key is case insensitive; it is canonicalized by
-// CanonicalHeaderKey.
+// [CanonicalHeaderKey].
 func (h Header) Del(key string) {
 	textproto.MIMEHeader(h).Del(key)
 }
@@ -125,7 +125,7 @@
 
 // ParseTime parses a time header (such as the Date: header),
 // trying each of the three formats allowed by HTTP/1.1:
-// TimeFormat, time.RFC850, and time.ANSIC.
+// [TimeFormat], [time.RFC850], and [time.ANSIC].
 func ParseTime(text string) (t time.Time, err error) {
 	for _, layout := range timeFormats {
 		t, err = time.Parse(layout, text)
diff --git a/src/net/http/http.go b/src/net/http/http.go
index 9b81654..6e2259a 100644
--- a/src/net/http/http.go
+++ b/src/net/http/http.go
@@ -103,10 +103,10 @@
 	return string(b)
 }
 
-// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// NoBody is an [io.ReadCloser] with no bytes. Read always returns EOF
 // and Close always returns nil. It can be used in an outgoing client
 // request to explicitly signal that a request has zero bytes.
-// An alternative, however, is to simply set Request.Body to nil.
+// An alternative, however, is to simply set [Request.Body] to nil.
 var NoBody = noBody{}
 
 type noBody struct{}
@@ -121,7 +121,7 @@
 	_ io.ReadCloser = NoBody
 )
 
-// PushOptions describes options for Pusher.Push.
+// PushOptions describes options for [Pusher.Push].
 type PushOptions struct {
 	// Method specifies the HTTP method for the promised request.
 	// If set, it must be "GET" or "HEAD". Empty means "GET".
diff --git a/src/net/http/http_test.go b/src/net/http/http_test.go
index 91bb1b2..2e7e024 100644
--- a/src/net/http/http_test.go
+++ b/src/net/http/http_test.go
@@ -12,7 +12,6 @@
 	"io/fs"
 	"net/url"
 	"os"
-	"os/exec"
 	"reflect"
 	"regexp"
 	"strings"
@@ -55,7 +54,7 @@
 func TestCmdGoNoHTTPServer(t *testing.T) {
 	t.Parallel()
 	goBin := testenv.GoToolPath(t)
-	out, err := exec.Command(goBin, "tool", "nm", goBin).CombinedOutput()
+	out, err := testenv.Command(t, goBin, "tool", "nm", goBin).CombinedOutput()
 	if err != nil {
 		t.Fatalf("go tool nm: %v: %s", err, out)
 	}
@@ -89,7 +88,7 @@
 	}
 	t.Parallel()
 	goTool := testenv.GoToolPath(t)
-	out, err := exec.Command(goTool, "test", "-short", "-tags=nethttpomithttp2", "net/http").CombinedOutput()
+	out, err := testenv.Command(t, goTool, "test", "-short", "-tags=nethttpomithttp2", "net/http").CombinedOutput()
 	if err != nil {
 		t.Fatalf("go test -short failed: %v, %s", err, out)
 	}
@@ -101,7 +100,7 @@
 func TestOmitHTTP2Vet(t *testing.T) {
 	t.Parallel()
 	goTool := testenv.GoToolPath(t)
-	out, err := exec.Command(goTool, "vet", "-tags=nethttpomithttp2", "net/http").CombinedOutput()
+	out, err := testenv.Command(t, goTool, "vet", "-tags=nethttpomithttp2", "net/http").CombinedOutput()
 	if err != nil {
 		t.Fatalf("go vet failed: %v, %s", err, out)
 	}
diff --git a/src/net/http/httptest/httptest.go b/src/net/http/httptest/httptest.go
index 9bedefd..f0ca643 100644
--- a/src/net/http/httptest/httptest.go
+++ b/src/net/http/httptest/httptest.go
@@ -15,7 +15,7 @@
 )
 
 // NewRequest returns a new incoming server Request, suitable
-// for passing to an http.Handler for testing.
+// for passing to an [http.Handler] for testing.
 //
 // The target is the RFC 7230 "request-target": it may be either a
 // path or an absolute URL. If target is an absolute URL, the host name
diff --git a/src/net/http/httptest/recorder.go b/src/net/http/httptest/recorder.go
index 1c1d880..dd51901 100644
--- a/src/net/http/httptest/recorder.go
+++ b/src/net/http/httptest/recorder.go
@@ -16,7 +16,7 @@
 	"golang.org/x/net/http/httpguts"
 )
 
-// ResponseRecorder is an implementation of http.ResponseWriter that
+// ResponseRecorder is an implementation of [http.ResponseWriter] that
 // records its mutations for later inspection in tests.
 type ResponseRecorder struct {
 	// Code is the HTTP response code set by WriteHeader.
@@ -47,7 +47,7 @@
 	wroteHeader bool
 }
 
-// NewRecorder returns an initialized ResponseRecorder.
+// NewRecorder returns an initialized [ResponseRecorder].
 func NewRecorder() *ResponseRecorder {
 	return &ResponseRecorder{
 		HeaderMap: make(http.Header),
@@ -57,12 +57,12 @@
 }
 
 // DefaultRemoteAddr is the default remote address to return in RemoteAddr if
-// an explicit DefaultRemoteAddr isn't set on ResponseRecorder.
+// an explicit DefaultRemoteAddr isn't set on [ResponseRecorder].
 const DefaultRemoteAddr = "1.2.3.4"
 
-// Header implements http.ResponseWriter. It returns the response
+// Header implements [http.ResponseWriter]. It returns the response
 // headers to mutate within a handler. To test the headers that were
-// written after a handler completes, use the Result method and see
+// written after a handler completes, use the [ResponseRecorder.Result] method and see
 // the returned Response value's Header.
 func (rw *ResponseRecorder) Header() http.Header {
 	m := rw.HeaderMap
@@ -112,7 +112,7 @@
 	return len(buf), nil
 }
 
-// WriteString implements io.StringWriter. The data in str is written
+// WriteString implements [io.StringWriter]. The data in str is written
 // to rw.Body, if not nil.
 func (rw *ResponseRecorder) WriteString(str string) (int, error) {
 	rw.writeHeader(nil, str)
@@ -139,7 +139,7 @@
 	}
 }
 
-// WriteHeader implements http.ResponseWriter.
+// WriteHeader implements [http.ResponseWriter].
 func (rw *ResponseRecorder) WriteHeader(code int) {
 	if rw.wroteHeader {
 		return
@@ -154,7 +154,7 @@
 	rw.snapHeader = rw.HeaderMap.Clone()
 }
 
-// Flush implements http.Flusher. To test whether Flush was
+// Flush implements [http.Flusher]. To test whether Flush was
 // called, see rw.Flushed.
 func (rw *ResponseRecorder) Flush() {
 	if !rw.wroteHeader {
@@ -175,7 +175,7 @@
 // did a write.
 //
 // The Response.Body is guaranteed to be non-nil and Body.Read call is
-// guaranteed to not return any error other than io.EOF.
+// guaranteed to not return any error other than [io.EOF].
 //
 // Result must only be called after the handler has finished running.
 func (rw *ResponseRecorder) Result() *http.Response {
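
A compact sketch of the usual NewRequest / NewRecorder / Result flow described above; the handler and URL here are made up for illustration:

	package main

	import (
		"fmt"
		"io"
		"net/http"
		"net/http/httptest"
	)

	func main() {
		h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", "text/plain")
			io.WriteString(w, "hello, "+r.URL.Query().Get("name"))
		})

		req := httptest.NewRequest("GET", "/greet?name=gopher", nil)
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, req)

		res := rec.Result() // only after the handler has finished
		body, _ := io.ReadAll(res.Body)
		fmt.Println(res.StatusCode, res.Header.Get("Content-Type"), string(body))
	}
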
diff --git a/src/net/http/httptest/server.go b/src/net/http/httptest/server.go
index f254a49..5095b43 100644
--- a/src/net/http/httptest/server.go
+++ b/src/net/http/httptest/server.go
@@ -77,7 +77,7 @@
 // When debugging a particular http server-based test,
 // this flag lets you run
 //
-//	go test -run=BrokenTest -httptest.serve=127.0.0.1:8000
+//	go test -run='^BrokenTest$' -httptest.serve=127.0.0.1:8000
 //
 // to start the broken server so you can interact with it manually.
 // We only register this flag if it looks like the caller knows about it
@@ -100,7 +100,7 @@
 	return false
 }
 
-// NewServer starts and returns a new Server.
+// NewServer starts and returns a new [Server].
 // The caller should call Close when finished, to shut it down.
 func NewServer(handler http.Handler) *Server {
 	ts := NewUnstartedServer(handler)
@@ -108,7 +108,7 @@
 	return ts
 }
 
-// NewUnstartedServer returns a new Server but doesn't start it.
+// NewUnstartedServer returns a new [Server] but doesn't start it.
 //
 // After changing its configuration, the caller should call Start or
 // StartTLS.
@@ -144,7 +144,7 @@
 		panic("Server already started")
 	}
 	if s.client == nil {
-		s.client = &http.Client{Transport: &http.Transport{}}
+		s.client = &http.Client{}
 	}
 	cert, err := tls.X509KeyPair(testcert.LocalhostCert, testcert.LocalhostKey)
 	if err != nil {
@@ -185,7 +185,7 @@
 	s.goServe()
 }
 
-// NewTLSServer starts and returns a new Server using TLS.
+// NewTLSServer starts and returns a new [Server] using TLS.
 // The caller should call Close when finished, to shut it down.
 func NewTLSServer(handler http.Handler) *Server {
 	ts := NewUnstartedServer(handler)
@@ -298,7 +298,7 @@
 
 // Client returns an HTTP client configured for making requests to the server.
 // It is configured to trust the server's TLS test certificate and will
-// close its idle connections on Server.Close.
+// close its idle connections on [Server.Close].
 func (s *Server) Client() *http.Client {
 	return s.client
 }
diff --git a/src/net/http/httptrace/trace.go b/src/net/http/httptrace/trace.go
index 6af30f7..706a432 100644
--- a/src/net/http/httptrace/trace.go
+++ b/src/net/http/httptrace/trace.go
@@ -19,7 +19,7 @@
 // unique type to prevent assignment.
 type clientEventContextKey struct{}
 
-// ContextClientTrace returns the ClientTrace associated with the
+// ContextClientTrace returns the [ClientTrace] associated with the
 // provided context. If none, it returns nil.
 func ContextClientTrace(ctx context.Context) *ClientTrace {
 	trace, _ := ctx.Value(clientEventContextKey{}).(*ClientTrace)
@@ -233,7 +233,7 @@
 	return t.DNSStart != nil || t.DNSDone != nil || t.ConnectStart != nil || t.ConnectDone != nil
 }
 
-// GotConnInfo is the argument to the ClientTrace.GotConn function and
+// GotConnInfo is the argument to the [ClientTrace.GotConn] function and
 // contains information about the obtained connection.
 type GotConnInfo struct {
 	// Conn is the connection that was obtained. It is owned by
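
A brief sketch of attaching a ClientTrace to a request via WithClientTrace and observing it through hooks such as GotConn and WroteHeaderField; the target URL is a placeholder and the request needs network access for the callbacks to actually fire:

	package main

	import (
		"fmt"
		"net/http"
		"net/http/httptrace"
	)

	func main() {
		req, err := http.NewRequest("GET", "https://example.com/", nil)
		if err != nil {
			panic(err)
		}

		trace := &httptrace.ClientTrace{
			GotConn: func(info httptrace.GotConnInfo) {
				fmt.Println("got conn, reused:", info.Reused)
			},
			WroteHeaderField: func(key string, values []string) {
				fmt.Println("wrote header:", key, values)
			},
		}
		req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			fmt.Println(err)
			return
		}
		resp.Body.Close()
	}
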
diff --git a/src/net/http/httputil/dump.go b/src/net/http/httputil/dump.go
index 7affe5e..2edb9bc 100644
--- a/src/net/http/httputil/dump.go
+++ b/src/net/http/httputil/dump.go
@@ -71,8 +71,8 @@
 	return -1
 }
 
-// DumpRequestOut is like DumpRequest but for outgoing client requests. It
-// includes any headers that the standard http.Transport adds, such as
+// DumpRequestOut is like [DumpRequest] but for outgoing client requests. It
+// includes any headers that the standard [http.Transport] adds, such as
 // User-Agent.
 func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
 	save := req.Body
@@ -203,17 +203,17 @@
 // representation. It should only be used by servers to debug client
 // requests. The returned representation is an approximation only;
 // some details of the initial request are lost while parsing it into
-// an http.Request. In particular, the order and case of header field
+// an [http.Request]. In particular, the order and case of header field
 // names are lost. The order of values in multi-valued headers is kept
 // intact. HTTP/2 requests are dumped in HTTP/1.x form, not in their
 // original binary representations.
 //
 // If body is true, DumpRequest also returns the body. To do so, it
-// consumes req.Body and then replaces it with a new io.ReadCloser
+// consumes req.Body and then replaces it with a new [io.ReadCloser]
 // that yields the same bytes. If DumpRequest returns an error,
 // the state of req is undefined.
 //
-// The documentation for http.Request.Write details which fields
+// The documentation for [http.Request.Write] details which fields
 // of req are included in the dump.
 func DumpRequest(req *http.Request, body bool) ([]byte, error) {
 	var err error
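
A small illustrative use of DumpRequest on a hand-built request (the URL, body, and header are placeholders):

	package main

	import (
		"fmt"
		"net/http"
		"net/http/httputil"
		"strings"
	)

	func main() {
		req, err := http.NewRequest("POST", "http://example.com/upload", strings.NewReader("some body"))
		if err != nil {
			panic(err)
		}
		req.Header.Set("Content-Type", "text/plain")

		dump, err := httputil.DumpRequest(req, true)
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Printf("%s", dump)
	}
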
diff --git a/src/net/http/httputil/httputil.go b/src/net/http/httputil/httputil.go
index 09ea74d..431930e 100644
--- a/src/net/http/httputil/httputil.go
+++ b/src/net/http/httputil/httputil.go
@@ -13,7 +13,7 @@
 
 // NewChunkedReader returns a new chunkedReader that translates the data read from r
 // out of HTTP "chunked" format before returning it.
-// The chunkedReader returns io.EOF when the final 0-length chunk is read.
+// The chunkedReader returns [io.EOF] when the final 0-length chunk is read.
 //
 // NewChunkedReader is not needed by normal applications. The http package
 // automatically decodes chunking when reading response bodies.
diff --git a/src/net/http/httputil/persist.go b/src/net/http/httputil/persist.go
index 84b116d..0cbe3eb 100644
--- a/src/net/http/httputil/persist.go
+++ b/src/net/http/httputil/persist.go
@@ -33,7 +33,7 @@
 // It is low-level, old, and unused by Go's current HTTP stack.
 // We should have deleted it before Go 1.
 //
-// Deprecated: Use the Server in package net/http instead.
+// Deprecated: Use the Server in package [net/http] instead.
 type ServerConn struct {
 	mu              sync.Mutex // read-write protects the following fields
 	c               net.Conn
@@ -50,7 +50,7 @@
 // It is low-level, old, and unused by Go's current HTTP stack.
 // We should have deleted it before Go 1.
 //
-// Deprecated: Use the Server in package net/http instead.
+// Deprecated: Use the Server in package [net/http] instead.
 func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn {
 	if r == nil {
 		r = bufio.NewReader(c)
@@ -58,10 +58,10 @@
 	return &ServerConn{c: c, r: r, pipereq: make(map[*http.Request]uint)}
 }
 
-// Hijack detaches the ServerConn and returns the underlying connection as well
+// Hijack detaches the [ServerConn] and returns the underlying connection as well
 // as the read-side bufio which may have some left over data. Hijack may be
 // called before Read has signaled the end of the keep-alive logic. The user
-// should not call Hijack while Read or Write is in progress.
+// should not call Hijack while [ServerConn.Read] or [ServerConn.Write] is in progress.
 func (sc *ServerConn) Hijack() (net.Conn, *bufio.Reader) {
 	sc.mu.Lock()
 	defer sc.mu.Unlock()
@@ -72,7 +72,7 @@
 	return c, r
 }
 
-// Close calls Hijack and then also closes the underlying connection.
+// Close calls [ServerConn.Hijack] and then also closes the underlying connection.
 func (sc *ServerConn) Close() error {
 	c, _ := sc.Hijack()
 	if c != nil {
@@ -81,7 +81,7 @@
 	return nil
 }
 
-// Read returns the next request on the wire. An ErrPersistEOF is returned if
+// Read returns the next request on the wire. An [ErrPersistEOF] is returned if
 // it is gracefully determined that there are no more requests (e.g. after the
 // first request on an HTTP/1.0 connection, or after a Connection:close on a
 // HTTP/1.1 connection).
@@ -171,7 +171,7 @@
 
 // Write writes resp in response to req. To close the connection gracefully, set the
 // Response.Close field to true. Write should be considered operational until
-// it returns an error, regardless of any errors returned on the Read side.
+// it returns an error, regardless of any errors returned on the [ServerConn.Read] side.
 func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error {
 
 	// Retrieve the pipeline ID of this request/response pair
@@ -226,7 +226,7 @@
 // It is low-level, old, and unused by Go's current HTTP stack.
 // We should have deleted it before Go 1.
 //
-// Deprecated: Use Client or Transport in package net/http instead.
+// Deprecated: Use Client or Transport in package [net/http] instead.
 type ClientConn struct {
 	mu              sync.Mutex // read-write protects the following fields
 	c               net.Conn
@@ -244,7 +244,7 @@
 // It is low-level, old, and unused by Go's current HTTP stack.
 // We should have deleted it before Go 1.
 //
-// Deprecated: Use the Client or Transport in package net/http instead.
+// Deprecated: Use the Client or Transport in package [net/http] instead.
 func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
 	if r == nil {
 		r = bufio.NewReader(c)
@@ -261,17 +261,17 @@
 // It is low-level, old, and unused by Go's current HTTP stack.
 // We should have deleted it before Go 1.
 //
-// Deprecated: Use the Client or Transport in package net/http instead.
+// Deprecated: Use the Client or Transport in package [net/http] instead.
 func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
 	cc := NewClientConn(c, r)
 	cc.writeReq = (*http.Request).WriteProxy
 	return cc
 }
 
-// Hijack detaches the ClientConn and returns the underlying connection as well
+// Hijack detaches the [ClientConn] and returns the underlying connection as well
 // as the read-side bufio which may have some left over data. Hijack may be
 // called before the user or Read have signaled the end of the keep-alive
-// logic. The user should not call Hijack while Read or Write is in progress.
+// logic. The user should not call Hijack while [ClientConn.Read] or [ClientConn.Write] is in progress.
 func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) {
 	cc.mu.Lock()
 	defer cc.mu.Unlock()
@@ -282,7 +282,7 @@
 	return
 }
 
-// Close calls Hijack and then also closes the underlying connection.
+// Close calls [ClientConn.Hijack] and then also closes the underlying connection.
 func (cc *ClientConn) Close() error {
 	c, _ := cc.Hijack()
 	if c != nil {
@@ -291,7 +291,7 @@
 	return nil
 }
 
-// Write writes a request. An ErrPersistEOF error is returned if the connection
+// Write writes a request. An [ErrPersistEOF] error is returned if the connection
 // has been closed in an HTTP keep-alive sense. If req.Close equals true, the
 // keep-alive connection is logically closed after this request and the opposing
 // server is informed. An ErrUnexpectedEOF indicates the remote closed the
@@ -357,9 +357,9 @@
 }
 
 // Read reads the next response from the wire. A valid response might be
-// returned together with an ErrPersistEOF, which means that the remote
+// returned together with an [ErrPersistEOF], which means that the remote
 // requested that this be the last request serviced. Read can be called
-// concurrently with Write, but not with another Read.
+// concurrently with [ClientConn.Write], but not with another Read.
 func (cc *ClientConn) Read(req *http.Request) (resp *http.Response, err error) {
 	// Retrieve the pipeline ID of this request/response pair
 	cc.mu.Lock()
diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go
index 2a76b0b..5c70f0d 100644
--- a/src/net/http/httputil/reverseproxy.go
+++ b/src/net/http/httputil/reverseproxy.go
@@ -26,7 +26,7 @@
 	"golang.org/x/net/http/httpguts"
 )
 
-// A ProxyRequest contains a request to be rewritten by a ReverseProxy.
+// A ProxyRequest contains a request to be rewritten by a [ReverseProxy].
 type ProxyRequest struct {
 	// In is the request received by the proxy.
 	// The Rewrite function must not modify In.
@@ -45,7 +45,7 @@
 //
 // SetURL rewrites the outbound Host header to match the target's host.
 // To preserve the inbound request's Host header (the default behavior
-// of NewSingleHostReverseProxy):
+// of [NewSingleHostReverseProxy]):
 //
 //	rewriteFunc := func(r *httputil.ProxyRequest) {
 //		r.SetURL(url)
@@ -68,7 +68,7 @@
 // If the outbound request contains an existing X-Forwarded-For header,
 // SetXForwarded appends the client IP address to it. To append to the
 // inbound request's X-Forwarded-For header (the default behavior of
-// ReverseProxy when using a Director function), copy the header
+// [ReverseProxy] when using a Director function), copy the header
 // from the inbound request before calling SetXForwarded:
 //
 //	rewriteFunc := func(r *httputil.ProxyRequest) {
@@ -200,7 +200,7 @@
 }
 
 // A BufferPool is an interface for getting and returning temporary
-// byte slices for use by io.CopyBuffer.
+// byte slices for use by [io.CopyBuffer].
 type BufferPool interface {
 	Get() []byte
 	Put([]byte)
@@ -239,7 +239,7 @@
 	return a.Path + b.Path, apath + bpath
 }
 
-// NewSingleHostReverseProxy returns a new ReverseProxy that routes
+// NewSingleHostReverseProxy returns a new [ReverseProxy] that routes
 // URLs to the scheme, host, and base path provided in target. If the
 // target's path is "/base" and the incoming request was for "/dir",
 // the target request will be for /base/dir.
@@ -461,10 +461,7 @@
 			rw.WriteHeader(code)
 
 			// Clear headers, it's not automatically done by ResponseWriter.WriteHeader() for 1xx responses
-			for k := range h {
-				delete(h, k)
-			}
-
+			clear(h)
 			return nil
 		},
 	}
diff --git a/src/net/http/internal/ascii/print.go b/src/net/http/internal/ascii/print.go
index 585e5ba..98dbf4e 100644
--- a/src/net/http/internal/ascii/print.go
+++ b/src/net/http/internal/ascii/print.go
@@ -9,7 +9,7 @@
 	"unicode"
 )
 
-// EqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// EqualFold is [strings.EqualFold], ASCII only. It reports whether s and t
 // are equal, ASCII-case-insensitively.
 func EqualFold(s, t string) bool {
 	if len(s) != len(t) {
diff --git a/src/net/http/internal/chunked.go b/src/net/http/internal/chunked.go
index 5a17441..196b5d8 100644
--- a/src/net/http/internal/chunked.go
+++ b/src/net/http/internal/chunked.go
@@ -22,7 +22,7 @@
 
 // NewChunkedReader returns a new chunkedReader that translates the data read from r
 // out of HTTP "chunked" format before returning it.
-// The chunkedReader returns io.EOF when the final 0-length chunk is read.
+// The chunkedReader returns [io.EOF] when the final 0-length chunk is read.
 //
 // NewChunkedReader is not needed by normal applications. The http package
 // automatically decodes chunking when reading response bodies.
@@ -39,7 +39,8 @@
 	n        uint64 // unread bytes in chunk
 	err      error
 	buf      [2]byte
-	checkEnd bool // whether need to check for \r\n chunk footer
+	checkEnd bool  // whether we need to check for \r\n chunk footer
+	excess   int64 // "excessive" chunk overhead, for malicious sender detection
 }
 
 func (cr *chunkedReader) beginChunk() {
@@ -49,10 +50,36 @@
 	if cr.err != nil {
 		return
 	}
+	cr.excess += int64(len(line)) + 2 // header, plus \r\n after the chunk data
+	line = trimTrailingWhitespace(line)
+	line, cr.err = removeChunkExtension(line)
+	if cr.err != nil {
+		return
+	}
 	cr.n, cr.err = parseHexUint(line)
 	if cr.err != nil {
 		return
 	}
+	// A sender who sends one byte per chunk will send 5 bytes of overhead
+	// for every byte of data. ("1\r\nX\r\n" to send "X".)
+	// We want to allow this, since streaming a byte at a time can be legitimate.
+	//
+	// A sender can use chunk extensions to add arbitrary amounts of additional
+	// data per byte read. ("1;very long extension\r\nX\r\n" to send "X".)
+	// We don't want to disallow extensions (although we discard them),
+	// but we also don't want to allow a sender to reduce the signal/noise ratio
+	// arbitrarily.
+	//
+	// We track the amount of excess overhead read,
+	// and produce an error if it grows too large.
+	//
+	// Currently, we say that we're willing to accept 16 bytes of overhead per chunk,
+	// plus twice the amount of real data in the chunk.
+	cr.excess -= 16 + (2 * int64(cr.n))
+	cr.excess = max(cr.excess, 0)
+	if cr.excess > 16*1024 {
+		cr.err = errors.New("chunked encoding contains too much non-data")
+	}
 	if cr.n == 0 {
 		cr.err = io.EOF
 	}
@@ -140,11 +167,6 @@
 	if len(p) >= maxLineLength {
 		return nil, ErrLineTooLong
 	}
-	p = trimTrailingWhitespace(p)
-	p, err = removeChunkExtension(p)
-	if err != nil {
-		return nil, err
-	}
 	return p, nil
 }
 
@@ -199,7 +221,7 @@
 
 // Write the contents of data as one chunk to Wire.
 // NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has
-// a bug since it does not check for success of io.WriteString
+// a bug since it does not check for success of [io.WriteString]
 func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
 
 	// Don't send 0-length data. It looks like EOF for chunked encoding.
@@ -231,9 +253,9 @@
 	return err
 }
 
-// FlushAfterChunkWriter signals from the caller of NewChunkedWriter
+// FlushAfterChunkWriter signals from the caller of [NewChunkedWriter]
 // that each chunk should be followed by a flush. It is used by the
-// http.Transport code to keep the buffering behavior for headers and
+// [net/http.Transport] code to keep the buffering behavior for headers and
 // trailers, but flush out chunks aggressively in the middle for
 // request bodies which may be generated slowly. See Issue 6574.
 type FlushAfterChunkWriter struct {
@@ -241,6 +263,9 @@
 }
 
 func parseHexUint(v []byte) (n uint64, err error) {
+	if len(v) == 0 {
+		return 0, errors.New("empty hex number for chunk length")
+	}
 	for i, b := range v {
 		switch {
 		case '0' <= b && b <= '9':
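
A worked example of the overhead accounting above: a chunk header of "1;" plus 100 extension bytes carries one data byte, so roughly len(header)+2, about 104 bytes, is charged against a credit of 16 + 2*1 = 18, leaving on the order of 86 excess bytes per chunk; the 16 KiB budget is therefore exhausted after roughly 190 such chunks, while a plain "1\r\nX\r\n" chunk nets below zero and is clamped, so byte-at-a-time streaming stays allowed. The sketch below (not part of the patch) feeds such an over-padded stream through the exported httputil.NewChunkedReader, which delegates to this reader, and expects the read to fail:

	package main

	import (
		"fmt"
		"io"
		"net/http/httputil"
		"strings"
	)

	func main() {
		// One byte of data per chunk, padded with a 100-byte chunk extension.
		chunk := "1;" + strings.Repeat("a", 100) + "\r\nX\r\n"
		// Roughly 86 excess bytes per chunk, so ~200 chunks blow the 16 KiB budget.
		body := strings.Repeat(chunk, 200) + "0\r\n\r\n"

		_, err := io.ReadAll(httputil.NewChunkedReader(strings.NewReader(body)))
		fmt.Println(err) // expected with this change: chunked encoding contains too much non-data
	}
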
diff --git a/src/net/http/internal/chunked_test.go b/src/net/http/internal/chunked_test.go
index 5e29a78..af79711 100644
--- a/src/net/http/internal/chunked_test.go
+++ b/src/net/http/internal/chunked_test.go
@@ -153,6 +153,7 @@
 		{"00000000000000000", 0, "http chunk length too large"}, // could accept if we wanted
 		{"10000000000000000", 0, "http chunk length too large"},
 		{"00000000000000001", 0, "http chunk length too large"}, // could accept if we wanted
+		{"", 0, "empty hex number for chunk length"},
 	}
 	for i := uint64(0); i <= 1234; i++ {
 		tests = append(tests, testCase{in: fmt.Sprintf("%x", i), want: i})
@@ -239,3 +240,62 @@
 		t.Errorf("expected %v, got %v", readErr, err)
 	}
 }
+
+func TestChunkReaderTooMuchOverhead(t *testing.T) {
+	// If the sender is sending 100x as many chunk header bytes as chunk data,
+	// we should reject the stream at some point.
+	chunk := []byte("1;")
+	for i := 0; i < 100; i++ {
+		chunk = append(chunk, 'a') // chunk extension
+	}
+	chunk = append(chunk, "\r\nX\r\n"...)
+	const bodylen = 1 << 20
+	r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
+		if i < bodylen {
+			return chunk, nil
+		}
+		return []byte("0\r\n"), nil
+	}})
+	_, err := io.ReadAll(r)
+	if err == nil {
+		t.Fatalf("successfully read body with excessive overhead; want error")
+	}
+}
+
+func TestChunkReaderByteAtATime(t *testing.T) {
+	// Sending one byte per chunk should not trip the excess-overhead detection.
+	const bodylen = 1 << 20
+	r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
+		if i < bodylen {
+			return []byte("1\r\nX\r\n"), nil
+		}
+		return []byte("0\r\n"), nil
+	}})
+	got, err := io.ReadAll(r)
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	if len(got) != bodylen {
+		t.Errorf("read %v bytes, want %v", len(got), bodylen)
+	}
+}
+
+type funcReader struct {
+	f   func(iteration int) ([]byte, error)
+	i   int
+	b   []byte
+	err error
+}
+
+func (r *funcReader) Read(p []byte) (n int, err error) {
+	if len(r.b) == 0 && r.err == nil {
+		r.b, r.err = r.f(r.i)
+		r.i++
+	}
+	n = copy(p, r.b)
+	r.b = r.b[n:]
+	if len(r.b) > 0 {
+		return n, nil
+	}
+	return n, r.err
+}
diff --git a/src/net/http/mapping.go b/src/net/http/mapping.go
new file mode 100644
index 0000000..87e6d5a
--- /dev/null
+++ b/src/net/http/mapping.go
@@ -0,0 +1,78 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+// A mapping is a collection of key-value pairs where the keys are unique.
+// A zero mapping is empty and ready to use.
+// A mapping tries to pick a representation that makes [mapping.find] most efficient.
+type mapping[K comparable, V any] struct {
+	s []entry[K, V] // for few pairs
+	m map[K]V       // for many pairs
+}
+
+type entry[K comparable, V any] struct {
+	key   K
+	value V
+}
+
+// maxSlice is the maximum number of pairs for which a slice is used.
+// It is a variable for benchmarking.
+var maxSlice int = 8
+
+// add adds a key-value pair to the mapping.
+func (h *mapping[K, V]) add(k K, v V) {
+	if h.m == nil && len(h.s) < maxSlice {
+		h.s = append(h.s, entry[K, V]{k, v})
+	} else {
+		if h.m == nil {
+			h.m = map[K]V{}
+			for _, e := range h.s {
+				h.m[e.key] = e.value
+			}
+			h.s = nil
+		}
+		h.m[k] = v
+	}
+}
+
+// find returns the value corresponding to the given key.
+// The second return value is false if there is no value
+// with that key.
+func (h *mapping[K, V]) find(k K) (v V, found bool) {
+	if h == nil {
+		return v, false
+	}
+	if h.m != nil {
+		v, found = h.m[k]
+		return v, found
+	}
+	for _, e := range h.s {
+		if e.key == k {
+			return e.value, true
+		}
+	}
+	return v, false
+}
+
+// eachPair calls f for each pair in the mapping.
+// If f returns false, eachPair returns immediately.
+func (h *mapping[K, V]) eachPair(f func(k K, v V) bool) {
+	if h == nil {
+		return
+	}
+	if h.m != nil {
+		for k, v := range h.m {
+			if !f(k, v) {
+				return
+			}
+		}
+	} else {
+		for _, e := range h.s {
+			if !f(e.key, e.value) {
+				return
+			}
+		}
+	}
+}
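
The test file below exercises this promotion; as a condensed, test-style sketch of the threshold (it would have to live in package http, since mapping and maxSlice are unexported, and it assumes fmt and strconv are imported):

	// Illustrative only, not part of the patch.
	func Example_mappingPromotion() {
		var m mapping[string, int]
		for i := 0; i < maxSlice; i++ {
			m.add(strconv.Itoa(i), i) // slice-backed while len(s) < maxSlice
		}
		fmt.Println(m.m == nil) // still slice-backed
		m.add("one more", maxSlice)
		fmt.Println(m.s == nil, m.m != nil) // promoted to the map representation
		// Output:
		// true
		// true true
	}
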
diff --git a/src/net/http/mapping_test.go b/src/net/http/mapping_test.go
new file mode 100644
index 0000000..0aed9d9
--- /dev/null
+++ b/src/net/http/mapping_test.go
@@ -0,0 +1,154 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+	"cmp"
+	"fmt"
+	"slices"
+	"strconv"
+	"testing"
+)
+
+func TestMapping(t *testing.T) {
+	var m mapping[int, string]
+	for i := 0; i < maxSlice; i++ {
+		m.add(i, strconv.Itoa(i))
+	}
+	if m.m != nil {
+		t.Fatal("m.m != nil")
+	}
+	for i := 0; i < maxSlice; i++ {
+		g, _ := m.find(i)
+		w := strconv.Itoa(i)
+		if g != w {
+			t.Fatalf("%d: got %s, want %s", i, g, w)
+		}
+	}
+	m.add(4, "4")
+	if m.s != nil {
+		t.Fatal("m.s != nil")
+	}
+	if m.m == nil {
+		t.Fatal("m.m == nil")
+	}
+	g, _ := m.find(4)
+	if w := "4"; g != w {
+		t.Fatalf("got %s, want %s", g, w)
+	}
+}
+
+func TestMappingEachPair(t *testing.T) {
+	var m mapping[int, string]
+	var want []entry[int, string]
+	for i := 0; i < maxSlice*2; i++ {
+		v := strconv.Itoa(i)
+		m.add(i, v)
+		want = append(want, entry[int, string]{i, v})
+
+	}
+
+	var got []entry[int, string]
+	m.eachPair(func(k int, v string) bool {
+		got = append(got, entry[int, string]{k, v})
+		return true
+	})
+	slices.SortFunc(got, func(e1, e2 entry[int, string]) int {
+		return cmp.Compare(e1.key, e2.key)
+	})
+	if !slices.Equal(got, want) {
+		t.Errorf("got %v, want %v", got, want)
+	}
+}
+
+func BenchmarkFindChild(b *testing.B) {
+	key := "articles"
+	children := []string{
+		"*",
+		"cmd.html",
+		"code.html",
+		"contrib.html",
+		"contribute.html",
+		"debugging_with_gdb.html",
+		"docs.html",
+		"effective_go.html",
+		"files.log",
+		"gccgo_contribute.html",
+		"gccgo_install.html",
+		"go-logo-black.png",
+		"go-logo-blue.png",
+		"go-logo-white.png",
+		"go1.1.html",
+		"go1.2.html",
+		"go1.html",
+		"go1compat.html",
+		"go_faq.html",
+		"go_mem.html",
+		"go_spec.html",
+		"help.html",
+		"ie.css",
+		"install-source.html",
+		"install.html",
+		"logo-153x55.png",
+		"Makefile",
+		"root.html",
+		"share.png",
+		"sieve.gif",
+		"tos.html",
+		"articles",
+	}
+	if len(children) != 32 {
+		panic("bad len")
+	}
+	for _, n := range []int{2, 4, 8, 16, 32} {
+		list := children[:n]
+		b.Run(fmt.Sprintf("n=%d", n), func(b *testing.B) {
+
+			b.Run("rep=linear", func(b *testing.B) {
+				var entries []entry[string, any]
+				for _, c := range list {
+					entries = append(entries, entry[string, any]{c, nil})
+				}
+				b.ResetTimer()
+				for i := 0; i < b.N; i++ {
+					findChildLinear(key, entries)
+				}
+			})
+			b.Run("rep=map", func(b *testing.B) {
+				m := map[string]any{}
+				for _, c := range list {
+					m[c] = nil
+				}
+				var x any
+				b.ResetTimer()
+				for i := 0; i < b.N; i++ {
+					x = m[key]
+				}
+				_ = x
+			})
+			b.Run(fmt.Sprintf("rep=hybrid%d", maxSlice), func(b *testing.B) {
+				var h mapping[string, any]
+				for _, c := range list {
+					h.add(c, nil)
+				}
+				var x any
+				b.ResetTimer()
+				for i := 0; i < b.N; i++ {
+					x, _ = h.find(key)
+				}
+				_ = x
+			})
+		})
+	}
+}
+
+func findChildLinear(key string, entries []entry[string, any]) any {
+	for _, e := range entries {
+		if key == e.key {
+			return e.value
+		}
+	}
+	return nil
+}
diff --git a/src/net/http/pattern.go b/src/net/http/pattern.go
new file mode 100644
index 0000000..f6af19b
--- /dev/null
+++ b/src/net/http/pattern.go
@@ -0,0 +1,529 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Patterns for ServeMux routing.
+
+package http
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+	"unicode"
+)
+
+// A pattern is something that can be matched against an HTTP request.
+// It has an optional method, an optional host, and a path.
+type pattern struct {
+	str    string // original string
+	method string
+	host   string
+	// The representation of a path differs from the surface syntax, which
+	// simplifies most algorithms.
+	//
+	// Paths ending in '/' are represented with an anonymous "..." wildcard.
+	// For example, the path "a/" is represented as a literal segment "a" followed
+	// by a segment with multi==true.
+	//
+	// Paths ending in "{$}" are represented with the literal segment "/".
+	// For example, the path "a/{$}" is represented as a literal segment "a" followed
+	// by a literal segment "/".
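+	//
+	// For example (see TestParsePattern), "/{w1}/lit/{w2}/" is represented as
+	//
+	//	[]segment{{s: "w1", wild: true}, {s: "lit"},
+	//		{s: "w2", wild: true}, {wild: true, multi: true}}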
+	segments []segment
+	loc      string // source location of registering call, for helpful messages
+}
+
+func (p *pattern) String() string { return p.str }
+
+func (p *pattern) lastSegment() segment {
+	return p.segments[len(p.segments)-1]
+}
+
+// A segment is a pattern piece that matches one or more path segments, or
+// a trailing slash.
+//
+// If wild is false, it matches a literal segment, or, if s == "/", a trailing slash.
+// Examples:
+//
+//	"a" => segment{s: "a"}
+//	"/{$}" => segment{s: "/"}
+//
+// If wild is true and multi is false, it matches a single path segment.
+// Example:
+//
+//	"{x}" => segment{s: "x", wild: true}
+//
+// If both wild and multi are true, it matches all remaining path segments.
+// Example:
+//
+//	"{rest...}" => segment{s: "rest", wild: true, multi: true}
+type segment struct {
+	s     string // literal or wildcard name or "/" for "/{$}".
+	wild  bool
+	multi bool // "..." wildcard
+}
+
+// parsePattern parses a string into a Pattern.
+// The string's syntax is
+//
+//	[METHOD] [HOST]/[PATH]
+//
+// where:
+//   - METHOD is an HTTP method
+//   - HOST is a hostname
+//   - PATH consists of slash-separated segments, where each segment is either
+//     a literal or a wildcard of the form "{name}", "{name...}", or "{$}".
+//
+// METHOD, HOST and PATH are all optional; that is, the string can be "/".
+// If METHOD is present, it must be followed by a single space.
+// Wildcard names must be valid Go identifiers.
+// The "{$}" and "{name...}" wildcard must occur at the end of PATH.
+// PATH may end with a '/'.
+// Wildcard names in a path must be distinct.
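+//
+// For example (mirroring cases in pattern_test.go), the pattern
+// "POST example.com/foo/{w}" has method "POST", host "example.com", and
+// segments
+//
+//	[]segment{{s: "foo"}, {s: "w", wild: true}}
+//
+// while "/a/" has segments
+//
+//	[]segment{{s: "a"}, {wild: true, multi: true}}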
+func parsePattern(s string) (_ *pattern, err error) {
+	if len(s) == 0 {
+		return nil, errors.New("empty pattern")
+	}
+	off := 0 // offset into string
+	defer func() {
+		if err != nil {
+			err = fmt.Errorf("at offset %d: %w", off, err)
+		}
+	}()
+
+	method, rest, found := strings.Cut(s, " ")
+	if !found {
+		rest = method
+		method = ""
+	}
+	if method != "" && !validMethod(method) {
+		return nil, fmt.Errorf("invalid method %q", method)
+	}
+	p := &pattern{str: s, method: method}
+
+	if found {
+		off = len(method) + 1
+	}
+	i := strings.IndexByte(rest, '/')
+	if i < 0 {
+		return nil, errors.New("host/path missing /")
+	}
+	p.host = rest[:i]
+	rest = rest[i:]
+	if j := strings.IndexByte(p.host, '{'); j >= 0 {
+		off += j
+		return nil, errors.New("host contains '{' (missing initial '/'?)")
+	}
+	// At this point, rest is the path.
+	off += i
+
+	// An unclean path with a method that is not CONNECT can never match,
+	// because paths are cleaned before matching.
+	if method != "" && method != "CONNECT" && rest != cleanPath(rest) {
+		return nil, errors.New("non-CONNECT pattern with unclean path can never match")
+	}
+
+	seenNames := map[string]bool{} // remember wildcard names to catch dups
+	for len(rest) > 0 {
+		// Invariant: rest[0] == '/'.
+		rest = rest[1:]
+		off = len(s) - len(rest)
+		if len(rest) == 0 {
+			// Trailing slash.
+			p.segments = append(p.segments, segment{wild: true, multi: true})
+			break
+		}
+		i := strings.IndexByte(rest, '/')
+		if i < 0 {
+			i = len(rest)
+		}
+		var seg string
+		seg, rest = rest[:i], rest[i:]
+		if i := strings.IndexByte(seg, '{'); i < 0 {
+			// Literal.
+			seg = pathUnescape(seg)
+			p.segments = append(p.segments, segment{s: seg})
+		} else {
+			// Wildcard.
+			if i != 0 {
+				return nil, errors.New("bad wildcard segment (must start with '{')")
+			}
+			if seg[len(seg)-1] != '}' {
+				return nil, errors.New("bad wildcard segment (must end with '}')")
+			}
+			name := seg[1 : len(seg)-1]
+			if name == "$" {
+				if len(rest) != 0 {
+					return nil, errors.New("{$} not at end")
+				}
+				p.segments = append(p.segments, segment{s: "/"})
+				break
+			}
+			name, multi := strings.CutSuffix(name, "...")
+			if multi && len(rest) != 0 {
+				return nil, errors.New("{...} wildcard not at end")
+			}
+			if name == "" {
+				return nil, errors.New("empty wildcard")
+			}
+			if !isValidWildcardName(name) {
+				return nil, fmt.Errorf("bad wildcard name %q", name)
+			}
+			if seenNames[name] {
+				return nil, fmt.Errorf("duplicate wildcard name %q", name)
+			}
+			seenNames[name] = true
+			p.segments = append(p.segments, segment{s: name, wild: true, multi: multi})
+		}
+	}
+	return p, nil
+}
+
+func isValidWildcardName(s string) bool {
+	if s == "" {
+		return false
+	}
+	// Valid Go identifier.
+	for i, c := range s {
+		if !unicode.IsLetter(c) && c != '_' && (i == 0 || !unicode.IsDigit(c)) {
+			return false
+		}
+	}
+	return true
+}
+
+func pathUnescape(path string) string {
+	u, err := url.PathUnescape(path)
+	if err != nil {
+		// Invalidly escaped path; use the original
+		return path
+	}
+	return u
+}
+
+// relationship is a relationship between two patterns, p1 and p2.
+type relationship string
+
+const (
+	equivalent   relationship = "equivalent"   // both match the same requests
+	moreGeneral  relationship = "moreGeneral"  // p1 matches everything p2 does & more
+	moreSpecific relationship = "moreSpecific" // p2 matches everything p1 does & more
+	disjoint     relationship = "disjoint"     // there is no request that both match
+	overlaps     relationship = "overlaps"     // there is a request that both match, but neither is more specific
+)
+
+// conflictsWith reports whether p1 conflicts with p2, that is, whether
+// there is a request that both match but where neither has higher precedence
+// than the other.
+//
+//	Precedence is defined by two rules:
+//	1. Patterns with a host win over patterns without a host.
+//	2. Patterns whose method and path are more specific win. One pattern is more
+//	   specific than another if the second matches all the (method, path) pairs
+//	   of the first and more.
+//
+// If rule 1 doesn't apply, then two patterns conflict if their relationship
+// is either equivalence (they match the same set of requests) or overlap
+// (they both match some requests, but neither is more specific than the other).
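+//
+// For example (as exercised in TestConflictsWith), "/a/{x}" and "/{x}/b"
+// conflict: both match "/a/b", yet neither is more specific than the other.
+// In contrast, "/{x}" does not conflict with "/a", because the literal
+// pattern is more specific and takes precedence.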
+func (p1 *pattern) conflictsWith(p2 *pattern) bool {
+	if p1.host != p2.host {
+		// Either one host is empty and the other isn't, in which case the
+		// one with the host wins by rule 1, or neither host is empty
+		// and they differ, so they won't match the same paths.
+		return false
+	}
+	rel := p1.comparePathsAndMethods(p2)
+	return rel == equivalent || rel == overlaps
+}
+
+func (p1 *pattern) comparePathsAndMethods(p2 *pattern) relationship {
+	mrel := p1.compareMethods(p2)
+	// Optimization: avoid a call to comparePaths.
+	if mrel == disjoint {
+		return disjoint
+	}
+	prel := p1.comparePaths(p2)
+	return combineRelationships(mrel, prel)
+}
+
+// compareMethods determines the relationship between the method
+// part of patterns p1 and p2.
+//
+// A method can either be empty, "GET", or something else.
+// The empty string matches any method, so it is the most general.
+// "GET" matches both GET and HEAD.
+// Anything else matches only itself.
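+//
+// For example (see TestCompareMethods), "GET /" is equivalent to "GET /",
+// moreGeneral than "HEAD /", moreSpecific than the method-less "/", and
+// disjoint from "POST /".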
+func (p1 *pattern) compareMethods(p2 *pattern) relationship {
+	if p1.method == p2.method {
+		return equivalent
+	}
+	if p1.method == "" {
+		// p1 matches any method, but p2 does not, so p1 is more general.
+		return moreGeneral
+	}
+	if p2.method == "" {
+		return moreSpecific
+	}
+	if p1.method == "GET" && p2.method == "HEAD" {
+		// p1 matches GET and HEAD; p2 matches only HEAD.
+		return moreGeneral
+	}
+	if p2.method == "GET" && p1.method == "HEAD" {
+		return moreSpecific
+	}
+	return disjoint
+}
+
+// comparePaths determines the relationship between the path
+// part of two patterns.
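+//
+// For example (see TestComparePaths), "/a" is moreSpecific than "/{x}",
+// "/" is moreGeneral than "/a/b", and "/a/{z}/" overlaps "/{z}/a/".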
+func (p1 *pattern) comparePaths(p2 *pattern) relationship {
+	// Optimization: if a path pattern doesn't end in a multi ("...") wildcard, then it
+	// can only match paths with the same number of segments.
+	if len(p1.segments) != len(p2.segments) && !p1.lastSegment().multi && !p2.lastSegment().multi {
+		return disjoint
+	}
+
+	// Consider corresponding segments in the two path patterns.
+	var segs1, segs2 []segment
+	rel := equivalent
+	for segs1, segs2 = p1.segments, p2.segments; len(segs1) > 0 && len(segs2) > 0; segs1, segs2 = segs1[1:], segs2[1:] {
+		rel = combineRelationships(rel, compareSegments(segs1[0], segs2[0]))
+		if rel == disjoint {
+			return rel
+		}
+	}
+	// We've reached the end of the corresponding segments of the patterns.
+	// If they have the same number of segments, then we've already determined
+	// their relationship.
+	if len(segs1) == 0 && len(segs2) == 0 {
+		return rel
+	}
+	// Otherwise, the only way they could fail to be disjoint is if the shorter
+	// pattern ends in a multi. In that case, that multi is more general
+	// than the remainder of the longer pattern, so combine those two relationships.
+	if len(segs1) < len(segs2) && p1.lastSegment().multi {
+		return combineRelationships(rel, moreGeneral)
+	}
+	if len(segs2) < len(segs1) && p2.lastSegment().multi {
+		return combineRelationships(rel, moreSpecific)
+	}
+	return disjoint
+}
+
+// compareSegments determines the relationship between two segments.
+func compareSegments(s1, s2 segment) relationship {
+	if s1.multi && s2.multi {
+		return equivalent
+	}
+	if s1.multi {
+		return moreGeneral
+	}
+	if s2.multi {
+		return moreSpecific
+	}
+	if s1.wild && s2.wild {
+		return equivalent
+	}
+	if s1.wild {
+		if s2.s == "/" {
+			// A single wildcard doesn't match a trailing slash.
+			return disjoint
+		}
+		return moreGeneral
+	}
+	if s2.wild {
+		if s1.s == "/" {
+			return disjoint
+		}
+		return moreSpecific
+	}
+	// Both literals.
+	if s1.s == s2.s {
+		return equivalent
+	}
+	return disjoint
+}
+
+// combineRelationships determines the overall relationship of two patterns
+// given the relationships of a partition of the patterns into two parts.
+//
+// For example, if p1 is more general than p2 in one way but equivalent
+// in the other, then it is more general overall.
+//
+// Or if p1 is more general in one way and more specific in the other, then
+// they overlap.
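+//
+// For example, if the methods are equivalent and the paths overlap, the
+// patterns overlap overall; if the methods are disjoint, the patterns are
+// disjoint regardless of their paths.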
+func combineRelationships(r1, r2 relationship) relationship {
+	switch r1 {
+	case equivalent:
+		return r2
+	case disjoint:
+		return disjoint
+	case overlaps:
+		if r2 == disjoint {
+			return disjoint
+		}
+		return overlaps
+	case moreGeneral, moreSpecific:
+		switch r2 {
+		case equivalent:
+			return r1
+		case inverseRelationship(r1):
+			return overlaps
+		default:
+			return r2
+		}
+	default:
+		panic(fmt.Sprintf("unknown relationship %q", r1))
+	}
+}
+
+// If p1 has relationship `r` to p2, then
+// p2 has inverseRelationship(r) to p1.
+func inverseRelationship(r relationship) relationship {
+	switch r {
+	case moreSpecific:
+		return moreGeneral
+	case moreGeneral:
+		return moreSpecific
+	default:
+		return r
+	}
+}
+
+// isLitOrSingle reports whether the segment is a non-dollar literal or a single wildcard.
+func isLitOrSingle(seg segment) bool {
+	if seg.wild {
+		return !seg.multi
+	}
+	return seg.s != "/"
+}
+
+// describeConflict returns an explanation of why two patterns conflict.
+func describeConflict(p1, p2 *pattern) string {
+	mrel := p1.compareMethods(p2)
+	prel := p1.comparePaths(p2)
+	rel := combineRelationships(mrel, prel)
+	if rel == equivalent {
+		return fmt.Sprintf("%s matches the same requests as %s", p1, p2)
+	}
+	if rel != overlaps {
+		panic("describeConflict called with non-conflicting patterns")
+	}
+	if prel == overlaps {
+		return fmt.Sprintf(`%[1]s and %[2]s both match some paths, like %[3]q.
+But neither is more specific than the other.
+%[1]s matches %[4]q, but %[2]s doesn't.
+%[2]s matches %[5]q, but %[1]s doesn't.`,
+			p1, p2, commonPath(p1, p2), differencePath(p1, p2), differencePath(p2, p1))
+	}
+	if mrel == moreGeneral && prel == moreSpecific {
+		return fmt.Sprintf("%s matches more methods than %s, but has a more specific path pattern", p1, p2)
+	}
+	if mrel == moreSpecific && prel == moreGeneral {
+		return fmt.Sprintf("%s matches fewer methods than %s, but has a more general path pattern", p1, p2)
+	}
+	return fmt.Sprintf("bug: unexpected way for two patterns %s and %s to conflict: methods %s, paths %s", p1, p2, mrel, prel)
+}
+
+// writeMatchingPath writes to b a path that matches the segments.
+func writeMatchingPath(b *strings.Builder, segs []segment) {
+	for _, s := range segs {
+		writeSegment(b, s)
+	}
+}
+
+func writeSegment(b *strings.Builder, s segment) {
+	b.WriteByte('/')
+	if !s.multi && s.s != "/" {
+		b.WriteString(s.s)
+	}
+}
+
+// commonPath returns a path that both p1 and p2 match.
+// It assumes there is such a path.
+func commonPath(p1, p2 *pattern) string {
+	var b strings.Builder
+	var segs1, segs2 []segment
+	for segs1, segs2 = p1.segments, p2.segments; len(segs1) > 0 && len(segs2) > 0; segs1, segs2 = segs1[1:], segs2[1:] {
+		if s1 := segs1[0]; s1.wild {
+			writeSegment(&b, segs2[0])
+		} else {
+			writeSegment(&b, s1)
+		}
+	}
+	if len(segs1) > 0 {
+		writeMatchingPath(&b, segs1)
+	} else if len(segs2) > 0 {
+		writeMatchingPath(&b, segs2)
+	}
+	return b.String()
+}
+
+// differencePath returns a path that p1 matches and p2 doesn't.
+// It assumes there is such a path.
+func differencePath(p1, p2 *pattern) string {
+	var b strings.Builder
+
+	var segs1, segs2 []segment
+	for segs1, segs2 = p1.segments, p2.segments; len(segs1) > 0 && len(segs2) > 0; segs1, segs2 = segs1[1:], segs2[1:] {
+		s1 := segs1[0]
+		s2 := segs2[0]
+		if s1.multi && s2.multi {
+			// From here the patterns match the same paths, so we must have found a difference earlier.
+			b.WriteByte('/')
+			return b.String()
+
+		}
+		if s1.multi && !s2.multi {
+			// s1 ends in a "..." wildcard but s2 does not.
+			// A trailing slash will distinguish them, unless s2 ends in "{$}",
+			// in which case any segment will do; prefer the wildcard name if
+			// it has one.
+			b.WriteByte('/')
+			if s2.s == "/" {
+				if s1.s != "" {
+					b.WriteString(s1.s)
+				} else {
+					b.WriteString("x")
+				}
+			}
+			return b.String()
+		}
+		if !s1.multi && s2.multi {
+			writeSegment(&b, s1)
+		} else if s1.wild && s2.wild {
+			// Both patterns will match whatever we put here; use
+			// the first wildcard name.
+			writeSegment(&b, s1)
+		} else if s1.wild && !s2.wild {
+			// s1 is a wildcard, s2 is a literal.
+			// Any segment other than s2.s will work.
+			// Prefer the wildcard name, but if it's the same as the literal,
+			// tweak the literal.
+			if s1.s != s2.s {
+				writeSegment(&b, s1)
+			} else {
+				b.WriteByte('/')
+				b.WriteString(s2.s + "x")
+			}
+		} else if !s1.wild && s2.wild {
+			writeSegment(&b, s1)
+		} else {
+			// Both are literals. A precondition of this function is that the
+			// patterns overlap, so they must be the same literal. Use it.
+			if s1.s != s2.s {
+				panic(fmt.Sprintf("literals differ: %q and %q", s1.s, s2.s))
+			}
+			writeSegment(&b, s1)
+		}
+	}
+	if len(segs1) > 0 {
+		// p1 is longer than p2, and p2 does not end in a multi.
+		// Anything that matches the rest of p1 will do.
+		writeMatchingPath(&b, segs1)
+	} else if len(segs2) > 0 {
+		writeMatchingPath(&b, segs2)
+	}
+	return b.String()
+}
diff --git a/src/net/http/pattern_test.go b/src/net/http/pattern_test.go
new file mode 100644
index 0000000..f0c84d2
--- /dev/null
+++ b/src/net/http/pattern_test.go
@@ -0,0 +1,494 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+	"slices"
+	"strings"
+	"testing"
+)
+
+func TestParsePattern(t *testing.T) {
+	lit := func(name string) segment {
+		return segment{s: name}
+	}
+
+	wild := func(name string) segment {
+		return segment{s: name, wild: true}
+	}
+
+	multi := func(name string) segment {
+		s := wild(name)
+		s.multi = true
+		return s
+	}
+
+	for _, test := range []struct {
+		in   string
+		want pattern
+	}{
+		{"/", pattern{segments: []segment{multi("")}}},
+		{"/a", pattern{segments: []segment{lit("a")}}},
+		{
+			"/a/",
+			pattern{segments: []segment{lit("a"), multi("")}},
+		},
+		{"/path/to/something", pattern{segments: []segment{
+			lit("path"), lit("to"), lit("something"),
+		}}},
+		{
+			"/{w1}/lit/{w2}",
+			pattern{
+				segments: []segment{wild("w1"), lit("lit"), wild("w2")},
+			},
+		},
+		{
+			"/{w1}/lit/{w2}/",
+			pattern{
+				segments: []segment{wild("w1"), lit("lit"), wild("w2"), multi("")},
+			},
+		},
+		{
+			"example.com/",
+			pattern{host: "example.com", segments: []segment{multi("")}},
+		},
+		{
+			"GET /",
+			pattern{method: "GET", segments: []segment{multi("")}},
+		},
+		{
+			"POST example.com/foo/{w}",
+			pattern{
+				method:   "POST",
+				host:     "example.com",
+				segments: []segment{lit("foo"), wild("w")},
+			},
+		},
+		{
+			"/{$}",
+			pattern{segments: []segment{lit("/")}},
+		},
+		{
+			"DELETE example.com/a/{foo12}/{$}",
+			pattern{method: "DELETE", host: "example.com", segments: []segment{lit("a"), wild("foo12"), lit("/")}},
+		},
+		{
+			"/foo/{$}",
+			pattern{segments: []segment{lit("foo"), lit("/")}},
+		},
+		{
+			"/{a}/foo/{rest...}",
+			pattern{segments: []segment{wild("a"), lit("foo"), multi("rest")}},
+		},
+		{
+			"//",
+			pattern{segments: []segment{lit(""), multi("")}},
+		},
+		{
+			"/foo///./../bar",
+			pattern{segments: []segment{lit("foo"), lit(""), lit(""), lit("."), lit(".."), lit("bar")}},
+		},
+		{
+			"a.com/foo//",
+			pattern{host: "a.com", segments: []segment{lit("foo"), lit(""), multi("")}},
+		},
+		{
+			"/%61%62/%7b/%",
+			pattern{segments: []segment{lit("ab"), lit("{"), lit("%")}},
+		},
+	} {
+		got := mustParsePattern(t, test.in)
+		if !got.equal(&test.want) {
+			t.Errorf("%q:\ngot  %#v\nwant %#v", test.in, got, &test.want)
+		}
+	}
+}
+
+func TestParsePatternError(t *testing.T) {
+	for _, test := range []struct {
+		in       string
+		contains string
+	}{
+		{"", "empty pattern"},
+		{"A=B /", "at offset 0: invalid method"},
+		{" ", "at offset 1: host/path missing /"},
+		{"/{w}x", "at offset 1: bad wildcard segment"},
+		{"/x{w}", "at offset 1: bad wildcard segment"},
+		{"/{wx", "at offset 1: bad wildcard segment"},
+		{"/a/{/}/c", "at offset 3: bad wildcard segment"},
+		{"/a/{%61}/c", "at offset 3: bad wildcard name"}, // wildcard names aren't unescaped
+		{"/{a$}", "at offset 1: bad wildcard name"},
+		{"/{}", "at offset 1: empty wildcard"},
+		{"POST a.com/x/{}/y", "at offset 13: empty wildcard"},
+		{"/{...}", "at offset 1: empty wildcard"},
+		{"/{$...}", "at offset 1: bad wildcard"},
+		{"/{$}/", "at offset 1: {$} not at end"},
+		{"/{$}/x", "at offset 1: {$} not at end"},
+		{"/abc/{$}/x", "at offset 5: {$} not at end"},
+		{"/{a...}/", "at offset 1: {...} wildcard not at end"},
+		{"/{a...}/x", "at offset 1: {...} wildcard not at end"},
+		{"{a}/b", "at offset 0: host contains '{' (missing initial '/'?)"},
+		{"/a/{x}/b/{x...}", "at offset 9: duplicate wildcard name"},
+		{"GET //", "at offset 4: non-CONNECT pattern with unclean path"},
+	} {
+		_, err := parsePattern(test.in)
+		if err == nil || !strings.Contains(err.Error(), test.contains) {
+			t.Errorf("%q:\ngot %v, want error containing %q", test.in, err, test.contains)
+		}
+	}
+}
+
+func (p1 *pattern) equal(p2 *pattern) bool {
+	return p1.method == p2.method && p1.host == p2.host &&
+		slices.Equal(p1.segments, p2.segments)
+}
+
+func mustParsePattern(tb testing.TB, s string) *pattern {
+	tb.Helper()
+	p, err := parsePattern(s)
+	if err != nil {
+		tb.Fatal(err)
+	}
+	return p
+}
+
+func TestCompareMethods(t *testing.T) {
+	for _, test := range []struct {
+		p1, p2 string
+		want   relationship
+	}{
+		{"/", "/", equivalent},
+		{"GET /", "GET /", equivalent},
+		{"HEAD /", "HEAD /", equivalent},
+		{"POST /", "POST /", equivalent},
+		{"GET /", "POST /", disjoint},
+		{"GET /", "/", moreSpecific},
+		{"HEAD /", "/", moreSpecific},
+		{"GET /", "HEAD /", moreGeneral},
+	} {
+		pat1 := mustParsePattern(t, test.p1)
+		pat2 := mustParsePattern(t, test.p2)
+		got := pat1.compareMethods(pat2)
+		if got != test.want {
+			t.Errorf("%s vs %s: got %s, want %s", test.p1, test.p2, got, test.want)
+		}
+		got2 := pat2.compareMethods(pat1)
+		want2 := inverseRelationship(test.want)
+		if got2 != want2 {
+			t.Errorf("%s vs %s: got %s, want %s", test.p2, test.p1, got2, want2)
+		}
+	}
+}
+
+func TestComparePaths(t *testing.T) {
+	for _, test := range []struct {
+		p1, p2 string
+		want   relationship
+	}{
+		// A non-final pattern segment can have one of two values: literal or
+		// single wildcard. A final pattern segment can have one of five: empty
+		// (trailing slash), literal, dollar, single wildcard, or multi
+		// wildcard. Trailing slash and multi wildcard are the same.
+
+		// A literal should be more specific than anything it overlaps, except itself.
+		{"/a", "/a", equivalent},
+		{"/a", "/b", disjoint},
+		{"/a", "/", moreSpecific},
+		{"/a", "/{$}", disjoint},
+		{"/a", "/{x}", moreSpecific},
+		{"/a", "/{x...}", moreSpecific},
+
+		// Adding a segment doesn't change that.
+		{"/b/a", "/b/a", equivalent},
+		{"/b/a", "/b/b", disjoint},
+		{"/b/a", "/b/", moreSpecific},
+		{"/b/a", "/b/{$}", disjoint},
+		{"/b/a", "/b/{x}", moreSpecific},
+		{"/b/a", "/b/{x...}", moreSpecific},
+		{"/{z}/a", "/{z}/a", equivalent},
+		{"/{z}/a", "/{z}/b", disjoint},
+		{"/{z}/a", "/{z}/", moreSpecific},
+		{"/{z}/a", "/{z}/{$}", disjoint},
+		{"/{z}/a", "/{z}/{x}", moreSpecific},
+		{"/{z}/a", "/{z}/{x...}", moreSpecific},
+
+		// Single wildcard on left.
+		{"/{z}", "/a", moreGeneral},
+		{"/{z}", "/a/b", disjoint},
+		{"/{z}", "/{$}", disjoint},
+		{"/{z}", "/{x}", equivalent},
+		{"/{z}", "/", moreSpecific},
+		{"/{z}", "/{x...}", moreSpecific},
+		{"/b/{z}", "/b/a", moreGeneral},
+		{"/b/{z}", "/b/a/b", disjoint},
+		{"/b/{z}", "/b/{$}", disjoint},
+		{"/b/{z}", "/b/{x}", equivalent},
+		{"/b/{z}", "/b/", moreSpecific},
+		{"/b/{z}", "/b/{x...}", moreSpecific},
+
+		// Trailing slash on left.
+		{"/", "/a", moreGeneral},
+		{"/", "/a/b", moreGeneral},
+		{"/", "/{$}", moreGeneral},
+		{"/", "/{x}", moreGeneral},
+		{"/", "/", equivalent},
+		{"/", "/{x...}", equivalent},
+
+		{"/b/", "/b/a", moreGeneral},
+		{"/b/", "/b/a/b", moreGeneral},
+		{"/b/", "/b/{$}", moreGeneral},
+		{"/b/", "/b/{x}", moreGeneral},
+		{"/b/", "/b/", equivalent},
+		{"/b/", "/b/{x...}", equivalent},
+
+		{"/{z}/", "/{z}/a", moreGeneral},
+		{"/{z}/", "/{z}/a/b", moreGeneral},
+		{"/{z}/", "/{z}/{$}", moreGeneral},
+		{"/{z}/", "/{z}/{x}", moreGeneral},
+		{"/{z}/", "/{z}/", equivalent},
+		{"/{z}/", "/a/", moreGeneral},
+		{"/{z}/", "/{z}/{x...}", equivalent},
+		{"/{z}/", "/a/{x...}", moreGeneral},
+		{"/a/{z}/", "/{z}/a/", overlaps},
+		{"/a/{z}/b/", "/{x}/c/{y...}", overlaps},
+
+		// Multi wildcard on left.
+		{"/{m...}", "/a", moreGeneral},
+		{"/{m...}", "/a/b", moreGeneral},
+		{"/{m...}", "/{$}", moreGeneral},
+		{"/{m...}", "/{x}", moreGeneral},
+		{"/{m...}", "/", equivalent},
+		{"/{m...}", "/{x...}", equivalent},
+
+		{"/b/{m...}", "/b/a", moreGeneral},
+		{"/b/{m...}", "/b/a/b", moreGeneral},
+		{"/b/{m...}", "/b/{$}", moreGeneral},
+		{"/b/{m...}", "/b/{x}", moreGeneral},
+		{"/b/{m...}", "/b/", equivalent},
+		{"/b/{m...}", "/b/{x...}", equivalent},
+		{"/b/{m...}", "/a/{x...}", disjoint},
+
+		{"/{z}/{m...}", "/{z}/a", moreGeneral},
+		{"/{z}/{m...}", "/{z}/a/b", moreGeneral},
+		{"/{z}/{m...}", "/{z}/{$}", moreGeneral},
+		{"/{z}/{m...}", "/{z}/{x}", moreGeneral},
+		{"/{z}/{m...}", "/{w}/", equivalent},
+		{"/{z}/{m...}", "/a/", moreGeneral},
+		{"/{z}/{m...}", "/{z}/{x...}", equivalent},
+		{"/{z}/{m...}", "/a/{x...}", moreGeneral},
+		{"/a/{m...}", "/a/b/{y...}", moreGeneral},
+		{"/a/{m...}", "/a/{x}/{y...}", moreGeneral},
+		{"/a/{z}/{m...}", "/a/b/{y...}", moreGeneral},
+		{"/a/{z}/{m...}", "/{z}/a/", overlaps},
+		{"/a/{z}/{m...}", "/{z}/b/{y...}", overlaps},
+		{"/a/{z}/b/{m...}", "/{x}/c/{y...}", overlaps},
+		{"/a/{z}/a/{m...}", "/{x}/b", disjoint},
+
+		// Dollar on left.
+		{"/{$}", "/a", disjoint},
+		{"/{$}", "/a/b", disjoint},
+		{"/{$}", "/{$}", equivalent},
+		{"/{$}", "/{x}", disjoint},
+		{"/{$}", "/", moreSpecific},
+		{"/{$}", "/{x...}", moreSpecific},
+
+		{"/b/{$}", "/b", disjoint},
+		{"/b/{$}", "/b/a", disjoint},
+		{"/b/{$}", "/b/a/b", disjoint},
+		{"/b/{$}", "/b/{$}", equivalent},
+		{"/b/{$}", "/b/{x}", disjoint},
+		{"/b/{$}", "/b/", moreSpecific},
+		{"/b/{$}", "/b/{x...}", moreSpecific},
+		{"/b/{$}", "/b/c/{x...}", disjoint},
+		{"/b/{x}/a/{$}", "/{x}/c/{y...}", overlaps},
+		{"/{x}/b/{$}", "/a/{x}/{y}", disjoint},
+		{"/{x}/b/{$}", "/a/{x}/c", disjoint},
+
+		{"/{z}/{$}", "/{z}/a", disjoint},
+		{"/{z}/{$}", "/{z}/a/b", disjoint},
+		{"/{z}/{$}", "/{z}/{$}", equivalent},
+		{"/{z}/{$}", "/{z}/{x}", disjoint},
+		{"/{z}/{$}", "/{z}/", moreSpecific},
+		{"/{z}/{$}", "/a/", overlaps},
+		{"/{z}/{$}", "/a/{x...}", overlaps},
+		{"/{z}/{$}", "/{z}/{x...}", moreSpecific},
+		{"/a/{z}/{$}", "/{z}/a/", overlaps},
+	} {
+		pat1 := mustParsePattern(t, test.p1)
+		pat2 := mustParsePattern(t, test.p2)
+		if g := pat1.comparePaths(pat1); g != equivalent {
+			t.Errorf("%s does not match itself; got %s", pat1, g)
+		}
+		if g := pat2.comparePaths(pat2); g != equivalent {
+			t.Errorf("%s does not match itself; got %s", pat2, g)
+		}
+		got := pat1.comparePaths(pat2)
+		if got != test.want {
+			t.Errorf("%s vs %s: got %s, want %s", test.p1, test.p2, got, test.want)
+			t.Logf("pat1: %+v\n", pat1.segments)
+			t.Logf("pat2: %+v\n", pat2.segments)
+		}
+		want2 := inverseRelationship(test.want)
+		got2 := pat2.comparePaths(pat1)
+		if got2 != want2 {
+			t.Errorf("%s vs %s: got %s, want %s", test.p2, test.p1, got2, want2)
+		}
+	}
+}
+
+func TestConflictsWith(t *testing.T) {
+	for _, test := range []struct {
+		p1, p2 string
+		want   bool
+	}{
+		{"/a", "/a", true},
+		{"/a", "/ab", false},
+		{"/a/b/cd", "/a/b/cd", true},
+		{"/a/b/cd", "/a/b/c", false},
+		{"/a/b/c", "/a/c/c", false},
+		{"/{x}", "/{y}", true},
+		{"/{x}", "/a", false}, // more specific
+		{"/{x}/{y}", "/{x}/a", false},
+		{"/{x}/{y}", "/{x}/a/b", false},
+		{"/{x}", "/a/{y}", false},
+		{"/{x}/{y}", "/{x}/a/", false},
+		{"/{x}", "/a/{y...}", false},           // more specific
+		{"/{x}/a/{y}", "/{x}/a/{y...}", false}, // more specific
+		{"/{x}/{y}", "/{x}/a/{$}", false},      // more specific
+		{"/{x}/{y}/{$}", "/{x}/a/{$}", false},
+		{"/a/{x}", "/{x}/b", true},
+		{"/", "GET /", false},
+		{"/", "GET /foo", false},
+		{"GET /", "GET /foo", false},
+		{"GET /", "/foo", true},
+		{"GET /foo", "HEAD /", true},
+	} {
+		pat1 := mustParsePattern(t, test.p1)
+		pat2 := mustParsePattern(t, test.p2)
+		got := pat1.conflictsWith(pat2)
+		if got != test.want {
+			t.Errorf("%q.ConflictsWith(%q) = %t, want %t",
+				test.p1, test.p2, got, test.want)
+		}
+		// conflictsWith should be commutative.
+		got = pat2.conflictsWith(pat1)
+		if got != test.want {
+			t.Errorf("%q.ConflictsWith(%q) = %t, want %t",
+				test.p2, test.p1, got, test.want)
+		}
+	}
+}
+
+func TestRegisterConflict(t *testing.T) {
+	mux := NewServeMux()
+	pat1 := "/a/{x}/"
+	if err := mux.registerErr(pat1, NotFoundHandler()); err != nil {
+		t.Fatal(err)
+	}
+	pat2 := "/a/{y}/{z...}"
+	err := mux.registerErr(pat2, NotFoundHandler())
+	var got string
+	if err == nil {
+		got = "<nil>"
+	} else {
+		got = err.Error()
+	}
+	want := "matches the same requests as"
+	if !strings.Contains(got, want) {
+		t.Errorf("got\n%s\nwant\n%s", got, want)
+	}
+}
+
+func TestDescribeConflict(t *testing.T) {
+	for _, test := range []struct {
+		p1, p2 string
+		want   string
+	}{
+		{"/a/{x}", "/a/{y}", "the same requests"},
+		{"/", "/{m...}", "the same requests"},
+		{"/a/{x}", "/{y}/b", "both match some paths"},
+		{"/a", "GET /{x}", "matches more methods than GET /{x}, but has a more specific path pattern"},
+		{"GET /a", "HEAD /", "matches more methods than HEAD /, but has a more specific path pattern"},
+		{"POST /", "/a", "matches fewer methods than /a, but has a more general path pattern"},
+	} {
+		got := describeConflict(mustParsePattern(t, test.p1), mustParsePattern(t, test.p2))
+		if !strings.Contains(got, test.want) {
+			t.Errorf("%s vs. %s:\ngot:\n%s\nwhich does not contain %q",
+				test.p1, test.p2, got, test.want)
+		}
+	}
+}
+
+func TestCommonPath(t *testing.T) {
+	for _, test := range []struct {
+		p1, p2 string
+		want   string
+	}{
+		{"/a/{x}", "/{x}/a", "/a/a"},
+		{"/a/{z}/", "/{z}/a/", "/a/a/"},
+		{"/a/{z}/{m...}", "/{z}/a/", "/a/a/"},
+		{"/{z}/{$}", "/a/", "/a/"},
+		{"/{z}/{$}", "/a/{x...}", "/a/"},
+		{"/a/{z}/{$}", "/{z}/a/", "/a/a/"},
+		{"/a/{x}/b/{y...}", "/{x}/c/{y...}", "/a/c/b/"},
+		{"/a/{x}/b/", "/{x}/c/{y...}", "/a/c/b/"},
+		{"/a/{x}/b/{$}", "/{x}/c/{y...}", "/a/c/b/"},
+		{"/a/{z}/{x...}", "/{z}/b/{y...}", "/a/b/"},
+	} {
+		pat1 := mustParsePattern(t, test.p1)
+		pat2 := mustParsePattern(t, test.p2)
+		if pat1.comparePaths(pat2) != overlaps {
+			t.Fatalf("%s does not overlap %s", test.p1, test.p2)
+		}
+		got := commonPath(pat1, pat2)
+		if got != test.want {
+			t.Errorf("%s vs. %s: got %q, want %q", test.p1, test.p2, got, test.want)
+		}
+	}
+}
+
+func TestDifferencePath(t *testing.T) {
+	for _, test := range []struct {
+		p1, p2 string
+		want   string
+	}{
+		{"/a/{x}", "/{x}/a", "/a/x"},
+		{"/{x}/a", "/a/{x}", "/x/a"},
+		{"/a/{z}/", "/{z}/a/", "/a/z/"},
+		{"/{z}/a/", "/a/{z}/", "/z/a/"},
+		{"/{a}/a/", "/a/{z}/", "/ax/a/"},
+		{"/a/{z}/{x...}", "/{z}/b/{y...}", "/a/z/"},
+		{"/{z}/b/{y...}", "/a/{z}/{x...}", "/z/b/"},
+		{"/a/b/", "/a/b/c", "/a/b/"},
+		{"/a/b/{x...}", "/a/b/c", "/a/b/"},
+		{"/a/b/{x...}", "/a/b/c/d", "/a/b/"},
+		{"/a/b/{x...}", "/a/b/c/d/", "/a/b/"},
+		{"/a/{z}/{m...}", "/{z}/a/", "/a/z/"},
+		{"/{z}/a/", "/a/{z}/{m...}", "/z/a/"},
+		{"/{z}/{$}", "/a/", "/z/"},
+		{"/a/", "/{z}/{$}", "/a/x"},
+		{"/{z}/{$}", "/a/{x...}", "/z/"},
+		{"/a/{foo...}", "/{z}/{$}", "/a/foo"},
+		{"/a/{z}/{$}", "/{z}/a/", "/a/z/"},
+		{"/{z}/a/", "/a/{z}/{$}", "/z/a/x"},
+		{"/a/{x}/b/{y...}", "/{x}/c/{y...}", "/a/x/b/"},
+		{"/{x}/c/{y...}", "/a/{x}/b/{y...}", "/x/c/"},
+		{"/a/{c}/b/", "/{x}/c/{y...}", "/a/cx/b/"},
+		{"/{x}/c/{y...}", "/a/{c}/b/", "/x/c/"},
+		{"/a/{x}/b/{$}", "/{x}/c/{y...}", "/a/x/b/"},
+		{"/{x}/c/{y...}", "/a/{x}/b/{$}", "/x/c/"},
+	} {
+		pat1 := mustParsePattern(t, test.p1)
+		pat2 := mustParsePattern(t, test.p2)
+		rel := pat1.comparePaths(pat2)
+		if rel != overlaps && rel != moreGeneral {
+			t.Fatalf("%s vs. %s are %s, need overlaps or moreGeneral", pat1, pat2, rel)
+		}
+		got := differencePath(pat1, pat2)
+		if got != test.want {
+			t.Errorf("%s vs. %s: got %q, want %q", test.p1, test.p2, got, test.want)
+		}
+	}
+}
diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go
index bc3225d..bc48f11 100644
--- a/src/net/http/pprof/pprof.go
+++ b/src/net/http/pprof/pprof.go
@@ -47,12 +47,12 @@
 //	go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30
 //
 // Or to look at the goroutine blocking profile, after calling
-// runtime.SetBlockProfileRate in your program:
+// [runtime.SetBlockProfileRate] in your program:
 //
 //	go tool pprof http://localhost:6060/debug/pprof/block
 //
 // Or to look at the holders of contended mutexes, after calling
-// runtime.SetMutexProfileFraction in your program:
+// [runtime.SetMutexProfileFraction] in your program:
 //
 //	go tool pprof http://localhost:6060/debug/pprof/mutex
 //
diff --git a/src/net/http/pprof/pprof_test.go b/src/net/http/pprof/pprof_test.go
index f82ad45..24ad59a 100644
--- a/src/net/http/pprof/pprof_test.go
+++ b/src/net/http/pprof/pprof_test.go
@@ -6,12 +6,14 @@
 
 import (
 	"bytes"
+	"encoding/base64"
 	"fmt"
 	"internal/profile"
 	"internal/testenv"
 	"io"
 	"net/http"
 	"net/http/httptest"
+	"path/filepath"
 	"runtime"
 	"runtime/pprof"
 	"strings"
@@ -261,3 +263,64 @@
 	}
 	return false
 }
+
+// TestDeltaProfileEmptyBase validates that we still receive a valid delta
+// profile even if the base contains no samples.
+//
+// Regression test for https://go.dev/issue/64566.
+func TestDeltaProfileEmptyBase(t *testing.T) {
+	if testing.Short() {
+		// Delta profile collection has a 1s minimum.
+		t.Skip("skipping in -short mode")
+	}
+
+	testenv.MustHaveGoRun(t)
+
+	gotool, err := testenv.GoTool()
+	if err != nil {
+		t.Fatalf("error finding go tool: %v", err)
+	}
+
+	out, err := testenv.Command(t, gotool, "run", filepath.Join("testdata", "delta_mutex.go")).CombinedOutput()
+	if err != nil {
+		t.Fatalf("error running profile collection: %v\noutput: %s", err, out)
+	}
+
+	// Log the binary output for debugging failures.
+	b64 := make([]byte, base64.StdEncoding.EncodedLen(len(out)))
+	base64.StdEncoding.Encode(b64, out)
+	t.Logf("Output in base64.StdEncoding: %s", b64)
+
+	p, err := profile.Parse(bytes.NewReader(out))
+	if err != nil {
+		t.Fatalf("Parse got err %v want nil", err)
+	}
+
+	t.Logf("Output as parsed Profile: %s", p)
+
+	if len(p.SampleType) != 2 {
+		t.Errorf("len(p.SampleType) got %d want 2", len(p.SampleType))
+	}
+	if p.SampleType[0].Type != "contentions" {
+		t.Errorf(`p.SampleType[0].Type got %q want "contentions"`, p.SampleType[0].Type)
+	}
+	if p.SampleType[0].Unit != "count" {
+		t.Errorf(`p.SampleType[0].Unit got %q want "count"`, p.SampleType[0].Unit)
+	}
+	if p.SampleType[1].Type != "delay" {
+		t.Errorf(`p.SampleType[1].Type got %q want "delay"`, p.SampleType[1].Type)
+	}
+	if p.SampleType[1].Unit != "nanoseconds" {
+		t.Errorf(`p.SampleType[1].Unit got %q want "nanoseconds"`, p.SampleType[1].Unit)
+	}
+
+	if p.PeriodType == nil {
+		t.Fatal("p.PeriodType got nil want not nil")
+	}
+	if p.PeriodType.Type != "contentions" {
+		t.Errorf(`p.PeriodType.Type got %q want "contentions"`, p.PeriodType.Type)
+	}
+	if p.PeriodType.Unit != "count" {
+		t.Errorf(`p.PeriodType.Unit got %q want "count"`, p.PeriodType.Unit)
+	}
+}
diff --git a/src/net/http/pprof/testdata/delta_mutex.go b/src/net/http/pprof/testdata/delta_mutex.go
new file mode 100644
index 0000000..634069c
--- /dev/null
+++ b/src/net/http/pprof/testdata/delta_mutex.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This binary collects a 1s delta mutex profile and dumps it to os.Stdout.
+//
+// This is in a subprocess because we want the base mutex profile to be empty
+// (as a regression test for https://go.dev/issue/64566) and the only way to
+// force reset the profile is to create a new subprocess.
+//
+// This manually collects the HTTP response and dumps to stdout in order to
+// avoid any flakiness around port selection for a real HTTP server.
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"net/http"
+	"net/http/httptest"
+	"net/http/pprof"
+	"runtime"
+)
+
+func main() {
+	// Disable the mutex profiler. This is the default, but that default is
+	// load-bearing for this test, which needs the base profile to be empty.
+	runtime.SetMutexProfileFraction(0)
+
+	h := pprof.Handler("mutex")
+
+	req := httptest.NewRequest("GET", "/debug/pprof/mutex?seconds=1", nil)
+	rec := httptest.NewRecorder()
+	rec.Body = new(bytes.Buffer)
+
+	h.ServeHTTP(rec, req)
+	resp := rec.Result()
+	if resp.StatusCode != http.StatusOK {
+		log.Fatalf("Request failed: %s\n%s", resp.Status, rec.Body)
+	}
+
+	fmt.Print(rec.Body)
+}
diff --git a/src/net/http/request.go b/src/net/http/request.go
index 81f79566..99fdebcf 100644
--- a/src/net/http/request.go
+++ b/src/net/http/request.go
@@ -107,14 +107,10 @@
 //
 // The field semantics differ slightly between client and server
 // usage. In addition to the notes on the fields below, see the
-// documentation for Request.Write and RoundTripper.
+// documentation for [Request.Write] and [RoundTripper].
 type Request struct {
 	// Method specifies the HTTP method (GET, POST, PUT, etc.).
 	// For client requests, an empty string means GET.
-	//
-	// Go's HTTP client does not support sending a request with
-	// the CONNECT method. See the documentation on Transport for
-	// details.
 	Method string
 
 	// URL specifies either the URI being requested (for server
@@ -329,10 +325,15 @@
 	// It is unexported to prevent people from using Context wrong
 	// and mutating the contexts held by callers of the same request.
 	ctx context.Context
+
+	// The following fields are for requests matched by ServeMux.
+	pat         *pattern          // the pattern that matched
+	matches     []string          // values for the matching wildcards in pat
+	otherValues map[string]string // for calls to SetPathValue that don't match a wildcard
 }
 
 // Context returns the request's context. To change the context, use
-// Clone or WithContext.
+// [Request.Clone] or [Request.WithContext].
 //
 // The returned context is always non-nil; it defaults to the
 // background context.
@@ -356,8 +357,8 @@
 // lifetime of a request and its response: obtaining a connection,
 // sending the request, and reading the response headers and body.
 //
-// To create a new request with a context, use NewRequestWithContext.
-// To make a deep copy of a request with a new context, use Request.Clone.
+// To create a new request with a context, use [NewRequestWithContext].
+// To make a deep copy of a request with a new context, use [Request.Clone].
 func (r *Request) WithContext(ctx context.Context) *Request {
 	if ctx == nil {
 		panic("nil context")
@@ -396,6 +397,20 @@
 	r2.Form = cloneURLValues(r.Form)
 	r2.PostForm = cloneURLValues(r.PostForm)
 	r2.MultipartForm = cloneMultipartForm(r.MultipartForm)
+
+	// Copy matches and otherValues. See issue 61410.
+	if s := r.matches; s != nil {
+		s2 := make([]string, len(s))
+		copy(s2, s)
+		r2.matches = s2
+	}
+	if s := r.otherValues; s != nil {
+		s2 := make(map[string]string, len(s))
+		for k, v := range s {
+			s2[k] = v
+		}
+		r2.otherValues = s2
+	}
 	return r2
 }
 
@@ -420,7 +435,7 @@
 var ErrNoCookie = errors.New("http: named cookie not present")
 
 // Cookie returns the named cookie provided in the request or
-// ErrNoCookie if not found.
+// [ErrNoCookie] if not found.
 // If multiple cookies match the given name, only one cookie will
 // be returned.
 func (r *Request) Cookie(name string) (*Cookie, error) {
@@ -434,7 +449,7 @@
 }
 
 // AddCookie adds a cookie to the request. Per RFC 6265 section 5.4,
-// AddCookie does not attach more than one Cookie header field. That
+// AddCookie does not attach more than one [Cookie] header field. That
 // means all cookies, if any, are written into the same line,
 // separated by semicolon.
 // AddCookie only sanitizes c's name and value, and does not sanitize
@@ -452,7 +467,7 @@
 //
 // Referer is misspelled as in the request itself, a mistake from the
 // earliest days of HTTP.  This value can also be fetched from the
-// Header map as Header["Referer"]; the benefit of making it available
+// [Header] map as Header["Referer"]; the benefit of making it available
 // as a method is that the compiler can diagnose programs that use the
 // alternate (correct English) spelling req.Referrer() but cannot
 // diagnose programs that use Header["Referrer"].
@@ -470,7 +485,7 @@
 
 // MultipartReader returns a MIME multipart reader if this is a
 // multipart/form-data or a multipart/mixed POST request, else returns nil and an error.
-// Use this function instead of ParseMultipartForm to
+// Use this function instead of [Request.ParseMultipartForm] to
 // process the request body as a stream.
 func (r *Request) MultipartReader() (*multipart.Reader, error) {
 	if r.MultipartForm == multipartByReader {
@@ -533,15 +548,15 @@
 //	TransferEncoding
 //	Body
 //
-// If Body is present, Content-Length is <= 0 and TransferEncoding
+// If Body is present, Content-Length is <= 0 and [Request.TransferEncoding]
 // hasn't been set to "identity", Write adds "Transfer-Encoding:
 // chunked" to the header. Body is closed after it is sent.
 func (r *Request) Write(w io.Writer) error {
 	return r.write(w, false, nil, nil)
 }
 
-// WriteProxy is like Write but writes the request in the form
-// expected by an HTTP proxy. In particular, WriteProxy writes the
+// WriteProxy is like [Request.Write] but writes the request in the form
+// expected by an HTTP proxy. In particular, [Request.WriteProxy] writes the
 // initial Request-URI line of the request with an absolute URI, per
 // section 5.3 of RFC 7230, including the scheme and host.
 // In either case, WriteProxy also writes a Host header, using
@@ -669,6 +684,8 @@
 		userAgent = r.Header.Get("User-Agent")
 	}
 	if userAgent != "" {
+		userAgent = headerNewlineToSpace.Replace(userAgent)
+		userAgent = textproto.TrimString(userAgent)
 		_, err = fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent)
 		if err != nil {
 			return err
@@ -834,32 +851,33 @@
 	return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1
 }
 
-// NewRequest wraps NewRequestWithContext using context.Background.
+// NewRequest wraps [NewRequestWithContext] using [context.Background].
 func NewRequest(method, url string, body io.Reader) (*Request, error) {
 	return NewRequestWithContext(context.Background(), method, url, body)
 }
 
-// NewRequestWithContext returns a new Request given a method, URL, and
+// NewRequestWithContext returns a new [Request] given a method, URL, and
 // optional body.
 //
-// If the provided body is also an io.Closer, the returned
-// Request.Body is set to body and will be closed by the Client
-// methods Do, Post, and PostForm, and Transport.RoundTrip.
+// If the provided body is also an [io.Closer], the returned
+// [Request.Body] is set to body and will be closed (possibly
+// asynchronously) by the Client methods Do, Post, and PostForm,
+// and [Transport.RoundTrip].
 //
 // NewRequestWithContext returns a Request suitable for use with
-// Client.Do or Transport.RoundTrip. To create a request for use with
-// testing a Server Handler, either use the NewRequest function in the
-// net/http/httptest package, use ReadRequest, or manually update the
+// [Client.Do] or [Transport.RoundTrip]. To create a request for use with
+// testing a Server Handler, either use the [NewRequest] function in the
+// net/http/httptest package, use [ReadRequest], or manually update the
 // Request fields. For an outgoing client request, the context
 // controls the entire lifetime of a request and its response:
 // obtaining a connection, sending the request, and reading the
 // response headers and body. See the Request type's documentation for
 // the difference between inbound and outbound request fields.
 //
-// If body is of type *bytes.Buffer, *bytes.Reader, or
-// *strings.Reader, the returned request's ContentLength is set to its
+// If body is of type [*bytes.Buffer], [*bytes.Reader], or
+// [*strings.Reader], the returned request's ContentLength is set to its
 // exact value (instead of -1), GetBody is populated (so 307 and 308
-// redirects can replay the body), and Body is set to NoBody if the
+// redirects can replay the body), and Body is set to [NoBody] if the
 // ContentLength is 0.
 func NewRequestWithContext(ctx context.Context, method, url string, body io.Reader) (*Request, error) {
 	if method == "" {
@@ -983,7 +1001,7 @@
 // The username may not contain a colon. Some protocols may impose
 // additional requirements on pre-escaping the username and
 // password. For instance, when used with OAuth2, both arguments must
-// be URL encoded first with url.QueryEscape.
+// be URL encoded first with [url.QueryEscape].
 func (r *Request) SetBasicAuth(username, password string) {
 	r.Header.Set("Authorization", "Basic "+basicAuth(username, password))
 }
@@ -1017,8 +1035,8 @@
 // ReadRequest reads and parses an incoming request from b.
 //
 // ReadRequest is a low-level function and should only be used for
-// specialized applications; most code should use the Server to read
-// requests and handle them via the Handler interface. ReadRequest
+// specialized applications; most code should use the [Server] to read
+// requests and handle them via the [Handler] interface. ReadRequest
 // only supports HTTP/1.x requests. For HTTP/2, use golang.org/x/net/http2.
 func ReadRequest(b *bufio.Reader) (*Request, error) {
 	req, err := readRequest(b)
@@ -1127,15 +1145,15 @@
 	return req, nil
 }
 
-// MaxBytesReader is similar to io.LimitReader but is intended for
+// MaxBytesReader is similar to [io.LimitReader] but is intended for
 // limiting the size of incoming request bodies. In contrast to
 // io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
-// non-nil error of type *MaxBytesError for a Read beyond the limit,
+// non-nil error of type [*MaxBytesError] for a Read beyond the limit,
 // and closes the underlying reader when its Close method is called.
 //
 // MaxBytesReader prevents clients from accidentally or maliciously
 // sending a large request and wasting server resources. If possible,
-// it tells the ResponseWriter to close the connection after the limit
+// it tells the [ResponseWriter] to close the connection after the limit
 // has been reached.
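+//
+// A typical use in a handler (1<<20 is just an example limit):
+//
+//	r.Body = MaxBytesReader(w, r.Body, 1<<20)
+//
+// After the limit is exceeded, reads from r.Body return a *MaxBytesError.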
 func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {
 	if n < 0 { // Treat negative limits as equivalent to 0.
@@ -1144,7 +1162,7 @@
 	return &maxBytesReader{w: w, r: r, i: n, n: n}
 }
 
-// MaxBytesError is returned by MaxBytesReader when its read limit is exceeded.
+// MaxBytesError is returned by [MaxBytesReader] when its read limit is exceeded.
 type MaxBytesError struct {
 	Limit int64
 }
@@ -1269,14 +1287,14 @@
 // as a form and puts the results into both r.PostForm and r.Form. Request body
 // parameters take precedence over URL query string values in r.Form.
 //
-// If the request Body's size has not already been limited by MaxBytesReader,
+// If the request Body's size has not already been limited by [MaxBytesReader],
 // the size is capped at 10MB.
 //
 // For other HTTP methods, or when the Content-Type is not
 // application/x-www-form-urlencoded, the request Body is not read, and
 // r.PostForm is initialized to a non-nil, empty value.
 //
-// ParseMultipartForm calls ParseForm automatically.
+// [Request.ParseMultipartForm] calls ParseForm automatically.
 // ParseForm is idempotent.
 func (r *Request) ParseForm() error {
 	var err error
@@ -1317,7 +1335,7 @@
 // The whole request body is parsed and up to a total of maxMemory bytes of
 // its file parts are stored in memory, with the remainder stored on
 // disk in temporary files.
-// ParseMultipartForm calls ParseForm if necessary.
+// ParseMultipartForm calls [Request.ParseForm] if necessary.
 // If ParseForm returns an error, ParseMultipartForm returns it but also
 // continues parsing the request body.
 // After one call to ParseMultipartForm, subsequent calls have no effect.
@@ -1360,12 +1378,16 @@
 }
 
 // FormValue returns the first value for the named component of the query.
-// POST and PUT body parameters take precedence over URL query string values.
-// FormValue calls ParseMultipartForm and ParseForm if necessary and ignores
-// any errors returned by these functions.
+// The precedence order:
+//  1. application/x-www-form-urlencoded form body (POST, PUT, PATCH only)
+//  2. query parameters (always)
+//  3. multipart/form-data form body (always)
+//
+// FormValue calls [Request.ParseMultipartForm] and [Request.ParseForm]
+// if necessary and ignores any errors returned by these functions.
 // If key is not present, FormValue returns the empty string.
 // To access multiple values of the same key, call ParseForm and
-// then inspect Request.Form directly.
+// then inspect [Request.Form] directly.
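+//
+// For example, for a POST request to "/search?q=fromquery" whose
+// application/x-www-form-urlencoded body is "q=frombody",
+// FormValue("q") returns "frombody": the form body takes precedence
+// over the query string.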
 func (r *Request) FormValue(key string) string {
 	if r.Form == nil {
 		r.ParseMultipartForm(defaultMaxMemory)
@@ -1377,8 +1399,8 @@
 }
 
 // PostFormValue returns the first value for the named component of the POST,
-// PATCH, or PUT request body. URL query parameters are ignored.
-// PostFormValue calls ParseMultipartForm and ParseForm if necessary and ignores
+// PUT, or PATCH request body. URL query parameters are ignored.
+// PostFormValue calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary and ignores
 // any errors returned by these functions.
 // If key is not present, PostFormValue returns the empty string.
 func (r *Request) PostFormValue(key string) string {
@@ -1392,7 +1414,7 @@
 }
 
 // FormFile returns the first file for the provided form key.
-// FormFile calls ParseMultipartForm and ParseForm if necessary.
+// FormFile calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary.
 func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) {
 	if r.MultipartForm == multipartByReader {
 		return nil, nil, errors.New("http: multipart handled by MultipartReader")
@@ -1412,6 +1434,50 @@
 	return nil, nil, ErrMissingFile
 }
 
+// PathValue returns the value for the named path wildcard in the [ServeMux] pattern
+// that matched the request.
+// It returns the empty string if the request was not matched against a pattern
+// or there is no such wildcard in the pattern.
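+//
+// For example, if the request matched the pattern
+// "/names/{name}/{other...}" with the path "/names/john/address", then
+// PathValue("name") returns "john", PathValue("other") returns "address",
+// and PathValue("x") returns "".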
+func (r *Request) PathValue(name string) string {
+	if i := r.patIndex(name); i >= 0 {
+		return r.matches[i]
+	}
+	return r.otherValues[name]
+}
+
+// SetPathValue sets name to value, so that subsequent calls to r.PathValue(name)
+// return value.
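+//
+// For example, after r.SetPathValue("id", "42"), r.PathValue("id") returns
+// "42", whether or not the matched pattern contains an {id} wildcard.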
+func (r *Request) SetPathValue(name, value string) {
+	if i := r.patIndex(name); i >= 0 {
+		r.matches[i] = value
+	} else {
+		if r.otherValues == nil {
+			r.otherValues = map[string]string{}
+		}
+		r.otherValues[name] = value
+	}
+}
+
+// patIndex returns the index of name in the list of named wildcards of the
+// request's pattern, or -1 if there is no such name.
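+//
+// For the pattern "/{a}/is/{b}/{c...}", for example, patIndex("b") returns 1:
+// only wildcard segments are counted, so the literal "is" is skipped.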
+func (r *Request) patIndex(name string) int {
+	// The linear search seems expensive compared to a map, but just creating the map
+	// takes a lot of time, and most patterns will just have a couple of wildcards.
+	if r.pat == nil {
+		return -1
+	}
+	i := 0
+	for _, seg := range r.pat.segments {
+		if seg.wild && seg.s != "" {
+			if name == seg.s {
+				return i
+			}
+			i++
+		}
+	}
+	return -1
+}
+
 func (r *Request) expectsContinue() bool {
 	return hasToken(r.Header.get("Expect"), "100-continue")
 }
diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
index a32b583..6ce3233 100644
--- a/src/net/http/request_test.go
+++ b/src/net/http/request_test.go
@@ -15,7 +15,9 @@
 	"io"
 	"math"
 	"mime/multipart"
+	"net/http"
 	. "net/http"
+	"net/http/httptest"
 	"net/url"
 	"os"
 	"reflect"
@@ -787,6 +789,25 @@
 	}
 }
 
+func TestRequestBadUserAgent(t *testing.T) {
+	got := []string{}
+	req, err := NewRequest("GET", "http://foo/after", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	req.Header.Set("User-Agent", "evil\r\nX-Evil: evil")
+	req.Write(logWrites{t, &got})
+	want := []string{
+		"GET /after HTTP/1.1\r\n",
+		"Host: foo\r\n",
+		"User-Agent: evil  X-Evil: evil\r\n",
+		"\r\n",
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("Writes = %q\n  Want = %q", got, want)
+	}
+}
+
 func TestStarRequest(t *testing.T) {
 	req, err := ReadRequest(bufio.NewReader(strings.NewReader("M-SEARCH * HTTP/1.1\r\n\r\n")))
 	if err != nil {
@@ -1032,6 +1053,33 @@
 	}
 }
 
+// Ensure that Request.Clone works correctly with PathValue.
+// See issue 64911.
+func TestRequestClonePathValue(t *testing.T) {
+	req, _ := http.NewRequest("GET", "https://example.org/", nil)
+	req.SetPathValue("p1", "orig")
+
+	clonedReq := req.Clone(context.Background())
+	clonedReq.SetPathValue("p2", "copy")
+
+	// Ensure that any modifications to the cloned
+	// request do not pollute the original request.
+	if g, w := req.PathValue("p2"), ""; g != w {
+		t.Fatalf("p2 mismatch got %q, want %q", g, w)
+	}
+	if g, w := req.PathValue("p1"), "orig"; g != w {
+		t.Fatalf("p1 mismatch got %q, want %q", g, w)
+	}
+
+	// Assert on the changes to the cloned request.
+	if g, w := clonedReq.PathValue("p1"), "orig"; g != w {
+		t.Fatalf("p1 mismatch got %q, want %q", g, w)
+	}
+	if g, w := clonedReq.PathValue("p2"), "copy"; g != w {
+		t.Fatalf("p2 mismatch got %q, want %q", g, w)
+	}
+}
+
 // Issue 34878: verify we don't panic when including basic auth (Go 1.13 regression)
 func TestNoPanicOnRoundTripWithBasicAuth(t *testing.T) { run(t, testNoPanicWithBasicAuth) }
 func testNoPanicWithBasicAuth(t *testing.T, mode testMode) {
@@ -1395,3 +1443,141 @@
 		t.Error("errors.Is(ErrNotSupported, errors.ErrUnsupported) failed")
 	}
 }
+
+func TestPathValueNoMatch(t *testing.T) {
+	// Check that PathValue and SetPathValue work on a Request that was never matched.
+	var r Request
+	if g, w := r.PathValue("x"), ""; g != w {
+		t.Errorf("got %q, want %q", g, w)
+	}
+	r.SetPathValue("x", "a")
+	if g, w := r.PathValue("x"), "a"; g != w {
+		t.Errorf("got %q, want %q", g, w)
+	}
+}
+
+func TestPathValue(t *testing.T) {
+	for _, test := range []struct {
+		pattern string
+		url     string
+		want    map[string]string
+	}{
+		{
+			"/{a}/is/{b}/{c...}",
+			"/now/is/the/time/for/all",
+			map[string]string{
+				"a": "now",
+				"b": "the",
+				"c": "time/for/all",
+				"d": "",
+			},
+		},
+		{
+			"/names/{name}/{other...}",
+			"/names/%2fjohn/address",
+			map[string]string{
+				"name":  "/john",
+				"other": "address",
+			},
+		},
+		{
+			"/names/{name}/{other...}",
+			"/names/john%2Fdoe/there/is%2F/more",
+			map[string]string{
+				"name":  "john/doe",
+				"other": "there/is//more",
+			},
+		},
+	} {
+		mux := NewServeMux()
+		mux.HandleFunc(test.pattern, func(w ResponseWriter, r *Request) {
+			for name, want := range test.want {
+				got := r.PathValue(name)
+				if got != want {
+					t.Errorf("%q, %q: got %q, want %q", test.pattern, name, got, want)
+				}
+			}
+		})
+		server := httptest.NewServer(mux)
+		defer server.Close()
+		res, err := Get(server.URL + test.url)
+		if err != nil {
+			t.Fatal(err)
+		}
+		res.Body.Close()
+	}
+}
+
+func TestSetPathValue(t *testing.T) {
+	mux := NewServeMux()
+	mux.HandleFunc("/a/{b}/c/{d...}", func(_ ResponseWriter, r *Request) {
+		kvs := map[string]string{
+			"b": "X",
+			"d": "Y",
+			"a": "Z",
+		}
+		for k, v := range kvs {
+			r.SetPathValue(k, v)
+		}
+		for k, w := range kvs {
+			if g := r.PathValue(k); g != w {
+				t.Errorf("got %q, want %q", g, w)
+			}
+		}
+	})
+	server := httptest.NewServer(mux)
+	defer server.Close()
+	res, err := Get(server.URL + "/a/b/c/d/e")
+	if err != nil {
+		t.Fatal(err)
+	}
+	res.Body.Close()
+}
+
+func TestStatus(t *testing.T) {
+	// The main purpose of this test is to check 405 responses and the Allow header.
+	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
+	mux := NewServeMux()
+	mux.Handle("GET /g", h)
+	mux.Handle("POST /p", h)
+	mux.Handle("PATCH /p", h)
+	mux.Handle("PUT /r", h)
+	mux.Handle("GET /r/", h)
+	server := httptest.NewServer(mux)
+	defer server.Close()
+
+	for _, test := range []struct {
+		method, path string
+		wantStatus   int
+		wantAllow    string
+	}{
+		{"GET", "/g", 200, ""},
+		{"HEAD", "/g", 200, ""},
+		{"POST", "/g", 405, "GET, HEAD"},
+		{"GET", "/x", 404, ""},
+		{"GET", "/p", 405, "PATCH, POST"},
+		{"GET", "/./p", 405, "PATCH, POST"},
+		{"GET", "/r/", 200, ""},
+		{"GET", "/r", 200, ""}, // redirected
+		{"HEAD", "/r/", 200, ""},
+		{"HEAD", "/r", 200, ""}, // redirected
+		{"PUT", "/r/", 405, "GET, HEAD"},
+		{"PUT", "/r", 200, ""},
+	} {
+		req, err := http.NewRequest(test.method, server.URL+test.path, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		res, err := http.DefaultClient.Do(req)
+		if err != nil {
+			t.Fatal(err)
+		}
+		res.Body.Close()
+		if g, w := res.StatusCode, test.wantStatus; g != w {
+			t.Errorf("%s %s: got %d, want %d", test.method, test.path, g, w)
+		}
+		if g, w := res.Header.Get("Allow"), test.wantAllow; g != w {
+			t.Errorf("%s %s, Allow: got %q, want %q", test.method, test.path, g, w)
+		}
+	}
+}
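
The tests added above exercise the Go 1.22 ServeMux enhancements: method-prefixed patterns, `{name}` and `{name...}` path wildcards, Request.PathValue/SetPathValue, and 405 responses carrying an Allow header. A minimal sketch of the same public API from an application's point of view (routes and port are illustrative, not taken from the tests):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	// "GET" also matches HEAD; other methods on /items/{id} receive a 405
	// whose Allow header lists GET, HEAD.
	mux.HandleFunc("GET /items/{id}", func(w http.ResponseWriter, r *http.Request) {
		// PathValue returns the path segment bound to the {id} wildcard.
		fmt.Fprintf(w, "item %s\n", r.PathValue("id"))
	})
	// A trailing {path...} wildcard captures the remainder of the path.
	mux.HandleFunc("GET /files/{path...}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "file %s\n", r.PathValue("path"))
	})
	http.ListenAndServe("localhost:8080", mux)
}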
diff --git a/src/net/http/response.go b/src/net/http/response.go
index 755c696..0c3d7f6 100644
--- a/src/net/http/response.go
+++ b/src/net/http/response.go
@@ -29,7 +29,7 @@
 
 // Response represents the response from an HTTP request.
 //
-// The Client and Transport return Responses from servers once
+// The [Client] and [Transport] return Responses from servers once
 // the response headers have been received. The response body
 // is streamed on demand as the Body field is read.
 type Response struct {
@@ -126,13 +126,13 @@
 	return readSetCookies(r.Header)
 }
 
-// ErrNoLocation is returned by Response's Location method
+// ErrNoLocation is returned by the [Response.Location] method
 // when no Location header is present.
 var ErrNoLocation = errors.New("http: no Location header in response")
 
 // Location returns the URL of the response's "Location" header,
 // if present. Relative redirects are resolved relative to
-// the Response's Request. ErrNoLocation is returned if no
+// [Response.Request]. [ErrNoLocation] is returned if no
 // Location header is present.
 func (r *Response) Location() (*url.URL, error) {
 	lv := r.Header.Get("Location")
@@ -146,8 +146,8 @@
 }
 
 // ReadResponse reads and returns an HTTP response from r.
-// The req parameter optionally specifies the Request that corresponds
-// to this Response. If nil, a GET request is assumed.
+// The req parameter optionally specifies the [Request] that corresponds
+// to this [Response]. If nil, a GET request is assumed.
 // Clients must call resp.Body.Close when finished reading resp.Body.
 // After that call, clients can inspect resp.Trailer to find key/value
 // pairs included in the response trailer.
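
As a quick illustration of the ReadResponse contract documented above (a sketch, not part of the patch): parse a raw HTTP/1.1 response from a bufio.Reader, passing nil for req so a GET is assumed, and close the body when done.

package main

import (
	"bufio"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	raw := "HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello"
	resp, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // 200 hello
}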
diff --git a/src/net/http/response_test.go b/src/net/http/response_test.go
index 19fb48f..f3425c3 100644
--- a/src/net/http/response_test.go
+++ b/src/net/http/response_test.go
@@ -849,7 +849,7 @@
 	type testCase struct {
 		name    string // optional, defaults to in
 		in      string
-		wantErr any // nil, err value, or string substring
+		wantErr any // nil, err value, bool value, or string substring
 	}
 
 	status := func(s string, wantErr any) testCase {
@@ -883,6 +883,7 @@
 	}
 
 	errMultiCL := "message cannot contain multiple Content-Length headers"
+	errEmptyCL := "invalid empty Content-Length"
 
 	tests := []testCase{
 		{"", "", io.ErrUnexpectedEOF},
@@ -918,7 +919,7 @@
 		contentLength("200 OK", "Content-Length: 7\r\nContent-Length: 7\r\n\r\nGophers\r\n", nil),
 		contentLength("201 OK", "Content-Length: 0\r\nContent-Length: 7\r\n\r\nGophers\r\n", errMultiCL),
 		contentLength("300 OK", "Content-Length: 0\r\nContent-Length: 0 \r\n\r\nGophers\r\n", nil),
-		contentLength("200 OK", "Content-Length:\r\nContent-Length:\r\n\r\nGophers\r\n", nil),
+		contentLength("200 OK", "Content-Length:\r\nContent-Length:\r\n\r\nGophers\r\n", errEmptyCL),
 		contentLength("206 OK", "Content-Length:\r\nContent-Length: 0 \r\nConnection: close\r\n\r\nGophers\r\n", errMultiCL),
 
 		// multiple content-length headers for 204 and 304 should still be checked
diff --git a/src/net/http/responsecontroller.go b/src/net/http/responsecontroller.go
index 92276ff..f3f24c1 100644
--- a/src/net/http/responsecontroller.go
+++ b/src/net/http/responsecontroller.go
@@ -13,14 +13,14 @@
 
 // A ResponseController is used by an HTTP handler to control the response.
 //
-// A ResponseController may not be used after the Handler.ServeHTTP method has returned.
+// A ResponseController may not be used after the [Handler.ServeHTTP] method has returned.
 type ResponseController struct {
 	rw ResponseWriter
 }
 
-// NewResponseController creates a ResponseController for a request.
+// NewResponseController creates a [ResponseController] for a request.
 //
-// The ResponseWriter should be the original value passed to the Handler.ServeHTTP method,
+// The ResponseWriter should be the original value passed to the [Handler.ServeHTTP] method,
 // or have an Unwrap method returning the original ResponseWriter.
 //
 // If the ResponseWriter implements any of the following methods, the ResponseController
@@ -34,7 +34,7 @@
 //	EnableFullDuplex() error
 //
 // If the ResponseWriter does not support a method, ResponseController returns
-// an error matching ErrNotSupported.
+// an error matching [ErrNotSupported].
 func NewResponseController(rw ResponseWriter) *ResponseController {
 	return &ResponseController{rw}
 }
@@ -116,8 +116,8 @@
 	}
 }
 
-// EnableFullDuplex indicates that the request handler will interleave reads from Request.Body
-// with writes to the ResponseWriter.
+// EnableFullDuplex indicates that the request handler will interleave reads from [Request.Body]
+// with writes to the [ResponseWriter].
 //
 // For HTTP/1 requests, the Go HTTP server by default consumes any unread portion of
 // the request body before beginning to write the response, preventing handlers from
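
A small sketch of the ResponseController usage pattern these doc comments describe; the deadline value and route are arbitrary:

package main

import (
	"errors"
	"net/http"
	"time"
)

func handler(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	// Set a per-request write deadline; the call returns an error matching
	// ErrNotSupported if the underlying ResponseWriter lacks SetWriteDeadline.
	if err := rc.SetWriteDeadline(time.Now().Add(10 * time.Second)); err != nil &&
		!errors.Is(err, http.ErrNotSupported) {
		http.Error(w, "internal error", http.StatusInternalServerError)
		return
	}
	w.Write([]byte("ok"))
	rc.Flush() // likewise reports ErrNotSupported when flushing is unavailable
}

func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe("localhost:8080", nil)
}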
diff --git a/src/net/http/responsecontroller_test.go b/src/net/http/responsecontroller_test.go
index 5828f37..f1dcc79 100644
--- a/src/net/http/responsecontroller_test.go
+++ b/src/net/http/responsecontroller_test.go
@@ -1,3 +1,7 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package http_test
 
 import (
diff --git a/src/net/http/roundtrip.go b/src/net/http/roundtrip.go
index 49ea1a7..08c2701 100644
--- a/src/net/http/roundtrip.go
+++ b/src/net/http/roundtrip.go
@@ -6,10 +6,10 @@
 
 package http
 
-// RoundTrip implements the RoundTripper interface.
+// RoundTrip implements the [RoundTripper] interface.
 //
 // For higher-level HTTP client support (such as handling of cookies
-// and redirects), see Get, Post, and the Client type.
+// and redirects), see [Get], [Post], and the [Client] type.
 //
 // Like the RoundTripper interface, the error types returned
 // by RoundTrip are unspecified.
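
To make the RoundTripper relationship above concrete, here is a hedged sketch of a wrapping RoundTripper that times each request before delegating to the default transport (the log format and URL are illustrative):

package main

import (
	"log"
	"net/http"
	"time"
)

// loggingTransport is a RoundTripper that logs the outcome and duration of
// each request, then delegates to the wrapped transport.
type loggingTransport struct {
	next http.RoundTripper
}

func (t loggingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	start := time.Now()
	resp, err := t.next.RoundTrip(req)
	log.Printf("%s %s -> err=%v in %v", req.Method, req.URL, err, time.Since(start))
	return resp, err
}

func main() {
	c := &http.Client{Transport: loggingTransport{next: http.DefaultTransport}}
	resp, err := c.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}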
diff --git a/src/net/http/roundtrip_js.go b/src/net/http/roundtrip_js.go
index 9f9f0cb..04c241e 100644
--- a/src/net/http/roundtrip_js.go
+++ b/src/net/http/roundtrip_js.go
@@ -10,6 +10,7 @@
 	"errors"
 	"fmt"
 	"io"
+	"net/http/internal/ascii"
 	"strconv"
 	"strings"
 	"syscall/js"
@@ -55,7 +56,7 @@
 var jsFetchDisabled = js.Global().Get("process").Type() == js.TypeObject &&
 	strings.HasPrefix(js.Global().Get("process").Get("argv0").String(), "node")
 
-// RoundTrip implements the RoundTripper interface using the WHATWG Fetch API.
+// RoundTrip implements the [RoundTripper] interface using the WHATWG Fetch API.
 func (t *Transport) RoundTrip(req *Request) (*Response, error) {
 	// The Transport has a documented contract that states that if the DialContext or
 	// DialTLSContext functions are set, they will be used to set up the connections.
@@ -184,11 +185,22 @@
 		}
 
 		code := result.Get("status").Int()
+
+		uncompressed := false
+		if ascii.EqualFold(header.Get("Content-Encoding"), "gzip") {
+			// The fetch API will decode the gzip, but the Content-Encoding header will not be removed.
+			header.Del("Content-Encoding")
+			header.Del("Content-Length")
+			contentLength = -1
+			uncompressed = true
+		}
+
 		respCh <- &Response{
 			Status:        fmt.Sprintf("%d %s", code, StatusText(code)),
 			StatusCode:    code,
 			Header:        header,
 			ContentLength: contentLength,
+			Uncompressed:  uncompressed,
 			Body:          body,
 			Request:       req,
 		}
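
The change above mirrors what the HTTP/1 transport does when it transparently decompresses gzip: Content-Encoding and Content-Length are cleared, ContentLength becomes -1, and Response.Uncompressed is set. A small sketch of observing that public field (the URL is illustrative):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Uncompressed reports whether the transport decompressed the body itself,
	// in which case ContentLength is -1 and Content-Encoding has been removed.
	fmt.Println("uncompressed:", resp.Uncompressed, "length:", resp.ContentLength)
}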
diff --git a/src/net/http/routing_index.go b/src/net/http/routing_index.go
new file mode 100644
index 0000000..9ac42c9
--- /dev/null
+++ b/src/net/http/routing_index.go
@@ -0,0 +1,124 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import "math"
+
+// A routingIndex optimizes conflict detection by indexing patterns.
+//
+// The basic idea is to rule out patterns that cannot conflict with a given
+// pattern because they have a different literal in a corresponding segment.
+// See the comments in [routingIndex.possiblyConflictingPatterns] for more details.
+type routingIndex struct {
+	// map from a particular segment position and value to all registered patterns
+	// with that value in that position.
+	// For example, the key {1, "b"} would hold the patterns "/a/b" and "/a/b/c"
+	// but not "/a", "b/a", "/a/c" or "/a/{x}".
+	segments map[routingIndexKey][]*pattern
+	// All patterns that end in a multi wildcard (including trailing slash).
+	// We do not try to be clever about indexing multi patterns, because there
+	// are unlikely to be many of them.
+	multis []*pattern
+}
+
+type routingIndexKey struct {
+	pos int    // 0-based segment position
+	s   string // literal, or empty for wildcard
+}
+
+func (idx *routingIndex) addPattern(pat *pattern) {
+	if pat.lastSegment().multi {
+		idx.multis = append(idx.multis, pat)
+	} else {
+		if idx.segments == nil {
+			idx.segments = map[routingIndexKey][]*pattern{}
+		}
+		for pos, seg := range pat.segments {
+			key := routingIndexKey{pos: pos, s: ""}
+			if !seg.wild {
+				key.s = seg.s
+			}
+			idx.segments[key] = append(idx.segments[key], pat)
+		}
+	}
+}
+
+// possiblyConflictingPatterns calls f on all patterns that might conflict with
+// pat. If f returns a non-nil error, possiblyConflictingPatterns returns immediately
+// with that error.
+//
+// To be correct, possiblyConflictingPatterns must include all patterns that
+// might conflict. But it may also include patterns that cannot conflict.
+// For instance, an implementation that returns all registered patterns is correct.
+// We use this fact throughout, simplifying the implementation by returning more
+// patterns than we might need to.
+func (idx *routingIndex) possiblyConflictingPatterns(pat *pattern, f func(*pattern) error) (err error) {
+	// Terminology:
+	//   dollar pattern: one ending in "{$}"
+	//   multi pattern: one ending in a trailing slash or "{x...}" wildcard
+	//   ordinary pattern: neither of the above
+
+	// apply f to all the pats, stopping on error.
+	apply := func(pats []*pattern) error {
+		if err != nil {
+			return err
+		}
+		for _, p := range pats {
+			err = f(p)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	// Our simple indexing scheme doesn't try to prune multi patterns; assume
+	// any of them can match the argument.
+	if err := apply(idx.multis); err != nil {
+		return err
+	}
+	if pat.lastSegment().s == "/" {
+		// All paths that a dollar pattern matches end in a slash; no paths that
+		// an ordinary pattern matches do. So only other dollar or multi
+		// patterns can conflict with a dollar pattern. Furthermore, conflicting
+		// dollar patterns must have the {$} in the same position.
+		return apply(idx.segments[routingIndexKey{s: "/", pos: len(pat.segments) - 1}])
+	}
+	// For ordinary and multi patterns, the only conflicts can be with a multi,
+	// or a pattern that has the same literal or a wildcard at some literal
+	// position.
+	// We could intersect all the possible matches at each position, but we
+	// do something simpler: we find the position with the fewest patterns.
+	var lmin, wmin []*pattern
+	min := math.MaxInt
+	hasLit := false
+	for i, seg := range pat.segments {
+		if seg.multi {
+			break
+		}
+		if !seg.wild {
+			hasLit = true
+			lpats := idx.segments[routingIndexKey{s: seg.s, pos: i}]
+			wpats := idx.segments[routingIndexKey{s: "", pos: i}]
+			if sum := len(lpats) + len(wpats); sum < min {
+				lmin = lpats
+				wmin = wpats
+				min = sum
+			}
+		}
+	}
+	if hasLit {
+		apply(lmin)
+		apply(wmin)
+		return err
+	}
+
+	// This pattern is all wildcards.
+	// Check it against everything.
+	for _, pats := range idx.segments {
+		apply(pats)
+	}
+	return err
+}
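
routingIndex is unexported, so it cannot be used outside package net/http. The standalone sketch below only illustrates the pruning idea — index patterns by (segment position, literal) and compare a new pattern only against candidates that share a literal or hold a wildcard there. The types and names are simplified stand-ins, not the real implementation: it omits multi patterns, the all-wildcard case, and the "pick the cheapest position" optimization that the code above performs.

package main

import (
	"fmt"
	"strings"
)

type key struct {
	pos int
	lit string // "" means wildcard at this position
}

// index maps (position, literal) to the patterns registered with that literal there.
var index = map[key][]string{}

func addPattern(pat string) {
	for i, seg := range strings.Split(strings.Trim(pat, "/"), "/") {
		k := key{pos: i}
		if !strings.HasPrefix(seg, "{") {
			k.lit = seg
		}
		index[k] = append(index[k], pat)
	}
}

// candidates returns registered patterns that could conflict with pat: those
// sharing a literal at some position, plus those with a wildcard there.
func candidates(pat string) []string {
	var out []string
	for i, seg := range strings.Split(strings.Trim(pat, "/"), "/") {
		if strings.HasPrefix(seg, "{") {
			continue // only literal positions prune anything
		}
		out = append(out, index[key{pos: i, lit: seg}]...)
		out = append(out, index[key{pos: i}]...) // wildcards at this position
	}
	return out
}

func main() {
	addPattern("/a/b")
	addPattern("/a/{x}")
	addPattern("/c/d")
	// A new pattern "/a/{y}" shares the literal "a" at position 0, so "/c/d"
	// is never compared: prints [/a/b /a/{x}].
	fmt.Println(candidates("/a/{y}"))
}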
diff --git a/src/net/http/routing_index_test.go b/src/net/http/routing_index_test.go
new file mode 100644
index 0000000..1ffb927
--- /dev/null
+++ b/src/net/http/routing_index_test.go
@@ -0,0 +1,179 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+	"fmt"
+	"slices"
+	"sort"
+	"strings"
+	"testing"
+)
+
+func TestIndex(t *testing.T) {
+	// Generate every kind of pattern up to some number of segments,
+	// and compare conflicts found during indexing with those found
+	// by exhaustive comparison.
+	patterns := generatePatterns()
+	var idx routingIndex
+	for i, pat := range patterns {
+		got := indexConflicts(pat, &idx)
+		want := trueConflicts(pat, patterns[:i])
+		if !slices.Equal(got, want) {
+			t.Fatalf("%q:\ngot  %q\nwant %q", pat, got, want)
+		}
+		idx.addPattern(pat)
+	}
+}
+
+func trueConflicts(pat *pattern, pats []*pattern) []string {
+	var s []string
+	for _, p := range pats {
+		if pat.conflictsWith(p) {
+			s = append(s, p.String())
+		}
+	}
+	sort.Strings(s)
+	return s
+}
+
+func indexConflicts(pat *pattern, idx *routingIndex) []string {
+	var s []string
+	idx.possiblyConflictingPatterns(pat, func(p *pattern) error {
+		if pat.conflictsWith(p) {
+			s = append(s, p.String())
+		}
+		return nil
+	})
+	sort.Strings(s)
+	return slices.Compact(s)
+}
+
+// generatePatterns generates all possible patterns using a representative
+// sample of parts.
+func generatePatterns() []*pattern {
+	var pats []*pattern
+
+	collect := func(s string) {
+		// Replace duplicate wildcards with unique ones.
+		var b strings.Builder
+		wc := 0
+		for {
+			i := strings.Index(s, "{x}")
+			if i < 0 {
+				b.WriteString(s)
+				break
+			}
+			b.WriteString(s[:i])
+			fmt.Fprintf(&b, "{x%d}", wc)
+			wc++
+			s = s[i+3:]
+		}
+		pat, err := parsePattern(b.String())
+		if err != nil {
+			panic(err)
+		}
+		pats = append(pats, pat)
+	}
+
+	var (
+		methods   = []string{"", "GET ", "HEAD ", "POST "}
+		hosts     = []string{"", "h1", "h2"}
+		segs      = []string{"/a", "/b", "/{x}"}
+		finalSegs = []string{"/a", "/b", "/{f}", "/{m...}", "/{$}"}
+	)
+
+	g := genConcat(
+		genChoice(methods),
+		genChoice(hosts),
+		genStar(3, genChoice(segs)),
+		genChoice(finalSegs))
+	g(collect)
+	return pats
+}
+
+// A generator is a function that calls its argument with the strings that it
+// generates.
+type generator func(collect func(string))
+
+// genConst generates a single constant string.
+func genConst(s string) generator {
+	return func(collect func(string)) {
+		collect(s)
+	}
+}
+
+// genChoice generates all the strings in its argument.
+func genChoice(choices []string) generator {
+	return func(collect func(string)) {
+		for _, c := range choices {
+			collect(c)
+		}
+	}
+}
+
+// genConcat2 generates the cross product of the strings of g1 concatenated
+// with those of g2.
+func genConcat2(g1, g2 generator) generator {
+	return func(collect func(string)) {
+		g1(func(s1 string) {
+			g2(func(s2 string) {
+				collect(s1 + s2)
+			})
+		})
+	}
+}
+
+// genConcat generalizes genConcat2 to any number of generators.
+func genConcat(gs ...generator) generator {
+	if len(gs) == 0 {
+		return genConst("")
+	}
+	return genConcat2(gs[0], genConcat(gs[1:]...))
+}
+
+// genRepeat generates strings of exactly n copies of g's strings.
+func genRepeat(n int, g generator) generator {
+	if n == 0 {
+		return genConst("")
+	}
+	return genConcat(g, genRepeat(n-1, g))
+}
+
+// genStar (named after the Kleene star) generates 0, 1, 2, ..., max
+// copies of the strings of g.
+func genStar(max int, g generator) generator {
+	return func(collect func(string)) {
+		for i := 0; i <= max; i++ {
+			genRepeat(i, g)(collect)
+		}
+	}
+}
+
+func BenchmarkMultiConflicts(b *testing.B) {
+	// How fast is indexing if the corpus is all multis?
+	const nMultis = 1000
+	var pats []*pattern
+	for i := 0; i < nMultis; i++ {
+		pats = append(pats, mustParsePattern(b, fmt.Sprintf("/a/b/{x}/d%d/", i)))
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		var idx routingIndex
+		for _, p := range pats {
+			got := indexConflicts(p, &idx)
+			if len(got) != 0 {
+				b.Fatalf("got %d conflicts, want 0", len(got))
+			}
+			idx.addPattern(p)
+		}
+		if i == 0 {
+			// Confirm that all the multis ended up where they belong.
+			if g, w := len(idx.multis), nMultis; g != w {
+				b.Fatalf("got %d multis, want %d", g, w)
+			}
+		}
+	}
+}
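
The generator combinators above compose by threading a collect callback. A tiny standalone sketch (redefining only the pieces it needs) shows how genChoice- and genConcat2-style generators enumerate a small cross product; the names mirror the test but this is illustrative, not shared code:

package main

import "fmt"

// A generator calls its argument with each string it generates.
type generator func(collect func(string))

func genChoice(choices []string) generator {
	return func(collect func(string)) {
		for _, c := range choices {
			collect(c)
		}
	}
}

func genConcat2(g1, g2 generator) generator {
	return func(collect func(string)) {
		g1(func(s1 string) {
			g2(func(s2 string) {
				collect(s1 + s2)
			})
		})
	}
}

func main() {
	methods := genChoice([]string{"", "GET "})
	paths := genChoice([]string{"/a", "/{x}"})
	// Prints the four combinations: "/a", "/{x}", "GET /a", "GET /{x}".
	genConcat2(methods, paths)(func(s string) { fmt.Println(s) })
}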
diff --git a/src/net/http/routing_tree.go b/src/net/http/routing_tree.go
new file mode 100644
index 0000000..8812ed0
--- /dev/null
+++ b/src/net/http/routing_tree.go
@@ -0,0 +1,240 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a decision tree for fast matching of requests to
+// patterns.
+//
+// The root of the tree branches on the host of the request.
+// The next level branches on the method.
+// The remaining levels branch on consecutive segments of the path.
+//
+// The "more specific wins" precedence rule can result in backtracking.
+// For example, given the patterns
+//     /a/b/z
+//     /a/{x}/c
+// we will first try to match the path "/a/b/c" with /a/b/z, and
+// when that fails we will try against /a/{x}/c.
+
+package http
+
+import (
+	"strings"
+)
+
+// A routingNode is a node in the decision tree.
+// The same struct is used for leaf and interior nodes.
+type routingNode struct {
+	// A leaf node holds a single pattern and the Handler it was registered
+	// with.
+	pattern *pattern
+	handler Handler
+
+	// An interior node maps parts of the incoming request to child nodes.

+	// special children keys:
+	//     "/"  trailing slash (resulting from {$})
+	//     ""   single wildcard
+	//     "*"  multi wildcard
+	children   mapping[string, *routingNode]
+	emptyChild *routingNode // optimization: child with key ""
+}
+
+// addPattern adds a pattern and its associated Handler to the tree
+// at root.
+func (root *routingNode) addPattern(p *pattern, h Handler) {
+	// First level of tree is host.
+	n := root.addChild(p.host)
+	// Second level of tree is method.
+	n = n.addChild(p.method)
+	// Remaining levels are path.
+	n.addSegments(p.segments, p, h)
+}
+
+// addSegments adds the given segments to the tree rooted at n.
+// If there are no segments, then n is a leaf node that holds
+// the given pattern and handler.
+func (n *routingNode) addSegments(segs []segment, p *pattern, h Handler) {
+	if len(segs) == 0 {
+		n.set(p, h)
+		return
+	}
+	seg := segs[0]
+	if seg.multi {
+		if len(segs) != 1 {
+			panic("multi wildcard not last")
+		}
+		n.addChild("*").set(p, h)
+	} else if seg.wild {
+		n.addChild("").addSegments(segs[1:], p, h)
+	} else {
+		n.addChild(seg.s).addSegments(segs[1:], p, h)
+	}
+}
+
+// set sets the pattern and handler for n, which
+// must be a leaf node.
+func (n *routingNode) set(p *pattern, h Handler) {
+	if n.pattern != nil || n.handler != nil {
+		panic("non-nil leaf fields")
+	}
+	n.pattern = p
+	n.handler = h
+}
+
+// addChild adds a child node with the given key to n
+// if one does not exist, and returns the child.
+func (n *routingNode) addChild(key string) *routingNode {
+	if key == "" {
+		if n.emptyChild == nil {
+			n.emptyChild = &routingNode{}
+		}
+		return n.emptyChild
+	}
+	if c := n.findChild(key); c != nil {
+		return c
+	}
+	c := &routingNode{}
+	n.children.add(key, c)
+	return c
+}
+
+// findChild returns the child of n with the given key, or nil
+// if there is no child with that key.
+func (n *routingNode) findChild(key string) *routingNode {
+	if key == "" {
+		return n.emptyChild
+	}
+	r, _ := n.children.find(key)
+	return r
+}
+
+// match returns the leaf node under root that matches the arguments, and a list
+// of values for pattern wildcards in the order that the wildcards appear.
+// For example, if the request path is "/a/b/c" and the pattern is "/{x}/b/{y}",
+// then the second return value will be []string{"a", "c"}.
+func (root *routingNode) match(host, method, path string) (*routingNode, []string) {
+	if host != "" {
+		// There is a host. If there is a pattern that specifies that host and it
+		// matches, we are done. If the pattern doesn't match, fall through to
+		// try patterns with no host.
+		if l, m := root.findChild(host).matchMethodAndPath(method, path); l != nil {
+			return l, m
+		}
+	}
+	return root.emptyChild.matchMethodAndPath(method, path)
+}
+
+// matchMethodAndPath matches the method and path.
+// Its return values are the same as [routingNode.match].
+// The receiver should be a child of the root.
+func (n *routingNode) matchMethodAndPath(method, path string) (*routingNode, []string) {
+	if n == nil {
+		return nil, nil
+	}
+	if l, m := n.findChild(method).matchPath(path, nil); l != nil {
+		// Exact match of method name.
+		return l, m
+	}
+	if method == "HEAD" {
+		// GET matches HEAD too.
+		if l, m := n.findChild("GET").matchPath(path, nil); l != nil {
+			return l, m
+		}
+	}
+	// No exact match; try patterns with no method.
+	return n.emptyChild.matchPath(path, nil)
+}
+
+// matchPath matches a path.
+// Its return values are the same as [routingNode.match].
+// matchPath calls itself recursively. The matches argument holds the wildcard matches
+// found so far.
+func (n *routingNode) matchPath(path string, matches []string) (*routingNode, []string) {
+	if n == nil {
+		return nil, nil
+	}
+	// If path is empty, then we are done.
+	// If n is a leaf node, we found a match; return it.
+	// If n is an interior node (which means it has a nil pattern),
+	// then we failed to match.
+	if path == "" {
+		if n.pattern == nil {
+			return nil, nil
+		}
+		return n, matches
+	}
+	// Get the first segment of path.
+	seg, rest := firstSegment(path)
+	// First try matching against patterns that have a literal for this position.
+	// We know by construction that such patterns are more specific than those
+	// with a wildcard at this position (they are either more specific, equivalent,
+	// or overlap, and we ruled out the first two when the patterns were registered).
+	if n, m := n.findChild(seg).matchPath(rest, matches); n != nil {
+		return n, m
+	}
+	// If matching a literal fails, try again with patterns that have a single
+	// wildcard (represented by an empty string in the child mapping).
+	// Again, by construction, patterns with a single wildcard must be more specific than
+	// those with a multi wildcard.
+	// We skip this step if the segment is a trailing slash, because single wildcards
+	// don't match trailing slashes.
+	if seg != "/" {
+		if n, m := n.emptyChild.matchPath(rest, append(matches, seg)); n != nil {
+			return n, m
+		}
+	}
+	// Lastly, match the pattern (there can be at most one) that has a multi
+	// wildcard in this position to the rest of the path.
+	if c := n.findChild("*"); c != nil {
+		// Don't record a match for a nameless wildcard (which arises from a
+		// trailing slash in the pattern).
+		if c.pattern.lastSegment().s != "" {
+			matches = append(matches, pathUnescape(path[1:])) // remove initial slash
+		}
+		return c, matches
+	}
+	return nil, nil
+}
+
+// firstSegment splits path into its first segment, and the rest.
+// The path must begin with "/".
+// If path consists of only a slash, firstSegment returns ("/", "").
+// The segment is returned unescaped, if possible.
+func firstSegment(path string) (seg, rest string) {
+	if path == "/" {
+		return "/", ""
+	}
+	path = path[1:] // drop initial slash
+	i := strings.IndexByte(path, '/')
+	if i < 0 {
+		i = len(path)
+	}
+	return pathUnescape(path[:i]), path[i:]
+}
+
+// matchingMethods adds to methodSet all the methods that would result in a
+// match if passed to routingNode.match with the given host and path.
+func (root *routingNode) matchingMethods(host, path string, methodSet map[string]bool) {
+	if host != "" {
+		root.findChild(host).matchingMethodsPath(path, methodSet)
+	}
+	root.emptyChild.matchingMethodsPath(path, methodSet)
+	if methodSet["GET"] {
+		methodSet["HEAD"] = true
+	}
+}
+
+func (n *routingNode) matchingMethodsPath(path string, set map[string]bool) {
+	if n == nil {
+		return
+	}
+	n.children.eachPair(func(method string, c *routingNode) bool {
+		if p, _ := c.matchPath(path, nil); p != nil {
+			set[method] = true
+		}
+		return true
+	})
+	// Don't look at the empty child. If there were an empty
+	// child, it would match on any method, but we only
+	// call this when we fail to match on a method.
+}
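
The backtracking described in the file comment is observable through the public ServeMux: with both "/a/b/z" and "/a/{x}/c" registered, a request for "/a/b/c" first descends the literal "b" branch, fails at "c" vs "z", and then falls back to the wildcard branch. A minimal sketch (output format is illustrative):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/a/b/z", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "matched /a/b/z")
	})
	mux.HandleFunc("/a/{x}/c", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "matched /a/{x}/c with x =", r.PathValue("x"))
	})

	srv := httptest.NewServer(mux)
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/a/b/c")
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Print(string(body)) // matched /a/{x}/c with x = b
}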
diff --git a/src/net/http/routing_tree_test.go b/src/net/http/routing_tree_test.go
new file mode 100644
index 0000000..2aac8b6
--- /dev/null
+++ b/src/net/http/routing_tree_test.go
@@ -0,0 +1,295 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"testing"
+
+	"slices"
+)
+
+func TestRoutingFirstSegment(t *testing.T) {
+	for _, test := range []struct {
+		in   string
+		want []string
+	}{
+		{"/a/b/c", []string{"a", "b", "c"}},
+		{"/a/b/", []string{"a", "b", "/"}},
+		{"/", []string{"/"}},
+		{"/a/%62/c", []string{"a", "b", "c"}},
+		{"/a%2Fb%2fc", []string{"a/b/c"}},
+	} {
+		var got []string
+		rest := test.in
+		for len(rest) > 0 {
+			var seg string
+			seg, rest = firstSegment(rest)
+			got = append(got, seg)
+		}
+		if !slices.Equal(got, test.want) {
+			t.Errorf("%q: got %v, want %v", test.in, got, test.want)
+		}
+	}
+}
+
+// TODO: test host and method
+var testTree *routingNode
+
+func getTestTree() *routingNode {
+	if testTree == nil {
+		testTree = buildTree("/a", "/a/b", "/a/{x}",
+			"/g/h/i", "/g/{x}/j",
+			"/a/b/{x...}", "/a/b/{y}", "/a/b/{$}")
+	}
+	return testTree
+}
+
+func buildTree(pats ...string) *routingNode {
+	root := &routingNode{}
+	for _, p := range pats {
+		pat, err := parsePattern(p)
+		if err != nil {
+			panic(err)
+		}
+		root.addPattern(pat, nil)
+	}
+	return root
+}
+
+func TestRoutingAddPattern(t *testing.T) {
+	want := `"":
+    "":
+        "a":
+            "/a"
+            "":
+                "/a/{x}"
+            "b":
+                "/a/b"
+                "":
+                    "/a/b/{y}"
+                "*":
+                    "/a/b/{x...}"
+                "/":
+                    "/a/b/{$}"
+        "g":
+            "":
+                "j":
+                    "/g/{x}/j"
+            "h":
+                "i":
+                    "/g/h/i"
+`
+
+	var b strings.Builder
+	getTestTree().print(&b, 0)
+	got := b.String()
+	if got != want {
+		t.Errorf("got\n%s\nwant\n%s", got, want)
+	}
+}
+
+type testCase struct {
+	method, host, path string
+	wantPat            string // "" for nil (no match)
+	wantMatches        []string
+}
+
+func TestRoutingNodeMatch(t *testing.T) {
+
+	test := func(tree *routingNode, tests []testCase) {
+		t.Helper()
+		for _, test := range tests {
+			gotNode, gotMatches := tree.match(test.host, test.method, test.path)
+			got := ""
+			if gotNode != nil {
+				got = gotNode.pattern.String()
+			}
+			if got != test.wantPat {
+				t.Errorf("%s, %s, %s: got %q, want %q", test.host, test.method, test.path, got, test.wantPat)
+			}
+			if !slices.Equal(gotMatches, test.wantMatches) {
+				t.Errorf("%s, %s, %s: got matches %v, want %v", test.host, test.method, test.path, gotMatches, test.wantMatches)
+			}
+		}
+	}
+
+	test(getTestTree(), []testCase{
+		{"GET", "", "/a", "/a", nil},
+		{"Get", "", "/b", "", nil},
+		{"Get", "", "/a/b", "/a/b", nil},
+		{"Get", "", "/a/c", "/a/{x}", []string{"c"}},
+		{"Get", "", "/a/b/", "/a/b/{$}", nil},
+		{"Get", "", "/a/b/c", "/a/b/{y}", []string{"c"}},
+		{"Get", "", "/a/b/c/d", "/a/b/{x...}", []string{"c/d"}},
+		{"Get", "", "/g/h/i", "/g/h/i", nil},
+		{"Get", "", "/g/h/j", "/g/{x}/j", []string{"h"}},
+	})
+
+	tree := buildTree(
+		"/item/",
+		"POST /item/{user}",
+		"GET /item/{user}",
+		"/item/{user}",
+		"/item/{user}/{id}",
+		"/item/{user}/new",
+		"/item/{$}",
+		"POST alt.com/item/{user}",
+		"GET /headwins",
+		"HEAD /headwins",
+		"/path/{p...}")
+
+	test(tree, []testCase{
+		{"GET", "", "/item/jba",
+			"GET /item/{user}", []string{"jba"}},
+		{"POST", "", "/item/jba",
+			"POST /item/{user}", []string{"jba"}},
+		{"HEAD", "", "/item/jba",
+			"GET /item/{user}", []string{"jba"}},
+		{"get", "", "/item/jba",
+			"/item/{user}", []string{"jba"}}, // method matches are case-sensitive
+		{"POST", "", "/item/jba/17",
+			"/item/{user}/{id}", []string{"jba", "17"}},
+		{"GET", "", "/item/jba/new",
+			"/item/{user}/new", []string{"jba"}},
+		{"GET", "", "/item/",
+			"/item/{$}", []string{}},
+		{"GET", "", "/item/jba/17/line2",
+			"/item/", nil},
+		{"POST", "alt.com", "/item/jba",
+			"POST alt.com/item/{user}", []string{"jba"}},
+		{"GET", "alt.com", "/item/jba",
+			"GET /item/{user}", []string{"jba"}},
+		{"GET", "", "/item",
+			"", nil}, // does not match
+		{"GET", "", "/headwins",
+			"GET /headwins", nil},
+		{"HEAD", "", "/headwins", // HEAD is more specific than GET
+			"HEAD /headwins", nil},
+		{"GET", "", "/path/to/file",
+			"/path/{p...}", []string{"to/file"}},
+	})
+
+	// A pattern ending in {$} should only match URLs with a trailing slash.
+	pat1 := "/a/b/{$}"
+	test(buildTree(pat1), []testCase{
+		{"GET", "", "/a/b", "", nil},
+		{"GET", "", "/a/b/", pat1, nil},
+		{"GET", "", "/a/b/c", "", nil},
+		{"GET", "", "/a/b/c/d", "", nil},
+	})
+
+	// A pattern ending in a single wildcard should not match a trailing slash URL.
+	pat2 := "/a/b/{w}"
+	test(buildTree(pat2), []testCase{
+		{"GET", "", "/a/b", "", nil},
+		{"GET", "", "/a/b/", "", nil},
+		{"GET", "", "/a/b/c", pat2, []string{"c"}},
+		{"GET", "", "/a/b/c/d", "", nil},
+	})
+
+	// A pattern ending in a multi wildcard should match both URLs.
+	pat3 := "/a/b/{w...}"
+	test(buildTree(pat3), []testCase{
+		{"GET", "", "/a/b", "", nil},
+		{"GET", "", "/a/b/", pat3, []string{""}},
+		{"GET", "", "/a/b/c", pat3, []string{"c"}},
+		{"GET", "", "/a/b/c/d", pat3, []string{"c/d"}},
+	})
+
+	// All three of the above should work together.
+	test(buildTree(pat1, pat2, pat3), []testCase{
+		{"GET", "", "/a/b", "", nil},
+		{"GET", "", "/a/b/", pat1, nil},
+		{"GET", "", "/a/b/c", pat2, []string{"c"}},
+		{"GET", "", "/a/b/c/d", pat3, []string{"c/d"}},
+	})
+}
+
+func TestMatchingMethods(t *testing.T) {
+	hostTree := buildTree("GET a.com/", "PUT b.com/", "POST /foo/{x}")
+	for _, test := range []struct {
+		name       string
+		tree       *routingNode
+		host, path string
+		want       string
+	}{
+		{
+			"post",
+			buildTree("POST /"), "", "/foo",
+			"POST",
+		},
+		{
+			"get",
+			buildTree("GET /"), "", "/foo",
+			"GET,HEAD",
+		},
+		{
+			"host",
+			hostTree, "", "/foo",
+			"",
+		},
+		{
+			"host",
+			hostTree, "", "/foo/bar",
+			"POST",
+		},
+		{
+			"host2",
+			hostTree, "a.com", "/foo/bar",
+			"GET,HEAD,POST",
+		},
+		{
+			"host3",
+			hostTree, "b.com", "/bar",
+			"PUT",
+		},
+		{
+			// This case shouldn't come up because we only call matchingMethods
+			// when there was no match, but we include it for completeness.
+			"empty",
+			buildTree("/"), "", "/",
+			"",
+		},
+	} {
+		t.Run(test.name, func(t *testing.T) {
+			ms := map[string]bool{}
+			test.tree.matchingMethods(test.host, test.path, ms)
+			keys := mapKeys(ms)
+			sort.Strings(keys)
+			got := strings.Join(keys, ",")
+			if got != test.want {
+				t.Errorf("got %s, want %s", got, test.want)
+			}
+		})
+	}
+}
+
+func (n *routingNode) print(w io.Writer, level int) {
+	indent := strings.Repeat("    ", level)
+	if n.pattern != nil {
+		fmt.Fprintf(w, "%s%q\n", indent, n.pattern)
+	}
+	if n.emptyChild != nil {
+		fmt.Fprintf(w, "%s%q:\n", indent, "")
+		n.emptyChild.print(w, level+1)
+	}
+
+	var keys []string
+	n.children.eachPair(func(k string, _ *routingNode) bool {
+		keys = append(keys, k)
+		return true
+	})
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		fmt.Fprintf(w, "%s%q:\n", indent, k)
+		n, _ := n.children.find(k)
+		n.print(w, level+1)
+	}
+}
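
The pat1/pat2/pat3 cases above correspond to the three public trailing-pattern forms, which differ only in how they treat a trailing slash. A compact sketch against the exported ServeMux (handlers and port are placeholders); all three patterns can be registered together because each is strictly more specific than the multi wildcard:

package main

import "net/http"

func main() {
	mux := http.NewServeMux()
	h := func(w http.ResponseWriter, r *http.Request) {}

	// "/a/b/{$}" matches exactly "/a/b/" (trailing slash, nothing after it).
	mux.HandleFunc("/a/b/{$}", h)
	// "/a/b/{w}" matches "/a/b/c" but neither "/a/b/" nor "/a/b/c/d".
	mux.HandleFunc("/a/b/{w}", h)
	// "/a/b/{w...}" matches "/a/b/", "/a/b/c", and "/a/b/c/d".
	mux.HandleFunc("/a/b/{w...}", h)

	http.ListenAndServe("localhost:8080", mux)
}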
diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go
index bb380cf..0c76f1b 100644
--- a/src/net/http/serve_test.go
+++ b/src/net/http/serve_test.go
@@ -30,7 +30,6 @@
 	"net/http/internal/testcert"
 	"net/url"
 	"os"
-	"os/exec"
 	"path/filepath"
 	"reflect"
 	"regexp"
@@ -108,10 +107,14 @@
 	readMu   sync.Mutex // for TestHandlerBodyClose
 	readBuf  bytes.Buffer
 	writeBuf bytes.Buffer
-	closec   chan bool // if non-nil, send value to it on close
+	closec   chan bool // 1-buffered; receives true when Close is called
 	noopConn
 }
 
+func newTestConn() *testConn {
+	return &testConn{closec: make(chan bool, 1)}
+}
+
 func (c *testConn) Read(b []byte) (int, error) {
 	c.readMu.Lock()
 	defer c.readMu.Unlock()
@@ -647,30 +650,27 @@
 
 func TestServerTimeouts(t *testing.T) { run(t, testServerTimeouts, []testMode{http1Mode}) }
 func testServerTimeouts(t *testing.T, mode testMode) {
-	// Try three times, with increasing timeouts.
-	tries := []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second}
-	for i, timeout := range tries {
-		err := testServerTimeoutsWithTimeout(t, timeout, mode)
-		if err == nil {
-			return
-		}
-		t.Logf("failed at %v: %v", timeout, err)
-		if i != len(tries)-1 {
-			t.Logf("retrying at %v ...", tries[i+1])
-		}
-	}
-	t.Fatal("all attempts failed")
+	runTimeSensitiveTest(t, []time.Duration{
+		10 * time.Millisecond,
+		50 * time.Millisecond,
+		100 * time.Millisecond,
+		500 * time.Millisecond,
+		1 * time.Second,
+	}, func(t *testing.T, timeout time.Duration) error {
+		return testServerTimeoutsWithTimeout(t, timeout, mode)
+	})
 }
 
 func testServerTimeoutsWithTimeout(t *testing.T, timeout time.Duration, mode testMode) error {
-	reqNum := 0
-	ts := newClientServerTest(t, mode, HandlerFunc(func(res ResponseWriter, req *Request) {
-		reqNum++
-		fmt.Fprintf(res, "req=%d", reqNum)
+	var reqNum atomic.Int32
+	cst := newClientServerTest(t, mode, HandlerFunc(func(res ResponseWriter, req *Request) {
+		fmt.Fprintf(res, "req=%d", reqNum.Add(1))
 	}), func(ts *httptest.Server) {
 		ts.Config.ReadTimeout = timeout
 		ts.Config.WriteTimeout = timeout
-	}).ts
+	})
+	defer cst.close()
+	ts := cst.ts
 
 	// Hit the HTTP server successfully.
 	c := ts.Client()
@@ -866,16 +866,20 @@
 }
 
 func testWriteDeadlineEnforcedPerStream(t *testing.T, mode testMode, timeout time.Duration) error {
-	reqNum := 0
-	ts := newClientServerTest(t, mode, HandlerFunc(func(res ResponseWriter, req *Request) {
-		reqNum++
-		if reqNum == 1 {
-			return // first request succeeds
+	firstRequest := make(chan bool, 1)
+	cst := newClientServerTest(t, mode, HandlerFunc(func(res ResponseWriter, req *Request) {
+		select {
+		case firstRequest <- true:
+			// first request succeeds
+		default:
+			// second request times out
+			time.Sleep(timeout)
 		}
-		time.Sleep(timeout) // second request times out
 	}), func(ts *httptest.Server) {
 		ts.Config.WriteTimeout = timeout / 2
-	}).ts
+	})
+	defer cst.close()
+	ts := cst.ts
 
 	c := ts.Client()
 
@@ -922,14 +926,18 @@
 }
 
 func testNoWriteDeadline(t *testing.T, mode testMode, timeout time.Duration) error {
-	reqNum := 0
-	ts := newClientServerTest(t, mode, HandlerFunc(func(res ResponseWriter, req *Request) {
-		reqNum++
-		if reqNum == 1 {
-			return // first request succeeds
+	firstRequest := make(chan bool, 1)
+	cst := newClientServerTest(t, mode, HandlerFunc(func(res ResponseWriter, req *Request) {
+		select {
+		case firstRequest <- true:
+			// first request succeeds
+		default:
+			// second request times out
+			time.Sleep(timeout)
 		}
-		time.Sleep(timeout) // second request timesout
-	})).ts
+	}))
+	defer cst.close()
+	ts := cst.ts
 
 	c := ts.Client()
 
@@ -1392,27 +1400,28 @@
 	run(t, testTLSHandshakeTimeout, []testMode{https1Mode, http2Mode})
 }
 func testTLSHandshakeTimeout(t *testing.T, mode testMode) {
-	errc := make(chanWriter, 10) // but only expecting 1
-	ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {}),
+	errLog := new(strings.Builder)
+	cst := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {}),
 		func(ts *httptest.Server) {
 			ts.Config.ReadTimeout = 250 * time.Millisecond
-			ts.Config.ErrorLog = log.New(errc, "", 0)
+			ts.Config.ErrorLog = log.New(errLog, "", 0)
 		},
-	).ts
+	)
+	ts := cst.ts
+
 	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
 	if err != nil {
 		t.Fatalf("Dial: %v", err)
 	}
-	defer conn.Close()
-
 	var buf [1]byte
 	n, err := conn.Read(buf[:])
 	if err == nil || n != 0 {
 		t.Errorf("Read = %d, %v; want an error and no bytes", n, err)
 	}
+	conn.Close()
 
-	v := <-errc
-	if !strings.Contains(v, "timeout") && !strings.Contains(v, "TLS handshake") {
+	cst.close()
+	if v := errLog.String(); !strings.Contains(v, "timeout") && !strings.Contains(v, "TLS handshake") {
 		t.Errorf("expected a TLS handshake timeout error; got %q", v)
 	}
 }
@@ -2968,15 +2977,36 @@
 	return len(p), nil
 }
 
-type countReader struct {
-	r io.Reader
-	n *int64
+type bodyLimitReader struct {
+	mu     sync.Mutex
+	count  int
+	limit  int
+	closed chan struct{}
 }
 
-func (cr countReader) Read(p []byte) (n int, err error) {
-	n, err = cr.r.Read(p)
-	atomic.AddInt64(cr.n, int64(n))
-	return
+func (r *bodyLimitReader) Read(p []byte) (int, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	select {
+	case <-r.closed:
+		return 0, errors.New("closed")
+	default:
+	}
+	if r.count > r.limit {
+		return 0, errors.New("at limit")
+	}
+	r.count += len(p)
+	for i := range p {
+		p[i] = 'a'
+	}
+	return len(p), nil
+}
+
+func (r *bodyLimitReader) Close() error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	close(r.closed)
+	return nil
 }
 
 func TestRequestBodyLimit(t *testing.T) { run(t, testRequestBodyLimit) }
@@ -3000,8 +3030,11 @@
 		}
 	}))
 
-	nWritten := new(int64)
-	req, _ := NewRequest("POST", cst.ts.URL, io.LimitReader(countReader{neverEnding('a'), nWritten}, limit*200))
+	body := &bodyLimitReader{
+		closed: make(chan struct{}),
+		limit:  limit * 200,
+	}
+	req, _ := NewRequest("POST", cst.ts.URL, body)
 
 	// Send the POST, but don't care it succeeds or not. The
 	// remote side is going to reply and then close the TCP
@@ -3016,10 +3049,13 @@
 	if err == nil {
 		resp.Body.Close()
 	}
+	// Wait for the Transport to finish writing the request body.
+	// It will close the body when done.
+	<-body.closed
 
-	if atomic.LoadInt64(nWritten) > limit*100 {
+	if body.count > limit*100 {
 		t.Errorf("handler restricted the request body to %d bytes, but client managed to write %d",
-			limit, nWritten)
+			limit, body.count)
 	}
 }
 
@@ -3075,47 +3111,71 @@
 // closing the TCP connection, causing the client to get a RST.
 // See https://golang.org/issue/3595
 func TestServerGracefulClose(t *testing.T) {
-	run(t, testServerGracefulClose, []testMode{http1Mode})
+	// Not parallel: modifies the global rstAvoidanceDelay.
+	run(t, testServerGracefulClose, []testMode{http1Mode}, testNotParallel)
 }
 func testServerGracefulClose(t *testing.T, mode testMode) {
-	ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
-		Error(w, "bye", StatusUnauthorized)
-	})).ts
+	runTimeSensitiveTest(t, []time.Duration{
+		1 * time.Millisecond,
+		5 * time.Millisecond,
+		10 * time.Millisecond,
+		50 * time.Millisecond,
+		100 * time.Millisecond,
+		500 * time.Millisecond,
+		time.Second,
+		5 * time.Second,
+	}, func(t *testing.T, timeout time.Duration) error {
+		SetRSTAvoidanceDelay(t, timeout)
+		t.Logf("set RST avoidance delay to %v", timeout)
 
-	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer conn.Close()
-	const bodySize = 5 << 20
-	req := []byte(fmt.Sprintf("POST / HTTP/1.1\r\nHost: foo.com\r\nContent-Length: %d\r\n\r\n", bodySize))
-	for i := 0; i < bodySize; i++ {
-		req = append(req, 'x')
-	}
-	writeErr := make(chan error)
-	go func() {
-		_, err := conn.Write(req)
-		writeErr <- err
-	}()
-	br := bufio.NewReader(conn)
-	lineNum := 0
-	for {
-		line, err := br.ReadString('\n')
-		if err == io.EOF {
-			break
+		const bodySize = 5 << 20
+		req := []byte(fmt.Sprintf("POST / HTTP/1.1\r\nHost: foo.com\r\nContent-Length: %d\r\n\r\n", bodySize))
+		for i := 0; i < bodySize; i++ {
+			req = append(req, 'x')
 		}
+
+		cst := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
+			Error(w, "bye", StatusUnauthorized)
+		}))
+		// We need to close cst explicitly here so that in-flight server
+		// requests don't race with the call to SetRSTAvoidanceDelay for a retry.
+		defer cst.close()
+		ts := cst.ts
+
+		conn, err := net.Dial("tcp", ts.Listener.Addr().String())
 		if err != nil {
-			t.Fatalf("ReadLine: %v", err)
+			return err
 		}
-		lineNum++
-		if lineNum == 1 && !strings.Contains(line, "401 Unauthorized") {
-			t.Errorf("Response line = %q; want a 401", line)
+		writeErr := make(chan error)
+		go func() {
+			_, err := conn.Write(req)
+			writeErr <- err
+		}()
+		defer func() {
+			conn.Close()
+			// Wait for write to finish. This is a broken pipe on both
+			// Darwin and Linux, but checking this isn't the point of
+			// the test.
+			<-writeErr
+		}()
+
+		br := bufio.NewReader(conn)
+		lineNum := 0
+		for {
+			line, err := br.ReadString('\n')
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				return fmt.Errorf("ReadLine: %v", err)
+			}
+			lineNum++
+			if lineNum == 1 && !strings.Contains(line, "401 Unauthorized") {
+				t.Errorf("Response line = %q; want a 401", line)
+			}
 		}
-	}
-	// Wait for write to finish. This is a broken pipe on both
-	// Darwin and Linux, but checking this isn't the point of
-	// the test.
-	<-writeErr
+		return nil
+	})
 }
 
 func TestCaseSensitiveMethod(t *testing.T) { run(t, testCaseSensitiveMethod) }
@@ -3897,91 +3957,93 @@
 // and the http client), and both think they can close it on failure.
 // Therefore, all incoming server requests Bodies need to be thread-safe.
 func TestTransportAndServerSharedBodyRace(t *testing.T) {
-	run(t, testTransportAndServerSharedBodyRace)
+	run(t, testTransportAndServerSharedBodyRace, testNotParallel)
 }
 func testTransportAndServerSharedBodyRace(t *testing.T, mode testMode) {
-	const bodySize = 1 << 20
+	// The proxy server in the middle of the stack for this test potentially
+	// returns from its handler after only reading half of the body.
+	// That can trigger https://go.dev/issue/3595, which is otherwise
+	// irrelevant to this test.
+	runTimeSensitiveTest(t, []time.Duration{
+		1 * time.Millisecond,
+		5 * time.Millisecond,
+		10 * time.Millisecond,
+		50 * time.Millisecond,
+		100 * time.Millisecond,
+		500 * time.Millisecond,
+		time.Second,
+		5 * time.Second,
+	}, func(t *testing.T, timeout time.Duration) error {
+		SetRSTAvoidanceDelay(t, timeout)
+		t.Logf("set RST avoidance delay to %v", timeout)
 
-	// errorf is like t.Errorf, but also writes to println. When
-	// this test fails, it hangs. This helps debugging and I've
-	// added this enough times "temporarily".  It now gets added
-	// full time.
-	errorf := func(format string, args ...any) {
-		v := fmt.Sprintf(format, args...)
-		println(v)
-		t.Error(v)
-	}
+		const bodySize = 1 << 20
 
-	unblockBackend := make(chan bool)
-	backend := newClientServerTest(t, mode, HandlerFunc(func(rw ResponseWriter, req *Request) {
-		gone := rw.(CloseNotifier).CloseNotify()
-		didCopy := make(chan any)
-		go func() {
+		var wg sync.WaitGroup
+		backend := newClientServerTest(t, mode, HandlerFunc(func(rw ResponseWriter, req *Request) {
+			// Work around https://go.dev/issue/38370: clientServerTest uses
+			// an httptest.Server under the hood, and in HTTP/2 mode it does not always
+			// “[block] until all outstanding requests on this server have completed”,
+			// causing the call to Logf below to race with the end of the test.
+			//
+			// Since the client doesn't cancel the request until we have copied half
+			// the body, this call to add happens before the test is cleaned up,
+			// preventing the race.
+			wg.Add(1)
+			defer wg.Done()
+
 			n, err := io.CopyN(rw, req.Body, bodySize)
-			didCopy <- []any{n, err}
+			t.Logf("backend CopyN: %v, %v", n, err)
+			<-req.Context().Done()
+		}))
+		// We need to close explicitly here so that in-flight server
+		// requests don't race with the call to SetRSTAvoidanceDelay for a retry.
+		defer func() {
+			wg.Wait()
+			backend.close()
 		}()
-		isGone := false
-	Loop:
-		for {
-			select {
-			case <-didCopy:
-				break Loop
-			case <-gone:
-				isGone = true
-			case <-time.After(time.Second):
-				println("1 second passes in backend, proxygone=", isGone)
+
+		var proxy *clientServerTest
+		proxy = newClientServerTest(t, mode, HandlerFunc(func(rw ResponseWriter, req *Request) {
+			req2, _ := NewRequest("POST", backend.ts.URL, req.Body)
+			req2.ContentLength = bodySize
+			cancel := make(chan struct{})
+			req2.Cancel = cancel
+
+			bresp, err := proxy.c.Do(req2)
+			if err != nil {
+				t.Errorf("Proxy outbound request: %v", err)
+				return
 			}
-		}
-		<-unblockBackend
-	}))
-	defer backend.close()
+			_, err = io.CopyN(io.Discard, bresp.Body, bodySize/2)
+			if err != nil {
+				t.Errorf("Proxy copy error: %v", err)
+				return
+			}
+			t.Cleanup(func() { bresp.Body.Close() })
 
-	backendRespc := make(chan *Response, 1)
-	var proxy *clientServerTest
-	proxy = newClientServerTest(t, mode, HandlerFunc(func(rw ResponseWriter, req *Request) {
-		req2, _ := NewRequest("POST", backend.ts.URL, req.Body)
-		req2.ContentLength = bodySize
-		cancel := make(chan struct{})
-		req2.Cancel = cancel
+			// Try to cause a race. Canceling the client request will cause the client
+			// transport to close req2.Body. Returning from the server handler will
+			// cause the server to close req.Body. Since they are the same underlying
+			// ReadCloser, that will result in concurrent calls to Close (and possibly a
+			// Read concurrent with a Close).
+			if mode == http2Mode {
+				close(cancel)
+			} else {
+				proxy.c.Transport.(*Transport).CancelRequest(req2)
+			}
+			rw.Write([]byte("OK"))
+		}))
+		defer proxy.close()
 
-		bresp, err := proxy.c.Do(req2)
+		req, _ := NewRequest("POST", proxy.ts.URL, io.LimitReader(neverEnding('a'), bodySize))
+		res, err := proxy.c.Do(req)
 		if err != nil {
-			errorf("Proxy outbound request: %v", err)
-			return
+			return fmt.Errorf("original request: %v", err)
 		}
-		_, err = io.CopyN(io.Discard, bresp.Body, bodySize/2)
-		if err != nil {
-			errorf("Proxy copy error: %v", err)
-			return
-		}
-		backendRespc <- bresp // to close later
-
-		// Try to cause a race: Both the Transport and the proxy handler's Server
-		// will try to read/close req.Body (aka req2.Body)
-		if mode == http2Mode {
-			close(cancel)
-		} else {
-			proxy.c.Transport.(*Transport).CancelRequest(req2)
-		}
-		rw.Write([]byte("OK"))
-	}))
-	defer proxy.close()
-
-	defer close(unblockBackend)
-	req, _ := NewRequest("POST", proxy.ts.URL, io.LimitReader(neverEnding('a'), bodySize))
-	res, err := proxy.c.Do(req)
-	if err != nil {
-		t.Fatalf("Original request: %v", err)
-	}
-
-	// Cleanup, so we don't leak goroutines.
-	res.Body.Close()
-	select {
-	case res := <-backendRespc:
 		res.Body.Close()
-	default:
-		// We failed earlier. (e.g. on proxy.c.Do(req2))
-	}
+		return nil
+	})
 }
 
 // Test that a hanging Request.Body.Read from another goroutine can't
@@ -4316,7 +4378,8 @@
 }
 
 func TestCloseWrite(t *testing.T) {
-	setParallel(t)
+	SetRSTAvoidanceDelay(t, 1*time.Millisecond)
+
 	var srv Server
 	var testConn closeWriteTestConn
 	c := ExportServerNewConn(&srv, &testConn)
@@ -4552,10 +4615,10 @@
 }
 
 // If a Handler finishes and there's an unread request body,
-// verify the server try to do implicit read on it before replying.
+// verify the server implicitly tries to do a read on it before replying.
 func TestHandlerFinishSkipBigContentLengthRead(t *testing.T) {
 	setParallel(t)
-	conn := &testConn{closec: make(chan bool)}
+	conn := newTestConn()
 	conn.readBuf.Write([]byte(fmt.Sprintf(
 		"POST / HTTP/1.1\r\n" +
 			"Host: test\r\n" +
@@ -4645,7 +4708,7 @@
 		{"GET / HTTP/3.0", "", 505},
 	}
 	for _, tt := range tests {
-		conn := &testConn{closec: make(chan bool, 1)}
+		conn := newTestConn()
 		methodTarget := "GET / "
 		if !strings.HasPrefix(tt.proto, "HTTP/") {
 			methodTarget = ""
@@ -4743,7 +4806,7 @@
 		{"foo: foo\xfffoo\r\n", 200}, // non-ASCII high octets in value are fine
 	}
 	for _, tt := range tests {
-		conn := &testConn{closec: make(chan bool, 1)}
+		conn := newTestConn()
 		io.WriteString(&conn.readBuf, "GET / HTTP/1.1\r\nHost: foo\r\n"+tt.header+"\r\n")
 
 		ln := &oneConnListener{conn}
@@ -4966,7 +5029,7 @@
 // For use like:
 //
 //	$ go test -c
-//	$ ./http.test -test.run=XX -test.bench=BenchmarkServer -test.benchtime=15s -test.cpuprofile=http.prof
+//	$ ./http.test -test.run='^$' -test.bench='^BenchmarkServer$' -test.benchtime=15s -test.cpuprofile=http.prof
 //	$ go tool pprof http.test http.prof
 //	(pprof) web
 func BenchmarkServer(b *testing.B) {
@@ -5005,7 +5068,7 @@
 	defer ts.Close()
 	b.StartTimer()
 
-	cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkServer$")
+	cmd := testenv.Command(b, os.Args[0], "-test.run=^$", "-test.bench=^BenchmarkServer$")
 	cmd.Env = append([]string{
 		fmt.Sprintf("TEST_BENCH_CLIENT_N=%d", b.N),
 		fmt.Sprintf("TEST_BENCH_SERVER_URL=%s", ts.URL),
@@ -5060,7 +5123,7 @@
 
 	// Start server process.
 	ctx, cancel := context.WithCancel(context.Background())
-	cmd := testenv.CommandContext(b, ctx, os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkClient$")
+	cmd := testenv.CommandContext(b, ctx, os.Args[0], "-test.run=^$", "-test.bench=^BenchmarkClient$")
 	cmd.Env = append(cmd.Environ(), "TEST_BENCH_SERVER=yes")
 	cmd.Stderr = os.Stderr
 	stdout, err := cmd.StdoutPipe()
@@ -5129,11 +5192,7 @@
 `)
 	res := []byte("Hello world!\n")
 
-	conn := &testConn{
-		// testConn.Close will not push into the channel
-		// if it's full.
-		closec: make(chan bool, 1),
-	}
+	conn := newTestConn()
 	handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
 		rw.Header().Set("Content-Type", "text/html; charset=utf-8")
 		rw.Write(res)
@@ -5356,49 +5415,75 @@
 	if testing.Short() {
 		t.Skip("skipping in short mode")
 	}
-	ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
-		io.Copy(io.Discard, r.Body)
-		io.WriteString(w, r.RemoteAddr)
-	}), func(ts *httptest.Server) {
-		ts.Config.ReadHeaderTimeout = 1 * time.Second
-		ts.Config.IdleTimeout = 2 * time.Second
-	}).ts
-	c := ts.Client()
+	runTimeSensitiveTest(t, []time.Duration{
+		10 * time.Millisecond,
+		100 * time.Millisecond,
+		1 * time.Second,
+		10 * time.Second,
+	}, func(t *testing.T, readHeaderTimeout time.Duration) error {
+		cst := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
+			io.Copy(io.Discard, r.Body)
+			io.WriteString(w, r.RemoteAddr)
+		}), func(ts *httptest.Server) {
+			ts.Config.ReadHeaderTimeout = readHeaderTimeout
+			ts.Config.IdleTimeout = 2 * readHeaderTimeout
+		})
+		defer cst.close()
+		ts := cst.ts
+		t.Logf("ReadHeaderTimeout = %v", ts.Config.ReadHeaderTimeout)
+		t.Logf("IdleTimeout = %v", ts.Config.IdleTimeout)
+		c := ts.Client()
 
-	get := func() string {
-		res, err := c.Get(ts.URL)
-		if err != nil {
-			t.Fatal(err)
+		get := func() (string, error) {
+			res, err := c.Get(ts.URL)
+			if err != nil {
+				return "", err
+			}
+			defer res.Body.Close()
+			slurp, err := io.ReadAll(res.Body)
+			if err != nil {
+				// If we're at this point the headers have definitely already been
+				// read and the server is not idle, so neither timeout applies:
+				// this should never fail.
+				t.Fatal(err)
+			}
+			return string(slurp), nil
 		}
-		defer res.Body.Close()
-		slurp, err := io.ReadAll(res.Body)
+
+		a1, err := get()
 		if err != nil {
-			t.Fatal(err)
+			return err
 		}
-		return string(slurp)
-	}
+		a2, err := get()
+		if err != nil {
+			return err
+		}
+		if a1 != a2 {
+			return fmt.Errorf("did requests on different connections")
+		}
+		time.Sleep(ts.Config.IdleTimeout * 3 / 2)
+		a3, err := get()
+		if err != nil {
+			return err
+		}
+		if a2 == a3 {
+			return fmt.Errorf("request three unexpectedly on same connection")
+		}
 
-	a1, a2 := get(), get()
-	if a1 != a2 {
-		t.Fatalf("did requests on different connections")
-	}
-	time.Sleep(3 * time.Second)
-	a3 := get()
-	if a2 == a3 {
-		t.Fatal("request three unexpectedly on same connection")
-	}
+		// And test that ReadHeaderTimeout still works:
+		conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+		if err != nil {
+			return err
+		}
+		defer conn.Close()
+		conn.Write([]byte("GET / HTTP/1.1\r\nHost: foo.com\r\n"))
+		time.Sleep(ts.Config.ReadHeaderTimeout * 2)
+		if _, err := io.CopyN(io.Discard, conn, 1); err == nil {
+			return fmt.Errorf("copy byte succeeded; want err")
+		}
 
-	// And test that ReadHeaderTimeout still works:
-	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer conn.Close()
-	conn.Write([]byte("GET / HTTP/1.1\r\nHost: foo.com\r\n"))
-	time.Sleep(2 * time.Second)
-	if _, err := io.CopyN(io.Discard, conn, 1); err == nil {
-		t.Fatal("copy byte succeeded; want err")
-	}
+		return nil
+	})
 }
 
 func get(t *testing.T, c *Client, url string) string {
@@ -5658,7 +5743,7 @@
 		time.Second,
 		2 * time.Second,
 	}, func(t *testing.T, timeout time.Duration) error {
-		ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
+		cst := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
 			select {
 			case <-time.After(2 * timeout):
 				fmt.Fprint(w, "ok")
@@ -5667,7 +5752,9 @@
 			}
 		}), func(ts *httptest.Server) {
 			ts.Config.ReadTimeout = timeout
-		}).ts
+		})
+		defer cst.close()
+		ts := cst.ts
 
 		c := ts.Client()
 
@@ -5701,10 +5788,12 @@
 		time.Second,
 		2 * time.Second,
 	}, func(t *testing.T, timeout time.Duration) error {
-		ts := newClientServerTest(t, mode, serve(200), func(ts *httptest.Server) {
+		cst := newClientServerTest(t, mode, serve(200), func(ts *httptest.Server) {
 			ts.Config.ReadHeaderTimeout = timeout
 			ts.Config.IdleTimeout = 0 // disable idle timeout
-		}).ts
+		})
+		defer cst.close()
+		ts := cst.ts
 
 		// rather than using an http.Client, create a single connection, so that
 		// we can ensure this connection is not closed.
@@ -5747,9 +5836,10 @@
 		if err == nil {
 			return
 		}
-		if i == len(durations)-1 {
+		if i == len(durations)-1 || t.Failed() {
 			t.Fatalf("failed with duration %v: %v", d, err)
 		}
+		t.Logf("retrying after error with duration %v: %v", d, err)
 	}
 }
 
@@ -5929,7 +6019,7 @@
 		{"GE(T", 400},
 	}
 	for _, tt := range tests {
-		conn := &testConn{closec: make(chan bool, 1)}
+		conn := newTestConn()
 		io.WriteString(&conn.readBuf, tt.method+" / HTTP/1.1\r\nHost: foo.example\r\n\r\n")
 
 		ln := &oneConnListener{conn}
@@ -6594,7 +6684,7 @@
 }
 
 func TestMaxBytesHandler(t *testing.T) {
-	setParallel(t)
+	// Not parallel: modifies the global rstAvoidanceDelay.
 	defer afterTest(t)
 
 	for _, maxSize := range []int64{100, 1_000, 1_000_000} {
@@ -6603,77 +6693,99 @@
 				func(t *testing.T) {
 					run(t, func(t *testing.T, mode testMode) {
 						testMaxBytesHandler(t, mode, maxSize, requestSize)
-					})
+					}, testNotParallel)
 				})
 		}
 	}
 }
 
 func testMaxBytesHandler(t *testing.T, mode testMode, maxSize, requestSize int64) {
-	var (
-		handlerN   int64
-		handlerErr error
-	)
-	echo := HandlerFunc(func(w ResponseWriter, r *Request) {
-		var buf bytes.Buffer
-		handlerN, handlerErr = io.Copy(&buf, r.Body)
-		io.Copy(w, &buf)
-	})
+	runTimeSensitiveTest(t, []time.Duration{
+		1 * time.Millisecond,
+		5 * time.Millisecond,
+		10 * time.Millisecond,
+		50 * time.Millisecond,
+		100 * time.Millisecond,
+		500 * time.Millisecond,
+		time.Second,
+		5 * time.Second,
+	}, func(t *testing.T, timeout time.Duration) error {
+		SetRSTAvoidanceDelay(t, timeout)
+		t.Logf("set RST avoidance delay to %v", timeout)
 
-	ts := newClientServerTest(t, mode, MaxBytesHandler(echo, maxSize)).ts
-	defer ts.Close()
+		var (
+			handlerN   int64
+			handlerErr error
+		)
+		echo := HandlerFunc(func(w ResponseWriter, r *Request) {
+			var buf bytes.Buffer
+			handlerN, handlerErr = io.Copy(&buf, r.Body)
+			io.Copy(w, &buf)
+		})
 
-	c := ts.Client()
+		cst := newClientServerTest(t, mode, MaxBytesHandler(echo, maxSize))
+		// We need to close cst explicitly here so that in-flight server
+		// requests don't race with the call to SetRSTAvoidanceDelay for a retry.
+		defer cst.close()
+		ts := cst.ts
+		c := ts.Client()
 
-	body := strings.Repeat("a", int(requestSize))
-	var wg sync.WaitGroup
-	defer wg.Wait()
-	getBody := func() (io.ReadCloser, error) {
-		wg.Add(1)
-		body := &wgReadCloser{
-			Reader: strings.NewReader(body),
-			wg:     &wg,
+		body := strings.Repeat("a", int(requestSize))
+		var wg sync.WaitGroup
+		defer wg.Wait()
+		getBody := func() (io.ReadCloser, error) {
+			wg.Add(1)
+			body := &wgReadCloser{
+				Reader: strings.NewReader(body),
+				wg:     &wg,
+			}
+			return body, nil
 		}
-		return body, nil
-	}
-	reqBody, _ := getBody()
-	req, err := NewRequest("POST", ts.URL, reqBody)
-	if err != nil {
-		reqBody.Close()
-		t.Fatal(err)
-	}
-	req.ContentLength = int64(len(body))
-	req.GetBody = getBody
-	req.Header.Set("Content-Type", "text/plain")
-
-	var buf strings.Builder
-	res, err := c.Do(req)
-	if err != nil {
-		t.Errorf("unexpected connection error: %v", err)
-	} else {
-		_, err = io.Copy(&buf, res.Body)
-		res.Body.Close()
+		reqBody, _ := getBody()
+		req, err := NewRequest("POST", ts.URL, reqBody)
 		if err != nil {
-			t.Errorf("unexpected read error: %v", err)
+			reqBody.Close()
+			t.Fatal(err)
 		}
-	}
-	if handlerN > maxSize {
-		t.Errorf("expected max request body %d; got %d", maxSize, handlerN)
-	}
-	if requestSize > maxSize && handlerErr == nil {
-		t.Error("expected error on handler side; got nil")
-	}
-	if requestSize <= maxSize {
-		if handlerErr != nil {
-			t.Errorf("%d expected nil error on handler side; got %v", requestSize, handlerErr)
+		req.ContentLength = int64(len(body))
+		req.GetBody = getBody
+		req.Header.Set("Content-Type", "text/plain")
+
+		var buf strings.Builder
+		res, err := c.Do(req)
+		if err != nil {
+			return fmt.Errorf("unexpected connection error: %v", err)
+		} else {
+			_, err = io.Copy(&buf, res.Body)
+			res.Body.Close()
+			if err != nil {
+				return fmt.Errorf("unexpected read error: %v", err)
+			}
 		}
-		if handlerN != requestSize {
-			t.Errorf("expected request of size %d; got %d", requestSize, handlerN)
+		// We don't expect any of the errors after this point to occur due
+		// to rstAvoidanceDelay being too short, so we use t.Errorf for those
+		// instead of returning a (retriable) error.
+
+		if handlerN > maxSize {
+			t.Errorf("expected max request body %d; got %d", maxSize, handlerN)
 		}
-	}
-	if buf.Len() != int(handlerN) {
-		t.Errorf("expected echo of size %d; got %d", handlerN, buf.Len())
-	}
+		if requestSize > maxSize && handlerErr == nil {
+			t.Error("expected error on handler side; got nil")
+		}
+		if requestSize <= maxSize {
+			if handlerErr != nil {
+				t.Errorf("%d expected nil error on handler side; got %v", requestSize, handlerErr)
+			}
+			if handlerN != requestSize {
+				t.Errorf("expected request of size %d; got %d", requestSize, handlerN)
+			}
+		}
+		if buf.Len() != int(handlerN) {
+			t.Errorf("expected echo of size %d; got %d", handlerN, buf.Len())
+		}
+
+		return nil
+	})
 }
 
 func TestEarlyHints(t *testing.T) {
diff --git a/src/net/http/servemux121.go b/src/net/http/servemux121.go
new file mode 100644
index 0000000..c0a4b77
--- /dev/null
+++ b/src/net/http/servemux121.go
@@ -0,0 +1,211 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+// This file implements ServeMux behavior as in Go 1.21.
+// The behavior is controlled by a GODEBUG setting.
+// Most of this code is derived from commit 08e35cc334.
+// Changes are minimal: aside from the different receiver type,
+// they mostly involve renaming functions, usually by unexporting them.
+
+import (
+	"internal/godebug"
+	"net/url"
+	"sort"
+	"strings"
+	"sync"
+)
+
+var httpmuxgo121 = godebug.New("httpmuxgo121")
+
+var use121 bool
+
+// Read httpmuxgo121 once at startup, since dealing with changes to it during
+// program execution is too complex and error-prone.
+func init() {
+	if httpmuxgo121.Value() == "1" {
+		use121 = true
+		httpmuxgo121.IncNonDefault()
+	}
+}
+
+// serveMux121 holds the state of a ServeMux needed for Go 1.21 behavior.
+type serveMux121 struct {
+	mu    sync.RWMutex
+	m     map[string]muxEntry
+	es    []muxEntry // slice of entries sorted from longest to shortest.
+	hosts bool       // whether any patterns contain hostnames
+}
+
+type muxEntry struct {
+	h       Handler
+	pattern string
+}
+
+// Formerly ServeMux.Handle.
+func (mux *serveMux121) handle(pattern string, handler Handler) {
+	mux.mu.Lock()
+	defer mux.mu.Unlock()
+
+	if pattern == "" {
+		panic("http: invalid pattern")
+	}
+	if handler == nil {
+		panic("http: nil handler")
+	}
+	if _, exist := mux.m[pattern]; exist {
+		panic("http: multiple registrations for " + pattern)
+	}
+
+	if mux.m == nil {
+		mux.m = make(map[string]muxEntry)
+	}
+	e := muxEntry{h: handler, pattern: pattern}
+	mux.m[pattern] = e
+	if pattern[len(pattern)-1] == '/' {
+		mux.es = appendSorted(mux.es, e)
+	}
+
+	if pattern[0] != '/' {
+		mux.hosts = true
+	}
+}
+
+func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
+	n := len(es)
+	i := sort.Search(n, func(i int) bool {
+		return len(es[i].pattern) < len(e.pattern)
+	})
+	if i == n {
+		return append(es, e)
+	}
+	// we now know that i points at where we want to insert
+	es = append(es, muxEntry{}) // try to grow the slice in place, any entry works.
+	copy(es[i+1:], es[i:])      // Move shorter entries down
+	es[i] = e
+	return es
+}
+
+// Formerly ServeMux.HandleFunc.
+func (mux *serveMux121) handleFunc(pattern string, handler func(ResponseWriter, *Request)) {
+	if handler == nil {
+		panic("http: nil handler")
+	}
+	mux.handle(pattern, HandlerFunc(handler))
+}
+
+// Formerly ServeMux.Handler.
+func (mux *serveMux121) findHandler(r *Request) (h Handler, pattern string) {
+
+	// CONNECT requests are not canonicalized.
+	if r.Method == "CONNECT" {
+		// If r.URL.Path is /tree and its handler is not registered,
+		// the /tree -> /tree/ redirect applies to CONNECT requests
+		// but the path canonicalization does not.
+		if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
+			return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
+		}
+
+		return mux.handler(r.Host, r.URL.Path)
+	}
+
+	// All other requests have any port stripped and path cleaned
+	// before passing to mux.handler.
+	host := stripHostPort(r.Host)
+	path := cleanPath(r.URL.Path)
+
+	// If the given path is /tree and its handler is not registered,
+	// redirect for /tree/.
+	if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
+		return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
+	}
+
+	if path != r.URL.Path {
+		_, pattern = mux.handler(host, path)
+		u := &url.URL{Path: path, RawQuery: r.URL.RawQuery}
+		return RedirectHandler(u.String(), StatusMovedPermanently), pattern
+	}
+
+	return mux.handler(host, r.URL.Path)
+}
+
+// handler is the main implementation of findHandler.
+// The path is known to be in canonical form, except for CONNECT methods.
+func (mux *serveMux121) handler(host, path string) (h Handler, pattern string) {
+	mux.mu.RLock()
+	defer mux.mu.RUnlock()
+
+	// Host-specific pattern takes precedence over generic ones
+	if mux.hosts {
+		h, pattern = mux.match(host + path)
+	}
+	if h == nil {
+		h, pattern = mux.match(path)
+	}
+	if h == nil {
+		h, pattern = NotFoundHandler(), ""
+	}
+	return
+}
+
+// Find a handler on a handler map given a path string.
+// Most-specific (longest) pattern wins.
+func (mux *serveMux121) match(path string) (h Handler, pattern string) {
+	// Check for exact match first.
+	v, ok := mux.m[path]
+	if ok {
+		return v.h, v.pattern
+	}
+
+	// Check for longest valid match.  mux.es contains all patterns
+	// that end in / sorted from longest to shortest.
+	for _, e := range mux.es {
+		if strings.HasPrefix(path, e.pattern) {
+			return e.h, e.pattern
+		}
+	}
+	return nil, ""
+}
+
+// redirectToPathSlash determines if the given path needs appending "/" to it.
+// This occurs when a handler for path + "/" was already registered, but
+// not for path itself. If the path needs appending to, it creates a new
+// URL, setting the path to u.Path + "/" and returning true to indicate so.
+func (mux *serveMux121) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
+	mux.mu.RLock()
+	shouldRedirect := mux.shouldRedirectRLocked(host, path)
+	mux.mu.RUnlock()
+	if !shouldRedirect {
+		return u, false
+	}
+	path = path + "/"
+	u = &url.URL{Path: path, RawQuery: u.RawQuery}
+	return u, true
+}
+
+// shouldRedirectRLocked reports whether the given path and host should be redirected to
+// path+"/". This should happen if a handler is registered for path+"/" but
+// not path -- see comments at ServeMux.
+func (mux *serveMux121) shouldRedirectRLocked(host, path string) bool {
+	p := []string{path, host + path}
+
+	for _, c := range p {
+		if _, exist := mux.m[c]; exist {
+			return false
+		}
+	}
+
+	n := len(path)
+	if n == 0 {
+		return false
+	}
+	for _, c := range p {
+		if _, exist := mux.m[c+"/"]; exist {
+			return path[n-1] != '/'
+		}
+	}
+
+	return false
+}
diff --git a/src/net/http/server.go b/src/net/http/server.go
index 8f63a90..acac78b 100644
--- a/src/net/http/server.go
+++ b/src/net/http/server.go
@@ -61,16 +61,16 @@
 
 // A Handler responds to an HTTP request.
 //
-// ServeHTTP should write reply headers and data to the ResponseWriter
+// [Handler.ServeHTTP] should write reply headers and data to the [ResponseWriter]
 // and then return. Returning signals that the request is finished; it
-// is not valid to use the ResponseWriter or read from the
-// Request.Body after or concurrently with the completion of the
+// is not valid to use the [ResponseWriter] or read from the
+// [Request.Body] after or concurrently with the completion of the
 // ServeHTTP call.
 //
 // Depending on the HTTP client software, HTTP protocol version, and
 // any intermediaries between the client and the Go server, it may not
-// be possible to read from the Request.Body after writing to the
-// ResponseWriter. Cautious handlers should read the Request.Body
+// be possible to read from the [Request.Body] after writing to the
+// [ResponseWriter]. Cautious handlers should read the [Request.Body]
 // first, and then reply.
 //
 // Except for reading the body, handlers should not modify the
@@ -82,7 +82,7 @@
 // and either closes the network connection or sends an HTTP/2
 // RST_STREAM, depending on the HTTP protocol. To abort a handler so
 // the client sees an interrupted response but the server doesn't log
-// an error, panic with the value ErrAbortHandler.
+// an error, panic with the value [ErrAbortHandler].
 type Handler interface {
 	ServeHTTP(ResponseWriter, *Request)
 }
@@ -90,15 +90,14 @@
 // A ResponseWriter interface is used by an HTTP handler to
 // construct an HTTP response.
 //
-// A ResponseWriter may not be used after the Handler.ServeHTTP method
-// has returned.
+// A ResponseWriter may not be used after [Handler.ServeHTTP] has returned.
 type ResponseWriter interface {
 	// Header returns the header map that will be sent by
-	// WriteHeader. The Header map also is the mechanism with which
-	// Handlers can set HTTP trailers.
+	// [ResponseWriter.WriteHeader]. The [Header] map also is the mechanism with which
+	// [Handler] implementations can set HTTP trailers.
 	//
-	// Changing the header map after a call to WriteHeader (or
-	// Write) has no effect unless the HTTP status code was of the
+	// Changing the header map after a call to [ResponseWriter.WriteHeader] (or
+	// [ResponseWriter.Write]) has no effect unless the HTTP status code was of the
 	// 1xx class or the modified headers are trailers.
 	//
 	// There are two ways to set Trailers. The preferred way is to
@@ -107,9 +106,9 @@
 	// trailer keys which will come later. In this case, those
 	// keys of the Header map are treated as if they were
 	// trailers. See the example. The second way, for trailer
-	// keys not known to the Handler until after the first Write,
-	// is to prefix the Header map keys with the TrailerPrefix
-	// constant value. See TrailerPrefix.
+	// keys not known to the [Handler] until after the first [ResponseWriter.Write],
+	// is to prefix the [Header] map keys with the [TrailerPrefix]
+	// constant value.
 	//
 	// To suppress automatic response headers (such as "Date"), set
 	// their value to nil.
@@ -117,11 +116,11 @@
 
 	// Write writes the data to the connection as part of an HTTP reply.
 	//
-	// If WriteHeader has not yet been called, Write calls
+	// If [ResponseWriter.WriteHeader] has not yet been called, Write calls
 	// WriteHeader(http.StatusOK) before writing the data. If the Header
 	// does not contain a Content-Type line, Write adds a Content-Type set
 	// to the result of passing the initial 512 bytes of written data to
-	// DetectContentType. Additionally, if the total size of all written
+	// [DetectContentType]. Additionally, if the total size of all written
 	// data is under a few KB and there are no Flush calls, the
 	// Content-Length header is added automatically.
 	//
@@ -162,8 +161,8 @@
 // The Flusher interface is implemented by ResponseWriters that allow
 // an HTTP handler to flush buffered data to the client.
 //
-// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
-// support Flusher, but ResponseWriter wrappers may not. Handlers
+// The default HTTP/1.x and HTTP/2 [ResponseWriter] implementations
+// support [Flusher], but ResponseWriter wrappers may not. Handlers
 // should always test for this ability at runtime.
 //
 // Note that even for ResponseWriters that support Flush,
@@ -178,7 +177,7 @@
 // The Hijacker interface is implemented by ResponseWriters that allow
 // an HTTP handler to take over the connection.
 //
-// The default ResponseWriter for HTTP/1.x connections supports
+// The default [ResponseWriter] for HTTP/1.x connections supports
 // Hijacker, but HTTP/2 connections intentionally do not.
 // ResponseWriter wrappers may also not support Hijacker. Handlers
 // should always test for this ability at runtime.
@@ -212,7 +211,7 @@
 // if the client has disconnected before the response is ready.
 //
 // Deprecated: the CloseNotifier interface predates Go's context package.
-// New code should use Request.Context instead.
+// New code should use [Request.Context] instead.
 type CloseNotifier interface {
 	// CloseNotify returns a channel that receives at most a
 	// single value (true) when the client connection has gone
@@ -506,7 +505,7 @@
 	return nil
 }
 
-// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// TrailerPrefix is a magic prefix for [ResponseWriter.Header] map keys
 // that, if present, signals that the map entry is actually for
 // the response trailers, and not the response headers. The prefix
 // is stripped after the ServeHTTP call finishes and the values are
@@ -572,13 +571,12 @@
 	io.Writer
 }
 
-// ReadFrom is here to optimize copying from an *os.File regular file
-// to a *net.TCPConn with sendfile, or from a supported src type such
+// ReadFrom is here to optimize copying from an [*os.File] regular file
+// to a [*net.TCPConn] with sendfile, or from a supported src type such
 // as a *net.TCPConn on Linux with splice.
 func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
-	bufp := copyBufPool.Get().(*[]byte)
-	buf := *bufp
-	defer copyBufPool.Put(bufp)
+	buf := getCopyBuf()
+	defer putCopyBuf(buf)
 
 	// Our underlying w.conn.rwc is usually a *TCPConn (with its
 	// own ReadFrom method). If not, just fall back to the normal
@@ -808,11 +806,18 @@
 	bufioWriter4kPool sync.Pool
 )
 
-var copyBufPool = sync.Pool{
-	New: func() any {
-		b := make([]byte, 32*1024)
-		return &b
-	},
+const copyBufPoolSize = 32 * 1024
+
+var copyBufPool = sync.Pool{New: func() any { return new([copyBufPoolSize]byte) }}
+
+func getCopyBuf() []byte {
+	return copyBufPool.Get().(*[copyBufPoolSize]byte)[:]
+}
+func putCopyBuf(b []byte) {
+	if len(b) != copyBufPoolSize {
+		panic("trying to put back buffer of the wrong size in the copyBufPool")
+	}
+	copyBufPool.Put((*[copyBufPoolSize]byte)(b))
 }
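
Illustrative sketch (not part of the patch) of the pooling pattern used by getCopyBuf/putCopyBuf above: keeping a *[N]byte in the sync.Pool means Get/Put move only a pointer through the pool's interface value, and the slice-to-array-pointer conversion enforces the buffer size. The names bufSize/bufPool/getBuf/putBuf are invented for the example.

package main

import (
	"fmt"
	"sync"
)

const bufSize = 32 * 1024

// The pool stores *[bufSize]byte values: a pointer fits in the pool's
// interface value without allocating, unlike a []byte slice header.
var bufPool = sync.Pool{New: func() any { return new([bufSize]byte) }}

func getBuf() []byte { return bufPool.Get().(*[bufSize]byte)[:] }

func putBuf(b []byte) {
	// The slice-to-array-pointer conversion (Go 1.17+) panics if len(b) < bufSize,
	// similar in spirit to the explicit length check in putCopyBuf above.
	bufPool.Put((*[bufSize]byte)(b))
}

func main() {
	b := getBuf()
	fmt.Println(len(b)) // 32768
	putBuf(b)
}
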
 
 func bufioWriterPool(size int) *sync.Pool {
@@ -862,7 +867,7 @@
 
 // DefaultMaxHeaderBytes is the maximum permitted size of the headers
 // in an HTTP request.
-// This can be overridden by setting Server.MaxHeaderBytes.
+// This can be overridden by setting [Server.MaxHeaderBytes].
 const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
 
 func (srv *Server) maxHeaderBytes() int {
@@ -935,11 +940,11 @@
 }
 
 // TimeFormat is the time format to use when generating times in HTTP
-// headers. It is like time.RFC1123 but hard-codes GMT as the time
+// headers. It is like [time.RFC1123] but hard-codes GMT as the time
 // zone. The time being formatted must be in UTC for Format to
 // generate the correct format.
 //
-// For parsing this time format, see ParseTime.
+// For parsing this time format, see [ParseTime].
 const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
 
 // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
@@ -1585,13 +1590,13 @@
 // The Writers are wired together like:
 //
 //  1. *response (the ResponseWriter) ->
-//  2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes ->
+//  2. (*response).w, a [*bufio.Writer] of bufferBeforeChunkingSize bytes ->
 //  3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
 //     and which writes the chunk headers, if needed ->
 //  4. conn.bufw, a *bufio.Writer of default (4kB) bytes, writing to ->
 //  5. checkConnErrorWriter{c}, which notes any non-nil error on Write
 //     and populates c.werr with it if so, but otherwise writes to ->
-//  6. the rwc, the net.Conn.
+//  6. the rwc, the [net.Conn].
 //
 // TODO(bradfitz): short-circuit some of the buffering when the
 // initial header contains both a Content-Type and Content-Length.
@@ -1752,8 +1757,12 @@
 // and processes its final data before they process the subsequent RST
 // from closing a connection with known unread data.
 // This RST seems to occur mostly on BSD systems. (And Windows?)
-// This timeout is somewhat arbitrary (~latency around the planet).
-const rstAvoidanceDelay = 500 * time.Millisecond
+// This timeout is somewhat arbitrary (~latency around the planet),
+// and may be modified by tests.
+//
+// TODO(bcmills): This should arguably be a server configuration parameter,
+// not a hard-coded value.
+var rstAvoidanceDelay = 500 * time.Millisecond
 
 type closeWriter interface {
 	CloseWrite() error
@@ -1772,6 +1781,27 @@
 	if tcp, ok := c.rwc.(closeWriter); ok {
 		tcp.CloseWrite()
 	}
+
+	// When we return from closeWriteAndWait, the caller will fully close the
+	// connection. If client is still writing to the connection, this will cause
+	// the write to fail with ECONNRESET or similar. Unfortunately, many TCP
+	// implementations will also drop unread packets from the client's read buffer
+	// when a write fails, causing our final response to be truncated away too.
+	//
+	// As a result, https://www.rfc-editor.org/rfc/rfc7230#section-6.6 recommends
+	// that “[t]he server … continues to read from the connection until it
+	// receives a corresponding close by the client, or until the server is
+	// reasonably certain that its own TCP stack has received the client's
+	// acknowledgement of the packet(s) containing the server's last response.”
+	//
+	// Unfortunately, we have no straightforward way to be “reasonably certain”
+	// that we have received the client's ACK, and at any rate we don't want to
+	// allow a misbehaving client to soak up server connections indefinitely by
+	// withholding an ACK, nor do we want to go through the complexity or overhead
+	// of using low-level APIs to figure out when a TCP round-trip has completed.
+	//
+	// Instead, we declare that we are “reasonably certain” that we received the
+	// ACK if rstAvoidanceDelay has elapsed.
 	time.Sleep(rstAvoidanceDelay)
 }
 
@@ -1971,7 +2001,7 @@
 					fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s: %s%s%d %s: %s", v.code, StatusText(v.code), v.text, errorHeaders, v.code, StatusText(v.code), v.text)
 					return
 				}
-				publicErr := "400 Bad Request"
+				const publicErr = "400 Bad Request"
 				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
 				return
 			}
@@ -2067,8 +2097,8 @@
 	w.finishRequest()
 }
 
-// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
-// and a Hijacker.
+// Hijack implements the [Hijacker.Hijack] method. Our response is both a [ResponseWriter]
+// and a [Hijacker].
 func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
 	if w.handlerDone.Load() {
 		panic("net/http: Hijack called after ServeHTTP finished")
@@ -2128,7 +2158,7 @@
 // The HandlerFunc type is an adapter to allow the use of
 // ordinary functions as HTTP handlers. If f is a function
 // with the appropriate signature, HandlerFunc(f) is a
-// Handler that calls f.
+// [Handler] that calls f.
 type HandlerFunc func(ResponseWriter, *Request)
 
 // ServeHTTP calls f(w, r).
@@ -2187,9 +2217,9 @@
 // which may be a path relative to the request path.
 //
 // The provided code should be in the 3xx range and is usually
-// StatusMovedPermanently, StatusFound or StatusSeeOther.
+// [StatusMovedPermanently], [StatusFound] or [StatusSeeOther].
 //
-// If the Content-Type header has not been set, Redirect sets it
+// If the Content-Type header has not been set, [Redirect] sets it
 // to "text/html; charset=utf-8" and writes a small HTML body.
 // Setting the Content-Type header to any value, including nil,
 // disables that behavior.
@@ -2277,7 +2307,7 @@
 // status code.
 //
 // The provided code should be in the 3xx range and is usually
-// StatusMovedPermanently, StatusFound or StatusSeeOther.
+// [StatusMovedPermanently], [StatusFound] or [StatusSeeOther].
 func RedirectHandler(url string, code int) Handler {
 	return &redirectHandler{url, code}
 }
@@ -2287,52 +2317,132 @@
 // patterns and calls the handler for the pattern that
 // most closely matches the URL.
 //
-// Patterns name fixed, rooted paths, like "/favicon.ico",
-// or rooted subtrees, like "/images/" (note the trailing slash).
-// Longer patterns take precedence over shorter ones, so that
-// if there are handlers registered for both "/images/"
-// and "/images/thumbnails/", the latter handler will be
-// called for paths beginning with "/images/thumbnails/" and the
-// former will receive requests for any other paths in the
-// "/images/" subtree.
+// # Patterns
 //
-// Note that since a pattern ending in a slash names a rooted subtree,
-// the pattern "/" matches all paths not matched by other registered
-// patterns, not just the URL with Path == "/".
+// Patterns can match the method, host and path of a request.
+// Some examples:
 //
-// If a subtree has been registered and a request is received naming the
-// subtree root without its trailing slash, ServeMux redirects that
-// request to the subtree root (adding the trailing slash). This behavior can
-// be overridden with a separate registration for the path without
-// the trailing slash. For example, registering "/images/" causes ServeMux
+//   - "/index.html" matches the path "/index.html" for any host and method.
+//   - "GET /static/" matches a GET request whose path begins with "/static/".
+//   - "example.com/" matches any request to the host "example.com".
+//   - "example.com/{$}" matches requests with host "example.com" and path "/".
+//   - "/b/{bucket}/o/{objectname...}" matches paths whose first segment is "b"
+//     and whose third segment is "o". The name "bucket" denotes the second
+//     segment and "objectname" denotes the remainder of the path.
+//
+// In general, a pattern looks like
+//
+//	[METHOD ][HOST]/[PATH]
+//
+// All three parts are optional; "/" is a valid pattern.
+// If METHOD is present, it must be followed by a single space.
+//
+// Literal (that is, non-wildcard) parts of a pattern match
+// the corresponding parts of a request case-sensitively.
+//
+// A pattern with no method matches every method. A pattern
+// with the method GET matches both GET and HEAD requests.
+// Otherwise, the method must match exactly.
+//
+// A pattern with no host matches every host.
+// A pattern with a host matches URLs on that host only.
+//
+// A path can include wildcard segments of the form {NAME} or {NAME...}.
+// For example, "/b/{bucket}/o/{objectname...}".
+// The wildcard name must be a valid Go identifier.
+// Wildcards must be full path segments: they must be preceded by a slash and followed by
+// either a slash or the end of the string.
+// For example, "/b_{bucket}" is not a valid pattern.
+//
+// Normally a wildcard matches only a single path segment,
+// ending at the next literal slash (not %2F) in the request URL.
+// But if the "..." is present, then the wildcard matches the remainder of the URL path, including slashes.
+// (Therefore it is invalid for a "..." wildcard to appear anywhere but at the end of a pattern.)
+// The match for a wildcard can be obtained by calling [Request.PathValue] with the wildcard's name.
+// A trailing slash in a path acts as an anonymous "..." wildcard.
+//
+// The special wildcard {$} matches only the end of the URL.
+// For example, the pattern "/{$}" matches only the path "/",
+// whereas the pattern "/" matches every path.
+//
+// For matching, both pattern paths and incoming request paths are unescaped segment by segment.
+// So, for example, the path "/a%2Fb/100%25" is treated as having two segments, "a/b" and "100%".
+// The pattern "/a%2fb/" matches it, but the pattern "/a/b/" does not.
+//
+// # Precedence
+//
+// If two or more patterns match a request, then the most specific pattern takes precedence.
+// A pattern P1 is more specific than P2 if P1 matches a strict subset of P2’s requests;
+// that is, if P2 matches all the requests of P1 and more.
+// If neither is more specific, then the patterns conflict.
+// There is one exception to this rule, for backwards compatibility:
+// if two patterns would otherwise conflict and one has a host while the other does not,
+// then the pattern with the host takes precedence.
+// If a pattern passed [ServeMux.Handle] or [ServeMux.HandleFunc] conflicts with
+// another pattern that is already registered, those functions panic.
+//
+// As an example of the general rule, "/images/thumbnails/" is more specific than "/images/",
+// so both can be registered.
+// The former matches paths beginning with "/images/thumbnails/"
+// and the latter will match any other path in the "/images/" subtree.
+//
+// As another example, consider the patterns "GET /" and "/index.html":
+// both match a GET request for "/index.html", but the former pattern
+// matches all other GET and HEAD requests, while the latter matches any
+// request for "/index.html" that uses a different method.
+// The patterns conflict.
+//
+// # Trailing-slash redirection
+//
+// Consider a [ServeMux] with a handler for a subtree, registered using a trailing slash or "..." wildcard.
+// If the ServeMux receives a request for the subtree root without a trailing slash,
+// it redirects the request by adding the trailing slash.
+// This behavior can be overridden with a separate registration for the path without
+// the trailing slash or "..." wildcard. For example, registering "/images/" causes ServeMux
 // to redirect a request for "/images" to "/images/", unless "/images" has
 // been registered separately.
 //
-// Patterns may optionally begin with a host name, restricting matches to
-// URLs on that host only. Host-specific patterns take precedence over
-// general patterns, so that a handler might register for the two patterns
-// "/codesearch" and "codesearch.google.com/" without also taking over
-// requests for "http://www.google.com/".
+// # Request sanitizing
 //
 // ServeMux also takes care of sanitizing the URL request path and the Host
 // header, stripping the port number and redirecting any request containing . or
-// .. elements or repeated slashes to an equivalent, cleaner URL.
+// .. segments or repeated slashes to an equivalent, cleaner URL.
+//
+// # Compatibility
+//
+// The pattern syntax and matching behavior of ServeMux changed significantly
+// in Go 1.22. To restore the old behavior, set the GODEBUG environment variable
+// to "httpmuxgo121=1". This setting is read once, at program startup; changes
+// during execution will be ignored.
+//
+// The backwards-incompatible changes include:
+//   - Wildcards are just ordinary literal path segments in 1.21.
+//     For example, the pattern "/{x}" will match only that path in 1.21,
+//     but will match any one-segment path in 1.22.
+//   - In 1.21, no pattern was rejected, unless it was empty or conflicted with an existing pattern.
+//     In 1.22, syntactically invalid patterns will cause [ServeMux.Handle] and [ServeMux.HandleFunc] to panic.
+//     For example, in 1.21, the patterns "/{"  and "/a{x}" match themselves,
+//     but in 1.22 they are invalid and will cause a panic when registered.
+//   - In 1.22, each segment of a pattern is unescaped; this was not done in 1.21.
+//     For example, in 1.22 the pattern "/%61" matches the path "/a" ("%61" being the URL escape sequence for "a"),
+//     but in 1.21 it would match only the path "/%2561" (where "%25" is the escape for the percent sign).
+//   - When matching patterns to paths, in 1.22 each segment of the path is unescaped; in 1.21, the entire path is unescaped.
+//     This change mostly affects how paths with %2F escapes adjacent to slashes are treated.
+//     See https://go.dev/issue/21955 for details.
 type ServeMux struct {
-	mu    sync.RWMutex
-	m     map[string]muxEntry
-	es    []muxEntry // slice of entries sorted from longest to shortest.
-	hosts bool       // whether any patterns contain hostnames
+	mu       sync.RWMutex
+	tree     routingNode
+	index    routingIndex
+	patterns []*pattern  // TODO(jba): remove if possible
+	mux121   serveMux121 // used only when GODEBUG=httpmuxgo121=1
 }
 
-type muxEntry struct {
-	h       Handler
-	pattern string
+// NewServeMux allocates and returns a new [ServeMux].
+func NewServeMux() *ServeMux {
+	return &ServeMux{}
 }
 
-// NewServeMux allocates and returns a new ServeMux.
-func NewServeMux() *ServeMux { return new(ServeMux) }
-
-// DefaultServeMux is the default ServeMux used by Serve.
+// DefaultServeMux is the default [ServeMux] used by [Serve].
 var DefaultServeMux = &defaultServeMux
 
 var defaultServeMux ServeMux
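
A minimal sketch (not part of the patch) of the Go 1.22 pattern syntax documented above, using method prefixes, wildcard segments, and Request.PathValue; the bucket/object route and handler bodies are made up for illustration.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	mux := http.NewServeMux()

	// Method and wildcard segments as described in the ServeMux documentation;
	// wildcard matches are read back with Request.PathValue.
	mux.HandleFunc("GET /b/{bucket}/o/{objectname...}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "bucket=%s object=%s", r.PathValue("bucket"), r.PathValue("objectname"))
	})
	// "/{$}" matches only the path "/", whereas "/" would match every path.
	mux.HandleFunc("GET /{$}", func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "root")
	})

	srv := httptest.NewServer(mux)
	defer srv.Close()

	res, err := http.Get(srv.URL + "/b/photos/o/2024/cat.png")
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(res.Body)
	res.Body.Close()
	fmt.Println(string(body)) // bucket=photos object=2024/cat.png
}

As noted in the Compatibility section of the doc comment, running the program with GODEBUG=httpmuxgo121=1 restores the Go 1.21 matching behavior; the setting is read once at startup.
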
@@ -2372,66 +2482,6 @@
 	return host
 }
 
-// Find a handler on a handler map given a path string.
-// Most-specific (longest) pattern wins.
-func (mux *ServeMux) match(path string) (h Handler, pattern string) {
-	// Check for exact match first.
-	v, ok := mux.m[path]
-	if ok {
-		return v.h, v.pattern
-	}
-
-	// Check for longest valid match.  mux.es contains all patterns
-	// that end in / sorted from longest to shortest.
-	for _, e := range mux.es {
-		if strings.HasPrefix(path, e.pattern) {
-			return e.h, e.pattern
-		}
-	}
-	return nil, ""
-}
-
-// redirectToPathSlash determines if the given path needs appending "/" to it.
-// This occurs when a handler for path + "/" was already registered, but
-// not for path itself. If the path needs appending to, it creates a new
-// URL, setting the path to u.Path + "/" and returning true to indicate so.
-func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
-	mux.mu.RLock()
-	shouldRedirect := mux.shouldRedirectRLocked(host, path)
-	mux.mu.RUnlock()
-	if !shouldRedirect {
-		return u, false
-	}
-	path = path + "/"
-	u = &url.URL{Path: path, RawQuery: u.RawQuery}
-	return u, true
-}
-
-// shouldRedirectRLocked reports whether the given path and host should be redirected to
-// path+"/". This should happen if a handler is registered for path+"/" but
-// not path -- see comments at ServeMux.
-func (mux *ServeMux) shouldRedirectRLocked(host, path string) bool {
-	p := []string{path, host + path}
-
-	for _, c := range p {
-		if _, exist := mux.m[c]; exist {
-			return false
-		}
-	}
-
-	n := len(path)
-	if n == 0 {
-		return false
-	}
-	for _, c := range p {
-		if _, exist := mux.m[c+"/"]; exist {
-			return path[n-1] != '/'
-		}
-	}
-
-	return false
-}
-
 // Handler returns the handler to use for the given request,
 // consulting r.Method, r.Host, and r.URL.Path. It always returns
 // a non-nil handler. If the path is not in its canonical form, the
@@ -2443,61 +2493,175 @@
 //
 // Handler also returns the registered pattern that matches the
 // request or, in the case of internally-generated redirects,
-// the pattern that will match after following the redirect.
+// the path that will match after following the redirect.
 //
 // If there is no registered handler that applies to the request,
 // Handler returns a “page not found” handler and an empty pattern.
 func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
+	if use121 {
+		return mux.mux121.findHandler(r)
+	}
+	h, p, _, _ := mux.findHandler(r)
+	return h, p
+}
 
+// findHandler finds a handler for a request.
+// If there is a matching handler, it returns it and the pattern that matched.
+// Otherwise it returns a Redirect or NotFound handler with the path that would match
+// after the redirect.
+func (mux *ServeMux) findHandler(r *Request) (h Handler, patStr string, _ *pattern, matches []string) {
+	var n *routingNode
+	host := r.URL.Host
+	escapedPath := r.URL.EscapedPath()
+	path := escapedPath
 	// CONNECT requests are not canonicalized.
 	if r.Method == "CONNECT" {
 		// If r.URL.Path is /tree and its handler is not registered,
 		// the /tree -> /tree/ redirect applies to CONNECT requests
 		// but the path canonicalization does not.
-		if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
-			return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
+		_, _, u := mux.matchOrRedirect(host, r.Method, path, r.URL)
+		if u != nil {
+			return RedirectHandler(u.String(), StatusMovedPermanently), u.Path, nil, nil
 		}
+		// Redo the match, this time with r.Host instead of r.URL.Host.
+		// Pass a nil URL to skip the trailing-slash redirect logic.
+		n, matches, _ = mux.matchOrRedirect(r.Host, r.Method, path, nil)
+	} else {
+		// All other requests have any port stripped and path cleaned
+		// before passing to mux.handler.
+		host = stripHostPort(r.Host)
+		path = cleanPath(path)
 
-		return mux.handler(r.Host, r.URL.Path)
+		// If the given path is /tree and its handler is not registered,
+		// redirect for /tree/.
+		var u *url.URL
+		n, matches, u = mux.matchOrRedirect(host, r.Method, path, r.URL)
+		if u != nil {
+			return RedirectHandler(u.String(), StatusMovedPermanently), u.Path, nil, nil
+		}
+		if path != escapedPath {
+			// Redirect to cleaned path.
+			patStr := ""
+			if n != nil {
+				patStr = n.pattern.String()
+			}
+			u := &url.URL{Path: path, RawQuery: r.URL.RawQuery}
+			return RedirectHandler(u.String(), StatusMovedPermanently), patStr, nil, nil
+		}
 	}
-
-	// All other requests have any port stripped and path cleaned
-	// before passing to mux.handler.
-	host := stripHostPort(r.Host)
-	path := cleanPath(r.URL.Path)
-
-	// If the given path is /tree and its handler is not registered,
-	// redirect for /tree/.
-	if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
-		return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
+	if n == nil {
+		// We didn't find a match with the request method. To distinguish between
+		// Not Found and Method Not Allowed, see if there is another pattern that
+		// matches except for the method.
+		allowedMethods := mux.matchingMethods(host, path)
+		if len(allowedMethods) > 0 {
+			return HandlerFunc(func(w ResponseWriter, r *Request) {
+				w.Header().Set("Allow", strings.Join(allowedMethods, ", "))
+				Error(w, StatusText(StatusMethodNotAllowed), StatusMethodNotAllowed)
+			}), "", nil, nil
+		}
+		return NotFoundHandler(), "", nil, nil
 	}
-
-	if path != r.URL.Path {
-		_, pattern = mux.handler(host, path)
-		u := &url.URL{Path: path, RawQuery: r.URL.RawQuery}
-		return RedirectHandler(u.String(), StatusMovedPermanently), pattern
-	}
-
-	return mux.handler(host, r.URL.Path)
+	return n.handler, n.pattern.String(), n.pattern, matches
 }
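
A short illustrative sketch (not part of the patch) of the Method Not Allowed branch in findHandler above: when a registered pattern matches the path but not the method, the mux responds 405 and sets the Allow header. The "/item" route and test-server setup are invented for the example.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("GET /item", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})

	srv := httptest.NewServer(mux)
	defer srv.Close()

	// POST does not match the registered "GET /item" pattern, but the path does,
	// so findHandler responds 405 and lists the methods that would match.
	res, err := http.Post(srv.URL+"/item", "text/plain", nil)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println(res.StatusCode, res.Header.Get("Allow")) // 405 plus the allowed methods
}
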
 
-// handler is the main implementation of Handler.
-// The path is known to be in canonical form, except for CONNECT methods.
-func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
+// matchOrRedirect looks up a node in the tree that matches the host, method and path.
+//
+// If the url argument is non-nil, matchOrRedirect also deals with trailing-slash
+// redirection: when a path doesn't match exactly, the match is tried again
+// after appending "/" to the path. If that second match succeeds, the last
+// return value is the URL to redirect to.
+func (mux *ServeMux) matchOrRedirect(host, method, path string, u *url.URL) (_ *routingNode, matches []string, redirectTo *url.URL) {
 	mux.mu.RLock()
 	defer mux.mu.RUnlock()
 
-	// Host-specific pattern takes precedence over generic ones
-	if mux.hosts {
-		h, pattern = mux.match(host + path)
+	n, matches := mux.tree.match(host, method, path)
+	// If we have an exact match, or we were asked not to try trailing-slash redirection,
+	// then we're done.
+	if !exactMatch(n, path) && u != nil {
+		// If there is an exact match with a trailing slash, then redirect.
+		path += "/"
+		n2, _ := mux.tree.match(host, method, path)
+		if exactMatch(n2, path) {
+			return nil, nil, &url.URL{Path: cleanPath(u.Path) + "/", RawQuery: u.RawQuery}
+		}
 	}
-	if h == nil {
-		h, pattern = mux.match(path)
+	return n, matches, nil
+}
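
An illustrative sketch (not part of the patch) of the trailing-slash redirect that matchOrRedirect performs: registering "/images/" and requesting "/images" yields a 301 to "/images/", as described in the ServeMux documentation. The client setup below disables redirect-following only so the 301 is observable.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {})

	srv := httptest.NewServer(mux)
	defer srv.Close()

	// Return the redirect response itself instead of following it.
	c := &http.Client{CheckRedirect: func(*http.Request, []*http.Request) error {
		return http.ErrUseLastResponse
	}}
	res, err := c.Get(srv.URL + "/images")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println(res.StatusCode, res.Header.Get("Location")) // 301 /images/
}
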
+
+// exactMatch reports whether the node's pattern exactly matches the path.
+// As a special case, if the node is nil, exactMatch returns false.
+//
+// Before wildcards were introduced, it was clear that an exact match meant
+// that the pattern and path were the same string. The only other possibility
+// was that a trailing-slash pattern, like "/", matched a path longer than
+// it, like "/a".
+//
+// With wildcards, we define an inexact match as any one where a multi wildcard
+// matches a non-empty string. All other matches are exact.
+// For example, these are all exact matches:
+//
+//	pattern   path
+//	/a        /a
+//	/{x}      /a
+//	/a/{$}    /a/
+//	/a/       /a/
+//
+// The last case has a multi wildcard (implicitly), but the match is exact because
+// the wildcard matches the empty string.
+//
+// Examples of matches that are not exact:
+//
+//	pattern   path
+//	/         /a
+//	/a/{x...} /a/b
+func exactMatch(n *routingNode, path string) bool {
+	if n == nil {
+		return false
 	}
-	if h == nil {
-		h, pattern = NotFoundHandler(), ""
+	// We can't directly implement the definition (empty match for multi
+	// wildcard) because we don't record a match for anonymous multis.
+
+	// If there is no multi, the match is exact.
+	if !n.pattern.lastSegment().multi {
+		return true
 	}
-	return
+
+	// If the path doesn't end in a trailing slash, then the multi match
+	// is non-empty.
+	if len(path) > 0 && path[len(path)-1] != '/' {
+		return false
+	}
+	// Only patterns ending in {$} or a multi wildcard can
+	// match a path with a trailing slash.
+	// For the match to be exact, the number of pattern
+	// segments should be the same as the number of slashes in the path.
+	// E.g. "/a/b/{$}" and "/a/b/{...}" exactly match "/a/b/", but "/a/" does not.
+	return len(n.pattern.segments) == strings.Count(path, "/")
+}
+
+// matchingMethods returns a sorted list of all methods that would match the given host and path.
+func (mux *ServeMux) matchingMethods(host, path string) []string {
+	// Hold the read lock for the entire method so that the two matches are done
+	// on the same set of registered patterns.
+	mux.mu.RLock()
+	defer mux.mu.RUnlock()
+	ms := map[string]bool{}
+	mux.tree.matchingMethods(host, path, ms)
+	// matchOrRedirect will try appending a trailing slash if there is no match.
+	mux.tree.matchingMethods(host, path+"/", ms)
+	methods := mapKeys(ms)
+	sort.Strings(methods)
+	return methods
+}
+
+// TODO(jba): replace with maps.Keys when it is defined.
+func mapKeys[K comparable, V any](m map[K]V) []K {
+	var ks []K
+	for k := range m {
+		ks = append(ks, k)
+	}
+	return ks
 }
 
 // ServeHTTP dispatches the request to the handler whose
@@ -2510,82 +2674,117 @@
 		w.WriteHeader(StatusBadRequest)
 		return
 	}
-	h, _ := mux.Handler(r)
+	var h Handler
+	if use121 {
+		h, _ = mux.mux121.findHandler(r)
+	} else {
+		h, _, r.pat, r.matches = mux.findHandler(r)
+	}
 	h.ServeHTTP(w, r)
 }
 
+// The four functions below all call ServeMux.register so that the caller
+// location recorded for each pattern (pat.loc) always refers to user code.
+
 // Handle registers the handler for the given pattern.
-// If a handler already exists for pattern, Handle panics.
+// If the given pattern conflicts with one that is already registered, Handle
+// panics.
 func (mux *ServeMux) Handle(pattern string, handler Handler) {
-	mux.mu.Lock()
-	defer mux.mu.Unlock()
-
-	if pattern == "" {
-		panic("http: invalid pattern")
+	if use121 {
+		mux.mux121.handle(pattern, handler)
+	} else {
+		mux.register(pattern, handler)
 	}
-	if handler == nil {
-		panic("http: nil handler")
-	}
-	if _, exist := mux.m[pattern]; exist {
-		panic("http: multiple registrations for " + pattern)
-	}
-
-	if mux.m == nil {
-		mux.m = make(map[string]muxEntry)
-	}
-	e := muxEntry{h: handler, pattern: pattern}
-	mux.m[pattern] = e
-	if pattern[len(pattern)-1] == '/' {
-		mux.es = appendSorted(mux.es, e)
-	}
-
-	if pattern[0] != '/' {
-		mux.hosts = true
-	}
-}
-
-func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
-	n := len(es)
-	i := sort.Search(n, func(i int) bool {
-		return len(es[i].pattern) < len(e.pattern)
-	})
-	if i == n {
-		return append(es, e)
-	}
-	// we now know that i points at where we want to insert
-	es = append(es, muxEntry{}) // try to grow the slice in place, any entry works.
-	copy(es[i+1:], es[i:])      // Move shorter entries down
-	es[i] = e
-	return es
 }
 
 // HandleFunc registers the handler function for the given pattern.
+// If the given pattern conflicts with one that is already registered, HandleFunc
+// panics.
 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
-	if handler == nil {
-		panic("http: nil handler")
+	if use121 {
+		mux.mux121.handleFunc(pattern, handler)
+	} else {
+		mux.register(pattern, HandlerFunc(handler))
 	}
-	mux.Handle(pattern, HandlerFunc(handler))
 }
 
-// Handle registers the handler for the given pattern
-// in the DefaultServeMux.
-// The documentation for ServeMux explains how patterns are matched.
-func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
+// Handle registers the handler for the given pattern in [DefaultServeMux].
+// The documentation for [ServeMux] explains how patterns are matched.
+func Handle(pattern string, handler Handler) {
+	if use121 {
+		DefaultServeMux.mux121.handle(pattern, handler)
+	} else {
+		DefaultServeMux.register(pattern, handler)
+	}
+}
 
-// HandleFunc registers the handler function for the given pattern
-// in the DefaultServeMux.
-// The documentation for ServeMux explains how patterns are matched.
+// HandleFunc registers the handler function for the given pattern in [DefaultServeMux].
+// The documentation for [ServeMux] explains how patterns are matched.
 func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
-	DefaultServeMux.HandleFunc(pattern, handler)
+	if use121 {
+		DefaultServeMux.mux121.handleFunc(pattern, handler)
+	} else {
+		DefaultServeMux.register(pattern, HandlerFunc(handler))
+	}
+}
+
+func (mux *ServeMux) register(pattern string, handler Handler) {
+	if err := mux.registerErr(pattern, handler); err != nil {
+		panic(err)
+	}
+}
+
+func (mux *ServeMux) registerErr(patstr string, handler Handler) error {
+	if patstr == "" {
+		return errors.New("http: invalid pattern")
+	}
+	if handler == nil {
+		return errors.New("http: nil handler")
+	}
+	if f, ok := handler.(HandlerFunc); ok && f == nil {
+		return errors.New("http: nil handler")
+	}
+
+	pat, err := parsePattern(patstr)
+	if err != nil {
+		return fmt.Errorf("parsing %q: %w", patstr, err)
+	}
+
+	// Get the caller's location, for better conflict error messages.
+	// Skip register and whatever calls it.
+	_, file, line, ok := runtime.Caller(3)
+	if !ok {
+		pat.loc = "unknown location"
+	} else {
+		pat.loc = fmt.Sprintf("%s:%d", file, line)
+	}
+
+	mux.mu.Lock()
+	defer mux.mu.Unlock()
+	// Check for conflict.
+	if err := mux.index.possiblyConflictingPatterns(pat, func(pat2 *pattern) error {
+		if pat.conflictsWith(pat2) {
+			d := describeConflict(pat, pat2)
+			return fmt.Errorf("pattern %q (registered at %s) conflicts with pattern %q (registered at %s):\n%s",
+				pat, pat.loc, pat2, pat2.loc, d)
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+	mux.tree.addPattern(pat, handler)
+	mux.index.addPattern(pat)
+	mux.patterns = append(mux.patterns, pat)
+	return nil
 }
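
A small sketch (not part of the patch) of the conflict detection in registerErr: per the ServeMux documentation above, "GET /" and "/index.html" both match a GET request for "/index.html" and neither is more specific, so the second registration panics with both patterns and their registration locations.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println("panic:", err) // names both patterns and where each was registered
		}
	}()

	mux := http.NewServeMux()
	mux.Handle("GET /", http.NotFoundHandler())
	// Conflicts with "GET /": both match a GET request for "/index.html",
	// and neither pattern is more specific than the other.
	mux.Handle("/index.html", http.NotFoundHandler())
}
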
 
 // Serve accepts incoming HTTP connections on the listener l,
 // creating a new service goroutine for each. The service goroutines
 // read requests and then call handler to reply to them.
 //
-// The handler is typically nil, in which case the DefaultServeMux is used.
+// The handler is typically nil, in which case [DefaultServeMux] is used.
 //
-// HTTP/2 support is only enabled if the Listener returns *tls.Conn
+// HTTP/2 support is only enabled if the Listener returns [*tls.Conn]
 // connections and they were configured with "h2" in the TLS
 // Config.NextProtos.
 //
@@ -2599,7 +2798,7 @@
 // creating a new service goroutine for each. The service goroutines
 // read requests and then call handler to reply to them.
 //
-// The handler is typically nil, in which case the DefaultServeMux is used.
+// The handler is typically nil, in which case [DefaultServeMux] is used.
 //
 // Additionally, files containing a certificate and matching private key
 // for the server must be provided. If the certificate is signed by a
@@ -2725,13 +2924,13 @@
 }
 
 // Close immediately closes all active net.Listeners and any
-// connections in state StateNew, StateActive, or StateIdle. For a
-// graceful shutdown, use Shutdown.
+// connections in state [StateNew], [StateActive], or [StateIdle]. For a
+// graceful shutdown, use [Server.Shutdown].
 //
 // Close does not attempt to close (and does not even know about)
 // any hijacked connections, such as WebSockets.
 //
-// Close returns any error returned from closing the Server's
+// Close returns any error returned from closing the [Server]'s
 // underlying Listener(s).
 func (srv *Server) Close() error {
 	srv.inShutdown.Store(true)
@@ -2769,16 +2968,16 @@
 // indefinitely for connections to return to idle and then shut down.
 // If the provided context expires before the shutdown is complete,
 // Shutdown returns the context's error, otherwise it returns any
-// error returned from closing the Server's underlying Listener(s).
+// error returned from closing the [Server]'s underlying Listener(s).
 //
-// When Shutdown is called, Serve, ListenAndServe, and
-// ListenAndServeTLS immediately return ErrServerClosed. Make sure the
+// When Shutdown is called, [Serve], [ListenAndServe], and
+// [ListenAndServeTLS] immediately return [ErrServerClosed]. Make sure the
 // program doesn't exit and waits instead for Shutdown to return.
 //
 // Shutdown does not attempt to close nor wait for hijacked
 // connections such as WebSockets. The caller of Shutdown should
 // separately notify such long-lived connections of shutdown and wait
-// for them to close, if desired. See RegisterOnShutdown for a way to
+// for them to close, if desired. See [Server.RegisterOnShutdown] for a way to
 // register shutdown notification functions.
 //
 // Once Shutdown has been called on a server, it may not be reused;
@@ -2821,7 +3020,7 @@
 	}
 }
 
-// RegisterOnShutdown registers a function to call on Shutdown.
+// RegisterOnShutdown registers a function to call on [Server.Shutdown].
 // This can be used to gracefully shutdown connections that have
 // undergone ALPN protocol upgrade or that have been hijacked.
 // This function should start protocol-specific graceful shutdown,
@@ -2869,7 +3068,7 @@
 }
 
 // A ConnState represents the state of a client connection to a server.
-// It's used by the optional Server.ConnState hook.
+// It's used by the optional [Server.ConnState] hook.
 type ConnState int
 
 const (
@@ -2946,7 +3145,7 @@
 // behavior doesn't match that of many proxies, and the mismatch can lead to
 // security issues.
 //
-// AllowQuerySemicolons should be invoked before Request.ParseForm is called.
+// AllowQuerySemicolons should be invoked before [Request.ParseForm] is called.
 func AllowQuerySemicolons(h Handler) Handler {
 	return HandlerFunc(func(w ResponseWriter, r *Request) {
 		if strings.Contains(r.URL.RawQuery, ";") {
@@ -2963,13 +3162,13 @@
 }
 
 // ListenAndServe listens on the TCP network address srv.Addr and then
-// calls Serve to handle requests on incoming connections.
+// calls [Serve] to handle requests on incoming connections.
 // Accepted connections are configured to enable TCP keep-alives.
 //
 // If srv.Addr is blank, ":http" is used.
 //
-// ListenAndServe always returns a non-nil error. After Shutdown or Close,
-// the returned error is ErrServerClosed.
+// ListenAndServe always returns a non-nil error. After [Server.Shutdown] or [Server.Close],
+// the returned error is [ErrServerClosed].
 func (srv *Server) ListenAndServe() error {
 	if srv.shuttingDown() {
 		return ErrServerClosed
@@ -3009,20 +3208,20 @@
 	return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
 }
 
-// ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
-// and ListenAndServeTLS methods after a call to Shutdown or Close.
+// ErrServerClosed is returned by the [Server.Serve], [ServeTLS], [ListenAndServe],
+// and [ListenAndServeTLS] methods after a call to [Server.Shutdown] or [Server.Close].
 var ErrServerClosed = errors.New("http: Server closed")
 
 // Serve accepts incoming connections on the Listener l, creating a
 // new service goroutine for each. The service goroutines read requests and
 // then call srv.Handler to reply to them.
 //
-// HTTP/2 support is only enabled if the Listener returns *tls.Conn
+// HTTP/2 support is only enabled if the Listener returns [*tls.Conn]
 // connections and they were configured with "h2" in the TLS
 // Config.NextProtos.
 //
 // Serve always returns a non-nil error and closes l.
-// After Shutdown or Close, the returned error is ErrServerClosed.
+// After [Server.Shutdown] or [Server.Close], the returned error is [ErrServerClosed].
 func (srv *Server) Serve(l net.Listener) error {
 	if fn := testHookServerServe; fn != nil {
 		fn(srv, l) // call hook with unwrapped listener
@@ -3092,14 +3291,14 @@
 // setup and then read requests, calling srv.Handler to reply to them.
 //
 // Files containing a certificate and matching private key for the
-// server must be provided if neither the Server's
+// server must be provided if neither the [Server]'s
 // TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
 // If the certificate is signed by a certificate authority, the
 // certFile should be the concatenation of the server's certificate,
 // any intermediates, and the CA's certificate.
 //
-// ServeTLS always returns a non-nil error. After Shutdown or Close, the
-// returned error is ErrServerClosed.
+// ServeTLS always returns a non-nil error. After [Server.Shutdown] or [Server.Close], the
+// returned error is [ErrServerClosed].
 func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
 	// Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
 	// before we clone it and create the TLS Listener.
@@ -3228,10 +3427,10 @@
 }
 
 // ListenAndServe listens on the TCP network address addr and then calls
-// Serve with handler to handle requests on incoming connections.
+// [Serve] with handler to handle requests on incoming connections.
 // Accepted connections are configured to enable TCP keep-alives.
 //
-// The handler is typically nil, in which case the DefaultServeMux is used.
+// The handler is typically nil, in which case [DefaultServeMux] is used.
 //
 // ListenAndServe always returns a non-nil error.
 func ListenAndServe(addr string, handler Handler) error {
@@ -3239,7 +3438,7 @@
 	return server.ListenAndServe()
 }
 
-// ListenAndServeTLS acts identically to ListenAndServe, except that it
+// ListenAndServeTLS acts identically to [ListenAndServe], except that it
 // expects HTTPS connections. Additionally, files containing a certificate and
 // matching private key for the server must be provided. If the certificate
 // is signed by a certificate authority, the certFile should be the concatenation
@@ -3250,11 +3449,11 @@
 }
 
 // ListenAndServeTLS listens on the TCP network address srv.Addr and
-// then calls ServeTLS to handle requests on incoming TLS connections.
+// then calls [ServeTLS] to handle requests on incoming TLS connections.
 // Accepted connections are configured to enable TCP keep-alives.
 //
 // Filenames containing a certificate and matching private key for the
-// server must be provided if neither the Server's TLSConfig.Certificates
+// server must be provided if neither the [Server]'s TLSConfig.Certificates
 // nor TLSConfig.GetCertificate are populated. If the certificate is
 // signed by a certificate authority, the certFile should be the
 // concatenation of the server's certificate, any intermediates, and
@@ -3262,8 +3461,8 @@
 //
 // If srv.Addr is blank, ":https" is used.
 //
-// ListenAndServeTLS always returns a non-nil error. After Shutdown or
-// Close, the returned error is ErrServerClosed.
+// ListenAndServeTLS always returns a non-nil error. After [Server.Shutdown] or
+// [Server.Close], the returned error is [ErrServerClosed].
 func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
 	if srv.shuttingDown() {
 		return ErrServerClosed
@@ -3333,17 +3532,17 @@
 	}
 }
 
-// TimeoutHandler returns a Handler that runs h with the given time limit.
+// TimeoutHandler returns a [Handler] that runs h with the given time limit.
 //
 // The new Handler calls h.ServeHTTP to handle each request, but if a
 // call runs for longer than its time limit, the handler responds with
 // a 503 Service Unavailable error and the given message in its body.
 // (If msg is empty, a suitable default message will be sent.)
-// After such a timeout, writes by h to its ResponseWriter will return
-// ErrHandlerTimeout.
+// After such a timeout, writes by h to its [ResponseWriter] will return
+// [ErrHandlerTimeout].
 //
-// TimeoutHandler supports the Pusher interface but does not support
-// the Hijacker or Flusher interfaces.
+// TimeoutHandler supports the [Pusher] interface but does not support
+// the [Hijacker] or [Flusher] interfaces.
 func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
 	return &timeoutHandler{
 		handler: h,
@@ -3352,7 +3551,7 @@
 	}
 }
 
-// ErrHandlerTimeout is returned on ResponseWriter Write calls
+// ErrHandlerTimeout is returned on [ResponseWriter] Write calls
 // in handlers which have timed out.
 var ErrHandlerTimeout = errors.New("http: Handler timeout")
 
@@ -3441,7 +3640,7 @@
 
 var _ Pusher = (*timeoutWriter)(nil)
 
-// Push implements the Pusher interface.
+// Push implements the [Pusher] interface.
 func (tw *timeoutWriter) Push(target string, opts *PushOptions) error {
 	if pusher, ok := tw.w.(Pusher); ok {
 		return pusher.Push(target, opts)
@@ -3526,7 +3725,7 @@
 	h   serverHandler
 }
 
-// BaseContext is an exported but unadvertised http.Handler method
+// BaseContext is an exported but unadvertised [http.Handler] method
 // recognized by x/net/http2 to pass down a context; the TLSNextProto
 // API predates context support so we shoehorn through the only
 // interface we have available.
@@ -3613,7 +3812,6 @@
 		break
 	}
 	return
-
 }
 
 func strSliceContains(ss []string, s string) bool {
@@ -3635,7 +3833,7 @@
 	return false
 }
 
-// MaxBytesHandler returns a Handler that runs h with its ResponseWriter and Request.Body wrapped by a MaxBytesReader.
+// MaxBytesHandler returns a [Handler] that runs h with its [ResponseWriter] and [Request.Body] wrapped by a MaxBytesReader.
 func MaxBytesHandler(h Handler, n int64) Handler {
 	return HandlerFunc(func(w ResponseWriter, r *Request) {
 		r2 := *r
diff --git a/src/net/http/server_test.go b/src/net/http/server_test.go
index d17c5c1..e81e3bb 100644
--- a/src/net/http/server_test.go
+++ b/src/net/http/server_test.go
@@ -8,6 +8,8 @@
 
 import (
 	"fmt"
+	"net/url"
+	"regexp"
 	"testing"
 	"time"
 )
@@ -64,6 +66,190 @@
 	}
 }
 
+type handler struct{ i int }
+
+func (handler) ServeHTTP(ResponseWriter, *Request) {}
+
+func TestFindHandler(t *testing.T) {
+	mux := NewServeMux()
+	for _, ph := range []struct {
+		pat string
+		h   Handler
+	}{
+		{"/", &handler{1}},
+		{"/foo/", &handler{2}},
+		{"/foo", &handler{3}},
+		{"/bar/", &handler{4}},
+		{"//foo", &handler{5}},
+	} {
+		mux.Handle(ph.pat, ph.h)
+	}
+
+	for _, test := range []struct {
+		method      string
+		path        string
+		wantHandler string
+	}{
+		{"GET", "/", "&http.handler{i:1}"},
+		{"GET", "//", `&http.redirectHandler{url:"/", code:301}`},
+		{"GET", "/foo/../bar/./..//baz", `&http.redirectHandler{url:"/baz", code:301}`},
+		{"GET", "/foo", "&http.handler{i:3}"},
+		{"GET", "/foo/x", "&http.handler{i:2}"},
+		{"GET", "/bar/x", "&http.handler{i:4}"},
+		{"GET", "/bar", `&http.redirectHandler{url:"/bar/", code:301}`},
+		{"CONNECT", "/", "&http.handler{i:1}"},
+		{"CONNECT", "//", "&http.handler{i:1}"},
+		{"CONNECT", "//foo", "&http.handler{i:5}"},
+		{"CONNECT", "/foo/../bar/./..//baz", "&http.handler{i:2}"},
+		{"CONNECT", "/foo", "&http.handler{i:3}"},
+		{"CONNECT", "/foo/x", "&http.handler{i:2}"},
+		{"CONNECT", "/bar/x", "&http.handler{i:4}"},
+		{"CONNECT", "/bar", `&http.redirectHandler{url:"/bar/", code:301}`},
+	} {
+		var r Request
+		r.Method = test.method
+		r.Host = "example.com"
+		r.URL = &url.URL{Path: test.path}
+		gotH, _, _, _ := mux.findHandler(&r)
+		got := fmt.Sprintf("%#v", gotH)
+		if got != test.wantHandler {
+			t.Errorf("%s %q: got %q, want %q", test.method, test.path, got, test.wantHandler)
+		}
+	}
+}
+
+func TestEmptyServeMux(t *testing.T) {
+	// Verify that a ServeMux with nothing registered
+	// doesn't panic.
+	mux := NewServeMux()
+	var r Request
+	r.Method = "GET"
+	r.Host = "example.com"
+	r.URL = &url.URL{Path: "/"}
+	_, p := mux.Handler(&r)
+	if p != "" {
+		t.Errorf(`got %q, want ""`, p)
+	}
+}
+
+func TestRegisterErr(t *testing.T) {
+	mux := NewServeMux()
+	h := &handler{}
+	mux.Handle("/a", h)
+
+	for _, test := range []struct {
+		pattern    string
+		handler    Handler
+		wantRegexp string
+	}{
+		{"", h, "invalid pattern"},
+		{"/", nil, "nil handler"},
+		{"/", HandlerFunc(nil), "nil handler"},
+		{"/{x", h, `parsing "/\{x": at offset 1: bad wildcard segment`},
+		{"/a", h, `conflicts with pattern.* \(registered at .*/server_test.go:\d+`},
+	} {
+		t.Run(fmt.Sprintf("%s:%#v", test.pattern, test.handler), func(t *testing.T) {
+			err := mux.registerErr(test.pattern, test.handler)
+			if err == nil {
+				t.Fatal("got nil error")
+			}
+			re := regexp.MustCompile(test.wantRegexp)
+			if g := err.Error(); !re.MatchString(g) {
+				t.Errorf("\ngot %q\nwant string matching %q", g, test.wantRegexp)
+			}
+		})
+	}
+}
+
+func TestExactMatch(t *testing.T) {
+	for _, test := range []struct {
+		pattern string
+		path    string
+		want    bool
+	}{
+		{"", "/a", false},
+		{"/", "/a", false},
+		{"/a", "/a", true},
+		{"/a/{x...}", "/a/b", false},
+		{"/a/{x}", "/a/b", true},
+		{"/a/b/", "/a/b/", true},
+		{"/a/b/{$}", "/a/b/", true},
+		{"/a/", "/a/b/", false},
+	} {
+		var n *routingNode
+		if test.pattern != "" {
+			pat := mustParsePattern(t, test.pattern)
+			n = &routingNode{pattern: pat}
+		}
+		got := exactMatch(n, test.path)
+		if got != test.want {
+			t.Errorf("%q, %s: got %t, want %t", test.pattern, test.path, got, test.want)
+		}
+	}
+}
+
+func TestEscapedPathsAndPatterns(t *testing.T) {
+	matches := []struct {
+		pattern  string
+		paths    []string // paths that match the pattern
+		paths121 []string // paths that matched the pattern in Go 1.21.
+	}{
+		{
+			"/a", // this pattern matches a path that unescapes to "/a"
+			[]string{"/a", "/%61"},
+			[]string{"/a", "/%61"},
+		},
+		{
+			"/%62", // patterns are unescaped by segment; matches paths that unescape to "/b"
+			[]string{"/b", "/%62"},
+			[]string{"/%2562"}, // In 1.21, patterns were not unescaped but paths were.
+		},
+		{
+			"/%7B/%7D", // the only way to write a pattern that matches '{' or '}'
+			[]string{"/{/}", "/%7b/}", "/{/%7d", "/%7B/%7D"},
+			[]string{"/%257B/%257D"}, // In 1.21, patterns were not unescaped.
+		},
+		{
+			"/%x", // patterns that do not unescape are left unchanged
+			[]string{"/%25x"},
+			[]string{"/%25x"},
+		},
+	}
+
+	run := func(t *testing.T, test121 bool) {
+		defer func(u bool) { use121 = u }(use121)
+		use121 = test121
+
+		mux := NewServeMux()
+		for _, m := range matches {
+			mux.HandleFunc(m.pattern, func(w ResponseWriter, r *Request) {})
+		}
+
+		for _, m := range matches {
+			paths := m.paths
+			if use121 {
+				paths = m.paths121
+			}
+			for _, p := range paths {
+				u, err := url.ParseRequestURI(p)
+				if err != nil {
+					t.Fatal(err)
+				}
+				req := &Request{
+					URL: u,
+				}
+				_, gotPattern := mux.Handler(req)
+				if g, w := gotPattern, m.pattern; g != w {
+					t.Errorf("%s: pattern: got %q, want %q", p, g, w)
+				}
+			}
+		}
+	}
+
+	t.Run("latest", func(t *testing.T) { run(t, false) })
+	t.Run("1.21", func(t *testing.T) { run(t, true) })
+}
+
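The ServeMux tests added above exercise the Go 1.22 pattern routing that findHandler implements; a small, hedged sketch of how those patterns look from user code follows (routes and handlers are illustrative).

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	mux := http.NewServeMux()

	// Method-qualified pattern with a single-segment wildcard.
	mux.HandleFunc("GET /items/{id}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "item %s\n", r.PathValue("id"))
	})

	// {$} matches only the exact path "/", not everything under it.
	mux.HandleFunc("GET /{$}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "home")
	})

	// A trailing {path...} wildcard captures the rest of the path.
	mux.HandleFunc("GET /files/{path...}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "file %s\n", r.PathValue("path"))
	})

	log.Fatal(http.ListenAndServe(":8080", mux))
}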
 func BenchmarkServerMatch(b *testing.B) {
 	fn := func(w ResponseWriter, r *Request) {
 		fmt.Fprintf(w, "OK")
@@ -90,7 +276,11 @@
 		"/products/", "/products/3/image.jpg"}
 	b.StartTimer()
 	for i := 0; i < b.N; i++ {
-		if h, p := mux.match(paths[i%len(paths)]); h != nil && p == "" {
+		r, err := NewRequest("GET", "http://example.com/"+paths[i%len(paths)], nil)
+		if err != nil {
+			b.Fatal(err)
+		}
+		if h, p, _, _ := mux.findHandler(r); h != nil && p == "" {
 			b.Error("impossible")
 		}
 	}
diff --git a/src/net/http/transfer.go b/src/net/http/transfer.go
index d6f26a7..315c6e2 100644
--- a/src/net/http/transfer.go
+++ b/src/net/http/transfer.go
@@ -9,6 +9,7 @@
 	"bytes"
 	"errors"
 	"fmt"
+	"internal/godebug"
 	"io"
 	"net/http/httptrace"
 	"net/http/internal"
@@ -409,7 +410,10 @@
 //
 // This function is only intended for use in writeBody.
 func (t *transferWriter) doBodyCopy(dst io.Writer, src io.Reader) (n int64, err error) {
-	n, err = io.Copy(dst, src)
+	buf := getCopyBuf()
+	defer putCopyBuf(buf)
+
+	n, err = io.CopyBuffer(dst, src, buf)
 	if err != nil && err != io.EOF {
 		t.bodyReadError = err
 	}
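getCopyBuf and putCopyBuf are unexported helpers that do not appear in this hunk; the sketch below shows the kind of sync.Pool-backed buffer reuse they presumably provide, with the 32 KiB size and the names being assumptions made for illustration.

package main

import (
	"io"
	"os"
	"strings"
	"sync"
)

// copyBufPool hands out reusable 32 KiB buffers so io.CopyBuffer does not
// allocate a fresh buffer for every body copy. This is an illustrative
// stand-in for the unexported helpers in net/http, not their actual code.
var copyBufPool = sync.Pool{
	New: func() any { return new([32 * 1024]byte) },
}

func copyWithPooledBuf(dst io.Writer, src io.Reader) (int64, error) {
	buf := copyBufPool.Get().(*[32 * 1024]byte)
	defer copyBufPool.Put(buf)
	return io.CopyBuffer(dst, src, buf[:])
}

func main() {
	_, _ = copyWithPooledBuf(os.Stdout, strings.NewReader("hello\n"))
}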
@@ -527,7 +531,7 @@
 		return err
 	}
 	if isResponse && t.RequestMethod == "HEAD" {
-		if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil {
+		if n, err := parseContentLength(t.Header["Content-Length"]); err != nil {
 			return err
 		} else {
 			t.ContentLength = n
@@ -707,18 +711,15 @@
 		return -1, nil
 	}
 
-	// Logic based on Content-Length
-	var cl string
-	if len(contentLens) == 1 {
-		cl = textproto.TrimString(contentLens[0])
-	}
-	if cl != "" {
-		n, err := parseContentLength(cl)
+	if len(contentLens) > 0 {
+		// Logic based on Content-Length
+		n, err := parseContentLength(contentLens)
 		if err != nil {
 			return -1, err
 		}
 		return n, nil
 	}
+
 	header.Del("Content-Length")
 
 	if isRequest {
@@ -816,10 +817,10 @@
 	onHitEOF   func() // if non-nil, func to call when EOF is Read
 }
 
-// ErrBodyReadAfterClose is returned when reading a Request or Response
+// ErrBodyReadAfterClose is returned when reading a [Request] or [Response]
 // Body after the body has been closed. This typically happens when the body is
-// read after an HTTP Handler calls WriteHeader or Write on its
-// ResponseWriter.
+// read after an HTTP [Handler] calls WriteHeader or Write on its
+// [ResponseWriter].
 var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body")
 
 func (b *body) Read(p []byte) (n int, err error) {
@@ -1038,19 +1039,31 @@
 	return bl.b.readLocked(p)
 }
 
-// parseContentLength trims whitespace from s and returns -1 if no value
-// is set, or the value if it's >= 0.
-func parseContentLength(cl string) (int64, error) {
-	cl = textproto.TrimString(cl)
-	if cl == "" {
+var laxContentLength = godebug.New("httplaxcontentlength")
+
+// parseContentLength checks that the header is valid and then trims
+// whitespace. It returns -1 if no value is set, or the value if it's >= 0.
+func parseContentLength(clHeaders []string) (int64, error) {
+	if len(clHeaders) == 0 {
 		return -1, nil
 	}
+	cl := textproto.TrimString(clHeaders[0])
+
+	// The Content-Length must be a valid numeric value.
+	// See: https://datatracker.ietf.org/doc/html/rfc2616/#section-14.13
+	if cl == "" {
+		if laxContentLength.Value() == "1" {
+			laxContentLength.IncNonDefault()
+			return -1, nil
+		}
+		return 0, badStringError("invalid empty Content-Length", cl)
+	}
 	n, err := strconv.ParseUint(cl, 10, 63)
 	if err != nil {
 		return 0, badStringError("bad Content-Length", cl)
 	}
 	return int64(n), nil
-
 }
 
 // finishAsyncByteRead finishes reading the 1-byte sniff
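The stricter parseContentLength above rejects an empty Content-Length value unless the httplaxcontentlength GODEBUG is set to 1; a small sketch of observing both behaviors follows (the crafted response is illustrative).

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// A response with an empty Content-Length header. With Go 1.22 this is
	// rejected; run with GODEBUG=httplaxcontentlength=1 to restore the old,
	// lax behavior, where the header is treated as absent.
	raw := "HTTP/1.1 200 OK\r\nContent-Length: \r\n\r\n"
	_, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
	fmt.Println("parse error:", err)
}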
diff --git a/src/net/http/transfer_test.go b/src/net/http/transfer_test.go
index 5e0df89..b1a5a93 100644
--- a/src/net/http/transfer_test.go
+++ b/src/net/http/transfer_test.go
@@ -112,8 +112,8 @@
 }
 
 func TestTransferWriterWriteBodyReaderTypes(t *testing.T) {
-	fileType := reflect.TypeOf(&os.File{})
-	bufferType := reflect.TypeOf(&bytes.Buffer{})
+	fileType := reflect.TypeFor[*os.File]()
+	bufferType := reflect.TypeFor[*bytes.Buffer]()
 
 	nBytes := int64(1 << 10)
 	newFileFunc := func() (r io.Reader, done func(), err error) {
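reflect.TypeFor, adopted by the test above, is the Go 1.22 replacement for the reflect.TypeOf(&T{}) idiom; a short illustrative comparison (types chosen arbitrarily):

package main

import (
	"bytes"
	"fmt"
	"reflect"
)

func main() {
	// TypeFor needs no throwaway value and also works for interface types,
	// where TypeOf would describe the dynamic value instead.
	oldStyle := reflect.TypeOf(&bytes.Buffer{})  // *bytes.Buffer
	newStyle := reflect.TypeFor[*bytes.Buffer]() // *bytes.Buffer
	fmt.Println(oldStyle == newStyle)            // true
	fmt.Println(reflect.TypeFor[error]())        // error (an interface type)
}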
@@ -264,6 +264,12 @@
 					actualReader = reflect.TypeOf(lr.R)
 				} else {
 					actualReader = reflect.TypeOf(mw.CalledReader)
+					// We have to handle this special case for genericWriteTo in os:
+					// this struct was introduced to support a zero-copy optimization;
+					// see https://go.dev/issue/58808 for details.
+					if actualReader.Kind() == reflect.Struct && actualReader.PkgPath() == "os" && actualReader.Name() == "fileWithoutWriteTo" {
+						actualReader = actualReader.Field(1).Type
+					}
 				}
 
 				if tc.expectedReader != actualReader {
@@ -333,6 +339,10 @@
 		wantErr error
 	}{
 		{
+			cl:      "",
+			wantErr: badStringError("invalid empty Content-Length", ""),
+		},
+		{
 			cl:      "3",
 			wantErr: nil,
 		},
@@ -356,7 +366,7 @@
 	}
 
 	for _, tt := range tests {
-		if _, gotErr := parseContentLength(tt.cl); !reflect.DeepEqual(gotErr, tt.wantErr) {
+		if _, gotErr := parseContentLength([]string{tt.cl}); !reflect.DeepEqual(gotErr, tt.wantErr) {
 			t.Errorf("%q:\n\tgot=%v\n\twant=%v", tt.cl, gotErr, tt.wantErr)
 		}
 	}
diff --git a/src/net/http/transport.go b/src/net/http/transport.go
index c07352b..17067ac 100644
--- a/src/net/http/transport.go
+++ b/src/net/http/transport.go
@@ -35,8 +35,8 @@
 	"golang.org/x/net/http/httpproxy"
 )
 
-// DefaultTransport is the default implementation of Transport and is
-// used by DefaultClient. It establishes network connections as needed
+// DefaultTransport is the default implementation of [Transport] and is
+// used by [DefaultClient]. It establishes network connections as needed
 // and caches them for reuse by subsequent calls. It uses HTTP proxies
 // as directed by the environment variables HTTP_PROXY, HTTPS_PROXY
 // and NO_PROXY (or the lowercase versions thereof).
@@ -53,42 +53,42 @@
 	ExpectContinueTimeout: 1 * time.Second,
 }
 
-// DefaultMaxIdleConnsPerHost is the default value of Transport's
+// DefaultMaxIdleConnsPerHost is the default value of [Transport]'s
 // MaxIdleConnsPerHost.
 const DefaultMaxIdleConnsPerHost = 2
 
-// Transport is an implementation of RoundTripper that supports HTTP,
+// Transport is an implementation of [RoundTripper] that supports HTTP,
 // HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT).
 //
 // By default, Transport caches connections for future re-use.
 // This may leave many open connections when accessing many hosts.
-// This behavior can be managed using Transport's CloseIdleConnections method
-// and the MaxIdleConnsPerHost and DisableKeepAlives fields.
+// This behavior can be managed using the [Transport.CloseIdleConnections] method
+// and the [Transport.MaxIdleConnsPerHost] and [Transport.DisableKeepAlives] fields.
 //
 // Transports should be reused instead of created as needed.
 // Transports are safe for concurrent use by multiple goroutines.
 //
 // A Transport is a low-level primitive for making HTTP and HTTPS requests.
-// For high-level functionality, such as cookies and redirects, see Client.
+// For high-level functionality, such as cookies and redirects, see [Client].
 //
 // Transport uses HTTP/1.1 for HTTP URLs and either HTTP/1.1 or HTTP/2
 // for HTTPS URLs, depending on whether the server supports HTTP/2,
-// and how the Transport is configured. The DefaultTransport supports HTTP/2.
+// and how the Transport is configured. The [DefaultTransport] supports HTTP/2.
 // To explicitly enable HTTP/2 on a transport, use golang.org/x/net/http2
 // and call ConfigureTransport. See the package docs for more about HTTP/2.
 //
 // Responses with status codes in the 1xx range are either handled
 // automatically (100 expect-continue) or ignored. The one
 // exception is HTTP status code 101 (Switching Protocols), which is
-// considered a terminal status and returned by RoundTrip. To see the
+// considered a terminal status and returned by [Transport.RoundTrip]. To see the
 // ignored 1xx responses, use the httptrace trace package's
 // ClientTrace.Got1xxResponse.
 //
 // Transport only retries a request upon encountering a network error
 // if the connection has already been used successfully and if the
-// request is idempotent and either has no body or has its Request.GetBody
+// request is idempotent and either has no body or has its [Request.GetBody]
 // defined. HTTP requests are considered idempotent if they have HTTP methods
-// GET, HEAD, OPTIONS, or TRACE; or if their Header map contains an
+// GET, HEAD, OPTIONS, or TRACE; or if their [Header] map contains an
 // "Idempotency-Key" or "X-Idempotency-Key" entry. If the idempotency key
 // value is a zero-length slice, the request is treated as idempotent but the
 // header is not sent on the wire.
@@ -117,6 +117,10 @@
 	// "https", and "socks5" are supported. If the scheme is empty,
 	// "http" is assumed.
 	//
+	// If the proxy URL contains a userinfo subcomponent,
+	// the proxy request will pass the username and password
+	// in a Proxy-Authorization header.
+	//
 	// If Proxy is nil or returns a nil *URL, no proxy is used.
 	Proxy func(*Request) (*url.URL, error)
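To illustrate the userinfo note added above, a hedged client-side sketch follows; the proxy address and credentials are placeholders, not values from this change.

package main

import (
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Credentials embedded in the proxy URL are sent by the Transport in a
	// Proxy-Authorization header on requests to the proxy. The address,
	// user, and password here are placeholders.
	proxyURL, err := url.Parse("http://proxy.example.com:3128")
	if err != nil {
		log.Fatal(err)
	}
	proxyURL.User = url.UserPassword("alice", "s3cret")

	client := &http.Client{
		Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
	}
	resp, err := client.Get("http://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}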
 
@@ -233,7 +237,7 @@
 
 	// TLSNextProto specifies how the Transport switches to an
 	// alternate protocol (such as HTTP/2) after a TLS ALPN
-	// protocol negotiation. If Transport dials an TLS connection
+	// protocol negotiation. If Transport dials a TLS connection
 	// with a non-empty protocol name and TLSNextProto contains a
 	// map entry for that key (such as "h2"), then the func is
 	// called with the request's authority (such as "example.com"
@@ -449,7 +453,7 @@
 	return envProxyFunc()(req.URL)
 }
 
-// ProxyURL returns a proxy function (for use in a Transport)
+// ProxyURL returns a proxy function (for use in a [Transport])
 // that always returns the same URL.
 func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
 	return func(*Request) (*url.URL, error) {
@@ -748,14 +752,14 @@
 var ErrSkipAltProtocol = errors.New("net/http: skip alternate protocol")
 
 // RegisterProtocol registers a new protocol with scheme.
-// The Transport will pass requests using the given scheme to rt.
+// The [Transport] will pass requests using the given scheme to rt.
 // It is rt's responsibility to simulate HTTP request semantics.
 //
 // RegisterProtocol can be used by other packages to provide
 // implementations of protocol schemes like "ftp" or "file".
 //
-// If rt.RoundTrip returns ErrSkipAltProtocol, the Transport will
-// handle the RoundTrip itself for that one request, as if the
+// If rt.RoundTrip returns [ErrSkipAltProtocol], the Transport will
+// handle the [Transport.RoundTrip] itself for that one request, as if the
 // protocol were not registered.
 func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) {
 	t.altMu.Lock()
@@ -795,9 +799,9 @@
 }
 
 // CancelRequest cancels an in-flight request by closing its connection.
-// CancelRequest should only be called after RoundTrip has returned.
+// CancelRequest should only be called after [Transport.RoundTrip] has returned.
 //
-// Deprecated: Use Request.WithContext to create a request with a
+// Deprecated: Use [Request.WithContext] to create a request with a
 // cancelable context instead. CancelRequest cannot cancel HTTP/2
 // requests.
 func (t *Transport) CancelRequest(req *Request) {
@@ -1205,7 +1209,6 @@
 type wantConn struct {
 	cm    connectMethod
 	key   connectMethodKey // cm.key()
-	ctx   context.Context  // context for dial
 	ready chan struct{}    // closed when pc, err pair is delivered
 
 	// hooks for testing to know when dials are done
@@ -1214,7 +1217,8 @@
 	beforeDial func()
 	afterDial  func()
 
-	mu  sync.Mutex // protects pc, err, close(ready)
+	mu  sync.Mutex      // protects ctx, pc, err, close(ready)
+	ctx context.Context // context for dial, cleared after delivered or canceled
 	pc  *persistConn
 	err error
 }
@@ -1229,6 +1233,13 @@
 	}
 }
 
+// getCtxForDial returns the context for the dial, or nil if the connection was delivered or canceled.
+func (w *wantConn) getCtxForDial() context.Context {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	return w.ctx
+}
+
 // tryDeliver attempts to deliver pc, err to w and reports whether it succeeded.
 func (w *wantConn) tryDeliver(pc *persistConn, err error) bool {
 	w.mu.Lock()
@@ -1238,6 +1249,7 @@
 		return false
 	}
 
+	w.ctx = nil
 	w.pc = pc
 	w.err = err
 	if w.pc == nil && w.err == nil {
@@ -1255,6 +1267,7 @@
 		close(w.ready) // catch misbehavior in future delivery
 	}
 	pc := w.pc
+	w.ctx = nil
 	w.pc = nil
 	w.err = err
 	w.mu.Unlock()
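The wantConn changes above move the dial context under the mutex and clear it on delivery or cancellation, so a dial that starts late sees nil and gives up; the standalone sketch below mimics that pattern with illustrative names (it is not the net/http implementation).

package main

import (
	"context"
	"fmt"
	"sync"
)

// pendingDial guards the dial context with the same mutex as the result and
// clears it once the waiter is satisfied, so late dialers can detect it.
type pendingDial struct {
	mu     sync.Mutex
	ctx    context.Context // nil once delivered or canceled
	result string
}

func (p *pendingDial) ctxForDial() context.Context {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.ctx
}

func (p *pendingDial) deliver(res string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.ctx = nil // late dialers now see nil and bail out
	p.result = res
}

func main() {
	p := &pendingDial{ctx: context.Background()}
	p.deliver("conn-1")
	fmt.Println(p.ctxForDial() == nil) // true: no new dial should start
}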
@@ -1463,8 +1476,13 @@
 // If the dial is canceled or unsuccessful, dialConnFor decrements t.connCount[w.cm.key()].
 func (t *Transport) dialConnFor(w *wantConn) {
 	defer w.afterDial()
+	ctx := w.getCtxForDial()
+	if ctx == nil {
+		t.decConnsPerHost(w.key)
+		return
+	}
 
-	pc, err := t.dialConn(w.ctx, w.cm)
+	pc, err := t.dialConn(ctx, w.cm)
 	delivered := w.tryDeliver(pc, err)
 	if err == nil && (!delivered || pc.alt != nil) {
 		// pconn was not passed to w,
@@ -1560,6 +1578,11 @@
 	}()
 	if err := <-errc; err != nil {
 		plainConn.Close()
+		if err == (tlsHandshakeTimeoutError{}) {
+			// Now that we have closed the connection,
+			// wait for the call to HandshakeContext to return.
+			<-errc
+		}
 		if trace != nil && trace.TLSHandshakeDone != nil {
 			trace.TLSHandshakeDone(tls.ConnectionState{}, err)
 		}
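The added drain above makes the caller wait for the HandshakeContext goroutine to finish once the connection has been closed on timeout; the generic sketch below shows the same close-then-drain shape with illustrative names and a dummy blocking operation standing in for the handshake.

package main

import (
	"errors"
	"fmt"
	"net"
	"time"
)

var errHandshakeTimeout = errors.New("handshake timed out")

// handshakeWithTimeout shows the shape of the change above: the worker
// goroutine can always send its result, because after a timeout the caller
// closes the connection and then drains errc so the goroutine can return.
func handshakeWithTimeout(conn net.Conn, do func(net.Conn) error, d time.Duration) error {
	errc := make(chan error, 2)
	timer := time.AfterFunc(d, func() { errc <- errHandshakeTimeout })
	defer timer.Stop()

	go func() { errc <- do(conn) }()

	if err := <-errc; err != nil {
		conn.Close()
		if err == errHandshakeTimeout {
			// Closing the conn unblocks do; wait for it to finish.
			<-errc
		}
		return err
	}
	return nil
}

func main() {
	client, server := net.Pipe()
	defer server.Close()
	err := handshakeWithTimeout(client, func(c net.Conn) error {
		buf := make([]byte, 1)
		_, err := c.Read(buf) // blocks until the conn is closed
		return err
	}, 50*time.Millisecond)
	fmt.Println(err)
}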
@@ -2248,7 +2271,7 @@
 			}
 		case <-rc.req.Cancel:
 			alive = false
-			pc.t.CancelRequest(rc.req)
+			pc.t.cancelRequest(rc.cancelKey, errRequestCanceled)
 		case <-rc.req.Context().Done():
 			alive = false
 			pc.t.cancelRequest(rc.cancelKey, rc.req.Context().Err())
diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go
index 028fecc..3fb5624 100644
--- a/src/net/http/transport_test.go
+++ b/src/net/http/transport_test.go
@@ -730,6 +730,56 @@
 	}
 }
 
+func TestTransportMaxConnsPerHostDialCancellation(t *testing.T) {
+	run(t, testTransportMaxConnsPerHostDialCancellation,
+		testNotParallel, // because test uses SetPendingDialHooks
+		[]testMode{http1Mode, https1Mode, http2Mode},
+	)
+}
+
+func testTransportMaxConnsPerHostDialCancellation(t *testing.T, mode testMode) {
+	CondSkipHTTP2(t)
+
+	h := HandlerFunc(func(w ResponseWriter, r *Request) {
+		_, err := w.Write([]byte("foo"))
+		if err != nil {
+			t.Fatalf("Write: %v", err)
+		}
+	})
+
+	cst := newClientServerTest(t, mode, h)
+	defer cst.close()
+	ts := cst.ts
+	c := ts.Client()
+	tr := c.Transport.(*Transport)
+	tr.MaxConnsPerHost = 1
+
+	// This request is canceled when its dial is queued, which preempts dialing.
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	SetPendingDialHooks(cancel, nil)
+	defer SetPendingDialHooks(nil, nil)
+
+	req, _ := NewRequestWithContext(ctx, "GET", ts.URL, nil)
+	_, err := c.Do(req)
+	if !errors.Is(err, context.Canceled) {
+		t.Errorf("expected error %v, got %v", context.Canceled, err)
+	}
+
+	// This request should succeed.
+	SetPendingDialHooks(nil, nil)
+	req, _ = NewRequest("GET", ts.URL, nil)
+	resp, err := c.Do(req)
+	if err != nil {
+		t.Fatalf("request failed: %v", err)
+	}
+	defer resp.Body.Close()
+	_, err = io.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("read body failed: %v", err)
+	}
+}
+
 func TestTransportRemovesDeadIdleConnections(t *testing.T) {
 	run(t, testTransportRemovesDeadIdleConnections, []testMode{http1Mode})
 }
@@ -2099,25 +2149,50 @@
 
 // Test that a client receives a server's reply, even if the server doesn't read
 // the entire request body.
-func TestIssue3595(t *testing.T) { run(t, testIssue3595) }
+func TestIssue3595(t *testing.T) {
+	// Not parallel: modifies the global rstAvoidanceDelay.
+	run(t, testIssue3595, testNotParallel)
+}
 func testIssue3595(t *testing.T, mode testMode) {
-	const deniedMsg = "sorry, denied."
-	ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
-		Error(w, deniedMsg, StatusUnauthorized)
-	})).ts
-	c := ts.Client()
-	res, err := c.Post(ts.URL, "application/octet-stream", neverEnding('a'))
-	if err != nil {
-		t.Errorf("Post: %v", err)
-		return
-	}
-	got, err := io.ReadAll(res.Body)
-	if err != nil {
-		t.Fatalf("Body ReadAll: %v", err)
-	}
-	if !strings.Contains(string(got), deniedMsg) {
-		t.Errorf("Known bug: response %q does not contain %q", got, deniedMsg)
-	}
+	runTimeSensitiveTest(t, []time.Duration{
+		1 * time.Millisecond,
+		5 * time.Millisecond,
+		10 * time.Millisecond,
+		50 * time.Millisecond,
+		100 * time.Millisecond,
+		500 * time.Millisecond,
+		time.Second,
+		5 * time.Second,
+	}, func(t *testing.T, timeout time.Duration) error {
+		SetRSTAvoidanceDelay(t, timeout)
+		t.Logf("set RST avoidance delay to %v", timeout)
+
+		const deniedMsg = "sorry, denied."
+		cst := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
+			Error(w, deniedMsg, StatusUnauthorized)
+		}))
+		// We need to close cst explicitly here so that in-flight server
+		// requests don't race with the call to SetRSTAvoidanceDelay for a retry.
+		defer cst.close()
+		ts := cst.ts
+		c := ts.Client()
+
+		res, err := c.Post(ts.URL, "application/octet-stream", neverEnding('a'))
+		if err != nil {
+			return fmt.Errorf("Post: %v", err)
+		}
+		got, err := io.ReadAll(res.Body)
+		if err != nil {
+			return fmt.Errorf("Body ReadAll: %v", err)
+		}
+		t.Logf("server response:\n%s", got)
+		if !strings.Contains(string(got), deniedMsg) {
+			// If we got an RST packet too early, we should have seen an error
+			// from io.ReadAll, not a silently-truncated body.
+			t.Errorf("Known bug: response %q does not contain %q", got, deniedMsg)
+		}
+		return nil
+	})
 }
 
 // From https://golang.org/issue/4454 ,
@@ -2440,6 +2515,7 @@
 			if d > 0 {
 				t.Logf("pending requests = %d after %v (want 0)", n, d)
 			}
+			return false
 		}
 		return true
 	})
@@ -2599,6 +2675,65 @@
 			if d > 0 {
 				t.Logf("pending requests = %d after %v (want 0)", n, d)
 			}
+			return false
+		}
+		return true
+	})
+}
+
+// Issue 51354
+func TestCancelRequestWithBodyWithChannel(t *testing.T) {
+	run(t, testCancelRequestWithBodyWithChannel, []testMode{http1Mode})
+}
+func testCancelRequestWithBodyWithChannel(t *testing.T, mode testMode) {
+	if testing.Short() {
+		t.Skip("skipping test in -short mode")
+	}
+
+	const msg = "Hello"
+	unblockc := make(chan struct{})
+	ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
+		io.WriteString(w, msg)
+		w.(Flusher).Flush() // send headers and some body
+		<-unblockc
+	})).ts
+	defer close(unblockc)
+
+	c := ts.Client()
+	tr := c.Transport.(*Transport)
+
+	req, _ := NewRequest("POST", ts.URL, strings.NewReader("withbody"))
+	cancel := make(chan struct{})
+	req.Cancel = cancel
+
+	res, err := c.Do(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	body := make([]byte, len(msg))
+	n, _ := io.ReadFull(res.Body, body)
+	if n != len(body) || !bytes.Equal(body, []byte(msg)) {
+		t.Errorf("Body = %q; want %q", body[:n], msg)
+	}
+	close(cancel)
+
+	tail, err := io.ReadAll(res.Body)
+	res.Body.Close()
+	if err != ExportErrRequestCanceled {
+		t.Errorf("Body.Read error = %v; want errRequestCanceled", err)
+	} else if len(tail) > 0 {
+		t.Errorf("Spurious bytes from Body.Read: %q", tail)
+	}
+
+	// Verify no outstanding requests after readLoop/writeLoop
+	// goroutines shut down.
+	waitCondition(t, 10*time.Millisecond, func(d time.Duration) bool {
+		n := tr.NumPendingRequestsForTesting()
+		if n > 0 {
+			if d > 0 {
+				t.Logf("pending requests = %d after %v (want 0)", n, d)
+			}
+			return false
 		}
 		return true
 	})
@@ -3414,6 +3549,7 @@
 		c net.Conn
 	}
 	var getOkay bool
+	var copying sync.WaitGroup
 	closeConn := func() {
 		sconn.Lock()
 		defer sconn.Unlock()
@@ -3425,7 +3561,10 @@
 			}
 		}
 	}
-	defer closeConn()
+	defer func() {
+		closeConn()
+		copying.Wait()
+	}()
 
 	ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
 		if r.Method == "GET" {
@@ -3437,7 +3576,12 @@
 		sconn.c = conn
 		sconn.Unlock()
 		conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nfoo")) // keep-alive
-		go io.Copy(io.Discard, conn)
+
+		copying.Add(1)
+		go func() {
+			io.Copy(io.Discard, conn)
+			copying.Done()
+		}()
 	})).ts
 	c := ts.Client()
 
@@ -4267,68 +4411,78 @@
 
 // Issue 11745.
 func TestTransportPrefersResponseOverWriteError(t *testing.T) {
-	run(t, testTransportPrefersResponseOverWriteError)
+	// Not parallel: modifies the global rstAvoidanceDelay.
+	run(t, testTransportPrefersResponseOverWriteError, testNotParallel)
 }
 func testTransportPrefersResponseOverWriteError(t *testing.T, mode testMode) {
 	if testing.Short() {
 		t.Skip("skipping in short mode")
 	}
-	const contentLengthLimit = 1024 * 1024 // 1MB
-	ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
-		if r.ContentLength >= contentLengthLimit {
-			w.WriteHeader(StatusBadRequest)
-			r.Body.Close()
-			return
+
+	runTimeSensitiveTest(t, []time.Duration{
+		1 * time.Millisecond,
+		5 * time.Millisecond,
+		10 * time.Millisecond,
+		50 * time.Millisecond,
+		100 * time.Millisecond,
+		500 * time.Millisecond,
+		time.Second,
+		5 * time.Second,
+	}, func(t *testing.T, timeout time.Duration) error {
+		SetRSTAvoidanceDelay(t, timeout)
+		t.Logf("set RST avoidance delay to %v", timeout)
+
+		const contentLengthLimit = 1024 * 1024 // 1MB
+		cst := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) {
+			if r.ContentLength >= contentLengthLimit {
+				w.WriteHeader(StatusBadRequest)
+				r.Body.Close()
+				return
+			}
+			w.WriteHeader(StatusOK)
+		}))
+		// We need to close cst explicitly here so that in-flight server
+		// requests don't race with the call to SetRSTAvoidanceDelay for a retry.
+		defer cst.close()
+		ts := cst.ts
+		c := ts.Client()
+
+		count := 100
+
+		bigBody := strings.Repeat("a", contentLengthLimit*2)
+		var wg sync.WaitGroup
+		defer wg.Wait()
+		getBody := func() (io.ReadCloser, error) {
+			wg.Add(1)
+			body := &wgReadCloser{
+				Reader: strings.NewReader(bigBody),
+				wg:     &wg,
+			}
+			return body, nil
 		}
-		w.WriteHeader(StatusOK)
-	})).ts
-	c := ts.Client()
 
-	fail := 0
-	count := 100
+		for i := 0; i < count; i++ {
+			reqBody, _ := getBody()
+			req, err := NewRequest("PUT", ts.URL, reqBody)
+			if err != nil {
+				reqBody.Close()
+				t.Fatal(err)
+			}
+			req.ContentLength = int64(len(bigBody))
+			req.GetBody = getBody
 
-	bigBody := strings.Repeat("a", contentLengthLimit*2)
-	var wg sync.WaitGroup
-	defer wg.Wait()
-	getBody := func() (io.ReadCloser, error) {
-		wg.Add(1)
-		body := &wgReadCloser{
-			Reader: strings.NewReader(bigBody),
-			wg:     &wg,
-		}
-		return body, nil
-	}
-
-	for i := 0; i < count; i++ {
-		reqBody, _ := getBody()
-		req, err := NewRequest("PUT", ts.URL, reqBody)
-		if err != nil {
-			reqBody.Close()
-			t.Fatal(err)
-		}
-		req.ContentLength = int64(len(bigBody))
-		req.GetBody = getBody
-
-		resp, err := c.Do(req)
-		if err != nil {
-			fail++
-			t.Logf("%d = %#v", i, err)
-			if ue, ok := err.(*url.Error); ok {
-				t.Logf("urlErr = %#v", ue.Err)
-				if ne, ok := ue.Err.(*net.OpError); ok {
-					t.Logf("netOpError = %#v", ne.Err)
+			resp, err := c.Do(req)
+			if err != nil {
+				return fmt.Errorf("Do %d: %v", i, err)
+			} else {
+				resp.Body.Close()
+				if resp.StatusCode != 400 {
+					t.Errorf("Expected status code 400, got %v", resp.Status)
 				}
 			}
-		} else {
-			resp.Body.Close()
-			if resp.StatusCode != 400 {
-				t.Errorf("Expected status code 400, got %v", resp.Status)
-			}
 		}
-	}
-	if fail > 0 {
-		t.Errorf("Failed %v out of %v\n", fail, count)
-	}
+		return nil
+	})
 }
 
 func TestTransportAutomaticHTTP2(t *testing.T) {
@@ -6750,3 +6904,36 @@
 		resp.Body.Close()
 	}
 }
+
+func TestProxyAuthHeader(t *testing.T) {
+	// Not parallel: Sets an environment variable.
+	run(t, testProxyAuthHeader, []testMode{http1Mode}, testNotParallel)
+}
+func testProxyAuthHeader(t *testing.T, mode testMode) {
+	const username = "u"
+	const password = "@/?!"
+	cst := newClientServerTest(t, mode, HandlerFunc(func(rw ResponseWriter, req *Request) {
+		// Copy the Proxy-Authorization header to a new Request,
+		// since Request.BasicAuth only parses the Authorization header.
+		var r2 Request
+		r2.Header = Header{
+			"Authorization": req.Header["Proxy-Authorization"],
+		}
+		gotuser, gotpass, ok := r2.BasicAuth()
+		if !ok || gotuser != username || gotpass != password {
+			t.Errorf("req.BasicAuth() = %q, %q, %v; want %q, %q, true", gotuser, gotpass, ok, username, password)
+		}
+	}))
+	u, err := url.Parse(cst.ts.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	u.User = url.UserPassword(username, password)
+	t.Setenv("HTTP_PROXY", u.String())
+	cst.tr.Proxy = ProxyURL(u)
+	resp, err := cst.c.Get("http://_/")
+	if err != nil {
+		t.Fatal(err)
+	}
+	resp.Body.Close()
+}
diff --git a/src/net/http/triv.go b/src/net/http/triv.go
index f614922..c169642 100644
--- a/src/net/http/triv.go
+++ b/src/net/http/triv.go
@@ -34,7 +34,7 @@
 	n  int
 }
 
-// This makes Counter satisfy the expvar.Var interface, so we can export
+// This makes Counter satisfy the [expvar.Var] interface, so we can export
 // it directly.
 func (ctr *Counter) String() string {
 	ctr.mu.Lock()
diff --git a/src/net/interface.go b/src/net/interface.go
index e1c9a2e..20ac07d 100644
--- a/src/net/interface.go
+++ b/src/net/interface.go
@@ -114,7 +114,7 @@
 // addresses.
 //
 // The returned list does not identify the associated interface; use
-// Interfaces and Interface.Addrs for more detail.
+// Interfaces and [Interface.Addrs] for more detail.
 func InterfaceAddrs() ([]Addr, error) {
 	ifat, err := interfaceAddrTable(nil)
 	if err != nil {
@@ -127,7 +127,7 @@
 //
 // On Solaris, it returns one of the logical network interfaces
 // sharing the logical data link; for more precision use
-// InterfaceByName.
+// [InterfaceByName].
 func InterfaceByIndex(index int) (*Interface, error) {
 	if index <= 0 {
 		return nil, &OpError{Op: "route", Net: "ip+net", Source: nil, Addr: nil, Err: errInvalidInterfaceIndex}
diff --git a/src/net/interface_stub.go b/src/net/interface_stub.go
index 829dbc6..4c280c6 100644
--- a/src/net/interface_stub.go
+++ b/src/net/interface_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (js && wasm) || wasip1
+//go:build js || wasip1
 
 package net
 
diff --git a/src/net/interface_test.go b/src/net/interface_test.go
index 5590b06..a97d675 100644
--- a/src/net/interface_test.go
+++ b/src/net/interface_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
diff --git a/src/net/internal/socktest/main_test.go b/src/net/internal/socktest/main_test.go
index 0197feb..967ce67 100644
--- a/src/net/internal/socktest/main_test.go
+++ b/src/net/internal/socktest/main_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !plan9 && !wasip1
+//go:build !js && !plan9 && !wasip1 && !windows
 
 package socktest_test
 
diff --git a/src/net/internal/socktest/main_windows_test.go b/src/net/internal/socktest/main_windows_test.go
deleted file mode 100644
index df1cb97..0000000
--- a/src/net/internal/socktest/main_windows_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package socktest_test
-
-import "syscall"
-
-var (
-	socketFunc func(int, int, int) (syscall.Handle, error)
-	closeFunc  func(syscall.Handle) error
-)
-
-func installTestHooks() {
-	socketFunc = sw.Socket
-	closeFunc = sw.Closesocket
-}
-
-func uninstallTestHooks() {
-	socketFunc = syscall.Socket
-	closeFunc = syscall.Closesocket
-}
diff --git a/src/net/internal/socktest/switch.go b/src/net/internal/socktest/switch.go
index 3c37b6f..dea6d9288 100644
--- a/src/net/internal/socktest/switch.go
+++ b/src/net/internal/socktest/switch.go
@@ -133,7 +133,7 @@
 // If the filter returns a non-nil error, the execution of system call
 // will be canceled and the system call function returns the non-nil
 // error.
-// It can return a non-nil AfterFilter for filtering after the
+// It can return a non-nil [AfterFilter] for filtering after the
 // execution of the system call.
 type Filter func(*Status) (AfterFilter, error)
 
diff --git a/src/net/internal/socktest/sys_unix.go b/src/net/internal/socktest/sys_unix.go
index 712462a..3eef26c 100644
--- a/src/net/internal/socktest/sys_unix.go
+++ b/src/net/internal/socktest/sys_unix.go
@@ -8,7 +8,7 @@
 
 import "syscall"
 
-// Socket wraps syscall.Socket.
+// Socket wraps [syscall.Socket].
 func (sw *Switch) Socket(family, sotype, proto int) (s int, err error) {
 	sw.once.Do(sw.init)
 
diff --git a/src/net/internal/socktest/sys_windows.go b/src/net/internal/socktest/sys_windows.go
index 8c1c862..2f02446 100644
--- a/src/net/internal/socktest/sys_windows.go
+++ b/src/net/internal/socktest/sys_windows.go
@@ -9,39 +9,7 @@
 	"syscall"
 )
 
-// Socket wraps syscall.Socket.
-func (sw *Switch) Socket(family, sotype, proto int) (s syscall.Handle, err error) {
-	sw.once.Do(sw.init)
-
-	so := &Status{Cookie: cookie(family, sotype, proto)}
-	sw.fmu.RLock()
-	f, _ := sw.fltab[FilterSocket]
-	sw.fmu.RUnlock()
-
-	af, err := f.apply(so)
-	if err != nil {
-		return syscall.InvalidHandle, err
-	}
-	s, so.Err = syscall.Socket(family, sotype, proto)
-	if err = af.apply(so); err != nil {
-		if so.Err == nil {
-			syscall.Closesocket(s)
-		}
-		return syscall.InvalidHandle, err
-	}
-
-	sw.smu.Lock()
-	defer sw.smu.Unlock()
-	if so.Err != nil {
-		sw.stats.getLocked(so.Cookie).OpenFailed++
-		return syscall.InvalidHandle, so.Err
-	}
-	nso := sw.addLocked(s, family, sotype, proto)
-	sw.stats.getLocked(nso.Cookie).Opened++
-	return s, nil
-}
-
-// WSASocket wraps syscall.WSASocket.
+// WSASocket wraps [syscall.WSASocket].
 func (sw *Switch) WSASocket(family, sotype, proto int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (s syscall.Handle, err error) {
 	sw.once.Do(sw.init)
 
@@ -73,7 +41,7 @@
 	return s, nil
 }
 
-// Closesocket wraps syscall.Closesocket.
+// Closesocket wraps [syscall.Closesocket].
 func (sw *Switch) Closesocket(s syscall.Handle) (err error) {
 	so := sw.sockso(s)
 	if so == nil {
@@ -103,7 +71,7 @@
 	return nil
 }
 
-// Connect wraps syscall.Connect.
+// Connect wraps [syscall.Connect].
 func (sw *Switch) Connect(s syscall.Handle, sa syscall.Sockaddr) (err error) {
 	so := sw.sockso(s)
 	if so == nil {
@@ -132,7 +100,7 @@
 	return nil
 }
 
-// ConnectEx wraps syscall.ConnectEx.
+// ConnectEx wraps [syscall.ConnectEx].
 func (sw *Switch) ConnectEx(s syscall.Handle, sa syscall.Sockaddr, b *byte, n uint32, nwr *uint32, o *syscall.Overlapped) (err error) {
 	so := sw.sockso(s)
 	if so == nil {
@@ -161,7 +129,7 @@
 	return nil
 }
 
-// Listen wraps syscall.Listen.
+// Listen wraps [syscall.Listen].
 func (sw *Switch) Listen(s syscall.Handle, backlog int) (err error) {
 	so := sw.sockso(s)
 	if so == nil {
@@ -190,7 +158,7 @@
 	return nil
 }
 
-// AcceptEx wraps syscall.AcceptEx.
+// AcceptEx wraps [syscall.AcceptEx].
 func (sw *Switch) AcceptEx(ls syscall.Handle, as syscall.Handle, b *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, rcvd *uint32, overlapped *syscall.Overlapped) error {
 	so := sw.sockso(ls)
 	if so == nil {
diff --git a/src/net/ip.go b/src/net/ip.go
index d51ba10..6083dd8 100644
--- a/src/net/ip.go
+++ b/src/net/ip.go
@@ -38,7 +38,7 @@
 // An IPMask is a bitmask that can be used to manipulate
 // IP addresses for IP addressing and routing.
 //
-// See type IPNet and func ParseCIDR for details.
+// See type [IPNet] and func [ParseCIDR] for details.
 type IPMask []byte
 
 // An IPNet represents an IP network.
@@ -72,9 +72,9 @@
 	return p
 }
 
-// CIDRMask returns an IPMask consisting of 'ones' 1 bits
+// CIDRMask returns an [IPMask] consisting of 'ones' 1 bits
 // followed by 0s up to a total length of 'bits' bits.
-// For a mask of this form, CIDRMask is the inverse of IPMask.Size.
+// For a mask of this form, CIDRMask is the inverse of [IPMask.Size].
 func CIDRMask(ones, bits int) IPMask {
 	if bits != 8*IPv4len && bits != 8*IPv6len {
 		return nil
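A quick illustration of the CIDRMask / IPMask.Size inverse relationship referenced in the doc above (values chosen arbitrarily):

package main

import (
	"fmt"
	"net"
)

func main() {
	// CIDRMask(24, 32) is the IPv4 /24 mask; Size recovers (24, 32).
	mask := net.CIDRMask(24, 32)
	fmt.Println(mask.String()) // ffffff00
	ones, bits := mask.Size()
	fmt.Println(ones, bits) // 24 32
}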
@@ -324,8 +324,8 @@
 	return ip.String()
 }
 
-// MarshalText implements the encoding.TextMarshaler interface.
-// The encoding is the same as returned by String, with one exception:
+// MarshalText implements the [encoding.TextMarshaler] interface.
+// The encoding is the same as returned by [IP.String], with one exception:
 // When len(ip) is zero, it returns an empty slice.
 func (ip IP) MarshalText() ([]byte, error) {
 	if len(ip) == 0 {
@@ -337,8 +337,8 @@
 	return []byte(ip.String()), nil
 }
 
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The IP address is expected in a form accepted by ParseIP.
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+// The IP address is expected in a form accepted by [ParseIP].
 func (ip *IP) UnmarshalText(text []byte) error {
 	if len(text) == 0 {
 		*ip = nil
diff --git a/src/net/ip_test.go b/src/net/ip_test.go
index 1373059..acc2310 100644
--- a/src/net/ip_test.go
+++ b/src/net/ip_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
diff --git a/src/net/iprawsock.go b/src/net/iprawsock.go
index f18331a..4c06b1b 100644
--- a/src/net/iprawsock.go
+++ b/src/net/iprawsock.go
@@ -72,7 +72,7 @@
 // recommended, because it will return at most one of the host name's
 // IP addresses.
 //
-// See func Dial for a description of the network and address
+// See func [Dial] for a description of the network and address
 // parameters.
 func ResolveIPAddr(network, address string) (*IPAddr, error) {
 	if network == "" { // a hint wildcard for Go 1.0 undocumented behavior
@@ -94,19 +94,19 @@
 	return addrs.forResolve(network, address).(*IPAddr), nil
 }
 
-// IPConn is the implementation of the Conn and PacketConn interfaces
+// IPConn is the implementation of the [Conn] and [PacketConn] interfaces
 // for IP network connections.
 type IPConn struct {
 	conn
 }
 
 // SyscallConn returns a raw network connection.
-// This implements the syscall.Conn interface.
+// This implements the [syscall.Conn] interface.
 func (c *IPConn) SyscallConn() (syscall.RawConn, error) {
 	if !c.ok() {
 		return nil, syscall.EINVAL
 	}
-	return newRawConn(c.fd)
+	return newRawConn(c.fd), nil
 }
 
 // ReadFromIP acts like ReadFrom but returns an IPAddr.
@@ -121,7 +121,7 @@
 	return n, addr, err
 }
 
-// ReadFrom implements the PacketConn ReadFrom method.
+// ReadFrom implements the [PacketConn] ReadFrom method.
 func (c *IPConn) ReadFrom(b []byte) (int, Addr, error) {
 	if !c.ok() {
 		return 0, nil, syscall.EINVAL
@@ -154,7 +154,7 @@
 	return
 }
 
-// WriteToIP acts like WriteTo but takes an IPAddr.
+// WriteToIP acts like [IPConn.WriteTo] but takes an [IPAddr].
 func (c *IPConn) WriteToIP(b []byte, addr *IPAddr) (int, error) {
 	if !c.ok() {
 		return 0, syscall.EINVAL
@@ -166,7 +166,7 @@
 	return n, err
 }
 
-// WriteTo implements the PacketConn WriteTo method.
+// WriteTo implements the [PacketConn] WriteTo method.
 func (c *IPConn) WriteTo(b []byte, addr Addr) (int, error) {
 	if !c.ok() {
 		return 0, syscall.EINVAL
@@ -201,7 +201,7 @@
 
 func newIPConn(fd *netFD) *IPConn { return &IPConn{conn{fd}} }
 
-// DialIP acts like Dial for IP networks.
+// DialIP acts like [Dial] for IP networks.
 //
 // The network must be an IP network name; see func Dial for details.
 //
@@ -220,7 +220,7 @@
 	return c, nil
 }
 
-// ListenIP acts like ListenPacket for IP networks.
+// ListenIP acts like [ListenPacket] for IP networks.
 //
 // The network must be an IP network name; see func Dial for details.
 //
diff --git a/src/net/iprawsock_posix.go b/src/net/iprawsock_posix.go
index 59967eb..73b41ab 100644
--- a/src/net/iprawsock_posix.go
+++ b/src/net/iprawsock_posix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix || (js && wasm) || wasip1 || windows
+//go:build unix || js || wasip1 || windows
 
 package net
 
diff --git a/src/net/iprawsock_test.go b/src/net/iprawsock_test.go
index 14c03a1..7f1fc13 100644
--- a/src/net/iprawsock_test.go
+++ b/src/net/iprawsock_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
diff --git a/src/net/ipsock.go b/src/net/ipsock.go
index 0f5da25..176dbc7 100644
--- a/src/net/ipsock.go
+++ b/src/net/ipsock.go
@@ -83,10 +83,10 @@
 	switch network {
 	case "ip":
 		// IPv6 literal (addr does NOT contain a port)
-		want6 = count(addr, ':') > 0
+		want6 = bytealg.CountString(addr, ':') > 0
 	case "tcp", "udp":
 		// IPv6 literal. (addr contains a port, so look for '[')
-		want6 = count(addr, '[') > 0
+		want6 = bytealg.CountString(addr, '[') > 0
 	}
 	if want6 {
 		return addrs.first(isNotIPv4)
@@ -172,7 +172,7 @@
 	j, k := 0, 0
 
 	// The port starts after the last colon.
-	i := last(hostport, ':')
+	i := bytealg.LastIndexByteString(hostport, ':')
 	if i < 0 {
 		return addrErr(hostport, missingPort)
 	}
@@ -219,7 +219,7 @@
 func splitHostZone(s string) (host, zone string) {
 	// The IPv6 scoped addressing zone identifier starts after the
 	// last percent sign.
-	if i := last(s, '%'); i > 0 {
+	if i := bytealg.LastIndexByteString(s, '%'); i > 0 {
 		host, zone = s[:i], s[i+1:]
 	} else {
 		host = s
diff --git a/src/net/ipsock_plan9.go b/src/net/ipsock_plan9.go
index 4328743..c8d0180 100644
--- a/src/net/ipsock_plan9.go
+++ b/src/net/ipsock_plan9.go
@@ -181,7 +181,6 @@
 	}
 	resc := make(chan res)
 	go func() {
-		testHookDialChannel()
 		fd, err := dialPlan9Blocking(ctx, net, laddr, raddr)
 		select {
 		case resc <- res{fd, err}:
diff --git a/src/net/ipsock_posix.go b/src/net/ipsock_posix.go
index b0a00a6..67ce147 100644
--- a/src/net/ipsock_posix.go
+++ b/src/net/ipsock_posix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix || (js && wasm) || wasip1 || windows
+//go:build unix || js || wasip1 || windows
 
 package net
 
@@ -25,6 +25,15 @@
 // general. Unfortunately, we need to run on kernels built without
 // IPv6 support too. So probe the kernel to figure it out.
 func (p *ipStackCapabilities) probe() {
+	switch runtime.GOOS {
+	case "js", "wasip1":
+		// Both ipv4 and ipv6 are faked; see net_fake.go.
+		p.ipv4Enabled = true
+		p.ipv6Enabled = true
+		p.ipv4MappedIPv6Enabled = true
+		return
+	}
+
 	s, err := sysSocket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
 	switch err {
 	case syscall.EAFNOSUPPORT, syscall.EPROTONOSUPPORT:
@@ -135,8 +144,11 @@
 }
 
 func internetSocket(ctx context.Context, net string, laddr, raddr sockaddr, sotype, proto int, mode string, ctrlCtxFn func(context.Context, string, string, syscall.RawConn) error) (fd *netFD, err error) {
-	if (runtime.GOOS == "aix" || runtime.GOOS == "windows" || runtime.GOOS == "openbsd") && mode == "dial" && raddr.isWildcard() {
-		raddr = raddr.toLocal(net)
+	switch runtime.GOOS {
+	case "aix", "windows", "openbsd", "js", "wasip1":
+		if mode == "dial" && raddr.isWildcard() {
+			raddr = raddr.toLocal(net)
+		}
 	}
 	family, ipv6only := favoriteAddrFamily(net, laddr, raddr, mode)
 	return socket(ctx, net, family, sotype, proto, ipv6only, laddr, raddr, ctrlCtxFn)
diff --git a/src/net/listen_test.go b/src/net/listen_test.go
index f0a8861..9100b3d 100644
--- a/src/net/listen_test.go
+++ b/src/net/listen_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !plan9 && !wasip1
+//go:build !plan9
 
 package net
 
diff --git a/src/net/lookup.go b/src/net/lookup.go
index a7133b5..3ec2660 100644
--- a/src/net/lookup.go
+++ b/src/net/lookup.go
@@ -41,19 +41,20 @@
 		"domain": 53,
 	},
 	"tcp": {
-		"ftp":    21,
-		"ftps":   990,
-		"gopher": 70, // ʕ◔ϖ◔ʔ
-		"http":   80,
-		"https":  443,
-		"imap2":  143,
-		"imap3":  220,
-		"imaps":  993,
-		"pop3":   110,
-		"pop3s":  995,
-		"smtp":   25,
-		"ssh":    22,
-		"telnet": 23,
+		"ftp":         21,
+		"ftps":        990,
+		"gopher":      70, // ʕ◔ϖ◔ʔ
+		"http":        80,
+		"https":       443,
+		"imap2":       143,
+		"imap3":       220,
+		"imaps":       993,
+		"pop3":        110,
+		"pop3s":       995,
+		"smtp":        25,
+		"submissions": 465,
+		"ssh":         22,
+		"telnet":      23,
 	},
 }
 
@@ -83,12 +84,20 @@
 
 func lookupPortMap(network, service string) (port int, error error) {
 	switch network {
-	case "tcp4", "tcp6":
-		network = "tcp"
-	case "udp4", "udp6":
-		network = "udp"
+	case "ip": // no hints
+		if p, err := lookupPortMapWithNetwork("tcp", "ip", service); err == nil {
+			return p, nil
+		}
+		return lookupPortMapWithNetwork("udp", "ip", service)
+	case "tcp", "tcp4", "tcp6":
+		return lookupPortMapWithNetwork("tcp", "tcp", service)
+	case "udp", "udp4", "udp6":
+		return lookupPortMapWithNetwork("udp", "udp", service)
 	}
+	return 0, &DNSError{Err: "unknown network", Name: network + "/" + service}
+}
 
+func lookupPortMapWithNetwork(network, errNetwork, service string) (port int, error error) {
 	if m, ok := services[network]; ok {
 		var lowerService [maxPortBufSize]byte
 		n := copy(lowerService[:], service)
@@ -96,8 +105,9 @@
 		if port, ok := m[string(lowerService[:n])]; ok && n == len(service) {
 			return port, nil
 		}
+		return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true}
 	}
-	return 0, &AddrError{Err: "unknown port", Addr: network + "/" + service}
+	return 0, &DNSError{Err: "unknown network", Name: errNetwork + "/" + service}
 }
 
 // ipVersion returns the provided network's IP version: '4', '6' or 0
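With this change an unknown service surfaces as a *net.DNSError with IsNotFound set rather than an *AddrError, and "submissions" joins the built-in table; a hedged caller-side sketch follows (service names are illustrative, and results may also come from the system's services database).

package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	port, err := net.LookupPort("tcp", "submissions")
	fmt.Println(port, err) // typically 465 <nil>

	_, err = net.LookupPort("tcp", "no-such-service")
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) && dnsErr.IsNotFound {
		fmt.Println("service not found:", dnsErr.Name)
	}
}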
@@ -171,8 +181,8 @@
 // LookupHost looks up the given host using the local resolver.
 // It returns a slice of that host's addresses.
 //
-// LookupHost uses context.Background internally; to specify the context, use
-// Resolver.LookupHost.
+// LookupHost uses [context.Background] internally; to specify the context, use
+// [Resolver.LookupHost].
 func LookupHost(host string) (addrs []string, err error) {
 	return DefaultResolver.LookupHost(context.Background(), host)
 }
@@ -407,18 +417,20 @@
 
 // LookupPort looks up the port for the given network and service.
 //
-// LookupPort uses context.Background internally; to specify the context, use
-// Resolver.LookupPort.
+// LookupPort uses [context.Background] internally; to specify the context, use
+// [Resolver.LookupPort].
 func LookupPort(network, service string) (port int, err error) {
 	return DefaultResolver.LookupPort(context.Background(), network, service)
 }
 
 // LookupPort looks up the port for the given network and service.
+//
+// The network must be one of "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6" or "ip".
 func (r *Resolver) LookupPort(ctx context.Context, network, service string) (port int, err error) {
 	port, needsLookup := parsePort(service)
 	if needsLookup {
 		switch network {
-		case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6":
+		case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6", "ip":
 		case "": // a hint wildcard for Go 1.0 undocumented behavior
 			network = "ip"
 		default:
@@ -437,7 +449,7 @@
 
 // LookupCNAME returns the canonical name for the given host.
 // Callers that do not care about the canonical name can call
-// LookupHost or LookupIP directly; both take care of resolving
+// [LookupHost] or [LookupIP] directly; both take care of resolving
 // the canonical name as part of the lookup.
 //
 // A canonical name is the final name after following zero
@@ -449,15 +461,15 @@
 // The returned canonical name is validated to be a properly
 // formatted presentation-format domain name.
 //
-// LookupCNAME uses context.Background internally; to specify the context, use
-// Resolver.LookupCNAME.
+// LookupCNAME uses [context.Background] internally; to specify the context, use
+// [Resolver.LookupCNAME].
 func LookupCNAME(host string) (cname string, err error) {
 	return DefaultResolver.LookupCNAME(context.Background(), host)
 }
 
 // LookupCNAME returns the canonical name for the given host.
 // Callers that do not care about the canonical name can call
-// LookupHost or LookupIP directly; both take care of resolving
+// [LookupHost] or [LookupIP] directly; both take care of resolving
 // the canonical name as part of the lookup.
 //
 // A canonical name is the final name after following zero
@@ -479,7 +491,7 @@
 	return cname, nil
 }
 
-// LookupSRV tries to resolve an SRV query of the given service,
+// LookupSRV tries to resolve an [SRV] query of the given service,
 // protocol, and domain name. The proto is "tcp" or "udp".
 // The returned records are sorted by priority and randomized
 // by weight within a priority.
@@ -497,7 +509,7 @@
 	return DefaultResolver.LookupSRV(context.Background(), service, proto, name)
 }
 
-// LookupSRV tries to resolve an SRV query of the given service,
+// LookupSRV tries to resolve an [SRV] query of the given service,
 // protocol, and domain name. The proto is "tcp" or "udp".
 // The returned records are sorted by priority and randomized
 // by weight within a priority.
@@ -542,8 +554,8 @@
 // invalid names, those records are filtered out and an error
 // will be returned alongside the remaining results, if any.
 //
-// LookupMX uses context.Background internally; to specify the context, use
-// Resolver.LookupMX.
+// LookupMX uses [context.Background] internally; to specify the context, use
+// [Resolver.LookupMX].
 func LookupMX(name string) ([]*MX, error) {
 	return DefaultResolver.LookupMX(context.Background(), name)
 }
@@ -582,8 +594,8 @@
 // invalid names, those records are filtered out and an error
 // will be returned alongside the remaining results, if any.
 //
-// LookupNS uses context.Background internally; to specify the context, use
-// Resolver.LookupNS.
+// LookupNS uses [context.Background] internally; to specify the context, use
+// [Resolver.LookupNS].
 func LookupNS(name string) ([]*NS, error) {
 	return DefaultResolver.LookupNS(context.Background(), name)
 }
@@ -617,8 +629,8 @@
 
 // LookupTXT returns the DNS TXT records for the given domain name.
 //
-// LookupTXT uses context.Background internally; to specify the context, use
-// Resolver.LookupTXT.
+// LookupTXT uses [context.Background] internally; to specify the context, use
+// [Resolver.LookupTXT].
 func LookupTXT(name string) ([]string, error) {
 	return DefaultResolver.lookupTXT(context.Background(), name)
 }
@@ -636,10 +648,10 @@
 // out and an error will be returned alongside the remaining results, if any.
 //
 // When using the host C library resolver, at most one result will be
-// returned. To bypass the host resolver, use a custom Resolver.
+// returned. To bypass the host resolver, use a custom [Resolver].
 //
-// LookupAddr uses context.Background internally; to specify the context, use
-// Resolver.LookupAddr.
+// LookupAddr uses [context.Background] internally; to specify the context, use
+// [Resolver.LookupAddr].
 func LookupAddr(addr string) (names []string, err error) {
 	return DefaultResolver.LookupAddr(context.Background(), addr)
 }
diff --git a/src/net/lookup_fake.go b/src/net/lookup_fake.go
deleted file mode 100644
index c27eae4..0000000
--- a/src/net/lookup_fake.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build js && wasm
-
-package net
-
-import (
-	"context"
-	"syscall"
-)
-
-func lookupProtocol(ctx context.Context, name string) (proto int, err error) {
-	return lookupProtocolMap(name)
-}
-
-func (*Resolver) lookupHost(ctx context.Context, host string) (addrs []string, err error) {
-	return nil, syscall.ENOPROTOOPT
-}
-
-func (*Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) {
-	return nil, syscall.ENOPROTOOPT
-}
-
-func (*Resolver) lookupPort(ctx context.Context, network, service string) (port int, err error) {
-	return goLookupPort(network, service)
-}
-
-func (*Resolver) lookupCNAME(ctx context.Context, name string) (cname string, err error) {
-	return "", syscall.ENOPROTOOPT
-}
-
-func (*Resolver) lookupSRV(ctx context.Context, service, proto, name string) (cname string, srvs []*SRV, err error) {
-	return "", nil, syscall.ENOPROTOOPT
-}
-
-func (*Resolver) lookupMX(ctx context.Context, name string) (mxs []*MX, err error) {
-	return nil, syscall.ENOPROTOOPT
-}
-
-func (*Resolver) lookupNS(ctx context.Context, name string) (nss []*NS, err error) {
-	return nil, syscall.ENOPROTOOPT
-}
-
-func (*Resolver) lookupTXT(ctx context.Context, name string) (txts []string, err error) {
-	return nil, syscall.ENOPROTOOPT
-}
-
-func (*Resolver) lookupAddr(ctx context.Context, addr string) (ptrs []string, err error) {
-	return nil, syscall.ENOPROTOOPT
-}
-
-// concurrentThreadsLimit returns the number of threads we permit to
-// run concurrently doing DNS lookups.
-func concurrentThreadsLimit() int {
-	return 500
-}
diff --git a/src/net/lookup_plan9.go b/src/net/lookup_plan9.go
index 5404b99..8cfc4f6 100644
--- a/src/net/lookup_plan9.go
+++ b/src/net/lookup_plan9.go
@@ -106,6 +106,22 @@
 	return query(ctx, netdir+"/dns", addr+" "+typ, 1024)
 }
 
+func handlePlan9DNSError(err error, name string) error {
+	if stringsHasSuffix(err.Error(), "dns: name does not exist") ||
+		stringsHasSuffix(err.Error(), "dns: resource does not exist; negrcode 0") ||
+		stringsHasSuffix(err.Error(), "dns: resource does not exist; negrcode") {
+		return &DNSError{
+			Err:        errNoSuchHost.Error(),
+			Name:       name,
+			IsNotFound: true,
+		}
+	}
+	return &DNSError{
+		Err:  err.Error(),
+		Name: name,
+	}
+}
+
 // toLower returns a lower-case version of in. Restricting us to
 // ASCII is sufficient to handle the IP protocol names and allow
 // us to not depend on the strings and unicode packages.
@@ -153,12 +169,10 @@
 	// host names in local network (e.g. from /lib/ndb/local)
 	lines, err := queryCS(ctx, "net", host, "1")
 	if err != nil {
-		dnsError := &DNSError{Err: err.Error(), Name: host}
 		if stringsHasSuffix(err.Error(), "dns failure") {
-			dnsError.Err = errNoSuchHost.Error()
-			dnsError.IsNotFound = true
+			return nil, &DNSError{Err: errNoSuchHost.Error(), Name: host, IsNotFound: true}
 		}
-		return nil, dnsError
+		return nil, handlePlan9DNSError(err, host)
 	}
 loop:
 	for _, line := range lines {
@@ -184,31 +198,11 @@
 	return
 }
 
-// preferGoOverPlan9 reports whether the resolver should use the
-// "PreferGo" implementation rather than asking plan9 services
-// for the answers.
-func (r *Resolver) preferGoOverPlan9() bool {
-	_, _, res := r.preferGoOverPlan9WithOrderAndConf()
-	return res
-}
-
-func (r *Resolver) preferGoOverPlan9WithOrderAndConf() (hostLookupOrder, *dnsConfig, bool) {
-	order, conf := systemConf().hostLookupOrder(r, "") // name is unused
-
-	// TODO(bradfitz): for now we only permit use of the PreferGo
-	// implementation when there's a non-nil Resolver with a
-	// non-nil Dialer. This is a sign that they the code is trying
-	// to use their DNS-speaking net.Conn (such as an in-memory
-	// DNS cache) and they don't want to actually hit the network.
-	// Once we add support for looking the default DNS servers
-	// from plan9, though, then we can relax this.
-	return order, conf, order != hostLookupCgo && r != nil && r.Dial != nil
-}
-
 func (r *Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) {
-	if r.preferGoOverPlan9() {
-		return r.goLookupIP(ctx, network, host)
+	if order, conf := systemConf().hostLookupOrder(r, host); order != hostLookupCgo {
+		return r.goLookupIP(ctx, network, host, order, conf)
 	}
+
 	lits, err := r.lookupHost(ctx, host)
 	if err != nil {
 		return
@@ -223,24 +217,36 @@
 	return
 }
 
-func (*Resolver) lookupPort(ctx context.Context, network, service string) (port int, err error) {
+func (r *Resolver) lookupPort(ctx context.Context, network, service string) (port int, err error) {
 	switch network {
-	case "tcp4", "tcp6":
-		network = "tcp"
-	case "udp4", "udp6":
-		network = "udp"
+	case "ip": // no hints
+		if p, err := r.lookupPortWithNetwork(ctx, "tcp", "ip", service); err == nil {
+			return p, nil
+		}
+		return r.lookupPortWithNetwork(ctx, "udp", "ip", service)
+	case "tcp", "tcp4", "tcp6":
+		return r.lookupPortWithNetwork(ctx, "tcp", "tcp", service)
+	case "udp", "udp4", "udp6":
+		return r.lookupPortWithNetwork(ctx, "udp", "udp", service)
+	default:
+		return 0, &DNSError{Err: "unknown network", Name: network + "/" + service}
 	}
+}
+
+func (*Resolver) lookupPortWithNetwork(ctx context.Context, network, errNetwork, service string) (port int, err error) {
 	lines, err := queryCS(ctx, network, "127.0.0.1", toLower(service))
 	if err != nil {
+		if stringsHasSuffix(err.Error(), "can't translate service") {
+			return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true}
+		}
 		return
 	}
-	unknownPortError := &AddrError{Err: "unknown port", Addr: network + "/" + service}
 	if len(lines) == 0 {
-		return 0, unknownPortError
+		return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true}
 	}
 	f := getFields(lines[0])
 	if len(f) < 2 {
-		return 0, unknownPortError
+		return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true}
 	}
 	s := f[1]
 	if i := bytealg.IndexByteString(s, '!'); i >= 0 {
@@ -249,21 +255,20 @@
 	if n, _, ok := dtoi(s); ok {
 		return n, nil
 	}
-	return 0, unknownPortError
+	return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true}
 }
 
 func (r *Resolver) lookupCNAME(ctx context.Context, name string) (cname string, err error) {
-	if order, conf, preferGo := r.preferGoOverPlan9WithOrderAndConf(); preferGo {
+	if order, conf := systemConf().hostLookupOrder(r, name); order != hostLookupCgo {
 		return r.goLookupCNAME(ctx, name, order, conf)
 	}
 
 	lines, err := queryDNS(ctx, name, "cname")
 	if err != nil {
 		if stringsHasSuffix(err.Error(), "dns failure") || stringsHasSuffix(err.Error(), "resource does not exist; negrcode 0") {
-			cname = name + "."
-			err = nil
+			return absDomainName(name), nil
 		}
-		return
+		return "", handlePlan9DNSError(err, cname)
 	}
 	if len(lines) > 0 {
 		if f := getFields(lines[0]); len(f) >= 3 {
@@ -274,7 +279,7 @@
 }
 
 func (r *Resolver) lookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*SRV, err error) {
-	if r.preferGoOverPlan9() {
+	if systemConf().mustUseGoResolver(r) {
 		return r.goLookupSRV(ctx, service, proto, name)
 	}
 	var target string
@@ -285,7 +290,7 @@
 	}
 	lines, err := queryDNS(ctx, target, "srv")
 	if err != nil {
-		return
+		return "", nil, handlePlan9DNSError(err, name)
 	}
 	for _, line := range lines {
 		f := getFields(line)
@@ -306,12 +311,12 @@
 }
 
 func (r *Resolver) lookupMX(ctx context.Context, name string) (mx []*MX, err error) {
-	if r.preferGoOverPlan9() {
+	if systemConf().mustUseGoResolver(r) {
 		return r.goLookupMX(ctx, name)
 	}
 	lines, err := queryDNS(ctx, name, "mx")
 	if err != nil {
-		return
+		return nil, handlePlan9DNSError(err, name)
 	}
 	for _, line := range lines {
 		f := getFields(line)
@@ -327,12 +332,12 @@
 }
 
 func (r *Resolver) lookupNS(ctx context.Context, name string) (ns []*NS, err error) {
-	if r.preferGoOverPlan9() {
+	if systemConf().mustUseGoResolver(r) {
 		return r.goLookupNS(ctx, name)
 	}
 	lines, err := queryDNS(ctx, name, "ns")
 	if err != nil {
-		return
+		return nil, handlePlan9DNSError(err, name)
 	}
 	for _, line := range lines {
 		f := getFields(line)
@@ -345,12 +350,12 @@
 }
 
 func (r *Resolver) lookupTXT(ctx context.Context, name string) (txt []string, err error) {
-	if r.preferGoOverPlan9() {
+	if systemConf().mustUseGoResolver(r) {
 		return r.goLookupTXT(ctx, name)
 	}
 	lines, err := queryDNS(ctx, name, "txt")
 	if err != nil {
-		return
+		return nil, handlePlan9DNSError(err, name)
 	}
 	for _, line := range lines {
 		if i := bytealg.IndexByteString(line, '\t'); i >= 0 {
@@ -361,7 +366,7 @@
 }
 
 func (r *Resolver) lookupAddr(ctx context.Context, addr string) (name []string, err error) {
-	if order, conf, preferGo := r.preferGoOverPlan9WithOrderAndConf(); preferGo {
+	if order, conf := systemConf().addrLookupOrder(r, addr); order != hostLookupCgo {
 		return r.goLookupPTR(ctx, addr, order, conf)
 	}
 	arpa, err := reverseaddr(addr)
@@ -370,7 +375,7 @@
 	}
 	lines, err := queryDNS(ctx, arpa, "ptr")
 	if err != nil {
-		return
+		return nil, handlePlan9DNSError(err, addr)
 	}
 	for _, line := range lines {
 		f := getFields(line)
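
The handlePlan9DNSError helper added above maps the raw plan9 /net/dns error strings onto *DNSError with IsNotFound set for NXDOMAIN/NODATA-style answers, matching the other ports. A minimal caller-side sketch (illustrative only; "invalid.invalid" mirrors the testNXDOMAIN constant used by the TestLookupNoSuchHost test added below):

	package main

	import (
		"errors"
		"fmt"
		"net"
	)

	func main() {
		_, err := net.LookupHost("invalid.invalid")
		var dnsErr *net.DNSError
		if errors.As(err, &dnsErr) && dnsErr.IsNotFound {
			fmt.Println("name not found:", dnsErr.Name)
			return
		}
		fmt.Println("other result:", err)
	}
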
diff --git a/src/net/lookup_test.go b/src/net/lookup_test.go
index 0689c19..57ac9a9 100644
--- a/src/net/lookup_test.go
+++ b/src/net/lookup_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -1462,3 +1460,189 @@
 		return
 	}
 }
+
+func TestLookupPortNotFound(t *testing.T) {
+	allResolvers(t, func(t *testing.T) {
+		_, err := LookupPort("udp", "_-unknown-service-")
+		var dnsErr *DNSError
+		if !errors.As(err, &dnsErr) || !dnsErr.IsNotFound {
+			t.Fatalf("unexpected error: %v", err)
+		}
+	})
+}
+
+// submissions service is only available through a tcp network, see:
+// https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=submissions
+var tcpOnlyService = func() string {
+	// plan9 does not have submissions service defined in the service database.
+	if runtime.GOOS == "plan9" {
+		return "https"
+	}
+	return "submissions"
+}()
+
+func TestLookupPortDifferentNetwork(t *testing.T) {
+	allResolvers(t, func(t *testing.T) {
+		_, err := LookupPort("udp", tcpOnlyService)
+		var dnsErr *DNSError
+		if !errors.As(err, &dnsErr) || !dnsErr.IsNotFound {
+			t.Fatalf("unexpected error: %v", err)
+		}
+	})
+}
+
+func TestLookupPortEmptyNetworkString(t *testing.T) {
+	allResolvers(t, func(t *testing.T) {
+		_, err := LookupPort("", tcpOnlyService)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+	})
+}
+
+func TestLookupPortIPNetworkString(t *testing.T) {
+	allResolvers(t, func(t *testing.T) {
+		_, err := LookupPort("ip", tcpOnlyService)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+	})
+}
+
+func allResolvers(t *testing.T, f func(t *testing.T)) {
+	t.Run("default resolver", f)
+	t.Run("forced go resolver", func(t *testing.T) {
+		if fixup := forceGoDNS(); fixup != nil {
+			defer fixup()
+			f(t)
+		}
+	})
+	t.Run("forced cgo resolver", func(t *testing.T) {
+		if fixup := forceCgoDNS(); fixup != nil {
+			defer fixup()
+			f(t)
+		}
+	})
+}
+
+func TestLookupNoSuchHost(t *testing.T) {
+	mustHaveExternalNetwork(t)
+
+	const testNXDOMAIN = "invalid.invalid."
+	const testNODATA = "_ldap._tcp.google.com."
+
+	tests := []struct {
+		name  string
+		query func() error
+	}{
+		{
+			name: "LookupCNAME NXDOMAIN",
+			query: func() error {
+				_, err := LookupCNAME(testNXDOMAIN)
+				return err
+			},
+		},
+		{
+			name: "LookupHost NXDOMAIN",
+			query: func() error {
+				_, err := LookupHost(testNXDOMAIN)
+				return err
+			},
+		},
+		{
+			name: "LookupHost NODATA",
+			query: func() error {
+				_, err := LookupHost(testNODATA)
+				return err
+			},
+		},
+		{
+			name: "LookupMX NXDOMAIN",
+			query: func() error {
+				_, err := LookupMX(testNXDOMAIN)
+				return err
+			},
+		},
+		{
+			name: "LookupMX NODATA",
+			query: func() error {
+				_, err := LookupMX(testNODATA)
+				return err
+			},
+		},
+		{
+			name: "LookupNS NXDOMAIN",
+			query: func() error {
+				_, err := LookupNS(testNXDOMAIN)
+				return err
+			},
+		},
+		{
+			name: "LookupNS NODATA",
+			query: func() error {
+				_, err := LookupNS(testNODATA)
+				return err
+			},
+		},
+		{
+			name: "LookupSRV NXDOMAIN",
+			query: func() error {
+				_, _, err := LookupSRV("unknown", "tcp", testNXDOMAIN)
+				return err
+			},
+		},
+		{
+			name: "LookupTXT NXDOMAIN",
+			query: func() error {
+				_, err := LookupTXT(testNXDOMAIN)
+				return err
+			},
+		},
+		{
+			name: "LookupTXT NODATA",
+			query: func() error {
+				_, err := LookupTXT(testNODATA)
+				return err
+			},
+		},
+	}
+
+	for _, v := range tests {
+		t.Run(v.name, func(t *testing.T) {
+			allResolvers(t, func(t *testing.T) {
+				attempts := 0
+				for {
+					err := v.query()
+					if err == nil {
+						t.Errorf("unexpected success")
+						return
+					}
+					if dnsErr, ok := err.(*DNSError); ok {
+						succeeded := true
+						if !dnsErr.IsNotFound {
+							succeeded = false
+							t.Log("IsNotFound is set to false")
+						}
+						if dnsErr.Err != errNoSuchHost.Error() {
+							succeeded = false
+							t.Logf("error message is not equal to: %v", errNoSuchHost.Error())
+						}
+						if succeeded {
+							return
+						}
+					}
+					testenv.SkipFlakyNet(t)
+					if attempts < len(backoffDuration) {
+						dur := backoffDuration[attempts]
+						t.Logf("backoff %v after failure %v\n", dur, err)
+						time.Sleep(dur)
+						attempts++
+						continue
+					}
+					t.Errorf("unexpected error: %v", err)
+					return
+				}
+			})
+		})
+	}
+}
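
For the port-lookup tests above, the user-visible behaviour is that an unknown service now surfaces as a *DNSError with IsNotFound set, and the "ip" (or empty) network falls back from tcp to udp service entries. A hedged sketch of what a caller sees (the service names are examples; "domain" maps to 53 on typical service databases):

	package main

	import (
		"errors"
		"fmt"
		"net"
	)

	func main() {
		// "ip" consults tcp service entries first, then udp.
		port, err := net.LookupPort("ip", "domain")
		fmt.Println(port, err) // typically: 53 <nil>

		_, err = net.LookupPort("udp", "_-unknown-service-")
		var dnsErr *net.DNSError
		fmt.Println(errors.As(err, &dnsErr) && dnsErr.IsNotFound) // true
	}
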
diff --git a/src/net/lookup_unix.go b/src/net/lookup_unix.go
index 56ae11e..382a2d4 100644
--- a/src/net/lookup_unix.go
+++ b/src/net/lookup_unix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix || wasip1
+//go:build unix || js || wasip1
 
 package net
 
@@ -10,7 +10,6 @@
 	"context"
 	"internal/bytealg"
 	"sync"
-	"syscall"
 )
 
 var onceReadProtocols sync.Once
@@ -62,9 +61,6 @@
 }
 
 func (r *Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) {
-	if r.preferGo() {
-		return r.goLookupIP(ctx, network, host)
-	}
 	order, conf := systemConf().hostLookupOrder(r, host)
 	if order == hostLookupCgo {
 		return cgoLookupIP(ctx, network, host)
@@ -123,27 +119,3 @@
 	}
 	return r.goLookupPTR(ctx, addr, order, conf)
 }
-
-// concurrentThreadsLimit returns the number of threads we permit to
-// run concurrently doing DNS lookups via cgo. A DNS lookup may use a
-// file descriptor so we limit this to less than the number of
-// permitted open files. On some systems, notably Darwin, if
-// getaddrinfo is unable to open a file descriptor it simply returns
-// EAI_NONAME rather than a useful error. Limiting the number of
-// concurrent getaddrinfo calls to less than the permitted number of
-// file descriptors makes that error less likely. We don't bother to
-// apply the same limit to DNS lookups run directly from Go, because
-// there we will return a meaningful "too many open files" error.
-func concurrentThreadsLimit() int {
-	var rlim syscall.Rlimit
-	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil {
-		return 500
-	}
-	r := rlim.Cur
-	if r > 500 {
-		r = 500
-	} else if r > 30 {
-		r -= 30
-	}
-	return int(r)
-}
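
The preferGo short-circuit deleted above is now folded into hostLookupOrder, so the public knobs are unchanged: Resolver.PreferGo (or GODEBUG=netdns=go / netdns=cgo at the process level) selects the pure Go resolver, and a custom Dial func routes its queries over a caller-supplied connection. A sketch under those assumptions ("127.0.0.1:5353" is an arbitrary example server, not taken from this change):

	package main

	import (
		"context"
		"fmt"
		"net"
	)

	func main() {
		r := &net.Resolver{
			PreferGo: true, // force the Go resolver regardless of the platform default
			Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
				// Send DNS queries to a fixed server instead of the system default.
				var d net.Dialer
				return d.DialContext(ctx, network, "127.0.0.1:5353")
			},
		}
		addrs, err := r.LookupHost(context.Background(), "example.org")
		fmt.Println(addrs, err)
	}
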
diff --git a/src/net/lookup_windows.go b/src/net/lookup_windows.go
index 33d5ac5..3048f32 100644
--- a/src/net/lookup_windows.go
+++ b/src/net/lookup_windows.go
@@ -20,13 +20,17 @@
 const cgoAvailable = true
 
 const (
+	_DNS_ERROR_RCODE_NAME_ERROR = syscall.Errno(9003)
+	_DNS_INFO_NO_RECORDS        = syscall.Errno(9501)
+
 	_WSAHOST_NOT_FOUND = syscall.Errno(11001)
 	_WSATRY_AGAIN      = syscall.Errno(11002)
+	_WSATYPE_NOT_FOUND = syscall.Errno(10109)
 )
 
 func winError(call string, err error) error {
 	switch err {
-	case _WSAHOST_NOT_FOUND:
+	case _WSAHOST_NOT_FOUND, _DNS_ERROR_RCODE_NAME_ERROR, _DNS_INFO_NO_RECORDS:
 		return errNoSuchHost
 	}
 	return os.NewSyscallError(call, err)
@@ -91,19 +95,11 @@
 	return addrs, nil
 }
 
-// preferGoOverWindows reports whether the resolver should use the
-// pure Go implementation rather than making win32 calls to ask the
-// kernel for its answer.
-func (r *Resolver) preferGoOverWindows() bool {
-	conf := systemConf()
-	order, _ := conf.hostLookupOrder(r, "") // name is unused
-	return order != hostLookupCgo
-}
-
 func (r *Resolver) lookupIP(ctx context.Context, network, name string) ([]IPAddr, error) {
-	if r.preferGoOverWindows() {
-		return r.goLookupIP(ctx, network, name)
+	if order, conf := systemConf().hostLookupOrder(r, name); order != hostLookupCgo {
+		return r.goLookupIP(ctx, network, name, order, conf)
 	}
+
 	// TODO(bradfitz,brainman): use ctx more. See TODO below.
 
 	var family int32 = syscall.AF_UNSPEC
@@ -200,37 +196,51 @@
 }
 
 func (r *Resolver) lookupPort(ctx context.Context, network, service string) (int, error) {
-	if r.preferGoOverWindows() {
+	if systemConf().mustUseGoResolver(r) {
 		return lookupPortMap(network, service)
 	}
 
 	// TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this.
 	acquireThread()
 	defer releaseThread()
-	var stype int32
+
+	var hints syscall.AddrinfoW
+
 	switch network {
-	case "tcp4", "tcp6":
-		stype = syscall.SOCK_STREAM
-	case "udp4", "udp6":
-		stype = syscall.SOCK_DGRAM
+	case "ip": // no hints
+	case "tcp", "tcp4", "tcp6":
+		hints.Socktype = syscall.SOCK_STREAM
+		hints.Protocol = syscall.IPPROTO_TCP
+	case "udp", "udp4", "udp6":
+		hints.Socktype = syscall.SOCK_DGRAM
+		hints.Protocol = syscall.IPPROTO_UDP
+	default:
+		return 0, &DNSError{Err: "unknown network", Name: network + "/" + service}
 	}
-	hints := syscall.AddrinfoW{
-		Family:   syscall.AF_UNSPEC,
-		Socktype: stype,
-		Protocol: syscall.IPPROTO_IP,
+
+	switch ipVersion(network) {
+	case '4':
+		hints.Family = syscall.AF_INET
+	case '6':
+		hints.Family = syscall.AF_INET6
 	}
+
 	var result *syscall.AddrinfoW
 	e := syscall.GetAddrInfoW(nil, syscall.StringToUTF16Ptr(service), &hints, &result)
 	if e != nil {
 		if port, err := lookupPortMap(network, service); err == nil {
 			return port, nil
 		}
-		err := winError("getaddrinfow", e)
-		dnsError := &DNSError{Err: err.Error(), Name: network + "/" + service}
-		if err == errNoSuchHost {
-			dnsError.IsNotFound = true
+
+		// The _WSATYPE_NOT_FOUND error is returned by GetAddrInfoW
+		// when the service name is unknown. We are also checking
+		// for _WSAHOST_NOT_FOUND here to match the cgo (unix) version
+		// cgo_unix.go (cgoLookupServicePort).
+		if e == _WSATYPE_NOT_FOUND || e == _WSAHOST_NOT_FOUND {
+			return 0, &DNSError{Err: "unknown port", Name: network + "/" + service, IsNotFound: true}
 		}
-		return 0, dnsError
+		err := os.NewSyscallError("getaddrinfow", e)
+		return 0, &DNSError{Err: err.Error(), Name: network + "/" + service}
 	}
 	defer syscall.FreeAddrInfoW(result)
 	if result == nil {
@@ -249,7 +259,7 @@
 }
 
 func (r *Resolver) lookupCNAME(ctx context.Context, name string) (string, error) {
-	if order, conf := systemConf().hostLookupOrder(r, ""); order != hostLookupCgo {
+	if order, conf := systemConf().hostLookupOrder(r, name); order != hostLookupCgo {
 		return r.goLookupCNAME(ctx, name, order, conf)
 	}
 
@@ -264,7 +274,8 @@
 		return absDomainName(name), nil
 	}
 	if e != nil {
-		return "", &DNSError{Err: winError("dnsquery", e).Error(), Name: name}
+		err := winError("dnsquery", e)
+		return "", &DNSError{Err: err.Error(), Name: name, IsNotFound: err == errNoSuchHost}
 	}
 	defer syscall.DnsRecordListFree(rec, 1)
 
@@ -274,7 +285,7 @@
 }
 
 func (r *Resolver) lookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) {
-	if r.preferGoOverWindows() {
+	if systemConf().mustUseGoResolver(r) {
 		return r.goLookupSRV(ctx, service, proto, name)
 	}
 	// TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this.
@@ -289,7 +300,8 @@
 	var rec *syscall.DNSRecord
 	e := syscall.DnsQuery(target, syscall.DNS_TYPE_SRV, 0, nil, &rec, nil)
 	if e != nil {
-		return "", nil, &DNSError{Err: winError("dnsquery", e).Error(), Name: target}
+		err := winError("dnsquery", e)
+		return "", nil, &DNSError{Err: err.Error(), Name: name, IsNotFound: err == errNoSuchHost}
 	}
 	defer syscall.DnsRecordListFree(rec, 1)
 
@@ -303,7 +315,7 @@
 }
 
 func (r *Resolver) lookupMX(ctx context.Context, name string) ([]*MX, error) {
-	if r.preferGoOverWindows() {
+	if systemConf().mustUseGoResolver(r) {
 		return r.goLookupMX(ctx, name)
 	}
 	// TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this.
@@ -312,7 +324,8 @@
 	var rec *syscall.DNSRecord
 	e := syscall.DnsQuery(name, syscall.DNS_TYPE_MX, 0, nil, &rec, nil)
 	if e != nil {
-		return nil, &DNSError{Err: winError("dnsquery", e).Error(), Name: name}
+		err := winError("dnsquery", e)
+		return nil, &DNSError{Err: err.Error(), Name: name, IsNotFound: err == errNoSuchHost}
 	}
 	defer syscall.DnsRecordListFree(rec, 1)
 
@@ -326,7 +339,7 @@
 }
 
 func (r *Resolver) lookupNS(ctx context.Context, name string) ([]*NS, error) {
-	if r.preferGoOverWindows() {
+	if systemConf().mustUseGoResolver(r) {
 		return r.goLookupNS(ctx, name)
 	}
 	// TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this.
@@ -335,7 +348,8 @@
 	var rec *syscall.DNSRecord
 	e := syscall.DnsQuery(name, syscall.DNS_TYPE_NS, 0, nil, &rec, nil)
 	if e != nil {
-		return nil, &DNSError{Err: winError("dnsquery", e).Error(), Name: name}
+		err := winError("dnsquery", e)
+		return nil, &DNSError{Err: err.Error(), Name: name, IsNotFound: err == errNoSuchHost}
 	}
 	defer syscall.DnsRecordListFree(rec, 1)
 
@@ -348,7 +362,7 @@
 }
 
 func (r *Resolver) lookupTXT(ctx context.Context, name string) ([]string, error) {
-	if r.preferGoOverWindows() {
+	if systemConf().mustUseGoResolver(r) {
 		return r.goLookupTXT(ctx, name)
 	}
 	// TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this.
@@ -357,7 +371,8 @@
 	var rec *syscall.DNSRecord
 	e := syscall.DnsQuery(name, syscall.DNS_TYPE_TEXT, 0, nil, &rec, nil)
 	if e != nil {
-		return nil, &DNSError{Err: winError("dnsquery", e).Error(), Name: name}
+		err := winError("dnsquery", e)
+		return nil, &DNSError{Err: err.Error(), Name: name, IsNotFound: err == errNoSuchHost}
 	}
 	defer syscall.DnsRecordListFree(rec, 1)
 
@@ -374,7 +389,7 @@
 }
 
 func (r *Resolver) lookupAddr(ctx context.Context, addr string) ([]string, error) {
-	if order, conf := systemConf().hostLookupOrder(r, ""); order != hostLookupCgo {
+	if order, conf := systemConf().addrLookupOrder(r, addr); order != hostLookupCgo {
 		return r.goLookupPTR(ctx, addr, order, conf)
 	}
 
@@ -388,7 +403,8 @@
 	var rec *syscall.DNSRecord
 	e := syscall.DnsQuery(arpa, syscall.DNS_TYPE_PTR, 0, nil, &rec, nil)
 	if e != nil {
-		return nil, &DNSError{Err: winError("dnsquery", e).Error(), Name: addr}
+		err := winError("dnsquery", e)
+		return nil, &DNSError{Err: err.Error(), Name: addr, IsNotFound: err == errNoSuchHost}
 	}
 	defer syscall.DnsRecordListFree(rec, 1)
 
diff --git a/src/net/mail/message.go b/src/net/mail/message.go
index af516fc..fc2a9e4 100644
--- a/src/net/mail/message.go
+++ b/src/net/mail/message.go
@@ -280,7 +280,7 @@
 	// Add quotes if needed
 	quoteLocal := false
 	for i, r := range local {
-		if isAtext(r, false, false) {
+		if isAtext(r, false) {
 			continue
 		}
 		if r == '.' {
@@ -444,7 +444,7 @@
 	if !p.consume('<') {
 		atext := true
 		for _, r := range displayName {
-			if !isAtext(r, true, false) {
+			if !isAtext(r, true) {
 				atext = false
 				break
 			}
@@ -479,7 +479,9 @@
 	// handle empty group.
 	p.skipSpace()
 	if p.consume(';') {
-		p.skipCFWS()
+		if !p.skipCFWS() {
+			return nil, errors.New("mail: misformatted parenthetical comment")
+		}
 		return group, nil
 	}
 
@@ -496,7 +498,9 @@
 			return nil, errors.New("mail: misformatted parenthetical comment")
 		}
 		if p.consume(';') {
-			p.skipCFWS()
+			if !p.skipCFWS() {
+				return nil, errors.New("mail: misformatted parenthetical comment")
+			}
 			break
 		}
 		if !p.consume(',') {
@@ -566,6 +570,12 @@
 	var words []string
 	var isPrevEncoded bool
 	for {
+		// obs-phrase allows CFWS after one word
+		if len(words) > 0 {
+			if !p.skipCFWS() {
+				return "", errors.New("mail: misformatted parenthetical comment")
+			}
+		}
 		// word = atom / quoted-string
 		var word string
 		p.skipSpace()
@@ -661,7 +671,6 @@
 // If dot is true, consumeAtom parses an RFC 5322 dot-atom instead.
 // If permissive is true, consumeAtom will not fail on:
 // - leading/trailing/double dots in the atom (see golang.org/issue/4938)
-// - special characters (RFC 5322 3.2.3) except '<', '>', ':' and '"' (see golang.org/issue/21018)
 func (p *addrParser) consumeAtom(dot bool, permissive bool) (atom string, err error) {
 	i := 0
 
@@ -672,7 +681,7 @@
 		case size == 1 && r == utf8.RuneError:
 			return "", fmt.Errorf("mail: invalid utf-8 in address: %q", p.s)
 
-		case size == 0 || !isAtext(r, dot, permissive):
+		case size == 0 || !isAtext(r, dot):
 			break Loop
 
 		default:
@@ -850,18 +859,13 @@
 
 // isAtext reports whether r is an RFC 5322 atext character.
 // If dot is true, period is included.
-// If permissive is true, RFC 5322 3.2.3 specials is included,
-// except '<', '>', ':' and '"'.
-func isAtext(r rune, dot, permissive bool) bool {
+func isAtext(r rune, dot bool) bool {
 	switch r {
 	case '.':
 		return dot
 
 	// RFC 5322 3.2.3. specials
-	case '(', ')', '[', ']', ';', '@', '\\', ',':
-		return permissive
-
-	case '<', '>', '"', ':':
+	case '(', ')', '<', '>', '[', ']', ':', ';', '@', '\\', ',', '"': // RFC 5322 3.2.3. specials
 		return false
 	}
 	return isVchar(r)
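
Dropping the permissive mode from isAtext and requiring well-formed CFWS means comments in display names are now parsed and stripped rather than kept verbatim; the test updates below spell out the new expectations. An illustrative use, matching the new test case:

	package main

	import (
		"fmt"
		"net/mail"
	)

	func main() {
		a, err := mail.ParseAddress("John (middle) Doe <jdoe@machine.example>")
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q %q\n", a.Name, a.Address) // "John Doe" "jdoe@machine.example"
	}
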
diff --git a/src/net/mail/message_test.go b/src/net/mail/message_test.go
index 1e1bb40..1f2f62a 100644
--- a/src/net/mail/message_test.go
+++ b/src/net/mail/message_test.go
@@ -385,8 +385,11 @@
 		13: {"group not closed: null@example.com", "expected comma"},
 		14: {"group: first@example.com, second@example.com;", "group with multiple addresses"},
 		15: {"john.doe", "missing '@' or angle-addr"},
-		16: {"john.doe@", "no angle-addr"},
+		16: {"john.doe@", "missing '@' or angle-addr"},
 		17: {"John Doe@foo.bar", "no angle-addr"},
+		18: {" group: null@example.com; (asd", "misformatted parenthetical comment"},
+		19: {" group: ; (asd", "misformatted parenthetical comment"},
+		20: {`(John) Doe <jdoe@machine.example>`, "missing word in phrase:"},
 	}
 
 	for i, tc := range mustErrTestCases {
@@ -436,6 +439,15 @@
 				Address: "john.q.public@example.com",
 			}},
 		},
+		// Comment in display name
+		{
+			`John (middle) Doe <jdoe@machine.example>`,
+			[]*Address{{
+				Name:    "John Doe",
+				Address: "jdoe@machine.example",
+			}},
+		},
+		// Display name is quoted string, so comment is not a comment
 		{
 			`"John (middle) Doe" <jdoe@machine.example>`,
 			[]*Address{{
@@ -444,20 +456,6 @@
 			}},
 		},
 		{
-			`John (middle) Doe <jdoe@machine.example>`,
-			[]*Address{{
-				Name:    "John (middle) Doe",
-				Address: "jdoe@machine.example",
-			}},
-		},
-		{
-			`John !@M@! Doe <jdoe@machine.example>`,
-			[]*Address{{
-				Name:    "John !@M@! Doe",
-				Address: "jdoe@machine.example",
-			}},
-		},
-		{
 			`"John <middle> Doe" <jdoe@machine.example>`,
 			[]*Address{{
 				Name:    "John <middle> Doe",
@@ -788,6 +786,26 @@
 				},
 			},
 		},
+		// Comment in group display name
+		{
+			`group (comment:): a@example.com, b@example.com;`,
+			[]*Address{
+				{
+					Address: "a@example.com",
+				},
+				{
+					Address: "b@example.com",
+				},
+			},
+		},
+		{
+			`x(:"):"@a.example;("@b.example;`,
+			[]*Address{
+				{
+					Address: `@a.example;(@b.example`,
+				},
+			},
+		},
 	}
 	for _, test := range tests {
 		if len(test.exp) == 1 {
diff --git a/src/net/main_conf_test.go b/src/net/main_conf_test.go
index 28a1cb8..307ff5d 100644
--- a/src/net/main_conf_test.go
+++ b/src/net/main_conf_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !plan9 && !wasip1
+//go:build !plan9
 
 package net
 
diff --git a/src/net/main_noconf_test.go b/src/net/main_noconf_test.go
index 077a36e..cdd7c54 100644
--- a/src/net/main_noconf_test.go
+++ b/src/net/main_noconf_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (js && wasm) || plan9 || wasip1
+//go:build plan9
 
 package net
 
diff --git a/src/net/main_posix_test.go b/src/net/main_posix_test.go
index a7942ee..24a2a55 100644
--- a/src/net/main_posix_test.go
+++ b/src/net/main_posix_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !plan9 && !wasip1
+//go:build !plan9
 
 package net
 
diff --git a/src/net/main_test.go b/src/net/main_test.go
index 9fd5c88..7dc1e3e 100644
--- a/src/net/main_test.go
+++ b/src/net/main_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -16,6 +14,7 @@
 	"strings"
 	"sync"
 	"testing"
+	"time"
 )
 
 var (
@@ -61,6 +60,20 @@
 	os.Exit(st)
 }
 
+// mustSetDeadline calls the bound method m to set a deadline on a Conn.
+// If the call fails, mustSetDeadline skips t if the current GOOS is believed
+// not to support deadlines, or fails the test otherwise.
+func mustSetDeadline(t testing.TB, m func(time.Time) error, d time.Duration) {
+	err := m(time.Now().Add(d))
+	if err != nil {
+		t.Helper()
+		if runtime.GOOS == "plan9" {
+			t.Skipf("skipping: %s does not support deadlines", runtime.GOOS)
+		}
+		t.Fatal(err)
+	}
+}
+
 type ipv6LinkLocalUnicastTest struct {
 	network, address string
 	nameLookup       bool
diff --git a/src/net/main_wasm_test.go b/src/net/main_wasm_test.go
new file mode 100644
index 0000000..b8196bb
--- /dev/null
+++ b/src/net/main_wasm_test.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build wasip1 || js
+
+package net
+
+func installTestHooks() {}
+
+func uninstallTestHooks() {}
+
+func forceCloseSockets() {}
diff --git a/src/net/main_windows_test.go b/src/net/main_windows_test.go
index 07f21b7..bc024c0 100644
--- a/src/net/main_windows_test.go
+++ b/src/net/main_windows_test.go
@@ -8,7 +8,6 @@
 
 var (
 	// Placeholders for saving original socket system calls.
-	origSocket      = socketFunc
 	origWSASocket   = wsaSocketFunc
 	origClosesocket = poll.CloseFunc
 	origConnect     = connectFunc
@@ -18,7 +17,6 @@
 )
 
 func installTestHooks() {
-	socketFunc = sw.Socket
 	wsaSocketFunc = sw.WSASocket
 	poll.CloseFunc = sw.Closesocket
 	connectFunc = sw.Connect
@@ -28,7 +26,6 @@
 }
 
 func uninstallTestHooks() {
-	socketFunc = origSocket
 	wsaSocketFunc = origWSASocket
 	poll.CloseFunc = origClosesocket
 	connectFunc = origConnect
diff --git a/src/net/mockserver_test.go b/src/net/mockserver_test.go
index f86dd66..46b2a57 100644
--- a/src/net/mockserver_test.go
+++ b/src/net/mockserver_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -339,6 +337,7 @@
 		return c
 	}
 
+	t.Helper()
 	switch network {
 	case "udp":
 		if supportsIPv4() {
@@ -359,7 +358,6 @@
 		return listenPacket(network, testUnixAddr(t))
 	}
 
-	t.Helper()
 	t.Fatalf("%s is not supported", network)
 	return nil
 }
diff --git a/src/net/net.go b/src/net/net.go
index 5cfc25f..c434c96 100644
--- a/src/net/net.go
+++ b/src/net/net.go
@@ -8,8 +8,8 @@
 
 Although the package provides access to low-level networking
 primitives, most clients will need only the basic interface provided
-by the Dial, Listen, and Accept functions and the associated
-Conn and Listener interfaces. The crypto/tls package uses
+by the [Dial], [Listen], and Accept functions and the associated
+[Conn] and [Listener] interfaces. The crypto/tls package uses
 the same interfaces and similar Dial and Listen functions.
 
 The Dial function connects to a server:
@@ -39,7 +39,7 @@
 # Name Resolution
 
 The method for resolving domain names, whether indirectly with functions like Dial
-or directly with functions like LookupHost and LookupAddr, varies by operating system.
+or directly with functions like [LookupHost] and [LookupAddr], varies by operating system.
 
 On Unix systems, the resolver has two options for resolving names.
 It can use a pure Go resolver that sends DNS requests directly to the servers
@@ -95,8 +95,8 @@
 
 // Addr represents a network end point address.
 //
-// The two methods Network and String conventionally return strings
-// that can be passed as the arguments to Dial, but the exact form
+// The two methods [Addr.Network] and [Addr.String] conventionally return strings
+// that can be passed as the arguments to [Dial], but the exact form
 // and meaning of the strings is up to the implementation.
 type Addr interface {
 	Network() string // name of the network (for example, "tcp", "udp")
@@ -284,7 +284,7 @@
 	return nil
 }
 
-// File returns a copy of the underlying os.File.
+// File returns a copy of the underlying [os.File].
 // It is the caller's responsibility to close f when finished.
 // Closing c does not affect f, and closing f does not affect c.
 //
@@ -624,7 +624,11 @@
 	Server      string // server used
 	IsTimeout   bool   // if true, timed out; not all timeouts set this
 	IsTemporary bool   // if true, error is temporary; not all errors set this
-	IsNotFound  bool   // if true, host could not be found
+
+	// IsNotFound is set to true when the requested name does not
+	// contain any records of the requested type (data not found),
+	// or the name itself was not found (NXDOMAIN).
+	IsNotFound bool
 }
 
 func (e *DNSError) Error() string {
@@ -641,12 +645,12 @@
 
 // Timeout reports whether the DNS lookup is known to have timed out.
 // This is not always known; a DNS lookup may fail due to a timeout
-// and return a DNSError for which Timeout returns false.
+// and return a [DNSError] for which Timeout returns false.
 func (e *DNSError) Timeout() bool { return e.IsTimeout }
 
 // Temporary reports whether the DNS error is known to be temporary.
 // This is not always known; a DNS lookup may fail due to a temporary
-// error and return a DNSError for which Temporary returns false.
+// error and return a [DNSError] for which Temporary returns false.
 func (e *DNSError) Temporary() bool { return e.IsTimeout || e.IsTemporary }
 
 // errClosed exists just so that the docs for ErrClosed don't mention
@@ -660,15 +664,53 @@
 // errors.Is(err, net.ErrClosed).
 var ErrClosed error = errClosed
 
-type writerOnly struct {
-	io.Writer
+// noReadFrom can be embedded alongside another type to
+// hide the ReadFrom method of that other type.
+type noReadFrom struct{}
+
+// ReadFrom hides another ReadFrom method.
+// It should never be called.
+func (noReadFrom) ReadFrom(io.Reader) (int64, error) {
+	panic("can't happen")
+}
+
+// tcpConnWithoutReadFrom implements all the methods of *TCPConn other
+// than ReadFrom. This is used to permit ReadFrom to call io.Copy
+// without leading to a recursive call to ReadFrom.
+type tcpConnWithoutReadFrom struct {
+	noReadFrom
+	*TCPConn
 }
 
 // Fallback implementation of io.ReaderFrom's ReadFrom, when sendfile isn't
 // applicable.
-func genericReadFrom(w io.Writer, r io.Reader) (n int64, err error) {
+func genericReadFrom(c *TCPConn, r io.Reader) (n int64, err error) {
 	// Use wrapper to hide existing r.ReadFrom from io.Copy.
-	return io.Copy(writerOnly{w}, r)
+	return io.Copy(tcpConnWithoutReadFrom{TCPConn: c}, r)
+}
+
+// noWriteTo can be embedded alongside another type to
+// hide the WriteTo method of that other type.
+type noWriteTo struct{}
+
+// WriteTo hides another WriteTo method.
+// It should never be called.
+func (noWriteTo) WriteTo(io.Writer) (int64, error) {
+	panic("can't happen")
+}
+
+// tcpConnWithoutWriteTo implements all the methods of *TCPConn other
+// than WriteTo. This is used to permit WriteTo to call io.Copy
+// without leading to a recursive call to WriteTo.
+type tcpConnWithoutWriteTo struct {
+	noWriteTo
+	*TCPConn
+}
+
+// Fallback implementation of io.WriterTo's WriteTo, when zero-copy isn't applicable.
+func genericWriteTo(c *TCPConn, w io.Writer) (n int64, err error) {
+	// Use wrapper to hide existing w.WriteTo from io.Copy.
+	return io.Copy(w, tcpConnWithoutWriteTo{TCPConn: c})
 }
 
 // Limit the number of concurrent cgo-using goroutines, because
@@ -714,7 +756,7 @@
 
 // WriteTo writes contents of the buffers to w.
 //
-// WriteTo implements io.WriterTo for Buffers.
+// WriteTo implements [io.WriterTo] for [Buffers].
 //
 // WriteTo modifies the slice v as well as v[i] for 0 <= i < len(v),
 // but does not modify v[i][j] for any i, j.
@@ -736,7 +778,7 @@
 
 // Read from the buffers.
 //
-// Read implements io.Reader for Buffers.
+// Read implements [io.Reader] for [Buffers].
 //
 // Read modifies the slice v as well as v[i] for 0 <= i < len(v),
 // but does not modify v[i][j] for any i, j.
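
The noReadFrom/noWriteTo embedding above is a general trick: an ambiguous promoted method drops out of the outer type's method set, so io.Copy cannot take the io.ReaderFrom/io.WriterTo fast path and recurse back into the caller. A standalone sketch of the same pattern (the type names here are illustrative, not the net package's):

	package main

	import (
		"io"
		"os"
		"strings"
	)

	// noWriteTo shadows the WriteTo promoted from the other embedded type.
	type noWriteTo struct{}

	func (noWriteTo) WriteTo(io.Writer) (int64, error) { panic("unreachable") }

	// readerWithoutWriteTo is *strings.Reader minus its WriteTo method:
	// the two WriteTo candidates are ambiguous, so neither is promoted.
	type readerWithoutWriteTo struct {
		noWriteTo
		*strings.Reader
	}

	func main() {
		r := strings.NewReader("hello\n")
		// io.Copy sees no io.WriterTo here, so data flows through plain Read calls.
		_, _ = io.Copy(os.Stdout, readerWithoutWriteTo{Reader: r})
	}
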
diff --git a/src/net/net_fake.go b/src/net/net_fake.go
index 908767a..525ff32 100644
--- a/src/net/net_fake.go
+++ b/src/net/net_fake.go
@@ -2,405 +2,1169 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Fake networking for js/wasm and wasip1/wasm. It is intended to allow tests of other package to pass.
+// Fake networking for js/wasm and wasip1/wasm.
+// It is intended to allow tests of other packages to pass.
 
-//go:build (js && wasm) || wasip1
+//go:build js || wasip1
 
 package net
 
 import (
 	"context"
+	"errors"
 	"io"
 	"os"
+	"runtime"
 	"sync"
+	"sync/atomic"
 	"syscall"
 	"time"
 )
 
-var listenersMu sync.Mutex
-var listeners = make(map[fakeNetAddr]*netFD)
+var (
+	sockets         sync.Map // fakeSockAddr → *netFD
+	fakeSocketIDs   sync.Map // fakeNetFD.id → *netFD
+	fakePorts       sync.Map // int (port #) → *netFD
+	nextPortCounter atomic.Int32
+)
 
-var portCounterMu sync.Mutex
-var portCounter = 0
+const defaultBuffer = 65535
 
-func nextPort() int {
-	portCounterMu.Lock()
-	defer portCounterMu.Unlock()
-	portCounter++
-	return portCounter
-}
-
-type fakeNetAddr struct {
-	network string
+type fakeSockAddr struct {
+	family  int
 	address string
 }
 
-type fakeNetFD struct {
-	listener fakeNetAddr
-	r        *bufferedPipe
-	w        *bufferedPipe
-	incoming chan *netFD
-	closedMu sync.Mutex
-	closed   bool
+func fakeAddr(sa sockaddr) fakeSockAddr {
+	return fakeSockAddr{
+		family:  sa.family(),
+		address: sa.String(),
+	}
 }
 
 // socket returns a network file descriptor that is ready for
-// asynchronous I/O using the network poller.
+// I/O using the fake network.
 func socket(ctx context.Context, net string, family, sotype, proto int, ipv6only bool, laddr, raddr sockaddr, ctrlCtxFn func(context.Context, string, string, syscall.RawConn) error) (*netFD, error) {
-	fd := &netFD{family: family, sotype: sotype, net: net}
-	if laddr != nil && raddr == nil {
-		return fakelistener(fd, laddr)
-	}
-	fd2 := &netFD{family: family, sotype: sotype, net: net}
-	return fakeconn(fd, fd2, laddr, raddr)
-}
-
-func fakeIPAndPort(ip IP, port int) (IP, int) {
-	if ip == nil {
-		ip = IPv4(127, 0, 0, 1)
-	}
-	if port == 0 {
-		port = nextPort()
-	}
-	return ip, port
-}
-
-func fakeTCPAddr(addr *TCPAddr) *TCPAddr {
-	var ip IP
-	var port int
-	var zone string
-	if addr != nil {
-		ip, port, zone = addr.IP, addr.Port, addr.Zone
-	}
-	ip, port = fakeIPAndPort(ip, port)
-	return &TCPAddr{IP: ip, Port: port, Zone: zone}
-}
-
-func fakeUDPAddr(addr *UDPAddr) *UDPAddr {
-	var ip IP
-	var port int
-	var zone string
-	if addr != nil {
-		ip, port, zone = addr.IP, addr.Port, addr.Zone
-	}
-	ip, port = fakeIPAndPort(ip, port)
-	return &UDPAddr{IP: ip, Port: port, Zone: zone}
-}
-
-func fakeUnixAddr(sotype int, addr *UnixAddr) *UnixAddr {
-	var net, name string
-	if addr != nil {
-		name = addr.Name
+	if raddr != nil && ctrlCtxFn != nil {
+		return nil, os.NewSyscallError("socket", syscall.ENOTSUP)
 	}
 	switch sotype {
-	case syscall.SOCK_DGRAM:
-		net = "unixgram"
-	case syscall.SOCK_SEQPACKET:
-		net = "unixpacket"
+	case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET, syscall.SOCK_DGRAM:
 	default:
-		net = "unix"
+		return nil, os.NewSyscallError("socket", syscall.ENOTSUP)
 	}
-	return &UnixAddr{Net: net, Name: name}
-}
 
-func fakelistener(fd *netFD, laddr sockaddr) (*netFD, error) {
-	switch l := laddr.(type) {
-	case *TCPAddr:
-		laddr = fakeTCPAddr(l)
-	case *UDPAddr:
-		laddr = fakeUDPAddr(l)
-	case *UnixAddr:
-		if l.Name == "" {
-			return nil, syscall.ENOENT
+	fd := &netFD{
+		family: family,
+		sotype: sotype,
+		net:    net,
+	}
+	fd.fakeNetFD = newFakeNetFD(fd)
+
+	if raddr == nil {
+		if err := fakeListen(fd, laddr); err != nil {
+			fd.Close()
+			return nil, err
 		}
-		laddr = fakeUnixAddr(fd.sotype, l)
-	default:
-		return nil, syscall.EOPNOTSUPP
+		return fd, nil
 	}
 
-	listener := fakeNetAddr{
-		network: laddr.Network(),
-		address: laddr.String(),
+	if err := fakeConnect(ctx, fd, laddr, raddr); err != nil {
+		fd.Close()
+		return nil, err
 	}
-
-	fd.fakeNetFD = &fakeNetFD{
-		listener: listener,
-		incoming: make(chan *netFD, 1024),
-	}
-
-	fd.laddr = laddr
-	listenersMu.Lock()
-	defer listenersMu.Unlock()
-	if _, exists := listeners[listener]; exists {
-		return nil, syscall.EADDRINUSE
-	}
-	listeners[listener] = fd
 	return fd, nil
 }
 
-func fakeconn(fd *netFD, fd2 *netFD, laddr, raddr sockaddr) (*netFD, error) {
-	switch r := raddr.(type) {
+func validateResolvedAddr(net string, family int, sa sockaddr) error {
+	validateIP := func(ip IP) error {
+		switch family {
+		case syscall.AF_INET:
+			if len(ip) != 4 {
+				return &AddrError{
+					Err:  "non-IPv4 address",
+					Addr: ip.String(),
+				}
+			}
+		case syscall.AF_INET6:
+			if len(ip) != 16 {
+				return &AddrError{
+					Err:  "non-IPv6 address",
+					Addr: ip.String(),
+				}
+			}
+		default:
+			panic("net: unexpected address family in validateResolvedAddr")
+		}
+		return nil
+	}
+
+	switch net {
+	case "tcp", "tcp4", "tcp6":
+		sa, ok := sa.(*TCPAddr)
+		if !ok {
+			return &AddrError{
+				Err:  "non-TCP address for " + net + " network",
+				Addr: sa.String(),
+			}
+		}
+		if err := validateIP(sa.IP); err != nil {
+			return err
+		}
+		if sa.Port <= 0 || sa.Port >= 1<<16 {
+			return &AddrError{
+				Err:  "port out of range",
+				Addr: sa.String(),
+			}
+		}
+		return nil
+
+	case "udp", "udp4", "udp6":
+		sa, ok := sa.(*UDPAddr)
+		if !ok {
+			return &AddrError{
+				Err:  "non-UDP address for " + net + " network",
+				Addr: sa.String(),
+			}
+		}
+		if err := validateIP(sa.IP); err != nil {
+			return err
+		}
+		if sa.Port <= 0 || sa.Port >= 1<<16 {
+			return &AddrError{
+				Err:  "port out of range",
+				Addr: sa.String(),
+			}
+		}
+		return nil
+
+	case "unix", "unixgram", "unixpacket":
+		sa, ok := sa.(*UnixAddr)
+		if !ok {
+			return &AddrError{
+				Err:  "non-Unix address for " + net + " network",
+				Addr: sa.String(),
+			}
+		}
+		if sa.Name != "" {
+			i := len(sa.Name) - 1
+			for i > 0 && !os.IsPathSeparator(sa.Name[i]) {
+				i--
+			}
+			for i > 0 && os.IsPathSeparator(sa.Name[i]) {
+				i--
+			}
+			if i <= 0 {
+				return &AddrError{
+					Err:  "unix socket name missing path component",
+					Addr: sa.Name,
+				}
+			}
+			if _, err := os.Stat(sa.Name[:i+1]); err != nil {
+				return &AddrError{
+					Err:  err.Error(),
+					Addr: sa.Name,
+				}
+			}
+		}
+		return nil
+
+	default:
+		return &AddrError{
+			Err:  syscall.EAFNOSUPPORT.Error(),
+			Addr: sa.String(),
+		}
+	}
+}
+
+func matchIPFamily(family int, addr sockaddr) sockaddr {
+	convertIP := func(ip IP) IP {
+		switch family {
+		case syscall.AF_INET:
+			return ip.To4()
+		case syscall.AF_INET6:
+			return ip.To16()
+		default:
+			return ip
+		}
+	}
+
+	switch addr := addr.(type) {
 	case *TCPAddr:
-		r = fakeTCPAddr(r)
-		raddr = r
-		laddr = fakeTCPAddr(laddr.(*TCPAddr))
+		ip := convertIP(addr.IP)
+		if ip == nil || len(ip) == len(addr.IP) {
+			return addr
+		}
+		return &TCPAddr{IP: ip, Port: addr.Port, Zone: addr.Zone}
 	case *UDPAddr:
-		r = fakeUDPAddr(r)
-		raddr = r
-		laddr = fakeUDPAddr(laddr.(*UDPAddr))
-	case *UnixAddr:
-		r = fakeUnixAddr(fd.sotype, r)
-		raddr = r
-		laddr = &UnixAddr{Net: r.Net, Name: r.Name}
+		ip := convertIP(addr.IP)
+		if ip == nil || len(ip) == len(addr.IP) {
+			return addr
+		}
+		return &UDPAddr{IP: ip, Port: addr.Port, Zone: addr.Zone}
 	default:
-		return nil, syscall.EAFNOSUPPORT
+		return addr
 	}
-	fd.laddr = laddr
-	fd.raddr = raddr
-
-	fd.fakeNetFD = &fakeNetFD{
-		r: newBufferedPipe(65536),
-		w: newBufferedPipe(65536),
-	}
-	fd2.fakeNetFD = &fakeNetFD{
-		r: fd.fakeNetFD.w,
-		w: fd.fakeNetFD.r,
-	}
-
-	fd2.laddr = fd.raddr
-	fd2.raddr = fd.laddr
-
-	listener := fakeNetAddr{
-		network: fd.raddr.Network(),
-		address: fd.raddr.String(),
-	}
-	listenersMu.Lock()
-	defer listenersMu.Unlock()
-	l, ok := listeners[listener]
-	if !ok {
-		return nil, syscall.ECONNREFUSED
-	}
-	l.incoming <- fd2
-	return fd, nil
 }
 
-func (fd *fakeNetFD) Read(p []byte) (n int, err error) {
-	return fd.r.Read(p)
+type fakeNetFD struct {
+	fd           *netFD
+	assignedPort int // 0 if no port has been assigned for this socket
+
+	queue         *packetQueue // incoming packets
+	peer          *netFD       // connected peer (for outgoing packets); nil for listeners and PacketConns
+	readDeadline  atomic.Pointer[deadlineTimer]
+	writeDeadline atomic.Pointer[deadlineTimer]
+
+	fakeAddr fakeSockAddr // cached fakeSockAddr equivalent of fd.laddr
+
+	// The incoming channels hold incoming connections that have not yet been accepted.
+	// All of these channels are 1-buffered.
+	incoming      chan []*netFD // holds the queue when it has >0 but <SOMAXCONN pending connections; closed when the Listener is closed
+	incomingFull  chan []*netFD // holds the queue when it has SOMAXCONN pending connections
+	incomingEmpty chan bool     // holds true when the incoming queue is empty
 }
 
-func (fd *fakeNetFD) Write(p []byte) (nn int, err error) {
-	return fd.w.Write(p)
+func newFakeNetFD(fd *netFD) *fakeNetFD {
+	ffd := &fakeNetFD{fd: fd}
+	ffd.readDeadline.Store(newDeadlineTimer(noDeadline))
+	ffd.writeDeadline.Store(newDeadlineTimer(noDeadline))
+	return ffd
 }
 
-func (fd *fakeNetFD) Close() error {
-	fd.closedMu.Lock()
-	if fd.closed {
-		fd.closedMu.Unlock()
-		return nil
-	}
-	fd.closed = true
-	fd.closedMu.Unlock()
+func (ffd *fakeNetFD) Read(p []byte) (n int, err error) {
+	n, _, err = ffd.queue.recvfrom(ffd.readDeadline.Load(), p, false, nil)
+	return n, err
+}
 
-	if fd.listener != (fakeNetAddr{}) {
-		listenersMu.Lock()
-		delete(listeners, fd.listener)
-		close(fd.incoming)
-		fd.listener = fakeNetAddr{}
-		listenersMu.Unlock()
-		return nil
+func (ffd *fakeNetFD) Write(p []byte) (nn int, err error) {
+	peer := ffd.peer
+	if peer == nil {
+		if ffd.fd.raddr == nil {
+			return 0, os.NewSyscallError("write", syscall.ENOTCONN)
+		}
+		peeri, _ := sockets.Load(fakeAddr(ffd.fd.raddr.(sockaddr)))
+		if peeri == nil {
+			return 0, os.NewSyscallError("write", syscall.ECONNRESET)
+		}
+		peer = peeri.(*netFD)
+		if peer.queue == nil {
+			return 0, os.NewSyscallError("write", syscall.ECONNRESET)
+		}
 	}
 
-	fd.r.Close()
-	fd.w.Close()
+	if peer.fakeNetFD == nil {
+		return 0, os.NewSyscallError("write", syscall.EINVAL)
+	}
+	return peer.queue.write(ffd.writeDeadline.Load(), p, ffd.fd.laddr.(sockaddr))
+}
+
+func (ffd *fakeNetFD) Close() (err error) {
+	if ffd.fakeAddr != (fakeSockAddr{}) {
+		sockets.CompareAndDelete(ffd.fakeAddr, ffd.fd)
+	}
+
+	if ffd.queue != nil {
+		if closeErr := ffd.queue.closeRead(); err == nil {
+			err = closeErr
+		}
+	}
+	if ffd.peer != nil {
+		if closeErr := ffd.peer.queue.closeWrite(); err == nil {
+			err = closeErr
+		}
+	}
+	ffd.readDeadline.Load().Reset(noDeadline)
+	ffd.writeDeadline.Load().Reset(noDeadline)
+
+	if ffd.incoming != nil {
+		var (
+			incoming []*netFD
+			ok       bool
+		)
+		select {
+		case _, ok = <-ffd.incomingEmpty:
+		case incoming, ok = <-ffd.incoming:
+		case incoming, ok = <-ffd.incomingFull:
+		}
+		if ok {
+			// Sends on ffd.incoming require a receive first.
+			// Since we successfully received, no other goroutine may
+			// send on it at this point, and we may safely close it.
+			close(ffd.incoming)
+
+			for _, c := range incoming {
+				c.Close()
+			}
+		}
+	}
+
+	if ffd.assignedPort != 0 {
+		fakePorts.CompareAndDelete(ffd.assignedPort, ffd.fd)
+	}
+
+	return err
+}
+
+func (ffd *fakeNetFD) closeRead() error {
+	return ffd.queue.closeRead()
+}
+
+func (ffd *fakeNetFD) closeWrite() error {
+	if ffd.peer == nil {
+		return os.NewSyscallError("closeWrite", syscall.ENOTCONN)
+	}
+	return ffd.peer.queue.closeWrite()
+}
+
+func (ffd *fakeNetFD) accept(laddr Addr) (*netFD, error) {
+	if ffd.incoming == nil {
+		return nil, os.NewSyscallError("accept", syscall.EINVAL)
+	}
+
+	var (
+		incoming []*netFD
+		ok       bool
+	)
+	select {
+	case <-ffd.readDeadline.Load().expired:
+		return nil, os.ErrDeadlineExceeded
+	case incoming, ok = <-ffd.incoming:
+		if !ok {
+			return nil, ErrClosed
+		}
+	case incoming, ok = <-ffd.incomingFull:
+	}
+
+	peer := incoming[0]
+	incoming = incoming[1:]
+	if len(incoming) == 0 {
+		ffd.incomingEmpty <- true
+	} else {
+		ffd.incoming <- incoming
+	}
+	return peer, nil
+}
+
+func (ffd *fakeNetFD) SetDeadline(t time.Time) error {
+	err1 := ffd.SetReadDeadline(t)
+	err2 := ffd.SetWriteDeadline(t)
+	if err1 != nil {
+		return err1
+	}
+	return err2
+}
+
+func (ffd *fakeNetFD) SetReadDeadline(t time.Time) error {
+	dt := ffd.readDeadline.Load()
+	if !dt.Reset(t) {
+		ffd.readDeadline.Store(newDeadlineTimer(t))
+	}
 	return nil
 }
 
-func (fd *fakeNetFD) closeRead() error {
-	fd.r.Close()
-	return nil
-}
-
-func (fd *fakeNetFD) closeWrite() error {
-	fd.w.Close()
-	return nil
-}
-
-func (fd *fakeNetFD) accept() (*netFD, error) {
-	c, ok := <-fd.incoming
-	if !ok {
-		return nil, syscall.EINVAL
+func (ffd *fakeNetFD) SetWriteDeadline(t time.Time) error {
+	dt := ffd.writeDeadline.Load()
+	if !dt.Reset(t) {
+		ffd.writeDeadline.Store(newDeadlineTimer(t))
 	}
-	return c, nil
-}
-
-func (fd *fakeNetFD) SetDeadline(t time.Time) error {
-	fd.r.SetReadDeadline(t)
-	fd.w.SetWriteDeadline(t)
 	return nil
 }
 
-func (fd *fakeNetFD) SetReadDeadline(t time.Time) error {
-	fd.r.SetReadDeadline(t)
-	return nil
-}
+const maxPacketSize = 65535
 
-func (fd *fakeNetFD) SetWriteDeadline(t time.Time) error {
-	fd.w.SetWriteDeadline(t)
-	return nil
-}
-
-func newBufferedPipe(softLimit int) *bufferedPipe {
-	p := &bufferedPipe{softLimit: softLimit}
-	p.rCond.L = &p.mu
-	p.wCond.L = &p.mu
-	return p
-}
-
-type bufferedPipe struct {
-	softLimit int
-	mu        sync.Mutex
+type packet struct {
 	buf       []byte
-	closed    bool
-	rCond     sync.Cond
-	wCond     sync.Cond
-	rDeadline time.Time
-	wDeadline time.Time
+	bufOffset int
+	next      *packet
+	from      sockaddr
 }
 
-func (p *bufferedPipe) Read(b []byte) (int, error) {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-
-	for {
-		if p.closed && len(p.buf) == 0 {
-			return 0, io.EOF
-		}
-		if !p.rDeadline.IsZero() {
-			d := time.Until(p.rDeadline)
-			if d <= 0 {
-				return 0, os.ErrDeadlineExceeded
-			}
-			time.AfterFunc(d, p.rCond.Broadcast)
-		}
-		if len(p.buf) > 0 {
-			break
-		}
-		p.rCond.Wait()
-	}
-
-	n := copy(b, p.buf)
-	p.buf = p.buf[n:]
-	p.wCond.Broadcast()
-	return n, nil
+func (p *packet) clear() {
+	p.buf = p.buf[:0]
+	p.bufOffset = 0
+	p.next = nil
+	p.from = nil
 }
 
-func (p *bufferedPipe) Write(b []byte) (int, error) {
-	p.mu.Lock()
-	defer p.mu.Unlock()
+var packetPool = sync.Pool{
+	New: func() any { return new(packet) },
+}
 
+type packetQueueState struct {
+	head, tail      *packet // unqueued packets
+	nBytes          int     // number of bytes enqueued in the packet buffers starting from head
+	readBufferBytes int     // soft limit on nbytes; no more packets may be enqueued when the limit is exceeded
+	readClosed      bool    // true if the reader of the queue has stopped reading
+	writeClosed     bool    // true if the writer of the queue has stopped writing; the reader sees either io.EOF or syscall.ECONNRESET when they have read all buffered packets
+	noLinger        bool    // if true, the reader sees ECONNRESET instead of EOF
+}
+
+// A packetQueue is a set of 1-buffered channels implementing a FIFO queue
+// of packets.
+type packetQueue struct {
+	empty chan packetQueueState // contains configuration parameters when the queue is empty and not closed
+	ready chan packetQueueState // contains the packets when non-empty or closed
+	full  chan packetQueueState // contains the packets when buffer is full and not closed
+}
+
+func newPacketQueue(readBufferBytes int) *packetQueue {
+	pq := &packetQueue{
+		empty: make(chan packetQueueState, 1),
+		ready: make(chan packetQueueState, 1),
+		full:  make(chan packetQueueState, 1),
+	}
+	pq.put(packetQueueState{
+		readBufferBytes: readBufferBytes,
+	})
+	return pq
+}
+
+func (pq *packetQueue) get() packetQueueState {
+	var q packetQueueState
+	select {
+	case q = <-pq.empty:
+	case q = <-pq.ready:
+	case q = <-pq.full:
+	}
+	return q
+}
+
+func (pq *packetQueue) put(q packetQueueState) {
+	switch {
+	case q.readClosed || q.writeClosed:
+		pq.ready <- q
+	case q.nBytes >= q.readBufferBytes:
+		pq.full <- q
+	case q.head == nil:
+		if q.nBytes > 0 {
+			defer panic("net: put with nil packet list and nonzero nBytes")
+		}
+		pq.empty <- q
+	default:
+		pq.ready <- q
+	}
+}
+
+func (pq *packetQueue) closeRead() error {
+	q := pq.get()
+
+	// Discard any unread packets.
+	for q.head != nil {
+		p := q.head
+		q.head = p.next
+		p.clear()
+		packetPool.Put(p)
+	}
+	q.nBytes = 0
+
+	q.readClosed = true
+	pq.put(q)
+	return nil
+}
+
+func (pq *packetQueue) closeWrite() error {
+	q := pq.get()
+	q.writeClosed = true
+	pq.put(q)
+	return nil
+}
+
+func (pq *packetQueue) setLinger(linger bool) error {
+	q := pq.get()
+	defer func() { pq.put(q) }()
+
+	if q.writeClosed {
+		return ErrClosed
+	}
+	q.noLinger = !linger
+	return nil
+}
+
+func (pq *packetQueue) write(dt *deadlineTimer, b []byte, from sockaddr) (n int, err error) {
 	for {
-		if p.closed {
-			return 0, syscall.ENOTCONN
+		dn := len(b)
+		if dn > maxPacketSize {
+			dn = maxPacketSize
 		}
-		if !p.wDeadline.IsZero() {
-			d := time.Until(p.wDeadline)
-			if d <= 0 {
-				return 0, os.ErrDeadlineExceeded
-			}
-			time.AfterFunc(d, p.wCond.Broadcast)
+
+		dn, err = pq.send(dt, b[:dn], from, true)
+		n += dn
+		if err != nil {
+			return n, err
 		}
-		if len(p.buf) <= p.softLimit {
-			break
+
+		b = b[dn:]
+		if len(b) == 0 {
+			return n, nil
 		}
-		p.wCond.Wait()
+	}
+}
+
+func (pq *packetQueue) send(dt *deadlineTimer, b []byte, from sockaddr, block bool) (n int, err error) {
+	if from == nil {
+		return 0, os.NewSyscallError("send", syscall.EINVAL)
+	}
+	if len(b) > maxPacketSize {
+		return 0, os.NewSyscallError("send", syscall.EMSGSIZE)
 	}
 
-	p.buf = append(p.buf, b...)
-	p.rCond.Broadcast()
+	var q packetQueueState
+	var full chan packetQueueState
+	if !block {
+		full = pq.full
+	}
+
+	// Before we check dt.expired, yield to other goroutines.
+	// This may help to prevent starvation of the goroutine that runs the
+	// deadlineTimer's time.After callback.
+	//
+	// TODO(#65178): Remove this when the runtime scheduler no longer starves
+	// runnable goroutines.
+	runtime.Gosched()
+
+	select {
+	case <-dt.expired:
+		return 0, os.ErrDeadlineExceeded
+
+	case q = <-full:
+		pq.put(q)
+		return 0, os.NewSyscallError("send", syscall.ENOBUFS)
+
+	case q = <-pq.empty:
+	case q = <-pq.ready:
+	}
+	defer func() { pq.put(q) }()
+
+	// Don't allow a packet to be sent if the deadline has expired,
+	// even if the select above chose a different branch.
+	select {
+	case <-dt.expired:
+		return 0, os.ErrDeadlineExceeded
+	default:
+	}
+	if q.writeClosed {
+		return 0, ErrClosed
+	} else if q.readClosed {
+		return 0, os.NewSyscallError("send", syscall.ECONNRESET)
+	}
+
+	p := packetPool.Get().(*packet)
+	p.buf = append(p.buf[:0], b...)
+	p.from = from
+
+	if q.head == nil {
+		q.head = p
+	} else {
+		q.tail.next = p
+	}
+	q.tail = p
+	q.nBytes += len(p.buf)
+
 	return len(b), nil
 }
 
-func (p *bufferedPipe) Close() {
-	p.mu.Lock()
-	defer p.mu.Unlock()
+func (pq *packetQueue) recvfrom(dt *deadlineTimer, b []byte, wholePacket bool, checkFrom func(sockaddr) error) (n int, from sockaddr, err error) {
+	var q packetQueueState
+	var empty chan packetQueueState
+	if len(b) == 0 {
+		// For consistency with the implementation on Unix platforms,
+		// allow a zero-length Read to proceed if the queue is empty.
+		// (Without this, TestZeroByteRead deadlocks.)
+		empty = pq.empty
+	}
 
-	p.closed = true
-	p.rCond.Broadcast()
-	p.wCond.Broadcast()
+	// Before we check dt.expired, yield to other goroutines.
+	// This may help to prevent starvation of the goroutine that runs the
+	// deadlineTimer's time.After callback.
+	//
+	// TODO(#65178): Remove this when the runtime scheduler no longer starves
+	// runnable goroutines.
+	runtime.Gosched()
+
+	select {
+	case <-dt.expired:
+		return 0, nil, os.ErrDeadlineExceeded
+	case q = <-empty:
+	case q = <-pq.ready:
+	case q = <-pq.full:
+	}
+	defer func() { pq.put(q) }()
+
+	p := q.head
+	if p == nil {
+		switch {
+		case q.readClosed:
+			return 0, nil, ErrClosed
+		case q.writeClosed:
+			if q.noLinger {
+				return 0, nil, os.NewSyscallError("recvfrom", syscall.ECONNRESET)
+			}
+			return 0, nil, io.EOF
+		case len(b) == 0:
+			return 0, nil, nil
+		default:
+			// This should be impossible: pq.full should only contain a non-empty list,
+			// pq.ready should either contain a non-empty list or indicate that the
+			// connection is closed, and we should only receive from pq.empty if
+			// len(b) == 0.
+			panic("net: nil packet list from non-closed packetQueue")
+		}
+	}
+
+	select {
+	case <-dt.expired:
+		return 0, nil, os.ErrDeadlineExceeded
+	default:
+	}
+
+	if checkFrom != nil {
+		if err := checkFrom(p.from); err != nil {
+			return 0, nil, err
+		}
+	}
+
+	n = copy(b, p.buf[p.bufOffset:])
+	from = p.from
+	if wholePacket || p.bufOffset+n == len(p.buf) {
+		q.head = p.next
+		q.nBytes -= len(p.buf)
+		p.clear()
+		packetPool.Put(p)
+	} else {
+		p.bufOffset += n
+	}
+
+	return n, from, nil
 }
 
-func (p *bufferedPipe) SetReadDeadline(t time.Time) {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-
-	p.rDeadline = t
-	p.rCond.Broadcast()
+// setReadBuffer sets a soft limit on the number of bytes available to read
+// from the pipe.
+func (pq *packetQueue) setReadBuffer(bytes int) error {
+	if bytes <= 0 {
+		return os.NewSyscallError("setReadBuffer", syscall.EINVAL)
+	}
+	q := pq.get() // Use the queue as a lock.
+	q.readBufferBytes = bytes
+	pq.put(q)
+	return nil
 }
 
-func (p *bufferedPipe) SetWriteDeadline(t time.Time) {
-	p.mu.Lock()
-	defer p.mu.Unlock()
+type deadlineTimer struct {
+	timer   chan *time.Timer
+	expired chan struct{}
+}
 
-	p.wDeadline = t
-	p.wCond.Broadcast()
+func newDeadlineTimer(deadline time.Time) *deadlineTimer {
+	dt := &deadlineTimer{
+		timer:   make(chan *time.Timer, 1),
+		expired: make(chan struct{}),
+	}
+	dt.timer <- nil
+	dt.Reset(deadline)
+	return dt
+}
+
+// Reset attempts to reset the timer.
+// If the timer has already expired, Reset returns false.
+func (dt *deadlineTimer) Reset(deadline time.Time) bool {
+	timer := <-dt.timer
+	defer func() { dt.timer <- timer }()
+
+	if deadline.Equal(noDeadline) {
+		if timer != nil && timer.Stop() {
+			timer = nil
+		}
+		return timer == nil
+	}
+
+	d := time.Until(deadline)
+	if d < 0 {
+		// Ensure that a deadline in the past takes effect immediately.
+		defer func() { <-dt.expired }()
+	}
+
+	if timer == nil {
+		timer = time.AfterFunc(d, func() { close(dt.expired) })
+		return true
+	}
+	if !timer.Stop() {
+		return false
+	}
+	timer.Reset(d)
+	return true
 }
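
// Aside (illustrative, not part of the patch): packetQueue and deadlineTimer
// above both use a 1-buffered channel as a combined mutex and state holder —
// receiving takes ownership of the state, sending it back releases it, and
// select lets callers wait on the "empty"/"ready"/"full" variants without a
// condition variable. The pattern in isolation looks like this:

	package main

	import "fmt"

	type counter struct{ state chan int }

	func newCounter() *counter {
		c := &counter{state: make(chan int, 1)}
		c.state <- 0 // the buffered value is the protected state
		return c
	}

	func (c *counter) incr() int {
		n := <-c.state // acquire: take the state out of the channel
		n++
		c.state <- n // release: put the updated state back
		return n
	}

	func main() { fmt.Println(newCounter().incr()) }
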
 
 func sysSocket(family, sotype, proto int) (int, error) {
-	return 0, syscall.ENOSYS
+	return 0, os.NewSyscallError("sysSocket", syscall.ENOSYS)
 }
 
-func (fd *fakeNetFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (syscall.Sockaddr, error) {
-	return nil, syscall.ENOSYS
+func fakeListen(fd *netFD, laddr sockaddr) (err error) {
+	wrapErr := func(err error) error {
+		if errno, ok := err.(syscall.Errno); ok {
+			err = os.NewSyscallError("listen", errno)
+		}
+		if errors.Is(err, syscall.EADDRINUSE) {
+			return err
+		}
+		if laddr != nil {
+			if _, ok := err.(*AddrError); !ok {
+				err = &AddrError{
+					Err:  err.Error(),
+					Addr: laddr.String(),
+				}
+			}
+		}
+		return err
+	}
+
+	ffd := newFakeNetFD(fd)
+	defer func() {
+		if fd.fakeNetFD != ffd {
+			// Failed to register listener; clean up.
+			ffd.Close()
+		}
+	}()
+
+	if err := ffd.assignFakeAddr(matchIPFamily(fd.family, laddr)); err != nil {
+		return wrapErr(err)
+	}
+
+	ffd.fakeAddr = fakeAddr(fd.laddr.(sockaddr))
+	switch fd.sotype {
+	case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET:
+		ffd.incoming = make(chan []*netFD, 1)
+		ffd.incomingFull = make(chan []*netFD, 1)
+		ffd.incomingEmpty = make(chan bool, 1)
+		ffd.incomingEmpty <- true
+	case syscall.SOCK_DGRAM:
+		ffd.queue = newPacketQueue(defaultBuffer)
+	default:
+		return wrapErr(syscall.EINVAL)
+	}
+
+	fd.fakeNetFD = ffd
+	if _, dup := sockets.LoadOrStore(ffd.fakeAddr, fd); dup {
+		fd.fakeNetFD = nil
+		return wrapErr(syscall.EADDRINUSE)
+	}
+
+	return nil
 }
 
-func (fd *fakeNetFD) readFrom(p []byte) (n int, sa syscall.Sockaddr, err error) {
-	return 0, nil, syscall.ENOSYS
+func fakeConnect(ctx context.Context, fd *netFD, laddr, raddr sockaddr) error {
+	wrapErr := func(err error) error {
+		if errno, ok := err.(syscall.Errno); ok {
+			err = os.NewSyscallError("connect", errno)
+		}
+		if errors.Is(err, syscall.EADDRINUSE) {
+			return err
+		}
+		if terr, ok := err.(interface{ Timeout() bool }); !ok || !terr.Timeout() {
+			// For consistency with the net implementation on other platforms,
+			// if we don't need to preserve the Timeout-ness of err we should
+			// wrap it in an AddrError. (Unfortunately we can't wrap errors
+			// that convey structured information, because AddrError reduces
+			// the wrapped Err to a flat string.)
+			if _, ok := err.(*AddrError); !ok {
+				err = &AddrError{
+					Err:  err.Error(),
+					Addr: raddr.String(),
+				}
+			}
+		}
+		return err
+	}
 
-}
-func (fd *fakeNetFD) readFromInet4(p []byte, sa *syscall.SockaddrInet4) (n int, err error) {
-	return 0, syscall.ENOSYS
+	if fd.isConnected {
+		return wrapErr(syscall.EISCONN)
+	}
+	if ctx.Err() != nil {
+		return wrapErr(syscall.ETIMEDOUT)
+	}
+
+	fd.raddr = matchIPFamily(fd.family, raddr)
+	if err := validateResolvedAddr(fd.net, fd.family, fd.raddr.(sockaddr)); err != nil {
+		return wrapErr(err)
+	}
+
+	if err := fd.fakeNetFD.assignFakeAddr(laddr); err != nil {
+		return wrapErr(err)
+	}
+	fd.fakeNetFD.queue = newPacketQueue(defaultBuffer)
+
+	switch fd.sotype {
+	case syscall.SOCK_DGRAM:
+		if ua, ok := fd.laddr.(*UnixAddr); !ok || ua.Name != "" {
+			fd.fakeNetFD.fakeAddr = fakeAddr(fd.laddr.(sockaddr))
+			if _, dup := sockets.LoadOrStore(fd.fakeNetFD.fakeAddr, fd); dup {
+				return wrapErr(syscall.EADDRINUSE)
+			}
+		}
+		fd.isConnected = true
+		return nil
+
+	case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET:
+	default:
+		return wrapErr(syscall.EINVAL)
+	}
+
+	fa := fakeAddr(raddr)
+	lni, ok := sockets.Load(fa)
+	if !ok {
+		return wrapErr(syscall.ECONNREFUSED)
+	}
+	ln := lni.(*netFD)
+	if ln.sotype != fd.sotype {
+		return wrapErr(syscall.EPROTOTYPE)
+	}
+	if ln.incoming == nil {
+		return wrapErr(syscall.ECONNREFUSED)
+	}
+
+	peer := &netFD{
+		family:      ln.family,
+		sotype:      ln.sotype,
+		net:         ln.net,
+		laddr:       ln.laddr,
+		raddr:       fd.laddr,
+		isConnected: true,
+	}
+	peer.fakeNetFD = newFakeNetFD(fd)
+	peer.fakeNetFD.queue = newPacketQueue(defaultBuffer)
+	defer func() {
+		if fd.peer != peer {
+			// Failed to connect; clean up.
+			peer.Close()
+		}
+	}()
+
+	var incoming []*netFD
+	select {
+	case <-ctx.Done():
+		return wrapErr(syscall.ETIMEDOUT)
+	case ok = <-ln.incomingEmpty:
+	case incoming, ok = <-ln.incoming:
+	}
+	if !ok {
+		return wrapErr(syscall.ECONNREFUSED)
+	}
+
+	fd.isConnected = true
+	fd.peer = peer
+	peer.peer = fd
+
+	incoming = append(incoming, peer)
+	if len(incoming) >= listenerBacklog() {
+		ln.incomingFull <- incoming
+	} else {
+		ln.incoming <- incoming
+	}
+	return nil
 }
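For callers nothing changes: on js and wasip1 ordinary net calls are routed into this fake stack, with Listen landing in fakeListen and Dial in fakeConnect. A hedged sketch of how a test might exercise that path, assuming the usual net and log imports:

	ln, err := net.Listen("tcp", "127.0.0.1:0") // served by fakeListen on js/wasip1
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	go func() {
		c, err := ln.Accept()
		if err != nil {
			return
		}
		c.Close()
	}()

	c, err := net.Dial("tcp", ln.Addr().String()) // served by fakeConnect
	if err != nil {
		log.Fatal(err)
	}
	c.Close()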
 
-func (fd *fakeNetFD) readFromInet6(p []byte, sa *syscall.SockaddrInet6) (n int, err error) {
-	return 0, syscall.ENOSYS
+func (ffd *fakeNetFD) assignFakeAddr(addr sockaddr) error {
+	validate := func(sa sockaddr) error {
+		if err := validateResolvedAddr(ffd.fd.net, ffd.fd.family, sa); err != nil {
+			return err
+		}
+		ffd.fd.laddr = sa
+		return nil
+	}
+
+	assignIP := func(addr sockaddr) error {
+		var (
+			ip   IP
+			port int
+			zone string
+		)
+		switch addr := addr.(type) {
+		case *TCPAddr:
+			if addr != nil {
+				ip = addr.IP
+				port = addr.Port
+				zone = addr.Zone
+			}
+		case *UDPAddr:
+			if addr != nil {
+				ip = addr.IP
+				port = addr.Port
+				zone = addr.Zone
+			}
+		default:
+			return validate(addr)
+		}
+
+		if ip == nil {
+			ip = IPv4(127, 0, 0, 1)
+		}
+		switch ffd.fd.family {
+		case syscall.AF_INET:
+			if ip4 := ip.To4(); ip4 != nil {
+				ip = ip4
+			}
+		case syscall.AF_INET6:
+			if ip16 := ip.To16(); ip16 != nil {
+				ip = ip16
+			}
+		}
+		if ip == nil {
+			return syscall.EINVAL
+		}
+
+		if port == 0 {
+			var prevPort int32
+			portWrapped := false
+			nextPort := func() (int, bool) {
+				for {
+					port := nextPortCounter.Add(1)
+					if port <= 0 || port >= 1<<16 {
+						// nextPortCounter ran off the end of the port space.
+						// Bump it back into range.
+						for {
+							if nextPortCounter.CompareAndSwap(port, 0) {
+								break
+							}
+							if port = nextPortCounter.Load(); port >= 0 && port < 1<<16 {
+								break
+							}
+						}
+						if portWrapped {
+							// This is the second wraparound, so we've scanned the whole port space
+							// at least once already and it's time to give up.
+							return 0, false
+						}
+						portWrapped = true
+						prevPort = 0
+						continue
+					}
+
+					if port <= prevPort {
+						// nextPortCounter has wrapped around since the last time we read it.
+						if portWrapped {
+							// This is the second wraparound, so we've scanned the whole port space
+							// at least once already and it's time to give up.
+							return 0, false
+						} else {
+							portWrapped = true
+						}
+					}
+
+					prevPort = port
+					return int(port), true
+				}
+			}
+
+			for {
+				var ok bool
+				port, ok = nextPort()
+				if !ok {
+					ffd.assignedPort = 0
+					return syscall.EADDRINUSE
+				}
+
+				ffd.assignedPort = int(port)
+				if _, dup := fakePorts.LoadOrStore(ffd.assignedPort, ffd.fd); !dup {
+					break
+				}
+			}
+		}
+
+		switch addr.(type) {
+		case *TCPAddr:
+			return validate(&TCPAddr{IP: ip, Port: port, Zone: zone})
+		case *UDPAddr:
+			return validate(&UDPAddr{IP: ip, Port: port, Zone: zone})
+		default:
+			panic("unreachable")
+		}
+	}
+
+	switch ffd.fd.net {
+	case "tcp", "tcp4", "tcp6":
+		if addr == nil {
+			return assignIP(new(TCPAddr))
+		}
+		return assignIP(addr)
+
+	case "udp", "udp4", "udp6":
+		if addr == nil {
+			return assignIP(new(UDPAddr))
+		}
+		return assignIP(addr)
+
+	case "unix", "unixgram", "unixpacket":
+		uaddr, ok := addr.(*UnixAddr)
+		if !ok && addr != nil {
+			return &AddrError{
+				Err:  "non-Unix address for " + ffd.fd.net + " network",
+				Addr: addr.String(),
+			}
+		}
+		if uaddr == nil {
+			return validate(&UnixAddr{Net: ffd.fd.net})
+		}
+		return validate(&UnixAddr{Net: ffd.fd.net, Name: uaddr.Name})
+
+	default:
+		return &AddrError{
+			Err:  syscall.EAFNOSUPPORT.Error(),
+			Addr: addr.String(),
+		}
+	}
 }
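The automatic port assignment above walks a shared atomic counter over [1, 65535] and gives up only after scanning the whole space twice. A reduced sketch of the same allocation strategy, with illustrative names and without the double-wraparound give-up logic (assumes "sync/atomic"):

	// nextFakePort hands out ports in [1, 65535], bumping the shared counter
	// back to zero whenever it runs off the end of the port space.
	var portCounter atomic.Int32

	func nextFakePort() int {
		for {
			p := portCounter.Add(1)
			if p > 0 && p < 1<<16 {
				return int(p)
			}
			// Out of range: try to reset the counter, then retry.
			portCounter.CompareAndSwap(p, 0)
		}
	}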
 
-func (fd *fakeNetFD) readMsg(p []byte, oob []byte, flags int) (n, oobn, retflags int, sa syscall.Sockaddr, err error) {
-	return 0, 0, 0, nil, syscall.ENOSYS
+func (ffd *fakeNetFD) readFrom(p []byte) (n int, sa syscall.Sockaddr, err error) {
+	if ffd.queue == nil {
+		return 0, nil, os.NewSyscallError("readFrom", syscall.EINVAL)
+	}
+
+	n, from, err := ffd.queue.recvfrom(ffd.readDeadline.Load(), p, true, nil)
+
+	if from != nil {
+		// Convert the net.sockaddr to a syscall.Sockaddr type.
+		var saErr error
+		sa, saErr = from.sockaddr(ffd.fd.family)
+		if err == nil {
+			err = saErr
+		}
+	}
+
+	return n, sa, err
 }
 
-func (fd *fakeNetFD) readMsgInet4(p []byte, oob []byte, flags int, sa *syscall.SockaddrInet4) (n, oobn, retflags int, err error) {
-	return 0, 0, 0, syscall.ENOSYS
+func (ffd *fakeNetFD) readFromInet4(p []byte, sa *syscall.SockaddrInet4) (n int, err error) {
+	n, _, err = ffd.queue.recvfrom(ffd.readDeadline.Load(), p, true, func(from sockaddr) error {
+		fromSA, err := from.sockaddr(syscall.AF_INET)
+		if err != nil {
+			return err
+		}
+		if fromSA == nil {
+			return os.NewSyscallError("readFromInet4", syscall.EINVAL)
+		}
+		*sa = *(fromSA.(*syscall.SockaddrInet4))
+		return nil
+	})
+	return n, err
 }
 
-func (fd *fakeNetFD) readMsgInet6(p []byte, oob []byte, flags int, sa *syscall.SockaddrInet6) (n, oobn, retflags int, err error) {
-	return 0, 0, 0, syscall.ENOSYS
+func (ffd *fakeNetFD) readFromInet6(p []byte, sa *syscall.SockaddrInet6) (n int, err error) {
+	n, _, err = ffd.queue.recvfrom(ffd.readDeadline.Load(), p, true, func(from sockaddr) error {
+		fromSA, err := from.sockaddr(syscall.AF_INET6)
+		if err != nil {
+			return err
+		}
+		if fromSA == nil {
+			return os.NewSyscallError("readFromInet6", syscall.EINVAL)
+		}
+		*sa = *(fromSA.(*syscall.SockaddrInet6))
+		return nil
+	})
+	return n, err
 }
 
-func (fd *fakeNetFD) writeMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (n int, oobn int, err error) {
-	return 0, 0, syscall.ENOSYS
+func (ffd *fakeNetFD) readMsg(p []byte, oob []byte, flags int) (n, oobn, retflags int, sa syscall.Sockaddr, err error) {
+	if flags != 0 {
+		return 0, 0, 0, nil, os.NewSyscallError("readMsg", syscall.ENOTSUP)
+	}
+	n, sa, err = ffd.readFrom(p)
+	return n, 0, 0, sa, err
 }
 
-func (fd *fakeNetFD) writeMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (n int, oobn int, err error) {
-	return 0, 0, syscall.ENOSYS
+func (ffd *fakeNetFD) readMsgInet4(p []byte, oob []byte, flags int, sa *syscall.SockaddrInet4) (n, oobn, retflags int, err error) {
+	if flags != 0 {
+		return 0, 0, 0, os.NewSyscallError("readMsgInet4", syscall.ENOTSUP)
+	}
+	n, err = ffd.readFromInet4(p, sa)
+	return n, 0, 0, err
 }
 
-func (fd *fakeNetFD) writeTo(p []byte, sa syscall.Sockaddr) (n int, err error) {
-	return 0, syscall.ENOSYS
+func (ffd *fakeNetFD) readMsgInet6(p []byte, oob []byte, flags int, sa *syscall.SockaddrInet6) (n, oobn, retflags int, err error) {
+	if flags != 0 {
+		return 0, 0, 0, os.NewSyscallError("readMsgInet6", syscall.ENOTSUP)
+	}
+	n, err = ffd.readFromInet6(p, sa)
+	return n, 0, 0, err
 }
 
-func (fd *fakeNetFD) writeToInet4(p []byte, sa *syscall.SockaddrInet4) (n int, err error) {
-	return 0, syscall.ENOSYS
+func (ffd *fakeNetFD) writeMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err error) {
+	if len(oob) > 0 {
+		return 0, 0, os.NewSyscallError("writeMsg", syscall.ENOTSUP)
+	}
+	n, err = ffd.writeTo(p, sa)
+	return n, 0, err
 }
 
-func (fd *fakeNetFD) writeToInet6(p []byte, sa *syscall.SockaddrInet6) (n int, err error) {
-	return 0, syscall.ENOSYS
+func (ffd *fakeNetFD) writeMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (n int, oobn int, err error) {
+	return ffd.writeMsg(p, oob, sa)
 }
 
-func (fd *fakeNetFD) writeMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err error) {
-	return 0, 0, syscall.ENOSYS
+func (ffd *fakeNetFD) writeMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (n int, oobn int, err error) {
+	return ffd.writeMsg(p, oob, sa)
 }
 
-func (fd *fakeNetFD) dup() (f *os.File, err error) {
-	return nil, syscall.ENOSYS
+func (ffd *fakeNetFD) writeTo(p []byte, sa syscall.Sockaddr) (n int, err error) {
+	raddr := ffd.fd.raddr
+	if sa != nil {
+		if ffd.fd.isConnected {
+			return 0, os.NewSyscallError("writeTo", syscall.EISCONN)
+		}
+		raddr = ffd.fd.addrFunc()(sa)
+	}
+	if raddr == nil {
+		return 0, os.NewSyscallError("writeTo", syscall.EINVAL)
+	}
+
+	peeri, _ := sockets.Load(fakeAddr(raddr.(sockaddr)))
+	if peeri == nil {
+		if len(ffd.fd.net) >= 3 && ffd.fd.net[:3] == "udp" {
+			return len(p), nil
+		}
+		return 0, os.NewSyscallError("writeTo", syscall.ECONNRESET)
+	}
+	peer := peeri.(*netFD)
+	if peer.queue == nil {
+		if len(ffd.fd.net) >= 3 && ffd.fd.net[:3] == "udp" {
+			return len(p), nil
+		}
+		return 0, os.NewSyscallError("writeTo", syscall.ECONNRESET)
+	}
+
+	block := true
+	if len(ffd.fd.net) >= 3 && ffd.fd.net[:3] == "udp" {
+		block = false
+	}
+	return peer.queue.send(ffd.writeDeadline.Load(), p, ffd.fd.laddr.(sockaddr), block)
+}
+
+func (ffd *fakeNetFD) writeToInet4(p []byte, sa *syscall.SockaddrInet4) (n int, err error) {
+	return ffd.writeTo(p, sa)
+}
+
+func (ffd *fakeNetFD) writeToInet6(p []byte, sa *syscall.SockaddrInet6) (n int, err error) {
+	return ffd.writeTo(p, sa)
+}
+
+func (ffd *fakeNetFD) dup() (f *os.File, err error) {
+	return nil, os.NewSyscallError("dup", syscall.ENOSYS)
+}
+
+func (ffd *fakeNetFD) setReadBuffer(bytes int) error {
+	if ffd.queue == nil {
+		return os.NewSyscallError("setReadBuffer", syscall.EINVAL)
+	}
+	ffd.queue.setReadBuffer(bytes)
+	return nil
+}
+
+func (ffd *fakeNetFD) setWriteBuffer(bytes int) error {
+	return os.NewSyscallError("setWriteBuffer", syscall.ENOTSUP)
+}
+
+func (ffd *fakeNetFD) setLinger(sec int) error {
+	if sec < 0 || ffd.peer == nil {
+		return os.NewSyscallError("setLinger", syscall.EINVAL)
+	}
+	ffd.peer.queue.setLinger(sec > 0)
+	return nil
 }
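setLinger above flips the peer queue's linger flag, which is what makes recvfrom return ECONNRESET instead of io.EOF once the writer closes. A hedged sketch of how that surfaces to a caller on the fake stack, where conn stands for a previously dialed connection:

	// With linger disabled, Close discards unsent data and the peer's next
	// read on the fake stack is expected to report ECONNRESET rather than EOF.
	tc := conn.(*net.TCPConn)
	if err := tc.SetLinger(0); err != nil {
		log.Fatal(err)
	}
	tc.Close()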
diff --git a/src/net/net_fake_js.go b/src/net/net_fake_js.go
deleted file mode 100644
index 7ba108b..0000000
--- a/src/net/net_fake_js.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Fake networking for js/wasm. It is intended to allow tests of other package to pass.
-
-//go:build js && wasm
-
-package net
-
-import (
-	"context"
-	"internal/poll"
-
-	"golang.org/x/net/dns/dnsmessage"
-)
-
-// Network file descriptor.
-type netFD struct {
-	*fakeNetFD
-
-	// immutable until Close
-	family int
-	sotype int
-	net    string
-	laddr  Addr
-	raddr  Addr
-
-	// unused
-	pfd         poll.FD
-	isConnected bool // handshake completed or use of association with peer
-}
-
-func (r *Resolver) lookup(ctx context.Context, name string, qtype dnsmessage.Type, conf *dnsConfig) (dnsmessage.Parser, string, error) {
-	panic("unreachable")
-}
diff --git a/src/net/net_fake_test.go b/src/net/net_fake_test.go
index 783304d..4542228 100644
--- a/src/net/net_fake_test.go
+++ b/src/net/net_fake_test.go
@@ -13,191 +13,95 @@
 // The tests in this file are intended to validate the behavior of the fake
 // network stack on these platforms.
 
-import "testing"
+import (
+	"errors"
+	"syscall"
+	"testing"
+)
 
-func TestFakeConn(t *testing.T) {
-	tests := []struct {
-		name   string
-		listen func() (Listener, error)
-		dial   func(Addr) (Conn, error)
-		addr   func(*testing.T, Addr)
-	}{
-		{
-			name: "Listener:tcp",
-			listen: func() (Listener, error) {
-				return Listen("tcp", ":0")
-			},
-			dial: func(addr Addr) (Conn, error) {
-				return Dial(addr.Network(), addr.String())
-			},
-			addr: testFakeTCPAddr,
-		},
-
-		{
-			name: "ListenTCP:tcp",
-			listen: func() (Listener, error) {
-				// Creating a listening TCP connection with a nil address must
-				// select an IP address on localhost with a random port.
-				// This test verifies that the fake network facility does that.
-				return ListenTCP("tcp", nil)
-			},
-			dial: func(addr Addr) (Conn, error) {
-				// Connecting a listening TCP connection will select a local
-				// address on the local network and connects to the destination
-				// address.
-				return DialTCP("tcp", nil, addr.(*TCPAddr))
-			},
-			addr: testFakeTCPAddr,
-		},
-
-		{
-			name: "ListenUnix:unix",
-			listen: func() (Listener, error) {
-				return ListenUnix("unix", &UnixAddr{Name: "test"})
-			},
-			dial: func(addr Addr) (Conn, error) {
-				return DialUnix("unix", nil, addr.(*UnixAddr))
-			},
-			addr: testFakeUnixAddr("unix", "test"),
-		},
-
-		{
-			name: "ListenUnix:unixpacket",
-			listen: func() (Listener, error) {
-				return ListenUnix("unixpacket", &UnixAddr{Name: "test"})
-			},
-			dial: func(addr Addr) (Conn, error) {
-				return DialUnix("unixpacket", nil, addr.(*UnixAddr))
-			},
-			addr: testFakeUnixAddr("unixpacket", "test"),
-		},
+func TestFakePortExhaustion(t *testing.T) {
+	if testing.Short() {
+		t.Skipf("skipping test that opens 1<<16 connections")
 	}
 
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			l, err := test.listen()
-			if err != nil {
-				t.Fatal(err)
+	ln := newLocalListener(t, "tcp")
+	done := make(chan struct{})
+	go func() {
+		var accepted []Conn
+		defer func() {
+			for _, c := range accepted {
+				c.Close()
 			}
-			defer l.Close()
-			test.addr(t, l.Addr())
+			close(done)
+		}()
 
-			c, err := test.dial(l.Addr())
+		for {
+			c, err := ln.Accept()
 			if err != nil {
-				t.Fatal(err)
+				return
 			}
-			defer c.Close()
-			test.addr(t, c.LocalAddr())
-			test.addr(t, c.RemoteAddr())
-		})
-	}
-}
-
-func TestFakePacketConn(t *testing.T) {
-	tests := []struct {
-		name   string
-		listen func() (PacketConn, error)
-		dial   func(Addr) (Conn, error)
-		addr   func(*testing.T, Addr)
-	}{
-		{
-			name: "ListenPacket:udp",
-			listen: func() (PacketConn, error) {
-				return ListenPacket("udp", ":0")
-			},
-			dial: func(addr Addr) (Conn, error) {
-				return Dial(addr.Network(), addr.String())
-			},
-			addr: testFakeUDPAddr,
-		},
-
-		{
-			name: "ListenUDP:udp",
-			listen: func() (PacketConn, error) {
-				// Creating a listening UDP connection with a nil address must
-				// select an IP address on localhost with a random port.
-				// This test verifies that the fake network facility does that.
-				return ListenUDP("udp", nil)
-			},
-			dial: func(addr Addr) (Conn, error) {
-				// Connecting a listening UDP connection will select a local
-				// address on the local network and connects to the destination
-				// address.
-				return DialUDP("udp", nil, addr.(*UDPAddr))
-			},
-			addr: testFakeUDPAddr,
-		},
-
-		{
-			name: "ListenUnixgram:unixgram",
-			listen: func() (PacketConn, error) {
-				return ListenUnixgram("unixgram", &UnixAddr{Name: "test"})
-			},
-			dial: func(addr Addr) (Conn, error) {
-				return DialUnix("unixgram", nil, addr.(*UnixAddr))
-			},
-			addr: testFakeUnixAddr("unixgram", "test"),
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			l, err := test.listen()
-			if err != nil {
-				t.Fatal(err)
-			}
-			defer l.Close()
-			test.addr(t, l.LocalAddr())
-
-			c, err := test.dial(l.LocalAddr())
-			if err != nil {
-				t.Fatal(err)
-			}
-			defer c.Close()
-			test.addr(t, c.LocalAddr())
-			test.addr(t, c.RemoteAddr())
-		})
-	}
-}
-
-func testFakeTCPAddr(t *testing.T, addr Addr) {
-	t.Helper()
-	if a, ok := addr.(*TCPAddr); !ok {
-		t.Errorf("Addr is not *TCPAddr: %T", addr)
-	} else {
-		testFakeNetAddr(t, a.IP, a.Port)
-	}
-}
-
-func testFakeUDPAddr(t *testing.T, addr Addr) {
-	t.Helper()
-	if a, ok := addr.(*UDPAddr); !ok {
-		t.Errorf("Addr is not *UDPAddr: %T", addr)
-	} else {
-		testFakeNetAddr(t, a.IP, a.Port)
-	}
-}
-
-func testFakeNetAddr(t *testing.T, ip IP, port int) {
-	t.Helper()
-	if port == 0 {
-		t.Error("network address is missing port")
-	} else if len(ip) == 0 {
-		t.Error("network address is missing IP")
-	} else if !ip.Equal(IPv4(127, 0, 0, 1)) {
-		t.Errorf("network address has wrong IP: %s", ip)
-	}
-}
-
-func testFakeUnixAddr(net, name string) func(*testing.T, Addr) {
-	return func(t *testing.T, addr Addr) {
-		t.Helper()
-		if a, ok := addr.(*UnixAddr); !ok {
-			t.Errorf("Addr is not *UnixAddr: %T", addr)
-		} else if a.Net != net {
-			t.Errorf("unix address has wrong net: want=%q got=%q", net, a.Net)
-		} else if a.Name != name {
-			t.Errorf("unix address has wrong name: want=%q got=%q", name, a.Name)
+			accepted = append(accepted, c)
 		}
+	}()
+
+	var dialed []Conn
+	defer func() {
+		ln.Close()
+		for _, c := range dialed {
+			c.Close()
+		}
+		<-done
+	}()
+
+	// Since this test is not running in parallel, we expect to be able to open
+	// all 65535 valid (fake) ports. The listener is already using one, so
+	// we should be able to Dial the remaining 65534.
+	for len(dialed) < (1<<16)-2 {
+		c, err := Dial(ln.Addr().Network(), ln.Addr().String())
+		if err != nil {
+			t.Fatalf("unexpected error from Dial with %v connections: %v", len(dialed), err)
+		}
+		dialed = append(dialed, c)
+		if testing.Verbose() && len(dialed)%(1<<12) == 0 {
+			t.Logf("dialed %d connections", len(dialed))
+		}
+	}
+	t.Logf("dialed %d connections", len(dialed))
+
+	// Now that all of the ports are in use, dialing another should fail due
+	// to port exhaustion, which (for POSIX-like socket APIs) should return
+	// an EADDRINUSE error.
+	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
+	if err == nil {
+		c.Close()
+	}
+	if errors.Is(err, syscall.EADDRINUSE) {
+		t.Logf("Dial returned expected error: %v", err)
+	} else {
+		t.Errorf("unexpected error from Dial: %v\nwant: %v", err, syscall.EADDRINUSE)
+	}
+
+	// Opening a Listener should fail at this point too.
+	ln2, err := Listen("tcp", "localhost:0")
+	if err == nil {
+		ln2.Close()
+	}
+	if errors.Is(err, syscall.EADDRINUSE) {
+		t.Logf("Listen returned expected error: %v", err)
+	} else {
+		t.Errorf("unexpected error from Listen: %v\nwant: %v", err, syscall.EADDRINUSE)
+	}
+
+	// When we close an arbitrary connection, we should be able to reuse its port
+	// even if the server hasn't yet seen the ECONNRESET for the connection.
+	dialed[0].Close()
+	dialed = dialed[1:]
+	t.Logf("closed one connection")
+	c, err = Dial(ln.Addr().Network(), ln.Addr().String())
+	if err == nil {
+		c.Close()
+		t.Logf("Dial succeeded")
+	} else {
+		t.Errorf("unexpected error from Dial: %v", err)
 	}
 }
diff --git a/src/net/net_test.go b/src/net/net_test.go
index a0ac85f..b448a79 100644
--- a/src/net/net_test.go
+++ b/src/net/net_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -383,8 +381,16 @@
 
 			ln := newLocalListener(t, network)
 			connc := make(chan Conn, 1)
+			defer func() {
+				ln.Close()
+				for c := range connc {
+					if c != nil {
+						c.Close()
+					}
+				}
+			}()
 			go func() {
-				defer ln.Close()
+				defer close(connc)
 				c, err := ln.Accept()
 				if err != nil {
 					t.Error(err)
@@ -440,8 +446,9 @@
 			errc <- err
 			return
 		}
-		defer c1.Close()
-		errc <- peer1(c1.(*TCPConn))
+		err = peer1(c1.(*TCPConn))
+		c1.Close()
+		errc <- err
 	}()
 	go func() {
 		c2, err := Dial("tcp", ln.Addr().String())
@@ -449,12 +456,13 @@
 			errc <- err
 			return
 		}
-		defer c2.Close()
-		errc <- peer2(c2.(*TCPConn))
+		err = peer2(c2.(*TCPConn))
+		c2.Close()
+		errc <- err
 	}()
 	for i := 0; i < 2; i++ {
 		if err := <-errc; err != nil {
-			t.Fatal(err)
+			t.Error(err)
 		}
 	}
 }
diff --git a/src/net/netip/export_test.go b/src/net/netip/export_test.go
index 59971fa..72347ee 100644
--- a/src/net/netip/export_test.go
+++ b/src/net/netip/export_test.go
@@ -28,3 +28,5 @@
 
 func (a Addr) IsZero() bool   { return a.isZero() }
 func (p Prefix) IsZero() bool { return p.isZero() }
+
+func (p Prefix) Compare(p2 Prefix) int { return p.compare(p2) }
diff --git a/src/net/netip/leaf_alts.go b/src/net/netip/leaf_alts.go
index 70513ab..d887bed 100644
--- a/src/net/netip/leaf_alts.go
+++ b/src/net/netip/leaf_alts.go
@@ -7,15 +7,6 @@
 
 package netip
 
-func stringsLastIndexByte(s string, b byte) int {
-	for i := len(s) - 1; i >= 0; i-- {
-		if s[i] == b {
-			return i
-		}
-	}
-	return -1
-}
-
 func beUint64(b []byte) uint64 {
 	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
 	return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
diff --git a/src/net/netip/netip.go b/src/net/netip/netip.go
index a44b094..7a189e8 100644
--- a/src/net/netip/netip.go
+++ b/src/net/netip/netip.go
@@ -12,6 +12,7 @@
 package netip
 
 import (
+	"cmp"
 	"errors"
 	"math"
 	"strconv"
@@ -127,7 +128,7 @@
 	return Addr{}, parseAddrError{in: s, msg: "unable to parse IP"}
 }
 
-// MustParseAddr calls ParseAddr(s) and panics on error.
+// MustParseAddr calls [ParseAddr](s) and panics on error.
 // It is intended for use in tests with hard-coded strings.
 func MustParseAddr(s string) Addr {
 	ip, err := ParseAddr(s)
@@ -334,8 +335,8 @@
 }
 
 // AddrFromSlice parses the 4- or 16-byte byte slice as an IPv4 or IPv6 address.
-// Note that a net.IP can be passed directly as the []byte argument.
-// If slice's length is not 4 or 16, AddrFromSlice returns Addr{}, false.
+// Note that a [net.IP] can be passed directly as the []byte argument.
+// If slice's length is not 4 or 16, AddrFromSlice returns [Addr]{}, false.
 func AddrFromSlice(slice []byte) (ip Addr, ok bool) {
 	switch len(slice) {
 	case 4:
@@ -375,13 +376,13 @@
 	return ip.z == z0
 }
 
-// IsValid reports whether the Addr is an initialized address (not the zero Addr).
+// IsValid reports whether the [Addr] is an initialized address (not the zero Addr).
 //
 // Note that "0.0.0.0" and "::" are both valid values.
 func (ip Addr) IsValid() bool { return ip.z != z0 }
 
 // BitLen returns the number of bits in the IP address:
-// 128 for IPv6, 32 for IPv4, and 0 for the zero Addr.
+// 128 for IPv6, 32 for IPv4, and 0 for the zero [Addr].
 //
 // Note that IPv4-mapped IPv6 addresses are considered IPv6 addresses
 // and therefore have bit length 128.
@@ -406,7 +407,7 @@
 
 // Compare returns an integer comparing two IPs.
 // The result will be 0 if ip == ip2, -1 if ip < ip2, and +1 if ip > ip2.
-// The definition of "less than" is the same as the Less method.
+// The definition of "less than" is the same as the [Addr.Less] method.
 func (ip Addr) Compare(ip2 Addr) int {
 	f1, f2 := ip.BitLen(), ip2.BitLen()
 	if f1 < f2 {
@@ -448,7 +449,7 @@
 
 // Is4 reports whether ip is an IPv4 address.
 //
-// It returns false for IPv4-mapped IPv6 addresses. See Addr.Unmap.
+// It returns false for IPv4-mapped IPv6 addresses. See [Addr.Unmap].
 func (ip Addr) Is4() bool {
 	return ip.z == z4
 }
@@ -582,7 +583,7 @@
 // IANA-allocated 2000::/3 global unicast space, with the exception of the
 // link-local address space. It also returns true even if ip is in the IPv4
 // private address space or IPv6 unique local address space.
-// It returns false for the zero Addr.
+// It returns false for the zero [Addr].
 //
 // For reference, see RFC 1122, RFC 4291, and RFC 4632.
 func (ip Addr) IsGlobalUnicast() bool {
@@ -606,7 +607,7 @@
 // IsPrivate reports whether ip is a private address, according to RFC 1918
 // (IPv4 addresses) and RFC 4193 (IPv6 addresses). That is, it reports whether
 // ip is in 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, or fc00::/7. This is the
-// same as net.IP.IsPrivate.
+// same as [net.IP.IsPrivate].
 func (ip Addr) IsPrivate() bool {
 	// Match the stdlib's IsPrivate logic.
 	if ip.Is4() {
@@ -629,14 +630,14 @@
 // IsUnspecified reports whether ip is an unspecified address, either the IPv4
 // address "0.0.0.0" or the IPv6 address "::".
 //
-// Note that the zero Addr is not an unspecified address.
+// Note that the zero [Addr] is not an unspecified address.
 func (ip Addr) IsUnspecified() bool {
 	return ip == IPv4Unspecified() || ip == IPv6Unspecified()
 }
 
 // Prefix keeps only the top b bits of IP, producing a Prefix
 // of the specified length.
-// If ip is a zero Addr, Prefix always returns a zero Prefix and a nil error.
+// If ip is a zero [Addr], Prefix always returns a zero Prefix and a nil error.
 // Otherwise, if bits is less than zero or greater than ip.BitLen(),
 // Prefix returns an error.
 func (ip Addr) Prefix(b int) (Prefix, error) {
@@ -661,15 +662,10 @@
 	return PrefixFrom(ip, b), nil
 }
 
-const (
-	netIPv4len = 4
-	netIPv6len = 16
-)
-
 // As16 returns the IP address in its 16-byte representation.
 // IPv4 addresses are returned as IPv4-mapped IPv6 addresses.
 // IPv6 addresses with zones are returned without their zone (use the
-// Zone method to get it).
+// [Addr.Zone] method to get it).
 // The ip zero value returns all zeroes.
 func (ip Addr) As16() (a16 [16]byte) {
 	bePutUint64(a16[:8], ip.addr.hi)
@@ -678,7 +674,7 @@
 }
 
 // As4 returns an IPv4 or IPv4-in-IPv6 address in its 4-byte representation.
-// If ip is the zero Addr or an IPv6 address, As4 panics.
+// If ip is the zero [Addr] or an IPv6 address, As4 panics.
 // Note that 0.0.0.0 is not the zero Addr.
 func (ip Addr) As4() (a4 [4]byte) {
 	if ip.z == z4 || ip.Is4In6() {
@@ -709,7 +705,7 @@
 }
 
 // Next returns the address following ip.
-// If there is none, it returns the zero Addr.
+// If there is none, it returns the zero [Addr].
 func (ip Addr) Next() Addr {
 	ip.addr = ip.addr.addOne()
 	if ip.Is4() {
@@ -743,10 +739,10 @@
 // String returns the string form of the IP address ip.
 // It returns one of 5 forms:
 //
-//   - "invalid IP", if ip is the zero Addr
+//   - "invalid IP", if ip is the zero [Addr]
 //   - IPv4 dotted decimal ("192.0.2.1")
 //   - IPv6 ("2001:db8::1")
-//   - "::ffff:1.2.3.4" (if Is4In6)
+//   - "::ffff:1.2.3.4" (if [Addr.Is4In6])
 //   - IPv6 with zone ("fe80:db8::1%eth0")
 //
 // Note that unlike package net's IP.String method,
@@ -771,7 +767,7 @@
 }
 
 // AppendTo appends a text encoding of ip,
-// as generated by MarshalText,
+// as generated by [Addr.MarshalText],
 // to b and returns the extended buffer.
 func (ip Addr) AppendTo(b []byte) []byte {
 	switch ip.z {
@@ -903,7 +899,7 @@
 	return ret
 }
 
-// StringExpanded is like String but IPv6 addresses are expanded with leading
+// StringExpanded is like [Addr.String] but IPv6 addresses are expanded with leading
 // zeroes and no "::" compression. For example, "2001:db8::1" becomes
 // "2001:0db8:0000:0000:0000:0000:0000:0001".
 func (ip Addr) StringExpanded() string {
@@ -931,9 +927,9 @@
 	return string(ret)
 }
 
-// MarshalText implements the encoding.TextMarshaler interface,
-// The encoding is the same as returned by String, with one exception:
-// If ip is the zero Addr, the encoding is the empty string.
+// MarshalText implements the [encoding.TextMarshaler] interface.
+// The encoding is the same as returned by [Addr.String], with one exception:
+// If ip is the zero [Addr], the encoding is the empty string.
 func (ip Addr) MarshalText() ([]byte, error) {
 	switch ip.z {
 	case z0:
@@ -960,9 +956,9 @@
 }
 
 // UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The IP address is expected in a form accepted by ParseAddr.
+// The IP address is expected in a form accepted by [ParseAddr].
 //
-// If text is empty, UnmarshalText sets *ip to the zero Addr and
+// If text is empty, UnmarshalText sets *ip to the zero [Addr] and
 // returns no error.
 func (ip *Addr) UnmarshalText(text []byte) error {
 	if len(text) == 0 {
@@ -992,15 +988,15 @@
 	return b
 }
 
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-// It returns a zero-length slice for the zero Addr,
+// MarshalBinary implements the [encoding.BinaryMarshaler] interface.
+// It returns a zero-length slice for the zero [Addr],
 // the 4-byte form for an IPv4 address,
 // and the 16-byte form with zone appended for an IPv6 address.
 func (ip Addr) MarshalBinary() ([]byte, error) {
 	return ip.marshalBinaryWithTrailingBytes(0), nil
 }
 
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// UnmarshalBinary implements the [encoding.BinaryUnmarshaler] interface.
 // It expects data in the form generated by MarshalBinary.
 func (ip *Addr) UnmarshalBinary(b []byte) error {
 	n := len(b)
@@ -1027,7 +1023,7 @@
 	port uint16
 }
 
-// AddrPortFrom returns an AddrPort with the provided IP and port.
+// AddrPortFrom returns an [AddrPort] with the provided IP and port.
 // It does not allocate.
 func AddrPortFrom(ip Addr, port uint16) AddrPort { return AddrPort{ip: ip, port: port} }
 
@@ -1043,7 +1039,7 @@
 // ip string should parse as an IPv6 address or an IPv4 address, in
 // order for s to be a valid ip:port string.
 func splitAddrPort(s string) (ip, port string, v6 bool, err error) {
-	i := stringsLastIndexByte(s, ':')
+	i := bytealg.LastIndexByteString(s, ':')
 	if i == -1 {
 		return "", "", false, errors.New("not an ip:port")
 	}
@@ -1066,7 +1062,7 @@
 	return ip, port, v6, nil
 }
 
-// ParseAddrPort parses s as an AddrPort.
+// ParseAddrPort parses s as an [AddrPort].
 //
 // It doesn't do any name resolution: both the address and the port
 // must be numeric.
@@ -1093,7 +1089,7 @@
 	return ipp, nil
 }
 
-// MustParseAddrPort calls ParseAddrPort(s) and panics on error.
+// MustParseAddrPort calls [ParseAddrPort](s) and panics on error.
 // It is intended for use in tests with hard-coded strings.
 func MustParseAddrPort(s string) AddrPort {
 	ip, err := ParseAddrPort(s)
@@ -1107,36 +1103,35 @@
 // All ports are valid, including zero.
 func (p AddrPort) IsValid() bool { return p.ip.IsValid() }
 
+// Compare returns an integer comparing two AddrPorts.
+// The result will be 0 if p == p2, -1 if p < p2, and +1 if p > p2.
+// AddrPorts sort first by IP address, then port.
+func (p AddrPort) Compare(p2 AddrPort) int {
+	if c := p.Addr().Compare(p2.Addr()); c != 0 {
+		return c
+	}
+	return cmp.Compare(p.Port(), p2.Port())
+}
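The new Compare method composes with slices.SortFunc, sorting first by address and then by port. A short usage sketch (assumes "net/netip" and "slices"):

	addrs := []netip.AddrPort{
		netip.MustParseAddrPort("10.0.0.2:80"),
		netip.MustParseAddrPort("10.0.0.1:443"),
		netip.MustParseAddrPort("10.0.0.1:80"),
	}
	slices.SortFunc(addrs, func(a, b netip.AddrPort) int { return a.Compare(b) })
	// addrs is now [10.0.0.1:80 10.0.0.1:443 10.0.0.2:80]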
+
 func (p AddrPort) String() string {
 	switch p.ip.z {
 	case z0:
 		return "invalid AddrPort"
 	case z4:
-		a := p.ip.As4()
-		buf := make([]byte, 0, 21)
-		for i := range a {
-			buf = strconv.AppendUint(buf, uint64(a[i]), 10)
-			buf = append(buf, "...:"[i])
-		}
+		const max = len("255.255.255.255:65535")
+		buf := make([]byte, 0, max)
+		buf = p.ip.appendTo4(buf)
+		buf = append(buf, ':')
 		buf = strconv.AppendUint(buf, uint64(p.port), 10)
 		return string(buf)
 	default:
 		// TODO: this could be more efficient allocation-wise:
-		return joinHostPort(p.ip.String(), itoa.Itoa(int(p.port)))
+		return "[" + p.ip.String() + "]:" + itoa.Uitoa(uint(p.port))
 	}
 }
 
-func joinHostPort(host, port string) string {
-	// We assume that host is a literal IPv6 address if host has
-	// colons.
-	if bytealg.IndexByteString(host, ':') >= 0 {
-		return "[" + host + "]:" + port
-	}
-	return host + ":" + port
-}
-
 // AppendTo appends a text encoding of p,
-// as generated by MarshalText,
+// as generated by [AddrPort.MarshalText],
 // to b and returns the extended buffer.
 func (p AddrPort) AppendTo(b []byte) []byte {
 	switch p.ip.z {
@@ -1163,9 +1158,9 @@
 	return b
 }
 
-// MarshalText implements the encoding.TextMarshaler interface. The
-// encoding is the same as returned by String, with one exception: if
-// p.Addr() is the zero Addr, the encoding is the empty string.
+// MarshalText implements the [encoding.TextMarshaler] interface. The
+// encoding is the same as returned by [AddrPort.String], with one exception: if
+// p.Addr() is the zero [Addr], the encoding is the empty string.
 func (p AddrPort) MarshalText() ([]byte, error) {
 	var max int
 	switch p.ip.z {
@@ -1181,8 +1176,8 @@
 }
 
 // UnmarshalText implements the encoding.TextUnmarshaler
-// interface. The AddrPort is expected in a form
-// generated by MarshalText or accepted by ParseAddrPort.
+// interface. The [AddrPort] is expected in a form
+// generated by [AddrPort.MarshalText] or accepted by [ParseAddrPort].
 func (p *AddrPort) UnmarshalText(text []byte) error {
 	if len(text) == 0 {
 		*p = AddrPort{}
@@ -1193,8 +1188,8 @@
 	return err
 }
 
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-// It returns Addr.MarshalBinary with an additional two bytes appended
+// MarshalBinary implements the [encoding.BinaryMarshaler] interface.
+// It returns [Addr.MarshalBinary] with an additional two bytes appended
 // containing the port in little-endian.
 func (p AddrPort) MarshalBinary() ([]byte, error) {
 	b := p.Addr().marshalBinaryWithTrailingBytes(2)
@@ -1202,8 +1197,8 @@
 	return b, nil
 }
 
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It expects data in the form generated by MarshalBinary.
+// UnmarshalBinary implements the [encoding.BinaryUnmarshaler] interface.
+// It expects data in the form generated by [AddrPort.MarshalBinary].
 func (p *AddrPort) UnmarshalBinary(b []byte) error {
 	if len(b) < 2 {
 		return errors.New("unexpected slice size")
@@ -1219,7 +1214,7 @@
 
 // Prefix is an IP address prefix (CIDR) representing an IP network.
 //
-// The first Bits() of Addr() are specified. The remaining bits match any address.
+// The first [Prefix.Bits]() of [Addr]() are specified. The remaining bits match any address.
 // The range of Bits() is [0,32] for IPv4 or [0,128] for IPv6.
 type Prefix struct {
 	ip Addr
@@ -1229,13 +1224,13 @@
 	bitsPlusOne uint8
 }
 
-// PrefixFrom returns a Prefix with the provided IP address and bit
+// PrefixFrom returns a [Prefix] with the provided IP address and bit
 // prefix length.
 //
-// It does not allocate. Unlike Addr.Prefix, PrefixFrom does not mask
+// It does not allocate. Unlike [Addr.Prefix], [PrefixFrom] does not mask
 // off the host bits of ip.
 //
-// If bits is less than zero or greater than ip.BitLen, Prefix.Bits
+// If bits is less than zero or greater than ip.BitLen, [Prefix.Bits]
 // will return an invalid value -1.
 func PrefixFrom(ip Addr, bits int) Prefix {
 	var bitsPlusOne uint8
@@ -1257,8 +1252,8 @@
 func (p Prefix) Bits() int { return int(p.bitsPlusOne) - 1 }
 
 // IsValid reports whether p.Bits() has a valid range for p.Addr().
-// If p.Addr() is the zero Addr, IsValid returns false.
-// Note that if p is the zero Prefix, then p.IsValid() == false.
+// If p.Addr() is the zero [Addr], IsValid returns false.
+// Note that if p is the zero [Prefix], then p.IsValid() == false.
 func (p Prefix) IsValid() bool { return p.bitsPlusOne > 0 }
 
 func (p Prefix) isZero() bool { return p == Prefix{} }
@@ -1266,6 +1261,24 @@
 // IsSingleIP reports whether p contains exactly one IP.
 func (p Prefix) IsSingleIP() bool { return p.IsValid() && p.Bits() == p.ip.BitLen() }
 
+// compare returns an integer comparing two prefixes.
+// The result will be 0 if p == p2, -1 if p < p2, and +1 if p > p2.
+// Prefixes sort first by validity (invalid before valid), then
+// address family (IPv4 before IPv6), then prefix length, then
+// address.
+//
+// Unexported for Go 1.22 because we may want to compare by p.Addr first.
+// See post-acceptance discussion on go.dev/issue/61642.
+func (p Prefix) compare(p2 Prefix) int {
+	if c := cmp.Compare(p.Addr().BitLen(), p2.Addr().BitLen()); c != 0 {
+		return c
+	}
+	if c := cmp.Compare(p.Bits(), p2.Bits()); c != 0 {
+		return c
+	}
+	return p.Addr().Compare(p2.Addr())
+}
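Because compare stays unexported in Go 1.22 (it is surfaced only to the package's own tests via export_test.go), callers who need the same ordering today can rebuild it from the exported API. A sketch under that assumption (assumes "cmp" and "net/netip"):

	// comparePrefix mirrors the unexported ordering above using only exported
	// methods: address family, then prefix length, then address.
	func comparePrefix(a, b netip.Prefix) int {
		if c := cmp.Compare(a.Addr().BitLen(), b.Addr().BitLen()); c != 0 {
			return c
		}
		if c := cmp.Compare(a.Bits(), b.Bits()); c != 0 {
			return c
		}
		return a.Addr().Compare(b.Addr())
	}

	// slices.SortFunc(prefixes, comparePrefix)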
+
 // ParsePrefix parses s as an IP address prefix.
 // The string can be in the form "192.168.1.0/24" or "2001:db8::/32",
 // the CIDR notation defined in RFC 4632 and RFC 4291.
@@ -1274,7 +1287,7 @@
 //
 // Note that masked address bits are not zeroed. Use Masked for that.
 func ParsePrefix(s string) (Prefix, error) {
-	i := stringsLastIndexByte(s, '/')
+	i := bytealg.LastIndexByteString(s, '/')
 	if i < 0 {
 		return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): no '/'")
 	}
@@ -1288,6 +1301,12 @@
 	}
 
 	bitsStr := s[i+1:]
+
+	// strconv.Atoi accepts a leading sign and leading zeroes, but we don't want that.
+	if len(bitsStr) > 1 && (bitsStr[0] < '1' || bitsStr[0] > '9') {
+		return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): bad bits after slash: " + strconv.Quote(bitsStr))
+	}
+
 	bits, err := strconv.Atoi(bitsStr)
 	if err != nil {
 		return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): bad bits after slash: " + strconv.Quote(bitsStr))
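The extra check above makes ParsePrefix stricter than strconv.Atoi: a sign or leading zero after the slash is now an error. A small illustration of the new behavior (assumes "fmt" and "net/netip"):

	if _, err := netip.ParsePrefix("192.168.0.0/016"); err != nil {
		fmt.Println(err) // ... bad bits after slash: "016"
	}
	if _, err := netip.ParsePrefix("192.168.0.0/+16"); err != nil {
		fmt.Println(err) // ... bad bits after slash: "+16"
	}
	fmt.Println(netip.MustParsePrefix("192.168.0.0/16")) // still accepted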
@@ -1302,7 +1321,7 @@
 	return PrefixFrom(ip, bits), nil
 }
 
-// MustParsePrefix calls ParsePrefix(s) and panics on error.
+// MustParsePrefix calls [ParsePrefix](s) and panics on error.
 // It is intended for use in tests with hard-coded strings.
 func MustParsePrefix(s string) Prefix {
 	ip, err := ParsePrefix(s)
@@ -1315,7 +1334,7 @@
 // Masked returns p in its canonical form, with all but the high
 // p.Bits() bits of p.Addr() masked off.
 //
-// If p is zero or otherwise invalid, Masked returns the zero Prefix.
+// If p is zero or otherwise invalid, Masked returns the zero [Prefix].
 func (p Prefix) Masked() Prefix {
 	m, _ := p.ip.Prefix(p.Bits())
 	return m
@@ -1392,7 +1411,7 @@
 }
 
 // AppendTo appends a text encoding of p,
-// as generated by MarshalText,
+// as generated by [Prefix.MarshalText],
 // to b and returns the extended buffer.
 func (p Prefix) AppendTo(b []byte) []byte {
 	if p.isZero() {
@@ -1419,8 +1438,8 @@
 	return b
 }
 
-// MarshalText implements the encoding.TextMarshaler interface,
-// The encoding is the same as returned by String, with one exception:
+// MarshalText implements the [encoding.TextMarshaler] interface.
+// The encoding is the same as returned by [Prefix.String], with one exception:
 // If p is the zero value, the encoding is the empty string.
 func (p Prefix) MarshalText() ([]byte, error) {
 	var max int
@@ -1437,8 +1456,8 @@
 }
 
 // UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The IP address is expected in a form accepted by ParsePrefix
-// or generated by MarshalText.
+// The IP address is expected in a form accepted by [ParsePrefix]
+// or generated by [Prefix.MarshalText].
 func (p *Prefix) UnmarshalText(text []byte) error {
 	if len(text) == 0 {
 		*p = Prefix{}
@@ -1449,8 +1468,8 @@
 	return err
 }
 
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-// It returns Addr.MarshalBinary with an additional byte appended
+// MarshalBinary implements the [encoding.BinaryMarshaler] interface.
+// It returns [Addr.MarshalBinary] with an additional byte appended
 // containing the prefix bits.
 func (p Prefix) MarshalBinary() ([]byte, error) {
 	b := p.Addr().withoutZone().marshalBinaryWithTrailingBytes(1)
@@ -1458,8 +1477,8 @@
 	return b, nil
 }
 
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It expects data in the form generated by MarshalBinary.
+// UnmarshalBinary implements the [encoding.BinaryUnmarshaler] interface.
+// It expects data in the form generated by [Prefix.MarshalBinary].
 func (p *Prefix) UnmarshalBinary(b []byte) error {
 	if len(b) < 1 {
 		return errors.New("unexpected slice size")
diff --git a/src/net/netip/netip_test.go b/src/net/netip/netip_test.go
index 0f80bb0..a748ac3 100644
--- a/src/net/netip/netip_test.go
+++ b/src/net/netip/netip_test.go
@@ -14,6 +14,7 @@
 	"net"
 	. "net/netip"
 	"reflect"
+	"slices"
 	"sort"
 	"strings"
 	"testing"
@@ -389,6 +390,7 @@
 		want string
 	}{
 		{mustIPPort("1.2.3.4:80"), "1.2.3.4:80"},
+		{mustIPPort("[::]:80"), "[::]:80"},
 		{mustIPPort("[1::CAFE]:80"), "[1::cafe]:80"},
 		{mustIPPort("[1::CAFE%en0]:80"), "[1::cafe%en0]:80"},
 		{mustIPPort("[::FFFF:192.168.140.255]:80"), "[::ffff:192.168.140.255]:80"},
@@ -812,7 +814,7 @@
 	}
 }
 
-func TestLessCompare(t *testing.T) {
+func TestAddrLessCompare(t *testing.T) {
 	tests := []struct {
 		a, b Addr
 		want bool
@@ -882,6 +884,109 @@
 	}
 }
 
+func TestAddrPortCompare(t *testing.T) {
+	tests := []struct {
+		a, b AddrPort
+		want int
+	}{
+		{AddrPort{}, AddrPort{}, 0},
+		{AddrPort{}, mustIPPort("1.2.3.4:80"), -1},
+
+		{mustIPPort("1.2.3.4:80"), mustIPPort("1.2.3.4:80"), 0},
+		{mustIPPort("[::1]:80"), mustIPPort("[::1]:80"), 0},
+
+		{mustIPPort("1.2.3.4:80"), mustIPPort("2.3.4.5:22"), -1},
+		{mustIPPort("[::1]:80"), mustIPPort("[::2]:22"), -1},
+
+		{mustIPPort("1.2.3.4:80"), mustIPPort("1.2.3.4:443"), -1},
+		{mustIPPort("[::1]:80"), mustIPPort("[::1]:443"), -1},
+
+		{mustIPPort("1.2.3.4:80"), mustIPPort("[0102:0304::0]:80"), -1},
+	}
+	for _, tt := range tests {
+		got := tt.a.Compare(tt.b)
+		if got != tt.want {
+			t.Errorf("Compare(%q, %q) = %v; want %v", tt.a, tt.b, got, tt.want)
+		}
+
+		// Also check inverse.
+		if got == tt.want {
+			got2 := tt.b.Compare(tt.a)
+			if want2 := -1 * tt.want; got2 != want2 {
+				t.Errorf("Compare(%q, %q) was correctly %v, but Compare(%q, %q) was %v", tt.a, tt.b, got, tt.b, tt.a, got2)
+			}
+		}
+	}
+
+	// And just sort.
+	values := []AddrPort{
+		mustIPPort("[::1]:80"),
+		mustIPPort("[::2]:80"),
+		AddrPort{},
+		mustIPPort("1.2.3.4:443"),
+		mustIPPort("8.8.8.8:8080"),
+		mustIPPort("[::1%foo]:1024"),
+	}
+	slices.SortFunc(values, func(a, b AddrPort) int { return a.Compare(b) })
+	got := fmt.Sprintf("%s", values)
+	want := `[invalid AddrPort 1.2.3.4:443 8.8.8.8:8080 [::1]:80 [::1%foo]:1024 [::2]:80]`
+	if got != want {
+		t.Errorf("unexpected sort\n got: %s\nwant: %s\n", got, want)
+	}
+}
+
+func TestPrefixCompare(t *testing.T) {
+	tests := []struct {
+		a, b Prefix
+		want int
+	}{
+		{Prefix{}, Prefix{}, 0},
+		{Prefix{}, mustPrefix("1.2.3.0/24"), -1},
+
+		{mustPrefix("1.2.3.0/24"), mustPrefix("1.2.3.0/24"), 0},
+		{mustPrefix("fe80::/64"), mustPrefix("fe80::/64"), 0},
+
+		{mustPrefix("1.2.3.0/24"), mustPrefix("1.2.4.0/24"), -1},
+		{mustPrefix("fe80::/64"), mustPrefix("fe90::/64"), -1},
+
+		{mustPrefix("1.2.0.0/16"), mustPrefix("1.2.0.0/24"), -1},
+		{mustPrefix("fe80::/48"), mustPrefix("fe80::/64"), -1},
+
+		{mustPrefix("1.2.3.0/24"), mustPrefix("fe80::/8"), -1},
+	}
+	for _, tt := range tests {
+		got := tt.a.Compare(tt.b)
+		if got != tt.want {
+			t.Errorf("Compare(%q, %q) = %v; want %v", tt.a, tt.b, got, tt.want)
+		}
+
+		// Also check inverse.
+		if got == tt.want {
+			got2 := tt.b.Compare(tt.a)
+			if want2 := -1 * tt.want; got2 != want2 {
+				t.Errorf("Compare(%q, %q) was correctly %v, but Compare(%q, %q) was %v", tt.a, tt.b, got, tt.b, tt.a, got2)
+			}
+		}
+	}
+
+	// And just sort.
+	values := []Prefix{
+		mustPrefix("1.2.3.0/24"),
+		mustPrefix("fe90::/64"),
+		mustPrefix("fe80::/64"),
+		mustPrefix("1.2.0.0/16"),
+		Prefix{},
+		mustPrefix("fe80::/48"),
+		mustPrefix("1.2.0.0/24"),
+	}
+	slices.SortFunc(values, func(a, b Prefix) int { return a.Compare(b) })
+	got := fmt.Sprintf("%s", values)
+	want := `[invalid Prefix 1.2.0.0/16 1.2.0.0/24 1.2.3.0/24 fe80::/48 fe80::/64 fe90::/64]`
+	if got != want {
+		t.Errorf("unexpected sort\n got: %s\nwant: %s\n", got, want)
+	}
+}
+
 func TestIPStringExpanded(t *testing.T) {
 	tests := []struct {
 		ip Addr
@@ -1352,7 +1457,7 @@
 		},
 		{
 			prefix: "1.1.1.0/-1",
-			errstr: "out of range",
+			errstr: "bad bits",
 		},
 		{
 			prefix: "1.1.1.0/33",
@@ -1371,6 +1476,22 @@
 			prefix: "2001:db8::%a/32",
 			errstr: "zones cannot be present",
 		},
+		{
+			prefix: "1.1.1.0/+32",
+			errstr: "bad bits",
+		},
+		{
+			prefix: "1.1.1.0/-32",
+			errstr: "bad bits",
+		},
+		{
+			prefix: "1.1.1.0/032",
+			errstr: "bad bits",
+		},
+		{
+			prefix: "1.1.1.0/0032",
+			errstr: "bad bits",
+		},
 	}
 	for _, test := range tests {
 		t.Run(test.prefix, func(t *testing.T) {
diff --git a/src/net/packetconn_test.go b/src/net/packetconn_test.go
index dc0c14b..e39e7de 100644
--- a/src/net/packetconn_test.go
+++ b/src/net/packetconn_test.go
@@ -2,10 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// This file implements API tests across platforms and will never have a build
-// tag.
-
-//go:build !js && !wasip1
+// This file implements API tests across platforms and should never have a build
+// constraint.
 
 package net
 
diff --git a/src/net/parse.go b/src/net/parse.go
index fbc5014..29dffad 100644
--- a/src/net/parse.go
+++ b/src/net/parse.go
@@ -180,42 +180,6 @@
 	return byte(n), ok && ei == 2
 }
 
-// Convert i to a hexadecimal string. Leading zeros are not printed.
-func appendHex(dst []byte, i uint32) []byte {
-	if i == 0 {
-		return append(dst, '0')
-	}
-	for j := 7; j >= 0; j-- {
-		v := i >> uint(j*4)
-		if v > 0 {
-			dst = append(dst, hexDigit[v&0xf])
-		}
-	}
-	return dst
-}
-
-// Number of occurrences of b in s.
-func count(s string, b byte) int {
-	n := 0
-	for i := 0; i < len(s); i++ {
-		if s[i] == b {
-			n++
-		}
-	}
-	return n
-}
-
-// Index of rightmost occurrence of b in s.
-func last(s string, b byte) int {
-	i := len(s)
-	for i--; i >= 0; i-- {
-		if s[i] == b {
-			break
-		}
-	}
-	return i
-}
-
 // hasUpperCase tells whether the given string contains at least one upper-case.
 func hasUpperCase(s string) bool {
 	for i := range s {
diff --git a/src/net/pipe.go b/src/net/pipe.go
index f174193..69955e4 100644
--- a/src/net/pipe.go
+++ b/src/net/pipe.go
@@ -106,7 +106,7 @@
 }
 
 // Pipe creates a synchronous, in-memory, full duplex
-// network connection; both ends implement the Conn interface.
+// network connection; both ends implement the [Conn] interface.
 // Reads on one end are matched with writes on the other,
 // copying data directly between the two; there is no internal
 // buffering.
diff --git a/src/net/platform_test.go b/src/net/platform_test.go
index 71e9082..709d4a3 100644
--- a/src/net/platform_test.go
+++ b/src/net/platform_test.go
@@ -165,7 +165,7 @@
 	// A few APIs like File and Read/WriteMsg{UDP,IP} are not
 	// fully implemented yet on Plan 9 and Windows.
 	switch runtime.GOOS {
-	case "windows":
+	case "windows", "js", "wasip1":
 		if network == "file+net" {
 			t.Logf(format, args...)
 			return
diff --git a/src/net/port_unix.go b/src/net/port_unix.go
index 0b2ea3e..df73dba 100644
--- a/src/net/port_unix.go
+++ b/src/net/port_unix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix || (js && wasm) || wasip1
+//go:build unix || js || wasip1
 
 // Read system port mappings from /etc/services
 
diff --git a/src/net/protoconn_test.go b/src/net/protoconn_test.go
index c566807..a617470 100644
--- a/src/net/protoconn_test.go
+++ b/src/net/protoconn_test.go
@@ -5,8 +5,6 @@
 // This file implements API tests across platforms and will never have a build
 // tag.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -39,7 +37,7 @@
 	}
 	defer ln.Close()
 	ln.Addr()
-	ln.SetDeadline(time.Now().Add(30 * time.Nanosecond))
+	mustSetDeadline(t, ln.SetDeadline, 30*time.Nanosecond)
 
 	if c, err := ln.Accept(); err != nil {
 		if !err.(Error).Timeout() {
@@ -162,6 +160,10 @@
 }
 
 func TestIPConnSpecificMethods(t *testing.T) {
+	if !testableNetwork("ip4") {
+		t.Skip("skipping: ip4 not supported")
+	}
+
 	la, err := ResolveIPAddr("ip4", "127.0.0.1")
 	if err != nil {
 		t.Fatal(err)
@@ -217,7 +219,7 @@
 	defer ln.Close()
 	defer os.Remove(addr)
 	ln.Addr()
-	ln.SetDeadline(time.Now().Add(30 * time.Nanosecond))
+	mustSetDeadline(t, ln.SetDeadline, 30*time.Nanosecond)
 
 	if c, err := ln.Accept(); err != nil {
 		if !err.(Error).Timeout() {
@@ -235,7 +237,7 @@
 	}
 
 	if f, err := ln.File(); err != nil {
-		t.Fatal(err)
+		condFatalf(t, "file+net", "%v", err)
 	} else {
 		f.Close()
 	}
@@ -332,7 +334,7 @@
 	}
 
 	if f, err := c1.File(); err != nil {
-		t.Fatal(err)
+		condFatalf(t, "file+net", "%v", err)
 	} else {
 		f.Close()
 	}
diff --git a/src/net/rawconn.go b/src/net/rawconn.go
index 974320c..19228e9 100644
--- a/src/net/rawconn.go
+++ b/src/net/rawconn.go
@@ -63,7 +63,7 @@
 
 // PollFD returns the poll.FD of the underlying connection.
 //
-// Other packages in std that also import internal/poll (such as os)
+// Other packages in std that also import [internal/poll] (such as os)
 // can use a type assertion to access this extension method so that
 // they can pass the *poll.FD to functions like poll.Splice.
 //
@@ -75,8 +75,19 @@
 	return &c.fd.pfd
 }
 
-func newRawConn(fd *netFD) (*rawConn, error) {
-	return &rawConn{fd: fd}, nil
+func newRawConn(fd *netFD) *rawConn {
+	return &rawConn{fd: fd}
+}
+
+// Network returns the network type of the underlying connection.
+//
+// Other packages in std that import internal/poll and are unable to
+// import net (such as os) can use a type assertion to access this
+// extension method so that they can distinguish different socket types.
+//
+// Network is not intended for use outside the standard library.
+func (c *rawConn) Network() poll.String {
+	return poll.String(c.fd.net)
 }
 
 type rawListener struct {
@@ -91,6 +102,6 @@
 	return syscall.EINVAL
 }
 
-func newRawListener(fd *netFD) (*rawListener, error) {
-	return &rawListener{rawConn{fd: fd}}, nil
+func newRawListener(fd *netFD) *rawListener {
+	return &rawListener{rawConn{fd: fd}}
 }
diff --git a/src/net/rawconn_stub_test.go b/src/net/rawconn_stub_test.go
index c8ad80c..6d54f2d 100644
--- a/src/net/rawconn_stub_test.go
+++ b/src/net/rawconn_stub_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (js && wasm) || plan9 || wasip1
+//go:build js || plan9 || wasip1
 
 package net
 
diff --git a/src/net/rawconn_test.go b/src/net/rawconn_test.go
index 06d5856..70b16c4 100644
--- a/src/net/rawconn_test.go
+++ b/src/net/rawconn_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -15,7 +13,7 @@
 
 func TestRawConnReadWrite(t *testing.T) {
 	switch runtime.GOOS {
-	case "plan9":
+	case "plan9", "js", "wasip1":
 		t.Skipf("not supported on %s", runtime.GOOS)
 	}
 
@@ -169,7 +167,7 @@
 
 func TestRawConnControl(t *testing.T) {
 	switch runtime.GOOS {
-	case "plan9":
+	case "plan9", "js", "wasip1":
 		t.Skipf("not supported on %s", runtime.GOOS)
 	}
 
diff --git a/src/net/resolverdialfunc_test.go b/src/net/resolverdialfunc_test.go
index 1de0402..1af4199 100644
--- a/src/net/resolverdialfunc_test.go
+++ b/src/net/resolverdialfunc_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 // Test that Resolver.Dial can be a func returning an in-memory net.Conn
 // speaking DNS.
 
diff --git a/src/net/rlimit_js.go b/src/net/rlimit_js.go
new file mode 100644
index 0000000..9ee5748
--- /dev/null
+++ b/src/net/rlimit_js.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js
+
+package net
+
+// concurrentThreadsLimit returns the number of threads we permit to
+// run concurrently doing DNS lookups.
+func concurrentThreadsLimit() int {
+	return 500
+}
diff --git a/src/net/rlimit_unix.go b/src/net/rlimit_unix.go
new file mode 100644
index 0000000..0094756
--- /dev/null
+++ b/src/net/rlimit_unix.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || wasip1
+
+package net
+
+import "syscall"
+
+// concurrentThreadsLimit returns the number of threads we permit to
+// run concurrently doing DNS lookups via cgo. A DNS lookup may use a
+// file descriptor so we limit this to less than the number of
+// permitted open files. On some systems, notably Darwin, if
+// getaddrinfo is unable to open a file descriptor it simply returns
+// EAI_NONAME rather than a useful error. Limiting the number of
+// concurrent getaddrinfo calls to less than the permitted number of
+// file descriptors makes that error less likely. We don't bother to
+// apply the same limit to DNS lookups run directly from Go, because
+// there we will return a meaningful "too many open files" error.
+func concurrentThreadsLimit() int {
+	var rlim syscall.Rlimit
+	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil {
+		return 500
+	}
+	r := rlim.Cur
+	if r > 500 {
+		r = 500
+	} else if r > 30 {
+		r -= 30
+	}
+	return int(r)
+}
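The resolver enforces this limit elsewhere in the package; the standard way such a cap is applied is a counting semaphore sized by the value returned here. A hedged sketch with illustrative names, not the package's actual implementation:

	// A buffered channel used as a counting semaphore keeps at most
	// concurrentThreadsLimit() lookups in flight at once.
	var lookupSem = make(chan struct{}, concurrentThreadsLimit())

	func limitedLookup(do func()) {
		lookupSem <- struct{}{}        // acquire a slot
		defer func() { <-lookupSem }() // release it
		do()
	}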
diff --git a/src/net/rpc/client.go b/src/net/rpc/client.go
index 42d1351..ffdc435 100644
--- a/src/net/rpc/client.go
+++ b/src/net/rpc/client.go
@@ -53,13 +53,13 @@
 
 // A ClientCodec implements writing of RPC requests and
 // reading of RPC responses for the client side of an RPC session.
-// The client calls WriteRequest to write a request to the connection
-// and calls ReadResponseHeader and ReadResponseBody in pairs
-// to read responses. The client calls Close when finished with the
+// The client calls [ClientCodec.WriteRequest] to write a request to the connection
+// and calls [ClientCodec.ReadResponseHeader] and [ClientCodec.ReadResponseBody] in pairs
+// to read responses. The client calls [ClientCodec.Close] when finished with the
 // connection. ReadResponseBody may be called with a nil
 // argument to force the body of the response to be read and then
 // discarded.
-// See NewClient's comment for information about concurrent access.
+// See [NewClient]'s comment for information about concurrent access.
 type ClientCodec interface {
 	WriteRequest(*Request, any) error
 	ReadResponseHeader(*Response) error
@@ -181,7 +181,7 @@
 	}
 }
 
-// NewClient returns a new Client to handle requests to the
+// NewClient returns a new [Client] to handle requests to the
 // set of services at the other end of the connection.
 // It adds a buffer to the write side of the connection so
 // the header and payload are sent as a unit.
@@ -196,7 +196,7 @@
 	return NewClientWithCodec(client)
 }
 
-// NewClientWithCodec is like NewClient but uses the specified
+// NewClientWithCodec is like [NewClient] but uses the specified
 // codec to encode requests and decode responses.
 func NewClientWithCodec(codec ClientCodec) *Client {
 	client := &Client{
@@ -279,7 +279,7 @@
 }
 
 // Close calls the underlying codec's Close method. If the connection is already
-// shutting down, ErrShutdown is returned.
+// shutting down, [ErrShutdown] is returned.
 func (client *Client) Close() error {
 	client.mutex.Lock()
 	if client.closing {
@@ -291,7 +291,7 @@
 	return client.codec.Close()
 }
 
-// Go invokes the function asynchronously. It returns the Call structure representing
+// Go invokes the function asynchronously. It returns the [Call] structure representing
 // the invocation. The done channel will signal when the call is complete by returning
 // the same Call object. If done is nil, Go will allocate a new channel.
 // If non-nil, done must be buffered or Go will deliberately crash.
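
Annotation: the Go doc above describes the asynchronous contract: the call completes by delivering the same Call value on the done channel, which must be buffered. A compact caller-side sketch, assuming an Arith service is already listening at the illustrative address; the Args type is made up for the example.

	package main

	import (
		"fmt"
		"log"
		"net/rpc"
	)

	// Args is a hypothetical request type matching a service registered elsewhere.
	type Args struct{ A, B int }

	func main() {
		client, err := rpc.Dial("tcp", "localhost:1234") // address is illustrative
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()

		var reply int
		// Go returns immediately; the buffered done channel receives the same
		// *rpc.Call once the reply (or an error) has arrived.
		call := client.Go("Arith.Multiply", &Args{A: 7, B: 8}, &reply, make(chan *rpc.Call, 1))
		<-call.Done
		if call.Error != nil {
			log.Fatal(call.Error)
		}
		fmt.Println("Arith.Multiply:", reply)
	}
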
diff --git a/src/net/rpc/jsonrpc/client.go b/src/net/rpc/jsonrpc/client.go
index c473017..1beba0f 100644
--- a/src/net/rpc/jsonrpc/client.go
+++ b/src/net/rpc/jsonrpc/client.go
@@ -33,7 +33,7 @@
 	pending map[uint64]string // map request id to method name
 }
 
-// NewClientCodec returns a new rpc.ClientCodec using JSON-RPC on conn.
+// NewClientCodec returns a new [rpc.ClientCodec] using JSON-RPC on conn.
 func NewClientCodec(conn io.ReadWriteCloser) rpc.ClientCodec {
 	return &clientCodec{
 		dec:     json.NewDecoder(conn),
@@ -108,7 +108,7 @@
 	return c.c.Close()
 }
 
-// NewClient returns a new rpc.Client to handle requests to the
+// NewClient returns a new [rpc.Client] to handle requests to the
 // set of services at the other end of the connection.
 func NewClient(conn io.ReadWriteCloser) *rpc.Client {
 	return rpc.NewClientWithCodec(NewClientCodec(conn))
diff --git a/src/net/rpc/jsonrpc/server.go b/src/net/rpc/jsonrpc/server.go
index 3ee4ddf..57a4de1 100644
--- a/src/net/rpc/jsonrpc/server.go
+++ b/src/net/rpc/jsonrpc/server.go
@@ -33,7 +33,7 @@
 	pending map[uint64]*json.RawMessage
 }
 
-// NewServerCodec returns a new rpc.ServerCodec using JSON-RPC on conn.
+// NewServerCodec returns a new [rpc.ServerCodec] using JSON-RPC on conn.
 func NewServerCodec(conn io.ReadWriteCloser) rpc.ServerCodec {
 	return &serverCodec{
 		dec:     json.NewDecoder(conn),
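
Annotation: NewClientCodec and NewServerCodec above swap the default gob codec for JSON-RPC. A self-contained sketch wiring both ends of an in-memory net.Pipe, with a tiny Echo service invented for the example:

	package main

	import (
		"fmt"
		"log"
		"net"
		"net/rpc"
		"net/rpc/jsonrpc"
		"strings"
	)

	// Echo is a throwaway service defined only for this sketch.
	type Echo struct{}

	func (Echo) Upper(s string, reply *string) error {
		*reply = strings.ToUpper(s)
		return nil
	}

	func main() {
		srv := rpc.NewServer()
		if err := srv.Register(Echo{}); err != nil {
			log.Fatal(err)
		}

		cliConn, srvConn := net.Pipe() // in-memory connection; no sockets needed
		go srv.ServeCodec(jsonrpc.NewServerCodec(srvConn))

		client := jsonrpc.NewClient(cliConn)
		defer client.Close()

		var reply string
		if err := client.Call("Echo.Upper", "hello", &reply); err != nil {
			log.Fatal(err)
		}
		fmt.Println(reply) // HELLO
	}
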
diff --git a/src/net/rpc/server.go b/src/net/rpc/server.go
index 5cea2cc..1771726 100644
--- a/src/net/rpc/server.go
+++ b/src/net/rpc/server.go
@@ -30,17 +30,17 @@
 The method's first argument represents the arguments provided by the caller; the
 second argument represents the result parameters to be returned to the caller.
 The method's return value, if non-nil, is passed back as a string that the client
-sees as if created by errors.New.  If an error is returned, the reply parameter
+sees as if created by [errors.New].  If an error is returned, the reply parameter
 will not be sent back to the client.
 
-The server may handle requests on a single connection by calling ServeConn.  More
-typically it will create a network listener and call Accept or, for an HTTP
-listener, HandleHTTP and http.Serve.
+The server may handle requests on a single connection by calling [ServeConn].  More
+typically it will create a network listener and call [Accept] or, for an HTTP
+listener, [HandleHTTP] and [http.Serve].
 
 A client wishing to use the service establishes a connection and then invokes
-NewClient on the connection.  The convenience function Dial (DialHTTP) performs
+[NewClient] on the connection.  The convenience function [Dial] ([DialHTTP]) performs
 both steps for a raw network connection (an HTTP connection).  The resulting
-Client object has two methods, Call and Go, that specify the service and method to
+[Client] object has two methods, [Call] and Go, that specify the service and method to
 call, a pointer containing the arguments, and a pointer to receive the result
 parameters.
 
@@ -48,7 +48,7 @@
 launches the call asynchronously and signals completion using the Call
 structure's Done channel.
 
-Unless an explicit codec is set up, package encoding/gob is used to
+Unless an explicit codec is set up, package [encoding/gob] is used to
 transport the data.
 
 Here is a simple example.  A server wishes to export an object of type Arith:
@@ -146,9 +146,8 @@
 	DefaultDebugPath = "/debug/rpc"
 )
 
-// Precompute the reflect type for error. Can't use error directly
-// because Typeof takes an empty interface value. This is annoying.
-var typeOfError = reflect.TypeOf((*error)(nil)).Elem()
+// Precompute the reflect type for error.
+var typeOfError = reflect.TypeFor[error]()
 
 type methodType struct {
 	sync.Mutex // protects counters
@@ -193,12 +192,12 @@
 	freeResp   *Response
 }
 
-// NewServer returns a new Server.
+// NewServer returns a new [Server].
 func NewServer() *Server {
 	return &Server{}
 }
 
-// DefaultServer is the default instance of *Server.
+// DefaultServer is the default instance of [*Server].
 var DefaultServer = NewServer()
 
 // Is this type exported or a builtin?
@@ -226,7 +225,7 @@
 	return server.register(rcvr, "", false)
 }
 
-// RegisterName is like Register but uses the provided name for the type
+// RegisterName is like [Register] but uses the provided name for the type
 // instead of the receiver's concrete type.
 func (server *Server) RegisterName(name string, rcvr any) error {
 	return server.register(rcvr, name, true)
@@ -441,8 +440,8 @@
 // ServeConn blocks, serving the connection until the client hangs up.
 // The caller typically invokes ServeConn in a go statement.
 // ServeConn uses the gob wire format (see package gob) on the
-// connection. To use an alternate codec, use ServeCodec.
-// See NewClient's comment for information about concurrent access.
+// connection. To use an alternate codec, use [ServeCodec].
+// See [NewClient]'s comment for information about concurrent access.
 func (server *Server) ServeConn(conn io.ReadWriteCloser) {
 	buf := bufio.NewWriter(conn)
 	srv := &gobServerCodec{
@@ -454,7 +453,7 @@
 	server.ServeCodec(srv)
 }
 
-// ServeCodec is like ServeConn but uses the specified codec to
+// ServeCodec is like [ServeConn] but uses the specified codec to
 // decode requests and encode responses.
 func (server *Server) ServeCodec(codec ServerCodec) {
 	sending := new(sync.Mutex)
@@ -484,7 +483,7 @@
 	codec.Close()
 }
 
-// ServeRequest is like ServeCodec but synchronously serves a single request.
+// ServeRequest is like [ServeCodec] but synchronously serves a single request.
 // It does not close the codec upon completion.
 func (server *Server) ServeRequest(codec ServerCodec) error {
 	sending := new(sync.Mutex)
@@ -636,10 +635,10 @@
 	}
 }
 
-// Register publishes the receiver's methods in the DefaultServer.
+// Register publishes the receiver's methods in the [DefaultServer].
 func Register(rcvr any) error { return DefaultServer.Register(rcvr) }
 
-// RegisterName is like Register but uses the provided name for the type
+// RegisterName is like [Register] but uses the provided name for the type
 // instead of the receiver's concrete type.
 func RegisterName(name string, rcvr any) error {
 	return DefaultServer.RegisterName(name, rcvr)
@@ -647,12 +646,12 @@
 
 // A ServerCodec implements reading of RPC requests and writing of
 // RPC responses for the server side of an RPC session.
-// The server calls ReadRequestHeader and ReadRequestBody in pairs
-// to read requests from the connection, and it calls WriteResponse to
-// write a response back. The server calls Close when finished with the
+// The server calls [ServerCodec.ReadRequestHeader] and [ServerCodec.ReadRequestBody] in pairs
+// to read requests from the connection, and it calls [ServerCodec.WriteResponse] to
+// write a response back. The server calls [ServerCodec.Close] when finished with the
 // connection. ReadRequestBody may be called with a nil
 // argument to force the body of the request to be read and discarded.
-// See NewClient's comment for information about concurrent access.
+// See [NewClient]'s comment for information about concurrent access.
 type ServerCodec interface {
 	ReadRequestHeader(*Request) error
 	ReadRequestBody(any) error
@@ -662,37 +661,37 @@
 	Close() error
 }
 
-// ServeConn runs the DefaultServer on a single connection.
+// ServeConn runs the [DefaultServer] on a single connection.
 // ServeConn blocks, serving the connection until the client hangs up.
 // The caller typically invokes ServeConn in a go statement.
 // ServeConn uses the gob wire format (see package gob) on the
-// connection. To use an alternate codec, use ServeCodec.
-// See NewClient's comment for information about concurrent access.
+// connection. To use an alternate codec, use [ServeCodec].
+// See [NewClient]'s comment for information about concurrent access.
 func ServeConn(conn io.ReadWriteCloser) {
 	DefaultServer.ServeConn(conn)
 }
 
-// ServeCodec is like ServeConn but uses the specified codec to
+// ServeCodec is like [ServeConn] but uses the specified codec to
 // decode requests and encode responses.
 func ServeCodec(codec ServerCodec) {
 	DefaultServer.ServeCodec(codec)
 }
 
-// ServeRequest is like ServeCodec but synchronously serves a single request.
+// ServeRequest is like [ServeCodec] but synchronously serves a single request.
 // It does not close the codec upon completion.
 func ServeRequest(codec ServerCodec) error {
 	return DefaultServer.ServeRequest(codec)
 }
 
 // Accept accepts connections on the listener and serves requests
-// to DefaultServer for each incoming connection.
+// to [DefaultServer] for each incoming connection.
 // Accept blocks; the caller typically invokes it in a go statement.
 func Accept(lis net.Listener) { DefaultServer.Accept(lis) }
 
 // Can connect to RPC service using HTTP CONNECT to rpcPath.
 var connected = "200 Connected to Go RPC"
 
-// ServeHTTP implements an http.Handler that answers RPC requests.
+// ServeHTTP implements an [http.Handler] that answers RPC requests.
 func (server *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	if req.Method != "CONNECT" {
 		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
@@ -711,15 +710,15 @@
 
 // HandleHTTP registers an HTTP handler for RPC messages on rpcPath,
 // and a debugging handler on debugPath.
-// It is still necessary to invoke http.Serve(), typically in a go statement.
+// It is still necessary to invoke [http.Serve](), typically in a go statement.
 func (server *Server) HandleHTTP(rpcPath, debugPath string) {
 	http.Handle(rpcPath, server)
 	http.Handle(debugPath, debugHTTP{server})
 }
 
-// HandleHTTP registers an HTTP handler for RPC messages to DefaultServer
-// on DefaultRPCPath and a debugging handler on DefaultDebugPath.
-// It is still necessary to invoke http.Serve(), typically in a go statement.
+// HandleHTTP registers an HTTP handler for RPC messages to [DefaultServer]
+// on [DefaultRPCPath] and a debugging handler on [DefaultDebugPath].
+// It is still necessary to invoke [http.Serve](), typically in a go statement.
 func HandleHTTP() {
 	DefaultServer.HandleHTTP(DefaultRPCPath, DefaultDebugPath)
 }
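
Annotation: as the doc comments above note, HandleHTTP only registers the handlers; http.Serve still has to be started separately, and DialHTTP performs the matching CONNECT handshake on the client side. A minimal end-to-end sketch in the spirit of the package's Arith example (types and values here are illustrative):

	package main

	import (
		"fmt"
		"log"
		"net"
		"net/http"
		"net/rpc"
	)

	type Args struct{ A, B int }

	// Arith is an illustrative service with a single method.
	type Arith struct{}

	func (Arith) Multiply(args *Args, reply *int) error {
		*reply = args.A * args.B
		return nil
	}

	func main() {
		if err := rpc.Register(Arith{}); err != nil {
			log.Fatal(err)
		}
		rpc.HandleHTTP() // registers handlers on DefaultRPCPath and DefaultDebugPath

		ln, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			log.Fatal(err)
		}
		go http.Serve(ln, nil) // still required, as the comment above says

		client, err := rpc.DialHTTP("tcp", ln.Addr().String())
		if err != nil {
			log.Fatal(err)
		}
		var reply int
		if err := client.Call("Arith.Multiply", &Args{A: 3, B: 4}, &reply); err != nil {
			log.Fatal(err)
		}
		fmt.Println("3*4 =", reply)
	}
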
diff --git a/src/net/sendfile_linux_test.go b/src/net/sendfile_linux_test.go
index 8cd6acc..7a66d36 100644
--- a/src/net/sendfile_linux_test.go
+++ b/src/net/sendfile_linux_test.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux
-// +build linux
 
 package net
 
@@ -15,29 +14,36 @@
 )
 
 func BenchmarkSendFile(b *testing.B) {
+	b.Run("file-to-tcp", func(b *testing.B) { benchmarkSendFile(b, "tcp") })
+	b.Run("file-to-unix", func(b *testing.B) { benchmarkSendFile(b, "unix") })
+}
+
+func benchmarkSendFile(b *testing.B, proto string) {
 	for i := 0; i <= 10; i++ {
 		size := 1 << (i + 10)
-		bench := sendFileBench{chunkSize: size}
+		bench := sendFileBench{
+			proto:     proto,
+			chunkSize: size,
+		}
 		b.Run(strconv.Itoa(size), bench.benchSendFile)
 	}
 }
 
 type sendFileBench struct {
+	proto     string
 	chunkSize int
 }
 
 func (bench sendFileBench) benchSendFile(b *testing.B) {
 	fileSize := b.N * bench.chunkSize
 	f := createTempFile(b, fileSize)
-	fileName := f.Name()
-	defer os.Remove(fileName)
-	defer f.Close()
 
-	client, server := spliceTestSocketPair(b, "tcp")
+	client, server := spliceTestSocketPair(b, bench.proto)
 	defer server.Close()
 
 	cleanUp, err := startSpliceClient(client, "r", bench.chunkSize, fileSize)
 	if err != nil {
+		client.Close()
 		b.Fatal(err)
 	}
 	defer cleanUp()
@@ -52,15 +58,18 @@
 		b.Fatalf("failed to copy data with sendfile, error: %v", err)
 	}
 	if sent != int64(fileSize) {
-		b.Fatalf("bytes sent mismatch\n\texpect: %d\n\tgot: %d", fileSize, sent)
+		b.Fatalf("bytes sent mismatch, got: %d, want: %d", sent, fileSize)
 	}
 }
 
 func createTempFile(b *testing.B, size int) *os.File {
-	f, err := os.CreateTemp("", "linux-sendfile-test")
+	f, err := os.CreateTemp(b.TempDir(), "linux-sendfile-bench")
 	if err != nil {
 		b.Fatalf("failed to create temporary file: %v", err)
 	}
+	b.Cleanup(func() {
+		f.Close()
+	})
 
 	data := make([]byte, size)
 	if _, err := f.Write(data); err != nil {
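
Annotation: the benchmark above now covers file-to-TCP and file-to-Unix copies. From the caller's point of view the sendfile path is reached indirectly: io.Copy notices that the connection implements io.ReaderFrom, and the Linux implementation may hand the transfer to sendfile(2). A hedged sketch of that user-facing pattern (address and file path are placeholders, and whether the kernel fast path is used depends on platform and kernel):

	package main

	import (
		"io"
		"log"
		"net"
		"os"
	)

	func main() {
		conn, err := net.Dial("tcp", "127.0.0.1:9000") // illustrative peer
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		f, err := os.Open("/tmp/payload.bin") // illustrative file
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		// io.Copy calls conn.(io.ReaderFrom).ReadFrom(f); on Linux that path
		// may offload the copy to sendfile(2), which is what the benchmark
		// above measures for both TCP and unix-stream destinations.
		if _, err := io.Copy(conn, f); err != nil {
			log.Fatal(err)
		}
	}
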
diff --git a/src/net/sendfile_stub.go b/src/net/sendfile_stub.go
index c7a2e6a..a4fdd99 100644
--- a/src/net/sendfile_stub.go
+++ b/src/net/sendfile_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build aix || (js && wasm) || netbsd || openbsd || ios || wasip1
+//go:build aix || js || netbsd || openbsd || ios || wasip1
 
 package net
 
diff --git a/src/net/sendfile_test.go b/src/net/sendfile_test.go
index 44a87a1..4cba1ed 100644
--- a/src/net/sendfile_test.go
+++ b/src/net/sendfile_test.go
@@ -2,12 +2,11 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
 	"bytes"
+	"context"
 	"crypto/sha256"
 	"encoding/hex"
 	"errors"
@@ -209,7 +208,7 @@
 // Test that sendfile doesn't put a pipe into blocking mode.
 func TestSendfilePipe(t *testing.T) {
 	switch runtime.GOOS {
-	case "plan9", "windows":
+	case "plan9", "windows", "js", "wasip1":
 		// These systems don't support deadlines on pipes.
 		t.Skipf("skipping on %s", runtime.GOOS)
 	}
@@ -362,3 +361,88 @@
 		t.Fatal(err)
 	}
 }
+
+func BenchmarkSendfileZeroBytes(b *testing.B) {
+	var (
+		wg          sync.WaitGroup
+		ctx, cancel = context.WithCancel(context.Background())
+	)
+
+	defer wg.Wait()
+
+	ln := newLocalListener(b, "tcp")
+	defer ln.Close()
+
+	tempFile, err := os.CreateTemp(b.TempDir(), "test.txt")
+	if err != nil {
+		b.Fatalf("failed to create temp file: %v", err)
+	}
+	defer tempFile.Close()
+
+	fileName := tempFile.Name()
+
+	dataSize := b.N
+	wg.Add(1)
+	go func(f *os.File) {
+		defer wg.Done()
+
+		for i := 0; i < dataSize; i++ {
+			if _, err := f.Write([]byte{1}); err != nil {
+				b.Errorf("failed to write: %v", err)
+				return
+			}
+			if i%1000 == 0 {
+				f.Sync()
+			}
+		}
+	}(tempFile)
+
+	b.ResetTimer()
+	b.ReportAllocs()
+
+	wg.Add(1)
+	go func(ln Listener, fileName string) {
+		defer wg.Done()
+
+		conn, err := ln.Accept()
+		if err != nil {
+			b.Errorf("failed to accept: %v", err)
+			return
+		}
+		defer conn.Close()
+
+		f, err := os.OpenFile(fileName, os.O_RDONLY, 0660)
+		if err != nil {
+			b.Errorf("failed to open file: %v", err)
+			return
+		}
+		defer f.Close()
+
+		for {
+			if ctx.Err() != nil {
+				return
+			}
+
+			if _, err := io.Copy(conn, f); err != nil {
+				b.Errorf("failed to copy: %v", err)
+				return
+			}
+		}
+	}(ln, fileName)
+
+	conn, err := Dial("tcp", ln.Addr().String())
+	if err != nil {
+		b.Fatalf("failed to dial: %v", err)
+	}
+	defer conn.Close()
+
+	n, err := io.CopyN(io.Discard, conn, int64(dataSize))
+	if err != nil {
+		b.Fatalf("failed to copy: %v", err)
+	}
+	if n != int64(dataSize) {
+		b.Fatalf("expected %d copied bytes, but got %d", dataSize, n)
+	}
+
+	cancel()
+}
diff --git a/src/net/sendfile_unix_alt.go b/src/net/sendfile_unix_alt.go
index b867717..5cb65ee 100644
--- a/src/net/sendfile_unix_alt.go
+++ b/src/net/sendfile_unix_alt.go
@@ -15,8 +15,8 @@
 // sendFile copies the contents of r to c using the sendfile
 // system call to minimize copies.
 //
-// if handled == true, sendFile returns the number of bytes copied and any
-// non-EOF error.
+// if handled == true, sendFile returns the number (potentially zero) of bytes
+// copied and any non-EOF error.
 //
 // if handled == false, sendFile performed no work.
 func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) {
@@ -65,7 +65,7 @@
 
 	var werr error
 	err = sc.Read(func(fd uintptr) bool {
-		written, werr = poll.SendFile(&c.pfd, int(fd), pos, remain)
+		written, werr, handled = poll.SendFile(&c.pfd, int(fd), pos, remain)
 		return true
 	})
 	if err == nil {
@@ -78,8 +78,8 @@
 
 	_, err1 := f.Seek(written, io.SeekCurrent)
 	if err1 != nil && err == nil {
-		return written, err1, written > 0
+		return written, err1, handled
 	}
 
-	return written, wrapSyscallError("sendfile", err), written > 0
+	return written, wrapSyscallError("sendfile", err), handled
 }
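
Annotation: the handled result documented above lets the caller distinguish "the fast path ran (possibly copying zero bytes)" from "nothing was attempted". A standalone sketch of that fallback shape; trySendFile is a stand-in for the unexported sendFile helper and always reports handled == false here.

	package main

	import (
		"io"
		"net"
	)

	// trySendFile stands in for net's unexported sendFile; in this sketch it
	// never handles the copy, so the generic path below is always taken.
	func trySendFile(dst *net.TCPConn, src io.Reader) (n int64, err error, handled bool) {
		return 0, nil, false
	}

	// copyWithOffload mirrors the pattern TCPConn.readFrom uses: if the fast
	// path ran at all, its result (even zero bytes) is final; otherwise fall
	// back to a plain userspace copy.
	func copyWithOffload(dst *net.TCPConn, src io.Reader) (int64, error) {
		if n, err, handled := trySendFile(dst, src); handled {
			return n, err
		}
		return io.Copy(dst, src)
	}

	func main() {
		// Intentionally empty; copyWithOffload only illustrates the shape.
	}
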
diff --git a/src/net/server_test.go b/src/net/server_test.go
index 2ff0689..eb6b111 100644
--- a/src/net/server_test.go
+++ b/src/net/server_test.go
@@ -2,11 +2,10 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
+	"fmt"
 	"os"
 	"testing"
 )
@@ -251,65 +250,80 @@
 
 func TestUDPServer(t *testing.T) {
 	for i, tt := range udpServerTests {
-		if !testableListenArgs(tt.snet, tt.saddr, tt.taddr) {
-			t.Logf("skipping %s test", tt.snet+" "+tt.saddr+"<-"+tt.taddr)
-			continue
-		}
-
-		c1, err := ListenPacket(tt.snet, tt.saddr)
-		if err != nil {
-			if perr := parseDialError(err); perr != nil {
-				t.Error(perr)
+		i, tt := i, tt
+		t.Run(fmt.Sprint(i), func(t *testing.T) {
+			if !testableListenArgs(tt.snet, tt.saddr, tt.taddr) {
+				t.Skipf("skipping %s %s<-%s test", tt.snet, tt.saddr, tt.taddr)
 			}
-			t.Fatal(err)
-		}
+			t.Logf("%s %s<-%s", tt.snet, tt.saddr, tt.taddr)
 
-		ls := (&packetListener{PacketConn: c1}).newLocalServer()
-		defer ls.teardown()
-		tpch := make(chan error, 1)
-		handler := func(ls *localPacketServer, c PacketConn) { packetTransponder(c, tpch) }
-		if err := ls.buildup(handler); err != nil {
-			t.Fatal(err)
-		}
-
-		trch := make(chan error, 1)
-		_, port, err := SplitHostPort(ls.PacketConn.LocalAddr().String())
-		if err != nil {
-			t.Fatal(err)
-		}
-		if tt.dial {
-			d := Dialer{Timeout: someTimeout}
-			c2, err := d.Dial(tt.tnet, JoinHostPort(tt.taddr, port))
+			c1, err := ListenPacket(tt.snet, tt.saddr)
 			if err != nil {
 				if perr := parseDialError(err); perr != nil {
 					t.Error(perr)
 				}
 				t.Fatal(err)
 			}
-			defer c2.Close()
-			go transceiver(c2, []byte("UDP SERVER TEST"), trch)
-		} else {
-			c2, err := ListenPacket(tt.tnet, JoinHostPort(tt.taddr, "0"))
-			if err != nil {
-				if perr := parseDialError(err); perr != nil {
-					t.Error(perr)
-				}
-				t.Fatal(err)
-			}
-			defer c2.Close()
-			dst, err := ResolveUDPAddr(tt.tnet, JoinHostPort(tt.taddr, port))
-			if err != nil {
-				t.Fatal(err)
-			}
-			go packetTransceiver(c2, []byte("UDP SERVER TEST"), dst, trch)
-		}
 
-		for err := range trch {
-			t.Errorf("#%d: %v", i, err)
-		}
-		for err := range tpch {
-			t.Errorf("#%d: %v", i, err)
-		}
+			ls := (&packetListener{PacketConn: c1}).newLocalServer()
+			defer ls.teardown()
+			tpch := make(chan error, 1)
+			handler := func(ls *localPacketServer, c PacketConn) { packetTransponder(c, tpch) }
+			if err := ls.buildup(handler); err != nil {
+				t.Fatal(err)
+			}
+
+			trch := make(chan error, 1)
+			_, port, err := SplitHostPort(ls.PacketConn.LocalAddr().String())
+			if err != nil {
+				t.Fatal(err)
+			}
+			if tt.dial {
+				d := Dialer{Timeout: someTimeout}
+				c2, err := d.Dial(tt.tnet, JoinHostPort(tt.taddr, port))
+				if err != nil {
+					if perr := parseDialError(err); perr != nil {
+						t.Error(perr)
+					}
+					t.Fatal(err)
+				}
+				defer c2.Close()
+				go transceiver(c2, []byte("UDP SERVER TEST"), trch)
+			} else {
+				c2, err := ListenPacket(tt.tnet, JoinHostPort(tt.taddr, "0"))
+				if err != nil {
+					if perr := parseDialError(err); perr != nil {
+						t.Error(perr)
+					}
+					t.Fatal(err)
+				}
+				defer c2.Close()
+				dst, err := ResolveUDPAddr(tt.tnet, JoinHostPort(tt.taddr, port))
+				if err != nil {
+					t.Fatal(err)
+				}
+				go packetTransceiver(c2, []byte("UDP SERVER TEST"), dst, trch)
+			}
+
+			for trch != nil || tpch != nil {
+				select {
+				case err, ok := <-trch:
+					if !ok {
+						trch = nil
+					}
+					if err != nil {
+						t.Errorf("client: %v", err)
+					}
+				case err, ok := <-tpch:
+					if !ok {
+						tpch = nil
+					}
+					if err != nil {
+						t.Errorf("server: %v", err)
+					}
+				}
+			}
+		})
 	}
 }
 
@@ -326,58 +340,73 @@
 	}
 
 	for i, tt := range unixgramServerTests {
-		if !testableListenArgs("unixgram", tt.saddr, "") {
-			t.Logf("skipping %s test", "unixgram "+tt.saddr+"<-"+tt.caddr)
-			continue
-		}
-
-		c1, err := ListenPacket("unixgram", tt.saddr)
-		if err != nil {
-			if perr := parseDialError(err); perr != nil {
-				t.Error(perr)
+		i, tt := i, tt
+		t.Run(fmt.Sprint(i), func(t *testing.T) {
+			if !testableListenArgs("unixgram", tt.saddr, "") {
+				t.Skipf("skipping unixgram %s<-%s test", tt.saddr, tt.caddr)
 			}
-			t.Fatal(err)
-		}
+			t.Logf("unixgram %s<-%s", tt.saddr, tt.caddr)
 
-		ls := (&packetListener{PacketConn: c1}).newLocalServer()
-		defer ls.teardown()
-		tpch := make(chan error, 1)
-		handler := func(ls *localPacketServer, c PacketConn) { packetTransponder(c, tpch) }
-		if err := ls.buildup(handler); err != nil {
-			t.Fatal(err)
-		}
-
-		trch := make(chan error, 1)
-		if tt.dial {
-			d := Dialer{Timeout: someTimeout, LocalAddr: &UnixAddr{Net: "unixgram", Name: tt.caddr}}
-			c2, err := d.Dial("unixgram", ls.PacketConn.LocalAddr().String())
+			c1, err := ListenPacket("unixgram", tt.saddr)
 			if err != nil {
 				if perr := parseDialError(err); perr != nil {
 					t.Error(perr)
 				}
 				t.Fatal(err)
 			}
-			defer os.Remove(c2.LocalAddr().String())
-			defer c2.Close()
-			go transceiver(c2, []byte(c2.LocalAddr().String()), trch)
-		} else {
-			c2, err := ListenPacket("unixgram", tt.caddr)
-			if err != nil {
-				if perr := parseDialError(err); perr != nil {
-					t.Error(perr)
-				}
+
+			ls := (&packetListener{PacketConn: c1}).newLocalServer()
+			defer ls.teardown()
+			tpch := make(chan error, 1)
+			handler := func(ls *localPacketServer, c PacketConn) { packetTransponder(c, tpch) }
+			if err := ls.buildup(handler); err != nil {
 				t.Fatal(err)
 			}
-			defer os.Remove(c2.LocalAddr().String())
-			defer c2.Close()
-			go packetTransceiver(c2, []byte("UNIXGRAM SERVER TEST"), ls.PacketConn.LocalAddr(), trch)
-		}
 
-		for err := range trch {
-			t.Errorf("#%d: %v", i, err)
-		}
-		for err := range tpch {
-			t.Errorf("#%d: %v", i, err)
-		}
+			trch := make(chan error, 1)
+			if tt.dial {
+				d := Dialer{Timeout: someTimeout, LocalAddr: &UnixAddr{Net: "unixgram", Name: tt.caddr}}
+				c2, err := d.Dial("unixgram", ls.PacketConn.LocalAddr().String())
+				if err != nil {
+					if perr := parseDialError(err); perr != nil {
+						t.Error(perr)
+					}
+					t.Fatal(err)
+				}
+				defer os.Remove(c2.LocalAddr().String())
+				defer c2.Close()
+				go transceiver(c2, []byte(c2.LocalAddr().String()), trch)
+			} else {
+				c2, err := ListenPacket("unixgram", tt.caddr)
+				if err != nil {
+					if perr := parseDialError(err); perr != nil {
+						t.Error(perr)
+					}
+					t.Fatal(err)
+				}
+				defer os.Remove(c2.LocalAddr().String())
+				defer c2.Close()
+				go packetTransceiver(c2, []byte("UNIXGRAM SERVER TEST"), ls.PacketConn.LocalAddr(), trch)
+			}
+
+			for trch != nil || tpch != nil {
+				select {
+				case err, ok := <-trch:
+					if !ok {
+						trch = nil
+					}
+					if err != nil {
+						t.Errorf("client: %v", err)
+					}
+				case err, ok := <-tpch:
+					if !ok {
+						tpch = nil
+					}
+					if err != nil {
+						t.Errorf("server: %v", err)
+					}
+				}
+			}
+		})
 	}
 }
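
Annotation: the rewritten tests above replace the old range loops with a select that drains both error channels, nil-ing each one as it closes so the loop ends only when both sides are done. The same pattern in isolation:

	package main

	import (
		"errors"
		"fmt"
	)

	func main() {
		client := make(chan error, 1)
		server := make(chan error, 1)

		// Stand-ins for the transceiver/transponder goroutines in the tests.
		go func() { client <- nil; close(client) }()
		go func() { server <- errors.New("example failure"); close(server) }()

		// A receive on a nil channel blocks forever, so setting a closed
		// channel to nil drops it out of the select until both are drained.
		for client != nil || server != nil {
			select {
			case err, ok := <-client:
				if !ok {
					client = nil
				}
				if err != nil {
					fmt.Println("client:", err)
				}
			case err, ok := <-server:
				if !ok {
					server = nil
				}
				if err != nil {
					fmt.Println("server:", err)
				}
			}
		}
	}
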
diff --git a/src/net/smtp/auth.go b/src/net/smtp/auth.go
index 72eb166..6d461ac 100644
--- a/src/net/smtp/auth.go
+++ b/src/net/smtp/auth.go
@@ -42,7 +42,7 @@
 	host                         string
 }
 
-// PlainAuth returns an Auth that implements the PLAIN authentication
+// PlainAuth returns an [Auth] that implements the PLAIN authentication
 // mechanism as defined in RFC 4616. The returned Auth uses the given
 // username and password to authenticate to host and act as identity.
 // Usually identity should be the empty string, to act as username.
@@ -86,7 +86,7 @@
 	username, secret string
 }
 
-// CRAMMD5Auth returns an Auth that implements the CRAM-MD5 authentication
+// CRAMMD5Auth returns an [Auth] that implements the CRAM-MD5 authentication
 // mechanism as defined in RFC 2195.
 // The returned Auth uses the given username and secret to authenticate
 // to the server using the challenge-response mechanism.
diff --git a/src/net/smtp/smtp.go b/src/net/smtp/smtp.go
index b5a025e..b787793 100644
--- a/src/net/smtp/smtp.go
+++ b/src/net/smtp/smtp.go
@@ -48,7 +48,7 @@
 	helloError error  // the error from the hello
 }
 
-// Dial returns a new Client connected to an SMTP server at addr.
+// Dial returns a new [Client] connected to an SMTP server at addr.
 // The addr must include a port, as in "mail.example.com:smtp".
 func Dial(addr string) (*Client, error) {
 	conn, err := net.Dial("tcp", addr)
@@ -59,7 +59,7 @@
 	return NewClient(conn, host)
 }
 
-// NewClient returns a new Client using an existing connection and host as a
+// NewClient returns a new [Client] using an existing connection and host as a
 // server name to be used when authenticating.
 func NewClient(conn net.Conn, host string) (*Client, error) {
 	text := textproto.NewConn(conn)
@@ -166,7 +166,7 @@
 }
 
 // TLSConnectionState returns the client's TLS connection state.
-// The return values are their zero values if StartTLS did
+// The return values are their zero values if [Client.StartTLS] did
 // not succeed.
 func (c *Client) TLSConnectionState() (state tls.ConnectionState, ok bool) {
 	tc, ok := c.conn.(*tls.Conn)
@@ -241,7 +241,7 @@
 // If the server supports the 8BITMIME extension, Mail adds the BODY=8BITMIME
 // parameter. If the server supports the SMTPUTF8 extension, Mail adds the
 // SMTPUTF8 parameter.
-// This initiates a mail transaction and is followed by one or more Rcpt calls.
+// This initiates a mail transaction and is followed by one or more [Client.Rcpt] calls.
 func (c *Client) Mail(from string) error {
 	if err := validateLine(from); err != nil {
 		return err
@@ -263,8 +263,8 @@
 }
 
 // Rcpt issues a RCPT command to the server using the provided email address.
-// A call to Rcpt must be preceded by a call to Mail and may be followed by
-// a Data call or another Rcpt call.
+// A call to Rcpt must be preceded by a call to [Client.Mail] and may be followed by
+// a [Client.Data] call or another Rcpt call.
 func (c *Client) Rcpt(to string) error {
 	if err := validateLine(to); err != nil {
 		return err
@@ -287,7 +287,7 @@
 // Data issues a DATA command to the server and returns a writer that
 // can be used to write the mail headers and body. The caller should
 // close the writer before calling any more methods on c. A call to
-// Data must be preceded by one or more calls to Rcpt.
+// Data must be preceded by one or more calls to [Client.Rcpt].
 func (c *Client) Data() (io.WriteCloser, error) {
 	_, _, err := c.cmd(354, "DATA")
 	if err != nil {
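
Annotation: the Mail, Rcpt, and Data comments above pin down the required ordering of an SMTP transaction. A minimal client-side sketch, assuming an SMTP server is reachable at the placeholder address (addresses and message content are illustrative):

	package main

	import (
		"fmt"
		"log"
		"net/smtp"
	)

	func main() {
		c, err := smtp.Dial("mail.example.com:25") // placeholder host:port
		if err != nil {
			log.Fatal(err)
		}
		defer c.Quit()

		if err := c.Mail("sender@example.com"); err != nil { // starts the transaction
			log.Fatal(err)
		}
		if err := c.Rcpt("recipient@example.com"); err != nil { // one or more Rcpt calls
			log.Fatal(err)
		}
		wc, err := c.Data() // must follow Rcpt; returns the message body writer
		if err != nil {
			log.Fatal(err)
		}
		if _, err := fmt.Fprint(wc, "Subject: hello\r\n\r\nThis is the body.\r\n"); err != nil {
			log.Fatal(err)
		}
		if err := wc.Close(); err != nil {
			log.Fatal(err)
		}
	}
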
diff --git a/src/net/sock_posix.go b/src/net/sock_posix.go
index b3e1806..d04c26e 100644
--- a/src/net/sock_posix.go
+++ b/src/net/sock_posix.go
@@ -89,38 +89,10 @@
 	return fd.net + "6"
 }
 
-func (fd *netFD) addrFunc() func(syscall.Sockaddr) Addr {
-	switch fd.family {
-	case syscall.AF_INET, syscall.AF_INET6:
-		switch fd.sotype {
-		case syscall.SOCK_STREAM:
-			return sockaddrToTCP
-		case syscall.SOCK_DGRAM:
-			return sockaddrToUDP
-		case syscall.SOCK_RAW:
-			return sockaddrToIP
-		}
-	case syscall.AF_UNIX:
-		switch fd.sotype {
-		case syscall.SOCK_STREAM:
-			return sockaddrToUnix
-		case syscall.SOCK_DGRAM:
-			return sockaddrToUnixgram
-		case syscall.SOCK_SEQPACKET:
-			return sockaddrToUnixpacket
-		}
-	}
-	return func(syscall.Sockaddr) Addr { return nil }
-}
-
 func (fd *netFD) dial(ctx context.Context, laddr, raddr sockaddr, ctrlCtxFn func(context.Context, string, string, syscall.RawConn) error) error {
 	var c *rawConn
-	var err error
 	if ctrlCtxFn != nil {
-		c, err = newRawConn(fd)
-		if err != nil {
-			return err
-		}
+		c = newRawConn(fd)
 		var ctrlAddr string
 		if raddr != nil {
 			ctrlAddr = raddr.String()
@@ -133,6 +105,7 @@
 	}
 
 	var lsa syscall.Sockaddr
+	var err error
 	if laddr != nil {
 		if lsa, err = laddr.sockaddr(fd.family); err != nil {
 			return err
@@ -185,10 +158,7 @@
 	}
 
 	if ctrlCtxFn != nil {
-		c, err := newRawConn(fd)
-		if err != nil {
-			return err
-		}
+		c := newRawConn(fd)
 		if err := ctrlCtxFn(ctx, fd.ctrlNetwork(), laddr.String(), c); err != nil {
 			return err
 		}
@@ -239,10 +209,7 @@
 	}
 
 	if ctrlCtxFn != nil {
-		c, err := newRawConn(fd)
-		if err != nil {
-			return err
-		}
+		c := newRawConn(fd)
 		if err := ctrlCtxFn(ctx, fd.ctrlNetwork(), laddr.String(), c); err != nil {
 			return err
 		}
diff --git a/src/net/sock_stub.go b/src/net/sock_stub.go
index e163755..fd86fa9 100644
--- a/src/net/sock_stub.go
+++ b/src/net/sock_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build aix || (js && wasm) || solaris || wasip1
+//go:build aix || js || solaris || wasip1
 
 package net
 
diff --git a/src/net/sock_windows.go b/src/net/sock_windows.go
index fa11c7a..a519909 100644
--- a/src/net/sock_windows.go
+++ b/src/net/sock_windows.go
@@ -11,29 +11,15 @@
 )
 
 func maxListenerBacklog() int {
-	// TODO: Implement this
-	// NOTE: Never return a number bigger than 1<<16 - 1. See issue 5030.
+	// When the socket backlog is SOMAXCONN, Windows will set the backlog to
+	// "a reasonable maximum value".
+	// See: https://learn.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-listen
 	return syscall.SOMAXCONN
 }
 
 func sysSocket(family, sotype, proto int) (syscall.Handle, error) {
 	s, err := wsaSocketFunc(int32(family), int32(sotype), int32(proto),
 		nil, 0, windows.WSA_FLAG_OVERLAPPED|windows.WSA_FLAG_NO_HANDLE_INHERIT)
-	if err == nil {
-		return s, nil
-	}
-	// WSA_FLAG_NO_HANDLE_INHERIT flag is not supported on some
-	// old versions of Windows, see
-	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms742212(v=vs.85).aspx
-	// for details. Just use syscall.Socket, if windows.WSASocket failed.
-
-	// See ../syscall/exec_unix.go for description of ForkLock.
-	syscall.ForkLock.RLock()
-	s, err = socketFunc(family, sotype, proto)
-	if err == nil {
-		syscall.CloseOnExec(s)
-	}
-	syscall.ForkLock.RUnlock()
 	if err != nil {
 		return syscall.InvalidHandle, os.NewSyscallError("socket", err)
 	}
diff --git a/src/net/sockaddr_posix.go b/src/net/sockaddr_posix.go
index e44fc76..c5604fc 100644
--- a/src/net/sockaddr_posix.go
+++ b/src/net/sockaddr_posix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix || (js && wasm) || wasip1 || windows
+//go:build unix || js || wasip1 || windows
 
 package net
 
@@ -32,3 +32,27 @@
 	// toLocal maps the zero address to a local system address (127.0.0.1 or ::1)
 	toLocal(net string) sockaddr
 }
+
+func (fd *netFD) addrFunc() func(syscall.Sockaddr) Addr {
+	switch fd.family {
+	case syscall.AF_INET, syscall.AF_INET6:
+		switch fd.sotype {
+		case syscall.SOCK_STREAM:
+			return sockaddrToTCP
+		case syscall.SOCK_DGRAM:
+			return sockaddrToUDP
+		case syscall.SOCK_RAW:
+			return sockaddrToIP
+		}
+	case syscall.AF_UNIX:
+		switch fd.sotype {
+		case syscall.SOCK_STREAM:
+			return sockaddrToUnix
+		case syscall.SOCK_DGRAM:
+			return sockaddrToUnixgram
+		case syscall.SOCK_SEQPACKET:
+			return sockaddrToUnixpacket
+		}
+	}
+	return func(syscall.Sockaddr) Addr { return nil }
+}
diff --git a/src/net/sockopt_fake.go b/src/net/sockopt_fake.go
new file mode 100644
index 0000000..9d9f7ea
--- /dev/null
+++ b/src/net/sockopt_fake.go
@@ -0,0 +1,46 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js || wasip1
+
+package net
+
+import "syscall"
+
+func setDefaultSockopts(s, family, sotype int, ipv6only bool) error {
+	return nil
+}
+
+func setDefaultListenerSockopts(s int) error {
+	return nil
+}
+
+func setDefaultMulticastSockopts(s int) error {
+	return nil
+}
+
+func setReadBuffer(fd *netFD, bytes int) error {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.setReadBuffer(bytes)
+	}
+	return syscall.ENOPROTOOPT
+}
+
+func setWriteBuffer(fd *netFD, bytes int) error {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.setWriteBuffer(bytes)
+	}
+	return syscall.ENOPROTOOPT
+}
+
+func setKeepAlive(fd *netFD, keepalive bool) error {
+	return syscall.ENOPROTOOPT
+}
+
+func setLinger(fd *netFD, sec int) error {
+	if fd.fakeNetFD != nil {
+		return fd.fakeNetFD.setLinger(sec)
+	}
+	return syscall.ENOPROTOOPT
+}
diff --git a/src/net/sockopt_posix.go b/src/net/sockopt_posix.go
index 32e8fcd..a380c77 100644
--- a/src/net/sockopt_posix.go
+++ b/src/net/sockopt_posix.go
@@ -20,35 +20,6 @@
 	return 0
 }
 
-func ipv4AddrToInterface(ip IP) (*Interface, error) {
-	ift, err := Interfaces()
-	if err != nil {
-		return nil, err
-	}
-	for _, ifi := range ift {
-		ifat, err := ifi.Addrs()
-		if err != nil {
-			return nil, err
-		}
-		for _, ifa := range ifat {
-			switch v := ifa.(type) {
-			case *IPAddr:
-				if ip.Equal(v.IP) {
-					return &ifi, nil
-				}
-			case *IPNet:
-				if ip.Equal(v.IP) {
-					return &ifi, nil
-				}
-			}
-		}
-	}
-	if ip.Equal(IPv4zero) {
-		return nil, nil
-	}
-	return nil, errNoSuchInterface
-}
-
 func interfaceToIPv4Addr(ifi *Interface) (IP, error) {
 	if ifi == nil {
 		return IPv4zero, nil
diff --git a/src/net/sockopt_stub.go b/src/net/sockopt_stub.go
deleted file mode 100644
index 186d891..0000000
--- a/src/net/sockopt_stub.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (js && wasm) || wasip1
-
-package net
-
-import "syscall"
-
-func setDefaultSockopts(s, family, sotype int, ipv6only bool) error {
-	return nil
-}
-
-func setDefaultListenerSockopts(s int) error {
-	return nil
-}
-
-func setDefaultMulticastSockopts(s int) error {
-	return nil
-}
-
-func setReadBuffer(fd *netFD, bytes int) error {
-	return syscall.ENOPROTOOPT
-}
-
-func setWriteBuffer(fd *netFD, bytes int) error {
-	return syscall.ENOPROTOOPT
-}
-
-func setKeepAlive(fd *netFD, keepalive bool) error {
-	return syscall.ENOPROTOOPT
-}
-
-func setLinger(fd *netFD, sec int) error {
-	return syscall.ENOPROTOOPT
-}
diff --git a/src/net/sockoptip_stub.go b/src/net/sockoptip_stub.go
index a37c312..23891a8 100644
--- a/src/net/sockoptip_stub.go
+++ b/src/net/sockoptip_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (js && wasm) || wasip1
+//go:build js || wasip1
 
 package net
 
diff --git a/src/net/sockoptip_windows.go b/src/net/sockoptip_windows.go
index 6267603..9dfa37c 100644
--- a/src/net/sockoptip_windows.go
+++ b/src/net/sockoptip_windows.go
@@ -8,7 +8,6 @@
 	"os"
 	"runtime"
 	"syscall"
-	"unsafe"
 )
 
 func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error {
@@ -18,7 +17,7 @@
 	}
 	var a [4]byte
 	copy(a[:], ip.To4())
-	err = fd.pfd.Setsockopt(syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, (*byte)(unsafe.Pointer(&a[0])), 4)
+	err = fd.pfd.SetsockoptInet4Addr(syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, a)
 	runtime.KeepAlive(fd)
 	return wrapSyscallError("setsockopt", err)
 }
diff --git a/src/net/splice_linux.go b/src/net/splice_linux.go
index ab2ab70..bdafcb5 100644
--- a/src/net/splice_linux.go
+++ b/src/net/splice_linux.go
@@ -9,12 +9,12 @@
 	"io"
 )
 
-// splice transfers data from r to c using the splice system call to minimize
-// copies from and to userspace. c must be a TCP connection. Currently, splice
-// is only enabled if r is a TCP or a stream-oriented Unix connection.
+// spliceFrom transfers data from r to c using the splice system call to minimize
+// copies from and to userspace. c must be a TCP connection.
+// Currently, spliceFrom is only enabled if r is a TCP or a stream-oriented Unix connection.
 //
-// If splice returns handled == false, it has performed no work.
-func splice(c *netFD, r io.Reader) (written int64, err error, handled bool) {
+// If spliceFrom returns handled == false, it has performed no work.
+func spliceFrom(c *netFD, r io.Reader) (written int64, err error, handled bool) {
 	var remain int64 = 1<<63 - 1 // by default, copy until EOF
 	lr, ok := r.(*io.LimitedReader)
 	if ok {
@@ -25,14 +25,17 @@
 	}
 
 	var s *netFD
-	if tc, ok := r.(*TCPConn); ok {
-		s = tc.fd
-	} else if uc, ok := r.(*UnixConn); ok {
-		if uc.fd.net != "unix" {
+	switch v := r.(type) {
+	case *TCPConn:
+		s = v.fd
+	case tcpConnWithoutWriteTo:
+		s = v.fd
+	case *UnixConn:
+		if v.fd.net != "unix" {
 			return 0, nil, false
 		}
-		s = uc.fd
-	} else {
+		s = v.fd
+	default:
 		return 0, nil, false
 	}
 
@@ -42,3 +45,18 @@
 	}
 	return written, wrapSyscallError(sc, err), handled
 }
+
+// spliceTo transfers data from c to w using the splice system call to minimize
+// copies from and to userspace. c must be a TCP connection.
+// Currently, spliceTo is only enabled if w is a stream-oriented Unix connection.
+//
+// If spliceTo returns handled == false, it has performed no work.
+func spliceTo(w io.Writer, c *netFD) (written int64, err error, handled bool) {
+	uc, ok := w.(*UnixConn)
+	if !ok || uc.fd.net != "unix" {
+		return
+	}
+
+	written, handled, sc, err := poll.Splice(&uc.fd.pfd, &c.pfd, 1<<63-1)
+	return written, wrapSyscallError(sc, err), handled
+}
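
Annotation: spliceTo is the new half of the pair. It is reached through the TCPConn.WriteTo method added later in this patch, so an io.Copy whose source is a TCP connection and whose destination is a stream-oriented Unix connection can stay in the kernel on Linux. A hedged sketch of the user-facing call (both endpoints are placeholders and must already exist):

	package main

	import (
		"io"
		"log"
		"net"
	)

	func main() {
		src, err := net.Dial("tcp", "127.0.0.1:9000") // illustrative TCP peer
		if err != nil {
			log.Fatal(err)
		}
		defer src.Close()

		dst, err := net.Dial("unix", "/tmp/example.sock") // illustrative unix socket
		if err != nil {
			log.Fatal(err)
		}
		defer dst.Close()

		// io.Copy prefers the source's WriterTo implementation, so this can
		// flow through TCPConn.WriteTo -> spliceTo when the destination is a
		// stream-oriented *UnixConn.
		if _, err := io.Copy(dst, src); err != nil {
			log.Fatal(err)
		}
	}
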
diff --git a/src/net/splice_stub.go b/src/net/splice_stub.go
index 3cdadb1..239227f 100644
--- a/src/net/splice_stub.go
+++ b/src/net/splice_stub.go
@@ -8,6 +8,10 @@
 
 import "io"
 
-func splice(c *netFD, r io.Reader) (int64, error, bool) {
+func spliceFrom(_ *netFD, _ io.Reader) (int64, error, bool) {
+	return 0, nil, false
+}
+
+func spliceTo(_ io.Writer, _ *netFD) (int64, error, bool) {
 	return 0, nil, false
 }
diff --git a/src/net/splice_test.go b/src/net/splice_test.go
index 75a8f27..227ddeb 100644
--- a/src/net/splice_test.go
+++ b/src/net/splice_test.go
@@ -23,6 +23,7 @@
 		t.Skip("skipping unix-to-tcp tests")
 	}
 	t.Run("unix-to-tcp", func(t *testing.T) { testSplice(t, "unix", "tcp") })
+	t.Run("tcp-to-unix", func(t *testing.T) { testSplice(t, "tcp", "unix") })
 	t.Run("tcp-to-file", func(t *testing.T) { testSpliceToFile(t, "tcp", "file") })
 	t.Run("unix-to-file", func(t *testing.T) { testSpliceToFile(t, "unix", "file") })
 	t.Run("no-unixpacket", testSpliceNoUnixpacket)
@@ -159,6 +160,13 @@
 }
 
 func testSpliceReaderAtEOF(t *testing.T, upNet, downNet string) {
+	// UnixConn doesn't implement io.ReaderFrom, which will fail
+	// the following test in asserting a UnixConn to be an io.ReaderFrom,
+	// so skip this test.
+	if upNet == "unix" || downNet == "unix" {
+		t.Skip("skipping test on unix socket")
+	}
+
 	clientUp, serverUp := spliceTestSocketPair(t, upNet)
 	defer clientUp.Close()
 	clientDown, serverDown := spliceTestSocketPair(t, downNet)
@@ -166,16 +174,16 @@
 
 	serverUp.Close()
 
-	// We'd like to call net.splice here and check the handled return
+	// We'd like to call net.spliceFrom here and check the handled return
 	// value, but we disable splice on old Linux kernels.
 	//
-	// In that case, poll.Splice and net.splice return a non-nil error
+	// In that case, poll.Splice and net.spliceFrom return a non-nil error
 	// and handled == false. We'd ideally like to see handled == true
 	// because the source reader is at EOF, but if we're running on an old
-	// kernel, and splice is disabled, we won't see EOF from net.splice,
+	// kernel, and splice is disabled, we won't see EOF from net.spliceFrom,
 	// because we won't touch the reader at all.
 	//
-	// Trying to untangle the errors from net.splice and match them
+	// Trying to untangle the errors from net.spliceFrom and match them
 	// against the errors created by the poll package would be brittle,
 	// so this is a higher level test.
 	//
@@ -268,7 +276,7 @@
 	//
 	// What we want is err == nil and handled == false, i.e. we never
 	// called poll.Splice, because we know the unix socket's network.
-	_, err, handled := splice(serverDown.(*TCPConn).fd, serverUp)
+	_, err, handled := spliceFrom(serverDown.(*TCPConn).fd, serverUp)
 	if err != nil || handled != false {
 		t.Fatalf("got err = %v, handled = %t, want nil error, handled == false", err, handled)
 	}
@@ -289,7 +297,7 @@
 	defer clientDown.Close()
 	defer serverDown.Close()
 	// Analogous to testSpliceNoUnixpacket.
-	_, err, handled := splice(serverDown.(*TCPConn).fd, up)
+	_, err, handled := spliceFrom(serverDown.(*TCPConn).fd, up)
 	if err != nil || handled != false {
 		t.Fatalf("got err = %v, handled = %t, want nil error, handled == false", err, handled)
 	}
@@ -300,6 +308,7 @@
 
 	b.Run("tcp-to-tcp", func(b *testing.B) { benchSplice(b, "tcp", "tcp") })
 	b.Run("unix-to-tcp", func(b *testing.B) { benchSplice(b, "unix", "tcp") })
+	b.Run("tcp-to-unix", func(b *testing.B) { benchSplice(b, "tcp", "unix") })
 }
 
 func benchSplice(b *testing.B, upNet, downNet string) {
diff --git a/src/net/tcpsock.go b/src/net/tcpsock.go
index 358e487..590516b 100644
--- a/src/net/tcpsock.go
+++ b/src/net/tcpsock.go
@@ -24,7 +24,7 @@
 	Zone string // IPv6 scoped addressing zone
 }
 
-// AddrPort returns the TCPAddr a as a netip.AddrPort.
+// AddrPort returns the [TCPAddr] a as a [netip.AddrPort].
 //
 // If a.Port does not fit in a uint16, it's silently truncated.
 //
@@ -79,7 +79,7 @@
 // recommended, because it will return at most one of the host name's
 // IP addresses.
 //
-// See func Dial for a description of the network and address
+// See func [Dial] for a description of the network and address
 // parameters.
 func ResolveTCPAddr(network, address string) (*TCPAddr, error) {
 	switch network {
@@ -96,7 +96,7 @@
 	return addrs.forResolve(network, address).(*TCPAddr), nil
 }
 
-// TCPAddrFromAddrPort returns addr as a TCPAddr. If addr.IsValid() is false,
+// TCPAddrFromAddrPort returns addr as a [TCPAddr]. If addr.IsValid() is false,
 // then the returned TCPAddr will contain a nil IP field, indicating an
 // address family-agnostic unspecified address.
 func TCPAddrFromAddrPort(addr netip.AddrPort) *TCPAddr {
@@ -107,22 +107,22 @@
 	}
 }
 
-// TCPConn is an implementation of the Conn interface for TCP network
+// TCPConn is an implementation of the [Conn] interface for TCP network
 // connections.
 type TCPConn struct {
 	conn
 }
 
 // SyscallConn returns a raw network connection.
-// This implements the syscall.Conn interface.
+// This implements the [syscall.Conn] interface.
 func (c *TCPConn) SyscallConn() (syscall.RawConn, error) {
 	if !c.ok() {
 		return nil, syscall.EINVAL
 	}
-	return newRawConn(c.fd)
+	return newRawConn(c.fd), nil
 }
 
-// ReadFrom implements the io.ReaderFrom ReadFrom method.
+// ReadFrom implements the [io.ReaderFrom] ReadFrom method.
 func (c *TCPConn) ReadFrom(r io.Reader) (int64, error) {
 	if !c.ok() {
 		return 0, syscall.EINVAL
@@ -134,6 +134,18 @@
 	return n, err
 }
 
+// WriteTo implements the io.WriterTo WriteTo method.
+func (c *TCPConn) WriteTo(w io.Writer) (int64, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	n, err := c.writeTo(w)
+	if err != nil && err != io.EOF {
+		err = &OpError{Op: "writeto", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+	}
+	return n, err
+}
+
 // CloseRead shuts down the reading side of the TCP connection.
 // Most callers should just use Close.
 func (c *TCPConn) CloseRead() error {
@@ -250,7 +262,7 @@
 	return &TCPConn{conn{fd}}
 }
 
-// DialTCP acts like Dial for TCP networks.
+// DialTCP acts like [Dial] for TCP networks.
 //
 // The network must be a TCP network name; see func Dial for details.
 //
@@ -275,14 +287,14 @@
 }
 
 // TCPListener is a TCP network listener. Clients should typically
-// use variables of type Listener instead of assuming TCP.
+// use variables of type [Listener] instead of assuming TCP.
 type TCPListener struct {
 	fd *netFD
 	lc ListenConfig
 }
 
 // SyscallConn returns a raw network connection.
-// This implements the syscall.Conn interface.
+// This implements the [syscall.Conn] interface.
 //
 // The returned RawConn only supports calling Control. Read and
 // Write return an error.
@@ -290,7 +302,7 @@
 	if !l.ok() {
 		return nil, syscall.EINVAL
 	}
-	return newRawListener(l.fd)
+	return newRawListener(l.fd), nil
 }
 
 // AcceptTCP accepts the next incoming call and returns the new
@@ -306,8 +318,8 @@
 	return c, nil
 }
 
-// Accept implements the Accept method in the Listener interface; it
-// waits for the next call and returns a generic Conn.
+// Accept implements the Accept method in the [Listener] interface; it
+// waits for the next call and returns a generic [Conn].
 func (l *TCPListener) Accept() (Conn, error) {
 	if !l.ok() {
 		return nil, syscall.EINVAL
@@ -331,7 +343,7 @@
 	return nil
 }
 
-// Addr returns the listener's network address, a *TCPAddr.
+// Addr returns the listener's network address, a [*TCPAddr].
 // The Addr returned is shared by all invocations of Addr, so
 // do not modify it.
 func (l *TCPListener) Addr() Addr { return l.fd.laddr }
@@ -342,13 +354,10 @@
 	if !l.ok() {
 		return syscall.EINVAL
 	}
-	if err := l.fd.pfd.SetDeadline(t); err != nil {
-		return &OpError{Op: "set", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
-	}
-	return nil
+	return l.fd.SetDeadline(t)
 }
 
-// File returns a copy of the underlying os.File.
+// File returns a copy of the underlying [os.File].
 // It is the caller's responsibility to close f when finished.
 // Closing l does not affect f, and closing f does not affect l.
 //
@@ -366,7 +375,7 @@
 	return
 }
 
-// ListenTCP acts like Listen for TCP networks.
+// ListenTCP acts like [Listen] for TCP networks.
 //
 // The network must be a TCP network name; see func Dial for details.
 //
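
Annotation: SyscallConn above now always succeeds, since newRawConn no longer returns an error; the RawConn obtained from a listener still only supports Control. A small, self-contained sketch of using Control to peek at the listener's descriptor:

	package main

	import (
		"fmt"
		"log"
		"net"
	)

	func main() {
		ln, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			log.Fatal(err)
		}
		defer ln.Close()

		raw, err := ln.(*net.TCPListener).SyscallConn()
		if err != nil {
			log.Fatal(err)
		}
		// Only Control is supported on a listener's RawConn; Read and Write
		// return an error, as the doc comment above notes.
		if err := raw.Control(func(fd uintptr) {
			fmt.Println("listener fd:", fd)
		}); err != nil {
			log.Fatal(err)
		}
	}
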
diff --git a/src/net/tcpsock_plan9.go b/src/net/tcpsock_plan9.go
index d55948f..463dedc 100644
--- a/src/net/tcpsock_plan9.go
+++ b/src/net/tcpsock_plan9.go
@@ -14,6 +14,10 @@
 	return genericReadFrom(c, r)
 }
 
+func (c *TCPConn) writeTo(w io.Writer) (int64, error) {
+	return genericWriteTo(c, w)
+}
+
 func (sd *sysDialer) dialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) {
 	if h := sd.testHookDialTCP; h != nil {
 		return h(ctx, sd.network, laddr, raddr)
diff --git a/src/net/tcpsock_posix.go b/src/net/tcpsock_posix.go
index e6f425b..01b5ec9 100644
--- a/src/net/tcpsock_posix.go
+++ b/src/net/tcpsock_posix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix || (js && wasm) || wasip1 || windows
+//go:build unix || js || wasip1 || windows
 
 package net
 
@@ -45,7 +45,7 @@
 }
 
 func (c *TCPConn) readFrom(r io.Reader) (int64, error) {
-	if n, err, handled := splice(c.fd, r); handled {
+	if n, err, handled := spliceFrom(c.fd, r); handled {
 		return n, err
 	}
 	if n, err, handled := sendFile(c.fd, r); handled {
@@ -54,6 +54,13 @@
 	return genericReadFrom(c, r)
 }
 
+func (c *TCPConn) writeTo(w io.Writer) (int64, error) {
+	if n, err, handled := spliceTo(w, c.fd); handled {
+		return n, err
+	}
+	return genericWriteTo(c, w)
+}
+
 func (sd *sysDialer) dialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) {
 	if h := sd.testHookDialTCP; h != nil {
 		return h(ctx, sd.network, laddr, raddr)
diff --git a/src/net/tcpsock_test.go b/src/net/tcpsock_test.go
index f720a22..b37e936 100644
--- a/src/net/tcpsock_test.go
+++ b/src/net/tcpsock_test.go
@@ -2,11 +2,11 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
+	"context"
+	"errors"
 	"fmt"
 	"internal/testenv"
 	"io"
@@ -670,6 +670,11 @@
 }
 
 func TestCopyPipeIntoTCP(t *testing.T) {
+	switch runtime.GOOS {
+	case "js", "wasip1":
+		t.Skipf("skipping: os.Pipe not supported on %s", runtime.GOOS)
+	}
+
 	ln := newLocalListener(t, "tcp")
 	defer ln.Close()
 
@@ -783,3 +788,48 @@
 		t.Errorf("got keepalive %v; want %v", got, defaultTCPKeepAlive)
 	}
 }
+
+func TestTCPListenAfterClose(t *testing.T) {
+	// Regression test for https://go.dev/issue/50216:
+	// after calling Close on a Listener, the fake net implementation would
+	// erroneously Accept a connection dialed before the call to Close.
+
+	ln := newLocalListener(t, "tcp")
+	defer ln.Close()
+
+	var wg sync.WaitGroup
+	ctx, cancel := context.WithCancel(context.Background())
+
+	d := &Dialer{}
+	for n := 2; n > 0; n-- {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+
+			c, err := d.DialContext(ctx, ln.Addr().Network(), ln.Addr().String())
+			if err == nil {
+				<-ctx.Done()
+				c.Close()
+			}
+		}()
+	}
+
+	c, err := ln.Accept()
+	if err == nil {
+		c.Close()
+	} else {
+		t.Error(err)
+	}
+	time.Sleep(10 * time.Millisecond)
+	cancel()
+	wg.Wait()
+	ln.Close()
+
+	c, err = ln.Accept()
+	if !errors.Is(err, ErrClosed) {
+		if err == nil {
+			c.Close()
+		}
+		t.Errorf("after l.Close(), l.Accept() = _, %v\nwant %v", err, ErrClosed)
+	}
+}
diff --git a/src/net/tcpsock_unix_test.go b/src/net/tcpsock_unix_test.go
index 35fd937..df810a2 100644
--- a/src/net/tcpsock_unix_test.go
+++ b/src/net/tcpsock_unix_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !plan9 && !wasip1 && !windows
+//go:build !plan9 && !windows
 
 package net
 
diff --git a/src/net/tcpsockopt_stub.go b/src/net/tcpsockopt_stub.go
index f778143..cef07cd 100644
--- a/src/net/tcpsockopt_stub.go
+++ b/src/net/tcpsockopt_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (js && wasm) || wasip1
+//go:build js || wasip1
 
 package net
 
diff --git a/src/net/textproto/header.go b/src/net/textproto/header.go
index a58df7a..689a682 100644
--- a/src/net/textproto/header.go
+++ b/src/net/textproto/header.go
@@ -23,7 +23,7 @@
 }
 
 // Get gets the first value associated with the given key.
-// It is case insensitive; CanonicalMIMEHeaderKey is used
+// It is case insensitive; [CanonicalMIMEHeaderKey] is used
 // to canonicalize the provided key.
 // If there are no values associated with the key, Get returns "".
 // To use non-canonical keys, access the map directly.
@@ -39,7 +39,7 @@
 }
 
 // Values returns all values associated with the given key.
-// It is case insensitive; CanonicalMIMEHeaderKey is
+// It is case insensitive; [CanonicalMIMEHeaderKey] is
 // used to canonicalize the provided key. To use non-canonical
 // keys, access the map directly.
 // The returned slice is not a copy.
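
Annotation: Get and Values above both canonicalize the key before the lookup, so mixed-case keys resolve to the same entry. A quick demonstration:

	package main

	import (
		"fmt"
		"net/textproto"
	)

	func main() {
		h := textproto.MIMEHeader{}
		h.Add("content-type", "text/plain")
		h.Add("Content-Type", "text/html")

		// Both lookups canonicalize to "Content-Type".
		fmt.Println(h.Get("CONTENT-TYPE"))    // text/plain (first value only)
		fmt.Println(h.Values("content-type")) // [text/plain text/html]
		fmt.Println(textproto.CanonicalMIMEHeaderKey("x-trace-id")) // X-Trace-Id
	}
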
diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
index fc2590b..7930211 100644
--- a/src/net/textproto/reader.go
+++ b/src/net/textproto/reader.go
@@ -16,6 +16,10 @@
 	"sync"
 )
 
+// TODO: This should be a distinguishable error (ErrMessageTooLarge)
+// to allow mime/multipart to detect it.
+var errMessageTooLarge = errors.New("message too large")
+
 // A Reader implements convenience methods for reading requests
 // or responses from a text protocol network connection.
 type Reader struct {
@@ -24,10 +28,10 @@
 	buf []byte // a re-usable buffer for readContinuedLineSlice
 }
 
-// NewReader returns a new Reader reading from r.
+// NewReader returns a new [Reader] reading from r.
 //
-// To avoid denial of service attacks, the provided bufio.Reader
-// should be reading from an io.LimitReader or similar Reader to bound
+// To avoid denial of service attacks, the provided [bufio.Reader]
+// should be reading from an [io.LimitReader] or similar Reader to bound
 // the size of responses.
 func NewReader(r *bufio.Reader) *Reader {
 	return &Reader{R: r}
@@ -36,20 +40,23 @@
 // ReadLine reads a single line from r,
 // eliding the final \n or \r\n from the returned string.
 func (r *Reader) ReadLine() (string, error) {
-	line, err := r.readLineSlice()
+	line, err := r.readLineSlice(-1)
 	return string(line), err
 }
 
-// ReadLineBytes is like ReadLine but returns a []byte instead of a string.
+// ReadLineBytes is like [Reader.ReadLine] but returns a []byte instead of a string.
 func (r *Reader) ReadLineBytes() ([]byte, error) {
-	line, err := r.readLineSlice()
+	line, err := r.readLineSlice(-1)
 	if line != nil {
 		line = bytes.Clone(line)
 	}
 	return line, err
 }
 
-func (r *Reader) readLineSlice() ([]byte, error) {
+// readLineSlice reads a single line from r,
+// up to lim bytes long (or unlimited if lim is less than 0),
+// eliding the final \r or \r\n from the returned string.
+func (r *Reader) readLineSlice(lim int64) ([]byte, error) {
 	r.closeDot()
 	var line []byte
 	for {
@@ -57,6 +64,9 @@
 		if err != nil {
 			return nil, err
 		}
+		if lim >= 0 && int64(len(line))+int64(len(l)) > lim {
+			return nil, errMessageTooLarge
+		}
 		// Avoid the copy if the first call produced a full line.
 		if line == nil && !more {
 			return l, nil
@@ -88,7 +98,7 @@
 //
 // Empty lines are never continued.
 func (r *Reader) ReadContinuedLine() (string, error) {
-	line, err := r.readContinuedLineSlice(noValidation)
+	line, err := r.readContinuedLineSlice(-1, noValidation)
 	return string(line), err
 }
 
@@ -106,10 +116,10 @@
 	return s[i:n]
 }
 
-// ReadContinuedLineBytes is like ReadContinuedLine but
+// ReadContinuedLineBytes is like [Reader.ReadContinuedLine] but
 // returns a []byte instead of a string.
 func (r *Reader) ReadContinuedLineBytes() ([]byte, error) {
-	line, err := r.readContinuedLineSlice(noValidation)
+	line, err := r.readContinuedLineSlice(-1, noValidation)
 	if line != nil {
 		line = bytes.Clone(line)
 	}
@@ -120,13 +130,14 @@
 // returning a byte slice with all lines. The validateFirstLine function
 // is run on the first read line, and if it returns an error then this
 // error is returned from readContinuedLineSlice.
-func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([]byte, error) {
+// It reads up to lim bytes of data (or unlimited if lim is less than 0).
+func (r *Reader) readContinuedLineSlice(lim int64, validateFirstLine func([]byte) error) ([]byte, error) {
 	if validateFirstLine == nil {
 		return nil, fmt.Errorf("missing validateFirstLine func")
 	}
 
 	// Read the first line.
-	line, err := r.readLineSlice()
+	line, err := r.readLineSlice(lim)
 	if err != nil {
 		return nil, err
 	}
@@ -154,13 +165,21 @@
 	// copy the slice into buf.
 	r.buf = append(r.buf[:0], trim(line)...)
 
+	if lim < 0 {
+		lim = math.MaxInt64
+	}
+	lim -= int64(len(r.buf))
+
 	// Read continuation lines.
 	for r.skipSpace() > 0 {
-		line, err := r.readLineSlice()
+		r.buf = append(r.buf, ' ')
+		if int64(len(r.buf)) >= lim {
+			return nil, errMessageTooLarge
+		}
+		line, err := r.readLineSlice(lim - int64(len(r.buf)))
 		if err != nil {
 			break
 		}
-		r.buf = append(r.buf, ' ')
 		r.buf = append(r.buf, trim(line)...)
 	}
 	return r.buf, nil
@@ -289,7 +308,7 @@
 	return
 }
 
-// DotReader returns a new Reader that satisfies Reads using the
+// DotReader returns a new [Reader] that satisfies Reads using the
 // decoded text of a dot-encoded block read from r.
 // The returned Reader is only valid until the next call
 // to a method on r.
@@ -303,7 +322,7 @@
 //
 // The decoded form returned by the Reader's Read method
 // rewrites the "\r\n" line endings into the simpler "\n",
-// removes leading dot escapes if present, and stops with error io.EOF
+// removes leading dot escapes if present, and stops with error [io.EOF]
 // after consuming (and discarding) the end-of-sequence line.
 func (r *Reader) DotReader() io.Reader {
 	r.closeDot()
@@ -420,7 +439,7 @@
 
 // ReadDotBytes reads a dot-encoding and returns the decoded data.
 //
-// See the documentation for the DotReader method for details about dot-encoding.
+// See the documentation for the [Reader.DotReader] method for details about dot-encoding.
 func (r *Reader) ReadDotBytes() ([]byte, error) {
 	return io.ReadAll(r.DotReader())
 }
@@ -428,7 +447,7 @@
 // ReadDotLines reads a dot-encoding and returns a slice
 // containing the decoded lines, with the final \r\n or \n elided from each.
 //
-// See the documentation for the DotReader method for details about dot-encoding.
+// See the documentation for the [Reader.DotReader] method for details about dot-encoding.
 func (r *Reader) ReadDotLines() ([]string, error) {
 	// We could use ReadDotBytes and then Split it,
 	// but reading a line at a time avoids needing a
@@ -462,7 +481,7 @@
 // ReadMIMEHeader reads a MIME-style header from r.
 // The header is a sequence of possibly continued Key: Value lines
 // ending in a blank line.
-// The returned map m maps CanonicalMIMEHeaderKey(key) to a
+// The returned map m maps [CanonicalMIMEHeaderKey](key) to a
 // sequence of values in the same order encountered in the input.
 //
 // For example, consider this input:
@@ -507,7 +526,8 @@
 
 	// The first line cannot start with a leading space.
 	if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
-		line, err := r.readLineSlice()
+		const errorLimit = 80 // arbitrary limit on how much of the line we'll quote
+		line, err := r.readLineSlice(errorLimit)
 		if err != nil {
 			return m, err
 		}
@@ -515,7 +535,7 @@
 	}
 
 	for {
-		kv, err := r.readContinuedLineSlice(mustHaveFieldNameColon)
+		kv, err := r.readContinuedLineSlice(maxMemory, mustHaveFieldNameColon)
 		if len(kv) == 0 {
 			return m, err
 		}
@@ -544,7 +564,7 @@
 
 		maxHeaders--
 		if maxHeaders < 0 {
-			return nil, errors.New("message too large")
+			return nil, errMessageTooLarge
 		}
 
 		// Skip initial spaces in value.
@@ -557,9 +577,7 @@
 		}
 		maxMemory -= int64(len(value))
 		if maxMemory < 0 {
-			// TODO: This should be a distinguishable error (ErrMessageTooLarge)
-			// to allow mime/multipart to detect it.
-			return m, errors.New("message too large")
+			return m, errMessageTooLarge
 		}
 		if vv == nil && len(strs) > 0 {
 			// More than likely this will be a single-element key.
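
To make the bounded-read guidance in the NewReader doc comment above concrete, here is a minimal sketch (not part of the patch) that wires a textproto.Reader behind an io.LimitReader; the 1 MiB cap and the sample header block are arbitrary values chosen for illustration.

package main

import (
	"bufio"
	"fmt"
	"io"
	"net/textproto"
	"strings"
)

func main() {
	// A well-formed header block ends with a blank line.
	src := strings.NewReader("Subject: hello\r\nContent-Type: text/plain\r\n\r\n")

	// Cap the total number of bytes the Reader may consume, as the
	// NewReader documentation recommends for untrusted inputs.
	limited := io.LimitReader(src, 1<<20)

	tp := textproto.NewReader(bufio.NewReader(limited))
	hdr, err := tp.ReadMIMEHeader()
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Println(hdr.Get("Subject")) // prints "hello"
}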
diff --git a/src/net/textproto/reader_test.go b/src/net/textproto/reader_test.go
index 696ae40..26ff617 100644
--- a/src/net/textproto/reader_test.go
+++ b/src/net/textproto/reader_test.go
@@ -36,6 +36,18 @@
 	}
 }
 
+func TestReadLineLongLine(t *testing.T) {
+	line := strings.Repeat("12345", 10000)
+	r := reader(line + "\r\n")
+	s, err := r.ReadLine()
+	if err != nil {
+		t.Fatalf("Line 1: %v", err)
+	}
+	if s != line {
+		t.Fatalf("%v-byte line does not match expected %v-byte line", len(s), len(line))
+	}
+}
+
 func TestReadContinuedLine(t *testing.T) {
 	r := reader("line1\nline\n 2\nline3\n")
 	s, err := r.ReadContinuedLine()
diff --git a/src/net/textproto/textproto.go b/src/net/textproto/textproto.go
index 70038d5..4ae3ecf 100644
--- a/src/net/textproto/textproto.go
+++ b/src/net/textproto/textproto.go
@@ -7,20 +7,20 @@
 //
 // The package provides:
 //
-// Error, which represents a numeric error response from
+// [Error], which represents a numeric error response from
 // a server.
 //
-// Pipeline, to manage pipelined requests and responses
+// [Pipeline], to manage pipelined requests and responses
 // in a client.
 //
-// Reader, to read numeric response code lines,
+// [Reader], to read numeric response code lines,
 // key: value headers, lines wrapped with leading spaces
 // on continuation lines, and whole text blocks ending
 // with a dot on a line by itself.
 //
-// Writer, to write dot-encoded text blocks.
+// [Writer], to write dot-encoded text blocks.
 //
-// Conn, a convenient packaging of Reader, Writer, and Pipeline for use
+// [Conn], a convenient packaging of [Reader], [Writer], and [Pipeline] for use
 // with a single network connection.
 package textproto
 
@@ -50,8 +50,8 @@
 }
 
 // A Conn represents a textual network protocol connection.
-// It consists of a Reader and Writer to manage I/O
-// and a Pipeline to sequence concurrent requests on the connection.
+// It consists of a [Reader] and [Writer] to manage I/O
+// and a [Pipeline] to sequence concurrent requests on the connection.
 // These embedded types carry methods with them;
 // see the documentation of those types for details.
 type Conn struct {
@@ -61,7 +61,7 @@
 	conn io.ReadWriteCloser
 }
 
-// NewConn returns a new Conn using conn for I/O.
+// NewConn returns a new [Conn] using conn for I/O.
 func NewConn(conn io.ReadWriteCloser) *Conn {
 	return &Conn{
 		Reader: Reader{R: bufio.NewReader(conn)},
@@ -75,8 +75,8 @@
 	return c.conn.Close()
 }
 
-// Dial connects to the given address on the given network using net.Dial
-// and then returns a new Conn for the connection.
+// Dial connects to the given address on the given network using [net.Dial]
+// and then returns a new [Conn] for the connection.
 func Dial(network, addr string) (*Conn, error) {
 	c, err := net.Dial(network, addr)
 	if err != nil {
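
As a rough illustration of the Conn/Pipeline wiring described in the package comment above (not part of the patch), the sketch below speaks an SMTP-like exchange; the address and the expected reply codes are placeholders.

package main

import (
	"log"
	"net/textproto"
)

func main() {
	conn, err := textproto.Dial("tcp", "mail.example.com:25") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Read the server greeting.
	if _, _, err := conn.ReadResponse(220); err != nil {
		log.Fatal(err)
	}

	// Pipeline a single command and wait for its numbered response slot.
	id, err := conn.Cmd("NOOP")
	if err != nil {
		log.Fatal(err)
	}
	conn.StartResponse(id)
	_, msg, err := conn.ReadResponse(250)
	conn.EndResponse(id)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("server said:", msg)
}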
diff --git a/src/net/textproto/writer.go b/src/net/textproto/writer.go
index 2ece3f5..662515f 100644
--- a/src/net/textproto/writer.go
+++ b/src/net/textproto/writer.go
@@ -17,7 +17,7 @@
 	dot *dotWriter
 }
 
-// NewWriter returns a new Writer writing to w.
+// NewWriter returns a new [Writer] writing to w.
 func NewWriter(w *bufio.Writer) *Writer {
 	return &Writer{W: w}
 }
@@ -39,7 +39,7 @@
 // when the DotWriter is closed. The caller should close the
 // DotWriter before the next call to a method on w.
 //
-// See the documentation for Reader's DotReader method for details about dot-encoding.
+// See the documentation for the [Reader.DotReader] method for details about dot-encoding.
 func (w *Writer) DotWriter() io.WriteCloser {
 	w.closeDot()
 	w.dot = &dotWriter{w: w}
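
The dot-encoding round trip is easiest to see end to end; the following sketch (illustrative only) pairs DotWriter with DotReader over an in-memory buffer.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/textproto"
)

func main() {
	var buf bytes.Buffer

	w := textproto.NewWriter(bufio.NewWriter(&buf))
	dw := w.DotWriter()
	io.WriteString(dw, "line one\n.a leading dot gets escaped\n")
	dw.Close() // writes the terminating "." line and flushes

	r := textproto.NewReader(bufio.NewReader(&buf))
	decoded, err := io.ReadAll(r.DotReader())
	if err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%q\n", decoded) // the escapes and CRLF endings are undone on the way back
}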
diff --git a/src/net/timeout_test.go b/src/net/timeout_test.go
index c0bce57..ca86f31 100644
--- a/src/net/timeout_test.go
+++ b/src/net/timeout_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -11,7 +9,6 @@
 	"fmt"
 	"internal/testenv"
 	"io"
-	"net/internal/socktest"
 	"os"
 	"runtime"
 	"sync"
@@ -19,65 +16,121 @@
 	"time"
 )
 
-var dialTimeoutTests = []struct {
-	timeout time.Duration
-	delta   time.Duration // for deadline
+func init() {
+	// Install a hook to ensure that a 1ns timeout will always
+	// be exceeded by the time Dial gets to the relevant system call.
+	//
+	// Without this, systems with a very large timer granularity — such as
+	// Windows — may be able to accept connections without measurably exceeding
+	// even an implausibly short deadline.
+	testHookStepTime = func() {
+		now := time.Now()
+		for time.Since(now) == 0 {
+			time.Sleep(1 * time.Nanosecond)
+		}
+	}
+}
 
-	guard time.Duration
+var dialTimeoutTests = []struct {
+	initialTimeout time.Duration
+	initialDelta   time.Duration // for deadline
 }{
 	// Tests that dial timeouts, deadlines in the past work.
-	{-5 * time.Second, 0, -5 * time.Second},
-	{0, -5 * time.Second, -5 * time.Second},
-	{-5 * time.Second, 5 * time.Second, -5 * time.Second}, // timeout over deadline
-	{-1 << 63, 0, time.Second},
-	{0, -1 << 63, time.Second},
+	{-5 * time.Second, 0},
+	{0, -5 * time.Second},
+	{-5 * time.Second, 5 * time.Second}, // timeout over deadline
+	{-1 << 63, 0},
+	{0, -1 << 63},
 
-	{50 * time.Millisecond, 0, 100 * time.Millisecond},
-	{0, 50 * time.Millisecond, 100 * time.Millisecond},
-	{50 * time.Millisecond, 5 * time.Second, 100 * time.Millisecond}, // timeout over deadline
+	{1 * time.Millisecond, 0},
+	{0, 1 * time.Millisecond},
+	{1 * time.Millisecond, 5 * time.Second}, // timeout over deadline
 }
 
 func TestDialTimeout(t *testing.T) {
-	// Cannot use t.Parallel - modifies global hooks.
-	origTestHookDialChannel := testHookDialChannel
-	defer func() { testHookDialChannel = origTestHookDialChannel }()
-	defer sw.Set(socktest.FilterConnect, nil)
+	switch runtime.GOOS {
+	case "plan9":
+		t.Skipf("not supported on %s", runtime.GOOS)
+	}
 
-	for i, tt := range dialTimeoutTests {
-		switch runtime.GOOS {
-		case "plan9", "windows":
-			testHookDialChannel = func() { time.Sleep(tt.guard) }
-			if runtime.GOOS == "plan9" {
-				break
+	t.Parallel()
+
+	ln := newLocalListener(t, "tcp")
+	defer func() {
+		if err := ln.Close(); err != nil {
+			t.Error(err)
+		}
+	}()
+
+	for _, tt := range dialTimeoutTests {
+		t.Run(fmt.Sprintf("%v/%v", tt.initialTimeout, tt.initialDelta), func(t *testing.T) {
+			// We don't run these subtests in parallel because we don't know how big
+			// the kernel's accept queue is, and we don't want to accidentally saturate
+			// it with concurrent calls. (That could cause the Dial to fail with
+			// ECONNREFUSED or ECONNRESET instead of a timeout error.)
+			d := Dialer{Timeout: tt.initialTimeout}
+			delta := tt.initialDelta
+
+			var (
+				beforeDial time.Time
+				afterDial  time.Time
+				err        error
+			)
+			for {
+				if delta != 0 {
+					d.Deadline = time.Now().Add(delta)
+				}
+
+				beforeDial = time.Now()
+
+				var c Conn
+				c, err = d.Dial(ln.Addr().Network(), ln.Addr().String())
+				afterDial = time.Now()
+
+				if err != nil {
+					break
+				}
+
+				// Even though we're not calling Accept on the Listener, the kernel may
+				// spuriously accept connections on its behalf. If that happens, we will
+				// close the connection (to try to get it out of the kernel's accept
+				// queue) and try a shorter timeout.
+				//
+				// We assume that we will reach a point where the call actually does
+				// time out, although in theory (since this socket is on a loopback
+				// address) a sufficiently clever kernel could notice that no Accept
+				// call is pending and bypass both the queue and the timeout to return
+				// another error immediately.
+				t.Logf("closing spurious connection from Dial")
+				c.Close()
+
+				if delta <= 1 && d.Timeout <= 1 {
+					t.Fatalf("can't reduce Timeout or Deadline")
+				}
+				if delta > 1 {
+					delta /= 2
+					t.Logf("reducing Deadline delta to %v", delta)
+				}
+				if d.Timeout > 1 {
+					d.Timeout /= 2
+					t.Logf("reducing Timeout to %v", d.Timeout)
+				}
 			}
-			fallthrough
-		default:
-			sw.Set(socktest.FilterConnect, func(so *socktest.Status) (socktest.AfterFilter, error) {
-				time.Sleep(tt.guard)
-				return nil, errTimedout
-			})
-		}
 
-		d := Dialer{Timeout: tt.timeout}
-		if tt.delta != 0 {
-			d.Deadline = time.Now().Add(tt.delta)
-		}
+			if d.Deadline.IsZero() || afterDial.Before(d.Deadline) {
+				delay := afterDial.Sub(beforeDial)
+				if delay < d.Timeout {
+					t.Errorf("Dial returned after %v; want ≥%v", delay, d.Timeout)
+				}
+			}
 
-		// This dial never starts to send any TCP SYN
-		// segment because of above socket filter and
-		// test hook.
-		c, err := d.Dial("tcp", "127.0.0.1:0")
-		if err == nil {
-			err = fmt.Errorf("unexpectedly established: tcp:%s->%s", c.LocalAddr(), c.RemoteAddr())
-			c.Close()
-		}
-
-		if perr := parseDialError(err); perr != nil {
-			t.Errorf("#%d: %v", i, perr)
-		}
-		if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
-			t.Fatalf("#%d: %v", i, err)
-		}
+			if perr := parseDialError(err); perr != nil {
+				t.Errorf("unexpected error from Dial: %v", perr)
+			}
+			if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
+				t.Errorf("Dial: %v, want timeout", err)
+			}
+		})
 	}
 }
 
@@ -189,35 +242,22 @@
 	ln := newLocalListener(t, "tcp")
 	defer ln.Close()
 
-	max := time.NewTimer(time.Second)
-	defer max.Stop()
-	ch := make(chan error)
-	go func() {
-		if err := ln.(*TCPListener).SetDeadline(noDeadline); err != nil {
-			t.Error(err)
-		}
-		if err := ln.(*TCPListener).SetDeadline(time.Now().Add(10 * time.Millisecond)); err != nil {
-			t.Error(err)
-		}
-		c, err := ln.Accept()
-		if err == nil {
-			c.Close()
-		}
-		ch <- err
-	}()
+	if err := ln.(*TCPListener).SetDeadline(noDeadline); err != nil {
+		t.Error(err)
+	}
+	if err := ln.(*TCPListener).SetDeadline(time.Now().Add(10 * time.Millisecond)); err != nil {
+		t.Error(err)
+	}
+	c, err := ln.Accept()
+	if err == nil {
+		c.Close()
+	}
 
-	select {
-	case <-max.C:
-		ln.Close()
-		<-ch // wait for tester goroutine to stop
-		t.Fatal("Accept didn't return in an expected time")
-	case err := <-ch:
-		if perr := parseAcceptError(err); perr != nil {
-			t.Error(perr)
-		}
-		if !isDeadlineExceeded(err) {
-			t.Fatal(err)
-		}
+	if perr := parseAcceptError(err); perr != nil {
+		t.Error(perr)
+	}
+	if !isDeadlineExceeded(err) {
+		t.Fatal(err)
 	}
 }
 
@@ -529,7 +569,7 @@
 			t.Error(err)
 		}
 		maxch <- time.NewTimer(100 * time.Millisecond)
-		var b [1]byte
+		var b [1024]byte
 		for {
 			if _, err := c.Write(b[:]); err != nil {
 				ch <- err
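
The rewritten TestDialTimeout above exercises Dialer.Timeout and Dialer.Deadline together; from user code the same knobs look like the sketch below, where the TEST-NET address and the durations are arbitrary example values.

package main

import (
	"log"
	"net"
	"time"
)

func main() {
	d := net.Dialer{
		Timeout:  500 * time.Millisecond,          // per-dial budget
		Deadline: time.Now().Add(2 * time.Second), // absolute cutoff; the earlier limit wins
	}
	conn, err := d.Dial("tcp", "192.0.2.1:80") // TEST-NET-1, expected to hang and time out
	if err != nil {
		if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
			log.Println("dial timed out as expected:", err)
			return
		}
		log.Fatal(err)
	}
	conn.Close()
}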
diff --git a/src/net/udpsock.go b/src/net/udpsock.go
index e30624d..4f8acb7 100644
--- a/src/net/udpsock.go
+++ b/src/net/udpsock.go
@@ -129,7 +129,7 @@
 	if !c.ok() {
 		return nil, syscall.EINVAL
 	}
-	return newRawConn(c.fd)
+	return newRawConn(c.fd), nil
 }
 
 // ReadFromUDP acts like ReadFrom but returns a UDPAddr.
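
The SyscallConn change above only affects the error result; a typical caller obtains the raw descriptor through RawConn.Control, roughly as in this sketch (the loopback listener is just for illustration).

package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	raw, err := conn.SyscallConn() // non-nil RawConn and nil error after this change
	if err != nil {
		log.Fatal(err)
	}
	if err := raw.Control(func(fd uintptr) {
		// Platform-specific setsockopt calls would go here.
		fmt.Println("underlying descriptor:", fd)
	}); err != nil {
		log.Fatal(err)
	}
}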
diff --git a/src/net/udpsock_posix.go b/src/net/udpsock_posix.go
index f3dbcfe..5035059 100644
--- a/src/net/udpsock_posix.go
+++ b/src/net/udpsock_posix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix || (js && wasm) || wasip1 || windows
+//go:build unix || js || wasip1 || windows
 
 package net
 
diff --git a/src/net/udpsock_test.go b/src/net/udpsock_test.go
index 2afd4ac..8a21aa7 100644
--- a/src/net/udpsock_test.go
+++ b/src/net/udpsock_test.go
@@ -2,12 +2,11 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
 	"errors"
+	"fmt"
 	"internal/testenv"
 	"net/netip"
 	"os"
@@ -116,6 +115,10 @@
 		t.Skipf("not supported on %s", runtime.GOOS)
 	}
 
+	if !testableNetwork("udp") {
+		t.Skipf("skipping: udp not supported")
+	}
+
 	c, err := ListenPacket("udp", "127.0.0.1:0")
 	if err != nil {
 		t.Fatal(err)
@@ -221,19 +224,29 @@
 	testenv.MustHaveExternalNetwork(t)
 
 	for _, tt := range udpConnLocalNameTests {
-		c, err := ListenUDP(tt.net, tt.laddr)
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer c.Close()
-		la := c.LocalAddr()
-		if a, ok := la.(*UDPAddr); !ok || a.Port == 0 {
-			t.Fatalf("got %v; expected a proper address with non-zero port number", la)
-		}
+		t.Run(fmt.Sprint(tt.laddr), func(t *testing.T) {
+			if !testableNetwork(tt.net) {
+				t.Skipf("skipping: %s not available", tt.net)
+			}
+
+			c, err := ListenUDP(tt.net, tt.laddr)
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer c.Close()
+			la := c.LocalAddr()
+			if a, ok := la.(*UDPAddr); !ok || a.Port == 0 {
+				t.Fatalf("got %v; expected a proper address with non-zero port number", la)
+			}
+		})
 	}
 }
 
 func TestUDPConnLocalAndRemoteNames(t *testing.T) {
+	if !testableNetwork("udp") {
+		t.Skipf("skipping: udp not available")
+	}
+
 	for _, laddr := range []string{"", "127.0.0.1:0"} {
 		c1, err := ListenPacket("udp", "127.0.0.1:0")
 		if err != nil {
@@ -330,6 +343,9 @@
 	case "darwin", "ios":
 		testenv.SkipFlaky(t, 29225)
 	}
+	if !testableNetwork("udp") {
+		t.Skipf("skipping: udp not available")
+	}
 
 	c := newLocalPacketListener(t, "udp")
 	defer c.Close()
@@ -363,6 +379,9 @@
 	case "plan9":
 		t.Skipf("not supported on %s", runtime.GOOS)
 	}
+	if !testableNetwork("udp") {
+		t.Skipf("skipping: udp not available")
+	}
 
 	c := newLocalPacketListener(t, "udp")
 	defer c.Close()
@@ -397,6 +416,9 @@
 	case "plan9":
 		t.Skipf("not supported on %s", runtime.GOOS)
 	}
+	if !testableNetwork("udp") {
+		t.Skipf("skipping: udp not available")
+	}
 
 	c1 := newLocalPacketListener(t, "udp")
 	defer c1.Close()
@@ -434,6 +456,10 @@
 // TestUDPReadTimeout verifies that ReadFromUDP with timeout returns an error
 // without data or an address.
 func TestUDPReadTimeout(t *testing.T) {
+	if !testableNetwork("udp4") {
+		t.Skipf("skipping: udp4 not available")
+	}
+
 	la, err := ResolveUDPAddr("udp4", "127.0.0.1:0")
 	if err != nil {
 		t.Fatal(err)
@@ -460,10 +486,14 @@
 
 func TestAllocs(t *testing.T) {
 	switch runtime.GOOS {
-	case "plan9":
-		// Plan9 wasn't optimized.
+	case "plan9", "js", "wasip1":
+		// These implementations have not been optimized.
 		t.Skipf("skipping on %v", runtime.GOOS)
 	}
+	if !testableNetwork("udp4") {
+		t.Skipf("skipping: udp4 not available")
+	}
+
 	// Optimizations are required to remove the allocs.
 	testenv.SkipIfOptimizationOff(t)
 
@@ -590,6 +620,10 @@
 	case "plan9":
 		t.Skipf("skipping on %v", runtime.GOOS)
 	}
+	if !testableNetwork("udp4") {
+		t.Skipf("skipping: udp4 not available")
+	}
+
 	conn, err := ListenUDP("udp4", &UDPAddr{IP: IPv4(127, 0, 0, 1)})
 	if err != nil {
 		t.Fatal(err)
@@ -625,8 +659,11 @@
 // WriteMsgUDPAddrPort accepts IPv4, IPv4-mapped IPv6, and IPv6 target addresses
 // on a UDPConn listening on "::".
 func TestIPv6WriteMsgUDPAddrPortTargetAddrIPVersion(t *testing.T) {
-	if !supportsIPv6() {
-		t.Skip("IPv6 is not supported")
+	if !testableNetwork("udp4") {
+		t.Skipf("skipping: udp4 not available")
+	}
+	if !testableNetwork("udp6") {
+		t.Skipf("skipping: udp6 not available")
 	}
 
 	switch runtime.GOOS {
diff --git a/src/net/unixsock.go b/src/net/unixsock.go
index 14fbac0..821be7b 100644
--- a/src/net/unixsock.go
+++ b/src/net/unixsock.go
@@ -52,7 +52,7 @@
 //
 // The network must be a Unix network name.
 //
-// See func Dial for a description of the network and address
+// See func [Dial] for a description of the network and address
 // parameters.
 func ResolveUnixAddr(network, address string) (*UnixAddr, error) {
 	switch network {
@@ -63,19 +63,19 @@
 	}
 }
 
-// UnixConn is an implementation of the Conn interface for connections
+// UnixConn is an implementation of the [Conn] interface for connections
 // to Unix domain sockets.
 type UnixConn struct {
 	conn
 }
 
 // SyscallConn returns a raw network connection.
-// This implements the syscall.Conn interface.
+// This implements the [syscall.Conn] interface.
 func (c *UnixConn) SyscallConn() (syscall.RawConn, error) {
 	if !c.ok() {
 		return nil, syscall.EINVAL
 	}
-	return newRawConn(c.fd)
+	return newRawConn(c.fd), nil
 }
 
 // CloseRead shuts down the reading side of the Unix domain connection.
@@ -102,7 +102,7 @@
 	return nil
 }
 
-// ReadFromUnix acts like ReadFrom but returns a UnixAddr.
+// ReadFromUnix acts like [UnixConn.ReadFrom] but returns a [UnixAddr].
 func (c *UnixConn) ReadFromUnix(b []byte) (int, *UnixAddr, error) {
 	if !c.ok() {
 		return 0, nil, syscall.EINVAL
@@ -114,7 +114,7 @@
 	return n, addr, err
 }
 
-// ReadFrom implements the PacketConn ReadFrom method.
+// ReadFrom implements the [PacketConn] ReadFrom method.
 func (c *UnixConn) ReadFrom(b []byte) (int, Addr, error) {
 	if !c.ok() {
 		return 0, nil, syscall.EINVAL
@@ -147,7 +147,7 @@
 	return
 }
 
-// WriteToUnix acts like WriteTo but takes a UnixAddr.
+// WriteToUnix acts like [UnixConn.WriteTo] but takes a [UnixAddr].
 func (c *UnixConn) WriteToUnix(b []byte, addr *UnixAddr) (int, error) {
 	if !c.ok() {
 		return 0, syscall.EINVAL
@@ -159,7 +159,7 @@
 	return n, err
 }
 
-// WriteTo implements the PacketConn WriteTo method.
+// WriteTo implements the [PacketConn] WriteTo method.
 func (c *UnixConn) WriteTo(b []byte, addr Addr) (int, error) {
 	if !c.ok() {
 		return 0, syscall.EINVAL
@@ -194,7 +194,7 @@
 
 func newUnixConn(fd *netFD) *UnixConn { return &UnixConn{conn{fd}} }
 
-// DialUnix acts like Dial for Unix networks.
+// DialUnix acts like [Dial] for Unix networks.
 //
 // The network must be a Unix network name; see func Dial for details.
 //
@@ -215,7 +215,7 @@
 }
 
 // UnixListener is a Unix domain socket listener. Clients should
-// typically use variables of type Listener instead of assuming Unix
+// typically use variables of type [Listener] instead of assuming Unix
 // domain sockets.
 type UnixListener struct {
 	fd         *netFD
@@ -227,7 +227,7 @@
 func (ln *UnixListener) ok() bool { return ln != nil && ln.fd != nil }
 
 // SyscallConn returns a raw network connection.
-// This implements the syscall.Conn interface.
+// This implements the [syscall.Conn] interface.
 //
 // The returned RawConn only supports calling Control. Read and
 // Write return an error.
@@ -235,7 +235,7 @@
 	if !l.ok() {
 		return nil, syscall.EINVAL
 	}
-	return newRawListener(l.fd)
+	return newRawListener(l.fd), nil
 }
 
 // AcceptUnix accepts the next incoming call and returns the new
@@ -251,8 +251,8 @@
 	return c, nil
 }
 
-// Accept implements the Accept method in the Listener interface.
-// Returned connections will be of type *UnixConn.
+// Accept implements the Accept method in the [Listener] interface.
+// Returned connections will be of type [*UnixConn].
 func (l *UnixListener) Accept() (Conn, error) {
 	if !l.ok() {
 		return nil, syscall.EINVAL
@@ -287,13 +287,10 @@
 	if !l.ok() {
 		return syscall.EINVAL
 	}
-	if err := l.fd.pfd.SetDeadline(t); err != nil {
-		return &OpError{Op: "set", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
-	}
-	return nil
+	return l.fd.SetDeadline(t)
 }
 
-// File returns a copy of the underlying os.File.
+// File returns a copy of the underlying [os.File].
 // It is the caller's responsibility to close f when finished.
 // Closing l does not affect f, and closing f does not affect l.
 //
@@ -311,7 +308,7 @@
 	return
 }
 
-// ListenUnix acts like Listen for Unix networks.
+// ListenUnix acts like [Listen] for Unix networks.
 //
 // The network must be "unix" or "unixpacket".
 func ListenUnix(network string, laddr *UnixAddr) (*UnixListener, error) {
@@ -331,7 +328,7 @@
 	return ln, nil
 }
 
-// ListenUnixgram acts like ListenPacket for Unix networks.
+// ListenUnixgram acts like [ListenPacket] for Unix networks.
 //
 // The network must be "unixgram".
 func ListenUnixgram(network string, laddr *UnixAddr) (*UnixConn, error) {
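
UnixListener.SetDeadline now delegates to the shared netFD path; the behavior callers see is unchanged, roughly as in this sketch (the socket path is a throwaway temp name, and Unix domain sockets are assumed to be available on the host).

package main

import (
	"errors"
	"log"
	"net"
	"os"
	"path/filepath"
	"time"
)

func main() {
	sock := filepath.Join(os.TempDir(), "example.sock")
	defer os.Remove(sock)

	l, err := net.ListenUnix("unix", &net.UnixAddr{Name: sock, Net: "unix"})
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// With no pending client, Accept should fail once the deadline passes.
	l.SetDeadline(time.Now().Add(50 * time.Millisecond))
	if _, err := l.Accept(); errors.Is(err, os.ErrDeadlineExceeded) {
		log.Println("accept timed out as expected")
	} else {
		log.Println("unexpected result:", err)
	}
}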
diff --git a/src/net/unixsock_posix.go b/src/net/unixsock_posix.go
index c501b49..f6c8e8f 100644
--- a/src/net/unixsock_posix.go
+++ b/src/net/unixsock_posix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build unix || (js && wasm) || wasip1 || windows
+//go:build unix || js || wasip1 || windows
 
 package net
 
diff --git a/src/net/unixsock_readmsg_other.go b/src/net/unixsock_readmsg_other.go
index 0899a6d..4bef3ee 100644
--- a/src/net/unixsock_readmsg_other.go
+++ b/src/net/unixsock_readmsg_other.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (js && wasm) || wasip1 || windows
+//go:build js || wasip1 || windows
 
 package net
 
diff --git a/src/net/unixsock_test.go b/src/net/unixsock_test.go
index 8402519..6906ecc 100644
--- a/src/net/unixsock_test.go
+++ b/src/net/unixsock_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !plan9 && !wasip1 && !windows
+//go:build !plan9 && !windows
 
 package net
 
@@ -21,6 +21,10 @@
 	if !testableNetwork("unixgram") {
 		t.Skip("unixgram test")
 	}
+	switch runtime.GOOS {
+	case "js", "wasip1":
+		t.Skipf("skipping: syscall.Socket not implemented on %s", runtime.GOOS)
+	}
 	if runtime.GOOS == "openbsd" {
 		testenv.SkipFlaky(t, 15157)
 	}
@@ -359,6 +363,11 @@
 	if !testableNetwork("unix") {
 		t.Skip("unix test")
 	}
+	switch runtime.GOOS {
+	case "js", "wasip1":
+		t.Skipf("skipping: %s does not support Unlink", runtime.GOOS)
+	}
+
 	name := testUnixAddr(t)
 
 	listen := func(t *testing.T) *UnixListener {
diff --git a/src/net/url/url.go b/src/net/url/url.go
index 501b263..f362958 100644
--- a/src/net/url/url.go
+++ b/src/net/url/url.go
@@ -175,7 +175,7 @@
 	return true
 }
 
-// QueryUnescape does the inverse transformation of QueryEscape,
+// QueryUnescape does the inverse transformation of [QueryEscape],
 // converting each 3-byte encoded substring of the form "%AB" into the
 // hex-decoded byte 0xAB.
 // It returns an error if any % is not followed by two hexadecimal
@@ -184,12 +184,12 @@
 	return unescape(s, encodeQueryComponent)
 }
 
-// PathUnescape does the inverse transformation of PathEscape,
+// PathUnescape does the inverse transformation of [PathEscape],
 // converting each 3-byte encoded substring of the form "%AB" into the
 // hex-decoded byte 0xAB. It returns an error if any % is not followed
 // by two hexadecimal digits.
 //
-// PathUnescape is identical to QueryUnescape except that it does not
+// PathUnescape is identical to [QueryUnescape] except that it does not
 // unescape '+' to ' ' (space).
 func PathUnescape(s string) (string, error) {
 	return unescape(s, encodePathSegment)
@@ -271,12 +271,12 @@
 }
 
 // QueryEscape escapes the string so it can be safely placed
-// inside a URL query.
+// inside a [URL] query.
 func QueryEscape(s string) string {
 	return escape(s, encodeQueryComponent)
 }
 
-// PathEscape escapes the string so it can be safely placed inside a URL path segment,
+// PathEscape escapes the string so it can be safely placed inside a [URL] path segment,
 // replacing special characters (including /) with %XX sequences as needed.
 func PathEscape(s string) string {
 	return escape(s, encodePathSegment)
@@ -348,10 +348,17 @@
 //
 //	scheme:opaque[?query][#fragment]
 //
+// The Host field contains the host and port subcomponents of the URL.
+// When the port is present, it is separated from the host with a colon.
+// When the host is an IPv6 address, it must be enclosed in square brackets:
+// "[fe80::1]:80". The [net.JoinHostPort] function combines a host and port
+// into a string suitable for the Host field, adding square brackets to
+// the host when necessary.
+//
 // Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/.
 // A consequence is that it is impossible to tell which slashes in the Path were
 // slashes in the raw URL and which were %2f. This distinction is rarely important,
-// but when it is, the code should use the EscapedPath method, which preserves
+// but when it is, the code should use the [URL.EscapedPath] method, which preserves
 // the original encoding of Path.
 //
 // The RawPath field is an optional field which is only set when the default
@@ -363,7 +370,7 @@
 	Scheme      string
 	Opaque      string    // encoded opaque data
 	User        *Userinfo // username and password information
-	Host        string    // host or host:port
+	Host        string    // host or host:port (see Hostname and Port methods)
 	Path        string    // path (relative paths may omit leading slash)
 	RawPath     string    // encoded path hint (see EscapedPath method)
 	OmitHost    bool      // do not emit empty host (authority)
@@ -373,13 +380,13 @@
 	RawFragment string    // encoded fragment hint (see EscapedFragment method)
 }
 
-// User returns a Userinfo containing the provided username
+// User returns a [Userinfo] containing the provided username
 // and no password set.
 func User(username string) *Userinfo {
 	return &Userinfo{username, "", false}
 }
 
-// UserPassword returns a Userinfo containing the provided username
+// UserPassword returns a [Userinfo] containing the provided username
 // and password.
 //
 // This functionality should only be used with legacy web sites.
@@ -392,7 +399,7 @@
 }
 
 // The Userinfo type is an immutable encapsulation of username and
-// password details for a URL. An existing Userinfo value is guaranteed
+// password details for a [URL]. An existing Userinfo value is guaranteed
 // to have a username set (potentially empty, as allowed by RFC 2396),
 // and optionally a password.
 type Userinfo struct {
@@ -457,7 +464,7 @@
 	return "", rawURL, nil
 }
 
-// Parse parses a raw url into a URL structure.
+// Parse parses a raw url into a [URL] structure.
 //
 // The url may be relative (a path, without a host) or absolute
 // (starting with a scheme). Trying to parse a hostname and path
@@ -479,7 +486,7 @@
 	return url, nil
 }
 
-// ParseRequestURI parses a raw url into a URL structure. It assumes that
+// ParseRequestURI parses a raw url into a [URL] structure. It assumes that
 // url was received in an HTTP request, so the url is interpreted
 // only as an absolute URI or an absolute path.
 // The string url is assumed not to have a #fragment suffix.
@@ -690,7 +697,7 @@
 // EscapedPath returns u.RawPath when it is a valid escaping of u.Path.
 // Otherwise EscapedPath ignores u.RawPath and computes an escaped
 // form on its own.
-// The String and RequestURI methods use EscapedPath to construct
+// The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct
 // their results.
 // In general, code should call EscapedPath instead of
 // reading u.RawPath directly.
@@ -754,7 +761,7 @@
 // EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment.
 // Otherwise EscapedFragment ignores u.RawFragment and computes an escaped
 // form on its own.
-// The String method uses EscapedFragment to construct its result.
+// The [URL.String] method uses EscapedFragment to construct its result.
 // In general, code should call EscapedFragment instead of
 // reading u.RawFragment directly.
 func (u *URL) EscapedFragment() string {
@@ -784,7 +791,7 @@
 	return true
 }
 
-// String reassembles the URL into a valid URL string.
+// String reassembles the [URL] into a valid URL string.
 // The general form of the result is one of:
 //
 //	scheme:opaque?query#fragment
@@ -858,7 +865,7 @@
 	return buf.String()
 }
 
-// Redacted is like String but replaces any password with "xxxxx".
+// Redacted is like [URL.String] but replaces any password with "xxxxx".
 // Only the password in u.User is redacted.
 func (u *URL) Redacted() string {
 	if u == nil {
@@ -963,7 +970,7 @@
 // Encode encodes the values into “URL encoded” form
 // ("bar=baz&foo=quux") sorted by key.
 func (v Values) Encode() string {
-	if v == nil {
+	if len(v) == 0 {
 		return ""
 	}
 	var buf strings.Builder
@@ -1053,15 +1060,15 @@
 	return r
 }
 
-// IsAbs reports whether the URL is absolute.
+// IsAbs reports whether the [URL] is absolute.
 // Absolute means that it has a non-empty scheme.
 func (u *URL) IsAbs() bool {
 	return u.Scheme != ""
 }
 
-// Parse parses a URL in the context of the receiver. The provided URL
+// Parse parses a [URL] in the context of the receiver. The provided URL
 // may be relative or absolute. Parse returns nil, err on parse
-// failure, otherwise its return value is the same as ResolveReference.
+// failure, otherwise its return value is the same as [URL.ResolveReference].
 func (u *URL) Parse(ref string) (*URL, error) {
 	refURL, err := Parse(ref)
 	if err != nil {
@@ -1073,7 +1080,7 @@
 // ResolveReference resolves a URI reference to an absolute URI from
 // an absolute base URI u, per RFC 3986 Section 5.2. The URI reference
 // may be relative or absolute. ResolveReference always returns a new
-// URL instance, even if the returned URL is identical to either the
+// [URL] instance, even if the returned URL is identical to either the
 // base or reference. If ref is an absolute URL, then ResolveReference
 // ignores base and returns a copy of ref.
 func (u *URL) ResolveReference(ref *URL) *URL {
@@ -1110,7 +1117,7 @@
 
 // Query parses RawQuery and returns the corresponding values.
 // It silently discards malformed value pairs.
-// To check errors use ParseQuery.
+// To check errors use [ParseQuery].
 func (u *URL) Query() Values {
 	v, _ := ParseQuery(u.RawQuery)
 	return v
@@ -1187,7 +1194,7 @@
 	return nil
 }
 
-// JoinPath returns a new URL with the provided path elements joined to
+// JoinPath returns a new [URL] with the provided path elements joined to
 // any existing path and the resulting path cleaned of any ./ or ../ elements.
 // Any sequences of multiple / characters will be reduced to a single /.
 func (u *URL) JoinPath(elem ...string) *URL {
@@ -1253,7 +1260,7 @@
 	return false
 }
 
-// JoinPath returns a URL string with the provided path elements joined to
+// JoinPath returns a [URL] string with the provided path elements joined to
 // the existing path of base and the resulting path cleaned of any ./ or ../ elements.
 func JoinPath(base string, elem ...string) (result string, err error) {
 	url, err := Parse(base)
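
The new Host-field paragraph in the URL doc comment above is easy to demonstrate; the addresses below are documentation examples only.

package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	u, err := url.Parse("http://[fe80::1]:8080/index.html")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Host)       // "[fe80::1]:8080"
	fmt.Println(u.Hostname()) // "fe80::1" (brackets stripped)
	fmt.Println(u.Port())     // "8080"

	// Going the other way, net.JoinHostPort adds the brackets when needed.
	u.Host = net.JoinHostPort("2001:db8::1", "443")
	fmt.Println(u.String()) // "http://[2001:db8::1]:443/index.html"
}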
diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go
index 23c5c58..4aa20bb 100644
--- a/src/net/url/url_test.go
+++ b/src/net/url/url_test.go
@@ -1072,6 +1072,7 @@
 
 var encodeQueryTests = []EncodeQueryTest{
 	{nil, ""},
+	{Values{}, ""},
 	{Values{"q": {"puppies"}, "oe": {"utf8"}}, "oe=utf8&q=puppies"},
 	{Values{"q": {"dogs", "&", "7"}}, "q=dogs&q=%26&q=7"},
 	{Values{
diff --git a/src/net/writev_test.go b/src/net/writev_test.go
index 8722c0f..e4e88c4 100644
--- a/src/net/writev_test.go
+++ b/src/net/writev_test.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !js && !wasip1
-
 package net
 
 import (
@@ -187,9 +185,15 @@
 	}
 
 	ln := newLocalListener(t, "tcp")
-	defer ln.Close()
 
 	ch := make(chan Conn, 1)
+	defer func() {
+		ln.Close()
+		for c := range ch {
+			c.Close()
+		}
+	}()
+
 	go func() {
 		defer close(ch)
 		c, err := ln.Accept()
diff --git a/src/os/dir_windows.go b/src/os/dir_windows.go
index 9dc2cd7..4485dff 100644
--- a/src/os/dir_windows.go
+++ b/src/os/dir_windows.go
@@ -5,60 +5,184 @@
 package os
 
 import (
+	"internal/syscall/windows"
 	"io"
 	"io/fs"
 	"runtime"
+	"sync"
 	"syscall"
+	"unsafe"
 )
 
+// Auxiliary information if the File describes a directory
+type dirInfo struct {
+	// buf is a slice pointer so the slice header
+	// does not escape to the heap when returning
+	// buf to dirBufPool.
+	buf   *[]byte // buffer for directory I/O
+	bufp  int     // location of next record in buf
+	vol   uint32
+	class uint32 // type of entries in buf
+	path  string // absolute directory path, empty if the file system supports FILE_ID_BOTH_DIR_INFO
+}
+
+const (
+	// dirBufSize is the size of the dirInfo buffer.
+	// The buffer must be big enough to hold at least a single entry.
+	// The filename alone can be 512 bytes (MAX_PATH*2), and the fixed part of
+	// the FILE_ID_BOTH_DIR_INFO structure is 105 bytes, so dirBufSize
+	// should not be set below 1024 bytes (512+105+safety buffer).
+	// Windows 8.1 and earlier only works with buffer sizes up to 64 kB.
+	dirBufSize = 64 * 1024 // 64kB
+)
+
+var dirBufPool = sync.Pool{
+	New: func() any {
+		// The buffer must be at least a block long.
+		buf := make([]byte, dirBufSize)
+		return &buf
+	},
+}
+
+func (d *dirInfo) close() {
+	if d.buf != nil {
+		dirBufPool.Put(d.buf)
+		d.buf = nil
+	}
+}
+
+// allowReadDirFileID indicates whether File.readdir should try to use FILE_ID_BOTH_DIR_INFO
+// if the underlying file system supports it.
+// Useful for testing purposes.
+var allowReadDirFileID = true
+
 func (file *File) readdir(n int, mode readdirMode) (names []string, dirents []DirEntry, infos []FileInfo, err error) {
 	// If this file has no dirinfo, create one.
-	needdata := true
 	if file.dirinfo == nil {
-		needdata = false
-		file.dirinfo, err = openDir(file.name)
+		// vol is used by os.SameFile.
+		// It is safe to query it once and reuse the value.
+		// Hard links are not allowed to reference files in other volumes.
+		// Junctions and symbolic links can reference files and directories in other volumes,
+		// but the reparse point should still live in the parent volume.
+		var vol, flags uint32
+		err = windows.GetVolumeInformationByHandle(file.pfd.Sysfd, nil, 0, &vol, nil, &flags, nil, 0)
+		runtime.KeepAlive(file)
 		if err != nil {
 			err = &PathError{Op: "readdir", Path: file.name, Err: err}
 			return
 		}
-	}
-	wantAll := n <= 0
-	if wantAll {
-		n = -1
-	}
-	d := &file.dirinfo.data
-	for n != 0 && !file.dirinfo.isempty {
-		if needdata {
-			e := syscall.FindNextFile(file.dirinfo.h, d)
-			runtime.KeepAlive(file)
-			if e != nil {
-				if e == syscall.ERROR_NO_MORE_FILES {
-					break
-				} else {
-					err = &PathError{Op: "FindNextFile", Path: file.name, Err: e}
+		file.dirinfo = new(dirInfo)
+		file.dirinfo.buf = dirBufPool.Get().(*[]byte)
+		file.dirinfo.vol = vol
+		if allowReadDirFileID && flags&windows.FILE_SUPPORTS_OPEN_BY_FILE_ID != 0 {
+			file.dirinfo.class = windows.FileIdBothDirectoryRestartInfo
+		} else {
+			file.dirinfo.class = windows.FileFullDirectoryRestartInfo
+			// Set the directory path for use by os.SameFile, as it is possible that
+			// the file system supports retrieving the file ID using GetFileInformationByHandle.
+			file.dirinfo.path = file.name
+			if !isAbs(file.dirinfo.path) {
+				// If the path is relative, we need to convert it to an absolute path
+				// in case the current directory changes between this call and a
+				// call to os.SameFile.
+				file.dirinfo.path, err = syscall.FullPath(file.dirinfo.path)
+				if err != nil {
+					err = &PathError{Op: "readdir", Path: file.name, Err: err}
 					return
 				}
 			}
 		}
-		needdata = true
-		name := syscall.UTF16ToString(d.FileName[0:])
-		if name == "." || name == ".." { // Useless names
-			continue
-		}
-		if mode == readdirName {
-			names = append(names, name)
-		} else {
-			f := newFileStatFromWin32finddata(d)
-			f.name = name
-			f.path = file.dirinfo.path
-			f.appendNameToPath = true
-			if mode == readdirDirEntry {
-				dirents = append(dirents, dirEntry{f})
-			} else {
-				infos = append(infos, f)
+	}
+	d := file.dirinfo
+	wantAll := n <= 0
+	if wantAll {
+		n = -1
+	}
+	for n != 0 {
+		// Refill the buffer if necessary
+		if d.bufp == 0 {
+			err = windows.GetFileInformationByHandleEx(file.pfd.Sysfd, d.class, (*byte)(unsafe.Pointer(&(*d.buf)[0])), uint32(len(*d.buf)))
+			runtime.KeepAlive(file)
+			if err != nil {
+				if err == syscall.ERROR_NO_MORE_FILES {
+					break
+				}
+				if err == syscall.ERROR_FILE_NOT_FOUND &&
+					(d.class == windows.FileIdBothDirectoryRestartInfo || d.class == windows.FileFullDirectoryRestartInfo) {
+					// GetFileInformationByHandleEx doesn't document the return error codes when the info class is FileIdBothDirectoryRestartInfo,
+					// but MS-FSA 2.1.5.6.3 [1] specifies that the underlying file system driver should return STATUS_NO_SUCH_FILE when
+					// reading an empty root directory, which is mapped to ERROR_FILE_NOT_FOUND by Windows.
+					// Note that some file system drivers may never return this error code, as the spec allows to return the "." and ".."
+					// entries in such cases, making the directory appear non-empty.
+					// The chances of false positive are very low, as we know that the directory exists, else GetVolumeInformationByHandle
+					// would have failed, and that the handle is still valid, as we haven't closed it.
+					// See go.dev/issue/61159.
+					// [1] https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-fsa/fa8194e0-53ec-413b-8315-e8fa85396fd8
+					break
+				}
+				if s, _ := file.Stat(); s != nil && !s.IsDir() {
+					err = &PathError{Op: "readdir", Path: file.name, Err: syscall.ENOTDIR}
+				} else {
+					err = &PathError{Op: "GetFileInformationByHandleEx", Path: file.name, Err: err}
+				}
+				return
+			}
+			if d.class == windows.FileIdBothDirectoryRestartInfo {
+				d.class = windows.FileIdBothDirectoryInfo
+			} else if d.class == windows.FileFullDirectoryRestartInfo {
+				d.class = windows.FileFullDirectoryInfo
 			}
 		}
-		n--
+		// Drain the buffer
+		var islast bool
+		for n != 0 && !islast {
+			var nextEntryOffset uint32
+			var nameslice []uint16
+			entry := unsafe.Pointer(&(*d.buf)[d.bufp])
+			if d.class == windows.FileIdBothDirectoryInfo {
+				info := (*windows.FILE_ID_BOTH_DIR_INFO)(entry)
+				nextEntryOffset = info.NextEntryOffset
+				nameslice = unsafe.Slice(&info.FileName[0], info.FileNameLength/2)
+			} else {
+				info := (*windows.FILE_FULL_DIR_INFO)(entry)
+				nextEntryOffset = info.NextEntryOffset
+				nameslice = unsafe.Slice(&info.FileName[0], info.FileNameLength/2)
+			}
+			d.bufp += int(nextEntryOffset)
+			islast = nextEntryOffset == 0
+			if islast {
+				d.bufp = 0
+			}
+			if (len(nameslice) == 1 && nameslice[0] == '.') ||
+				(len(nameslice) == 2 && nameslice[0] == '.' && nameslice[1] == '.') {
+				// Ignore "." and ".." and avoid allocating a string for them.
+				continue
+			}
+			name := syscall.UTF16ToString(nameslice)
+			if mode == readdirName {
+				names = append(names, name)
+			} else {
+				var f *fileStat
+				if d.class == windows.FileIdBothDirectoryInfo {
+					f = newFileStatFromFileIDBothDirInfo((*windows.FILE_ID_BOTH_DIR_INFO)(entry))
+				} else {
+					f = newFileStatFromFileFullDirInfo((*windows.FILE_FULL_DIR_INFO)(entry))
+					// Defer appending the entry name to the parent directory path until
+					// it is really needed, to avoid allocating a string that may not be used.
+					// It is currently only used in os.SameFile.
+					f.appendNameToPath = true
+					f.path = d.path
+				}
+				f.name = name
+				f.vol = d.vol
+				if mode == readdirDirEntry {
+					dirents = append(dirents, dirEntry{f})
+				} else {
+					infos = append(infos, f)
+				}
+			}
+			n--
+		}
 	}
 	if !wantAll && len(names)+len(dirents)+len(infos) == 0 {
 		return nil, nil, nil, io.EOF
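
The buffered readdir above is internal to the os package; the same batching is reachable from user code through File.ReadDir with a positive n, as in this sketch (the batch size of 16 is arbitrary).

package main

import (
	"errors"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Open(".")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	for {
		entries, err := f.ReadDir(16) // at most 16 entries per call
		for _, e := range entries {
			fmt.Println(e.Name(), e.IsDir())
		}
		if errors.Is(err, io.EOF) {
			break // directory exhausted
		}
		if err != nil {
			log.Fatal(err)
		}
	}
}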
diff --git a/src/os/example_test.go b/src/os/example_test.go
index 5c7c6ea..7437a74 100644
--- a/src/os/example_test.go
+++ b/src/os/example_test.go
@@ -5,17 +5,19 @@
 package os_test
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"io/fs"
 	"log"
 	"os"
 	"path/filepath"
+	"sync"
 	"time"
 )
 
 func ExampleOpenFile() {
-	f, err := os.OpenFile("notes.txt", os.O_RDWR|os.O_CREATE, 0755)
+	f, err := os.OpenFile("notes.txt", os.O_RDWR|os.O_CREATE, 0644)
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -263,3 +265,131 @@
 		log.Fatal(err)
 	}
 }
+
+func ExampleReadlink() {
+	// First, we create a relative symlink to a file.
+	d, err := os.MkdirTemp("", "")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer os.RemoveAll(d)
+	targetPath := filepath.Join(d, "hello.txt")
+	if err := os.WriteFile(targetPath, []byte("Hello, Gophers!"), 0644); err != nil {
+		log.Fatal(err)
+	}
+	linkPath := filepath.Join(d, "hello.link")
+	if err := os.Symlink("hello.txt", filepath.Join(d, "hello.link")); err != nil {
+		if errors.Is(err, errors.ErrUnsupported) {
+			// Allow the example to run on platforms that do not support symbolic links.
+			fmt.Printf("%s links to %s\n", filepath.Base(linkPath), "hello.txt")
+			return
+		}
+		log.Fatal(err)
+	}
+
+	// Readlink returns the relative path as passed to os.Symlink.
+	dst, err := os.Readlink(linkPath)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("%s links to %s\n", filepath.Base(linkPath), dst)
+
+	var dstAbs string
+	if filepath.IsAbs(dst) {
+		dstAbs = dst
+	} else {
+		// Symlink targets are relative to the directory containing the link.
+		dstAbs = filepath.Join(filepath.Dir(linkPath), dst)
+	}
+
+	// Check that the target is correct by comparing it with os.Stat
+	// on the original target path.
+	dstInfo, err := os.Stat(dstAbs)
+	if err != nil {
+		log.Fatal(err)
+	}
+	targetInfo, err := os.Stat(targetPath)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if !os.SameFile(dstInfo, targetInfo) {
+		log.Fatalf("link destination (%s) is not the same file as %s", dstAbs, targetPath)
+	}
+
+	// Output:
+	// hello.link links to hello.txt
+}
+
+func ExampleUserCacheDir() {
+	dir, dirErr := os.UserCacheDir()
+	if dirErr == nil {
+		dir = filepath.Join(dir, "ExampleUserCacheDir")
+	}
+
+	getCache := func(name string) ([]byte, error) {
+		if dirErr != nil {
+			return nil, &os.PathError{Op: "getCache", Path: name, Err: os.ErrNotExist}
+		}
+		return os.ReadFile(filepath.Join(dir, name))
+	}
+
+	var mkdirOnce sync.Once
+	putCache := func(name string, b []byte) error {
+		if dirErr != nil {
+			return &os.PathError{Op: "putCache", Path: name, Err: dirErr}
+		}
+		mkdirOnce.Do(func() {
+			if err := os.MkdirAll(dir, 0700); err != nil {
+				log.Printf("can't create user cache dir: %v", err)
+			}
+		})
+		return os.WriteFile(filepath.Join(dir, name), b, 0600)
+	}
+
+	// Read and store cached data.
+	// …
+	_ = getCache
+	_ = putCache
+
+	// Output:
+}
+
+func ExampleUserConfigDir() {
+	dir, dirErr := os.UserConfigDir()
+
+	var (
+		configPath string
+		origConfig []byte
+	)
+	if dirErr == nil {
+		configPath = filepath.Join(dir, "ExampleUserConfigDir", "example.conf")
+		var err error
+		origConfig, err = os.ReadFile(configPath)
+		if err != nil && !os.IsNotExist(err) {
+			// The user has a config file but we couldn't read it.
+			// Report the error instead of ignoring their configuration.
+			log.Fatal(err)
+		}
+	}
+
+	// Use and perhaps make changes to the config.
+	config := bytes.Clone(origConfig)
+	// …
+
+	// Save changes.
+	if !bytes.Equal(config, origConfig) {
+		if configPath == "" {
+			log.Printf("not saving config changes: %v", dirErr)
+		} else {
+			err := os.MkdirAll(filepath.Dir(configPath), 0700)
+			if err == nil {
+				err = os.WriteFile(configPath, config, 0600)
+			}
+			if err != nil {
+				log.Printf("error saving config changes: %v", err)
+			}
+		}
+	}
+
+	// Output:
+}
diff --git a/src/os/exec/dot_test.go b/src/os/exec/dot_test.go
index 66c92f7..ed4bad2 100644
--- a/src/os/exec/dot_test.go
+++ b/src/os/exec/dot_test.go
@@ -24,7 +24,7 @@
 
 func TestLookPath(t *testing.T) {
 	testenv.MustHaveExec(t)
-	// Not parallel: uses os.Chdir and t.Setenv.
+	// Not parallel: uses Chdir and Setenv.
 
 	tmpDir := filepath.Join(t.TempDir(), "testdir")
 	if err := os.Mkdir(tmpDir, 0777); err != nil {
@@ -38,18 +38,7 @@
 	if err := os.WriteFile(filepath.Join(tmpDir, executable), []byte{1, 2, 3}, 0777); err != nil {
 		t.Fatal(err)
 	}
-	cwd, err := os.Getwd()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := os.Chdir(cwd); err != nil {
-			panic(err)
-		}
-	}()
-	if err = os.Chdir(tmpDir); err != nil {
-		t.Fatal(err)
-	}
+	chdir(t, tmpDir)
 	t.Setenv("PWD", tmpDir)
 	t.Logf(". is %#q", tmpDir)
 
diff --git a/src/os/exec/exec.go b/src/os/exec/exec.go
index 138be29..c88ee7f 100644
--- a/src/os/exec/exec.go
+++ b/src/os/exec/exec.go
@@ -426,6 +426,23 @@
 		if err != nil {
 			cmd.Err = err
 		}
+	} else if runtime.GOOS == "windows" && filepath.IsAbs(name) {
+		// We may need to add a filename extension from PATHEXT
+		// or verify an extension that is already present.
+		// Since the path is absolute, its extension should be unambiguous
+		// and independent of cmd.Dir, and we can go ahead and update cmd.Path to
+		// reflect it.
+		//
+		// Note that we cannot add an extension here for relative paths, because
+		// cmd.Dir may be set after we return from this function and that may cause
+		// the command to resolve to a different extension.
+		lp, err := lookExtensions(name, "")
+		if lp != "" {
+			cmd.Path = lp
+		}
+		if err != nil {
+			cmd.Err = err
+		}
 	}
 	return cmd
 }
@@ -590,32 +607,6 @@
 	return c.Wait()
 }
 
-// lookExtensions finds windows executable by its dir and path.
-// It uses LookPath to try appropriate extensions.
-// lookExtensions does not search PATH, instead it converts `prog` into `.\prog`.
-func lookExtensions(path, dir string) (string, error) {
-	if filepath.Base(path) == path {
-		path = "." + string(filepath.Separator) + path
-	}
-	if dir == "" {
-		return LookPath(path)
-	}
-	if filepath.VolumeName(path) != "" {
-		return LookPath(path)
-	}
-	if len(path) > 1 && os.IsPathSeparator(path[0]) {
-		return LookPath(path)
-	}
-	dirandpath := filepath.Join(dir, path)
-	// We assume that LookPath will only add file extension.
-	lp, err := LookPath(dirandpath)
-	if err != nil {
-		return "", err
-	}
-	ext := strings.TrimPrefix(lp, dirandpath)
-	return path + ext, nil
-}
-
 // Start starts the specified command but does not wait for it to complete.
 //
 // If Start returns successfully, the c.Process field will be set.
@@ -649,12 +640,28 @@
 		}
 		return c.Err
 	}
-	if runtime.GOOS == "windows" {
-		lp, err := lookExtensions(c.Path, c.Dir)
+	lp := c.Path
+	if runtime.GOOS == "windows" && !filepath.IsAbs(c.Path) {
+		// If c.Path is relative, we had to wait until now
+		// to resolve it in case c.Dir was changed.
+		// (If it is absolute, we already resolved its extension in Command
+		// and shouldn't need to do so again.)
+		//
+		// Unfortunately, we cannot write the result back to c.Path because programs
+		// may assume that they can call Start concurrently with reading the path.
+		// (It is safe and non-racy to do so on Unix platforms, and users might not
+		// test with the race detector on all platforms;
+		// see https://go.dev/issue/62596.)
+		//
+		// So we will pass the fully resolved path to os.StartProcess, but leave
+		// c.Path as is: missing a bit of logging information seems less harmful
+		// than triggering a surprising data race, and if the user really cares
+		// about that bit of logging they can always use LookPath to resolve it.
+		var err error
+		lp, err = lookExtensions(c.Path, c.Dir)
 		if err != nil {
 			return err
 		}
-		c.Path = lp
 	}
 	if c.Cancel != nil && c.ctx == nil {
 		return errors.New("exec: command with a non-nil Cancel was not created with CommandContext")
@@ -690,7 +697,7 @@
 		return err
 	}
 
-	c.Process, err = os.StartProcess(c.Path, c.argv(), &os.ProcAttr{
+	c.Process, err = os.StartProcess(lp, c.argv(), &os.ProcAttr{
 		Dir:   c.Dir,
 		Files: childFiles,
 		Env:   env,
@@ -1124,7 +1131,7 @@
 // grow larger than w.N. It returns the un-appended suffix of p.
 func (w *prefixSuffixSaver) fill(dst *[]byte, p []byte) (pRemain []byte) {
 	if remain := w.N - len(*dst); remain > 0 {
-		add := minInt(len(p), remain)
+		add := min(len(p), remain)
 		*dst = append(*dst, p[:add]...)
 		p = p[add:]
 	}
@@ -1149,13 +1156,6 @@
 	return buf.Bytes()
 }
 
-func minInt(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
-
 // environ returns a best-effort copy of the environment in which the command
 // would be run as it is currently configured. If an error occurs in computing
 // the environment, it is returned alongside the best-effort copy.
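
The long comment in Start above is about keeping Cmd.Path stable while the command runs; the patch's own TestPathRace exercises that from user code, roughly like the sketch below ("go version" is just a convenient command assumed to be on PATH).

package main

import (
	"fmt"
	"log"
	"os/exec"
)

func main() {
	cmd := exec.Command("go", "version")

	done := make(chan struct{})
	go func() {
		defer close(done)
		out, err := cmd.CombinedOutput() // starts and waits for the command
		if err != nil {
			log.Println("run error:", err)
			return
		}
		fmt.Printf("%s", out)
	}()

	// Formatting the command concurrently with Start is intended to be safe,
	// which is why the resolved Windows extension is not written back to Path.
	fmt.Println("running:", cmd.String())
	<-done
}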
diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go
index 473f92b..71a0049 100644
--- a/src/os/exec/exec_test.go
+++ b/src/os/exec/exec_test.go
@@ -77,6 +77,21 @@
 	if os.Getenv("GO_EXEC_TEST_PID") == "" {
 		os.Setenv("GO_EXEC_TEST_PID", strconv.Itoa(pid))
 
+		if runtime.GOOS == "windows" {
+			// Normalize environment so that test behavior is consistent.
+			// (The behavior of LookPath varies depending on this variable.)
+			//
+			// Ideally we would test both with the variable set and with it cleared,
+			// but I (bcmills) am not sure that that's feasible: it may already be set
+			// in the Windows registry, and I'm not sure if it is possible to remove
+			// a registry variable in a program's environment.
+			//
+			// Per https://learn.microsoft.com/en-us/windows/win32/api/processenv/nf-processenv-needcurrentdirectoryforexepathw#remarks,
+			// “the existence of the NoDefaultCurrentDirectoryInExePath environment
+			// variable is checked, and not its value.”
+			os.Setenv("NoDefaultCurrentDirectoryInExePath", "TRUE")
+		}
+
 		code := m.Run()
 		if code == 0 && flag.Lookup("test.run").Value.String() == "" && flag.Lookup("test.list").Value.String() == "" {
 			for cmd := range helperCommands {
@@ -180,6 +195,28 @@
 	sync.Once
 }
 
+func chdir(t *testing.T, dir string) {
+	t.Helper()
+
+	prev, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := os.Chdir(dir); err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("Chdir(%#q)", dir)
+
+	t.Cleanup(func() {
+		if err := os.Chdir(prev); err != nil {
+			// Couldn't chdir back to the original working directory.
+			// panic instead of t.Fatal so that we don't run other tests
+			// in an unexpected location.
+			panic("couldn't restore working directory: " + err.Error())
+		}
+	})
+}
+
 var helperCommandUsed sync.Map
 
 var helperCommands = map[string]func(...string){
@@ -1782,3 +1819,19 @@
 	cancel()
 	hangs.Wait()
 }
+
+// TestPathRace tests that [Cmd.String] can be called concurrently
+// with [Cmd.Start].
+func TestPathRace(t *testing.T) {
+	cmd := helperCommand(t, "exit", "0")
+
+	done := make(chan struct{})
+	go func() {
+		out, err := cmd.CombinedOutput()
+		t.Logf("%v: %v\n%s", cmd, err, out)
+		close(done)
+	}()
+
+	t.Logf("running in background: %v", cmd)
+	<-done
+}
diff --git a/src/os/exec/lp_linux_test.go b/src/os/exec/lp_linux_test.go
index 60cb13e..a7f9aa2 100644
--- a/src/os/exec/lp_linux_test.go
+++ b/src/os/exec/lp_linux_test.go
@@ -50,7 +50,7 @@
 	// Check that it works as expected.
 	_, err = exec.LookPath(path)
 	if err != nil {
-		t.Fatalf("findExecutable: got %v, want nil", err)
+		t.Fatalf("LookPath: got %v, want nil", err)
 	}
 
 	for {
diff --git a/src/os/exec/lp_plan9.go b/src/os/exec/lp_plan9.go
index 9344b14..dffdbac 100644
--- a/src/os/exec/lp_plan9.go
+++ b/src/os/exec/lp_plan9.go
@@ -64,3 +64,9 @@
 	}
 	return "", &Error{file, ErrNotFound}
 }
+
+// lookExtensions is a no-op on non-Windows platforms, since
+// they do not restrict executables to specific extensions.
+func lookExtensions(path, dir string) (string, error) {
+	return path, nil
+}
diff --git a/src/os/exec/lp_unix.go b/src/os/exec/lp_unix.go
index fd2c6ef..3787132 100644
--- a/src/os/exec/lp_unix.go
+++ b/src/os/exec/lp_unix.go
@@ -80,3 +80,9 @@
 	}
 	return "", &Error{file, ErrNotFound}
 }
+
+// lookExtensions is a no-op on non-Windows platforms, since
+// they do not restrict executables to specific extensions.
+func lookExtensions(path, dir string) (string, error) {
+	return path, nil
+}
diff --git a/src/os/exec/lp_unix_test.go b/src/os/exec/lp_unix_test.go
index 181b1f0..1503dda 100644
--- a/src/os/exec/lp_unix_test.go
+++ b/src/os/exec/lp_unix_test.go
@@ -4,30 +4,19 @@
 
 //go:build unix
 
-package exec
+package exec_test
 
 import (
 	"os"
+	"os/exec"
 	"testing"
 )
 
 func TestLookPathUnixEmptyPath(t *testing.T) {
-	// Not parallel: uses os.Chdir.
+	// Not parallel: uses Chdir and Setenv.
 
-	tmp, err := os.MkdirTemp("", "TestLookPathUnixEmptyPath")
-	if err != nil {
-		t.Fatal("TempDir failed: ", err)
-	}
-	defer os.RemoveAll(tmp)
-	wd, err := os.Getwd()
-	if err != nil {
-		t.Fatal("Getwd failed: ", err)
-	}
-	err = os.Chdir(tmp)
-	if err != nil {
-		t.Fatal("Chdir failed: ", err)
-	}
-	defer os.Chdir(wd)
+	tmp := t.TempDir()
+	chdir(t, tmp)
 
 	f, err := os.OpenFile("exec_me", os.O_CREATE|os.O_EXCL, 0700)
 	if err != nil {
@@ -40,7 +29,7 @@
 
 	t.Setenv("PATH", "")
 
-	path, err := LookPath("exec_me")
+	path, err := exec.LookPath("exec_me")
 	if err == nil {
 		t.Fatal("LookPath found exec_me in empty $PATH")
 	}
diff --git a/src/os/exec/lp_wasm.go b/src/os/exec/lp_wasm.go
index f2c8e9c..3c81904 100644
--- a/src/os/exec/lp_wasm.go
+++ b/src/os/exec/lp_wasm.go
@@ -21,3 +21,9 @@
 	// Wasm can not execute processes, so act as if there are no executables at all.
 	return "", &Error{file, ErrNotFound}
 }
+
+// lookExtensions is a no-op on non-Windows platforms, since
+// they do not restrict executables to specific extensions.
+func lookExtensions(path, dir string) (string, error) {
+	return path, nil
+}
diff --git a/src/os/exec/lp_windows.go b/src/os/exec/lp_windows.go
index 066d38d..698a97c 100644
--- a/src/os/exec/lp_windows.go
+++ b/src/os/exec/lp_windows.go
@@ -43,13 +43,18 @@
 		if chkStat(file) == nil {
 			return file, nil
 		}
+		// Keep checking exts below, so that programs with weird names
+		// like "foo.bat.exe" will resolve instead of failing.
 	}
 	for _, e := range exts {
 		if f := file + e; chkStat(f) == nil {
 			return f, nil
 		}
 	}
-	return "", fs.ErrNotExist
+	if hasExt(file) {
+		return "", fs.ErrNotExist
+	}
+	return "", ErrNotFound
 }
 
 // LookPath searches for an executable named file in the
@@ -63,6 +68,51 @@
 // As of Go 1.19, LookPath will instead return that path along with an error satisfying
 // errors.Is(err, ErrDot). See the package documentation for more details.
 func LookPath(file string) (string, error) {
+	return lookPath(file, pathExt())
+}
+
+// lookExtensions finds a Windows executable by its dir and path.
+// It uses LookPath to try appropriate extensions.
+// lookExtensions does not search PATH; instead it converts `prog` into `.\prog`.
+//
+// If the path already has an extension found in PATHEXT,
+// lookExtensions returns it directly without searching
+// for additional extensions. For example,
+// "C:\foo\example.com" would be returned as-is even if the
+// program is actually "C:\foo\example.com.exe".
+func lookExtensions(path, dir string) (string, error) {
+	if filepath.Base(path) == path {
+		path = "." + string(filepath.Separator) + path
+	}
+	exts := pathExt()
+	if ext := filepath.Ext(path); ext != "" {
+		for _, e := range exts {
+			if strings.EqualFold(ext, e) {
+				// Assume that path has already been resolved.
+				return path, nil
+			}
+		}
+	}
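+	// If dir is empty, or the path is rooted or carries an explicit volume
+	// name, it does not need to be joined with dir before the lookup.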
+	if dir == "" {
+		return lookPath(path, exts)
+	}
+	if filepath.VolumeName(path) != "" {
+		return lookPath(path, exts)
+	}
+	if len(path) > 1 && os.IsPathSeparator(path[0]) {
+		return lookPath(path, exts)
+	}
+	dirandpath := filepath.Join(dir, path)
+	// We assume that LookPath will only add a file extension.
+	lp, err := lookPath(dirandpath, exts)
+	if err != nil {
+		return "", err
+	}
+	ext := strings.TrimPrefix(lp, dirandpath)
+	return path + ext, nil
+}
+
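+// pathExt returns the executable extensions to try, taken from the PATHEXT
+// environment variable when it is set and non-empty, or a default list
+// otherwise.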
+func pathExt() []string {
 	var exts []string
 	x := os.Getenv(`PATHEXT`)
 	if x != "" {
@@ -78,7 +128,11 @@
 	} else {
 		exts = []string{".com", ".exe", ".bat", ".cmd"}
 	}
+	return exts
+}
 
+// lookPath implements LookPath for the given PATHEXT list.
+func lookPath(file string, exts []string) (string, error) {
 	if strings.ContainsAny(file, `:\/`) {
 		f, err := findExecutable(file, exts)
 		if err == nil {
@@ -112,6 +166,12 @@
 
 	path := os.Getenv("path")
 	for _, dir := range filepath.SplitList(path) {
+		if dir == "" {
+			// Skip empty entries, consistent with what PowerShell does.
+			// (See https://go.dev/issue/61493#issuecomment-1649724826.)
+			continue
+		}
+
 		if f, err := findExecutable(filepath.Join(dir, file), exts); err == nil {
 			if dotErr != nil {
 				// https://go.dev/issue/53536: if we resolved a relative path implicitly,
@@ -130,7 +190,14 @@
 
 			if !filepath.IsAbs(f) {
 				if execerrdot.Value() != "0" {
-					return f, &Error{file, ErrDot}
+					// If this is the same relative path that we already found,
+					// dotErr is non-nil and we already checked it above.
+					// Otherwise, record this path as the one to which we must resolve,
+					// with or without a dotErr.
+					if dotErr == nil {
+						dotf, dotErr = f, &Error{file, ErrDot}
+					}
+					continue
 				}
 				execerrdot.IncNonDefault()
 			}
diff --git a/src/os/exec/lp_windows_test.go b/src/os/exec/lp_windows_test.go
index 4d85a5f..a92a297 100644
--- a/src/os/exec/lp_windows_test.go
+++ b/src/os/exec/lp_windows_test.go
@@ -12,496 +12,484 @@
 	"fmt"
 	"internal/testenv"
 	"io"
+	"io/fs"
 	"os"
 	"os/exec"
 	"path/filepath"
-	"strconv"
+	"slices"
 	"strings"
 	"testing"
 )
 
 func init() {
-	registerHelperCommand("exec", cmdExec)
-	registerHelperCommand("lookpath", cmdLookPath)
+	registerHelperCommand("printpath", cmdPrintPath)
 }
 
-func cmdLookPath(args ...string) {
-	p, err := exec.LookPath(args[0])
+func cmdPrintPath(args ...string) {
+	exe, err := os.Executable()
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "LookPath failed: %v\n", err)
+		fmt.Fprintf(os.Stderr, "Executable: %v\n", err)
 		os.Exit(1)
 	}
-	fmt.Print(p)
+	fmt.Println(exe)
 }
 
-func cmdExec(args ...string) {
-	cmd := exec.Command(args[1])
-	cmd.Dir = args[0]
-	if errors.Is(cmd.Err, exec.ErrDot) {
-		cmd.Err = nil
+// makePATH returns a PATH variable referring to the
+// given directories relative to a root directory.
+//
+// The empty string results in an empty entry.
+// Paths beginning with . are kept as relative entries.
+func makePATH(root string, dirs []string) string {
+	paths := make([]string, 0, len(dirs))
+	for _, d := range dirs {
+		switch {
+		case d == "":
+			paths = append(paths, "")
+		case d == "." || (len(d) >= 2 && d[0] == '.' && os.IsPathSeparator(d[1])):
+			paths = append(paths, filepath.Clean(d))
+		default:
+			paths = append(paths, filepath.Join(root, d))
+		}
 	}
-	output, err := cmd.CombinedOutput()
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Child: %s %s", err, string(output))
-		os.Exit(1)
-	}
-	fmt.Printf("%s", string(output))
+	return strings.Join(paths, string(os.PathListSeparator))
 }
 
-func installExe(t *testing.T, dest, src string) {
-	fsrc, err := os.Open(src)
-	if err != nil {
-		t.Fatal("os.Open failed: ", err)
-	}
-	defer fsrc.Close()
-	fdest, err := os.Create(dest)
-	if err != nil {
-		t.Fatal("os.Create failed: ", err)
-	}
-	defer fdest.Close()
-	_, err = io.Copy(fdest, fsrc)
-	if err != nil {
-		t.Fatal("io.Copy failed: ", err)
+// installProgs creates executable files (or symlinks to executable files) at
+// multiple destination paths. It uses root as a prefix for all destination files.
+func installProgs(t *testing.T, root string, files []string) {
+	for _, f := range files {
+		dstPath := filepath.Join(root, f)
+
+		dir := filepath.Dir(dstPath)
+		if err := os.MkdirAll(dir, 0755); err != nil {
+			t.Fatal(err)
+		}
+
+		if os.IsPathSeparator(f[len(f)-1]) {
+			continue // directory and PATH entry only.
+		}
+		if strings.EqualFold(filepath.Ext(f), ".bat") {
+			installBat(t, dstPath)
+		} else {
+			installExe(t, dstPath)
+		}
 	}
 }
 
-func installBat(t *testing.T, dest string) {
-	f, err := os.Create(dest)
+// installExe installs a copy of the test executable
+// at the given location, creating directories as needed.
+//
+// (We use a copy instead of just a symlink to ensure that os.Executable
+// always reports an unambiguous path, regardless of how it is implemented.)
+func installExe(t *testing.T, dstPath string) {
+	src, err := os.Open(exePath(t))
 	if err != nil {
-		t.Fatalf("failed to create batch file: %v", err)
+		t.Fatal(err)
 	}
-	defer f.Close()
-	fmt.Fprintf(f, "@echo %s\n", dest)
+	defer src.Close()
+
+	dst, err := os.OpenFile(dstPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o777)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := dst.Close(); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	_, err = io.Copy(dst, src)
+	if err != nil {
+		t.Fatal(err)
+	}
 }
 
-func installProg(t *testing.T, dest, srcExe string) {
-	err := os.MkdirAll(filepath.Dir(dest), 0700)
+// installBat creates a batch file at dstPath that prints its own
+// path when run.
+func installBat(t *testing.T, dstPath string) {
+	dst, err := os.OpenFile(dstPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o777)
 	if err != nil {
-		t.Fatal("os.MkdirAll failed: ", err)
+		t.Fatal(err)
 	}
-	if strings.ToLower(filepath.Ext(dest)) == ".bat" {
-		installBat(t, dest)
-		return
+	defer func() {
+		if err := dst.Close(); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	if _, err := fmt.Fprintf(dst, "@echo %s\r\n", dstPath); err != nil {
+		t.Fatal(err)
 	}
-	installExe(t, dest, srcExe)
 }
 
 type lookPathTest struct {
-	rootDir   string
-	PATH      string
-	PATHEXT   string
-	files     []string
-	searchFor string
-	fails     bool // test is expected to fail
-}
-
-func (test lookPathTest) runProg(t *testing.T, env []string, cmd *exec.Cmd) (string, error) {
-	cmd.Env = env
-	cmd.Dir = test.rootDir
-	args := append([]string(nil), cmd.Args...)
-	args[0] = filepath.Base(args[0])
-	cmdText := fmt.Sprintf("%q command", strings.Join(args, " "))
-	out, err := cmd.CombinedOutput()
-	if (err != nil) != test.fails {
-		if test.fails {
-			t.Fatalf("test=%+v: %s succeeded, but expected to fail", test, cmdText)
-		}
-		t.Fatalf("test=%+v: %s failed, but expected to succeed: %v - %v", test, cmdText, err, string(out))
-	}
-	if err != nil {
-		return "", fmt.Errorf("test=%+v: %s failed: %v - %v", test, cmdText, err, string(out))
-	}
-	// normalise program output
-	p := string(out)
-	// trim terminating \r and \n that batch file outputs
-	for len(p) > 0 && (p[len(p)-1] == '\n' || p[len(p)-1] == '\r') {
-		p = p[:len(p)-1]
-	}
-	if !filepath.IsAbs(p) {
-		return p, nil
-	}
-	if p[:len(test.rootDir)] != test.rootDir {
-		t.Fatalf("test=%+v: %s output is wrong: %q must have %q prefix", test, cmdText, p, test.rootDir)
-	}
-	return p[len(test.rootDir)+1:], nil
-}
-
-func updateEnv(env []string, name, value string) []string {
-	for i, e := range env {
-		if strings.HasPrefix(strings.ToUpper(e), name+"=") {
-			env[i] = name + "=" + value
-			return env
-		}
-	}
-	return append(env, name+"="+value)
-}
-
-func createEnv(dir, PATH, PATHEXT string) []string {
-	env := os.Environ()
-	env = updateEnv(env, "PATHEXT", PATHEXT)
-	// Add dir in front of every directory in the PATH.
-	dirs := filepath.SplitList(PATH)
-	for i := range dirs {
-		dirs[i] = filepath.Join(dir, dirs[i])
-	}
-	path := strings.Join(dirs, ";")
-	env = updateEnv(env, "PATH", os.Getenv("SystemRoot")+"/System32;"+path)
-	return env
-}
-
-// createFiles copies srcPath file into multiply files.
-// It uses dir as prefix for all destination files.
-func createFiles(t *testing.T, dir string, files []string, srcPath string) {
-	for _, f := range files {
-		installProg(t, filepath.Join(dir, f), srcPath)
-	}
-}
-
-func (test lookPathTest) run(t *testing.T, tmpdir, printpathExe string) {
-	test.rootDir = tmpdir
-	createFiles(t, test.rootDir, test.files, printpathExe)
-	env := createEnv(test.rootDir, test.PATH, test.PATHEXT)
-	// Run "cmd.exe /c test.searchFor" with new environment and
-	// work directory set. All candidates are copies of printpath.exe.
-	// These will output their program paths when run.
-	should, errCmd := test.runProg(t, env, testenv.Command(t, "cmd", "/c", test.searchFor))
-	// Run the lookpath program with new environment and work directory set.
-	have, errLP := test.runProg(t, env, helperCommand(t, "lookpath", test.searchFor))
-	// Compare results.
-	if errCmd == nil && errLP == nil {
-		// both succeeded
-		if should != have {
-			t.Fatalf("test=%+v:\ncmd /c ran: %s\nlookpath found: %s", test, should, have)
-		}
-		return
-	}
-	if errCmd != nil && errLP != nil {
-		// both failed -> continue
-		return
-	}
-	if errCmd != nil {
-		t.Fatal(errCmd)
-	}
-	if errLP != nil {
-		t.Fatal(errLP)
-	}
+	name            string
+	PATHEXT         string // empty to use default
+	files           []string
+	PATH            []string // if nil, use all parent directories from files
+	searchFor       string
+	want            string
+	wantErr         error
+	skipCmdExeCheck bool // if true, do not check want against the behavior of cmd.exe
 }
 
 var lookPathTests = []lookPathTest{
 	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
+		name:      "first match",
 		files:     []string{`p1\a.exe`, `p2\a.exe`, `p2\a`},
 		searchFor: `a`,
+		want:      `p1\a.exe`,
 	},
 	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1.dir;p2.dir`,
+		name:      "dirs with extensions",
 		files:     []string{`p1.dir\a`, `p2.dir\a.exe`},
 		searchFor: `a`,
+		want:      `p2.dir\a.exe`,
 	},
 	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
+		name:      "first with extension",
 		files:     []string{`p1\a.exe`, `p2\a.exe`},
 		searchFor: `a.exe`,
+		want:      `p1\a.exe`,
 	},
 	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
+		name:      "specific name",
 		files:     []string{`p1\a.exe`, `p2\b.exe`},
 		searchFor: `b`,
+		want:      `p2\b.exe`,
 	},
 	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
+		name:      "no extension",
 		files:     []string{`p1\b`, `p2\a`},
 		searchFor: `a`,
-		fails:     true, // TODO(brainman): do not know why this fails
+		wantErr:   exec.ErrNotFound,
 	},
-	// If the command name specifies a path, the shell searches
-	// the specified path for an executable file matching
-	// the command name. If a match is found, the external
-	// command (the executable file) executes.
 	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
+		name:      "directory, no extension",
 		files:     []string{`p1\a.exe`, `p2\a.exe`},
 		searchFor: `p2\a`,
+		want:      `p2\a.exe`,
 	},
-	// If the command name specifies a path, the shell searches
-	// the specified path for an executable file matching the command
-	// name. ... If no match is found, the shell reports an error
-	// and command processing completes.
 	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
-		files:     []string{`p1\b.exe`, `p2\a.exe`},
-		searchFor: `p2\b`,
-		fails:     true,
-	},
-	// If the command name does not specify a path, the shell
-	// searches the current directory for an executable file
-	// matching the command name. If a match is found, the external
-	// command (the executable file) executes.
-	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
-		files:     []string{`a`, `p1\a.exe`, `p2\a.exe`},
-		searchFor: `a`,
-	},
-	// The shell now searches each directory specified by the
-	// PATH environment variable, in the order listed, for an
-	// executable file matching the command name. If a match
-	// is found, the external command (the executable file) executes.
-	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
-		files:     []string{`p1\a.exe`, `p2\a.exe`},
-		searchFor: `a`,
-	},
-	// The shell now searches each directory specified by the
-	// PATH environment variable, in the order listed, for an
-	// executable file matching the command name. If no match
-	// is found, the shell reports an error and command processing
-	// completes.
-	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
+		name:      "no match",
 		files:     []string{`p1\a.exe`, `p2\a.exe`},
 		searchFor: `b`,
-		fails:     true,
-	},
-	// If the command name includes a file extension, the shell
-	// searches each directory for the exact file name specified
-	// by the command name.
-	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
-		files:     []string{`p1\a.exe`, `p2\a.exe`},
-		searchFor: `a.exe`,
+		wantErr:   exec.ErrNotFound,
 	},
 	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
+		name:      "no match with dir",
+		files:     []string{`p1\b.exe`, `p2\a.exe`},
+		searchFor: `p2\b`,
+		wantErr:   exec.ErrNotFound,
+	},
+	{
+		name:      "extensionless file in CWD ignored",
+		files:     []string{`a`, `p1\a.exe`, `p2\a.exe`},
+		searchFor: `a`,
+		want:      `p1\a.exe`,
+	},
+	{
+		name:      "extensionless file in PATH ignored",
+		files:     []string{`p1\a`, `p2\a.exe`},
+		searchFor: `a`,
+		want:      `p2\a.exe`,
+	},
+	{
+		name:      "specific extension",
+		files:     []string{`p1\a.exe`, `p2\a.bat`},
+		searchFor: `a.bat`,
+		want:      `p2\a.bat`,
+	},
+	{
+		name:      "mismatched extension",
 		files:     []string{`p1\a.exe`, `p2\a.exe`},
 		searchFor: `a.com`,
-		fails:     true, // includes extension and not exact file name match
+		wantErr:   exec.ErrNotFound,
 	},
 	{
-		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1`,
+		name:      "doubled extension",
 		files:     []string{`p1\a.exe.exe`},
 		searchFor: `a.exe`,
+		want:      `p1\a.exe.exe`,
 	},
 	{
+		name:      "extension not in PATHEXT",
 		PATHEXT:   `.COM;.BAT`,
-		PATH:      `p1;p2`,
 		files:     []string{`p1\a.exe`, `p2\a.exe`},
 		searchFor: `a.exe`,
+		want:      `p1\a.exe`,
 	},
-	// If the command name does not include a file extension, the shell
-	// adds the extensions listed in the PATHEXT environment variable,
-	// one by one, and searches the directory for that file name. Note
-	// that the shell tries all possible file extensions in a specific
-	// directory before moving on to search the next directory
-	// (if there is one).
 	{
+		name:      "first allowed by PATHEXT",
 		PATHEXT:   `.COM;.EXE`,
-		PATH:      `p1;p2`,
 		files:     []string{`p1\a.bat`, `p2\a.exe`},
 		searchFor: `a`,
+		want:      `p2\a.exe`,
 	},
 	{
+		name:      "first directory containing a PATHEXT match",
 		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
 		files:     []string{`p1\a.bat`, `p2\a.exe`},
 		searchFor: `a`,
+		want:      `p1\a.bat`,
 	},
 	{
+		name:      "first PATHEXT entry",
 		PATHEXT:   `.COM;.EXE;.BAT`,
-		PATH:      `p1;p2`,
 		files:     []string{`p1\a.bat`, `p1\a.exe`, `p2\a.bat`, `p2\a.exe`},
 		searchFor: `a`,
+		want:      `p1\a.exe`,
 	},
 	{
-		PATHEXT:   `.COM`,
-		PATH:      `p1;p2`,
-		files:     []string{`p1\a.bat`, `p2\a.exe`},
+		name:      "ignore dir with PATHEXT extension",
+		files:     []string{`a.exe\`},
 		searchFor: `a`,
-		fails:     true, // tried all extensions in PATHEXT, but none matches
+		wantErr:   exec.ErrNotFound,
+	},
+	{
+		name:      "ignore empty PATH entry",
+		files:     []string{`a.bat`, `p\a.bat`},
+		PATH:      []string{`p`},
+		searchFor: `a`,
+		want:      `p\a.bat`,
+		// If cmd.exe is too old it might not respect NoDefaultCurrentDirectoryInExePath,
+		// so skip that check.
+		skipCmdExeCheck: true,
+	},
+	{
+		name:      "return ErrDot if found by a different absolute path",
+		files:     []string{`p1\a.bat`, `p2\a.bat`},
+		PATH:      []string{`.\p1`, `p2`},
+		searchFor: `a`,
+		want:      `p1\a.bat`,
+		wantErr:   exec.ErrDot,
+	},
+	{
+		name:      "suppress ErrDot if also found in absolute path",
+		files:     []string{`p1\a.bat`, `p2\a.bat`},
+		PATH:      []string{`.\p1`, `p1`, `p2`},
+		searchFor: `a`,
+		want:      `p1\a.bat`,
 	},
 }
 
 func TestLookPathWindows(t *testing.T) {
-	if testing.Short() {
-		maySkipHelperCommand("lookpath")
-		t.Skipf("skipping test in short mode that would build a helper binary")
+	// Not parallel: uses Chdir and Setenv.
+
+	// We are using the "printpath" command mode to test exec.Command here,
+	// so we won't be calling helperCommand to resolve it.
+	// That may cause it to appear to be unused.
+	maySkipHelperCommand("printpath")
+
+	// Before we begin, find the absolute path to cmd.exe.
+	// In non-short mode, we will use it to check the ground truth
+	// of the test's "want" field.
+	cmdExe, err := exec.LookPath("cmd")
+	if err != nil {
+		t.Fatal(err)
 	}
-	t.Parallel()
 
-	tmp := t.TempDir()
-	printpathExe := buildPrintPathExe(t, tmp)
-
-	// Run all tests.
-	for i, test := range lookPathTests {
-		i, test := i, test
-		t.Run(fmt.Sprint(i), func(t *testing.T) {
-			t.Parallel()
-
-			dir := filepath.Join(tmp, "d"+strconv.Itoa(i))
-			err := os.Mkdir(dir, 0700)
-			if err != nil {
-				t.Fatal("Mkdir failed: ", err)
+	for _, tt := range lookPathTests {
+		t.Run(tt.name, func(t *testing.T) {
+			if tt.want == "" && tt.wantErr == nil {
+				t.Fatalf("test must specify either want or wantErr")
 			}
-			test.run(t, dir, printpathExe)
+
+			root := t.TempDir()
+			installProgs(t, root, tt.files)
+
+			if tt.PATHEXT != "" {
+				t.Setenv("PATHEXT", tt.PATHEXT)
+				t.Logf("set PATHEXT=%s", tt.PATHEXT)
+			}
+
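+			// Construct PATH: by default, use the parent directory of every
+			// installed file (in order, without duplicates); otherwise build it
+			// from the explicit tt.PATH entries via makePATH.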
+			var pathVar string
+			if tt.PATH == nil {
+				paths := make([]string, 0, len(tt.files))
+				for _, f := range tt.files {
+					dir := filepath.Join(root, filepath.Dir(f))
+					if !slices.Contains(paths, dir) {
+						paths = append(paths, dir)
+					}
+				}
+				pathVar = strings.Join(paths, string(os.PathListSeparator))
+			} else {
+				pathVar = makePATH(root, tt.PATH)
+			}
+			t.Setenv("PATH", pathVar)
+			t.Logf("set PATH=%s", pathVar)
+
+			chdir(t, root)
+
+			if !testing.Short() && !(tt.skipCmdExeCheck || errors.Is(tt.wantErr, exec.ErrDot)) {
+				// Check that cmd.exe, which is our source of ground truth,
+				// agrees that our test case is correct.
+				cmd := testenv.Command(t, cmdExe, "/c", tt.searchFor, "printpath")
+				out, err := cmd.Output()
+				if err == nil {
+					gotAbs := strings.TrimSpace(string(out))
+					wantAbs := ""
+					if tt.want != "" {
+						wantAbs = filepath.Join(root, tt.want)
+					}
+					if gotAbs != wantAbs {
+						// cmd.exe disagrees. Probably the test case is wrong?
+						t.Fatalf("%v\n\tresolved to %s\n\twant %s", cmd, gotAbs, wantAbs)
+					}
+				} else if tt.wantErr == nil {
+					if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+						t.Fatalf("%v: %v\n%s", cmd, err, ee.Stderr)
+					}
+					t.Fatalf("%v: %v", cmd, err)
+				}
+			}
+
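+			// LookPath may return either an absolute path (found via an absolute
+			// PATH entry) or a relative one (e.g. with ErrDot); normalize to a
+			// root-relative path before comparing against tt.want.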
+			got, err := exec.LookPath(tt.searchFor)
+			if filepath.IsAbs(got) {
+				got, err = filepath.Rel(root, got)
+				if err != nil {
+					t.Fatal(err)
+				}
+			}
+			if got != tt.want {
+				t.Errorf("LookPath(%#q) = %#q; want %#q", tt.searchFor, got, tt.want)
+			}
+			if !errors.Is(err, tt.wantErr) {
+				t.Errorf("LookPath(%#q): %v; want %v", tt.searchFor, err, tt.wantErr)
+			}
 		})
 	}
 }
 
 type commandTest struct {
-	PATH  string
-	files []string
-	dir   string
-	arg0  string
-	want  string
-	fails bool // test is expected to fail
-}
-
-func (test commandTest) isSuccess(rootDir, output string, err error) error {
-	if err != nil {
-		return fmt.Errorf("test=%+v: exec: %v %v", test, err, output)
-	}
-	path := output
-	if path[:len(rootDir)] != rootDir {
-		return fmt.Errorf("test=%+v: %q must have %q prefix", test, path, rootDir)
-	}
-	path = path[len(rootDir)+1:]
-	if path != test.want {
-		return fmt.Errorf("test=%+v: want %q, got %q", test, test.want, path)
-	}
-	return nil
-}
-
-func (test commandTest) runOne(t *testing.T, rootDir string, env []string, dir, arg0 string) {
-	cmd := helperCommand(t, "exec", dir, arg0)
-	cmd.Dir = rootDir
-	cmd.Env = env
-	output, err := cmd.CombinedOutput()
-	err = test.isSuccess(rootDir, string(output), err)
-	if (err != nil) != test.fails {
-		if test.fails {
-			t.Errorf("test=%+v: succeeded, but expected to fail", test)
-		} else {
-			t.Error(err)
-		}
-	}
-}
-
-func (test commandTest) run(t *testing.T, rootDir, printpathExe string) {
-	createFiles(t, rootDir, test.files, printpathExe)
-	PATHEXT := `.COM;.EXE;.BAT`
-	env := createEnv(rootDir, test.PATH, PATHEXT)
-	test.runOne(t, rootDir, env, test.dir, test.arg0)
+	name       string
+	PATH       []string
+	files      []string
+	dir        string
+	arg0       string
+	want       string
+	wantPath   string // the resolved c.Path, if different from want
+	wantErrDot bool
+	wantRunErr error
 }
 
 var commandTests = []commandTest{
 	// testing commands with no slash, like `a.exe`
 	{
-		// should find a.exe in current directory
-		files: []string{`a.exe`},
-		arg0:  `a.exe`,
-		want:  `a.exe`,
+		name:       "current directory",
+		files:      []string{`a.exe`},
+		PATH:       []string{"."},
+		arg0:       `a.exe`,
+		want:       `a.exe`,
+		wantErrDot: true,
 	},
 	{
-		// like above, but add PATH in attempt to break the test
-		PATH:  `p2;p`,
-		files: []string{`a.exe`, `p\a.exe`, `p2\a.exe`},
-		arg0:  `a.exe`,
-		want:  `a.exe`,
+		name:       "with extra PATH",
+		files:      []string{`a.exe`, `p\a.exe`, `p2\a.exe`},
+		PATH:       []string{".", "p2", "p"},
+		arg0:       `a.exe`,
+		want:       `a.exe`,
+		wantErrDot: true,
 	},
 	{
-		// like above, but use "a" instead of "a.exe" for command
-		PATH:  `p2;p`,
-		files: []string{`a.exe`, `p\a.exe`, `p2\a.exe`},
-		arg0:  `a`,
-		want:  `a.exe`,
+		name:       "with extra PATH and no extension",
+		files:      []string{`a.exe`, `p\a.exe`, `p2\a.exe`},
+		PATH:       []string{".", "p2", "p"},
+		arg0:       `a`,
+		want:       `a.exe`,
+		wantErrDot: true,
 	},
 	// testing commands with slash, like `.\a.exe`
 	{
-		// should find p\a.exe
+		name:  "with dir",
 		files: []string{`p\a.exe`},
+		PATH:  []string{"."},
 		arg0:  `p\a.exe`,
 		want:  `p\a.exe`,
 	},
 	{
-		// like above, but adding `.` in front of executable should still be OK
+		name:  "with explicit dot",
 		files: []string{`p\a.exe`},
+		PATH:  []string{"."},
 		arg0:  `.\p\a.exe`,
 		want:  `p\a.exe`,
 	},
 	{
-		// like above, but with PATH added in attempt to break it
-		PATH:  `p2`,
+		name:  "with irrelevant PATH",
 		files: []string{`p\a.exe`, `p2\a.exe`},
+		PATH:  []string{".", "p2"},
 		arg0:  `p\a.exe`,
 		want:  `p\a.exe`,
 	},
 	{
-		// like above, but make sure .exe is tried even for commands with slash
-		PATH:  `p2`,
+		name:  "with slash and no extension",
 		files: []string{`p\a.exe`, `p2\a.exe`},
+		PATH:  []string{".", "p2"},
 		arg0:  `p\a`,
 		want:  `p\a.exe`,
 	},
 	// tests commands, like `a.exe`, with c.Dir set
 	{
-		// should not find a.exe in p, because LookPath(`a.exe`) will fail
-		files: []string{`p\a.exe`},
-		dir:   `p`,
-		arg0:  `a.exe`,
-		want:  `p\a.exe`,
-		fails: true,
+		// should not find a.exe in p, because LookPath(`a.exe`) will fail when
+		// called by Command (before Dir is set), and that error is sticky.
+		name:       "not found before Dir",
+		files:      []string{`p\a.exe`},
+		PATH:       []string{"."},
+		dir:        `p`,
+		arg0:       `a.exe`,
+		want:       `p\a.exe`,
+		wantRunErr: exec.ErrNotFound,
 	},
 	{
-		// LookPath(`a.exe`) will find `.\a.exe`, but prefixing that with
+		// LookPath(`a.exe`) will resolve to `.\a.exe`, but prefixing that with
 		// dir `p\a.exe` will refer to a non-existent file
-		files: []string{`a.exe`, `p\not_important_file`},
-		dir:   `p`,
-		arg0:  `a.exe`,
-		want:  `a.exe`,
-		fails: true,
+		name:       "resolved before Dir",
+		files:      []string{`a.exe`, `p\not_important_file`},
+		PATH:       []string{"."},
+		dir:        `p`,
+		arg0:       `a.exe`,
+		want:       `a.exe`,
+		wantErrDot: true,
+		wantRunErr: fs.ErrNotExist,
 	},
 	{
 		// like above, but making test succeed by installing file
 		// in referred destination (so LookPath(`a.exe`) will still
 		// find `.\a.exe`, but we successfully execute `p\a.exe`)
-		files: []string{`a.exe`, `p\a.exe`},
-		dir:   `p`,
-		arg0:  `a.exe`,
-		want:  `p\a.exe`,
+		name:       "relative to Dir",
+		files:      []string{`a.exe`, `p\a.exe`},
+		PATH:       []string{"."},
+		dir:        `p`,
+		arg0:       `a.exe`,
+		want:       `p\a.exe`,
+		wantErrDot: true,
 	},
 	{
 		// like above, but add PATH in attempt to break the test
-		PATH:  `p2;p`,
-		files: []string{`a.exe`, `p\a.exe`, `p2\a.exe`},
-		dir:   `p`,
-		arg0:  `a.exe`,
-		want:  `p\a.exe`,
+		name:       "relative to Dir with extra PATH",
+		files:      []string{`a.exe`, `p\a.exe`, `p2\a.exe`},
+		PATH:       []string{".", "p2", "p"},
+		dir:        `p`,
+		arg0:       `a.exe`,
+		want:       `p\a.exe`,
+		wantErrDot: true,
 	},
 	{
 		// like above, but use "a" instead of "a.exe" for command
-		PATH:  `p2;p`,
-		files: []string{`a.exe`, `p\a.exe`, `p2\a.exe`},
-		dir:   `p`,
-		arg0:  `a`,
-		want:  `p\a.exe`,
+		name:       "relative to Dir with extra PATH and no extension",
+		files:      []string{`a.exe`, `p\a.exe`, `p2\a.exe`},
+		PATH:       []string{".", "p2", "p"},
+		dir:        `p`,
+		arg0:       `a`,
+		want:       `p\a.exe`,
+		wantErrDot: true,
 	},
 	{
-		// finds `a.exe` in the PATH regardless of dir set
-		// because LookPath returns full path in that case
-		PATH:  `p2;p`,
+		// finds `a.exe` in the PATH regardless of Dir because Command resolves the
+		// full path (using LookPath) before Dir is set.
+		name:  "from PATH with no match in Dir",
 		files: []string{`p\a.exe`, `p2\a.exe`},
+		PATH:  []string{".", "p2", "p"},
 		dir:   `p`,
 		arg0:  `a.exe`,
 		want:  `p2\a.exe`,
@@ -509,104 +497,141 @@
 	// tests commands, like `.\a.exe`, with c.Dir set
 	{
 		// should use dir when command is path, like ".\a.exe"
+		name:  "relative to Dir with explicit dot",
 		files: []string{`p\a.exe`},
+		PATH:  []string{"."},
 		dir:   `p`,
 		arg0:  `.\a.exe`,
 		want:  `p\a.exe`,
 	},
 	{
 		// like above, but with PATH added in attempt to break it
-		PATH:  `p2`,
+		name:  "relative to Dir with dot and extra PATH",
 		files: []string{`p\a.exe`, `p2\a.exe`},
+		PATH:  []string{".", "p2"},
 		dir:   `p`,
 		arg0:  `.\a.exe`,
 		want:  `p\a.exe`,
 	},
 	{
-		// like above, but make sure .exe is tried even for commands with slash
-		PATH:  `p2`,
+		// LookPath(".\a") will fail before Dir is set, and that error is sticky.
+		name:  "relative to Dir with dot and extra PATH and no extension",
 		files: []string{`p\a.exe`, `p2\a.exe`},
+		PATH:  []string{".", "p2"},
 		dir:   `p`,
 		arg0:  `.\a`,
 		want:  `p\a.exe`,
 	},
+	{
+		// LookPath(".\a") will fail before Dir is set, and that error is sticky.
+		name:  "relative to Dir with different extension",
+		files: []string{`a.exe`, `p\a.bat`},
+		PATH:  []string{"."},
+		dir:   `p`,
+		arg0:  `.\a`,
+		want:  `p\a.bat`,
+	},
 }
 
 func TestCommand(t *testing.T) {
-	if testing.Short() {
-		maySkipHelperCommand("exec")
-		t.Skipf("skipping test in short mode that would build a helper binary")
-	}
-	t.Parallel()
+	// Not parallel: uses Chdir and Setenv.
 
-	tmp := t.TempDir()
-	printpathExe := buildPrintPathExe(t, tmp)
+	// We are using the "printpath" command mode to test exec.Command here,
+	// so we won't be calling helperCommand to resolve it.
+	// That may cause it to appear to be unused.
+	maySkipHelperCommand("printpath")
 
-	// Run all tests.
-	for i, test := range commandTests {
-		i, test := i, test
-		t.Run(fmt.Sprint(i), func(t *testing.T) {
-			t.Parallel()
-
-			dir := filepath.Join(tmp, "d"+strconv.Itoa(i))
-			err := os.Mkdir(dir, 0700)
-			if err != nil {
-				t.Fatal("Mkdir failed: ", err)
+	for _, tt := range commandTests {
+		t.Run(tt.name, func(t *testing.T) {
+			if tt.PATH == nil {
+				t.Fatalf("test must specify PATH")
 			}
-			test.run(t, dir, printpathExe)
+
+			root := t.TempDir()
+			installProgs(t, root, tt.files)
+
+			pathVar := makePATH(root, tt.PATH)
+			t.Setenv("PATH", pathVar)
+			t.Logf("set PATH=%s", pathVar)
+
+			chdir(t, root)
+
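+			// exec.Command may resolve arg0 (and set cmd.Err) before cmd.Dir is
+			// assigned below; the commandTests comments rely on that ordering.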
+			cmd := exec.Command(tt.arg0, "printpath")
+			cmd.Dir = filepath.Join(root, tt.dir)
+			if tt.wantErrDot {
+				if errors.Is(cmd.Err, exec.ErrDot) {
+					cmd.Err = nil
+				} else {
+					t.Fatalf("cmd.Err = %v; want ErrDot", cmd.Err)
+				}
+			}
+
+			out, err := cmd.Output()
+			if err != nil {
+				if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+					t.Logf("%v: %v\n%s", cmd, err, ee.Stderr)
+				} else {
+					t.Logf("%v: %v", cmd, err)
+				}
+				if !errors.Is(err, tt.wantRunErr) {
+					t.Errorf("want %v", tt.wantRunErr)
+				}
+				return
+			}
+
+			got := strings.TrimSpace(string(out))
+			if filepath.IsAbs(got) {
+				got, err = filepath.Rel(root, got)
+				if err != nil {
+					t.Fatal(err)
+				}
+			}
+			if got != tt.want {
+				t.Errorf("\nran  %#q\nwant %#q", got, tt.want)
+			}
+
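+			// The expected cmd.Path depends on how arg0 was resolved: kept as-is
+			// when it contains a separator, a relative name when ErrDot was
+			// reported, and an absolute path otherwise.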
+			gotPath := cmd.Path
+			wantPath := tt.wantPath
+			if wantPath == "" {
+				if strings.Contains(tt.arg0, `\`) {
+					wantPath = tt.arg0
+				} else if tt.wantErrDot {
+					wantPath = strings.TrimPrefix(tt.want, tt.dir+`\`)
+				} else {
+					wantPath = filepath.Join(root, tt.want)
+				}
+			}
+			if gotPath != wantPath {
+				t.Errorf("\ncmd.Path = %#q\nwant       %#q", gotPath, wantPath)
+			}
 		})
 	}
 }
 
-// buildPrintPathExe creates a Go program that prints its own path.
-// dir is a temp directory where executable will be created.
-// The function returns full path to the created program.
-func buildPrintPathExe(t *testing.T, dir string) string {
-	const name = "printpath"
-	srcname := name + ".go"
-	err := os.WriteFile(filepath.Join(dir, srcname), []byte(printpathSrc), 0644)
-	if err != nil {
-		t.Fatalf("failed to create source: %v", err)
-	}
-	if err != nil {
-		t.Fatalf("failed to execute template: %v", err)
-	}
-	outname := name + ".exe"
-	cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", outname, srcname)
-	cmd.Dir = dir
+func TestAbsCommandWithDoubledExtension(t *testing.T) {
+	t.Parallel()
+
+	// We expect that ".com" is always included in PATHEXT, but it may also be
+	// found in the import path of a Go package. If it is at the root of the
+	// import path, the resulting executable may be named like "example.com.exe".
+	//
+	// Since "example.com" looks like a proper executable name, it is probably ok
+	// for exec.Command to try to run it directly without re-resolving it.
+	// However, exec.LookPath should try a little harder to figure it out.
+
+	comPath := filepath.Join(t.TempDir(), "example.com")
+	batPath := comPath + ".bat"
+	installBat(t, batPath)
+
+	cmd := exec.Command(comPath)
 	out, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("failed to build executable: %v - %v", err, string(out))
+	t.Logf("%v: %v\n%s", cmd, err, out)
+	if !errors.Is(err, fs.ErrNotExist) {
+		t.Errorf("Command(%#q).Run: %v\nwant fs.ErrNotExist", comPath, err)
 	}
-	return filepath.Join(dir, outname)
-}
 
-const printpathSrc = `
-package main
-
-import (
-	"os"
-	"syscall"
-	"unsafe"
-)
-
-func getMyName() (string, error) {
-	var sysproc = syscall.MustLoadDLL("kernel32.dll").MustFindProc("GetModuleFileNameW")
-	b := make([]uint16, syscall.MAX_PATH)
-	r, _, err := sysproc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
-	n := uint32(r)
-	if n == 0 {
-		return "", err
+	resolved, err := exec.LookPath(comPath)
+	if err != nil || resolved != batPath {
+		t.Fatalf("LookPath(%#q) = %v, %v; want %#q, <nil>", comPath, resolved, err, batPath)
 	}
-	return syscall.UTF16ToString(b[0:n]), nil
 }
-
-func main() {
-	path, err := getMyName()
-	if err != nil {
-		os.Stderr.Write([]byte("getMyName failed: " + err.Error() + "\n"))
-		os.Exit(1)
-	}
-	os.Stdout.Write([]byte(path))
-}
-`
diff --git a/src/os/exec_posix.go b/src/os/exec_posix.go
index a512d51..4f9ea08 100644
--- a/src/os/exec_posix.go
+++ b/src/os/exec_posix.go
@@ -105,7 +105,7 @@
 	case status.Exited():
 		code := status.ExitStatus()
 		if runtime.GOOS == "windows" && uint(code) >= 1<<16 { // windows uses large hex numbers
-			res = "exit status " + uitox(uint(code))
+			res = "exit status " + itoa.Uitox(uint(code))
 		} else { // unix systems use small decimal integers
 			res = "exit status " + itoa.Itoa(code) // unix
 		}
diff --git a/src/os/exec_unix.go b/src/os/exec_unix.go
index f9063b4..36b320d 100644
--- a/src/os/exec_unix.go
+++ b/src/os/exec_unix.go
@@ -48,9 +48,7 @@
 	if e != nil {
 		return nil, NewSyscallError("wait", e)
 	}
-	if pid1 != 0 {
-		p.setDone()
-	}
+	p.setDone()
 	ps = &ProcessState{
 		pid:    pid1,
 		status: status,
diff --git a/src/os/exec_unix_test.go b/src/os/exec_unix_test.go
index 2604519..88e1b63 100644
--- a/src/os/exec_unix_test.go
+++ b/src/os/exec_unix_test.go
@@ -19,7 +19,7 @@
 
 	p, err := StartProcess(testenv.GoToolPath(t), []string{"go"}, &ProcAttr{})
 	if err != nil {
-		t.Errorf("starting test process: %v", err)
+		t.Fatalf("starting test process: %v", err)
 	}
 	p.Wait()
 	if got := p.Signal(Kill); got != ErrProcessDone {
diff --git a/src/os/exec_windows.go b/src/os/exec_windows.go
index 239bed1..061a12b 100644
--- a/src/os/exec_windows.go
+++ b/src/os/exec_windows.go
@@ -35,12 +35,6 @@
 		return nil, NewSyscallError("GetProcessTimes", e)
 	}
 	p.setDone()
-	// NOTE(brainman): It seems that sometimes process is not dead
-	// when WaitForSingleObject returns. But we do not know any
-	// other way to wait for it. Sleeping for a while seems to do
-	// the trick sometimes.
-	// See https://golang.org/issue/25965 for details.
-	defer time.Sleep(5 * time.Millisecond)
 	defer p.Release()
 	return &ProcessState{p.Pid, syscall.WaitStatus{ExitCode: ec}, &u}, nil
 }
diff --git a/src/os/exec_windows_test.go b/src/os/exec_windows_test.go
new file mode 100644
index 0000000..f8ed4cd
--- /dev/null
+++ b/src/os/exec_windows_test.go
@@ -0,0 +1,83 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package os_test
+
+import (
+	"internal/testenv"
+	"io"
+	. "os"
+	"path/filepath"
+	"sync"
+	"testing"
+)
+
+func TestRemoveAllWithExecutedProcess(t *testing.T) {
+	// Regression test for golang.org/issue/25965.
+	if testing.Short() {
+		t.Skip("slow test; skipping")
+	}
+	testenv.MustHaveExec(t)
+
+	name, err := Executable()
+	if err != nil {
+		t.Fatal(err)
+	}
+	r, err := Open(name)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Close()
+	const n = 100
+	var execs [n]string
+	// First create n executables.
+	for i := 0; i < n; i++ {
+		// Rewind r.
+		if _, err := r.Seek(0, io.SeekStart); err != nil {
+			t.Fatal(err)
+		}
+		name := filepath.Join(t.TempDir(), "test.exe")
+		execs[i] = name
+		w, err := Create(name)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if _, err = io.Copy(w, r); err != nil {
+			w.Close()
+			t.Fatal(err)
+		}
+		if err := w.Sync(); err != nil {
+			w.Close()
+			t.Fatal(err)
+		}
+		if err = w.Close(); err != nil {
+			t.Fatal(err)
+		}
+	}
+	// Then run each executable and remove its directory.
+	// Run each executable in a separate goroutine to add some load
+	// and increase the chance of triggering the bug.
+	var wg sync.WaitGroup
+	wg.Add(n)
+	for i := 0; i < n; i++ {
+		go func(i int) {
+			defer wg.Done()
+			name := execs[i]
+			dir := filepath.Dir(name)
+			// Run test.exe without executing any test, just to make it do something.
+			cmd := testenv.Command(t, name, "-test.run=^$")
+			if err := cmd.Run(); err != nil {
+				t.Errorf("exec failed: %v", err)
+			}
+			// Remove dir and check that it doesn't return `ERROR_ACCESS_DENIED`.
+			err := RemoveAll(dir)
+			if err != nil {
+				t.Errorf("RemoveAll failed: %v", err)
+			}
+		}(i)
+	}
+	wg.Wait()
+}
diff --git a/src/os/executable_test.go b/src/os/executable_test.go
index c835bb4..98b72d7 100644
--- a/src/os/executable_test.go
+++ b/src/os/executable_test.go
@@ -30,7 +30,7 @@
 		t.Fatalf("filepath.Rel: %v", err)
 	}
 
-	cmd := testenv.Command(t, fn, "-test.run=XXXX")
+	cmd := testenv.Command(t, fn, "-test.run=^$")
 	// make child start with a relative program path
 	cmd.Dir = dir
 	cmd.Path = fn
diff --git a/src/os/export_linux_test.go b/src/os/export_linux_test.go
index 3fd5e61..942b48a 100644
--- a/src/os/export_linux_test.go
+++ b/src/os/export_linux_test.go
@@ -5,7 +5,8 @@
 package os
 
 var (
-	PollCopyFileRangeP = &pollCopyFileRange
-	PollSpliceFile     = &pollSplice
-	GetPollFDForTest   = getPollFD
+	PollCopyFileRangeP  = &pollCopyFileRange
+	PollSpliceFile      = &pollSplice
+	PollSendFile        = &pollSendFile
+	GetPollFDAndNetwork = getPollFDAndNetwork
 )
diff --git a/src/os/export_windows_test.go b/src/os/export_windows_test.go
index ff4f899..6e11888 100644
--- a/src/os/export_windows_test.go
+++ b/src/os/export_windows_test.go
@@ -7,8 +7,9 @@
 // Export for testing.
 
 var (
-	FixLongPath       = fixLongPath
-	CanUseLongPaths   = canUseLongPaths
-	NewConsoleFile    = newConsoleFile
-	CommandLineToArgv = commandLineToArgv
+	FixLongPath        = fixLongPath
+	CanUseLongPaths    = canUseLongPaths
+	NewConsoleFile     = newConsoleFile
+	CommandLineToArgv  = commandLineToArgv
+	AllowReadDirFileID = &allowReadDirFileID
 )
diff --git a/src/os/file.go b/src/os/file.go
index 7fd2f5d..090ffba 100644
--- a/src/os/file.go
+++ b/src/os/file.go
@@ -157,20 +157,26 @@
 	return n, f.wrapErr("write", e)
 }
 
-func genericReadFrom(f *File, r io.Reader) (int64, error) {
-	return io.Copy(fileWithoutReadFrom{f}, r)
+// noReadFrom can be embedded alongside another type to
+// hide the ReadFrom method of that other type.
+type noReadFrom struct{}
+
+// ReadFrom hides another ReadFrom method.
+// It should never be called.
+func (noReadFrom) ReadFrom(io.Reader) (int64, error) {
+	panic("can't happen")
 }
 
 // fileWithoutReadFrom implements all the methods of *File other
 // than ReadFrom. This is used to permit ReadFrom to call io.Copy
 // without leading to a recursive call to ReadFrom.
 type fileWithoutReadFrom struct {
+	noReadFrom
 	*File
 }
 
-// This ReadFrom method hides the *File ReadFrom method.
-func (fileWithoutReadFrom) ReadFrom(fileWithoutReadFrom) {
-	panic("unreachable")
+func genericReadFrom(f *File, r io.Reader) (int64, error) {
+	return io.Copy(fileWithoutReadFrom{File: f}, r)
 }
 
 // Write writes len(b) bytes from b to the File.
@@ -229,6 +235,40 @@
 	return
 }
 
+// WriteTo implements io.WriterTo.
+func (f *File) WriteTo(w io.Writer) (n int64, err error) {
+	if err := f.checkValid("read"); err != nil {
+		return 0, err
+	}
+	n, handled, e := f.writeTo(w)
+	if handled {
+		return n, f.wrapErr("read", e)
+	}
+	return genericWriteTo(f, w) // without wrapping
+}
+
+// noWriteTo can be embedded alongside another type to
+// hide the WriteTo method of that other type.
+type noWriteTo struct{}
+
+// WriteTo hides another WriteTo method.
+// It should never be called.
+func (noWriteTo) WriteTo(io.Writer) (int64, error) {
+	panic("can't happen")
+}
+
+// fileWithoutWriteTo implements all the methods of *File other
+// than WriteTo. This is used to permit WriteTo to call io.Copy
+// without leading to a recursive call to WriteTo.
+type fileWithoutWriteTo struct {
+	noWriteTo
+	*File
+}
+
+func genericWriteTo(f *File, w io.Writer) (int64, error) {
+	return io.Copy(w, fileWithoutWriteTo{File: f})
+}
+
 // Seek sets the offset for the next Read or Write on file to offset, interpreted
 // according to whence: 0 means relative to the origin of the file, 1 means
 // relative to the current offset, and 2 means relative to the end.
@@ -352,6 +392,15 @@
 	return rename(oldpath, newpath)
 }
 
+// Readlink returns the destination of the named symbolic link.
+// If there is an error, it will be of type *PathError.
+//
+// If the link destination is relative, Readlink returns the relative path
+// without resolving it to an absolute one.
+func Readlink(name string) (string, error) {
+	return readlink(name)
+}
+
 // Many functions in package syscall return a count of -1 instead of 0.
 // Using fixCount(call()) instead of call() corrects the count.
 func fixCount(n int, err error) (int, error) {
@@ -622,24 +671,12 @@
 	return dirFS(dir)
 }
 
-// containsAny reports whether any bytes in chars are within s.
-func containsAny(s, chars string) bool {
-	for i := 0; i < len(s); i++ {
-		for j := 0; j < len(chars); j++ {
-			if s[i] == chars[j] {
-				return true
-			}
-		}
-	}
-	return false
-}
-
 type dirFS string
 
 func (dir dirFS) Open(name string) (fs.File, error) {
 	fullname, err := dir.join(name)
 	if err != nil {
-		return nil, &PathError{Op: "stat", Path: name, Err: err}
+		return nil, &PathError{Op: "open", Path: name, Err: err}
 	}
 	f, err := Open(fullname)
 	if err != nil {
@@ -662,7 +699,15 @@
 	if err != nil {
 		return nil, &PathError{Op: "readfile", Path: name, Err: err}
 	}
-	return ReadFile(fullname)
+	b, err := ReadFile(fullname)
+	if err != nil {
+		if e, ok := err.(*PathError); ok {
+			// See comment in dirFS.Open.
+			e.Path = name
+		}
+		return nil, err
+	}
+	return b, nil
 }
 
 // ReadDir reads the named directory, returning all its directory entries sorted
@@ -672,7 +717,15 @@
 	if err != nil {
 		return nil, &PathError{Op: "readdir", Path: name, Err: err}
 	}
-	return ReadDir(fullname)
+	entries, err := ReadDir(fullname)
+	if err != nil {
+		if e, ok := err.(*PathError); ok {
+			// See comment in dirFS.Open.
+			e.Path = name
+		}
+		return nil, err
+	}
+	return entries, nil
 }
 
 func (dir dirFS) Stat(name string) (fs.FileInfo, error) {
@@ -737,10 +790,6 @@
 
 	data := make([]byte, 0, size)
 	for {
-		if len(data) >= cap(data) {
-			d := append(data[:cap(data)], 0)
-			data = d[:len(data)]
-		}
 		n, err := f.Read(data[len(data):cap(data)])
 		data = data[:len(data)+n]
 		if err != nil {
@@ -749,6 +798,11 @@
 			}
 			return data, err
 		}
+
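+		// The buffer is full but EOF has not been reached: append a single byte
+		// past capacity to force reallocation, then trim back to the original
+		// length so the extra capacity is available for the next Read.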
+		if len(data) >= cap(data) {
+			d := append(data[:cap(data)], 0)
+			data = d[:len(data)]
+		}
 	}
 }
 
diff --git a/src/os/file_plan9.go b/src/os/file_plan9.go
index 8336487..c0ee6b3 100644
--- a/src/os/file_plan9.go
+++ b/src/os/file_plan9.go
@@ -5,6 +5,7 @@
 package os
 
 import (
+	"internal/bytealg"
 	"internal/poll"
 	"io"
 	"runtime"
@@ -387,7 +388,7 @@
 }
 
 func rename(oldname, newname string) error {
-	dirname := oldname[:lastIndex(oldname, '/')+1]
+	dirname := oldname[:bytealg.LastIndexByteString(oldname, '/')+1]
 	if hasPrefix(newname, dirname) {
 		newname = newname[len(dirname):]
 	} else {
@@ -396,7 +397,7 @@
 
 	// If newname still contains slashes after removing the oldname
 	// prefix, the rename is cross-directory and must be rejected.
-	if lastIndex(newname, '/') >= 0 {
+	if bytealg.LastIndexByteString(newname, '/') >= 0 {
 		return &LinkError{"rename", oldname, newname, ErrInvalid}
 	}
 
@@ -504,9 +505,7 @@
 	return &LinkError{"symlink", oldname, newname, syscall.EPLAN9}
 }
 
-// Readlink returns the destination of the named symbolic link.
-// If there is an error, it will be of type *PathError.
-func Readlink(name string) (string, error) {
+func readlink(name string) (string, error) {
 	return "", &PathError{Op: "readlink", Path: name, Err: syscall.EPLAN9}
 }
 
@@ -543,7 +542,6 @@
 		dir = "/tmp"
 	}
 	return dir
-
 }
 
 // Chdir changes the current working directory to the file,
diff --git a/src/os/file_unix.go b/src/os/file_unix.go
index 533a484..a527b23 100644
--- a/src/os/file_unix.go
+++ b/src/os/file_unix.go
@@ -426,9 +426,7 @@
 	return nil
 }
 
-// Readlink returns the destination of the named symbolic link.
-// If there is an error, it will be of type *PathError.
-func Readlink(name string) (string, error) {
+func readlink(name string) (string, error) {
 	for len := 128; ; len *= 2 {
 		b := make([]byte, len)
 		var (
diff --git a/src/os/file_windows.go b/src/os/file_windows.go
index 8d77a63..8b04ed6 100644
--- a/src/os/file_windows.go
+++ b/src/os/file_windows.go
@@ -87,18 +87,6 @@
 	return newFile(h, name, "file")
 }
 
-// Auxiliary information if the File describes a directory
-type dirInfo struct {
-	h       syscall.Handle // search handle created with FindFirstFile
-	data    syscall.Win32finddata
-	path    string
-	isempty bool // set if FindFirstFile returns ERROR_FILE_NOT_FOUND
-}
-
-func (d *dirInfo) close() error {
-	return syscall.FindClose(d.h)
-}
-
 func epipecheck(file *File, e error) {
 }
 
@@ -106,63 +94,6 @@
 // On Unix-like systems, it is "/dev/null"; on Windows, "NUL".
 const DevNull = "NUL"
 
-func openDir(name string) (d *dirInfo, e error) {
-	var mask string
-
-	path := fixLongPath(name)
-
-	if len(path) == 2 && path[1] == ':' { // it is a drive letter, like C:
-		mask = path + `*`
-	} else if len(path) > 0 {
-		lc := path[len(path)-1]
-		if lc == '/' || lc == '\\' {
-			mask = path + `*`
-		} else {
-			mask = path + `\*`
-		}
-	} else {
-		mask = `\*`
-	}
-	maskp, e := syscall.UTF16PtrFromString(mask)
-	if e != nil {
-		return nil, e
-	}
-	d = new(dirInfo)
-	d.h, e = syscall.FindFirstFile(maskp, &d.data)
-	if e != nil {
-		// FindFirstFile returns ERROR_FILE_NOT_FOUND when
-		// no matching files can be found. Then, if directory
-		// exists, we should proceed.
-		// If FindFirstFile failed because name does not point
-		// to a directory, we should return ENOTDIR.
-		var fa syscall.Win32FileAttributeData
-		pathp, e1 := syscall.UTF16PtrFromString(path)
-		if e1 != nil {
-			return nil, e
-		}
-		e1 = syscall.GetFileAttributesEx(pathp, syscall.GetFileExInfoStandard, (*byte)(unsafe.Pointer(&fa)))
-		if e1 != nil {
-			return nil, e
-		}
-		if fa.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 {
-			return nil, syscall.ENOTDIR
-		}
-		if e != syscall.ERROR_FILE_NOT_FOUND {
-			return nil, e
-		}
-		d.isempty = true
-	}
-	d.path = path
-	if !isAbs(d.path) {
-		d.path, e = syscall.FullPath(d.path)
-		if e != nil {
-			d.close()
-			return nil, e
-		}
-	}
-	return d, nil
-}
-
 // openFileNolog is the Windows implementation of OpenFile.
 func openFileNolog(name string, flag int, perm FileMode) (*File, error) {
 	if name == "" {
@@ -446,12 +377,6 @@
 
 	// handle paths, like \??\Volume{abc}\...
 
-	err := windows.LoadGetFinalPathNameByHandle()
-	if err != nil {
-		// we must be using old version of Windows
-		return "", err
-	}
-
 	h, err := openSymlink(path)
 	if err != nil {
 		return "", err
@@ -481,7 +406,7 @@
 	return "", errors.New("GetFinalPathNameByHandle returned unexpected path: " + s)
 }
 
-func readlink(path string) (string, error) {
+func readReparseLink(path string) (string, error) {
 	h, err := openSymlink(path)
 	if err != nil {
 		return "", err
@@ -513,10 +438,8 @@
 	}
 }
 
-// Readlink returns the destination of the named symbolic link.
-// If there is an error, it will be of type *PathError.
-func Readlink(name string) (string, error) {
-	s, err := readlink(fixLongPath(name))
+func readlink(name string) (string, error) {
+	s, err := readReparseLink(fixLongPath(name))
 	if err != nil {
 		return "", &PathError{Op: "readlink", Path: name, Err: err}
 	}
diff --git a/src/os/os_test.go b/src/os/os_test.go
index 94c3ad0..6adc3b5 100644
--- a/src/os/os_test.go
+++ b/src/os/os_test.go
@@ -11,6 +11,7 @@
 	"internal/testenv"
 	"io"
 	"io/fs"
+	"log"
 	. "os"
 	"os/exec"
 	"path/filepath"
@@ -33,6 +34,8 @@
 		Exit(0)
 	}
 
+	log.SetFlags(log.LstdFlags | log.Lshortfile)
+
 	Exit(m.Run())
 }
 
@@ -147,7 +150,7 @@
 func equal(name1, name2 string) (r bool) {
 	switch runtime.GOOS {
 	case "windows":
-		r = strings.ToLower(name1) == strings.ToLower(name2)
+		r = strings.EqualFold(name1, name2)
 	default:
 		r = name1 == name2
 	}
@@ -1180,6 +1183,7 @@
 			// Stat does not return the real case of the file (it returns what the caller asked for)
 			// So we have to use readdir to get the real name of the file.
 			dirNames, err := fd.Readdirnames(-1)
+			fd.Close()
 			if err != nil {
 				t.Fatalf("readdirnames: %s", err)
 			}
@@ -1619,8 +1623,17 @@
 	if err != nil {
 		t.Fatalf("Getwd: %s", err)
 	}
-	if !equal(wdNew, wd) {
-		t.Fatalf("fd.Chdir failed, got %s, want %s", wdNew, wd)
+
+	wdInfo, err := fd.Stat()
+	if err != nil {
+		t.Fatal(err)
+	}
+	newInfo, err := Stat(wdNew)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !SameFile(wdInfo, newInfo) {
+		t.Fatalf("fd.Chdir failed: got %s, want %s", wdNew, wd)
 	}
 }
 
@@ -2374,6 +2387,11 @@
 		Exit(0)
 	}
 
+	exe, err := Executable()
+	if err != nil {
+		t.Skipf("can't find executable: %v", err)
+	}
+
 	testenv.MustHaveExec(t)
 	t.Parallel()
 
@@ -2388,13 +2406,11 @@
 		t.Fatalf("unexpected Stdin mode (%v), want ModeCharDevice or ModeNamedPipe", mode)
 	}
 
-	var cmd *exec.Cmd
-	if runtime.GOOS == "windows" {
-		cmd = testenv.Command(t, "cmd", "/c", "echo output | "+Args[0]+" -test.run=TestStatStdin")
-	} else {
-		cmd = testenv.Command(t, "/bin/sh", "-c", "echo output | "+Args[0]+" -test.run=TestStatStdin")
-	}
-	cmd.Env = append(Environ(), "GO_WANT_HELPER_PROCESS=1")
+	cmd := testenv.Command(t, exe, "-test.run=^TestStatStdin$")
+	cmd = testenv.CleanCmdEnv(cmd)
+	cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
+	// This will make standard input a pipe.
+	cmd.Stdin = strings.NewReader("output")
 
 	output, err := cmd.CombinedOutput()
 	if err != nil {
@@ -2606,7 +2622,7 @@
 	testenv.MustHaveExec(t)
 	t.Parallel()
 
-	cmd := testenv.Command(t, Args[0], "-test.run=TestGetppid")
+	cmd := testenv.Command(t, Args[0], "-test.run=^TestGetppid$")
 	cmd.Env = append(Environ(), "GO_WANT_HELPER_PROCESS=1")
 
 	// verify that Getppid() from the forked process reports our process id
@@ -2823,6 +2839,54 @@
 	t.Run("dir", testDoubleCloseError(sfdir))
 }
 
+func TestUserCacheDir(t *testing.T) {
+	t.Parallel()
+
+	dir, err := UserCacheDir()
+	if err != nil {
+		t.Skipf("skipping: %v", err)
+	}
+	if dir == "" {
+		t.Fatalf("UserCacheDir returned %q; want non-empty path or error", dir)
+	}
+
+	fi, err := Stat(dir)
+	if err != nil {
+		if IsNotExist(err) {
+			t.Log(err)
+			return
+		}
+		t.Fatal(err)
+	}
+	if !fi.IsDir() {
+		t.Fatalf("dir %s is not directory; type = %v", dir, fi.Mode())
+	}
+}
+
+func TestUserConfigDir(t *testing.T) {
+	t.Parallel()
+
+	dir, err := UserConfigDir()
+	if err != nil {
+		t.Skipf("skipping: %v", err)
+	}
+	if dir == "" {
+		t.Fatalf("UserConfigDir returned %q; want non-empty path or error", dir)
+	}
+
+	fi, err := Stat(dir)
+	if err != nil {
+		if IsNotExist(err) {
+			t.Log(err)
+			return
+		}
+		t.Fatal(err)
+	}
+	if !fi.IsDir() {
+		t.Fatalf("dir %s is not directory; type = %v", dir, fi.Mode())
+	}
+}
+
 func TestUserHomeDir(t *testing.T) {
 	t.Parallel()
 
@@ -3270,3 +3334,27 @@
 		t.Errorf("got nils %d errs %d, want 2 2", nils, errs)
 	}
 }
+
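+// TestRandomLen checks that the random name component used by MkdirTemp and
+// CreateTemp stays short: with a bare "*" pattern the generated base name
+// should be at most 10 bytes.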
+func TestRandomLen(t *testing.T) {
+	for range 5 {
+		dir, err := MkdirTemp(t.TempDir(), "*")
+		if err != nil {
+			t.Fatal(err)
+		}
+		base := filepath.Base(dir)
+		if len(base) > 10 {
+			t.Errorf("MkdirTemp returned len %d: %s", len(base), base)
+		}
+	}
+	for range 5 {
+		f, err := CreateTemp(t.TempDir(), "*")
+		if err != nil {
+			t.Fatal(err)
+		}
+		base := filepath.Base(f.Name())
+		f.Close()
+		if len(base) > 10 {
+			t.Errorf("CreateTemp returned len %d: %s", len(base), base)
+		}
+	}
+}
diff --git a/src/os/os_unix_test.go b/src/os/os_unix_test.go
index 98e7afd..98e436f 100644
--- a/src/os/os_unix_test.go
+++ b/src/os/os_unix_test.go
@@ -75,6 +75,12 @@
 	t.Log("groups: ", groups)
 	for _, g := range groups {
 		if err = Chown(f.Name(), -1, g); err != nil {
+			if testenv.SyscallIsNotSupported(err) {
+				t.Logf("chown %s -1 %d: %s (error ignored)", f.Name(), g, err)
+				// Since the Chown call failed, the file should be unmodified.
+				checkUidGid(t, f.Name(), int(sys.Uid), gid)
+				continue
+			}
 			t.Fatalf("chown %s -1 %d: %s", f.Name(), g, err)
 		}
 		checkUidGid(t, f.Name(), int(sys.Uid), g)
@@ -123,6 +129,12 @@
 	t.Log("groups: ", groups)
 	for _, g := range groups {
 		if err = f.Chown(-1, g); err != nil {
+			if testenv.SyscallIsNotSupported(err) {
+				t.Logf("chown %s -1 %d: %s (error ignored)", f.Name(), g, err)
+				// Since the Chown call failed, the file should be unmodified.
+				checkUidGid(t, f.Name(), int(sys.Uid), gid)
+				continue
+			}
 			t.Fatalf("fchown %s -1 %d: %s", f.Name(), g, err)
 		}
 		checkUidGid(t, f.Name(), int(sys.Uid), g)
@@ -181,12 +193,22 @@
 	t.Log("groups: ", groups)
 	for _, g := range groups {
 		if err = Lchown(linkname, -1, g); err != nil {
+			if testenv.SyscallIsNotSupported(err) {
+				t.Logf("lchown %s -1 %d: %s (error ignored)", f.Name(), g, err)
+				// Since the Lchown call failed, the file should be unmodified.
+				checkUidGid(t, f.Name(), int(sys.Uid), gid)
+				continue
+			}
 			t.Fatalf("lchown %s -1 %d: %s", linkname, g, err)
 		}
 		checkUidGid(t, linkname, int(sys.Uid), g)
 
 		// Check that link target's gid is unchanged.
 		checkUidGid(t, f.Name(), int(sys.Uid), int(sys.Gid))
+
+		if err = Lchown(linkname, -1, gid); err != nil {
+			t.Fatalf("lchown %s -1 %d: %s", f.Name(), gid, err)
+		}
 	}
 }
 
@@ -235,8 +257,23 @@
 	const umask = 0077
 	dir := newDir("TestMkdirStickyUmask", t)
 	defer RemoveAll(dir)
+
 	oldUmask := syscall.Umask(umask)
 	defer syscall.Umask(oldUmask)
+
+	// We have set a umask, but if the parent directory happens to have a default
+	// ACL, the umask may be ignored. To prevent spurious failures from an ACL,
+	// we create a non-sticky directory as a “control case” to compare against our
+	// sticky-bit “experiment”.
+	control := filepath.Join(dir, "control")
+	if err := Mkdir(control, 0755); err != nil {
+		t.Fatal(err)
+	}
+	cfi, err := Stat(control)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	p := filepath.Join(dir, "dir1")
 	if err := Mkdir(p, ModeSticky|0755); err != nil {
 		t.Fatal(err)
@@ -245,8 +282,11 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	if mode := fi.Mode(); (mode&umask) != 0 || (mode&^ModePerm) != (ModeDir|ModeSticky) {
-		t.Errorf("unexpected mode %s", mode)
+
+	got := fi.Mode()
+	want := cfi.Mode() | ModeSticky
+	if got != want {
+		t.Errorf("Mkdir(_, ModeSticky|0755) created dir with mode %v; want %v", got, want)
 	}
 }
 
@@ -346,3 +386,59 @@
 		}
 	}
 }
+
+// Test that copying to files opened with O_APPEND works and
+// the copy_file_range syscall isn't used on Linux.
+//
+// Regression test for go.dev/issue/60181
+func TestIssue60181(t *testing.T) {
+	defer chtmpdir(t)()
+
+	want := "hello gopher"
+
+	a, err := CreateTemp("", "a")
+	if err != nil {
+		t.Fatal(err)
+	}
+	a.WriteString(want[:5])
+	a.Close()
+
+	b, err := CreateTemp("", "b")
+	if err != nil {
+		t.Fatal(err)
+	}
+	b.WriteString(want[5:])
+	b.Close()
+
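+	// Reopen a with O_APPEND so that the destination of io.Copy is an
+	// append-only descriptor, the case copy_file_range cannot handle.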
+	afd, err := syscall.Open(a.Name(), syscall.O_RDWR|syscall.O_APPEND, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bfd, err := syscall.Open(b.Name(), syscall.O_RDONLY, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	aa := NewFile(uintptr(afd), a.Name())
+	defer aa.Close()
+	bb := NewFile(uintptr(bfd), b.Name())
+	defer bb.Close()
+
+	// This would fail on Linux if the copy_file_range syscall were used, because it doesn't
+	// support destination files opened with O_APPEND, see
+	// https://man7.org/linux/man-pages/man2/copy_file_range.2.html#ERRORS
+	_, err = io.Copy(aa, bb)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	buf, err := ReadFile(aa.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if got := string(buf); got != want {
+		t.Errorf("files not concatenated: got %q, want %q", got, want)
+	}
+}
diff --git a/src/os/os_windows_test.go b/src/os/os_windows_test.go
index d6aab18..7436b9a 100644
--- a/src/os/os_windows_test.go
+++ b/src/os/os_windows_test.go
@@ -281,7 +281,7 @@
 			},
 		},
 		{
-			// Do as junction utility https://technet.microsoft.com/en-au/sysinternals/bb896768.aspx does - set PrintNameLength to 0.
+			// Do as junction utility https://learn.microsoft.com/en-us/sysinternals/downloads/junction does - set PrintNameLength to 0.
 			name: "have_blank_print_name",
 			mklink: func(link, target string) error {
 				var t reparseData
@@ -427,6 +427,28 @@
 	testDirLinks(t, tests)
 }
 
+func mustHaveWorkstation(t *testing.T) {
+	mar, err := windows.OpenSCManager(nil, nil, windows.SERVICE_QUERY_STATUS)
+	if err != nil {
+		return
+	}
+	defer syscall.CloseHandle(mar)
+	// LanmanWorkstation is the service name, and Workstation is the display name.
+	srv, err := windows.OpenService(mar, syscall.StringToUTF16Ptr("LanmanWorkstation"), windows.SERVICE_QUERY_STATUS)
+	if err != nil {
+		return
+	}
+	defer syscall.CloseHandle(srv)
+	var state windows.SERVICE_STATUS
+	err = windows.QueryServiceStatus(srv, &state)
+	if err != nil {
+		return
+	}
+	if state.CurrentState != windows.SERVICE_RUNNING {
+		t.Skip("skipping: the Workstation service is not running")
+	}
+}
+
 func TestNetworkSymbolicLink(t *testing.T) {
 	testenv.MustHaveSymlink(t)
 
@@ -435,7 +457,8 @@
 	dir := t.TempDir()
 	chdir(t, dir)
 
-	shareName := "GoSymbolicLinkTestShare" // hope no conflictions
+	pid := os.Getpid()
+	shareName := fmt.Sprintf("GoSymbolicLinkTestShare%d", pid)
 	sharePath := filepath.Join(dir, shareName)
 	testDir := "TestDir"
 
@@ -453,11 +476,22 @@
 		t.Fatal(err)
 	}
 
+	// Per https://learn.microsoft.com/en-us/windows/win32/api/lmshare/ns-lmshare-share_info_2:
+	//
+	// “[The shi2_permissions field] indicates the shared resource's permissions
+	// for servers running with share-level security. A server running user-level
+	// security ignores this member.
+	// …
+	// Note that Windows does not support share-level security.”
+	//
+	// So it shouldn't matter what permissions we set here.
+	const permissions = 0
+
 	p := windows.SHARE_INFO_2{
 		Netname:     wShareName,
-		Type:        windows.STYPE_DISKTREE,
+		Type:        windows.STYPE_DISKTREE | windows.STYPE_TEMPORARY,
 		Remark:      nil,
-		Permissions: 0,
+		Permissions: permissions,
 		MaxUses:     1,
 		CurrentUses: 0,
 		Path:        wSharePath,
@@ -466,11 +500,8 @@
 
 	err = windows.NetShareAdd(nil, 2, (*byte)(unsafe.Pointer(&p)), nil)
 	if err != nil {
-		if err == syscall.ERROR_ACCESS_DENIED {
-			t.Skip("you don't have enough privileges to add network share")
-		}
-		if err == _NERR_ServerNotStarted {
-			t.Skip(_NERR_ServerNotStarted.Error())
+		if err == syscall.ERROR_ACCESS_DENIED || err == _NERR_ServerNotStarted {
+			t.Skipf("skipping: NetShareAdd: %v", err)
 		}
 		t.Fatal(err)
 	}
@@ -489,6 +520,7 @@
 	}
 	fi2, err := os.Stat(UNCPath)
 	if err != nil {
+		mustHaveWorkstation(t)
 		t.Fatal(err)
 	}
 	if !os.SameFile(fi1, fi2) {
@@ -509,7 +541,7 @@
 		t.Fatal(err)
 	}
 	if got != target {
-		t.Errorf(`os.Readlink("%s"): got %v, want %v`, link, got, target)
+		t.Errorf(`os.Readlink(%#q): got %v, want %v`, link, got, target)
 	}
 
 	got, err = filepath.EvalSymlinks(link)
@@ -517,7 +549,46 @@
 		t.Fatal(err)
 	}
 	if got != target {
-		t.Errorf(`filepath.EvalSymlinks("%s"): got %v, want %v`, link, got, target)
+		t.Errorf(`filepath.EvalSymlinks(%#q): got %v, want %v`, link, got, target)
+	}
+}
+
+func TestStatLxSymLink(t *testing.T) {
+	if _, err := exec.LookPath("wsl"); err != nil {
+		t.Skip("skipping: WSL not detected")
+	}
+
+	temp := t.TempDir()
+	chdir(t, temp)
+
+	const target = "target"
+	const link = "link"
+
+	_, err := testenv.Command(t, "wsl", "/bin/mkdir", target).Output()
+	if err != nil {
+		// This normally happens when WSL doesn't yet have a Linux distribution installed.
+		t.Skipf("skipping: WSL is not correctly installed: %v", err)
+	}
+
+	_, err = testenv.Command(t, "wsl", "/bin/ln", "-s", target, link).Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	fi, err := os.Lstat(link)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if m := fi.Mode(); m&fs.ModeSymlink != 0 {
+		// This can happen with newer WSL versions when running as admin or in developer mode.
+		t.Skip("skipping: WSL created an IO_REPARSE_TAG_SYMLINK reparse tag instead of an IO_REPARSE_TAG_LX_SYMLINK")
+	}
+	// Stat'ing an IO_REPARSE_TAG_LX_SYMLINK from outside WSL always returns ERROR_CANT_ACCESS_FILE.
+	// We check this condition to validate that os.Stat has tried to follow the link.
+	_, err = os.Stat(link)
+	const ERROR_CANT_ACCESS_FILE = syscall.Errno(1920)
+	if err == nil || !errors.Is(err, ERROR_CANT_ACCESS_FILE) {
+		t.Fatalf("os.Stat(%q): got %v, want ERROR_CANT_ACCESS_FILE", link, err)
 	}
 }
 
@@ -814,7 +885,7 @@
 		` \\\\\""x"""y z`,
 		"\tb\t\"x\ty\"",
 		` "Брад" d e`,
-		// examples from https://msdn.microsoft.com/en-us/library/17w5ykft.aspx
+		// examples from https://learn.microsoft.com/en-us/cpp/cpp/main-function-command-line-args
 		` "abc" d e`,
 		` a\\b d"e f"g h`,
 		` a\\\"b c d`,
@@ -1157,7 +1228,7 @@
 		t.Skip(err)
 	}
 
-	cmd := testenv.Command(t, exe, "-test.run=TestRootDirAsTemp")
+	cmd := testenv.Command(t, exe, "-test.run=^TestRootDirAsTemp$")
 	cmd.Env = cmd.Environ()
 	cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
 	cmd.Env = append(cmd.Env, "TMP="+newtmp)
@@ -1465,3 +1536,61 @@
 		t.Errorf("ReadDir(%q) = %v", dir, err)
 	}
 }
+
+func TestReadDirNoFileID(t *testing.T) {
+	*os.AllowReadDirFileID = false
+	defer func() { *os.AllowReadDirFileID = true }()
+
+	dir := t.TempDir()
+	pathA := filepath.Join(dir, "a")
+	pathB := filepath.Join(dir, "b")
+	if err := os.WriteFile(pathA, nil, 0666); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.WriteFile(pathB, nil, 0666); err != nil {
+		t.Fatal(err)
+	}
+
+	files, err := os.ReadDir(dir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(files) != 2 {
+		t.Fatalf("ReadDir(%q) = %v; want 2 files", dir, files)
+	}
+
+	// Check that os.SameFile works with files returned by os.ReadDir.
+	f1, err := files[0].Info()
+	if err != nil {
+		t.Fatal(err)
+	}
+	f2, err := files[1].Info()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !os.SameFile(f1, f1) {
+		t.Errorf("SameFile(%v, %v) = false; want true", f1, f1)
+	}
+	if !os.SameFile(f2, f2) {
+		t.Errorf("SameFile(%v, %v) = false; want true", f2, f2)
+	}
+	if os.SameFile(f1, f2) {
+		t.Errorf("SameFile(%v, %v) = true; want false", f1, f2)
+	}
+
+	// Check that os.SameFile works with a mix of os.ReadDir and os.Stat files.
+	f1s, err := os.Stat(pathA)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f2s, err := os.Stat(pathB)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !os.SameFile(f1, f1s) {
+		t.Errorf("SameFile(%v, %v) = false; want true", f1, f1s)
+	}
+	if !os.SameFile(f2, f2s) {
+		t.Errorf("SameFile(%v, %v) = false; want true", f2, f2s)
+	}
+}
diff --git a/src/os/path.go b/src/os/path.go
index df87887..6ac4cbe 100644
--- a/src/os/path.go
+++ b/src/os/path.go
@@ -26,19 +26,25 @@
 	}
 
 	// Slow path: make sure parent exists and then call Mkdir for path.
-	i := len(path)
-	for i > 0 && IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+
+	// Extract the parent folder from path by first removing any trailing
+	// path separator and then scanning backward until finding a path
+	// separator or reaching the beginning of the string.
+	i := len(path) - 1
+	for i >= 0 && IsPathSeparator(path[i]) {
 		i--
 	}
-
-	j := i
-	for j > 0 && !IsPathSeparator(path[j-1]) { // Scan backward over element.
-		j--
+	for i >= 0 && !IsPathSeparator(path[i]) {
+		i--
+	}
+	if i < 0 {
+		i = 0
 	}
 
-	if j > 1 {
-		// Create parent.
-		err = MkdirAll(fixRootDirectory(path[:j-1]), perm)
+	// If there is a parent directory, and it is not the volume name,
+	// recurse to ensure parent directory exists.
+	if parent := path[:i]; len(parent) > len(volumeName(path)) {
+		err = MkdirAll(parent, perm)
 		if err != nil {
 			return err
 		}
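The parent-extraction scan above reads cleanly in isolation. Here is a hedged, stand-alone sketch of the same backward scan, with volumeName stubbed out (the real helper is platform-specific and returns "" on non-Windows systems):

package main

import (
	"fmt"
	"os"
)

func volumeName(string) string { return "" } // assumption: non-Windows behavior

// parentDir mirrors the MkdirAll logic above: trim trailing separators, then
// scan backward past the last path element. ok reports whether the parent is
// more than just the volume name and therefore worth recursing into.
func parentDir(path string) (parent string, ok bool) {
	i := len(path) - 1
	for i >= 0 && os.IsPathSeparator(path[i]) { // skip trailing separators
		i--
	}
	for i >= 0 && !os.IsPathSeparator(path[i]) { // skip the last element
		i--
	}
	if i < 0 {
		i = 0
	}
	parent = path[:i]
	return parent, len(parent) > len(volumeName(path))
}

func main() {
	fmt.Println(parentDir("/a/b/c//")) // "/a/b" true
	fmt.Println(parentDir("dir"))      // "" false
}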
diff --git a/src/os/path_plan9.go b/src/os/path_plan9.go
index a54b4b9..f1c9dbc 100644
--- a/src/os/path_plan9.go
+++ b/src/os/path_plan9.go
@@ -14,6 +14,6 @@
 	return PathSeparator == c
 }
 
-func fixRootDirectory(p string) string {
-	return p
+func volumeName(p string) string {
+	return ""
 }
diff --git a/src/os/path_unix.go b/src/os/path_unix.go
index c975cdb..1c80fa9 100644
--- a/src/os/path_unix.go
+++ b/src/os/path_unix.go
@@ -70,6 +70,6 @@
 	return dirname, basename
 }
 
-func fixRootDirectory(p string) string {
-	return p
+func volumeName(p string) string {
+	return ""
 }
diff --git a/src/os/path_windows.go b/src/os/path_windows.go
index 3356908..0522025 100644
--- a/src/os/path_windows.go
+++ b/src/os/path_windows.go
@@ -139,7 +139,7 @@
 // or contains .. elements), or is short enough, fixLongPath returns
 // path unmodified.
 //
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+// See https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation
 func fixLongPath(path string) string {
 	if canUseLongPaths {
 		return path
@@ -214,14 +214,3 @@
 	}
 	return string(pathbuf[:w])
 }
-
-// fixRootDirectory fixes a reference to a drive's root directory to
-// have the required trailing slash.
-func fixRootDirectory(p string) string {
-	if len(p) == len(`\\?\c:`) {
-		if IsPathSeparator(p[0]) && IsPathSeparator(p[1]) && p[2] == '?' && IsPathSeparator(p[3]) && p[5] == ':' {
-			return p + `\`
-		}
-	}
-	return p
-}
diff --git a/src/os/path_windows_test.go b/src/os/path_windows_test.go
index 2506b4f..4e5e501 100644
--- a/src/os/path_windows_test.go
+++ b/src/os/path_windows_test.go
@@ -5,7 +5,11 @@
 package os_test
 
 import (
+	"fmt"
+	"internal/syscall/windows"
+	"internal/testenv"
 	"os"
+	"path/filepath"
 	"strings"
 	"syscall"
 	"testing"
@@ -106,3 +110,48 @@
 		dir.Close()
 	}
 }
+
+func testMkdirAllAtRoot(t *testing.T, root string) {
+	// Create a unique-enough directory name in root.
+	base := fmt.Sprintf("%s-%d", t.Name(), os.Getpid())
+	path := filepath.Join(root, base)
+	if err := os.MkdirAll(path, 0777); err != nil {
+		t.Fatalf("MkdirAll(%q) failed: %v", path, err)
+	}
+	// Clean up
+	if err := os.RemoveAll(path); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestMkdirAllExtendedLengthAtRoot(t *testing.T) {
+	if testenv.Builder() == "" {
+		t.Skipf("skipping non-hermetic test outside of Go builders")
+	}
+
+	const prefix = `\\?\`
+	vol := filepath.VolumeName(t.TempDir()) + `\`
+	if len(vol) < 4 || vol[:4] != prefix {
+		vol = prefix + vol
+	}
+	testMkdirAllAtRoot(t, vol)
+}
+
+func TestMkdirAllVolumeNameAtRoot(t *testing.T) {
+	if testenv.Builder() == "" {
+		t.Skipf("skipping non-hermetic test outside of Go builders")
+	}
+
+	vol, err := syscall.UTF16PtrFromString(filepath.VolumeName(t.TempDir()) + `\`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	const maxVolNameLen = 50
+	var buf [maxVolNameLen]uint16
+	err = windows.GetVolumeNameForVolumeMountPoint(vol, &buf[0], maxVolNameLen)
+	if err != nil {
+		t.Fatal(err)
+	}
+	volName := syscall.UTF16ToString(buf[:])
+	testMkdirAllAtRoot(t, volName)
+}
diff --git a/src/os/pipe_test.go b/src/os/pipe_test.go
index 6f01d30..a9e0c8b 100644
--- a/src/os/pipe_test.go
+++ b/src/os/pipe_test.go
@@ -263,7 +263,7 @@
 	}
 	defer r.Close()
 	defer w.Close()
-	cmd := testenv.Command(t, os.Args[0], "-test.run="+t.Name())
+	cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$")
 	cmd.Env = append(cmd.Environ(), "GO_WANT_READ_NONBLOCKING_FD=1")
 	cmd.Stdin = r
 	output, err := cmd.CombinedOutput()
diff --git a/src/os/readfrom_linux.go b/src/os/readfrom_linux.go
deleted file mode 100644
index 7e80240..0000000
--- a/src/os/readfrom_linux.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package os
-
-import (
-	"internal/poll"
-	"io"
-	"syscall"
-)
-
-var (
-	pollCopyFileRange = poll.CopyFileRange
-	pollSplice        = poll.Splice
-)
-
-func (f *File) readFrom(r io.Reader) (written int64, handled bool, err error) {
-	// Neither copy_file_range(2) nor splice(2) supports destinations opened with
-	// O_APPEND, so don't bother to try zero-copy with these system calls.
-	//
-	// Visit https://man7.org/linux/man-pages/man2/copy_file_range.2.html#ERRORS and
-	// https://man7.org/linux/man-pages/man2/splice.2.html#ERRORS for details.
-	if f.appendMode {
-		return 0, false, nil
-	}
-
-	written, handled, err = f.copyFileRange(r)
-	if handled {
-		return
-	}
-	return f.spliceToFile(r)
-}
-
-func (f *File) spliceToFile(r io.Reader) (written int64, handled bool, err error) {
-	var (
-		remain int64
-		lr     *io.LimitedReader
-	)
-	if lr, r, remain = tryLimitedReader(r); remain <= 0 {
-		return 0, true, nil
-	}
-
-	pfd := getPollFD(r)
-	// TODO(panjf2000): run some tests to see if we should unlock the non-streams for splice.
-	// Streams benefit the most from the splice(2), non-streams are not even supported in old kernels
-	// where splice(2) will just return EINVAL; newer kernels support non-streams like UDP, but I really
-	// doubt that splice(2) could help non-streams, cuz they usually send small frames respectively
-	// and one splice call would result in one frame.
-	// splice(2) is suitable for large data but the generation of fragments defeats its edge here.
-	// Therefore, don't bother to try splice if the r is not a streaming descriptor.
-	if pfd == nil || !pfd.IsStream {
-		return
-	}
-
-	var syscallName string
-	written, handled, syscallName, err = pollSplice(&f.pfd, pfd, remain)
-
-	if lr != nil {
-		lr.N = remain - written
-	}
-
-	return written, handled, wrapSyscallError(syscallName, err)
-}
-
-// getPollFD tries to get the poll.FD from the given io.Reader by expecting
-// the underlying type of r to be the implementation of syscall.Conn that contains
-// a *net.rawConn.
-func getPollFD(r io.Reader) *poll.FD {
-	sc, ok := r.(syscall.Conn)
-	if !ok {
-		return nil
-	}
-	rc, err := sc.SyscallConn()
-	if err != nil {
-		return nil
-	}
-	ipfd, ok := rc.(interface{ PollFD() *poll.FD })
-	if !ok {
-		return nil
-	}
-	return ipfd.PollFD()
-}
-
-func (f *File) copyFileRange(r io.Reader) (written int64, handled bool, err error) {
-	var (
-		remain int64
-		lr     *io.LimitedReader
-	)
-	if lr, r, remain = tryLimitedReader(r); remain <= 0 {
-		return 0, true, nil
-	}
-
-	src, ok := r.(*File)
-	if !ok {
-		return 0, false, nil
-	}
-	if src.checkValid("ReadFrom") != nil {
-		// Avoid returning the error as we report handled as false,
-		// leave further error handling as the responsibility of the caller.
-		return 0, false, nil
-	}
-
-	written, handled, err = pollCopyFileRange(&f.pfd, &src.pfd, remain)
-	if lr != nil {
-		lr.N -= written
-	}
-	return written, handled, wrapSyscallError("copy_file_range", err)
-}
-
-// tryLimitedReader tries to assert the io.Reader to io.LimitedReader, it returns the io.LimitedReader,
-// the underlying io.Reader and the remaining amount of bytes if the assertion succeeds,
-// otherwise it just returns the original io.Reader and the theoretical unlimited remaining amount of bytes.
-func tryLimitedReader(r io.Reader) (*io.LimitedReader, io.Reader, int64) {
-	var remain int64 = 1<<63 - 1 // by default, copy until EOF
-
-	lr, ok := r.(*io.LimitedReader)
-	if !ok {
-		return nil, r, remain
-	}
-
-	remain = lr.N
-	return lr, lr.R, remain
-}
diff --git a/src/os/readfrom_linux_test.go b/src/os/readfrom_linux_test.go
index 4f98be4..93f7803 100644
--- a/src/os/readfrom_linux_test.go
+++ b/src/os/readfrom_linux_test.go
@@ -749,12 +749,12 @@
 	}
 }
 
-func TestGetPollFDFromReader(t *testing.T) {
-	t.Run("tcp", func(t *testing.T) { testGetPollFromReader(t, "tcp") })
-	t.Run("unix", func(t *testing.T) { testGetPollFromReader(t, "unix") })
+func TestGetPollFDAndNetwork(t *testing.T) {
+	t.Run("tcp4", func(t *testing.T) { testGetPollFDAndNetwork(t, "tcp4") })
+	t.Run("unix", func(t *testing.T) { testGetPollFDAndNetwork(t, "unix") })
 }
 
-func testGetPollFromReader(t *testing.T, proto string) {
+func testGetPollFDAndNetwork(t *testing.T, proto string) {
 	_, server := createSocketPair(t, proto)
 	sc, ok := server.(syscall.Conn)
 	if !ok {
@@ -765,12 +765,15 @@
 		t.Fatalf("server SyscallConn error: %v", err)
 	}
 	if err = rc.Control(func(fd uintptr) {
-		pfd := GetPollFDForTest(server)
+		pfd, network := GetPollFDAndNetwork(server)
 		if pfd == nil {
-			t.Fatalf("GetPollFDForTest didn't return poll.FD")
+			t.Fatalf("GetPollFDAndNetwork didn't return poll.FD")
+		}
+		if string(network) != proto {
+			t.Fatalf("GetPollFDAndNetwork returned wrong network, got: %s, want: %s", network, proto)
 		}
 		if pfd.Sysfd != int(fd) {
-			t.Fatalf("GetPollFDForTest returned wrong poll.FD, got: %d, want: %d", pfd.Sysfd, int(fd))
+			t.Fatalf("GetPollFDAndNetwork returned wrong poll.FD, got: %d, want: %d", pfd.Sysfd, int(fd))
 		}
 		if !pfd.IsStream {
 			t.Fatalf("expected IsStream to be true")
diff --git a/src/os/readfrom_stub.go b/src/os/readfrom_stub.go
deleted file mode 100644
index 8b7d5fb..0000000
--- a/src/os/readfrom_stub.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !linux
-
-package os
-
-import "io"
-
-func (f *File) readFrom(r io.Reader) (n int64, handled bool, err error) {
-	return 0, false, nil
-}
diff --git a/src/os/removeall_test.go b/src/os/removeall_test.go
index 2f7938b..c0b2dd6 100644
--- a/src/os/removeall_test.go
+++ b/src/os/removeall_test.go
@@ -489,7 +489,7 @@
 		}
 	}
 
-	cmd := testenv.Command(t, "/bin/strace", "-f", "-e", "fcntl", me, "-test.run=TestRemoveAllNoFcntl")
+	cmd := testenv.Command(t, "/bin/strace", "-f", "-e", "fcntl", me, "-test.run=^TestRemoveAllNoFcntl$")
 	cmd = testenv.CleanCmdEnv(cmd)
 	cmd.Env = append(cmd.Env, env+"="+subdir)
 	out, err := cmd.CombinedOutput()
diff --git a/src/os/signal/example_unix_test.go b/src/os/signal/example_unix_test.go
index b7047ac..583d4e4 100644
--- a/src/os/signal/example_unix_test.go
+++ b/src/os/signal/example_unix_test.go
@@ -12,9 +12,10 @@
 	"log"
 	"os"
 	"os/signal"
-	"time"
 )
 
+var neverReady = make(chan struct{}) // never closed
+
 // This example passes a context with a signal to tell a blocking function that
 // it should abandon its work after a signal is received.
 func ExampleNotifyContext() {
@@ -35,8 +36,8 @@
 	}
 
 	select {
-	case <-time.After(time.Second):
-		fmt.Println("missed signal")
+	case <-neverReady:
+		fmt.Println("ready")
 	case <-ctx.Done():
 		fmt.Println(ctx.Err()) // prints "context canceled"
 		stop()                 // stop receiving signal notifications as soon as possible.
diff --git a/src/os/signal/signal_cgo_test.go b/src/os/signal/signal_cgo_test.go
index ac59215..0aaf38c 100644
--- a/src/os/signal/signal_cgo_test.go
+++ b/src/os/signal/signal_cgo_test.go
@@ -14,9 +14,9 @@
 	"context"
 	"encoding/binary"
 	"fmt"
+	"internal/testenv"
 	"internal/testpty"
 	"os"
-	"os/exec"
 	"os/signal"
 	"runtime"
 	"strconv"
@@ -93,7 +93,7 @@
 		// Main test process, run code below.
 		break
 	case "1":
-		runSessionLeader(pause)
+		runSessionLeader(t, pause)
 		panic("unreachable")
 	case "2":
 		runStoppingChild()
@@ -128,9 +128,22 @@
 		t.Fatal(err)
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
-	defer cancel()
-	cmd := exec.CommandContext(ctx, os.Args[0], "-test.run=TestTerminalSignal")
+	var (
+		ctx     = context.Background()
+		cmdArgs = []string{"-test.run=^TestTerminalSignal$"}
+	)
+	if deadline, ok := t.Deadline(); ok {
+		d := time.Until(deadline)
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, d)
+		t.Cleanup(cancel)
+
+		// We run the subprocess with an additional 20% margin to allow it to fail
+		// and clean up gracefully if it times out.
+		cmdArgs = append(cmdArgs, fmt.Sprintf("-test.timeout=%v", d*5/4))
+	}
+
+	cmd := testenv.CommandContext(t, ctx, os.Args[0], cmdArgs...)
 	cmd.Env = append(os.Environ(), "GO_TEST_TERMINAL_SIGNALS=1")
 	cmd.Stdin = os.Stdin
 	cmd.Stdout = os.Stdout // for logging
@@ -216,7 +229,7 @@
 }
 
 // GO_TEST_TERMINAL_SIGNALS=1 subprocess above.
-func runSessionLeader(pause time.Duration) {
+func runSessionLeader(t *testing.T, pause time.Duration) {
 	// "Attempts to use tcsetpgrp() from a process which is a
 	// member of a background process group on a fildes associated
 	// with its controlling terminal shall cause the process group
@@ -235,10 +248,22 @@
 	pty := os.NewFile(ptyFD, "pty")
 	controlW := os.NewFile(controlFD, "control-pipe")
 
-	// Slightly shorter timeout than in the parent.
-	ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
-	defer cancel()
-	cmd := exec.CommandContext(ctx, os.Args[0], "-test.run=TestTerminalSignal")
+	var (
+		ctx     = context.Background()
+		cmdArgs = []string{"-test.run=^TestTerminalSignal$"}
+	)
+	if deadline, ok := t.Deadline(); ok {
+		d := time.Until(deadline)
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, d)
+		t.Cleanup(cancel)
+
+		// We run the subprocess with an additional 20% margin to allow it to fail
+		// and clean up gracefully if it times out.
+		cmdArgs = append(cmdArgs, fmt.Sprintf("-test.timeout=%v", d*5/4))
+	}
+
+	cmd := testenv.CommandContext(t, ctx, os.Args[0], cmdArgs...)
 	cmd.Env = append(os.Environ(), "GO_TEST_TERMINAL_SIGNALS=2")
 	cmd.Stdin = os.Stdin
 	cmd.Stdout = os.Stdout
@@ -263,15 +288,14 @@
 
 		// Wait for stop.
 		var status syscall.WaitStatus
-		var errno syscall.Errno
 		for {
-			_, _, errno = syscall.Syscall6(syscall.SYS_WAIT4, uintptr(cmd.Process.Pid), uintptr(unsafe.Pointer(&status)), syscall.WUNTRACED, 0, 0, 0)
-			if errno != syscall.EINTR {
+			_, err = syscall.Wait4(cmd.Process.Pid, &status, syscall.WUNTRACED, nil)
+			if err != syscall.EINTR {
 				break
 			}
 		}
-		if errno != 0 {
-			return fmt.Errorf("error waiting for stop: %w", errno)
+		if err != nil {
+			return fmt.Errorf("error waiting for stop: %w", err)
 		}
 
 		if !status.Stopped() {
@@ -280,7 +304,7 @@
 
 		// Take TTY.
 		pgrp := int32(syscall.Getpgrp()) // assume that pid_t is int32
-		_, _, errno = syscall.Syscall(syscall.SYS_IOCTL, ptyFD, syscall.TIOCSPGRP, uintptr(unsafe.Pointer(&pgrp)))
+		_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, ptyFD, syscall.TIOCSPGRP, uintptr(unsafe.Pointer(&pgrp)))
 		if errno != 0 {
 			return fmt.Errorf("error setting tty process group: %w", errno)
 		}
diff --git a/src/os/signal/signal_test.go b/src/os/signal/signal_test.go
index ddbd458..d54787b 100644
--- a/src/os/signal/signal_test.go
+++ b/src/os/signal/signal_test.go
@@ -53,7 +53,7 @@
 		// Older linux kernels seem to have some hiccups delivering the signal
 		// in a timely manner on ppc64 and ppc64le. When running on a
 		// ppc64le/ubuntu 16.04/linux 4.4 host the time can vary quite
-		// substantially even on a idle system. 5 seconds is twice any value
+		// substantially even on an idle system. 5 seconds is twice any value
 		// observed when running 10000 tests on such a system.
 		settleTime = 5 * time.Second
 	} else if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
@@ -304,10 +304,11 @@
 		// We have no intention of reading from c.
 		c := make(chan os.Signal, 1)
 		Notify(c, syscall.SIGHUP)
-		if out, err := testenv.Command(t, os.Args[0], "-test.run=TestDetectNohup", "-check_sighup_ignored").CombinedOutput(); err == nil {
+		if out, err := testenv.Command(t, os.Args[0], "-test.run=^TestDetectNohup$", "-check_sighup_ignored").CombinedOutput(); err == nil {
 			t.Errorf("ran test with -check_sighup_ignored and it succeeded: expected failure.\nOutput:\n%s", out)
 		}
 		Stop(c)
+
 		// Again, this time with nohup, assuming we can find it.
 		_, err := os.Stat("/usr/bin/nohup")
 		if err != nil {
@@ -315,11 +316,18 @@
 		}
 		Ignore(syscall.SIGHUP)
 		os.Remove("nohup.out")
-		out, err := testenv.Command(t, "/usr/bin/nohup", os.Args[0], "-test.run=TestDetectNohup", "-check_sighup_ignored").CombinedOutput()
+		out, err := testenv.Command(t, "/usr/bin/nohup", os.Args[0], "-test.run=^TestDetectNohup$", "-check_sighup_ignored").CombinedOutput()
 
 		data, _ := os.ReadFile("nohup.out")
 		os.Remove("nohup.out")
 		if err != nil {
+			// nohup doesn't work on new LUCI darwin builders due to the
+			// type of launchd service the test runs under. See
+			// https://go.dev/issue/63875.
+			if runtime.GOOS == "darwin" && strings.Contains(string(out), "nohup: can't detach from console: Inappropriate ioctl for device") {
+				t.Skip("Skipping nohup test due to darwin builder limitation. See https://go.dev/issue/63875.")
+			}
+
 			t.Errorf("ran test with -check_sighup_ignored under nohup and it failed: expected success.\nError: %v\nOutput:\n%s%s", err, out, data)
 		}
 	}
@@ -408,12 +416,6 @@
 
 // Test that when run under nohup, an uncaught SIGHUP does not kill the program.
 func TestNohup(t *testing.T) {
-	// Ugly: ask for SIGHUP so that child will not have no-hup set
-	// even if test is running under nohup environment.
-	// We have no intention of reading from c.
-	c := make(chan os.Signal, 1)
-	Notify(c, syscall.SIGHUP)
-
 	// When run without nohup, the test should crash on an uncaught SIGHUP.
 	// When run under nohup, the test should ignore uncaught SIGHUPs,
 	// because the runtime is not supposed to be listening for them.
@@ -425,88 +427,102 @@
 	//
 	// Both should fail without nohup and succeed with nohup.
 
-	var subTimeout time.Duration
+	t.Run("uncaught", func(t *testing.T) {
+		// Ugly: ask for SIGHUP so that child will not have no-hup set
+		// even if test is running under nohup environment.
+		// We have no intention of reading from c.
+		c := make(chan os.Signal, 1)
+		Notify(c, syscall.SIGHUP)
+		t.Cleanup(func() { Stop(c) })
 
-	var wg sync.WaitGroup
-	wg.Add(2)
-	if deadline, ok := t.Deadline(); ok {
-		subTimeout = time.Until(deadline)
-		subTimeout -= subTimeout / 10 // Leave 10% headroom for propagating output.
-	}
-	for i := 1; i <= 2; i++ {
-		i := i
-		go t.Run(fmt.Sprintf("uncaught-%d", i), func(t *testing.T) {
-			defer wg.Done()
+		var subTimeout time.Duration
+		if deadline, ok := t.Deadline(); ok {
+			subTimeout = time.Until(deadline)
+			subTimeout -= subTimeout / 10 // Leave 10% headroom for propagating output.
+		}
+		for i := 1; i <= 2; i++ {
+			i := i
+			t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+				t.Parallel()
 
-			args := []string{
-				"-test.v",
-				"-test.run=TestStop",
-				"-send_uncaught_sighup=" + strconv.Itoa(i),
-				"-die_from_sighup",
-			}
-			if subTimeout != 0 {
-				args = append(args, fmt.Sprintf("-test.timeout=%v", subTimeout))
-			}
-			out, err := testenv.Command(t, os.Args[0], args...).CombinedOutput()
+				args := []string{
+					"-test.v",
+					"-test.run=^TestStop$",
+					"-send_uncaught_sighup=" + strconv.Itoa(i),
+					"-die_from_sighup",
+				}
+				if subTimeout != 0 {
+					args = append(args, fmt.Sprintf("-test.timeout=%v", subTimeout))
+				}
+				out, err := testenv.Command(t, os.Args[0], args...).CombinedOutput()
 
-			if err == nil {
-				t.Errorf("ran test with -send_uncaught_sighup=%d and it succeeded: expected failure.\nOutput:\n%s", i, out)
-			} else {
-				t.Logf("test with -send_uncaught_sighup=%d failed as expected.\nError: %v\nOutput:\n%s", i, err, out)
-			}
-		})
-	}
-	wg.Wait()
+				if err == nil {
+					t.Errorf("ran test with -send_uncaught_sighup=%d and it succeeded: expected failure.\nOutput:\n%s", i, out)
+				} else {
+					t.Logf("test with -send_uncaught_sighup=%d failed as expected.\nError: %v\nOutput:\n%s", i, err, out)
+				}
+			})
+		}
+	})
 
-	Stop(c)
+	t.Run("nohup", func(t *testing.T) {
+		// Skip the nohup test below when running in tmux on darwin, since nohup
+		// doesn't work correctly there. See issue #5135.
+		if runtime.GOOS == "darwin" && os.Getenv("TMUX") != "" {
+			t.Skip("Skipping nohup test due to running in tmux on darwin")
+		}
 
-	// Skip the nohup test below when running in tmux on darwin, since nohup
-	// doesn't work correctly there. See issue #5135.
-	if runtime.GOOS == "darwin" && os.Getenv("TMUX") != "" {
-		t.Skip("Skipping nohup test due to running in tmux on darwin")
-	}
+		// Again, this time with nohup, assuming we can find it.
+		_, err := exec.LookPath("nohup")
+		if err != nil {
+			t.Skip("cannot find nohup; skipping second half of test")
+		}
 
-	// Again, this time with nohup, assuming we can find it.
-	_, err := exec.LookPath("nohup")
-	if err != nil {
-		t.Skip("cannot find nohup; skipping second half of test")
-	}
+		var subTimeout time.Duration
+		if deadline, ok := t.Deadline(); ok {
+			subTimeout = time.Until(deadline)
+			subTimeout -= subTimeout / 10 // Leave 10% headroom for propagating output.
+		}
+		for i := 1; i <= 2; i++ {
+			i := i
+			t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+				t.Parallel()
 
-	wg.Add(2)
-	if deadline, ok := t.Deadline(); ok {
-		subTimeout = time.Until(deadline)
-		subTimeout -= subTimeout / 10 // Leave 10% headroom for propagating output.
-	}
-	for i := 1; i <= 2; i++ {
-		i := i
-		go t.Run(fmt.Sprintf("nohup-%d", i), func(t *testing.T) {
-			defer wg.Done()
+				// POSIX specifies that nohup writes to a file named nohup.out if standard
+				// output is a terminal. However, for an exec.Cmd, standard output is
+				// not a terminal — so we don't need to read or remove that file (and,
+				// indeed, cannot even create it if the current user is unable to write to
+				// GOROOT/src, such as when GOROOT is installed and owned by root).
 
-			// POSIX specifies that nohup writes to a file named nohup.out if standard
-			// output is a terminal. However, for an exec.Command, standard output is
-			// not a terminal — so we don't need to read or remove that file (and,
-			// indeed, cannot even create it if the current user is unable to write to
-			// GOROOT/src, such as when GOROOT is installed and owned by root).
+				args := []string{
+					os.Args[0],
+					"-test.v",
+					"-test.run=^TestStop$",
+					"-send_uncaught_sighup=" + strconv.Itoa(i),
+				}
+				if subTimeout != 0 {
+					args = append(args, fmt.Sprintf("-test.timeout=%v", subTimeout))
+				}
+				out, err := testenv.Command(t, "nohup", args...).CombinedOutput()
 
-			args := []string{
-				os.Args[0],
-				"-test.v",
-				"-test.run=TestStop",
-				"-send_uncaught_sighup=" + strconv.Itoa(i),
-			}
-			if subTimeout != 0 {
-				args = append(args, fmt.Sprintf("-test.timeout=%v", subTimeout))
-			}
-			out, err := testenv.Command(t, "nohup", args...).CombinedOutput()
+				if err != nil {
+					// nohup doesn't work on new LUCI darwin builders due to the
+					// type of launchd service the test runs under. See
+					// https://go.dev/issue/63875.
+					if runtime.GOOS == "darwin" && strings.Contains(string(out), "nohup: can't detach from console: Inappropriate ioctl for device") {
+						// TODO(go.dev/issue/63799): A false-positive in vet reports a
+						// t.Skip here as invalid. Switch back to t.Skip once fixed.
+						t.Logf("Skipping nohup test due to darwin builder limitation. See https://go.dev/issue/63875.")
+						return
+					}
 
-			if err != nil {
-				t.Errorf("ran test with -send_uncaught_sighup=%d under nohup and it failed: expected success.\nError: %v\nOutput:\n%s", i, err, out)
-			} else {
-				t.Logf("ran test with -send_uncaught_sighup=%d under nohup.\nOutput:\n%s", i, out)
-			}
-		})
-	}
-	wg.Wait()
+					t.Errorf("ran test with -send_uncaught_sighup=%d under nohup and it failed: expected success.\nError: %v\nOutput:\n%s", i, err, out)
+				} else {
+					t.Logf("ran test with -send_uncaught_sighup=%d under nohup.\nOutput:\n%s", i, out)
+				}
+			})
+		}
+	})
 }
 
 // Test that SIGCONT works (issue 8953).
@@ -546,7 +562,7 @@
 		if deadline, ok := t.Deadline(); ok {
 			timeout = time.Until(deadline).String()
 		}
-		cmd := testenv.Command(t, os.Args[0], "-test.run=TestAtomicStop", "-test.timeout="+timeout)
+		cmd := testenv.Command(t, os.Args[0], "-test.run=^TestAtomicStop$", "-test.timeout="+timeout)
 		cmd.Env = append(os.Environ(), "GO_TEST_ATOMIC_STOP=1")
 		out, err := cmd.CombinedOutput()
 		if err == nil {
@@ -742,7 +758,7 @@
 
 			args := []string{
 				"-test.v",
-				"-test.run=TestNotifyContextNotifications$",
+				"-test.run=^TestNotifyContextNotifications$",
 				"-check_notify_ctx",
 				fmt.Sprintf("-ctx_notify_times=%d", tc.n),
 			}
@@ -781,13 +797,9 @@
 	}
 
 	stop()
-	select {
-	case <-c.Done():
-		if got := c.Err(); got != context.Canceled {
-			t.Errorf("c.Err() = %q, want %q", got, context.Canceled)
-		}
-	case <-time.After(time.Second):
-		t.Errorf("timed out waiting for context to be done after calling stop")
+	<-c.Done()
+	if got := c.Err(); got != context.Canceled {
+		t.Errorf("c.Err() = %q, want %q", got, context.Canceled)
 	}
 }
 
@@ -802,13 +814,9 @@
 	}
 
 	cancelParent()
-	select {
-	case <-c.Done():
-		if got := c.Err(); got != context.Canceled {
-			t.Errorf("c.Err() = %q, want %q", got, context.Canceled)
-		}
-	case <-time.After(time.Second):
-		t.Errorf("timed out waiting for parent context to be canceled")
+	<-c.Done()
+	if got := c.Err(); got != context.Canceled {
+		t.Errorf("c.Err() = %q, want %q", got, context.Canceled)
 	}
 }
 
@@ -824,13 +832,9 @@
 		t.Errorf("c.String() = %q, want %q", got, want)
 	}
 
-	select {
-	case <-c.Done():
-		if got := c.Err(); got != context.Canceled {
-			t.Errorf("c.Err() = %q, want %q", got, context.Canceled)
-		}
-	case <-time.After(time.Second):
-		t.Errorf("timed out waiting for parent context to be canceled")
+	<-c.Done()
+	if got := c.Err(); got != context.Canceled {
+		t.Errorf("c.Err() = %q, want %q", got, context.Canceled)
 	}
 }
 
@@ -852,13 +856,9 @@
 		}()
 	}
 	wg.Wait()
-	select {
-	case <-c.Done():
-		if got := c.Err(); got != context.Canceled {
-			t.Errorf("c.Err() = %q, want %q", got, context.Canceled)
-		}
-	case <-time.After(time.Second):
-		t.Errorf("expected context to be canceled")
+	<-c.Done()
+	if got := c.Err(); got != context.Canceled {
+		t.Errorf("c.Err() = %q, want %q", got, context.Canceled)
 	}
 }
 
@@ -904,7 +904,6 @@
 		if err := trace.Start(buf); err != nil {
 			t.Fatalf("[%d] failed to start tracing: %v", i, err)
 		}
-		time.After(1 * time.Microsecond)
 		trace.Stop()
 		size := buf.Len()
 		if size == 0 {
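The restructured nohup subtests above derive each subprocess timeout from t.Deadline, keeping roughly 10% back so the child's output can still be collected before the parent test itself times out. The pattern generalizes; a hedged sketch as a test helper file (exec.Command stands in for testenv.Command, which additionally wires up signal handling and environment scrubbing):

// deadline_helper_test.go — illustrative only.
package example_test

import (
	"fmt"
	"os"
	"os/exec"
	"testing"
	"time"
)

// runSelf re-runs the test binary with a -test.timeout derived from the
// parent test's own deadline.
func runSelf(t *testing.T, extraArgs ...string) ([]byte, error) {
	t.Helper()

	args := append([]string{"-test.v"}, extraArgs...)
	if deadline, ok := t.Deadline(); ok {
		subTimeout := time.Until(deadline)
		subTimeout -= subTimeout / 10 // leave 10% headroom for propagating output
		args = append(args, fmt.Sprintf("-test.timeout=%v", subTimeout))
	}
	return exec.Command(os.Args[0], args...).CombinedOutput()
}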
diff --git a/src/os/stat.go b/src/os/stat.go
index af66838..11d9efa 100644
--- a/src/os/stat.go
+++ b/src/os/stat.go
@@ -17,6 +17,10 @@
 // If the file is a symbolic link, the returned FileInfo
 // describes the symbolic link. Lstat makes no attempt to follow the link.
 // If there is an error, it will be of type *PathError.
+//
+// On Windows, if the file is a reparse point that is a surrogate for another
+// named entity (such as a symbolic link or mounted folder), the returned
+// FileInfo describes the reparse point, and makes no attempt to resolve it.
 func Lstat(name string) (FileInfo, error) {
 	testlog.Stat(name)
 	return lstatNolog(name)
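A small sketch of the Lstat/Stat distinction the new doc comment describes: on Windows, Lstat on a symbolic link or mounted folder reports the reparse point itself, while Stat resolves it. The path here is an assumption, not something the patch creates:

package main

import (
	"fmt"
	"os"
)

func main() {
	const link = "some-link" // assumed to be a symlink created beforehand

	if fi, err := os.Lstat(link); err == nil {
		fmt.Println("Lstat:", fi.Mode()) // describes the link (ModeSymlink set)
	}
	if fi, err := os.Stat(link); err == nil {
		fmt.Println("Stat: ", fi.Mode()) // describes the link target
	}
}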
diff --git a/src/os/stat_windows.go b/src/os/stat_windows.go
index 033c3b9..668255f 100644
--- a/src/os/stat_windows.go
+++ b/src/os/stat_windows.go
@@ -20,7 +20,7 @@
 }
 
 // stat implements both Stat and Lstat of a file.
-func stat(funcname, name string, followSymlinks bool) (FileInfo, error) {
+func stat(funcname, name string, followSurrogates bool) (FileInfo, error) {
 	if len(name) == 0 {
 		return nil, &PathError{Op: funcname, Path: name, Err: syscall.Errno(syscall.ERROR_PATH_NOT_FOUND)}
 	}
@@ -44,7 +44,7 @@
 		}
 		syscall.FindClose(sh)
 		if fd.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
-			// Not a symlink or mount point. FindFirstFile is good enough.
+			// Not a surrogate for another named entity. FindFirstFile is good enough.
 			fs := newFileStatFromWin32finddata(&fd)
 			if err := fs.saveInfoFromPath(name); err != nil {
 				return nil, err
@@ -54,7 +54,7 @@
 	}
 
 	if err == nil && fa.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
-		// The file is definitely not a symlink, because it isn't any kind of reparse point.
+		// Not a surrogate for another named entity, because it isn't any kind of reparse point.
 		// The information we got from GetFileAttributesEx is good enough for now.
 		fs := &fileStat{
 			FileAttributes: fa.FileAttributes,
@@ -70,21 +70,21 @@
 		return fs, nil
 	}
 
-	// Use CreateFile to determine whether the file is a symlink and, if so,
+	// Use CreateFile to determine whether the file is a name surrogate and, if so,
 	// save information about the link target.
 	// Set FILE_FLAG_BACKUP_SEMANTICS so that CreateFile will create the handle
 	// even if name refers to a directory.
 	h, err := syscall.CreateFile(namep, 0, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
 	if err != nil {
 		// Since CreateFile failed, we can't determine whether name refers to a
-		// symlink, or some other kind of reparse point. Since we can't return a
+		// name surrogate, or some other kind of reparse point. Since we can't return a
 		// FileInfo with a known-accurate Mode, we must return an error.
 		return nil, &PathError{Op: "CreateFile", Path: name, Err: err}
 	}
 
 	fi, err := statHandle(name, h)
 	syscall.CloseHandle(h)
-	if err == nil && followSymlinks && fi.(*fileStat).isSymlink() {
+	if err == nil && followSurrogates && fi.(*fileStat).isReparseTagNameSurrogate() {
 		// To obtain information about the link target, we reopen the file without
 		// FILE_FLAG_OPEN_REPARSE_POINT and examine the resulting handle.
 		// (See https://devblogs.microsoft.com/oldnewthing/20100212-00/?p=14963.)
@@ -123,14 +123,14 @@
 
 // lstatNolog implements Lstat for Windows.
 func lstatNolog(name string) (FileInfo, error) {
-	followSymlinks := false
+	followSurrogates := false
 	if name != "" && IsPathSeparator(name[len(name)-1]) {
 		// We try to implement POSIX semantics for Lstat path resolution
 		// (per https://pubs.opengroup.org/onlinepubs/9699919799.2013edition/basedefs/V1_chap04.html#tag_04_12):
 		// symlinks before the last separator in the path must be resolved. Since
 		// the last separator in this case follows the last path element, we should
 		// follow symlinks in the last path element.
-		followSymlinks = true
+		followSurrogates = true
 	}
-	return stat("Lstat", name, followSymlinks)
+	return stat("Lstat", name, followSurrogates)
 }
diff --git a/src/os/str.go b/src/os/str.go
deleted file mode 100644
index 242c945..0000000
--- a/src/os/str.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Simple conversions to avoid depending on strconv.
-
-package os
-
-// itox converts val (an int) to a hexadecimal string.
-func itox(val int) string {
-	if val < 0 {
-		return "-" + uitox(uint(-val))
-	}
-	return uitox(uint(val))
-}
-
-const hex = "0123456789abcdef"
-
-// uitox converts val (a uint) to a hexadecimal string.
-func uitox(val uint) string {
-	if val == 0 { // avoid string allocation
-		return "0x0"
-	}
-	var buf [20]byte // big enough for 64bit value base 16 + 0x
-	i := len(buf) - 1
-	for val >= 16 {
-		q := val / 16
-		buf[i] = hex[val%16]
-		i--
-		val = q
-	}
-	// val < 16
-	buf[i] = hex[val%16]
-	i--
-	buf[i] = 'x'
-	i--
-	buf[i] = '0'
-	return string(buf[i:])
-}
diff --git a/src/os/tempfile.go b/src/os/tempfile.go
index 99f65c6..66c65e6 100644
--- a/src/os/tempfile.go
+++ b/src/os/tempfile.go
@@ -6,17 +6,20 @@
 
 import (
 	"errors"
+	"internal/bytealg"
 	"internal/itoa"
+	_ "unsafe" // for go:linkname
 )
 
-// fastrand provided by runtime.
+// random number source provided by runtime.
 // We generate random temporary file names so that there's a good
 // chance the file doesn't exist yet - keeps the number of tries in
 // TempFile to a minimum.
-func fastrand() uint32
+//go:linkname runtime_rand runtime.rand
+func runtime_rand() uint64
 
 func nextRandom() string {
-	return itoa.Uitoa(uint(fastrand()))
+	return itoa.Uitoa(uint(uint32(runtime_rand())))
 }
 
 // CreateTemp creates a new temporary file in the directory dir,
@@ -62,7 +65,7 @@
 			return "", "", errPatternHasSeparator
 		}
 	}
-	if pos := lastIndex(pattern, '*'); pos != -1 {
+	if pos := bytealg.LastIndexByteString(pattern, '*'); pos != -1 {
 		prefix, suffix = pattern[:pos], pattern[pos+1:]
 	} else {
 		prefix = pattern
@@ -116,13 +119,3 @@
 	}
 	return dir + string(PathSeparator) + name
 }
-
-// lastIndex from the strings package.
-func lastIndex(s string, sep byte) int {
-	for i := len(s) - 1; i >= 0; i-- {
-		if s[i] == sep {
-			return i
-		}
-	}
-	return -1
-}
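The hunk above replaces the local lastIndex helper with the internal bytealg equivalent; the pattern handling itself is unchanged. A hedged sketch of how a CreateTemp pattern is interpreted, using strings.LastIndexByte as a stand-in for the internal helper (the real code also rejects patterns containing path separators):

package main

import (
	"fmt"
	"math/rand"
	"strconv"
	"strings"
)

// prefixAndSuffix splits the pattern at the last '*'; the random portion of
// the file name replaces the '*', or is appended when there is no '*'.
func prefixAndSuffix(pattern string) (prefix, suffix string) {
	if pos := strings.LastIndexByte(pattern, '*'); pos != -1 {
		return pattern[:pos], pattern[pos+1:]
	}
	return pattern, ""
}

func main() {
	prefix, suffix := prefixAndSuffix("upload-*.tmp")
	name := prefix + strconv.FormatUint(uint64(rand.Uint32()), 10) + suffix
	fmt.Println(name) // e.g. upload-2596996162.tmp
}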
diff --git a/src/os/types_windows.go b/src/os/types_windows.go
index 9a3d508..6b9fef6 100644
--- a/src/os/types_windows.go
+++ b/src/os/types_windows.go
@@ -16,7 +16,7 @@
 type fileStat struct {
 	name string
 
-	// from ByHandleFileInformation, Win32FileAttributeData and Win32finddata
+	// from ByHandleFileInformation, Win32FileAttributeData, Win32finddata, and GetFileInformationByHandleEx
 	FileAttributes uint32
 	CreationTime   syscall.Filetime
 	LastAccessTime syscall.Filetime
@@ -24,7 +24,7 @@
 	FileSizeHigh   uint32
 	FileSizeLow    uint32
 
-	// from Win32finddata
+	// from Win32finddata and GetFileInformationByHandleEx
 	ReparseTag uint32
 
 	// what syscall.GetFileType returns
@@ -80,6 +80,40 @@
 	}, nil
 }
 
+// newFileStatFromFileIDBothDirInfo copies all required information
+// from windows.FILE_ID_BOTH_DIR_INFO d into the newly created fileStat.
+func newFileStatFromFileIDBothDirInfo(d *windows.FILE_ID_BOTH_DIR_INFO) *fileStat {
+	// The FILE_ID_BOTH_DIR_INFO MSDN documentation isn't completely correct.
+	// FileAttributes can contain any file attributes that are currently set on the file,
+	// not just the ones documented.
+	// EaSize contains the reparse tag if the file is a reparse point.
+	return &fileStat{
+		FileAttributes: d.FileAttributes,
+		CreationTime:   d.CreationTime,
+		LastAccessTime: d.LastAccessTime,
+		LastWriteTime:  d.LastWriteTime,
+		FileSizeHigh:   uint32(d.EndOfFile >> 32),
+		FileSizeLow:    uint32(d.EndOfFile),
+		ReparseTag:     d.EaSize,
+		idxhi:          uint32(d.FileID >> 32),
+		idxlo:          uint32(d.FileID),
+	}
+}
+
+// newFileStatFromFileFullDirInfo copies all required information
+// from windows.FILE_FULL_DIR_INFO d into the newly created fileStat.
+func newFileStatFromFileFullDirInfo(d *windows.FILE_FULL_DIR_INFO) *fileStat {
+	return &fileStat{
+		FileAttributes: d.FileAttributes,
+		CreationTime:   d.CreationTime,
+		LastAccessTime: d.LastAccessTime,
+		LastWriteTime:  d.LastWriteTime,
+		FileSizeHigh:   uint32(d.EndOfFile >> 32),
+		FileSizeLow:    uint32(d.EndOfFile),
+		ReparseTag:     d.EaSize,
+	}
+}
+
 // newFileStatFromWin32finddata copies all required information
 // from syscall.Win32finddata d into the newly created fileStat.
 func newFileStatFromWin32finddata(d *syscall.Win32finddata) *fileStat {
@@ -101,6 +135,16 @@
 	return fs
 }
 
+// isReparseTagNameSurrogate determines whether a tag's associated
+// reparse point is a surrogate for another named entity (for example, a mounted folder).
+//
+// See https://learn.microsoft.com/en-us/windows/win32/api/winnt/nf-winnt-isreparsetagnamesurrogate
+// and https://learn.microsoft.com/en-us/windows/win32/fileio/reparse-point-tags.
+func (fs *fileStat) isReparseTagNameSurrogate() bool {
+	// True for IO_REPARSE_TAG_SYMLINK and IO_REPARSE_TAG_MOUNT_POINT.
+	return fs.ReparseTag&0x20000000 != 0
+}
+
 func (fs *fileStat) isSymlink() bool {
 	// As of https://go.dev/cl/86556, we treat MOUNT_POINT reparse points as
 	// symlinks because otherwise certain directory junction tests in the
@@ -141,7 +185,23 @@
 		m |= ModeDevice | ModeCharDevice
 	}
 	if fs.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 && m&ModeType == 0 {
-		m |= ModeIrregular
+		if fs.ReparseTag == windows.IO_REPARSE_TAG_DEDUP {
+			// If the Data Deduplication service is enabled on Windows Server, its
+			// Optimization job may convert regular files to IO_REPARSE_TAG_DEDUP
+			// whenever that job runs.
+			//
+			// However, DEDUP reparse points remain similar in most respects to
+			// regular files: they continue to support random-access reads and writes
+			// of persistent data, and they shouldn't add unexpected latency or
+			// unavailability in the way that a network filesystem might.
+			//
+			// Go programs may use ModeIrregular to filter out unusual files (such as
+			// raw device files on Linux, POSIX FIFO special files, and so on), so
+			// to avoid files changing unpredictably from regular to irregular we will
+			// consider DEDUP files to be close enough to regular to treat as such.
+		} else {
+			m |= ModeIrregular
+		}
 	}
 	return m
 }
@@ -171,7 +231,7 @@
 	}
 	var path string
 	if fs.appendNameToPath {
-		path = fs.path + `\` + fs.name
+		path = fixLongPath(fs.path + `\` + fs.name)
 	} else {
 		path = fs.path
 	}
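The isReparseTagNameSurrogate check added earlier in this file boils down to a single bit of the reparse tag. A self-contained sketch (the tag constants below are the standard Windows values, listed here only for illustration):

package main

import "fmt"

// isReparseTagNameSurrogate reports whether bit 0x20000000 of a reparse tag
// is set, marking tags that are surrogates for another named entity.
func isReparseTagNameSurrogate(tag uint32) bool {
	return tag&0x2000_0000 != 0
}

func main() {
	const (
		ioReparseTagMountPoint = 0xA0000003 // junction / mounted folder
		ioReparseTagSymlink    = 0xA000000C
		ioReparseTagDedup      = 0x80000013
	)
	fmt.Println(isReparseTagNameSurrogate(ioReparseTagMountPoint)) // true
	fmt.Println(isReparseTagNameSurrogate(ioReparseTagSymlink))    // true
	fmt.Println(isReparseTagNameSurrogate(ioReparseTagDedup))      // false
}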
diff --git a/src/os/user/cgo_lookup_unix.go b/src/os/user/cgo_lookup_unix.go
index 3735971..402429b 100644
--- a/src/os/user/cgo_lookup_unix.go
+++ b/src/os/user/cgo_lookup_unix.go
@@ -165,8 +165,8 @@
 // retryWithBuffer repeatedly calls f(), increasing the size of the
 // buffer each time, until f succeeds, fails with a non-ERANGE error,
 // or the buffer exceeds a reasonable limit.
-func retryWithBuffer(startSize bufferKind, f func([]byte) syscall.Errno) error {
-	buf := make([]byte, startSize)
+func retryWithBuffer(kind bufferKind, f func([]byte) syscall.Errno) error {
+	buf := make([]byte, kind.initialSize())
 	for {
 		errno := f(buf)
 		if errno == 0 {
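For readers outside the os/user package: retryWithBuffer exists because getpwnam_r-style C calls report ERANGE when the caller's buffer is too small. A hedged, Unix-oriented sketch of that retry loop with an arbitrary illustrative cap:

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// retryWithBuffer calls f with a growing buffer until it succeeds, fails with
// a non-ERANGE errno, or the buffer exceeds a sanity limit.
func retryWithBuffer(initial int, f func([]byte) syscall.Errno) error {
	buf := make([]byte, initial)
	for {
		errno := f(buf)
		switch {
		case errno == 0:
			return nil
		case errno != syscall.ERANGE:
			return errno
		case len(buf) >= 1<<20: // illustrative cap, not the real limit
			return errors.New("buffer required is too large")
		}
		buf = make([]byte, len(buf)*2)
	}
}

func main() {
	calls := 0
	err := retryWithBuffer(64, func(buf []byte) syscall.Errno {
		calls++
		if len(buf) < 256 {
			return syscall.ERANGE // pretend the entry needs 256 bytes
		}
		return 0
	})
	fmt.Println(calls, err) // 3 <nil>
}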
diff --git a/src/os/user/lookup_windows.go b/src/os/user/lookup_windows.go
index e64b8ae..a48fc89 100644
--- a/src/os/user/lookup_windows.go
+++ b/src/os/user/lookup_windows.go
@@ -116,7 +116,7 @@
 	if e != nil {
 		return "", e
 	}
-	// https://msdn.microsoft.com/en-us/library/cc245478.aspx#gt_0387e636-5654-4910-9519-1f8326cf5ec0
+	// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-samr/7b2aeb27-92fc-41f6-8437-deb65d950921#gt_0387e636-5654-4910-9519-1f8326cf5ec0
 	// SidTypeAlias should also be treated as a group type next to SidTypeGroup
 	// and SidTypeWellKnownGroup:
 	// "alias object -> resource group: A group object..."
@@ -145,7 +145,7 @@
 	}
 	var p0 *byte
 	var entriesRead, totalEntries uint32
-	// https://msdn.microsoft.com/en-us/library/windows/desktop/aa370655(v=vs.85).aspx
+	// https://learn.microsoft.com/en-us/windows/win32/api/lmaccess/nf-lmaccess-netusergetlocalgroups
 	// NetUserGetLocalGroups() would return a list of LocalGroupUserInfo0
 	// elements which hold the names of local groups where the user participates.
 	// The list does not follow any sorting order.
@@ -255,7 +255,7 @@
 	//
 	// The correct way to obtain the primary group of a domain user is
 	// probing the user primaryGroupID attribute in the server Active Directory:
-	// https://msdn.microsoft.com/en-us/library/ms679375(v=vs.85).aspx
+	// https://learn.microsoft.com/en-us/windows/win32/adschema/a-primarygroupid
 	//
 	// Note that the primary group of domain users should not be modified
 	// on Windows for performance reasons, even if it's possible to do that.
diff --git a/src/os/wait_waitid.go b/src/os/wait_waitid.go
index c0503b2..cd078f3 100644
--- a/src/os/wait_waitid.go
+++ b/src/os/wait_waitid.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// We used to used this code for Darwin, but according to issue #19314
+// We used to use this code for Darwin, but according to issue #19314
 // waitid returns if the process is stopped, even when using WEXITED.
 
 //go:build linux
diff --git a/src/os/writeto_linux_test.go b/src/os/writeto_linux_test.go
new file mode 100644
index 0000000..5ffab88
--- /dev/null
+++ b/src/os/writeto_linux_test.go
@@ -0,0 +1,171 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os_test
+
+import (
+	"bytes"
+	"internal/poll"
+	"io"
+	"math/rand"
+	"net"
+	. "os"
+	"strconv"
+	"syscall"
+	"testing"
+	"time"
+)
+
+func TestSendFile(t *testing.T) {
+	sizes := []int{
+		1,
+		42,
+		1025,
+		syscall.Getpagesize() + 1,
+		32769,
+	}
+	t.Run("sendfile-to-unix", func(t *testing.T) {
+		for _, size := range sizes {
+			t.Run(strconv.Itoa(size), func(t *testing.T) {
+				testSendFile(t, "unix", int64(size))
+			})
+		}
+	})
+	t.Run("sendfile-to-tcp", func(t *testing.T) {
+		for _, size := range sizes {
+			t.Run(strconv.Itoa(size), func(t *testing.T) {
+				testSendFile(t, "tcp", int64(size))
+			})
+		}
+	})
+}
+
+func testSendFile(t *testing.T, proto string, size int64) {
+	dst, src, recv, data, hook := newSendFileTest(t, proto, size)
+
+	// Now call WriteTo (through io.Copy), which will hopefully call poll.SendFile
+	n, err := io.Copy(dst, src)
+	if err != nil {
+		t.Fatalf("io.Copy error: %v", err)
+	}
+
+	// We should have called poll.SendFile with the right file descriptor arguments.
+	if n > 0 && !hook.called {
+		t.Fatal("expected poll.SendFile to have been called")
+	}
+	if hook.called && hook.srcfd != int(src.Fd()) {
+		t.Fatalf("wrong source file descriptor: got %d, want %d", hook.srcfd, src.Fd())
+	}
+	sc, ok := dst.(syscall.Conn)
+	if !ok {
+		t.Fatalf("destination is not a syscall.Conn")
+	}
+	rc, err := sc.SyscallConn()
+	if err != nil {
+		t.Fatalf("destination SyscallConn error: %v", err)
+	}
+	if err = rc.Control(func(fd uintptr) {
+		if hook.called && hook.dstfd != int(fd) {
+			t.Fatalf("wrong destination file descriptor: got %d, want %d", hook.dstfd, int(fd))
+		}
+	}); err != nil {
+		t.Fatalf("destination Conn Control error: %v", err)
+	}
+
+	// Verify the data size and content.
+	dataSize := len(data)
+	dstData := make([]byte, dataSize)
+	m, err := io.ReadFull(recv, dstData)
+	if err != nil {
+		t.Fatalf("server Conn Read error: %v", err)
+	}
+	if n != int64(dataSize) {
+		t.Fatalf("data length mismatch for io.Copy, got %d, want %d", n, dataSize)
+	}
+	if m != dataSize {
+		t.Fatalf("data length mismatch for net.Conn.Read, got %d, want %d", m, dataSize)
+	}
+	if !bytes.Equal(dstData, data) {
+		t.Errorf("data mismatch, got %s, want %s", dstData, data)
+	}
+}
+
+// newSendFileTest initializes a new test for sendfile.
+//
+// It creates a source file and destination sockets, and populates the source file
+// with random data of the specified size. It also hooks package os' call
+// to poll.SendFile and returns the hook so it can be inspected.
+func newSendFileTest(t *testing.T, proto string, size int64) (net.Conn, *File, net.Conn, []byte, *sendFileHook) {
+	t.Helper()
+
+	hook := hookSendFile(t)
+
+	client, server := createSocketPair(t, proto)
+	tempFile, data := createTempFile(t, size)
+
+	return client, tempFile, server, data, hook
+}
+
+func hookSendFile(t *testing.T) *sendFileHook {
+	h := new(sendFileHook)
+	h.install()
+	t.Cleanup(h.uninstall)
+	return h
+}
+
+type sendFileHook struct {
+	called bool
+	dstfd  int
+	srcfd  int
+	remain int64
+
+	written int64
+	handled bool
+	err     error
+
+	original func(dst *poll.FD, src int, remain int64) (int64, error, bool)
+}
+
+func (h *sendFileHook) install() {
+	h.original = *PollSendFile
+	*PollSendFile = func(dst *poll.FD, src int, remain int64) (int64, error, bool) {
+		h.called = true
+		h.dstfd = dst.Sysfd
+		h.srcfd = src
+		h.remain = remain
+		h.written, h.err, h.handled = h.original(dst, src, remain)
+		return h.written, h.err, h.handled
+	}
+}
+
+func (h *sendFileHook) uninstall() {
+	*PollSendFile = h.original
+}
+
+func createTempFile(t *testing.T, size int64) (*File, []byte) {
+	f, err := CreateTemp(t.TempDir(), "writeto-sendfile-to-socket")
+	if err != nil {
+		t.Fatalf("failed to create temporary file: %v", err)
+	}
+	t.Cleanup(func() {
+		f.Close()
+	})
+
+	randSeed := time.Now().Unix()
+	t.Logf("random data seed: %d\n", randSeed)
+	prng := rand.New(rand.NewSource(randSeed))
+	data := make([]byte, size)
+	prng.Read(data)
+	if _, err := f.Write(data); err != nil {
+		t.Fatalf("failed to create and feed the file: %v", err)
+	}
+	if err := f.Sync(); err != nil {
+		t.Fatalf("failed to save the file: %v", err)
+	}
+	if _, err := f.Seek(0, io.SeekStart); err != nil {
+		t.Fatalf("failed to rewind the file: %v", err)
+	}
+
+	return f, data
+}
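The sendFileHook above is an instance of a common testing pattern: production code calls through a package-level function variable, and a test swaps that variable for a recording wrapper, restoring it in t.Cleanup. A hedged, generic sketch (in real code the variable lives in the production package and the hook in a test file; everything here is in one file for brevity):

package example

import "testing"

// transfer is the indirection point; production code would assign the real
// syscall wrapper, and tests may replace it.
var transfer = func(dst, src int, n int64) (int64, error) {
	return n, nil // stand-in implementation
}

type transferHook struct {
	called   bool
	dst, src int
	original func(dst, src int, n int64) (int64, error)
}

// hookTransfer installs a recording wrapper and restores the original when
// the test finishes.
func hookTransfer(t *testing.T) *transferHook {
	h := &transferHook{original: transfer}
	transfer = func(dst, src int, n int64) (int64, error) {
		h.called, h.dst, h.src = true, dst, src
		return h.original(dst, src, n)
	}
	t.Cleanup(func() { transfer = h.original })
	return h
}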
diff --git a/src/os/zero_copy_linux.go b/src/os/zero_copy_linux.go
new file mode 100644
index 0000000..7c45aef
--- /dev/null
+++ b/src/os/zero_copy_linux.go
@@ -0,0 +1,167 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+import (
+	"internal/poll"
+	"io"
+	"syscall"
+)
+
+var (
+	pollCopyFileRange = poll.CopyFileRange
+	pollSplice        = poll.Splice
+	pollSendFile      = poll.SendFile
+)
+
+func (f *File) writeTo(w io.Writer) (written int64, handled bool, err error) {
+	pfd, network := getPollFDAndNetwork(w)
+	// TODO(panjf2000): same as File.spliceToFile.
+	if pfd == nil || !pfd.IsStream || !isUnixOrTCP(string(network)) {
+		return
+	}
+
+	sc, err := f.SyscallConn()
+	if err != nil {
+		return
+	}
+
+	rerr := sc.Read(func(fd uintptr) (done bool) {
+		written, err, handled = pollSendFile(pfd, int(fd), 1<<63-1)
+		return true
+	})
+
+	if err == nil {
+		err = rerr
+	}
+
+	return written, handled, wrapSyscallError("sendfile", err)
+}
+
+func (f *File) readFrom(r io.Reader) (written int64, handled bool, err error) {
+	// Neither copy_file_range(2) nor splice(2) supports destinations opened with
+	// O_APPEND, so don't bother to try zero-copy with these system calls.
+	//
+	// Visit https://man7.org/linux/man-pages/man2/copy_file_range.2.html#ERRORS and
+	// https://man7.org/linux/man-pages/man2/splice.2.html#ERRORS for details.
+	if f.appendMode {
+		return 0, false, nil
+	}
+
+	written, handled, err = f.copyFileRange(r)
+	if handled {
+		return
+	}
+	return f.spliceToFile(r)
+}
+
+func (f *File) spliceToFile(r io.Reader) (written int64, handled bool, err error) {
+	var (
+		remain int64
+		lr     *io.LimitedReader
+	)
+	if lr, r, remain = tryLimitedReader(r); remain <= 0 {
+		return 0, true, nil
+	}
+
+	pfd, _ := getPollFDAndNetwork(r)
+	// TODO(panjf2000): run some tests to see if we should unlock the non-streams for splice.
+	// Streams benefit the most from splice(2): non-streams are not even supported in old kernels,
+	// where splice(2) just returns EINVAL; newer kernels support non-streams like UDP, but it is
+	// doubtful that splice(2) helps them, because they usually send small individual frames and
+	// one splice call would move only one frame.
+	// splice(2) is suited to large transfers, and that fragmentation defeats its advantage here.
+	// Therefore, don't bother to try splice if r is not a streaming descriptor.
+	if pfd == nil || !pfd.IsStream {
+		return
+	}
+
+	var syscallName string
+	written, handled, syscallName, err = pollSplice(&f.pfd, pfd, remain)
+
+	if lr != nil {
+		lr.N = remain - written
+	}
+
+	return written, handled, wrapSyscallError(syscallName, err)
+}
+
+func (f *File) copyFileRange(r io.Reader) (written int64, handled bool, err error) {
+	var (
+		remain int64
+		lr     *io.LimitedReader
+	)
+	if lr, r, remain = tryLimitedReader(r); remain <= 0 {
+		return 0, true, nil
+	}
+
+	var src *File
+	switch v := r.(type) {
+	case *File:
+		src = v
+	case fileWithoutWriteTo:
+		src = v.File
+	default:
+		return 0, false, nil
+	}
+
+	if src.checkValid("ReadFrom") != nil {
+		// Avoid returning the error as we report handled as false,
+		// leave further error handling as the responsibility of the caller.
+		return 0, false, nil
+	}
+
+	written, handled, err = pollCopyFileRange(&f.pfd, &src.pfd, remain)
+	if lr != nil {
+		lr.N -= written
+	}
+	return written, handled, wrapSyscallError("copy_file_range", err)
+}
+
+// getPollFDAndNetwork tries to get the poll.FD and network type from the given interface
+// by expecting the underlying type of i to be the implementation of syscall.Conn
+// that contains a *net.rawConn.
+func getPollFDAndNetwork(i any) (*poll.FD, poll.String) {
+	sc, ok := i.(syscall.Conn)
+	if !ok {
+		return nil, ""
+	}
+	rc, err := sc.SyscallConn()
+	if err != nil {
+		return nil, ""
+	}
+	irc, ok := rc.(interface {
+		PollFD() *poll.FD
+		Network() poll.String
+	})
+	if !ok {
+		return nil, ""
+	}
+	return irc.PollFD(), irc.Network()
+}
+
+// tryLimitedReader tries to assert the io.Reader to an *io.LimitedReader.
+// If the assertion succeeds, it returns the *io.LimitedReader, the underlying io.Reader,
+// and the remaining number of bytes; otherwise it returns the original io.Reader and a
+// theoretically unlimited remaining number of bytes.
+func tryLimitedReader(r io.Reader) (*io.LimitedReader, io.Reader, int64) {
+	var remain int64 = 1<<63 - 1 // by default, copy until EOF
+
+	lr, ok := r.(*io.LimitedReader)
+	if !ok {
+		return nil, r, remain
+	}
+
+	remain = lr.N
+	return lr, lr.R, remain
+}
+
+func isUnixOrTCP(network string) bool {
+	switch network {
+	case "tcp", "tcp4", "tcp6", "unix":
+		return true
+	default:
+		return false
+	}
+}
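
The readFrom path above tries copy_file_range(2) first and falls back to splice(2), honoring an io.LimitedReader cap via tryLimitedReader. Below is a minimal, self-contained sketch of that limit-detection pattern; tryLimit and the sample reader are illustrative stand-ins, not the unexported helpers above.

package main

import (
	"fmt"
	"io"
	"strings"
)

// tryLimit mirrors the idea of tryLimitedReader: unwrap an *io.LimitedReader
// so a zero-copy path knows how many bytes remain to be transferred.
func tryLimit(r io.Reader) (lr *io.LimitedReader, inner io.Reader, remain int64) {
	remain = 1<<63 - 1 // no limit: copy until EOF
	if l, ok := r.(*io.LimitedReader); ok {
		return l, l.R, l.N
	}
	return nil, r, remain
}

func main() {
	src := &io.LimitedReader{R: strings.NewReader("hello world"), N: 5}
	lr, inner, remain := tryLimit(src)
	fmt.Println(remain) // 5

	// After copying, the caller adjusts lr.N, as copyFileRange above does.
	n, _ := io.CopyN(io.Discard, inner, remain)
	if lr != nil {
		lr.N -= n
	}
	fmt.Println(lr.N) // 0
}
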
diff --git a/src/os/zero_copy_stub.go b/src/os/zero_copy_stub.go
new file mode 100644
index 0000000..9ec5808
--- /dev/null
+++ b/src/os/zero_copy_stub.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux
+
+package os
+
+import "io"
+
+func (f *File) writeTo(w io.Writer) (written int64, handled bool, err error) {
+	return 0, false, nil
+}
+
+func (f *File) readFrom(r io.Reader) (n int64, handled bool, err error) {
+	return 0, false, nil
+}
diff --git a/src/path/filepath/match.go b/src/path/filepath/match.go
index b5cc4b8..12f0bfa 100644
--- a/src/path/filepath/match.go
+++ b/src/path/filepath/match.go
@@ -35,7 +35,7 @@
 //		lo '-' hi   matches character c for lo <= c <= hi
 //
 // Match requires pattern to match all of name, not just a substring.
-// The only possible returned error is ErrBadPattern, when pattern
+// The only possible returned error is [ErrBadPattern], when pattern
 // is malformed.
 //
 // On Windows, escaping is disabled. Instead, '\\' is treated as
@@ -233,11 +233,11 @@
 
 // Glob returns the names of all files matching pattern or nil
 // if there is no matching file. The syntax of patterns is the same
-// as in Match. The pattern may describe hierarchical names such as
-// /usr/*/bin/ed (assuming the Separator is '/').
+// as in [Match]. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the [Separator] is '/').
 //
 // Glob ignores file system errors such as I/O errors reading directories.
-// The only possible returned error is ErrBadPattern, when pattern
+// The only possible returned error is [ErrBadPattern], when pattern
 // is malformed.
 func Glob(pattern string) (matches []string, err error) {
 	return globWithLimit(pattern, 0)
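
A small usage sketch of the Match/Glob contract documented above; the patterns are illustrative.

package main

import (
	"errors"
	"fmt"
	"path/filepath"
)

func main() {
	// Match must cover the whole name, not just a substring.
	ok, err := filepath.Match("*.go", "main.go")
	fmt.Println(ok, err) // true <nil>

	// A malformed pattern yields ErrBadPattern, the only possible error.
	_, err = filepath.Match("[", "main.go")
	fmt.Println(errors.Is(err, filepath.ErrBadPattern)) // true

	// Glob accepts hierarchical patterns and ignores I/O errors while reading directories.
	matches, err := filepath.Glob("/usr/*/bin/ed")
	fmt.Println(matches, err)
}
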
diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go
index 3bf3ff6..2af0f5b 100644
--- a/src/path/filepath/path.go
+++ b/src/path/filepath/path.go
@@ -15,6 +15,7 @@
 	"errors"
 	"io/fs"
 	"os"
+	"slices"
 	"sort"
 	"strings"
 )
@@ -52,7 +53,7 @@
 }
 
 func (b *lazybuf) prepend(prefix ...byte) {
-	b.buf = append(prefix, b.buf...)
+	b.buf = slices.Insert(b.buf, 0, prefix...)
 	b.w += len(prefix)
 }
 
@@ -72,7 +73,7 @@
 // by purely lexical processing. It applies the following rules
 // iteratively until no further processing can be done:
 //
-//  1. Replace multiple Separator elements with a single one.
+//  1. Replace multiple [Separator] elements with a single one.
 //  2. Eliminate each . path name element (the current directory).
 //  3. Eliminate each inner .. path name element (the parent directory)
 //     along with the non-.. element that precedes it.
@@ -230,7 +231,7 @@
 	return strings.ReplaceAll(path, "/", string(Separator))
 }
 
-// SplitList splits a list of paths joined by the OS-specific ListSeparator,
+// SplitList splits a list of paths joined by the OS-specific [ListSeparator],
 // usually found in PATH or GOPATH environment variables.
 // Unlike strings.Split, SplitList returns an empty slice when passed an empty
 // string.
@@ -238,7 +239,7 @@
 	return splitList(path)
 }
 
-// Split splits path immediately following the final Separator,
+// Split splits path immediately following the final [Separator],
 // separating it into a directory and file name component.
 // If there is no Separator in path, Split returns an empty dir
 // and file set to path.
@@ -253,7 +254,7 @@
 }
 
 // Join joins any number of path elements into a single path,
-// separating them with an OS specific Separator. Empty elements
+// separating them with an OS specific [Separator]. Empty elements
 // are ignored. The result is Cleaned. However, if the argument
 // list is empty or all its elements are empty, Join returns
 // an empty string.
@@ -280,7 +281,7 @@
 // links.
 // If path is relative the result will be relative to the current directory,
 // unless one of the components is an absolute symbolic link.
-// EvalSymlinks calls Clean on the result.
+// EvalSymlinks calls [Clean] on the result.
 func EvalSymlinks(path string) (string, error) {
 	return evalSymlinks(path)
 }
@@ -289,7 +290,7 @@
 // If the path is not absolute it will be joined with the current
 // working directory to turn it into an absolute path. The absolute
 // path name for a given file is not guaranteed to be unique.
-// Abs calls Clean on the result.
+// Abs calls [Clean] on the result.
 func Abs(path string) (string, error) {
 	return abs(path)
 }
@@ -307,12 +308,12 @@
 
 // Rel returns a relative path that is lexically equivalent to targpath when
 // joined to basepath with an intervening separator. That is,
-// Join(basepath, Rel(basepath, targpath)) is equivalent to targpath itself.
+// [Join](basepath, Rel(basepath, targpath)) is equivalent to targpath itself.
 // On success, the returned path will always be relative to basepath,
 // even if basepath and targpath share no elements.
 // An error is returned if targpath can't be made relative to basepath or if
 // knowing the current working directory would be necessary to compute it.
-// Rel calls Clean on the result.
+// Rel calls [Clean] on the result.
 func Rel(basepath, targpath string) (string, error) {
 	baseVol := VolumeName(basepath)
 	targVol := VolumeName(targpath)
@@ -385,17 +386,17 @@
 	return targ[t0:], nil
 }
 
-// SkipDir is used as a return value from WalkFuncs to indicate that
+// SkipDir is used as a return value from [WalkFunc] to indicate that
 // the directory named in the call is to be skipped. It is not returned
 // as an error by any function.
 var SkipDir error = fs.SkipDir
 
-// SkipAll is used as a return value from WalkFuncs to indicate that
+// SkipAll is used as a return value from [WalkFunc] to indicate that
 // all remaining files and directories are to be skipped. It is not returned
 // as an error by any function.
 var SkipAll error = fs.SkipAll
 
-// WalkFunc is the type of the function called by Walk to visit each
+// WalkFunc is the type of the function called by [Walk] to visit each
 // file or directory.
 //
 // The path argument contains the argument to Walk as a prefix.
@@ -411,9 +412,9 @@
 // The info argument is the fs.FileInfo for the named path.
 //
 // The error result returned by the function controls how Walk continues.
-// If the function returns the special value SkipDir, Walk skips the
+// If the function returns the special value [SkipDir], Walk skips the
 // current directory (path if info.IsDir() is true, otherwise path's
-// parent directory). If the function returns the special value SkipAll,
+// parent directory). If the function returns the special value [SkipAll],
 // Walk skips all remaining files and directories. Otherwise, if the function
 // returns a non-nil error, Walk stops entirely and returns that error.
 //
@@ -424,14 +425,14 @@
 //
 // Walk calls the function with a non-nil err argument in two cases.
 //
-// First, if an os.Lstat on the root directory or any directory or file
+// First, if an [os.Lstat] on the root directory or any directory or file
 // in the tree fails, Walk calls the function with path set to that
 // directory or file's path, info set to nil, and err set to the error
 // from os.Lstat.
 //
 // Second, if a directory's Readdirnames method fails, Walk calls the
 // function with path set to the directory's path, info, set to an
-// fs.FileInfo describing the directory, and err set to the error from
+// [fs.FileInfo] describing the directory, and err set to the error from
 // Readdirnames.
 type WalkFunc func(path string, info fs.FileInfo, err error) error
 
@@ -447,7 +448,7 @@
 		return err
 	}
 
-	dirs, err := readDir(path)
+	dirs, err := os.ReadDir(path)
 	if err != nil {
 		// Second call, to report ReadDir error.
 		err = walkDirFn(path, d, err)
@@ -513,7 +514,7 @@
 // directory in the tree, including root.
 //
 // All errors that arise visiting files and directories are filtered by fn:
-// see the fs.WalkDirFunc documentation for details.
+// see the [fs.WalkDirFunc] documentation for details.
 //
 // The files are walked in lexical order, which makes the output deterministic
 // but requires WalkDir to read an entire directory into memory before proceeding
@@ -529,7 +530,7 @@
 	if err != nil {
 		err = fn(root, nil, err)
 	} else {
-		err = walkDir(root, &statDirEntry{info}, fn)
+		err = walkDir(root, fs.FileInfoToDirEntry(info), fn)
 	}
 	if err == SkipDir || err == SkipAll {
 		return nil
@@ -537,24 +538,11 @@
 	return err
 }
 
-type statDirEntry struct {
-	info fs.FileInfo
-}
-
-func (d *statDirEntry) Name() string               { return d.info.Name() }
-func (d *statDirEntry) IsDir() bool                { return d.info.IsDir() }
-func (d *statDirEntry) Type() fs.FileMode          { return d.info.Mode().Type() }
-func (d *statDirEntry) Info() (fs.FileInfo, error) { return d.info, nil }
-
-func (d *statDirEntry) String() string {
-	return fs.FormatDirEntry(d)
-}
-
 // Walk walks the file tree rooted at root, calling fn for each file or
 // directory in the tree, including root.
 //
 // All errors that arise visiting files and directories are filtered by fn:
-// see the WalkFunc documentation for details.
+// see the [WalkFunc] documentation for details.
 //
 // The files are walked in lexical order, which makes the output deterministic
 // but requires Walk to read an entire directory into memory before proceeding
@@ -562,7 +550,7 @@
 //
 // Walk does not follow symbolic links.
 //
-// Walk is less efficient than WalkDir, introduced in Go 1.16,
+// Walk is less efficient than [WalkDir], introduced in Go 1.16,
 // which avoids calling os.Lstat on every visited file or directory.
 func Walk(root string, fn WalkFunc) error {
 	info, err := os.Lstat(root)
@@ -577,22 +565,6 @@
 	return err
 }
 
-// readDir reads the directory named by dirname and returns
-// a sorted list of directory entries.
-func readDir(dirname string) ([]fs.DirEntry, error) {
-	f, err := os.Open(dirname)
-	if err != nil {
-		return nil, err
-	}
-	dirs, err := f.ReadDir(-1)
-	f.Close()
-	if err != nil {
-		return nil, err
-	}
-	sort.Slice(dirs, func(i, j int) bool { return dirs[i].Name() < dirs[j].Name() })
-	return dirs, nil
-}
-
 // readDirNames reads the directory named by dirname and returns
 // a sorted list of directory entry names.
 func readDirNames(dirname string) ([]string, error) {
@@ -639,7 +611,7 @@
 }
 
 // Dir returns all but the last element of path, typically the path's directory.
-// After dropping the final element, Dir calls Clean on the path and trailing
+// After dropping the final element, Dir calls [Clean] on the path and trailing
 // slashes are removed.
 // If the path is empty, Dir returns ".".
 // If the path consists entirely of separators, Dir returns a single separator.
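
A short sketch of the Walk/WalkDir contract documented above, using fs.SkipDir and the fs.FileInfoToDirEntry adapter that replaces the removed statDirEntry type; the root and the skipped directory name are illustrative.

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

func main() {
	root := "." // illustrative root

	// WalkDir is the preferred API: it avoids an os.Lstat call per visited entry.
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err // Lstat/ReadDir failure for this path
		}
		if d.IsDir() && d.Name() == ".git" {
			return fs.SkipDir // skip this directory, keep walking elsewhere
		}
		fmt.Println(path)
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}

	// fs.FileInfoToDirEntry adapts an fs.FileInfo to an fs.DirEntry,
	// which is how the code above now bridges Walk-style callbacks into walkDir.
	if info, err := os.Lstat(root); err == nil {
		de := fs.FileInfoToDirEntry(info)
		fmt.Println(de.Name(), de.IsDir())
	}
}
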
diff --git a/src/path/filepath/path_test.go b/src/path/filepath/path_test.go
index a646618..ed39908 100644
--- a/src/path/filepath/path_test.go
+++ b/src/path/filepath/path_test.go
@@ -109,6 +109,8 @@
 	{`//abc`, `\\abc`},
 	{`///abc`, `\\\abc`},
 	{`//abc//`, `\\abc\\`},
+	{`\\?\C:\`, `\\?\C:\`},
+	{`\\?\C:\a`, `\\?\C:\a`},
 
 	// Don't allow cleaning to move an element with a colon to the start of the path.
 	{`a/../c:`, `.\c:`},
@@ -584,25 +586,12 @@
 func TestWalk(t *testing.T) {
 	walk := func(root string, fn fs.WalkDirFunc) error {
 		return filepath.Walk(root, func(path string, info fs.FileInfo, err error) error {
-			return fn(path, &statDirEntry{info}, err)
+			return fn(path, fs.FileInfoToDirEntry(info), err)
 		})
 	}
 	testWalk(t, walk, 1)
 }
 
-type statDirEntry struct {
-	info fs.FileInfo
-}
-
-func (d *statDirEntry) Name() string               { return d.info.Name() }
-func (d *statDirEntry) IsDir() bool                { return d.info.IsDir() }
-func (d *statDirEntry) Type() fs.FileMode          { return d.info.Mode().Type() }
-func (d *statDirEntry) Info() (fs.FileInfo, error) { return d.info, nil }
-
-func (d *statDirEntry) String() string {
-	return fs.FormatDirEntry(d)
-}
-
 func TestWalkDir(t *testing.T) {
 	testWalk(t, filepath.WalkDir, 2)
 }
@@ -1610,10 +1599,13 @@
 	{`//.`, `\\.`},
 	{`//./`, `\\.\`},
 	{`//./NUL`, `\\.\NUL`},
-	{`//?/`, `\\?`},
+	{`//?`, `\\?`},
+	{`//?/`, `\\?\`},
+	{`//?/NUL`, `\\?\NUL`},
+	{`/??`, `\??`},
+	{`/??/`, `\??\`},
+	{`/??/NUL`, `\??\NUL`},
 	{`//./a/b`, `\\.\a`},
-	{`//?/`, `\\?`},
-	{`//?/`, `\\?`},
 	{`//./C:`, `\\.\C:`},
 	{`//./C:/`, `\\.\C:`},
 	{`//./C:/a/b/c`, `\\.\C:`},
@@ -1622,8 +1614,8 @@
 	{`//./UNC/host\`, `\\.\UNC\host\`},
 	{`//./UNC`, `\\.\UNC`},
 	{`//./UNC/`, `\\.\UNC\`},
-	{`\\?\x`, `\\?`},
-	{`\??\x`, `\??`},
+	{`\\?\x`, `\\?\x`},
+	{`\??\x`, `\??\x`},
 }
 
 func TestVolumeName(t *testing.T) {
diff --git a/src/path/filepath/path_windows.go b/src/path/filepath/path_windows.go
index c490424..eacab0e 100644
--- a/src/path/filepath/path_windows.go
+++ b/src/path/filepath/path_windows.go
@@ -102,12 +102,14 @@
 		// \\.\unc\a\b\..\c into \\.\unc\a\c.
 		return uncLen(path, len(`\\.\UNC\`))
 
-	case pathHasPrefixFold(path, `\\.`):
-		// Path starts with \\., and is a Local Device path.
+	case pathHasPrefixFold(path, `\\.`) ||
+		pathHasPrefixFold(path, `\\?`) || pathHasPrefixFold(path, `\??`):
+		// Path starts with \\.\, and is a Local Device path; or
+		// path starts with \\?\ or \??\ and is a Root Local Device path.
 		//
-		// We currently treat the next component after the \\.\ prefix
-		// as part of the volume name, although there doesn't seem to be
-		// a principled reason to do this.
+		// We treat the next component after the \\.\ prefix as
+		// part of the volume name, which means Clean(`\\?\c:\`)
+		// won't remove the trailing \. (See #64028.)
 		if len(path) == 3 {
 			return 3 // exactly \\.
 		}
@@ -117,14 +119,6 @@
 		}
 		return len(path) - len(rest) - 1
 
-	case pathHasPrefixFold(path, `\\?`) || pathHasPrefixFold(path, `\??`):
-		// Path starts with \\?\ or \??\, and is a Root Local Device path.
-		//
-		// While Windows usually treats / and \ as equivalent,
-		// /??/ does not seem to be recognized as a Root Local Device path.
-		// We treat it as one anyway here to be safe.
-		return 3
-
 	case len(path) >= 2 && isSlash(path[1]):
 		// Path starts with \\, and is a UNC path.
 		return uncLen(path, 2)
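
To illustrate the Root Local Device handling above, a sketch with expected outputs taken from the updated tests in path_test.go; this is Windows-only behavior, since on other platforms backslashes are ordinary path characters.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// \\?\ and \??\ prefixes now keep their next component as part of the
	// volume name, so Clean no longer strips the trailing separator (#64028).
	fmt.Println(filepath.Clean(`\\?\C:\`))  // \\?\C:\
	fmt.Println(filepath.Clean(`\\?\C:\a`)) // \\?\C:\a

	fmt.Println(filepath.VolumeName(`\\?\x`)) // \\?\x
	fmt.Println(filepath.VolumeName(`\??\x`)) // \??\x
}
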
diff --git a/src/path/match.go b/src/path/match.go
index 673bbc7..d8b6809 100644
--- a/src/path/match.go
+++ b/src/path/match.go
@@ -32,7 +32,7 @@
 //		lo '-' hi   matches character c for lo <= c <= hi
 //
 // Match requires pattern to match all of name, not just a substring.
-// The only possible returned error is ErrBadPattern, when pattern
+// The only possible returned error is [ErrBadPattern], when pattern
 // is malformed.
 func Match(pattern, name string) (matched bool, err error) {
 Pattern:
diff --git a/src/path/path.go b/src/path/path.go
index 547b9de..5149a92 100644
--- a/src/path/path.go
+++ b/src/path/path.go
@@ -8,9 +8,11 @@
 // The path package should only be used for paths separated by forward
 // slashes, such as the paths in URLs. This package does not deal with
 // Windows paths with drive letters or backslashes; to manipulate
-// operating system paths, use the path/filepath package.
+// operating system paths, use the [path/filepath] package.
 package path
 
+import "internal/bytealg"
+
 // A lazybuf is a lazily constructed path buffer.
 // It supports append, reading previously appended bytes,
 // and retrieving the final string. It does not allocate a buffer
@@ -135,22 +137,13 @@
 	return out.string()
 }
 
-// lastSlash(s) is strings.LastIndex(s, "/") but we can't import strings.
-func lastSlash(s string) int {
-	i := len(s) - 1
-	for i >= 0 && s[i] != '/' {
-		i--
-	}
-	return i
-}
-
 // Split splits path immediately following the final slash,
 // separating it into a directory and file name component.
 // If there is no slash in path, Split returns an empty dir and
 // file set to path.
 // The returned values have the property that path = dir+file.
 func Split(path string) (dir, file string) {
-	i := lastSlash(path)
+	i := bytealg.LastIndexByteString(path, '/')
 	return path[:i+1], path[i+1:]
 }
 
@@ -205,7 +198,7 @@
 		path = path[0 : len(path)-1]
 	}
 	// Find the last element
-	if i := lastSlash(path); i >= 0 {
+	if i := bytealg.LastIndexByteString(path, '/'); i >= 0 {
 		path = path[i+1:]
 	}
 	// If empty now, it had only slashes.
@@ -221,7 +214,7 @@
 }
 
 // Dir returns all but the last element of path, typically the path's directory.
-// After dropping the final element using Split, the path is Cleaned and trailing
+// After dropping the final element using [Split], the path is Cleaned and trailing
 // slashes are removed.
 // If the path is empty, Dir returns ".".
 // If the path consists entirely of slashes followed by non-slash bytes, Dir
diff --git a/src/plugin/plugin.go b/src/plugin/plugin.go
index 187d127..b4b1697 100644
--- a/src/plugin/plugin.go
+++ b/src/plugin/plugin.go
@@ -74,7 +74,7 @@
 }
 
 // Open opens a Go plugin.
-// If a path has already been opened, then the existing *Plugin is returned.
+// If a path has already been opened, then the existing *[Plugin] is returned.
 // It is safe for concurrent use by multiple goroutines.
 func Open(path string) (*Plugin, error) {
 	return open(path)
@@ -100,7 +100,7 @@
 //
 //	func F() { fmt.Printf("Hello, number %d\n", V) }
 //
-// may be loaded with the Open function and then the exported package
+// may be loaded with the [Open] function and then the exported package
 // symbols V and F can be accessed
 //
 //	p, err := plugin.Open("plugin_name.so")
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 31f6416..e77537c 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -1396,6 +1396,11 @@
 	NotNil(fi, t)
 }
 
+func setField[S, V any](in S, offset uintptr, value V) (out S) {
+	*(*V)(unsafe.Add(unsafe.Pointer(&in), offset)) = value
+	return in
+}
+
 func TestIsZero(t *testing.T) {
 	for i, tt := range []struct {
 		x    any
@@ -1429,14 +1434,14 @@
 		{float32(1.2), false},
 		{float64(0), true},
 		{float64(1.2), false},
-		{math.Copysign(0, -1), false},
+		{math.Copysign(0, -1), true},
 		{complex64(0), true},
 		{complex64(1.2), false},
 		{complex128(0), true},
 		{complex128(1.2), false},
-		{complex(math.Copysign(0, -1), 0), false},
-		{complex(0, math.Copysign(0, -1)), false},
-		{complex(math.Copysign(0, -1), math.Copysign(0, -1)), false},
+		{complex(math.Copysign(0, -1), 0), true},
+		{complex(0, math.Copysign(0, -1)), true},
+		{complex(math.Copysign(0, -1), math.Copysign(0, -1)), true},
 		{uintptr(0), true},
 		{uintptr(128), false},
 		// Array
@@ -1449,6 +1454,8 @@
 		{[3][]int{{1}}, false},                  // incomparable array
 		{[1 << 12]byte{}, true},
 		{[1 << 12]byte{1}, false},
+		{[1]struct{ p *int }{}, true},
+		{[1]struct{ p *int }{{new(int)}}, false},
 		{[3]Value{}, true},
 		{[3]Value{{}, ValueOf(0), {}}, false},
 		// Chan
@@ -1485,6 +1492,20 @@
 		{struct{ s []int }{[]int{1}}, false},  // incomparable struct
 		{struct{ Value }{}, true},
 		{struct{ Value }{ValueOf(0)}, false},
+		{struct{ _, a, _ uintptr }{}, true}, // comparable struct with blank fields
+		{setField(struct{ _, a, _ uintptr }{}, 0*unsafe.Sizeof(uintptr(0)), 1), true},
+		{setField(struct{ _, a, _ uintptr }{}, 1*unsafe.Sizeof(uintptr(0)), 1), false},
+		{setField(struct{ _, a, _ uintptr }{}, 2*unsafe.Sizeof(uintptr(0)), 1), true},
+		{struct{ _, a, _ func() }{}, true}, // incomparable struct with blank fields
+		{setField(struct{ _, a, _ func() }{}, 0*unsafe.Sizeof((func())(nil)), func() {}), true},
+		{setField(struct{ _, a, _ func() }{}, 1*unsafe.Sizeof((func())(nil)), func() {}), false},
+		{setField(struct{ _, a, _ func() }{}, 2*unsafe.Sizeof((func())(nil)), func() {}), true},
+		{struct{ a [256]S }{}, true},
+		{struct{ a [256]S }{a: [256]S{2: {i1: 1}}}, false},
+		{struct{ a [256]float32 }{}, true},
+		{struct{ a [256]float32 }{a: [256]float32{2: 1.0}}, false},
+		{struct{ _, a [256]S }{}, true},
+		{setField(struct{ _, a [256]S }{}, 0*unsafe.Sizeof(int64(0)), int64(1)), true},
 		// UnsafePointer
 		{(unsafe.Pointer)(nil), true},
 		{(unsafe.Pointer)(new(int)), false},
@@ -1523,6 +1544,15 @@
 	}()
 }
 
+func TestInternalIsZero(t *testing.T) {
+	b := make([]byte, 512)
+	for a := 0; a < 8; a++ {
+		for i := 1; i <= 512-a; i++ {
+			InternalIsZero(b[a : a+i])
+		}
+	}
+}
+
 func TestInterfaceExtraction(t *testing.T) {
 	var s struct {
 		W io.Writer
@@ -1706,6 +1736,12 @@
 		if i, ok := cv.Recv(); i.Int() != 0 || ok {
 			t.Errorf("after close Recv %d, %t", i.Int(), ok)
 		}
+		// Closing a read-only channel
+		shouldPanic("", func() {
+			c := make(<-chan int, 1)
+			cv := ValueOf(c)
+			cv.Close()
+		})
 	}
 
 	// check creation of unbuffered channel
@@ -3473,16 +3509,24 @@
 		var i any
 		var v Value
 
-		// We can uncomment this when compiler escape analysis
-		// is good enough to see that the integer assigned to i
-		// does not escape and therefore need not be allocated.
-		//
-		// i = 42 + j
-		// v = ValueOf(i)
-		// if int(v.Int()) != 42+j {
-		// 	panic("wrong int")
-		// }
-
+		i = 42 + j
+		v = ValueOf(i)
+		if int(v.Int()) != 42+j {
+			panic("wrong int")
+		}
+	})
+	noAlloc(t, 100, func(j int) {
+		var i any
+		var v Value
+		i = [3]int{j, j, j}
+		v = ValueOf(i)
+		if v.Len() != 3 {
+			panic("wrong length")
+		}
+	})
+	noAlloc(t, 100, func(j int) {
+		var i any
+		var v Value
 		i = func(j int) int { return j }
 		v = ValueOf(i)
 		if v.Interface().(func(int) int)(j) != j {
@@ -4711,7 +4755,7 @@
 	// Converting a slice to non-empty array needs to return
 	// a non-addressable copy of the original memory.
 	if v.CanAddr() {
-		t.Fatalf("convert slice to non-empty array returns a addressable copy array")
+		t.Fatalf("convert slice to non-empty array returns an addressable copy array")
 	}
 	for i := range s {
 		ov.Index(i).Set(ValueOf(i + 1))
@@ -7011,10 +7055,18 @@
 	// e.g. with rep(2, lit(1, 0)).
 	bits = trimBitmap(bits)
 
-	if !bytes.Equal(heapBits, bits) {
-		_, _, line, _ := runtime.Caller(1)
-		t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits)
+	if bytes.HasPrefix(heapBits, bits) {
+		// Just the prefix matching is OK.
+		//
+		// The Go runtime's pointer/scalar iterator generates pointers beyond
+		// the size of the type, up to the size of the size class. This space
+		// is safe for the GC to scan since it's zero, and GCBits checks to
+		// make sure that's true. But we need to handle the fact that the bitmap
+		// may be larger than we expect.
+		return
 	}
+	_, _, line, _ := runtime.Caller(1)
+	t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits)
 }
 
 func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) {
@@ -7023,15 +7075,20 @@
 	// repeat a bitmap for a small array or executing a repeat in
 	// a GC program.
 	val := MakeSlice(typ, 0, cap)
-	data := NewAt(ArrayOf(cap, typ), val.UnsafePointer())
+	data := NewAt(typ.Elem(), val.UnsafePointer())
 	heapBits := GCBits(data.Interface())
 	// Repeat the bitmap for the slice size, trimming scalars in
 	// the last element.
 	bits = trimBitmap(rep(cap, bits))
-	if !bytes.Equal(heapBits, bits) {
-		_, _, line, _ := runtime.Caller(1)
-		t.Errorf("line %d: heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", line, typ, cap, heapBits, bits)
+	if bytes.Equal(heapBits, bits) {
+		return
 	}
+	if len(heapBits) > len(bits) && bytes.Equal(heapBits[:len(bits)], bits) {
+		// Just the prefix matching is OK.
+		return
+	}
+	_, _, line, _ := runtime.Caller(1)
+	t.Errorf("line %d: heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", line, typ, cap, heapBits, bits)
 }
 
 func TestGCBits(t *testing.T) {
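
Two behavior changes exercised by the updated tests above, sketched as a standalone program: IsZero now reports true for negative zero (and for complex values with negative-zero parts), and closing a receive-only channel through reflect panics.

package main

import (
	"fmt"
	"math"
	"reflect"
)

func main() {
	// -0.0 now compares as zero, matching ==.
	negZero := math.Copysign(0, -1)
	fmt.Println(reflect.ValueOf(negZero).IsZero())             // true
	fmt.Println(reflect.ValueOf(complex(negZero, 0)).IsZero()) // true

	// Closing a receive-only channel via reflect panics up front.
	defer func() { fmt.Println("recovered:", recover() != nil) }() // recovered: true
	c := make(<-chan int, 1)
	reflect.ValueOf(c).Close()
}
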
diff --git a/src/reflect/asm_loong64.s b/src/reflect/asm_loong64.s
index 341a6d5..520f0af 100644
--- a/src/reflect/asm_loong64.s
+++ b/src/reflect/asm_loong64.s
@@ -7,34 +7,83 @@
 
 #define	REGCTXT	R29
 
+// The frames of each of the two functions below contain two locals, at offsets
+// that are known to the runtime.
+//
+// The first local is a bool called retValid with a whole pointer-word reserved
+// for it on the stack. The purpose of this word is so that the runtime knows
+// whether the stack-allocated return space contains valid values for stack
+// scanning.
+//
+// The second local is an abi.RegArgs value whose offset is also known to the
+// runtime, so that a stack map for it can be constructed, since it contains
+// pointers visible to the GC.
+#define LOCAL_RETVALID 40
+#define LOCAL_REGARGS 48
+
+// The frame size of the functions below is
+// 32 (args of callReflect) + 8 (bool + padding) + 392 (abi.RegArgs) = 432.
+
 // makeFuncStub is the code half of the function returned by MakeFunc.
 // See the comment on the declaration of makeFuncStub in makefunc.go
 // for more details.
 // No arg size here, runtime pulls arg map out of the func value.
-TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432
 	NO_LOCAL_POINTERS
+	ADDV	$LOCAL_REGARGS, R3, R25 // spillArgs using R25
+	JAL	runtime·spillArgs(SB)
+	MOVV	REGCTXT, 32(R3) // save REGCTXT > args of moveMakeFuncArgPtrs < LOCAL_REGARGS
+
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	REGCTXT, R4
+	MOVV	R25, R5
+#else
 	MOVV	REGCTXT, 8(R3)
-	MOVV	$argframe+0(FP), R19
-	MOVV	R19, 16(R3)
-	MOVB	R0, 40(R3)
-	ADDV	$40, R3, R19
-	MOVV	R19, 24(R3)
-	MOVV	R0, 32(R3)
+	MOVV	R25, 16(R3)
+#endif
+	JAL	·moveMakeFuncArgPtrs<ABIInternal>(SB)
+	MOVV	32(R3), REGCTXT // restore REGCTXT
+
+	MOVV	REGCTXT, 8(R3)
+	MOVV	$argframe+0(FP), R20
+	MOVV	R20, 16(R3)
+	MOVV	R0, LOCAL_RETVALID(R3)
+	ADDV	$LOCAL_RETVALID, R3, R20
+	MOVV	R20, 24(R3)
+	ADDV	$LOCAL_REGARGS, R3, R20
+	MOVV	R20, 32(R3)
 	JAL	·callReflect(SB)
+	ADDV	$LOCAL_REGARGS, R3, R25	// unspillArgs using R25
+	JAL	runtime·unspillArgs(SB)
 	RET
 
 // methodValueCall is the code half of the function returned by makeMethodValue.
 // See the comment on the declaration of methodValueCall in makefunc.go
 // for more details.
 // No arg size here; runtime pulls arg map out of the func value.
-TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432
 	NO_LOCAL_POINTERS
+	ADDV	$LOCAL_REGARGS, R3, R25 // spillArgs using R25
+	JAL	runtime·spillArgs(SB)
+	MOVV	REGCTXT, 32(R3) // save REGCTXT > args of moveMakeFuncArgPtrs < LOCAL_REGARGS
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	REGCTXT, R4
+	MOVV	R25, R5
+#else
 	MOVV	REGCTXT, 8(R3)
-	MOVV	$argframe+0(FP), R19
-	MOVV	R19, 16(R3)
-	MOVB	R0, 40(R3)
-	ADDV	$40, R3, R19
-	MOVV	R19, 24(R3)
-	MOVV	R0, 32(R3)
+	MOVV	R25, 16(R3)
+#endif
+	JAL	·moveMakeFuncArgPtrs<ABIInternal>(SB)
+	MOVV	32(R3), REGCTXT // restore REGCTXT
+	MOVV	REGCTXT, 8(R3)
+	MOVV	$argframe+0(FP), R20
+	MOVV	R20, 16(R3)
+	MOVB	R0, LOCAL_RETVALID(R3)
+	ADDV	$LOCAL_RETVALID, R3, R20
+	MOVV	R20, 24(R3)
+	ADDV	$LOCAL_REGARGS, R3, R20
+	MOVV	R20, 32(R3) // pass the abi.RegArgs pointer to callMethod at 32(R3)
 	JAL	·callMethod(SB)
+	ADDV	$LOCAL_REGARGS, R3, R25 // unspillArgs using R25
+	JAL	runtime·unspillArgs(SB)
 	RET
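
The stubs above are the architecture-specific code half behind values built with MakeFunc and makeMethodValue; from Go code the corresponding API is reflect.MakeFunc, sketched here with an illustrative add function.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	var add func(int, int) int
	fn := reflect.MakeFunc(reflect.TypeOf(add), func(args []reflect.Value) []reflect.Value {
		sum := args[0].Int() + args[1].Int()
		return []reflect.Value{reflect.ValueOf(int(sum))}
	})
	// Install the generated function; calling it goes through makeFuncStub.
	reflect.ValueOf(&add).Elem().Set(fn)
	fmt.Println(add(2, 3)) // 5
}
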
diff --git a/src/reflect/benchmark_test.go b/src/reflect/benchmark_test.go
index 9241c2c..2e701b0 100644
--- a/src/reflect/benchmark_test.go
+++ b/src/reflect/benchmark_test.go
@@ -107,13 +107,43 @@
 	}
 }
 
+func BenchmarkMapsDeepEqual(b *testing.B) {
+	m1 := map[int]int{
+		1: 1, 2: 2,
+	}
+	m2 := map[int]int{
+		1: 1, 2: 2,
+	}
+	for i := 0; i < b.N; i++ {
+		DeepEqual(m1, m2)
+	}
+}
+
 func BenchmarkIsZero(b *testing.B) {
-	source := ValueOf(struct {
-		ArrayComparable    [4]T
-		ArrayIncomparable  [4]_Complex
-		StructComparable   T
-		StructIncomparable _Complex
-	}{})
+	type Int4 struct {
+		a, b, c, d int
+	}
+	type Int1024 struct {
+		a [1024]int
+	}
+	type Int512 struct {
+		a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16 [16]S
+	}
+	s := struct {
+		ArrayComparable      [4]T
+		ArrayIncomparable    [4]_Complex
+		StructComparable     T
+		StructIncomparable   _Complex
+		ArrayInt_4           [4]int
+		ArrayInt_1024        [1024]int
+		ArrayInt_1024_NoZero [1024]int
+		Struct4Int           Int4
+		ArrayStruct4Int_1024 [256]Int4
+		ArrayChanInt_1024    [1024]chan int
+		StructInt_512        Int512
+	}{}
+	s.ArrayInt_1024_NoZero[512] = 1
+	source := ValueOf(s)
 
 	for i := 0; i < source.NumField(); i++ {
 		name := source.Type().Field(i).Name
diff --git a/src/reflect/deepequal.go b/src/reflect/deepequal.go
index 579781e..961e170 100644
--- a/src/reflect/deepequal.go
+++ b/src/reflect/deepequal.go
@@ -142,9 +142,10 @@
 		if v1.UnsafePointer() == v2.UnsafePointer() {
 			return true
 		}
-		for _, k := range v1.MapKeys() {
-			val1 := v1.MapIndex(k)
-			val2 := v2.MapIndex(k)
+		iter := v1.MapRange()
+		for iter.Next() {
+			val1 := iter.Value()
+			val2 := v2.MapIndex(iter.Key())
 			if !val1.IsValid() || !val2.IsValid() || !deepValueEqual(val1, val2, visited) {
 				return false
 			}
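
The map case above now walks one map with MapRange instead of materializing MapKeys. A minimal sketch of that iterator pattern outside of deepValueEqual; equalIntMaps is an illustrative helper, not part of the package.

package main

import (
	"fmt"
	"reflect"
)

// equalIntMaps compares two maps the same way the Map case above does:
// iterate one map with MapRange and look each key up in the other.
func equalIntMaps(a, b map[string]int) bool {
	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
	if v1.Len() != v2.Len() {
		return false
	}
	iter := v1.MapRange()
	for iter.Next() {
		val2 := v2.MapIndex(iter.Key())
		if !val2.IsValid() || val2.Int() != iter.Value().Int() {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(equalIntMaps(map[string]int{"a": 1}, map[string]int{"a": 1})) // true
	fmt.Println(equalIntMaps(map[string]int{"a": 1}, map[string]int{"a": 2})) // false
}
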
diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go
index 2496c8d..1648eb3 100644
--- a/src/reflect/export_test.go
+++ b/src/reflect/export_test.go
@@ -80,7 +80,7 @@
 	for i, offs := range offset {
 		rodata := sections[i]
 		for _, off := range offs {
-			typ := (*rtype)(resolveTypeOff(unsafe.Pointer(rodata), off))
+			typ := (*rtype)(resolveTypeOff(rodata, off))
 			r = append(r, typ.String())
 		}
 	}
@@ -164,3 +164,5 @@
 }
 
 var MethodValueCallCodePtr = methodValueCallCodePtr
+
+var InternalIsZero = isZero
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 9fd242e..89c5015 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -172,6 +172,9 @@
 
 	// FieldByName returns the struct field with the given name
 	// and a boolean indicating if the field was found.
+	// If the returned field is promoted from an embedded struct,
+	// then Offset in the returned StructField is the offset in
+	// the embedded struct.
 	FieldByName(name string) (StructField, bool)
 
 	// FieldByNameFunc returns the struct field with a name
@@ -186,6 +189,10 @@
 	// and FieldByNameFunc returns no match.
 	// This behavior mirrors Go's handling of name lookup in
 	// structs containing embedded fields.
+	//
+	// If the returned field is promoted from an embedded struct,
+	// then Offset in the returned StructField is the offset in
+	// the embedded struct.
 	FieldByNameFunc(match func(string) bool) (StructField, bool)
 
 	// In returns the type of a function type's i'th input parameter.
@@ -236,7 +243,7 @@
  * They are also known to ../runtime/type.go.
  */
 
-// A Kind represents the specific kind of type that a Type represents.
+// A Kind represents the specific kind of type that a [Type] represents.
 // The zero Kind is not a valid kind.
 type Kind uint
 
@@ -270,7 +277,7 @@
 	UnsafePointer
 )
 
-// Ptr is the old name for the Pointer kind.
+// Ptr is the old name for the [Pointer] kind.
 const Ptr = Pointer
 
 // uncommonType is present only for defined types or types with methods
@@ -1148,7 +1155,7 @@
 	return t.FieldByNameFunc(func(s string) bool { return s == name })
 }
 
-// TypeOf returns the reflection Type that represents the dynamic type of i.
+// TypeOf returns the reflection [Type] that represents the dynamic type of i.
 // If i is a nil interface value, TypeOf returns nil.
 func TypeOf(i any) Type {
 	eface := *(*emptyInterface)(unsafe.Pointer(&i))
@@ -1169,8 +1176,10 @@
 // PtrTo returns the pointer type with element t.
 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
 //
-// PtrTo is the old spelling of PointerTo.
+// PtrTo is the old spelling of [PointerTo].
 // The two functions behave identically.
+//
+// Deprecated: Superseded by [PointerTo].
 func PtrTo(t Type) Type { return PointerTo(t) }
 
 // PointerTo returns the pointer type with element t.
@@ -1530,7 +1539,7 @@
 		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
 		i, j := 0, len(offs)
 		for i < j {
-			h := i + (j-i)>>1 // avoid overflow when computing h
+			h := int(uint(i+j) >> 1) // avoid overflow when computing h
 			// i ≤ h < j
 			if !(stringFor(rtypeOff(section, offs[h])) >= s) {
 				i = h + 1 // preserves f(i-1) == false
@@ -1905,7 +1914,7 @@
 	case Float32, Float64, Complex64, Complex128, Interface, String:
 		// Float keys can be updated from +0 to -0.
 		// String keys can be updated to use a smaller backing store.
-		// Interfaces might have floats of strings in them.
+		// Interfaces might have floats or strings in them.
 		return true
 	case Array:
 		tt := (*arrayType)(unsafe.Pointer(t))
@@ -2147,9 +2156,8 @@
 // The Offset and Index fields are ignored and computed as they would be
 // by the compiler.
 //
-// StructOf currently does not generate wrapper methods for embedded
-// fields and panics if passed unexported StructFields.
-// These limitations may be lifted in a future version.
+// StructOf currently does not support promoted methods of embedded fields
+// and panics if passed unexported StructFields.
 func StructOf(fields []StructField) Type {
 	var (
 		hash       = fnv1(0, []byte("struct {")...)
@@ -2208,61 +2216,18 @@
 			switch Kind(f.Typ.Kind()) {
 			case Interface:
 				ift := (*interfaceType)(unsafe.Pointer(ft))
-				for im, m := range ift.Methods {
+				for _, m := range ift.Methods {
 					if pkgPath(ift.nameOff(m.Name)) != "" {
 						// TODO(sbinet).  Issue 15924.
 						panic("reflect: embedded interface with unexported method(s) not implemented")
 					}
 
-					var (
-						mtyp    = ift.typeOff(m.Typ)
-						ifield  = i
-						imethod = im
-						ifn     Value
-						tfn     Value
-					)
-
-					if ft.Kind_&kindDirectIface != 0 {
-						tfn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
-							var args []Value
-							var recv = in[0]
-							if len(in) > 1 {
-								args = in[1:]
-							}
-							return recv.Field(ifield).Method(imethod).Call(args)
-						})
-						ifn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
-							var args []Value
-							var recv = in[0]
-							if len(in) > 1 {
-								args = in[1:]
-							}
-							return recv.Field(ifield).Method(imethod).Call(args)
-						})
-					} else {
-						tfn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
-							var args []Value
-							var recv = in[0]
-							if len(in) > 1 {
-								args = in[1:]
-							}
-							return recv.Field(ifield).Method(imethod).Call(args)
-						})
-						ifn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
-							var args []Value
-							var recv = Indirect(in[0])
-							if len(in) > 1 {
-								args = in[1:]
-							}
-							return recv.Field(ifield).Method(imethod).Call(args)
-						})
-					}
-
+					fnStub := resolveReflectText(unsafe.Pointer(abi.FuncPCABIInternal(embeddedIfaceMethStub)))
 					methods = append(methods, abi.Method{
 						Name: resolveReflectName(ift.nameOff(m.Name)),
-						Mtyp: resolveReflectType(mtyp),
-						Ifn:  resolveReflectText(unsafe.Pointer(&ifn)),
-						Tfn:  resolveReflectText(unsafe.Pointer(&tfn)),
+						Mtyp: resolveReflectType(ift.typeOff(m.Typ)),
+						Ifn:  fnStub,
+						Tfn:  fnStub,
 					})
 				}
 			case Pointer:
@@ -2561,6 +2526,10 @@
 	return addToCache(toType(&typ.Type))
 }
 
+func embeddedIfaceMethStub() {
+	panic("reflect: StructOf does not support methods of embedded interfaces")
+}
+
 // runtimeStructField takes a StructField value passed to StructOf and
 // returns both the corresponding internal representation, of type
 // structField, and the pkgpath value to use for this field.
@@ -2909,3 +2878,8 @@
 		}
 	}
 }
+
+// TypeFor returns the [Type] that represents the type argument T.
+func TypeFor[T any]() Type {
+	return TypeOf((*T)(nil)).Elem()
+}
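
A sketch of the new TypeFor helper and of the FieldByName clarification above: for a promoted field, StructField.Offset is the offset within the embedded struct, not within the outer struct. Inner and Outer are illustrative types.

package main

import (
	"fmt"
	"reflect"
)

type Inner struct {
	A int64
	B int64
}

type Outer struct {
	Pad int64
	Inner
}

func main() {
	// TypeFor[T]() is equivalent to TypeOf((*T)(nil)).Elem(); PointerTo
	// supersedes the now-deprecated PtrTo.
	fmt.Println(reflect.TypeFor[Outer]()) // main.Outer
	fmt.Println(reflect.TypeFor[*Inner]() == reflect.PointerTo(reflect.TypeFor[Inner]())) // true

	// B is promoted from Inner; its Offset is within Inner (8), not Outer (16).
	f, _ := reflect.TypeFor[Outer]().FieldByName("B")
	fmt.Println(f.Offset, f.Index) // 8 [1 1]
}
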
diff --git a/src/reflect/type_test.go b/src/reflect/type_test.go
new file mode 100644
index 0000000..9e12427
--- /dev/null
+++ b/src/reflect/type_test.go
@@ -0,0 +1,59 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestTypeFor(t *testing.T) {
+	type (
+		mystring string
+		myiface  interface{}
+	)
+
+	testcases := []struct {
+		wantFrom any
+		got      reflect.Type
+	}{
+		{new(int), reflect.TypeFor[int]()},
+		{new(int64), reflect.TypeFor[int64]()},
+		{new(string), reflect.TypeFor[string]()},
+		{new(mystring), reflect.TypeFor[mystring]()},
+		{new(any), reflect.TypeFor[any]()},
+		{new(myiface), reflect.TypeFor[myiface]()},
+	}
+	for _, tc := range testcases {
+		want := reflect.ValueOf(tc.wantFrom).Elem().Type()
+		if want != tc.got {
+			t.Errorf("unexpected reflect.Type: got %v; want %v", tc.got, want)
+		}
+	}
+}
+
+func TestStructOfEmbeddedIfaceMethodCall(t *testing.T) {
+	type Named interface {
+		Name() string
+	}
+
+	typ := reflect.StructOf([]reflect.StructField{
+		{
+			Anonymous: true,
+			Name:      "Named",
+			Type:      reflect.TypeFor[Named](),
+		},
+	})
+
+	v := reflect.New(typ).Elem()
+	v.Field(0).Set(
+		reflect.ValueOf(reflect.TypeFor[string]()),
+	)
+
+	x := v.Interface().(Named)
+	shouldPanic("StructOf does not support methods of embedded interfaces", func() {
+		_ = x.Name()
+	})
+}
diff --git a/src/reflect/value.go b/src/reflect/value.go
index 616da6a..06f22f7 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -168,7 +168,7 @@
 }
 
 // A ValueError occurs when a Value method is invoked on
-// a Value that does not support it. Such cases are documented
+// a [Value] that does not support it. Such cases are documented
 // in the description of each method.
 type ValueError struct {
 	Method string
@@ -274,7 +274,7 @@
 }
 
 // Addr returns a pointer value representing the address of v.
-// It panics if CanAddr() returns false.
+// It panics if [Value.CanAddr] returns false.
 // Addr is typically used to obtain a pointer to a struct field
 // or slice element in order to call a method that requires a
 // pointer receiver.
@@ -289,7 +289,7 @@
 }
 
 // Bool returns v's underlying value.
-// It panics if v's kind is not Bool.
+// It panics if v's kind is not [Bool].
 func (v Value) Bool() bool {
 	// panicNotBool is split out to keep Bool inlineable.
 	if v.kind() != Bool {
@@ -348,27 +348,27 @@
 	return *(*[]rune)(v.ptr)
 }
 
-// CanAddr reports whether the value's address can be obtained with Addr.
+// CanAddr reports whether the value's address can be obtained with [Value.Addr].
 // Such values are called addressable. A value is addressable if it is
 // an element of a slice, an element of an addressable array,
 // a field of an addressable struct, or the result of dereferencing a pointer.
-// If CanAddr returns false, calling Addr will panic.
+// If CanAddr returns false, calling [Value.Addr] will panic.
 func (v Value) CanAddr() bool {
 	return v.flag&flagAddr != 0
 }
 
 // CanSet reports whether the value of v can be changed.
-// A Value can be changed only if it is addressable and was not
+// A [Value] can be changed only if it is addressable and was not
 // obtained by the use of unexported struct fields.
-// If CanSet returns false, calling Set or any type-specific
-// setter (e.g., SetBool, SetInt) will panic.
+// If CanSet returns false, calling [Value.Set] or any type-specific
+// setter (e.g., [Value.SetBool], [Value.SetInt]) will panic.
 func (v Value) CanSet() bool {
 	return v.flag&(flagAddr|flagRO) == flagAddr
 }
 
 // Call calls the function v with the input arguments in.
 // For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]).
-// Call panics if v's Kind is not Func.
+// Call panics if v's Kind is not [Func].
 // It returns the output results as Values.
 // As in Go, each input argument must be assignable to the
 // type of the function's corresponding input parameter.
@@ -383,7 +383,7 @@
 // CallSlice calls the variadic function v with the input arguments in,
 // assigning the slice in[len(in)-1] to v's final variadic argument.
 // For example, if len(in) == 3, v.CallSlice(in) represents the Go call v(in[0], in[1], in[2]...).
-// CallSlice panics if v's Kind is not Func or if v is not variadic.
+// CallSlice panics if v's Kind is not [Func] or if v is not variadic.
 // It returns the output results as Values.
 // As in Go, each input argument must be assignable to the
 // type of the function's corresponding input parameter.
@@ -1161,7 +1161,7 @@
 }
 
 // Cap returns v's capacity.
-// It panics if v's Kind is not Array, Chan, Slice or pointer to Array.
+// It panics if v's Kind is not [Array], [Chan], [Slice] or pointer to [Array].
 func (v Value) Cap() int {
 	// capNonSlice is split out to keep Cap inlineable for slice kinds.
 	if v.kind() == Slice {
@@ -1187,14 +1187,20 @@
 }
 
 // Close closes the channel v.
-// It panics if v's Kind is not Chan.
+// It panics if v's Kind is not [Chan] or
+// v is a receive-only channel.
 func (v Value) Close() {
 	v.mustBe(Chan)
 	v.mustBeExported()
+	tt := (*chanType)(unsafe.Pointer(v.typ()))
+	if ChanDir(tt.Dir)&SendDir == 0 {
+		panic("reflect: close of receive-only channel")
+	}
+
 	chanclose(v.pointer())
 }
 
-// CanComplex reports whether Complex can be used without panicking.
+// CanComplex reports whether [Value.Complex] can be used without panicking.
 func (v Value) CanComplex() bool {
 	switch v.kind() {
 	case Complex64, Complex128:
@@ -1205,7 +1211,7 @@
 }
 
 // Complex returns v's underlying value, as a complex128.
-// It panics if v's Kind is not Complex64 or Complex128
+// It panics if v's Kind is not [Complex64] or [Complex128]
 func (v Value) Complex() complex128 {
 	k := v.kind()
 	switch k {
@@ -1219,7 +1225,7 @@
 
 // Elem returns the value that the interface v contains
 // or that the pointer v points to.
-// It panics if v's Kind is not Interface or Pointer.
+// It panics if v's Kind is not [Interface] or [Pointer].
 // It returns the zero Value if v is nil.
 func (v Value) Elem() Value {
 	k := v.kind()
@@ -1272,7 +1278,7 @@
 }
 
 // Field returns the i'th field of the struct v.
-// It panics if v's Kind is not Struct or i is out of range.
+// It panics if v's Kind is not [Struct] or i is out of range.
 func (v Value) Field(i int) Value {
 	if v.kind() != Struct {
 		panic(&ValueError{"reflect.Value.Field", v.kind()})
@@ -1350,7 +1356,7 @@
 
 // FieldByName returns the struct field with the given name.
 // It returns the zero Value if no field was found.
-// It panics if v's Kind is not struct.
+// It panics if v's Kind is not [Struct].
 func (v Value) FieldByName(name string) Value {
 	v.mustBe(Struct)
 	if f, ok := toRType(v.typ()).FieldByName(name); ok {
@@ -1361,7 +1367,7 @@
 
 // FieldByNameFunc returns the struct field with a name
 // that satisfies the match function.
-// It panics if v's Kind is not struct.
+// It panics if v's Kind is not [Struct].
 // It returns the zero Value if no field was found.
 func (v Value) FieldByNameFunc(match func(string) bool) Value {
 	if f, ok := toRType(v.typ()).FieldByNameFunc(match); ok {
@@ -1370,7 +1376,7 @@
 	return Value{}
 }
 
-// CanFloat reports whether Float can be used without panicking.
+// CanFloat reports whether [Value.Float] can be used without panicking.
 func (v Value) CanFloat() bool {
 	switch v.kind() {
 	case Float32, Float64:
@@ -1381,7 +1387,7 @@
 }
 
 // Float returns v's underlying value, as a float64.
-// It panics if v's Kind is not Float32 or Float64
+// It panics if v's Kind is not [Float32] or [Float64]
 func (v Value) Float() float64 {
 	k := v.kind()
 	switch k {
@@ -1396,7 +1402,7 @@
 var uint8Type = rtypeOf(uint8(0))
 
 // Index returns v's i'th element.
-// It panics if v's Kind is not Array, Slice, or String or i is out of range.
+// It panics if v's Kind is not [Array], [Slice], or [String] or i is out of range.
 func (v Value) Index(i int) Value {
 	switch v.kind() {
 	case Array:
@@ -1452,7 +1458,7 @@
 }
 
 // Int returns v's underlying value, as an int64.
-// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
+// It panics if v's Kind is not [Int], [Int8], [Int16], [Int32], or [Int64].
 func (v Value) Int() int64 {
 	k := v.kind()
 	p := v.ptr
@@ -1471,7 +1477,7 @@
 	panic(&ValueError{"reflect.Value.Int", v.kind()})
 }
 
-// CanInterface reports whether Interface can be used without panicking.
+// CanInterface reports whether [Value.Interface] can be used without panicking.
 func (v Value) CanInterface() bool {
 	if v.flag == 0 {
 		panic(&ValueError{"reflect.Value.CanInterface", Invalid})
@@ -1588,23 +1594,27 @@
 	case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
 		return v.Uint() == 0
 	case Float32, Float64:
-		return math.Float64bits(v.Float()) == 0
+		return v.Float() == 0
 	case Complex64, Complex128:
-		c := v.Complex()
-		return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+		return v.Complex() == 0
 	case Array:
+		if v.flag&flagIndir == 0 {
+			return v.ptr == nil
+		}
+		typ := (*abi.ArrayType)(unsafe.Pointer(v.typ()))
 		// If the type is comparable, then compare directly with zero.
-		if v.typ().Equal != nil && v.typ().Size() <= maxZero {
-			if v.flag&flagIndir == 0 {
-				return v.ptr == nil
-			}
+		if typ.Equal != nil && typ.Size() <= abi.ZeroValSize {
 			// v.ptr doesn't escape, as Equal functions are compiler generated
 			// and never escape. The escape analysis doesn't know, as it is a
 			// function pointer call.
-			return v.typ().Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0]))
+			return typ.Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0]))
 		}
-
-		n := v.Len()
+		if typ.TFlag&abi.TFlagRegularMemory != 0 {
+			// If the zero value of this type is represented entirely by zero bits,
+			// compare the backing memory directly.
+			return isZero(unsafe.Slice(((*byte)(v.ptr)), typ.Size()))
+		}
+		n := int(typ.Len)
 		for i := 0; i < n; i++ {
 			if !v.Index(i).IsZero() {
 				return false
@@ -1616,18 +1626,24 @@
 	case String:
 		return v.Len() == 0
 	case Struct:
+		if v.flag&flagIndir == 0 {
+			return v.ptr == nil
+		}
+		typ := (*abi.StructType)(unsafe.Pointer(v.typ()))
 		// If the type is comparable, then compare directly with zero.
-		if v.typ().Equal != nil && v.typ().Size() <= maxZero {
-			if v.flag&flagIndir == 0 {
-				return v.ptr == nil
-			}
+		if typ.Equal != nil && typ.Size() <= abi.ZeroValSize {
 			// See noescape justification above.
-			return v.typ().Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0]))
+			return typ.Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0]))
+		}
+		if typ.TFlag&abi.TFlagRegularMemory != 0 {
+			// If the zero value of this type is represented entirely by zero bits,
+			// compare the backing memory directly.
+			return isZero(unsafe.Slice(((*byte)(v.ptr)), typ.Size()))
 		}
 
 		n := v.NumField()
 		for i := 0; i < n; i++ {
-			if !v.Field(i).IsZero() {
+			if !v.Field(i).IsZero() && v.Type().Field(i).Name != "_" {
 				return false
 			}
 		}
@@ -1639,8 +1655,57 @@
 	}
 }
 
+// isZero reports whether b contains only zero bytes. For all-zero inputs it is
+// not as fast as: return bytealg.Count(b, byte(0)) == len(b)
+func isZero(b []byte) bool {
+	if len(b) == 0 {
+		return true
+	}
+	const n = 32
+	// Align memory addresses to 8 bytes.
+	for uintptr(unsafe.Pointer(&b[0]))%8 != 0 {
+		if b[0] != 0 {
+			return false
+		}
+		b = b[1:]
+		if len(b) == 0 {
+			return true
+		}
+	}
+	for len(b)%8 != 0 {
+		if b[len(b)-1] != 0 {
+			return false
+		}
+		b = b[:len(b)-1]
+	}
+	if len(b) == 0 {
+		return true
+	}
+	w := unsafe.Slice((*uint64)(unsafe.Pointer(&b[0])), len(b)/8)
+	for len(w)%n != 0 {
+		if w[0] != 0 {
+			return false
+		}
+		w = w[1:]
+	}
+	for len(w) >= n {
+		if w[0] != 0 || w[1] != 0 || w[2] != 0 || w[3] != 0 ||
+			w[4] != 0 || w[5] != 0 || w[6] != 0 || w[7] != 0 ||
+			w[8] != 0 || w[9] != 0 || w[10] != 0 || w[11] != 0 ||
+			w[12] != 0 || w[13] != 0 || w[14] != 0 || w[15] != 0 ||
+			w[16] != 0 || w[17] != 0 || w[18] != 0 || w[19] != 0 ||
+			w[20] != 0 || w[21] != 0 || w[22] != 0 || w[23] != 0 ||
+			w[24] != 0 || w[25] != 0 || w[26] != 0 || w[27] != 0 ||
+			w[28] != 0 || w[29] != 0 || w[30] != 0 || w[31] != 0 {
+			return false
+		}
+		w = w[n:]
+	}
+	return true
+}
+
 // SetZero sets v to be the zero value of v's type.
-// It panics if CanSet returns false.
+// It panics if [Value.CanSet] returns false.
 func (v Value) SetZero() {
 	v.mustBeAssignable()
 	switch v.kind() {
@@ -1681,7 +1746,7 @@
 	case Slice:
 		*(*unsafeheader.Slice)(v.ptr) = unsafeheader.Slice{}
 	case Interface:
-		*(*[2]unsafe.Pointer)(v.ptr) = [2]unsafe.Pointer{}
+		*(*emptyInterface)(v.ptr) = emptyInterface{}
 	case Chan, Func, Map, Pointer, UnsafePointer:
 		*(*unsafe.Pointer)(v.ptr) = nil
 	case Array, Struct:
@@ -1694,13 +1759,13 @@
 }
 
 // Kind returns v's Kind.
-// If v is the zero Value (IsValid returns false), Kind returns Invalid.
+// If v is the zero Value ([Value.IsValid] returns false), Kind returns Invalid.
 func (v Value) Kind() Kind {
 	return v.kind()
 }
 
 // Len returns v's length.
-// It panics if v's Kind is not Array, Chan, Map, Slice, String, or pointer to Array.
+// It panics if v's Kind is not [Array], [Chan], [Map], [Slice], [String], or pointer to [Array].
 func (v Value) Len() int {
 	// lenNonSlice is split out to keep Len inlineable for slice kinds.
 	if v.kind() == Slice {
@@ -1733,7 +1798,7 @@
 var stringType = rtypeOf("")
 
 // MapIndex returns the value associated with key in the map v.
-// It panics if v's Kind is not Map.
+// It panics if v's Kind is not [Map].
 // It returns the zero Value if key is not found in the map or if v represents a nil map.
 // As in Go, the key's value must be assignable to the map's key type.
 func (v Value) MapIndex(key Value) Value {
@@ -1773,7 +1838,7 @@
 
 // MapKeys returns a slice containing all the keys present in the map,
 // in unspecified order.
-// It panics if v's Kind is not Map.
+// It panics if v's Kind is not [Map].
 // It returns an empty slice if v represents a nil map.
 func (v Value) MapKeys() []Value {
 	v.mustBe(Map)
@@ -1832,7 +1897,7 @@
 }
 
 // A MapIter is an iterator for ranging over a map.
-// See Value.MapRange.
+// See [Value.MapRange].
 type MapIter struct {
 	m     Value
 	hiter hiter
@@ -1926,7 +1991,7 @@
 
 // Next advances the map iterator and reports whether there is another
 // entry. It returns false when iter is exhausted; subsequent
-// calls to Key, Value, or Next will panic.
+// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic.
 func (iter *MapIter) Next() bool {
 	if !iter.m.IsValid() {
 		panic("MapIter.Next called on an iterator that does not have an associated map Value")
@@ -1943,7 +2008,7 @@
 }
 
 // Reset modifies iter to iterate over v.
-// It panics if v's Kind is not Map and v is not the zero Value.
+// It panics if v's Kind is not [Map] and v is not the zero Value.
 // Reset(Value{}) causes iter to not to refer to any map,
 // which may allow the previously iterated-over map to be garbage collected.
 func (iter *MapIter) Reset(v Value) {
@@ -1955,10 +2020,10 @@
 }
 
 // MapRange returns a range iterator for a map.
-// It panics if v's Kind is not Map.
+// It panics if v's Kind is not [Map].
 //
-// Call Next to advance the iterator, and Key/Value to access each entry.
-// Next returns false when the iterator is exhausted.
+// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry.
+// [MapIter.Next] returns false when the iterator is exhausted.
 // MapRange follows the same iteration semantics as a range statement.
 //
 // Example:
@@ -2057,7 +2122,7 @@
 }
 
 // NumField returns the number of fields in the struct v.
-// It panics if v's Kind is not Struct.
+// It panics if v's Kind is not [Struct].
 func (v Value) NumField() int {
 	v.mustBe(Struct)
 	tt := (*structType)(unsafe.Pointer(v.typ()))
@@ -2065,7 +2130,7 @@
 }
 
 // OverflowComplex reports whether the complex128 x cannot be represented by v's type.
-// It panics if v's Kind is not Complex64 or Complex128.
+// It panics if v's Kind is not [Complex64] or [Complex128].
 func (v Value) OverflowComplex(x complex128) bool {
 	k := v.kind()
 	switch k {
@@ -2078,7 +2143,7 @@
 }
 
 // OverflowFloat reports whether the float64 x cannot be represented by v's type.
-// It panics if v's Kind is not Float32 or Float64.
+// It panics if v's Kind is not [Float32] or [Float64].
 func (v Value) OverflowFloat(x float64) bool {
 	k := v.kind()
 	switch k {
@@ -2098,7 +2163,7 @@
 }
 
 // OverflowInt reports whether the int64 x cannot be represented by v's type.
-// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
+// It panics if v's Kind is not [Int], [Int8], [Int16], [Int32], or [Int64].
 func (v Value) OverflowInt(x int64) bool {
 	k := v.kind()
 	switch k {
@@ -2111,7 +2176,7 @@
 }
 
 // OverflowUint reports whether the uint64 x cannot be represented by v's type.
-// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
+// It panics if v's Kind is not [Uint], [Uintptr], [Uint8], [Uint16], [Uint32], or [Uint64].
 func (v Value) OverflowUint(x uint64) bool {
 	k := v.kind()
 	switch k {
@@ -2129,14 +2194,14 @@
 // and make an exception.
 
 // Pointer returns v's value as a uintptr.
-// It panics if v's Kind is not Chan, Func, Map, Pointer, Slice, or UnsafePointer.
+// It panics if v's Kind is not [Chan], [Func], [Map], [Pointer], [Slice], or [UnsafePointer].
 //
-// If v's Kind is Func, the returned pointer is an underlying
+// If v's Kind is [Func], the returned pointer is an underlying
 // code pointer, but not necessarily enough to identify a
 // single function uniquely. The only guarantee is that the
 // result is zero if and only if v is a nil func Value.
 //
-// If v's Kind is Slice, the returned pointer is to the first
+// If v's Kind is [Slice], the returned pointer is to the first
 // element of the slice. If the slice is nil the returned value
 // is 0.  If the slice is empty but non-nil the return value is non-zero.
 //
@@ -2185,7 +2250,7 @@
 }
 
 // Recv receives and returns a value from the channel v.
-// It panics if v's Kind is not Chan.
+// It panics if v's Kind is not [Chan].
 // The receive blocks until a value is ready.
 // The boolean value ok is true if the value x corresponds to a send
 // on the channel, false if it is a zero value received because the channel is closed.
@@ -2220,7 +2285,7 @@
 }
 
 // Send sends x on the channel v.
-// It panics if v's kind is not Chan or if x's type is not the same type as v's element type.
+// It panics if v's kind is not [Chan] or if x's type is not the same type as v's element type.
 // As in Go, x's value must be assignable to the channel's element type.
 func (v Value) Send(x Value) {
 	v.mustBe(Chan)
@@ -2247,7 +2312,7 @@
 }
 
 // Set assigns x to the value v.
-// It panics if CanSet returns false.
+// It panics if [Value.CanSet] returns false.
 // As in Go, x's value must be assignable to v's type and
 // must not be derived from an unexported field.
 func (v Value) Set(x Value) {
@@ -2270,7 +2335,7 @@
 }
 
 // SetBool sets v's underlying value.
-// It panics if v's Kind is not Bool or if CanSet() is false.
+// It panics if v's Kind is not [Bool] or if [Value.CanSet] returns false.
 func (v Value) SetBool(x bool) {
 	v.mustBeAssignable()
 	v.mustBe(Bool)
@@ -2300,7 +2365,7 @@
 }
 
 // SetComplex sets v's underlying value to x.
-// It panics if v's Kind is not Complex64 or Complex128, or if CanSet() is false.
+// It panics if v's Kind is not [Complex64] or [Complex128], or if [Value.CanSet] returns false.
 func (v Value) SetComplex(x complex128) {
 	v.mustBeAssignable()
 	switch k := v.kind(); k {
@@ -2314,7 +2379,7 @@
 }
 
 // SetFloat sets v's underlying value to x.
-// It panics if v's Kind is not Float32 or Float64, or if CanSet() is false.
+// It panics if v's Kind is not [Float32] or [Float64], or if [Value.CanSet] returns false.
 func (v Value) SetFloat(x float64) {
 	v.mustBeAssignable()
 	switch k := v.kind(); k {
@@ -2328,7 +2393,7 @@
 }
 
 // SetInt sets v's underlying value to x.
-// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64, or if CanSet() is false.
+// It panics if v's Kind is not [Int], [Int8], [Int16], [Int32], or [Int64], or if [Value.CanSet] returns false.
 func (v Value) SetInt(x int64) {
 	v.mustBeAssignable()
 	switch k := v.kind(); k {
@@ -2348,7 +2413,7 @@
 }
 
 // SetLen sets v's length to n.
-// It panics if v's Kind is not Slice or if n is negative or
+// It panics if v's Kind is not [Slice] or if n is negative or
 // greater than the capacity of the slice.
 func (v Value) SetLen(n int) {
 	v.mustBeAssignable()
@@ -2361,7 +2426,7 @@
 }
 
 // SetCap sets v's capacity to n.
-// It panics if v's Kind is not Slice or if n is smaller than the length or
+// It panics if v's Kind is not [Slice] or if n is smaller than the length or
 // greater than the capacity of the slice.
 func (v Value) SetCap(n int) {
 	v.mustBeAssignable()
@@ -2374,7 +2439,7 @@
 }
 
 // SetMapIndex sets the element associated with key in the map v to elem.
-// It panics if v's Kind is not Map.
+// It panics if v's Kind is not [Map].
 // If elem is the zero Value, SetMapIndex deletes the key from the map.
 // Otherwise if v holds a nil map, SetMapIndex will panic.
 // As in Go, key's elem must be assignable to the map's key type,
@@ -2426,7 +2491,7 @@
 }
 
 // SetUint sets v's underlying value to x.
-// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64, or if CanSet() is false.
+// It panics if v's Kind is not [Uint], [Uintptr], [Uint8], [Uint16], [Uint32], or [Uint64], or if [Value.CanSet] returns false.
 func (v Value) SetUint(x uint64) {
 	v.mustBeAssignable()
 	switch k := v.kind(); k {
@@ -2456,7 +2521,7 @@
 }
 
 // SetString sets v's underlying value to x.
-// It panics if v's Kind is not String or if CanSet() is false.
+// It panics if v's Kind is not [String] or if [Value.CanSet] returns false.
 func (v Value) SetString(x string) {
 	v.mustBeAssignable()
 	v.mustBe(String)
@@ -2464,7 +2529,7 @@
 }
 
 // Slice returns v[i:j].
-// It panics if v's Kind is not Array, Slice or String, or if v is an unaddressable array,
+// It panics if v's Kind is not [Array], [Slice] or [String], or if v is an unaddressable array,
 // or if the indexes are out of bounds.
 func (v Value) Slice(i, j int) Value {
 	var (
@@ -2526,7 +2591,7 @@
 }
 
 // Slice3 is the 3-index form of the slice operation: it returns v[i:j:k].
-// It panics if v's Kind is not Array or Slice, or if v is an unaddressable array,
+// It panics if v's Kind is not [Array] or [Slice], or if v is an unaddressable array,
 // or if the indexes are out of bounds.
 func (v Value) Slice3(i, j, k int) Value {
 	var (
@@ -2579,7 +2644,7 @@
 
 // String returns the string v's underlying value, as a string.
 // String is a special case because of Go's String method convention.
-// Unlike the other getters, it does not panic if v's Kind is not String.
+// Unlike the other getters, it does not panic if v's Kind is not [String].
 // Instead, it returns a string of the form "<T value>" where T is v's type.
 // The fmt package treats Values specially. It does not call their String
 // method implicitly but instead prints the concrete values they hold.
@@ -2601,7 +2666,7 @@
 }
 
 // TryRecv attempts to receive a value from the channel v but will not block.
-// It panics if v's Kind is not Chan.
+// It panics if v's Kind is not [Chan].
 // If the receive delivers a value, x is the transferred value and ok is true.
 // If the receive cannot finish without blocking, x is the zero Value and ok is false.
 // If the channel is closed, x is the zero value for the channel's element type and ok is false.
@@ -2612,7 +2677,7 @@
 }
 
 // TrySend attempts to send x on the channel v but will not block.
-// It panics if v's Kind is not Chan.
+// It panics if v's Kind is not [Chan].
 // It reports whether the value was sent.
 // As in Go, x's value must be assignable to the channel's element type.
 func (v Value) TrySend(x Value) bool {
@@ -2660,7 +2725,7 @@
 	return toRType(typeOffFor(typ, m.Mtyp))
 }
 
-// CanUint reports whether Uint can be used without panicking.
+// CanUint reports whether [Value.Uint] can be used without panicking.
 func (v Value) CanUint() bool {
 	switch v.kind() {
 	case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
@@ -2671,7 +2736,7 @@
 }
 
 // Uint returns v's underlying value, as a uint64.
-// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
+// It panics if v's Kind is not [Uint], [Uintptr], [Uint8], [Uint16], [Uint32], or [Uint64].
 func (v Value) Uint() uint64 {
 	k := v.kind()
 	p := v.ptr
@@ -2714,14 +2779,14 @@
 }
 
 // UnsafePointer returns v's value as a [unsafe.Pointer].
-// It panics if v's Kind is not Chan, Func, Map, Pointer, Slice, or UnsafePointer.
+// It panics if v's Kind is not [Chan], [Func], [Map], [Pointer], [Slice], or [UnsafePointer].
 //
-// If v's Kind is Func, the returned pointer is an underlying
+// If v's Kind is [Func], the returned pointer is an underlying
 // code pointer, but not necessarily enough to identify a
 // single function uniquely. The only guarantee is that the
 // result is zero if and only if v is a nil func Value.
 //
-// If v's Kind is Slice, the returned pointer is to the first
+// If v's Kind is [Slice], the returned pointer is to the first
 // element of the slice. If the slice is nil the returned value
 // is nil.  If the slice is empty but non-nil the return value is non-nil.
 func (v Value) UnsafePointer() unsafe.Pointer {
@@ -2812,7 +2877,7 @@
 // another n elements. After Grow(n), at least n elements can be appended
 // to the slice without another allocation.
 //
-// It panics if v's Kind is not a Slice or if n is negative or too large to
+// It panics if v's Kind is not a [Slice] or if n is negative or too large to
 // allocate the memory.
 func (v Value) Grow(n int) {
 	v.mustBeAssignable()
@@ -2857,7 +2922,7 @@
 
 // Clear clears the contents of a map or zeros the contents of a slice.
 //
-// It panics if v's Kind is not Map or Slice.
+// It panics if v's Kind is not [Map] or [Slice].
 func (v Value) Clear() {
 	switch v.Kind() {
 	case Slice:
@@ -2899,10 +2964,10 @@
 // Copy copies the contents of src into dst until either
 // dst has been filled or src has been exhausted.
 // It returns the number of elements copied.
-// Dst and src each must have kind Slice or Array, and
+// Dst and src each must have kind [Slice] or [Array], and
 // dst and src must have the same element type.
 //
-// As a special case, src can have kind String if the element type of dst is kind Uint8.
+// As a special case, src can have kind [String] if the element type of dst is kind [Uint8].
 func Copy(dst, src Value) int {
 	dk := dst.kind()
 	if dk != Array && dk != Slice {
@@ -3190,32 +3255,19 @@
 	return v.Elem()
 }
 
-// Before Go 1.21, ValueOf always escapes and a Value's content
-// is always heap allocated.
-// Set go121noForceValueEscape to true to avoid the forced escape,
-// allowing Value content to be on the stack.
-// Set go121noForceValueEscape to false for the legacy behavior
-// (for debugging).
-const go121noForceValueEscape = true
-
 // ValueOf returns a new Value initialized to the concrete value
 // stored in the interface i. ValueOf(nil) returns the zero Value.
 func ValueOf(i any) Value {
 	if i == nil {
 		return Value{}
 	}
-
-	if !go121noForceValueEscape {
-		escapes(i)
-	}
-
 	return unpackEface(i)
 }
 
 // Zero returns a Value representing the zero value for the specified type.
 // The result is different from the zero value of the Value struct,
 // which represents no value at all.
-// For example, Zero(TypeOf(42)) returns a Value with Kind Int and value 0.
+// For example, Zero(TypeOf(42)) returns a Value with Kind [Int] and value 0.
 // The returned value is neither addressable nor settable.
 func Zero(typ Type) Value {
 	if typ == nil {
@@ -3225,7 +3277,7 @@
 	fl := flag(t.Kind())
 	if t.IfaceIndir() {
 		var p unsafe.Pointer
-		if t.Size() <= maxZero {
+		if t.Size() <= abi.ZeroValSize {
 			p = unsafe.Pointer(&zeroVal[0])
 		} else {
 			p = unsafe_New(t)
@@ -3235,11 +3287,8 @@
 	return Value{t, nil, fl}
 }
 
-// must match declarations in runtime/map.go.
-const maxZero = 1024
-
 //go:linkname zeroVal runtime.zeroVal
-var zeroVal [maxZero]byte
+var zeroVal [abi.ZeroValSize]byte
 
 // New returns a Value representing a pointer to a new zero value
 // for the specified type. That is, the returned Value's Type is PointerTo(typ).
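The setter hunks above all funnel through CanSet; as a minimal illustrative sketch (separate from the patch itself), the usual way to obtain a settable Value is through a pointer and Elem:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	x := int64(7)

	// A Value built directly from an interface value is not addressable,
	// so CanSet reports false and SetInt would panic.
	direct := reflect.ValueOf(x)
	fmt.Println(direct.CanSet()) // false

	// Going through a pointer and Elem yields an addressable, settable Value.
	settable := reflect.ValueOf(&x).Elem()
	fmt.Println(settable.CanSet()) // true
	settable.SetInt(42)            // Kind is Int64 and CanSet is true, so no panic
	fmt.Println(x)                 // 42
}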
diff --git a/src/regexp/backtrack.go b/src/regexp/backtrack.go
index 0739f5f..7c37c66 100644
--- a/src/regexp/backtrack.go
+++ b/src/regexp/backtrack.go
@@ -91,9 +91,7 @@
 		b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits)
 	} else {
 		b.visited = b.visited[:visitedSize]
-		for i := range b.visited {
-			b.visited[i] = 0
-		}
+		clear(b.visited) // set to 0
 	}
 
 	if cap(b.cap) < ncap {
diff --git a/src/regexp/example_test.go b/src/regexp/example_test.go
index 466b38b..707445f 100644
--- a/src/regexp/example_test.go
+++ b/src/regexp/example_test.go
@@ -228,11 +228,18 @@
 	fmt.Printf("%s\n", re.ReplaceAll([]byte("-ab-axxb-"), []byte("$1")))
 	fmt.Printf("%s\n", re.ReplaceAll([]byte("-ab-axxb-"), []byte("$1W")))
 	fmt.Printf("%s\n", re.ReplaceAll([]byte("-ab-axxb-"), []byte("${1}W")))
+
+	re2 := regexp.MustCompile(`a(?P<1W>x*)b`)
+	fmt.Printf("%s\n", re2.ReplaceAll([]byte("-ab-axxb-"), []byte("$1W")))
+	fmt.Printf("%s\n", re2.ReplaceAll([]byte("-ab-axxb-"), []byte("${1}W")))
+
 	// Output:
 	// -T-T-
 	// --xx-
 	// ---
 	// -W-xxW-
+	// --xx-
+	// -W-xxW-
 }
 
 func ExampleRegexp_ReplaceAllLiteralString() {
@@ -252,11 +259,18 @@
 	fmt.Println(re.ReplaceAllString("-ab-axxb-", "$1"))
 	fmt.Println(re.ReplaceAllString("-ab-axxb-", "$1W"))
 	fmt.Println(re.ReplaceAllString("-ab-axxb-", "${1}W"))
+
+	re2 := regexp.MustCompile(`a(?P<1W>x*)b`)
+	fmt.Println(re2.ReplaceAllString("-ab-axxb-", "$1W"))
+	fmt.Println(re2.ReplaceAllString("-ab-axxb-", "${1}W"))
+
 	// Output:
 	// -T-T-
 	// --xx-
 	// ---
 	// -W-xxW-
+	// --xx-
+	// -W-xxW-
 }
 
 func ExampleRegexp_ReplaceAllStringFunc() {
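The added output lines above come down to how $ references are resolved during expansion; a small self-contained sketch (not part of the patch) using the same two patterns as the example:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`a(x*)b`)
	re2 := regexp.MustCompile(`a(?P<1W>x*)b`)

	// In "$1W" the name is taken as long as possible, so it means ${1W}.
	// re has no group named "1W", so the reference expands to "".
	fmt.Println(re.ReplaceAllString("-ab-axxb-", "$1W")) // ---

	// re2 does have a group named "1W", so "$1W" expands to that submatch.
	fmt.Println(re2.ReplaceAllString("-ab-axxb-", "$1W")) // --xx-

	// "${1}W" always means group 1 followed by a literal W.
	fmt.Println(re.ReplaceAllString("-ab-axxb-", "${1}W"))  // -W-xxW-
	fmt.Println(re2.ReplaceAllString("-ab-axxb-", "${1}W")) // -W-xxW-
}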
diff --git a/src/regexp/regexp.go b/src/regexp/regexp.go
index 1c9b2fd..462f235 100644
--- a/src/regexp/regexp.go
+++ b/src/regexp/regexp.go
@@ -8,9 +8,7 @@
 // general syntax used by Perl, Python, and other languages.
 // More precisely, it is the syntax accepted by RE2 and described at
 // https://golang.org/s/re2syntax, except for \C.
-// For an overview of the syntax, run
-//
-//	go doc regexp/syntax
+// For an overview of the syntax, see the [regexp/syntax] package.
 //
 // The regexp implementation provided by this package is
 // guaranteed to run in time linear in the size of the input.
@@ -23,10 +21,10 @@
 // or any book about automata theory.
 //
 // All characters are UTF-8-encoded code points.
-// Following utf8.DecodeRune, each byte of an invalid UTF-8 sequence
+// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence
 // is treated as if it encoded utf8.RuneError (U+FFFD).
 //
-// There are 16 methods of Regexp that match a regular expression and identify
+// There are 16 methods of [Regexp] that match a regular expression and identify
 // the matched text. Their names are matched by this regular expression:
 //
 //	Find(All)?(String)?(Submatch)?(Index)?
@@ -82,7 +80,7 @@
 
 // Regexp is the representation of a compiled regular expression.
 // A Regexp is safe for concurrent use by multiple goroutines,
-// except for configuration methods, such as Longest.
+// except for configuration methods, such as [Regexp.Longest].
 type Regexp struct {
 	expr           string       // as passed to Compile
 	prog           *syntax.Prog // compiled program
@@ -110,21 +108,21 @@
 	return re.expr
 }
 
-// Copy returns a new Regexp object copied from re.
-// Calling Longest on one copy does not affect another.
+// Copy returns a new [Regexp] object copied from re.
+// Calling [Regexp.Longest] on one copy does not affect another.
 //
-// Deprecated: In earlier releases, when using a Regexp in multiple goroutines,
+// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines,
 // giving each goroutine its own copy helped to avoid lock contention.
 // As of Go 1.12, using Copy is no longer necessary to avoid lock contention.
 // Copy may still be appropriate if the reason for its use is to make
-// two copies with different Longest settings.
+// two copies with different [Regexp.Longest] settings.
 func (re *Regexp) Copy() *Regexp {
 	re2 := *re
 	return &re2
 }
 
 // Compile parses a regular expression and returns, if successful,
-// a Regexp object that can be used to match against text.
+// a [Regexp] object that can be used to match against text.
 //
 // When matching against text, the regexp returns a match that
 // begins as early as possible in the input (leftmost), and among those
@@ -132,12 +130,12 @@
 // This so-called leftmost-first matching is the same semantics
 // that Perl, Python, and other implementations use, although this
 // package implements it without the expense of backtracking.
-// For POSIX leftmost-longest matching, see CompilePOSIX.
+// For POSIX leftmost-longest matching, see [CompilePOSIX].
 func Compile(expr string) (*Regexp, error) {
 	return compile(expr, syntax.Perl, false)
 }
 
-// CompilePOSIX is like Compile but restricts the regular expression
+// CompilePOSIX is like [Compile] but restricts the regular expression
 // to POSIX ERE (egrep) syntax and changes the match semantics to
 // leftmost-longest.
 //
@@ -164,7 +162,7 @@
 // That is, when matching against text, the regexp returns a match that
 // begins as early as possible in the input (leftmost), and among those
 // it chooses a match that is as long as possible.
-// This method modifies the Regexp and may not be called concurrently
+// This method modifies the [Regexp] and may not be called concurrently
 // with any other methods.
 func (re *Regexp) Longest() {
 	re.longest = true
@@ -310,7 +308,7 @@
 	}
 }
 
-// MustCompile is like Compile but panics if the expression cannot be parsed.
+// MustCompile is like [Compile] but panics if the expression cannot be parsed.
 // It simplifies safe initialization of global variables holding compiled regular
 // expressions.
 func MustCompile(str string) *Regexp {
@@ -321,7 +319,7 @@
 	return regexp
 }
 
-// MustCompilePOSIX is like CompilePOSIX but panics if the expression cannot be parsed.
+// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed.
 // It simplifies safe initialization of global variables holding compiled regular
 // expressions.
 func MustCompilePOSIX(str string) *Regexp {
@@ -339,13 +337,13 @@
 	return strconv.Quote(s)
 }
 
-// NumSubexp returns the number of parenthesized subexpressions in this Regexp.
+// NumSubexp returns the number of parenthesized subexpressions in this [Regexp].
 func (re *Regexp) NumSubexp() int {
 	return re.numSubexp
 }
 
 // SubexpNames returns the names of the parenthesized subexpressions
-// in this Regexp. The name for the first sub-expression is names[1],
+// in this [Regexp]. The name for the first sub-expression is names[1],
 // so that if m is a match slice, the name for m[i] is SubexpNames()[i].
 // Since the Regexp as a whole cannot be named, names[0] is always
 // the empty string. The slice should not be modified.
@@ -521,7 +519,7 @@
 	return re.prefix, re.prefixComplete
 }
 
-// MatchReader reports whether the text returned by the RuneReader
+// MatchReader reports whether the text returned by the [io.RuneReader]
 // contains any match of the regular expression re.
 func (re *Regexp) MatchReader(r io.RuneReader) bool {
 	return re.doMatch(r, nil, "")
@@ -541,7 +539,7 @@
 
 // MatchReader reports whether the text returned by the RuneReader
 // contains any match of the regular expression pattern.
-// More complicated queries need to use Compile and the full Regexp interface.
+// More complicated queries need to use [Compile] and the full [Regexp] interface.
 func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) {
 	re, err := Compile(pattern)
 	if err != nil {
@@ -552,7 +550,7 @@
 
 // MatchString reports whether the string s
 // contains any match of the regular expression pattern.
-// More complicated queries need to use Compile and the full Regexp interface.
+// More complicated queries need to use [Compile] and the full [Regexp] interface.
 func MatchString(pattern string, s string) (matched bool, err error) {
 	re, err := Compile(pattern)
 	if err != nil {
@@ -563,7 +561,7 @@
 
 // Match reports whether the byte slice b
 // contains any match of the regular expression pattern.
-// More complicated queries need to use Compile and the full Regexp interface.
+// More complicated queries need to use [Compile] and the full [Regexp] interface.
 func Match(pattern string, b []byte) (matched bool, err error) {
 	re, err := Compile(pattern)
 	if err != nil {
@@ -572,9 +570,9 @@
 	return re.Match(b), nil
 }
 
-// ReplaceAllString returns a copy of src, replacing matches of the Regexp
-// with the replacement string repl. Inside repl, $ signs are interpreted as
-// in Expand, so for instance $1 represents the text of the first submatch.
+// ReplaceAllString returns a copy of src, replacing matches of the [Regexp]
+// with the replacement string repl.
+// Inside repl, $ signs are interpreted as in [Regexp.Expand].
 func (re *Regexp) ReplaceAllString(src, repl string) string {
 	n := 2
 	if strings.Contains(repl, "$") {
@@ -586,9 +584,9 @@
 	return string(b)
 }
 
-// ReplaceAllLiteralString returns a copy of src, replacing matches of the Regexp
+// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp]
 // with the replacement string repl. The replacement repl is substituted directly,
-// without using Expand.
+// without using [Regexp.Expand].
 func (re *Regexp) ReplaceAllLiteralString(src, repl string) string {
 	return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte {
 		return append(dst, repl...)
@@ -596,9 +594,9 @@
 }
 
 // ReplaceAllStringFunc returns a copy of src in which all matches of the
-// Regexp have been replaced by the return value of function repl applied
+// [Regexp] have been replaced by the return value of function repl applied
 // to the matched substring. The replacement returned by repl is substituted
-// directly, without using Expand.
+// directly, without using [Regexp.Expand].
 func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
 	b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte {
 		return append(dst, repl(src[match[0]:match[1]])...)
@@ -671,9 +669,9 @@
 	return buf
 }
 
-// ReplaceAll returns a copy of src, replacing matches of the Regexp
-// with the replacement text repl. Inside repl, $ signs are interpreted as
-// in Expand, so for instance $1 represents the text of the first submatch.
+// ReplaceAll returns a copy of src, replacing matches of the [Regexp]
+// with the replacement text repl.
+// Inside repl, $ signs are interpreted as in [Regexp.Expand].
 func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
 	n := 2
 	if bytes.IndexByte(repl, '$') >= 0 {
@@ -689,9 +687,9 @@
 	return b
 }
 
-// ReplaceAllLiteral returns a copy of src, replacing matches of the Regexp
+// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp]
 // with the replacement bytes repl. The replacement repl is substituted directly,
-// without using Expand.
+// without using [Regexp.Expand].
 func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte {
 	return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte {
 		return append(dst, repl...)
@@ -699,9 +697,9 @@
 }
 
 // ReplaceAllFunc returns a copy of src in which all matches of the
-// Regexp have been replaced by the return value of function repl applied
+// [Regexp] have been replaced by the return value of function repl applied
 // to the matched byte slice. The replacement returned by repl is substituted
-// directly, without using Expand.
+// directly, without using [Regexp.Expand].
 func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
 	return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte {
 		return append(dst, repl(src[match[0]:match[1]])...)
@@ -845,7 +843,7 @@
 // FindString returns a string holding the text of the leftmost match in s of the regular
 // expression. If there is no match, the return value is an empty string,
 // but it will also be empty if the regular expression successfully matches
-// an empty string. Use FindStringIndex or FindStringSubmatch if it is
+// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is
 // necessary to distinguish these cases.
 func (re *Regexp) FindString(s string) string {
 	var dstCap [2]int
@@ -870,7 +868,7 @@
 
 // FindReaderIndex returns a two-element slice of integers defining the
 // location of the leftmost match of the regular expression in text read from
-// the RuneReader. The match text was found in the input stream at
+// the [io.RuneReader]. The match text was found in the input stream at
 // byte offset loc[0] through loc[1]-1.
 // A return value of nil indicates no match.
 func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) {
@@ -904,7 +902,7 @@
 // Expand appends template to dst and returns the result; during the
 // append, Expand replaces variables in the template with corresponding
 // matches drawn from src. The match slice should have been returned by
-// FindSubmatchIndex.
+// [Regexp.FindSubmatchIndex].
 //
 // In the template, a variable is denoted by a substring of the form
 // $name or ${name}, where name is a non-empty sequence of letters,
@@ -922,7 +920,7 @@
 	return re.expand(dst, string(template), src, "", match)
 }
 
-// ExpandString is like Expand but the template and source are strings.
+// ExpandString is like [Regexp.Expand] but the template and source are strings.
 // It appends to and returns a byte slice in order to give the calling
 // code control over allocation.
 func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
@@ -1067,7 +1065,7 @@
 
 // FindReaderSubmatchIndex returns a slice holding the index pairs
 // identifying the leftmost match of the regular expression of text read by
-// the RuneReader, and the matches, if any, of its subexpressions, as defined
+// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined
 // by the 'Submatch' and 'Index' descriptions in the package comment. A
 // return value of nil indicates no match.
 func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
@@ -1094,7 +1092,7 @@
 	return result
 }
 
-// FindAllIndex is the 'All' version of FindIndex; it returns a slice of all
+// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all
 // successive matches of the expression, as defined by the 'All' description
 // in the package comment.
 // A return value of nil indicates no match.
@@ -1112,7 +1110,7 @@
 	return result
 }
 
-// FindAllString is the 'All' version of FindString; it returns a slice of all
+// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all
 // successive matches of the expression, as defined by the 'All' description
 // in the package comment.
 // A return value of nil indicates no match.
@@ -1130,7 +1128,7 @@
 	return result
 }
 
-// FindAllStringIndex is the 'All' version of FindStringIndex; it returns a
+// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a
 // slice of all successive matches of the expression, as defined by the 'All'
 // description in the package comment.
 // A return value of nil indicates no match.
@@ -1148,7 +1146,7 @@
 	return result
 }
 
-// FindAllSubmatch is the 'All' version of FindSubmatch; it returns a slice
+// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice
 // of all successive matches of the expression, as defined by the 'All'
 // description in the package comment.
 // A return value of nil indicates no match.
@@ -1172,7 +1170,7 @@
 	return result
 }
 
-// FindAllSubmatchIndex is the 'All' version of FindSubmatchIndex; it returns
+// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns
 // a slice of all successive matches of the expression, as defined by the
 // 'All' description in the package comment.
 // A return value of nil indicates no match.
@@ -1190,7 +1188,7 @@
 	return result
 }
 
-// FindAllStringSubmatch is the 'All' version of FindStringSubmatch; it
+// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it
 // returns a slice of all successive matches of the expression, as defined by
 // the 'All' description in the package comment.
 // A return value of nil indicates no match.
@@ -1215,7 +1213,7 @@
 }
 
 // FindAllStringSubmatchIndex is the 'All' version of
-// FindStringSubmatchIndex; it returns a slice of all successive matches of
+// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of
 // the expression, as defined by the 'All' description in the package
 // comment.
 // A return value of nil indicates no match.
@@ -1237,8 +1235,8 @@
 // the substrings between those expression matches.
 //
 // The slice returned by this method consists of all the substrings of s
-// not contained in the slice returned by FindAllString. When called on an expression
-// that contains no metacharacters, it is equivalent to strings.SplitN.
+// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression
+// that contains no metacharacters, it is equivalent to [strings.SplitN].
 //
 // Example:
 //
diff --git a/src/regexp/syntax/doc.go b/src/regexp/syntax/doc.go
index f6a4b43..eb8a971 100644
--- a/src/regexp/syntax/doc.go
+++ b/src/regexp/syntax/doc.go
@@ -56,6 +56,7 @@
 
 	(re)           numbered capturing group (submatch)
 	(?P<name>re)   named & numbered capturing group (submatch)
+	(?<name>re)    named & numbered capturing group (submatch)
 	(?:re)         non-capturing group
 	(?flags)       set flags within current group; non-capturing
 	(?flags:re)    set flags during re; non-capturing
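With the extra form documented above, both named-capture spellings should behave identically; a minimal check, assuming a toolchain that includes this change (Go 1.22 or later):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Both spellings create a group named "word" with index 1.
	rePython := regexp.MustCompile(`(?P<word>\w+)`)
	rePerl := regexp.MustCompile(`(?<word>\w+)`) // rejected by toolchains without this change

	fmt.Println(rePython.SubexpNames()) // [ word]
	fmt.Println(rePerl.SubexpNames())   // [ word]

	m := rePerl.FindStringSubmatch("hello world")
	fmt.Println(m[rePerl.SubexpIndex("word")]) // hello
}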
diff --git a/src/regexp/syntax/parse.go b/src/regexp/syntax/parse.go
index accee9a..6a11b53 100644
--- a/src/regexp/syntax/parse.go
+++ b/src/regexp/syntax/parse.go
@@ -382,14 +382,12 @@
 	if r < minFold || r > maxFold {
 		return r
 	}
-	min := r
+	m := r
 	r0 := r
 	for r = unicode.SimpleFold(r); r != r0; r = unicode.SimpleFold(r) {
-		if min > r {
-			min = r
-		}
+		m = min(m, r)
 	}
-	return min
+	return m
 }
 
 // op pushes a regexp with the given op onto the stack
@@ -1159,9 +1157,18 @@
 	// support all three as well. EcmaScript 4 uses only the Python form.
 	//
 	// In both the open source world (via Code Search) and the
-	// Google source tree, (?P<expr>name) is the dominant form,
-	// so that's the one we implement. One is enough.
-	if len(t) > 4 && t[2] == 'P' && t[3] == '<' {
+	// Google source tree, (?P<name>expr) and (?<name>expr) are the
+	// dominant forms of named captures and both are supported.
+	startsWithP := len(t) > 4 && t[2] == 'P' && t[3] == '<'
+	startsWithName := len(t) > 3 && t[2] == '<'
+
+	if startsWithP || startsWithName {
+		// position of expr start
+		exprStartPos := 4
+		if startsWithName {
+			exprStartPos = 3
+		}
+
 		// Pull out name.
 		end := strings.IndexRune(t, '>')
 		if end < 0 {
@@ -1171,8 +1178,8 @@
 			return "", &Error{ErrInvalidNamedCapture, s}
 		}
 
-		capture := t[:end+1] // "(?P<name>"
-		name := t[4:end]     // "name"
+		capture := t[:end+1]        // "(?P<name>" or "(?<name>"
+		name := t[exprStartPos:end] // "name"
 		if err = checkUTF8(name); err != nil {
 			return "", err
 		}
@@ -1854,6 +1861,22 @@
 	return r[:w]
 }
 
+// inCharClass reports whether r is in the class.
+// It assumes the class has been cleaned by cleanClass.
+func inCharClass(r rune, class []rune) bool {
+	_, ok := sort.Find(len(class)/2, func(i int) int {
+		lo, hi := class[2*i], class[2*i+1]
+		if r > hi {
+			return +1
+		}
+		if r < lo {
+			return -1
+		}
+		return 0
+	})
+	return ok
+}
+
 // appendLiteral returns the result of appending the literal x to the class r.
 func appendLiteral(r []rune, x rune, flags Flags) []rune {
 	if flags&FoldCase != 0 {
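inCharClass above assumes the class is a sorted list of [lo, hi] rune pairs; a standalone sketch of the same sort.Find lookup, with a hypothetical class standing in for real parser output:

package main

import (
	"fmt"
	"sort"
)

// inRangePairs reports whether r falls inside class, where class holds
// sorted, non-overlapping [lo, hi] rune pairs (the layout inCharClass expects).
func inRangePairs(r rune, class []rune) bool {
	_, ok := sort.Find(len(class)/2, func(i int) int {
		lo, hi := class[2*i], class[2*i+1]
		if r > hi {
			return +1
		}
		if r < lo {
			return -1
		}
		return 0
	})
	return ok
}

func main() {
	// Hypothetical class equivalent to [0-9A-Fa-f].
	class := []rune{'0', '9', 'A', 'F', 'a', 'f'}
	fmt.Println(inRangePairs('c', class)) // true
	fmt.Println(inRangePairs('g', class)) // false
}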
diff --git a/src/regexp/syntax/parse_test.go b/src/regexp/syntax/parse_test.go
index 67e3c56..0f885bd 100644
--- a/src/regexp/syntax/parse_test.go
+++ b/src/regexp/syntax/parse_test.go
@@ -160,6 +160,7 @@
 
 	// Test named captures
 	{`(?P<name>a)`, `cap{name:lit{a}}`},
+	{`(?<name>a)`, `cap{name:lit{a}}`},
 
 	// Case-folded literals
 	{`[Aa]`, `litfold{A}`},
@@ -482,6 +483,11 @@
 	`(?P<name`,
 	`(?P<x y>a)`,
 	`(?P<>a)`,
+	`(?<name>a`,
+	`(?<name>`,
+	`(?<name`,
+	`(?<x y>a)`,
+	`(?<>a)`,
 	`[a-Z]`,
 	`(?i)[a-Z]`,
 	`\Q\E*`,
@@ -584,3 +590,39 @@
 		}
 	}
 }
+
+var stringTests = []struct {
+	re  string
+	out string
+}{
+	{`x(?i:ab*c|d?e)1`, `x(?i:AB*C|D?E)1`},
+	{`x(?i:ab*cd?e)1`, `x(?i:AB*CD?E)1`},
+	{`0(?i:ab*c|d?e)1`, `(?i:0(?:AB*C|D?E)1)`},
+	{`0(?i:ab*cd?e)1`, `(?i:0AB*CD?E1)`},
+	{`x(?i:ab*c|d?e)`, `x(?i:AB*C|D?E)`},
+	{`x(?i:ab*cd?e)`, `x(?i:AB*CD?E)`},
+	{`0(?i:ab*c|d?e)`, `(?i:0(?:AB*C|D?E))`},
+	{`0(?i:ab*cd?e)`, `(?i:0AB*CD?E)`},
+	{`(?i:ab*c|d?e)1`, `(?i:(?:AB*C|D?E)1)`},
+	{`(?i:ab*cd?e)1`, `(?i:AB*CD?E1)`},
+	{`(?i:ab)[123](?i:cd)`, `(?i:AB[1-3]CD)`},
+	{`(?i:ab*c|d?e)`, `(?i:AB*C|D?E)`},
+	{`[Aa][Bb]`, `(?i:AB)`},
+	{`[Aa][Bb]*[Cc]`, `(?i:AB*C)`},
+	{`A(?:[Bb][Cc]|[Dd])[Zz]`, `A(?i:(?:BC|D)Z)`},
+	{`[Aa](?:[Bb][Cc]|[Dd])Z`, `(?i:A(?:BC|D))Z`},
+}
+
+func TestString(t *testing.T) {
+	for _, tt := range stringTests {
+		re, err := Parse(tt.re, Perl)
+		if err != nil {
+			t.Errorf("Parse(%#q): %v", tt.re, err)
+			continue
+		}
+		out := re.String()
+		if out != tt.out {
+			t.Errorf("Parse(%#q).String() = %#q, want %#q", tt.re, out, tt.out)
+		}
+	}
+}
diff --git a/src/regexp/syntax/prog.go b/src/regexp/syntax/prog.go
index 66995e2..d69ae1a 100644
--- a/src/regexp/syntax/prog.go
+++ b/src/regexp/syntax/prog.go
@@ -247,7 +247,7 @@
 	lo := 0
 	hi := len(rune) / 2
 	for lo < hi {
-		m := lo + (hi-lo)/2
+		m := int(uint(lo+hi) >> 1)
 		if c := rune[2*m]; c <= r {
 			if r <= rune[2*m+1] {
 				return m
diff --git a/src/regexp/syntax/regexp.go b/src/regexp/syntax/regexp.go
index 3a4d2d2..4fa7d0e 100644
--- a/src/regexp/syntax/regexp.go
+++ b/src/regexp/syntax/regexp.go
@@ -112,8 +112,165 @@
 	return true
 }
 
+// printFlags is a bit set indicating which flags (including non-capturing parens) to print around a regexp.
+type printFlags uint8
+
+const (
+	flagI    printFlags = 1 << iota // (?i:
+	flagM                           // (?m:
+	flagS                           // (?s:
+	flagOff                         // )
+	flagPrec                        // (?: )
+	negShift = 5                    // flagI<<negShift is (?-i:
+)
+
+// addSpan enables the flags f around start..last,
+// by setting flags[start] = f and flags[last] = flagOff.
+func addSpan(start, last *Regexp, f printFlags, flags *map[*Regexp]printFlags) {
+	if *flags == nil {
+		*flags = make(map[*Regexp]printFlags)
+	}
+	(*flags)[start] = f
+	(*flags)[last] |= flagOff // maybe start==last
+}
+
+// calcFlags calculates the flags to print around each subexpression in re,
+// storing that information in (*flags)[sub] for each affected subexpression.
+// The first time an entry needs to be written to *flags, calcFlags allocates the map.
+// calcFlags also calculates the flags that must be active or can't be active
+// around re and returns those flags.
+func calcFlags(re *Regexp, flags *map[*Regexp]printFlags) (must, cant printFlags) {
+	switch re.Op {
+	default:
+		return 0, 0
+
+	case OpLiteral:
+		// If literal is fold-sensitive, return (flagI, 0) or (0, flagI)
+		// according to whether (?i) is active.
+		// If literal is not fold-sensitive, return 0, 0.
+		for _, r := range re.Rune {
+			if minFold <= r && r <= maxFold && unicode.SimpleFold(r) != r {
+				if re.Flags&FoldCase != 0 {
+					return flagI, 0
+				} else {
+					return 0, flagI
+				}
+			}
+		}
+		return 0, 0
+
+	case OpCharClass:
+		// If the class is fold-sensitive, return 0, flagI - (?i) has been compiled out.
+		// If the class is not fold-sensitive, return 0, 0.
+		for i := 0; i < len(re.Rune); i += 2 {
+			lo := max(minFold, re.Rune[i])
+			hi := min(maxFold, re.Rune[i+1])
+			for r := lo; r <= hi; r++ {
+				for f := unicode.SimpleFold(r); f != r; f = unicode.SimpleFold(f) {
+					if !(lo <= f && f <= hi) && !inCharClass(f, re.Rune) {
+						return 0, flagI
+					}
+				}
+			}
+		}
+		return 0, 0
+
+	case OpAnyCharNotNL: // (?-s).
+		return 0, flagS
+
+	case OpAnyChar: // (?s).
+		return flagS, 0
+
+	case OpBeginLine, OpEndLine: // (?m)^ (?m)$
+		return flagM, 0
+
+	case OpEndText:
+		if re.Flags&WasDollar != 0 { // (?-m)$
+			return 0, flagM
+		}
+		return 0, 0
+
+	case OpCapture, OpStar, OpPlus, OpQuest, OpRepeat:
+		return calcFlags(re.Sub[0], flags)
+
+	case OpConcat, OpAlternate:
+		// Gather the must and cant for each subexpression.
+		// When we find a conflicting subexpression, insert the necessary
+		// flags around the previously identified span and start over.
+		var must, cant, allCant printFlags
+		start := 0
+		last := 0
+		did := false
+		for i, sub := range re.Sub {
+			subMust, subCant := calcFlags(sub, flags)
+			if must&subCant != 0 || subMust&cant != 0 {
+				if must != 0 {
+					addSpan(re.Sub[start], re.Sub[last], must, flags)
+				}
+				must = 0
+				cant = 0
+				start = i
+				did = true
+			}
+			must |= subMust
+			cant |= subCant
+			allCant |= subCant
+			if subMust != 0 {
+				last = i
+			}
+			if must == 0 && start == i {
+				start++
+			}
+		}
+		if !did {
+			// No conflicts: pass the accumulated must and cant upward.
+			return must, cant
+		}
+		if must != 0 {
+			// Conflicts found; need to finish final span.
+			addSpan(re.Sub[start], re.Sub[last], must, flags)
+		}
+		return 0, allCant
+	}
+}
+
 // writeRegexp writes the Perl syntax for the regular expression re to b.
-func writeRegexp(b *strings.Builder, re *Regexp) {
+func writeRegexp(b *strings.Builder, re *Regexp, f printFlags, flags map[*Regexp]printFlags) {
+	f |= flags[re]
+	if f&flagPrec != 0 && f&^(flagOff|flagPrec) != 0 && f&flagOff != 0 {
+		// flagPrec is redundant with other flags being added and terminated
+		f &^= flagPrec
+	}
+	if f&^(flagOff|flagPrec) != 0 {
+		b.WriteString(`(?`)
+		if f&flagI != 0 {
+			b.WriteString(`i`)
+		}
+		if f&flagM != 0 {
+			b.WriteString(`m`)
+		}
+		if f&flagS != 0 {
+			b.WriteString(`s`)
+		}
+		if f&((flagM|flagS)<<negShift) != 0 {
+			b.WriteString(`-`)
+			if f&(flagM<<negShift) != 0 {
+				b.WriteString(`m`)
+			}
+			if f&(flagS<<negShift) != 0 {
+				b.WriteString(`s`)
+			}
+		}
+		b.WriteString(`:`)
+	}
+	if f&flagOff != 0 {
+		defer b.WriteString(`)`)
+	}
+	if f&flagPrec != 0 {
+		b.WriteString(`(?:`)
+		defer b.WriteString(`)`)
+	}
+
 	switch re.Op {
 	default:
 		b.WriteString("<invalid op" + strconv.Itoa(int(re.Op)) + ">")
@@ -122,15 +279,9 @@
 	case OpEmptyMatch:
 		b.WriteString(`(?:)`)
 	case OpLiteral:
-		if re.Flags&FoldCase != 0 {
-			b.WriteString(`(?i:`)
-		}
 		for _, r := range re.Rune {
 			escape(b, r, false)
 		}
-		if re.Flags&FoldCase != 0 {
-			b.WriteString(`)`)
-		}
 	case OpCharClass:
 		if len(re.Rune)%2 != 0 {
 			b.WriteString(`[invalid char class]`)
@@ -147,7 +298,9 @@
 				lo, hi := re.Rune[i]+1, re.Rune[i+1]-1
 				escape(b, lo, lo == '-')
 				if lo != hi {
-					b.WriteRune('-')
+					if hi != lo+1 {
+						b.WriteRune('-')
+					}
 					escape(b, hi, hi == '-')
 				}
 			}
@@ -156,25 +309,25 @@
 				lo, hi := re.Rune[i], re.Rune[i+1]
 				escape(b, lo, lo == '-')
 				if lo != hi {
-					b.WriteRune('-')
+					if hi != lo+1 {
+						b.WriteRune('-')
+					}
 					escape(b, hi, hi == '-')
 				}
 			}
 		}
 		b.WriteRune(']')
-	case OpAnyCharNotNL:
-		b.WriteString(`(?-s:.)`)
-	case OpAnyChar:
-		b.WriteString(`(?s:.)`)
+	case OpAnyCharNotNL, OpAnyChar:
+		b.WriteString(`.`)
 	case OpBeginLine:
-		b.WriteString(`(?m:^)`)
+		b.WriteString(`^`)
 	case OpEndLine:
-		b.WriteString(`(?m:$)`)
+		b.WriteString(`$`)
 	case OpBeginText:
 		b.WriteString(`\A`)
 	case OpEndText:
 		if re.Flags&WasDollar != 0 {
-			b.WriteString(`(?-m:$)`)
+			b.WriteString(`$`)
 		} else {
 			b.WriteString(`\z`)
 		}
@@ -191,17 +344,17 @@
 			b.WriteRune('(')
 		}
 		if re.Sub[0].Op != OpEmptyMatch {
-			writeRegexp(b, re.Sub[0])
+			writeRegexp(b, re.Sub[0], flags[re.Sub[0]], flags)
 		}
 		b.WriteRune(')')
 	case OpStar, OpPlus, OpQuest, OpRepeat:
-		if sub := re.Sub[0]; sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 {
-			b.WriteString(`(?:`)
-			writeRegexp(b, sub)
-			b.WriteString(`)`)
-		} else {
-			writeRegexp(b, sub)
+		p := printFlags(0)
+		sub := re.Sub[0]
+		if sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 {
+			p = flagPrec
 		}
+		writeRegexp(b, sub, p, flags)
+
 		switch re.Op {
 		case OpStar:
 			b.WriteRune('*')
@@ -225,27 +378,31 @@
 		}
 	case OpConcat:
 		for _, sub := range re.Sub {
+			p := printFlags(0)
 			if sub.Op == OpAlternate {
-				b.WriteString(`(?:`)
-				writeRegexp(b, sub)
-				b.WriteString(`)`)
-			} else {
-				writeRegexp(b, sub)
+				p = flagPrec
 			}
+			writeRegexp(b, sub, p, flags)
 		}
 	case OpAlternate:
 		for i, sub := range re.Sub {
 			if i > 0 {
 				b.WriteRune('|')
 			}
-			writeRegexp(b, sub)
+			writeRegexp(b, sub, 0, flags)
 		}
 	}
 }
 
 func (re *Regexp) String() string {
 	var b strings.Builder
-	writeRegexp(&b, re)
+	var flags map[*Regexp]printFlags
+	must, cant := calcFlags(re, &flags)
+	must |= (cant &^ flagI) << negShift
+	if must != 0 {
+		must |= flagOff
+	}
+	writeRegexp(&b, re, must, flags)
 	return b.String()
 }
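On a toolchain that includes the flag-span printing above, case folding is hoisted into a single group rather than wrapped around each literal; a short sketch matching one of the stringTests entries:

package main

import (
	"fmt"
	"regexp/syntax"
)

func main() {
	re, err := syntax.Parse(`[Aa][Bb]*[Cc]`, syntax.Perl)
	if err != nil {
		panic(err)
	}
	fmt.Println(re.String()) // (?i:AB*C), per the stringTests table above
}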
 
diff --git a/src/regexp/syntax/simplify_test.go b/src/regexp/syntax/simplify_test.go
index 9877db3..6d06f99 100644
--- a/src/regexp/syntax/simplify_test.go
+++ b/src/regexp/syntax/simplify_test.go
@@ -13,7 +13,7 @@
 	// Already-simple constructs
 	{`a`, `a`},
 	{`ab`, `ab`},
-	{`a|b`, `[a-b]`},
+	{`a|b`, `[ab]`},
 	{`ab|cd`, `ab|cd`},
 	{`(ab)*`, `(ab)*`},
 	{`(ab)+`, `(ab)+`},
@@ -40,16 +40,16 @@
 
 	// Perl character classes
 	{`\d`, `[0-9]`},
-	{`\s`, `[\t-\n\f-\r ]`},
+	{`\s`, `[\t\n\f\r ]`},
 	{`\w`, `[0-9A-Z_a-z]`},
 	{`\D`, `[^0-9]`},
-	{`\S`, `[^\t-\n\f-\r ]`},
+	{`\S`, `[^\t\n\f\r ]`},
 	{`\W`, `[^0-9A-Z_a-z]`},
 	{`[\d]`, `[0-9]`},
-	{`[\s]`, `[\t-\n\f-\r ]`},
+	{`[\s]`, `[\t\n\f\r ]`},
 	{`[\w]`, `[0-9A-Z_a-z]`},
 	{`[\D]`, `[^0-9]`},
-	{`[\S]`, `[^\t-\n\f-\r ]`},
+	{`[\S]`, `[^\t\n\f\r ]`},
 	{`[\W]`, `[^0-9A-Z_a-z]`},
 
 	// Posix repetitions
@@ -82,7 +82,8 @@
 	{`a{0}`, `(?:)`},
 
 	// Character class simplification
-	{`[ab]`, `[a-b]`},
+	{`[ab]`, `[ab]`},
+	{`[abc]`, `[a-c]`},
 	{`[a-za-za-z]`, `[a-z]`},
 	{`[A-Za-zA-Za-z]`, `[A-Za-z]`},
 	{`[ABCDEFGH]`, `[A-H]`},
@@ -120,7 +121,8 @@
 	// interesting than they might otherwise be. String inserts
 	// explicit (?:) in place of non-parenthesized empty strings,
 	// to make them easier to spot for other parsers.
-	{`(a|b|)`, `([a-b]|(?:))`},
+	{`(a|b|c|)`, `([a-c]|(?:))`},
+	{`(a|b|)`, `([ab]|(?:))`},
 	{`(|)`, `()`},
 	{`a()`, `a()`},
 	{`(()|())`, `(()|())`},
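These expectations reflect how the printer now writes two-element classes; a brief sketch via Simplify and String, matching the entries above (assuming a toolchain with this change):

package main

import (
	"fmt"
	"regexp/syntax"
)

func main() {
	for _, expr := range []string{`a|b`, `[abc]`, `\d`} {
		re, err := syntax.Parse(expr, syntax.Perl)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s\n", expr, re.Simplify().String())
	}
	// a|b -> [ab]
	// [abc] -> [a-c]
	// \d -> [0-9]
}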
diff --git a/src/regexp/testdata/nullsubexpr.dat b/src/regexp/testdata/nullsubexpr.dat
index 68d9c99..99ce0f5 100644
--- a/src/regexp/testdata/nullsubexpr.dat
+++ b/src/regexp/testdata/nullsubexpr.dat
@@ -44,7 +44,7 @@
 
 E	((z)+|a)*	zabcde		(0,2)(1,2)
 
-#{E	a+?		aaaaaa		(0,1)	no *? +? mimimal match ops
+#{E	a+?		aaaaaa		(0,1)	no *? +? minimal match ops
 #E	(a)		aaa		(0,1)(0,1)
 #E	(a*?)		aaa		(0,0)(0,0)
 #E	(a)*?		aaa		(0,0)
diff --git a/src/runtime/abi_test.go b/src/runtime/abi_test.go
index 0c9488a..d7039e7 100644
--- a/src/runtime/abi_test.go
+++ b/src/runtime/abi_test.go
@@ -40,7 +40,7 @@
 	// Actually run the test in a subprocess because we don't want
 	// finalizers from other tests interfering.
 	if os.Getenv("TEST_FINALIZER_REGABI") != "1" {
-		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestFinalizerRegisterABI", "-test.v"))
+		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=^TestFinalizerRegisterABI$", "-test.v"))
 		cmd.Env = append(cmd.Env, "TEST_FINALIZER_REGABI=1")
 		out, err := cmd.CombinedOutput()
 		if !strings.Contains(string(out), "PASS\n") || err != nil {
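The anchoring added above matters because -test.run treats its argument as an unanchored regular expression; a small sketch of the difference, using a hypothetical second test name:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical test names; -test.run matches them as an unanchored regexp.
	names := []string{"TestFinalizerRegisterABI", "TestFinalizerRegisterABIStress"}

	loose := regexp.MustCompile(`TestFinalizerRegisterABI`)
	strict := regexp.MustCompile(`^TestFinalizerRegisterABI$`)

	for _, n := range names {
		fmt.Println(n, loose.MatchString(n), strict.MatchString(n))
	}
	// TestFinalizerRegisterABI true true
	// TestFinalizerRegisterABIStress true false
}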
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index a1f683f..eaf9c91 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -66,7 +66,7 @@
 	case f == 0:
 		return c1 * (c0 ^ h) // +0, -0
 	case f != f:
-		return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
+		return c1 * (c0 ^ h ^ uintptr(rand())) // any kind of NaN
 	default:
 		return memhash(p, h, 4)
 	}
@@ -78,7 +78,7 @@
 	case f == 0:
 		return c1 * (c0 ^ h) // +0, -0
 	case f != f:
-		return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
+		return c1 * (c0 ^ h ^ uintptr(rand())) // any kind of NaN
 	default:
 		return memhash(p, h, 8)
 	}
@@ -193,6 +193,74 @@
 	}
 }
 
+func mapKeyError(t *maptype, p unsafe.Pointer) error {
+	if !t.HashMightPanic() {
+		return nil
+	}
+	return mapKeyError2(t.Key, p)
+}
+
+func mapKeyError2(t *_type, p unsafe.Pointer) error {
+	if t.TFlag&abi.TFlagRegularMemory != 0 {
+		return nil
+	}
+	switch t.Kind_ & kindMask {
+	case kindFloat32, kindFloat64, kindComplex64, kindComplex128, kindString:
+		return nil
+	case kindInterface:
+		i := (*interfacetype)(unsafe.Pointer(t))
+		var t *_type
+		var pdata *unsafe.Pointer
+		if len(i.Methods) == 0 {
+			a := (*eface)(p)
+			t = a._type
+			if t == nil {
+				return nil
+			}
+			pdata = &a.data
+		} else {
+			a := (*iface)(p)
+			if a.tab == nil {
+				return nil
+			}
+			t = a.tab._type
+			pdata = &a.data
+		}
+
+		if t.Equal == nil {
+			return errorString("hash of unhashable type " + toRType(t).string())
+		}
+
+		if isDirectIface(t) {
+			return mapKeyError2(t, unsafe.Pointer(pdata))
+		} else {
+			return mapKeyError2(t, *pdata)
+		}
+	case kindArray:
+		a := (*arraytype)(unsafe.Pointer(t))
+		for i := uintptr(0); i < a.Len; i++ {
+			if err := mapKeyError2(a.Elem, add(p, i*a.Elem.Size_)); err != nil {
+				return err
+			}
+		}
+		return nil
+	case kindStruct:
+		s := (*structtype)(unsafe.Pointer(t))
+		for _, f := range s.Fields {
+			if f.Name.IsBlank() {
+				continue
+			}
+			if err := mapKeyError2(f.Typ, add(p, f.Offset)); err != nil {
+				return err
+			}
+		}
+		return nil
+	default:
+		// Should never happen, keep this case for robustness.
+		return errorString("hash of unhashable type " + toRType(t).string())
+	}
+}
+
 //go:linkname reflect_typehash reflect.typehash
 func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
 	return typehash(t, p, h)
@@ -322,17 +390,18 @@
 		initAlgAES()
 		return
 	}
-	getRandomData((*[len(hashkey) * goarch.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
-	hashkey[0] |= 1 // make sure these numbers are odd
-	hashkey[1] |= 1
-	hashkey[2] |= 1
-	hashkey[3] |= 1
+	for i := range hashkey {
+		hashkey[i] = uintptr(rand()) | 1 // make sure these numbers are odd
+	}
 }
 
 func initAlgAES() {
 	useAeshash = true
 	// Initialize with random data so hash collisions will be hard to engineer.
-	getRandomData(aeskeysched[:])
+	key := (*[hashRandomBytes / 8]uint64)(unsafe.Pointer(&aeskeysched))
+	for i := range key {
+		key[i] = bootstrapRand()
+	}
 }
 
 // Note: These routines perform the read with a native endianness.
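mapKeyError above lets the runtime name the offending key type; from ordinary Go code the same condition surfaces as a panic, as in this rough sketch:

package main

import "fmt"

func main() {
	m := map[any]int{}

	// Comparable keys hash fine, even through an interface.
	m["ok"] = 1

	// A func value is not comparable, so using it as a map key through an
	// interface panics with "hash of unhashable type func()".
	defer func() {
		fmt.Println("recovered:", recover())
	}()
	m[func() {}] = 2
}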
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index f9806c5..e943817 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -84,6 +84,7 @@
 
 import (
 	"internal/goarch"
+	"internal/goexperiment"
 	"runtime/internal/atomic"
 	"runtime/internal/math"
 	"unsafe"
@@ -219,6 +220,19 @@
 	lockInit(&userArenaState.lock, lockRankUserArenaState)
 }
 
+// userArenaChunkReserveBytes returns the amount of additional bytes to reserve for
+// heap metadata.
+func userArenaChunkReserveBytes() uintptr {
+	if goexperiment.AllocHeaders {
+		// In the allocation headers experiment, we reserve the end of the chunk for
+		// a pointer/scalar bitmap. We also reserve space for a dummy _type that
+		// refers to the bitmap. The PtrBytes field of the dummy _type indicates how
+		// many of those bits are valid.
+		return userArenaChunkBytes/goarch.PtrSize/8 + unsafe.Sizeof(_type{})
+	}
+	return 0
+}
+
 type userArena struct {
 	// full is a list of full chunks that have not enough free memory left, and
 	// that we'll free once this user arena is freed.
@@ -492,9 +506,9 @@
 	// Set up heap bitmap and do extra accounting.
 	if typ.PtrBytes != 0 {
 		if cap >= 0 {
-			userArenaHeapBitsSetSliceType(typ, cap, ptr, s.base())
+			userArenaHeapBitsSetSliceType(typ, cap, ptr, s)
 		} else {
-			userArenaHeapBitsSetType(typ, ptr, s.base())
+			userArenaHeapBitsSetType(typ, ptr, s)
 		}
 		c := getMCache(mp)
 		if c == nil {
@@ -521,111 +535,16 @@
 	return ptr
 }
 
-// userArenaHeapBitsSetType is the equivalent of heapBitsSetType but for
-// non-slice-backing-store Go values allocated in a user arena chunk. It
-// sets up the heap bitmap for the value with type typ allocated at address ptr.
-// base is the base address of the arena chunk.
-func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
-	h := writeHeapBitsForAddr(uintptr(ptr))
-
-	// Our last allocation might have ended right at a noMorePtrs mark,
-	// which we would not have erased. We need to erase that mark here,
-	// because we're going to start adding new heap bitmap bits.
-	// We only need to clear one mark, because below we make sure to
-	// pad out the bits with zeroes and only write one noMorePtrs bit
-	// for each new object.
-	// (This is only necessary at noMorePtrs boundaries, as noMorePtrs
-	// marks within an object allocated with newAt will be erased by
-	// the normal writeHeapBitsForAddr mechanism.)
-	//
-	// Note that we skip this if this is the first allocation in the
-	// arena because there's definitely no previous noMorePtrs mark
-	// (in fact, we *must* do this, because we're going to try to back
-	// up a pointer to fix this up).
-	if uintptr(ptr)%(8*goarch.PtrSize*goarch.PtrSize) == 0 && uintptr(ptr) != base {
-		// Back up one pointer and rewrite that pointer. That will
-		// cause the writeHeapBits implementation to clear the
-		// noMorePtrs bit we need to clear.
-		r := heapBitsForAddr(uintptr(ptr)-goarch.PtrSize, goarch.PtrSize)
-		_, p := r.next()
-		b := uintptr(0)
-		if p == uintptr(ptr)-goarch.PtrSize {
-			b = 1
-		}
-		h = writeHeapBitsForAddr(uintptr(ptr) - goarch.PtrSize)
-		h = h.write(b, 1)
-	}
-
-	p := typ.GCData // start of 1-bit pointer mask (or GC program)
-	var gcProgBits uintptr
-	if typ.Kind_&kindGCProg != 0 {
-		// Expand gc program, using the object itself for storage.
-		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
-		p = (*byte)(ptr)
-	}
-	nb := typ.PtrBytes / goarch.PtrSize
-
-	for i := uintptr(0); i < nb; i += ptrBits {
-		k := nb - i
-		if k > ptrBits {
-			k = ptrBits
-		}
-		h = h.write(readUintptr(addb(p, i/8)), k)
-	}
-	// Note: we call pad here to ensure we emit explicit 0 bits
-	// for the pointerless tail of the object. This ensures that
-	// there's only a single noMorePtrs mark for the next object
-	// to clear. We don't need to do this to clear stale noMorePtrs
-	// markers from previous uses because arena chunk pointer bitmaps
-	// are always fully cleared when reused.
-	h = h.pad(typ.Size_ - typ.PtrBytes)
-	h.flush(uintptr(ptr), typ.Size_)
-
-	if typ.Kind_&kindGCProg != 0 {
-		// Zero out temporary ptrmask buffer inside object.
-		memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
-	}
-
-	// Double-check that the bitmap was written out correctly.
-	//
-	// Derived from heapBitsSetType.
-	const doubleCheck = false
-	if doubleCheck {
-		size := typ.Size_
-		x := uintptr(ptr)
-		h := heapBitsForAddr(x, size)
-		for i := uintptr(0); i < size; i += goarch.PtrSize {
-			// Compute the pointer bit we want at offset i.
-			want := false
-			off := i % typ.Size_
-			if off < typ.PtrBytes {
-				j := off / goarch.PtrSize
-				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
-			}
-			if want {
-				var addr uintptr
-				h, addr = h.next()
-				if addr != x+i {
-					throw("userArenaHeapBitsSetType: pointer entry not correct")
-				}
-			}
-		}
-		if _, addr := h.next(); addr != 0 {
-			throw("userArenaHeapBitsSetType: extra pointer")
-		}
-	}
-}
-
 // userArenaHeapBitsSetSliceType is the equivalent of heapBitsSetType but for
 // Go slice backing store values allocated in a user arena chunk. It sets up the
 // heap bitmap for n consecutive values with type typ allocated at address ptr.
-func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, base uintptr) {
+func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, s *mspan) {
 	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
 	if overflow || n < 0 || mem > maxAlloc {
 		panic(plainError("runtime: allocation size out of range"))
 	}
 	for i := 0; i < n; i++ {
-		userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), base)
+		userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), s)
 	}
 }
 
@@ -670,7 +589,7 @@
 	// This may be racing with GC so do it atomically if there can be
 	// a race marking the bit.
 	if gcphase != _GCoff {
-		gcmarknewobject(span, span.base(), span.elemsize)
+		gcmarknewobject(span, span.base())
 	}
 
 	if raceenabled {
@@ -687,9 +606,12 @@
 		// TODO(mknyszek): Track individual objects.
 		rzSize := computeRZlog(span.elemsize)
 		span.elemsize -= rzSize
-		span.limit -= rzSize
-		span.userArenaChunkFree = makeAddrRange(span.base(), span.limit)
-		asanpoison(unsafe.Pointer(span.limit), span.npages*pageSize-span.elemsize)
+		if goexperiment.AllocHeaders {
+			span.largeType.Size_ = span.elemsize
+		}
+		rzStart := span.base() + span.elemsize
+		span.userArenaChunkFree = makeAddrRange(span.base(), rzStart)
+		asanpoison(unsafe.Pointer(rzStart), span.limit-rzStart)
 		asanunpoison(unsafe.Pointer(span.base()), span.elemsize)
 	}
 
@@ -790,7 +712,7 @@
 	// the span gets off the quarantine list. The main reason is so that the
 	// amount of bytes allocated doesn't exceed how much is counted as
 	// "mapped ready," which could cause a deadlock in the pacer.
-	gcController.totalFree.Add(int64(s.npages * pageSize))
+	gcController.totalFree.Add(int64(s.elemsize))
 
 	// Update consistent stats to match.
 	//
@@ -800,11 +722,11 @@
 	atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
 	atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
 	atomic.Xadd64(&stats.largeFreeCount, 1)
-	atomic.Xadd64(&stats.largeFree, int64(s.npages*pageSize))
+	atomic.Xadd64(&stats.largeFree, int64(s.elemsize))
 	memstats.heapStats.release()
 
 	// This counts as a free, so update heapLive.
-	gcController.update(-int64(s.npages*pageSize), 0)
+	gcController.update(-int64(s.elemsize), 0)
 
 	// Mark it as free for the race detector.
 	if raceenabled {
@@ -922,7 +844,7 @@
 			// some extra as a result of trying to find an aligned region.
 			//
 			// Divide it up and put it on the ready list.
-			for i := uintptr(userArenaChunkBytes); i < size; i += userArenaChunkBytes {
+			for i := userArenaChunkBytes; i < size; i += userArenaChunkBytes {
 				s := h.allocMSpanLocked()
 				s.init(uintptr(v)+i, userArenaChunkPages)
 				h.userArena.readyList.insertBack(s)
@@ -952,6 +874,10 @@
 	spc := makeSpanClass(0, false)
 	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
 	s.isUserArenaChunk = true
+	s.elemsize -= userArenaChunkReserveBytes()
+	s.limit = s.base() + s.elemsize
+	s.freeindex = 1
+	s.allocCount = 1
 
 	// Account for this new arena chunk memory.
 	gcController.heapInUse.add(int64(userArenaChunkBytes))
@@ -962,22 +888,15 @@
 	atomic.Xaddint64(&stats.committed, int64(userArenaChunkBytes))
 
 	// Model the arena as a single large malloc.
-	atomic.Xadd64(&stats.largeAlloc, int64(userArenaChunkBytes))
+	atomic.Xadd64(&stats.largeAlloc, int64(s.elemsize))
 	atomic.Xadd64(&stats.largeAllocCount, 1)
 	memstats.heapStats.release()
 
 	// Count the alloc in inconsistent, internal stats.
-	gcController.totalAlloc.Add(int64(userArenaChunkBytes))
+	gcController.totalAlloc.Add(int64(s.elemsize))
 
 	// Update heapLive.
-	gcController.update(int64(userArenaChunkBytes), 0)
-
-	// Put the large span in the mcentral swept list so that it's
-	// visible to the background sweeper.
-	h.central[spc].mcentral.fullSwept(h.sweepgen).push(s)
-	s.limit = s.base() + userArenaChunkBytes
-	s.freeindex = 1
-	s.allocCount = 1
+	gcController.update(int64(s.elemsize), 0)
 
 	// This must clear the entire heap bitmap so that it's safe
 	// to allocate noscan data without writing anything out.
@@ -998,6 +917,19 @@
 	s.freeIndexForScan = 1
 
 	// Set up the range for allocation.
-	s.userArenaChunkFree = makeAddrRange(base, s.limit)
+	s.userArenaChunkFree = makeAddrRange(base, base+s.elemsize)
+
+	// Put the large span in the mcentral swept list so that it's
+	// visible to the background sweeper.
+	h.central[spc].mcentral.fullSwept(h.sweepgen).push(s)
+
+	if goexperiment.AllocHeaders {
+		// Set up an allocation header. Avoid write barriers here because this type
+		// is not a real type, and it exists in an invalid location.
+		*(*uintptr)(unsafe.Pointer(&s.largeType)) = uintptr(unsafe.Pointer(s.limit))
+		*(*uintptr)(unsafe.Pointer(&s.largeType.GCData)) = s.limit + unsafe.Sizeof(_type{})
+		s.largeType.PtrBytes = 0
+		s.largeType.Size_ = s.elemsize
+	}
 	return s
 }
diff --git a/src/runtime/arena_test.go b/src/runtime/arena_test.go
index 7e121ad..018c423 100644
--- a/src/runtime/arena_test.go
+++ b/src/runtime/arena_test.go
@@ -390,21 +390,18 @@
 	// Create a string as using the same memory as the byte slice, hence in
 	// the arena. This could be an arena API, but hasn't really been needed
 	// yet.
-	var as string
-	asHeader := (*reflect.StringHeader)(unsafe.Pointer(&as))
-	asHeader.Data = (*reflect.SliceHeader)(unsafe.Pointer(&b)).Data
-	asHeader.Len = len(b)
+	as := unsafe.String(&b[0], len(b))
 
 	// Clone should make a copy of as, since it is in the arena.
 	asCopy := UserArenaClone(as)
-	if (*reflect.StringHeader)(unsafe.Pointer(&as)).Data == (*reflect.StringHeader)(unsafe.Pointer(&asCopy)).Data {
+	if unsafe.StringData(as) == unsafe.StringData(asCopy) {
 		t.Error("Clone did not make a copy")
 	}
 
 	// Clone should make a copy of subAs, since subAs is just part of as and so is in the arena.
 	subAs := as[1:3]
 	subAsCopy := UserArenaClone(subAs)
-	if (*reflect.StringHeader)(unsafe.Pointer(&subAs)).Data == (*reflect.StringHeader)(unsafe.Pointer(&subAsCopy)).Data {
+	if unsafe.StringData(subAs) == unsafe.StringData(subAsCopy) {
 		t.Error("Clone did not make a copy")
 	}
 	if len(subAs) != len(subAsCopy) {
@@ -420,13 +417,13 @@
 	// Clone should not make a copy of doubleAs, since doubleAs will be on the heap.
 	doubleAs := as + as
 	doubleAsCopy := UserArenaClone(doubleAs)
-	if (*reflect.StringHeader)(unsafe.Pointer(&doubleAs)).Data != (*reflect.StringHeader)(unsafe.Pointer(&doubleAsCopy)).Data {
+	if unsafe.StringData(doubleAs) != unsafe.StringData(doubleAsCopy) {
 		t.Error("Clone should not have made a copy")
 	}
 
 	// Clone should not make a copy of s, since s is a static string.
 	sCopy := UserArenaClone(s)
-	if (*reflect.StringHeader)(unsafe.Pointer(&s)).Data != (*reflect.StringHeader)(unsafe.Pointer(&sCopy)).Data {
+	if unsafe.StringData(s) != unsafe.StringData(sCopy) {
 		t.Error("Clone should not have made a copy")
 	}
 
diff --git a/src/runtime/asan/asan.go b/src/runtime/asan/asan.go
index 25f15ae..ef70b01 100644
--- a/src/runtime/asan/asan.go
+++ b/src/runtime/asan/asan.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build asan && linux && (arm64 || amd64 || riscv64 || ppc64le)
+//go:build asan && linux && (arm64 || amd64 || loong64 || riscv64 || ppc64le)
 
 package asan
 
diff --git a/src/runtime/asan0.go b/src/runtime/asan0.go
index 0948786..bcfd96f 100644
--- a/src/runtime/asan0.go
+++ b/src/runtime/asan0.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Go Authors.  All rights reserved.
+// Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/src/runtime/asan_amd64.s b/src/runtime/asan_amd64.s
index bf847f2..195faf4 100644
--- a/src/runtime/asan_amd64.s
+++ b/src/runtime/asan_amd64.s
@@ -28,7 +28,7 @@
 // func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr)
 TEXT	runtime·doasanread(SB), NOSPLIT, $0-32
 	MOVQ	addr+0(FP), RARG0
-	MOVQ	size+8(FP), RARG1
+	MOVQ	sz+8(FP), RARG1
 	MOVQ	sp+16(FP), RARG2
 	MOVQ	pc+24(FP), RARG3
 	// void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc);
@@ -38,7 +38,7 @@
 // func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr)
 TEXT	runtime·doasanwrite(SB), NOSPLIT, $0-32
 	MOVQ	addr+0(FP), RARG0
-	MOVQ	size+8(FP), RARG1
+	MOVQ	sz+8(FP), RARG1
 	MOVQ	sp+16(FP), RARG2
 	MOVQ	pc+24(FP), RARG3
 	// void __asan_write_go(void *addr, uintptr_t sz, void *sp, void *pc);
@@ -48,7 +48,7 @@
 // func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr)
 TEXT	runtime·asanunpoison(SB), NOSPLIT, $0-16
 	MOVQ	addr+0(FP), RARG0
-	MOVQ	size+8(FP), RARG1
+	MOVQ	sz+8(FP), RARG1
 	// void __asan_unpoison_go(void *addr, uintptr_t sz);
 	MOVQ	$__asan_unpoison_go(SB), AX
 	JMP	asancall<>(SB)
@@ -56,17 +56,17 @@
 // func runtime·asanpoison(addr unsafe.Pointer, sz uintptr)
 TEXT	runtime·asanpoison(SB), NOSPLIT, $0-16
 	MOVQ	addr+0(FP), RARG0
-	MOVQ	size+8(FP), RARG1
+	MOVQ	sz+8(FP), RARG1
 	// void __asan_poison_go(void *addr, uintptr_t sz);
 	MOVQ	$__asan_poison_go(SB), AX
 	JMP	asancall<>(SB)
 
 // func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr)
 TEXT	runtime·asanregisterglobals(SB), NOSPLIT, $0-16
-	MOVD	addr+0(FP), RARG0
-	MOVD	size+8(FP), RARG1
+	MOVQ	addr+0(FP), RARG0
+	MOVQ	n+8(FP), RARG1
 	// void __asan_register_globals_go(void *addr, uintptr_t n);
-	MOVD	$__asan_register_globals_go(SB), AX
+	MOVQ	$__asan_register_globals_go(SB), AX
 	JMP	asancall<>(SB)
 
 // Switches SP to g0 stack and calls (AX). Arguments already set.
diff --git a/src/runtime/asan_arm64.s b/src/runtime/asan_arm64.s
index 697c982..dfa3f81 100644
--- a/src/runtime/asan_arm64.s
+++ b/src/runtime/asan_arm64.s
@@ -17,7 +17,7 @@
 // func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr)
 TEXT	runtime·doasanread(SB), NOSPLIT, $0-32
 	MOVD	addr+0(FP), RARG0
-	MOVD	size+8(FP), RARG1
+	MOVD	sz+8(FP), RARG1
 	MOVD	sp+16(FP), RARG2
 	MOVD	pc+24(FP), RARG3
 	// void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc);
@@ -27,7 +27,7 @@
 // func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr)
 TEXT	runtime·doasanwrite(SB), NOSPLIT, $0-32
 	MOVD	addr+0(FP), RARG0
-	MOVD	size+8(FP), RARG1
+	MOVD	sz+8(FP), RARG1
 	MOVD	sp+16(FP), RARG2
 	MOVD	pc+24(FP), RARG3
 	// void __asan_write_go(void *addr, uintptr_t sz, void *sp, void *pc);
@@ -37,7 +37,7 @@
 // func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr)
 TEXT	runtime·asanunpoison(SB), NOSPLIT, $0-16
 	MOVD	addr+0(FP), RARG0
-	MOVD	size+8(FP), RARG1
+	MOVD	sz+8(FP), RARG1
 	// void __asan_unpoison_go(void *addr, uintptr_t sz);
 	MOVD	$__asan_unpoison_go(SB), FARG
 	JMP	asancall<>(SB)
@@ -45,7 +45,7 @@
 // func runtime·asanpoison(addr unsafe.Pointer, sz uintptr)
 TEXT	runtime·asanpoison(SB), NOSPLIT, $0-16
 	MOVD	addr+0(FP), RARG0
-	MOVD	size+8(FP), RARG1
+	MOVD	sz+8(FP), RARG1
 	// void __asan_poison_go(void *addr, uintptr_t sz);
 	MOVD	$__asan_poison_go(SB), FARG
 	JMP	asancall<>(SB)
@@ -53,7 +53,7 @@
 // func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr)
 TEXT	runtime·asanregisterglobals(SB), NOSPLIT, $0-16
 	MOVD	addr+0(FP), RARG0
-	MOVD	size+8(FP), RARG1
+	MOVD	n+8(FP), RARG1
 	// void __asan_register_globals_go(void *addr, uintptr_t n);
 	MOVD	$__asan_register_globals_go(SB), FARG
 	JMP	asancall<>(SB)
diff --git a/src/runtime/asan_loong64.s b/src/runtime/asan_loong64.s
new file mode 100644
index 0000000..0034a31
--- /dev/null
+++ b/src/runtime/asan_loong64.s
@@ -0,0 +1,75 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build asan
+
+#include "go_asm.h"
+#include "textflag.h"
+
+#define RARG0 R4
+#define RARG1 R5
+#define RARG2 R6
+#define RARG3 R7
+#define FARG  R8
+
+// Called from instrumented code.
+// func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr)
+TEXT	runtime·doasanread(SB), NOSPLIT, $0-32
+	MOVV	addr+0(FP), RARG0
+	MOVV	sz+8(FP), RARG1
+	MOVV	sp+16(FP), RARG2
+	MOVV	pc+24(FP), RARG3
+	// void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc);
+	MOVV	$__asan_read_go(SB), FARG
+	JMP	asancall<>(SB)
+
+// func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr)
+TEXT	runtime·doasanwrite(SB), NOSPLIT, $0-32
+	MOVV	addr+0(FP), RARG0
+	MOVV	sz+8(FP), RARG1
+	MOVV	sp+16(FP), RARG2
+	MOVV	pc+24(FP), RARG3
+	// void __asan_write_go(void *addr, uintptr_t sz, void *sp, void *pc);
+	MOVV	$__asan_write_go(SB), FARG
+	JMP	asancall<>(SB)
+
+// func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr)
+TEXT	runtime·asanunpoison(SB), NOSPLIT, $0-16
+	MOVV	addr+0(FP), RARG0
+	MOVV	sz+8(FP), RARG1
+	// void __asan_unpoison_go(void *addr, uintptr_t sz);
+	MOVV	$__asan_unpoison_go(SB), FARG
+	JMP	asancall<>(SB)
+
+// func runtime·asanpoison(addr unsafe.Pointer, sz uintptr)
+TEXT	runtime·asanpoison(SB), NOSPLIT, $0-16
+	MOVV	addr+0(FP), RARG0
+	MOVV	sz+8(FP), RARG1
+	// void __asan_poison_go(void *addr, uintptr_t sz);
+	MOVV	$__asan_poison_go(SB), FARG
+	JMP	asancall<>(SB)
+
+// func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr)
+TEXT	runtime·asanregisterglobals(SB), NOSPLIT, $0-16
+	MOVV	addr+0(FP), RARG0
+	MOVV	n+8(FP), RARG1
+	// void __asan_register_globals_go(void *addr, uintptr_t n);
+	MOVV	$__asan_register_globals_go(SB), FARG
+	JMP	asancall<>(SB)
+
+// Switches SP to g0 stack and calls (FARG). Arguments already set.
+TEXT	asancall<>(SB), NOSPLIT, $0-0
+	MOVV	R3, R23         // callee-saved
+	BEQ	g, g0stack      // no g, still on a system stack
+	MOVV	g_m(g), R14
+	MOVV	m_g0(R14), R15
+	BEQ	R15, g, g0stack
+
+	MOVV	(g_sched+gobuf_sp)(R15), R9
+	MOVV	R9, R3
+
+g0stack:
+	JAL	(FARG)
+	MOVV	R23, R3
+	RET
diff --git a/src/runtime/asm.s b/src/runtime/asm.s
index f7bc5d4..24cd0c9 100644
--- a/src/runtime/asm.s
+++ b/src/runtime/asm.s
@@ -12,3 +12,23 @@
 // See map.go comment on the need for this routine.
 TEXT ·mapinitnoop<ABIInternal>(SB),NOSPLIT,$0-0
 	RET
+
+#ifndef GOARCH_amd64
+#ifndef GOARCH_arm64
+#ifndef GOARCH_mips64
+#ifndef GOARCH_mips64le
+#ifndef GOARCH_ppc64
+#ifndef GOARCH_ppc64le
+#ifndef GOARCH_riscv64
+#ifndef GOARCH_wasm
+// stub to appease shared build mode.
+TEXT ·switchToCrashStack0<ABIInternal>(SB),NOSPLIT,$0-0
+	UNDEF
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
diff --git a/src/runtime/asm_amd64.h b/src/runtime/asm_amd64.h
index f7a8896..b263ade 100644
--- a/src/runtime/asm_amd64.h
+++ b/src/runtime/asm_amd64.h
@@ -20,6 +20,9 @@
 #ifdef GOAMD64_v4
 #define hasAVX
 #define hasAVX2
+#define hasAVX512F
+#define hasAVX512BW
+#define hasAVX512VL
 #define hasPOPCNT
 #define hasSSE42
 #endif
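
Under GOAMD64=v4 the assembler can now also assume the AVX-512 F/BW/VL extensions (part of the x86-64-v4 feature level), matching the other hasXXX defines. When that build-time guarantee is absent, Go code typically probes the CPU at run time instead; a sketch using the external golang.org/x/sys/cpu package (not part of this patch):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Mirrors what the hasAVX512F/BW/VL defines assert at build time
	// when compiling with GOAMD64=v4.
	ok := cpu.X86.HasAVX512F && cpu.X86.HasAVX512BW && cpu.X86.HasAVX512VL
	fmt.Println("AVX-512 F/BW/VL available:", ok)
}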
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index edf0909..1071d27 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -144,12 +144,12 @@
 #define commpage64_base_address         0x00007fffffe00000
 #define commpage64_cpu_capabilities64   (commpage64_base_address+0x010)
 #define commpage64_version              (commpage64_base_address+0x01E)
-#define hasAVX512F                      0x0000004000000000
-#define hasAVX512CD                     0x0000008000000000
-#define hasAVX512DQ                     0x0000010000000000
-#define hasAVX512BW                     0x0000020000000000
-#define hasAVX512VL                     0x0000100000000000
-#define NEED_DARWIN_SUPPORT             (hasAVX512F | hasAVX512DQ | hasAVX512CD | hasAVX512BW | hasAVX512VL)
+#define AVX512F                         0x0000004000000000
+#define AVX512CD                        0x0000008000000000
+#define AVX512DQ                        0x0000010000000000
+#define AVX512BW                        0x0000020000000000
+#define AVX512VL                        0x0000100000000000
+#define NEED_DARWIN_SUPPORT             (AVX512F | AVX512DQ | AVX512CD | AVX512BW | AVX512VL)
 #else
 #define NEED_OS_SUPPORT_AX V4_OS_SUPPORT_AX
 #endif
@@ -537,6 +537,30 @@
 	CALL	AX
 	INT	$3
 
+// func switchToCrashStack0(fn func())
+TEXT runtime·switchToCrashStack0<ABIInternal>(SB), NOSPLIT, $0-8
+	MOVQ	g_m(R14), BX // curm
+
+	// set g to gcrash
+	LEAQ	runtime·gcrash(SB), R14 // g = &gcrash
+	MOVQ	BX, g_m(R14)            // g.m = curm
+	MOVQ	R14, m_g0(BX)           // curm.g0 = g
+	get_tls(CX)
+	MOVQ	R14, g(CX)
+
+	// switch to crashstack
+	MOVQ	(g_stack+stack_hi)(R14), BX
+	SUBQ	$(4*8), BX
+	MOVQ	BX, SP
+
+	// call target function
+	MOVQ	AX, DX
+	MOVQ	0(AX), AX
+	CALL	AX
+
+	// should never return
+	CALL	runtime·abort(SB)
+	UNDEF
 
 /*
  * support for morestack
@@ -551,17 +575,26 @@
 TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
 	// Cannot grow scheduler stack (m->g0).
 	get_tls(CX)
-	MOVQ	g(CX), BX
-	MOVQ	g_m(BX), BX
-	MOVQ	m_g0(BX), SI
-	CMPQ	g(CX), SI
+	MOVQ	g(CX), DI     // DI = g
+	MOVQ	g_m(DI), BX   // BX = m
+
+	// Set g->sched to context in f.
+	MOVQ	0(SP), AX // f's PC
+	MOVQ	AX, (g_sched+gobuf_pc)(DI)
+	LEAQ	8(SP), AX // f's SP
+	MOVQ	AX, (g_sched+gobuf_sp)(DI)
+	MOVQ	BP, (g_sched+gobuf_bp)(DI)
+	MOVQ	DX, (g_sched+gobuf_ctxt)(DI)
+
+	MOVQ	m_g0(BX), SI  // SI = m.g0
+	CMPQ	DI, SI
 	JNE	3(PC)
 	CALL	runtime·badmorestackg0(SB)
 	CALL	runtime·abort(SB)
 
 	// Cannot grow signal stack (m->gsignal).
 	MOVQ	m_gsignal(BX), SI
-	CMPQ	g(CX), SI
+	CMPQ	DI, SI
 	JNE	3(PC)
 	CALL	runtime·badmorestackgsignal(SB)
 	CALL	runtime·abort(SB)
@@ -573,17 +606,7 @@
 	MOVQ	AX, (m_morebuf+gobuf_pc)(BX)
 	LEAQ	16(SP), AX	// f's caller's SP
 	MOVQ	AX, (m_morebuf+gobuf_sp)(BX)
-	get_tls(CX)
-	MOVQ	g(CX), SI
-	MOVQ	SI, (m_morebuf+gobuf_g)(BX)
-
-	// Set g->sched to context in f.
-	MOVQ	0(SP), AX // f's PC
-	MOVQ	AX, (g_sched+gobuf_pc)(SI)
-	LEAQ	8(SP), AX // f's SP
-	MOVQ	AX, (g_sched+gobuf_sp)(SI)
-	MOVQ	BP, (g_sched+gobuf_bp)(SI)
-	MOVQ	DX, (g_sched+gobuf_ctxt)(SI)
+	MOVQ	DI, (m_morebuf+gobuf_g)(BX)
 
 	// Call newstack on m->g0's stack.
 	MOVQ	m_g0(BX), BX
@@ -825,6 +848,33 @@
 	MOVQ	DX, SP
 	RET
 
+// asmcgocall_landingpad calls AX with BX as argument.
+// Must be called on the system stack.
+TEXT ·asmcgocall_landingpad(SB),NOSPLIT,$0-0
+#ifdef GOOS_windows
+	// Make sure we have enough room for 4 stack-backed fast-call
+	// registers as per Windows amd64 calling convention.
+	ADJSP	$32
+	// On Windows, asmcgocall_landingpad acts as a landing pad for exceptions
+	// thrown in the cgo call. Exceptions that reach this function will be
+	// handled by runtime.sehtramp thanks to the SEH metadata added
+	// by the compiler.
+	// Note that runtime.sehtramp can't be attached directly to asmcgocall
+	// because its initial stack pointer can be outside the system stack bounds,
+	// and Windows stops the stack unwinding without calling the exception handler
+	// when it reaches that point.
+	MOVQ	BX, CX		// CX = first argument in Win64
+	CALL	AX
+	// The exception handler is not called if the next instruction is part of
+	// the epilogue, which includes the RET instruction, so we need to add a NOP here.
+	BYTE	$0x90
+	ADJSP	$-32
+	RET
+#endif
+	// Tail call AX on non-Windows, as the extra stack frame is not needed.
+	MOVQ	BX, DI		// DI = first argument in AMD64 ABI
+	JMP	AX
+
 // func asmcgocall(fn, arg unsafe.Pointer) int32
 // Call fn(arg) on the scheduler stack,
 // aligned appropriately for the gcc ABI.
@@ -859,23 +909,19 @@
 	MOVQ	(g_sched+gobuf_sp)(SI), SP
 
 	// Now on a scheduling stack (a pthread-created stack).
-	// Make sure we have enough room for 4 stack-backed fast-call
-	// registers as per windows amd64 calling convention.
-	SUBQ	$64, SP
+	SUBQ	$16, SP
 	ANDQ	$~15, SP	// alignment for gcc ABI
-	MOVQ	DI, 48(SP)	// save g
+	MOVQ	DI, 8(SP)	// save g
 	MOVQ	(g_stack+stack_hi)(DI), DI
 	SUBQ	DX, DI
-	MOVQ	DI, 40(SP)	// save depth in stack (can't just save SP, as stack might be copied during a callback)
-	MOVQ	BX, DI		// DI = first argument in AMD64 ABI
-	MOVQ	BX, CX		// CX = first argument in Win64
-	CALL	AX
+	MOVQ	DI, 0(SP)	// save depth in stack (can't just save SP, as stack might be copied during a callback)
+	CALL	runtime·asmcgocall_landingpad(SB)
 
 	// Restore registers, g, stack pointer.
 	get_tls(CX)
-	MOVQ	48(SP), DI
+	MOVQ	8(SP), DI
 	MOVQ	(g_stack+stack_hi)(DI), SI
-	SUBQ	40(SP), SI
+	SUBQ	0(SP), SI
 	MOVQ	DI, g(CX)
 	MOVQ	SI, SP
 
@@ -893,14 +939,12 @@
 	// but then the only path through this code would be a rare case on Solaris.
 	// Using this code for all "already on system stack" calls exercises it more,
 	// which should help keep it correct.
-	SUBQ	$64, SP
+	SUBQ	$16, SP
 	ANDQ	$~15, SP
-	MOVQ	$0, 48(SP)		// where above code stores g, in case someone looks during debugging
-	MOVQ	DX, 40(SP)	// save original stack pointer
-	MOVQ	BX, DI		// DI = first argument in AMD64 ABI
-	MOVQ	BX, CX		// CX = first argument in Win64
-	CALL	AX
-	MOVQ	40(SP), SI	// restore original stack pointer
+	MOVQ	$0, 8(SP)		// where above code stores g, in case someone looks during debugging
+	MOVQ	DX, 0(SP)	// save original stack pointer
+	CALL	runtime·asmcgocall_landingpad(SB)
+	MOVQ	0(SP), SI	// restore original stack pointer
 	MOVQ	SI, SP
 	MOVL	AX, ret+16(FP)
 	RET
@@ -1442,6 +1486,7 @@
 	DECQ	CX
 	SHRQ	$7, CX
 
+	PCALIGN $16
 aesloop:
 	// scramble state
 	AESENC	X8, X8
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index e3206a1..31a0584 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -39,10 +39,10 @@
 	MOVW	g, 32(R13)
 	MOVW	R11, 36(R13)
 
-	// Skip floating point registers on GOARM < 6.
-	MOVB    runtime·goarm(SB), R11
-	CMP	$6, R11
-	BLT	skipfpsave
+	// Skip floating point registers on goarmsoftfp != 0.
+	MOVB    runtime·goarmsoftfp(SB), R11
+	CMP	$0, R11
+	BNE     skipfpsave
 	MOVD	F8, (40+8*0)(R13)
 	MOVD	F9, (40+8*1)(R13)
 	MOVD	F10, (40+8*2)(R13)
@@ -77,9 +77,9 @@
 	BL	runtime·newosproc0(SB)
 rr:
 	// Restore callee-save registers and return.
-	MOVB    runtime·goarm(SB), R11
-	CMP	$6, R11
-	BLT	skipfprest
+	MOVB    runtime·goarmsoftfp(SB), R11
+	CMP     $0, R11
+	BNE     skipfprest
 	MOVD	(40+8*0)(R13), F8
 	MOVD	(40+8*1)(R13), F9
 	MOVD	(40+8*2)(R13), F10
@@ -197,10 +197,10 @@
 	RET
 
 TEXT runtime·asminit(SB),NOSPLIT,$0-0
-	// disable runfast (flush-to-zero) mode of vfp if runtime.goarm > 5
-	MOVB	runtime·goarm(SB), R11
-	CMP	$5, R11
-	BLE	4(PC)
+	// disable runfast (flush-to-zero) mode of vfp if runtime.goarmsoftfp == 0
+	MOVB	runtime·goarmsoftfp(SB), R11
+	CMP	$0, R11
+	BNE	4(PC)
 	WORD	$0xeef1ba10	// vmrs r11, fpscr
 	BIC	$(1<<24), R11
 	WORD	$0xeee1ba10	// vmsr fpscr, r11
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index 7866e35..6d77b08 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -262,6 +262,30 @@
 	SUB	$8, RSP, R29	// restore FP
 	B	(R3)
 
+// func switchToCrashStack0(fn func())
+TEXT runtime·switchToCrashStack0<ABIInternal>(SB), NOSPLIT, $0-8
+	MOVD	R0, R26    // context register
+	MOVD	g_m(g), R1 // curm
+
+	// set g to gcrash
+	MOVD	$runtime·gcrash(SB), g // g = &gcrash
+	BL	runtime·save_g(SB)         // clobbers R0
+	MOVD	R1, g_m(g)             // g.m = curm
+	MOVD	g, m_g0(R1)            // curm.g0 = g
+
+	// switch to crashstack
+	MOVD	(g_stack+stack_hi)(g), R1
+	SUB	$(4*8), R1
+	MOVD	R1, RSP
+
+	// call target function
+	MOVD	0(R26), R0
+	CALL	(R0)
+
+	// should never return
+	CALL	runtime·abort(SB)
+	UNDEF
+
 /*
  * support for morestack
  */
@@ -278,6 +302,16 @@
 	// Cannot grow scheduler stack (m->g0).
 	MOVD	g_m(g), R8
 	MOVD	m_g0(R8), R4
+
+	// Called from f.
+	// Set g->sched to context in f
+	MOVD	RSP, R0
+	MOVD	R0, (g_sched+gobuf_sp)(g)
+	MOVD	R29, (g_sched+gobuf_bp)(g)
+	MOVD	LR, (g_sched+gobuf_pc)(g)
+	MOVD	R3, (g_sched+gobuf_lr)(g)
+	MOVD	R26, (g_sched+gobuf_ctxt)(g)
+
 	CMP	g, R4
 	BNE	3(PC)
 	BL	runtime·badmorestackg0(SB)
@@ -291,15 +325,6 @@
 	B	runtime·abort(SB)
 
 	// Called from f.
-	// Set g->sched to context in f
-	MOVD	RSP, R0
-	MOVD	R0, (g_sched+gobuf_sp)(g)
-	MOVD	R29, (g_sched+gobuf_bp)(g)
-	MOVD	LR, (g_sched+gobuf_pc)(g)
-	MOVD	R3, (g_sched+gobuf_lr)(g)
-	MOVD	R26, (g_sched+gobuf_ctxt)(g)
-
-	// Called from f.
 	// Set m->morebuf to f's callers.
 	MOVD	R3, (m_morebuf+gobuf_pc)(R8)	// f's caller's PC
 	MOVD	RSP, R0
diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s
index 6ffa139..586bd23 100644
--- a/src/runtime/asm_loong64.s
+++ b/src/runtime/asm_loong64.s
@@ -72,7 +72,7 @@
 	MOVV	R0, 1(R0)
 	RET
 
-DATA	runtime·mainPC+0(SB)/8,$runtime·main(SB)
+DATA	runtime·mainPC+0(SB)/8,$runtime·main<ABIInternal>(SB)
 GLOBL	runtime·mainPC(SB),RODATA,$8
 
 TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
@@ -123,26 +123,31 @@
 // Switch to m->g0's stack, call fn(g).
 // Fn must never return. It should gogo(&g->sched)
 // to keep running g.
-TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
+TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R4, REGCTXT
+#else
+	MOVV	fn+0(FP), REGCTXT
+#endif
+
 	// Save caller state in g->sched
 	MOVV	R3, (g_sched+gobuf_sp)(g)
 	MOVV	R1, (g_sched+gobuf_pc)(g)
 	MOVV	R0, (g_sched+gobuf_lr)(g)
 
 	// Switch to m->g0 & its stack, call fn.
-	MOVV	g, R19
-	MOVV	g_m(g), R4
-	MOVV	m_g0(R4), g
+	MOVV	g, R4		// arg = g
+	MOVV	g_m(g), R20
+	MOVV	m_g0(R20), g
 	JAL	runtime·save_g(SB)
-	BNE	g, R19, 2(PC)
+	BNE	g, R4, 2(PC)
 	JMP	runtime·badmcall(SB)
-	MOVV	fn+0(FP), REGCTXT			// context
-	MOVV	0(REGCTXT), R5			// code pointer
+	MOVV	0(REGCTXT), R20			// code pointer
 	MOVV	(g_sched+gobuf_sp)(g), R3	// sp = m->g0->sched.sp
 	ADDV	$-16, R3
-	MOVV	R19, 8(R3)
+	MOVV	R4, 8(R3)
 	MOVV	R0, 0(R3)
-	JAL	(R5)
+	JAL	(R20)
 	JMP	runtime·badmcall2(SB)
 
 // systemstack_switch is a dummy routine that systemstack leaves at the bottom
@@ -214,7 +219,7 @@
 
 // Called during function prolog when more stack is needed.
 // Caller has already loaded:
-// loong64: R5: LR
+// loong64: R31: LR
 //
 // The traceback routines see morestack on a g0 as being
 // the top of a stack (for example, morestack calling newstack
@@ -238,12 +243,12 @@
 	// Set g->sched to context in f.
 	MOVV	R3, (g_sched+gobuf_sp)(g)
 	MOVV	R1, (g_sched+gobuf_pc)(g)
-	MOVV	R5, (g_sched+gobuf_lr)(g)
+	MOVV	R31, (g_sched+gobuf_lr)(g)
 	MOVV	REGCTXT, (g_sched+gobuf_ctxt)(g)
 
 	// Called from f.
 	// Set m->morebuf to f's caller.
-	MOVV	R5, (m_morebuf+gobuf_pc)(R7)	// f's caller's PC
+	MOVV	R31, (m_morebuf+gobuf_pc)(R7)	// f's caller's PC
 	MOVV	R3, (m_morebuf+gobuf_sp)(R7)	// f's caller's SP
 	MOVV	g, (m_morebuf+gobuf_g)(R7)
 
@@ -272,7 +277,7 @@
 	JMP	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -286,7 +291,7 @@
 // Note: can't just "BR NAME(SB)" - bad inlining results.
 
 TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
-	MOVWU stackArgsSize+24(FP), R19
+	MOVWU frameSize+32(FP), R19
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
 	DISPATCH(runtime·call128, 128)
@@ -317,7 +322,7 @@
 	JMP	(R4)
 
 #define CALLFN(NAME,MAXSIZE)			\
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24;		\
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48;		\
 	NO_LOCAL_POINTERS;			\
 	/* copy arguments to stack */		\
 	MOVV	arg+16(FP), R4;			\
@@ -331,12 +336,17 @@
 	MOVBU	R6, (R12);			\
 	ADDV	$1, R12;			\
 	JMP	-5(PC);				\
+	/* set up argument registers */		\
+	MOVV	regArgs+40(FP), R25;		\
+	JAL	·unspillArgs(SB);		\
 	/* call function */			\
 	MOVV	f+8(FP), REGCTXT;			\
-	MOVV	(REGCTXT), R6;			\
+	MOVV	(REGCTXT), R25;			\
 	PCDATA  $PCDATA_StackMapIndex, $0;	\
-	JAL	(R6);				\
+	JAL	(R25);				\
 	/* copy return values back */		\
+	MOVV	regArgs+40(FP), R25;		\
+	JAL	·spillArgs(SB);		\
 	MOVV	argtype+0(FP), R7;		\
 	MOVV	arg+16(FP), R4;			\
 	MOVWU	n+24(FP), R5;			\
@@ -352,11 +362,13 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
+	NO_LOCAL_POINTERS
 	MOVV	R7, 8(R3)
 	MOVV	R4, 16(R3)
 	MOVV	R12, 24(R3)
 	MOVV	R5, 32(R3)
+	MOVV	R25, 40(R3)
 	JAL	runtime·reflectcallmove(SB)
 	RET
 
@@ -567,7 +579,7 @@
 	// If the m on entry wasn't nil,
 	// 1. the thread might be a Go thread,
 	// 2. or it wasn't the first call from a C thread on pthread platforms,
-	//    since then we skip dropm to reuse the m in the first call.
+	//    since then we skip dropm to reuse the m in the first call.
 	MOVV	savedm-8(SP), R12
 	BNE	R12, droppedm
 
@@ -604,14 +616,14 @@
 	UNDEF
 
 // AES hashing not implemented for loong64
-TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32
-	JMP	runtime·memhashFallback(SB)
-TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24
-	JMP	runtime·strhashFallback(SB)
-TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
-	JMP	runtime·memhash32Fallback(SB)
-TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
-	JMP	runtime·memhash64Fallback(SB)
+TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-32
+	JMP	runtime·memhashFallback<ABIInternal>(SB)
+TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+	JMP	runtime·strhashFallback<ABIInternal>(SB)
+TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+	JMP	runtime·memhash32Fallback<ABIInternal>(SB)
+TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+	JMP	runtime·memhash64Fallback<ABIInternal>(SB)
 
 TEXT runtime·return0(SB), NOSPLIT, $0
 	MOVW	$0, R19
@@ -642,11 +654,102 @@
 	// traceback from goexit1 must hit code range of goexit
 	NOOP
 
+// This is called from .init_array and follows the platform, not Go, ABI.
+TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
+	ADDV	$-0x10, R3
+	MOVV	R30, 8(R3) // The access to global variables below implicitly uses R30, which is callee-save
+	MOVV	runtime·lastmoduledatap(SB), R12
+	MOVV	R4, moduledata_next(R12)
+	MOVV	R4, runtime·lastmoduledatap(SB)
+	MOVV	8(R3), R30
+	ADDV	$0x10, R3
+	RET
+
 TEXT ·checkASM(SB),NOSPLIT,$0-1
 	MOVW	$1, R19
 	MOVB	R19, ret+0(FP)
 	RET
 
+#ifdef GOEXPERIMENT_regabiargs
+// spillArgs stores return values from registers to a *internal/abi.RegArgs in R25.
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
+	MOVV	R4, (0*8)(R25)
+	MOVV	R5, (1*8)(R25)
+	MOVV	R6, (2*8)(R25)
+	MOVV	R7, (3*8)(R25)
+	MOVV	R8, (4*8)(R25)
+	MOVV	R9, (5*8)(R25)
+	MOVV	R10, (6*8)(R25)
+	MOVV	R11, (7*8)(R25)
+	MOVV	R12, (8*8)(R25)
+	MOVV	R13, (9*8)(R25)
+	MOVV	R14, (10*8)(R25)
+	MOVV	R15, (11*8)(R25)
+	MOVV	R16, (12*8)(R25)
+	MOVV	R17, (13*8)(R25)
+	MOVV	R18, (14*8)(R25)
+	MOVV	R19, (15*8)(R25)
+	MOVD	F0, (16*8)(R25)
+	MOVD	F1, (17*8)(R25)
+	MOVD	F2, (18*8)(R25)
+	MOVD	F3, (19*8)(R25)
+	MOVD	F4, (20*8)(R25)
+	MOVD	F5, (21*8)(R25)
+	MOVD	F6, (22*8)(R25)
+	MOVD	F7, (23*8)(R25)
+	MOVD	F8, (24*8)(R25)
+	MOVD	F9, (25*8)(R25)
+	MOVD	F10, (26*8)(R25)
+	MOVD	F11, (27*8)(R25)
+	MOVD	F12, (28*8)(R25)
+	MOVD	F13, (29*8)(R25)
+	MOVD	F14, (30*8)(R25)
+	MOVD	F15, (31*8)(R25)
+	RET
+
+// unspillArgs loads args into registers from a *internal/abi.RegArgs in R25.
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
+	MOVV	(0*8)(R25), R4
+	MOVV	(1*8)(R25), R5
+	MOVV	(2*8)(R25), R6
+	MOVV	(3*8)(R25), R7
+	MOVV	(4*8)(R25), R8
+	MOVV	(5*8)(R25), R9
+	MOVV	(6*8)(R25), R10
+	MOVV	(7*8)(R25), R11
+	MOVV	(8*8)(R25), R12
+	MOVV	(9*8)(R25), R13
+	MOVV	(10*8)(R25), R14
+	MOVV	(11*8)(R25), R15
+	MOVV	(12*8)(R25), R16
+	MOVV	(13*8)(R25), R17
+	MOVV	(14*8)(R25), R18
+	MOVV	(15*8)(R25), R19
+	MOVD	(16*8)(R25), F0
+	MOVD	(17*8)(R25), F1
+	MOVD	(18*8)(R25), F2
+	MOVD	(19*8)(R25), F3
+	MOVD	(20*8)(R25), F4
+	MOVD	(21*8)(R25), F5
+	MOVD	(22*8)(R25), F6
+	MOVD	(23*8)(R25), F7
+	MOVD	(24*8)(R25), F8
+	MOVD	(25*8)(R25), F9
+	MOVD	(26*8)(R25), F10
+	MOVD	(27*8)(R25), F11
+	MOVD	(28*8)(R25), F12
+	MOVD	(29*8)(R25), F13
+	MOVD	(30*8)(R25), F14
+	MOVD	(31*8)(R25), F15
+	RET
+#else
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
+	RET
+
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
+	RET
+#endif
+
 // gcWriteBarrier informs the GC about heap pointer writes.
 //
 // gcWriteBarrier does NOT follow the Go ABI. It accepts the
@@ -774,71 +877,156 @@
 // in the caller's stack frame. These stubs write the args into that stack space and
 // then tail call to the corresponding runtime handler.
 // The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
-	MOVV	R19, x+0(FP)
-	MOVV	R18, y+8(FP)
-	JMP	runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
-	MOVV	R19, x+0(FP)
-	MOVV	R18, y+8(FP)
-	JMP	runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
-	MOVV	R18, x+0(FP)
-	MOVV	R17, y+8(FP)
-	JMP	runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
-	MOVV	R18, x+0(FP)
-	MOVV	R17, y+8(FP)
-	JMP	runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
-	MOVV	R18, x+0(FP)
-	MOVV	R17, y+8(FP)
-	JMP	runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
-	MOVV	R18, x+0(FP)
-	MOVV	R17, y+8(FP)
-	JMP	runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
-	MOVV	R19, x+0(FP)
-	MOVV	R18, y+8(FP)
-	JMP	runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
-	MOVV	R19, x+0(FP)
-	MOVV	R18, y+8(FP)
-	JMP	runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
-	MOVV	R17, x+0(FP)
-	MOVV	R4, y+8(FP)
-	JMP	runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
-	MOVV	R17, x+0(FP)
-	MOVV	R4, y+8(FP)
-	JMP	runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
-	MOVV	R17, x+0(FP)
-	MOVV	R4, y+8(FP)
-	JMP	runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
-	MOVV	R17, x+0(FP)
-	MOVV	R4, y+8(FP)
-	JMP	runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
-	MOVV	R18, x+0(FP)
-	MOVV	R17, y+8(FP)
-	JMP	runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
-	MOVV	R18, x+0(FP)
-	MOVV	R17, y+8(FP)
-	JMP	runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
-	MOVV	R19, x+0(FP)
-	MOVV	R18, y+8(FP)
-	JMP	runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
-	MOVV	R19, x+0(FP)
-	MOVV	R18, y+8(FP)
-	JMP	runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
-	MOVV	R17, x+0(FP)
-	MOVV	R4, y+8(FP)
-	JMP	runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R20, R4
+	MOVV	R21, R5
+#else
+	MOVV	R20, x+0(FP)
+	MOVV	R21, y+8(FP)
+#endif
+	JMP	runtime·goPanicIndex<ABIInternal>(SB)
+TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R20, R4
+	MOVV	R21, R5
+#else
+	MOVV	R20, x+0(FP)
+	MOVV	R21, y+8(FP)
+#endif
+	JMP	runtime·goPanicIndexU<ABIInternal>(SB)
+TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R21, R4
+	MOVV	R23, R5
+#else
+	MOVV	R21, x+0(FP)
+	MOVV	R23, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceAlen<ABIInternal>(SB)
+TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R21, R4
+	MOVV	R23, R5
+#else
+	MOVV	R21, x+0(FP)
+	MOVV	R23, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceAlenU<ABIInternal>(SB)
+TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R21, R4
+	MOVV	R23, R5
+#else
+	MOVV	R21, x+0(FP)
+	MOVV	R23, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceAcap<ABIInternal>(SB)
+TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R21, R4
+	MOVV	R23, R5
+#else
+	MOVV	R21, x+0(FP)
+	MOVV	R23, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceAcapU<ABIInternal>(SB)
+TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R20, R4
+	MOVV	R21, R5
+#else
+	MOVV	R20, x+0(FP)
+	MOVV	R21, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceB<ABIInternal>(SB)
+TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R20, R4
+	MOVV	R21, R5
+#else
+	MOVV	R20, x+0(FP)
+	MOVV	R21, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceBU<ABIInternal>(SB)
+TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R23, R4
+	MOVV	R24, R5
+#else
+	MOVV	R23, x+0(FP)
+	MOVV	R24, y+8(FP)
+#endif
+	JMP	runtime·goPanicSlice3Alen<ABIInternal>(SB)
+TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R23, R4
+	MOVV	R24, R5
+#else
+	MOVV	R23, x+0(FP)
+	MOVV	R24, y+8(FP)
+#endif
+	JMP	runtime·goPanicSlice3AlenU<ABIInternal>(SB)
+TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R23, R4
+	MOVV	R24, R5
+#else
+	MOVV	R23, x+0(FP)
+	MOVV	R24, y+8(FP)
+#endif
+	JMP	runtime·goPanicSlice3Acap<ABIInternal>(SB)
+TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R23, R4
+	MOVV	R24, R5
+#else
+	MOVV	R23, x+0(FP)
+	MOVV	R24, y+8(FP)
+#endif
+	JMP	runtime·goPanicSlice3AcapU<ABIInternal>(SB)
+TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R21, R4
+	MOVV	R23, R5
+#else
+	MOVV	R21, x+0(FP)
+	MOVV	R23, y+8(FP)
+#endif
+	JMP	runtime·goPanicSlice3B<ABIInternal>(SB)
+TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R21, R4
+	MOVV	R23, R5
+#else
+	MOVV	R21, x+0(FP)
+	MOVV	R23, y+8(FP)
+#endif
+	JMP	runtime·goPanicSlice3BU<ABIInternal>(SB)
+TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R20, R4
+	MOVV	R21, R5
+#else
+	MOVV	R20, x+0(FP)
+	MOVV	R21, y+8(FP)
+#endif
+	JMP	runtime·goPanicSlice3C<ABIInternal>(SB)
+TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R20, R4
+	MOVV	R21, R5
+#else
+	MOVV	R20, x+0(FP)
+	MOVV	R21, y+8(FP)
+#endif
+	JMP	runtime·goPanicSlice3CU<ABIInternal>(SB)
+TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R23, R4
+	MOVV	R24, R5
+#else
+	MOVV	R23, x+0(FP)
+	MOVV	R24, y+8(FP)
+#endif
+	JMP	runtime·goPanicSliceConvert<ABIInternal>(SB)
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s
index 19592b5..80cd87c 100644
--- a/src/runtime/asm_mips64x.s
+++ b/src/runtime/asm_mips64x.s
@@ -205,6 +205,29 @@
 	ADDV	$8, R29
 	JMP	(R4)
 
+// func switchToCrashStack0(fn func())
+TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8
+	MOVV	fn+0(FP), REGCTXT	// context register
+	MOVV	g_m(g), R2	// curm
+
+	// set g to gcrash
+	MOVV	$runtime·gcrash(SB), g	// g = &gcrash
+	CALL	runtime·save_g(SB)
+	MOVV	R2, g_m(g)	// g.m = curm
+	MOVV	g, m_g0(R2)	// curm.g0 = g
+
+	// switch to crashstack
+	MOVV	(g_stack+stack_hi)(g), R2
+	ADDV	$(-4*8), R2, R29
+
+	// call target function
+	MOVV	0(REGCTXT), R25
+	JAL	(R25)
+
+	// should never return
+	CALL	runtime·abort(SB)
+	UNDEF
+
 /*
  * support for morestack
  */
@@ -218,6 +241,13 @@
 // calling the scheduler calling newm calling gc), so we must
 // record an argument size. For that purpose, it has no arguments.
 TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
+	// Called from f.
+	// Set g->sched to context in f.
+	MOVV	R29, (g_sched+gobuf_sp)(g)
+	MOVV	R31, (g_sched+gobuf_pc)(g)
+	MOVV	R3, (g_sched+gobuf_lr)(g)
+	MOVV	REGCTXT, (g_sched+gobuf_ctxt)(g)
+
 	// Cannot grow scheduler stack (m->g0).
 	MOVV	g_m(g), R7
 	MOVV	m_g0(R7), R8
@@ -232,13 +262,6 @@
 	JAL	runtime·abort(SB)
 
 	// Called from f.
-	// Set g->sched to context in f.
-	MOVV	R29, (g_sched+gobuf_sp)(g)
-	MOVV	R31, (g_sched+gobuf_pc)(g)
-	MOVV	R3, (g_sched+gobuf_lr)(g)
-	MOVV	REGCTXT, (g_sched+gobuf_ctxt)(g)
-
-	// Called from f.
 	// Set m->morebuf to f's caller.
 	MOVV	R3, (m_morebuf+gobuf_pc)(R7)	// f's caller's PC
 	MOVV	R29, (m_morebuf+gobuf_sp)(R7)	// f's caller's SP
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index 66d0447..ff9b736 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -98,7 +98,11 @@
 
 	// start this M
 	BL	runtime·mstart(SB)
-
+	// Prevent dead-code elimination of debugCallV2, which is
+	// intended to be called by debuggers.
+#ifdef GOARCH_ppc64le
+	MOVD	$runtime·debugCallV2<ABIInternal>(SB), R31
+#endif
 	MOVD	R0, 0(R0)
 	RET
 
@@ -280,6 +284,31 @@
 #endif
 	RET
 
+// func switchToCrashStack0(fn func())
+TEXT runtime·switchToCrashStack0<ABIInternal>(SB), NOSPLIT, $0-8
+	MOVD	R3, R11				// context register
+	MOVD	g_m(g), R3			// curm
+
+	// set g to gcrash
+	MOVD	$runtime·gcrash(SB), g	// g = &gcrash
+	CALL	runtime·save_g(SB)	// clobbers R31
+	MOVD	R3, g_m(g)			// g.m = curm
+	MOVD	g, m_g0(R3)			// curm.g0 = g
+
+	// switch to crashstack
+	MOVD	(g_stack+stack_hi)(g), R3
+	SUB	$(4*8), R3
+	MOVD	R3, R1
+
+	// call target function
+	MOVD	0(R11), R12			// code pointer
+	MOVD	R12, CTR
+	BL	(CTR)
+
+	// should never return
+	CALL	runtime·abort(SB)
+	UNDEF
+
 /*
  * support for morestack
  */
@@ -293,6 +322,14 @@
 // calling the scheduler calling newm calling gc), so we must
 // record an argument size. For that purpose, it has no arguments.
 TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
+	// Called from f.
+	// Set g->sched to context in f.
+	MOVD	R1, (g_sched+gobuf_sp)(g)
+	MOVD	LR, R8
+	MOVD	R8, (g_sched+gobuf_pc)(g)
+	MOVD	R5, (g_sched+gobuf_lr)(g)
+	MOVD	R11, (g_sched+gobuf_ctxt)(g)
+
 	// Cannot grow scheduler stack (m->g0).
 	MOVD	g_m(g), R7
 	MOVD	m_g0(R7), R8
@@ -309,14 +346,6 @@
 	BL	runtime·abort(SB)
 
 	// Called from f.
-	// Set g->sched to context in f.
-	MOVD	R1, (g_sched+gobuf_sp)(g)
-	MOVD	LR, R8
-	MOVD	R8, (g_sched+gobuf_pc)(g)
-	MOVD	R5, (g_sched+gobuf_lr)(g)
-	MOVD	R11, (g_sched+gobuf_ctxt)(g)
-
-	// Called from f.
 	// Set m->morebuf to f's caller.
 	MOVD	R5, (m_morebuf+gobuf_pc)(R7)	// f's caller's PC
 	MOVD	R1, (m_morebuf+gobuf_sp)(R7)	// f's caller's SP
@@ -545,6 +574,43 @@
 #define asmcgocallSaveOffset cgoCalleeStackSize
 #endif
 
+// func asmcgocall_no_g(fn, arg unsafe.Pointer)
+// Call fn(arg) aligned appropriately for the gcc ABI.
+// Called on a system stack, and there may be no g yet (during needm).
+TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
+	MOVD	fn+0(FP), R3
+	MOVD	arg+8(FP), R4
+
+	MOVD	R1, R15
+	SUB	$(asmcgocallSaveOffset+8), R1
+	RLDCR	$0, R1, $~15, R1	// 16-byte alignment for gcc ABI
+	MOVD	R15, asmcgocallSaveOffset(R1)
+
+	MOVD	R0, 0(R1)	// clear back chain pointer (TODO can we give it real back trace information?)
+
+	// This is a "global call", so put the global entry point in r12
+	MOVD	R3, R12
+
+#ifdef GO_PPC64X_HAS_FUNCDESC
+	// Load the real entry address from the first slot of the function descriptor.
+	MOVD	8(R12), R2
+	MOVD	(R12), R12
+#endif
+	MOVD	R12, CTR
+	MOVD	R4, R3		// arg in r3
+	BL	(CTR)
+
+	// C code can clobber R0, so set it back to 0. F27-F31 are
+	// callee save, so we don't need to recover those.
+	XOR	R0, R0
+
+	MOVD	asmcgocallSaveOffset(R1), R1	// Restore stack pointer.
+#ifndef GOOS_aix
+	MOVD	24(R1), R2
+#endif
+
+	RET
+
 // func asmcgocall(fn, arg unsafe.Pointer) int32
 // Call fn(arg) on the scheduler stack,
 // aligned appropriately for the gcc ABI.
@@ -554,6 +620,8 @@
 	MOVD	arg+8(FP), R4
 
 	MOVD	R1, R7		// save original stack pointer
+	CMP	$0, g
+	BEQ	nosave
 	MOVD	g, R5
 
 	// Figure out if we need to switch to m->g0 stack.
@@ -563,29 +631,29 @@
 	MOVD	g_m(g), R8
 	MOVD	m_gsignal(R8), R6
 	CMP	R6, g
-	BEQ	g0
+	BEQ	nosave
 	MOVD	m_g0(R8), R6
 	CMP	R6, g
-	BEQ	g0
+	BEQ	nosave
+
 	BL	gosave_systemstack_switch<>(SB)
 	MOVD	R6, g
 	BL	runtime·save_g(SB)
 	MOVD	(g_sched+gobuf_sp)(g), R1
 
 	// Now on a scheduling stack (a pthread-created stack).
-g0:
 #ifdef GOOS_aix
 	// Create a fake LR to improve backtrace.
 	MOVD	$runtime·asmcgocall(SB), R6
 	MOVD	R6, 16(R1)
-	// AIX also save one argument on the stack.
-	SUB $8, R1
+	// AIX also saves one argument on the stack.
+	SUB	$8, R1
 #endif
 	// Save room for two of our pointers, plus the callee
 	// save area that lives on the caller stack.
 	SUB	$(asmcgocallSaveOffset+16), R1
 	RLDCR	$0, R1, $~15, R1	// 16-byte alignment for gcc ABI
-	MOVD	R5, (asmcgocallSaveOffset+8)(R1)// save old g on stack
+	MOVD	R5, (asmcgocallSaveOffset+8)(R1)	// save old g on stack
 	MOVD	(g_stack+stack_hi)(R5), R5
 	SUB	R7, R5
 	MOVD	R5, asmcgocallSaveOffset(R1)    // save depth in old g stack (can't just save SP, as stack might be copied during a callback)
@@ -605,9 +673,10 @@
 	MOVD	R12, CTR
 	MOVD	R4, R3		// arg in r3
 	BL	(CTR)
-	// C code can clobber R0, so set it back to 0. F27-F31 are
-	// callee save, so we don't need to recover those.
+
+	// Reinitialise zero value register.
 	XOR	R0, R0
+
 	// Restore g, stack pointer, toc pointer.
 	// R3 is errno, so don't touch it
 	MOVD	(asmcgocallSaveOffset+8)(R1), g
@@ -623,6 +692,41 @@
 	MOVW	R3, ret+16(FP)
 	RET
 
+nosave:
+	// Running on a system stack, perhaps even without a g.
+	// Having no g can happen during thread creation or thread teardown.
+	// This code is like the above sequence but without saving/restoring g
+	// and without worrying about the stack moving out from under us
+	// (because we're on a system stack, not a goroutine stack).
+	// The above code could be used directly if already on a system stack,
+	// but then the only path through this code would be a rare case.
+	// Using this code for all "already on system stack" calls exercises it more,
+	// which should help keep it correct.
+
+	SUB	$(asmcgocallSaveOffset+8), R1
+	RLDCR	$0, R1, $~15, R1		// 16-byte alignment for gcc ABI
+	MOVD	R7, asmcgocallSaveOffset(R1)	// Save original stack pointer.
+
+	MOVD	R3, R12		// fn
+#ifdef GO_PPC64X_HAS_FUNCDESC
+	// Load the real entry address from the first slot of the function descriptor.
+	MOVD	8(R12), R2
+	MOVD	(R12), R12
+#endif
+	MOVD	R12, CTR
+	MOVD	R4, R3		// arg
+	BL	(CTR)
+
+	// Reinitialise zero value register.
+	XOR	R0, R0
+
+	MOVD	asmcgocallSaveOffset(R1), R1	// Restore stack pointer.
+#ifndef GOOS_aix
+	MOVD	24(R1), R2
+#endif
+	MOVW	R3, ret+16(FP)
+	RET
+
 // func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr)
 // See cgocall.go for more details.
 TEXT ·cgocallback(SB),NOSPLIT,$24-24
@@ -639,9 +743,11 @@
 
 loadg:
 	// Load m and g from thread-local storage.
+#ifndef GOOS_openbsd
 	MOVBZ	runtime·iscgo(SB), R3
 	CMP	R3, $0
 	BEQ	nocgo
+#endif
 	BL	runtime·load_g(SB)
 nocgo:
 
@@ -1033,6 +1139,219 @@
 	MOVD	$64, R29
 	JMP	gcWriteBarrier<>(SB)
 
+DATA	debugCallFrameTooLarge<>+0x00(SB)/20, $"call frame too large"
+GLOBL	debugCallFrameTooLarge<>(SB), RODATA, $20	// Size duplicated below
+
+// debugCallV2 is the entry point for debugger-injected function
+// calls on running goroutines. It informs the runtime that a
+// debug call has been injected and creates a call frame for the
+// debugger to fill in.
+//
+// To inject a function call, a debugger should:
+// 1. Check that the goroutine is in state _Grunning and that
+//    there are at least 320 bytes free on the stack.
+// 2. Set SP as SP-32.
+// 3. Store the current LR in (SP) (using the SP after step 2).
+// 4. Store the current PC in the LR register.
+// 5. Write the desired argument frame size at SP-32
+// 6. Save all machine registers (including flags and floating point registers)
+//    so they can be restored later by the debugger.
+// 7. Set the PC to debugCallV2 and resume execution.
+//
+// If the goroutine is in state _Grunnable, then it's not generally
+// safe to inject a call because it may return out via other runtime
+// operations. Instead, the debugger should unwind the stack to find
+// the return to non-runtime code, add a temporary breakpoint there,
+// and inject the call once that breakpoint is hit.
+//
+// If the goroutine is in any other state, it's not safe to inject a call.
+//
+// This function communicates back to the debugger by setting R20 and
+// invoking TW to raise a breakpoint signal. Note that the signal PC of
+// the signal triggered by the TW instruction is the PC where the signal
+// is trapped, not the next PC, so to resume execution, the debugger needs
+// to set the signal PC to PC+4. See the comments in the implementation for
+// the protocol the debugger is expected to follow. InjectDebugCall in the
+// runtime tests demonstrates this protocol.
+// The debugger must ensure that any pointers passed to the function
+// obey escape analysis requirements. Specifically, it must not pass
+// a stack pointer to an escaping argument. debugCallV2 cannot check
+// this invariant.
+//
+// This is ABIInternal because Go code injects its PC directly into new
+// goroutine stacks.
+#ifdef GOARCH_ppc64le
+TEXT runtime·debugCallV2<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0
+	// save scratch register R31 first
+	MOVD	R31, -184(R1)
+	MOVD	0(R1), R31
+	// save caller LR
+	MOVD	R31, -304(R1)
+	MOVD	-32(R1), R31
+	// save argument frame size
+	MOVD	R31, -192(R1)
+	MOVD	LR, R31
+	MOVD	R31, -320(R1)
+	ADD	$-320, R1
+	// save all registers that can contain pointers
+	// and the CR register
+	MOVW	CR, R31
+	MOVD	R31, 8(R1)
+	MOVD	R2, 24(R1)
+	MOVD	R3, 56(R1)
+	MOVD	R4, 64(R1)
+	MOVD	R5, 72(R1)
+	MOVD	R6, 80(R1)
+	MOVD	R7, 88(R1)
+	MOVD	R8, 96(R1)
+	MOVD	R9, 104(R1)
+	MOVD	R10, 112(R1)
+	MOVD	R11, 120(R1)
+	MOVD	R12, 144(R1)
+	MOVD	R13, 152(R1)
+	MOVD	R14, 160(R1)
+	MOVD	R15, 168(R1)
+	MOVD	R16, 176(R1)
+	MOVD	R17, 184(R1)
+	MOVD	R18, 192(R1)
+	MOVD	R19, 200(R1)
+	MOVD	R20, 208(R1)
+	MOVD	R21, 216(R1)
+	MOVD	R22, 224(R1)
+	MOVD	R23, 232(R1)
+	MOVD	R24, 240(R1)
+	MOVD	R25, 248(R1)
+	MOVD	R26, 256(R1)
+	MOVD	R27, 264(R1)
+	MOVD	R28, 272(R1)
+	MOVD	R29, 280(R1)
+	MOVD	g, 288(R1)
+	MOVD	LR, R31
+	MOVD	R31, 32(R1)
+	CALL	runtime·debugCallCheck(SB)
+	MOVD	40(R1), R22
+	XOR	R0, R0
+	CMP	R22, R0
+	BEQ	good
+	MOVD	48(R1), R22
+	MOVD	$8, R20
+	TW	$31, R0, R0
+
+	BR	restore
+
+good:
+#define DEBUG_CALL_DISPATCH(NAME,MAXSIZE)	\
+	MOVD	$MAXSIZE, R23;			\
+	CMP	R26, R23;			\
+	BGT	5(PC);				\
+	MOVD	$NAME(SB), R26;			\
+	MOVD	R26, 32(R1);			\
+	CALL	runtime·debugCallWrap(SB);	\
+	BR	restore
+
+	// the argument frame size
+	MOVD	128(R1), R26
+
+	DEBUG_CALL_DISPATCH(debugCall32<>, 32)
+	DEBUG_CALL_DISPATCH(debugCall64<>, 64)
+	DEBUG_CALL_DISPATCH(debugCall128<>, 128)
+	DEBUG_CALL_DISPATCH(debugCall256<>, 256)
+	DEBUG_CALL_DISPATCH(debugCall512<>, 512)
+	DEBUG_CALL_DISPATCH(debugCall1024<>, 1024)
+	DEBUG_CALL_DISPATCH(debugCall2048<>, 2048)
+	DEBUG_CALL_DISPATCH(debugCall4096<>, 4096)
+	DEBUG_CALL_DISPATCH(debugCall8192<>, 8192)
+	DEBUG_CALL_DISPATCH(debugCall16384<>, 16384)
+	DEBUG_CALL_DISPATCH(debugCall32768<>, 32768)
+	DEBUG_CALL_DISPATCH(debugCall65536<>, 65536)
+	// The frame size is too large. Report the error.
+	MOVD	$debugCallFrameTooLarge<>(SB), R22
+	MOVD	R22, 32(R1)
+	MOVD	$20, R22
+	// length of debugCallFrameTooLarge string
+	MOVD	R22, 40(R1)
+	MOVD	$8, R20
+	TW	$31, R0, R0
+	BR	restore
+restore:
+	MOVD	$16, R20
+	TW	$31, R0, R0
+	// restore all registers that can contain
+	// pointers including CR
+	MOVD	8(R1), R31
+	MOVW	R31, CR
+	MOVD	24(R1), R2
+	MOVD	56(R1), R3
+	MOVD	64(R1), R4
+	MOVD	72(R1), R5
+	MOVD	80(R1), R6
+	MOVD	88(R1), R7
+	MOVD	96(R1), R8
+	MOVD	104(R1), R9
+	MOVD	112(R1), R10
+	MOVD	120(R1), R11
+	MOVD	144(R1), R12
+	MOVD	152(R1), R13
+	MOVD	160(R1), R14
+	MOVD	168(R1), R15
+	MOVD	176(R1), R16
+	MOVD	184(R1), R17
+	MOVD	192(R1), R18
+	MOVD	200(R1), R19
+	MOVD	208(R1), R20
+	MOVD	216(R1), R21
+	MOVD	224(R1), R22
+	MOVD	232(R1), R23
+	MOVD	240(R1), R24
+	MOVD	248(R1), R25
+	MOVD	256(R1), R26
+	MOVD	264(R1), R27
+	MOVD	272(R1), R28
+	MOVD	280(R1), R29
+	MOVD	288(R1), g
+	MOVD	16(R1), R31
+	// restore old LR
+	MOVD	R31, LR
+	// restore caller PC
+	MOVD	0(R1), CTR
+	MOVD	136(R1), R31
+	// Add 32 bytes more to compensate for SP change in saveSigContext
+	ADD	$352, R1
+	JMP	(CTR)
+#endif
+#define DEBUG_CALL_FN(NAME,MAXSIZE)	\
+TEXT NAME(SB),WRAPPER,$MAXSIZE-0;	\
+	NO_LOCAL_POINTERS;		\
+	MOVD	$0, R20;		\
+	TW	$31, R0, R0		\
+	MOVD	$1, R20;		\
+	TW	$31, R0, R0		\
+	RET
+DEBUG_CALL_FN(debugCall32<>, 32)
+DEBUG_CALL_FN(debugCall64<>, 64)
+DEBUG_CALL_FN(debugCall128<>, 128)
+DEBUG_CALL_FN(debugCall256<>, 256)
+DEBUG_CALL_FN(debugCall512<>, 512)
+DEBUG_CALL_FN(debugCall1024<>, 1024)
+DEBUG_CALL_FN(debugCall2048<>, 2048)
+DEBUG_CALL_FN(debugCall4096<>, 4096)
+DEBUG_CALL_FN(debugCall8192<>, 8192)
+DEBUG_CALL_FN(debugCall16384<>, 16384)
+DEBUG_CALL_FN(debugCall32768<>, 32768)
+DEBUG_CALL_FN(debugCall65536<>, 65536)
+
+#ifdef GOARCH_ppc64le
+// func debugCallPanicked(val interface{})
+TEXT runtime·debugCallPanicked(SB),NOSPLIT,$32-16
+	// Copy the panic value to the top of stack at SP+32.
+	MOVD	val_type+0(FP), R31
+	MOVD	R31, 32(R1)
+	MOVD	val_data+8(FP), R31
+	MOVD	R31, 40(R1)
+	MOVD	$2, R20
+	TW	$31, R0, R0
+	RET
+#endif
 // Note: these functions use a special calling convention to save generated code space.
 // Arguments are passed in registers, but the space for those arguments are allocated
 // in the caller's stack frame. These stubs write the args into that stack space and
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index eb53cbb..491635b 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -9,7 +9,7 @@
 // func rt0_go()
 TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
 	// X2 = stack; A0 = argc; A1 = argv
-	ADD	$-24, X2
+	SUB	$24, X2
 	MOV	A0, 8(X2)	// argc
 	MOV	A1, 16(X2)	// argv
 
@@ -57,7 +57,7 @@
 
 	// create a new goroutine to start program
 	MOV	$runtime·mainPC(SB), T0		// entry
-	ADD	$-16, X2
+	SUB	$16, X2
 	MOV	T0, 8(X2)
 	MOV	ZERO, 0(X2)
 	CALL	runtime·newproc(SB)
@@ -148,10 +148,29 @@
 	ADD	$8, X2
 	JMP	(T1)
 
-TEXT runtime·getcallerpc(SB),NOSPLIT|NOFRAME,$0-8
-	MOV	0(X2), T0		// LR saved by caller
-	MOV	T0, ret+0(FP)
-	RET
+// func switchToCrashStack0(fn func())
+TEXT runtime·switchToCrashStack0<ABIInternal>(SB), NOSPLIT, $0-8
+	MOV	X10, CTXT			// context register
+	MOV	g_m(g), X11			// curm
+
+	// set g to gcrash
+	MOV	$runtime·gcrash(SB), g	// g = &gcrash
+	CALL	runtime·save_g(SB)	// clobbers X31
+	MOV	X11, g_m(g)			// g.m = curm
+	MOV	g, m_g0(X11)			// curm.g0 = g
+
+	// switch to crashstack
+	MOV	(g_stack+stack_hi)(g), X11
+	SUB	$(4*8), X11
+	MOV	X11, X2
+
+	// call target function
+	MOV	0(CTXT), X10
+	JALR	X1, X10
+
+	// should never return
+	CALL	runtime·abort(SB)
+	UNDEF
 
 /*
  * support for morestack
@@ -168,6 +187,13 @@
 
 // func morestack()
 TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
+	// Called from f.
+	// Set g->sched to context in f.
+	MOV	X2, (g_sched+gobuf_sp)(g)
+	MOV	T0, (g_sched+gobuf_pc)(g)
+	MOV	RA, (g_sched+gobuf_lr)(g)
+	MOV	CTXT, (g_sched+gobuf_ctxt)(g)
+
 	// Cannot grow scheduler stack (m->g0).
 	MOV	g_m(g), A0
 	MOV	m_g0(A0), A1
@@ -182,13 +208,6 @@
 	CALL	runtime·abort(SB)
 
 	// Called from f.
-	// Set g->sched to context in f.
-	MOV	X2, (g_sched+gobuf_sp)(g)
-	MOV	T0, (g_sched+gobuf_pc)(g)
-	MOV	RA, (g_sched+gobuf_lr)(g)
-	MOV	CTXT, (g_sched+gobuf_ctxt)(g)
-
-	// Called from f.
 	// Set m->morebuf to f's caller.
 	MOV	RA, (m_morebuf+gobuf_pc)(A0)	// f's caller's PC
 	MOV	X2, (m_morebuf+gobuf_sp)(A0)	// f's caller's SP
@@ -200,7 +219,7 @@
 	MOV	(g_sched+gobuf_sp)(g), X2
 	// Create a stack frame on g0 to call newstack.
 	MOV	ZERO, -8(X2)	// Zero saved LR in frame
-	ADD	$-8, X2
+	SUB	$8, X2
 	CALL	runtime·newstack(SB)
 
 	// Not reached, but make sure the return PC from the call to newstack
@@ -285,7 +304,7 @@
 	MOV	0(CTXT), T1			// code pointer
 	MOV	(g_sched+gobuf_sp)(g), X2	// sp = m->g0->sched.sp
 	// we don't need special macro for regabi since arg0(X10) = g
-	ADD	$-16, X2
+	SUB	$16, X2
 	MOV	X10, 8(X2)			// setup g
 	MOV	ZERO, 0(X2)			// clear return address
 	JALR	RA, T1
@@ -309,6 +328,15 @@
 	CALL	runtime·abort(SB)
 	RET
 
+// func asmcgocall_no_g(fn, arg unsafe.Pointer)
+// Call fn(arg) aligned appropriately for the gcc ABI.
+// Called on a system stack, and there may be no g yet (during needm).
+TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
+	MOV	fn+0(FP), X5
+	MOV	arg+8(FP), X10
+	JALR	RA, (X5)
+	RET
+
 // func asmcgocall(fn, arg unsafe.Pointer) int32
 // Call fn(arg) on the scheduler stack,
 // aligned appropriately for the gcc ABI.
@@ -338,7 +366,7 @@
 	// Now on a scheduling stack (a pthread-created stack).
 g0:
 	// Save room for two of our pointers.
-	ADD	$-16, X2
+	SUB	$16, X2
 	MOV	X9, 0(X2)	// save old g on stack
 	MOV	(g_stack+stack_hi)(X9), X9
 	SUB	X8, X9, X8
diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s
index a7f414e..a8e1424 100644
--- a/src/runtime/asm_s390x.s
+++ b/src/runtime/asm_s390x.s
@@ -142,6 +142,7 @@
 
 	// argc/argv are already prepared on stack
 	BL	runtime·args(SB)
+	BL	runtime·checkS390xCPU(SB)
 	BL	runtime·osinit(SB)
 	BL	runtime·schedinit(SB)
 
diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s
index 9cd8b5a..b44a4f7 100644
--- a/src/runtime/asm_wasm.s
+++ b/src/runtime/asm_wasm.s
@@ -140,6 +140,7 @@
 	I64Ne
 	If
 		CALLNORESUME runtime·badsystemstack(SB)
+		CALLNORESUME runtime·abort(SB)
 	End
 
 	// switch:
@@ -181,6 +182,9 @@
 TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
 	RET
 
+TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
+	UNDEF
+
 // AES hashing not implemented for wasm
 TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32
 	JMP	runtime·memhashFallback(SB)
@@ -208,6 +212,33 @@
 TEXT runtime·breakpoint(SB), NOSPLIT, $0-0
 	UNDEF
 
+// func switchToCrashStack0(fn func())
+TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8
+	MOVD fn+0(FP), CTXT	// context register
+	MOVD	g_m(g), R2	// curm
+
+	// set g to gcrash
+	MOVD	$runtime·gcrash(SB), g	// g = &gcrash
+	MOVD	R2, g_m(g)	// g.m = curm
+	MOVD	g, m_g0(R2)	// curm.g0 = g
+
+	// switch to crashstack
+	I64Load (g_stack+stack_hi)(g)
+	I64Const $(-4*8)
+	I64Add
+	I32WrapI64
+	Set SP
+
+	// call target function
+	Get CTXT
+	I32WrapI64
+	I64Load $0
+	CALL
+
+	// should never return
+	CALL	runtime·abort(SB)
+	UNDEF
+
 // Called during function prolog when more stack is needed.
 //
 // The traceback routines see morestack on a g0 as being
@@ -221,12 +252,19 @@
 	// R2 = g0
 	MOVD m_g0(R1), R2
 
+	// Set g->sched to context in f.
+	NOP	SP	// tell vet SP changed - stop checking offsets
+	MOVD 0(SP), g_sched+gobuf_pc(g)
+	MOVD $8(SP), g_sched+gobuf_sp(g) // f's SP
+	MOVD CTXT, g_sched+gobuf_ctxt(g)
+
 	// Cannot grow scheduler stack (m->g0).
 	Get g
-	Get R1
+	Get R2
 	I64Eq
 	If
 		CALLNORESUME runtime·badmorestackg0(SB)
+		CALLNORESUME runtime·abort(SB)
 	End
 
 	// Cannot grow signal stack (m->gsignal).
@@ -235,20 +273,15 @@
 	I64Eq
 	If
 		CALLNORESUME runtime·badmorestackgsignal(SB)
+		CALLNORESUME runtime·abort(SB)
 	End
 
 	// Called from f.
 	// Set m->morebuf to f's caller.
-	NOP	SP	// tell vet SP changed - stop checking offsets
 	MOVD 8(SP), m_morebuf+gobuf_pc(R1)
 	MOVD $16(SP), m_morebuf+gobuf_sp(R1) // f's caller's SP
 	MOVD g, m_morebuf+gobuf_g(R1)
 
-	// Set g->sched to context in f.
-	MOVD 0(SP), g_sched+gobuf_pc(g)
-	MOVD $8(SP), g_sched+gobuf_sp(g) // f's SP
-	MOVD CTXT, g_sched+gobuf_ctxt(g)
-
 	// Call newstack on m->g0's stack.
 	MOVD R2, g
 	MOVD g_sched+gobuf_sp(R2), SP
diff --git a/src/runtime/callers_test.go b/src/runtime/callers_test.go
index d316ee9..49a1d5a 100644
--- a/src/runtime/callers_test.go
+++ b/src/runtime/callers_test.go
@@ -455,70 +455,35 @@
 	if !runtime.FramePointerEnabled {
 		t.Skip("frame pointers not supported for this architecture")
 	}
-	func() {
-		// Make sure that frame pointer unwinding succeeds from a deferred
-		// function run after recovering from a panic. It can fail if the
-		// recovery does not properly restore the caller's frame pointer before
-		// running the remaining deferred functions.
-		//
-		// Wrap this all in an extra function since the unwinding is most likely
-		// to fail trying to unwind *after* the frame we're currently in (since
-		// *that* bp will fail to be restored). Below we'll try to induce a crash,
-		// but if for some reason we can't, let's make sure the stack trace looks
-		// right.
-		want := []string{
-			"runtime_test.TestFPUnwindAfterRecovery.func1.1",
-			"runtime_test.TestFPUnwindAfterRecovery.func1",
-			"runtime_test.TestFPUnwindAfterRecovery",
+	// Make sure that frame pointer unwinding succeeds from a deferred
+	// function run after recovering from a panic. It can fail if the
+	// recovery does not properly restore the caller's frame pointer before
+	// running the remaining deferred functions.
+	//
+	// This test does not verify the accuracy of the call stack (it
+	// currently includes a frame from runtime.deferreturn which would
+	// normally be omitted). It is only intended to check that producing the
+	// call stack won't crash.
+	defer func() {
+		pcs := make([]uintptr, 32)
+		for i := range pcs {
+			// If runtime.recovery doesn't properly restore the
+			// frame pointer before returning control to this
+			// function, it will point somewhere lower in the stack
+			// from one of the frames of runtime.gopanic() or one of
+		// its callees prior to recovery.  So, we put some
+			// non-zero values on the stack to ensure that frame
+			// pointer unwinding will crash if it sees the old,
+			// invalid frame pointer.
+			pcs[i] = 10
 		}
-		defer func() {
-			pcs := make([]uintptr, 32)
-			for i := range pcs {
-				// If runtime.recovery doesn't properly restore the
-				// frame pointer before returning control to this
-				// function, it will point somewhere lower in the stack
-				// from one of the frames of runtime.gopanic() or one of
-				// it's callees prior to recovery.  So, we put some
-				// non-zero values on the stack to try and get frame
-				// pointer unwinding to crash if it sees the old,
-				// invalid frame pointer.
-				pcs[i] = 10
-			}
-			runtime.FPCallers(pcs)
-			// If it didn't crash, let's symbolize. Something is going
-			// to look wrong if the bp restoration just happened to
-			// reference a valid frame. Look for
-			var got []string
-			frames := runtime.CallersFrames(pcs)
-			for {
-				frame, more := frames.Next()
-				if !more {
-					break
-				}
-				got = append(got, frame.Function)
-			}
-			// Check that we see the frames in want and in that order.
-			// This is a bit roundabout because FPCallers doesn't do
-			// filtering of runtime internals like Callers.
-			i := 0
-			for _, f := range got {
-				if f != want[i] {
-					continue
-				}
-				i++
-				if i == len(want) {
-					break
-				}
-			}
-			if i != len(want) {
-				t.Fatalf("bad unwind: got %v, want %v in that order", got, want)
-			}
-		}()
-		defer func() {
-			if recover() == nil {
-				t.Fatal("did not recover from panic")
-			}
-		}()
-		panic(1)
+		runtime.FPCallers(pcs)
+		t.Logf("%v", pcs)
 	}()
+	defer func() {
+		if recover() == nil {
+			t.Fatal("did not recover from panic")
+		}
+	}()
+	panic(1)
 }
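
The simplified test above only asserts that frame-pointer unwinding via the unexported FPCallers helper does not crash once the recovered panic has restored the caller's frame pointer. Using only public API, the same defer/recover shape looks roughly like this (a sketch, not the actual test):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	defer func() {
		// Runs after the recovering defer below, so by now the runtime
		// must have restored this frame's caller state correctly.
		pcs := make([]uintptr, 32)
		n := runtime.Callers(0, pcs)
		frames := runtime.CallersFrames(pcs[:n])
		for {
			f, more := frames.Next()
			fmt.Println(f.Function)
			if !more {
				break
			}
		}
	}()
	defer func() {
		if recover() == nil {
			panic("expected a panic")
		}
	}()
	panic(1)
}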
diff --git a/src/runtime/cgo.go b/src/runtime/cgo.go
index 3953035..40c8c74 100644
--- a/src/runtime/cgo.go
+++ b/src/runtime/cgo.go
@@ -61,3 +61,11 @@
 var cgoAlwaysFalse bool
 
 var cgo_yield = &_cgo_yield
+
+func cgoNoCallback(v bool) {
+	g := getg()
+	if g.nocgocallback && v {
+		panic("runtime: unexpected setting cgoNoCallback")
+	}
+	g.nocgocallback = v
+}
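
cgoNoCallback is the runtime half of the new "#cgo nocallback" annotation added in Go 1.22: a C function declared this way promises never to call back into Go, and the runtime treats a callback from it as a fatal error. A hedged usage sketch; the C function and its name are invented for illustration:

package main

/*
// do_sum is promised never to call back into Go; the runtime enforces
// this, turning any callback from it into a fatal runtime error.
#cgo nocallback do_sum
static int do_sum(int a, int b) { return a + b; }
*/
import "C"

import "fmt"

func main() {
	fmt.Println(C.do_sum(2, 3)) // 5
}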
diff --git a/src/runtime/cgo/asm_amd64.s b/src/runtime/cgo/asm_amd64.s
index e319094..6bf1363 100644
--- a/src/runtime/cgo/asm_amd64.s
+++ b/src/runtime/cgo/asm_amd64.s
@@ -23,7 +23,7 @@
 // Saves C callee-saved registers and calls cgocallback with three arguments.
 // fn is the PC of a func(a unsafe.Pointer) function.
 // This signature is known to SWIG, so we can't change it.
-TEXT crosscall2(SB),NOSPLIT|NOFRAME,$0-0
+TEXT crosscall2(SB),NOSPLIT,$0-0
 	PUSH_REGS_HOST_TO_ABI0()
 
 	// Make room for arguments to cgocallback.
diff --git a/src/runtime/cgo/asm_arm.s b/src/runtime/cgo/asm_arm.s
index 095e9c0..425899e 100644
--- a/src/runtime/cgo/asm_arm.s
+++ b/src/runtime/cgo/asm_arm.s
@@ -32,10 +32,10 @@
 	// starting at 4(R13).
 	MOVW.W	R14, -4(R13)
 
-	// Skip floating point registers on GOARM < 6.
-	MOVB    runtime·goarm(SB), R11
-	CMP $6, R11
-	BLT skipfpsave
+	// Skip floating point registers if goarmsoftfp!=0.
+	MOVB    runtime·goarmsoftfp(SB), R11
+	CMP     $0, R11
+	BNE     skipfpsave
 	MOVD	F8, (13*4+8*1)(R13)
 	MOVD	F9, (13*4+8*2)(R13)
 	MOVD	F10, (13*4+8*3)(R13)
@@ -50,9 +50,9 @@
 	// We set up the arguments to cgocallback when saving registers above.
 	BL	runtime·cgocallback(SB)
 
-	MOVB    runtime·goarm(SB), R11
-	CMP $6, R11
-	BLT skipfprest
+	MOVB    runtime·goarmsoftfp(SB), R11
+	CMP     $0, R11
+	BNE     skipfprest
 	MOVD	(13*4+8*1)(R13), F8
 	MOVD	(13*4+8*2)(R13), F9
 	MOVD	(13*4+8*3)(R13), F10
diff --git a/src/runtime/cgo/gcc_386.S b/src/runtime/cgo/gcc_386.S
index 5bd677f..d4c5934 100644
--- a/src/runtime/cgo/gcc_386.S
+++ b/src/runtime/cgo/gcc_386.S
@@ -14,20 +14,26 @@
 #endif
 
 /*
- * void crosscall_386(void (*fn)(void))
+ * void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g)
  *
- * Calling into the 8c tool chain, where all registers are caller save.
+ * Calling into the gc tool chain, where all registers are caller save.
  * Called from standard x86 ABI, where %ebp, %ebx, %esi,
  * and %edi are callee-save, so they must be saved explicitly.
  */
-.globl EXT(crosscall_386)
-EXT(crosscall_386):
+.globl EXT(crosscall1)
+EXT(crosscall1):
 	pushl %ebp
 	movl %esp, %ebp
 	pushl %ebx
 	pushl %esi
 	pushl %edi
 
+	movl 16(%ebp), %eax	/* g */
+	pushl %eax
+	movl 12(%ebp), %eax	/* setg_gcc */
+	call *%eax
+	popl %eax
+
 	movl 8(%ebp), %eax	/* fn */
 	call *%eax
 
diff --git a/src/runtime/cgo/gcc_amd64.S b/src/runtime/cgo/gcc_amd64.S
index 5a1629e..3ba793a 100644
--- a/src/runtime/cgo/gcc_amd64.S
+++ b/src/runtime/cgo/gcc_amd64.S
@@ -14,16 +14,16 @@
 #endif
 
 /*
- * void crosscall_amd64(void (*fn)(void), void (*setg_gcc)(void*), void *g)
+ * void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g)
  *
- * Calling into the 6c tool chain, where all registers are caller save.
+ * Calling into the gc tool chain, where all registers are caller save.
  * Called from standard x86-64 ABI, where %rbx, %rbp, %r12-%r15
  * are callee-save so they must be saved explicitly.
  * The standard x86-64 ABI passes the three arguments m, g, fn
  * in %rdi, %rsi, %rdx.
  */
-.globl EXT(crosscall_amd64)
-EXT(crosscall_amd64):
+.globl EXT(crosscall1)
+EXT(crosscall1):
 	pushq %rbx
 	pushq %rbp
 	pushq %r12
diff --git a/src/runtime/cgo/gcc_arm.S b/src/runtime/cgo/gcc_arm.S
index 474fc23..3df8143 100644
--- a/src/runtime/cgo/gcc_arm.S
+++ b/src/runtime/cgo/gcc_arm.S
@@ -5,14 +5,14 @@
 .file "gcc_arm.S"
 
 /*
- * void crosscall_arm1(void (*fn)(void), void (*setg_gcc)(void *g), void *g)
+ * void crosscall1(void (*fn)(void), void (*setg_gcc)(void *g), void *g)
  *
- * Calling into the 5c tool chain, where all registers are caller save.
+ * Calling into the gc tool chain, where all registers are caller save.
  * Called from standard ARM EABI, where r4-r11 are callee-save, so they
  * must be saved explicitly.
  */
-.globl crosscall_arm1
-crosscall_arm1:
+.globl crosscall1
+crosscall1:
 	push {r4, r5, r6, r7, r8, r9, r10, r11, ip, lr}
 	mov r4, r0
 	mov r5, r1
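
Note on the hunks above: the per-architecture entry points crosscall_386, crosscall_amd64 and crosscall_arm1 are unified under a single name, crosscall1, with one signature, and (on 386) the setg_gcc(g) call moves into the assembly so the C threadentry functions no longer install g themselves. Purely as a conceptual sketch, not part of the patch, the assembly now behaves roughly like this C function:

/*
 * Conceptual C rendering of crosscall1, which is actually implemented
 * in per-architecture assembly (gcc_386.S, gcc_amd64.S, gcc_arm.S above).
 */
void
crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g)
{
	/* the assembly saves the C callee-saved registers here */
	setg_gcc(g);	/* install the Go g for this thread */
	fn();		/* run the Go-side thread entry passed in ts.fn */
	/* the assembly restores the callee-saved registers and returns */
}

This is why the threadentry functions changed from "setg_gcc((void*)ts.g); crosscall_386(ts.fn);" to the single call crosscall1(ts.fn, setg_gcc, ts.g).
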
diff --git a/src/runtime/cgo/gcc_darwin_amd64.c b/src/runtime/cgo/gcc_darwin_amd64.c
index 955b81d..5b5e369 100644
--- a/src/runtime/cgo/gcc_darwin_amd64.c
+++ b/src/runtime/cgo/gcc_darwin_amd64.c
@@ -14,12 +14,8 @@
 void
 x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
 {
-	size_t size;
-
 	setg_gcc = setg;
-
-	size = pthread_get_stacksize_np(pthread_self());
-	g->stacklo = (uintptr)&size - size + 4096;
+	_cgo_set_stacklo(g, NULL);
 }
 
 
@@ -50,6 +46,7 @@
 	}
 }
 
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
 static void*
 threadentry(void *v)
 {
@@ -58,6 +55,6 @@
 	ts = *(ThreadStart*)v;
 	free(v);
 
-	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
+	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_darwin_arm64.c b/src/runtime/cgo/gcc_darwin_arm64.c
index 5b77a42..f1344de 100644
--- a/src/runtime/cgo/gcc_darwin_arm64.c
+++ b/src/runtime/cgo/gcc_darwin_arm64.c
@@ -127,12 +127,9 @@
 void
 x_cgo_init(G *g, void (*setg)(void*))
 {
-	size_t size;
-
 	//fprintf(stderr, "x_cgo_init = %p\n", &x_cgo_init); // aid debugging in presence of ASLR
 	setg_gcc = setg;
-	size = pthread_get_stacksize_np(pthread_self());
-	g->stacklo = (uintptr)&size - size + 4096;
+	_cgo_set_stacklo(g, NULL);
 
 #if TARGET_OS_IPHONE
 	darwin_arm_init_mach_exception_handler();
diff --git a/src/runtime/cgo/gcc_dragonfly_amd64.c b/src/runtime/cgo/gcc_dragonfly_amd64.c
index 0003414..009d4b4 100644
--- a/src/runtime/cgo/gcc_dragonfly_amd64.c
+++ b/src/runtime/cgo/gcc_dragonfly_amd64.c
@@ -16,14 +16,8 @@
 void
 x_cgo_init(G *g, void (*setg)(void*))
 {
-	pthread_attr_t attr;
-	size_t size;
-
 	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
+	_cgo_set_stacklo(g, NULL);
 }
 
 void
@@ -48,11 +42,11 @@
 	pthread_sigmask(SIG_SETMASK, &oset, nil);
 
 	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
+		fatalf("pthread_create failed: %s", strerror(err));
 	}
 }
 
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
 static void*
 threadentry(void *v)
 {
@@ -61,6 +55,6 @@
 	ts = *(ThreadStart*)v;
 	free(v);
 
-	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
+	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_fatalf.c b/src/runtime/cgo/gcc_fatalf.c
index 9493dbb..822c015 100644
--- a/src/runtime/cgo/gcc_fatalf.c
+++ b/src/runtime/cgo/gcc_fatalf.c
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build aix || (!android && linux) || freebsd
+//go:build aix || (!android && linux) || dragonfly || freebsd || netbsd || openbsd || solaris
 
 #include <stdarg.h>
 #include <stdio.h>
diff --git a/src/runtime/cgo/gcc_freebsd.c b/src/runtime/cgo/gcc_freebsd.c
new file mode 100644
index 0000000..a941211
--- /dev/null
+++ b/src/runtime/cgo/gcc_freebsd.c
@@ -0,0 +1,71 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd && (386 || arm || arm64 || riscv64)
+
+#include <sys/types.h>
+#include <sys/signalvar.h>
+#include <machine/sysarch.h>
+#include <pthread.h>
+#include <signal.h>
+#include <string.h>
+#include "libcgo.h"
+#include "libcgo_unix.h"
+
+#ifdef ARM_TP_ADDRESS
+// ARM_TP_ADDRESS is (ARM_VECTORS_HIGH + 0x1000) or 0xffff1000
+// and is known to runtime.read_tls_fallback. Verify it with
+// cpp.
+#if ARM_TP_ADDRESS != 0xffff1000
+#error Wrong ARM_TP_ADDRESS!
+#endif
+#endif
+
+static void* threadentry(void*);
+static void (*setg_gcc)(void*);
+
+void
+x_cgo_init(G *g, void (*setg)(void*))
+{
+	setg_gcc = setg;
+	_cgo_set_stacklo(g, NULL);
+}
+
+void
+_cgo_sys_thread_start(ThreadStart *ts)
+{
+	pthread_attr_t attr;
+	sigset_t ign, oset;
+	pthread_t p;
+	size_t size;
+	int err;
+
+	SIGFILLSET(ign);
+	pthread_sigmask(SIG_SETMASK, &ign, &oset);
+
+	pthread_attr_init(&attr);
+	pthread_attr_getstacksize(&attr, &size);
+	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+	ts->g->stackhi = size;
+	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil);
+
+	if (err != 0) {
+		fatalf("pthread_create failed: %s", strerror(err));
+	}
+}
+
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
+static void*
+threadentry(void *v)
+{
+	ThreadStart ts;
+
+	ts = *(ThreadStart*)v;
+	free(v);
+
+	crosscall1(ts.fn, setg_gcc, ts.g);
+	return nil;
+}
diff --git a/src/runtime/cgo/gcc_freebsd_386.c b/src/runtime/cgo/gcc_freebsd_386.c
deleted file mode 100644
index 9097a2a..0000000
--- a/src/runtime/cgo/gcc_freebsd_386.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <sys/signalvar.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	SIGFILLSET(ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	/*
-	 * Set specific keys.
-	 */
-	setg_gcc((void*)ts.g);
-
-	crosscall_386(ts.fn);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_freebsd_amd64.c b/src/runtime/cgo/gcc_freebsd_amd64.c
index 6071ec3..31905f2 100644
--- a/src/runtime/cgo/gcc_freebsd_amd64.c
+++ b/src/runtime/cgo/gcc_freebsd_amd64.c
@@ -17,21 +17,17 @@
 void
 x_cgo_init(G *g, void (*setg)(void*))
 {
-	pthread_attr_t *attr;
-	size_t size;
+	uintptr *pbounds;
 
 	// Deal with memory sanitizer/clang interaction.
 	// See gcc_linux_amd64.c for details.
 	setg_gcc = setg;
-	attr = (pthread_attr_t*)malloc(sizeof *attr);
-	if (attr == NULL) {
+	pbounds = (uintptr*)malloc(2 * sizeof(uintptr));
+	if (pbounds == NULL) {
 		fatalf("malloc failed: %s", strerror(errno));
 	}
-	pthread_attr_init(attr);
-	pthread_attr_getstacksize(attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(attr);
-	free(attr);
+	_cgo_set_stacklo(g, pbounds);
+	free(pbounds);
 }
 
 void
@@ -59,6 +55,7 @@
 	}
 }
 
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
 static void*
 threadentry(void *v)
 {
@@ -69,6 +66,6 @@
 	free(v);
 	_cgo_tsan_release();
 
-	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
+	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_freebsd_arm.c b/src/runtime/cgo/gcc_freebsd_arm.c
deleted file mode 100644
index 5f89978..0000000
--- a/src/runtime/cgo/gcc_freebsd_arm.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <machine/sysarch.h>
-#include <sys/signalvar.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-#ifdef ARM_TP_ADDRESS
-// ARM_TP_ADDRESS is (ARM_VECTORS_HIGH + 0x1000) or 0xffff1000
-// and is known to runtime.read_tls_fallback. Verify it with
-// cpp.
-#if ARM_TP_ADDRESS != 0xffff1000
-#error Wrong ARM_TP_ADDRESS!
-#endif
-#endif
-
-static void *threadentry(void*);
-
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	SIGFILLSET(ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-extern void crosscall_arm1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall_arm1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_freebsd_arm64.c b/src/runtime/cgo/gcc_freebsd_arm64.c
deleted file mode 100644
index dd8f888..0000000
--- a/src/runtime/cgo/gcc_freebsd_arm64.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <errno.h>
-#include <sys/signalvar.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	SIGFILLSET(ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_freebsd_riscv64.c b/src/runtime/cgo/gcc_freebsd_riscv64.c
deleted file mode 100644
index 6ce5e65..0000000
--- a/src/runtime/cgo/gcc_freebsd_riscv64.c
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <errno.h>
-#include <sys/signalvar.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	SIGFILLSET(ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_libinit.c b/src/runtime/cgo/gcc_libinit.c
index 9676593..68f4a02 100644
--- a/src/runtime/cgo/gcc_libinit.c
+++ b/src/runtime/cgo/gcc_libinit.c
@@ -41,30 +41,37 @@
 uintptr_t
 _cgo_wait_runtime_init_done(void) {
 	void (*pfn)(struct context_arg*);
+	pfn = __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);
 
-	pthread_mutex_lock(&runtime_init_mu);
-	while (runtime_init_done == 0) {
-		pthread_cond_wait(&runtime_init_cond, &runtime_init_mu);
+	int done = 2;
+	if (__atomic_load_n(&runtime_init_done, __ATOMIC_CONSUME) != done) {
+		pthread_mutex_lock(&runtime_init_mu);
+		while (__atomic_load_n(&runtime_init_done, __ATOMIC_CONSUME) == 0) {
+			pthread_cond_wait(&runtime_init_cond, &runtime_init_mu);
+		}
+
+		// The key and x_cgo_pthread_key_created are for the whole program,
+		// whereas the specific and destructor is per thread.
+		if (x_cgo_pthread_key_created == 0 && pthread_key_create(&pthread_g, pthread_key_destructor) == 0) {
+			x_cgo_pthread_key_created = 1;
+		}
+
+
+		// TODO(iant): For the case of a new C thread calling into Go, such
+		// as when using -buildmode=c-archive, we know that Go runtime
+		// initialization is complete but we do not know that all Go init
+		// functions have been run. We should not fetch cgo_context_function
+		// until they have been, because that is where a call to
+		// SetCgoTraceback is likely to occur. We are going to wait for Go
+		// initialization to be complete anyhow, later, by waiting for
+		// main_init_done to be closed in cgocallbackg1. We should wait here
+		// instead. See also issue #15943.
+		pfn = __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);
+
+		__atomic_store_n(&runtime_init_done, done, __ATOMIC_RELEASE);
+		pthread_mutex_unlock(&runtime_init_mu);
 	}
 
-	// The key and x_cgo_pthread_key_created are for the whole program,
-	// whereas the specific and destructor is per thread.
-	if (x_cgo_pthread_key_created == 0 && pthread_key_create(&pthread_g, pthread_key_destructor) == 0) {
-		x_cgo_pthread_key_created = 1;
-	}
-
-	// TODO(iant): For the case of a new C thread calling into Go, such
-	// as when using -buildmode=c-archive, we know that Go runtime
-	// initialization is complete but we do not know that all Go init
-	// functions have been run. We should not fetch cgo_context_function
-	// until they have been, because that is where a call to
-	// SetCgoTraceback is likely to occur. We are going to wait for Go
-	// initialization to be complete anyhow, later, by waiting for
-	// main_init_done to be closed in cgocallbackg1. We should wait here
-	// instead. See also issue #15943.
-	pfn = cgo_context_function;
-
-	pthread_mutex_unlock(&runtime_init_mu);
 	if (pfn != nil) {
 		struct context_arg arg;
 
@@ -75,6 +82,30 @@
 	return 0;
 }
 
+// _cgo_set_stacklo sets g->stacklo based on the stack size.
+// This is common code called from x_cgo_init, which is itself
+// called by rt0_go in the runtime package.
+void _cgo_set_stacklo(G *g, uintptr *pbounds)
+{
+	uintptr bounds[2];
+
+	// pbounds can be passed in by the caller; see gcc_linux_amd64.c.
+	if (pbounds == NULL) {
+		pbounds = &bounds[0];
+	}
+
+	x_cgo_getstackbound(pbounds);
+
+	g->stacklo = *pbounds;
+
+	// Sanity check the results now, rather than getting a
+	// morestack on g0 crash.
+	if (g->stacklo >= g->stackhi) {
+		fprintf(stderr, "runtime/cgo: bad stack bounds: lo=%p hi=%p\n", (void*)(g->stacklo), (void*)(g->stackhi));
+		abort();
+	}
+}
+
 // Store the g into a thread-specific value associated with the pthread key pthread_g.
 // And pthread_key_destructor will dropm when the thread is exiting.
 void x_cgo_bindm(void* g) {
@@ -88,7 +119,7 @@
 void
 x_cgo_notify_runtime_init_done(void* dummy __attribute__ ((unused))) {
 	pthread_mutex_lock(&runtime_init_mu);
-	runtime_init_done = 1;
+	__atomic_store_n(&runtime_init_done, 1, __ATOMIC_RELEASE);
 	pthread_cond_broadcast(&runtime_init_cond);
 	pthread_mutex_unlock(&runtime_init_mu);
 }
@@ -96,19 +127,12 @@
 // Sets the context function to call to record the traceback context
 // when calling a Go function from C code. Called from runtime.SetCgoTraceback.
 void x_cgo_set_context_function(void (*context)(struct context_arg*)) {
-	pthread_mutex_lock(&runtime_init_mu);
-	cgo_context_function = context;
-	pthread_mutex_unlock(&runtime_init_mu);
+	__atomic_store_n(&cgo_context_function, context, __ATOMIC_RELEASE);
 }
 
 // Gets the context function.
 void (*(_cgo_get_context_function(void)))(struct context_arg*) {
-	void (*ret)(struct context_arg*);
-
-	pthread_mutex_lock(&runtime_init_mu);
-	ret = cgo_context_function;
-	pthread_mutex_unlock(&runtime_init_mu);
-	return ret;
+	return __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);
 }
 
 // _cgo_try_pthread_create retries pthread_create if it fails with
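
Note on the gcc_libinit.c hunks above: mutex-guarded reads and writes of cgo_context_function (and the fast path on runtime_init_done) are replaced with the GCC/Clang __atomic builtins, so _cgo_wait_runtime_init_done can skip the lock entirely once it has observed that initialization finished. A minimal, self-contained sketch of that release/consume publish pattern, using hypothetical names rather than the patch's variables:

#include <stdio.h>

typedef void (*context_fn)(void *);

static context_fn ctx_fn;	/* published pointer (hypothetical name) */

static void
set_context(context_fn f)
{
	/* release store: writes made before this are visible to a
	 * thread that later consume-loads the pointer */
	__atomic_store_n(&ctx_fn, f, __ATOMIC_RELEASE);
}

static context_fn
get_context(void)
{
	/* consume load: no mutex is needed just to read the pointer */
	return __atomic_load_n(&ctx_fn, __ATOMIC_CONSUME);
}

static void hello(void *arg) { printf("ctx arg %p\n", arg); }

int
main(void)
{
	set_context(hello);
	context_fn f = get_context();
	if (f != NULL)
		f(NULL);
	return 0;
}

In the patch itself, the slow path (waiting on runtime_init_cond) still takes runtime_init_mu; only the already-initialized fast path and the context-function accessors become lock-free.
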
diff --git a/src/runtime/cgo/gcc_linux.c b/src/runtime/cgo/gcc_linux.c
new file mode 100644
index 0000000..9624df5
--- /dev/null
+++ b/src/runtime/cgo/gcc_linux.c
@@ -0,0 +1,66 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (386 || arm || loong64 || mips || mipsle || mips64 || mips64le || riscv64)
+
+#include <pthread.h>
+#include <string.h>
+#include <signal.h>
+#include "libcgo.h"
+#include "libcgo_unix.h"
+
+static void *threadentry(void*);
+
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
+static void (*setg_gcc)(void*);
+
+void
+x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
+{
+	setg_gcc = setg;
+
+	_cgo_set_stacklo(g, NULL);
+
+	if (x_cgo_inittls) {
+		x_cgo_inittls(tlsg, tlsbase);
+	}
+}
+
+void
+_cgo_sys_thread_start(ThreadStart *ts)
+{
+	pthread_attr_t attr;
+	sigset_t ign, oset;
+	pthread_t p;
+	size_t size;
+	int err;
+
+	sigfillset(&ign);
+	pthread_sigmask(SIG_SETMASK, &ign, &oset);
+
+	pthread_attr_init(&attr);
+	pthread_attr_getstacksize(&attr, &size);
+	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+	ts->g->stackhi = size;
+	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil);
+
+	if (err != 0) {
+		fatalf("pthread_create failed: %s", strerror(err));
+	}
+}
+
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
+static void*
+threadentry(void *v)
+{
+	ThreadStart ts;
+
+	ts = *(ThreadStart*)v;
+	free(v);
+
+	crosscall1(ts.fn, setg_gcc, ts.g);
+	return nil;
+}
diff --git a/src/runtime/cgo/gcc_linux_386.c b/src/runtime/cgo/gcc_linux_386.c
deleted file mode 100644
index 0ce9359..0000000
--- a/src/runtime/cgo/gcc_linux_386.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <pthread.h>
-#include <string.h>
-#include <signal.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void *threadentry(void*);
-static void (*setg_gcc)(void*);
-
-// This will be set in gcc_android.c for android-specific customization.
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
-
-void
-x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-
-	if (x_cgo_inittls) {
-		x_cgo_inittls(tlsg, tlsbase);
-	}
-}
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fatalf("pthread_create failed: %s", strerror(err));
-	}
-}
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	/*
-	 * Set specific keys.
-	 */
-	setg_gcc((void*)ts.g);
-
-	crosscall_386(ts.fn);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_linux_amd64.c b/src/runtime/cgo/gcc_linux_amd64.c
index fb164c1..dcb596e 100644
--- a/src/runtime/cgo/gcc_linux_amd64.c
+++ b/src/runtime/cgo/gcc_linux_amd64.c
@@ -19,8 +19,7 @@
 void
 x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
 {
-	pthread_attr_t *attr;
-	size_t size;
+	uintptr *pbounds;
 
 	/* The memory sanitizer distributed with versions of clang
 	   before 3.8 has a bug: if you call mmap before malloc, mmap
@@ -38,17 +37,12 @@
 	   malloc, so we actually use the memory we allocate.  */
 
 	setg_gcc = setg;
-	attr = (pthread_attr_t*)malloc(sizeof *attr);
-	if (attr == NULL) {
+	pbounds = (uintptr*)malloc(2 * sizeof(uintptr));
+	if (pbounds == NULL) {
 		fatalf("malloc failed: %s", strerror(errno));
 	}
-	pthread_attr_init(attr);
-	pthread_attr_getstacksize(attr, &size);
-	g->stacklo = (uintptr)__builtin_frame_address(0) - size + 4096;
-	if (g->stacklo >= g->stackhi)
-		fatalf("bad stack bounds: lo=%p hi=%p\n", g->stacklo, g->stackhi);
-	pthread_attr_destroy(attr);
-	free(attr);
+	_cgo_set_stacklo(g, pbounds);
+	free(pbounds);
 
 	if (x_cgo_inittls) {
 		x_cgo_inittls(tlsg, tlsbase);
@@ -81,6 +75,7 @@
 	}
 }
 
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
 static void*
 threadentry(void *v)
 {
@@ -91,6 +86,6 @@
 	free(v);
 	_cgo_tsan_release();
 
-	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
+	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_linux_arm.c b/src/runtime/cgo/gcc_linux_arm.c
deleted file mode 100644
index 5e97a9e..0000000
--- a/src/runtime/cgo/gcc_linux_arm.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <pthread.h>
-#include <string.h>
-#include <signal.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void *threadentry(void*);
-
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
-static void (*setg_gcc)(void*);
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fatalf("pthread_create failed: %s", strerror(err));
-	}
-}
-
-extern void crosscall_arm1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall_arm1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
-
-void
-x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-
-	if (x_cgo_inittls) {
-		x_cgo_inittls(tlsg, tlsbase);
-	}
-}
diff --git a/src/runtime/cgo/gcc_linux_arm64.c b/src/runtime/cgo/gcc_linux_arm64.c
index dac45e4..0dcff2c 100644
--- a/src/runtime/cgo/gcc_linux_arm64.c
+++ b/src/runtime/cgo/gcc_linux_arm64.c
@@ -56,8 +56,7 @@
 void
 x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
 {
-	pthread_attr_t *attr;
-	size_t size;
+	uintptr *pbounds;
 
 	/* The memory sanitizer distributed with versions of clang
 	   before 3.8 has a bug: if you call mmap before malloc, mmap
@@ -75,15 +74,12 @@
 	   malloc, so we actually use the memory we allocate.  */
 
 	setg_gcc = setg;
-	attr = (pthread_attr_t*)malloc(sizeof *attr);
-	if (attr == NULL) {
+	pbounds = (uintptr*)malloc(2 * sizeof(uintptr));
+	if (pbounds == NULL) {
 		fatalf("malloc failed: %s", strerror(errno));
 	}
-	pthread_attr_init(attr);
-	pthread_attr_getstacksize(attr, &size);
-	g->stacklo = (uintptr)&size - size + 4096;
-	pthread_attr_destroy(attr);
-	free(attr);
+	_cgo_set_stacklo(g, pbounds);
+	free(pbounds);
 
 	if (x_cgo_inittls) {
 		x_cgo_inittls(tlsg, tlsbase);
diff --git a/src/runtime/cgo/gcc_linux_loong64.c b/src/runtime/cgo/gcc_linux_loong64.c
deleted file mode 100644
index 96a06eb..0000000
--- a/src/runtime/cgo/gcc_linux_loong64.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <pthread.h>
-#include <string.h>
-#include <signal.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void *threadentry(void*);
-
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
-static void (*setg_gcc)(void*);
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fatalf("pthread_create failed: %s", strerror(err));
-	}
-}
-
-extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
-
-void
-x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-
-	if (x_cgo_inittls) {
-		x_cgo_inittls(tlsg, tlsbase);
-	}
-}
diff --git a/src/runtime/cgo/gcc_linux_mips64x.c b/src/runtime/cgo/gcc_linux_mips64x.c
deleted file mode 100644
index c059fd1..0000000
--- a/src/runtime/cgo/gcc_linux_mips64x.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (mips64 || mips64le)
-
-#include <pthread.h>
-#include <string.h>
-#include <signal.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void *threadentry(void*);
-
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
-static void (*setg_gcc)(void*);
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fatalf("pthread_create failed: %s", strerror(err));
-	}
-}
-
-extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
-
-void
-x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-
-	if (x_cgo_inittls) {
-		x_cgo_inittls(tlsg, tlsbase);
-	}
-}
diff --git a/src/runtime/cgo/gcc_linux_mipsx.c b/src/runtime/cgo/gcc_linux_mipsx.c
deleted file mode 100644
index 218b8fd..0000000
--- a/src/runtime/cgo/gcc_linux_mipsx.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (mips || mipsle)
-
-#include <pthread.h>
-#include <string.h>
-#include <signal.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void *threadentry(void*);
-
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
-static void (*setg_gcc)(void*);
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fatalf("pthread_create failed: %s", strerror(err));
-	}
-}
-
-extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
-
-void
-x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-
-	if (x_cgo_inittls) {
-		x_cgo_inittls(tlsg, tlsbase);
-	}
-}
diff --git a/src/runtime/cgo/gcc_linux_riscv64.c b/src/runtime/cgo/gcc_linux_riscv64.c
deleted file mode 100644
index 99c2866..0000000
--- a/src/runtime/cgo/gcc_linux_riscv64.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <pthread.h>
-#include <string.h>
-#include <signal.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void *threadentry(void*);
-
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
-static void (*setg_gcc)(void*);
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fatalf("pthread_create failed: %s", strerror(err));
-	}
-}
-
-extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
-
-void
-x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-
-	if (x_cgo_inittls) {
-		x_cgo_inittls(tlsg, tlsbase);
-	}
-}
diff --git a/src/runtime/cgo/gcc_linux_s390x.c b/src/runtime/cgo/gcc_linux_s390x.c
index bb60048..4b9f76c 100644
--- a/src/runtime/cgo/gcc_linux_s390x.c
+++ b/src/runtime/cgo/gcc_linux_s390x.c
@@ -16,14 +16,8 @@
 void
 x_cgo_init(G *g, void (*setg)(void*), void **tlsbase)
 {
-	pthread_attr_t attr;
-	size_t size;
-
 	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
+	_cgo_set_stacklo(g, NULL);
 }
 
 void
diff --git a/src/runtime/cgo/gcc_mmap.c b/src/runtime/cgo/gcc_mmap.c
index 1fbd5e8..eb710a0 100644
--- a/src/runtime/cgo/gcc_mmap.c
+++ b/src/runtime/cgo/gcc_mmap.c
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (linux && (amd64 || arm64 || ppc64le)) || (freebsd && amd64)
+//go:build (linux && (amd64 || arm64 || loong64 || ppc64le)) || (freebsd && amd64)
 
 #include <errno.h>
 #include <stdint.h>
diff --git a/src/runtime/cgo/gcc_netbsd.c b/src/runtime/cgo/gcc_netbsd.c
new file mode 100644
index 0000000..16819ce
--- /dev/null
+++ b/src/runtime/cgo/gcc_netbsd.c
@@ -0,0 +1,73 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build netbsd && (386 || amd64 || arm || arm64)
+
+#include <sys/types.h>
+#include <pthread.h>
+#include <signal.h>
+#include <string.h>
+#include "libcgo.h"
+#include "libcgo_unix.h"
+
+static void* threadentry(void*);
+static void (*setg_gcc)(void*);
+
+void
+x_cgo_init(G *g, void (*setg)(void*))
+{
+	setg_gcc = setg;
+	_cgo_set_stacklo(g, NULL);
+}
+
+
+void
+_cgo_sys_thread_start(ThreadStart *ts)
+{
+	pthread_attr_t attr;
+	sigset_t ign, oset;
+	pthread_t p;
+	size_t size;
+	int err;
+
+	sigfillset(&ign);
+	pthread_sigmask(SIG_SETMASK, &ign, &oset);
+
+	pthread_attr_init(&attr);
+	pthread_attr_getstacksize(&attr, &size);
+	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+	ts->g->stackhi = size;
+	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil);
+
+	if (err != 0) {
+		fatalf("pthread_create failed: %s", strerror(err));
+	}
+}
+
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
+static void*
+threadentry(void *v)
+{
+	ThreadStart ts;
+	stack_t ss;
+
+	ts = *(ThreadStart*)v;
+	free(v);
+
+	// On NetBSD, a new thread inherits the signal stack of the
+	// creating thread. That confuses minit, so we remove that
+	// signal stack here before calling the regular mstart. It's
+	// a bit baroque to remove a signal stack here only to add one
+	// in minit, but it's a simple change that keeps NetBSD
+	// working like other OS's. At this point all signals are
+	// blocked, so there is no race.
+	memset(&ss, 0, sizeof ss);
+	ss.ss_flags = SS_DISABLE;
+	sigaltstack(&ss, nil);
+
+	crosscall1(ts.fn, setg_gcc, ts.g);
+	return nil;
+}
diff --git a/src/runtime/cgo/gcc_netbsd_386.c b/src/runtime/cgo/gcc_netbsd_386.c
deleted file mode 100644
index 5495f0f..0000000
--- a/src/runtime/cgo/gcc_netbsd_386.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-	stack_t ss;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	/*
-	 * Set specific keys.
-	 */
-	setg_gcc((void*)ts.g);
-
-	// On NetBSD, a new thread inherits the signal stack of the
-	// creating thread. That confuses minit, so we remove that
-	// signal stack here before calling the regular mstart. It's
-	// a bit baroque to remove a signal stack here only to add one
-	// in minit, but it's a simple change that keeps NetBSD
-	// working like other OS's. At this point all signals are
-	// blocked, so there is no race.
-	memset(&ss, 0, sizeof ss);
-	ss.ss_flags = SS_DISABLE;
-	sigaltstack(&ss, nil);
-
-	crosscall_386(ts.fn);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_netbsd_amd64.c b/src/runtime/cgo/gcc_netbsd_amd64.c
deleted file mode 100644
index 9f4b031..0000000
--- a/src/runtime/cgo/gcc_netbsd_amd64.c
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-	stack_t ss;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	// On NetBSD, a new thread inherits the signal stack of the
-	// creating thread. That confuses minit, so we remove that
-	// signal stack here before calling the regular mstart. It's
-	// a bit baroque to remove a signal stack here only to add one
-	// in minit, but it's a simple change that keeps NetBSD
-	// working like other OS's. At this point all signals are
-	// blocked, so there is no race.
-	memset(&ss, 0, sizeof ss);
-	ss.ss_flags = SS_DISABLE;
-	sigaltstack(&ss, nil);
-
-	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_netbsd_arm.c b/src/runtime/cgo/gcc_netbsd_arm.c
deleted file mode 100644
index b0c80ea..0000000
--- a/src/runtime/cgo/gcc_netbsd_arm.c
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void *threadentry(void*);
-
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-extern void crosscall_arm1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-	stack_t ss;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	// On NetBSD, a new thread inherits the signal stack of the
-	// creating thread. That confuses minit, so we remove that
-	// signal stack here before calling the regular mstart. It's
-	// a bit baroque to remove a signal stack here only to add one
-	// in minit, but it's a simple change that keeps NetBSD
-	// working like other OS's. At this point all signals are
-	// blocked, so there is no race.
-	memset(&ss, 0, sizeof ss);
-	ss.ss_flags = SS_DISABLE;
-	sigaltstack(&ss, nil);
-
-	crosscall_arm1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_netbsd_arm64.c b/src/runtime/cgo/gcc_netbsd_arm64.c
deleted file mode 100644
index 694116c..0000000
--- a/src/runtime/cgo/gcc_netbsd_arm64.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void *threadentry(void*);
-
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-	stack_t ss;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	// On NetBSD, a new thread inherits the signal stack of the
-	// creating thread. That confuses minit, so we remove that
-	// signal stack here before calling the regular mstart. It's
-	// a bit baroque to remove a signal stack here only to add one
-	// in minit, but it's a simple change that keeps NetBSD
-	// working like other OS's. At this point all signals are
-	// blocked, so there is no race.
-	memset(&ss, 0, sizeof ss);
-	ss.ss_flags = SS_DISABLE;
-	sigaltstack(&ss, nil);
-
-	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_openbsd.c b/src/runtime/cgo/gcc_openbsd.c
new file mode 100644
index 0000000..3a4e545
--- /dev/null
+++ b/src/runtime/cgo/gcc_openbsd.c
@@ -0,0 +1,61 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build openbsd && (386 || arm || amd64 || arm64 || riscv64)
+
+#include <sys/types.h>
+#include <pthread.h>
+#include <signal.h>
+#include <string.h>
+#include "libcgo.h"
+#include "libcgo_unix.h"
+
+static void* threadentry(void*);
+static void (*setg_gcc)(void*);
+
+void
+x_cgo_init(G *g, void (*setg)(void*))
+{
+	setg_gcc = setg;
+	_cgo_set_stacklo(g, NULL);
+}
+
+void
+_cgo_sys_thread_start(ThreadStart *ts)
+{
+	pthread_attr_t attr;
+	sigset_t ign, oset;
+	pthread_t p;
+	size_t size;
+	int err;
+
+	sigfillset(&ign);
+	pthread_sigmask(SIG_SETMASK, &ign, &oset);
+
+	pthread_attr_init(&attr);
+	pthread_attr_getstacksize(&attr, &size);
+
+	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+	ts->g->stackhi = size;
+	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil);
+
+	if (err != 0) {
+		fatalf("pthread_create failed: %s", strerror(err));
+	}
+}
+
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
+static void*
+threadentry(void *v)
+{
+	ThreadStart ts;
+
+	ts = *(ThreadStart*)v;
+	free(v);
+
+	crosscall1(ts.fn, setg_gcc, ts.g);
+	return nil;
+}
diff --git a/src/runtime/cgo/gcc_openbsd_386.c b/src/runtime/cgo/gcc_openbsd_386.c
deleted file mode 100644
index 127a1b6..0000000
--- a/src/runtime/cgo/gcc_openbsd_386.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	/*
-	 * Set specific keys.
-	 */
-	setg_gcc((void*)ts.g);
-
-	crosscall_386(ts.fn);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_openbsd_amd64.c b/src/runtime/cgo/gcc_openbsd_amd64.c
deleted file mode 100644
index 09d2750..0000000
--- a/src/runtime/cgo/gcc_openbsd_amd64.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_openbsd_arm.c b/src/runtime/cgo/gcc_openbsd_arm.c
deleted file mode 100644
index 9a5757f..0000000
--- a/src/runtime/cgo/gcc_openbsd_arm.c
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-extern void crosscall_arm1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall_arm1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_openbsd_arm64.c b/src/runtime/cgo/gcc_openbsd_arm64.c
deleted file mode 100644
index abf9f66..0000000
--- a/src/runtime/cgo/gcc_openbsd_arm64.c
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_openbsd_mips64.c b/src/runtime/cgo/gcc_openbsd_mips64.c
deleted file mode 100644
index 79f039a..0000000
--- a/src/runtime/cgo/gcc_openbsd_mips64.c
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <sys/types.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-void
-x_cgo_init(G *g, void (*setg)(void*))
-{
-	pthread_attr_t attr;
-	size_t size;
-
-	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
-}
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
-	pthread_attr_t attr;
-	sigset_t ign, oset;
-	pthread_t p;
-	size_t size;
-	int err;
-
-	sigfillset(&ign);
-	pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-
-	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
-	ts->g->stackhi = size;
-	err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
-	pthread_sigmask(SIG_SETMASK, &oset, nil);
-
-	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
-	}
-}
-
-extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-
-static void*
-threadentry(void *v)
-{
-	ThreadStart ts;
-
-	ts = *(ThreadStart*)v;
-	free(v);
-
-	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
-	return nil;
-}
diff --git a/src/runtime/cgo/gcc_ppc64x.c b/src/runtime/cgo/gcc_ppc64x.c
index bfdcf65..98a6549 100644
--- a/src/runtime/cgo/gcc_ppc64x.c
+++ b/src/runtime/cgo/gcc_ppc64x.c
@@ -18,14 +18,8 @@
 void
 x_cgo_init(G *g, void (*setg)(void*), void **tlsbase)
 {
-	pthread_attr_t attr;
-	size_t size;
-
 	setg_gcc = setg;
-	pthread_attr_init(&attr);
-	pthread_attr_getstacksize(&attr, &size);
-	g->stacklo = (uintptr)&attr - size + 4096;
-	pthread_attr_destroy(&attr);
+	_cgo_set_stacklo(g, NULL);
 }
 
 void
diff --git a/src/runtime/cgo/gcc_solaris_amd64.c b/src/runtime/cgo/gcc_solaris_amd64.c
index e89e844..9b106a6 100644
--- a/src/runtime/cgo/gcc_solaris_amd64.c
+++ b/src/runtime/cgo/gcc_solaris_amd64.c
@@ -27,6 +27,12 @@
 	// See golang.org/issue/12210.
 	if(ctx.uc_stack.ss_size < 1024*1024)
 		g->stacklo -= 1024*1024 - ctx.uc_stack.ss_size;
+
+	// Sanity check the results now, rather than getting a
+	// morestack on g0 crash.
+	if (g->stacklo >= g->stackhi) {
+		fatalf("bad stack bounds: lo=%p hi=%p", (void*)(g->stacklo), (void*)(g->stackhi));
+	}
 }
 
 void
@@ -59,11 +65,11 @@
 	pthread_sigmask(SIG_SETMASK, &oset, nil);
 
 	if (err != 0) {
-		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
-		abort();
+		fatalf("pthread_create failed: %s", strerror(err));
 	}
 }
 
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
 static void*
 threadentry(void *v)
 {
@@ -72,6 +78,6 @@
 	ts = *(ThreadStart*)v;
 	free(v);
 
-	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
+	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_windows_386.c b/src/runtime/cgo/gcc_windows_386.c
index 0f4f01c..983e14b 100644
--- a/src/runtime/cgo/gcc_windows_386.c
+++ b/src/runtime/cgo/gcc_windows_386.c
@@ -12,21 +12,23 @@
 #include "libcgo_windows.h"
 
 static void threadentry(void*);
+static void (*setg_gcc)(void*);
 static DWORD *tls_g;
 
 void
 x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
 {
+	setg_gcc = setg;
 	tls_g = (DWORD *)tlsg;
 }
 
-
 void
 _cgo_sys_thread_start(ThreadStart *ts)
 {
 	_cgo_beginthread(threadentry, ts);
 }
 
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
 static void
 threadentry(void *v)
 {
@@ -47,5 +49,5 @@
 		:: "r"(ts.tls), "r"(*tls_g), "r"(ts.g) : "%eax"
 	);
 
-	crosscall_386(ts.fn);
+	crosscall1(ts.fn, setg_gcc, ts.g);
 }
diff --git a/src/runtime/cgo/gcc_windows_amd64.c b/src/runtime/cgo/gcc_windows_amd64.c
index 3ff3c64..e26887a 100644
--- a/src/runtime/cgo/gcc_windows_amd64.c
+++ b/src/runtime/cgo/gcc_windows_amd64.c
@@ -29,6 +29,7 @@
 	_cgo_beginthread(threadentry, ts);
 }
 
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
 static void
 threadentry(void *v)
 {
@@ -47,5 +48,5 @@
 	  :: "r"(ts.tls), "r"(*tls_g)
 	);
 
-	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
+	crosscall1(ts.fn, setg_gcc, (void*)ts.g);
 }
diff --git a/src/runtime/cgo/handle.go b/src/runtime/cgo/handle.go
index 061dfb0..59b65da 100644
--- a/src/runtime/cgo/handle.go
+++ b/src/runtime/cgo/handle.go
@@ -61,8 +61,8 @@
 //	}
 //
 // Some C functions accept a void* argument that points to an arbitrary
-// data value supplied by the caller. It is not safe to coerce a cgo.Handle
-// (an integer) to a Go unsafe.Pointer, but instead we can pass the address
+// data value supplied by the caller. It is not safe to coerce a [cgo.Handle]
+// (an integer) to a Go [unsafe.Pointer], but instead we can pass the address
 // of the cgo.Handle to the void* parameter, as in this variant of the
 // previous example:
 //
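
For readers following the doc change above: the pattern it describes — handing C the address of a cgo.Handle rather than converting the handle itself to unsafe.Pointer — looks roughly like the Go-side sketch below. The callback name is hypothetical and the actual C call is elided, so this only illustrates the handle round-trip; it is not the package's own example.

package main

import (
	"fmt"
	"runtime/cgo"
	"unsafe"
)

// goCallback stands in for an exported Go function that C would invoke
// with the void* argument it was handed (the name is hypothetical).
func goCallback(p unsafe.Pointer) {
	h := *(*cgo.Handle)(p) // recover the Handle from its address
	fmt.Println(h.Value()) // look up the original Go value
	h.Delete()             // release the handle when done
}

func main() {
	h := cgo.NewHandle("hello from Go")
	// Real code would pass unsafe.Pointer(&h) to C as the void* argument;
	// calling the callback directly keeps the sketch self-contained.
	goCallback(unsafe.Pointer(&h))
}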
diff --git a/src/runtime/cgo/libcgo.h b/src/runtime/cgo/libcgo.h
index 04755f0..295c12c 100644
--- a/src/runtime/cgo/libcgo.h
+++ b/src/runtime/cgo/libcgo.h
@@ -69,14 +69,9 @@
 uintptr_t _cgo_wait_runtime_init_done(void);
 
 /*
- * Call fn in the 6c world.
+ * Get the low and high boundaries of the stack.
  */
-void crosscall_amd64(void (*fn)(void), void (*setg_gcc)(void*), void *g);
-
-/*
- * Call fn in the 8c world.
- */
-void crosscall_386(void (*fn)(void));
+void x_cgo_getstackbound(uintptr bounds[2]);
 
 /*
  * Prints error then calls abort. For linux and android.
diff --git a/src/runtime/cgo/libcgo_unix.h b/src/runtime/cgo/libcgo_unix.h
index a56a366..b8f8d30 100644
--- a/src/runtime/cgo/libcgo_unix.h
+++ b/src/runtime/cgo/libcgo_unix.h
@@ -3,6 +3,11 @@
 // license that can be found in the LICENSE file.
 
 /*
+ * Initialize g->stacklo.
+ */
+extern void _cgo_set_stacklo(G *, uintptr *);
+
+/*
  * Call pthread_create, retrying on EAGAIN.
  */
 extern int _cgo_try_pthread_create(pthread_t*, const pthread_attr_t*, void* (*)(void*), void*);
diff --git a/src/runtime/cgo/mmap.go b/src/runtime/cgo/mmap.go
index 2f7e83b..144af2b 100644
--- a/src/runtime/cgo/mmap.go
+++ b/src/runtime/cgo/mmap.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (linux && amd64) || (linux && arm64) || (freebsd && amd64)
+//go:build (linux && (amd64 || arm64 || loong64)) || (freebsd && amd64)
 
 package cgo
 
diff --git a/src/runtime/cgo_mmap.go b/src/runtime/cgo_mmap.go
index 30660f7..36d776e 100644
--- a/src/runtime/cgo_mmap.go
+++ b/src/runtime/cgo_mmap.go
@@ -4,7 +4,7 @@
 
 // Support for memory sanitizer. See runtime/cgo/mmap.go.
 
-//go:build (linux && amd64) || (linux && arm64) || (freebsd && amd64)
+//go:build (linux && (amd64 || arm64 || loong64)) || (freebsd && amd64)
 
 package runtime
 
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 1da7249..f2dd987 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -168,7 +168,7 @@
 	// any C on the call stack, which there will be after this point. If
 	// there isn't, we can use frame pointer unwinding to collect call
 	// stacks efficiently. This will be the case for the first Go-to-C call
-	// on a stack, so it's prefereable to update it here, after we emit a
+	// on a stack, so it's preferable to update it here, after we emit a
 	// trace event in entersyscall above.
 	mp.ncgo++
 
@@ -206,6 +206,75 @@
 	return errno
 }
 
+// Set or reset the system stack bounds for a callback on sp.
+//
+// Must be nosplit because it is called by needm prior to fully initializing
+// the M.
+//
+//go:nosplit
+func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
+	g0 := mp.g0
+	if sp > g0.stack.lo && sp <= g0.stack.hi {
+		// Stack already in bounds, nothing to do.
+		return
+	}
+
+	if mp.ncgo > 0 {
+		// ncgo > 0 indicates that this M was in Go further up the stack
+		// (it called C and is now receiving a callback). It is not
+		// safe for the C call to change the stack out from under us.
+
+		// Note that this case isn't possible for signal == true, as
+		// that is always passing a new M from needm.
+
+		// Stack is bogus, but reset the bounds anyway so we can print.
+		hi := g0.stack.hi
+		lo := g0.stack.lo
+		g0.stack.hi = sp + 1024
+		g0.stack.lo = sp - 32*1024
+		g0.stackguard0 = g0.stack.lo + stackGuard
+		g0.stackguard1 = g0.stackguard0
+
+		print("M ", mp.id, " procid ", mp.procid, " runtime: cgocallback with sp=", hex(sp), " out of bounds [", hex(lo), ", ", hex(hi), "]")
+		print("\n")
+		exit(2)
+	}
+
+	// This M does not have Go further up the stack. However, it may have
+	// previously called into Go, initializing the stack bounds. Between
+	// that call returning and now the stack may have changed (perhaps the
+	// C thread is running a coroutine library). We need to update the
+	// stack bounds for this case.
+	//
+	// Set the stack bounds to match the current stack. We don't
+	// actually know how big the stack is (just as we don't know how big
+	// any scheduling stack is), but we assume there's at least 32 kB.
+	// If we can get a more accurate stack bound from pthread, use that,
+	// provided it actually contains SP.
+	g0.stack.hi = sp + 1024
+	g0.stack.lo = sp - 32*1024
+	if !signal && _cgo_getstackbound != nil {
+		// Don't adjust if called from the signal handler.
+		// We are on the signal stack, not the pthread stack.
+		// (We could get the stack bounds from sigaltstack, but
+		// we're getting out of the signal handler very soon
+		// anyway. Not worth it.)
+		var bounds [2]uintptr
+		asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
+		// getstackbound is an unsupported no-op on Windows.
+		//
+		// Don't use these bounds if they don't contain SP. Perhaps we
+		// were called by something not using the standard thread
+		// stack.
+		if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] {
+			g0.stack.lo = bounds[0]
+			g0.stack.hi = bounds[1]
+		}
+	}
+	g0.stackguard0 = g0.stack.lo + stackGuard
+	g0.stackguard1 = g0.stackguard0
+}
+
 // Call from C back to Go. fn must point to an ABIInternal Go entry-point.
 //
 //go:nosplit
@@ -216,10 +285,14 @@
 		exit(2)
 	}
 
+	sp := gp.m.g0.sched.sp // system sp saved by cgocallback.
+	callbackUpdateSystemStack(gp.m, sp, false)
+
 	// The call from C is on gp.m's g0 stack, so we must ensure
 	// that we stay on that M. We have to do this before calling
 	// exitsyscall, since it would otherwise be free to move us to
-	// a different M. The call to unlockOSThread is in unwindm.
+	// a different M. The call to unlockOSThread is in this function
+	// after cgocallbackg1, or in the case of panicking, in unwindm.
 	lockOSThread()
 
 	checkm := gp.m
@@ -242,13 +315,18 @@
 
 	osPreemptExtExit(gp.m)
 
-	cgocallbackg1(fn, frame, ctxt) // will call unlockOSThread
+	if gp.nocgocallback {
+		panic("runtime: function marked with #cgo nocallback called back into Go")
+	}
 
-	// At this point unlockOSThread has been called.
+	cgocallbackg1(fn, frame, ctxt)
+
+	// At this point we're about to call unlockOSThread.
 	// The following code must not change to a different m.
 	// This is enforced by checking incgo in the schedule function.
-
 	gp.m.incgo = true
+	unlockOSThread()
+
 	if gp.m.isextra {
 		gp.m.isExtraInC = true
 	}
@@ -268,10 +346,6 @@
 func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {
 	gp := getg()
 
-	// When we return, undo the call to lockOSThread in cgocallbackg.
-	// We must still stay on the same m.
-	defer unlockOSThread()
-
 	if gp.m.needextram || extraMWaiters.Load() > 0 {
 		gp.m.needextram = false
 		systemstack(newextram)
@@ -356,6 +430,14 @@
 			osPreemptExtExit(mp)
 		}
 
+		// Undo the call to lockOSThread in cgocallbackg, only on the
+		// panicking path. In the normal return case cgocallbackg will call
+		// unlockOSThread, ensuring no preemption point after the unlock.
+		// Here we don't need to worry about preemption, because we're
+		// panicking out of the callback and unwinding the g0 stack,
+		// instead of reentering cgo (which requires the same thread).
+		unlockOSThread()
+
 		releasem(mp)
 	}
 }
@@ -451,13 +533,13 @@
 }
 
 const cgoCheckPointerFail = "cgo argument has Go pointer to unpinned Go pointer"
-const cgoResultFail = "cgo result has Go pointer"
+const cgoResultFail = "cgo result is unpinned Go pointer or points to unpinned Go pointer"
 
 // cgoCheckArg is the real work of cgoCheckPointer. The argument p
 // is either a pointer to the value (of type t), or the value itself,
 // depending on indir. The top parameter is whether we are at the top
 // level, where Go pointers are allowed. Go pointers to pinned objects are
-// always allowed.
+// allowed as long as they don't reference other unpinned pointers.
 func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
 	if t.PtrBytes == 0 || p == nil {
 		// If the type has no pointers there is nothing to do.
@@ -582,19 +664,32 @@
 		if base == 0 {
 			return
 		}
-		n := span.elemsize
-		hbits := heapBitsForAddr(base, n)
-		for {
-			var addr uintptr
-			if hbits, addr = hbits.next(); addr == 0 {
-				break
+		if goexperiment.AllocHeaders {
+			tp := span.typePointersOfUnchecked(base)
+			for {
+				var addr uintptr
+				if tp, addr = tp.next(base + span.elemsize); addr == 0 {
+					break
+				}
+				pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
+				if cgoIsGoPointer(pp) && !isPinned(pp) {
+					panic(errorString(msg))
+				}
 			}
-			pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
-			if cgoIsGoPointer(pp) && !isPinned(pp) {
-				panic(errorString(msg))
+		} else {
+			n := span.elemsize
+			hbits := heapBitsForAddr(base, n)
+			for {
+				var addr uintptr
+				if hbits, addr = hbits.next(); addr == 0 {
+					break
+				}
+				pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
+				if cgoIsGoPointer(pp) && !isPinned(pp) {
+					panic(errorString(msg))
+				}
 			}
 		}
-
 		return
 	}
 
@@ -644,8 +739,8 @@
 }
 
 // cgoCheckResult is called to check the result parameter of an
-// exported Go function. It panics if the result is or contains a Go
-// pointer.
+// exported Go function. It panics if the result is or contains any
+// other pointer into unpinned Go memory.
 func cgoCheckResult(val any) {
 	if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
 		return
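
As a rough illustration of the bounds-adoption rule in callbackUpdateSystemStack above (guess a 32 kB window around SP, then prefer the pthread-reported bounds only when they are nonzero and actually contain SP), here is a standalone sketch. The function and the addresses are illustrative only, not runtime code.

package main

import "fmt"

// adoptStackBounds mirrors, in simplified form, the decision made by
// callbackUpdateSystemStack: start from a guessed window around sp, then
// prefer the pthread-reported bounds only when they are nonzero and
// actually contain sp.
func adoptStackBounds(sp, pthreadLo, pthreadHi uintptr) (lo, hi uintptr) {
	hi = sp + 1024    // assume a little headroom above sp
	lo = sp - 32*1024 // and at least 32 kB below it
	if pthreadLo != 0 && sp > pthreadLo && sp <= pthreadHi {
		lo, hi = pthreadLo, pthreadHi
	}
	return lo, hi
}

func main() {
	// sp inside the reported pthread stack: adopt the reported bounds.
	lo, hi := adoptStackBounds(0x500000, 0x4f0000, 0x580000)
	fmt.Printf("lo=%#x hi=%#x\n", lo, hi) // lo=0x4f0000 hi=0x580000

	// sp outside the reported bounds (e.g. a coroutine-library stack):
	// keep the guessed window around sp instead.
	lo, hi = adoptStackBounds(0x500000, 0x100000, 0x180000)
	fmt.Printf("lo=%#x hi=%#x\n", lo, hi) // lo=0x4f8000 hi=0x500400
}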
diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go
index ec5734a..3d6de4f 100644
--- a/src/runtime/cgocheck.go
+++ b/src/runtime/cgocheck.go
@@ -9,6 +9,7 @@
 
 import (
 	"internal/goarch"
+	"internal/goexperiment"
 	"unsafe"
 )
 
@@ -176,16 +177,29 @@
 	}
 
 	// src must be in the regular heap.
-
-	hbits := heapBitsForAddr(uintptr(src), size)
-	for {
-		var addr uintptr
-		if hbits, addr = hbits.next(); addr == 0 {
-			break
+	if goexperiment.AllocHeaders {
+		tp := s.typePointersOf(uintptr(src), size)
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(uintptr(src) + size); addr == 0 {
+				break
+			}
+			v := *(*unsafe.Pointer)(unsafe.Pointer(addr))
+			if cgoIsGoPointer(v) && !isPinned(v) {
+				throw(cgoWriteBarrierFail)
+			}
 		}
-		v := *(*unsafe.Pointer)(unsafe.Pointer(addr))
-		if cgoIsGoPointer(v) && !isPinned(v) {
-			throw(cgoWriteBarrierFail)
+	} else {
+		hbits := heapBitsForAddr(uintptr(src), size)
+		for {
+			var addr uintptr
+			if hbits, addr = hbits.next(); addr == 0 {
+				break
+			}
+			v := *(*unsafe.Pointer)(unsafe.Pointer(addr))
+			if cgoIsGoPointer(v) && !isPinned(v) {
+				throw(cgoWriteBarrierFail)
+			}
 		}
 	}
 }
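
Both checks above tolerate Go pointers to pinned objects (cgoIsGoPointer && !isPinned). A minimal user-side sketch of pinning with runtime.Pinner follows; the C call that would receive the pointer is omitted, so this shows only the pin/unpin lifecycle, not a full cgo example.

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

func main() {
	var pinner runtime.Pinner

	data := new([64]byte) // Go-allocated memory we want C to read
	pinner.Pin(data)      // a pinned object passes the cgo pointer checks

	// In real code unsafe.Pointer(data) would now be passed to a C
	// function (omitted here); the GC will not move or free the object
	// while the pin is held.
	fmt.Printf("pinned object at %p\n", unsafe.Pointer(data))

	pinner.Unpin() // release once C no longer holds the pointer
}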
diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go
index 256f976..526d45b 100644
--- a/src/runtime/chan_test.go
+++ b/src/runtime/chan_test.go
@@ -481,12 +481,13 @@
 	}
 	// If the select in the goroutine is fair,
 	// cnt1 and cnt2 should be about the same value.
-	// With 10,000 trials, the expected margin of error at
-	// a confidence level of six nines is 4.891676 / (2 * Sqrt(10000)).
-	r := float64(cnt1) / trials
-	e := math.Abs(r - 0.5)
-	t.Log(cnt1, cnt2, r, e)
-	if e > 4.891676/(2*math.Sqrt(trials)) {
+	// See if we're more than 10 sigma away from the expected value.
+	// 10 sigma is a lot, but we're ok with some systematic bias as
+	// long as it isn't too severe.
+	const mean = trials * 0.5
+	const variance = trials * 0.5 * (1 - 0.5)
+	stddev := math.Sqrt(variance)
+	if math.Abs(float64(cnt1-mean)) > 10*stddev {
 		t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2)
 	}
 	close(done)
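
Quick arithmetic behind the new 10-sigma check, assuming the 10,000 trials mentioned in the old comment: cnt1 for a fair select is binomial with mean trials/2 and standard deviation sqrt(trials·0.25), so the acceptable window works out to 5000 ± 500.

package main

import (
	"fmt"
	"math"
)

func main() {
	const trials = 10000 // the trial count mentioned in the old comment
	mean := trials * 0.5
	stddev := math.Sqrt(trials * 0.5 * (1 - 0.5)) // binomial standard deviation
	fmt.Printf("mean=%v stddev=%v window=[%v, %v]\n",
		mean, stddev, mean-10*stddev, mean+10*stddev)
	// prints: mean=5000 stddev=50 window=[4500, 5500]
}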
diff --git a/src/runtime/coro.go b/src/runtime/coro.go
new file mode 100644
index 0000000..0d6666e
--- /dev/null
+++ b/src/runtime/coro.go
@@ -0,0 +1,165 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// A coro represents extra concurrency without extra parallelism,
+// as would be needed for a coroutine implementation.
+// The coro does not represent a specific coroutine, only the ability
+// to do coroutine-style control transfers.
+// It can be thought of as like a special channel that always has
+// a goroutine blocked on it. If another goroutine calls coroswitch(c),
+// the caller becomes the goroutine blocked in c, and the goroutine
+// formerly blocked in c starts running.
+// These switches continue until a call to coroexit(c),
+// which ends the use of the coro by releasing the blocked
+// goroutine in c and exiting the current goroutine.
+//
+// Coros are heap allocated and garbage collected, so that user code
+// can hold a pointer to a coro without causing potential dangling
+// pointer errors.
+type coro struct {
+	gp guintptr
+	f  func(*coro)
+}
+
+//go:linkname newcoro
+
+// newcoro creates a new coro containing a
+// goroutine blocked waiting to run f
+// and returns that coro.
+func newcoro(f func(*coro)) *coro {
+	c := new(coro)
+	c.f = f
+	pc := getcallerpc()
+	gp := getg()
+	systemstack(func() {
+		start := corostart
+		startfv := *(**funcval)(unsafe.Pointer(&start))
+		gp = newproc1(startfv, gp, pc)
+	})
+	gp.coroarg = c
+	gp.waitreason = waitReasonCoroutine
+	casgstatus(gp, _Grunnable, _Gwaiting)
+	c.gp.set(gp)
+	return c
+}
+
+//go:linkname corostart
+
+// corostart is the entry func for a new coroutine.
+// It runs the coroutine user function f passed to corostart
+// and then calls coroexit to remove the extra concurrency.
+func corostart() {
+	gp := getg()
+	c := gp.coroarg
+	gp.coroarg = nil
+
+	c.f(c)
+	coroexit(c)
+}
+
+// coroexit is like coroswitch but closes the coro
+// and exits the current goroutine.
+func coroexit(c *coro) {
+	gp := getg()
+	gp.coroarg = c
+	gp.coroexit = true
+	mcall(coroswitch_m)
+}
+
+//go:linkname coroswitch
+
+// coroswitch switches to the goroutine blocked on c
+// and then blocks the current goroutine on c.
+func coroswitch(c *coro) {
+	gp := getg()
+	gp.coroarg = c
+	mcall(coroswitch_m)
+}
+
+// coroswitch_m is the implementation of coroswitch
+// that runs on the m stack.
+//
+// Note: Coroutine switches are expected to happen at
+// an order of magnitude (or more) higher frequency
+// than regular goroutine switches, so this path is heavily
+// optimized to remove unnecessary work.
+// The fast path here is three CAS: the one at the top on gp.atomicstatus,
+// the one in the middle to choose the next g,
+// and the one at the bottom on gnext.atomicstatus.
+// It is important not to add more atomic operations or other
+// expensive operations to the fast path.
+func coroswitch_m(gp *g) {
+	// TODO(rsc,mknyszek): add tracing support in a lightweight manner.
+	// Probably the tracer will need a global bool (set and cleared during STW)
+	// that this code can check to decide whether to use trace.gen.Load();
+	// we do not want to do the atomic load all the time, especially when
+	// tracer use is relatively rare.
+	c := gp.coroarg
+	gp.coroarg = nil
+	exit := gp.coroexit
+	gp.coroexit = false
+	mp := gp.m
+
+	if exit {
+		gdestroy(gp)
+		gp = nil
+	} else {
+		// If we can CAS ourselves directly from running to waiting, do so,
+		// keeping the control transfer as lightweight as possible.
+		gp.waitreason = waitReasonCoroutine
+		if !gp.atomicstatus.CompareAndSwap(_Grunning, _Gwaiting) {
+			// The CAS failed: use casgstatus, which will take care of
+			// coordinating with the garbage collector about the state change.
+			casgstatus(gp, _Grunning, _Gwaiting)
+		}
+
+		// Clear gp.m.
+		setMNoWB(&gp.m, nil)
+	}
+
+	// The goroutine stored in c is the one to run next.
+	// Swap it with ourselves.
+	var gnext *g
+	for {
+		// Note: this is a racy load, but it will eventually
+		// get the right value, and if it gets the wrong value,
+		// the c.gp.cas will fail, so no harm done other than
+		// a wasted loop iteration.
+		// The cas will also sync c.gp's
+		// memory enough that the next iteration of the racy load
+		// should see the correct value.
+		// We are avoiding the atomic load to keep this path
+		// as lightweight as absolutely possible.
+		// (The atomic load is free on x86 but not free elsewhere.)
+		next := c.gp
+		if next.ptr() == nil {
+			throw("coroswitch on exited coro")
+		}
+		var self guintptr
+		self.set(gp)
+		if c.gp.cas(next, self) {
+			gnext = next.ptr()
+			break
+		}
+	}
+
+	// Start running next, without heavy scheduling machinery.
+	// Set mp.curg and gnext.m and then update scheduling state
+	// directly if possible.
+	setGNoWB(&mp.curg, gnext)
+	setMNoWB(&gnext.m, mp)
+	if !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) {
+		// The CAS failed: use casgstatus, which will take care of
+		// coordinating with the garbage collector about the state change.
+		casgstatus(gnext, _Gwaiting, _Grunnable)
+		casgstatus(gnext, _Grunnable, _Grunning)
+	}
+
+	// Switch to gnext. Does not return.
+	gogo(&gnext.sched)
+}
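
The doc comment above compares a coro to a special channel that always has a goroutine blocked on it. The user-level sketch below uses two ordinary channels to show the same ping-pong control transfer; the real coro bypasses the scheduler and channel machinery entirely, so treat this purely as an analogy.

package main

import "fmt"

func main() {
	toCoro := make(chan int)
	toMain := make(chan int)

	// This goroutine plays the role of the goroutine "blocked in" the coro.
	go func() {
		for v := range toCoro {
			toMain <- v * v // do some work, then hand control back
		}
	}()

	for i := 1; i <= 3; i++ {
		toCoro <- i           // switch "into" the coroutine
		fmt.Println(<-toMain) // block until it switches back
	}
	close(toCoro) // the rough analogue of coroexit
}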
diff --git a/src/runtime/coverage/apis.go b/src/runtime/coverage/apis.go
index 05da345..15ba04a 100644
--- a/src/runtime/coverage/apis.go
+++ b/src/runtime/coverage/apis.go
@@ -8,7 +8,6 @@
 	"fmt"
 	"internal/coverage"
 	"io"
-	"reflect"
 	"sync/atomic"
 	"unsafe"
 )
@@ -158,13 +157,8 @@
 	// inconsistency when reading the counter array from the thread
 	// running ClearCounters.
 
-	var sd []atomic.Uint32
-
-	bufHdr := (*reflect.SliceHeader)(unsafe.Pointer(&sd))
 	for _, c := range cl {
-		bufHdr.Data = uintptr(unsafe.Pointer(c.Counters))
-		bufHdr.Len = int(c.Len)
-		bufHdr.Cap = int(c.Len)
+		sd := unsafe.Slice((*atomic.Uint32)(unsafe.Pointer(c.Counters)), int(c.Len))
 		for i := 0; i < len(sd); i++ {
 			// Skip ahead until the next non-zero value.
 			sdi := sd[i].Load()
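
The change above swaps the reflect.SliceHeader aliasing trick for unsafe.Slice. A minimal sketch of the modern form follows; the buffer and the names are stand-ins for the counter region referenced by c.Counters / c.Len, not the coverage package's real types.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// A buffer known only by base pointer and length.
	backing := [4]uint32{1, 2, 3, 4}
	ptr := &backing[0]
	n := 4

	// unsafe.Slice builds the []uint32 header directly, replacing the
	// removed reflect.SliceHeader field-poking.
	sd := unsafe.Slice(ptr, n)
	fmt.Println(sd, len(sd), cap(sd)) // [1 2 3 4] 4 4
}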
diff --git a/src/runtime/coverage/emit.go b/src/runtime/coverage/emit.go
index bb0c6fb..6fe04da 100644
--- a/src/runtime/coverage/emit.go
+++ b/src/runtime/coverage/emit.go
@@ -14,7 +14,6 @@
 	"io"
 	"os"
 	"path/filepath"
-	"reflect"
 	"runtime"
 	"strconv"
 	"sync/atomic"
@@ -447,26 +446,16 @@
 func writeMetaData(w io.Writer, metalist []rtcov.CovMetaBlob, cmode coverage.CounterMode, gran coverage.CounterGranularity, finalHash [16]byte) error {
 	mfw := encodemeta.NewCoverageMetaFileWriter("<io.Writer>", w)
 
-	// Note: "sd" is re-initialized on each iteration of the loop
-	// below, and would normally be declared inside the loop, but
-	// placed here escape analysis since we capture it in bufHdr.
-	var sd []byte
-	bufHdr := (*reflect.SliceHeader)(unsafe.Pointer(&sd))
-
 	var blobs [][]byte
 	for _, e := range metalist {
-		bufHdr.Data = uintptr(unsafe.Pointer(e.P))
-		bufHdr.Len = int(e.Len)
-		bufHdr.Cap = int(e.Len)
+		sd := unsafe.Slice(e.P, int(e.Len))
 		blobs = append(blobs, sd)
 	}
 	return mfw.Write(finalHash, blobs, cmode, gran)
 }
 
 func (s *emitState) VisitFuncs(f encodecounter.CounterVisitorFn) error {
-	var sd []atomic.Uint32
 	var tcounters []uint32
-	bufHdr := (*reflect.SliceHeader)(unsafe.Pointer(&sd))
 
 	rdCounters := func(actrs []atomic.Uint32, ctrs []uint32) []uint32 {
 		ctrs = ctrs[:0]
@@ -478,9 +467,7 @@
 
 	dpkg := uint32(0)
 	for _, c := range s.counterlist {
-		bufHdr.Data = uintptr(unsafe.Pointer(c.Counters))
-		bufHdr.Len = int(c.Len)
-		bufHdr.Cap = int(c.Len)
+		sd := unsafe.Slice((*atomic.Uint32)(unsafe.Pointer(c.Counters)), int(c.Len))
 		for i := 0; i < len(sd); i++ {
 			// Skip ahead until the next non-zero value.
 			sdi := sd[i].Load()
@@ -588,7 +575,7 @@
 }
 
 // markProfileEmitted signals the runtime/coverage machinery that
-// coverate data output files have already been written out, and there
+// coverage data output files have already been written out, and there
 // is no need to take any additional action at exit time. This
 // function is called (via linknamed reference) from the
 // coverage-related boilerplate code in _testmain.go emitted for go
diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go
index 0d7eeac..b2898ba 100644
--- a/src/runtime/cpuprof.go
+++ b/src/runtime/cpuprof.go
@@ -62,8 +62,8 @@
 // If hz <= 0, SetCPUProfileRate turns off profiling.
 // If the profiler is on, the rate cannot be changed without first turning it off.
 //
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.cpuprofile flag instead of calling
+// Most clients should use the [runtime/pprof] package or
+// the [testing] package's -test.cpuprofile flag instead of calling
 // SetCPUProfileRate directly.
 func SetCPUProfileRate(hz int) {
 	// Clamp hz to something reasonable.
@@ -202,16 +202,16 @@
 // The details of generating that format have changed,
 // so this functionality has been removed.
 //
-// Deprecated: Use the runtime/pprof package,
-// or the handlers in the net/http/pprof package,
-// or the testing package's -test.cpuprofile flag instead.
+// Deprecated: Use the [runtime/pprof] package,
+// or the handlers in the [net/http/pprof] package,
+// or the [testing] package's -test.cpuprofile flag instead.
 func CPUProfile() []byte {
 	panic("CPUProfile no longer available")
 }
 
 //go:linkname runtime_pprof_runtime_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
 func runtime_pprof_runtime_cyclesPerSecond() int64 {
-	return tickspersecond()
+	return ticksPerSecond()
 }
 
 // readProfile, provided to runtime/pprof, returns the next chunk of
diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go
index e185180..304f1a7 100644
--- a/src/runtime/crash_cgo_test.go
+++ b/src/runtime/crash_cgo_test.go
@@ -8,6 +8,7 @@
 
 import (
 	"fmt"
+	"internal/goexperiment"
 	"internal/goos"
 	"internal/platform"
 	"internal/testenv"
@@ -753,6 +754,24 @@
 	}
 }
 
+func TestCgoNoCallback(t *testing.T) {
+	t.Skip("TODO(#56378): enable in Go 1.23")
+	got := runTestProg(t, "testprogcgo", "CgoNoCallback")
+	want := "function marked with #cgo nocallback called back into Go"
+	if !strings.Contains(got, want) {
+		t.Fatalf("did not see %q in output:\n%s", want, got)
+	}
+}
+
+func TestCgoNoEscape(t *testing.T) {
+	t.Skip("TODO(#56378): enable in Go 1.23")
+	got := runTestProg(t, "testprogcgo", "CgoNoEscape")
+	want := "OK\n"
+	if got != want {
+		t.Fatalf("want %s, got %s\n", want, got)
+	}
+}
+
 func TestCgoTracebackGoroutineProfile(t *testing.T) {
 	output := runTestProg(t, "testprogcgo", "GoroutineProfile")
 	want := "OK\n"
@@ -767,6 +786,9 @@
 	case "plan9", "windows":
 		t.Skipf("no pthreads on %s", runtime.GOOS)
 	}
+	if goexperiment.ExecTracer2 {
+		t.Skip("skipping test that is covered elsewhere for the new execution tracer")
+	}
 	output := runTestProg(t, "testprogcgo", "CgoTraceParser")
 	want := "OK\n"
 	ErrTimeOrder := "ErrTimeOrder\n"
@@ -783,6 +805,9 @@
 	case "plan9", "windows":
 		t.Skipf("no pthreads on %s", runtime.GOOS)
 	}
+	if goexperiment.ExecTracer2 {
+		t.Skip("skipping test that is covered elsewhere for the new execution tracer")
+	}
 	output := runTestProg(t, "testprogcgo", "CgoTraceParser", "GOMAXPROCS=1")
 	want := "OK\n"
 	ErrTimeOrder := "ErrTimeOrder\n"
@@ -853,3 +878,20 @@
 		t.Errorf("expected %q, got %v", want, got)
 	}
 }
+
+func TestStackSwitchCallback(t *testing.T) {
+	t.Parallel()
+	switch runtime.GOOS {
+	case "windows", "plan9", "android", "ios", "openbsd": // no getcontext
+		t.Skipf("skipping test on %s", runtime.GOOS)
+	}
+	got := runTestProg(t, "testprogcgo", "StackSwitchCallback")
+	skip := "SKIP\n"
+	if got == skip {
+		t.Skip("skipping on musl/bionic libc")
+	}
+	want := "OK\n"
+	if got != want {
+		t.Errorf("expected %q, got %v", want, got)
+	}
+}
diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go
index 8f11333..2ed0fd8 100644
--- a/src/runtime/crash_test.go
+++ b/src/runtime/crash_test.go
@@ -24,10 +24,21 @@
 var toRemove []string
 
 func TestMain(m *testing.M) {
+	_, coreErrBefore := os.Stat("core")
+
 	status := m.Run()
 	for _, file := range toRemove {
 		os.RemoveAll(file)
 	}
+
+	_, coreErrAfter := os.Stat("core")
+	if coreErrBefore != nil && coreErrAfter == nil {
+		fmt.Fprintln(os.Stderr, "runtime.test: some test left a core file behind")
+		if status == 0 {
+			status = 1
+		}
+	}
+
 	os.Exit(status)
 }
 
@@ -766,7 +777,7 @@
 
 func TestRuntimePanic(t *testing.T) {
 	testenv.MustHaveExec(t)
-	cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestRuntimePanic"))
+	cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=^TestRuntimePanic$"))
 	cmd.Env = append(cmd.Env, "GO_TEST_RUNTIME_PANIC=1")
 	out, err := cmd.CombinedOutput()
 	t.Logf("%s", out)
@@ -781,19 +792,27 @@
 func TestG0StackOverflow(t *testing.T) {
 	testenv.MustHaveExec(t)
 
-	switch runtime.GOOS {
-	case "android", "darwin", "dragonfly", "freebsd", "ios", "linux", "netbsd", "openbsd":
-		t.Skipf("g0 stack is wrong on pthread platforms (see golang.org/issue/26061)")
+	if runtime.GOOS == "ios" {
+		testenv.SkipFlaky(t, 62671)
 	}
 
 	if os.Getenv("TEST_G0_STACK_OVERFLOW") != "1" {
-		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestG0StackOverflow", "-test.v"))
+		cmd := testenv.CleanCmdEnv(testenv.Command(t, os.Args[0], "-test.run=^TestG0StackOverflow$", "-test.v"))
 		cmd.Env = append(cmd.Env, "TEST_G0_STACK_OVERFLOW=1")
 		out, err := cmd.CombinedOutput()
+		t.Logf("output:\n%s", out)
 		// Don't check err since it's expected to crash.
 		if n := strings.Count(string(out), "morestack on g0\n"); n != 1 {
 			t.Fatalf("%s\n(exit status %v)", out, err)
 		}
+		if runtime.CrashStackImplemented {
+			// check for a stack trace
+			want := "runtime.stackOverflow"
+			if n := strings.Count(string(out), want); n < 5 {
+				t.Errorf("output does not contain %q at least 5 times:\n%s", want, out)
+			}
+			return // it's not a signal-style traceback
+		}
 		// Check that it's a signal-style traceback.
 		if runtime.GOOS != "windows" {
 			if want := "PC="; !strings.Contains(string(out), want) {
@@ -869,3 +888,12 @@
 		t.Errorf("output does not contain %q:\n%s", want, output)
 	}
 }
+
+func TestNetpollWaiters(t *testing.T) {
+	t.Parallel()
+	output := runTestProg(t, "testprognet", "NetpollWaiters")
+	want := "OK\n"
+	if output != want {
+		t.Fatalf("output is not %q\n%s", want, output)
+	}
+}
diff --git a/src/runtime/crash_unix_test.go b/src/runtime/crash_unix_test.go
index 6bca2ac..123a462 100644
--- a/src/runtime/crash_unix_test.go
+++ b/src/runtime/crash_unix_test.go
@@ -65,7 +65,7 @@
 		t.Skipf("skipping; not supported on %v", runtime.GOOS)
 	}
 
-	if runtime.GOOS == "openbsd" && (runtime.GOARCH == "arm" || runtime.GOARCH == "mips64") {
+	if runtime.GOOS == "openbsd" && (runtime.GOARCH == "arm" || runtime.GOARCH == "mips64" || runtime.GOARCH == "ppc64") {
 		// This may be ncpu < 2 related...
 		t.Skipf("skipping; test fails on %s/%s - see issue #42464", runtime.GOOS, runtime.GOARCH)
 	}
@@ -91,6 +91,7 @@
 
 	cmd := testenv.Command(t, exe, "CrashDumpsAllThreads")
 	cmd = testenv.CleanCmdEnv(cmd)
+	cmd.Dir = t.TempDir() // put any core file in tempdir
 	cmd.Env = append(cmd.Env,
 		"GOTRACEBACK=crash",
 		// Set GOGC=off. Because of golang.org/issue/10958, the tight
@@ -164,6 +165,7 @@
 	t.Parallel()
 	cmd := exec.Command(os.Args[0], "testPanicSystemstackInternal")
 	cmd = testenv.CleanCmdEnv(cmd)
+	cmd.Dir = t.TempDir() // put any core file in tempdir
 	cmd.Env = append(cmd.Env, "GOTRACEBACK=crash")
 	pr, pw, err := os.Pipe()
 	if err != nil {
@@ -214,6 +216,12 @@
 	if nUser != 2 || nSys != 2 {
 		t.Fatalf("want %d user stack frames in %s and %d system stack frames in %s, got %d and %d:\n%s", 2, userFunc, 2, sysFunc, nUser, nSys, string(tb))
 	}
+
+	// Traceback should not contain "unexpected SPWRITE" when
+	// unwinding the system stacks.
+	if bytes.Contains(tb, []byte("unexpected SPWRITE")) {
+		t.Errorf("unexpected \"unexpected SPWRITE\" in traceback:\n%s", tb)
+	}
 }
 
 func init() {
diff --git a/src/runtime/debug.go b/src/runtime/debug.go
index 9a92b45..3233ce8 100644
--- a/src/runtime/debug.go
+++ b/src/runtime/debug.go
@@ -11,7 +11,7 @@
 
 // GOMAXPROCS sets the maximum number of CPUs that can be executing
 // simultaneously and returns the previous setting. It defaults to
-// the value of runtime.NumCPU. If n < 1, it does not change the current setting.
+// the value of [runtime.NumCPU]. If n < 1, it does not change the current setting.
 // This call will go away when the scheduler improves.
 func GOMAXPROCS(n int) int {
 	if GOARCH == "wasm" && n > 1 {
@@ -25,12 +25,12 @@
 		return ret
 	}
 
-	stopTheWorldGC(stwGOMAXPROCS)
+	stw := stopTheWorldGC(stwGOMAXPROCS)
 
 	// newprocs will be processed by startTheWorld
 	newprocs = int32(n)
 
-	startTheWorldGC()
+	startTheWorldGC(stw)
 	return ret
 }
 
@@ -52,6 +52,17 @@
 	return n
 }
 
+func totalMutexWaitTimeNanos() int64 {
+	total := sched.totalMutexWaitTime.Load()
+
+	total += sched.totalRuntimeLockWaitTime.Load()
+	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
+		total += mp.mLockProfile.waitTime.Load()
+	}
+
+	return total
+}
+
 // NumGoroutine returns the number of goroutines that currently exist.
 func NumGoroutine() int {
 	return int(gcount())
diff --git a/src/runtime/debug/garbage.go b/src/runtime/debug/garbage.go
index 0f53928..cb3248d 100644
--- a/src/runtime/debug/garbage.go
+++ b/src/runtime/debug/garbage.go
@@ -214,7 +214,7 @@
 //
 // The memory limit is always respected by the Go runtime, so to
 // effectively disable this behavior, set the limit very high.
-// math.MaxInt64 is the canonical value for disabling the limit,
+// [math.MaxInt64] is the canonical value for disabling the limit,
 // but values much greater than the available memory on the underlying
 // system work just as well.
 //
diff --git a/src/runtime/debug/mod.go b/src/runtime/debug/mod.go
index 7f85174..a470560 100644
--- a/src/runtime/debug/mod.go
+++ b/src/runtime/debug/mod.go
@@ -75,8 +75,8 @@
 //   - CGO_ENABLED: the effective CGO_ENABLED environment variable
 //   - CGO_CFLAGS: the effective CGO_CFLAGS environment variable
 //   - CGO_CPPFLAGS: the effective CGO_CPPFLAGS environment variable
-//   - CGO_CXXFLAGS:  the effective CGO_CPPFLAGS environment variable
-//   - CGO_LDFLAGS: the effective CGO_CPPFLAGS environment variable
+//   - CGO_CXXFLAGS:  the effective CGO_CXXFLAGS environment variable
+//   - CGO_LDFLAGS: the effective CGO_LDFLAGS environment variable
 //   - GOARCH: the architecture target
 //   - GOAMD64/GOARM/GO386/etc: the architecture feature level for GOARCH
 //   - GOOS: the operating system target
diff --git a/src/runtime/debug/stack.go b/src/runtime/debug/stack.go
index 5d810af..3999840 100644
--- a/src/runtime/debug/stack.go
+++ b/src/runtime/debug/stack.go
@@ -17,7 +17,7 @@
 }
 
 // Stack returns a formatted stack trace of the goroutine that calls it.
-// It calls runtime.Stack with a large enough buffer to capture the entire trace.
+// It calls [runtime.Stack] with a large enough buffer to capture the entire trace.
 func Stack() []byte {
 	buf := make([]byte, 1024)
 	for {
diff --git a/src/runtime/debug_test.go b/src/runtime/debug_test.go
index 75fe07e..1c00d2f 100644
--- a/src/runtime/debug_test.go
+++ b/src/runtime/debug_test.go
@@ -9,7 +9,7 @@
 // spends all of its time in the race runtime, which isn't a safe
 // point.
 
-//go:build (amd64 || arm64) && linux && !race
+//go:build (amd64 || arm64 || ppc64le) && linux && !race
 
 package runtime_test
 
diff --git a/src/runtime/debugcall.go b/src/runtime/debugcall.go
index e793545..5dd8306 100644
--- a/src/runtime/debugcall.go
+++ b/src/runtime/debugcall.go
@@ -2,7 +2,10 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build amd64 || arm64
+// Though the debug call function feature is not enabled on ppc64,
+// ppc64 is included here to avoid a missing Go declaration error
+// for debugCallPanicked while building runtime.test.
+//go:build amd64 || arm64 || ppc64le || ppc64
 
 package runtime
 
@@ -83,7 +86,7 @@
 		if pc != f.entry() {
 			pc--
 		}
-		up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc, nil)
+		up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc)
 		if up != abi.UnsafePointSafe {
 			// Not at a safe point.
 			ret = debugCallUnsafePoint
@@ -163,10 +166,12 @@
 		gp.schedlink = 0
 
 		// Park the calling goroutine.
-		if traceEnabled() {
-			traceGoPark(traceBlockDebugCall, 1)
-		}
+		trace := traceAcquire()
 		casGToWaiting(gp, _Grunning, waitReasonDebugCall)
+		if trace.ok() {
+			trace.GoPark(traceBlockDebugCall, 1)
+			traceRelease(trace)
+		}
 		dropg()
 
 		// Directly execute the new goroutine. The debug
@@ -222,19 +227,23 @@
 		// Switch back to the calling goroutine. At some point
 		// the scheduler will schedule us again and we'll
 		// finish exiting.
-		if traceEnabled() {
-			traceGoSched()
-		}
+		trace := traceAcquire()
 		casgstatus(gp, _Grunning, _Grunnable)
+		if trace.ok() {
+			trace.GoSched()
+			traceRelease(trace)
+		}
 		dropg()
 		lock(&sched.lock)
 		globrunqput(gp)
 		unlock(&sched.lock)
 
-		if traceEnabled() {
-			traceGoUnpark(callingG, 0)
-		}
+		trace = traceAcquire()
 		casgstatus(callingG, _Gwaiting, _Grunnable)
+		if trace.ok() {
+			trace.GoUnpark(callingG, 0)
+			traceRelease(trace)
+		}
 		execute(callingG, true)
 	})
 }
diff --git a/src/runtime/defer_test.go b/src/runtime/defer_test.go
index 3a54951..d73202a 100644
--- a/src/runtime/defer_test.go
+++ b/src/runtime/defer_test.go
@@ -5,7 +5,6 @@
 package runtime_test
 
 import (
-	"fmt"
 	"reflect"
 	"runtime"
 	"testing"
@@ -85,7 +84,7 @@
 		}
 		want := []int{4, 2, 1}
 		if !reflect.DeepEqual(want, list) {
-			t.Fatal(fmt.Sprintf("wanted %v, got %v", want, list))
+			t.Fatalf("wanted %v, got %v", want, list)
 		}
 
 	}()
@@ -133,13 +132,13 @@
 	defer func() {
 		r := recover()
 		if r != nil {
-			t.Fatal(fmt.Sprintf("wanted nil recover, got %v", r))
+			t.Fatalf("wanted nil recover, got %v", r)
 		}
 	}()
 	defer func() {
 		r := recover()
 		if r != "panic2" {
-			t.Fatal(fmt.Sprintf("wanted %v, got %v", "panic2", r))
+			t.Fatalf("wanted %v, got %v", "panic2", r)
 		}
 	}()
 	defer func() {
@@ -156,7 +155,7 @@
 	defer func() {
 		r := recover()
 		if r != "panic1" {
-			t.Fatal(fmt.Sprintf("wanted %v, got %v", "panic1", r))
+			t.Fatalf("wanted %v, got %v", "panic1", r)
 		}
 	}()
 	defer func() {
@@ -166,7 +165,7 @@
 			// not directly called by the panic.
 			r := recover()
 			if r != nil {
-				t.Fatal(fmt.Sprintf("wanted nil recover, got %v", r))
+				t.Fatalf("wanted nil recover, got %v", r)
 			}
 		}()
 	}()
@@ -213,25 +212,25 @@
 
 	defer func() {
 		if globint1 != 1 {
-			t.Fatal(fmt.Sprintf("globint1:  wanted: 1, got %v", globint1))
+			t.Fatalf("globint1:  wanted: 1, got %v", globint1)
 		}
 		if save1 != 5 {
-			t.Fatal(fmt.Sprintf("save1:  wanted: 5, got %v", save1))
+			t.Fatalf("save1:  wanted: 5, got %v", save1)
 		}
 		if globint2 != 1 {
-			t.Fatal(fmt.Sprintf("globint2:  wanted: 1, got %v", globint2))
+			t.Fatalf("globint2:  wanted: 1, got %v", globint2)
 		}
 		if save2 != 2 {
-			t.Fatal(fmt.Sprintf("save2:  wanted: 2, got %v", save2))
+			t.Fatalf("save2:  wanted: 2, got %v", save2)
 		}
 		if save3 != 4 {
-			t.Fatal(fmt.Sprintf("save3:  wanted: 4, got %v", save3))
+			t.Fatalf("save3:  wanted: 4, got %v", save3)
 		}
 		if globint3 != 1 {
-			t.Fatal(fmt.Sprintf("globint3:  wanted: 1, got %v", globint3))
+			t.Fatalf("globint3:  wanted: 1, got %v", globint3)
 		}
 		if save4 != 4 {
-			t.Fatal(fmt.Sprintf("save1:  wanted: 4, got %v", save4))
+			t.Fatalf("save1:  wanted: 4, got %v", save4)
 		}
 	}()
 
@@ -264,7 +263,7 @@
 	cond := 1
 	defer func() {
 		if cond != 2 {
-			t.Fatal(fmt.Sprintf("cond: wanted 2, got %v", cond))
+			t.Fatalf("cond: wanted 2, got %v", cond)
 		}
 		if recover() != "Test panic" {
 			t.Fatal("Didn't find expected panic")
diff --git a/src/runtime/defs_darwin.go b/src/runtime/defs_darwin.go
index 9c6eeee..e374433 100644
--- a/src/runtime/defs_darwin.go
+++ b/src/runtime/defs_darwin.go
@@ -122,6 +122,9 @@
 	O_NONBLOCK = C.O_NONBLOCK
 	O_CREAT    = C.O_CREAT
 	O_TRUNC    = C.O_TRUNC
+
+	VM_REGION_BASIC_INFO_COUNT_64 = C.VM_REGION_BASIC_INFO_COUNT_64
+	VM_REGION_BASIC_INFO_64       = C.VM_REGION_BASIC_INFO_64
 )
 
 type StackT C.struct_sigaltstack
@@ -163,3 +166,11 @@
 type PthreadCondAttr C.pthread_condattr_t
 
 type MachTimebaseInfo C.mach_timebase_info_data_t
+
+type MachPort C.mach_port_t
+type MachVMMapRead C.vm_map_read_t
+type MachVMAddress C.mach_vm_address_t
+type MachVMSize C.mach_vm_size_t
+type MachVMRegionFlavour C.vm_region_flavor_t
+type MachVMRegionInfo C.vm_region_info_t
+type MachMsgTypeNumber C.mach_msg_type_number_t
diff --git a/src/runtime/defs_darwin_amd64.go b/src/runtime/defs_darwin_amd64.go
index fc7de33..f998b0b 100644
--- a/src/runtime/defs_darwin_amd64.go
+++ b/src/runtime/defs_darwin_amd64.go
@@ -101,6 +101,9 @@
 	_O_NONBLOCK = 0x4
 	_O_CREAT    = 0x200
 	_O_TRUNC    = 0x400
+
+	_VM_REGION_BASIC_INFO_COUNT_64 = 0x9
+	_VM_REGION_BASIC_INFO_64       = 0x9
 )
 
 type stackt struct {
@@ -371,3 +374,11 @@
 	numer uint32
 	denom uint32
 }
+
+type machPort uint32
+type machVMMapRead uint32
+type machVMAddress uint64
+type machVMSize uint64
+type machVMRegionFlavour int32
+type machVMRegionInfo *int32
+type machMsgTypeNumber uint32
diff --git a/src/runtime/defs_darwin_arm64.go b/src/runtime/defs_darwin_arm64.go
index e26df02..e07b08e 100644
--- a/src/runtime/defs_darwin_arm64.go
+++ b/src/runtime/defs_darwin_arm64.go
@@ -103,6 +103,9 @@
 	_O_NONBLOCK = 0x4
 	_O_CREAT    = 0x200
 	_O_TRUNC    = 0x400
+
+	_VM_REGION_BASIC_INFO_COUNT_64 = 0x9
+	_VM_REGION_BASIC_INFO_64       = 0x9
 )
 
 type stackt struct {
@@ -238,3 +241,11 @@
 }
 
 type pthreadkey uint64
+
+type machPort uint32
+type machVMMapRead uint32
+type machVMAddress uint64
+type machVMSize uint64
+type machVMRegionFlavour int32
+type machVMRegionInfo *int32
+type machMsgTypeNumber uint32
diff --git a/src/runtime/defs_dragonfly.go b/src/runtime/defs_dragonfly.go
index 9dcfdf0..0463f1f 100644
--- a/src/runtime/defs_dragonfly.go
+++ b/src/runtime/defs_dragonfly.go
@@ -27,10 +27,11 @@
 import "C"
 
 const (
-	EINTR  = C.EINTR
-	EFAULT = C.EFAULT
-	EBUSY  = C.EBUSY
-	EAGAIN = C.EAGAIN
+	EINTR     = C.EINTR
+	EFAULT    = C.EFAULT
+	EBUSY     = C.EBUSY
+	EAGAIN    = C.EAGAIN
+	ETIMEDOUT = C.ETIMEDOUT
 
 	O_WRONLY   = C.O_WRONLY
 	O_NONBLOCK = C.O_NONBLOCK
diff --git a/src/runtime/defs_dragonfly_amd64.go b/src/runtime/defs_dragonfly_amd64.go
index f1a2302..41bfb08 100644
--- a/src/runtime/defs_dragonfly_amd64.go
+++ b/src/runtime/defs_dragonfly_amd64.go
@@ -6,10 +6,11 @@
 import "unsafe"
 
 const (
-	_EINTR  = 0x4
-	_EFAULT = 0xe
-	_EBUSY  = 0x10
-	_EAGAIN = 0x23
+	_EINTR     = 0x4
+	_EFAULT    = 0xe
+	_EBUSY     = 0x10
+	_EAGAIN    = 0x23
+	_ETIMEDOUT = 0x3c
 
 	_O_WRONLY   = 0x1
 	_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_openbsd.go b/src/runtime/defs_openbsd.go
index 2ca6a88..d93c087 100644
--- a/src/runtime/defs_openbsd.go
+++ b/src/runtime/defs_openbsd.go
@@ -31,9 +31,10 @@
 import "C"
 
 const (
-	EINTR  = C.EINTR
-	EFAULT = C.EFAULT
-	EAGAIN = C.EAGAIN
+	EINTR     = C.EINTR
+	EFAULT    = C.EFAULT
+	EAGAIN    = C.EAGAIN
+	ETIMEDOUT = C.ETIMEDOUT
 
 	O_NONBLOCK = C.O_NONBLOCK
 	O_CLOEXEC  = C.O_CLOEXEC
@@ -57,9 +58,6 @@
 
 	PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED
 
-	F_GETFL = C.F_GETFL
-	F_SETFL = C.F_SETFL
-
 	SIGHUP    = C.SIGHUP
 	SIGINT    = C.SIGINT
 	SIGQUIT   = C.SIGQUIT
diff --git a/src/runtime/defs_openbsd_386.go b/src/runtime/defs_openbsd_386.go
index fde8af5..996745f 100644
--- a/src/runtime/defs_openbsd_386.go
+++ b/src/runtime/defs_openbsd_386.go
@@ -6,9 +6,10 @@
 import "unsafe"
 
 const (
-	_EINTR  = 0x4
-	_EFAULT = 0xe
-	_EAGAIN = 0x23
+	_EINTR     = 0x4
+	_EFAULT    = 0xe
+	_EAGAIN    = 0x23
+	_ETIMEDOUT = 0x3c
 
 	_O_WRONLY   = 0x1
 	_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_openbsd_amd64.go b/src/runtime/defs_openbsd_amd64.go
index 0f29d0c..739853b 100644
--- a/src/runtime/defs_openbsd_amd64.go
+++ b/src/runtime/defs_openbsd_amd64.go
@@ -6,9 +6,10 @@
 import "unsafe"
 
 const (
-	_EINTR  = 0x4
-	_EFAULT = 0xe
-	_EAGAIN = 0x23
+	_EINTR     = 0x4
+	_EFAULT    = 0xe
+	_EAGAIN    = 0x23
+	_ETIMEDOUT = 0x3c
 
 	_O_WRONLY   = 0x1
 	_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_openbsd_arm.go b/src/runtime/defs_openbsd_arm.go
index b56f3b4..cdda6b4 100644
--- a/src/runtime/defs_openbsd_arm.go
+++ b/src/runtime/defs_openbsd_arm.go
@@ -6,9 +6,10 @@
 import "unsafe"
 
 const (
-	_EINTR  = 0x4
-	_EFAULT = 0xe
-	_EAGAIN = 0x23
+	_EINTR     = 0x4
+	_EFAULT    = 0xe
+	_EAGAIN    = 0x23
+	_ETIMEDOUT = 0x3c
 
 	_O_WRONLY   = 0x1
 	_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_openbsd_arm64.go b/src/runtime/defs_openbsd_arm64.go
index 0a9acc0..67f35b4 100644
--- a/src/runtime/defs_openbsd_arm64.go
+++ b/src/runtime/defs_openbsd_arm64.go
@@ -7,9 +7,10 @@
 import "unsafe"
 
 const (
-	_EINTR  = 0x4
-	_EFAULT = 0xe
-	_EAGAIN = 0x23
+	_EINTR     = 0x4
+	_EFAULT    = 0xe
+	_EAGAIN    = 0x23
+	_ETIMEDOUT = 0x3c
 
 	_O_WRONLY   = 0x1
 	_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_openbsd_mips64.go b/src/runtime/defs_openbsd_mips64.go
index 1e469e4..7958044 100644
--- a/src/runtime/defs_openbsd_mips64.go
+++ b/src/runtime/defs_openbsd_mips64.go
@@ -13,9 +13,10 @@
 import "unsafe"
 
 const (
-	_EINTR  = 0x4
-	_EFAULT = 0xe
-	_EAGAIN = 0x23
+	_EINTR     = 0x4
+	_EFAULT    = 0xe
+	_EAGAIN    = 0x23
+	_ETIMEDOUT = 0x3c
 
 	_O_WRONLY   = 0x1
 	_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_openbsd_ppc64.go b/src/runtime/defs_openbsd_ppc64.go
new file mode 100644
index 0000000..83f300c
--- /dev/null
+++ b/src/runtime/defs_openbsd_ppc64.go
@@ -0,0 +1,184 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Generated from:
+//
+//   GOARCH=ppc64 go tool cgo -godefs defs_openbsd.go
+//
+// Then converted to the form used by the runtime.
+
+package runtime
+
+import "unsafe"
+
+const (
+	_EINTR     = 0x4
+	_EFAULT    = 0xe
+	_EAGAIN    = 0x23
+	_ETIMEDOUT = 0x3c
+
+	_O_WRONLY   = 0x1
+	_O_NONBLOCK = 0x4
+	_O_CREAT    = 0x200
+	_O_TRUNC    = 0x400
+	_O_CLOEXEC  = 0x10000
+
+	_PROT_NONE  = 0x0
+	_PROT_READ  = 0x1
+	_PROT_WRITE = 0x2
+	_PROT_EXEC  = 0x4
+
+	_MAP_ANON    = 0x1000
+	_MAP_PRIVATE = 0x2
+	_MAP_FIXED   = 0x10
+	_MAP_STACK   = 0x4000
+
+	_MADV_DONTNEED = 0x4
+	_MADV_FREE     = 0x6
+
+	_SA_SIGINFO = 0x40
+	_SA_RESTART = 0x2
+	_SA_ONSTACK = 0x1
+
+	_PTHREAD_CREATE_DETACHED = 0x1
+
+	_SIGHUP    = 0x1
+	_SIGINT    = 0x2
+	_SIGQUIT   = 0x3
+	_SIGILL    = 0x4
+	_SIGTRAP   = 0x5
+	_SIGABRT   = 0x6
+	_SIGEMT    = 0x7
+	_SIGFPE    = 0x8
+	_SIGKILL   = 0x9
+	_SIGBUS    = 0xa
+	_SIGSEGV   = 0xb
+	_SIGSYS    = 0xc
+	_SIGPIPE   = 0xd
+	_SIGALRM   = 0xe
+	_SIGTERM   = 0xf
+	_SIGURG    = 0x10
+	_SIGSTOP   = 0x11
+	_SIGTSTP   = 0x12
+	_SIGCONT   = 0x13
+	_SIGCHLD   = 0x14
+	_SIGTTIN   = 0x15
+	_SIGTTOU   = 0x16
+	_SIGIO     = 0x17
+	_SIGXCPU   = 0x18
+	_SIGXFSZ   = 0x19
+	_SIGVTALRM = 0x1a
+	_SIGPROF   = 0x1b
+	_SIGWINCH  = 0x1c
+	_SIGINFO   = 0x1d
+	_SIGUSR1   = 0x1e
+	_SIGUSR2   = 0x1f
+
+	_FPE_INTDIV = 0x1
+	_FPE_INTOVF = 0x2
+	_FPE_FLTDIV = 0x3
+	_FPE_FLTOVF = 0x4
+	_FPE_FLTUND = 0x5
+	_FPE_FLTRES = 0x6
+	_FPE_FLTINV = 0x7
+	_FPE_FLTSUB = 0x8
+
+	_BUS_ADRALN = 0x1
+	_BUS_ADRERR = 0x2
+	_BUS_OBJERR = 0x3
+
+	_SEGV_MAPERR = 0x1
+	_SEGV_ACCERR = 0x2
+
+	_ITIMER_REAL    = 0x0
+	_ITIMER_VIRTUAL = 0x1
+	_ITIMER_PROF    = 0x2
+
+	_EV_ADD       = 0x1
+	_EV_DELETE    = 0x2
+	_EV_CLEAR     = 0x20
+	_EV_ERROR     = 0x4000
+	_EV_EOF       = 0x8000
+	_EVFILT_READ  = -0x1
+	_EVFILT_WRITE = -0x2
+)
+
+type tforkt struct {
+	tf_tcb   unsafe.Pointer
+	tf_tid   *int32
+	tf_stack uintptr
+}
+
+type sigcontext struct {
+	sc_cookie uint64
+	sc_mask   int32
+	sc_reg    [32]uint64
+	sc_lr     uint64
+	sc_cr     uint64
+	sc_xer    uint64
+	sc_ctr    uint64
+	sc_pc     uint64
+	sc_ps     uint64
+	sc_vrsave uint64
+	pad_cgo_0 [8]byte
+	sc_vsx    [64][16]uint8
+	sc_fpscr  uint64
+	sc_vscr   uint64
+}
+
+type siginfo struct {
+	si_signo  int32
+	si_code   int32
+	si_errno  int32
+	pad_cgo_0 [4]byte
+	_data     [120]byte
+}
+
+type stackt struct {
+	ss_sp     uintptr
+	ss_size   uintptr
+	ss_flags  int32
+	pad_cgo_0 [4]byte
+}
+
+type timespec struct {
+	tv_sec  int64
+	tv_nsec int64
+}
+
+//go:nosplit
+func (ts *timespec) setNsec(ns int64) {
+	ts.tv_sec = ns / 1e9
+	ts.tv_nsec = ns % 1e9
+}
+
+type timeval struct {
+	tv_sec  int64
+	tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+	tv.tv_usec = int64(x)
+}
+
+type itimerval struct {
+	it_interval timeval
+	it_value    timeval
+}
+
+type keventt struct {
+	ident  uint64
+	filter int16
+	flags  uint16
+	fflags uint32
+	data   int64
+	udata  *byte
+}
+
+type pthread uintptr
+type pthreadattr uintptr
+type pthreadcond uintptr
+type pthreadcondattr uintptr
+type pthreadmutex uintptr
+type pthreadmutexattr uintptr
diff --git a/src/runtime/defs_openbsd_riscv64.go b/src/runtime/defs_openbsd_riscv64.go
new file mode 100644
index 0000000..2a044d5
--- /dev/null
+++ b/src/runtime/defs_openbsd_riscv64.go
@@ -0,0 +1,177 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+const (
+	_EINTR     = 0x4
+	_EFAULT    = 0xe
+	_EAGAIN    = 0x23
+	_ETIMEDOUT = 0x3c
+
+	_O_WRONLY   = 0x1
+	_O_NONBLOCK = 0x4
+	_O_CREAT    = 0x200
+	_O_TRUNC    = 0x400
+	_O_CLOEXEC  = 0x10000
+
+	_PROT_NONE  = 0x0
+	_PROT_READ  = 0x1
+	_PROT_WRITE = 0x2
+	_PROT_EXEC  = 0x4
+
+	_MAP_ANON    = 0x1000
+	_MAP_PRIVATE = 0x2
+	_MAP_FIXED   = 0x10
+	_MAP_STACK   = 0x4000
+
+	_MADV_DONTNEED = 0x4
+	_MADV_FREE     = 0x6
+
+	_SA_SIGINFO = 0x40
+	_SA_RESTART = 0x2
+	_SA_ONSTACK = 0x1
+
+	_PTHREAD_CREATE_DETACHED = 0x1
+
+	_SIGHUP    = 0x1
+	_SIGINT    = 0x2
+	_SIGQUIT   = 0x3
+	_SIGILL    = 0x4
+	_SIGTRAP   = 0x5
+	_SIGABRT   = 0x6
+	_SIGEMT    = 0x7
+	_SIGFPE    = 0x8
+	_SIGKILL   = 0x9
+	_SIGBUS    = 0xa
+	_SIGSEGV   = 0xb
+	_SIGSYS    = 0xc
+	_SIGPIPE   = 0xd
+	_SIGALRM   = 0xe
+	_SIGTERM   = 0xf
+	_SIGURG    = 0x10
+	_SIGSTOP   = 0x11
+	_SIGTSTP   = 0x12
+	_SIGCONT   = 0x13
+	_SIGCHLD   = 0x14
+	_SIGTTIN   = 0x15
+	_SIGTTOU   = 0x16
+	_SIGIO     = 0x17
+	_SIGXCPU   = 0x18
+	_SIGXFSZ   = 0x19
+	_SIGVTALRM = 0x1a
+	_SIGPROF   = 0x1b
+	_SIGWINCH  = 0x1c
+	_SIGINFO   = 0x1d
+	_SIGUSR1   = 0x1e
+	_SIGUSR2   = 0x1f
+
+	_FPE_INTDIV = 0x1
+	_FPE_INTOVF = 0x2
+	_FPE_FLTDIV = 0x3
+	_FPE_FLTOVF = 0x4
+	_FPE_FLTUND = 0x5
+	_FPE_FLTRES = 0x6
+	_FPE_FLTINV = 0x7
+	_FPE_FLTSUB = 0x8
+
+	_BUS_ADRALN = 0x1
+	_BUS_ADRERR = 0x2
+	_BUS_OBJERR = 0x3
+
+	_SEGV_MAPERR = 0x1
+	_SEGV_ACCERR = 0x2
+
+	_ITIMER_REAL    = 0x0
+	_ITIMER_VIRTUAL = 0x1
+	_ITIMER_PROF    = 0x2
+
+	_EV_ADD       = 0x1
+	_EV_DELETE    = 0x2
+	_EV_CLEAR     = 0x20
+	_EV_ERROR     = 0x4000
+	_EV_EOF       = 0x8000
+	_EVFILT_READ  = -0x1
+	_EVFILT_WRITE = -0x2
+)
+
+type tforkt struct {
+	tf_tcb   unsafe.Pointer
+	tf_tid   *int32
+	tf_stack uintptr
+}
+
+type sigcontext struct {
+	__sc_unused int32
+	sc_mask     int32
+	sc_ra       uintptr
+	sc_sp       uintptr
+	sc_gp       uintptr
+	sc_tp       uintptr
+	sc_t        [7]uintptr
+	sc_s        [12]uintptr
+	sc_a        [8]uintptr
+	sc_sepc     uintptr
+	sc_f        [32]uintptr
+	sc_fcsr     uintptr
+	sc_cookie   int64
+}
+
+type siginfo struct {
+	si_signo  int32
+	si_code   int32
+	si_errno  int32
+	pad_cgo_0 [4]byte
+	_data     [120]byte
+}
+
+type stackt struct {
+	ss_sp     uintptr
+	ss_size   uintptr
+	ss_flags  int32
+	pad_cgo_0 [4]byte
+}
+
+type timespec struct {
+	tv_sec  int64
+	tv_nsec int64
+}
+
+//go:nosplit
+func (ts *timespec) setNsec(ns int64) {
+	ts.tv_sec = ns / 1e9
+	ts.tv_nsec = ns % 1e9
+}
+
+type timeval struct {
+	tv_sec  int64
+	tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+	tv.tv_usec = int64(x)
+}
+
+type itimerval struct {
+	it_interval timeval
+	it_value    timeval
+}
+
+type keventt struct {
+	ident  uint64
+	filter int16
+	flags  uint16
+	fflags uint32
+	data   int64
+	udata  *byte
+}
+
+type pthread uintptr
+type pthreadattr uintptr
+type pthreadcond uintptr
+type pthreadcondattr uintptr
+type pthreadmutex uintptr
+type pthreadmutexattr uintptr
diff --git a/src/runtime/defs_solaris.go b/src/runtime/defs_solaris.go
index 11708ee..54c4008 100644
--- a/src/runtime/defs_solaris.go
+++ b/src/runtime/defs_solaris.go
@@ -125,8 +125,6 @@
 	O_CREAT    = C.O_CREAT
 	O_TRUNC    = C.O_TRUNC
 	O_CLOEXEC  = C.O_CLOEXEC
-	F_GETFL    = C.F_GETFL
-	F_SETFL    = C.F_SETFL
 
 	POLLIN  = C.POLLIN
 	POLLOUT = C.POLLOUT
diff --git a/src/runtime/defs_windows.go b/src/runtime/defs_windows.go
index 56698fa..2dbe144 100644
--- a/src/runtime/defs_windows.go
+++ b/src/runtime/defs_windows.go
@@ -41,8 +41,9 @@
 	_INFINITE     = 0xffffffff
 	_WAIT_TIMEOUT = 0x102
 
-	_EXCEPTION_CONTINUE_EXECUTION = -0x1
-	_EXCEPTION_CONTINUE_SEARCH    = 0x0
+	_EXCEPTION_CONTINUE_EXECUTION  = -0x1
+	_EXCEPTION_CONTINUE_SEARCH     = 0x0
+	_EXCEPTION_CONTINUE_SEARCH_SEH = 0x1
 )
 
 type systeminfo struct {
diff --git a/src/runtime/defs_windows_386.go b/src/runtime/defs_windows_386.go
index b11b155..8cf2bfc 100644
--- a/src/runtime/defs_windows_386.go
+++ b/src/runtime/defs_windows_386.go
@@ -79,3 +79,10 @@
 	print("fs      ", hex(r.segfs), "\n")
 	print("gs      ", hex(r.seggs), "\n")
 }
+
+// _DISPATCHER_CONTEXT is not defined on 386.
+type _DISPATCHER_CONTEXT struct{}
+
+func (c *_DISPATCHER_CONTEXT) ctx() *context {
+	return nil
+}
diff --git a/src/runtime/defs_windows_amd64.go b/src/runtime/defs_windows_amd64.go
index 20c9c4d..9dbfb40 100644
--- a/src/runtime/defs_windows_amd64.go
+++ b/src/runtime/defs_windows_amd64.go
@@ -80,6 +80,7 @@
 	print("rax     ", hex(r.rax), "\n")
 	print("rbx     ", hex(r.rbx), "\n")
 	print("rcx     ", hex(r.rcx), "\n")
+	print("rdx     ", hex(r.rdx), "\n")
 	print("rdi     ", hex(r.rdi), "\n")
 	print("rsi     ", hex(r.rsi), "\n")
 	print("rbp     ", hex(r.rbp), "\n")
@@ -98,3 +99,18 @@
 	print("fs      ", hex(r.segfs), "\n")
 	print("gs      ", hex(r.seggs), "\n")
 }
+
+type _DISPATCHER_CONTEXT struct {
+	controlPc        uint64
+	imageBase        uint64
+	functionEntry    uintptr
+	establisherFrame uint64
+	targetIp         uint64
+	context          *context
+	languageHandler  uintptr
+	handlerData      uintptr
+}
+
+func (c *_DISPATCHER_CONTEXT) ctx() *context {
+	return c.context
+}
diff --git a/src/runtime/defs_windows_arm.go b/src/runtime/defs_windows_arm.go
index 7a18c95..861a884 100644
--- a/src/runtime/defs_windows_arm.go
+++ b/src/runtime/defs_windows_arm.go
@@ -89,3 +89,18 @@
 func stackcheck() {
 	// TODO: not implemented on ARM
 }
+
+type _DISPATCHER_CONTEXT struct {
+	controlPc        uint32
+	imageBase        uint32
+	functionEntry    uintptr
+	establisherFrame uint32
+	targetIp         uint32
+	context          *context
+	languageHandler  uintptr
+	handlerData      uintptr
+}
+
+func (c *_DISPATCHER_CONTEXT) ctx() *context {
+	return c.context
+}
diff --git a/src/runtime/defs_windows_arm64.go b/src/runtime/defs_windows_arm64.go
index ef2efb1..70e28d2 100644
--- a/src/runtime/defs_windows_arm64.go
+++ b/src/runtime/defs_windows_arm64.go
@@ -87,3 +87,18 @@
 func stackcheck() {
 	// TODO: not implemented on ARM
 }
+
+type _DISPATCHER_CONTEXT struct {
+	controlPc        uint64
+	imageBase        uint64
+	functionEntry    uintptr
+	establisherFrame uint64
+	targetIp         uint64
+	context          *context
+	languageHandler  uintptr
+	handlerData      uintptr
+}
+
+func (c *_DISPATCHER_CONTEXT) ctx() *context {
+	return c.context
+}
diff --git a/src/runtime/duff_loong64.s b/src/runtime/duff_loong64.s
index 7f78e4f..b05502d 100644
--- a/src/runtime/duff_loong64.s
+++ b/src/runtime/duff_loong64.s
@@ -4,904 +4,904 @@
 
 #include "textflag.h"
 
-TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
-	MOVV	R0, 8(R19)
-	ADDV	$8, R19
+TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
+	MOVV	R0, (R20)
+	ADDV	$8, R20
 	RET
 
-TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+TEXT runtime·duffcopy<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
-	MOVV	(R19), R30
-	ADDV	$8, R19
-	MOVV	R30, (R20)
+	MOVV	(R20), R30
 	ADDV	$8, R20
+	MOVV	R30, (R21)
+	ADDV	$8, R21
 
 	RET
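
The duffzero and duffcopy rewrite above is purely a register renumbering for the internal ABI: the zeroing pointer moves from R19 to R20, and duffcopy's source/destination pair moves from R19/R20 to R20/R21; the unrolled body is otherwise unchanged. As a plain-Go illustration of what each MOVV/ADDV pair accomplishes (the compiler jumps partway into the real sequence so that exactly the needed number of 8-byte words gets zeroed), here is a minimal, hypothetical analogue, not runtime code:

	package main

	import (
		"fmt"
		"unsafe"
	)

	// zeroWords mimics duffzero's unrolled body: store a zero word at the
	// pointer, then advance the pointer by 8 bytes, n times over.
	func zeroWords(p unsafe.Pointer, n int) {
		for i := 0; i < n; i++ {
			*(*uint64)(p) = 0
			p = unsafe.Pointer(uintptr(p) + 8)
		}
	}

	func main() {
		buf := []uint64{1, 2, 3, 4}
		zeroWords(unsafe.Pointer(&buf[0]), len(buf))
		fmt.Println(buf) // [0 0 0 0]
	}
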
diff --git a/src/runtime/error.go b/src/runtime/error.go
index 3590ccd..b507f25 100644
--- a/src/runtime/error.go
+++ b/src/runtime/error.go
@@ -93,7 +93,7 @@
 // The address provided is best-effort.
 // The veracity of the result may depend on the platform.
 // Errors providing this method will only be returned as
-// a result of using runtime/debug.SetPanicOnFault.
+// a result of using [runtime/debug.SetPanicOnFault].
 func (e errorAddressString) Addr() uintptr {
 	return e.addr
 }
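
The doc-link fix above touches the Addr method that fault-derived errors expose when [runtime/debug.SetPanicOnFault] is in use. A small sketch of how a caller might observe that address; the address 0x1 below is simply assumed to be unmapped, which holds on typical platforms:

	package main

	import (
		"fmt"
		"runtime/debug"
		"unsafe"
	)

	func main() {
		debug.SetPanicOnFault(true) // turn memory faults into panics instead of crashes
		defer func() {
			if r := recover(); r != nil {
				// Fault errors returned this way carry the faulting address.
				if fe, ok := r.(interface{ Addr() uintptr }); ok {
					fmt.Printf("fault at address %#x\n", fe.Addr())
				}
			}
		}()
		_ = *(*byte)(unsafe.Pointer(uintptr(1))) // deliberately read an unmapped address
	}
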
diff --git a/src/runtime/export_debug_ppc64le_test.go b/src/runtime/export_debug_ppc64le_test.go
new file mode 100644
index 0000000..dd5dce5
--- /dev/null
+++ b/src/runtime/export_debug_ppc64le_test.go
@@ -0,0 +1,131 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64le && linux
+
+package runtime
+
+import (
+	"internal/abi"
+	"internal/goarch"
+	"math"
+	"unsafe"
+)
+
+type sigContext struct {
+	savedRegs sigcontext
+}
+
+func sigctxtSetContextRegister(ctxt *sigctxt, x uint64) {
+	ctxt.regs().gpr[11] = x
+}
+
+func sigctxtAtTrapInstruction(ctxt *sigctxt) bool {
+	return *(*uint32)(unsafe.Pointer(ctxt.sigpc())) == 0x7fe00008 // Trap
+}
+
+func sigctxtStatus(ctxt *sigctxt) uint64 {
+	return ctxt.r20()
+}
+
+func (h *debugCallHandler) saveSigContext(ctxt *sigctxt) {
+	sp := ctxt.sp()
+	sp -= 4 * goarch.PtrSize
+	ctxt.set_sp(sp)
+	*(*uint64)(unsafe.Pointer(uintptr(sp))) = ctxt.link() // save the current lr
+	ctxt.set_link(ctxt.pc())                              // set new lr to the current pc
+	// Write the argument frame size.
+	*(*uintptr)(unsafe.Pointer(uintptr(sp - 32))) = h.argSize
+	// Save current registers.
+	h.sigCtxt.savedRegs = *ctxt.cregs()
+}
+
+// case 0
+func (h *debugCallHandler) debugCallRun(ctxt *sigctxt) {
+	sp := ctxt.sp()
+	memmove(unsafe.Pointer(uintptr(sp)+32), h.argp, h.argSize)
+	if h.regArgs != nil {
+		storeRegArgs(ctxt.cregs(), h.regArgs)
+	}
+	// Push return PC, which should be the signal PC+4, because
+	// the signal PC is the PC of the trap instruction itself.
+	ctxt.set_link(ctxt.pc() + 4)
+	// Set PC to call and context register.
+	ctxt.set_pc(uint64(h.fv.fn))
+	sigctxtSetContextRegister(ctxt, uint64(uintptr(unsafe.Pointer(h.fv))))
+}
+
+// case 1
+func (h *debugCallHandler) debugCallReturn(ctxt *sigctxt) {
+	sp := ctxt.sp()
+	memmove(h.argp, unsafe.Pointer(uintptr(sp)+32), h.argSize)
+	if h.regArgs != nil {
+		loadRegArgs(h.regArgs, ctxt.cregs())
+	}
+	// Restore the old lr from *sp
+	olr := *(*uint64)(unsafe.Pointer(uintptr(sp)))
+	ctxt.set_link(olr)
+	pc := ctxt.pc()
+	ctxt.set_pc(pc + 4) // step to next instruction
+}
+
+// case 2
+func (h *debugCallHandler) debugCallPanicOut(ctxt *sigctxt) {
+	sp := ctxt.sp()
+	memmove(unsafe.Pointer(&h.panic), unsafe.Pointer(uintptr(sp)+32), 2*goarch.PtrSize)
+	ctxt.set_pc(ctxt.pc() + 4)
+}
+
+// case 8
+func (h *debugCallHandler) debugCallUnsafe(ctxt *sigctxt) {
+	sp := ctxt.sp()
+	reason := *(*string)(unsafe.Pointer(uintptr(sp) + 40))
+	h.err = plainError(reason)
+	ctxt.set_pc(ctxt.pc() + 4)
+}
+
+// case 16
+func (h *debugCallHandler) restoreSigContext(ctxt *sigctxt) {
+	// Restore all registers except for pc and sp
+	pc, sp := ctxt.pc(), ctxt.sp()
+	*ctxt.cregs() = h.sigCtxt.savedRegs
+	ctxt.set_pc(pc + 4)
+	ctxt.set_sp(sp)
+}
+
+// storeRegArgs sets up argument registers in the signal
+// context state from an abi.RegArgs.
+//
+// Both src and dst must be non-nil.
+func storeRegArgs(dst *sigcontext, src *abi.RegArgs) {
+	// Gprs R3..R10, R14..R17 are used to pass int arguments in registers on PPC64
+	for i := 0; i < 12; i++ {
+		if i > 7 {
+			dst.gp_regs[i+6] = uint64(src.Ints[i])
+		} else {
+			dst.gp_regs[i+3] = uint64(src.Ints[i])
+		}
+	}
+	// Fprs F1..F13 are used to pass float arguments in registers on PPC64
+	for i := 0; i < 12; i++ {
+		dst.fp_regs[i+1] = math.Float64frombits(src.Floats[i])
+	}
+
+}
+
+func loadRegArgs(dst *abi.RegArgs, src *sigcontext) {
+	// Gprs R3..R10, R14..R17 are used to pass int arguments in registers on PPC64
+	for i := range [12]int{} {
+		if i > 7 {
+			dst.Ints[i] = uintptr(src.gp_regs[i+6])
+		} else {
+			dst.Ints[i] = uintptr(src.gp_regs[i+3])
+		}
+	}
+	// Fprs F1..F13 are used to pass float arguments in registers on PPC64
+	for i := range [12]int{} {
+		dst.Floats[i] = math.Float64bits(src.fp_regs[i+1])
+	}
+
+}
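
storeRegArgs and loadRegArgs above fold two PPC64 integer-register ranges (R3..R10, then R14..R17) into a single 12-iteration loop. A quick stand-alone check of that index arithmetic, for illustration only:

	package main

	import "fmt"

	func main() {
		// i <= 7 maps Ints[i] to gp_regs[i+3] (R3..R10);
		// i > 7  maps Ints[i] to gp_regs[i+6] (R14..R17).
		for i := 0; i < 12; i++ {
			reg := i + 3
			if i > 7 {
				reg = i + 6
			}
			fmt.Printf("Ints[%2d] -> gp_regs[%2d] (R%d)\n", i, reg, reg)
		}
	}
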
diff --git a/src/runtime/export_debug_test.go b/src/runtime/export_debug_test.go
index 76dc206..7ee73ef 100644
--- a/src/runtime/export_debug_test.go
+++ b/src/runtime/export_debug_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (amd64 || arm64) && linux
+//go:build (amd64 || arm64 || ppc64le) && linux
 
 package runtime
 
diff --git a/src/runtime/export_debuglog_test.go b/src/runtime/export_debuglog_test.go
index f12aab0..04ac79f 100644
--- a/src/runtime/export_debuglog_test.go
+++ b/src/runtime/export_debuglog_test.go
@@ -35,12 +35,12 @@
 }
 
 func ResetDebugLog() {
-	stopTheWorld(stwForTestResetDebugLog)
+	stw := stopTheWorld(stwForTestResetDebugLog)
 	for l := allDloggers; l != nil; l = l.allLink {
 		l.w.write = 0
 		l.w.tick, l.w.nano = 0, 0
 		l.w.r.begin, l.w.r.end = 0, 0
 		l.w.r.tick, l.w.r.nano = 0, 0
 	}
-	startTheWorld()
+	startTheWorld(stw)
 }
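
This hunk, like the export_test.go and heapdump.go hunks further down, switches stopTheWorld from a bare paired call to one that returns a handle which startTheWorld then consumes. A user-level analogue of that acquire/release-handle pattern, with made-up names rather than real runtime APIs:

	package main

	import "fmt"

	// worldStop stands in for the handle the real runtime returns;
	// the names here are illustrative only.
	type worldStop struct{ reason string }

	func stopLike(reason string) worldStop {
		fmt.Println("stopping for:", reason)
		return worldStop{reason: reason}
	}

	func startLike(w worldStop) {
		fmt.Println("restarting after:", w.reason)
	}

	func main() {
		stw := stopLike("reset debug log")
		// ... work performed while stopped ...
		startLike(stw)
	}
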
diff --git a/src/runtime/export_linux_test.go b/src/runtime/export_linux_test.go
index 426fd1e..52afd28 100644
--- a/src/runtime/export_linux_test.go
+++ b/src/runtime/export_linux_test.go
@@ -11,7 +11,6 @@
 
 var NewOSProc0 = newosproc0
 var Mincore = mincore
-var Add = add
 
 type Siginfo siginfo
 type Sigevent sigevent
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index f7ce503..e25f748 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -7,7 +7,9 @@
 package runtime
 
 import (
+	"internal/abi"
 	"internal/goarch"
+	"internal/goexperiment"
 	"internal/goos"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
@@ -29,6 +31,8 @@
 var LockedOSThread = lockedOSThread
 var Xadduintptr = atomic.Xadduintptr
 
+var ReadRandomFailed = &readRandomFailed
+
 var Fastlog2 = fastlog2
 
 var Atoi = atoi
@@ -49,9 +53,14 @@
 
 var CgoCheckPointer = cgoCheckPointer
 
+const CrashStackImplemented = crashStackImplemented
+
 const TracebackInnerFrames = tracebackInnerFrames
 const TracebackOuterFrames = tracebackOuterFrames
 
+var MapKeys = keys
+var MapValues = values
+
 var LockPartialOrder = lockPartialOrder
 
 type LockRank lockRank
@@ -72,7 +81,7 @@
 }
 
 func LFStackPop(head *uint64) *LFNode {
-	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
+	return (*LFNode)((*lfstack)(head).pop())
 }
 func LFNodeValidate(node *LFNode) {
 	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
@@ -320,6 +329,14 @@
 // no valid racectx, but if we're instantiated in the runtime_test package,
 // we might accidentally cause runtime code to be incorrectly instrumented.
 func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) {
+	// This benchmark doesn't work with the allocheaders experiment. It sets up
+	// an elaborate scenario to be able to benchmark the function safely, but doing
+	// this work for the allocheaders' version of the function would be complex.
+	// Just fail instead and rely on the test code making sure we never get here.
+	if goexperiment.AllocHeaders {
+		panic("called benchSetType with allocheaders experiment enabled")
+	}
+
 	// Compute the input sizes.
 	size := t.Size() * uintptr(len)
 
@@ -334,7 +351,7 @@
 
 	// Round up the size to the size class to make the benchmark a little more
 	// realistic. However, validate it, to make sure this is safe.
-	allocSize := roundupsize(size)
+	allocSize := roundupsize(size, t.PtrBytes == 0)
 	if s.npages*pageSize < allocSize {
 		panic("backing span not large enough for benchmark")
 	}
@@ -368,9 +385,9 @@
 var ReadUnaligned64 = readUnaligned64
 
 func CountPagesInUse() (pagesInUse, counted uintptr) {
-	stopTheWorld(stwForTestCountPagesInUse)
+	stw := stopTheWorld(stwForTestCountPagesInUse)
 
-	pagesInUse = uintptr(mheap_.pagesInUse.Load())
+	pagesInUse = mheap_.pagesInUse.Load()
 
 	for _, s := range mheap_.allspans {
 		if s.state.get() == mSpanInUse {
@@ -378,14 +395,14 @@
 		}
 	}
 
-	startTheWorld()
+	startTheWorld(stw)
 
 	return
 }
 
-func Fastrand() uint32          { return fastrand() }
-func Fastrand64() uint64        { return fastrand64() }
-func Fastrandn(n uint32) uint32 { return fastrandn(n) }
+func Fastrand() uint32          { return uint32(rand()) }
+func Fastrand64() uint64        { return rand() }
+func Fastrandn(n uint32) uint32 { return randn(n) }
 
 type ProfBuf profBuf
 
@@ -403,7 +420,7 @@
 )
 
 func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
-	return (*profBuf)(p).read(profBufReadMode(mode))
+	return (*profBuf)(p).read(mode)
 }
 
 func (p *ProfBuf) Close() {
@@ -411,35 +428,50 @@
 }
 
 func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
-	stopTheWorld(stwForTestReadMetricsSlow)
+	stw := stopTheWorld(stwForTestReadMetricsSlow)
 
 	// Initialize the metrics beforehand because this could
 	// allocate and skew the stats.
 	metricsLock()
 	initMetrics()
-	metricsUnlock()
 
 	systemstack(func() {
+		// Donate the racectx to g0. readMetricsLocked calls into the race detector
+		// via map access.
+		getg().racectx = getg().m.curg.racectx
+
+		// Read the metrics once before in case it allocates and skews the metrics.
+		// readMetricsLocked is designed to only allocate the first time it is called
+		// with a given slice of samples. In effect, this extra read tests that this
+		// remains true, since otherwise the second readMetricsLocked below could
+		// allocate before it returns.
+		readMetricsLocked(samplesp, len, cap)
+
 		// Read memstats first. It's going to flush
 		// the mcaches which readMetrics does not do, so
 		// going the other way around may result in
 		// inconsistent statistics.
 		readmemstats_m(memStats)
+
+		// Read metrics again. We need to be sure we're on the
+		// system stack with readmemstats_m so that we don't call into
+		// the stack allocator and adjust metrics between there and here.
+		readMetricsLocked(samplesp, len, cap)
+
+		// Undo the donation.
+		getg().racectx = 0
 	})
+	metricsUnlock()
 
-	// Read metrics off the system stack.
-	//
-	// The only part of readMetrics that could allocate
-	// and skew the stats is initMetrics.
-	readMetrics(samplesp, len, cap)
-
-	startTheWorld()
+	startTheWorld(stw)
 }
 
+var DoubleCheckReadMemStats = &doubleCheckReadMemStats
+
 // ReadMemStatsSlow returns both the runtime-computed MemStats and
 // MemStats accumulated by scanning the heap.
 func ReadMemStatsSlow() (base, slow MemStats) {
-	stopTheWorld(stwForTestReadMemStatsSlow)
+	stw := stopTheWorld(stwForTestReadMemStatsSlow)
 
 	// Run on the system stack to avoid stack growth allocation.
 	systemstack(func() {
@@ -485,15 +517,15 @@
 		// Collect per-sizeclass free stats.
 		var smallFree uint64
 		for i := 0; i < _NumSizeClasses; i++ {
-			slow.Frees += uint64(m.smallFreeCount[i])
-			bySize[i].Frees += uint64(m.smallFreeCount[i])
-			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
-			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
+			slow.Frees += m.smallFreeCount[i]
+			bySize[i].Frees += m.smallFreeCount[i]
+			bySize[i].Mallocs += m.smallFreeCount[i]
+			smallFree += m.smallFreeCount[i] * uint64(class_to_size[i])
 		}
-		slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
+		slow.Frees += m.tinyAllocCount + m.largeFreeCount
 		slow.Mallocs += slow.Frees
 
-		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree
+		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
 
 		for i := range slow.BySize {
 			slow.BySize[i].Mallocs = bySize[i].Mallocs
@@ -516,7 +548,7 @@
 		getg().m.mallocing--
 	})
 
-	startTheWorld()
+	startTheWorld(stw)
 	return
 }
 
@@ -554,6 +586,10 @@
 	rw rwmutex
 }
 
+func (rw *RWMutex) Init() {
+	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
+}
+
 func (rw *RWMutex) RLock() {
 	rw.rw.rlock()
 }
@@ -582,6 +618,10 @@
 	return h.buckets == nil
 }
 
+func OverLoadFactor(count int, B uint8) bool {
+	return overLoadFactor(count, B)
+}
+
 func LockOSCounts() (external, internal uint32) {
 	gp := getg()
 	if gp.m.lockedExt+gp.m.lockedInt == 0 {
@@ -681,6 +721,15 @@
 
 func G0StackOverflow() {
 	systemstack(func() {
+		g0 := getg()
+		sp := getcallersp()
+		// The stack bounds for the g0 stack are not always precise.
+		// Use an artificially small stack to trigger a stack overflow
+		// without actually running out of the system stack (which may seg fault).
+		g0.stack.lo = sp - 4096 - stackSystem
+		g0.stackguard0 = g0.stack.lo + stackGuard
+		g0.stackguard1 = g0.stackguard0
+
 		stackOverflow(nil)
 	})
 }
@@ -789,7 +838,7 @@
 // SummarizeSlow is a slow but more obviously correct implementation
 // of (*pallocBits).summarize. Used for testing.
 func SummarizeSlow(b *PallocBits) PallocSum {
-	var start, max, end uint
+	var start, most, end uint
 
 	const N = uint(len(b)) * 64
 	for start < N && (*pageBits)(b).get(start) == 0 {
@@ -805,11 +854,9 @@
 		} else {
 			run = 0
 		}
-		if run > max {
-			max = run
-		}
+		most = max(most, run)
 	}
-	return PackPallocSum(start, max, end)
+	return PackPallocSum(start, most, end)
 }
 
 // Expose non-trivial helpers for testing.
@@ -1285,7 +1332,7 @@
 }
 
 func PageCachePagesLeaked() (leaked uintptr) {
-	stopTheWorld(stwForTestPageCachePagesLeaked)
+	stw := stopTheWorld(stwForTestPageCachePagesLeaked)
 
 	// Walk over destroyed Ps and look for unflushed caches.
 	deadp := allp[len(allp):cap(allp)]
@@ -1297,10 +1344,22 @@
 		}
 	}
 
-	startTheWorld()
+	startTheWorld(stw)
 	return
 }
 
+type Mutex = mutex
+
+var Lock = lock
+var Unlock = unlock
+
+var MutexContended = mutexContended
+
+func SemRootLock(addr *uint32) *mutex {
+	root := semtable.rootFor(addr)
+	return &root.lock
+}
+
 var Semacquire = semacquire
 var Semrelease1 = semrelease1
 
@@ -1329,7 +1388,7 @@
 //
 // Returns true if there actually was a waiter to be dequeued.
 func (t *SemTable) Dequeue(addr *uint32) bool {
-	s, _ := t.semTable.rootFor(addr).dequeue(addr)
+	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
 	if s != nil {
 		releaseSudog(s)
 		return true
@@ -1362,7 +1421,7 @@
 
 func MSpanCountAlloc(ms *MSpan, bits []byte) int {
 	s := (*mspan)(ms)
-	s.nelems = uintptr(len(bits) * 8)
+	s.nelems = uint16(len(bits) * 8)
 	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
 	result := s.countAlloc()
 	s.gcmarkBits = nil
@@ -1819,10 +1878,6 @@
 	s.i.setEmpty(chunkIdx(ci))
 }
 
-func (s *ScavengeIndex) SetNoHugePage(ci ChunkIdx) {
-	s.i.setNoHugePage(chunkIdx(ci))
-}
-
 func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
 	sc0 := scavChunkData{
 		gen:            gen,
@@ -1885,24 +1940,8 @@
 
 var AlignUp = alignUp
 
-// BlockUntilEmptyFinalizerQueue blocks until either the finalizer
-// queue is emptied (and the finalizers have executed) or the timeout
-// is reached. Returns true if the finalizer queue was emptied.
 func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
-	start := nanotime()
-	for nanotime()-start < timeout {
-		lock(&finlock)
-		// We know the queue has been drained when both finq is nil
-		// and the finalizer g has stopped executing.
-		empty := finq == nil
-		empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
-		unlock(&finlock)
-		if empty {
-			return true
-		}
-		Gosched()
-	}
-	return false
+	return blockUntilEmptyFinalizerQueue(timeout)
 }
 
 func FrameStartLine(f *Frame) int {
@@ -1934,3 +1973,29 @@
 func GetPinnerLeakPanic() func() {
 	return pinnerLeakPanic
 }
+
+var testUintptr uintptr
+
+func MyGenericFunc[T any]() {
+	systemstack(func() {
+		testUintptr = 4
+	})
+}
+
+func UnsafePoint(pc uintptr) bool {
+	fi := findfunc(pc)
+	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
+	switch v {
+	case abi.UnsafePointUnsafe:
+		return true
+	case abi.UnsafePointSafe:
+		return false
+	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
+		// These are all interruptible; they just encode a nonstandard
+		// way of recovering when interrupted.
+		return false
+	default:
+		var buf [20]byte
+		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
+	}
+}
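
One of the cleanups in export_test.go above replaces a hand-rolled comparison in SummarizeSlow with the max builtin that Go 1.21 introduced. A minimal illustration of that builtin:

	package main

	import "fmt"

	func main() {
		var most uint
		for _, run := range []uint{3, 7, 2} {
			most = max(most, run) // built-in max, Go 1.21+
		}
		fmt.Println(most) // 7
	}
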
diff --git a/src/runtime/extern.go b/src/runtime/extern.go
index 26dcf0b..e42122f 100644
--- a/src/runtime/extern.go
+++ b/src/runtime/extern.go
@@ -5,7 +5,7 @@
 /*
 Package runtime contains operations that interact with Go's runtime system,
 such as functions to control goroutines. It also includes the low-level type information
-used by the reflect package; see reflect's documentation for the programmable
+used by the reflect package; see [reflect]'s documentation for the programmable
 interface to the run-time type system.
 
 # Environment Variables
@@ -28,7 +28,7 @@
 The supported suffixes include B, KiB, MiB, GiB, and TiB. These suffixes
 represent quantities of bytes as defined by the IEC 80000-13 standard. That is,
 they are based on powers of two: KiB means 2^10 bytes, MiB means 2^20 bytes,
-and so on. The default setting is math.MaxInt64, which effectively disables the
+and so on. The default setting is [math.MaxInt64], which effectively disables the
 memory limit. [runtime/debug.SetMemoryLimit] allows changing this limit at run
 time.
 
@@ -55,6 +55,13 @@
 	cgocheck mode can be enabled using GOEXPERIMENT (which
 	requires a rebuild), see https://pkg.go.dev/internal/goexperiment for details.
 
+	disablethp: setting disablethp=1 on Linux disables transparent huge pages for the heap.
+	It has no effect on other platforms. disablethp is meant for compatibility with versions
+	of Go before 1.21, which stopped working around a Linux kernel default that can result
+	in significant memory overuse. See https://go.dev/issue/64332. This setting will be
+	removed in a future release, so operators should tweak their Linux configuration to suit
+	their needs before then. See https://go.dev/doc/gc-guide#Linux_transparent_huge_pages.
+
 	dontfreezetheworld: by default, the start of a fatal panic or throw
 	"freezes the world", preempting all threads to stop all running
 	goroutines, which makes it possible to traceback all goroutines, and
@@ -145,6 +152,18 @@
 	risk in that scenario. Currently not supported on Windows, plan9 or js/wasm. Setting this
 	option for some applications can produce large traces, so use with care.
 
+	panicnil: setting panicnil=1 disables the runtime error when calling panic with nil
+	interface value or an untyped nil.
+
+	runtimecontentionstacks: setting runtimecontentionstacks=1 enables inclusion of call stacks
+	related to contention on runtime-internal locks in the "mutex" profile, subject to the
+	MutexProfileFraction setting. When runtimecontentionstacks=0, contention on
+	runtime-internal locks will report as "runtime._LostContendedRuntimeLock". When
+	runtimecontentionstacks=1, the call stacks will correspond to the unlock call that released
+	the lock. But instead of the value corresponding to the amount of contention that call
+	stack caused, it corresponds to the amount of time the caller of unlock had to wait in its
+	original call to lock. A future release is expected to align those and remove this setting.
+
 	invalidptr: invalidptr=1 (the default) causes the garbage collector and stack
 	copier to crash the program if an invalid pointer value (for example, 1)
 	is found in a pointer-typed location. Setting invalidptr=0 disables this check.
@@ -188,6 +207,10 @@
 	This increases tracer overhead, but could be helpful as a workaround or for
 	debugging unexpected regressions caused by frame pointer unwinding.
 
+	traceadvanceperiod: the approximate period in nanoseconds between trace generations. Only
+	applies if a program is built with GOEXPERIMENT=exectracer2. Used primarily for testing
+	and debugging the execution tracer.
+
 	asyncpreemptoff: asyncpreemptoff=1 disables signal-based
 	asynchronous goroutine preemption. This makes some loops
 	non-preemptible for long periods, which may delay GC and
@@ -195,17 +218,17 @@
 	because it also disables the conservative stack scanning used
 	for asynchronously preempted goroutines.
 
-The net and net/http packages also refer to debugging variables in GODEBUG.
+The [net] and [net/http] packages also refer to debugging variables in GODEBUG.
 See the documentation for those packages for details.
 
 The GOMAXPROCS variable limits the number of operating system threads that
 can execute user-level Go code simultaneously. There is no limit to the number of threads
 that can be blocked in system calls on behalf of Go code; those do not count against
-the GOMAXPROCS limit. This package's GOMAXPROCS function queries and changes
+the GOMAXPROCS limit. This package's [GOMAXPROCS] function queries and changes
 the limit.
 
 The GORACE variable configures the race detector, for programs built using -race.
-See https://golang.org/doc/articles/race_detector.html for details.
+See the [Race Detector article] for details.
 
 The GOTRACEBACK variable controls the amount of output generated when a Go
 program fails due to an unrecovered panic or an unexpected runtime condition.
@@ -224,14 +247,13 @@
 GOTRACEBACK=wer is like “crash” but doesn't disable Windows Error Reporting (WER).
 For historical reasons, the GOTRACEBACK settings 0, 1, and 2 are synonyms for
 none, all, and system, respectively.
-The runtime/debug package's SetTraceback function allows increasing the
+The [runtime/debug.SetTraceback] function allows increasing the
 amount of output at run time, but it cannot reduce the amount below that
 specified by the environment variable.
-See https://golang.org/pkg/runtime/debug/#SetTraceback.
 
 The GOARCH, GOOS, GOPATH, and GOROOT environment variables complete
 the set of Go environment variables. They influence the building of Go programs
-(see https://golang.org/cmd/go and https://golang.org/pkg/go/build).
+(see [cmd/go] and [go/build]).
 GOARCH, GOOS, and GOROOT are recorded at compile time and made available by
 constants or functions in this package, but they do not influence the execution
 of the run-time system.
@@ -254,6 +276,8 @@
     encounters an unrecoverable panic that would otherwise override the value
     of GOTRACEBACK, the goroutine stack, registers, and other memory related
     information are omitted.
+
+[Race Detector article]: https://go.dev/doc/articles/race_detector
 */
 package runtime
 
@@ -265,7 +289,7 @@
 // Caller reports file and line number information about function invocations on
 // the calling goroutine's stack. The argument skip is the number of stack frames
 // to ascend, with 0 identifying the caller of Caller.  (For historical reasons the
-// meaning of skip differs between Caller and Callers.) The return values report the
+// meaning of skip differs between Caller and [Callers].) The return values report the
 // program counter, file name, and line number within the file of the corresponding
 // call. The boolean ok is false if it was not possible to recover the information.
 func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
@@ -285,10 +309,10 @@
 // It returns the number of entries written to pc.
 //
 // To translate these PCs into symbolic information such as function
-// names and line numbers, use CallersFrames. CallersFrames accounts
+// names and line numbers, use [CallersFrames]. CallersFrames accounts
 // for inlined functions and adjusts the return program counters into
 // call program counters. Iterating over the returned slice of PCs
-// directly is discouraged, as is using FuncForPC on any of the
+// directly is discouraged, as is using [FuncForPC] on any of the
 // returned PCs, since these cannot account for inlining or return
 // program counter adjustment.
 func Callers(skip int, pc []uintptr) int {
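
Of the GODEBUG knobs documented above, panicnil is the easiest to observe in isolation. A small sketch, assuming a Go 1.21+ toolchain; run it with and without GODEBUG=panicnil=1 and compare what recover reports:

	package main

	import "fmt"

	func main() {
		defer func() {
			// Default: panic(nil) is wrapped in a *runtime.PanicNilError.
			// With GODEBUG=panicnil=1 the recovered value is nil, as it was
			// before Go 1.21.
			fmt.Printf("recovered: %#v\n", recover())
		}()
		panic(nil)
	}
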
diff --git a/src/runtime/fds_nonunix.go b/src/runtime/fds_nonunix.go
new file mode 100644
index 0000000..81e59f3
--- /dev/null
+++ b/src/runtime/fds_nonunix.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package runtime
+
+func checkfds() {
+	// Nothing to do on non-Unix platforms.
+}
diff --git a/src/runtime/fds_test.go b/src/runtime/fds_test.go
new file mode 100644
index 0000000..8d349ec
--- /dev/null
+++ b/src/runtime/fds_test.go
@@ -0,0 +1,78 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package runtime_test
+
+import (
+	"internal/testenv"
+	"os"
+	"strings"
+	"testing"
+)
+
+func TestCheckFDs(t *testing.T) {
+	if *flagQuick {
+		t.Skip("-quick")
+	}
+
+	testenv.MustHaveGoBuild(t)
+
+	fdsBin, err := buildTestProg(t, "testfds")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	i, err := os.CreateTemp(t.TempDir(), "fds-input")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := i.Write([]byte("stdin")); err != nil {
+		t.Fatal(err)
+	}
+	if err := i.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	o, err := os.CreateTemp(t.TempDir(), "fds-output")
+	if err != nil {
+		t.Fatal(err)
+	}
+	outputPath := o.Name()
+	if err := o.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	env := []string{"TEST_OUTPUT=" + outputPath}
+	for _, e := range os.Environ() {
+		if strings.HasPrefix(e, "GODEBUG=") || strings.HasPrefix(e, "GOTRACEBACK=") {
+			continue
+		}
+		env = append(env, e)
+	}
+
+	proc, err := os.StartProcess(fdsBin, []string{fdsBin}, &os.ProcAttr{
+		Env:   env,
+		Files: []*os.File{},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	ps, err := proc.Wait()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ps.ExitCode() != 0 {
+		t.Fatalf("testfds failed: %d", ps.ExitCode())
+	}
+
+	fc, err := os.ReadFile(outputPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(fc) != "" {
+		t.Errorf("unexpected file content, got: %q", string(fc))
+	}
+}
diff --git a/src/runtime/fds_unix.go b/src/runtime/fds_unix.go
new file mode 100644
index 0000000..7182ef0
--- /dev/null
+++ b/src/runtime/fds_unix.go
@@ -0,0 +1,45 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package runtime
+
+func checkfds() {
+	if islibrary || isarchive {
+		// If the program is actually a library, presumably being consumed by
+		// another program, we don't want to mess around with the file
+		// descriptors.
+		return
+	}
+
+	const (
+		// F_GETFD, EBADF, O_RDWR are standard across all unixes we support, so
+		// we define them here rather than in each of the OS specific files.
+		F_GETFD = 0x01
+		EBADF   = 0x09
+		O_RDWR  = 0x02
+	)
+
+	devNull := []byte("/dev/null\x00")
+	for i := 0; i < 3; i++ {
+		ret, errno := fcntl(int32(i), F_GETFD, 0)
+		if ret >= 0 {
+			continue
+		}
+
+		if errno != EBADF {
+			print("runtime: unexpected error while checking standard file descriptor ", i, ", errno=", errno, "\n")
+			throw("cannot open standard fds")
+		}
+
+		if ret := open(&devNull[0], O_RDWR, 0); ret < 0 {
+			print("runtime: standard file descriptor ", i, " closed, unable to open /dev/null, errno=", errno, "\n")
+			throw("cannot open standard fds")
+		} else if ret != int32(i) {
+			print("runtime: opened unexpected file descriptor ", ret, " when attempting to open ", i, "\n")
+			throw("cannot open standard fds")
+		}
+	}
+}
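
checkfds above probes descriptors 0-2 with fcntl(fd, F_GETFD) and, if one turns out to be closed, points it at /dev/null. A user-space analogue of just the probing step, assuming Linux (syscall numbers and the deprecated syscall.Syscall shim differ on other systems):

	//go:build linux

	package main

	import (
		"fmt"
		"syscall"
	)

	func main() {
		for fd := uintptr(0); fd < 3; fd++ {
			_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_GETFD, 0)
			switch {
			case errno == syscall.EBADF:
				fmt.Printf("fd %d is closed\n", fd)
			case errno != 0:
				fmt.Printf("fd %d: unexpected errno %d\n", fd, errno)
			default:
				fmt.Printf("fd %d is open\n", fd)
			}
		}
	}
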
diff --git a/src/runtime/funcdata.h b/src/runtime/funcdata.h
index edc0316..4bbc58e 100644
--- a/src/runtime/funcdata.h
+++ b/src/runtime/funcdata.h
@@ -35,9 +35,9 @@
 // defines the pointer map for the function's arguments.
 // GO_ARGS should be the first instruction in a function that uses it.
 // It can be omitted if there are no arguments at all.
-// GO_ARGS is inserted implicitly by the linker for any function whose
-// name starts with a middle-dot and that also has a Go prototype; it
-// is therefore usually not necessary to write explicitly.
+// GO_ARGS is inserted implicitly by the assembler for any function
+// whose package-qualified symbol name belongs to the current package;
+// it is therefore usually not necessary to write explicitly.
 #define GO_ARGS	FUNCDATA $FUNCDATA_ArgsPointerMaps, go_args_stackmap(SB)
 
 // GO_RESULTS_INITIALIZED indicates that the assembly function
diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go
index bd01e36..c6759a1 100644
--- a/src/runtime/gc_test.go
+++ b/src/runtime/gc_test.go
@@ -6,6 +6,7 @@
 
 import (
 	"fmt"
+	"internal/goexperiment"
 	"math/rand"
 	"os"
 	"reflect"
@@ -457,11 +458,17 @@
 }
 
 func benchSetType[T any](b *testing.B) {
+	if goexperiment.AllocHeaders {
+		b.Skip("not supported with allocation headers experiment")
+	}
 	b.SetBytes(int64(unsafe.Sizeof(*new(T))))
 	runtime.BenchSetType[T](b.N, b.ResetTimer)
 }
 
 func benchSetTypeSlice[T any](b *testing.B, len int) {
+	if goexperiment.AllocHeaders {
+		b.Skip("not supported with allocation headers experiment")
+	}
 	b.SetBytes(int64(unsafe.Sizeof(*new(T)) * uintptr(len)))
 	runtime.BenchSetTypeSlice[T](b.N, b.ResetTimer, len)
 }
@@ -570,6 +577,11 @@
 	}
 }
 
+func init() {
+	// Enable ReadMemStats' double-check mode.
+	*runtime.DoubleCheckReadMemStats = true
+}
+
 func TestReadMemStats(t *testing.T) {
 	base, slow := runtime.ReadMemStatsSlow()
 	if base != slow {
@@ -929,3 +941,7 @@
 		t.Fatalf("expected %q, but got %q", want, got)
 	}
 }
+
+func TestMyGenericFunc(t *testing.T) {
+	runtime.MyGenericFunc[int]()
+}
diff --git a/src/runtime/gcinfo_test.go b/src/runtime/gcinfo_test.go
index 787160d..5f72caf 100644
--- a/src/runtime/gcinfo_test.go
+++ b/src/runtime/gcinfo_test.go
@@ -91,10 +91,17 @@
 
 func verifyGCInfo(t *testing.T, name string, p any, mask0 []byte) {
 	mask := runtime.GCMask(p)
-	if !bytes.Equal(mask, mask0) {
-		t.Errorf("bad GC program for %v:\nwant %+v\ngot  %+v", name, mask0, mask)
+	if bytes.HasPrefix(mask, mask0) {
+		// Just the prefix matching is OK.
+		//
+		// The Go runtime's pointer/scalar iterator generates pointers beyond
+		// the size of the type, up to the size of the size class. This space
+		// is safe for the GC to scan since it's zero, and GCBits checks to
+		// make sure that's true. But we need to handle the fact that the bitmap
+		// may be larger than we expect.
 		return
 	}
+	t.Errorf("bad GC program for %v:\nwant %+v\ngot  %+v", name, mask0, mask)
 }
 
 func trimDead(mask []byte) []byte {
diff --git a/src/runtime/hash_test.go b/src/runtime/hash_test.go
index 6562829..c1d4bfa 100644
--- a/src/runtime/hash_test.go
+++ b/src/runtime/hash_test.go
@@ -513,7 +513,7 @@
 	// find c such that Prob(mean-c*stddev < x < mean+c*stddev)^N > .9999
 	for c = 0.0; math.Pow(math.Erf(c/math.Sqrt(2)), float64(N)) < .9999; c += .1 {
 	}
-	c *= 8.0 // allowed slack - we don't need to be perfectly random
+	c *= 11.0 // allowed slack: 40% to 60% - we don't need to be perfectly random
 	mean := .5 * REP
 	stddev := .5 * math.Sqrt(REP)
 	low := int(mean - c*stddev)
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 8ddec8b..276c5bf 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -14,12 +14,13 @@
 import (
 	"internal/abi"
 	"internal/goarch"
+	"internal/goexperiment"
 	"unsafe"
 )
 
 //go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
 func runtime_debug_WriteHeapDump(fd uintptr) {
-	stopTheWorld(stwWriteHeapDump)
+	stw := stopTheWorld(stwWriteHeapDump)
 
 	// Keep m on this G's stack instead of the system stack.
 	// Both readmemstats_m and writeheapdump_m have pretty large
@@ -36,7 +37,7 @@
 		writeheapdump_m(fd, &m)
 	})
 
-	startTheWorld()
+	startTheWorld(stw)
 }
 
 const (
@@ -259,7 +260,7 @@
 	pcdata := int32(-1) // Use the entry map at function entry
 	if pc != f.entry() {
 		pc--
-		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, pc, nil)
+		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, pc)
 	}
 	if pcdata == -1 {
 		// We do not have a valid pcdata value but there might be a
@@ -398,7 +399,7 @@
 		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
 		eface := efaceOf(&p.arg)
 		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
-		dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
+		dumpint(uint64(uintptr(eface.data)))
 		dumpint(0) // was p->defer, no longer recorded
 		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
 	}
@@ -488,8 +489,8 @@
 			throw("freemark array doesn't have enough entries")
 		}
 
-		for freeIndex := uintptr(0); freeIndex < s.nelems; freeIndex++ {
-			if s.isFree(freeIndex) {
+		for freeIndex := uint16(0); freeIndex < s.nelems; freeIndex++ {
+			if s.isFree(uintptr(freeIndex)) {
 				freemark[freeIndex] = true
 			}
 		}
@@ -737,16 +738,28 @@
 	for i := uintptr(0); i < nptr/8+1; i++ {
 		tmpbuf[i] = 0
 	}
-
-	hbits := heapBitsForAddr(p, size)
-	for {
-		var addr uintptr
-		hbits, addr = hbits.next()
-		if addr == 0 {
-			break
+	if goexperiment.AllocHeaders {
+		s := spanOf(p)
+		tp := s.typePointersOf(p, size)
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(p + size); addr == 0 {
+				break
+			}
+			i := (addr - p) / goarch.PtrSize
+			tmpbuf[i/8] |= 1 << (i % 8)
 		}
-		i := (addr - p) / goarch.PtrSize
-		tmpbuf[i/8] |= 1 << (i % 8)
+	} else {
+		hbits := heapBitsForAddr(p, size)
+		for {
+			var addr uintptr
+			hbits, addr = hbits.next()
+			if addr == 0 {
+				break
+			}
+			i := (addr - p) / goarch.PtrSize
+			tmpbuf[i/8] |= 1 << (i % 8)
+		}
 	}
 	return bitvector{int32(nptr), &tmpbuf[0]}
 }
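
Both branches of the rewritten dumpbv above (the allocheaders typePointers iterator and the older heapBitsForAddr loop) pack their results identically: pointer word i sets bit i%8 of byte i/8. A tiny stand-alone illustration of that packing:

	package main

	import "fmt"

	func main() {
		ptrWords := []int{0, 3, 9} // word offsets that hold pointers
		buf := make([]byte, 2)
		for _, i := range ptrWords {
			buf[i/8] |= 1 << (i % 8)
		}
		// Bits are least-significant first within each byte.
		fmt.Printf("%08b %08b\n", buf[0], buf[1]) // 00001001 00000010
	}
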
diff --git a/src/runtime/histogram.go b/src/runtime/histogram.go
index 43dfe61..f243667 100644
--- a/src/runtime/histogram.go
+++ b/src/runtime/histogram.go
@@ -134,6 +134,19 @@
 	h.counts[bucket*timeHistNumSubBuckets+subBucket].Add(1)
 }
 
+// write dumps the histogram to the passed metricValue as a float64 histogram.
+func (h *timeHistogram) write(out *metricValue) {
+	hist := out.float64HistOrInit(timeHistBuckets)
+	// The bottom-most bucket, containing negative values, is tracked
+	// separately as underflow, so fill that in manually and then iterate
+	// over the rest.
+	hist.counts[0] = h.underflow.Load()
+	for i := range h.counts {
+		hist.counts[i+1] = h.counts[i].Load()
+	}
+	hist.counts[len(hist.counts)-1] = h.overflow.Load()
+}
+
 const (
 	fInf    = 0x7FF0000000000000
 	fNegInf = 0xFFF0000000000000
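
The new timeHistogram.write above lays out the exported histogram as the underflow count first, then the tracked buckets shifted up by one slot, then the overflow count last. Sketching that layout with plain slices (three tracked buckets, purely for illustration):

	package main

	import "fmt"

	func main() {
		underflow, overflow := uint64(2), uint64(5)
		counts := []uint64{10, 20, 30} // the histogram's own buckets

		out := make([]uint64, len(counts)+2)
		out[0] = underflow
		for i, c := range counts {
			out[i+1] = c
		}
		out[len(out)-1] = overflow
		fmt.Println(out) // [2 10 20 30 5]
	}
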
diff --git a/src/runtime/iface.go b/src/runtime/iface.go
index 87f7c20..bad49a3 100644
--- a/src/runtime/iface.go
+++ b/src/runtime/iface.go
@@ -8,6 +8,7 @@
 	"internal/abi"
 	"internal/goarch"
 	"runtime/internal/atomic"
+	"runtime/internal/sys"
 	"unsafe"
 )
 
@@ -224,13 +225,11 @@
 					pkgPath = rtyp.nameOff(x.PkgPath).Name()
 				}
 				if tname.IsExported() || pkgPath == ipkg {
-					if m != nil {
-						ifn := rtyp.textOff(t.Ifn)
-						if k == 0 {
-							fun0 = ifn // we'll set m.fun[0] at the end
-						} else {
-							methods[k] = ifn
-						}
+					ifn := rtyp.textOff(t.Ifn)
+					if k == 0 {
+						fun0 = ifn // we'll set m.fun[0] at the end
+					} else {
+						methods[k] = ifn
 					}
 					continue imethods
 				}
@@ -407,45 +406,6 @@
 	return
 }
 
-// convI2I returns the new itab to be used for the destination value
-// when converting a value with itab src to the dst interface.
-func convI2I(dst *interfacetype, src *itab) *itab {
-	if src == nil {
-		return nil
-	}
-	if src.inter == dst {
-		return src
-	}
-	return getitab(dst, src._type, false)
-}
-
-func assertI2I(inter *interfacetype, tab *itab) *itab {
-	if tab == nil {
-		// explicit conversions require non-nil interface value.
-		panic(&TypeAssertionError{nil, nil, &inter.Type, ""})
-	}
-	if tab.inter == inter {
-		return tab
-	}
-	return getitab(inter, tab._type, false)
-}
-
-func assertI2I2(inter *interfacetype, i iface) (r iface) {
-	tab := i.tab
-	if tab == nil {
-		return
-	}
-	if tab.inter != inter {
-		tab = getitab(inter, tab._type, true)
-		if tab == nil {
-			return
-		}
-	}
-	r.tab = tab
-	r.data = i.data
-	return
-}
-
 func assertE2I(inter *interfacetype, t *_type) *itab {
 	if t == nil {
 		// explicit conversions require non-nil interface value.
@@ -454,20 +414,212 @@
 	return getitab(inter, t, false)
 }
 
-func assertE2I2(inter *interfacetype, e eface) (r iface) {
-	t := e._type
+func assertE2I2(inter *interfacetype, t *_type) *itab {
 	if t == nil {
-		return
+		return nil
 	}
-	tab := getitab(inter, t, true)
-	if tab == nil {
-		return
-	}
-	r.tab = tab
-	r.data = e.data
-	return
+	return getitab(inter, t, true)
 }
 
+// typeAssert builds an itab for the concrete type t and the
+// interface type s.Inter. If the conversion is not possible it
+// panics if s.CanFail is false and returns nil if s.CanFail is true.
+func typeAssert(s *abi.TypeAssert, t *_type) *itab {
+	var tab *itab
+	if t == nil {
+		if !s.CanFail {
+			panic(&TypeAssertionError{nil, nil, &s.Inter.Type, ""})
+		}
+	} else {
+		tab = getitab(s.Inter, t, s.CanFail)
+	}
+
+	if !abi.UseInterfaceSwitchCache(GOARCH) {
+		return tab
+	}
+
+	// Maybe update the cache, so the next time the generated code
+	// doesn't need to call into the runtime.
+	if cheaprand()&1023 != 0 {
+		// Only bother updating the cache ~1 in 1000 times.
+		return tab
+	}
+	// Load the current cache.
+	oldC := (*abi.TypeAssertCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))
+
+	if cheaprand()&uint32(oldC.Mask) != 0 {
+		// As cache gets larger, choose to update it less often
+		// so we can amortize the cost of building a new cache.
+		return tab
+	}
+
+	// Make a new cache.
+	newC := buildTypeAssertCache(oldC, t, tab)
+
+	// Update cache. Use compare-and-swap so if multiple threads
+	// are fighting to update the cache, at least one of their
+	// updates will stick.
+	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))
+
+	return tab
+}
+
+func buildTypeAssertCache(oldC *abi.TypeAssertCache, typ *_type, tab *itab) *abi.TypeAssertCache {
+	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)
+
+	// Count the number of entries we need.
+	n := 1
+	for _, e := range oldEntries {
+		if e.Typ != 0 {
+			n++
+		}
+	}
+
+	// Figure out how big a table we need.
+	// We need at least one more slot than the number of entries
+	// so that we are guaranteed an empty slot (for termination).
+	newN := n * 2                         // make it at most 50% full
+	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2
+
+	// Allocate the new table.
+	newSize := unsafe.Sizeof(abi.TypeAssertCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.TypeAssertCacheEntry{})
+	newC := (*abi.TypeAssertCache)(mallocgc(newSize, nil, true))
+	newC.Mask = uintptr(newN - 1)
+	newEntries := unsafe.Slice(&newC.Entries[0], newN)
+
+	// Fill the new table.
+	addEntry := func(typ *_type, tab *itab) {
+		h := int(typ.Hash) & (newN - 1)
+		for {
+			if newEntries[h].Typ == 0 {
+				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
+				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
+				return
+			}
+			h = (h + 1) & (newN - 1)
+		}
+	}
+	for _, e := range oldEntries {
+		if e.Typ != 0 {
+			addEntry((*_type)(unsafe.Pointer(e.Typ)), (*itab)(unsafe.Pointer(e.Itab)))
+		}
+	}
+	addEntry(typ, tab)
+
+	return newC
+}
+
+// Empty type assert cache. Contains one entry with a nil Typ (which
+// causes a cache lookup to fail immediately.)
+var emptyTypeAssertCache = abi.TypeAssertCache{Mask: 0}
+
+// interfaceSwitch compares t against the list of cases in s.
+// If t matches case i, interfaceSwitch returns the case index i and
+// an itab for the pair <t, s.Cases[i]>.
+// If there is no match, return N,nil, where N is the number
+// of cases.
+func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) {
+	cases := unsafe.Slice(&s.Cases[0], s.NCases)
+
+	// Results if we don't find a match.
+	case_ := len(cases)
+	var tab *itab
+
+	// Look through each case in order.
+	for i, c := range cases {
+		tab = getitab(c, t, true)
+		if tab != nil {
+			case_ = i
+			break
+		}
+	}
+
+	if !abi.UseInterfaceSwitchCache(GOARCH) {
+		return case_, tab
+	}
+
+	// Maybe update the cache, so the next time the generated code
+	// doesn't need to call into the runtime.
+	if cheaprand()&1023 != 0 {
+		// Only bother updating the cache ~1 in 1000 times.
+		// This ensures we don't waste memory on switches, or
+		// switch arguments, that only happen a few times.
+		return case_, tab
+	}
+	// Load the current cache.
+	oldC := (*abi.InterfaceSwitchCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))
+
+	if cheaprand()&uint32(oldC.Mask) != 0 {
+		// As cache gets larger, choose to update it less often
+		// so we can amortize the cost of building a new cache
+		// (that cost is linear in oldC.Mask).
+		return case_, tab
+	}
+
+	// Make a new cache.
+	newC := buildInterfaceSwitchCache(oldC, t, case_, tab)
+
+	// Update cache. Use compare-and-swap so if multiple threads
+	// are fighting to update the cache, at least one of their
+	// updates will stick.
+	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))
+
+	return case_, tab
+}
+
+// buildInterfaceSwitchCache constructs an interface switch cache
+// containing all the entries from oldC plus the new entry
+// (typ,case_,tab).
+func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache {
+	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)
+
+	// Count the number of entries we need.
+	n := 1
+	for _, e := range oldEntries {
+		if e.Typ != 0 {
+			n++
+		}
+	}
+
+	// Figure out how big a table we need.
+	// We need at least one more slot than the number of entries
+	// so that we are guaranteed an empty slot (for termination).
+	newN := n * 2                         // make it at most 50% full
+	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2
+
+	// Allocate the new table.
+	newSize := unsafe.Sizeof(abi.InterfaceSwitchCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.InterfaceSwitchCacheEntry{})
+	newC := (*abi.InterfaceSwitchCache)(mallocgc(newSize, nil, true))
+	newC.Mask = uintptr(newN - 1)
+	newEntries := unsafe.Slice(&newC.Entries[0], newN)
+
+	// Fill the new table.
+	addEntry := func(typ *_type, case_ int, tab *itab) {
+		h := int(typ.Hash) & (newN - 1)
+		for {
+			if newEntries[h].Typ == 0 {
+				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
+				newEntries[h].Case = case_
+				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
+				return
+			}
+			h = (h + 1) & (newN - 1)
+		}
+	}
+	for _, e := range oldEntries {
+		if e.Typ != 0 {
+			addEntry((*_type)(unsafe.Pointer(e.Typ)), e.Case, (*itab)(unsafe.Pointer(e.Itab)))
+		}
+	}
+	addEntry(typ, case_, tab)
+
+	return newC
+}
+
+// Empty interface switch cache. Contains one entry with a nil Typ (which
+// causes a cache lookup to fail immediately.)
+var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{Mask: 0}
+
 //go:linkname reflect_ifaceE2I reflect.ifaceE2I
 func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
 	*dst = iface{assertE2I(inter, e._type), e.data}
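For context, a minimal Go sketch (not part of the patch) of the probe a lookup against this cache layout performs, mirroring addEntry above: start at the slot chosen by the type's hash, walk linearly, and stop at the first zero Typ, which the guaranteed empty slot provides. The real fast path is emitted by the compiler; the function name here is illustrative and the sketch is written against the runtime-internal types used above.

	func lookupTypeAssertCache(c *abi.TypeAssertCache, typ *_type) (*itab, bool) {
		mask := int(c.Mask)
		entries := unsafe.Slice(&c.Entries[0], mask+1)
		for h := int(typ.Hash) & mask; ; h = (h + 1) & mask {
			e := &entries[h]
			if e.Typ == uintptr(unsafe.Pointer(typ)) {
				// Hit. Itab may be 0 if a failed (CanFail) assertion was cached.
				return (*itab)(unsafe.Pointer(e.Itab)), true
			}
			if e.Typ == 0 {
				// Empty slot terminates the probe: not cached, fall back to the runtime.
				return nil, false
			}
		}
	}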
diff --git a/src/runtime/internal/atomic/atomic_386.go b/src/runtime/internal/atomic/atomic_386.go
index bf2f4b9..e74dcaa 100644
--- a/src/runtime/internal/atomic/atomic_386.go
+++ b/src/runtime/internal/atomic/atomic_386.go
@@ -76,6 +76,24 @@
 //go:noescape
 func Or(ptr *uint32, val uint32)
 
+//go:noescape
+func And32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func Or32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func And64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Or64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Anduintptr(ptr *uintptr, val uintptr) uintptr
+
+//go:noescape
+func Oruintptr(ptr *uintptr, val uintptr) uintptr
+
 // NOTE: Do not add atomicxor8 (XOR is not idempotent).
 
 //go:noescape
diff --git a/src/runtime/internal/atomic/atomic_386.s b/src/runtime/internal/atomic/atomic_386.s
index 724d515..08812c3 100644
--- a/src/runtime/internal/atomic/atomic_386.s
+++ b/src/runtime/internal/atomic/atomic_386.s
@@ -283,3 +283,84 @@
 	LOCK
 	ANDL	BX, (AX)
 	RET
+
+// func And32(addr *uint32, v uint32) old uint32
+TEXT ·And32(SB), NOSPLIT, $0-12
+	MOVL	ptr+0(FP), BX
+	MOVL	val+4(FP), CX
+casloop:
+	MOVL 	CX, DX
+	MOVL	(BX), AX
+	ANDL	AX, DX
+	LOCK
+	CMPXCHGL	DX, (BX)
+	JNZ casloop
+	MOVL 	AX, ret+8(FP)
+	RET
+
+// func Or32(addr *uint32, v uint32) old uint32
+TEXT ·Or32(SB), NOSPLIT, $0-12
+	MOVL	ptr+0(FP), BX
+	MOVL	val+4(FP), CX
+casloop:
+	MOVL 	CX, DX
+	MOVL	(BX), AX
+	ORL	AX, DX
+	LOCK
+	CMPXCHGL	DX, (BX)
+	JNZ casloop
+	MOVL 	AX, ret+8(FP)
+	RET
+
+// func And64(addr *uint64, v uint64) old uint64
+TEXT ·And64(SB), NOSPLIT, $0-20
+	MOVL	ptr+0(FP), BP
+	// DI:SI = v
+	MOVL	val_lo+4(FP), SI
+	MOVL	val_hi+8(FP), DI
+	// DX:AX = *addr
+	MOVL	0(BP), AX
+	MOVL	4(BP), DX
+casloop:
+	// CX:BX = DX:AX (*addr) & DI:SI (mask)
+	MOVL	AX, BX
+	MOVL	DX, CX
+	ANDL	SI, BX
+	ANDL	DI, CX
+	LOCK
+	CMPXCHG8B	0(BP)
+	JNZ casloop
+	MOVL	AX, ret_lo+12(FP)
+	MOVL	DX, ret_hi+16(FP)
+	RET
+
+
+// func Or64(addr *uint64, v uint64) old uint64
+TEXT ·Or64(SB), NOSPLIT, $0-20
+	MOVL	ptr+0(FP), BP
+	// DI:SI = v
+	MOVL	val_lo+4(FP), SI
+	MOVL	val_hi+8(FP), DI
+	// DX:AX = *addr
+	MOVL	0(BP), AX
+	MOVL	4(BP), DX
+casloop:
+	// CX:BX = DX:AX (*addr) | DI:SI (mask)
+	MOVL	AX, BX
+	MOVL	DX, CX
+	ORL	SI, BX
+	ORL	DI, CX
+	LOCK
+	CMPXCHG8B	0(BP)
+	JNZ casloop
+	MOVL	AX, ret_lo+12(FP)
+	MOVL	DX, ret_hi+16(FP)
+	RET
+
+// func Anduintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Anduintptr(SB), NOSPLIT, $0-12
+	JMP	·And32(SB)
+
+// func Oruintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Oruintptr(SB), NOSPLIT, $0-12
+	JMP	·Or32(SB)
diff --git a/src/runtime/internal/atomic/atomic_amd64.go b/src/runtime/internal/atomic/atomic_amd64.go
index 52a8362..b439954 100644
--- a/src/runtime/internal/atomic/atomic_amd64.go
+++ b/src/runtime/internal/atomic/atomic_amd64.go
@@ -84,6 +84,24 @@
 //go:noescape
 func Or(ptr *uint32, val uint32)
 
+//go:noescape
+func And32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func Or32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func And64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Or64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Anduintptr(ptr *uintptr, val uintptr) uintptr
+
+//go:noescape
+func Oruintptr(ptr *uintptr, val uintptr) uintptr
+
 // NOTE: Do not add atomicxor8 (XOR is not idempotent).
 
 //go:noescape
diff --git a/src/runtime/internal/atomic/atomic_amd64.s b/src/runtime/internal/atomic/atomic_amd64.s
index d21514b..ec75bf9 100644
--- a/src/runtime/internal/atomic/atomic_amd64.s
+++ b/src/runtime/internal/atomic/atomic_amd64.s
@@ -223,3 +223,67 @@
 	LOCK
 	ANDL	BX, (AX)
 	RET
+
+// func Or32(addr *uint32, v uint32) old uint32
+TEXT ·Or32(SB), NOSPLIT, $0-20
+	MOVQ	ptr+0(FP), BX
+	MOVL	val+8(FP), CX
+casloop:
+	MOVL 	CX, DX
+	MOVL	(BX), AX
+	ORL	AX, DX
+	LOCK
+	CMPXCHGL	DX, (BX)
+	JNZ casloop
+	MOVL 	AX, ret+16(FP)
+	RET
+
+// func And32(addr *uint32, v uint32) old uint32
+TEXT ·And32(SB), NOSPLIT, $0-20
+	MOVQ	ptr+0(FP), BX
+	MOVL	val+8(FP), CX
+casloop:
+	MOVL 	CX, DX
+	MOVL	(BX), AX
+	ANDL	AX, DX
+	LOCK
+	CMPXCHGL	DX, (BX)
+	JNZ casloop
+	MOVL 	AX, ret+16(FP)
+	RET
+
+// func Or64(addr *uint64, v uint64) old uint64
+TEXT ·Or64(SB), NOSPLIT, $0-24
+	MOVQ	ptr+0(FP), BX
+	MOVQ	val+8(FP), CX
+casloop:
+	MOVQ 	CX, DX
+	MOVQ	(BX), AX
+	ORQ	AX, DX
+	LOCK
+	CMPXCHGQ	DX, (BX)
+	JNZ casloop
+	MOVQ 	AX, ret+16(FP)
+	RET
+
+// func And64(addr *uint64, v uint64) old uint64
+TEXT ·And64(SB), NOSPLIT, $0-24
+	MOVQ	ptr+0(FP), BX
+	MOVQ	val+8(FP), CX
+casloop:
+	MOVQ 	CX, DX
+	MOVQ	(BX), AX
+	ANDQ	AX, DX
+	LOCK
+	CMPXCHGQ	DX, (BX)
+	JNZ casloop
+	MOVQ 	AX, ret+16(FP)
+	RET
+
+// func Anduintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Anduintptr(SB), NOSPLIT, $0-24
+	JMP	·And64(SB)
+
+// func Oruintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Oruintptr(SB), NOSPLIT, $0-24
+	JMP	·Or64(SB)
diff --git a/src/runtime/internal/atomic/atomic_andor_generic.go b/src/runtime/internal/atomic/atomic_andor_generic.go
new file mode 100644
index 0000000..00b4026
--- /dev/null
+++ b/src/runtime/internal/atomic/atomic_andor_generic.go
@@ -0,0 +1,67 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm || s390x || loong64 || mips || mipsle || mips64 || mips64le || wasm
+
+package atomic
+
+//go:nosplit
+func And32(ptr *uint32, val uint32) uint32 {
+	for {
+		old := *ptr
+		if Cas(ptr, old, old&val) {
+			return old
+		}
+	}
+}
+
+//go:nosplit
+func Or32(ptr *uint32, val uint32) uint32 {
+	for {
+		old := *ptr
+		if Cas(ptr, old, old|val) {
+			return old
+		}
+	}
+}
+
+//go:nosplit
+func And64(ptr *uint64, val uint64) uint64 {
+	for {
+		old := *ptr
+		if Cas64(ptr, old, old&val) {
+			return old
+		}
+	}
+}
+
+//go:nosplit
+func Or64(ptr *uint64, val uint64) uint64 {
+	for {
+		old := *ptr
+		if Cas64(ptr, old, old|val) {
+			return old
+		}
+	}
+}
+
+//go:nosplit
+func Anduintptr(ptr *uintptr, val uintptr) uintptr {
+	for {
+		old := *ptr
+		if Casuintptr(ptr, old, old&val) {
+			return old
+		}
+	}
+}
+
+//go:nosplit
+func Oruintptr(ptr *uintptr, val uintptr) uintptr {
+	for {
+		old := *ptr
+		if Casuintptr(ptr, old, old|val) {
+			return old
+		}
+	}
+}
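Outside the runtime, the same old-value-returning OR can be written with the exported sync/atomic compare-and-swap, which is the pattern the generic fallback above uses. A small self-contained sketch (orUint32 is an illustrative name, not a standard API):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// orUint32 mirrors the runtime's generic Or32: retry a compare-and-swap
	// until the OR takes effect, and return the value that was there before.
	func orUint32(addr *uint32, val uint32) uint32 {
		for {
			old := atomic.LoadUint32(addr)
			if atomic.CompareAndSwapUint32(addr, old, old|val) {
				return old
			}
		}
	}

	func main() {
		var flags uint32
		fmt.Println(orUint32(&flags, 0b0101)) // 0: previous value
		fmt.Println(orUint32(&flags, 0b0011)) // 5
		fmt.Println(flags)                    // 7
	}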
diff --git a/src/runtime/internal/atomic/atomic_andor_test.go b/src/runtime/internal/atomic/atomic_andor_test.go
new file mode 100644
index 0000000..a2f3b6f
--- /dev/null
+++ b/src/runtime/internal/atomic/atomic_andor_test.go
@@ -0,0 +1,246 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(61395): move these tests to atomic_test.go once And/Or have
+// implementations for all architectures.
+package atomic_test
+
+import (
+	"runtime/internal/atomic"
+	"testing"
+)
+
+func TestAnd32(t *testing.T) {
+	// Basic sanity check.
+	x := uint32(0xffffffff)
+	for i := uint32(0); i < 32; i++ {
+		old := x
+		v := atomic.And32(&x, ^(1 << i))
+		if r := uint32(0xffffffff) << (i + 1); x != r || v != old {
+			t.Fatalf("clearing bit %#x: want %#x, got new %#x and old %#v", uint32(1<<i), r, x, v)
+		}
+	}
+
+	// Set every bit in array to 1.
+	a := make([]uint32, 1<<12)
+	for i := range a {
+		a[i] = 0xffffffff
+	}
+
+	// Clear array bit-by-bit in different goroutines.
+	done := make(chan bool)
+	for i := 0; i < 32; i++ {
+		m := ^uint32(1 << i)
+		go func() {
+			for i := range a {
+				atomic.And(&a[i], m)
+			}
+			done <- true
+		}()
+	}
+	for i := 0; i < 32; i++ {
+		<-done
+	}
+
+	// Check that the array has been totally cleared.
+	for i, v := range a {
+		if v != 0 {
+			t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint32(0), v)
+		}
+	}
+}
+
+func TestAnd64(t *testing.T) {
+	// Basic sanity check.
+	x := uint64(0xffffffffffffffff)
+	for i := uint64(0); i < 64; i++ {
+		old := x
+		v := atomic.And64(&x, ^(1 << i))
+		if r := uint64(0xffffffffffffffff) << (i + 1); x != r || v != old {
+			t.Fatalf("clearing bit %#x: want %#x, got new %#x and old %#v", uint64(1<<i), r, x, v)
+		}
+	}
+
+	// Set every bit in array to 1.
+	a := make([]uint64, 1<<12)
+	for i := range a {
+		a[i] = 0xffffffffffffffff
+	}
+
+	// Clear array bit-by-bit in different goroutines.
+	done := make(chan bool)
+	for i := 0; i < 64; i++ {
+		m := ^uint64(1 << i)
+		go func() {
+			for i := range a {
+				atomic.And64(&a[i], m)
+			}
+			done <- true
+		}()
+	}
+	for i := 0; i < 64; i++ {
+		<-done
+	}
+
+	// Check that the array has been totally cleared.
+	for i, v := range a {
+		if v != 0 {
+			t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint64(0), v)
+		}
+	}
+}
+
+func TestOr32(t *testing.T) {
+	// Basic sanity check.
+	x := uint32(0)
+	for i := uint32(0); i < 32; i++ {
+		old := x
+		v := atomic.Or32(&x, 1<<i)
+		if r := (uint32(1) << (i + 1)) - 1; x != r || v != old {
+			t.Fatalf("setting bit %#x: want %#x, got new %#x and old %#v", uint32(1<<i), r, x, v)
+		}
+	}
+
+	// Start with every bit in array set to 0.
+	a := make([]uint32, 1<<12)
+
+	// Set every bit in array bit-by-bit in different goroutines.
+	done := make(chan bool)
+	for i := 0; i < 32; i++ {
+		m := uint32(1 << i)
+		go func() {
+			for i := range a {
+				atomic.Or32(&a[i], m)
+			}
+			done <- true
+		}()
+	}
+	for i := 0; i < 32; i++ {
+		<-done
+	}
+
+	// Check that the array has been totally set.
+	for i, v := range a {
+		if v != 0xffffffff {
+			t.Fatalf("a[%v] not fully set: want %#x, got %#x", i, uint32(0xffffffff), v)
+		}
+	}
+}
+
+func TestOr64(t *testing.T) {
+	// Basic sanity check.
+	x := uint64(0)
+	for i := uint64(0); i < 64; i++ {
+		old := x
+		v := atomic.Or64(&x, 1<<i)
+		if r := (uint64(1) << (i + 1)) - 1; x != r || v != old {
+			t.Fatalf("setting bit %#x: want %#x, got new %#x and old %#v", uint64(1<<i), r, x, v)
+		}
+	}
+
+	// Start with every bit in array set to 0.
+	a := make([]uint64, 1<<12)
+
+	// Set every bit in array bit-by-bit in different goroutines.
+	done := make(chan bool)
+	for i := 0; i < 64; i++ {
+		m := uint64(1 << i)
+		go func() {
+			for i := range a {
+				atomic.Or64(&a[i], m)
+			}
+			done <- true
+		}()
+	}
+	for i := 0; i < 64; i++ {
+		<-done
+	}
+
+	// Check that the array has been totally set.
+	for i, v := range a {
+		if v != 0xffffffffffffffff {
+			t.Fatalf("a[%v] not fully set: want %#x, got %#x", i, uint64(0xffffffffffffffff), v)
+		}
+	}
+}
+
+func BenchmarkAnd32(b *testing.B) {
+	var x [128]uint32 // give x its own cache line
+	sink = &x
+	for i := 0; i < b.N; i++ {
+		atomic.And32(&x[63], uint32(i))
+	}
+}
+
+func BenchmarkAnd32Parallel(b *testing.B) {
+	var x [128]uint32 // give x its own cache line
+	sink = &x
+	b.RunParallel(func(pb *testing.PB) {
+		i := uint32(0)
+		for pb.Next() {
+			atomic.And32(&x[63], i)
+			i++
+		}
+	})
+}
+
+func BenchmarkAnd64(b *testing.B) {
+	var x [128]uint64 // give x its own cache line
+	sink = &x
+	for i := 0; i < b.N; i++ {
+		atomic.And64(&x[63], uint64(i))
+	}
+}
+
+func BenchmarkAnd64Parallel(b *testing.B) {
+	var x [128]uint64 // give x its own cache line
+	sink = &x
+	b.RunParallel(func(pb *testing.PB) {
+		i := uint64(0)
+		for pb.Next() {
+			atomic.And64(&x[63], i)
+			i++
+		}
+	})
+}
+
+func BenchmarkOr32(b *testing.B) {
+	var x [128]uint32 // give x its own cache line
+	sink = &x
+	for i := 0; i < b.N; i++ {
+		atomic.Or32(&x[63], uint32(i))
+	}
+}
+
+func BenchmarkOr32Parallel(b *testing.B) {
+	var x [128]uint32 // give x its own cache line
+	sink = &x
+	b.RunParallel(func(pb *testing.PB) {
+		i := uint32(0)
+		for pb.Next() {
+			atomic.Or32(&x[63], i)
+			i++
+		}
+	})
+}
+
+func BenchmarkOr64(b *testing.B) {
+	var x [128]uint64 // give x its own cache line
+	sink = &x
+	for i := 0; i < b.N; i++ {
+		atomic.Or64(&x[63], uint64(i))
+	}
+}
+
+func BenchmarkOr64Parallel(b *testing.B) {
+	var x [128]uint64 // give x its own cache line
+	sink = &x
+	b.RunParallel(func(pb *testing.PB) {
+		i := uint64(0)
+		for pb.Next() {
+			atomic.Or64(&x[63], i)
+			i++
+		}
+	})
+}
diff --git a/src/runtime/internal/atomic/atomic_arm.go b/src/runtime/internal/atomic/atomic_arm.go
index bdb1847..567e951 100644
--- a/src/runtime/internal/atomic/atomic_arm.go
+++ b/src/runtime/internal/atomic/atomic_arm.go
@@ -11,6 +11,10 @@
 	"unsafe"
 )
 
+const (
+	offsetARMHasV7Atomics = unsafe.Offsetof(cpu.ARM.HasV7Atomics)
+)
+
 // Export some functions via linkname to assembly in sync/atomic.
 //
 //go:linkname Xchg
diff --git a/src/runtime/internal/atomic/atomic_arm.s b/src/runtime/internal/atomic/atomic_arm.s
index 92cbe8a..1cf7d8f 100644
--- a/src/runtime/internal/atomic/atomic_arm.s
+++ b/src/runtime/internal/atomic/atomic_arm.s
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+#include "go_asm.h"
 #include "textflag.h"
 #include "funcdata.h"
 
@@ -28,9 +29,11 @@
 	CMP	R0, R2
 	BNE	casfail
 
-	MOVB	runtime·goarm(SB), R8
-	CMP	$7, R8
-	BLT	2(PC)
+#ifndef GOARM_7
+	MOVB	internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11
+	CMP	$0, R11
+	BEQ	2(PC)
+#endif
 	DMB	MB_ISHST
 
 	STREX	R3, (R1), R0
@@ -38,8 +41,10 @@
 	BNE	casl
 	MOVW	$1, R0
 
-	CMP	$7, R8
-	BLT	2(PC)
+#ifndef GOARM_7
+	CMP	$0, R11
+	BEQ	2(PC)
+#endif
 	DMB	MB_ISH
 
 	MOVB	R0, ret+12(FP)
@@ -246,52 +251,62 @@
 	MOVW	addr+0(FP), R1
 	CHECK_ALIGN
 
-	MOVB	runtime·goarm(SB), R11
-	CMP	$7, R11
-	BLT	2(PC)
-	JMP	armCas64<>(SB)
+#ifndef GOARM_7
+	MOVB	internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11
+	CMP	$1, R11
+	BEQ	2(PC)
 	JMP	·goCas64(SB)
+#endif
+	JMP	armCas64<>(SB)
 
 TEXT ·Xadd64(SB),NOSPLIT,$-4-20
 	NO_LOCAL_POINTERS
 	MOVW	addr+0(FP), R1
 	CHECK_ALIGN
 
-	MOVB	runtime·goarm(SB), R11
-	CMP	$7, R11
-	BLT	2(PC)
-	JMP	armXadd64<>(SB)
+#ifndef GOARM_7
+	MOVB	internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11
+	CMP	$1, R11
+	BEQ	2(PC)
 	JMP	·goXadd64(SB)
+#endif
+	JMP	armXadd64<>(SB)
 
 TEXT ·Xchg64(SB),NOSPLIT,$-4-20
 	NO_LOCAL_POINTERS
 	MOVW	addr+0(FP), R1
 	CHECK_ALIGN
 
-	MOVB	runtime·goarm(SB), R11
-	CMP	$7, R11
-	BLT	2(PC)
-	JMP	armXchg64<>(SB)
+#ifndef GOARM_7
+	MOVB	internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11
+	CMP	$1, R11
+	BEQ	2(PC)
 	JMP	·goXchg64(SB)
+#endif
+	JMP	armXchg64<>(SB)
 
 TEXT ·Load64(SB),NOSPLIT,$-4-12
 	NO_LOCAL_POINTERS
 	MOVW	addr+0(FP), R1
 	CHECK_ALIGN
 
-	MOVB	runtime·goarm(SB), R11
-	CMP	$7, R11
-	BLT	2(PC)
-	JMP	armLoad64<>(SB)
+#ifndef GOARM_7
+	MOVB	internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11
+	CMP	$1, R11
+	BEQ	2(PC)
 	JMP	·goLoad64(SB)
+#endif
+	JMP	armLoad64<>(SB)
 
 TEXT ·Store64(SB),NOSPLIT,$-4-12
 	NO_LOCAL_POINTERS
 	MOVW	addr+0(FP), R1
 	CHECK_ALIGN
 
-	MOVB	runtime·goarm(SB), R11
-	CMP	$7, R11
-	BLT	2(PC)
-	JMP	armStore64<>(SB)
+#ifndef GOARM_7
+	MOVB	internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11
+	CMP	$1, R11
+	BEQ	2(PC)
 	JMP	·goStore64(SB)
+#endif
+	JMP	armStore64<>(SB)
diff --git a/src/runtime/internal/atomic/atomic_arm64.go b/src/runtime/internal/atomic/atomic_arm64.go
index 459fb99..c4c56ae 100644
--- a/src/runtime/internal/atomic/atomic_arm64.go
+++ b/src/runtime/internal/atomic/atomic_arm64.go
@@ -67,6 +67,24 @@
 func Or(ptr *uint32, val uint32)
 
 //go:noescape
+func And32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func Or32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func And64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Or64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Anduintptr(ptr *uintptr, val uintptr) uintptr
+
+//go:noescape
+func Oruintptr(ptr *uintptr, val uintptr) uintptr
+
+//go:noescape
 func Cas64(ptr *uint64, old, new uint64) bool
 
 //go:noescape
diff --git a/src/runtime/internal/atomic/atomic_arm64.s b/src/runtime/internal/atomic/atomic_arm64.s
index 5f77d92..3a249d3 100644
--- a/src/runtime/internal/atomic/atomic_arm64.s
+++ b/src/runtime/internal/atomic/atomic_arm64.s
@@ -331,3 +331,81 @@
 	STLXRW	R2, (R0), R3
 	CBNZ	R3, load_store_loop
 	RET
+
+// func Or32(addr *uint32, v uint32) old uint32
+TEXT ·Or32(SB), NOSPLIT, $0-20
+	MOVD	ptr+0(FP), R0
+	MOVW	val+8(FP), R1
+	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
+	CBZ 	R4, load_store_loop
+	LDORALW	R1, (R0), R2
+	MOVD	R2, ret+16(FP)
+	RET
+load_store_loop:
+	LDAXRW	(R0), R2
+	ORR	R1, R2, R3
+	STLXRW	R3, (R0), R4
+	CBNZ	R4, load_store_loop
+	MOVD R2, ret+16(FP)
+	RET
+
+// func And32(addr *uint32, v uint32) old uint32
+TEXT ·And32(SB), NOSPLIT, $0-20
+	MOVD	ptr+0(FP), R0
+	MOVW	val+8(FP), R1
+	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
+	CBZ 	R4, load_store_loop
+	MVN 	R1, R2
+	LDCLRALW	R2, (R0), R3
+	MOVD	R3, ret+16(FP)
+	RET
+load_store_loop:
+	LDAXRW	(R0), R2
+	AND	R1, R2, R3
+	STLXRW	R3, (R0), R4
+	CBNZ	R4, load_store_loop
+	MOVD R2, ret+16(FP)
+	RET
+
+// func Or64(addr *uint64, v uint64) old uint64
+TEXT ·Or64(SB), NOSPLIT, $0-24
+	MOVD	ptr+0(FP), R0
+	MOVD	val+8(FP), R1
+	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
+	CBZ 	R4, load_store_loop
+	LDORALD	R1, (R0), R2
+	MOVD	R2, ret+16(FP)
+	RET
+load_store_loop:
+	LDAXR	(R0), R2
+	ORR	R1, R2, R3
+	STLXR	R3, (R0), R4
+	CBNZ	R4, load_store_loop
+	MOVD 	R2, ret+16(FP)
+	RET
+
+// func And64(addr *uint64, v uint64) old uint64
+TEXT ·And64(SB), NOSPLIT, $0-24
+	MOVD	ptr+0(FP), R0
+	MOVD	val+8(FP), R1
+	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
+	CBZ 	R4, load_store_loop
+	MVN 	R1, R2
+	LDCLRALD	R2, (R0), R3
+	MOVD	R3, ret+16(FP)
+	RET
+load_store_loop:
+	LDAXR	(R0), R2
+	AND	R1, R2, R3
+	STLXR	R3, (R0), R4
+	CBNZ	R4, load_store_loop
+	MOVD 	R2, ret+16(FP)
+	RET
+
+// func Anduintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Anduintptr(SB), NOSPLIT, $0-24
+	B	·And64(SB)
+
+// func Oruintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Oruintptr(SB), NOSPLIT, $0-24
+	B	·Or64(SB)
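The LSE fast paths above implement AND by applying LDCLR to the complement of the operand (the MVN before LDCLRALW/LDCLRALD), since clearing the bits of ^v is the same as ANDing with v. A one-line identity check of that rewrite:

	package main

	import "fmt"

	func main() {
		x, v := uint64(0b1100), uint64(0b1010)
		// x &^ (^v) clears the bits of ^v, which is exactly x & v.
		fmt.Println(x&^(^v) == x&v) // true
	}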
diff --git a/src/runtime/internal/atomic/atomic_mipsx.go b/src/runtime/internal/atomic/atomic_mipsx.go
index 5dd15a0..e3dcde1 100644
--- a/src/runtime/internal/atomic/atomic_mipsx.go
+++ b/src/runtime/internal/atomic/atomic_mipsx.go
@@ -49,11 +49,6 @@
 }
 
 //go:nosplit
-func unlockNoFence() {
-	lock.state = 0
-}
-
-//go:nosplit
 func Xadd64(addr *uint64, delta int64) (new uint64) {
 	lockAndCheck(addr)
 
@@ -85,7 +80,7 @@
 		return true
 	}
 
-	unlockNoFence()
+	unlock()
 	return false
 }
 
diff --git a/src/runtime/internal/atomic/atomic_mipsx.s b/src/runtime/internal/atomic/atomic_mipsx.s
index 390e9ce..8f5fc53 100644
--- a/src/runtime/internal/atomic/atomic_mipsx.s
+++ b/src/runtime/internal/atomic/atomic_mipsx.s
@@ -28,6 +28,7 @@
 	MOVB	R3, ret+12(FP)
 	RET
 cas_fail:
+	SYNC
 	MOVB	R0, ret+12(FP)
 	RET
 
diff --git a/src/runtime/internal/atomic/atomic_ppc64x.go b/src/runtime/internal/atomic/atomic_ppc64x.go
index 998d16e..33a92b5 100644
--- a/src/runtime/internal/atomic/atomic_ppc64x.go
+++ b/src/runtime/internal/atomic/atomic_ppc64x.go
@@ -62,6 +62,24 @@
 func Or(ptr *uint32, val uint32)
 
 //go:noescape
+func And32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func Or32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func And64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Or64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Anduintptr(ptr *uintptr, val uintptr) uintptr
+
+//go:noescape
+func Oruintptr(ptr *uintptr, val uintptr) uintptr
+
+//go:noescape
 func Cas64(ptr *uint64, old, new uint64) bool
 
 //go:noescape
diff --git a/src/runtime/internal/atomic/atomic_ppc64x.s b/src/runtime/internal/atomic/atomic_ppc64x.s
index 04f0ead..75635b9 100644
--- a/src/runtime/internal/atomic/atomic_ppc64x.s
+++ b/src/runtime/internal/atomic/atomic_ppc64x.s
@@ -101,6 +101,7 @@
 	MOVB	R3, ret+16(FP)
 	RET
 cas_fail:
+	LWSYNC
 	MOVB	R0, ret+16(FP)
 	RET
 
@@ -128,6 +129,7 @@
 	MOVB	R3, ret+24(FP)
 	RET
 cas64_fail:
+	LWSYNC
 	MOVB	R0, ret+24(FP)
 	RET
 
@@ -360,3 +362,63 @@
 	STWCCC	R6, (R3)
 	BNE	again
 	RET
+
+// func Or32(addr *uint32, v uint32) old uint32
+TEXT ·Or32(SB), NOSPLIT, $0-20
+	MOVD	ptr+0(FP), R3
+	MOVW	val+8(FP), R4
+	LWSYNC
+again:
+	LWAR	(R3), R6
+	OR	R4, R6, R7
+	STWCCC	R7, (R3)
+	BNE	again
+	MOVW	R6, ret+16(FP)
+	RET
+
+// func And32(addr *uint32, v uint32) old uint32
+TEXT ·And32(SB), NOSPLIT, $0-20
+	MOVD	ptr+0(FP), R3
+	MOVW	val+8(FP), R4
+	LWSYNC
+again:
+	LWAR	(R3),R6
+	AND	R4, R6, R7
+	STWCCC	R7, (R3)
+	BNE	again
+	MOVW	R6, ret+16(FP)
+	RET
+
+// func Or64(addr *uint64, v uint64) old uint64
+TEXT ·Or64(SB), NOSPLIT, $0-24
+	MOVD	ptr+0(FP), R3
+	MOVD	val+8(FP), R4
+	LWSYNC
+again:
+	LDAR	(R3), R6
+	OR	R4, R6, R7
+	STDCCC	R7, (R3)
+	BNE	again
+	MOVD	R6, ret+16(FP)
+	RET
+
+// func And64(addr *uint64, v uint64) old uint64
+TEXT ·And64(SB), NOSPLIT, $0-24
+	MOVD	ptr+0(FP), R3
+	MOVD	val+8(FP), R4
+	LWSYNC
+again:
+	LDAR	(R3),R6
+	AND	R4, R6, R7
+	STDCCC	R7, (R3)
+	BNE	again
+	MOVD	R6, ret+16(FP)
+	RET
+
+// func Anduintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Anduintptr(SB), NOSPLIT, $0-24
+	JMP	·And64(SB)
+
+// func Oruintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Oruintptr(SB), NOSPLIT, $0-24
+	JMP	·Or64(SB)
diff --git a/src/runtime/internal/atomic/atomic_riscv64.go b/src/runtime/internal/atomic/atomic_riscv64.go
index 8f24d61..9fc3837 100644
--- a/src/runtime/internal/atomic/atomic_riscv64.go
+++ b/src/runtime/internal/atomic/atomic_riscv64.go
@@ -58,6 +58,24 @@
 func Or(ptr *uint32, val uint32)
 
 //go:noescape
+func And32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func Or32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func And64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Or64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Anduintptr(ptr *uintptr, val uintptr) uintptr
+
+//go:noescape
+func Oruintptr(ptr *uintptr, val uintptr) uintptr
+
+//go:noescape
 func Cas64(ptr *uint64, old, new uint64) bool
 
 //go:noescape
diff --git a/src/runtime/internal/atomic/atomic_riscv64.s b/src/runtime/internal/atomic/atomic_riscv64.s
index 21d5adc..bf6bd35 100644
--- a/src/runtime/internal/atomic/atomic_riscv64.s
+++ b/src/runtime/internal/atomic/atomic_riscv64.s
@@ -282,3 +282,43 @@
 	MOVW	val+8(FP), A1
 	AMOORW	A1, (A0), ZERO
 	RET
+
+// func Or32(ptr *uint32, val uint32) uint32
+TEXT ·Or32(SB), NOSPLIT, $0-20
+	MOV	ptr+0(FP), A0
+	MOVW	val+8(FP), A1
+	AMOORW	A1, (A0), A2
+	MOVW	A2, ret+16(FP)
+	RET
+
+// func And32(ptr *uint32, val uint32) uint32
+TEXT ·And32(SB), NOSPLIT, $0-20
+	MOV	ptr+0(FP), A0
+	MOVW	val+8(FP), A1
+	AMOANDW	A1, (A0), A2
+	MOVW	A2, ret+16(FP)
+	RET
+
+// func Or64(ptr *uint64, val uint64) uint64
+TEXT ·Or64(SB), NOSPLIT, $0-24
+	MOV	ptr+0(FP), A0
+	MOV	val+8(FP), A1
+	AMOORD	A1, (A0), A2
+	MOV	A2, ret+16(FP)
+	RET
+
+// func And64(ptr *uint64, val uint64) uint64
+TEXT ·And64(SB), NOSPLIT, $0-24
+	MOV	ptr+0(FP), A0
+	MOV	val+8(FP), A1
+	AMOANDD	A1, (A0), A2
+	MOV	A2, ret+16(FP)
+	RET
+
+// func Anduintptr(ptr *uintptr, val uintptr) uintptr
+TEXT ·Anduintptr(SB), NOSPLIT, $0-24
+	JMP	·And64(SB)
+
+// func Oruintptr(ptr *uintptr, val uintptr) uintptr
+TEXT ·Oruintptr(SB), NOSPLIT, $0-24
+	JMP	·Or64(SB)
diff --git a/src/runtime/internal/math/math.go b/src/runtime/internal/math/math.go
index c3fac36..b2e5508 100644
--- a/src/runtime/internal/math/math.go
+++ b/src/runtime/internal/math/math.go
@@ -38,3 +38,18 @@
 	lo = x * y
 	return
 }
+
+// Add64 returns the sum with carry of x, y and carry: sum = x + y + carry.
+// The carry input must be 0 or 1; otherwise the behavior is undefined.
+// The carryOut output is guaranteed to be 0 or 1.
+//
+// This function's execution time does not depend on the inputs.
+// On supported platforms this is an intrinsic lowered by the compiler.
+func Add64(x, y, carry uint64) (sum, carryOut uint64) {
+	sum = x + y + carry
+	// The sum will overflow if both top bits are set (x & y) or if one of them
+	// is (x | y), and a carry from the lower place happened. If such a carry
+	// happens, the top bit will be 1 + 0 + 1 = 0 (&^ sum).
+	carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
+	return
+}
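A few concrete cases make the carry expression easier to follow; math/bits.Add64 is the exported function with the same contract, so it can stand in for this internal copy:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		// Both top bits set: the x&y term reports the carry.
		fmt.Println(bits.Add64(1<<63, 1<<63, 0)) // 0 1

		// Exactly one top bit set and the low bits overflow: the sum's top
		// bit clears, so the (x|y) &^ sum term reports the carry.
		fmt.Println(bits.Add64(1<<63|1, ^uint64(0)>>1, 0)) // 0 1

		// No overflow: both terms are zero in the top bit.
		fmt.Println(bits.Add64(2, 3, 1)) // 6 0
	}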
diff --git a/src/runtime/internal/sys/intrinsics_test.go b/src/runtime/internal/sys/intrinsics_test.go
index bf75f19..6799885 100644
--- a/src/runtime/internal/sys/intrinsics_test.go
+++ b/src/runtime/internal/sys/intrinsics_test.go
@@ -1,3 +1,7 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package sys_test
 
 import (
diff --git a/src/runtime/internal/syscall/asm_linux_loong64.s b/src/runtime/internal/syscall/asm_linux_loong64.s
index d6a33f9..11c5bc2 100644
--- a/src/runtime/internal/syscall/asm_linux_loong64.s
+++ b/src/runtime/internal/syscall/asm_linux_loong64.s
@@ -5,7 +5,32 @@
 #include "textflag.h"
 
 // func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
+//
+// We need to convert to the syscall ABI.
+//
+// arg | ABIInternal | Syscall
+// ---------------------------
+// num | R4          | R11
+// a1  | R5          | R4
+// a2  | R6          | R5
+// a3  | R7          | R6
+// a4  | R8          | R7
+// a5  | R9          | R8
+// a6  | R10         | R9
+//
+// r1  | R4          | R4
+// r2  | R5          | R5
+// err | R6          | part of R4
+TEXT ·Syscall6<ABIInternal>(SB),NOSPLIT,$0-80
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R4, R11  // syscall entry
+	MOVV	R5, R4
+	MOVV	R6, R5
+	MOVV	R7, R6
+	MOVV	R8, R7
+	MOVV	R9, R8
+	MOVV	R10, R9
+#else
 	MOVV	num+0(FP), R11  // syscall entry
 	MOVV	a1+8(FP), R4
 	MOVV	a2+16(FP), R5
@@ -13,7 +38,15 @@
 	MOVV	a4+32(FP), R7
 	MOVV	a5+40(FP), R8
 	MOVV	a6+48(FP), R9
+#endif
 	SYSCALL
+#ifdef GOEXPERIMENT_regabiargs
+	MOVV	R0, R5      // r2 is not used. Always set to 0.
+	MOVW	$-4096, R12
+	BGEU	R12, R4, ok
+	SUBVU	R4, R0, R6  // errno
+	MOVV	$-1, R4     // r1
+#else
 	MOVW	$-4096, R12
 	BGEU	R12, R4, ok
 	MOVV	$-1, R12
@@ -21,9 +54,15 @@
 	MOVV	R0, r2+64(FP)
 	SUBVU	R4, R0, R4
 	MOVV	R4, errno+72(FP)
+#endif
 	RET
 ok:
+#ifdef GOEXPERIMENT_regabiargs
+	// r1 already in R4
+	MOVV	R0, R6     // errno
+#else
 	MOVV	R4, r1+56(FP)
 	MOVV	R0, r2+64(FP)	// r2 is not used. Always set to 0.
 	MOVV	R0, errno+72(FP)
+#endif
 	RET
diff --git a/src/runtime/lock_futex.go b/src/runtime/lock_futex.go
index cc7d465..867e2b3 100644
--- a/src/runtime/lock_futex.go
+++ b/src/runtime/lock_futex.go
@@ -44,6 +44,10 @@
 	return (*uint32)(unsafe.Pointer(p))
 }
 
+func mutexContended(l *mutex) bool {
+	return atomic.Load(key32(&l.key)) > mutex_locked
+}
+
 func lock(l *mutex) {
 	lockWithRank(l, getLockRank(l))
 }
@@ -71,6 +75,8 @@
 	// its wakeup call.
 	wait := v
 
+	timer := &lockTimer{lock: l}
+	timer.begin()
 	// On uniprocessors, no point spinning.
 	// On multiprocessors, spin for ACTIVE_SPIN attempts.
 	spin := 0
@@ -82,6 +88,7 @@
 		for i := 0; i < spin; i++ {
 			for l.key == mutex_unlocked {
 				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
+					timer.end()
 					return
 				}
 			}
@@ -92,6 +99,7 @@
 		for i := 0; i < passive_spin; i++ {
 			for l.key == mutex_unlocked {
 				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
+					timer.end()
 					return
 				}
 			}
@@ -101,6 +109,7 @@
 		// Sleep.
 		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
 		if v == mutex_unlocked {
+			timer.end()
 			return
 		}
 		wait = mutex_sleeping
@@ -122,6 +131,7 @@
 	}
 
 	gp := getg()
+	gp.m.mLockProfile.recordUnlock(l)
 	gp.m.locks--
 	if gp.m.locks < 0 {
 		throw("runtime·unlock: lock count")
diff --git a/src/runtime/lock_js.go b/src/runtime/lock_js.go
index 91ad7be..b6ee5ec 100644
--- a/src/runtime/lock_js.go
+++ b/src/runtime/lock_js.go
@@ -23,6 +23,10 @@
 	passive_spin    = 1
 )
 
+func mutexContended(l *mutex) bool {
+	return false
+}
+
 func lock(l *mutex) {
 	lockWithRank(l, getLockRank(l))
 }
diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go
index e15bbf7..073e7d4 100644
--- a/src/runtime/lock_sema.go
+++ b/src/runtime/lock_sema.go
@@ -31,6 +31,10 @@
 	passive_spin    = 1
 )
 
+func mutexContended(l *mutex) bool {
+	return atomic.Loaduintptr(&l.key) > locked
+}
+
 func lock(l *mutex) {
 	lockWithRank(l, getLockRank(l))
 }
@@ -48,6 +52,8 @@
 	}
 	semacreate(gp.m)
 
+	timer := &lockTimer{lock: l}
+	timer.begin()
 	// On uniprocessors, no point spinning.
 	// On multiprocessors, spin for ACTIVE_SPIN attempts.
 	spin := 0
@@ -60,6 +66,7 @@
 		if v&locked == 0 {
 			// Unlocked. Try to lock.
 			if atomic.Casuintptr(&l.key, v, v|locked) {
+				timer.end()
 				return
 			}
 			i = 0
@@ -119,6 +126,7 @@
 			}
 		}
 	}
+	gp.m.mLockProfile.recordUnlock(l)
 	gp.m.locks--
 	if gp.m.locks < 0 {
 		throw("runtime·unlock: lock count")
@@ -130,13 +138,7 @@
 
 // One-time notifications.
 func noteclear(n *note) {
-	if GOOS == "aix" {
-		// On AIX, semaphores might not synchronize the memory in some
-		// rare cases. See issue #30189.
-		atomic.Storeuintptr(&n.key, 0)
-	} else {
-		n.key = 0
-	}
+	n.key = 0
 }
 
 func notewakeup(n *note) {
diff --git a/src/runtime/lock_wasip1.go b/src/runtime/lock_wasip1.go
index c4fc59f..acfc62a 100644
--- a/src/runtime/lock_wasip1.go
+++ b/src/runtime/lock_wasip1.go
@@ -19,6 +19,10 @@
 	active_spin_cnt = 30
 )
 
+func mutexContended(l *mutex) bool {
+	return false
+}
+
 func lock(l *mutex) {
 	lockWithRank(l, getLockRank(l))
 }
diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go
index 4d661e9..b27e6c5 100644
--- a/src/runtime/lockrank.go
+++ b/src/runtime/lockrank.go
@@ -18,8 +18,16 @@
 	lockRankSweepWaiters
 	lockRankAssistQueue
 	lockRankSweep
-	lockRankPollDesc
+	lockRankTestR
+	lockRankTestW
+	lockRankAllocmW
+	lockRankExecW
 	lockRankCpuprof
+	lockRankPollDesc
+	lockRankWakeableSleep
+	// SCHED
+	lockRankAllocmR
+	lockRankExecR
 	lockRankSched
 	lockRankAllg
 	lockRankAllp
@@ -28,8 +36,6 @@
 	lockRankHchan
 	lockRankNotifyList
 	lockRankSudog
-	lockRankRwmutexW
-	lockRankRwmutexR
 	lockRankRoot
 	lockRankItab
 	lockRankReflectOffs
@@ -63,6 +69,9 @@
 	lockRankPanic
 	lockRankDeadlock
 	lockRankRaceFini
+	lockRankAllocmRInternal
+	lockRankExecRInternal
+	lockRankTestRInternal
 )
 
 // lockRankLeafRank is the rank of lock that does not have a declared rank,
@@ -71,52 +80,60 @@
 
 // lockNames gives the names associated with each of the above ranks.
 var lockNames = []string{
-	lockRankSysmon:         "sysmon",
-	lockRankScavenge:       "scavenge",
-	lockRankForcegc:        "forcegc",
-	lockRankDefer:          "defer",
-	lockRankSweepWaiters:   "sweepWaiters",
-	lockRankAssistQueue:    "assistQueue",
-	lockRankSweep:          "sweep",
-	lockRankPollDesc:       "pollDesc",
-	lockRankCpuprof:        "cpuprof",
-	lockRankSched:          "sched",
-	lockRankAllg:           "allg",
-	lockRankAllp:           "allp",
-	lockRankTimers:         "timers",
-	lockRankNetpollInit:    "netpollInit",
-	lockRankHchan:          "hchan",
-	lockRankNotifyList:     "notifyList",
-	lockRankSudog:          "sudog",
-	lockRankRwmutexW:       "rwmutexW",
-	lockRankRwmutexR:       "rwmutexR",
-	lockRankRoot:           "root",
-	lockRankItab:           "itab",
-	lockRankReflectOffs:    "reflectOffs",
-	lockRankUserArenaState: "userArenaState",
-	lockRankTraceBuf:       "traceBuf",
-	lockRankTraceStrings:   "traceStrings",
-	lockRankFin:            "fin",
-	lockRankSpanSetSpine:   "spanSetSpine",
-	lockRankMspanSpecial:   "mspanSpecial",
-	lockRankGcBitsArenas:   "gcBitsArenas",
-	lockRankProfInsert:     "profInsert",
-	lockRankProfBlock:      "profBlock",
-	lockRankProfMemActive:  "profMemActive",
-	lockRankProfMemFuture:  "profMemFuture",
-	lockRankGscan:          "gscan",
-	lockRankStackpool:      "stackpool",
-	lockRankStackLarge:     "stackLarge",
-	lockRankHchanLeaf:      "hchanLeaf",
-	lockRankWbufSpans:      "wbufSpans",
-	lockRankMheap:          "mheap",
-	lockRankMheapSpecial:   "mheapSpecial",
-	lockRankGlobalAlloc:    "globalAlloc",
-	lockRankTrace:          "trace",
-	lockRankTraceStackTab:  "traceStackTab",
-	lockRankPanic:          "panic",
-	lockRankDeadlock:       "deadlock",
-	lockRankRaceFini:       "raceFini",
+	lockRankSysmon:          "sysmon",
+	lockRankScavenge:        "scavenge",
+	lockRankForcegc:         "forcegc",
+	lockRankDefer:           "defer",
+	lockRankSweepWaiters:    "sweepWaiters",
+	lockRankAssistQueue:     "assistQueue",
+	lockRankSweep:           "sweep",
+	lockRankTestR:           "testR",
+	lockRankTestW:           "testW",
+	lockRankAllocmW:         "allocmW",
+	lockRankExecW:           "execW",
+	lockRankCpuprof:         "cpuprof",
+	lockRankPollDesc:        "pollDesc",
+	lockRankWakeableSleep:   "wakeableSleep",
+	lockRankAllocmR:         "allocmR",
+	lockRankExecR:           "execR",
+	lockRankSched:           "sched",
+	lockRankAllg:            "allg",
+	lockRankAllp:            "allp",
+	lockRankTimers:          "timers",
+	lockRankNetpollInit:     "netpollInit",
+	lockRankHchan:           "hchan",
+	lockRankNotifyList:      "notifyList",
+	lockRankSudog:           "sudog",
+	lockRankRoot:            "root",
+	lockRankItab:            "itab",
+	lockRankReflectOffs:     "reflectOffs",
+	lockRankUserArenaState:  "userArenaState",
+	lockRankTraceBuf:        "traceBuf",
+	lockRankTraceStrings:    "traceStrings",
+	lockRankFin:             "fin",
+	lockRankSpanSetSpine:    "spanSetSpine",
+	lockRankMspanSpecial:    "mspanSpecial",
+	lockRankGcBitsArenas:    "gcBitsArenas",
+	lockRankProfInsert:      "profInsert",
+	lockRankProfBlock:       "profBlock",
+	lockRankProfMemActive:   "profMemActive",
+	lockRankProfMemFuture:   "profMemFuture",
+	lockRankGscan:           "gscan",
+	lockRankStackpool:       "stackpool",
+	lockRankStackLarge:      "stackLarge",
+	lockRankHchanLeaf:       "hchanLeaf",
+	lockRankWbufSpans:       "wbufSpans",
+	lockRankMheap:           "mheap",
+	lockRankMheapSpecial:    "mheapSpecial",
+	lockRankGlobalAlloc:     "globalAlloc",
+	lockRankTrace:           "trace",
+	lockRankTraceStackTab:   "traceStackTab",
+	lockRankPanic:           "panic",
+	lockRankDeadlock:        "deadlock",
+	lockRankRaceFini:        "raceFini",
+	lockRankAllocmRInternal: "allocmRInternal",
+	lockRankExecRInternal:   "execRInternal",
+	lockRankTestRInternal:   "testRInternal",
 }
 
 func (rank lockRank) String() string {
@@ -138,50 +155,58 @@
 //
 // Lock ranks that allow self-cycles list themselves.
 var lockPartialOrder [][]lockRank = [][]lockRank{
-	lockRankSysmon:         {},
-	lockRankScavenge:       {lockRankSysmon},
-	lockRankForcegc:        {lockRankSysmon},
-	lockRankDefer:          {},
-	lockRankSweepWaiters:   {},
-	lockRankAssistQueue:    {},
-	lockRankSweep:          {},
-	lockRankPollDesc:       {},
-	lockRankCpuprof:        {},
-	lockRankSched:          {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof},
-	lockRankAllg:           {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched},
-	lockRankAllp:           {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched},
-	lockRankTimers:         {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers},
-	lockRankNetpollInit:    {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers},
-	lockRankHchan:          {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan},
-	lockRankNotifyList:     {},
-	lockRankSudog:          {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan, lockRankNotifyList},
-	lockRankRwmutexW:       {},
-	lockRankRwmutexR:       {lockRankSysmon, lockRankRwmutexW},
-	lockRankRoot:           {},
-	lockRankItab:           {},
-	lockRankReflectOffs:    {lockRankItab},
-	lockRankUserArenaState: {},
-	lockRankTraceBuf:       {lockRankSysmon, lockRankScavenge},
-	lockRankTraceStrings:   {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
-	lockRankFin:            {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
-	lockRankSpanSetSpine:   {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
-	lockRankMspanSpecial:   {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
-	lockRankGcBitsArenas:   {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial},
-	lockRankProfInsert:     {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
-	lockRankProfBlock:      {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
-	lockRankProfMemActive:  {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
-	lockRankProfMemFuture:  {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
-	lockRankGscan:          {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture},
-	lockRankStackpool:      {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
-	lockRankStackLarge:     {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
-	lockRankHchanLeaf:      {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
-	lockRankWbufSpans:      {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
-	lockRankMheap:          {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
-	lockRankMheapSpecial:   {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
-	lockRankGlobalAlloc:    {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial},
-	lockRankTrace:          {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
-	lockRankTraceStackTab:  {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
-	lockRankPanic:          {},
-	lockRankDeadlock:       {lockRankPanic, lockRankDeadlock},
-	lockRankRaceFini:       {lockRankPanic},
+	lockRankSysmon:          {},
+	lockRankScavenge:        {lockRankSysmon},
+	lockRankForcegc:         {lockRankSysmon},
+	lockRankDefer:           {},
+	lockRankSweepWaiters:    {},
+	lockRankAssistQueue:     {},
+	lockRankSweep:           {},
+	lockRankTestR:           {},
+	lockRankTestW:           {},
+	lockRankAllocmW:         {},
+	lockRankExecW:           {},
+	lockRankCpuprof:         {},
+	lockRankPollDesc:        {},
+	lockRankWakeableSleep:   {},
+	lockRankAllocmR:         {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep},
+	lockRankExecR:           {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep},
+	lockRankSched:           {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR},
+	lockRankAllg:            {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched},
+	lockRankAllp:            {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched},
+	lockRankTimers:          {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllp, lockRankTimers},
+	lockRankNetpollInit:     {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllp, lockRankTimers},
+	lockRankHchan:           {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankWakeableSleep, lockRankHchan},
+	lockRankNotifyList:      {},
+	lockRankSudog:           {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList},
+	lockRankRoot:            {},
+	lockRankItab:            {},
+	lockRankReflectOffs:     {lockRankItab},
+	lockRankUserArenaState:  {},
+	lockRankTraceBuf:        {lockRankSysmon, lockRankScavenge},
+	lockRankTraceStrings:    {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
+	lockRankFin:             {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+	lockRankSpanSetSpine:    {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+	lockRankMspanSpecial:    {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+	lockRankGcBitsArenas:    {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial},
+	lockRankProfInsert:      {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+	lockRankProfBlock:       {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+	lockRankProfMemActive:   {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+	lockRankProfMemFuture:   {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
+	lockRankGscan:           {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture},
+	lockRankStackpool:       {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+	lockRankStackLarge:      {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+	lockRankHchanLeaf:       {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
+	lockRankWbufSpans:       {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+	lockRankMheap:           {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
+	lockRankMheapSpecial:    {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
+	lockRankGlobalAlloc:     {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial},
+	lockRankTrace:           {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
+	lockRankTraceStackTab:   {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
+	lockRankPanic:           {},
+	lockRankDeadlock:        {lockRankPanic, lockRankDeadlock},
+	lockRankRaceFini:        {lockRankPanic},
+	lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR},
+	lockRankExecRInternal:   {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankExecR},
+	lockRankTestRInternal:   {lockRankTestR, lockRankTestW},
 }
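
The table above is the generated partial order: for each lock rank, the set of ranks that may already be held when a lock of that rank is acquired. As a rough, self-contained sketch of how such a table is consulted (illustrative only; the ranks, types, and function below are made up and are not the runtime's), consider:

package main

import "fmt"

type rank int

// allowed[r] is the set of ranks that may already be held when acquiring r,
// mirroring the shape of the generated table above (ranks here are made up).
var allowed = map[rank]map[rank]bool{
	1: {},
	2: {1: true},
	3: {1: true, 2: true},
}

// checkAcquire reports an ordering violation if any held rank is not in the
// allowed set for the lock about to be acquired.
func checkAcquire(held []rank, next rank) error {
	for _, h := range held {
		if !allowed[next][h] {
			return fmt.Errorf("lock ordering violation: acquiring rank %d while holding rank %d", next, h)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkAcquire([]rank{1, 2}, 3)) // <nil>
	fmt.Println(checkAcquire([]rank{3}, 2))    // ordering violation
}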
diff --git a/src/runtime/lockrank_on.go b/src/runtime/lockrank_on.go
index bf530ee..b1d9999 100644
--- a/src/runtime/lockrank_on.go
+++ b/src/runtime/lockrank_on.go
@@ -213,7 +213,9 @@
 	})
 }
 
-// See comment on lockWithRank regarding stack splitting.
+// nosplit because it may be called from nosplit contexts.
+//
+//go:nosplit
 func lockWithRankMayAcquire(l *mutex, rank lockRank) {
 	gp := getg()
 	if gp.m.locksHeldLen == 0 {
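
The hunk above attaches //go:nosplit to lockWithRankMayAcquire so it can run in contexts where growing the stack is not allowed. A minimal, hypothetical example of the directive's placement convention (doc comment, blank comment line, then the directive immediately above the declaration):

package main

// doSomething is a hypothetical function; the directive below tells the
// compiler not to insert a stack-growth (split) check in its prologue,
// matching the placement convention used in the hunk above.
//
//go:nosplit
func doSomething() {}

func main() {
	doSomething()
}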
diff --git a/src/runtime/lockrank_test.go b/src/runtime/lockrank_test.go
index a7b1b8d..dd99eb4 100644
--- a/src/runtime/lockrank_test.go
+++ b/src/runtime/lockrank_test.go
@@ -15,9 +15,13 @@
 // Test that the generated code for the lock rank graph is up-to-date.
 func TestLockRankGenerated(t *testing.T) {
 	testenv.MustHaveGoRun(t)
-	want, err := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "run", "mklockrank.go")).CombinedOutput()
+	cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "run", "mklockrank.go"))
+	want, err := cmd.Output()
 	if err != nil {
-		t.Fatal(err)
+		if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+			t.Fatalf("%v: %v\n%s", cmd, err, ee.Stderr)
+		}
+		t.Fatalf("%v: %v", cmd, err)
 	}
 	got, err := os.ReadFile("lockrank.go")
 	if err != nil {
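
The test change above switches from CombinedOutput to Output so the generator's stdout is compared cleanly and its stderr is only surfaced on failure via *exec.ExitError. A small standalone sketch of the same pattern (the command here is arbitrary, chosen only for illustration):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("go", "version") // arbitrary command, for illustration
	out, err := cmd.Output()             // stdout only; stderr is captured separately
	if err != nil {
		if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
			fmt.Printf("%v: %v\n%s\n", cmd, err, ee.Stderr)
			return
		}
		fmt.Printf("%v: %v\n", cmd, err)
		return
	}
	fmt.Printf("%s", out)
}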
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 44479cc..e2cb2e4 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -102,6 +102,7 @@
 
 import (
 	"internal/goarch"
+	"internal/goexperiment"
 	"internal/goos"
 	"runtime/internal/atomic"
 	"runtime/internal/math"
@@ -117,8 +118,6 @@
 	pageShift = _PageShift
 	pageSize  = _PageSize
 
-	concurrentSweep = _ConcurrentSweep
-
 	_PageSize = 1 << _PageShift
 	_PageMask = _PageSize - 1
 
@@ -426,6 +425,26 @@
 		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
 		throw("bad pagesPerReclaimerChunk")
 	}
+	if goexperiment.AllocHeaders {
+		// Check that the minimum size (exclusive) for a malloc header is also
+		// a size class boundary. This is important to making sure checks align
+		// across different parts of the runtime.
+		minSizeForMallocHeaderIsSizeClass := false
+		for i := 0; i < len(class_to_size); i++ {
+			if minSizeForMallocHeader == uintptr(class_to_size[i]) {
+				minSizeForMallocHeaderIsSizeClass = true
+				break
+			}
+		}
+		if !minSizeForMallocHeaderIsSizeClass {
+			throw("min size of malloc header is not a size class boundary")
+		}
+		// Check that the pointer bitmap for all small sizes without a malloc header
+		// fits in a word.
+		if minSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
+			throw("max pointer/scan bitmap size for headerless objects is too large")
+		}
+	}
 
 	if minTagBits > taggedPointerBits {
 		throw("taggedPointerbits too small")
@@ -853,6 +872,10 @@
 //
 // The heap lock must not be held over this operation, since it will briefly acquire
 // the heap lock.
+//
+// Must be called on the system stack because it acquires the heap lock.
+//
+//go:systemstack
 func (h *mheap) enableMetadataHugePages() {
 	// Enable huge pages for page structure.
 	h.pages.enableChunkHugePages()
@@ -888,7 +911,7 @@
 func nextFreeFast(s *mspan) gclinkptr {
 	theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
 	if theBit < 64 {
-		result := s.freeindex + uintptr(theBit)
+		result := s.freeindex + uint16(theBit)
 		if result < s.nelems {
 			freeidx := result + 1
 			if freeidx%64 == 0 && freeidx != s.nelems {
@@ -897,7 +920,7 @@
 			s.allocCache >>= uint(theBit + 1)
 			s.freeindex = freeidx
 			s.allocCount++
-			return gclinkptr(result*s.elemsize + s.base())
+			return gclinkptr(uintptr(result)*s.elemsize + s.base())
 		}
 	}
 	return 0
@@ -918,7 +941,7 @@
 	freeIndex := s.nextFreeIndex()
 	if freeIndex == s.nelems {
 		// The span is full.
-		if uintptr(s.allocCount) != s.nelems {
+		if s.allocCount != s.nelems {
 			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
 			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
 		}
@@ -933,9 +956,9 @@
 		throw("freeIndex is not valid")
 	}
 
-	v = gclinkptr(freeIndex*s.elemsize + s.base())
+	v = gclinkptr(uintptr(freeIndex)*s.elemsize + s.base())
 	s.allocCount++
-	if uintptr(s.allocCount) > s.nelems {
+	if s.allocCount > s.nelems {
 		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
 		throw("s.allocCount > s.nelems")
 	}
@@ -1018,12 +1041,22 @@
 		throw("mallocgc called without a P or outside bootstrapping")
 	}
 	var span *mspan
+	var header **_type
 	var x unsafe.Pointer
 	noscan := typ == nil || typ.PtrBytes == 0
 	// In some cases block zeroing can profitably (for latency reduction purposes)
 	// be delayed till preemption is possible; delayedZeroing tracks that state.
 	delayedZeroing := false
-	if size <= maxSmallSize {
+	// Determine if it's a 'small' object that goes into a size-classed span.
+	//
+	// Note: This comparison looks a little strange, but it exists to smooth out
+	// the crossover between the largest size class and large objects that have
+	// their own spans. The small window of object sizes between maxSmallSize-mallocHeaderSize
+	// and maxSmallSize will be considered large, even though they might fit in
+	// a size class. In practice this is completely fine, since the largest small
+	// size class has a single object in it already, precisely to make the transition
+	// to large objects smooth.
+	if size <= maxSmallSize-mallocHeaderSize {
 		if noscan && size < maxTinySize {
 			// Tiny allocator.
 			//
@@ -1098,6 +1131,10 @@
 			}
 			size = maxTinySize
 		} else {
+			hasHeader := !noscan && !heapBitsInSpan(size)
+			if goexperiment.AllocHeaders && hasHeader {
+				size += mallocHeaderSize
+			}
 			var sizeclass uint8
 			if size <= smallSizeMax-8 {
 				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
@@ -1115,6 +1152,11 @@
 			if needzero && span.needzero != 0 {
 				memclrNoHeapPointers(x, size)
 			}
+			if goexperiment.AllocHeaders && hasHeader {
+				header = (**_type)(x)
+				x = add(x, mallocHeaderSize)
+				size -= mallocHeaderSize
+			}
 		}
 	} else {
 		shouldhelpgc = true
@@ -1130,29 +1172,30 @@
 				delayedZeroing = true
 			} else {
 				memclrNoHeapPointers(x, size)
-				// We've in theory cleared almost the whole span here,
-				// and could take the extra step of actually clearing
-				// the whole thing. However, don't. Any GC bits for the
-				// uncleared parts will be zero, and it's just going to
-				// be needzero = 1 once freed anyway.
 			}
 		}
+		if goexperiment.AllocHeaders && !noscan {
+			header = &span.largeType
+		}
 	}
-
 	if !noscan {
-		var scanSize uintptr
-		heapBitsSetType(uintptr(x), size, dataSize, typ)
-		if dataSize > typ.Size_ {
-			// Array allocation. If there are any
-			// pointers, GC has to scan to the last
-			// element.
-			if typ.PtrBytes != 0 {
-				scanSize = dataSize - typ.Size_ + typ.PtrBytes
-			}
+		if goexperiment.AllocHeaders {
+			c.scanAlloc += heapSetType(uintptr(x), dataSize, typ, header, span)
 		} else {
-			scanSize = typ.PtrBytes
+			var scanSize uintptr
+			heapBitsSetType(uintptr(x), size, dataSize, typ)
+			if dataSize > typ.Size_ {
+				// Array allocation. If there are any
+				// pointers, GC has to scan to the last
+				// element.
+				if typ.PtrBytes != 0 {
+					scanSize = dataSize - typ.Size_ + typ.PtrBytes
+				}
+			} else {
+				scanSize = typ.PtrBytes
+			}
+			c.scanAlloc += scanSize
 		}
-		c.scanAlloc += scanSize
 	}
 
 	// Ensure that the stores above that initialize x to
@@ -1178,7 +1221,7 @@
 	// This may be racing with GC so do it atomically if there can be
 	// a race marking the bit.
 	if gcphase != _GCoff {
-		gcmarknewobject(span, uintptr(x), size)
+		gcmarknewobject(span, uintptr(x))
 	}
 
 	if raceenabled {
@@ -1200,12 +1243,28 @@
 		asanunpoison(x, userSize)
 	}
 
+	// If goexperiment.AllocHeaders, "size" doesn't include the
+	// allocation header, so use span.elemsize as the "full" size
+	// for various computations below.
+	//
+	// TODO(mknyszek): We should really count the header as part
+	// of gc_sys or something, but it's risky to change the
+	// accounting so much right now. Just pretend it's internal
+	// fragmentation and match the GC's accounting by using the
+	// whole allocation slot.
+	fullSize := size
+	if goexperiment.AllocHeaders {
+		fullSize = span.elemsize
+	}
 	if rate := MemProfileRate; rate > 0 {
 		// Note cache c only valid while m acquired; see #47302
-		if rate != 1 && size < c.nextSample {
-			c.nextSample -= size
+		//
+		// N.B. Use the full size because that matches how the GC
+		// will update the mem profile on the "free" side.
+		if rate != 1 && fullSize < c.nextSample {
+			c.nextSample -= fullSize
 		} else {
-			profilealloc(mp, x, size)
+			profilealloc(mp, x, fullSize)
 		}
 	}
 	mp.mallocing = 0
@@ -1217,6 +1276,10 @@
 		if !noscan {
 			throw("delayed zeroing on data that may contain pointers")
 		}
+		if goexperiment.AllocHeaders && header != nil {
+			throw("unexpected malloc header in delayed zeroing of large object")
+		}
+		// N.B. size == fullSize always in this case.
 		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
 	}
 
@@ -1227,14 +1290,17 @@
 
 		if inittrace.active && inittrace.id == getg().goid {
 			// Init functions are executed sequentially in a single goroutine.
-			inittrace.bytes += uint64(size)
+			inittrace.bytes += uint64(fullSize)
 		}
 	}
 
 	if assistG != nil {
 		// Account for internal fragmentation in the assist
 		// debt now that we know it.
-		assistG.gcAssistBytes -= int64(size - dataSize)
+		//
+		// N.B. Use the full size because that's how the rest
+		// of the GC accounts for bytes marked.
+		assistG.gcAssistBytes -= int64(fullSize - dataSize)
 	}
 
 	if shouldhelpgc {
@@ -1406,7 +1472,7 @@
 	// x = -log_e(q) * mean
 	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
 	const randomBitCount = 26
-	q := fastrandn(1<<randomBitCount) + 1
+	q := cheaprandn(1<<randomBitCount) + 1
 	qlog := fastlog2(float64(q)) - randomBitCount
 	if qlog > 0 {
 		qlog = 0
@@ -1424,7 +1490,7 @@
 		rate = 0x3fffffff
 	}
 	if rate != 0 {
-		return uintptr(fastrandn(uint32(2 * rate)))
+		return uintptr(cheaprandn(uint32(2 * rate)))
 	}
 	return 0
 }
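
The profiling hunks above replace fastrandn with cheaprandn while keeping the exponential-sampling derivation x = -log_e(q) * mean. A rough standalone sketch of that sampling idea using the standard library instead of the runtime's fastlog2/cheaprandn (an approximation for illustration, not the runtime's code):

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// nextSample draws the number of bytes to allocate before the next heap
// profile sample from an exponential distribution with the given mean,
// i.e. x = -ln(q) * mean for a uniform q in (0, 1].
func nextSample(mean float64) float64 {
	q := 1 - rand.Float64() // uniform in (0, 1]
	return -math.Log(q) * mean
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Printf("next sample after ~%.0f bytes\n", nextSample(512*1024))
	}
}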
diff --git a/src/runtime/malloc_test.go b/src/runtime/malloc_test.go
index 5b9ce98..8c162fb 100644
--- a/src/runtime/malloc_test.go
+++ b/src/runtime/malloc_test.go
@@ -268,7 +268,7 @@
 	// Test that mheap.sysAlloc handles collisions with other
 	// memory mappings.
 	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
-		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestArenaCollision", "-test.v"))
+		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=^TestArenaCollision$", "-test.v"))
 		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
 		out, err := cmd.CombinedOutput()
 		if race.Enabled {
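
The -test.run pattern above is anchored so the child process runs exactly TestArenaCollision and not any test whose name merely contains that string. A small sketch of the difference (the second test name is hypothetical, made up to show the contrast):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	names := []string{"TestArenaCollision", "TestArenaCollisionFoo"}
	for _, pat := range []string{"TestArenaCollision", "^TestArenaCollision$"} {
		re := regexp.MustCompile(pat)
		var hits []string
		for _, n := range names {
			if re.MatchString(n) {
				hits = append(hits, n)
			}
		}
		fmt.Printf("%-25s matches %v\n", pat, hits)
	}
}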
diff --git a/src/runtime/map.go b/src/runtime/map.go
index 6b85681..cd3f838 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -70,7 +70,7 @@
 	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
 	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
 	loadFactorDen = 2
-	loadFactorNum = (bucketCnt * 13 / 16) * loadFactorDen
+	loadFactorNum = loadFactorDen * bucketCnt * 13 / 16
 
 	// Maximum key or elem size to keep inline (instead of mallocing per element).
 	// Must fit in a uint8.
@@ -238,8 +238,8 @@
 	// as many overflow buckets as buckets.
 	mask := uint32(1)<<(h.B-15) - 1
 	// Example: if h.B == 18, then mask == 7,
-	// and fastrand & 7 == 0 with probability 1/8.
-	if fastrand()&mask == 0 {
+	// and rand() & 7 == 0 with probability 1/8.
+	if uint32(rand())&mask == 0 {
 		h.noverflow++
 	}
 }
@@ -293,7 +293,7 @@
 // at compile time and the map needs to be allocated on the heap.
 func makemap_small() *hmap {
 	h := new(hmap)
-	h.hash0 = fastrand()
+	h.hash0 = uint32(rand())
 	return h
 }
 
@@ -312,7 +312,7 @@
 	if h == nil {
 		h = new(hmap)
 	}
-	h.hash0 = fastrand()
+	h.hash0 = uint32(rand())
 
 	// Find the size parameter B which will hold the requested # of elements.
 	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
@@ -354,7 +354,7 @@
 		// used with this value of b.
 		nbuckets += bucketShift(b - 4)
 		sz := t.Bucket.Size_ * nbuckets
-		up := roundupsize(sz)
+		up := roundupsize(sz, t.Bucket.PtrBytes == 0)
 		if up != sz {
 			nbuckets = up / t.Bucket.Size_
 		}
@@ -407,8 +407,8 @@
 		asanread(key, t.Key.Size_)
 	}
 	if h == nil || h.count == 0 {
-		if t.HashMightPanic() {
-			t.Hasher(key, 0) // see issue 23734
+		if err := mapKeyError(t, key); err != nil {
+			panic(err) // see issue 23734
 		}
 		return unsafe.Pointer(&zeroVal[0])
 	}
@@ -468,8 +468,8 @@
 		asanread(key, t.Key.Size_)
 	}
 	if h == nil || h.count == 0 {
-		if t.HashMightPanic() {
-			t.Hasher(key, 0) // see issue 23734
+		if err := mapKeyError(t, key); err != nil {
+			panic(err) // see issue 23734
 		}
 		return unsafe.Pointer(&zeroVal[0]), false
 	}
@@ -707,8 +707,8 @@
 		asanread(key, t.Key.Size_)
 	}
 	if h == nil || h.count == 0 {
-		if t.HashMightPanic() {
-			t.Hasher(key, 0) // see issue 23734
+		if err := mapKeyError(t, key); err != nil {
+			panic(err) // see issue 23734
 		}
 		return
 	}
@@ -797,7 +797,7 @@
 			// Reset the hash seed to make it more difficult for attackers to
 			// repeatedly trigger hash collisions. See issue 25237.
 			if h.count == 0 {
-				h.hash0 = fastrand()
+				h.hash0 = uint32(rand())
 			}
 			break search
 		}
@@ -843,12 +843,7 @@
 	}
 
 	// decide where to start
-	var r uintptr
-	if h.B > 31-bucketCntBits {
-		r = uintptr(fastrand64())
-	} else {
-		r = uintptr(fastrand())
-	}
+	r := uintptr(rand())
 	it.startBucket = r & bucketMask(h.B)
 	it.offset = uint8(r >> h.B & (bucketCnt - 1))
 
@@ -1032,7 +1027,7 @@
 
 	// Reset the hash seed to make it more difficult for attackers to
 	// repeatedly trigger hash collisions. See issue 25237.
-	h.hash0 = fastrand()
+	h.hash0 = uint32(rand())
 
 	// Keep the mapextra allocation but clear any extra information.
 	if h.extra != nil {
@@ -1436,8 +1431,7 @@
 	return h.count
 }
 
-const maxZero = 1024 // must match value in reflect/value.go:maxZero cmd/compile/internal/gc/walk.go:zeroValSize
-var zeroVal [maxZero]byte
+var zeroVal [abi.ZeroValSize]byte
 
 // mapinitnoop is a no-op function known the Go linker; if a given global
 // map (of the right size) is determined to be dead, the linker will
@@ -1481,12 +1475,24 @@
 
 		dst.tophash[pos] = src.tophash[i]
 		if t.IndirectKey() {
-			*(*unsafe.Pointer)(dstK) = *(*unsafe.Pointer)(srcK)
+			srcK = *(*unsafe.Pointer)(srcK)
+			if t.NeedKeyUpdate() {
+				kStore := newobject(t.Key)
+				typedmemmove(t.Key, kStore, srcK)
+				srcK = kStore
+			}
+			// Note: if NeedKeyUpdate is false, then the memory
+			// used to store the key is immutable, so we can share
+			// it between the original map and its clone.
+			*(*unsafe.Pointer)(dstK) = srcK
 		} else {
 			typedmemmove(t.Key, dstK, srcK)
 		}
 		if t.IndirectElem() {
-			*(*unsafe.Pointer)(dstEle) = *(*unsafe.Pointer)(srcEle)
+			srcEle = *(*unsafe.Pointer)(srcEle)
+			eStore := newobject(t.Elem)
+			typedmemmove(t.Elem, eStore, srcEle)
+			*(*unsafe.Pointer)(dstEle) = eStore
 		} else {
 			typedmemmove(t.Elem, dstEle, srcEle)
 		}
@@ -1510,14 +1516,14 @@
 		fatal("concurrent map clone and map write")
 	}
 
-	if src.B == 0 {
+	if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
+		// Quick copy for small maps.
 		dst.buckets = newobject(t.Bucket)
 		dst.count = src.count
 		typedmemmove(t.Bucket, dst.buckets, src.buckets)
 		return dst
 	}
 
-	//src.B != 0
 	if dst.B == 0 {
 		dst.buckets = newobject(t.Bucket)
 	}
@@ -1565,6 +1571,8 @@
 			continue
 		}
 
+		// oldB < dst.B, so a single source bucket may go to multiple destination buckets.
+		// Process entries one at a time.
 		for srcBmap != nil {
 			// move from oldBlucket to new bucket
 			for i := uintptr(0); i < bucketCnt; i++ {
@@ -1606,7 +1614,7 @@
 		return
 	}
 	s := (*slice)(p)
-	r := int(fastrand())
+	r := int(rand())
 	offset := uint8(r >> h.B & (bucketCnt - 1))
 	if h.B == 0 {
 		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
@@ -1651,7 +1659,7 @@
 			if s.len >= s.cap {
 				fatal("concurrent map read and map write")
 			}
-			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.KeySize)), k)
+			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
 			s.len++
 		}
 		b = b.overflow(t)
@@ -1669,7 +1677,7 @@
 		return
 	}
 	s := (*slice)(p)
-	r := int(fastrand())
+	r := int(rand())
 	offset := uint8(r >> h.B & (bucketCnt - 1))
 	if h.B == 0 {
 		copyValues(t, h, (*bmap)(h.buckets), s, offset)
@@ -1716,7 +1724,7 @@
 			if s.len >= s.cap {
 				fatal("concurrent map read and map write")
 			}
-			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.ValueSize)), ele)
+			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
 			s.len++
 		}
 		b = b.overflow(t)
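
The loadFactorNum change above reorders the constant expression so the integer division by 16 happens last. With bucketCnt = 8 and loadFactorDen = 2, the old form truncates the intermediate 6.5 down to 6, while the new form preserves it; the sketch below just shows the arithmetic:

package main

import "fmt"

func main() {
	const bucketCnt, loadFactorDen = 8, 2
	oldNum := (bucketCnt * 13 / 16) * loadFactorDen // 6 * 2 = 12  -> effective load factor 6.0
	newNum := loadFactorDen * bucketCnt * 13 / 16   // 208 / 16 = 13 -> effective load factor 6.5
	fmt.Println("old:", oldNum, "effective:", float64(oldNum)/loadFactorDen)
	fmt.Println("new:", newNum, "effective:", float64(newNum)/loadFactorDen)
}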
diff --git a/src/runtime/map_benchmark_test.go b/src/runtime/map_benchmark_test.go
index b46d2a4..43d1acc 100644
--- a/src/runtime/map_benchmark_test.go
+++ b/src/runtime/map_benchmark_test.go
@@ -168,6 +168,15 @@
 	}
 }
 
+func BenchmarkMegEmptyMapWithInterfaceKey(b *testing.B) {
+	m := make(map[any]bool)
+	key := strings.Repeat("X", 1<<20)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, _ = m[key]
+	}
+}
+
 func BenchmarkSmallStrMap(b *testing.B) {
 	m := make(map[string]bool)
 	for suffix := 'A'; suffix <= 'G'; suffix++ {
@@ -429,9 +438,7 @@
 				m := make(map[int]int, size)
 				for i := 0; i < b.N; i++ {
 					m[0] = size // Add one element so len(m) != 0 avoiding fast paths.
-					for k := range m {
-						delete(m, k)
-					}
+					clear(m)
 				}
 			})
 		}
@@ -442,9 +449,7 @@
 				m := make(map[float64]int, size)
 				for i := 0; i < b.N; i++ {
 					m[1.0] = size // Add one element so len(m) != 0 avoiding fast paths.
-					for k := range m {
-						delete(m, k)
-					}
+					clear(m)
 				}
 			})
 		}
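
The benchmark hunks above replace the range-and-delete loop with the clear builtin (Go 1.21+), which removes every entry from the map in a single call:

package main

import "fmt"

func main() {
	m := map[int]int{1: 1, 2: 2, 3: 3}
	clear(m)            // deletes every entry; equivalent to the old for-range delete loop
	fmt.Println(len(m)) // 0
}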
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
index d10dca3..e1dd495 100644
--- a/src/runtime/map_fast32.go
+++ b/src/runtime/map_fast32.go
@@ -348,7 +348,7 @@
 			// Reset the hash seed to make it more difficult for attackers to
 			// repeatedly trigger hash collisions. See issue 25237.
 			if h.count == 0 {
-				h.hash0 = fastrand()
+				h.hash0 = uint32(rand())
 			}
 			break search
 		}
diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go
index d771e0b..7ca35ec 100644
--- a/src/runtime/map_fast64.go
+++ b/src/runtime/map_fast64.go
@@ -350,7 +350,7 @@
 			// Reset the hash seed to make it more difficult for attackers to
 			// repeatedly trigger hash collisions. See issue 25237.
 			if h.count == 0 {
-				h.hash0 = fastrand()
+				h.hash0 = uint32(rand())
 			}
 			break search
 		}
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index ef71da8..22e1f61 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -376,7 +376,7 @@
 			// Reset the hash seed to make it more difficult for attackers to
 			// repeatedly trigger hash collisions. See issue 25237.
 			if h.count == 0 {
-				h.hash0 = fastrand()
+				h.hash0 = uint32(rand())
 			}
 			break search
 		}
diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go
index 3675106..2c51236 100644
--- a/src/runtime/map_test.go
+++ b/src/runtime/map_test.go
@@ -16,6 +16,7 @@
 	"strings"
 	"sync"
 	"testing"
+	"unsafe"
 )
 
 func TestHmapSize(t *testing.T) {
@@ -1256,3 +1257,210 @@
 		panic("array not found")
 	}
 }
+
+type panicStructKey struct {
+	sli []int
+}
+
+func (p panicStructKey) String() string {
+	return "panic"
+}
+
+type structKey struct {
+}
+
+func (structKey) String() string {
+	return "structKey"
+}
+
+func TestEmptyMapWithInterfaceKey(t *testing.T) {
+	var (
+		b    bool
+		i    int
+		i8   int8
+		i16  int16
+		i32  int32
+		i64  int64
+		ui   uint
+		ui8  uint8
+		ui16 uint16
+		ui32 uint32
+		ui64 uint64
+		uipt uintptr
+		f32  float32
+		f64  float64
+		c64  complex64
+		c128 complex128
+		a    [4]string
+		s    string
+		p    *int
+		up   unsafe.Pointer
+		ch   chan int
+		i0   any
+		i1   interface {
+			String() string
+		}
+		structKey structKey
+		i0Panic   any = []int{}
+		i1Panic   interface {
+			String() string
+		} = panicStructKey{}
+		panicStructKey = panicStructKey{}
+		sli            []int
+		me             = map[any]struct{}{}
+		mi             = map[interface {
+			String() string
+		}]struct{}{}
+	)
+	mustNotPanic := func(f func()) {
+		f()
+	}
+	mustPanic := func(f func()) {
+		defer func() {
+			r := recover()
+			if r == nil {
+				t.Errorf("didn't panic")
+			}
+		}()
+		f()
+	}
+	mustNotPanic(func() {
+		_ = me[b]
+	})
+	mustNotPanic(func() {
+		_ = me[i]
+	})
+	mustNotPanic(func() {
+		_ = me[i8]
+	})
+	mustNotPanic(func() {
+		_ = me[i16]
+	})
+	mustNotPanic(func() {
+		_ = me[i32]
+	})
+	mustNotPanic(func() {
+		_ = me[i64]
+	})
+	mustNotPanic(func() {
+		_ = me[ui]
+	})
+	mustNotPanic(func() {
+		_ = me[ui8]
+	})
+	mustNotPanic(func() {
+		_ = me[ui16]
+	})
+	mustNotPanic(func() {
+		_ = me[ui32]
+	})
+	mustNotPanic(func() {
+		_ = me[ui64]
+	})
+	mustNotPanic(func() {
+		_ = me[uipt]
+	})
+	mustNotPanic(func() {
+		_ = me[f32]
+	})
+	mustNotPanic(func() {
+		_ = me[f64]
+	})
+	mustNotPanic(func() {
+		_ = me[c64]
+	})
+	mustNotPanic(func() {
+		_ = me[c128]
+	})
+	mustNotPanic(func() {
+		_ = me[a]
+	})
+	mustNotPanic(func() {
+		_ = me[s]
+	})
+	mustNotPanic(func() {
+		_ = me[p]
+	})
+	mustNotPanic(func() {
+		_ = me[up]
+	})
+	mustNotPanic(func() {
+		_ = me[ch]
+	})
+	mustNotPanic(func() {
+		_ = me[i0]
+	})
+	mustNotPanic(func() {
+		_ = me[i1]
+	})
+	mustNotPanic(func() {
+		_ = me[structKey]
+	})
+	mustPanic(func() {
+		_ = me[i0Panic]
+	})
+	mustPanic(func() {
+		_ = me[i1Panic]
+	})
+	mustPanic(func() {
+		_ = me[panicStructKey]
+	})
+	mustPanic(func() {
+		_ = me[sli]
+	})
+	mustPanic(func() {
+		_ = me[me]
+	})
+
+	mustNotPanic(func() {
+		_ = mi[structKey]
+	})
+	mustPanic(func() {
+		_ = mi[panicStructKey]
+	})
+}
+
+func TestLoadFactor(t *testing.T) {
+	for b := uint8(0); b < 20; b++ {
+		count := 13 * (1 << b) / 2 // 6.5
+		if b == 0 {
+			count = 8
+		}
+		if runtime.OverLoadFactor(count, b) {
+			t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b)
+		}
+		if !runtime.OverLoadFactor(count+1, b) {
+			t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b)
+		}
+	}
+}
+
+func TestMapKeys(t *testing.T) {
+	type key struct {
+		s   string
+		pad [128]byte // sizeof(key) > abi.MapMaxKeyBytes
+	}
+	m := map[key]int{{s: "a"}: 1, {s: "b"}: 2}
+	keys := make([]key, 0, len(m))
+	runtime.MapKeys(m, unsafe.Pointer(&keys))
+	for _, k := range keys {
+		if len(k.s) != 1 {
+			t.Errorf("len(k.s) == %d, want 1", len(k.s))
+		}
+	}
+}
+
+func TestMapValues(t *testing.T) {
+	type val struct {
+		s   string
+		pad [128]byte // sizeof(val) > abi.MapMaxElemBytes
+	}
+	m := map[int]val{1: {s: "a"}, 2: {s: "b"}}
+	vals := make([]val, 0, len(m))
+	runtime.MapValues(m, unsafe.Pointer(&vals))
+	for _, v := range vals {
+		if len(v.s) != 1 {
+			t.Errorf("len(v.s) == %d, want 1", len(v.s))
+		}
+	}
+}
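
TestEmptyMapWithInterfaceKey above pins down the behavior that indexing an empty map with an interface key still panics when the key's dynamic type is not comparable, rather than silently returning the zero value. A tiny standalone illustration:

package main

import "fmt"

func main() {
	m := map[any]bool{}
	fmt.Println(m["ok"]) // false: comparable key types never panic
	defer func() { fmt.Println("recovered:", recover()) }()
	_ = m[[]int{1}] // panics: []int is not a comparable (hashable) type
}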
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index 159a298..c4b6c2a 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -153,8 +153,11 @@
 	if dst == src {
 		return
 	}
-	if writeBarrier.needed && typ.PtrBytes != 0 {
-		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
+	if writeBarrier.enabled && typ.PtrBytes != 0 {
+		// This always copies a full value of type typ so it's safe
+		// to pass typ along as an optimization. See the comment on
+		// bulkBarrierPreWrite.
+		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
 	}
 	// There's a race here: if some other goroutine can write to
 	// src, it may change some pointer in src after we've
@@ -176,7 +179,10 @@
 //go:nowritebarrierrec
 //go:nosplit
 func wbZero(typ *_type, dst unsafe.Pointer) {
-	bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes)
+	// This always copies a full value of type typ so it's safe
+	// to pass typ along as an optimization. See the comment on
+	// bulkBarrierPreWrite.
+	bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes, typ)
 }
 
 // wbMove performs the write barrier operations necessary before
@@ -186,7 +192,11 @@
 //go:nowritebarrierrec
 //go:nosplit
 func wbMove(typ *_type, dst, src unsafe.Pointer) {
-	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
+	// This always copies a full value of type typ so it's safe to
+	// pass a type here.
+	//
+	// See the comment on bulkBarrierPreWrite.
+	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
 }
 
 //go:linkname reflect_typedmemmove reflect.typedmemmove
@@ -222,8 +232,11 @@
 //
 //go:nosplit
 func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
-	if writeBarrier.needed && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
-		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
+	if writeBarrier.enabled && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
+		// Pass nil for the type. dst does not point to value of type typ,
+		// but rather points into one, so applying the optimization is not
+		// safe. See the comment on this function.
+		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size, nil)
 	}
 	memmove(dst, src, size)
 
@@ -277,9 +290,12 @@
 	// and growslice and reflect_typedslicecopy check for pointers
 	// before calling typedslicecopy.
 	size := uintptr(n) * typ.Size_
-	if writeBarrier.needed {
+	if writeBarrier.enabled {
+		// This always copies one or more full values of type typ so
+		// it's safe to pass typ along as an optimization. See the comment on
+		// bulkBarrierPreWrite.
 		pwsize := size - typ.Size_ + typ.PtrBytes
-		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
+		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize, typ)
 	}
 	// See typedmemmove for a discussion of the race between the
 	// barrier and memmove.
@@ -307,8 +323,11 @@
 //
 //go:nosplit
 func typedmemclr(typ *_type, ptr unsafe.Pointer) {
-	if writeBarrier.needed && typ.PtrBytes != 0 {
-		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes)
+	if writeBarrier.enabled && typ.PtrBytes != 0 {
+		// This always clears a whole value of type typ, so it's
+		// safe to pass a type here and apply the optimization.
+		// See the comment on bulkBarrierPreWrite.
+		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes, typ)
 	}
 	memclrNoHeapPointers(ptr, typ.Size_)
 }
@@ -320,8 +339,12 @@
 
 //go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
 func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
-	if writeBarrier.needed && typ.PtrBytes != 0 {
-		bulkBarrierPreWrite(uintptr(ptr), 0, size)
+	if writeBarrier.enabled && typ.PtrBytes != 0 {
+		// Pass nil for the type. ptr does not point to value of type typ,
+		// but rather points into one so it's not safe to apply the optimization.
+		// See the comment on this function in the reflect package and the
+		// comment on bulkBarrierPreWrite.
+		bulkBarrierPreWrite(uintptr(ptr), 0, size, nil)
 	}
 	memclrNoHeapPointers(ptr, size)
 }
@@ -329,8 +352,10 @@
 //go:linkname reflect_typedarrayclear reflect.typedarrayclear
 func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
 	size := typ.Size_ * uintptr(len)
-	if writeBarrier.needed && typ.PtrBytes != 0 {
-		bulkBarrierPreWrite(uintptr(ptr), 0, size)
+	if writeBarrier.enabled && typ.PtrBytes != 0 {
+		// This always clears whole elements of an array, so it's
+		// safe to pass a type here. See the comment on bulkBarrierPreWrite.
+		bulkBarrierPreWrite(uintptr(ptr), 0, size, typ)
 	}
 	memclrNoHeapPointers(ptr, size)
 }
@@ -342,6 +367,7 @@
 //
 //go:nosplit
 func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
-	bulkBarrierPreWrite(uintptr(ptr), 0, n)
+	// Pass nil for the type since we don't have one here anyway.
+	bulkBarrierPreWrite(uintptr(ptr), 0, n, nil)
 	memclrNoHeapPointers(ptr, n)
 }
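
The mbarrier.go hunks above give bulkBarrierPreWrite an extra type argument: callers pass typ only when dst covers one or more complete values of that type, and nil when dst points into the middle of a value, so the optimization is applied only where it is safe. The sketch below mimics that calling convention with a made-up API (none of these names exist in the runtime):

package main

import "fmt"

type typeInfo struct{ name string }

// bulkBarrier is a stand-in for the real function's contract: a non-nil type
// hint means dst spans whole values of that type and its pointer bitmap can
// be used directly; nil means fall back to per-word heap metadata.
func bulkBarrier(dst, size uintptr, typ *typeInfo) {
	if typ != nil {
		fmt.Printf("fast path: use %s's pointer bitmap for %d bytes at %#x\n", typ.name, size, dst)
		return
	}
	fmt.Printf("slow path: consult heap metadata for %d bytes at %#x\n", size, dst)
}

func main() {
	t := &typeInfo{name: "T"}
	bulkBarrier(0x1000, 64, t)   // dst is one or more whole values of T
	bulkBarrier(0x1008, 16, nil) // dst points into the middle of a value
}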
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index a242872..cdd1c5f 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -2,41 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Garbage collector: type and heap bitmaps.
-//
-// Stack, data, and bss bitmaps
-//
-// Stack frames and global variables in the data and bss sections are
-// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
-// means the word is a live pointer to be visited by the GC (referred to
-// as "pointer"). A "0" bit means the word should be ignored by GC
-// (referred to as "scalar", though it could be a dead pointer value).
-//
-// Heap bitmap
-//
-// The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
-// recording whether a pointer is stored in that word or not. This bitmap
-// is stored in the heapArena metadata backing each heap arena.
-// That is, if ha is the heapArena for the arena starting at "start",
-// then ha.bitmap[0] holds the 64 bits for the 64 words "start"
-// through start+63*ptrSize, ha.bitmap[1] holds the entries for
-// start+64*ptrSize through start+127*ptrSize, and so on.
-// Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents
-// the word at "start", ha.bitmap[0]>>1&1 represents the word at start+8, etc.
-// (For 32-bit platforms, s/64/32/.)
-//
-// We also keep a noMorePtrs bitmap which allows us to stop scanning
-// the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
-// is 1, then the object containing the last word described by ha.bitmap[8*i+j]
-// has no more pointers beyond those described by ha.bitmap[8*i+j].
-// If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
-// beyond must all be zero until the start of the next object.
-//
-// The bitmap for noscan spans is set to all zero at span allocation time.
-//
-// The bitmap for unallocated objects in scannable spans is not maintained
-// (can be junk).
-
 package runtime
 
 import (
@@ -117,8 +82,8 @@
 // and negates them so that ctz (count trailing zeros) instructions
 // can be used. It then places these 8 bytes into the cached 64 bit
 // s.allocCache.
-func (s *mspan) refillAllocCache(whichByte uintptr) {
-	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
+func (s *mspan) refillAllocCache(whichByte uint16) {
+	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
 	aCache := uint64(0)
 	aCache |= uint64(bytes[0])
 	aCache |= uint64(bytes[1]) << (1 * 8)
@@ -135,7 +100,7 @@
 // or after s.freeindex.
 // There are hardware instructions that can be used to make this
 // faster if profiling warrants it.
-func (s *mspan) nextFreeIndex() uintptr {
+func (s *mspan) nextFreeIndex() uint16 {
 	sfreeindex := s.freeindex
 	snelems := s.nelems
 	if sfreeindex == snelems {
@@ -163,7 +128,7 @@
 		// nothing available in cached bits
 		// grab the next 8 bytes and try again.
 	}
-	result := sfreeindex + uintptr(bitIndex)
+	result := sfreeindex + uint16(bitIndex)
 	if result >= snelems {
 		s.freeindex = snelems
 		return snelems
@@ -191,7 +156,7 @@
 // been no preemption points since ensuring this (which could allow a
 // GC transition, which would allow the state to change).
 func (s *mspan) isFree(index uintptr) bool {
-	if index < s.freeIndexForScan {
+	if index < uintptr(s.freeIndexForScan) {
 		return false
 	}
 	bytep, mask := s.allocBits.bitp(index)
@@ -383,251 +348,6 @@
 
 const ptrBits = 8 * goarch.PtrSize
 
-// heapBits provides access to the bitmap bits for a single heap word.
-// The methods on heapBits take value receivers so that the compiler
-// can more easily inline calls to those methods and registerize the
-// struct fields independently.
-type heapBits struct {
-	// heapBits will report on pointers in the range [addr,addr+size).
-	// The low bit of mask contains the pointerness of the word at addr
-	// (assuming valid>0).
-	addr, size uintptr
-
-	// The next few pointer bits representing words starting at addr.
-	// Those bits already returned by next() are zeroed.
-	mask uintptr
-	// Number of bits in mask that are valid. mask is always less than 1<<valid.
-	valid uintptr
-}
-
-// heapBitsForAddr returns the heapBits for the address addr.
-// The caller must ensure [addr,addr+size) is in an allocated span.
-// In particular, be careful not to point past the end of an object.
-//
-// nosplit because it is used during write barriers and must not be preempted.
-//
-//go:nosplit
-func heapBitsForAddr(addr, size uintptr) heapBits {
-	// Find arena
-	ai := arenaIndex(addr)
-	ha := mheap_.arenas[ai.l1()][ai.l2()]
-
-	// Word index in arena.
-	word := addr / goarch.PtrSize % heapArenaWords
-
-	// Word index and bit offset in bitmap array.
-	idx := word / ptrBits
-	off := word % ptrBits
-
-	// Grab relevant bits of bitmap.
-	mask := ha.bitmap[idx] >> off
-	valid := ptrBits - off
-
-	// Process depending on where the object ends.
-	nptr := size / goarch.PtrSize
-	if nptr < valid {
-		// Bits for this object end before the end of this bitmap word.
-		// Squash bits for the following objects.
-		mask &= 1<<(nptr&(ptrBits-1)) - 1
-		valid = nptr
-	} else if nptr == valid {
-		// Bits for this object end at exactly the end of this bitmap word.
-		// All good.
-	} else {
-		// Bits for this object extend into the next bitmap word. See if there
-		// may be any pointers recorded there.
-		if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
-			// No more pointers in this object after this bitmap word.
-			// Update size so we know not to look there.
-			size = valid * goarch.PtrSize
-		}
-	}
-
-	return heapBits{addr: addr, size: size, mask: mask, valid: valid}
-}
-
-// Returns the (absolute) address of the next known pointer and
-// a heapBits iterator representing any remaining pointers.
-// If there are no more pointers, returns address 0.
-// Note that next does not modify h. The caller must record the result.
-//
-// nosplit because it is used during write barriers and must not be preempted.
-//
-//go:nosplit
-func (h heapBits) next() (heapBits, uintptr) {
-	for {
-		if h.mask != 0 {
-			var i int
-			if goarch.PtrSize == 8 {
-				i = sys.TrailingZeros64(uint64(h.mask))
-			} else {
-				i = sys.TrailingZeros32(uint32(h.mask))
-			}
-			h.mask ^= uintptr(1) << (i & (ptrBits - 1))
-			return h, h.addr + uintptr(i)*goarch.PtrSize
-		}
-
-		// Skip words that we've already processed.
-		h.addr += h.valid * goarch.PtrSize
-		h.size -= h.valid * goarch.PtrSize
-		if h.size == 0 {
-			return h, 0 // no more pointers
-		}
-
-		// Grab more bits and try again.
-		h = heapBitsForAddr(h.addr, h.size)
-	}
-}
-
-// nextFast is like next, but can return 0 even when there are more pointers
-// to be found. Callers should call next if nextFast returns 0 as its second
-// return value.
-//
-//	if addr, h = h.nextFast(); addr == 0 {
-//	    if addr, h = h.next(); addr == 0 {
-//	        ... no more pointers ...
-//	    }
-//	}
-//	... process pointer at addr ...
-//
-// nextFast is designed to be inlineable.
-//
-//go:nosplit
-func (h heapBits) nextFast() (heapBits, uintptr) {
-	// TESTQ/JEQ
-	if h.mask == 0 {
-		return h, 0
-	}
-	// BSFQ
-	var i int
-	if goarch.PtrSize == 8 {
-		i = sys.TrailingZeros64(uint64(h.mask))
-	} else {
-		i = sys.TrailingZeros32(uint32(h.mask))
-	}
-	// BTCQ
-	h.mask ^= uintptr(1) << (i & (ptrBits - 1))
-	// LEAQ (XX)(XX*8)
-	return h, h.addr + uintptr(i)*goarch.PtrSize
-}
-
-// bulkBarrierPreWrite executes a write barrier
-// for every pointer slot in the memory range [src, src+size),
-// using pointer/scalar information from [dst, dst+size).
-// This executes the write barriers necessary before a memmove.
-// src, dst, and size must be pointer-aligned.
-// The range [dst, dst+size) must lie within a single object.
-// It does not perform the actual writes.
-//
-// As a special case, src == 0 indicates that this is being used for a
-// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
-// barrier.
-//
-// Callers should call bulkBarrierPreWrite immediately before
-// calling memmove(dst, src, size). This function is marked nosplit
-// to avoid being preempted; the GC must not stop the goroutine
-// between the memmove and the execution of the barriers.
-// The caller is also responsible for cgo pointer checks if this
-// may be writing Go pointers into non-Go memory.
-//
-// The pointer bitmap is not maintained for allocations containing
-// no pointers at all; any caller of bulkBarrierPreWrite must first
-// make sure the underlying allocation contains pointers, usually
-// by checking typ.PtrBytes.
-//
-// Callers must perform cgo checks if goexperiment.CgoCheck2.
-//
-//go:nosplit
-func bulkBarrierPreWrite(dst, src, size uintptr) {
-	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
-		throw("bulkBarrierPreWrite: unaligned arguments")
-	}
-	if !writeBarrier.needed {
-		return
-	}
-	if s := spanOf(dst); s == nil {
-		// If dst is a global, use the data or BSS bitmaps to
-		// execute write barriers.
-		for _, datap := range activeModules() {
-			if datap.data <= dst && dst < datap.edata {
-				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
-				return
-			}
-		}
-		for _, datap := range activeModules() {
-			if datap.bss <= dst && dst < datap.ebss {
-				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
-				return
-			}
-		}
-		return
-	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
-		// dst was heap memory at some point, but isn't now.
-		// It can't be a global. It must be either our stack,
-		// or in the case of direct channel sends, it could be
-		// another stack. Either way, no need for barriers.
-		// This will also catch if dst is in a freed span,
-		// though that should never have.
-		return
-	}
-
-	buf := &getg().m.p.ptr().wbBuf
-	h := heapBitsForAddr(dst, size)
-	if src == 0 {
-		for {
-			var addr uintptr
-			if h, addr = h.next(); addr == 0 {
-				break
-			}
-			dstx := (*uintptr)(unsafe.Pointer(addr))
-			p := buf.get1()
-			p[0] = *dstx
-		}
-	} else {
-		for {
-			var addr uintptr
-			if h, addr = h.next(); addr == 0 {
-				break
-			}
-			dstx := (*uintptr)(unsafe.Pointer(addr))
-			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
-			p := buf.get2()
-			p[0] = *dstx
-			p[1] = *srcx
-		}
-	}
-}
-
-// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
-// does not execute write barriers for [dst, dst+size).
-//
-// In addition to the requirements of bulkBarrierPreWrite
-// callers need to ensure [dst, dst+size) is zeroed.
-//
-// This is used for special cases where e.g. dst was just
-// created and zeroed with malloc.
-//
-//go:nosplit
-func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
-	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
-		throw("bulkBarrierPreWrite: unaligned arguments")
-	}
-	if !writeBarrier.needed {
-		return
-	}
-	buf := &getg().m.p.ptr().wbBuf
-	h := heapBitsForAddr(dst, size)
-	for {
-		var addr uintptr
-		if h, addr = h.next(); addr == 0 {
-			break
-		}
-		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
-		p := buf.get1()
-		p[0] = *srcx
-	}
-}
-
 // bulkBarrierBitmap executes write barriers for copying from [src,
 // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
 // assumed to start maskOffset bytes into the data covered by the
@@ -697,7 +417,7 @@
 		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
 		throw("runtime: invalid typeBitsBulkBarrier")
 	}
-	if !writeBarrier.needed {
+	if !writeBarrier.enabled {
 		return
 	}
 	ptrmask := typ.GCData
@@ -720,38 +440,11 @@
 	}
 }
 
-// initHeapBits initializes the heap bitmap for a span.
-// If this is a span of single pointer allocations, it initializes all
-// words to pointer. If force is true, clears all bits.
-func (s *mspan) initHeapBits(forceClear bool) {
-	if forceClear || s.spanclass.noscan() {
-		// Set all the pointer bits to zero. We do this once
-		// when the span is allocated so we don't have to do it
-		// for each object allocation.
-		base := s.base()
-		size := s.npages * pageSize
-		h := writeHeapBitsForAddr(base)
-		h.flush(base, size)
-		return
-	}
-	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
-	if !isPtrs {
-		return // nothing to do
-	}
-	h := writeHeapBitsForAddr(s.base())
-	size := s.npages * pageSize
-	nptrs := size / goarch.PtrSize
-	for i := uintptr(0); i < nptrs; i += ptrBits {
-		h = h.write(^uintptr(0), ptrBits)
-	}
-	h.flush(s.base(), size)
-}
-
 // countAlloc returns the number of objects allocated in span s by
-// scanning the allocation bitmap.
+// scanning the mark bitmap.
 func (s *mspan) countAlloc() int {
 	count := 0
-	bytes := divRoundUp(s.nelems, 8)
+	bytes := divRoundUp(uintptr(s.nelems), 8)
 	// Iterate over each 8-byte chunk and count allocations
 	// with an intrinsic. Note that newMarkBits guarantees that
 	// gcmarkBits will be 8-byte aligned, so we don't have to
@@ -767,146 +460,6 @@
 	return count
 }
 
-type writeHeapBits struct {
-	addr  uintptr // address that the low bit of mask represents the pointer state of.
-	mask  uintptr // some pointer bits starting at the address addr.
-	valid uintptr // number of bits in buf that are valid (including low)
-	low   uintptr // number of low-order bits to not overwrite
-}
-
-func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
-	// We start writing bits maybe in the middle of a heap bitmap word.
-	// Remember how many bits into the word we started, so we can be sure
-	// not to overwrite the previous bits.
-	h.low = addr / goarch.PtrSize % ptrBits
-
-	// round down to heap word that starts the bitmap word.
-	h.addr = addr - h.low*goarch.PtrSize
-
-	// We don't have any bits yet.
-	h.mask = 0
-	h.valid = h.low
-
-	return
-}
-
-// write appends the pointerness of the next valid pointer slots
-// using the low valid bits of bits. 1=pointer, 0=scalar.
-func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
-	if h.valid+valid <= ptrBits {
-		// Fast path - just accumulate the bits.
-		h.mask |= bits << h.valid
-		h.valid += valid
-		return h
-	}
-	// Too many bits to fit in this word. Write the current word
-	// out and move on to the next word.
-
-	data := h.mask | bits<<h.valid       // mask for this word
-	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
-	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them
-
-	// Flush mask to the memory bitmap.
-	// TODO: figure out how to cache arena lookup.
-	ai := arenaIndex(h.addr)
-	ha := mheap_.arenas[ai.l1()][ai.l2()]
-	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
-	m := uintptr(1)<<h.low - 1
-	ha.bitmap[idx] = ha.bitmap[idx]&m | data
-	// Note: no synchronization required for this write because
-	// the allocator has exclusive access to the page, and the bitmap
-	// entries are all for a single page. Also, visibility of these
-	// writes is guaranteed by the publication barrier in mallocgc.
-
-	// Clear noMorePtrs bit, since we're going to be writing bits
-	// into the following word.
-	ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)
-	// Note: same as above
-
-	// Move to next word of bitmap.
-	h.addr += ptrBits * goarch.PtrSize
-	h.low = 0
-	return h
-}
-
-// Add padding of size bytes.
-func (h writeHeapBits) pad(size uintptr) writeHeapBits {
-	if size == 0 {
-		return h
-	}
-	words := size / goarch.PtrSize
-	for words > ptrBits {
-		h = h.write(0, ptrBits)
-		words -= ptrBits
-	}
-	return h.write(0, words)
-}
-
-// Flush the bits that have been written, and add zeros as needed
-// to cover the full object [addr, addr+size).
-func (h writeHeapBits) flush(addr, size uintptr) {
-	// zeros counts the number of bits needed to represent the object minus the
-	// number of bits we've already written. This is the number of 0 bits
-	// that need to be added.
-	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid
-
-	// Add zero bits up to the bitmap word boundary
-	if zeros > 0 {
-		z := ptrBits - h.valid
-		if z > zeros {
-			z = zeros
-		}
-		h.valid += z
-		zeros -= z
-	}
-
-	// Find word in bitmap that we're going to write.
-	ai := arenaIndex(h.addr)
-	ha := mheap_.arenas[ai.l1()][ai.l2()]
-	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
-
-	// Write remaining bits.
-	if h.valid != h.low {
-		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
-		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
-		ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
-	}
-	if zeros == 0 {
-		return
-	}
-
-	// Record in the noMorePtrs map that there won't be any more 1 bits,
-	// so readers can stop early.
-	ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
-
-	// Advance to next bitmap word.
-	h.addr += ptrBits * goarch.PtrSize
-
-	// Continue on writing zeros for the rest of the object.
-	// For standard use of the ptr bits this is not required, as
-	// the bits are read from the beginning of the object. Some uses,
-	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
-	// start mid-object, so these writes are still required.
-	for {
-		// Write zero bits.
-		ai := arenaIndex(h.addr)
-		ha := mheap_.arenas[ai.l1()][ai.l2()]
-		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
-		if zeros < ptrBits {
-			ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
-			break
-		} else if zeros == ptrBits {
-			ha.bitmap[idx] = 0
-			break
-		} else {
-			ha.bitmap[idx] = 0
-			zeros -= ptrBits
-		}
-		ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
-		h.addr += ptrBits * goarch.PtrSize
-	}
-}
-
 // Read the bytes starting at the aligned pointer p into a uintptr.
 // Read is little-endian.
 func readUintptr(p *byte) uintptr {
@@ -920,197 +473,6 @@
 	return x
 }
 
-// heapBitsSetType records that the new allocation [x, x+size)
-// holds in [x, x+dataSize) one or more values of type typ.
-// (The number of values is given by dataSize / typ.Size.)
-// If dataSize < size, the fragment [x+dataSize, x+size) is
-// recorded as non-pointer data.
-// It is known that the type has pointers somewhere;
-// malloc does not call heapBitsSetType when there are no pointers,
-// because all free objects are marked as noscan during
-// heapBitsSweepSpan.
-//
-// There can only be one allocation from a given span active at a time,
-// and the bitmap for a span always falls on word boundaries,
-// so there are no write-write races for access to the heap bitmap.
-// Hence, heapBitsSetType can access the bitmap without atomics.
-//
-// There can be read-write races between heapBitsSetType and things
-// that read the heap bitmap like scanobject. However, since
-// heapBitsSetType is only used for objects that have not yet been
-// made reachable, readers will ignore bits being modified by this
-// function. This does mean this function cannot transiently modify
-// bits that belong to neighboring objects. Also, on weakly-ordered
-// machines, callers must execute a store/store (publication) barrier
-// between calling this function and making the object reachable.
-func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
-	const doubleCheck = false // slow but helpful; enable to test modifications to this code
-
-	if doubleCheck && dataSize%typ.Size_ != 0 {
-		throw("heapBitsSetType: dataSize not a multiple of typ.Size")
-	}
-
-	if goarch.PtrSize == 8 && size == goarch.PtrSize {
-		// It's one word and it has pointers, it must be a pointer.
-		// Since all allocated one-word objects are pointers
-		// (non-pointers are aggregated into tinySize allocations),
-		// (*mspan).initHeapBits sets the pointer bits for us.
-		// Nothing to do here.
-		if doubleCheck {
-			h, addr := heapBitsForAddr(x, size).next()
-			if addr != x {
-				throw("heapBitsSetType: pointer bit missing")
-			}
-			_, addr = h.next()
-			if addr != 0 {
-				throw("heapBitsSetType: second pointer bit found")
-			}
-		}
-		return
-	}
-
-	h := writeHeapBitsForAddr(x)
-
-	// Handle GC program.
-	if typ.Kind_&kindGCProg != 0 {
-		// Expand the gc program into the storage we're going to use for the actual object.
-		obj := (*uint8)(unsafe.Pointer(x))
-		n := runGCProg(addb(typ.GCData, 4), obj)
-		// Use the expanded program to set the heap bits.
-		for i := uintptr(0); true; i += typ.Size_ {
-			// Copy expanded program to heap bitmap.
-			p := obj
-			j := n
-			for j > 8 {
-				h = h.write(uintptr(*p), 8)
-				p = add1(p)
-				j -= 8
-			}
-			h = h.write(uintptr(*p), j)
-
-			if i+typ.Size_ == dataSize {
-				break // no padding after last element
-			}
-
-			// Pad with zeros to the start of the next element.
-			h = h.pad(typ.Size_ - n*goarch.PtrSize)
-		}
-
-		h.flush(x, size)
-
-		// Erase the expanded GC program.
-		memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
-		return
-	}
-
-	// Note about sizes:
-	//
-	// typ.Size is the number of words in the object,
-	// and typ.PtrBytes is the number of words in the prefix
-	// of the object that contains pointers. That is, the final
-	// typ.Size - typ.PtrBytes words contain no pointers.
-	// This allows optimization of a common pattern where
-	// an object has a small header followed by a large scalar
-	// buffer. If we know the pointers are over, we don't have
-	// to scan the buffer's heap bitmap at all.
-	// The 1-bit ptrmasks are sized to contain only bits for
-	// the typ.PtrBytes prefix, zero padded out to a full byte
-	// of bitmap. If there is more room in the allocated object,
-	// that space is pointerless. The noMorePtrs bitmap will prevent
-	// scanning large pointerless tails of an object.
-	//
-	// Replicated copies are not as nice: if there is an array of
-	// objects with scalar tails, all but the last tail does have to
-	// be initialized, because there is no way to say "skip forward".
-
-	ptrs := typ.PtrBytes / goarch.PtrSize
-	if typ.Size_ == dataSize { // Single element
-		if ptrs <= ptrBits { // Single small element
-			m := readUintptr(typ.GCData)
-			h = h.write(m, ptrs)
-		} else { // Single large element
-			p := typ.GCData
-			for {
-				h = h.write(readUintptr(p), ptrBits)
-				p = addb(p, ptrBits/8)
-				ptrs -= ptrBits
-				if ptrs <= ptrBits {
-					break
-				}
-			}
-			m := readUintptr(p)
-			h = h.write(m, ptrs)
-		}
-	} else { // Repeated element
-		words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
-		if words <= ptrBits {               // Repeated small element
-			n := dataSize / typ.Size_
-			m := readUintptr(typ.GCData)
-			// Make larger unit to repeat
-			for words <= ptrBits/2 {
-				if n&1 != 0 {
-					h = h.write(m, words)
-				}
-				n /= 2
-				m |= m << words
-				ptrs += words
-				words *= 2
-				if n == 1 {
-					break
-				}
-			}
-			for n > 1 {
-				h = h.write(m, words)
-				n--
-			}
-			h = h.write(m, ptrs)
-		} else { // Repeated large element
-			for i := uintptr(0); true; i += typ.Size_ {
-				p := typ.GCData
-				j := ptrs
-				for j > ptrBits {
-					h = h.write(readUintptr(p), ptrBits)
-					p = addb(p, ptrBits/8)
-					j -= ptrBits
-				}
-				m := readUintptr(p)
-				h = h.write(m, j)
-				if i+typ.Size_ == dataSize {
-					break // don't need the trailing nonptr bits on the last element.
-				}
-				// Pad with zeros to the start of the next element.
-				h = h.pad(typ.Size_ - typ.PtrBytes)
-			}
-		}
-	}
-	h.flush(x, size)
-
-	if doubleCheck {
-		h := heapBitsForAddr(x, size)
-		for i := uintptr(0); i < size; i += goarch.PtrSize {
-			// Compute the pointer bit we want at offset i.
-			want := false
-			if i < dataSize {
-				off := i % typ.Size_
-				if off < typ.PtrBytes {
-					j := off / goarch.PtrSize
-					want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
-				}
-			}
-			if want {
-				var addr uintptr
-				h, addr = h.next()
-				if addr != x+i {
-					throw("heapBitsSetType: pointer entry not correct")
-				}
-			}
-		}
-		if _, addr := h.next(); addr != 0 {
-			throw("heapBitsSetType: extra pointer")
-		}
-	}
-}
-
 var debugPtrmask struct {
 	lock mutex
 	data *byte
@@ -1411,91 +773,3 @@
 func reflect_gcbits(x any) []byte {
 	return getgcmask(x)
 }
-
-// Returns GC type info for the pointer stored in ep for testing.
-// If ep points to the stack, only static live information will be returned
-// (i.e. not for objects which are only dynamically live stack objects).
-func getgcmask(ep any) (mask []byte) {
-	e := *efaceOf(&ep)
-	p := e.data
-	t := e._type
-	// data or bss
-	for _, datap := range activeModules() {
-		// data
-		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
-			bitmap := datap.gcdatamask.bytedata
-			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
-			mask = make([]byte, n/goarch.PtrSize)
-			for i := uintptr(0); i < n; i += goarch.PtrSize {
-				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
-				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
-			}
-			return
-		}
-
-		// bss
-		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
-			bitmap := datap.gcbssmask.bytedata
-			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
-			mask = make([]byte, n/goarch.PtrSize)
-			for i := uintptr(0); i < n; i += goarch.PtrSize {
-				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
-				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
-			}
-			return
-		}
-	}
-
-	// heap
-	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
-		if s.spanclass.noscan() {
-			return nil
-		}
-		n := s.elemsize
-		hbits := heapBitsForAddr(base, n)
-		mask = make([]byte, n/goarch.PtrSize)
-		for {
-			var addr uintptr
-			if hbits, addr = hbits.next(); addr == 0 {
-				break
-			}
-			mask[(addr-base)/goarch.PtrSize] = 1
-		}
-		// Callers expect this mask to end at the last pointer.
-		for len(mask) > 0 && mask[len(mask)-1] == 0 {
-			mask = mask[:len(mask)-1]
-		}
-		return
-	}
-
-	// stack
-	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
-		found := false
-		var u unwinder
-		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
-			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
-				found = true
-				break
-			}
-		}
-		if found {
-			locals, _, _ := u.frame.getStackMap(nil, false)
-			if locals.n == 0 {
-				return
-			}
-			size := uintptr(locals.n) * goarch.PtrSize
-			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
-			mask = make([]byte, n/goarch.PtrSize)
-			for i := uintptr(0); i < n; i += goarch.PtrSize {
-				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
-				mask[i/goarch.PtrSize] = locals.ptrbit(off)
-			}
-		}
-		return
-	}
-
-	// otherwise, not something the GC knows about.
-	// possibly read-only data, like malloc(0).
-	// must not have pointers
-	return
-}
diff --git a/src/runtime/mbitmap_allocheaders.go b/src/runtime/mbitmap_allocheaders.go
new file mode 100644
index 0000000..1ec0553
--- /dev/null
+++ b/src/runtime/mbitmap_allocheaders.go
@@ -0,0 +1,1376 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.allocheaders
+
+// Garbage collector: type and heap bitmaps.
+//
+// Stack, data, and bss bitmaps
+//
+// Stack frames and global variables in the data and bss sections are
+// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
+// means the word is a live pointer to be visited by the GC (referred to
+// as "pointer"). A "0" bit means the word should be ignored by GC
+// (referred to as "scalar", though it could be a dead pointer value).
+//
+// Heap bitmaps
+//
+// The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
+// recording whether a pointer is stored in that word or not. This bitmap
+// is stored at the end of a span for small objects and is unrolled at
+// runtime from type metadata for all larger objects. Objects without
+// pointers have neither a bitmap nor associated type metadata.
+//
+// Bits in all cases correspond to words in little-endian order.
+//
+// For small objects, if s is the mspan for the span starting at "start",
+// then s.heapBits() returns a slice containing the bitmap for the whole span.
+// That is, s.heapBits()[0] holds the goarch.PtrSize*8 bits for the first
+// goarch.PtrSize*8 words from "start" through "start+63*ptrSize" in the span.
+// On a related note, small objects are always small enough that their bitmap
+// fits in goarch.PtrSize*8 bits, so writing out bitmap data takes two bitmap
+// writes at most (because object boundaries don't generally lie on
+// s.heapBits()[i] boundaries).
+//
+// For larger objects, if t is the type for the object starting at "start",
+// within some span whose mspan is s, then the bitmap at t.GCData is "tiled"
+// from "start" through "start+s.elemsize".
+// Specifically, the first bit of t.GCData corresponds to the word at "start",
+// the second to the word after "start", and so on up to t.PtrBytes. At t.PtrBytes,
+// we skip to "start+t.Size_" and begin again from there. This process is
+// repeated until we hit "start+s.elemsize".
+// This tiling algorithm supports array data, since the type always refers to
+// the element type of the array. Single objects are considered the same as
+// single-element arrays.
+// The tiling algorithm may scan data past the end of the compiler-recognized
+// object, but any unused data within the allocation slot (i.e. within s.elemsize)
+// is zeroed, so the GC just observes nil pointers.
+// Note that this "tiled" bitmap isn't stored anywhere; it is generated on-the-fly.
+//
+// For objects without their own span, the type metadata is stored in the first
+// word before the object at the beginning of the allocation slot. For objects
+// with their own span, the type metadata is stored in the mspan.
+//
+// The bitmap for small unallocated objects in scannable spans is not maintained
+// (can be junk).
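+//
+// A hedged, illustrative example of the tiling (the element type below is
+// hypothetical, not something defined in the runtime):
+//
+//	type T struct {
+//		p *int // word 0: pointer
+//		n int  // word 1: scalar
+//	}
+//
+// Here t.GCData is the one-byte mask 0b01, t.PtrBytes is 8, and t.Size_ is 16
+// on 64-bit. Tiling that mask over a 48-byte allocation slot holding three
+// elements yields the per-word pattern 1 0 1 0 1 0; any slot space past the
+// last element reads as all zeros.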
+
+package runtime
+
+import (
+	"internal/abi"
+	"internal/goarch"
+	"runtime/internal/sys"
+	"unsafe"
+)
+
+const (
+	// A malloc header is functionally a single type pointer, but
+	// we need to use 8 here to ensure 8-byte alignment of allocations
+	// on 32-bit platforms. It's wasteful, but a lot of code relies on
+	// 8-byte alignment for 8-byte atomics.
+	mallocHeaderSize = 8
+
+	// The minimum object size that has a malloc header, exclusive.
+	//
+	// The size of this value controls overheads from the malloc header.
+	// The minimum size is bound by writeHeapBitsSmall, which assumes that the
+	// pointer bitmap for objects of a size smaller than this doesn't cross
+	// more than one pointer-word boundary. This sets an upper-bound on this
+	// value at the number of bits in a uintptr, multiplied by the pointer
+	// size in bytes.
+	//
+	// We choose a value here that has a natural cutover point in terms of memory
+	// overheads. This value just happens to be the maximum possible value this
+	// can be.
+	//
+	// A span with heap bits in it will have 128 bytes of heap bits on 64-bit
+	// platforms, and 256 bytes of heap bits on 32-bit platforms. The first size
+	// class where malloc headers match this overhead for 64-bit platforms is
+	// 512 bytes (8 KiB / 512 bytes * 8 bytes-per-header = 128 bytes of overhead).
+	// On 32-bit platforms, this same point is the 256 byte size class
+	// (8 KiB / 256 bytes * 8 bytes-per-header = 256 bytes of overhead).
+	//
+	// Guaranteed to be exactly at a size class boundary. The reason this value is
+	// an exclusive minimum is subtle. Suppose we're allocating a 504-byte object
+	// and it's rounded up to 512 bytes for the size class. If minSizeForMallocHeader
+	// were 512 and an inclusive minimum, then comparing those two sizes (504 and 512)
+	// against minSizeForMallocHeader would produce different results. In other words, the comparison
+	// would not be invariant to size-class rounding. Eschewing this property means a
+	// more complex check or possibly storing additional state to determine whether a
+	// span has malloc headers.
+	minSizeForMallocHeader = goarch.PtrSize * ptrBits
+)
+
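+// Worked numbers for minSizeForMallocHeader (illustrative, 64-bit only):
+// goarch.PtrSize * ptrBits = 8 * 64 = 512 bytes. An 8 KiB span of 512-byte
+// objects holds 16 of them, so malloc headers would cost 16 * 8 = 128 bytes,
+// the same as the 128 bytes of heap bits a small-object span carries instead,
+// which is why the cutover sits at this size class.
+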
+// heapBitsInSpan returns true if the size of an object implies its ptr/scalar
+// data is stored at the end of the span, and is accessible via span.heapBits.
+//
+// Note: this works for both rounded-up sizes (span.elemsize) and unrounded
+// type sizes because minSizeForMallocHeader is guaranteed to be at a size
+// class boundary.
+//
+//go:nosplit
+func heapBitsInSpan(userSize uintptr) bool {
+	// N.B. minSizeForMallocHeader is an exclusive minimum so that this function is
+	// invariant under size-class rounding on its input.
+	return userSize <= minSizeForMallocHeader
+}
+
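+// For example (64-bit, where minSizeForMallocHeader is 512):
+// heapBitsInSpan(512) is true (no header; the bits live at the end of the
+// span), while heapBitsInSpan(513) is false (the object carries a malloc
+// header).
+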
+// heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
+type heapArenaPtrScalar struct {
+	// N.B. This is no longer necessary with allocation headers.
+}
+
+// typePointers is an iterator over the pointers in a heap object.
+//
+// Iteration through this type implements the tiling algorithm described at the
+// top of this file.
+type typePointers struct {
+	// elem is the address of the current array element of type typ being iterated over.
+	// Objects that are not arrays are treated as single-element arrays, in which case
+	// this value does not change.
+	elem uintptr
+
+	// addr is the address the iterator is currently working from and describes
+	// the address of the first word referenced by mask.
+	addr uintptr
+
+	// mask is a bitmask where each bit corresponds to pointer-words after addr.
+	// Bit 0 is the pointer-word at addr, Bit 1 is the next word, and so on.
+	// If a bit is 1, then there is a pointer at that word.
+	// nextFast and next mask out bits in this mask as their pointers are processed.
+	mask uintptr
+
+	// typ is a pointer to the type information for the heap object's type.
+	// This may be nil if the object is in a span where heapBitsInSpan(span.elemsize) is true.
+	typ *_type
+}
+
+// typePointersOf returns an iterator over all heap pointers in the range [addr, addr+size).
+//
+// addr and addr+size must be in the range [span.base(), span.limit).
+//
+// Note: addr+size must be passed as the limit argument to the iterator's next method on
+// each iteration. This slightly awkward API is to allow typePointers to be destructured
+// by the compiler.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
+	base := span.objBase(addr)
+	tp := span.typePointersOfUnchecked(base)
+	if base == addr && size == span.elemsize {
+		return tp
+	}
+	return tp.fastForward(addr-tp.addr, addr+size)
+}
+
+// typePointersOfUnchecked is like typePointersOf, but assumes addr is the base
+// of an allocation slot in a span (the start of the object if no header, the
+// header otherwise). It returns an iterator that generates all pointers
+// in the range [addr, addr+span.elemsize).
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
+	const doubleCheck = false
+	if doubleCheck && span.objBase(addr) != addr {
+		print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
+		throw("typePointersOfUnchecked consisting of non-base-address for object")
+	}
+
+	spc := span.spanclass
+	if spc.noscan() {
+		return typePointers{}
+	}
+	if heapBitsInSpan(span.elemsize) {
+		// Handle header-less objects.
+		return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
+	}
+
+	// All of these objects have a header.
+	var typ *_type
+	if spc.sizeclass() != 0 {
+		// Pull the allocation header from the first word of the object.
+		typ = *(**_type)(unsafe.Pointer(addr))
+		addr += mallocHeaderSize
+	} else {
+		typ = span.largeType
+	}
+	gcdata := typ.GCData
+	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
+}
+
+// typePointersOfType is like typePointersOf, but assumes addr points to one or more
+// contiguous instances of the provided type. The provided type must not be nil and
+// it must not have its type metadata encoded as a gcprog.
+//
+// It returns an iterator that tiles typ.GCData starting from addr. It's the caller's
+// responsibility to limit iteration.
+//
+// nosplit because its callers are nosplit and require all their callees to be nosplit.
+//
+//go:nosplit
+func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
+	const doubleCheck = false
+	if doubleCheck && (typ == nil || typ.Kind_&kindGCProg != 0) {
+		throw("bad type passed to typePointersOfType")
+	}
+	if span.spanclass.noscan() {
+		return typePointers{}
+	}
+	// Since we have the type, pretend we have a header.
+	gcdata := typ.GCData
+	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
+}
+
+// nextFast is the fast path of next. nextFast is written to be inlineable and,
+// as the name implies, fast.
+//
+// Callers that are performance-critical should iterate using the following
+// pattern:
+//
+//	for {
+//		var addr uintptr
+//		if tp, addr = tp.nextFast(); addr == 0 {
+//			if tp, addr = tp.next(limit); addr == 0 {
+//				break
+//			}
+//		}
+//		// Use addr.
+//		...
+//	}
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func (tp typePointers) nextFast() (typePointers, uintptr) {
+	// TESTQ/JEQ
+	if tp.mask == 0 {
+		return tp, 0
+	}
+	// BSFQ
+	var i int
+	if goarch.PtrSize == 8 {
+		i = sys.TrailingZeros64(uint64(tp.mask))
+	} else {
+		i = sys.TrailingZeros32(uint32(tp.mask))
+	}
+	// BTCQ
+	tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
+	// LEAQ (XX)(XX*8)
+	return tp, tp.addr + uintptr(i)*goarch.PtrSize
+}
+
+// next advances the pointers iterator, returning the updated iterator and
+// the address of the next pointer.
+//
+// limit must be the same each time it is passed to next.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
+	for {
+		if tp.mask != 0 {
+			return tp.nextFast()
+		}
+
+		// Stop if we don't actually have type information.
+		if tp.typ == nil {
+			return typePointers{}, 0
+		}
+
+		// Advance to the next element if necessary.
+		if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
+			tp.elem += tp.typ.Size_
+			tp.addr = tp.elem
+		} else {
+			tp.addr += ptrBits * goarch.PtrSize
+		}
+
+		// Check if we've exceeded the limit with the last update.
+		if tp.addr >= limit {
+			return typePointers{}, 0
+		}
+
+		// Grab more bits and try again.
+		tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
+		if tp.addr+goarch.PtrSize*ptrBits > limit {
+			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
+			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
+		}
+	}
+}
+
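+// A minimal usage sketch (not code from this file) for visiting every pointer
+// slot of the object whose allocation slot starts at base, using the plain
+// next path only:
+//
+//	tp := s.typePointersOfUnchecked(base)
+//	limit := base + s.elemsize
+//	for {
+//		var addr uintptr
+//		if tp, addr = tp.next(limit); addr == 0 {
+//			break
+//		}
+//		// addr is the address of a pointer-word inside the object.
+//	}
+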
+// fastForward moves the iterator forward by n bytes. n must be a multiple
+// of goarch.PtrSize. limit must be the same limit passed to next for this
+// iterator.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func (tp typePointers) fastForward(n, limit uintptr) typePointers {
+	// Basic bounds check.
+	target := tp.addr + n
+	if target >= limit {
+		return typePointers{}
+	}
+	if tp.typ == nil {
+		// Handle small objects.
+		// Clear any bits before the target address.
+		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
+		// Clear any bits past the limit.
+		if tp.addr+goarch.PtrSize*ptrBits > limit {
+			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
+			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
+		}
+		return tp
+	}
+
+	// Move up elem and addr.
+	// Offsets within an element are always at a ptrBits*goarch.PtrSize boundary.
+	if n >= tp.typ.Size_ {
+		// elem needs to be moved to the element containing
+		// tp.addr + n.
+		oldelem := tp.elem
+		tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
+		tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
+	} else {
+		tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
+	}
+
+	if tp.addr-tp.elem >= tp.typ.PtrBytes {
+		// We're starting in the non-pointer area of an array.
+		// Move up to the next element.
+		tp.elem += tp.typ.Size_
+		tp.addr = tp.elem
+		tp.mask = readUintptr(tp.typ.GCData)
+
+		// We may have exceeded the limit after this. Bail just like next does.
+		if tp.addr >= limit {
+			return typePointers{}
+		}
+	} else {
+		// Grab the mask, but then clear any bits before the target address and any
+		// bits over the limit.
+		tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
+		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
+	}
+	if tp.addr+goarch.PtrSize*ptrBits > limit {
+		bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
+		tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
+	}
+	return tp
+}
+
+// objBase returns the base pointer for the object containing addr in span.
+//
+// Assumes that addr points into a valid part of span (span.base() <= addr < span.limit).
+//
+//go:nosplit
+func (span *mspan) objBase(addr uintptr) uintptr {
+	return span.base() + span.objIndex(addr)*span.elemsize
+}
+
+// bulkBarrierPreWrite executes a write barrier
+// for every pointer slot in the memory range [src, src+size),
+// using pointer/scalar information from [dst, dst+size).
+// This executes the write barriers necessary before a memmove.
+// src, dst, and size must be pointer-aligned.
+// The range [dst, dst+size) must lie within a single object.
+// It does not perform the actual writes.
+//
+// As a special case, src == 0 indicates that this is being used for a
+// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
+// barrier.
+//
+// Callers should call bulkBarrierPreWrite immediately before
+// calling memmove(dst, src, size). This function is marked nosplit
+// to avoid being preempted; the GC must not stop the goroutine
+// between the memmove and the execution of the barriers.
+// The caller is also responsible for cgo pointer checks if this
+// may be writing Go pointers into non-Go memory.
+//
+// Pointer data is not maintained for allocations containing
+// no pointers at all; any caller of bulkBarrierPreWrite must first
+// make sure the underlying allocation contains pointers, usually
+// by checking typ.PtrBytes.
+//
+// The typ argument is the type of the space at src and dst (and the
+// element type if src and dst refer to arrays) and it is optional.
+// If typ is nil, the barrier will still behave as expected and typ
+// is used purely as an optimization. However, it must be used with
+// care.
+//
+// If typ is not nil, then src and dst must point to one or more values
+// of type typ. The caller must ensure that the ranges [src, src+size)
+// and [dst, dst+size) each refer to one or more whole values of type typ
+// (leaving off the pointerless tail of the space is OK). If this
+// precondition is not followed, this function will fail to scan the
+// right pointers.
+//
+// When in doubt, pass nil for typ. That is safe and will always work.
+//
+// Callers must perform cgo checks if goexperiment.CgoCheck2.
+//
+//go:nosplit
+func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
+	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
+		throw("bulkBarrierPreWrite: unaligned arguments")
+	}
+	if !writeBarrier.enabled {
+		return
+	}
+	s := spanOf(dst)
+	if s == nil {
+		// If dst is a global, use the data or BSS bitmaps to
+		// execute write barriers.
+		for _, datap := range activeModules() {
+			if datap.data <= dst && dst < datap.edata {
+				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
+				return
+			}
+		}
+		for _, datap := range activeModules() {
+			if datap.bss <= dst && dst < datap.ebss {
+				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
+				return
+			}
+		}
+		return
+	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
+		// dst was heap memory at some point, but isn't now.
+		// It can't be a global. It must be either our stack,
+		// or in the case of direct channel sends, it could be
+		// another stack. Either way, no need for barriers.
+		// This will also catch if dst is in a freed span,
+		// though that should never happen.
+		return
+	}
+	buf := &getg().m.p.ptr().wbBuf
+
+	// Double-check that the bitmaps generated in the two possible paths match.
+	const doubleCheck = false
+	if doubleCheck {
+		doubleCheckTypePointersOfType(s, typ, dst, size)
+	}
+
+	var tp typePointers
+	if typ != nil && typ.Kind_&kindGCProg == 0 {
+		tp = s.typePointersOfType(typ, dst)
+	} else {
+		tp = s.typePointersOf(dst, size)
+	}
+	if src == 0 {
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(dst + size); addr == 0 {
+				break
+			}
+			dstx := (*uintptr)(unsafe.Pointer(addr))
+			p := buf.get1()
+			p[0] = *dstx
+		}
+	} else {
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(dst + size); addr == 0 {
+				break
+			}
+			dstx := (*uintptr)(unsafe.Pointer(addr))
+			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
+			p := buf.get2()
+			p[0] = *dstx
+			p[1] = *srcx
+		}
+	}
+}
+
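+// A hedged sketch of the calling pattern described above, as a typed-copy
+// helper might use it (the dst == src fast path and the cgo checks that real
+// callers also perform are omitted):
+//
+//	if writeBarrier.enabled && typ.PtrBytes != 0 {
+//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
+//	}
+//	memmove(dst, src, typ.Size_)
+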
+// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
+// does not execute write barriers for [dst, dst+size).
+//
+// In addition to the requirements of bulkBarrierPreWrite
+// callers need to ensure [dst, dst+size) is zeroed.
+//
+// This is used for special cases where e.g. dst was just
+// created and zeroed with malloc.
+//
+// The type of the space can be provided purely as an optimization.
+// See bulkBarrierPreWrite's comment for more details -- use this
+// optimization with great care.
+//
+//go:nosplit
+func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
+	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
+		throw("bulkBarrierPreWrite: unaligned arguments")
+	}
+	if !writeBarrier.enabled {
+		return
+	}
+	buf := &getg().m.p.ptr().wbBuf
+	s := spanOf(dst)
+
+	// Double-check that the bitmaps generated in the two possible paths match.
+	const doubleCheck = false
+	if doubleCheck {
+		doubleCheckTypePointersOfType(s, typ, dst, size)
+	}
+
+	var tp typePointers
+	if typ != nil && typ.Kind_&kindGCProg == 0 {
+		tp = s.typePointersOfType(typ, dst)
+	} else {
+		tp = s.typePointersOf(dst, size)
+	}
+	for {
+		var addr uintptr
+		if tp, addr = tp.next(dst + size); addr == 0 {
+			break
+		}
+		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
+		p := buf.get1()
+		p[0] = *srcx
+	}
+}
+
+// initHeapBits initializes the heap bitmap for a span.
+//
+// TODO(mknyszek): This should set the heap bits for single pointer
+// allocations eagerly to avoid calling heapSetType at allocation time,
+// just to write one bit.
+func (s *mspan) initHeapBits(forceClear bool) {
+	if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
+		b := s.heapBits()
+		for i := range b {
+			b[i] = 0
+		}
+	}
+}
+
+// bswapIfBigEndian swaps the byte order of the uintptr on goarch.BigEndian platforms,
+// and leaves it alone elsewhere.
+func bswapIfBigEndian(x uintptr) uintptr {
+	if goarch.BigEndian {
+		if goarch.PtrSize == 8 {
+			return uintptr(sys.Bswap64(uint64(x)))
+		}
+		return uintptr(sys.Bswap32(uint32(x)))
+	}
+	return x
+}
+
+type writeUserArenaHeapBits struct {
+	offset uintptr // offset in span that the low bit of mask represents the pointer state of.
+	mask   uintptr // some pointer bits starting at the address addr.
+	valid  uintptr // number of bits in buf that are valid (including low)
+	low    uintptr // number of low-order bits to not overwrite
+}
+
+func (s *mspan) writeUserArenaHeapBits(addr uintptr) (h writeUserArenaHeapBits) {
+	offset := addr - s.base()
+
+	// We may start writing bits in the middle of a heap bitmap word.
+	// Remember how many bits into the word we started, so we can be sure
+	// not to overwrite the previous bits.
+	h.low = offset / goarch.PtrSize % ptrBits
+
+	// round down to heap word that starts the bitmap word.
+	h.offset = offset - h.low*goarch.PtrSize
+
+	// We don't have any bits yet.
+	h.mask = 0
+	h.valid = h.low
+
+	return
+}
+
+// write appends the pointerness of the next valid pointer slots
+// using the low valid bits of bits. 1=pointer, 0=scalar.
+func (h writeUserArenaHeapBits) write(s *mspan, bits, valid uintptr) writeUserArenaHeapBits {
+	if h.valid+valid <= ptrBits {
+		// Fast path - just accumulate the bits.
+		h.mask |= bits << h.valid
+		h.valid += valid
+		return h
+	}
+	// Too many bits to fit in this word. Write the current word
+	// out and move on to the next word.
+
+	data := h.mask | bits<<h.valid       // mask for this word
+	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
+	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them
+
+	// Flush mask to the memory bitmap.
+	idx := h.offset / (ptrBits * goarch.PtrSize)
+	m := uintptr(1)<<h.low - 1
+	bitmap := s.heapBits()
+	bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx])&m | data)
+	// Note: no synchronization required for this write because
+	// the allocator has exclusive access to the page, and the bitmap
+	// entries are all for a single page. Also, visibility of these
+	// writes is guaranteed by the publication barrier in mallocgc.
+
+	// Move to next word of bitmap.
+	h.offset += ptrBits * goarch.PtrSize
+	h.low = 0
+	return h
+}
+
+// Add padding of size bytes.
+func (h writeUserArenaHeapBits) pad(s *mspan, size uintptr) writeUserArenaHeapBits {
+	if size == 0 {
+		return h
+	}
+	words := size / goarch.PtrSize
+	for words > ptrBits {
+		h = h.write(s, 0, ptrBits)
+		words -= ptrBits
+	}
+	return h.write(s, 0, words)
+}
+
+// Flush the bits that have been written, and add zeros as needed
+// to cover the full object [addr, addr+size).
+func (h writeUserArenaHeapBits) flush(s *mspan, addr, size uintptr) {
+	offset := addr - s.base()
+
+	// zeros counts the number of bits needed to represent the object minus the
+	// number of bits we've already written. This is the number of 0 bits
+	// that need to be added.
+	zeros := (offset+size-h.offset)/goarch.PtrSize - h.valid
+
+	// Add zero bits up to the bitmap word boundary
+	if zeros > 0 {
+		z := ptrBits - h.valid
+		if z > zeros {
+			z = zeros
+		}
+		h.valid += z
+		zeros -= z
+	}
+
+	// Find word in bitmap that we're going to write.
+	bitmap := s.heapBits()
+	idx := h.offset / (ptrBits * goarch.PtrSize)
+
+	// Write remaining bits.
+	if h.valid != h.low {
+		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
+		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
+		bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx])&m | h.mask)
+	}
+	if zeros == 0 {
+		return
+	}
+
+	// Advance to next bitmap word.
+	h.offset += ptrBits * goarch.PtrSize
+
+	// Continue on writing zeros for the rest of the object.
+	// For standard use of the ptr bits this is not required, as
+	// the bits are read from the beginning of the object. Some uses,
+	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
+	// start mid-object, so these writes are still required.
+	for {
+		// Write zero bits.
+		idx := h.offset / (ptrBits * goarch.PtrSize)
+		if zeros < ptrBits {
+			bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx]) &^ (uintptr(1)<<zeros - 1))
+			break
+		} else if zeros == ptrBits {
+			bitmap[idx] = 0
+			break
+		} else {
+			bitmap[idx] = 0
+			zeros -= ptrBits
+		}
+		h.offset += ptrBits * goarch.PtrSize
+	}
+}
+
+// heapBits returns the heap ptr/scalar bits stored at the end of the span for
+// small object spans and heap arena spans.
+//
+// Note that the uintptr of each element means something different for small object
+// spans and for heap arena spans. Small object spans are easy: they're never interpreted
+// as anything but uintptr, so they're immune to differences in endianness. However, the
+// heapBits for user arena spans is exposed through a dummy type descriptor, so the byte
+// ordering needs to match the same byte ordering the compiler would emit. The compiler always
+// emits the bitmap data in little endian byte ordering, so on big endian platforms these
+// uintptrs will have their byte orders swapped from what they normally would be.
+//
+// heapBitsInSpan(span.elemsize) or span.isUserArenaChunk must be true.
+//
+//go:nosplit
+func (span *mspan) heapBits() []uintptr {
+	const doubleCheck = false
+
+	if doubleCheck && !span.isUserArenaChunk {
+		if span.spanclass.noscan() {
+			throw("heapBits called for noscan")
+		}
+		if span.elemsize > minSizeForMallocHeader {
+			throw("heapBits called for span class that should have a malloc header")
+		}
+	}
+	// Find the bitmap at the end of the span.
+	//
+	// Nearly every span with heap bits is exactly one page in size. Arenas are the only exception.
+	if span.npages == 1 {
+		// This will be inlined and constant-folded down.
+		return heapBitsSlice(span.base(), pageSize)
+	}
+	return heapBitsSlice(span.base(), span.npages*pageSize)
+}
+
+// Helper for constructing a slice for the span's heap bits.
+//
+//go:nosplit
+func heapBitsSlice(spanBase, spanSize uintptr) []uintptr {
+	bitmapSize := spanSize / goarch.PtrSize / 8
+	elems := int(bitmapSize / goarch.PtrSize)
+	var sl notInHeapSlice
+	sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems}
+	return *(*[]uintptr)(unsafe.Pointer(&sl))
+}
+
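+// For a sense of scale (illustrative, 64-bit): a one-page (8 KiB) span has
+// bitmapSize = 8192/8/8 = 128 bytes of heap bits, i.e. a 16-element []uintptr
+// occupying the last 128 bytes of the span.
+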
+// heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits.
+//
+// addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize)
+// must be true.
+//
+//go:nosplit
+func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
+	spanSize := span.npages * pageSize
+	bitmapSize := spanSize / goarch.PtrSize / 8
+	hbits := (*byte)(unsafe.Pointer(span.base() + spanSize - bitmapSize))
+
+	// These objects are always small enough that their bitmaps
+	// fit in a single word, so just load the word or two we need.
+	//
+	// Mirrors mspan.writeHeapBitsSmall.
+	//
+	// We should be using heapBits(), but unfortunately it introduces
+	// both bounds check panics and calls to throw, which cause us to exceed
+	// the nosplit limit in quite a few cases.
+	i := (addr - span.base()) / goarch.PtrSize / ptrBits
+	j := (addr - span.base()) / goarch.PtrSize % ptrBits
+	bits := span.elemsize / goarch.PtrSize
+	word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
+	word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
+
+	var read uintptr
+	if j+bits > ptrBits {
+		// Two reads.
+		bits0 := ptrBits - j
+		bits1 := bits - bits0
+		read = *word0 >> j
+		read |= (*word1 & ((1 << bits1) - 1)) << bits0
+	} else {
+		// One read.
+		read = (*word0 >> j) & ((1 << bits) - 1)
+	}
+	return read
+}
+
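+// A hedged, concrete example of the reads above (64-bit, 48-byte size class):
+// the object at index 10 in its span starts at (addr-span.base())/goarch.PtrSize
+// = 480/8 = 60, so i = 0, j = 60, and bits = 6. Since j+bits = 66 > 64, two
+// reads are needed:
+//
+//	read = *word0>>60 | (*word1&0b11)<<4
+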
+// writeHeapBitsSmall writes the heap bits for small objects whose ptr/scalar data is
+// stored as a bitmap at the end of the span.
+//
+// Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span.
+// heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_.
+//
+//go:nosplit
+func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
+	// The objects here are always really small, so a single load is sufficient.
+	src0 := readUintptr(typ.GCData)
+
+	// Create repetitions of the bitmap if we have a small array.
+	bits := span.elemsize / goarch.PtrSize
+	scanSize = typ.PtrBytes
+	src := src0
+	switch typ.Size_ {
+	case goarch.PtrSize:
+		src = (1 << (dataSize / goarch.PtrSize)) - 1
+	default:
+		for i := typ.Size_; i < dataSize; i += typ.Size_ {
+			src |= src0 << (i / goarch.PtrSize)
+			scanSize += typ.Size_
+		}
+	}
+
+	// Since we're never writing more than one uintptr's worth of bits, we're either going
+	// to do one or two writes.
+	dst := span.heapBits()
+	o := (x - span.base()) / goarch.PtrSize
+	i := o / ptrBits
+	j := o % ptrBits
+	if j+bits > ptrBits {
+		// Two writes.
+		bits0 := ptrBits - j
+		bits1 := bits - bits0
+		dst[i+0] = dst[i+0]&(^uintptr(0)>>bits0) | (src << j)
+		dst[i+1] = dst[i+1]&^((1<<bits1)-1) | (src >> bits0)
+	} else {
+		// One write.
+		dst[i] = (dst[i] &^ (((1 << bits) - 1) << j)) | (src << j)
+	}
+
+	const doubleCheck = false
+	if doubleCheck {
+		srcRead := span.heapBitsSmallForAddr(x)
+		if srcRead != src {
+			print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
+			print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
+			print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
+			throw("bad pointer bits written for small object")
+		}
+	}
+	return
+}
+
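+// A worked example of the replication above (the element type is hypothetical):
+// for a 16-byte element made of one pointer word followed by one scalar word
+// on 64-bit, src0 = 0b01. Allocating three of them (dataSize = 48) runs the
+// loop for i = 16 and i = 32, giving
+//
+//	src = 0b01 | 0b01<<2 | 0b01<<4 = 0b010101
+//
+// one bit per pointer-sized word of the array.
+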
+// For !goexperiment.AllocHeaders.
+func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
+}
+
+// heapSetType records that the new allocation [x, x+size)
+// holds in [x, x+dataSize) one or more values of type typ.
+// (The number of values is given by dataSize / typ.Size.)
+// If dataSize < size, the fragment [x+dataSize, x+size) is
+// recorded as non-pointer data.
+// It is known that the type has pointers somewhere;
+// malloc does not call heapSetType when there are no pointers.
+//
+// There can be read-write races between heapSetType and things
+// that read the heap metadata like scanobject. However, since
+// heapSetType is only used for objects that have not yet been
+// made reachable, readers will ignore bits being modified by this
+// function. This does mean this function cannot transiently modify
+// shared memory that belongs to neighboring objects. Also, on weakly-ordered
+// machines, callers must execute a store/store (publication) barrier
+// between calling this function and making the object reachable.
+func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {
+	const doubleCheck = false
+
+	gctyp := typ
+	if header == nil {
+		if doubleCheck && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
+			throw("tried to write heap bits, but no heap bits in span")
+		}
+		// Handle the case where we have no malloc header.
+		scanSize = span.writeHeapBitsSmall(x, dataSize, typ)
+	} else {
+		if typ.Kind_&kindGCProg != 0 {
+			// Allocate space to unroll the gcprog. This space will consist of
+			// a dummy _type value and the unrolled gcprog. The dummy _type will
+			// refer to the bitmap, and the mspan will refer to the dummy _type.
+			if span.spanclass.sizeclass() != 0 {
+				throw("GCProg for type that isn't large")
+			}
+			spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
+			heapBitsOff := spaceNeeded
+			spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize)
+			npages := alignUp(spaceNeeded, pageSize) / pageSize
+			var progSpan *mspan
+			systemstack(func() {
+				progSpan = mheap_.allocManual(npages, spanAllocPtrScalarBits)
+				memclrNoHeapPointers(unsafe.Pointer(progSpan.base()), progSpan.npages*pageSize)
+			})
+			// Write a dummy _type in the new space.
+			//
+			// We only need to write size, PtrBytes, and GCData, since that's all
+			// the GC cares about.
+			gctyp = (*_type)(unsafe.Pointer(progSpan.base()))
+			gctyp.Size_ = typ.Size_
+			gctyp.PtrBytes = typ.PtrBytes
+			gctyp.GCData = (*byte)(add(unsafe.Pointer(progSpan.base()), heapBitsOff))
+			gctyp.TFlag = abi.TFlagUnrolledBitmap
+
+			// Expand the GC program into space reserved at the end of the new span.
+			runGCProg(addb(typ.GCData, 4), gctyp.GCData)
+		}
+
+		// Write out the header.
+		*header = gctyp
+		scanSize = span.elemsize
+	}
+
+	if doubleCheck {
+		doubleCheckHeapPointers(x, dataSize, gctyp, header, span)
+
+		// To exercise the less common path more often, generate
+		// a random interior pointer and make sure iterating from
+		// that point works correctly too.
+		maxIterBytes := span.elemsize
+		if header == nil {
+			maxIterBytes = dataSize
+		}
+		off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
+		size := dataSize - off
+		if size == 0 {
+			off -= goarch.PtrSize
+			size += goarch.PtrSize
+		}
+		interior := x + off
+		size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
+		if size == 0 {
+			size = goarch.PtrSize
+		}
+		// Round size up to a multiple of the type's size.
+		size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
+		if interior+size > x+maxIterBytes {
+			size = x + maxIterBytes - interior
+		}
+		doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
+	}
+	return
+}
+
+func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
+	// Check that scanning the full object works.
+	tp := span.typePointersOfUnchecked(span.objBase(x))
+	maxIterBytes := span.elemsize
+	if header == nil {
+		maxIterBytes = dataSize
+	}
+	bad := false
+	for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
+		// Compute the pointer bit we want at offset i.
+		want := false
+		if i < span.elemsize {
+			off := i % typ.Size_
+			if off < typ.PtrBytes {
+				j := off / goarch.PtrSize
+				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
+			}
+		}
+		if want {
+			var addr uintptr
+			tp, addr = tp.next(x + span.elemsize)
+			if addr == 0 {
+				println("runtime: found bad iterator")
+			}
+			if addr != x+i {
+				print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
+				bad = true
+			}
+		}
+	}
+	if !bad {
+		var addr uintptr
+		tp, addr = tp.next(x + span.elemsize)
+		if addr == 0 {
+			return
+		}
+		println("runtime: extra pointer:", hex(addr))
+	}
+	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " hasGCProg=", typ.Kind_&kindGCProg != 0, "\n")
+	print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
+	print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
+	print("runtime: limit=", hex(x+span.elemsize), "\n")
+	tp = span.typePointersOfUnchecked(x)
+	dumpTypePointers(tp)
+	for {
+		var addr uintptr
+		if tp, addr = tp.next(x + span.elemsize); addr == 0 {
+			println("runtime: would've stopped here")
+			dumpTypePointers(tp)
+			break
+		}
+		print("runtime: addr=", hex(addr), "\n")
+		dumpTypePointers(tp)
+	}
+	throw("heapSetType: pointer entry not correct")
+}
+
+func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
+	bad := false
+	if interior < x {
+		print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
+		throw("found bad interior pointer")
+	}
+	off := interior - x
+	tp := span.typePointersOf(interior, size)
+	for i := off; i < off+size; i += goarch.PtrSize {
+		// Compute the pointer bit we want at offset i.
+		want := false
+		if i < span.elemsize {
+			off := i % typ.Size_
+			if off < typ.PtrBytes {
+				j := off / goarch.PtrSize
+				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
+			}
+		}
+		if want {
+			var addr uintptr
+			tp, addr = tp.next(interior + size)
+			if addr == 0 {
+				println("runtime: found bad iterator")
+				bad = true
+			}
+			if addr != x+i {
+				print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
+				bad = true
+			}
+		}
+	}
+	if !bad {
+		var addr uintptr
+		tp, addr = tp.next(interior + size)
+		if addr == 0 {
+			return
+		}
+		println("runtime: extra pointer:", hex(addr))
+	}
+	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
+	print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
+	print("runtime: limit=", hex(interior+size), "\n")
+	tp = span.typePointersOf(interior, size)
+	dumpTypePointers(tp)
+	for {
+		var addr uintptr
+		if tp, addr = tp.next(interior + size); addr == 0 {
+			println("runtime: would've stopped here")
+			dumpTypePointers(tp)
+			break
+		}
+		print("runtime: addr=", hex(addr), "\n")
+		dumpTypePointers(tp)
+	}
+
+	print("runtime: want: ")
+	for i := off; i < off+size; i += goarch.PtrSize {
+		// Compute the pointer bit we want at offset i.
+		want := false
+		if i < dataSize {
+			off := i % typ.Size_
+			if off < typ.PtrBytes {
+				j := off / goarch.PtrSize
+				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
+			}
+		}
+		if want {
+			print("1")
+		} else {
+			print("0")
+		}
+	}
+	println()
+
+	throw("heapSetType: pointer entry not correct")
+}
+
+//go:nosplit
+func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
+	if typ == nil || typ.Kind_&kindGCProg != 0 {
+		return
+	}
+	if typ.Kind_&kindMask == kindInterface {
+		// Interfaces are unfortunately inconsistently handled
+		// when it comes to the type pointer, so it's easy to
+		// produce a lot of false positives here.
+		return
+	}
+	tp0 := s.typePointersOfType(typ, addr)
+	tp1 := s.typePointersOf(addr, size)
+	failed := false
+	for {
+		var addr0, addr1 uintptr
+		tp0, addr0 = tp0.next(addr + size)
+		tp1, addr1 = tp1.next(addr + size)
+		if addr0 != addr1 {
+			failed = true
+			break
+		}
+		if addr0 == 0 {
+			break
+		}
+	}
+	if failed {
+		tp0 := s.typePointersOfType(typ, addr)
+		tp1 := s.typePointersOf(addr, size)
+		print("runtime: addr=", hex(addr), " size=", size, "\n")
+		print("runtime: type=", toRType(typ).string(), "\n")
+		dumpTypePointers(tp0)
+		dumpTypePointers(tp1)
+		for {
+			var addr0, addr1 uintptr
+			tp0, addr0 = tp0.next(addr + size)
+			tp1, addr1 = tp1.next(addr + size)
+			print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
+			if addr0 == 0 && addr1 == 0 {
+				break
+			}
+		}
+		throw("mismatch between typePointersOfType and typePointersOf")
+	}
+}
+
+func dumpTypePointers(tp typePointers) {
+	print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
+	print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
+	for i := uintptr(0); i < ptrBits; i++ {
+		if tp.mask&(uintptr(1)<<i) != 0 {
+			print("1")
+		} else {
+			print("0")
+		}
+	}
+	println()
+}
+
+// Testing.
+
+// Returns GC type info for the pointer stored in ep for testing.
+// If ep points to the stack, only static live information will be returned
+// (i.e. not for objects which are only dynamically live stack objects).
+func getgcmask(ep any) (mask []byte) {
+	e := *efaceOf(&ep)
+	p := e.data
+	t := e._type
+
+	var et *_type
+	if t.Kind_&kindMask != kindPtr {
+		throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
+	}
+	et = (*ptrtype)(unsafe.Pointer(t)).Elem
+
+	// data or bss
+	for _, datap := range activeModules() {
+		// data
+		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
+			bitmap := datap.gcdatamask.bytedata
+			n := et.Size_
+			mask = make([]byte, n/goarch.PtrSize)
+			for i := uintptr(0); i < n; i += goarch.PtrSize {
+				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
+				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+			}
+			return
+		}
+
+		// bss
+		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
+			bitmap := datap.gcbssmask.bytedata
+			n := et.Size_
+			mask = make([]byte, n/goarch.PtrSize)
+			for i := uintptr(0); i < n; i += goarch.PtrSize {
+				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
+				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+			}
+			return
+		}
+	}
+
+	// heap
+	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
+		if s.spanclass.noscan() {
+			return nil
+		}
+		limit := base + s.elemsize
+
+		// Move the base up to the iterator's start, because
+		// we want to hide evidence of a malloc header from the
+		// caller.
+		tp := s.typePointersOfUnchecked(base)
+		base = tp.addr
+
+		// Unroll the full bitmap the GC would actually observe.
+		maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(limit); addr == 0 {
+				break
+			}
+			maskFromHeap[(addr-base)/goarch.PtrSize] = 1
+		}
+
+		// Double-check that every part of the ptr/scalar we're not
+		// showing the caller is zeroed. This keeps us honest that
+		// that information is actually irrelevant.
+		for i := limit; i < s.elemsize; i++ {
+			if *(*byte)(unsafe.Pointer(i)) != 0 {
+				throw("found non-zeroed tail of allocation")
+			}
+		}
+
+		// Callers (and a check we're about to run) expect this mask
+		// to end at the last pointer.
+		for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
+			maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
+		}
+
+		if et.Kind_&kindGCProg == 0 {
+			// Unroll again, but this time from the type information.
+			maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
+			tp = s.typePointersOfType(et, base)
+			for {
+				var addr uintptr
+				if tp, addr = tp.next(limit); addr == 0 {
+					break
+				}
+				maskFromType[(addr-base)/goarch.PtrSize] = 1
+			}
+
+			// Validate that the prefix of maskFromType is equal to
+			// maskFromHeap. maskFromType may contain more pointers than
+			// maskFromHeap produces because maskFromHeap may be able to
+			// get exact type information for certain classes of objects.
+			// With maskFromType, we're always just tiling the type bitmap
+			// through to the elemsize.
+			//
+			// It's OK if maskFromType has pointers in elemsize that extend
+			// past the actual populated space; we checked above that all
+			// that space is zeroed, so the GC will just see nil pointers.
+			differs := false
+			for i := range maskFromHeap {
+				if maskFromHeap[i] != maskFromType[i] {
+					differs = true
+					break
+				}
+			}
+
+			if differs {
+				print("runtime: heap mask=")
+				for _, b := range maskFromHeap {
+					print(b)
+				}
+				println()
+				print("runtime: type mask=")
+				for _, b := range maskFromType {
+					print(b)
+				}
+				println()
+				print("runtime: type=", toRType(et).string(), "\n")
+				throw("found two different masks from two different methods")
+			}
+		}
+
+		// Select the heap mask to return. We may not have a type mask.
+		mask = maskFromHeap
+
+		// Make sure we keep ep alive. We may have stopped referencing
+		// ep's data pointer sometime before this point and it's possible
+		// for that memory to get freed.
+		KeepAlive(ep)
+		return
+	}
+
+	// stack
+	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
+		found := false
+		var u unwinder
+		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
+			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
+				found = true
+				break
+			}
+		}
+		if found {
+			locals, _, _ := u.frame.getStackMap(false)
+			if locals.n == 0 {
+				return
+			}
+			size := uintptr(locals.n) * goarch.PtrSize
+			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
+			mask = make([]byte, n/goarch.PtrSize)
+			for i := uintptr(0); i < n; i += goarch.PtrSize {
+				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
+				mask[i/goarch.PtrSize] = locals.ptrbit(off)
+			}
+		}
+		return
+	}
+
+	// otherwise, not something the GC knows about.
+	// possibly read-only data, like malloc(0).
+	// must not have pointers
+	return
+}
+
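+// As a hedged example of what the heap branch above reports: calling getgcmask
+// with a pointer to a heap-allocated value of the hypothetical type
+//
+//	type node struct {
+//		next *node
+//		val  int
+//	}
+//
+// returns []byte{1}: word 0 is a pointer, and because the mask is trimmed
+// after the last pointer, the trailing scalar word (and any pointer-free tail
+// of the allocation slot) is dropped.
+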
+// userArenaHeapBitsSetType is the equivalent of heapSetType but for
+// non-slice-backing-store Go values allocated in a user arena chunk. It
+// sets up the type metadata for the value with type typ allocated at address ptr.
+// base is the base address of the arena chunk.
+func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
+	base := s.base()
+	h := s.writeUserArenaHeapBits(uintptr(ptr))
+
+	p := typ.GCData // start of 1-bit pointer mask (or GC program)
+	var gcProgBits uintptr
+	if typ.Kind_&kindGCProg != 0 {
+		// Expand gc program, using the object itself for storage.
+		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
+		p = (*byte)(ptr)
+	}
+	nb := typ.PtrBytes / goarch.PtrSize
+
+	for i := uintptr(0); i < nb; i += ptrBits {
+		k := nb - i
+		if k > ptrBits {
+			k = ptrBits
+		}
+		// N.B. On big endian platforms we byte swap the data that we
+		// read from GCData, which is always stored in little-endian order
+		// by the compiler. writeUserArenaHeapBits handles data in
+		// a platform-ordered way for efficiency, but stores back the
+		// data in little endian order, since we expose the bitmap through
+		// a dummy type.
+		h = h.write(s, readUintptr(addb(p, i/8)), k)
+	}
+	// Note: we call pad here to ensure we emit explicit 0 bits
+	// for the pointerless tail of the object. This ensures that
+	// there's only a single noMorePtrs mark for the next object
+	// to clear. We don't need to do this to clear stale noMorePtrs
+	// markers from previous uses because arena chunk pointer bitmaps
+	// are always fully cleared when reused.
+	h = h.pad(s, typ.Size_-typ.PtrBytes)
+	h.flush(s, uintptr(ptr), typ.Size_)
+
+	if typ.Kind_&kindGCProg != 0 {
+		// Zero out temporary ptrmask buffer inside object.
+		memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
+	}
+
+	// Update the PtrBytes value in the type information. After this
+	// point, the GC will observe the new bitmap.
+	s.largeType.PtrBytes = uintptr(ptr) - base + typ.PtrBytes
+
+	// Double-check that the bitmap was written out correctly.
+	const doubleCheck = false
+	if doubleCheck {
+		doubleCheckHeapPointersInterior(uintptr(ptr), uintptr(ptr), typ.Size_, typ.Size_, typ, &s.largeType, s)
+	}
+}
+
+// For !goexperiment.AllocHeaders, to pass TestIntendedInlining.
+func writeHeapBitsForAddr() {
+	panic("not implemented")
+}
+
+// For !goexperiment.AllocHeaders.
+type heapBits struct {
+}
+
+// For !goexperiment.AllocHeaders.
+//
+//go:nosplit
+func heapBitsForAddr(addr, size uintptr) heapBits {
+	panic("not implemented")
+}
+
+// For !goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (h heapBits) next() (heapBits, uintptr) {
+	panic("not implemented")
+}
+
+// For !goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (h heapBits) nextFast() (heapBits, uintptr) {
+	panic("not implemented")
+}
diff --git a/src/runtime/mbitmap_noallocheaders.go b/src/runtime/mbitmap_noallocheaders.go
new file mode 100644
index 0000000..383993a
--- /dev/null
+++ b/src/runtime/mbitmap_noallocheaders.go
@@ -0,0 +1,938 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.allocheaders
+
+// Garbage collector: type and heap bitmaps.
+//
+// Stack, data, and bss bitmaps
+//
+// Stack frames and global variables in the data and bss sections are
+// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
+// means the word is a live pointer to be visited by the GC (referred to
+// as "pointer"). A "0" bit means the word should be ignored by GC
+// (referred to as "scalar", though it could be a dead pointer value).
+//
+// Heap bitmap
+//
+// The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
+// recording whether a pointer is stored in that word or not. This bitmap
+// is stored in the heapArena metadata backing each heap arena.
+// That is, if ha is the heapArena for the arena starting at "start",
+// then ha.bitmap[0] holds the 64 bits for the 64 words "start"
+// through start+63*ptrSize, ha.bitmap[1] holds the entries for
+// start+64*ptrSize through start+127*ptrSize, and so on.
+// Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents
+// the word at "start", ha.bitmap[0]>>1&1 represents the word at start+8, etc.
+// (For 32-bit platforms, s/64/32/.)
+//
+// We also keep a noMorePtrs bitmap which allows us to stop scanning
+// the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
+// is 1, then the object containing the last word described by ha.bitmap[8*i+j]
+// has no more pointers beyond those described by ha.bitmap[8*i+j].
+// If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
+// beyond must all be zero until the start of the next object.
+//
+// The bitmap for noscan spans is set to all zero at span allocation time.
+//
+// The bitmap for unallocated objects in scannable spans is not maintained
+// (can be junk).
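+//
+// As an illustrative sketch (assuming a 64-bit platform, with "start"
+// the arena base address), the pointer bit for the word at address a is
+//
+//	word := (a - start) / 8              // word index within the arena
+//	ha.bitmap[word/64] >> (word%64) & 1
+//
+// and the noMorePtrs entry covering that bitmap word is
+//
+//	ha.noMorePtrs[(word/64)/8] >> ((word/64) % 8) & 1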
+
+package runtime
+
+import (
+	"internal/abi"
+	"internal/goarch"
+	"runtime/internal/sys"
+	"unsafe"
+)
+
+const (
+	// For compatibility with the allocheaders GOEXPERIMENT.
+	mallocHeaderSize       = 0
+	minSizeForMallocHeader = ^uintptr(0)
+)
+
+// For compatibility with the allocheaders GOEXPERIMENT.
+//
+//go:nosplit
+func heapBitsInSpan(_ uintptr) bool {
+	return false
+}
+
+// heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
+type heapArenaPtrScalar struct {
+	// bitmap stores the pointer/scalar bitmap for the words in
+	// this arena. See mbitmap.go for a description.
+	// This array uses 1 bit per word of heap, or 1.6% of the heap size (for 64-bit).
+	bitmap [heapArenaBitmapWords]uintptr
+
+	// If the ith bit of noMorePtrs is true, then there are no more
+	// pointers for the object containing the word described by the
+	// high bit of bitmap[i].
+	// In that case, bitmap[i+1], ... must be zero until the start
+	// of the next object.
+	// We never operate on these entries using bit-parallel techniques,
+	// so it is ok if they are small. Also, they can't be bigger than
+	// uint16 because at that size a single noMorePtrs entry
+	// represents 8K of memory, the minimum size of a span. Any larger
+	// and we'd have to worry about concurrent updates.
+	// This array uses 1 bit per word of bitmap, or .024% of the heap size (for 64-bit).
+	noMorePtrs [heapArenaBitmapWords / 8]uint8
+}
+
+// heapBits provides access to the bitmap bits for a single heap word.
+// The methods on heapBits take value receivers so that the compiler
+// can more easily inline calls to those methods and registerize the
+// struct fields independently.
+type heapBits struct {
+	// heapBits will report on pointers in the range [addr,addr+size).
+	// The low bit of mask contains the pointerness of the word at addr
+	// (assuming valid>0).
+	addr, size uintptr
+
+	// The next few pointer bits representing words starting at addr.
+	// Those bits already returned by next() are zeroed.
+	mask uintptr
+	// Number of bits in mask that are valid. mask is always less than 1<<valid.
+	valid uintptr
+}
+
+// heapBitsForAddr returns the heapBits for the address addr.
+// The caller must ensure [addr,addr+size) is in an allocated span.
+// In particular, be careful not to point past the end of an object.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func heapBitsForAddr(addr, size uintptr) heapBits {
+	// Find arena
+	ai := arenaIndex(addr)
+	ha := mheap_.arenas[ai.l1()][ai.l2()]
+
+	// Word index in arena.
+	word := addr / goarch.PtrSize % heapArenaWords
+
+	// Word index and bit offset in bitmap array.
+	idx := word / ptrBits
+	off := word % ptrBits
+
+	// Grab relevant bits of bitmap.
+	mask := ha.bitmap[idx] >> off
+	valid := ptrBits - off
+
+	// Process depending on where the object ends.
+	nptr := size / goarch.PtrSize
+	if nptr < valid {
+		// Bits for this object end before the end of this bitmap word.
+		// Squash bits for the following objects.
+		mask &= 1<<(nptr&(ptrBits-1)) - 1
+		valid = nptr
+	} else if nptr == valid {
+		// Bits for this object end at exactly the end of this bitmap word.
+		// All good.
+	} else {
+		// Bits for this object extend into the next bitmap word. See if there
+		// may be any pointers recorded there.
+		if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
+			// No more pointers in this object after this bitmap word.
+			// Update size so we know not to look there.
+			size = valid * goarch.PtrSize
+		}
+	}
+
+	return heapBits{addr: addr, size: size, mask: mask, valid: valid}
+}
+
+// Returns the (absolute) address of the next known pointer and
+// a heapBits iterator representing any remaining pointers.
+// If there are no more pointers, returns address 0.
+// Note that next does not modify h. The caller must record the result.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func (h heapBits) next() (heapBits, uintptr) {
+	for {
+		if h.mask != 0 {
+			var i int
+			if goarch.PtrSize == 8 {
+				i = sys.TrailingZeros64(uint64(h.mask))
+			} else {
+				i = sys.TrailingZeros32(uint32(h.mask))
+			}
+			h.mask ^= uintptr(1) << (i & (ptrBits - 1))
+			return h, h.addr + uintptr(i)*goarch.PtrSize
+		}
+
+		// Skip words that we've already processed.
+		h.addr += h.valid * goarch.PtrSize
+		h.size -= h.valid * goarch.PtrSize
+		if h.size == 0 {
+			return h, 0 // no more pointers
+		}
+
+		// Grab more bits and try again.
+		h = heapBitsForAddr(h.addr, h.size)
+	}
+}
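+
+// The typical read-side pattern (used by bulkBarrierPreWrite and
+// getgcmask below) iterates over all pointer slots in [base, base+size):
+//
+//	h := heapBitsForAddr(base, size)
+//	for {
+//		var addr uintptr
+//		if h, addr = h.next(); addr == 0 {
+//			break
+//		}
+//		// addr is the address of a pointer-containing word.
+//	}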
+
+// nextFast is like next, but can return 0 even when there are more pointers
+// to be found. Callers should call next if nextFast returns 0 as its second
+// return value.
+//
+//	if addr, h = h.nextFast(); addr == 0 {
+//	    if addr, h = h.next(); addr == 0 {
+//	        ... no more pointers ...
+//	    }
+//	}
+//	... process pointer at addr ...
+//
+// nextFast is designed to be inlineable.
+//
+//go:nosplit
+func (h heapBits) nextFast() (heapBits, uintptr) {
+	// TESTQ/JEQ
+	if h.mask == 0 {
+		return h, 0
+	}
+	// BSFQ
+	var i int
+	if goarch.PtrSize == 8 {
+		i = sys.TrailingZeros64(uint64(h.mask))
+	} else {
+		i = sys.TrailingZeros32(uint32(h.mask))
+	}
+	// BTCQ
+	h.mask ^= uintptr(1) << (i & (ptrBits - 1))
+	// LEAQ (XX)(XX*8)
+	return h, h.addr + uintptr(i)*goarch.PtrSize
+}
+
+// bulkBarrierPreWrite executes a write barrier
+// for every pointer slot in the memory range [src, src+size),
+// using pointer/scalar information from [dst, dst+size).
+// This executes the write barriers necessary before a memmove.
+// src, dst, and size must be pointer-aligned.
+// The range [dst, dst+size) must lie within a single object.
+// It does not perform the actual writes.
+//
+// As a special case, src == 0 indicates that this is being used for a
+// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
+// barrier.
+//
+// Callers should call bulkBarrierPreWrite immediately before
+// calling memmove(dst, src, size). This function is marked nosplit
+// to avoid being preempted; the GC must not stop the goroutine
+// between the memmove and the execution of the barriers.
+// The caller is also responsible for cgo pointer checks if this
+// may be writing Go pointers into non-Go memory.
+//
+// The pointer bitmap is not maintained for allocations containing
+// no pointers at all; any caller of bulkBarrierPreWrite must first
+// make sure the underlying allocation contains pointers, usually
+// by checking typ.PtrBytes.
+//
+// The type of the space can be provided purely as an optimization;
+// however, it is not used with GOEXPERIMENT=noallocheaders.
+//
+// Callers must perform cgo checks if goexperiment.CgoCheck2.
+//
+//go:nosplit
+func bulkBarrierPreWrite(dst, src, size uintptr, _ *abi.Type) {
+	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
+		throw("bulkBarrierPreWrite: unaligned arguments")
+	}
+	if !writeBarrier.enabled {
+		return
+	}
+	if s := spanOf(dst); s == nil {
+		// If dst is a global, use the data or BSS bitmaps to
+		// execute write barriers.
+		for _, datap := range activeModules() {
+			if datap.data <= dst && dst < datap.edata {
+				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
+				return
+			}
+		}
+		for _, datap := range activeModules() {
+			if datap.bss <= dst && dst < datap.ebss {
+				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
+				return
+			}
+		}
+		return
+	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
+		// dst was heap memory at some point, but isn't now.
+		// It can't be a global. It must be either our stack,
+		// or in the case of direct channel sends, it could be
+		// another stack. Either way, no need for barriers.
+		// This will also catch if dst is in a freed span,
+		// though that should never happen.
+		return
+	}
+
+	buf := &getg().m.p.ptr().wbBuf
+	h := heapBitsForAddr(dst, size)
+	if src == 0 {
+		for {
+			var addr uintptr
+			if h, addr = h.next(); addr == 0 {
+				break
+			}
+			dstx := (*uintptr)(unsafe.Pointer(addr))
+			p := buf.get1()
+			p[0] = *dstx
+		}
+	} else {
+		for {
+			var addr uintptr
+			if h, addr = h.next(); addr == 0 {
+				break
+			}
+			dstx := (*uintptr)(unsafe.Pointer(addr))
+			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
+			p := buf.get2()
+			p[0] = *dstx
+			p[1] = *srcx
+		}
+	}
+}
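+
+// A minimal usage sketch (illustrative only; the type argument is
+// ignored in this configuration):
+//
+//	bulkBarrierPreWrite(uintptr(dst), uintptr(src), size, typ)
+//	memmove(dst, src, size)
+//
+// The barrier call must immediately precede the memmove so that the GC
+// cannot stop the goroutine between the two.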
+
+// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
+// does not execute write barriers for [dst, dst+size).
+//
+// In addition to the requirements of bulkBarrierPreWrite
+// callers need to ensure [dst, dst+size) is zeroed.
+//
+// This is used for special cases where e.g. dst was just
+// created and zeroed with malloc.
+//
+// The type of the space can be provided purely as an optimization;
+// however, it is not used with GOEXPERIMENT=noallocheaders.
+//
+//go:nosplit
+func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, _ *abi.Type) {
+	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
+		throw("bulkBarrierPreWrite: unaligned arguments")
+	}
+	if !writeBarrier.enabled {
+		return
+	}
+	buf := &getg().m.p.ptr().wbBuf
+	h := heapBitsForAddr(dst, size)
+	for {
+		var addr uintptr
+		if h, addr = h.next(); addr == 0 {
+			break
+		}
+		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
+		p := buf.get1()
+		p[0] = *srcx
+	}
+}
+
+// initHeapBits initializes the heap bitmap for a span.
+// If this is a span of single pointer allocations, it initializes all
+// words to pointer. If forceClear is true, all bits are cleared instead.
+func (s *mspan) initHeapBits(forceClear bool) {
+	if forceClear || s.spanclass.noscan() {
+		// Set all the pointer bits to zero. We do this once
+		// when the span is allocated so we don't have to do it
+		// for each object allocation.
+		base := s.base()
+		size := s.npages * pageSize
+		h := writeHeapBitsForAddr(base)
+		h.flush(base, size)
+		return
+	}
+	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
+	if !isPtrs {
+		return // nothing to do
+	}
+	h := writeHeapBitsForAddr(s.base())
+	size := s.npages * pageSize
+	nptrs := size / goarch.PtrSize
+	for i := uintptr(0); i < nptrs; i += ptrBits {
+		h = h.write(^uintptr(0), ptrBits)
+	}
+	h.flush(s.base(), size)
+}
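+
+// For example, on 64-bit platforms a span whose element size is exactly
+// one pointer word (such as a span of *T allocations) gets an all-ones
+// bitmap here, so heapBitsSetType has no per-object work to do for it.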
+
+type writeHeapBits struct {
+	addr  uintptr // address that the low bit of mask represents the pointer state of.
+	mask  uintptr // some pointer bits starting at the address addr.
+	valid uintptr // number of bits in mask that are valid (including low)
+	low   uintptr // number of low-order bits to not overwrite
+}
+
+func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
+	// We start writing bits maybe in the middle of a heap bitmap word.
+	// Remember how many bits into the word we started, so we can be sure
+	// not to overwrite the previous bits.
+	h.low = addr / goarch.PtrSize % ptrBits
+
+	// round down to heap word that starts the bitmap word.
+	h.addr = addr - h.low*goarch.PtrSize
+
+	// We don't have any bits yet.
+	h.mask = 0
+	h.valid = h.low
+
+	return
+}
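+
+// For instance, with 64-bit words (ptrBits = 64), an addr three words
+// past a bitmap-word boundary yields h.low = 3 and h.addr = addr - 3*8;
+// the low three bits of that bitmap word belong to earlier objects and
+// are preserved by write and flush.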
+
+// write appends the pointerness of the next valid pointer slots
+// using the low valid bits of bits. 1=pointer, 0=scalar.
+func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
+	if h.valid+valid <= ptrBits {
+		// Fast path - just accumulate the bits.
+		h.mask |= bits << h.valid
+		h.valid += valid
+		return h
+	}
+	// Too many bits to fit in this word. Write the current word
+	// out and move on to the next word.
+
+	data := h.mask | bits<<h.valid       // mask for this word
+	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
+	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them
+
+	// Flush mask to the memory bitmap.
+	// TODO: figure out how to cache arena lookup.
+	ai := arenaIndex(h.addr)
+	ha := mheap_.arenas[ai.l1()][ai.l2()]
+	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
+	m := uintptr(1)<<h.low - 1
+	ha.bitmap[idx] = ha.bitmap[idx]&m | data
+	// Note: no synchronization required for this write because
+	// the allocator has exclusive access to the page, and the bitmap
+	// entries are all for a single page. Also, visibility of these
+	// writes is guaranteed by the publication barrier in mallocgc.
+
+	// Clear noMorePtrs bit, since we're going to be writing bits
+	// into the following word.
+	ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)
+	// Note: same as above
+
+	// Move to next word of bitmap.
+	h.addr += ptrBits * goarch.PtrSize
+	h.low = 0
+	return h
+}
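+
+// For example, with ptrBits = 64 and h.valid = 62, write(0b101, 3)
+// places the low two bits at positions 62 and 63, flushes that bitmap
+// word to the arena, and carries the remaining bit over, leaving
+// h.valid = 1 for the next word.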
+
+// Add padding of size bytes.
+func (h writeHeapBits) pad(size uintptr) writeHeapBits {
+	if size == 0 {
+		return h
+	}
+	words := size / goarch.PtrSize
+	for words > ptrBits {
+		h = h.write(0, ptrBits)
+		words -= ptrBits
+	}
+	return h.write(0, words)
+}
+
+// Flush the bits that have been written, and add zeros as needed
+// to cover the full object [addr, addr+size).
+func (h writeHeapBits) flush(addr, size uintptr) {
+	// zeros counts the number of bits needed to represent the object minus the
+	// number of bits we've already written. This is the number of 0 bits
+	// that need to be added.
+	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid
+
+	// Add zero bits up to the bitmap word boundary
+	if zeros > 0 {
+		z := ptrBits - h.valid
+		if z > zeros {
+			z = zeros
+		}
+		h.valid += z
+		zeros -= z
+	}
+
+	// Find word in bitmap that we're going to write.
+	ai := arenaIndex(h.addr)
+	ha := mheap_.arenas[ai.l1()][ai.l2()]
+	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
+
+	// Write remaining bits.
+	if h.valid != h.low {
+		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
+		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
+		ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
+	}
+	if zeros == 0 {
+		return
+	}
+
+	// Record in the noMorePtrs map that there won't be any more 1 bits,
+	// so readers can stop early.
+	ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
+
+	// Advance to next bitmap word.
+	h.addr += ptrBits * goarch.PtrSize
+
+	// Continue on writing zeros for the rest of the object.
+	// For standard use of the ptr bits this is not required, as
+	// the bits are read from the beginning of the object. Some uses,
+	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
+	// start mid-object, so these writes are still required.
+	for {
+		// Write zero bits.
+		ai := arenaIndex(h.addr)
+		ha := mheap_.arenas[ai.l1()][ai.l2()]
+		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
+		if zeros < ptrBits {
+			ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
+			break
+		} else if zeros == ptrBits {
+			ha.bitmap[idx] = 0
+			break
+		} else {
+			ha.bitmap[idx] = 0
+			zeros -= ptrBits
+		}
+		ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
+		h.addr += ptrBits * goarch.PtrSize
+	}
+}
+
+// heapBitsSetType records that the new allocation [x, x+size)
+// holds in [x, x+dataSize) one or more values of type typ.
+// (The number of values is given by dataSize / typ.Size.)
+// If dataSize < size, the fragment [x+dataSize, x+size) is
+// recorded as non-pointer data.
+// It is known that the type has pointers somewhere;
+// malloc does not call heapBitsSetType when there are no pointers,
+// because all free objects are marked as noscan during
+// heapBitsSweepSpan.
+//
+// There can only be one allocation from a given span active at a time,
+// and the bitmap for a span always falls on word boundaries,
+// so there are no write-write races for access to the heap bitmap.
+// Hence, heapBitsSetType can access the bitmap without atomics.
+//
+// There can be read-write races between heapBitsSetType and things
+// that read the heap bitmap like scanobject. However, since
+// heapBitsSetType is only used for objects that have not yet been
+// made reachable, readers will ignore bits being modified by this
+// function. This does mean this function cannot transiently modify
+// bits that belong to neighboring objects. Also, on weakly-ordered
+// machines, callers must execute a store/store (publication) barrier
+// between calling this function and making the object reachable.
+func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
+	const doubleCheck = false // slow but helpful; enable to test modifications to this code
+
+	if doubleCheck && dataSize%typ.Size_ != 0 {
+		throw("heapBitsSetType: dataSize not a multiple of typ.Size")
+	}
+
+	if goarch.PtrSize == 8 && size == goarch.PtrSize {
+		// It's one word and it has pointers, so it must be a pointer.
+		// Since all allocated one-word objects are pointers
+		// (non-pointers are aggregated into tinySize allocations),
+		// (*mspan).initHeapBits sets the pointer bits for us.
+		// Nothing to do here.
+		if doubleCheck {
+			h, addr := heapBitsForAddr(x, size).next()
+			if addr != x {
+				throw("heapBitsSetType: pointer bit missing")
+			}
+			_, addr = h.next()
+			if addr != 0 {
+				throw("heapBitsSetType: second pointer bit found")
+			}
+		}
+		return
+	}
+
+	h := writeHeapBitsForAddr(x)
+
+	// Handle GC program.
+	if typ.Kind_&kindGCProg != 0 {
+		// Expand the gc program into the storage we're going to use for the actual object.
+		obj := (*uint8)(unsafe.Pointer(x))
+		n := runGCProg(addb(typ.GCData, 4), obj)
+		// Use the expanded program to set the heap bits.
+		for i := uintptr(0); true; i += typ.Size_ {
+			// Copy expanded program to heap bitmap.
+			p := obj
+			j := n
+			for j > 8 {
+				h = h.write(uintptr(*p), 8)
+				p = add1(p)
+				j -= 8
+			}
+			h = h.write(uintptr(*p), j)
+
+			if i+typ.Size_ == dataSize {
+				break // no padding after last element
+			}
+
+			// Pad with zeros to the start of the next element.
+			h = h.pad(typ.Size_ - n*goarch.PtrSize)
+		}
+
+		h.flush(x, size)
+
+		// Erase the expanded GC program.
+		memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
+		return
+	}
+
+	// Note about sizes:
+	//
+	// typ.Size is the number of bytes in the object,
+	// and typ.PtrBytes is the number of bytes in the prefix
+	// of the object that contains pointers. That is, the final
+	// typ.Size - typ.PtrBytes bytes contain no pointers.
+	// This allows optimization of a common pattern where
+	// an object has a small header followed by a large scalar
+	// buffer. If we know the pointers are over, we don't have
+	// to scan the buffer's heap bitmap at all.
+	// The 1-bit ptrmasks are sized to contain only bits for
+	// the typ.PtrBytes prefix, zero padded out to a full byte
+	// of bitmap. If there is more room in the allocated object,
+	// that space is pointerless. The noMorePtrs bitmap will prevent
+	// scanning large pointerless tails of an object.
+	//
+	// Replicated copies are not as nice: if there is an array of
+	// objects with scalar tails, all but the last tail do have to
+	// be initialized, because there is no way to say "skip forward".
+
+	ptrs := typ.PtrBytes / goarch.PtrSize
+	if typ.Size_ == dataSize { // Single element
+		if ptrs <= ptrBits { // Single small element
+			m := readUintptr(typ.GCData)
+			h = h.write(m, ptrs)
+		} else { // Single large element
+			p := typ.GCData
+			for {
+				h = h.write(readUintptr(p), ptrBits)
+				p = addb(p, ptrBits/8)
+				ptrs -= ptrBits
+				if ptrs <= ptrBits {
+					break
+				}
+			}
+			m := readUintptr(p)
+			h = h.write(m, ptrs)
+		}
+	} else { // Repeated element
+		words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
+		if words <= ptrBits {               // Repeated small element
+			n := dataSize / typ.Size_
+			m := readUintptr(typ.GCData)
+			// Make larger unit to repeat
+			for words <= ptrBits/2 {
+				if n&1 != 0 {
+					h = h.write(m, words)
+				}
+				n /= 2
+				m |= m << words
+				ptrs += words
+				words *= 2
+				if n == 1 {
+					break
+				}
+			}
+			for n > 1 {
+				h = h.write(m, words)
+				n--
+			}
+			h = h.write(m, ptrs)
+		} else { // Repeated large element
+			for i := uintptr(0); true; i += typ.Size_ {
+				p := typ.GCData
+				j := ptrs
+				for j > ptrBits {
+					h = h.write(readUintptr(p), ptrBits)
+					p = addb(p, ptrBits/8)
+					j -= ptrBits
+				}
+				m := readUintptr(p)
+				h = h.write(m, j)
+				if i+typ.Size_ == dataSize {
+					break // don't need the trailing nonptr bits on the last element.
+				}
+				// Pad with zeros to the start of the next element.
+				h = h.pad(typ.Size_ - typ.PtrBytes)
+			}
+		}
+	}
+	h.flush(x, size)
+
+	if doubleCheck {
+		h := heapBitsForAddr(x, size)
+		for i := uintptr(0); i < size; i += goarch.PtrSize {
+			// Compute the pointer bit we want at offset i.
+			want := false
+			if i < dataSize {
+				off := i % typ.Size_
+				if off < typ.PtrBytes {
+					j := off / goarch.PtrSize
+					want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
+				}
+			}
+			if want {
+				var addr uintptr
+				h, addr = h.next()
+				if addr != x+i {
+					throw("heapBitsSetType: pointer entry not correct")
+				}
+			}
+		}
+		if _, addr := h.next(); addr != 0 {
+			throw("heapBitsSetType: extra pointer")
+		}
+	}
+}
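+
+// As a worked example of the repeated-element path above (the type is
+// illustrative), consider the backing store of make([]T, 4) on a 64-bit
+// platform, where
+//
+//	type T struct {
+//		p    *int // one pointer word
+//		a, b int  // two scalar words
+//	}
+//
+// Here words = 3, ptrs = 1, n = 4, and the initial mask is 0b001. The
+// doubling loop merges elements pairwise; after two rounds n = 1,
+// m = 0b001001001001, and ptrs = 10, so the final write sets pointer
+// bits for words 0, 3, 6, and 9 (the p field of each element) and
+// flush zero-fills the rest of the allocation.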
+
+// For goexperiment.AllocHeaders
+func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {
+	return 0
+}
+
+// Testing.
+
+// Returns GC type info for the pointer stored in ep for testing.
+// If ep points to the stack, only static live information will be returned
+// (i.e. not for objects which are only dynamically live stack objects).
+func getgcmask(ep any) (mask []byte) {
+	e := *efaceOf(&ep)
+	p := e.data
+	t := e._type
+	// data or bss
+	for _, datap := range activeModules() {
+		// data
+		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
+			bitmap := datap.gcdatamask.bytedata
+			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
+			mask = make([]byte, n/goarch.PtrSize)
+			for i := uintptr(0); i < n; i += goarch.PtrSize {
+				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
+				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+			}
+			return
+		}
+
+		// bss
+		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
+			bitmap := datap.gcbssmask.bytedata
+			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
+			mask = make([]byte, n/goarch.PtrSize)
+			for i := uintptr(0); i < n; i += goarch.PtrSize {
+				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
+				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+			}
+			return
+		}
+	}
+
+	// heap
+	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
+		if s.spanclass.noscan() {
+			return nil
+		}
+		n := s.elemsize
+		hbits := heapBitsForAddr(base, n)
+		mask = make([]byte, n/goarch.PtrSize)
+		for {
+			var addr uintptr
+			if hbits, addr = hbits.next(); addr == 0 {
+				break
+			}
+			mask[(addr-base)/goarch.PtrSize] = 1
+		}
+		// Callers expect this mask to end at the last pointer.
+		for len(mask) > 0 && mask[len(mask)-1] == 0 {
+			mask = mask[:len(mask)-1]
+		}
+
+		// Make sure we keep ep alive. We may have stopped referencing
+		// ep's data pointer sometime before this point and it's possible
+		// for that memory to get freed.
+		KeepAlive(ep)
+		return
+	}
+
+	// stack
+	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
+		found := false
+		var u unwinder
+		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
+			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
+				found = true
+				break
+			}
+		}
+		if found {
+			locals, _, _ := u.frame.getStackMap(false)
+			if locals.n == 0 {
+				return
+			}
+			size := uintptr(locals.n) * goarch.PtrSize
+			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
+			mask = make([]byte, n/goarch.PtrSize)
+			for i := uintptr(0); i < n; i += goarch.PtrSize {
+				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
+				mask[i/goarch.PtrSize] = locals.ptrbit(off)
+			}
+		}
+		return
+	}
+
+	// otherwise, not something the GC knows about.
+	// possibly read-only data, like malloc(0).
+	// must not have pointers
+	return
+}
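+
+// For example, for a heap-allocated value of type struct{ p *int; x int },
+// getgcmask reports []byte{1}: one byte per pointer-sized word of the
+// object, with the pointerless tail trimmed.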
+
+// userArenaHeapBitsSetType is the equivalent of heapBitsSetType but for
+// non-slice-backing-store Go values allocated in a user arena chunk. It
+// sets up the heap bitmap for the value with type typ allocated at address ptr.
+// base is the base address of the arena chunk.
+func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
+	base := s.base()
+	h := writeHeapBitsForAddr(uintptr(ptr))
+
+	// Our last allocation might have ended right at a noMorePtrs mark,
+	// which we would not have erased. We need to erase that mark here,
+	// because we're going to start adding new heap bitmap bits.
+	// We only need to clear one mark, because below we make sure to
+	// pad out the bits with zeroes and only write one noMorePtrs bit
+	// for each new object.
+	// (This is only necessary at noMorePtrs boundaries, as noMorePtrs
+	// marks within an object allocated with newAt will be erased by
+	// the normal writeHeapBitsForAddr mechanism.)
+	//
+	// Note that we skip this if this is the first allocation in the
+	// arena because there's definitely no previous noMorePtrs mark
+	// (in fact, we *must* skip it, because the fix-up below backs up
+	// one pointer, which would step before the start of the arena).
+	if uintptr(ptr)%(8*goarch.PtrSize*goarch.PtrSize) == 0 && uintptr(ptr) != base {
+		// Back up one pointer and rewrite that pointer. That will
+		// cause the writeHeapBits implementation to clear the
+		// noMorePtrs bit we need to clear.
+		r := heapBitsForAddr(uintptr(ptr)-goarch.PtrSize, goarch.PtrSize)
+		_, p := r.next()
+		b := uintptr(0)
+		if p == uintptr(ptr)-goarch.PtrSize {
+			b = 1
+		}
+		h = writeHeapBitsForAddr(uintptr(ptr) - goarch.PtrSize)
+		h = h.write(b, 1)
+	}
+
+	p := typ.GCData // start of 1-bit pointer mask (or GC program)
+	var gcProgBits uintptr
+	if typ.Kind_&kindGCProg != 0 {
+		// Expand gc program, using the object itself for storage.
+		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
+		p = (*byte)(ptr)
+	}
+	nb := typ.PtrBytes / goarch.PtrSize
+
+	for i := uintptr(0); i < nb; i += ptrBits {
+		k := nb - i
+		if k > ptrBits {
+			k = ptrBits
+		}
+		h = h.write(readUintptr(addb(p, i/8)), k)
+	}
+	// Note: we call pad here to ensure we emit explicit 0 bits
+	// for the pointerless tail of the object. This ensures that
+	// there's only a single noMorePtrs mark for the next object
+	// to clear. We don't need to do this to clear stale noMorePtrs
+	// markers from previous uses because arena chunk pointer bitmaps
+	// are always fully cleared when reused.
+	h = h.pad(typ.Size_ - typ.PtrBytes)
+	h.flush(uintptr(ptr), typ.Size_)
+
+	if typ.Kind_&kindGCProg != 0 {
+		// Zero out temporary ptrmask buffer inside object.
+		memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
+	}
+
+	// Double-check that the bitmap was written out correctly.
+	//
+	// Derived from heapBitsSetType.
+	const doubleCheck = false
+	if doubleCheck {
+		size := typ.Size_
+		x := uintptr(ptr)
+		h := heapBitsForAddr(x, size)
+		for i := uintptr(0); i < size; i += goarch.PtrSize {
+			// Compute the pointer bit we want at offset i.
+			want := false
+			off := i % typ.Size_
+			if off < typ.PtrBytes {
+				j := off / goarch.PtrSize
+				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
+			}
+			if want {
+				var addr uintptr
+				h, addr = h.next()
+				if addr != x+i {
+					throw("userArenaHeapBitsSetType: pointer entry not correct")
+				}
+			}
+		}
+		if _, addr := h.next(); addr != 0 {
+			throw("userArenaHeapBitsSetType: extra pointer")
+		}
+	}
+}
+
+// For goexperiment.AllocHeaders.
+type typePointers struct {
+	addr uintptr
+}
+
+// For goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (tp typePointers) nextFast() (typePointers, uintptr) {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (tp typePointers) fastForward(n, limit uintptr) typePointers {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders, to pass TestIntendedInlining.
+func (s *mspan) writeUserArenaHeapBits() {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders, to pass TestIntendedInlining.
+func heapBitsSlice() {
+	panic("not implemented")
+}
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index acfd99b..d4b6eef 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -148,7 +148,7 @@
 	// Return the current cached span to the central lists.
 	s := c.alloc[spc]
 
-	if uintptr(s.allocCount) != s.nelems {
+	if s.allocCount != s.nelems {
 		throw("refill of span with free space remaining")
 	}
 	if s != &emptymspan {
@@ -184,7 +184,7 @@
 		throw("out of memory")
 	}
 
-	if uintptr(s.allocCount) == s.nelems {
+	if s.allocCount == s.nelems {
 		throw("span has no free space")
 	}
 
@@ -284,7 +284,7 @@
 				//
 				// If this span was cached before sweep, then gcController.heapLive was totally
 				// recomputed since caching this span, so we don't do this for stale spans.
-				dHeapLive -= int64(uintptr(s.nelems)-uintptr(s.allocCount)) * int64(s.elemsize)
+				dHeapLive -= int64(s.nelems-s.allocCount) * int64(s.elemsize)
 			}
 
 			// Release the span to the mcentral.
diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
index 7861199..e190b56 100644
--- a/src/runtime/mcentral.go
+++ b/src/runtime/mcentral.go
@@ -84,8 +84,10 @@
 	deductSweepCredit(spanBytes, 0)
 
 	traceDone := false
-	if traceEnabled() {
-		traceGCSweepStart()
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GCSweepStart()
+		traceRelease(trace)
 	}
 
 	// If we sweep spanBudget spans without finding any free
@@ -157,9 +159,11 @@
 		}
 		sweep.active.end(sl)
 	}
-	if traceEnabled() {
-		traceGCSweepDone()
+	trace = traceAcquire()
+	if trace.ok() {
+		trace.GCSweepDone()
 		traceDone = true
+		traceRelease(trace)
 	}
 
 	// We failed to get a span from the mcentral so get one from mheap.
@@ -170,11 +174,15 @@
 
 	// At this point s is a span that should have free slots.
 havespan:
-	if traceEnabled() && !traceDone {
-		traceGCSweepDone()
+	if !traceDone {
+		trace := traceAcquire()
+		if trace.ok() {
+			trace.GCSweepDone()
+			traceRelease(trace)
+		}
 	}
 	n := int(s.nelems) - int(s.allocCount)
-	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
+	if n == 0 || s.freeindex == s.nelems || s.allocCount == s.nelems {
 		throw("span has no free objects")
 	}
 	freeByteBase := s.freeindex &^ (64 - 1)
diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go
index c9823d3..d63c38c 100644
--- a/src/runtime/mem_linux.go
+++ b/src/runtime/mem_linux.go
@@ -170,4 +170,12 @@
 		print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n")
 		throw("runtime: cannot map pages in arena address space")
 	}
+
+	// Disable huge pages if the GODEBUG for it is set.
+	//
+	// Note that there are a few sysHugePage calls that can override this, but
+	// they're all for GC metadata.
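+	// (The relevant setting is GODEBUG=disablethp=1.)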
+	if debug.disablethp != 0 {
+		sysNoHugePageOS(v, n)
+	}
 }
diff --git a/src/runtime/memclr_loong64.s b/src/runtime/memclr_loong64.s
index 7bb6f3d..313e4d4 100644
--- a/src/runtime/memclr_loong64.s
+++ b/src/runtime/memclr_loong64.s
@@ -6,37 +6,39 @@
 #include "textflag.h"
 
 // func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
-TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16
-	MOVV	ptr+0(FP), R6
-	MOVV	n+8(FP), R7
-	ADDV	R6, R7, R4
+TEXT runtime·memclrNoHeapPointers<ABIInternal>(SB),NOSPLIT,$0-16
+#ifndef GOEXPERIMENT_regabiargs
+	MOVV	ptr+0(FP), R4
+	MOVV	n+8(FP), R5
+#endif
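+	// With GOEXPERIMENT_regabiargs, ptr and n are already in R4 and R5
+	// on entry (ABIInternal); otherwise they are loaded from the frame
+	// above.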
+	ADDV	R4, R5, R6
 
 	// if less than 8 bytes, do one byte at a time
-	SGTU	$8, R7, R8
+	SGTU	$8, R5, R8
 	BNE	R8, out
 
 	// do one byte at a time until 8-aligned
-	AND	$7, R6, R8
+	AND	$7, R4, R8
 	BEQ	R8, words
-	MOVB	R0, (R6)
-	ADDV	$1, R6
+	MOVB	R0, (R4)
+	ADDV	$1, R4
 	JMP	-4(PC)
 
 words:
 	// do 8 bytes at a time if there is room
-	ADDV	$-7, R4, R7
+	ADDV	$-7, R6, R5
 
 	PCALIGN	$16
-	SGTU	R7, R6, R8
+	SGTU	R5, R4, R8
 	BEQ	R8, out
-	MOVV	R0, (R6)
-	ADDV	$8, R6
+	MOVV	R0, (R4)
+	ADDV	$8, R4
 	JMP	-4(PC)
 
 out:
-	BEQ	R6, R4, done
-	MOVB	R0, (R6)
-	ADDV	$1, R6
+	BEQ	R4, R6, done
+	MOVB	R0, (R4)
+	ADDV	$1, R4
 	JMP	-3(PC)
 done:
 	RET
diff --git a/src/runtime/memclr_riscv64.s b/src/runtime/memclr_riscv64.s
index 1c1e6ab..16c511c 100644
--- a/src/runtime/memclr_riscv64.s
+++ b/src/runtime/memclr_riscv64.s
@@ -23,7 +23,7 @@
 	SUB	X5, X9, X5
 	SUB	X5, X11, X11
 align:
-	ADD	$-1, X5
+	SUB	$1, X5
 	MOVB	ZERO, 0(X10)
 	ADD	$1, X10
 	BNEZ	X5, align
@@ -47,7 +47,7 @@
 	MOV	ZERO, 48(X10)
 	MOV	ZERO, 56(X10)
 	ADD	$64, X10
-	ADD	$-64, X11
+	SUB	$64, X11
 	BGE	X11, X9, loop64
 	BEQZ	X11, done
 
@@ -60,7 +60,7 @@
 	MOV	ZERO, 16(X10)
 	MOV	ZERO, 24(X10)
 	ADD	$32, X10
-	ADD	$-32, X11
+	SUB	$32, X11
 	BEQZ	X11, done
 
 check16:
@@ -70,7 +70,7 @@
 	MOV	ZERO, 0(X10)
 	MOV	ZERO, 8(X10)
 	ADD	$16, X10
-	ADD	$-16, X11
+	SUB	$16, X11
 	BEQZ	X11, done
 
 check8:
@@ -79,7 +79,7 @@
 zero8:
 	MOV	ZERO, 0(X10)
 	ADD	$8, X10
-	ADD	$-8, X11
+	SUB	$8, X11
 	BEQZ	X11, done
 
 check4:
@@ -91,13 +91,13 @@
 	MOVB	ZERO, 2(X10)
 	MOVB	ZERO, 3(X10)
 	ADD	$4, X10
-	ADD	$-4, X11
+	SUB	$4, X11
 
 loop1:
 	BEQZ	X11, done
 	MOVB	ZERO, 0(X10)
 	ADD	$1, X10
-	ADD	$-1, X11
+	SUB	$1, X11
 	JMP	loop1
 
 done:
diff --git a/src/runtime/memmove_loong64.s b/src/runtime/memmove_loong64.s
index 0f139bc..5b7aeba 100644
--- a/src/runtime/memmove_loong64.s
+++ b/src/runtime/memmove_loong64.s
@@ -7,10 +7,12 @@
 // See memmove Go doc for important implementation constraints.
 
 // func memmove(to, from unsafe.Pointer, n uintptr)
-TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24
+TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24
+#ifndef GOEXPERIMENT_regabiargs
 	MOVV	to+0(FP), R4
 	MOVV	from+8(FP), R5
 	MOVV	n+16(FP), R6
+#endif
 	BNE	R6, check
 	RET
 
diff --git a/src/runtime/memmove_riscv64.s b/src/runtime/memmove_riscv64.s
index f5db865..e099a64 100644
--- a/src/runtime/memmove_riscv64.s
+++ b/src/runtime/memmove_riscv64.s
@@ -32,7 +32,7 @@
 	SUB	X5, X9, X5
 	SUB	X5, X12, X12
 f_align:
-	ADD	$-1, X5
+	SUB	$1, X5
 	MOVB	0(X11), X14
 	MOVB	X14, 0(X10)
 	ADD	$1, X10
@@ -65,7 +65,7 @@
 	MOV	X21, 56(X10)
 	ADD	$64, X10
 	ADD	$64, X11
-	ADD	$-64, X12
+	SUB	$64, X12
 	BGE	X12, X9, f_loop64
 	BEQZ	X12, done
 
@@ -83,7 +83,7 @@
 	MOV	X17, 24(X10)
 	ADD	$32, X10
 	ADD	$32, X11
-	ADD	$-32, X12
+	SUB	$32, X12
 	BGE	X12, X9, f_loop32
 	BEQZ	X12, done
 
@@ -97,7 +97,7 @@
 	MOV	X15, 8(X10)
 	ADD	$16, X10
 	ADD	$16, X11
-	ADD	$-16, X12
+	SUB	$16, X12
 	BGE	X12, X9, f_loop16
 	BEQZ	X12, done
 
@@ -109,7 +109,7 @@
 	MOV	X14, 0(X10)
 	ADD	$8, X10
 	ADD	$8, X11
-	ADD	$-8, X12
+	SUB	$8, X12
 	BGE	X12, X9, f_loop8
 	BEQZ	X12, done
 	JMP	f_loop4_check
@@ -136,7 +136,7 @@
 	MOVB	X21, 7(X10)
 	ADD	$8, X10
 	ADD	$8, X11
-	ADD	$-8, X12
+	SUB	$8, X12
 	BGE	X12, X9, f_loop8_unaligned
 
 f_loop4_check:
@@ -153,7 +153,7 @@
 	MOVB	X17, 3(X10)
 	ADD	$4, X10
 	ADD	$4, X11
-	ADD	$-4, X12
+	SUB	$4, X12
 	BGE	X12, X9, f_loop4
 
 f_loop1:
@@ -162,7 +162,7 @@
 	MOVB	X14, 0(X10)
 	ADD	$1, X10
 	ADD	$1, X11
-	ADD	$-1, X12
+	SUB	$1, X12
 	JMP	f_loop1
 
 backward:
@@ -182,9 +182,9 @@
 	// Move one byte at a time until we reach 8 byte alignment.
 	SUB	X5, X12, X12
 b_align:
-	ADD	$-1, X5
-	ADD	$-1, X10
-	ADD	$-1, X11
+	SUB	$1, X5
+	SUB	$1, X10
+	SUB	$1, X11
 	MOVB	0(X11), X14
 	MOVB	X14, 0(X10)
 	BNEZ	X5, b_align
@@ -197,8 +197,8 @@
 	MOV	$64, X9
 	BLT	X12, X9, b_loop32_check
 b_loop64:
-	ADD	$-64, X10
-	ADD	$-64, X11
+	SUB	$64, X10
+	SUB	$64, X11
 	MOV	0(X11), X14
 	MOV	8(X11), X15
 	MOV	16(X11), X16
@@ -215,7 +215,7 @@
 	MOV	X19, 40(X10)
 	MOV	X20, 48(X10)
 	MOV	X21, 56(X10)
-	ADD	$-64, X12
+	SUB	$64, X12
 	BGE	X12, X9, b_loop64
 	BEQZ	X12, done
 
@@ -223,8 +223,8 @@
 	MOV	$32, X9
 	BLT	X12, X9, b_loop16_check
 b_loop32:
-	ADD	$-32, X10
-	ADD	$-32, X11
+	SUB	$32, X10
+	SUB	$32, X11
 	MOV	0(X11), X14
 	MOV	8(X11), X15
 	MOV	16(X11), X16
@@ -233,7 +233,7 @@
 	MOV	X15, 8(X10)
 	MOV	X16, 16(X10)
 	MOV	X17, 24(X10)
-	ADD	$-32, X12
+	SUB	$32, X12
 	BGE	X12, X9, b_loop32
 	BEQZ	X12, done
 
@@ -241,13 +241,13 @@
 	MOV	$16, X9
 	BLT	X12, X9, b_loop8_check
 b_loop16:
-	ADD	$-16, X10
-	ADD	$-16, X11
+	SUB	$16, X10
+	SUB	$16, X11
 	MOV	0(X11), X14
 	MOV	8(X11), X15
 	MOV	X14, 0(X10)
 	MOV	X15, 8(X10)
-	ADD	$-16, X12
+	SUB	$16, X12
 	BGE	X12, X9, b_loop16
 	BEQZ	X12, done
 
@@ -255,11 +255,11 @@
 	MOV	$8, X9
 	BLT	X12, X9, b_loop4_check
 b_loop8:
-	ADD	$-8, X10
-	ADD	$-8, X11
+	SUB	$8, X10
+	SUB	$8, X11
 	MOV	0(X11), X14
 	MOV	X14, 0(X10)
-	ADD	$-8, X12
+	SUB	$8, X12
 	BGE	X12, X9, b_loop8
 	BEQZ	X12, done
 	JMP	b_loop4_check
@@ -268,8 +268,8 @@
 	MOV	$8, X9
 	BLT	X12, X9, b_loop4_check
 b_loop8_unaligned:
-	ADD	$-8, X10
-	ADD	$-8, X11
+	SUB	$8, X10
+	SUB	$8, X11
 	MOVB	0(X11), X14
 	MOVB	1(X11), X15
 	MOVB	2(X11), X16
@@ -286,15 +286,15 @@
 	MOVB	X19, 5(X10)
 	MOVB	X20, 6(X10)
 	MOVB	X21, 7(X10)
-	ADD	$-8, X12
+	SUB	$8, X12
 	BGE	X12, X9, b_loop8_unaligned
 
 b_loop4_check:
 	MOV	$4, X9
 	BLT	X12, X9, b_loop1
 b_loop4:
-	ADD	$-4, X10
-	ADD	$-4, X11
+	SUB	$4, X10
+	SUB	$4, X11
 	MOVB	0(X11), X14
 	MOVB	1(X11), X15
 	MOVB	2(X11), X16
@@ -303,16 +303,16 @@
 	MOVB	X15, 1(X10)
 	MOVB	X16, 2(X10)
 	MOVB	X17, 3(X10)
-	ADD	$-4, X12
+	SUB	$4, X12
 	BGE	X12, X9, b_loop4
 
 b_loop1:
 	BEQZ	X12, done
-	ADD	$-1, X10
-	ADD	$-1, X11
+	SUB	$1, X10
+	SUB	$1, X11
 	MOVB	0(X11), X14
 	MOVB	X14, 0(X10)
-	ADD	$-1, X12
+	SUB	$1, X12
 	JMP	b_loop1
 
 done:
diff --git a/src/runtime/memmove_test.go b/src/runtime/memmove_test.go
index 21236d1..587e03d 100644
--- a/src/runtime/memmove_test.go
+++ b/src/runtime/memmove_test.go
@@ -488,9 +488,7 @@
 		maxLen := 0
 
 		for _, clrLen := range t.data {
-			if clrLen > maxLen {
-				maxLen = clrLen
-			}
+			maxLen = max(maxLen, clrLen)
 			if clrLen < minLen || minLen == 0 {
 				minLen = clrLen
 			}
diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go
index 3d0f174..f97a380 100644
--- a/src/runtime/metrics.go
+++ b/src/runtime/metrics.go
@@ -221,11 +221,11 @@
 			deps: makeStatDepSet(heapStatsDep),
 			compute: func(in *statAggregate, out *metricValue) {
 				hist := out.float64HistOrInit(sizeClassBuckets)
-				hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
+				hist.counts[len(hist.counts)-1] = in.heapStats.largeAllocCount
 				// Cut off the first index which is ostensibly for size class 0,
 				// but large objects are tracked separately so it's actually unused.
 				for i, count := range in.heapStats.smallAllocCount[1:] {
-					hist.counts[i] = uint64(count)
+					hist.counts[i] = count
 				}
 			},
 		},
@@ -247,11 +247,11 @@
 			deps: makeStatDepSet(heapStatsDep),
 			compute: func(in *statAggregate, out *metricValue) {
 				hist := out.float64HistOrInit(sizeClassBuckets)
-				hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
+				hist.counts[len(hist.counts)-1] = in.heapStats.largeFreeCount
 				// Cut off the first index which is ostensibly for size class 0,
 				// but large objects are tracked separately so it's actually unused.
 				for i, count := range in.heapStats.smallFreeCount[1:] {
-					hist.counts[i] = uint64(count)
+					hist.counts[i] = count
 				}
 			},
 		},
@@ -306,7 +306,7 @@
 			deps: makeStatDepSet(heapStatsDep),
 			compute: func(in *statAggregate, out *metricValue) {
 				out.kind = metricKindUint64
-				out.scalar = uint64(in.heapStats.tinyAllocCount)
+				out.scalar = in.heapStats.tinyAllocCount
 			},
 		},
 		"/gc/limiter/last-enabled:gc-cycle": {
@@ -317,15 +317,8 @@
 		},
 		"/gc/pauses:seconds": {
 			compute: func(_ *statAggregate, out *metricValue) {
-				hist := out.float64HistOrInit(timeHistBuckets)
-				// The bottom-most bucket, containing negative values, is tracked
-				// as a separately as underflow, so fill that in manually and then
-				// iterate over the rest.
-				hist.counts[0] = memstats.gcPauseDist.underflow.Load()
-				for i := range memstats.gcPauseDist.counts {
-					hist.counts[i+1] = memstats.gcPauseDist.counts[i].Load()
-				}
-				hist.counts[len(hist.counts)-1] = memstats.gcPauseDist.overflow.Load()
+				// N.B. this is identical to /sched/pauses/total/gc:seconds.
+				sched.stwTotalTimeGC.write(out)
 			},
 		},
 		"/gc/stack/starting-size:bytes": {
@@ -451,18 +444,33 @@
 		},
 		"/sched/latencies:seconds": {
 			compute: func(_ *statAggregate, out *metricValue) {
-				hist := out.float64HistOrInit(timeHistBuckets)
-				hist.counts[0] = sched.timeToRun.underflow.Load()
-				for i := range sched.timeToRun.counts {
-					hist.counts[i+1] = sched.timeToRun.counts[i].Load()
-				}
-				hist.counts[len(hist.counts)-1] = sched.timeToRun.overflow.Load()
+				sched.timeToRun.write(out)
+			},
+		},
+		"/sched/pauses/stopping/gc:seconds": {
+			compute: func(_ *statAggregate, out *metricValue) {
+				sched.stwStoppingTimeGC.write(out)
+			},
+		},
+		"/sched/pauses/stopping/other:seconds": {
+			compute: func(_ *statAggregate, out *metricValue) {
+				sched.stwStoppingTimeOther.write(out)
+			},
+		},
+		"/sched/pauses/total/gc:seconds": {
+			compute: func(_ *statAggregate, out *metricValue) {
+				sched.stwTotalTimeGC.write(out)
+			},
+		},
+		"/sched/pauses/total/other:seconds": {
+			compute: func(_ *statAggregate, out *metricValue) {
+				sched.stwTotalTimeOther.write(out)
 			},
 		},
 		"/sync/mutex/wait/total:seconds": {
 			compute: func(_ *statAggregate, out *metricValue) {
 				out.kind = metricKindFloat64
-				out.scalar = float64bits(nsToSec(sched.totalMutexWaitTime.Load()))
+				out.scalar = float64bits(nsToSec(totalMutexWaitTimeNanos()))
 			},
 		},
 	}
@@ -683,7 +691,7 @@
 // compute populates the gcStatsAggregate with values from the runtime.
 func (a *gcStatsAggregate) compute() {
 	a.heapScan = gcController.heapScan.Load()
-	a.stackScan = uint64(gcController.lastStackScan.Load())
+	a.stackScan = gcController.lastStackScan.Load()
 	a.globalsScan = gcController.globalsScan.Load()
 	a.totalScan = a.heapScan + a.stackScan + a.globalsScan
 }
@@ -823,15 +831,25 @@
 //
 //go:linkname readMetrics runtime/metrics.runtime_readMetrics
 func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
-	// Construct a slice from the args.
-	sl := slice{samplesp, len, cap}
-	samples := *(*[]metricSample)(unsafe.Pointer(&sl))
-
 	metricsLock()
 
 	// Ensure the map is initialized.
 	initMetrics()
 
+	// Read the metrics.
+	readMetricsLocked(samplesp, len, cap)
+	metricsUnlock()
+}
+
+// readMetricsLocked is the internal, locked portion of readMetrics.
+//
+// Broken out for more robust testing. metricsLock must be held and
+// initMetrics must have been called already.
+func readMetricsLocked(samplesp unsafe.Pointer, len int, cap int) {
+	// Construct a slice from the args.
+	sl := slice{samplesp, len, cap}
+	samples := *(*[]metricSample)(unsafe.Pointer(&sl))
+
 	// Clear agg defensively.
 	agg = statAggregate{}
 
@@ -850,6 +868,4 @@
 		// Compute the value based on the stats we have.
 		data.compute(&agg, &sample.value)
 	}
-
-	metricsUnlock()
 }
diff --git a/src/runtime/metrics/description.go b/src/runtime/metrics/description.go
index 745691b..19a7dbf 100644
--- a/src/runtime/metrics/description.go
+++ b/src/runtime/metrics/description.go
@@ -96,11 +96,11 @@
 		Description: "Estimated total CPU time spent with the application paused by " +
 			"the GC. Even if only one thread is running during the pause, this is " +
 			"computed as GOMAXPROCS times the pause latency because nothing else " +
-			"can be executing. This is the exact sum of samples in /gc/pause:seconds " +
-			"if each sample is multiplied by GOMAXPROCS at the time it is taken. " +
-			"This metric is an overestimate, and not directly comparable to " +
-			"system CPU time measurements. Compare only with other /cpu/classes " +
-			"metrics.",
+			"can be executing. This is the exact sum of samples in " +
+			"/sched/pauses/total/gc:seconds if each sample is multiplied by " +
+			"GOMAXPROCS at the time it is taken. This metric is an overestimate, " +
+			"and not directly comparable to system CPU time measurements. Compare " +
+			"only with other /cpu/classes metrics.",
 		Kind:       KindFloat64,
 		Cumulative: true,
 	},
@@ -289,7 +289,7 @@
 	},
 	{
 		Name:        "/gc/pauses:seconds",
-		Description: "Distribution of individual GC-related stop-the-world pause latencies. Bucket counts increase monotonically.",
+		Description: "Deprecated. Prefer the identical /sched/pauses/total/gc:seconds.",
 		Kind:        KindFloat64Histogram,
 		Cumulative:  true,
 	},
@@ -416,8 +416,32 @@
 		Cumulative:  true,
 	},
 	{
+		Name:        "/sched/pauses/stopping/gc:seconds",
+		Description: "Distribution of individual GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total GC-related stop-the-world time (/sched/pauses/total/gc:seconds). During this time, some threads may be executing. Bucket counts increase monotonically.",
+		Kind:        KindFloat64Histogram,
+		Cumulative:  true,
+	},
+	{
+		Name:        "/sched/pauses/stopping/other:seconds",
+		Description: "Distribution of individual non-GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total non-GC-related stop-the-world time (/sched/pauses/total/other:seconds). During this time, some threads may be executing. Bucket counts increase monotonically.",
+		Kind:        KindFloat64Histogram,
+		Cumulative:  true,
+	},
+	{
+		Name:        "/sched/pauses/total/gc:seconds",
+		Description: "Distribution of individual GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (this is measured directly in /sched/pauses/stopping/gc:seconds), during which some threads may still be running. Bucket counts increase monotonically.",
+		Kind:        KindFloat64Histogram,
+		Cumulative:  true,
+	},
+	{
+		Name:        "/sched/pauses/total/other:seconds",
+		Description: "Distribution of individual non-GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (measured directly in /sched/pauses/stopping/other:seconds). Bucket counts increase monotonically.",
+		Kind:        KindFloat64Histogram,
+		Cumulative:  true,
+	},
+	{
 		Name:        "/sync/mutex/wait/total:seconds",
-		Description: "Approximate cumulative time goroutines have spent blocked on a sync.Mutex or sync.RWMutex. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data.",
+		Description: "Approximate cumulative time goroutines have spent blocked on a sync.Mutex, sync.RWMutex, or runtime-internal lock. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data.",
 		Kind:        KindFloat64,
 		Cumulative:  true,
 	},
diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go
index 55d1f65..fb2f44d 100644
--- a/src/runtime/metrics/doc.go
+++ b/src/runtime/metrics/doc.go
@@ -8,7 +8,7 @@
 /*
 Package metrics provides a stable interface to access implementation-defined
 metrics exported by the Go runtime. This package is similar to existing functions
-like [runtime.ReadMemStats] and [debug.ReadGCStats], but significantly more general.
+like [runtime.ReadMemStats] and [runtime/debug.ReadGCStats], but significantly more general.
 
 The set of metrics defined by this package may evolve as the runtime itself
 evolves, and also enables variation across Go implementations, whose relevant
@@ -18,7 +18,7 @@
 
 Metrics are designated by a string key, rather than, for example, a field name in
 a struct. The full list of supported metrics is always available in the slice of
-Descriptions returned by All. Each Description also includes useful information
+Descriptions returned by [All]. Each [Description] also includes useful information
 about the metric.
 
 Thus, users of this API are encouraged to sample supported metrics defined by the
@@ -28,7 +28,8 @@
 users should consider this to be an exceptional and rare event, coinciding with a
 very large change in a particular Go implementation.
 
-Each metric key also has a "kind" that describes the format of the metric's value.
+Each metric key also has a "kind" (see [ValueKind]) that describes the format of the
+metric's value.
 In the interest of not breaking users of this package, the "kind" for a given metric
 is guaranteed not to change. If it must change, then a new metric will be introduced
 with a new key and a new "kind."
@@ -83,10 +84,10 @@
 		the GC. Even if only one thread is running during the pause,
 		this is computed as GOMAXPROCS times the pause latency because
 		nothing else can be executing. This is the exact sum of samples
-		in /gc/pause:seconds if each sample is multiplied by GOMAXPROCS
-		at the time it is taken. This metric is an overestimate,
-		and not directly comparable to system CPU time measurements.
-		Compare only with other /cpu/classes metrics.
+		in /sched/pauses/total/gc:seconds if each sample is multiplied
+		by GOMAXPROCS at the time it is taken. This metric is an
+		overestimate, and not directly comparable to system CPU time
+		measurements. Compare only with other /cpu/classes metrics.
 
 	/cpu/classes/gc/total:cpu-seconds
 		Estimated total CPU time spent performing GC tasks. This metric
@@ -211,8 +212,7 @@
 		1, so a value of 0 indicates that it was never enabled.
 
 	/gc/pauses:seconds
-		Distribution of individual GC-related stop-the-world pause
-		latencies. Bucket counts increase monotonically.
+		Deprecated. Prefer the identical /sched/pauses/total/gc:seconds.
 
 	/gc/scan/globals:bytes
 		The total amount of global variable space that is scannable.
@@ -246,6 +246,10 @@
 		The number of non-default behaviors executed by the cmd/go
 		package due to a non-default GODEBUG=gocacheverify=... setting.
 
+	/godebug/non-default-behavior/gotypesalias:events
+		The number of non-default behaviors executed by the go/types
+		package due to a non-default GODEBUG=gotypesalias=... setting.
+
 	/godebug/non-default-behavior/http2client:events
 		The number of non-default behaviors executed by the net/http
 		package due to a non-default GODEBUG=http2client=... setting.
@@ -254,6 +258,15 @@
 		The number of non-default behaviors executed by the net/http
 		package due to a non-default GODEBUG=http2server=... setting.
 
+	/godebug/non-default-behavior/httplaxcontentlength:events
+		The number of non-default behaviors executed by the net/http
+		package due to a non-default GODEBUG=httplaxcontentlength=...
+		setting.
+
+	/godebug/non-default-behavior/httpmuxgo121:events
+		The number of non-default behaviors executed by the net/http
+		package due to a non-default GODEBUG=httpmuxgo121=... setting.
+
 	/godebug/non-default-behavior/installgoroot:events
 		The number of non-default behaviors executed by the go/build
 		package due to a non-default GODEBUG=installgoroot=... setting.
@@ -290,10 +303,22 @@
 		package due to a non-default GODEBUG=tarinsecurepath=...
 		setting.
 
+	/godebug/non-default-behavior/tls10server:events
+		The number of non-default behaviors executed by the crypto/tls
+		package due to a non-default GODEBUG=tls10server=... setting.
+
 	/godebug/non-default-behavior/tlsmaxrsasize:events
 		The number of non-default behaviors executed by the crypto/tls
 		package due to a non-default GODEBUG=tlsmaxrsasize=... setting.
 
+	/godebug/non-default-behavior/tlsrsakex:events
+		The number of non-default behaviors executed by the crypto/tls
+		package due to a non-default GODEBUG=tlsrsakex=... setting.
+
+	/godebug/non-default-behavior/tlsunsafeekm:events
+		The number of non-default behaviors executed by the crypto/tls
+		package due to a non-default GODEBUG=tlsunsafeekm=... setting.
+
 	/godebug/non-default-behavior/x509sha1:events
 		The number of non-default behaviors executed by the crypto/x509
 		package due to a non-default GODEBUG=x509sha1=... setting.
@@ -303,6 +328,11 @@
 		package due to a non-default GODEBUG=x509usefallbackroots=...
 		setting.
 
+	/godebug/non-default-behavior/x509usepolicies:events
+		The number of non-default behaviors executed by the crypto/x509
+		package due to a non-default GODEBUG=x509usepolicies=...
+		setting.
+
 	/godebug/non-default-behavior/zipinsecurepath:events
 		The number of non-default behaviors executed by the archive/zip
 		package due to a non-default GODEBUG=zipinsecurepath=...
@@ -390,11 +420,43 @@
 		in a runnable state before actually running. Bucket counts
 		increase monotonically.
 
+	/sched/pauses/stopping/gc:seconds
+		Distribution of individual GC-related stop-the-world stopping
+		latencies. This is the time it takes from deciding to stop the
+		world until all Ps are stopped. This is a subset of the total
+		GC-related stop-the-world time (/sched/pauses/total/gc:seconds).
+		During this time, some threads may be executing. Bucket counts
+		increase monotonically.
+
+	/sched/pauses/stopping/other:seconds
+		Distribution of individual non-GC-related stop-the-world
+		stopping latencies. This is the time it takes from deciding
+		to stop the world until all Ps are stopped. This is a
+		subset of the total non-GC-related stop-the-world time
+		(/sched/pauses/total/other:seconds). During this time, some
+		threads may be executing. Bucket counts increase monotonically.
+
+	/sched/pauses/total/gc:seconds
+		Distribution of individual GC-related stop-the-world pause
+		latencies. This is the time from deciding to stop the world
+		until the world is started again. Some of this time is spent
+		getting all threads to stop (this is measured directly in
+		/sched/pauses/stopping/gc:seconds), during which some threads
+		may still be running. Bucket counts increase monotonically.
+
+	/sched/pauses/total/other:seconds
+		Distribution of individual non-GC-related stop-the-world
+		pause latencies. This is the time from deciding to stop the
+		world until the world is started again. Some of this time
+		is spent getting all threads to stop (measured directly in
+		/sched/pauses/stopping/other:seconds). Bucket counts increase
+		monotonically.
+
 	/sync/mutex/wait/total:seconds
-		Approximate cumulative time goroutines have spent blocked
-		on a sync.Mutex or sync.RWMutex. This metric is useful for
-		identifying global changes in lock contention. Collect a mutex
-		or block profile using the runtime/pprof package for more
-		detailed contention data.
+		Approximate cumulative time goroutines have spent blocked on a
+		sync.Mutex, sync.RWMutex, or runtime-internal lock. This metric
+		is useful for identifying global changes in lock contention.
+		Collect a mutex or block profile using the runtime/pprof package
+		for more detailed contention data.
 */
 package metrics
diff --git a/src/runtime/metrics/sample.go b/src/runtime/metrics/sample.go
index 4cf8cdf..9efc5c5 100644
--- a/src/runtime/metrics/sample.go
+++ b/src/runtime/metrics/sample.go
@@ -24,7 +24,7 @@
 // Implemented in the runtime.
 func runtime_readMetrics(unsafe.Pointer, int, int)
 
-// Read populates each Value field in the given slice of metric samples.
+// Read populates each [Value] field in the given slice of metric samples.
 //
 // Desired metrics should be present in the slice with the appropriate name.
 // The user of this API is encouraged to re-use the same slice between calls for
@@ -32,7 +32,7 @@
 //
 // Note that re-use has some caveats. Notably, Values should not be read or
 // manipulated while a Read with that value is outstanding; that is a data race.
-// This property includes pointer-typed Values (for example, Float64Histogram)
+// This property includes pointer-typed Values (for example, [Float64Histogram])
 // whose underlying storage will be reused by Read when possible. To safely use
 // such values in a concurrent setting, all data must be deep-copied.
 //
@@ -40,7 +40,7 @@
 // must share no underlying memory. When in doubt, create a new []Sample from
 // scratch, which is always safe, though may be inefficient.
 //
-// Sample values with names not appearing in All will have their Value populated
+// Sample values with names not appearing in [All] will have their Value populated
 // as KindBad to indicate that the name is unknown.
 func Read(m []Sample) {
 	runtime_readMetrics(unsafe.Pointer(&m[0]), len(m), cap(m))
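
A minimal sketch of the reuse caveat described in the comment above: deep-copy a Float64Histogram before issuing another Read with the same slice. Only the metric name and the runtime/metrics API come from this package; the helper and variable names are illustrative.

package main

import (
	"runtime/metrics"
	"slices"
)

// snapshotPauses reads the GC pause histogram and returns a deep copy,
// since Read may reuse the histogram's underlying storage on the next call.
func snapshotPauses(samples []metrics.Sample) *metrics.Float64Histogram {
	metrics.Read(samples)
	h := samples[0].Value.Float64Histogram()
	return &metrics.Float64Histogram{
		Counts:  slices.Clone(h.Counts),
		Buckets: slices.Clone(h.Buckets),
	}
}

func main() {
	samples := []metrics.Sample{{Name: "/sched/pauses/total/gc:seconds"}}
	before := snapshotPauses(samples)
	// ... run the workload of interest ...
	after := snapshotPauses(samples) // safe: before was deep-copied
	_, _ = before, after
}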
diff --git a/src/runtime/metrics/value.go b/src/runtime/metrics/value.go
index ed9a33d..3059749 100644
--- a/src/runtime/metrics/value.go
+++ b/src/runtime/metrics/value.go
@@ -9,7 +9,7 @@
 	"unsafe"
 )
 
-// ValueKind is a tag for a metric Value which indicates its type.
+// ValueKind is a tag for a metric [Value] which indicates its type.
 type ValueKind int
 
 const (
diff --git a/src/runtime/metrics_test.go b/src/runtime/metrics_test.go
index cfb09a3..d7f4133 100644
--- a/src/runtime/metrics_test.go
+++ b/src/runtime/metrics_test.go
@@ -5,13 +5,23 @@
 package runtime_test
 
 import (
+	"bytes"
+	"fmt"
+	"internal/goexperiment"
+	"internal/profile"
+	"internal/testenv"
+	"os"
 	"reflect"
 	"runtime"
 	"runtime/debug"
 	"runtime/metrics"
+	"runtime/pprof"
+	"runtime/trace"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"testing"
 	"time"
 	"unsafe"
@@ -37,7 +47,7 @@
 	oldLimit := debug.SetMemoryLimit(limit)
 	defer debug.SetMemoryLimit(oldLimit)
 
-	// Set an GC percent to check the metric for it
+	// Set a GC percent to check the metric for it
 	gcPercent := 99
 	oldGCPercent := debug.SetGCPercent(gcPercent)
 	defer debug.SetGCPercent(oldGCPercent)
@@ -56,7 +66,7 @@
 	}
 
 	// Check to make sure the values we read line up with other values we read.
-	var allocsBySize *metrics.Float64Histogram
+	var allocsBySize, gcPauses, schedPausesTotalGC *metrics.Float64Histogram
 	var tinyAllocs uint64
 	var mallocs, frees uint64
 	for i := range samples {
@@ -171,6 +181,10 @@
 			checkUint64(t, name, samples[i].Value.Uint64(), uint64(mstats.NumForcedGC))
 		case "/gc/cycles/total:gc-cycles":
 			checkUint64(t, name, samples[i].Value.Uint64(), uint64(mstats.NumGC))
+		case "/gc/pauses:seconds":
+			gcPauses = samples[i].Value.Float64Histogram()
+		case "/sched/pauses/total/gc:seconds":
+			schedPausesTotalGC = samples[i].Value.Float64Histogram()
 		}
 	}
 
@@ -184,6 +198,14 @@
 	// Check allocation and free counts.
 	checkUint64(t, "/gc/heap/allocs:objects", mallocs, mstats.Mallocs-tinyAllocs)
 	checkUint64(t, "/gc/heap/frees:objects", frees, mstats.Frees-tinyAllocs)
+
+	// Verify that /gc/pauses:seconds is a copy of /sched/pauses/total/gc:seconds
+	if !reflect.DeepEqual(gcPauses.Buckets, schedPausesTotalGC.Buckets) {
+		t.Errorf("/gc/pauses:seconds buckets %v do not match /sched/pauses/total/gc:seconds buckets %v", gcPauses.Buckets, schedPausesTotalGC.Buckets)
+	}
+	if !reflect.DeepEqual(gcPauses.Counts, schedPausesTotalGC.Counts) {
+		t.Errorf("/gc/pauses:seconds counts %v do not match /sched/pauses/total/gc:seconds counts %v", gcPauses.Counts, schedPausesTotalGC.Counts)
+	}
 }
 
 func TestReadMetricsConsistency(t *testing.T) {
@@ -761,3 +783,510 @@
 	}
 	t.Errorf(`time.Sleep did not contribute enough to "idle" class: minimum idle time = %.5fs`, minIdleCPUSeconds)
 }
+
+// Call fn() and verify that the correct STW metrics increment. If isGC is
+// true, fn triggers a GC STW. Otherwise, fn triggers a non-GC ("other") STW.
+func testSchedPauseMetrics(t *testing.T, fn func(t *testing.T), isGC bool) {
+	m := []metrics.Sample{
+		{Name: "/sched/pauses/stopping/gc:seconds"},
+		{Name: "/sched/pauses/stopping/other:seconds"},
+		{Name: "/sched/pauses/total/gc:seconds"},
+		{Name: "/sched/pauses/total/other:seconds"},
+	}
+
+	stoppingGC := &m[0]
+	stoppingOther := &m[1]
+	totalGC := &m[2]
+	totalOther := &m[3]
+
+	sampleCount := func(s *metrics.Sample) uint64 {
+		h := s.Value.Float64Histogram()
+
+		var n uint64
+		for _, c := range h.Counts {
+			n += c
+		}
+		return n
+	}
+
+	// Read baseline.
+	metrics.Read(m)
+
+	baselineStartGC := sampleCount(stoppingGC)
+	baselineStartOther := sampleCount(stoppingOther)
+	baselineTotalGC := sampleCount(totalGC)
+	baselineTotalOther := sampleCount(totalOther)
+
+	fn(t)
+
+	metrics.Read(m)
+
+	if isGC {
+		if got := sampleCount(stoppingGC); got <= baselineStartGC {
+			t.Errorf("/sched/pauses/stopping/gc:seconds sample count %d did not increase from baseline of %d", got, baselineStartGC)
+		}
+		if got := sampleCount(totalGC); got <= baselineTotalGC {
+			t.Errorf("/sched/pauses/total/gc:seconds sample count %d did not increase from baseline of %d", got, baselineTotalGC)
+		}
+
+		if got := sampleCount(stoppingOther); got != baselineStartOther {
+			t.Errorf("/sched/pauses/stopping/other:seconds sample count %d changed from baseline of %d", got, baselineStartOther)
+		}
+		if got := sampleCount(totalOther); got != baselineTotalOther {
+			t.Errorf("/sched/pauses/total/other:seconds sample count %d changed from baseline of %d", got, baselineTotalOther)
+		}
+	} else {
+		if got := sampleCount(stoppingGC); got != baselineStartGC {
+			t.Errorf("/sched/pauses/stopping/gc:seconds sample count %d changed from baseline of %d", got, baselineStartGC)
+		}
+		if got := sampleCount(totalGC); got != baselineTotalGC {
+			t.Errorf("/sched/pauses/total/gc:seconds sample count %d changed from baseline of %d", got, baselineTotalGC)
+		}
+
+		if got := sampleCount(stoppingOther); got <= baselineStartOther {
+			t.Errorf("/sched/pauses/stopping/other:seconds sample count %d did not increase from baseline of %d", got, baselineStartOther)
+		}
+		if got := sampleCount(totalOther); got <= baselineTotalOther {
+			t.Errorf("/sched/pauses/total/other:seconds sample count %d did not increase from baseline of %d", got, baselineTotalOther)
+		}
+	}
+}
+
+func TestSchedPauseMetrics(t *testing.T) {
+	tests := []struct {
+		name string
+		isGC bool
+		fn   func(t *testing.T)
+	}{
+		{
+			name: "runtime.GC",
+			isGC: true,
+			fn: func(t *testing.T) {
+				runtime.GC()
+			},
+		},
+		{
+			name: "runtime.GOMAXPROCS",
+			fn: func(t *testing.T) {
+				if runtime.GOARCH == "wasm" {
+					t.Skip("GOMAXPROCS >1 not supported on wasm")
+				}
+
+				n := runtime.GOMAXPROCS(0)
+				defer runtime.GOMAXPROCS(n)
+
+				runtime.GOMAXPROCS(n + 1)
+			},
+		},
+		{
+			name: "runtime.GoroutineProfile",
+			fn: func(t *testing.T) {
+				var s [1]runtime.StackRecord
+				runtime.GoroutineProfile(s[:])
+			},
+		},
+		{
+			name: "runtime.ReadMemStats",
+			fn: func(t *testing.T) {
+				var mstats runtime.MemStats
+				runtime.ReadMemStats(&mstats)
+			},
+		},
+		{
+			name: "runtime.Stack",
+			fn: func(t *testing.T) {
+				var b [64]byte
+				runtime.Stack(b[:], true)
+			},
+		},
+		{
+			name: "runtime/debug.WriteHeapDump",
+			fn: func(t *testing.T) {
+				if runtime.GOOS == "js" {
+					t.Skip("WriteHeapDump not supported on js")
+				}
+
+				f, err := os.CreateTemp(t.TempDir(), "heapdumptest")
+				if err != nil {
+					t.Fatalf("os.CreateTemp failed: %v", err)
+				}
+				defer os.Remove(f.Name())
+				defer f.Close()
+				debug.WriteHeapDump(f.Fd())
+			},
+		},
+		{
+			name: "runtime/trace.Start",
+			fn: func(t *testing.T) {
+				if trace.IsEnabled() {
+					t.Skip("tracing already enabled")
+				}
+
+				var buf bytes.Buffer
+				if err := trace.Start(&buf); err != nil {
+					t.Errorf("trace.Start err got %v want nil", err)
+				}
+				trace.Stop()
+			},
+		},
+	}
+
+	// These tests count STW pauses, classified based on whether they're related
+	// to the GC or not. Disable automatic GC cycles during the test so we don't
+	// have an incidental GC pause when we're trying to observe only
+	// non-GC-related pauses. This is especially important for the
+	// runtime/trace.Start test, since (as of this writing) that will block
+	// until any active GC mark phase completes.
+	defer debug.SetGCPercent(debug.SetGCPercent(-1))
+	runtime.GC()
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			testSchedPauseMetrics(t, tc.fn, tc.isGC)
+		})
+	}
+}
+
+func TestRuntimeLockMetricsAndProfile(t *testing.T) {
+	testenv.SkipFlaky(t, 64253)
+
+	old := runtime.SetMutexProfileFraction(0) // enabled during sub-tests
+	defer runtime.SetMutexProfileFraction(old)
+	if old != 0 {
+		t.Fatalf("need mutex profile fraction 0, got %d", old)
+	}
+
+	{
+		before := os.Getenv("GODEBUG")
+		for _, s := range strings.Split(before, ",") {
+			if strings.HasPrefix(s, "runtimecontentionstacks=") {
+				t.Logf("GODEBUG includes explicit setting %q", s)
+			}
+		}
+		defer func() { os.Setenv("GODEBUG", before) }()
+		os.Setenv("GODEBUG", fmt.Sprintf("%s,runtimecontentionstacks=1", before))
+	}
+
+	t.Logf("NumCPU %d", runtime.NumCPU())
+	t.Logf("GOMAXPROCS %d", runtime.GOMAXPROCS(0))
+	if minCPU := 2; runtime.NumCPU() < minCPU {
+		t.Skipf("creating and observing contention on runtime-internal locks requires NumCPU >= %d", minCPU)
+	}
+
+	loadProfile := func(t *testing.T) *profile.Profile {
+		var w bytes.Buffer
+		pprof.Lookup("mutex").WriteTo(&w, 0)
+		p, err := profile.Parse(&w)
+		if err != nil {
+			t.Fatalf("failed to parse profile: %v", err)
+		}
+		if err := p.CheckValid(); err != nil {
+			t.Fatalf("invalid profile: %v", err)
+		}
+		return p
+	}
+
+	measureDelta := func(t *testing.T, fn func()) (metricGrowth, profileGrowth float64, p *profile.Profile) {
+		beforeProfile := loadProfile(t)
+		beforeMetrics := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}}
+		metrics.Read(beforeMetrics)
+
+		fn()
+
+		afterProfile := loadProfile(t)
+		afterMetrics := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}}
+		metrics.Read(afterMetrics)
+
+		sumSamples := func(p *profile.Profile, i int) int64 {
+			var sum int64
+			for _, s := range p.Sample {
+				sum += s.Value[i]
+			}
+			return sum
+		}
+
+		metricGrowth = afterMetrics[0].Value.Float64() - beforeMetrics[0].Value.Float64()
+		profileGrowth = float64(sumSamples(afterProfile, 1)-sumSamples(beforeProfile, 1)) * time.Nanosecond.Seconds()
+
+		// The internal/profile package does not support compaction; this delta
+		// profile will include separate positive and negative entries.
+		p = afterProfile.Copy()
+		if len(beforeProfile.Sample) > 0 {
+			err := p.Merge(beforeProfile, -1)
+			if err != nil {
+				t.Fatalf("Merge profiles: %v", err)
+			}
+		}
+
+		return metricGrowth, profileGrowth, p
+	}
+
+	testcase := func(strictTiming bool, acceptStacks [][]string, workers int, fn func() bool) func(t *testing.T) (metricGrowth, profileGrowth float64, n, value int64) {
+		return func(t *testing.T) (metricGrowth, profileGrowth float64, n, value int64) {
+			metricGrowth, profileGrowth, p := measureDelta(t, func() {
+				var started, stopped sync.WaitGroup
+				started.Add(workers)
+				stopped.Add(workers)
+				for i := 0; i < workers; i++ {
+					w := &contentionWorker{
+						before: func() {
+							started.Done()
+							started.Wait()
+						},
+						after: func() {
+							stopped.Done()
+						},
+						fn: fn,
+					}
+					go w.run()
+				}
+				stopped.Wait()
+			})
+
+			if profileGrowth == 0 {
+				t.Errorf("no increase in mutex profile")
+			}
+			if metricGrowth == 0 && strictTiming {
+				// If the critical section is very short, systems with low timer
+				// resolution may be unable to measure it via nanotime.
+				t.Errorf("no increase in /sync/mutex/wait/total:seconds metric")
+			}
+			// This comparison is possible because the time measurements in support of
+			// runtime/pprof and runtime/metrics for runtime-internal locks are so close
+			// together. It doesn't work as well for user-space contention, where the
+			// involved goroutines are not _Grunnable the whole time and so need to pass
+			// through the scheduler.
+			t.Logf("lock contention growth in runtime/pprof's view  (%fs)", profileGrowth)
+			t.Logf("lock contention growth in runtime/metrics' view (%fs)", metricGrowth)
+
+			acceptStacks = append([][]string(nil), acceptStacks...)
+			for i, stk := range acceptStacks {
+				if goexperiment.StaticLockRanking {
+					if !slices.ContainsFunc(stk, func(s string) bool {
+						return s == "runtime.systemstack" || s == "runtime.mcall" || s == "runtime.mstart"
+					}) {
+						// stk is a call stack that is still on the user stack when
+						// it calls runtime.unlock. Add the extra function that
+						// we'll see, when the static lock ranking implementation of
+						// runtime.unlockWithRank switches to the system stack.
+						stk = append([]string{"runtime.unlockWithRank"}, stk...)
+					}
+				}
+				acceptStacks[i] = stk
+			}
+
+			var stks [][]string
+			values := make([][2]int64, len(acceptStacks))
+			for _, s := range p.Sample {
+				var have []string
+				for _, loc := range s.Location {
+					for _, line := range loc.Line {
+						have = append(have, line.Function.Name)
+					}
+				}
+				stks = append(stks, have)
+				for i, stk := range acceptStacks {
+					if slices.Equal(have, stk) {
+						values[i][0] += s.Value[0]
+						values[i][1] += s.Value[1]
+					}
+				}
+			}
+			for i, stk := range acceptStacks {
+				n += values[i][0]
+				value += values[i][1]
+				t.Logf("stack %v has samples totaling n=%d value=%d", stk, values[i][0], values[i][1])
+			}
+			if n == 0 && value == 0 {
+				t.Logf("profile:\n%s", p)
+				for _, have := range stks {
+					t.Logf("have stack %v", have)
+				}
+				for _, stk := range acceptStacks {
+					t.Errorf("want stack %v", stk)
+				}
+			}
+
+			return metricGrowth, profileGrowth, n, value
+		}
+	}
+
+	name := t.Name()
+
+	t.Run("runtime.lock", func(t *testing.T) {
+		mus := make([]runtime.Mutex, 100)
+		var needContention atomic.Int64
+		delay := 100 * time.Microsecond // large relative to system noise, for comparison between clocks
+		delayMicros := delay.Microseconds()
+
+		// The goroutine that acquires the lock will only proceed when it
+		// detects that its partner is contended for the lock. That will lead to
+		// live-lock if anything (such as a STW) prevents the partner goroutine
+		// from running. Allowing the contention workers to pause and restart
+		// (to allow a STW to proceed) makes it harder to confirm that we're
+		// counting the correct number of contention events, since some locks
+		// will end up contended twice. Instead, disable the GC.
+		defer debug.SetGCPercent(debug.SetGCPercent(-1))
+
+		const workers = 2
+		if runtime.GOMAXPROCS(0) < workers {
+			t.Skipf("contention on runtime-internal locks requires GOMAXPROCS >= %d", workers)
+		}
+
+		fn := func() bool {
+			n := int(needContention.Load())
+			if n < 0 {
+				return false
+			}
+			mu := &mus[n]
+
+			runtime.Lock(mu)
+			for int(needContention.Load()) == n {
+				if runtime.MutexContended(mu) {
+					// make them wait a little while
+					for start := runtime.Nanotime(); (runtime.Nanotime()-start)/1000 < delayMicros; {
+						runtime.Usleep(uint32(delayMicros))
+					}
+					break
+				}
+			}
+			runtime.Unlock(mu)
+			needContention.Store(int64(n - 1))
+
+			return true
+		}
+
+		stks := [][]string{{
+			"runtime.unlock",
+			"runtime_test." + name + ".func5.1",
+			"runtime_test.(*contentionWorker).run",
+		}}
+
+		t.Run("sample-1", func(t *testing.T) {
+			old := runtime.SetMutexProfileFraction(1)
+			defer runtime.SetMutexProfileFraction(old)
+
+			needContention.Store(int64(len(mus) - 1))
+			metricGrowth, profileGrowth, n, _ := testcase(true, stks, workers, fn)(t)
+
+			if have, want := metricGrowth, delay.Seconds()*float64(len(mus)); have < want {
+				// The test imposes a delay with usleep, verified with calls to
+				// nanotime. Compare against the runtime/metrics package's view
+				// (based on nanotime) rather than runtime/pprof's view (based
+				// on cputicks).
+				t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want)
+			}
+			if have, want := n, int64(len(mus)); have != want {
+				t.Errorf("mutex profile reported contention count different from the known true count (%d != %d)", have, want)
+			}
+
+			const slop = 1.5 // account for nanotime vs cputicks
+			if profileGrowth > slop*metricGrowth || metricGrowth > slop*profileGrowth {
+				t.Errorf("views differ by more than %fx", slop)
+			}
+		})
+
+		t.Run("sample-2", func(t *testing.T) {
+			old := runtime.SetMutexProfileFraction(2)
+			defer runtime.SetMutexProfileFraction(old)
+
+			needContention.Store(int64(len(mus) - 1))
+			metricGrowth, profileGrowth, n, _ := testcase(true, stks, workers, fn)(t)
+
+			// With 100 trials and profile fraction of 2, we expect to capture
+			// 50 samples. Allow the test to pass if we get at least 20 samples;
+			// the CDF of the binomial distribution says there's less than a
+			// 1e-9 chance of that, which is an acceptably low flakiness rate.
+			const samplingSlop = 2.5
+
+			if have, want := metricGrowth, delay.Seconds()*float64(len(mus)); samplingSlop*have < want {
+				// The test imposes a delay with usleep, verified with calls to
+				// nanotime. Compare against the runtime/metrics package's view
+				// (based on nanotime) rather than runtime/pprof's view (based
+				// on cputicks).
+				t.Errorf("runtime/metrics reported less than the known minimum contention duration (%f * %fs < %fs)", samplingSlop, have, want)
+			}
+			if have, want := n, int64(len(mus)); float64(have) > float64(want)*samplingSlop || float64(want) > float64(have)*samplingSlop {
+				t.Errorf("mutex profile reported contention count too different from the expected count (%d far from %d)", have, want)
+			}
+
+			const timerSlop = 1.5 * samplingSlop // account for nanotime vs cputicks, plus the two views' independent sampling
+			if profileGrowth > timerSlop*metricGrowth || metricGrowth > timerSlop*profileGrowth {
+				t.Errorf("views differ by more than %fx", timerSlop)
+			}
+		})
+	})
+
+	t.Run("runtime.semrelease", func(t *testing.T) {
+		old := runtime.SetMutexProfileFraction(1)
+		defer runtime.SetMutexProfileFraction(old)
+
+		const workers = 3
+		if runtime.GOMAXPROCS(0) < workers {
+			t.Skipf("creating and observing contention on runtime-internal semaphores requires GOMAXPROCS >= %d", workers)
+		}
+
+		var sem uint32 = 1
+		var tries atomic.Int32
+		tries.Store(10_000_000) // prefer controlled failure to timeout
+		var sawContention atomic.Int32
+		var need int32 = 1
+		fn := func() bool {
+			if sawContention.Load() >= need {
+				return false
+			}
+			if tries.Add(-1) < 0 {
+				return false
+			}
+
+			runtime.Semacquire(&sem)
+			runtime.Semrelease1(&sem, false, 0)
+			if runtime.MutexContended(runtime.SemRootLock(&sem)) {
+				sawContention.Add(1)
+			}
+			return true
+		}
+
+		stks := [][]string{
+			{
+				"runtime.unlock",
+				"runtime.semrelease1",
+				"runtime_test.TestRuntimeLockMetricsAndProfile.func6.1",
+				"runtime_test.(*contentionWorker).run",
+			},
+			{
+				"runtime.unlock",
+				"runtime.semacquire1",
+				"runtime.semacquire",
+				"runtime_test.TestRuntimeLockMetricsAndProfile.func6.1",
+				"runtime_test.(*contentionWorker).run",
+			},
+		}
+
+		// Verify that we get the call stacks we expect, with anything more
+		// than zero cycles / zero samples. The duration of each contention
+		// event is too small relative to the expected overhead for us to
+		// verify its value more directly. Leave that to the explicit
+		// lock/unlock test.
+
+		testcase(false, stks, workers, fn)(t)
+
+		if remaining := tries.Load(); remaining >= 0 {
+			t.Logf("finished test early (%d tries remaining)", remaining)
+		}
+	})
+}
+
+// contentionWorker provides cleaner call stacks for lock contention profile tests
+type contentionWorker struct {
+	before func()
+	fn     func() bool
+	after  func()
+}
+
+func (w *contentionWorker) run() {
+	defer w.after()
+	w.before()
+
+	for w.fn() {
+	}
+}
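
The user-facing side of what TestRuntimeLockMetricsAndProfile exercises can be sketched roughly as below: with a non-zero mutex profile fraction, contention feeds both the "mutex" profile and the /sync/mutex/wait/total:seconds metric, and (as the test arranges via GODEBUG=runtimecontentionstacks=1) runtime-internal locks report full call stacks. The program itself is illustrative only.

package main

import (
	"fmt"
	"os"
	"runtime"
	"runtime/metrics"
	"runtime/pprof"
	"sync"
)

func main() {
	runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(0)

	// Generate some contention so there is something to report.
	var mu sync.Mutex
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				mu.Lock()
				mu.Unlock()
			}
		}()
	}
	wg.Wait()

	s := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}}
	metrics.Read(s)
	fmt.Printf("total mutex wait: %fs\n", s[0].Value.Float64())

	// Dump the mutex profile in its human-readable debug form.
	pprof.Lookup("mutex").WriteTo(os.Stdout, 1)
}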
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index 650db18..7d9d547 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -9,6 +9,7 @@
 import (
 	"internal/abi"
 	"internal/goarch"
+	"internal/goexperiment"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -299,6 +300,27 @@
 	return false
 }
 
+// blockUntilEmptyFinalizerQueue blocks until either the finalizer
+// queue is emptied (and the finalizers have executed) or the timeout
+// is reached. Returns true if the finalizer queue was emptied.
+// This is used by the runtime and sync tests.
+func blockUntilEmptyFinalizerQueue(timeout int64) bool {
+	start := nanotime()
+	for nanotime()-start < timeout {
+		lock(&finlock)
+		// We know the queue has been drained when both finq is nil
+		// and the finalizer g has stopped executing.
+		empty := finq == nil
+		empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
+		unlock(&finlock)
+		if empty {
+			return true
+		}
+		Gosched()
+	}
+	return false
+}
+
 // SetFinalizer sets the finalizer associated with obj to the provided
 // finalizer function. When the garbage collector finds an unreachable block
 // with an associated finalizer, it clears the association and runs
@@ -330,11 +352,11 @@
 // There is no guarantee that finalizers will run before a program exits,
 // so typically they are useful only for releasing non-memory resources
 // associated with an object during a long-running program.
-// For example, an os.File object could use a finalizer to close the
+// For example, an [os.File] object could use a finalizer to close the
 // associated operating system file descriptor when a program discards
 // an os.File without calling Close, but it would be a mistake
 // to depend on a finalizer to flush an in-memory I/O buffer such as a
-// bufio.Writer, because the buffer would not be flushed at program exit.
+// [bufio.Writer], because the buffer would not be flushed at program exit.
 //
 // It is not guaranteed that a finalizer will run if the size of *obj is
 // zero bytes, because it may share same address with other zero-size
@@ -357,14 +379,14 @@
 // the object is reachable until it is no longer required.
 // Objects stored in global variables, or that can be found by tracing
 // pointers from a global variable, are reachable. For other objects,
-// pass the object to a call of the KeepAlive function to mark the
+// pass the object to a call of the [KeepAlive] function to mark the
 // last point in the function where the object must be reachable.
 //
 // For example, if p points to a struct, such as os.File, that contains
 // a file descriptor d, and p has a finalizer that closes that file
 // descriptor, and if the last use of p in a function is a call to
 // syscall.Write(p.d, buf, size), then p may be unreachable as soon as
-// the program enters syscall.Write. The finalizer may run at that moment,
+// the program enters [syscall.Write]. The finalizer may run at that moment,
 // closing p.d, causing syscall.Write to fail because it is writing to
 // a closed file descriptor (or, worse, to an entirely different
 // file descriptor opened by a different goroutine). To avoid this problem,
@@ -410,7 +432,7 @@
 	}
 
 	// find the containing object
-	base, _, _ := findObject(uintptr(e.data), 0, 0)
+	base, span, _ := findObject(uintptr(e.data), 0, 0)
 
 	if base == 0 {
 		if isGoPointerWithoutSpan(e.data) {
@@ -419,6 +441,11 @@
 		throw("runtime.SetFinalizer: pointer not in allocated block")
 	}
 
+	// Move base forward if we've got an allocation header.
+	if goexperiment.AllocHeaders && !span.spanclass.noscan() && !heapBitsInSpan(span.elemsize) && span.spanclass.sizeclass() != 0 {
+		base += mallocHeaderSize
+	}
+
 	if uintptr(e.data) != base {
 		// As an implementation detail we allow to set finalizers for an inner byte
 		// of an object if it could come from tiny alloc (see mallocgc for details).
@@ -464,7 +491,7 @@
 			// ok - satisfies empty interface
 			goto okarg
 		}
-		if iface := assertE2I2(ityp, *efaceOf(&obj)); iface.tab != nil {
+		if itab := assertE2I2(ityp, efaceOf(&obj)._type); itab != nil {
 			goto okarg
 		}
 	}
@@ -473,7 +500,7 @@
 	// compute size needed for return parameters
 	nret := uintptr(0)
 	for _, t := range ft.OutSlice() {
-		nret = alignUp(nret, uintptr(t.Align_)) + uintptr(t.Size_)
+		nret = alignUp(nret, uintptr(t.Align_)) + t.Size_
 	}
 	nret = alignUp(nret, goarch.PtrSize)
 
@@ -509,11 +536,11 @@
 //	// No more uses of p after this point.
 //
 // Without the KeepAlive call, the finalizer could run at the start of
-// syscall.Read, closing the file descriptor before syscall.Read makes
+// [syscall.Read], closing the file descriptor before syscall.Read makes
 // the actual system call.
 //
 // Note: KeepAlive should only be used to prevent finalizers from
-// running prematurely. In particular, when used with unsafe.Pointer,
+// running prematurely. In particular, when used with [unsafe.Pointer],
 // the rules for valid uses of unsafe.Pointer still apply.
 func KeepAlive(x any) {
 	// Introduce a use of x that the compiler can't eliminate.
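
A minimal sketch of the pattern the SetFinalizer and KeepAlive documentation above describes: a finalizer that closes a raw file descriptor, with KeepAlive marking the last point at which the wrapper must stay reachable. It assumes a Unix-like system; the type and function names are illustrative only.

package main

import (
	"runtime"
	"syscall"
)

type file struct {
	d int // underlying file descriptor
}

func open(path string) (*file, error) {
	fd, err := syscall.Open(path, syscall.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	p := &file{d: fd}
	// Close the descriptor if the *file is discarded without Close.
	runtime.SetFinalizer(p, func(p *file) { syscall.Close(p.d) })
	return p, nil
}

func main() {
	p, err := open("/etc/hosts")
	if err != nil {
		return
	}
	buf := make([]byte, 64)
	syscall.Read(p.d, buf)
	// Without this, p could become unreachable (and be finalized, closing
	// p.d) as soon as the program enters syscall.Read.
	runtime.KeepAlive(p)
}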
diff --git a/src/runtime/mfixalloc.go b/src/runtime/mfixalloc.go
index 1a249e5..7760ada 100644
--- a/src/runtime/mfixalloc.go
+++ b/src/runtime/mfixalloc.go
@@ -57,9 +57,7 @@
 	if size > _FixAllocChunk {
 		throw("runtime: fixalloc size too large")
 	}
-	if min := unsafe.Sizeof(mlink{}); size < min {
-		size = min
-	}
+	size = max(size, unsafe.Sizeof(mlink{}))
 
 	f.size = size
 	f.first = first
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index de5ae0a..6c51517 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -135,9 +135,12 @@
 )
 
 const (
-	_DebugGC         = 0
-	_ConcurrentSweep = true
-	_FinBlockSize    = 4 * 1024
+	_DebugGC      = 0
+	_FinBlockSize = 4 * 1024
+
+	// concurrentSweep is a debug flag. Disabling this flag
+	// ensures all spans are swept while the world is stopped.
+	concurrentSweep = true
 
 	// debugScanConservative enables debug logging for stack
 	// frames that are scanned conservatively.
@@ -215,7 +218,6 @@
 var writeBarrier struct {
 	enabled bool    // compiler emits a check of this before calling write barrier
 	pad     [3]byte // compiler uses 32-bit load for "enabled" field
-	needed  bool    // identical to enabled, for now (TODO: dedup)
 	alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
 }
 
@@ -233,8 +235,7 @@
 //go:nosplit
 func setGCPhase(x uint32) {
 	atomic.Store(&gcphase, x)
-	writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
-	writeBarrier.enabled = writeBarrier.needed
+	writeBarrier.enabled = gcphase == _GCmark || gcphase == _GCmarktermination
 }
 
 // gcMarkWorkerMode represents the mode that a concurrent mark worker
@@ -417,8 +418,7 @@
 	stwprocs, maxprocs                 int32
 	tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start
 
-	pauseNS    int64 // total STW time this cycle
-	pauseStart int64 // nanotime() of last STW
+	pauseNS int64 // total STW time this cycle
 
 	// debug.gctrace heap sizes for this cycle.
 	heap0, heap1, heap2 uint64
@@ -473,7 +473,6 @@
 	// as part of tests and benchmarks to get the system into a
 	// relatively stable and isolated state.
 	for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) {
-		sweep.nbgsweep++
 		Gosched()
 	}
 
@@ -572,10 +571,6 @@
 	}
 	switch t.kind {
 	case gcTriggerHeap:
-		// Non-atomic access to gcController.heapLive for performance. If
-		// we are going to trigger on this, this thread just
-		// atomically wrote gcController.heapLive anyway and we'll see our
-		// own write.
 		trigger, _ := gcController.trigger()
 		return gcController.heapLive.Load() >= trigger
 	case gcTriggerTime:
@@ -621,7 +616,6 @@
 	// We check the transition condition continuously here in case
 	// this G gets delayed in to the next GC cycle.
 	for trigger.test() && sweepone() != ^uintptr(0) {
-		sweep.nbgsweep++
 	}
 
 	// Perform GC initialization and the sweep termination
@@ -652,8 +646,10 @@
 	// Update it under gcsema to avoid gctrace getting wrong values.
 	work.userForced = trigger.kind == gcTriggerCycle
 
-	if traceEnabled() {
-		traceGCStart()
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GCStart()
+		traceRelease(trace)
 	}
 
 	// Check that all Ps have finished deferred mcache flushes.
@@ -680,14 +676,16 @@
 
 	now := nanotime()
 	work.tSweepTerm = now
-	work.pauseStart = now
-	systemstack(func() { stopTheWorldWithSema(stwGCSweepTerm) })
+	var stw worldStop
+	systemstack(func() {
+		stw = stopTheWorldWithSema(stwGCSweepTerm)
+	})
 	// Finish sweep before we start concurrent scan.
 	systemstack(func() {
 		finishsweep_m()
 	})
 
-	// clearpools before we start the GC. If we wait they memory will not be
+	// clearpools before we start the GC. If we wait the memory will not be
 	// reclaimed until the next GC cycle.
 	clearpools()
 
@@ -719,11 +717,11 @@
 	// enabled because they must be enabled before
 	// any non-leaf heap objects are marked. Since
 	// allocations are blocked until assists can
-	// happen, we want enable assists as early as
+	// happen, we want to enable assists as early as
 	// possible.
 	setGCPhase(_GCmark)
 
-	gcBgMarkPrepare() // Must happen before assist enable.
+	gcBgMarkPrepare() // Must happen before assists are enabled.
 	gcMarkRootPrepare()
 
 	// Mark all active tinyalloc blocks. Since we're
@@ -746,10 +744,9 @@
 
 	// Concurrent mark.
 	systemstack(func() {
-		now = startTheWorldWithSema()
-		work.pauseNS += now - work.pauseStart
+		now = startTheWorldWithSema(0, stw)
+		work.pauseNS += now - stw.start
 		work.tMark = now
-		memstats.gcPauseDist.record(now - work.pauseStart)
 
 		sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
 		work.cpuStats.gcPauseTime += sweepTermCpu
@@ -827,31 +824,22 @@
 
 	// Flush all local buffers and collect flushedWork flags.
 	gcMarkDoneFlushed = 0
-	systemstack(func() {
-		gp := getg().m.curg
-		// Mark the user stack as preemptible so that it may be scanned.
-		// Otherwise, our attempt to force all P's to a safepoint could
-		// result in a deadlock as we attempt to preempt a worker that's
-		// trying to preempt us (e.g. for a stack scan).
-		casGToWaiting(gp, _Grunning, waitReasonGCMarkTermination)
-		forEachP(func(pp *p) {
-			// Flush the write barrier buffer, since this may add
-			// work to the gcWork.
-			wbBufFlush1(pp)
+	forEachP(waitReasonGCMarkTermination, func(pp *p) {
+		// Flush the write barrier buffer, since this may add
+		// work to the gcWork.
+		wbBufFlush1(pp)
 
-			// Flush the gcWork, since this may create global work
-			// and set the flushedWork flag.
-			//
-			// TODO(austin): Break up these workbufs to
-			// better distribute work.
-			pp.gcw.dispose()
-			// Collect the flushedWork flag.
-			if pp.gcw.flushedWork {
-				atomic.Xadd(&gcMarkDoneFlushed, 1)
-				pp.gcw.flushedWork = false
-			}
-		})
-		casgstatus(gp, _Gwaiting, _Grunning)
+		// Flush the gcWork, since this may create global work
+		// and set the flushedWork flag.
+		//
+		// TODO(austin): Break up these workbufs to
+		// better distribute work.
+		pp.gcw.dispose()
+		// Collect the flushedWork flag.
+		if pp.gcw.flushedWork {
+			atomic.Xadd(&gcMarkDoneFlushed, 1)
+			pp.gcw.flushedWork = false
+		}
 	})
 
 	if gcMarkDoneFlushed != 0 {
@@ -870,9 +858,11 @@
 	// shaded. Transition to mark termination.
 	now := nanotime()
 	work.tMarkTerm = now
-	work.pauseStart = now
 	getg().m.preemptoff = "gcing"
-	systemstack(func() { stopTheWorldWithSema(stwGCMarkTerm) })
+	var stw worldStop
+	systemstack(func() {
+		stw = stopTheWorldWithSema(stwGCMarkTerm)
+	})
 	// The gcphase is _GCmark, it will transition to _GCmarktermination
 	// below. The important thing is that the wb remains active until
 	// all marking is complete. This includes writes made by the GC.
@@ -899,9 +889,8 @@
 	if restart {
 		getg().m.preemptoff = ""
 		systemstack(func() {
-			now := startTheWorldWithSema()
-			work.pauseNS += now - work.pauseStart
-			memstats.gcPauseDist.record(now - work.pauseStart)
+			now := startTheWorldWithSema(0, stw)
+			work.pauseNS += now - stw.start
 		})
 		semrelease(&worldsema)
 		goto top
@@ -935,12 +924,12 @@
 	gcController.endCycle(now, int(gomaxprocs), work.userForced)
 
 	// Perform mark termination. This will restart the world.
-	gcMarkTermination()
+	gcMarkTermination(stw)
 }
 
 // World must be stopped and mark assists and background workers must be
 // disabled.
-func gcMarkTermination() {
+func gcMarkTermination(stw worldStop) {
 	// Start marktermination (write barrier remains enabled for now).
 	setGCPhase(_GCmarktermination)
 
@@ -951,6 +940,9 @@
 	mp.preemptoff = "gcing"
 	mp.traceback = 2
 	curgp := mp.curg
+	// N.B. The execution tracer is not aware of this status
+	// transition and handles it specially based on the
+	// wait reason.
 	casGToWaiting(curgp, _Grunning, waitReasonGarbageCollection)
 
 	// Run gc on the g0 stack. We do this so that the g stack
@@ -969,6 +961,7 @@
 		// before continuing.
 	})
 
+	var stwSwept bool
 	systemstack(func() {
 		work.heap2 = work.bytesMarked
 		if debug.gccheckmark > 0 {
@@ -987,14 +980,16 @@
 
 		// marking is complete so we can turn the write barrier off
 		setGCPhase(_GCoff)
-		gcSweep(work.mode)
+		stwSwept = gcSweep(work.mode)
 	})
 
 	mp.traceback = 0
 	casgstatus(curgp, _Gwaiting, _Grunning)
 
-	if traceEnabled() {
-		traceGCDone()
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GCDone()
+		traceRelease(trace)
 	}
 
 	// all done
@@ -1015,9 +1010,8 @@
 	now := nanotime()
 	sec, nsec, _ := time_now()
 	unixNow := sec*1e9 + int64(nsec)
-	work.pauseNS += now - work.pauseStart
+	work.pauseNS += now - stw.start
 	work.tEnd = now
-	memstats.gcPauseDist.record(now - work.pauseStart)
 	atomic.Store64(&memstats.last_gc_unix, uint64(unixNow)) // must be Unix time to make sense to user
 	atomic.Store64(&memstats.last_gc_nanotime, uint64(now)) // monotonic time for us
 	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
@@ -1047,10 +1041,6 @@
 	// Reset idle time stat.
 	sched.idleTime.Store(0)
 
-	// Reset sweep state.
-	sweep.nbgsweep = 0
-	sweep.npausesweep = 0
-
 	if work.userForced {
 		memstats.numforcedgc++
 	}
@@ -1079,12 +1069,33 @@
 	// Those aren't tracked in any sweep lists, so we need to
 	// count them against sweep completion until we ensure all
 	// those spans have been forced out.
+	//
+	// If gcSweep fully swept the heap (for example if the sweep
+	// is not concurrent due to a GODEBUG setting), then we expect
+	// the sweepLocker to be invalid, since sweeping is done.
+	//
+	// N.B. Below we might duplicate some work from gcSweep; this is
+	// fine as all that work is idempotent within a GC cycle, and
+	// we're still holding worldsema so a new cycle can't start.
 	sl := sweep.active.begin()
-	if !sl.valid {
+	if !stwSwept && !sl.valid {
 		throw("failed to set sweep barrier")
+	} else if stwSwept && sl.valid {
+		throw("non-concurrent sweep failed to drain all sweep queues")
 	}
 
-	systemstack(func() { startTheWorldWithSema() })
+	systemstack(func() {
+		// The memstats updated above must be updated with the world
+		// stopped to ensure consistency of some values, such as
+		// sched.idleTime and sched.totaltime. memstats also include
+		// the pause time (work.pauseNS), forcing computation of the
+		// total pause time before the pause actually ends.
+		//
+		// Here we reuse the same now for starting the world so that the
+		// time added to /sched/pauses/total/gc:seconds will be
+		// consistent with the value in memstats.
+		startTheWorldWithSema(now, stw)
+	})
 
 	// Flush the heap profile so we can start a new cycle next GC.
 	// This is relatively expensive, so we don't do it with the
@@ -1110,22 +1121,26 @@
 	//
 	// Also, flush the pinner cache, to avoid leaking that memory
 	// indefinitely.
-	systemstack(func() {
-		forEachP(func(pp *p) {
-			pp.mcache.prepareForSweep()
-			if pp.status == _Pidle {
-				systemstack(func() {
-					lock(&mheap_.lock)
-					pp.pcache.flush(&mheap_.pages)
-					unlock(&mheap_.lock)
-				})
-			}
-			pp.pinnerCache = nil
-		})
+	forEachP(waitReasonFlushProcCaches, func(pp *p) {
+		pp.mcache.prepareForSweep()
+		if pp.status == _Pidle {
+			systemstack(func() {
+				lock(&mheap_.lock)
+				pp.pcache.flush(&mheap_.pages)
+				unlock(&mheap_.lock)
+			})
+		}
+		pp.pinnerCache = nil
 	})
-	// Now that we've swept stale spans in mcaches, they don't
-	// count against unswept spans.
-	sweep.active.end(sl)
+	if sl.valid {
+		// Now that we've swept stale spans in mcaches, they don't
+		// count against unswept spans.
+		//
+		// Note: this sweepLocker may not be valid if sweeping had
+		// already completed during the STW. See the corresponding
+		// begin() call that produced sl.
+		sweep.active.end(sl)
+	}
 
 	// Print gctrace before dropping worldsema. As soon as we drop
 	// worldsema another cycle could start and smash the stats
@@ -1186,7 +1201,9 @@
 
 	// Enable huge pages on some metadata if we cross a heap threshold.
 	if gcController.heapGoal() > minHeapForMetadataHugePages {
-		mheap_.enableMetadataHugePages()
+		systemstack(func() {
+			mheap_.enableMetadataHugePages()
+		})
 	}
 
 	semrelease(&worldsema)
@@ -1358,12 +1375,16 @@
 			// the G stack. However, stack shrinking is
 			// disabled for mark workers, so it is safe to
 			// read from the G stack.
+			//
+			// N.B. The execution tracer is not aware of this status
+			// transition and handles it specially based on the
+			// wait reason.
 			casGToWaiting(gp, _Grunning, waitReasonGCWorkerActive)
 			switch pp.gcMarkWorkerMode {
 			default:
 				throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
 			case gcMarkWorkerDedicatedMode:
-				gcDrain(&pp.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
+				gcDrainMarkWorkerDedicated(&pp.gcw, true)
 				if gp.preempt {
 					// We were preempted. This is
 					// a useful signal to kick
@@ -1378,11 +1399,11 @@
 				}
 				// Go back to draining, this time
 				// without preemption.
-				gcDrain(&pp.gcw, gcDrainFlushBgCredit)
+				gcDrainMarkWorkerDedicated(&pp.gcw, false)
 			case gcMarkWorkerFractionalMode:
-				gcDrain(&pp.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
+				gcDrainMarkWorkerFractional(&pp.gcw)
 			case gcMarkWorkerIdleMode:
-				gcDrain(&pp.gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
+				gcDrainMarkWorkerIdle(&pp.gcw)
 			}
 			casgstatus(gp, _Gwaiting, _Grunning)
 		})
@@ -1538,10 +1559,12 @@
 // gcSweep must be called on the system stack because it acquires the heap
 // lock. See mheap for details.
 //
+// Returns true if the heap was fully swept by this function.
+//
 // The world must be stopped.
 //
 //go:systemstack
-func gcSweep(mode gcMode) {
+func gcSweep(mode gcMode) bool {
 	assertWorldStopped()
 
 	if gcphase != _GCoff {
@@ -1559,15 +1582,18 @@
 
 	sweep.centralIndex.clear()
 
-	if !_ConcurrentSweep || mode == gcForceBlockMode {
+	if !concurrentSweep || mode == gcForceBlockMode {
 		// Special case synchronous sweep.
 		// Record that no proportional sweeping has to happen.
 		lock(&mheap_.lock)
 		mheap_.sweepPagesPerByte = 0
 		unlock(&mheap_.lock)
+		// Flush all mcaches.
+		for _, pp := range allp {
+			pp.mcache.prepareForSweep()
+		}
 		// Sweep all spans eagerly.
 		for sweepone() != ^uintptr(0) {
-			sweep.npausesweep++
 		}
 		// Free workbufs eagerly.
 		prepareFreeWorkbufs()
@@ -1578,7 +1604,7 @@
 		// available immediately.
 		mProf_NextCycle()
 		mProf_Flush()
-		return
+		return true
 	}
 
 	// Background sweep.
@@ -1588,6 +1614,7 @@
 		ready(sweep.g, 0, true)
 	}
 	unlock(&sweep.lock)
+	return false
 }
 
 // gcResetMarkState resets global state prior to marking (concurrent
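
The stwSwept value threaded through gcMarkTermination above only becomes true when the sweep is forced to be synchronous, which the existing gcstoptheworld GODEBUG setting can cause. A hedged sketch of driving that path from an ordinary program; the program is illustrative, while gcstoptheworld=2 is the documented value that also disables concurrent sweeping.

// Run as: GODEBUG=gcstoptheworld=2 go run .
package main

import (
	"fmt"
	"runtime"
)

var sink []byte

func main() {
	for i := 0; i < 3; i++ {
		sink = make([]byte, 1<<20) // create some heap garbage each cycle
		runtime.GC()               // with gcstoptheworld=2, sweeping completes before GC returns
	}
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	fmt.Printf("GC cycles: %d, total STW pause: %dns\n", ms.NumGC, ms.PauseTotalNs)
}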
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 2ed411a..b515568 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -9,6 +9,7 @@
 import (
 	"internal/abi"
 	"internal/goarch"
+	"internal/goexperiment"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -413,13 +414,48 @@
 		return
 	}
 
-	traced := false
+	// This extremely verbose boolean indicates whether we've
+	// entered mark assist from the perspective of the tracer.
+	//
+	// In the old tracer, this is just before we call gcAssistAlloc1
+	// *and* tracing is enabled. Because the old tracer doesn't
+	// do any extra tracking, we need to be careful to not emit an
+	// "end" event if there was no corresponding "begin" for the
+	// mark assist.
+	//
+	// In the new tracer, this is just before we call gcAssistAlloc1
+	// *regardless* of whether tracing is enabled. This is because
+	// the new tracer allows for tracing to begin (and advance
+	// generations) in the middle of a GC mark phase, so we need to
+	// record some state so that the tracer can pick it up to ensure
+	// a consistent trace result.
+	//
+	// TODO(mknyszek): Hide the details of inMarkAssist in tracer
+	// functions and simplify all the state tracking. This is a lot.
+	enteredMarkAssistForTracing := false
 retry:
 	if gcCPULimiter.limiting() {
 		// If the CPU limiter is enabled, intentionally don't
 		// assist to reduce the amount of CPU time spent in the GC.
-		if traced {
-			traceGCMarkAssistDone()
+		if enteredMarkAssistForTracing {
+			trace := traceAcquire()
+			if trace.ok() {
+				trace.GCMarkAssistDone()
+				// Set this *after* we trace the end to make sure
+				// that we emit an in-progress event if this is
+				// the first event for the goroutine in the trace
+				// or trace generation. Also, do this between
+				// acquire/release because this is part of the
+				// goroutine's trace state, and it must be atomic
+				// with respect to the tracer.
+				gp.inMarkAssist = false
+				traceRelease(trace)
+			} else {
+				// This state is tracked even if tracing isn't enabled.
+				// It's only used by the new tracer.
+				// See the comment on enteredMarkAssistForTracing.
+				gp.inMarkAssist = false
+			}
 		}
 		return
 	}
@@ -459,16 +495,60 @@
 		if scanWork == 0 {
 			// We were able to steal all of the credit we
 			// needed.
-			if traced {
-				traceGCMarkAssistDone()
+			if enteredMarkAssistForTracing {
+				trace := traceAcquire()
+				if trace.ok() {
+					trace.GCMarkAssistDone()
+					// Set this *after* we trace the end to make sure
+					// that we emit an in-progress event if this is
+					// the first event for the goroutine in the trace
+					// or trace generation. Also, do this between
+					// acquire/release because this is part of the
+					// goroutine's trace state, and it must be atomic
+					// with respect to the tracer.
+					gp.inMarkAssist = false
+					traceRelease(trace)
+				} else {
+					// This state is tracked even if tracing isn't enabled.
+					// It's only used by the new tracer.
+					// See the comment on enteredMarkAssistForTracing.
+					gp.inMarkAssist = false
+				}
 			}
 			return
 		}
 	}
-
-	if traceEnabled() && !traced {
-		traced = true
-		traceGCMarkAssistStart()
+	if !enteredMarkAssistForTracing {
+		trace := traceAcquire()
+		if trace.ok() {
+			if !goexperiment.ExecTracer2 {
+				// In the old tracer, enter mark assist tracing only
+				// if we actually traced an event. Otherwise a goroutine
+				// waking up from mark assist post-GC might end up
+				// writing a stray "end" event.
+				//
+				// This means inMarkAssist will not be meaningful
+				// in the old tracer; that's OK, it's unused.
+				//
+				// See the comment on enteredMarkAssistForTracing.
+				enteredMarkAssistForTracing = true
+			}
+			trace.GCMarkAssistStart()
+			// Set this *after* we trace the start, otherwise we may
+			// emit an in-progress event for an assist we're about to start.
+			gp.inMarkAssist = true
+			traceRelease(trace)
+		} else {
+			gp.inMarkAssist = true
+		}
+		if goexperiment.ExecTracer2 {
+			// In the new tracer, set enter mark assist tracing if we
+			// ever pass this point, because we must manage inMarkAssist
+			// correctly.
+			//
+			// See the comment on enteredMarkAssistForTracing.
+			enteredMarkAssistForTracing = true
+		}
 	}
 
 	// Perform assist work
@@ -513,8 +593,25 @@
 		// At this point either background GC has satisfied
 		// this G's assist debt, or the GC cycle is over.
 	}
-	if traced {
-		traceGCMarkAssistDone()
+	if enteredMarkAssistForTracing {
+		trace := traceAcquire()
+		if trace.ok() {
+			trace.GCMarkAssistDone()
+			// Set this *after* we trace the end to make sure
+			// that we emit an in-progress event if this is
+			// the first event for the goroutine in the trace
+			// or trace generation. Also, do this between
+			// acquire/release because this is part of the
+			// goroutine's trace state, and it must be atomic
+			// with respect to the tracer.
+			gp.inMarkAssist = false
+			traceRelease(trace)
+		} else {
+			// This state is tracked even if tracing isn't enabled.
+			// It's only used by the new tracer.
+			// See the comment on enteredMarkAssistForTracing.
+			gp.inMarkAssist = false
+		}
 	}
 }
 
@@ -537,7 +634,7 @@
 		// The gcBlackenEnabled check in malloc races with the
 		// store that clears it but an atomic check in every malloc
 		// would be a performance hit.
-		// Instead we recheck it here on the non-preemptable system
+		// Instead we recheck it here on the non-preemptible system
 		// stack to determine if we should perform an assist.
 
 		// GC is done, so ignore any remaining debt.
@@ -964,7 +1061,7 @@
 		return
 	}
 
-	locals, args, objs := frame.getStackMap(&state.cache, false)
+	locals, args, objs := frame.getStackMap(false)
 
 	// Scan local variables if stack frame has been allocated.
 	if locals.n > 0 {
@@ -1011,6 +1108,28 @@
 	gcDrainFractional
 )
 
+// gcDrainMarkWorkerIdle is a wrapper for gcDrain that exists to better account
+// mark time in profiles.
+func gcDrainMarkWorkerIdle(gcw *gcWork) {
+	gcDrain(gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
+}
+
+// gcDrainMarkWorkerDedicated is a wrapper for gcDrain that exists to better account
+// mark time in profiles.
+func gcDrainMarkWorkerDedicated(gcw *gcWork, untilPreempt bool) {
+	flags := gcDrainFlushBgCredit
+	if untilPreempt {
+		flags |= gcDrainUntilPreempt
+	}
+	gcDrain(gcw, flags)
+}
+
+// gcDrainMarkWorkerFractional is a wrapper for gcDrain that exists to better account
+// mark time in profiles.
+func gcDrainMarkWorkerFractional(gcw *gcWork) {
+	gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
+}
+
 // gcDrain scans roots and objects in work buffers, blackening grey
 // objects until it is unable to get more work. It may return before
 // GC is done; it's the caller's responsibility to balance work from
@@ -1030,15 +1149,26 @@
 // credit to gcController.bgScanCredit every gcCreditSlack units of
 // scan work.
 //
-// gcDrain will always return if there is a pending STW.
+// gcDrain will always return if there is a pending STW or forEachP.
+//
+// Disabling write barriers is necessary to ensure that, after we've
+// confirmed that gcw is drained, we don't accidentally end up flipping
+// that condition by immediately adding work in the form of a write
+// barrier buffer flush.
+//
+// Don't set nowritebarrierrec because it's safe for some callees to
+// have write barriers enabled.
 //
 //go:nowritebarrier
 func gcDrain(gcw *gcWork, flags gcDrainFlags) {
-	if !writeBarrier.needed {
+	if !writeBarrier.enabled {
 		throw("gcDrain phase incorrect")
 	}
 
+	// N.B. We must be running in a non-preemptible context, so it's
+	// safe to hold a reference to our P here.
 	gp := getg().m.curg
+	pp := gp.m.p.ptr()
 	preemptible := flags&gcDrainUntilPreempt != 0
 	flushBgCredit := flags&gcDrainFlushBgCredit != 0
 	idle := flags&gcDrainIdle != 0
@@ -1060,8 +1190,9 @@
 
 	// Drain root marking jobs.
 	if work.markrootNext < work.markrootJobs {
-		// Stop if we're preemptible or if someone wants to STW.
-		for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
+		// Stop if we're preemptible, if someone wants to STW, or if
+		// someone is calling forEachP.
+		for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
 			job := atomic.Xadd(&work.markrootNext, +1) - 1
 			if job >= work.markrootJobs {
 				break
@@ -1074,8 +1205,16 @@
 	}
 
 	// Drain heap marking jobs.
-	// Stop if we're preemptible or if someone wants to STW.
-	for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
+	//
+	// Stop if we're preemptible, if someone wants to STW, or if
+	// someone is calling forEachP.
+	//
+	// TODO(mknyszek): Consider always checking gp.preempt instead
+	// of having the preempt flag, and making an exception for certain
+	// mark workers in retake. That might be simpler than trying to
+	// enumerate all the reasons why we might want to preempt, even
+	// if we're supposed to be mostly non-preemptible.
+	for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
 		// Try to keep work available on the global queue. We used to
 		// check if there were waiting workers, but it's better to
 		// just keep work available than to make workers wait. In the
@@ -1148,7 +1287,7 @@
 //go:nowritebarrier
 //go:systemstack
 func gcDrainN(gcw *gcWork, scanWork int64) int64 {
-	if !writeBarrier.needed {
+	if !writeBarrier.enabled {
 		throw("gcDrainN phase incorrect")
 	}
 
@@ -1276,6 +1415,7 @@
 		throw("scanobject of a noscan object")
 	}
 
+	var tp typePointers
 	if n > maxObletBytes {
 		// Large object. Break into oblets for better
 		// parallelism and lower latency.
@@ -1296,18 +1436,35 @@
 		// must be a large object, s.base() is the beginning
 		// of the object.
 		n = s.base() + s.elemsize - b
-		if n > maxObletBytes {
-			n = maxObletBytes
+		n = min(n, maxObletBytes)
+		if goexperiment.AllocHeaders {
+			tp = s.typePointersOfUnchecked(s.base())
+			tp = tp.fastForward(b-tp.addr, b+n)
+		}
+	} else {
+		if goexperiment.AllocHeaders {
+			tp = s.typePointersOfUnchecked(b)
 		}
 	}
 
-	hbits := heapBitsForAddr(b, n)
+	var hbits heapBits
+	if !goexperiment.AllocHeaders {
+		hbits = heapBitsForAddr(b, n)
+	}
 	var scanSize uintptr
 	for {
 		var addr uintptr
-		if hbits, addr = hbits.nextFast(); addr == 0 {
-			if hbits, addr = hbits.next(); addr == 0 {
-				break
+		if goexperiment.AllocHeaders {
+			if tp, addr = tp.nextFast(); addr == 0 {
+				if tp, addr = tp.next(b + n); addr == 0 {
+					break
+				}
+			}
+		} else {
+			if hbits, addr = hbits.nextFast(); addr == 0 {
+				if hbits, addr = hbits.next(); addr == 0 {
+					break
+				}
 			}
 		}
 
@@ -1561,7 +1718,7 @@
 //
 //go:nowritebarrier
 //go:nosplit
-func gcmarknewobject(span *mspan, obj, size uintptr) {
+func gcmarknewobject(span *mspan, obj uintptr) {
 	if useCheckmark { // The world should be stopped so this should not happen.
 		throw("gcmarknewobject called while doing checkmark")
 	}
@@ -1577,7 +1734,7 @@
 	}
 
 	gcw := &getg().m.p.ptr().gcw
-	gcw.bytesMarked += uint64(size)
+	gcw.bytesMarked += uint64(span.elemsize)
 }
 
 // gcMarkTinyAllocs greys all active tiny alloc blocks.
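
The mark-assist begin/end events managed above surface in execution traces. A minimal sketch of capturing a trace (viewable with go tool trace trace.out) in which assists can be inspected; the allocation loop is illustrative and simply creates enough GC pressure that some goroutines are likely to assist.

package main

import (
	"os"
	"runtime"
	"runtime/trace"
)

func main() {
	f, err := os.Create("trace.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := trace.Start(f); err != nil {
		panic(err)
	}
	defer trace.Stop()

	// Allocate aggressively so some goroutines get drafted into mark assist.
	var sink [][]byte
	for i := 0; i < 2000; i++ {
		sink = append(sink, make([]byte, 1<<16))
	}
	runtime.KeepAlive(sink)
}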
diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go
index 32e19f9..e9af3d6 100644
--- a/src/runtime/mgcpacer.go
+++ b/src/runtime/mgcpacer.go
@@ -712,7 +712,7 @@
 	}
 	myID := gp.m.p.ptr().id
 	for tries := 0; tries < 5; tries++ {
-		id := int32(fastrandn(uint32(gomaxprocs - 1)))
+		id := int32(cheaprandn(uint32(gomaxprocs - 1)))
 		if id >= myID {
 			id++
 		}
@@ -807,9 +807,11 @@
 
 	// Run the background mark worker.
 	gp := node.gp.ptr()
+	trace := traceAcquire()
 	casgstatus(gp, _Gwaiting, _Grunnable)
-	if traceEnabled() {
-		traceGoUnpark(gp, 0)
+	if trace.ok() {
+		trace.GoUnpark(gp, 0)
+		traceRelease(trace)
 	}
 	return gp, now
 }
@@ -828,8 +830,10 @@
 	c.triggered = ^uint64(0) // Reset triggered.
 
 	// heapLive was updated, so emit a trace event.
-	if traceEnabled() {
-		traceHeapAlloc(bytesMarked)
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.HeapAlloc(bytesMarked)
+		traceRelease(trace)
 	}
 }
 
@@ -856,10 +860,12 @@
 
 func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
 	if dHeapLive != 0 {
+		trace := traceAcquire()
 		live := gcController.heapLive.Add(dHeapLive)
-		if traceEnabled() {
+		if trace.ok() {
 			// gcController.heapLive changed.
-			traceHeapAlloc(live)
+			trace.HeapAlloc(live)
+			traceRelease(trace)
 		}
 	}
 	if gcBlackenEnabled == 0 {
@@ -1119,7 +1125,7 @@
 	// increase in RSS. By capping us at a point >0, we're essentially
 	// saying that we're OK using more CPU during the GC to prevent
 	// this growth in RSS.
-	triggerLowerBound := uint64(((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum) + c.heapMarked
+	triggerLowerBound := ((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum + c.heapMarked
 	if minTrigger < triggerLowerBound {
 		minTrigger = triggerLowerBound
 	}
@@ -1133,13 +1139,11 @@
 	// to reflect the costs of a GC with no work to do. With a large heap but
 	// very little scan work to perform, this gives us exactly as much runway
 	// as we would need, in the worst case.
-	maxTrigger := uint64(((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum) + c.heapMarked
+	maxTrigger := ((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum + c.heapMarked
 	if goal > defaultHeapMinimum && goal-defaultHeapMinimum > maxTrigger {
 		maxTrigger = goal - defaultHeapMinimum
 	}
-	if maxTrigger < minTrigger {
-		maxTrigger = minTrigger
-	}
+	maxTrigger = max(maxTrigger, minTrigger)
 
 	// Compute the trigger from our bounds and the runway stored by commit.
 	var trigger uint64
@@ -1149,12 +1153,8 @@
 	} else {
 		trigger = goal - runway
 	}
-	if trigger < minTrigger {
-		trigger = minTrigger
-	}
-	if trigger > maxTrigger {
-		trigger = maxTrigger
-	}
+	trigger = max(trigger, minTrigger)
+	trigger = min(trigger, maxTrigger)
 	if trigger > goal {
 		print("trigger=", trigger, " heapGoal=", goal, "\n")
 		print("minTrigger=", minTrigger, " maxTrigger=", maxTrigger, "\n")
@@ -1377,7 +1377,7 @@
 	return n < max
 }
 
-// removeIdleMarkWorker must be called when an new idle mark worker stops executing.
+// removeIdleMarkWorker must be called when a new idle mark worker stops executing.
 func (c *gcControllerState) removeIdleMarkWorker() {
 	for {
 		old := c.idleMarkWorkers.Load()
@@ -1434,8 +1434,10 @@
 
 	// TODO(mknyszek): This isn't really accurate any longer because the heap
 	// goal is computed dynamically. Still useful to snapshot, but not as useful.
-	if traceEnabled() {
-		traceHeapGoal()
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.HeapGoal()
+		traceRelease(trace)
 	}
 
 	trigger, heapGoal := gcController.trigger()
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index 659ca8d..86c2103 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -172,7 +172,7 @@
 	// it's simpler.
 
 	// We want to target retaining (100-reduceExtraPercent)% of the heap.
-	memoryLimitGoal := uint64(float64(memoryLimit) * (100.0 - reduceExtraPercent))
+	memoryLimitGoal := uint64(float64(memoryLimit) * (1 - reduceExtraPercent/100.0))
 
 	// mappedReady is comparable to memoryLimit, and represents how much total memory
 	// the Go runtime has committed now (estimated).
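
To make the corrected expression above concrete, a small worked example with hypothetical numbers (a 1 GiB limit and a 5% extra reduction are assumptions purely for illustration):

package main

import "fmt"

func main() {
	memoryLimit := uint64(1 << 30) // hypothetical 1 GiB memory limit
	reduceExtraPercent := 5.0      // hypothetical extra reduction of 5%

	// Old expression: multiplies the limit by (100 - 5) = 95, i.e. roughly
	// 95 GiB -- 100x larger than the intended goal.
	oldGoal := uint64(float64(memoryLimit) * (100.0 - reduceExtraPercent))

	// New expression: retains (100-reduceExtraPercent)% of the limit,
	// i.e. roughly 0.95 GiB, matching the comment above it.
	newGoal := uint64(float64(memoryLimit) * (1 - reduceExtraPercent/100.0))

	fmt.Printf("old goal: %d bytes\nnew goal: %d bytes\n", oldGoal, newGoal)
}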
@@ -307,7 +307,7 @@
 	// See sleepRatio for more details.
 	sleepController piController
 
-	// cooldown is the time left in nanoseconds during which we avoid
+	// controllerCooldown is the time left in nanoseconds during which we avoid
 	// using the controller and we hold sleepRatio at a conservative
 	// value. Used if the controller's assumptions fail to hold.
 	controllerCooldown int64
@@ -769,10 +769,6 @@
 			p.chunkOf(ci).allocRange(base, npages)
 			p.update(addr, uintptr(npages), true, true)
 
-			// Grab whether the chunk is hugepage backed and if it is,
-			// clear it. We're about to break up this huge page.
-			p.scav.index.setNoHugePage(ci)
-
 			// With that done, it's safe to unlock.
 			unlock(p.mheapLock)
 
@@ -897,12 +893,12 @@
 // will round up). That is, even if max is small, the returned size is not guaranteed
 // to be equal to max. max is allowed to be less than min, in which case it is as if
 // max == min.
-func (m *pallocData) findScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
-	if min&(min-1) != 0 || min == 0 {
-		print("runtime: min = ", min, "\n")
+func (m *pallocData) findScavengeCandidate(searchIdx uint, minimum, max uintptr) (uint, uint) {
+	if minimum&(minimum-1) != 0 || minimum == 0 {
+		print("runtime: min = ", minimum, "\n")
 		throw("min must be a non-zero power of 2")
-	} else if min > maxPagesPerPhysPage {
-		print("runtime: min = ", min, "\n")
+	} else if minimum > maxPagesPerPhysPage {
+		print("runtime: min = ", minimum, "\n")
 		throw("min too large")
 	}
 	// max may not be min-aligned, so we might accidentally truncate to
@@ -911,16 +907,16 @@
 	// a power of 2). This also prevents max from ever being less than
 	// min, unless it's zero, so handle that explicitly.
 	if max == 0 {
-		max = min
+		max = minimum
 	} else {
-		max = alignUp(max, min)
+		max = alignUp(max, minimum)
 	}
 
 	i := int(searchIdx / 64)
 	// Start by quickly skipping over blocks of non-free or scavenged pages.
 	for ; i >= 0; i-- {
 		// 1s are scavenged OR non-free => 0s are unscavenged AND free
-		x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
+		x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(minimum))
 		if x != ^uint64(0) {
 			break
 		}
@@ -933,7 +929,7 @@
 	// extend further. Loop until we find the extent of it.
 
 	// 1s are scavenged OR non-free => 0s are unscavenged AND free
-	x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
+	x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(minimum))
 	z1 := uint(sys.LeadingZeros64(^x))
 	run, end := uint(0), uint(i)*64+(64-z1)
 	if x<<z1 != 0 {
@@ -946,7 +942,7 @@
 		// word so it may extend into further words.
 		run = 64 - z1
 		for j := i - 1; j >= 0; j-- {
-			x := fillAligned(m.scavenged[j]|m.pallocBits[j], uint(min))
+			x := fillAligned(m.scavenged[j]|m.pallocBits[j], uint(minimum))
 			run += uint(sys.LeadingZeros64(x))
 			if x != 0 {
 				// The run stopped in this word.
@@ -957,10 +953,7 @@
 
 	// Split the run we found if it's larger than max but hold on to
 	// our original length, since we may need it later.
-	size := run
-	if size > uint(max) {
-		size = uint(max)
-	}
+	size := min(run, uint(max))
 	start := end - size
 
 	// Each huge page is guaranteed to fit in a single palloc chunk.
@@ -975,7 +968,7 @@
 		// to include that huge page.
 
 		// Compute the huge page boundary above our candidate.
-		pagesPerHugePage := uintptr(physHugePageSize / pageSize)
+		pagesPerHugePage := physHugePageSize / pageSize
 		hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))
 
 		// If that boundary is within our current candidate, then we may be breaking
@@ -1098,7 +1091,7 @@
 	// Starting from searchAddr's chunk, iterate until we find a chunk with pages to scavenge.
 	gen := s.gen
 	min := chunkIdx(s.minHeapIdx.Load())
-	start := chunkIndex(uintptr(searchAddr))
+	start := chunkIndex(searchAddr)
 	// N.B. We'll never map the 0'th chunk, so minHeapIdx ensures this loop can't overflow.
 	for i := start; i >= min; i-- {
 		// Skip over chunks.
@@ -1107,7 +1100,7 @@
 		}
 		// We're still scavenging this chunk.
 		if i == start {
-			return i, chunkPageIndex(uintptr(searchAddr))
+			return i, chunkPageIndex(searchAddr)
 		}
 		// Try to reduce searchAddr to newSearchAddr.
 		newSearchAddr := chunkBase(i) + pallocChunkBytes - pageSize
@@ -1141,16 +1134,11 @@
 func (s *scavengeIndex) alloc(ci chunkIdx, npages uint) {
 	sc := s.chunks[ci].load()
 	sc.alloc(npages, s.gen)
-	if !sc.isHugePage() && sc.inUse > scavChunkHiOccPages {
-		// Mark that we're considering this chunk as backed by huge pages.
-		sc.setHugePage()
-
-		// TODO(mknyszek): Consider eagerly backing memory with huge pages
-		// here. In the past we've attempted to use sysHugePageCollapse
-		// (which uses MADV_COLLAPSE on Linux, and is unsupported elswhere)
-		// for this purpose, but that caused performance issues in production
-		// environments.
-	}
+	// TODO(mknyszek): Consider eagerly backing memory with huge pages
+	// here and track whether we believe this chunk is backed by huge pages.
+	// In the past we've attempted to use sysHugePageCollapse (which uses
+	// MADV_COLLAPSE on Linux, and is unsupported elsewhere) for this purpose,
+	// but that caused performance issues in production environments.
 	s.chunks[ci].store(sc)
 }
 
@@ -1204,19 +1192,6 @@
 	s.chunks[ci].store(val)
 }
 
-// setNoHugePage updates the backed-by-hugepages status of a particular chunk.
-// Returns true if the set was successful (not already backed by huge pages).
-//
-// setNoHugePage may only run concurrently with find.
-func (s *scavengeIndex) setNoHugePage(ci chunkIdx) {
-	val := s.chunks[ci].load()
-	if !val.isHugePage() {
-		return
-	}
-	val.setNoHugePage()
-	s.chunks[ci].store(val)
-}
-
 // atomicScavChunkData is an atomic wrapper around a scavChunkData
 // that stores it in its packed form.
 type atomicScavChunkData struct {
@@ -1285,13 +1260,6 @@
 	// file. The reason we say "HasFree" here is so the zero value is
 	// correct for a newly-grown chunk. (New memory is scavenged.)
 	scavChunkHasFree scavChunkFlags = 1 << iota
-	// scavChunkNoHugePage indicates whether this chunk has had any huge
-	// pages broken by the scavenger.
-	//.
-	// The negative here is unfortunate, but necessary to make it so that
-	// the zero value of scavChunkData accurately represents the state of
-	// a newly-grown chunk. (New memory is marked as backed by huge pages.)
-	scavChunkNoHugePage
 
 	// scavChunkMaxFlags is the maximum number of flags we can have, given how
 	// a scavChunkData is packed into 8 bytes.
@@ -1324,21 +1292,6 @@
 	*sc |= scavChunkHasFree
 }
 
-// isHugePage returns false if the noHugePage flag is set.
-func (sc *scavChunkFlags) isHugePage() bool {
-	return (*sc)&scavChunkNoHugePage == 0
-}
-
-// setHugePage clears the noHugePage flag.
-func (sc *scavChunkFlags) setHugePage() {
-	*sc &^= scavChunkNoHugePage
-}
-
-// setNoHugePage sets the noHugePage flag.
-func (sc *scavChunkFlags) setNoHugePage() {
-	*sc |= scavChunkNoHugePage
-}
-
 // shouldScavenge returns true if the corresponding chunk should be interrogated
 // by the scavenger.
 func (sc scavChunkData) shouldScavenge(currGen uint32, force bool) bool {
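
The first mgcscavenge.go hunk above rewrites the memory-limit goal so the stated intent of the surrounding comment, retaining (100-reduceExtraPercent)% of the heap, is explicit in the arithmetic. A standalone check with assumed values (the real reduceExtraPercent constant lives elsewhere in mgcscavenge.go):

package main

import "fmt"

func main() {
	// Assumed example values; not the runtime's actual constants.
	const reduceExtraPercent = 5.0
	memoryLimit := uint64(8 << 30) // 8 GiB

	// New form from the hunk above: retain (100-reduceExtraPercent)% of the limit.
	goal := uint64(float64(memoryLimit) * (1 - reduceExtraPercent/100.0))

	fmt.Println(goal)                                 // 8160437862, i.e. 95% of 8 GiB
	fmt.Println(float64(goal) / float64(memoryLimit)) // ~0.95
}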
diff --git a/src/runtime/mgcstack.go b/src/runtime/mgcstack.go
index 6b55220..f4a83f5 100644
--- a/src/runtime/mgcstack.go
+++ b/src/runtime/mgcstack.go
@@ -166,8 +166,6 @@
 // A stackScanState keeps track of the state used during the GC walk
 // of a goroutine.
 type stackScanState struct {
-	cache pcvalueCache
-
 	// stack limits
 	stack stack
 
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 68f1aae..3dbe9bc 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -25,6 +25,8 @@
 package runtime
 
 import (
+	"internal/abi"
+	"internal/goexperiment"
 	"runtime/internal/atomic"
 	"unsafe"
 )
@@ -37,9 +39,6 @@
 	g      *g
 	parked bool
 
-	nbgsweep    uint32
-	npausesweep uint32
-
 	// active tracks outstanding sweepers and the sweep
 	// termination condition.
 	active activeSweep
@@ -237,7 +236,6 @@
 	// instantly. If GC was forced before the concurrent sweep
 	// finished, there may be spans to sweep.
 	for sweepone() != ^uintptr(0) {
-		sweep.npausesweep++
 	}
 
 	// Make sure there aren't any outstanding sweepers left.
@@ -299,7 +297,6 @@
 		const sweepBatchSize = 10
 		nSwept := 0
 		for sweepone() != ^uintptr(0) {
-			sweep.nbgsweep++
 			nSwept++
 			if nSwept%sweepBatchSize == 0 {
 				goschedIfBusy()
@@ -520,8 +517,10 @@
 		throw("mspan.sweep: bad span state")
 	}
 
-	if traceEnabled() {
-		traceGCSweepSpan(s.npages * _PageSize)
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GCSweepSpan(s.npages * _PageSize)
+		traceRelease(trace)
 	}
 
 	mheap_.pagesSwept.Add(int64(s.npages))
@@ -602,8 +601,8 @@
 		// efficient; allocfreetrace has massive overhead.
 		mbits := s.markBitsForBase()
 		abits := s.allocBitsForIndex(0)
-		for i := uintptr(0); i < s.nelems; i++ {
-			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
+		for i := uintptr(0); i < uintptr(s.nelems); i++ {
+			if !mbits.isMarked() && (abits.index < uintptr(s.freeindex) || abits.isMarked()) {
 				x := s.base() + i*s.elemsize
 				if debug.allocfreetrace != 0 {
 					tracefree(unsafe.Pointer(x), size)
@@ -634,12 +633,12 @@
 		//
 		// Check the first bitmap byte, where we have to be
 		// careful with freeindex.
-		obj := s.freeindex
+		obj := uintptr(s.freeindex)
 		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
 			s.reportZombies()
 		}
 		// Check remaining bytes.
-		for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
+		for i := obj/8 + 1; i < divRoundUp(uintptr(s.nelems), 8); i++ {
 			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
 				s.reportZombies()
 			}
@@ -666,7 +665,7 @@
 	// gcmarkBits becomes the allocBits.
 	// get a fresh cleared gcmarkBits in preparation for next GC
 	s.allocBits = s.gcmarkBits
-	s.gcmarkBits = newMarkBits(s.nelems)
+	s.gcmarkBits = newMarkBits(uintptr(s.nelems))
 
 	// refresh pinnerBits if they exist
 	if s.pinnerBits != nil {
@@ -760,7 +759,7 @@
 				return true
 			}
 			// Return span back to the right mcentral list.
-			if uintptr(nalloc) == s.nelems {
+			if nalloc == s.nelems {
 				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
 			} else {
 				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
@@ -791,6 +790,18 @@
 			} else {
 				mheap_.freeSpan(s)
 			}
+			if goexperiment.AllocHeaders && s.largeType != nil && s.largeType.TFlag&abi.TFlagUnrolledBitmap != 0 {
+				// In the allocheaders experiment, the unrolled GCProg bitmap is allocated separately.
+				// Free the space for the unrolled bitmap.
+				systemstack(func() {
+					s := spanOf(uintptr(unsafe.Pointer(s.largeType)))
+					mheap_.freeManual(s, spanAllocPtrScalarBits)
+				})
+				// Make sure to zero this pointer without putting the old
+				// value in a write buffer, as the old value might be an
+				// invalid pointer. See arena.go:(*mheap).allocUserArenaChunk.
+				*(*uintptr)(unsafe.Pointer(&s.largeType)) = 0
+			}
 
 			// Count the free in the consistent, external stats.
 			stats := memstats.heapStats.acquire()
@@ -829,10 +840,10 @@
 	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
 	mbits := s.markBitsForBase()
 	abits := s.allocBitsForIndex(0)
-	for i := uintptr(0); i < s.nelems; i++ {
+	for i := uintptr(0); i < uintptr(s.nelems); i++ {
 		addr := s.base() + i*s.elemsize
 		print(hex(addr))
-		alloc := i < s.freeindex || abits.isMarked()
+		alloc := i < uintptr(s.freeindex) || abits.isMarked()
 		if alloc {
 			print(" alloc")
 		} else {
@@ -884,8 +895,10 @@
 		return
 	}
 
-	if traceEnabled() {
-		traceGCSweepStart()
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GCSweepStart()
+		traceRelease(trace)
 	}
 
 	// Fix debt if necessary.
@@ -924,8 +937,10 @@
 		}
 	}
 
-	if traceEnabled() {
-		traceGCSweepDone()
+	trace = traceAcquire()
+	if trace.ok() {
+		trace.GCSweepDone()
+		traceRelease(trace)
 	}
 }
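
The sweep hunks above repeatedly swap the old traceEnabled()/traceGCSweep* calls for an acquire-check-emit-release sequence on a tracer handle. A hypothetical, simplified sketch of that shape (stand-in types only, not the runtime's tracer):

package main

import "fmt"

// traceLocker is a stand-in for the handle returned by traceAcquire in the
// hunks above; here it just wraps an enabled flag.
type traceLocker struct{ enabled bool }

func traceAcquire() traceLocker  { return traceLocker{enabled: true} } // assume tracing is on
func traceRelease(t traceLocker) {}                                    // would let the tracer shut down safely

func (t traceLocker) ok() bool { return t.enabled }

func (t traceLocker) GCSweepSpan(bytes uintptr) { fmt.Println("swept bytes:", bytes) }

func main() {
	// The pattern: take a stable reference to the tracer, emit an event only
	// if it is active, then release the reference.
	trace := traceAcquire()
	if trace.ok() {
		trace.GCSweepSpan(8192)
		traceRelease(trace)
	}
}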
 
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index f836d91..0069328 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -11,6 +11,7 @@
 import (
 	"internal/cpu"
 	"internal/goarch"
+	"internal/goexperiment"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -122,6 +123,8 @@
 	// credit pool.
 	reclaimCredit atomic.Uintptr
 
+	_ cpu.CacheLinePad // prevents false-sharing between arenas and preceding variables
+
 	// arenas is the heap arena map. It points to the metadata for
 	// the heap for every arena frame of the entire usable virtual
 	// address space.
@@ -237,23 +240,8 @@
 type heapArena struct {
 	_ sys.NotInHeap
 
-	// bitmap stores the pointer/scalar bitmap for the words in
-	// this arena. See mbitmap.go for a description.
-	// This array uses 1 bit per word of heap, or 1.6% of the heap size (for 64-bit).
-	bitmap [heapArenaBitmapWords]uintptr
-
-	// If the ith bit of noMorePtrs is true, then there are no more
-	// pointers for the object containing the word described by the
-	// high bit of bitmap[i].
-	// In that case, bitmap[i+1], ... must be zero until the start
-	// of the next object.
-	// We never operate on these entries using bit-parallel techniques,
-	// so it is ok if they are small. Also, they can't be bigger than
-	// uint16 because at that size a single noMorePtrs entry
-	// represents 8K of memory, the minimum size of a span. Any larger
-	// and we'd have to worry about concurrent updates.
-	// This array uses 1 bit per word of bitmap, or .024% of the heap size (for 64-bit).
-	noMorePtrs [heapArenaBitmapWords / 8]uint8
+	// heapArenaPtrScalar contains pointer/scalar data about the heap for this heap arena.
+	heapArenaPtrScalar
 
 	// spans maps from virtual address page ID within this arena to *mspan.
 	// For allocated spans, their pages map to the span itself.
@@ -411,7 +399,7 @@
 	_    sys.NotInHeap
 	next *mspan     // next span in list, or nil if none
 	prev *mspan     // previous span in list, or nil if none
-	list *mSpanList // For debugging. TODO: Remove.
+	list *mSpanList // For debugging.
 
 	startAddr uintptr // address of first byte of span aka s.base()
 	npages    uintptr // number of pages in span
@@ -433,10 +421,17 @@
 	// undefined and should never be referenced.
 	//
 	// Object n starts at address n*elemsize + (start << pageShift).
-	freeindex uintptr
+	freeindex uint16
 	// TODO: Look up nelems from sizeclass and remove this field if it
 	// helps performance.
-	nelems uintptr // number of object in the span.
+	nelems uint16 // number of objects in the span.
+	// freeIndexForScan is like freeindex, except that freeindex is
+	// used by the allocator whereas freeIndexForScan is used by the
+	// GC scanner. They are two fields so that the GC sees the object
+	// is allocated only when the object and the heap bits are
+	// initialized (see also the assignment of freeIndexForScan in
+	// mallocgc, and issue 54596).
+	freeIndexForScan uint16
 
 	// Cache of the allocBits at freeindex. allocCache is shifted
 	// such that the lowest bit corresponds to the bit freeindex.
@@ -493,14 +488,7 @@
 	speciallock           mutex         // guards specials list and changes to pinnerBits
 	specials              *special      // linked list of special records sorted by offset.
 	userArenaChunkFree    addrRange     // interval for managing chunk allocation
-
-	// freeIndexForScan is like freeindex, except that freeindex is
-	// used by the allocator whereas freeIndexForScan is used by the
-	// GC scanner. They are two fields so that the GC sees the object
-	// is allocated only when the object and the heap bits are
-	// initialized (see also the assignment of freeIndexForScan in
-	// mallocgc, and issue 54596).
-	freeIndexForScan uintptr
+	largeType             *_type        // malloc header for large objects.
 }
 
 func (s *mspan) base() uintptr {
@@ -578,10 +566,12 @@
 	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
 }
 
+//go:nosplit
 func (sc spanClass) sizeclass() int8 {
 	return int8(sc >> 1)
 }
 
+//go:nosplit
 func (sc spanClass) noscan() bool {
 	return sc&1 != 0
 }
@@ -801,8 +791,10 @@
 	// traceGCSweepStart/Done pair on the P.
 	mp := acquirem()
 
-	if traceEnabled() {
-		traceGCSweepStart()
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GCSweepStart()
+		traceRelease(trace)
 	}
 
 	arenas := h.sweepArenas
@@ -849,8 +841,10 @@
 		unlock(&h.lock)
 	}
 
-	if traceEnabled() {
-		traceGCSweepDone()
+	trace = traceAcquire()
+	if trace.ok() {
+		trace.GCSweepDone()
+		traceRelease(trace)
 	}
 	releasem(mp)
 }
@@ -921,10 +915,12 @@
 		n -= uintptr(len(inUse) * 8)
 	}
 	sweep.active.end(sl)
-	if traceEnabled() {
+	trace := traceAcquire()
+	if trace.ok() {
 		unlock(&h.lock)
 		// Account for pages scanned but not reclaimed.
-		traceGCSweepSpan((n0 - nFreed) * pageSize)
+		trace.GCSweepSpan((n0 - nFreed) * pageSize)
+		traceRelease(trace)
 		lock(&h.lock)
 	}
 
@@ -1401,7 +1397,12 @@
 			s.divMul = 0
 		} else {
 			s.elemsize = uintptr(class_to_size[sizeclass])
-			s.nelems = nbytes / s.elemsize
+			if goexperiment.AllocHeaders && !s.spanclass.noscan() && heapBitsInSpan(s.elemsize) {
+				// In the allocheaders experiment, reserve space for the pointer/scan bitmap at the end.
+				s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
+			} else {
+				s.nelems = uint16(nbytes / s.elemsize)
+			}
 			s.divMul = class_to_divmagic[sizeclass]
 		}
 
@@ -1409,8 +1410,8 @@
 		s.freeindex = 0
 		s.freeIndexForScan = 0
 		s.allocCache = ^uint64(0) // all 1s indicating all free.
-		s.gcmarkBits = newMarkBits(s.nelems)
-		s.allocBits = newAllocBits(s.nelems)
+		s.gcmarkBits = newMarkBits(uintptr(s.nelems))
+		s.allocBits = newAllocBits(uintptr(s.nelems))
 
 		// It's safe to access h.sweepgen without the heap lock because it's
 		// only ever updated with the world stopped and we run on the
@@ -2139,7 +2140,7 @@
 // newMarkBits returns a pointer to 8 byte aligned bytes
 // to be used for a span's mark bits.
 func newMarkBits(nelems uintptr) *gcBits {
-	blocksNeeded := uintptr((nelems + 63) / 64)
+	blocksNeeded := (nelems + 63) / 64
 	bytesNeeded := blocksNeeded * 8
 
 	// Try directly allocating from the current head arena.
@@ -2251,7 +2252,7 @@
 	result.next = nil
 	// If result.bits is not 8 byte aligned adjust index so
 	// that &result.bits[result.free] is 8 byte aligned.
-	if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
+	if unsafe.Offsetof(gcBitsArena{}.bits)&7 == 0 {
 		result.free = 0
 	} else {
 		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
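
For the allocheaders branch above, a small scannable span whose element size keeps the heap bits in-span reserves its pointer/scalar bitmap at the end of the span (one bit per pointer-sized word), and nelems is computed from what remains. A worked example with assumed numbers (8 KiB span, 8-byte words, 48-byte elements):

package main

import "fmt"

func main() {
	// Assumed values for illustration: an 8 KiB span, 64-bit pointers, and a
	// 48-byte size class. The real constants come from the runtime.
	const ptrSize = 8
	nbytes := uintptr(8192)
	elemsize := uintptr(48)

	bitmapBytes := nbytes / ptrSize / 8 // one bit per word of the span
	nelems := (nbytes - bitmapBytes) / elemsize

	fmt.Println(bitmapBytes, nelems) // 128 bytes reserved, 168 objects fit
}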
diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go
index cc58558..b7f07b5 100644
--- a/src/runtime/mkduff.go
+++ b/src/runtime/mkduff.go
@@ -179,23 +179,23 @@
 
 func zeroLOONG64(w io.Writer) {
 	// R0: always zero
-	// R19 (aka REGRT1): ptr to memory to be zeroed - 8
+	// R19 (aka REGRT1): ptr to memory to be zeroed
 	// On return, R19 points to the last zeroed dword.
-	fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0")
+	fmt.Fprintln(w, "TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0")
 	for i := 0; i < 128; i++ {
-		fmt.Fprintln(w, "\tMOVV\tR0, 8(R19)")
-		fmt.Fprintln(w, "\tADDV\t$8, R19")
+		fmt.Fprintln(w, "\tMOVV\tR0, (R20)")
+		fmt.Fprintln(w, "\tADDV\t$8, R20")
 	}
 	fmt.Fprintln(w, "\tRET")
 }
 
 func copyLOONG64(w io.Writer) {
-	fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0")
+	fmt.Fprintln(w, "TEXT runtime·duffcopy<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0")
 	for i := 0; i < 128; i++ {
-		fmt.Fprintln(w, "\tMOVV\t(R19), R30")
-		fmt.Fprintln(w, "\tADDV\t$8, R19")
-		fmt.Fprintln(w, "\tMOVV\tR30, (R20)")
+		fmt.Fprintln(w, "\tMOVV\t(R20), R30")
 		fmt.Fprintln(w, "\tADDV\t$8, R20")
+		fmt.Fprintln(w, "\tMOVV\tR30, (R21)")
+		fmt.Fprintln(w, "\tADDV\t$8, R21")
 		fmt.Fprintln(w)
 	}
 	fmt.Fprintln(w, "\tRET")
diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go
index c0d5a02..c8e34d8 100644
--- a/src/runtime/mklockrank.go
+++ b/src/runtime/mklockrank.go
@@ -52,29 +52,40 @@
   assistQueue,
   sweep;
 
+# Test only
+NONE < testR, testW;
+
 # Scheduler, timers, netpoll
-NONE < pollDesc, cpuprof;
+NONE <
+  allocmW,
+  execW,
+  cpuprof,
+  pollDesc,
+  wakeableSleep;
 assistQueue,
   cpuprof,
   forcegc,
   pollDesc, # pollDesc can interact with timers, which can lock sched.
   scavenge,
   sweep,
-  sweepWaiters
+  sweepWaiters,
+  testR,
+  wakeableSleep
+# Above SCHED are things that can call into the scheduler.
+< SCHED
+# Below SCHED is the scheduler implementation.
+< allocmR,
+  execR
 < sched;
 sched < allg, allp;
-allp < timers;
+allp, wakeableSleep < timers;
 timers < netpollInit;
 
 # Channels
-scavenge, sweep < hchan;
+scavenge, sweep, testR, wakeableSleep < hchan;
 NONE < notifyList;
 hchan, notifyList < sudog;
 
-# RWMutex
-NONE < rwmutexW;
-rwmutexW, sysmon < rwmutexR;
-
 # Semaphores
 NONE < root;
 
@@ -99,6 +110,9 @@
 
 # Malloc
 allg,
+  allocmR,
+  execR, # May grow stack
+  execW, # May allocate after BeforeFork
   hchan,
   notifyList,
   reflectOffs,
@@ -135,7 +149,7 @@
 < STACKGROW
 # Below STACKGROW is the stack allocator/copying implementation.
 < gscan;
-gscan, rwmutexR < stackpool;
+gscan < stackpool;
 gscan < stackLarge;
 # Generally, hchan must be acquired before gscan. But in one case,
 # where we suspend a G and then shrink its stack, syncadjustsudogs
@@ -188,6 +202,20 @@
 panic < deadlock;
 # raceFini is only held while exiting.
 panic < raceFini;
+
+# RWMutex internal read lock
+
+allocmR,
+  allocmW
+< allocmRInternal;
+
+execR,
+  execW
+< execRInternal;
+
+testR,
+  testW
+< testRInternal;
 `
 
 // cyclicRanks lists lock ranks that allow multiple locks of the same
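
The rank declarations above form a partial order: `a < b` means a lock of rank a may be held while acquiring one of rank b, never the reverse. A toy reachability check over a tiny subset of those edges (an illustration only, not the generated lockrank code):

package main

import "fmt"

// edges maps a rank to the ranks that may be acquired while it is held,
// mirroring a few of the "a < b" lines above.
var edges = map[string][]string{
	"allocmR": {"sched"},
	"sched":   {"allg", "allp"},
	"allp":    {"timers"},
}

// allowed reports whether next may be acquired while held is held, following
// the transitive closure of the declared order.
func allowed(held, next string) bool {
	for _, n := range edges[held] {
		if n == next || allowed(n, next) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allowed("allocmR", "timers")) // true: allocmR < sched < allp < timers
	fmt.Println(allowed("timers", "sched"))   // false: would invert the declared order
}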
diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index 0bfbd37..17544d6 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -317,11 +317,11 @@
 
 	p("MOVW.W R14, -%d(R13)", lfp.stack) // allocate frame, save LR
 	l.save()
-	p("MOVB ·goarm(SB), R0\nCMP $6, R0\nBLT nofp") // test goarm, and skip FP registers if goarm=5.
+	p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0.
 	lfp.save()
 	label("nofp:")
 	p("CALL ·asyncPreempt2(SB)")
-	p("MOVB ·goarm(SB), R0\nCMP $6, R0\nBLT nofp2") // test goarm, and skip FP registers if goarm=5.
+	p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp2") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0.
 	lfp.restore()
 	label("nofp2:")
 	l.restore()
@@ -576,7 +576,7 @@
 	}
 
 	p("MOV X1, -%d(X2)", l.stack)
-	p("ADD $-%d, X2", l.stack)
+	p("SUB $%d, X2", l.stack)
 	l.save()
 	p("CALL ·asyncPreempt2(SB)")
 	l.restore()
diff --git a/src/runtime/mksizeclasses.go b/src/runtime/mksizeclasses.go
index 156e613..26ca49e 100644
--- a/src/runtime/mksizeclasses.go
+++ b/src/runtime/mksizeclasses.go
@@ -278,14 +278,12 @@
 }
 
 func maxObjsPerSpan(classes []class) int {
-	max := 0
+	most := 0
 	for _, c := range classes[1:] {
 		n := c.npages * pageSize / c.size
-		if n > max {
-			max = n
-		}
+		most = max(most, n)
 	}
-	return max
+	return most
 }
 
 func printClasses(w io.Writer, classes []class) {
diff --git a/src/runtime/mmap.go b/src/runtime/mmap.go
index f0183f6..9a7b298 100644
--- a/src/runtime/mmap.go
+++ b/src/runtime/mmap.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !aix && !darwin && !js && (!linux || !amd64) && (!linux || !arm64) && (!freebsd || !amd64) && !openbsd && !plan9 && !solaris && !windows
+//go:build !aix && !darwin && !js && !((linux && (amd64 || arm64 || loong64)) || (freebsd && amd64)) && !openbsd && !plan9 && !solaris && !windows
 
 package runtime
 
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 3e789ab..d533f84 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -437,6 +437,10 @@
 //
 // The heap lock must not be held over this operation, since it will briefly acquire
 // the heap lock.
+//
+// Must be called on the system stack because it acquires the heap lock.
+//
+//go:systemstack
 func (p *pageAlloc) enableChunkHugePages() {
 	// Grab the heap lock to turn on huge pages for new chunks and clone the current
 	// heap address space ranges.
@@ -1038,8 +1042,8 @@
 	// Merge the summaries in sums into one.
 	//
 	// We do this by keeping a running summary representing the merged
-	// summaries of sums[:i] in start, max, and end.
-	start, max, end := sums[0].unpack()
+	// summaries of sums[:i] in start, most, and end.
+	start, most, end := sums[0].unpack()
 	for i := 1; i < len(sums); i++ {
 		// Merge in sums[i].
 		si, mi, ei := sums[i].unpack()
@@ -1055,12 +1059,7 @@
 		// across the boundary between the running sum and sums[i]
 		// and at the max sums[i], taking the greatest of those two
 		// and the max of the running sum.
-		if end+si > max {
-			max = end + si
-		}
-		if mi > max {
-			max = mi
-		}
+		most = max(most, end+si, mi)
 
 		// Merge in end by checking if this new summary is totally
 		// free. If it is, then we want to extend the running sum's
@@ -1073,5 +1072,5 @@
 			end = ei
 		}
 	}
-	return packPallocSum(start, max, end)
+	return packPallocSum(start, most, end)
 }
diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go
index 1418831..36cd222 100644
--- a/src/runtime/mpagealloc_64bit.go
+++ b/src/runtime/mpagealloc_64bit.go
@@ -209,23 +209,20 @@
 	haveMax := s.max.Load()
 	needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize)
 	needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)
-	// Extend the range down to what we have, if there's no overlap.
+
+	// We need a contiguous range, so extend the range if there's no overlap.
 	if needMax < haveMin {
 		needMax = haveMin
 	}
 	if haveMax != 0 && needMin > haveMax {
 		needMin = haveMax
 	}
-	have := makeAddrRange(
-		// Avoid a panic from indexing one past the last element.
-		uintptr(unsafe.Pointer(&s.chunks[0]))+haveMin*scSize,
-		uintptr(unsafe.Pointer(&s.chunks[0]))+haveMax*scSize,
-	)
-	need := makeAddrRange(
-		// Avoid a panic from indexing one past the last element.
-		uintptr(unsafe.Pointer(&s.chunks[0]))+needMin*scSize,
-		uintptr(unsafe.Pointer(&s.chunks[0]))+needMax*scSize,
-	)
+
+	// Avoid a panic from indexing one past the last element.
+	chunksBase := uintptr(unsafe.Pointer(&s.chunks[0]))
+	have := makeAddrRange(chunksBase+haveMin*scSize, chunksBase+haveMax*scSize)
+	need := makeAddrRange(chunksBase+needMin*scSize, chunksBase+needMax*scSize)
+
 	// Subtract any overlap from rounding. We can't re-map memory because
 	// it'll be zeroed.
 	need = need.subtract(have)
@@ -235,10 +232,10 @@
 		sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat)
 		sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
 		// Update the indices only after the new memory is valid.
-		if haveMin == 0 || needMin < haveMin {
+		if haveMax == 0 || needMin < haveMin {
 			s.min.Store(needMin)
 		}
-		if haveMax == 0 || needMax > haveMax {
+		if needMax > haveMax {
 			s.max.Store(needMax)
 		}
 	}
diff --git a/src/runtime/mpallocbits.go b/src/runtime/mpallocbits.go
index 2f35ce0..6b5f15d 100644
--- a/src/runtime/mpallocbits.go
+++ b/src/runtime/mpallocbits.go
@@ -134,7 +134,7 @@
 
 // summarize returns a packed summary of the bitmap in pallocBits.
 func (b *pallocBits) summarize() pallocSum {
-	var start, max, cur uint
+	var start, most, cur uint
 	const notSetYet = ^uint(0) // sentinel for start value
 	start = notSetYet
 	for i := 0; i < len(b); i++ {
@@ -151,9 +151,7 @@
 		if start == notSetYet {
 			start = cur
 		}
-		if cur > max {
-			max = cur
-		}
+		most = max(most, cur)
 		// Final region that might span to next uint64
 		cur = l
 	}
@@ -162,12 +160,11 @@
 		const n = uint(64 * len(b))
 		return packPallocSum(n, n, n)
 	}
-	if cur > max {
-		max = cur
-	}
-	if max >= 64-2 {
+	most = max(most, cur)
+
+	if most >= 64-2 {
 		// There is no way an internal run of zeros could beat max.
-		return packPallocSum(start, max, cur)
+		return packPallocSum(start, most, cur)
 	}
 	// Now look inside each uint64 for runs of zeros.
 	// All uint64s must be nonzero, or we would have aborted above.
@@ -188,7 +185,7 @@
 
 		// Strategy: shrink all runs of zeros by max. If any runs of zero
 		// remain, then we've identified a larger maximum zero run.
-		p := max     // number of zeros we still need to shrink by.
+		p := most    // number of zeros we still need to shrink by.
 		k := uint(1) // current minimum length of runs of ones in x.
 		for {
 			// Shrink all runs of zeros by p places (except the top zeros).
@@ -217,14 +214,14 @@
 			x >>= j & 63                       // remove trailing ones
 			j = uint(sys.TrailingZeros64(x))   // count contiguous trailing zeros
 			x >>= j & 63                       // remove zeros
-			max += j                           // we have a new maximum!
+			most += j                          // we have a new maximum!
 			if x&(x+1) == 0 {                  // no more zeros (except at the top).
 				continue outer
 			}
 			p = j // remove j more zeros from each zero run.
 		}
 	}
-	return packPallocSum(start, max, cur)
+	return packPallocSum(start, most, cur)
 }
 
 // find searches for npages contiguous free pages in pallocBits and returns
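
Several hunks above rename locals like max and min to most and minimum. Besides reading better, a local with either name shadows the corresponding Go 1.21 builtin for the rest of the scope; a small sketch of the difference:

package main

import "fmt"

// shadowed shows the problem the renames avoid: once a local is named max,
// the builtin max is unreachable in that scope.
func shadowed() int {
	max := 0
	for _, v := range []int{3, 1, 4} {
		if v > max { // must fall back to an explicit comparison
			max = v
		}
	}
	return max
}

// renamed keeps the builtin available by calling the running value most.
func renamed() int {
	most := 0
	for _, v := range []int{3, 1, 4} {
		most = max(most, v)
	}
	return most
}

func main() {
	fmt.Println(shadowed(), renamed()) // 4 4
}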
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index 308ebae..abdd2f3 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -39,7 +39,10 @@
 	// size of bucket hash table
 	buckHashSize = 179999
 
-	// max depth of stack to record in bucket
+	// maxStack is the max depth of stack to record in bucket.
+	// Note that it's only used internally as a guard against
+	// wildly out-of-bounds slicing of the PCs that come after
+	// a bucket struct, and it could increase in the future.
 	maxStack = 32
 )
 
@@ -231,6 +234,10 @@
 // stk returns the slice in b holding the stack.
 func (b *bucket) stk() []uintptr {
 	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
+	if b.nstk > maxStack {
+		// prove that slicing works; otherwise a failure requires a P
+		throw("bad profile stack count")
+	}
 	return stk[:b.nstk:b.nstk]
 }
 
@@ -468,7 +475,7 @@
 		r = 1 // profile everything
 	} else {
 		// convert ns to cycles, use float64 to prevent overflow during multiplication
-		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
+		r = int64(float64(rate) * float64(ticksPerSecond()) / (1000 * 1000 * 1000))
 		if r == 0 {
 			r = 1
 		}
@@ -491,7 +498,7 @@
 // blocksampled returns true for all events where cycles >= rate. Shorter
 // events have a cycles/rate random chance of returning true.
 func blocksampled(cycles, rate int64) bool {
-	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
+	if rate <= 0 || (rate > cycles && cheaprand64()%rate > cycles) {
 		return false
 	}
 	return true
@@ -506,7 +513,237 @@
 	} else {
 		nstk = gcallers(gp.m.curg, skip, stk[:])
 	}
-	b := stkbucket(which, 0, stk[:nstk], true)
+
+	saveBlockEventStack(cycles, rate, stk[:nstk], which)
+}
+
+// lockTimer assists with profiling contention on runtime-internal locks.
+//
+// There are several steps between the time that an M experiences contention and
+// when that contention may be added to the profile. This comes from our
+// constraints: We need to keep the critical section of each lock small,
+// especially when those locks are contended. The reporting code cannot acquire
+// new locks until the M has released all other locks, which means no memory
+// allocations and encourages use of (temporary) M-local storage.
+//
+// The M will have space for storing one call stack that caused contention, and
+// for the magnitude of that contention. It will also have space to store the
+// magnitude of additional contention the M caused, since it only has space to
+// remember one call stack and might encounter several contention events before
+// it releases all of its locks and is thus able to transfer the local buffer
+// into the profile.
+//
+// The M will collect the call stack when it unlocks the contended lock. That
+// minimizes the impact on the critical section of the contended lock, and
+// matches the mutex profile's behavior for contention in sync.Mutex: measured
+// at the Unlock method.
+//
+// The profile for contention on sync.Mutex blames the caller of Unlock for the
+// amount of contention experienced by the callers of Lock which had to wait.
+// When there are several critical sections, this allows identifying which of
+// them is responsible.
+//
+// Matching that behavior for runtime-internal locks will require identifying
+// which Ms are blocked on the mutex. The semaphore-based implementation is
+// ready to allow that, but the futex-based implementation will require a bit
+// more work. Until then, we report contention on runtime-internal locks with a
+// call stack taken from the unlock call (like the rest of the user-space
+// "mutex" profile), but assign it a duration value based on how long the
+// previous lock call took (like the user-space "block" profile).
+//
+// Thus, reporting the call stacks of runtime-internal lock contention is
+// guarded by GODEBUG for now. Set GODEBUG=runtimecontentionstacks=1 to enable.
+//
+// TODO(rhysh): plumb through the delay duration, remove GODEBUG, update comment
+//
+// The M will track this by storing a pointer to the lock; lock/unlock pairs for
+// runtime-internal locks are always on the same M.
+//
+// Together, that demands several steps for recording contention. First, when
+// finally acquiring a contended lock, the M decides whether it should plan to
+// profile that event by storing a pointer to the lock in its "to be profiled
+// upon unlock" field. If that field is already set, it uses the relative
+// magnitudes to weight a random choice between itself and the other lock, with
+// the loser's time being added to the "additional contention" field. Otherwise
+// if the M's call stack buffer is occupied, it does the comparison against that
+// sample's magnitude.
+//
+// Second, having unlocked a mutex the M checks to see if it should capture the
+// call stack into its local buffer. Finally, when the M unlocks its last mutex,
+// it transfers the local buffer into the profile. As part of that step, it also
+// transfers any "additional contention" time to the profile. Any lock
+// contention that it experiences while adding samples to the profile will be
+// recorded later as "additional contention" and not include a call stack, to
+// avoid an echo.
+type lockTimer struct {
+	lock      *mutex
+	timeRate  int64
+	timeStart int64
+	tickStart int64
+}
+
+func (lt *lockTimer) begin() {
+	rate := int64(atomic.Load64(&mutexprofilerate))
+
+	lt.timeRate = gTrackingPeriod
+	if rate != 0 && rate < lt.timeRate {
+		lt.timeRate = rate
+	}
+	if int64(cheaprand())%lt.timeRate == 0 {
+		lt.timeStart = nanotime()
+	}
+
+	if rate > 0 && int64(cheaprand())%rate == 0 {
+		lt.tickStart = cputicks()
+	}
+}
+
+func (lt *lockTimer) end() {
+	gp := getg()
+
+	if lt.timeStart != 0 {
+		nowTime := nanotime()
+		gp.m.mLockProfile.waitTime.Add((nowTime - lt.timeStart) * lt.timeRate)
+	}
+
+	if lt.tickStart != 0 {
+		nowTick := cputicks()
+		gp.m.mLockProfile.recordLock(nowTick-lt.tickStart, lt.lock)
+	}
+}
+
+type mLockProfile struct {
+	waitTime   atomic.Int64      // total nanoseconds spent waiting in runtime.lockWithRank
+	stack      [maxStack]uintptr // stack that experienced contention in runtime.lockWithRank
+	pending    uintptr           // *mutex that experienced contention (to be traceback-ed)
+	cycles     int64             // cycles attributable to "pending" (if set), otherwise to "stack"
+	cyclesLost int64             // contention for which we weren't able to record a call stack
+	disabled   bool              // attribute all time to "lost"
+}
+
+func (prof *mLockProfile) recordLock(cycles int64, l *mutex) {
+	if cycles <= 0 {
+		return
+	}
+
+	if prof.disabled {
+		// We're experiencing contention while attempting to report contention.
+		// Make a note of its magnitude, but don't allow it to be the sole cause
+		// of another contention report.
+		prof.cyclesLost += cycles
+		return
+	}
+
+	if uintptr(unsafe.Pointer(l)) == prof.pending {
+		// Optimization: we'd already planned to profile this same lock (though
+		// possibly from a different unlock site).
+		prof.cycles += cycles
+		return
+	}
+
+	if prev := prof.cycles; prev > 0 {
+		// We can only store one call stack for runtime-internal lock contention
+		// on this M, and we've already got one. Decide which should stay, and
+		// add the other to the report for runtime._LostContendedRuntimeLock.
+		prevScore := uint64(cheaprand64()) % uint64(prev)
+		thisScore := uint64(cheaprand64()) % uint64(cycles)
+		if prevScore > thisScore {
+			prof.cyclesLost += cycles
+			return
+		} else {
+			prof.cyclesLost += prev
+		}
+	}
+	// Saving the *mutex as a uintptr is safe because:
+	//  - lockrank_on.go does this too, which gives it regular exercise
+	//  - the lock would only move if it's stack allocated, which means it
+	//      cannot experience multi-M contention
+	prof.pending = uintptr(unsafe.Pointer(l))
+	prof.cycles = cycles
+}
+
+// From unlock2, we might not be holding a p in this code.
+//
+//go:nowritebarrierrec
+func (prof *mLockProfile) recordUnlock(l *mutex) {
+	if uintptr(unsafe.Pointer(l)) == prof.pending {
+		prof.captureStack()
+	}
+	if gp := getg(); gp.m.locks == 1 && gp.m.mLockProfile.cycles != 0 {
+		prof.store()
+	}
+}
+
+func (prof *mLockProfile) captureStack() {
+	skip := 3 // runtime.(*mLockProfile).recordUnlock runtime.unlock2 runtime.unlockWithRank
+	if staticLockRanking {
+		// When static lock ranking is enabled, we'll always be on the system
+		// stack at this point. There will be a runtime.unlockWithRank.func1
+		// frame, and if the call to runtime.unlock took place on a user stack
+		// then there'll also be a runtime.systemstack frame. To keep stack
+		// traces somewhat consistent whether or not static lock ranking is
+		// enabled, we'd like to skip those. But it's hard to tell how long
+		// we've been on the system stack so accept an extra frame in that case,
+		// with a leaf of "runtime.unlockWithRank runtime.unlock" instead of
+		// "runtime.unlock".
+		skip += 1 // runtime.unlockWithRank.func1
+	}
+	prof.pending = 0
+
+	if debug.runtimeContentionStacks.Load() == 0 {
+		prof.stack[0] = abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum
+		prof.stack[1] = 0
+		return
+	}
+
+	var nstk int
+	gp := getg()
+	sp := getcallersp()
+	pc := getcallerpc()
+	systemstack(func() {
+		var u unwinder
+		u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack)
+		nstk = tracebackPCs(&u, skip, prof.stack[:])
+	})
+	if nstk < len(prof.stack) {
+		prof.stack[nstk] = 0
+	}
+}
+
+func (prof *mLockProfile) store() {
+	// Report any contention we experience within this function as "lost"; it's
+	// important that the act of reporting a contention event not lead to a
+	// reportable contention event. This also means we can use prof.stack
+	// without copying, since it won't change during this function.
+	mp := acquirem()
+	prof.disabled = true
+
+	nstk := maxStack
+	for i := 0; i < nstk; i++ {
+		if pc := prof.stack[i]; pc == 0 {
+			nstk = i
+			break
+		}
+	}
+
+	cycles, lost := prof.cycles, prof.cyclesLost
+	prof.cycles, prof.cyclesLost = 0, 0
+
+	rate := int64(atomic.Load64(&mutexprofilerate))
+	saveBlockEventStack(cycles, rate, prof.stack[:nstk], mutexProfile)
+	if lost > 0 {
+		lostStk := [...]uintptr{
+			abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum,
+		}
+		saveBlockEventStack(lost, rate, lostStk[:], mutexProfile)
+	}
+
+	prof.disabled = false
+	releasem(mp)
+}
+
+func saveBlockEventStack(cycles, rate int64, stk []uintptr, which bucketType) {
+	b := stkbucket(which, 0, stk, true)
 	bp := b.bp()
 
 	lock(&profBlockLock)
@@ -553,9 +790,7 @@
 		cycles = 0
 	}
 	rate := int64(atomic.Load64(&mutexprofilerate))
-	// TODO(pjw): measure impact of always calling fastrand vs using something
-	// like malloc.go:nextSample()
-	if rate > 0 && int64(fastrand())%rate == 0 {
+	if rate > 0 && cheaprand64()%rate == 0 {
 		saveblockevent(cycles, rate, skip+1, mutexProfile)
 	}
 }
@@ -746,8 +981,8 @@
 // If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
 // If len(p) < n, BlockProfile does not change p and returns n, false.
 //
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.blockprofile flag instead
+// Most clients should use the [runtime/pprof] package or
+// the [testing] package's -test.blockprofile flag instead
 // of calling BlockProfile directly.
 func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
 	lock(&profBlockLock)
@@ -791,7 +1026,7 @@
 // If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
 // Otherwise, MutexProfile does not change p, and returns n, false.
 //
-// Most clients should use the runtime/pprof package
+// Most clients should use the [runtime/pprof] package
 // instead of calling MutexProfile directly.
 func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
 	lock(&profBlockLock)
@@ -901,7 +1136,7 @@
 
 	ourg := getg()
 
-	stopTheWorld(stwGoroutineProfile)
+	stw := stopTheWorld(stwGoroutineProfile)
 	// Using gcount while the world is stopped should give us a consistent view
 	// of the number of live goroutines, minus the number of goroutines that are
 	// alive and permanently marked as "system". But to make this count agree
@@ -918,7 +1153,7 @@
 		// There's not enough space in p to store the whole profile, so (per the
 		// contract of runtime.GoroutineProfile) we're not allowed to write to p
 		// at all and must return n, false.
-		startTheWorld()
+		startTheWorld(stw)
 		semrelease(&goroutineProfile.sema)
 		return n, false
 	}
@@ -929,6 +1164,9 @@
 	systemstack(func() {
 		saveg(pc, sp, ourg, &p[0])
 	})
+	if labels != nil {
+		labels[0] = ourg.labels
+	}
 	ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
 	goroutineProfile.offset.Store(1)
 
@@ -949,7 +1187,7 @@
 			doRecordGoroutineProfile(fing)
 		}
 	}
-	startTheWorld()
+	startTheWorld(stw)
 
 	// Visit each goroutine that existed as of the startTheWorld call above.
 	//
@@ -966,12 +1204,12 @@
 		tryRecordGoroutineProfile(gp1, Gosched)
 	})
 
-	stopTheWorld(stwGoroutineProfileCleanup)
+	stw = stopTheWorld(stwGoroutineProfileCleanup)
 	endOffset := goroutineProfile.offset.Swap(0)
 	goroutineProfile.active = false
 	goroutineProfile.records = nil
 	goroutineProfile.labels = nil
-	startTheWorld()
+	startTheWorld(stw)
 
 	// Restore the invariant that every goroutine struct in allgs has its
 	// goroutineProfiled field cleared.
@@ -1101,7 +1339,7 @@
 		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
 	}
 
-	stopTheWorld(stwGoroutineProfile)
+	stw := stopTheWorld(stwGoroutineProfile)
 
 	// World is stopped, no locking required.
 	n = 1
@@ -1157,7 +1395,7 @@
 		raceacquire(unsafe.Pointer(&labelSync))
 	}
 
-	startTheWorld()
+	startTheWorld(stw)
 	return n, ok
 }
 
@@ -1165,7 +1403,7 @@
 // If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
 // If len(p) < n, GoroutineProfile does not change p and returns n, false.
 //
-// Most clients should use the runtime/pprof package instead
+// Most clients should use the [runtime/pprof] package instead
 // of calling GoroutineProfile directly.
 func GoroutineProfile(p []StackRecord) (n int, ok bool) {
 
@@ -1186,8 +1424,9 @@
 // If all is true, Stack formats stack traces of all other goroutines
 // into buf after the trace for the current goroutine.
 func Stack(buf []byte, all bool) int {
+	var stw worldStop
 	if all {
-		stopTheWorld(stwAllGoroutinesStack)
+		stw = stopTheWorld(stwAllGoroutinesStack)
 	}
 
 	n := 0
@@ -1214,7 +1453,7 @@
 	}
 
 	if all {
-		startTheWorld()
+		startTheWorld(stw)
 	}
 	return n
 }
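
recordLock above keeps at most one runtime-lock contention stack per M; when a second contended lock shows up, it keeps one of the two with a probability weighted by their magnitudes and charges the loser to the lost-contention bucket. A standalone sketch of just that coin flip (the function name here is hypothetical):

package main

import (
	"fmt"
	"math/rand"
)

// keepNew mirrors the prevScore/thisScore comparison in recordLock above:
// each sample draws a score below its own weight, and the heavier sample
// tends to win (the slight modulo bias is ignored, as in the original).
func keepNew(prevCycles, newCycles int64) bool {
	prevScore := rand.Uint64() % uint64(prevCycles)
	thisScore := rand.Uint64() % uint64(newCycles)
	return prevScore <= thisScore // ties and larger thisScore keep the new sample
}

func main() {
	kept := 0
	const trials = 100000
	for i := 0; i < trials; i++ {
		if keepNew(100, 300) { // the heavier (300-cycle) sample usually survives
			kept++
		}
	}
	fmt.Printf("new sample kept in %.1f%% of trials\n", 100*float64(kept)/trials)
}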
diff --git a/src/runtime/mranges.go b/src/runtime/mranges.go
index 4388d26..6dd1a75 100644
--- a/src/runtime/mranges.go
+++ b/src/runtime/mranges.go
@@ -271,7 +271,7 @@
 	const iterMax = 8
 	bot, top := 0, len(a.ranges)
 	for top-bot > iterMax {
-		i := ((top - bot) / 2) + bot
+		i := int(uint(bot+top) >> 1)
 		if a.ranges[i].contains(base.addr()) {
 			// a.ranges[i] contains base, so
 			// its successor is the next index.
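
The mranges.go hunk above switches the probe index to int(uint(bot+top) >> 1), the midpoint spelling the standard library's sort package uses: the unsigned shift stays correct even when bot+top would overflow a signed int. (The previous (top-bot)/2+bot form was also overflow-safe; this just matches the stdlib idiom.) A sketch with indices chosen to overflow, assuming a 64-bit int:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Indices chosen so bot+top overflows a signed 64-bit int (assumes int is
	// 64 bits, as on the platforms this runtime code targets).
	bot, top := math.MaxInt64-10, math.MaxInt64-2

	naive := (bot + top) / 2         // wraps negative: not a usable index
	idiom := int(uint(bot+top) >> 1) // reinterprets the wrapped bits; correct

	fmt.Println(naive < 0)                // true
	fmt.Println(idiom == math.MaxInt64-6) // true: the actual midpoint
}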
diff --git a/src/runtime/msan/msan.go b/src/runtime/msan/msan.go
index 4e41f85..7b3e8e6 100644
--- a/src/runtime/msan/msan.go
+++ b/src/runtime/msan/msan.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build msan && ((linux && (amd64 || arm64)) || (freebsd && amd64))
+//go:build msan && ((linux && (amd64 || arm64 || loong64)) || (freebsd && amd64))
 
 package msan
 
diff --git a/src/runtime/msan_amd64.s b/src/runtime/msan_amd64.s
index 89ed304..a1dc388 100644
--- a/src/runtime/msan_amd64.s
+++ b/src/runtime/msan_amd64.s
@@ -28,7 +28,7 @@
 // Called from msanread.
 TEXT	runtime·domsanread(SB), NOSPLIT, $0-16
 	MOVQ	addr+0(FP), RARG0
-	MOVQ	size+8(FP), RARG1
+	MOVQ	sz+8(FP), RARG1
 	// void __msan_read_go(void *addr, uintptr_t sz);
 	MOVQ	$__msan_read_go(SB), AX
 	JMP	msancall<>(SB)
@@ -37,7 +37,7 @@
 // Called from instrumented code.
 TEXT	runtime·msanwrite(SB), NOSPLIT, $0-16
 	MOVQ	addr+0(FP), RARG0
-	MOVQ	size+8(FP), RARG1
+	MOVQ	sz+8(FP), RARG1
 	// void __msan_write_go(void *addr, uintptr_t sz);
 	MOVQ	$__msan_write_go(SB), AX
 	JMP	msancall<>(SB)
@@ -45,7 +45,7 @@
 // func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr)
 TEXT	runtime·msanmalloc(SB), NOSPLIT, $0-16
 	MOVQ	addr+0(FP), RARG0
-	MOVQ	size+8(FP), RARG1
+	MOVQ	sz+8(FP), RARG1
 	// void __msan_malloc_go(void *addr, uintptr_t sz);
 	MOVQ	$__msan_malloc_go(SB), AX
 	JMP	msancall<>(SB)
@@ -53,7 +53,7 @@
 // func runtime·msanfree(addr unsafe.Pointer, sz uintptr)
 TEXT	runtime·msanfree(SB), NOSPLIT, $0-16
 	MOVQ	addr+0(FP), RARG0
-	MOVQ	size+8(FP), RARG1
+	MOVQ	sz+8(FP), RARG1
 	// void __msan_free_go(void *addr, uintptr_t sz);
 	MOVQ	$__msan_free_go(SB), AX
 	JMP	msancall<>(SB)
@@ -62,7 +62,7 @@
 TEXT	runtime·msanmove(SB), NOSPLIT, $0-24
 	MOVQ	dst+0(FP), RARG0
 	MOVQ	src+8(FP), RARG1
-	MOVQ	size+16(FP), RARG2
+	MOVQ	sz+16(FP), RARG2
 	// void __msan_memmove(void *dst, void *src, uintptr_t sz);
 	MOVQ	$__msan_memmove(SB), AX
 	JMP	msancall<>(SB)
diff --git a/src/runtime/msan_arm64.s b/src/runtime/msan_arm64.s
index b9eff34..ce475cf 100644
--- a/src/runtime/msan_arm64.s
+++ b/src/runtime/msan_arm64.s
@@ -16,7 +16,7 @@
 // Called from msanread.
 TEXT	runtime·domsanread(SB), NOSPLIT, $0-16
 	MOVD	addr+0(FP), RARG0
-	MOVD	size+8(FP), RARG1
+	MOVD	sz+8(FP), RARG1
 	// void __msan_read_go(void *addr, uintptr_t sz);
 	MOVD	$__msan_read_go(SB), FARG
 	JMP	msancall<>(SB)
@@ -25,7 +25,7 @@
 // Called from instrumented code.
 TEXT	runtime·msanwrite(SB), NOSPLIT, $0-16
 	MOVD	addr+0(FP), RARG0
-	MOVD	size+8(FP), RARG1
+	MOVD	sz+8(FP), RARG1
 	// void __msan_write_go(void *addr, uintptr_t sz);
 	MOVD	$__msan_write_go(SB), FARG
 	JMP	msancall<>(SB)
@@ -33,7 +33,7 @@
 // func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr)
 TEXT	runtime·msanmalloc(SB), NOSPLIT, $0-16
 	MOVD	addr+0(FP), RARG0
-	MOVD	size+8(FP), RARG1
+	MOVD	sz+8(FP), RARG1
 	// void __msan_malloc_go(void *addr, uintptr_t sz);
 	MOVD	$__msan_malloc_go(SB), FARG
 	JMP	msancall<>(SB)
@@ -41,7 +41,7 @@
 // func runtime·msanfree(addr unsafe.Pointer, sz uintptr)
 TEXT	runtime·msanfree(SB), NOSPLIT, $0-16
 	MOVD	addr+0(FP), RARG0
-	MOVD	size+8(FP), RARG1
+	MOVD	sz+8(FP), RARG1
 	// void __msan_free_go(void *addr, uintptr_t sz);
 	MOVD	$__msan_free_go(SB), FARG
 	JMP	msancall<>(SB)
@@ -50,7 +50,7 @@
 TEXT	runtime·msanmove(SB), NOSPLIT, $0-24
 	MOVD	dst+0(FP), RARG0
 	MOVD	src+8(FP), RARG1
-	MOVD	size+16(FP), RARG2
+	MOVD	sz+16(FP), RARG2
 	// void __msan_memmove(void *dst, void *src, uintptr_t sz);
 	MOVD	$__msan_memmove(SB), FARG
 	JMP	msancall<>(SB)
diff --git a/src/runtime/msan_loong64.s b/src/runtime/msan_loong64.s
new file mode 100644
index 0000000..b9fa5fd
--- /dev/null
+++ b/src/runtime/msan_loong64.s
@@ -0,0 +1,72 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build msan
+
+#include "go_asm.h"
+#include "textflag.h"
+
+#define RARG0 R4
+#define RARG1 R5
+#define RARG2 R6
+#define FARG  R7
+
+// func runtime·domsanread(addr unsafe.Pointer, sz uintptr)
+// Called from msanread.
+TEXT	runtime·domsanread(SB), NOSPLIT, $0-16
+	MOVV	addr+0(FP), RARG0
+	MOVV	sz+8(FP), RARG1
+	// void __msan_read_go(void *addr, uintptr_t sz);
+	MOVV	$__msan_read_go(SB), FARG
+	JMP	msancall<>(SB)
+
+// func runtime·msanwrite(addr unsafe.Pointer, sz uintptr)
+// Called from instrumented code.
+TEXT	runtime·msanwrite(SB), NOSPLIT, $0-16
+	MOVV	addr+0(FP), RARG0
+	MOVV	sz+8(FP), RARG1
+	// void __msan_write_go(void *addr, uintptr_t sz);
+	MOVV	$__msan_write_go(SB), FARG
+	JMP	msancall<>(SB)
+
+// func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr)
+TEXT	runtime·msanmalloc(SB), NOSPLIT, $0-16
+	MOVV	addr+0(FP), RARG0
+	MOVV	sz+8(FP), RARG1
+	// void __msan_malloc_go(void *addr, uintptr_t sz);
+	MOVV	$__msan_malloc_go(SB), FARG
+	JMP	msancall<>(SB)
+
+// func runtime·msanfree(addr unsafe.Pointer, sz uintptr)
+TEXT	runtime·msanfree(SB), NOSPLIT, $0-16
+	MOVV	addr+0(FP), RARG0
+	MOVV	sz+8(FP), RARG1
+	// void __msan_free_go(void *addr, uintptr_t sz);
+	MOVV	$__msan_free_go(SB), FARG
+	JMP	msancall<>(SB)
+
+// func runtime·msanmove(dst, src unsafe.Pointer, sz uintptr)
+TEXT	runtime·msanmove(SB), NOSPLIT, $0-24
+	MOVV	dst+0(FP), RARG0
+	MOVV	src+8(FP), RARG1
+	MOVV	sz+16(FP), RARG2
+	// void __msan_memmove(void *dst, void *src, uintptr_t sz);
+	MOVV	$__msan_memmove(SB), FARG
+	JMP	msancall<>(SB)
+
+// Switches SP to g0 stack and calls (FARG). Arguments already set.
+TEXT	msancall<>(SB), NOSPLIT, $0-0
+	MOVV	R3, R23         // callee-saved
+	BEQ	g, g0stack      // no g, still on a system stack
+	MOVV	g_m(g), R14
+	MOVV	m_g0(R14), R15
+	BEQ	R15, g, g0stack
+
+	MOVV	(g_sched+gobuf_sp)(R15), R9
+	MOVV	R9, R3
+
+g0stack:
+	JAL	(FARG)
+	MOVV	R23, R3
+	RET
diff --git a/src/runtime/msize.go b/src/runtime/msize.go
deleted file mode 100644
index c56aa5a..0000000
--- a/src/runtime/msize.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Malloc small size classes.
-//
-// See malloc.go for overview.
-// See also mksizeclasses.go for how we decide what size classes to use.
-
-package runtime
-
-// Returns size of the memory block that mallocgc will allocate if you ask for the size.
-func roundupsize(size uintptr) uintptr {
-	if size < _MaxSmallSize {
-		if size <= smallSizeMax-8 {
-			return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
-		} else {
-			return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]])
-		}
-	}
-	if size+_PageSize < size {
-		return size
-	}
-	return alignUp(size, _PageSize)
-}
diff --git a/src/runtime/msize_allocheaders.go b/src/runtime/msize_allocheaders.go
new file mode 100644
index 0000000..6873ec6
--- /dev/null
+++ b/src/runtime/msize_allocheaders.go
@@ -0,0 +1,36 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.allocheaders
+
+// Malloc small size classes.
+//
+// See malloc.go for overview.
+// See also mksizeclasses.go for how we decide what size classes to use.
+
+package runtime
+
+// Returns size of the memory block that mallocgc will allocate if you ask for the size,
+// minus any inline space for metadata.
+func roundupsize(size uintptr, noscan bool) (reqSize uintptr) {
+	reqSize = size
+	if reqSize <= maxSmallSize-mallocHeaderSize {
+		// Small object.
+		if !noscan && reqSize > minSizeForMallocHeader { // !noscan && !heapBitsInSpan(reqSize)
+			reqSize += mallocHeaderSize
+		}
+		// (reqSize - size) is either mallocHeaderSize or 0. We need to subtract mallocHeaderSize
+		// from the result if we have one, since mallocgc will add it back in.
+		if reqSize <= smallSizeMax-8 {
+			return uintptr(class_to_size[size_to_class8[divRoundUp(reqSize, smallSizeDiv)]]) - (reqSize - size)
+		}
+		return uintptr(class_to_size[size_to_class128[divRoundUp(reqSize-smallSizeMax, largeSizeDiv)]]) - (reqSize - size)
+	}
+	// Large object. Align reqSize up to the next page. Check for overflow.
+	reqSize += pageSize - 1
+	if reqSize < size {
+		return size
+	}
+	return reqSize &^ (pageSize - 1)
+}
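
roundupsize above folds the malloc header into the size-class lookup and then subtracts it back out, so callers see the usable size. A toy sketch of just that arithmetic, with a made-up header size and size-class table (the real mallocHeaderSize, minSizeForMallocHeader threshold, and class_to_size tables differ):

package main

import "fmt"

// Hypothetical constants and size classes purely for illustration.
const headerSize = 8

var classes = []uintptr{16, 32, 48, 64, 80, 96, 112, 128}

func roundUpToClass(n uintptr) uintptr {
	for _, c := range classes {
		if n <= c {
			return c
		}
	}
	return n
}

// roundupsizeSketch mirrors the shape of roundupsize for small scannable
// objects that carry a malloc header: look up the class for size+header,
// then report the usable size with the header subtracted back out.
func roundupsizeSketch(size uintptr) uintptr {
	req := size + headerSize
	return roundUpToClass(req) - headerSize
}

func main() {
	fmt.Println(roundupsizeSketch(100)) // header pushes 100 -> class 112 -> reports 104
}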
diff --git a/src/runtime/msize_noallocheaders.go b/src/runtime/msize_noallocheaders.go
new file mode 100644
index 0000000..d89e0d6
--- /dev/null
+++ b/src/runtime/msize_noallocheaders.go
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.allocheaders
+
+// Malloc small size classes.
+//
+// See malloc.go for overview.
+// See also mksizeclasses.go for how we decide what size classes to use.
+
+package runtime
+
+// Returns size of the memory block that mallocgc will allocate if you ask for the size.
+//
+// The noscan argument is purely for compatibility with goexperiment.AllocHeaders.
+func roundupsize(size uintptr, noscan bool) uintptr {
+	if size < _MaxSmallSize {
+		if size <= smallSizeMax-8 {
+			return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
+		} else {
+			return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]])
+		}
+	}
+	if size+_PageSize < size {
+		return size
+	}
+	return alignUp(size, _PageSize)
+}
diff --git a/src/runtime/mspanset.go b/src/runtime/mspanset.go
index 5520d6c..5687627 100644
--- a/src/runtime/mspanset.go
+++ b/src/runtime/mspanset.go
@@ -284,7 +284,7 @@
 
 // Stores the spanSetSpinePointer.
 //
-// It has the same semantics as atomic.UnsafePointer.
+// It has the same semantics as [atomic.UnsafePointer].
 func (s *atomicSpanSetSpinePointer) StoreNoWB(p spanSetSpinePointer) {
 	s.a.StoreNoWB(p.p)
 }
@@ -296,7 +296,7 @@
 
 // lookup returns &s[idx].
 func (s spanSetSpinePointer) lookup(idx uintptr) *atomic.Pointer[spanSetBlock] {
-	return (*atomic.Pointer[spanSetBlock])(add(unsafe.Pointer(s.p), goarch.PtrSize*idx))
+	return (*atomic.Pointer[spanSetBlock])(add(s.p, goarch.PtrSize*idx))
 }
 
 // spanSetBlockPool is a global pool of spanSetBlocks.
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 9cdc565..87afec4 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -31,7 +31,7 @@
 
 	// Statistics about the garbage collector.
 
-	// Protected by mheap or stopping the world during GC.
+	// Protected by mheap or worldsema during GC.
 	last_gc_unix    uint64 // last gc (in unix time)
 	pause_total_ns  uint64
 	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
@@ -44,12 +44,6 @@
 	lastHeapInUse    uint64 // heapInUse at mark termination of the previous GC
 
 	enablegc bool
-
-	// gcPauseDist represents the distribution of all GC-related
-	// application pauses in the runtime.
-	//
-	// Each individual pause is counted separately, unlike pause_ns.
-	gcPauseDist timeHistogram
 }
 
 var memstats mstats
@@ -358,15 +352,20 @@
 // collection cycle.
 func ReadMemStats(m *MemStats) {
 	_ = m.Alloc // nil check test before we switch stacks, see issue 61158
-	stopTheWorld(stwReadMemStats)
+	stw := stopTheWorld(stwReadMemStats)
 
 	systemstack(func() {
 		readmemstats_m(m)
 	})
 
-	startTheWorld()
+	startTheWorld(stw)
 }
 
+// doubleCheckReadMemStats controls a double-check mode for ReadMemStats that
+// ensures consistency between the values that ReadMemStats is using and the
+// runtime-internal stats.
+var doubleCheckReadMemStats = false
+
 // readmemstats_m populates stats for internal runtime values.
 //
 // The world must be stopped.
@@ -441,56 +440,65 @@
 
 	heapGoal := gcController.heapGoal()
 
-	// The world is stopped, so the consistent stats (after aggregation)
-	// should be identical to some combination of memstats. In particular:
-	//
-	// * memstats.heapInUse == inHeap
-	// * memstats.heapReleased == released
-	// * memstats.heapInUse + memstats.heapFree == committed - inStacks - inWorkBufs - inPtrScalarBits
-	// * memstats.totalAlloc == totalAlloc
-	// * memstats.totalFree == totalFree
-	//
-	// Check if that's actually true.
-	//
-	// TODO(mknyszek): Maybe don't throw here. It would be bad if a
-	// bug in otherwise benign accounting caused the whole application
-	// to crash.
-	if gcController.heapInUse.load() != uint64(consStats.inHeap) {
-		print("runtime: heapInUse=", gcController.heapInUse.load(), "\n")
-		print("runtime: consistent value=", consStats.inHeap, "\n")
-		throw("heapInUse and consistent stats are not equal")
-	}
-	if gcController.heapReleased.load() != uint64(consStats.released) {
-		print("runtime: heapReleased=", gcController.heapReleased.load(), "\n")
-		print("runtime: consistent value=", consStats.released, "\n")
-		throw("heapReleased and consistent stats are not equal")
-	}
-	heapRetained := gcController.heapInUse.load() + gcController.heapFree.load()
-	consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs - consStats.inPtrScalarBits)
-	if heapRetained != consRetained {
-		print("runtime: global value=", heapRetained, "\n")
-		print("runtime: consistent value=", consRetained, "\n")
-		throw("measures of the retained heap are not equal")
-	}
-	if gcController.totalAlloc.Load() != totalAlloc {
-		print("runtime: totalAlloc=", gcController.totalAlloc.Load(), "\n")
-		print("runtime: consistent value=", totalAlloc, "\n")
-		throw("totalAlloc and consistent stats are not equal")
-	}
-	if gcController.totalFree.Load() != totalFree {
-		print("runtime: totalFree=", gcController.totalFree.Load(), "\n")
-		print("runtime: consistent value=", totalFree, "\n")
-		throw("totalFree and consistent stats are not equal")
-	}
-	// Also check that mappedReady lines up with totalMapped - released.
-	// This isn't really the same type of "make sure consistent stats line up" situation,
-	// but this is an opportune time to check.
-	if gcController.mappedReady.Load() != totalMapped-uint64(consStats.released) {
-		print("runtime: mappedReady=", gcController.mappedReady.Load(), "\n")
-		print("runtime: totalMapped=", totalMapped, "\n")
-		print("runtime: released=", uint64(consStats.released), "\n")
-		print("runtime: totalMapped-released=", totalMapped-uint64(consStats.released), "\n")
-		throw("mappedReady and other memstats are not equal")
+	if doubleCheckReadMemStats {
+		// Only check this if we're debugging. It would be bad to crash an application
+		// just because the debugging stats are wrong. We mostly rely on tests to catch
+		// these issues, and we enable the double check mode for tests.
+		//
+		// The world is stopped, so the consistent stats (after aggregation)
+		// should be identical to some combination of memstats. In particular:
+		//
+		// * memstats.heapInUse == inHeap
+		// * memstats.heapReleased == released
+		// * memstats.heapInUse + memstats.heapFree == committed - inStacks - inWorkBufs - inPtrScalarBits
+		// * memstats.totalAlloc == totalAlloc
+		// * memstats.totalFree == totalFree
+		//
+		// Check if that's actually true.
+		//
+		// Prevent sysmon and the tracer from skewing the stats since they can
+		// act without synchronizing with a STW. See #64401.
+		lock(&sched.sysmonlock)
+		lock(&trace.lock)
+		if gcController.heapInUse.load() != uint64(consStats.inHeap) {
+			print("runtime: heapInUse=", gcController.heapInUse.load(), "\n")
+			print("runtime: consistent value=", consStats.inHeap, "\n")
+			throw("heapInUse and consistent stats are not equal")
+		}
+		if gcController.heapReleased.load() != uint64(consStats.released) {
+			print("runtime: heapReleased=", gcController.heapReleased.load(), "\n")
+			print("runtime: consistent value=", consStats.released, "\n")
+			throw("heapReleased and consistent stats are not equal")
+		}
+		heapRetained := gcController.heapInUse.load() + gcController.heapFree.load()
+		consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs - consStats.inPtrScalarBits)
+		if heapRetained != consRetained {
+			print("runtime: global value=", heapRetained, "\n")
+			print("runtime: consistent value=", consRetained, "\n")
+			throw("measures of the retained heap are not equal")
+		}
+		if gcController.totalAlloc.Load() != totalAlloc {
+			print("runtime: totalAlloc=", gcController.totalAlloc.Load(), "\n")
+			print("runtime: consistent value=", totalAlloc, "\n")
+			throw("totalAlloc and consistent stats are not equal")
+		}
+		if gcController.totalFree.Load() != totalFree {
+			print("runtime: totalFree=", gcController.totalFree.Load(), "\n")
+			print("runtime: consistent value=", totalFree, "\n")
+			throw("totalFree and consistent stats are not equal")
+		}
+		// Also check that mappedReady lines up with totalMapped - released.
+		// This isn't really the same type of "make sure consistent stats line up" situation,
+		// but this is an opportune time to check.
+		if gcController.mappedReady.Load() != totalMapped-uint64(consStats.released) {
+			print("runtime: mappedReady=", gcController.mappedReady.Load(), "\n")
+			print("runtime: totalMapped=", totalMapped, "\n")
+			print("runtime: released=", uint64(consStats.released), "\n")
+			print("runtime: totalMapped-released=", totalMapped-uint64(consStats.released), "\n")
+			throw("mappedReady and other memstats are not equal")
+		}
+		unlock(&trace.lock)
+		unlock(&sched.sysmonlock)
 	}
 
 	// We've calculated all the values we need. Now, populate stats.
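
For orientation, nothing changes for users of the exported API here: runtime.ReadMemStats still stops the world briefly and fills in one consistent snapshot; only the internal cross-checking moved behind doubleCheckReadMemStats, which the comment above says is enabled for tests. A minimal user-level sketch using only the public API (not code from this patch):

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		var m runtime.MemStats
		runtime.ReadMemStats(&m) // briefly stops the world; fields form a consistent snapshot
		fmt.Printf("heap in use: %d B, total allocated: %d B\n", m.HeapInuse, m.TotalAlloc)
	}
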
diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go
index 9b54e8e..9c2e40c 100644
--- a/src/runtime/netpoll.go
+++ b/src/runtime/netpoll.go
@@ -26,10 +26,12 @@
 // func netpollclose(fd uintptr) int32
 //     Disable notifications for fd. Return an errno value.
 //
-// func netpoll(delta int64) gList
+// func netpoll(delta int64) (gList, int32)
 //     Poll the network. If delta < 0, block indefinitely. If delta == 0,
 //     poll without blocking. If delta > 0, block for up to delta nanoseconds.
-//     Return a list of goroutines built by calling netpollready.
+//     Return a list of goroutines built by calling netpollready,
+//     and a delta to add to netpollWaiters when all goroutines are ready.
+//     This will never return an empty list with a non-zero delta.
 //
 // func netpollBreak()
 //     Wake up the network poller, assumed to be blocked in netpoll.
@@ -426,12 +428,13 @@
 	}
 	// If we set the new deadline in the past, unblock currently pending IO if any.
 	// Note that pd.publishInfo has already been called, above, immediately after modifying rd and wd.
+	delta := int32(0)
 	var rg, wg *g
 	if pd.rd < 0 {
-		rg = netpollunblock(pd, 'r', false)
+		rg = netpollunblock(pd, 'r', false, &delta)
 	}
 	if pd.wd < 0 {
-		wg = netpollunblock(pd, 'w', false)
+		wg = netpollunblock(pd, 'w', false, &delta)
 	}
 	unlock(&pd.lock)
 	if rg != nil {
@@ -440,6 +443,7 @@
 	if wg != nil {
 		netpollgoready(wg, 3)
 	}
+	netpollAdjustWaiters(delta)
 }
 
 //go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock
@@ -453,8 +457,9 @@
 	pd.wseq++
 	var rg, wg *g
 	pd.publishInfo()
-	rg = netpollunblock(pd, 'r', false)
-	wg = netpollunblock(pd, 'w', false)
+	delta := int32(0)
+	rg = netpollunblock(pd, 'r', false, &delta)
+	wg = netpollunblock(pd, 'w', false, &delta)
 	if pd.rt.f != nil {
 		deltimer(&pd.rt)
 		pd.rt.f = nil
@@ -470,6 +475,7 @@
 	if wg != nil {
 		netpollgoready(wg, 3)
 	}
+	netpollAdjustWaiters(delta)
 }
 
 // netpollready is called by the platform-specific netpoll function.
@@ -478,16 +484,19 @@
 // from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate
 // whether the fd is ready for reading or writing or both.
 //
+// This returns a delta to apply to netpollWaiters.
+//
 // This may run while the world is stopped, so write barriers are not allowed.
 //
 //go:nowritebarrier
-func netpollready(toRun *gList, pd *pollDesc, mode int32) {
+func netpollready(toRun *gList, pd *pollDesc, mode int32) int32 {
+	delta := int32(0)
 	var rg, wg *g
 	if mode == 'r' || mode == 'r'+'w' {
-		rg = netpollunblock(pd, 'r', true)
+		rg = netpollunblock(pd, 'r', true, &delta)
 	}
 	if mode == 'w' || mode == 'r'+'w' {
-		wg = netpollunblock(pd, 'w', true)
+		wg = netpollunblock(pd, 'w', true, &delta)
 	}
 	if rg != nil {
 		toRun.push(rg)
@@ -495,6 +504,7 @@
 	if wg != nil {
 		toRun.push(wg)
 	}
+	return delta
 }
 
 func netpollcheckerr(pd *pollDesc, mode int32) int {
@@ -520,13 +530,12 @@
 		// Bump the count of goroutines waiting for the poller.
 		// The scheduler uses this to decide whether to block
 		// waiting for the poller if there is nothing else to do.
-		netpollWaiters.Add(1)
+		netpollAdjustWaiters(1)
 	}
 	return r
 }
 
 func netpollgoready(gp *g, traceskip int) {
-	netpollWaiters.Add(-1)
 	goready(gp, traceskip+1)
 }
 
@@ -571,7 +580,13 @@
 	return old == pdReady
 }
 
-func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
+// netpollunblock moves either pd.rg (if mode == 'r') or
+// pd.wg (if mode == 'w') into the pdReady state.
+// This returns any goroutine blocked on pd.{rg,wg}.
+// It adds any adjustment to netpollWaiters to *delta;
+// this adjustment should be applied after the goroutine has
+// been marked ready.
+func netpollunblock(pd *pollDesc, mode int32, ioready bool, delta *int32) *g {
 	gpp := &pd.rg
 	if mode == 'w' {
 		gpp = &pd.wg
@@ -587,13 +602,15 @@
 			// will check for timeout/cancel before waiting.
 			return nil
 		}
-		var new uintptr
+		new := pdNil
 		if ioready {
 			new = pdReady
 		}
 		if gpp.CompareAndSwap(old, new) {
 			if old == pdWait {
 				old = pdNil
+			} else if old != pdNil {
+				*delta -= 1
 			}
 			return (*g)(unsafe.Pointer(old))
 		}
@@ -613,6 +630,7 @@
 		unlock(&pd.lock)
 		return
 	}
+	delta := int32(0)
 	var rg *g
 	if read {
 		if pd.rd <= 0 || pd.rt.f == nil {
@@ -620,7 +638,7 @@
 		}
 		pd.rd = -1
 		pd.publishInfo()
-		rg = netpollunblock(pd, 'r', false)
+		rg = netpollunblock(pd, 'r', false, &delta)
 	}
 	var wg *g
 	if write {
@@ -629,7 +647,7 @@
 		}
 		pd.wd = -1
 		pd.publishInfo()
-		wg = netpollunblock(pd, 'w', false)
+		wg = netpollunblock(pd, 'w', false, &delta)
 	}
 	unlock(&pd.lock)
 	if rg != nil {
@@ -638,6 +656,7 @@
 	if wg != nil {
 		netpollgoready(wg, 0)
 	}
+	netpollAdjustWaiters(delta)
 }
 
 func netpollDeadline(arg any, seq uintptr) {
@@ -652,6 +671,18 @@
 	netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
 }
 
+// netpollAnyWaiters reports whether any goroutines are waiting for I/O.
+func netpollAnyWaiters() bool {
+	return netpollWaiters.Load() > 0
+}
+
+// netpollAdjustWaiters adds delta to netpollWaiters.
+func netpollAdjustWaiters(delta int32) {
+	if delta != 0 {
+		netpollWaiters.Add(delta)
+	}
+}
+
 func (c *pollCache) alloc() *pollDesc {
 	lock(&c.lock)
 	if c.first == nil {
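
Taken together, the changes in this file establish a new contract: every platform poller returns the ready goroutines plus a delta to apply to netpollWaiters, and the delta is applied only after those goroutines have been made runnable. A hedged sketch of the caller side (the scheduler code is outside this section, so the exact call shape is an assumption):

	list, delta := netpoll(0)   // non-blocking poll
	injectglist(&list)          // make the returned goroutines runnable (assumed scheduler helper)
	netpollAdjustWaiters(delta) // apply the waiter-count delta afterwards

This ordering is why netpollunblock now writes into *delta instead of touching netpollWaiters directly, and why netpollgoready no longer decrements the counter itself.
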
diff --git a/src/runtime/netpoll_aix.go b/src/runtime/netpoll_aix.go
index fad976b..a34b4d8 100644
--- a/src/runtime/netpoll_aix.go
+++ b/src/runtime/netpoll_aix.go
@@ -154,13 +154,13 @@
 // delay > 0: block for up to that many nanoseconds
 //
 //go:nowritebarrierrec
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
 	var timeout uintptr
 	if delay < 0 {
 		timeout = ^uintptr(0)
 	} else if delay == 0 {
 		// TODO: call poll with timeout == 0
-		return gList{}
+		return gList{}, 0
 	} else if delay < 1e6 {
 		timeout = 1
 	} else if delay < 1e15 {
@@ -186,7 +186,7 @@
 		// If a timed sleep was interrupted, just return to
 		// recalculate how long we should sleep now.
 		if timeout > 0 {
-			return gList{}
+			return gList{}, 0
 		}
 		goto retry
 	}
@@ -206,6 +206,7 @@
 		n--
 	}
 	var toRun gList
+	delta := int32(0)
 	for i := 1; i < len(pfds) && n > 0; i++ {
 		pfd := &pfds[i]
 
@@ -220,10 +221,10 @@
 		}
 		if mode != 0 {
 			pds[i].setEventErr(pfd.revents == _POLLERR, 0)
-			netpollready(&toRun, pds[i], mode)
+			delta += netpollready(&toRun, pds[i], mode)
 			n--
 		}
 	}
 	unlock(&mtxset)
-	return toRun
+	return toRun, delta
 }
diff --git a/src/runtime/netpoll_epoll.go b/src/runtime/netpoll_epoll.go
index e29b64d..cda19fb 100644
--- a/src/runtime/netpoll_epoll.go
+++ b/src/runtime/netpoll_epoll.go
@@ -95,9 +95,9 @@
 // delay < 0: blocks indefinitely
 // delay == 0: does not block, just polls
 // delay > 0: block for up to that many nanoseconds
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
 	if epfd == -1 {
-		return gList{}
+		return gList{}, 0
 	}
 	var waitms int32
 	if delay < 0 {
@@ -124,11 +124,12 @@
 		// If a timed sleep was interrupted, just return to
 		// recalculate how long we should sleep now.
 		if waitms > 0 {
-			return gList{}
+			return gList{}, 0
 		}
 		goto retry
 	}
 	var toRun gList
+	delta := int32(0)
 	for i := int32(0); i < n; i++ {
 		ev := events[i]
 		if ev.Events == 0 {
@@ -164,9 +165,9 @@
 			tag := tp.tag()
 			if pd.fdseq.Load() == tag {
 				pd.setEventErr(ev.Events == syscall.EPOLLERR, tag)
-				netpollready(&toRun, pd, mode)
+				delta += netpollready(&toRun, pd, mode)
 			}
 		}
 	}
-	return toRun
+	return toRun, delta
 }
diff --git a/src/runtime/netpoll_fake.go b/src/runtime/netpoll_fake.go
index 5319561..41f86a8 100644
--- a/src/runtime/netpoll_fake.go
+++ b/src/runtime/netpoll_fake.go
@@ -30,6 +30,6 @@
 func netpollBreak() {
 }
 
-func netpoll(delay int64) gList {
-	return gList{}
+func netpoll(delay int64) (gList, int32) {
+	return gList{}, 0
 }
diff --git a/src/runtime/netpoll_kqueue.go b/src/runtime/netpoll_kqueue.go
index 3af45e6..d774dce 100644
--- a/src/runtime/netpoll_kqueue.go
+++ b/src/runtime/netpoll_kqueue.go
@@ -118,9 +118,9 @@
 // delay < 0: blocks indefinitely
 // delay == 0: does not block, just polls
 // delay > 0: block for up to that many nanoseconds
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
 	if kq == -1 {
-		return gList{}
+		return gList{}, 0
 	}
 	var tp *timespec
 	var ts timespec
@@ -140,18 +140,22 @@
 retry:
 	n := kevent(kq, nil, 0, &events[0], int32(len(events)), tp)
 	if n < 0 {
-		if n != -_EINTR {
+		// Ignore the ETIMEDOUT error for now, but try to dig deeper and
+		// figure out what really happened with n == ETIMEDOUT;
+		// see https://go.dev/issue/59679 for details.
+		if n != -_EINTR && n != -_ETIMEDOUT {
 			println("runtime: kevent on fd", kq, "failed with", -n)
 			throw("runtime: netpoll failed")
 		}
 		// If a timed sleep was interrupted, just return to
 		// recalculate how long we should sleep now.
 		if delay > 0 {
-			return gList{}
+			return gList{}, 0
 		}
 		goto retry
 	}
 	var toRun gList
+	delta := int32(0)
 	for i := 0; i < int(n); i++ {
 		ev := &events[i]
 
@@ -208,8 +212,8 @@
 				}
 			}
 			pd.setEventErr(ev.flags == _EV_ERROR, tag)
-			netpollready(&toRun, pd, mode)
+			delta += netpollready(&toRun, pd, mode)
 		}
 	}
-	return toRun
+	return toRun, delta
 }
diff --git a/src/runtime/netpoll_os_test.go b/src/runtime/netpoll_os_test.go
index b96b9f3..1e375f8 100644
--- a/src/runtime/netpoll_os_test.go
+++ b/src/runtime/netpoll_os_test.go
@@ -1,3 +1,7 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package runtime_test
 
 import (
diff --git a/src/runtime/netpoll_solaris.go b/src/runtime/netpoll_solaris.go
index 13c7ffc..41f145c 100644
--- a/src/runtime/netpoll_solaris.go
+++ b/src/runtime/netpoll_solaris.go
@@ -219,9 +219,9 @@
 // delay < 0: blocks indefinitely
 // delay == 0: does not block, just polls
 // delay > 0: block for up to that many nanoseconds
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
 	if portfd == -1 {
-		return gList{}
+		return gList{}, 0
 	}
 
 	var wait *timespec
@@ -259,12 +259,13 @@
 		// If a timed sleep was interrupted and there are no events,
 		// just return to recalculate how long we should sleep now.
 		if delay > 0 {
-			return gList{}
+			return gList{}, 0
 		}
 		goto retry
 	}
 
 	var toRun gList
+	delta := int32(0)
 	for i := 0; i < int(n); i++ {
 		ev := &events[i]
 
@@ -324,9 +325,9 @@
 			// about the event port on SmartOS.
 			//
 			// See golang.org/x/issue/30840.
-			netpollready(&toRun, pd, mode)
+			delta += netpollready(&toRun, pd, mode)
 		}
 	}
 
-	return toRun
+	return toRun, delta
 }
diff --git a/src/runtime/netpoll_stub.go b/src/runtime/netpoll_stub.go
index 14cf0c3..d950661 100644
--- a/src/runtime/netpoll_stub.go
+++ b/src/runtime/netpoll_stub.go
@@ -9,7 +9,6 @@
 import "runtime/internal/atomic"
 
 var netpollInited atomic.Uint32
-var netpollWaiters atomic.Uint32
 
 var netpollStubLock mutex
 var netpollNote note
@@ -34,7 +33,7 @@
 
 // Polls for ready network connections.
 // Returns list of goroutines that become runnable.
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
 	// Implementation for platforms that do not support
 	// integrated network poller.
 	if delay != 0 {
@@ -53,9 +52,16 @@
 		// (eg when running TestNetpollBreak).
 		osyield()
 	}
-	return gList{}
+	return gList{}, 0
 }
 
 func netpollinited() bool {
 	return netpollInited.Load() != 0
 }
+
+func netpollAnyWaiters() bool {
+	return false
+}
+
+func netpollAdjustWaiters(delta int32) {
+}
diff --git a/src/runtime/netpoll_wasip1.go b/src/runtime/netpoll_wasip1.go
index 677287b..9903726 100644
--- a/src/runtime/netpoll_wasip1.go
+++ b/src/runtime/netpoll_wasip1.go
@@ -184,7 +184,7 @@
 
 func netpollBreak() {}
 
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
 	lock(&mtx)
 
 	// If delay >= 0, we include a subscription of type Clock that we use as
@@ -201,7 +201,7 @@
 
 	if len(pollsubs) == 0 {
 		unlock(&mtx)
-		return gList{}
+		return gList{}, 0
 	}
 
 	evts = evts[:len(pollsubs)]
@@ -221,12 +221,13 @@
 		// recalculate how long we should sleep now.
 		if delay > 0 {
 			unlock(&mtx)
-			return gList{}
+			return gList{}, 0
 		}
 		goto retry
 	}
 
 	var toRun gList
+	delta := int32(0)
 	for i := 0; i < int(nevents); i++ {
 		e := &evts[i]
 		if e.typ == eventtypeClock {
@@ -245,10 +246,10 @@
 			pd := (*pollDesc)(unsafe.Pointer(uintptr(e.userdata)))
 			netpolldisarm(pd, mode)
 			pd.setEventErr(e.error != 0, 0)
-			netpollready(&toRun, pd, mode)
+			delta += netpollready(&toRun, pd, mode)
 		}
 	}
 
 	unlock(&mtx)
-	return toRun
+	return toRun, delta
 }
diff --git a/src/runtime/netpoll_windows.go b/src/runtime/netpoll_windows.go
index bb77d8d..484a9e8 100644
--- a/src/runtime/netpoll_windows.go
+++ b/src/runtime/netpoll_windows.go
@@ -84,7 +84,7 @@
 // delay < 0: blocks indefinitely
 // delay == 0: does not block, just polls
 // delay > 0: block for up to that many nanoseconds
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
 	var entries [64]overlappedEntry
 	var wait, qty, flags, n, i uint32
 	var errno int32
@@ -94,7 +94,7 @@
 	mp := getg().m
 
 	if iocphandle == _INVALID_HANDLE_VALUE {
-		return gList{}
+		return gList{}, 0
 	}
 	if delay < 0 {
 		wait = _INFINITE
@@ -121,12 +121,13 @@
 		mp.blocked = false
 		errno = int32(getlasterror())
 		if errno == _WAIT_TIMEOUT {
-			return gList{}
+			return gList{}, 0
 		}
 		println("runtime: GetQueuedCompletionStatusEx failed (errno=", errno, ")")
 		throw("runtime: netpoll failed")
 	}
 	mp.blocked = false
+	delta := int32(0)
 	for i = 0; i < n; i++ {
 		op = entries[i].op
 		if op != nil && op.pd == entries[i].key {
@@ -135,7 +136,7 @@
 			if stdcall5(_WSAGetOverlappedResult, op.pd.fd, uintptr(unsafe.Pointer(op)), uintptr(unsafe.Pointer(&qty)), 0, uintptr(unsafe.Pointer(&flags))) == 0 {
 				errno = int32(getlasterror())
 			}
-			handlecompletion(&toRun, op, errno, qty)
+			delta += handlecompletion(&toRun, op, errno, qty)
 		} else {
 			netpollWakeSig.Store(0)
 			if delay == 0 {
@@ -145,10 +146,10 @@
 			}
 		}
 	}
-	return toRun
+	return toRun, delta
 }
 
-func handlecompletion(toRun *gList, op *net_op, errno int32, qty uint32) {
+func handlecompletion(toRun *gList, op *net_op, errno int32, qty uint32) int32 {
 	mode := op.mode
 	if mode != 'r' && mode != 'w' {
 		println("runtime: GetQueuedCompletionStatusEx returned invalid mode=", mode)
@@ -156,5 +157,5 @@
 	}
 	op.errno = errno
 	op.qty = qty
-	netpollready(toRun, op.pd, mode)
+	return netpollready(toRun, op.pd, mode)
 }
diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go
index 8af88d1..0d20079 100644
--- a/src/runtime/os2_aix.go
+++ b/src/runtime/os2_aix.go
@@ -428,7 +428,6 @@
 	}
 	// Note that in this case we can't return a valid errno value.
 	return write2(fd, uintptr(p), n)
-
 }
 
 //go:nosplit
@@ -641,7 +640,6 @@
 		throw("syscall sysconf")
 	}
 	return r
-
 }
 
 // pthread functions return their error code in the main return value
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
index 046d173..92daf13 100644
--- a/src/runtime/os3_solaris.go
+++ b/src/runtime/os3_solaris.go
@@ -133,6 +133,10 @@
 }
 
 func osinit() {
+	// Call miniterrno so that we can safely make system calls
+	// before calling minit on m0.
+	asmcgocall(unsafe.Pointer(abi.FuncPCABI0(miniterrno)), unsafe.Pointer(&libc____errno))
+
 	ncpu = getncpu()
 	if physPageSize == 0 {
 		physPageSize = getPageSize()
@@ -194,11 +198,11 @@
 var urandom_dev = []byte("/dev/urandom\x00")
 
 //go:nosplit
-func getRandomData(r []byte) {
+func readRandom(r []byte) int {
 	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
 	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
 	closefd(fd)
-	extendRandom(r, int(n))
+	return int(n)
 }
 
 func goenvs() {
@@ -227,6 +231,7 @@
 // Called from dropm to undo the effect of an minit.
 func unminit() {
 	unminitSignals()
+	getg().m.procid = 0
 }
 
 // Called from exitm, but not from drop, to undo the effect of thread-owned
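
The getRandomData to readRandom rename repeated across the OS files below also changes the contract: the function now simply reports how many bytes it filled, and no longer stretches a short read with extendRandom. A hedged sketch of the caller side (the generic caller lives outside this section; readTimeRandom is borrowed from the os_windows.go hunk further down as the assumed fallback):

	buf := make([]byte, 32)
	if readRandom(buf) != len(buf) {
		// The OS source came up short (or, as on plan9, is unavailable),
		// so fall back to a weaker time-based source.
		readTimeRandom(buf)
	}
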
diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go
index 0583e9a..3a5078a 100644
--- a/src/runtime/os_aix.go
+++ b/src/runtime/os_aix.go
@@ -93,6 +93,10 @@
 }
 
 func osinit() {
+	// Call miniterrno so that we can safely make system calls
+	// before calling minit on m0.
+	miniterrno()
+
 	ncpu = int32(sysconf(__SC_NPROCESSORS_ONLN))
 	physPageSize = sysconf(__SC_PAGE_SIZE)
 }
@@ -179,6 +183,7 @@
 
 func unminit() {
 	unminitSignals()
+	getg().m.procid = 0
 }
 
 // Called from exitm, but not from drop, to undo the effect of thread-owned
@@ -234,11 +239,11 @@
 var urandom_dev = []byte("/dev/urandom\x00")
 
 //go:nosplit
-func getRandomData(r []byte) {
+func readRandom(r []byte) int {
 	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
 	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
 	closefd(fd)
-	extendRandom(r, int(n))
+	return int(n)
 }
 
 func goenvs() {
diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go
index 105de47..430d186 100644
--- a/src/runtime/os_darwin.go
+++ b/src/runtime/os_darwin.go
@@ -41,7 +41,12 @@
 	if ns >= 0 {
 		start = nanotime()
 	}
-	mp := getg().m
+	g := getg()
+	mp := g.m
+	if g == mp.gsignal {
+		// sema sleep/wakeup are implemented with pthreads, which are not async-signal-safe on Darwin.
+		throw("semasleep on Darwin signal stack")
+	}
 	pthread_mutex_lock(&mp.mutex)
 	for {
 		if mp.count > 0 {
@@ -70,6 +75,9 @@
 
 //go:nosplit
 func semawakeup(mp *m) {
+	if g := getg(); g == g.m.gsignal {
+		throw("semawakeup on Darwin signal stack")
+	}
 	pthread_mutex_lock(&mp.mutex)
 	mp.count++
 	if mp.count > 0 {
@@ -186,11 +194,11 @@
 var urandom_dev = []byte("/dev/urandom\x00")
 
 //go:nosplit
-func getRandomData(r []byte) {
+func readRandom(r []byte) int {
 	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
 	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
 	closefd(fd)
-	extendRandom(r, int(n))
+	return int(n)
 }
 
 func goenvs() {
@@ -336,6 +344,7 @@
 	if !(GOOS == "ios" && GOARCH == "arm64") {
 		unminitSignals()
 	}
+	getg().m.procid = 0
 }
 
 // Called from exitm, but not from drop, to undo the effect of thread-owned
diff --git a/src/runtime/os_darwin_arm64.go b/src/runtime/os_darwin_arm64.go
index b808150..ebc1b13 100644
--- a/src/runtime/os_darwin_arm64.go
+++ b/src/runtime/os_darwin_arm64.go
@@ -6,7 +6,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go
index 8268c7f..2aeea17 100644
--- a/src/runtime/os_dragonfly.go
+++ b/src/runtime/os_dragonfly.go
@@ -181,11 +181,11 @@
 var urandom_dev = []byte("/dev/urandom\x00")
 
 //go:nosplit
-func getRandomData(r []byte) {
+func readRandom(r []byte) int {
 	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
 	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
 	closefd(fd)
-	extendRandom(r, int(n))
+	return int(n)
 }
 
 func goenvs() {
@@ -211,6 +211,7 @@
 //go:nosplit
 func unminit() {
 	unminitSignals()
+	getg().m.procid = 0
 }
 
 // Called from exitm, but not from drop, to undo the effect of thread-owned
diff --git a/src/runtime/os_freebsd.go b/src/runtime/os_freebsd.go
index 3af234e..d0d6f14 100644
--- a/src/runtime/os_freebsd.go
+++ b/src/runtime/os_freebsd.go
@@ -283,11 +283,11 @@
 var urandom_dev = []byte("/dev/urandom\x00")
 
 //go:nosplit
-func getRandomData(r []byte) {
+func readRandom(r []byte) int {
 	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
 	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
 	closefd(fd)
-	extendRandom(r, int(n))
+	return int(n)
 }
 
 func goenvs() {
@@ -328,6 +328,7 @@
 //go:nosplit
 func unminit() {
 	unminitSignals()
+	getg().m.procid = 0
 }
 
 // Called from exitm, but not from drop, to undo the effect of thread-owned
@@ -419,6 +420,7 @@
 const (
 	_AT_NULL     = 0  // Terminates the vector
 	_AT_PAGESZ   = 6  // Page size in bytes
+	_AT_PLATFORM = 15 // string identifying platform
 	_AT_TIMEKEEP = 22 // Pointer to timehands.
 	_AT_HWCAP    = 25 // CPU feature flags
 	_AT_HWCAP2   = 26 // CPU feature flags 2
diff --git a/src/runtime/os_freebsd_arm.go b/src/runtime/os_freebsd_arm.go
index 3feaa5e..5f6bf46 100644
--- a/src/runtime/os_freebsd_arm.go
+++ b/src/runtime/os_freebsd_arm.go
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "internal/cpu"
+import (
+	"internal/cpu"
+	"unsafe"
+)
 
 const (
 	_HWCAP_VFP   = 1 << 6
@@ -12,14 +15,16 @@
 )
 
 func checkgoarm() {
-	if goarm > 5 && cpu.HWCap&_HWCAP_VFP == 0 {
+	if cpu.HWCap&_HWCAP_VFP == 0 && goarmsoftfp == 0 {
 		print("runtime: this CPU has no floating point hardware, so it cannot run\n")
-		print("this GOARM=", goarm, " binary. Recompile using GOARM=5.\n")
+		print("a binary compiled for hard floating point. Recompile adding ,softfloat\n")
+		print("to GOARM.\n")
 		exit(1)
 	}
-	if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 {
+	if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 && goarmsoftfp == 0 {
 		print("runtime: this CPU has no VFPv3 floating point hardware, so it cannot run\n")
-		print("this GOARM=", goarm, " binary. Recompile using GOARM=5 or GOARM=6.\n")
+		print("a binary compiled for VFPv3 hard floating point. Recompile adding ,softfloat\n")
+		print("to GOARM or changing GOARM to 6.\n")
 		exit(1)
 	}
 
@@ -37,12 +42,13 @@
 		cpu.HWCap = uint(val)
 	case _AT_HWCAP2:
 		cpu.HWCap2 = uint(val)
+	case _AT_PLATFORM:
+		cpu.Platform = gostringnocopy((*byte)(unsafe.Pointer(val)))
 	}
 }
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_freebsd_arm64.go b/src/runtime/os_freebsd_arm64.go
index b5b25f0..58bc5d3 100644
--- a/src/runtime/os_freebsd_arm64.go
+++ b/src/runtime/os_freebsd_arm64.go
@@ -6,7 +6,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_js.go b/src/runtime/os_js.go
index 65fb499..099c526 100644
--- a/src/runtime/os_js.go
+++ b/src/runtime/os_js.go
@@ -32,6 +32,11 @@
 //go:noescape
 func getRandomData(r []byte)
 
+func readRandom(r []byte) int {
+	getRandomData(r)
+	return len(r)
+}
+
 func goenvs() {
 	goenvs_unix()
 }
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index 0b05610..0ba607f 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -213,12 +213,13 @@
 }
 
 const (
-	_AT_NULL   = 0  // End of vector
-	_AT_PAGESZ = 6  // System physical page size
-	_AT_HWCAP  = 16 // hardware capability bit vector
-	_AT_SECURE = 23 // secure mode boolean
-	_AT_RANDOM = 25 // introduced in 2.6.29
-	_AT_HWCAP2 = 26 // hardware capability bit vector 2
+	_AT_NULL     = 0  // End of vector
+	_AT_PAGESZ   = 6  // System physical page size
+	_AT_PLATFORM = 15 // string identifying platform
+	_AT_HWCAP    = 16 // hardware capability bit vector
+	_AT_SECURE   = 23 // secure mode boolean
+	_AT_RANDOM   = 25 // introduced in 2.6.29
+	_AT_HWCAP2   = 26 // hardware capability bit vector 2
 )
 
 var procAuxv = []byte("/proc/self/auxv\x00")
@@ -287,10 +288,6 @@
 	auxv = auxvreadbuf[: pairs*2 : pairs*2]
 }
 
-// startupRandomData holds random bytes initialized at startup. These come from
-// the ELF AT_RANDOM auxiliary vector.
-var startupRandomData []byte
-
 // secureMode holds the value of AT_SECURE passed in the auxiliary vector.
 var secureMode bool
 
@@ -302,7 +299,7 @@
 		case _AT_RANDOM:
 			// The kernel provides a pointer to 16-bytes
 			// worth of random data.
-			startupRandomData = (*[16]byte)(unsafe.Pointer(val))[:]
+			startupRand = (*[16]byte)(unsafe.Pointer(val))[:]
 
 		case _AT_PAGESZ:
 			physPageSize = val
@@ -346,39 +343,16 @@
 func osinit() {
 	ncpu = getproccount()
 	physHugePageSize = getHugePageSize()
-	if iscgo {
-		// #42494 glibc and musl reserve some signals for
-		// internal use and require they not be blocked by
-		// the rest of a normal C runtime. When the go runtime
-		// blocks...unblocks signals, temporarily, the blocked
-		// interval of time is generally very short. As such,
-		// these expectations of *libc code are mostly met by
-		// the combined go+cgo system of threads. However,
-		// when go causes a thread to exit, via a return from
-		// mstart(), the combined runtime can deadlock if
-		// these signals are blocked. Thus, don't block these
-		// signals when exiting threads.
-		// - glibc: SIGCANCEL (32), SIGSETXID (33)
-		// - musl: SIGTIMER (32), SIGCANCEL (33), SIGSYNCCALL (34)
-		sigdelset(&sigsetAllExiting, 32)
-		sigdelset(&sigsetAllExiting, 33)
-		sigdelset(&sigsetAllExiting, 34)
-	}
 	osArchInit()
 }
 
 var urandom_dev = []byte("/dev/urandom\x00")
 
-func getRandomData(r []byte) {
-	if startupRandomData != nil {
-		n := copy(r, startupRandomData)
-		extendRandom(r, n)
-		return
-	}
+func readRandom(r []byte) int {
 	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
 	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
 	closefd(fd)
-	extendRandom(r, int(n))
+	return int(n)
 }
 
 func goenvs() {
@@ -420,6 +394,7 @@
 //go:nosplit
 func unminit() {
 	unminitSignals()
+	getg().m.procid = 0
 }
 
 // Called from exitm, but not from drop, to undo the effect of thread-owned
@@ -672,7 +647,7 @@
 	// activates may do a couple milliseconds of GC-related work and nothing
 	// else in the few seconds that the profiler observes.
 	spec := new(itimerspec)
-	spec.it_value.setNsec(1 + int64(fastrandn(uint32(1e9/hz))))
+	spec.it_value.setNsec(1 + int64(cheaprandn(uint32(1e9/hz))))
 	spec.it_interval.setNsec(1e9 / int64(hz))
 
 	var timerid int32
@@ -742,7 +717,7 @@
 	// N.B. Internally, this function does not depend on STW to
 	// successfully change every thread. It is only needed for user
 	// expectations, per above.
-	stopTheWorld(stwAllThreadsSyscall)
+	stw := stopTheWorld(stwAllThreadsSyscall)
 
 	// This function depends on several properties:
 	//
@@ -786,7 +761,7 @@
 	if errno != 0 {
 		releasem(getg().m)
 		allocmLock.unlock()
-		startTheWorld()
+		startTheWorld(stw)
 		return r1, r2, errno
 	}
 
@@ -871,7 +846,7 @@
 
 	releasem(getg().m)
 	allocmLock.unlock()
-	startTheWorld()
+	startTheWorld(stw)
 
 	return r1, r2, errno
 }
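
A pattern repeated throughout this patch: stopTheWorld now returns a handle that must be handed back to startTheWorld, as in ReadMemStats and doAllThreadsSyscall above. The shape, as a runtime-internal sketch:

	stw := stopTheWorld(stwAllThreadsSyscall)
	// ... work that requires the world to be stopped ...
	startTheWorld(stw)

Threading the handle through keeps each stop paired with its matching start, and presumably lets the restart side account the pause against the same reason; only the call sites, not the handle's contents, are visible in this section.
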
diff --git a/src/runtime/os_linux_arm.go b/src/runtime/os_linux_arm.go
index bd3ab44..5e1274e 100644
--- a/src/runtime/os_linux_arm.go
+++ b/src/runtime/os_linux_arm.go
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "internal/cpu"
+import (
+	"internal/cpu"
+	"unsafe"
+)
 
 const (
 	_HWCAP_VFP   = 1 << 6  // introduced in at least 2.6.11
@@ -20,14 +23,16 @@
 	if GOOS == "android" {
 		return
 	}
-	if goarm > 5 && cpu.HWCap&_HWCAP_VFP == 0 {
+	if cpu.HWCap&_HWCAP_VFP == 0 && goarmsoftfp == 0 {
 		print("runtime: this CPU has no floating point hardware, so it cannot run\n")
-		print("this GOARM=", goarm, " binary. Recompile using GOARM=5.\n")
+		print("a binary compiled for hard floating point. Recompile adding ,softfloat\n")
+		print("to GOARM.\n")
 		exit(1)
 	}
-	if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 {
+	if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 && goarmsoftfp == 0 {
 		print("runtime: this CPU has no VFPv3 floating point hardware, so it cannot run\n")
-		print("this GOARM=", goarm, " binary. Recompile using GOARM=5 or GOARM=6.\n")
+		print("a binary compiled for VFPv3 hard floating point. Recompile adding ,softfloat\n")
+		print("to GOARM or changing GOARM to 6.\n")
 		exit(1)
 	}
 }
@@ -38,6 +43,8 @@
 		cpu.HWCap = uint(val)
 	case _AT_HWCAP2:
 		cpu.HWCap2 = uint(val)
+	case _AT_PLATFORM:
+		cpu.Platform = gostringnocopy((*byte)(unsafe.Pointer(val)))
 	}
 }
 
@@ -45,7 +52,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_linux_arm64.go b/src/runtime/os_linux_arm64.go
index 2daa56f..62cead1 100644
--- a/src/runtime/os_linux_arm64.go
+++ b/src/runtime/os_linux_arm64.go
@@ -19,7 +19,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_linux_mips64x.go b/src/runtime/os_linux_mips64x.go
index 11d35bc..770cc27 100644
--- a/src/runtime/os_linux_mips64x.go
+++ b/src/runtime/os_linux_mips64x.go
@@ -19,7 +19,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_linux_mipsx.go b/src/runtime/os_linux_mipsx.go
index cdf83ff..3807e6d 100644
--- a/src/runtime/os_linux_mipsx.go
+++ b/src/runtime/os_linux_mipsx.go
@@ -13,7 +13,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_linux_s390x.go b/src/runtime/os_linux_s390x.go
index b9651f1..0a1d959 100644
--- a/src/runtime/os_linux_s390x.go
+++ b/src/runtime/os_linux_s390x.go
@@ -6,6 +6,10 @@
 
 import "internal/cpu"
 
+const (
+	_HWCAP_VX = 1 << 11 // vector facility
+)
+
 func archauxv(tag, val uintptr) {
 	switch tag {
 	case _AT_HWCAP:
@@ -14,3 +18,14 @@
 }
 
 func osArchInit() {}
+
+func checkS390xCPU() {
+	// Check whether the present z-system has the hardware capability to carry out
+	// floating point operations, i.e. whether hwcap reflects the availability of
+	// the necessary floating point hardware (HasVX).
+	// Starting with Go 1.19, z13 is the minimum machine level for running Go on LoZ.
+	if cpu.HWCap&_HWCAP_VX == 0 {
+		print("runtime: This CPU has no floating point hardware, so this program cannot be run. \n")
+		exit(1)
+	}
+}
diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go
index b50ed4b..8abb688 100644
--- a/src/runtime/os_netbsd.go
+++ b/src/runtime/os_netbsd.go
@@ -274,11 +274,11 @@
 var urandom_dev = []byte("/dev/urandom\x00")
 
 //go:nosplit
-func getRandomData(r []byte) {
+func readRandom(r []byte) int {
 	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
 	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
 	closefd(fd)
-	extendRandom(r, int(n))
+	return int(n)
 }
 
 func goenvs() {
@@ -316,6 +316,8 @@
 //go:nosplit
 func unminit() {
 	unminitSignals()
+	// Don't clear procid; it is used by locking (semawake), and locking
+	// must continue working after unminit.
 }
 
 // Called from exitm, but not from drop, to undo the effect of thread-owned
diff --git a/src/runtime/os_netbsd_arm.go b/src/runtime/os_netbsd_arm.go
index 5fb4e08..7494a38 100644
--- a/src/runtime/os_netbsd_arm.go
+++ b/src/runtime/os_netbsd_arm.go
@@ -31,7 +31,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_netbsd_arm64.go b/src/runtime/os_netbsd_arm64.go
index 2dda9c9..48841af 100644
--- a/src/runtime/os_netbsd_arm64.go
+++ b/src/runtime/os_netbsd_arm64.go
@@ -20,7 +20,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go
index 500286a..8569799 100644
--- a/src/runtime/os_openbsd.go
+++ b/src/runtime/os_openbsd.go
@@ -32,9 +32,6 @@
 
 // From OpenBSD's <sys/sysctl.h>
 const (
-	_CTL_KERN   = 1
-	_KERN_OSREV = 3
-
 	_CTL_HW        = 6
 	_HW_NCPU       = 3
 	_HW_PAGESIZE   = 7
@@ -86,13 +83,6 @@
 	return 0
 }
 
-func getOSRev() int {
-	if osrev, ok := sysctlInt([]uint32{_CTL_KERN, _KERN_OSREV}); ok {
-		return int(osrev)
-	}
-	return 0
-}
-
 //go:nosplit
 func semacreate(mp *m) {
 }
@@ -147,17 +137,16 @@
 func osinit() {
 	ncpu = getncpu()
 	physPageSize = getPageSize()
-	haveMapStack = getOSRev() >= 201805 // OpenBSD 6.3
 }
 
 var urandom_dev = []byte("/dev/urandom\x00")
 
 //go:nosplit
-func getRandomData(r []byte) {
+func readRandom(r []byte) int {
 	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
 	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
 	closefd(fd)
-	extendRandom(r, int(n))
+	return int(n)
 }
 
 func goenvs() {
@@ -187,6 +176,7 @@
 //go:nosplit
 func unminit() {
 	unminitSignals()
+	getg().m.procid = 0
 }
 
 // Called from exitm, but not from drop, to undo the effect of thread-owned
@@ -263,15 +253,7 @@
 	return true
 }
 
-var haveMapStack = false
-
 func osStackAlloc(s *mspan) {
-	// OpenBSD 6.4+ requires that stacks be mapped with MAP_STACK.
-	// It will check this on entry to system calls, traps, and
-	// when switching to the alternate system stack.
-	//
-	// This function is called before s is used for any data, so
-	// it's safe to simply re-map it.
 	osStackRemap(s, _MAP_STACK)
 }
 
@@ -281,13 +263,6 @@
 }
 
 func osStackRemap(s *mspan, flags int32) {
-	if !haveMapStack {
-		// OpenBSD prior to 6.3 did not have MAP_STACK and so
-		// the following mmap will fail. But it also didn't
-		// require MAP_STACK (obviously), so there's no need
-		// to do the mmap.
-		return
-	}
 	a, err := mmap(unsafe.Pointer(s.base()), s.npages*pageSize, _PROT_READ|_PROT_WRITE, _MAP_PRIVATE|_MAP_ANON|_MAP_FIXED|flags, -1, 0)
 	if err != 0 || uintptr(a) != s.base() {
 		print("runtime: remapping stack memory ", hex(s.base()), " ", s.npages*pageSize, " a=", a, " err=", err, "\n")
diff --git a/src/runtime/os_openbsd_arm.go b/src/runtime/os_openbsd_arm.go
index 0a24096..d5dc8cb 100644
--- a/src/runtime/os_openbsd_arm.go
+++ b/src/runtime/os_openbsd_arm.go
@@ -17,7 +17,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_openbsd_arm64.go b/src/runtime/os_openbsd_arm64.go
index d71de7d..4b2c6e3 100644
--- a/src/runtime/os_openbsd_arm64.go
+++ b/src/runtime/os_openbsd_arm64.go
@@ -6,7 +6,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_openbsd_mips64.go b/src/runtime/os_openbsd_mips64.go
index ae220cd..e5eeb2d 100644
--- a/src/runtime/os_openbsd_mips64.go
+++ b/src/runtime/os_openbsd_mips64.go
@@ -6,7 +6,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go
index f4ff4d5..77446d0 100644
--- a/src/runtime/os_plan9.go
+++ b/src/runtime/os_plan9.go
@@ -327,24 +327,8 @@
 }
 
 //go:nosplit
-func getRandomData(r []byte) {
-	// inspired by wyrand see hash32.go for detail
-	t := nanotime()
-	v := getg().m.procid ^ uint64(t)
-
-	for len(r) > 0 {
-		v ^= 0xa0761d6478bd642f
-		v *= 0xe7037ed1a0b428db
-		size := 8
-		if len(r) < 8 {
-			size = len(r)
-		}
-		for i := 0; i < size; i++ {
-			r[i] = byte(v >> (8 * i))
-		}
-		r = r[size:]
-		v = v>>32 | v<<32
-	}
+func readRandom(r []byte) int {
+	return 0
 }
 
 func initsig(preinit bool) {
diff --git a/src/runtime/os_plan9_arm.go b/src/runtime/os_plan9_arm.go
index f165a34..cce6229 100644
--- a/src/runtime/os_plan9_arm.go
+++ b/src/runtime/os_plan9_arm.go
@@ -10,7 +10,6 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
 	return nanotime()
 }
diff --git a/src/runtime/os_wasip1.go b/src/runtime/os_wasip1.go
index 8811bb6..acac2b3 100644
--- a/src/runtime/os_wasip1.go
+++ b/src/runtime/os_wasip1.go
@@ -180,10 +180,11 @@
 	}
 }
 
-func getRandomData(r []byte) {
+func readRandom(r []byte) int {
 	if random_get(unsafe.Pointer(&r[0]), size(len(r))) != 0 {
-		throw("random_get failed")
+		return 0
 	}
+	return len(r)
 }
 
 func goenvs() {
diff --git a/src/runtime/os_wasm.go b/src/runtime/os_wasm.go
index bf78dfb..ce260de 100644
--- a/src/runtime/os_wasm.go
+++ b/src/runtime/os_wasm.go
@@ -122,9 +122,7 @@
 
 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
 
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index f5c2429..6533b64 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -16,6 +16,7 @@
 	_NSIG = 65
 )
 
+//go:cgo_import_dynamic runtime._AddVectoredContinueHandler AddVectoredContinueHandler%2 "kernel32.dll"
 //go:cgo_import_dynamic runtime._AddVectoredExceptionHandler AddVectoredExceptionHandler%2 "kernel32.dll"
 //go:cgo_import_dynamic runtime._CloseHandle CloseHandle%1 "kernel32.dll"
 //go:cgo_import_dynamic runtime._CreateEventA CreateEventA%4 "kernel32.dll"
@@ -42,8 +43,11 @@
 //go:cgo_import_dynamic runtime._LoadLibraryExW LoadLibraryExW%3 "kernel32.dll"
 //go:cgo_import_dynamic runtime._LoadLibraryW LoadLibraryW%1 "kernel32.dll"
 //go:cgo_import_dynamic runtime._PostQueuedCompletionStatus PostQueuedCompletionStatus%4 "kernel32.dll"
+//go:cgo_import_dynamic runtime._QueryPerformanceCounter QueryPerformanceCounter%1 "kernel32.dll"
 //go:cgo_import_dynamic runtime._RaiseFailFastException RaiseFailFastException%3 "kernel32.dll"
 //go:cgo_import_dynamic runtime._ResumeThread ResumeThread%1 "kernel32.dll"
+//go:cgo_import_dynamic runtime._RtlLookupFunctionEntry RtlLookupFunctionEntry%3 "kernel32.dll"
+//go:cgo_import_dynamic runtime._RtlVirtualUnwind  RtlVirtualUnwind%8 "kernel32.dll"
 //go:cgo_import_dynamic runtime._SetConsoleCtrlHandler SetConsoleCtrlHandler%2 "kernel32.dll"
 //go:cgo_import_dynamic runtime._SetErrorMode SetErrorMode%1 "kernel32.dll"
 //go:cgo_import_dynamic runtime._SetEvent SetEvent%1 "kernel32.dll"
@@ -70,6 +74,7 @@
 	// Following syscalls are available on every Windows PC.
 	// All these variables are set by the Windows executable
 	// loader before the Go program starts.
+	_AddVectoredContinueHandler,
 	_AddVectoredExceptionHandler,
 	_CloseHandle,
 	_CreateEventA,
@@ -91,16 +96,16 @@
 	_GetStdHandle,
 	_GetSystemDirectoryA,
 	_GetSystemInfo,
-	_GetSystemTimeAsFileTime,
 	_GetThreadContext,
 	_SetThreadContext,
 	_LoadLibraryExW,
 	_LoadLibraryW,
 	_PostQueuedCompletionStatus,
 	_QueryPerformanceCounter,
-	_QueryPerformanceFrequency,
 	_RaiseFailFastException,
 	_ResumeThread,
+	_RtlLookupFunctionEntry,
+	_RtlVirtualUnwind,
 	_SetConsoleCtrlHandler,
 	_SetErrorMode,
 	_SetEvent,
@@ -122,25 +127,12 @@
 	_WriteFile,
 	_ stdFunction
 
-	// Following syscalls are only available on some Windows PCs.
-	// We will load syscalls, if available, before using them.
-	_AddVectoredContinueHandler,
-	_ stdFunction
-
-	// Use RtlGenRandom to generate cryptographically random data.
-	// This approach has been recommended by Microsoft (see issue
-	// 15589 for details).
-	// The RtlGenRandom is not listed in advapi32.dll, instead
-	// RtlGenRandom function can be found by searching for SystemFunction036.
-	// Also some versions of Mingw cannot link to SystemFunction036
-	// when building executable as Cgo. So load SystemFunction036
-	// manually during runtime startup.
-	_RtlGenRandom stdFunction
+	// Use ProcessPrng to generate cryptographically random data.
+	_ProcessPrng stdFunction
 
 	// Load ntdll.dll manually during startup, otherwise Mingw
 	// links wrong printf function to cgo executable (see issue
 	// 12030 for details).
-	_NtWaitForSingleObject  stdFunction
 	_RtlGetCurrentPeb       stdFunction
 	_RtlGetNtVersionNumbers stdFunction
 
@@ -152,12 +144,11 @@
 )
 
 var (
-	advapi32dll = [...]uint16{'a', 'd', 'v', 'a', 'p', 'i', '3', '2', '.', 'd', 'l', 'l', 0}
-	kernel32dll = [...]uint16{'k', 'e', 'r', 'n', 'e', 'l', '3', '2', '.', 'd', 'l', 'l', 0}
-	ntdlldll    = [...]uint16{'n', 't', 'd', 'l', 'l', '.', 'd', 'l', 'l', 0}
-	powrprofdll = [...]uint16{'p', 'o', 'w', 'r', 'p', 'r', 'o', 'f', '.', 'd', 'l', 'l', 0}
-	winmmdll    = [...]uint16{'w', 'i', 'n', 'm', 'm', '.', 'd', 'l', 'l', 0}
-	ws2_32dll   = [...]uint16{'w', 's', '2', '_', '3', '2', '.', 'd', 'l', 'l', 0}
+	bcryptprimitivesdll = [...]uint16{'b', 'c', 'r', 'y', 'p', 't', 'p', 'r', 'i', 'm', 'i', 't', 'i', 'v', 'e', 's', '.', 'd', 'l', 'l', 0}
+	ntdlldll            = [...]uint16{'n', 't', 'd', 'l', 'l', '.', 'd', 'l', 'l', 0}
+	powrprofdll         = [...]uint16{'p', 'o', 'w', 'r', 'p', 'r', 'o', 'f', '.', 'd', 'l', 'l', 0}
+	winmmdll            = [...]uint16{'w', 'i', 'n', 'm', 'm', '.', 'd', 'l', 'l', 0}
+	ws2_32dll           = [...]uint16{'w', 's', '2', '_', '3', '2', '.', 'd', 'l', 'l', 0}
 )
 
 // Function to be called by windows CreateThread
@@ -243,40 +234,29 @@
 	sysDirectoryLen = l + 1
 }
 
+//go:linkname windows_GetSystemDirectory internal/syscall/windows.GetSystemDirectory
+func windows_GetSystemDirectory() string {
+	return unsafe.String(&sysDirectory[0], sysDirectoryLen)
+}
+
 func windowsLoadSystemLib(name []uint16) uintptr {
 	return stdcall3(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
 }
 
-const haveCputicksAsm = GOARCH == "386" || GOARCH == "amd64"
-
 func loadOptionalSyscalls() {
-	k32 := windowsLoadSystemLib(kernel32dll[:])
-	if k32 == 0 {
-		throw("kernel32.dll not found")
+	bcryptPrimitives := windowsLoadSystemLib(bcryptprimitivesdll[:])
+	if bcryptPrimitives == 0 {
+		throw("bcryptprimitives.dll not found")
 	}
-	_AddVectoredContinueHandler = windowsFindfunc(k32, []byte("AddVectoredContinueHandler\000"))
-
-	a32 := windowsLoadSystemLib(advapi32dll[:])
-	if a32 == 0 {
-		throw("advapi32.dll not found")
-	}
-	_RtlGenRandom = windowsFindfunc(a32, []byte("SystemFunction036\000"))
+	_ProcessPrng = windowsFindfunc(bcryptPrimitives, []byte("ProcessPrng\000"))
 
 	n32 := windowsLoadSystemLib(ntdlldll[:])
 	if n32 == 0 {
 		throw("ntdll.dll not found")
 	}
-	_NtWaitForSingleObject = windowsFindfunc(n32, []byte("NtWaitForSingleObject\000"))
 	_RtlGetCurrentPeb = windowsFindfunc(n32, []byte("RtlGetCurrentPeb\000"))
 	_RtlGetNtVersionNumbers = windowsFindfunc(n32, []byte("RtlGetNtVersionNumbers\000"))
 
-	if !haveCputicksAsm {
-		_QueryPerformanceCounter = windowsFindfunc(k32, []byte("QueryPerformanceCounter\000"))
-		if _QueryPerformanceCounter == nil {
-			throw("could not find QPC syscalls")
-		}
-	}
-
 	m32 := windowsLoadSystemLib(winmmdll[:])
 	if m32 == 0 {
 		throw("winmm.dll not found")
@@ -295,11 +275,6 @@
 	if _WSAGetOverlappedResult == nil {
 		throw("WSAGetOverlappedResult not found")
 	}
-
-	if windowsFindfunc(n32, []byte("wine_get_version\000")) != nil {
-		// running on Wine
-		initWine(k32)
-	}
 }
 
 func monitorSuspendResume() {
@@ -493,7 +468,10 @@
 	// strictly necessary, but is a nice validity check for the near to
 	// medium term, when this functionality is still relatively new in
 	// Windows.
-	getRandomData(longFileName[len(longFileName)-33 : len(longFileName)-1])
+	targ := longFileName[len(longFileName)-33 : len(longFileName)-1]
+	if readRandom(targ) != len(targ) {
+		readTimeRandom(targ)
+	}
 	start := copy(longFileName[:], sysDirectory[:sysDirectoryLen])
 	const dig = "0123456789abcdef"
 	for i := 0; i < 32; i++ {
@@ -543,84 +521,13 @@
 	stdcall2(_SetProcessPriorityBoost, currentProcess, 1)
 }
 
-// useQPCTime controls whether time.now and nanotime use QueryPerformanceCounter.
-// This is only set to 1 when running under Wine.
-var useQPCTime uint8
-
-var qpcStartCounter int64
-var qpcMultiplier int64
-
 //go:nosplit
-func nanotimeQPC() int64 {
-	var counter int64 = 0
-	stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
-
-	// returns number of nanoseconds
-	return (counter - qpcStartCounter) * qpcMultiplier
-}
-
-//go:nosplit
-func nowQPC() (sec int64, nsec int32, mono int64) {
-	var ft int64
-	stdcall1(_GetSystemTimeAsFileTime, uintptr(unsafe.Pointer(&ft)))
-
-	t := (ft - 116444736000000000) * 100
-
-	sec = t / 1000000000
-	nsec = int32(t - sec*1000000000)
-
-	mono = nanotimeQPC()
-	return
-}
-
-func initWine(k32 uintptr) {
-	_GetSystemTimeAsFileTime = windowsFindfunc(k32, []byte("GetSystemTimeAsFileTime\000"))
-	if _GetSystemTimeAsFileTime == nil {
-		throw("could not find GetSystemTimeAsFileTime() syscall")
-	}
-
-	_QueryPerformanceCounter = windowsFindfunc(k32, []byte("QueryPerformanceCounter\000"))
-	_QueryPerformanceFrequency = windowsFindfunc(k32, []byte("QueryPerformanceFrequency\000"))
-	if _QueryPerformanceCounter == nil || _QueryPerformanceFrequency == nil {
-		throw("could not find QPC syscalls")
-	}
-
-	// We can not simply fallback to GetSystemTimeAsFileTime() syscall, since its time is not monotonic,
-	// instead we use QueryPerformanceCounter family of syscalls to implement monotonic timer
-	// https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx
-
-	var tmp int64
-	stdcall1(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&tmp)))
-	if tmp == 0 {
-		throw("QueryPerformanceFrequency syscall returned zero, running on unsupported hardware")
-	}
-
-	// This should not overflow, it is a number of ticks of the performance counter per second,
-	// its resolution is at most 10 per usecond (on Wine, even smaller on real hardware), so it will be at most 10 millions here,
-	// panic if overflows.
-	if tmp > (1<<31 - 1) {
-		throw("QueryPerformanceFrequency overflow 32 bit divider, check nosplit discussion to proceed")
-	}
-	qpcFrequency := int32(tmp)
-	stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&qpcStartCounter)))
-
-	// Since we are supposed to run this time calls only on Wine, it does not lose precision,
-	// since Wine's timer is kind of emulated at 10 Mhz, so it will be a nice round multiplier of 100
-	// but for general purpose system (like 3.3 Mhz timer on i7) it will not be very precise.
-	// We have to do it this way (or similar), since multiplying QPC counter by 100 millions overflows
-	// int64 and resulted time will always be invalid.
-	qpcMultiplier = int64(timediv(1000000000, qpcFrequency, nil))
-
-	useQPCTime = 1
-}
-
-//go:nosplit
-func getRandomData(r []byte) {
+func readRandom(r []byte) int {
 	n := 0
-	if stdcall2(_RtlGenRandom, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
+	if stdcall2(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
 		n = len(r)
 	}
-	extendRandom(r, n)
+	return n
 }
 
 func goenvs() {
@@ -1005,6 +912,8 @@
 		mp.thread = 0
 	}
 	unlock(&mp.threadLock)
+
+	mp.procid = 0
 }
 
 // Called from exitm, but not from drop, to undo the effect of thread-owned
@@ -1026,6 +935,22 @@
 	}
 }
 
+// asmstdcall_trampoline calls asmstdcall converting from Go to C calling convention.
+func asmstdcall_trampoline(args unsafe.Pointer)
+
+// stdcall_no_g calls asmstdcall on os stack without using g.
+//
+//go:nosplit
+func stdcall_no_g(fn stdFunction, n int, args uintptr) uintptr {
+	libcall := libcall{
+		fn:   uintptr(unsafe.Pointer(fn)),
+		n:    uintptr(n),
+		args: args,
+	}
+	asmstdcall_trampoline(noescape(unsafe.Pointer(&libcall)))
+	return libcall.r1
+}
+
 // Calling stdcall on os stack.
 // May run during STW, so write barriers are not allowed.
 //
@@ -1056,7 +981,7 @@
 func stdcall0(fn stdFunction) uintptr {
 	mp := getg().m
 	mp.libcall.n = 0
-	mp.libcall.args = uintptr(noescape(unsafe.Pointer(&fn))) // it's unused but must be non-nil, otherwise crashes
+	mp.libcall.args = 0
 	return stdcall(fn)
 }
 
@@ -1123,39 +1048,52 @@
 	return stdcall(fn)
 }
 
+//go:nosplit
+//go:cgo_unsafe_args
+func stdcall8(fn stdFunction, a0, a1, a2, a3, a4, a5, a6, a7 uintptr) uintptr {
+	mp := getg().m
+	mp.libcall.n = 8
+	mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0)))
+	return stdcall(fn)
+}
+
 // These must run on the system stack only.
-func usleep2(dt int32)
-func switchtothread()
 
 //go:nosplit
 func osyield_no_g() {
-	switchtothread()
+	stdcall_no_g(_SwitchToThread, 0, 0)
 }
 
 //go:nosplit
 func osyield() {
-	systemstack(switchtothread)
+	systemstack(func() {
+		stdcall0(_SwitchToThread)
+	})
 }
 
 //go:nosplit
 func usleep_no_g(us uint32) {
-	dt := -10 * int32(us) // relative sleep (negative), 100ns units
-	usleep2(dt)
+	timeout := uintptr(us) / 1000 // ms units
+	args := [...]uintptr{_INVALID_HANDLE_VALUE, timeout}
+	stdcall_no_g(_WaitForSingleObject, len(args), uintptr(noescape(unsafe.Pointer(&args[0]))))
 }
 
 //go:nosplit
 func usleep(us uint32) {
 	systemstack(func() {
-		dt := -10 * int64(us) // relative sleep (negative), 100ns units
+		var h, timeout uintptr
 		// If the high-res timer is available and its handle has been allocated for this m, use it.
 		// Otherwise fall back to the low-res one, which doesn't need a handle.
 		if haveHighResTimer && getg().m.highResTimer != 0 {
-			h := getg().m.highResTimer
+			h = getg().m.highResTimer
+			dt := -10 * int64(us) // relative sleep (negative), 100ns units
 			stdcall6(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0)
-			stdcall3(_NtWaitForSingleObject, h, 0, 0)
+			timeout = _INFINITE
 		} else {
-			usleep2(int32(dt))
+			h = _INVALID_HANDLE_VALUE
+			timeout = uintptr(us) / 1000 // ms units
 		}
+		stdcall2(_WaitForSingleObject, h, timeout)
 	})
 }
 
@@ -1266,7 +1204,12 @@
 
 func setProcessCPUProfiler(hz int32) {
 	if profiletimer == 0 {
-		timer := stdcall3(_CreateWaitableTimerA, 0, 0, 0)
+		var timer uintptr
+		if haveHighResTimer {
+			timer = createHighResTimer()
+		} else {
+			timer = stdcall3(_CreateWaitableTimerA, 0, 0, 0)
+		}
 		atomic.Storeuintptr(&profiletimer, timer)
 		newm(profileLoop, nil, -1)
 	}
diff --git a/src/runtime/pagetrace_on.go b/src/runtime/pagetrace_on.go
index 0e621cb..f82521c 100644
--- a/src/runtime/pagetrace_on.go
+++ b/src/runtime/pagetrace_on.go
@@ -317,7 +317,7 @@
 		pageTrace.enabled = false
 
 		// Execute a ragged barrier, flushing each trace buffer.
-		forEachP(func(pp *p) {
+		forEachP(waitReasonPageTraceFlush, func(pp *p) {
 			if pp.pageTraceBuf.buf != nil {
 				pp.pageTraceBuf = pp.pageTraceBuf.flush(pp.id, nanotime())
 			}
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 39c27a4..36d658a 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -276,9 +276,6 @@
 	}
 
 	d := newdefer()
-	if d._panic != nil {
-		throw("deferproc: d.panic != nil after newdefer")
-	}
 	d.link = gp._defer
 	gp._defer = d
 	d.fn = fn
@@ -299,6 +296,163 @@
 	// been set and must not be clobbered.
 }
 
+var rangeExitError = error(errorString("range function continued iteration after exit"))
+
+//go:noinline
+func panicrangeexit() {
+	panic(rangeExitError)
+}
+
+// deferrangefunc is called by functions that are about to
+// execute a range-over-function loop in which the loop body
+// may execute a defer statement. That defer needs to add to
+// the chain for the current function, not the func literal synthesized
+// to represent the loop body. To do that, the original function
+// calls deferrangefunc to obtain an opaque token representing
+// the current frame, and then the loop body uses deferprocat
+// instead of deferproc to add to that frame's defer lists.
+//
+// The token is an 'any' with underlying type *atomic.Pointer[_defer].
+// It is the atomically-updated head of a linked list of _defer structs
+// representing deferred calls. At the same time, we create a _defer
+// struct on the main g._defer list with d.head set to this head pointer.
+//
+// The g._defer list is now a linked list of deferred calls,
+// but an atomic list hanging off:
+//
+//		g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
+//	                             | .head
+//	                             |
+//	                             +--> dY -> dX -> nil
+//
+// with each -> indicating a d.link pointer, and where drangefunc
+// has the d.rangefunc = true bit set.
+// Note that the function being ranged over may have added
+// its own defers (d4 and d3), so drangefunc need not be at the
+// top of the list when deferprocat is used. This is why we pass
+// the atomic head explicitly.
+//
+// To keep misbehaving programs from crashing the runtime,
+// deferprocat pushes new defers onto the .head list atomically.
+// The fact that it is a separate list from the main goroutine
+// defer list means that the main goroutine's defers can still
+// be handled non-atomically.
+//
+// In the diagram, dY and dX are meant to be processed when
+// drangefunc would be processed, which is to say the defer order
+// should be d4, d3, dY, dX, d2, d1. To make that happen,
+// when defer processing reaches a d with rangefunc=true,
+// it calls deferconvert to atomically take the extras
+// away from d.head and then adds them to the main list.
+//
+// That is, deferconvert changes this list:
+//
+//		g._defer => drangefunc -> d2 -> d1 -> nil
+//	                 | .head
+//	                 |
+//	                 +--> dY -> dX -> nil
+//
+// into this list:
+//
+//	g._defer => dY -> dX -> d2 -> d1 -> nil
+//
+// It also poisons *drangefunc.head so that any future
+// deferprocat using that head will throw.
+// (The atomic head is ordinary garbage collected memory so that
+// it's not a problem if user code holds onto it beyond
+// the lifetime of drangefunc.)
+//
+// TODO: We could arrange for the compiler to call into the
+// runtime after the loop finishes normally, to do an eager
+// deferconvert, which would catch calling the loop body
+// and having it defer after the loop is done. If we have a
+// more general catch of loop body misuse, though, this
+// might not be worth worrying about in addition.
+//
+// See also ../cmd/compile/internal/rangefunc/rewrite.go.
+func deferrangefunc() any {
+	gp := getg()
+	if gp.m.curg != gp {
+		// go code on the system stack can't defer
+		throw("defer on system stack")
+	}
+
+	d := newdefer()
+	d.link = gp._defer
+	gp._defer = d
+	d.pc = getcallerpc()
+	// We must not be preempted between calling getcallersp and
+	// storing it to d.sp because getcallersp's result is a
+	// uintptr stack pointer.
+	d.sp = getcallersp()
+
+	d.rangefunc = true
+	d.head = new(atomic.Pointer[_defer])
+
+	return d.head
+}
+
+// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
+func badDefer() *_defer {
+	return (*_defer)(unsafe.Pointer(uintptr(1)))
+}
+
+// deferprocat is like deferproc but adds to the atomic list represented by frame.
+// See the doc comment for deferrangefunc for details.
+func deferprocat(fn func(), frame any) {
+	head := frame.(*atomic.Pointer[_defer])
+	if raceenabled {
+		racewritepc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferprocat))
+	}
+	d1 := newdefer()
+	d1.fn = fn
+	for {
+		d1.link = head.Load()
+		if d1.link == badDefer() {
+			throw("defer after range func returned")
+		}
+		if head.CompareAndSwap(d1.link, d1) {
+			break
+		}
+	}
+
+	// Must be last - see deferproc above.
+	return0()
+}
+
+// deferconvert converts a rangefunc defer list into an ordinary list.
+// See the doc comment for deferrangefunc for details.
+func deferconvert(d *_defer) *_defer {
+	head := d.head
+	if raceenabled {
+		racereadpc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferconvert))
+	}
+	tail := d.link
+	d.rangefunc = false
+	d0 := d
+
+	for {
+		d = head.Load()
+		if head.CompareAndSwap(d, badDefer()) {
+			break
+		}
+	}
+	if d == nil {
+		freedefer(d0)
+		return tail
+	}
+	for d1 := d; ; d1 = d1.link {
+		d1.sp = d0.sp
+		d1.pc = d0.pc
+		if d1.link == nil {
+			d1.link = tail
+			break
+		}
+	}
+	freedefer(d0)
+	return d
+}
+
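
A user-level illustration of the ordering described in the deferrangefunc comment above (illustrative only: it assumes the range-over-func syntax, experimental in Go 1.22 and stable in Go 1.23, and the iterator name pairs is made up):

package main

import "fmt"

// pairs is a hypothetical iterator in the func(yield func(int) bool)
// form consumed by range-over-function loops.
func pairs(yield func(int) bool) {
	_ = yield(1) && yield(2)
}

func main() {
	for v := range pairs {
		// The loop body is compiled into a synthesized func literal, but this
		// defer belongs to main's frame (routed through deferrangefunc and
		// deferprocat), so it runs only when main returns.
		defer fmt.Println("deferred", v)
	}
	fmt.Println("loop done")
	// Output: loop done, deferred 2, deferred 1.
}
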
 // deferprocStack queues a new deferred function with a defer record on the stack.
 // The defer record must have its fn field initialized.
 // All other fields can contain junk.
@@ -314,17 +468,15 @@
 	// fn is already set.
 	// The other fields are junk on entry to deferprocStack and
 	// are initialized here.
-	d.started = false
 	d.heap = false
-	d.openDefer = false
+	d.rangefunc = false
 	d.sp = getcallersp()
 	d.pc = getcallerpc()
-	d.framepc = 0
-	d.varp = 0
 	// The lines below implement:
 	//   d.panic = nil
 	//   d.fd = nil
 	//   d.link = gp._defer
+	//   d.head = nil
 	//   gp._defer = d
 	// But without write barriers. The first three are writes to
 	// the stack so they don't need a write barrier, and furthermore
@@ -332,9 +484,8 @@
 	// The fourth write does not require a write barrier because we
 	// explicitly mark all the defer structures, so we don't need to
 	// keep track of pointers to them with a write barrier.
-	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
-	*(*uintptr)(unsafe.Pointer(&d.fd)) = 0
 	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
+	*(*uintptr)(unsafe.Pointer(&d.head)) = 0
 	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
 
 	return0()
@@ -390,9 +541,6 @@
 	d.link = nil
 	// After this point we can copy the stack.
 
-	if d._panic != nil {
-		freedeferpanic()
-	}
 	if d.fn != nil {
 		freedeferfn()
 	}
@@ -433,11 +581,6 @@
 
 // Separate function so that it can split stack.
 // Windows otherwise runs out of stack space.
-func freedeferpanic() {
-	// _panic must be cleared before d is unlinked from gp.
-	throw("freedefer with d._panic != nil")
-}
-
 func freedeferfn() {
 	// fn must be cleared before d is unlinked from gp.
 	throw("freedefer with d.fn != nil")
@@ -447,33 +590,15 @@
 // The compiler inserts a call to this at the end of any
 // function which calls defer.
 func deferreturn() {
-	gp := getg()
-	for {
-		d := gp._defer
-		if d == nil {
-			return
-		}
-		sp := getcallersp()
-		if d.sp != sp {
-			return
-		}
-		if d.openDefer {
-			done := runOpenDeferFrame(d)
-			if !done {
-				throw("unfinished open-coded defers in deferreturn")
-			}
-			gp._defer = d.link
-			freedefer(d)
-			// If this frame uses open defers, then this
-			// must be the only defer record for the
-			// frame, so we can just return.
-			return
-		}
+	var p _panic
+	p.deferreturn = true
 
-		fn := d.fn
-		d.fn = nil
-		gp._defer = d.link
-		freedefer(d)
+	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
+	for {
+		fn, ok := p.nextDefer()
+		if !ok {
+			break
+		}
 		fn()
 	}
 }
@@ -487,78 +612,20 @@
 // the program continues execution of other goroutines.
 // If all other goroutines exit, the program crashes.
 func Goexit() {
-	// Run all deferred functions for the current goroutine.
-	// This code is similar to gopanic, see that implementation
-	// for detailed comments.
-	gp := getg()
-
 	// Create a panic object for Goexit, so we can recognize when it might be
 	// bypassed by a recover().
 	var p _panic
 	p.goexit = true
-	p.link = gp._panic
-	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
 
-	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
+	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
 	for {
-		d := gp._defer
-		if d == nil {
+		fn, ok := p.nextDefer()
+		if !ok {
 			break
 		}
-		if d.started {
-			if d._panic != nil {
-				d._panic.aborted = true
-				d._panic = nil
-			}
-			if !d.openDefer {
-				d.fn = nil
-				gp._defer = d.link
-				freedefer(d)
-				continue
-			}
-		}
-		d.started = true
-		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
-		if d.openDefer {
-			done := runOpenDeferFrame(d)
-			if !done {
-				// We should always run all defers in the frame,
-				// since there is no panic associated with this
-				// defer that can be recovered.
-				throw("unfinished open-coded defers in Goexit")
-			}
-			if p.aborted {
-				// Since our current defer caused a panic and may
-				// have been already freed, just restart scanning
-				// for open-coded defers from this frame again.
-				addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
-			} else {
-				addOneOpenDeferFrame(gp, 0, nil)
-			}
-		} else {
-			// Save the pc/sp in deferCallSave(), so we can "recover" back to this
-			// loop if necessary.
-			deferCallSave(&p, d.fn)
-		}
-		if p.aborted {
-			// We had a recursive panic in the defer d we started, and
-			// then did a recover in a defer that was further down the
-			// defer chain than d. In the case of an outstanding Goexit,
-			// we force the recover to return back to this loop. d will
-			// have already been freed if completed, so just continue
-			// immediately to the next defer on the chain.
-			p.aborted = false
-			continue
-		}
-		if gp._defer != d {
-			throw("bad defer entry in Goexit")
-		}
-		d._panic = nil
-		d.fn = nil
-		gp._defer = d.link
-		freedefer(d)
-		// Note: we ignore recovers here because Goexit isn't a panic
+		fn()
 	}
+
 	goexit1()
 }
 
@@ -607,134 +674,21 @@
 	print("\n")
 }
 
-// addOneOpenDeferFrame scans the stack (in gentraceback order, from inner frames to
-// outer frames) for the first frame (if any) with open-coded defers. If it finds
-// one, it adds a single entry to the defer chain for that frame. The entry added
-// represents all the defers in the associated open defer frame, and is sorted in
-// order with respect to any non-open-coded defers.
-//
-// addOneOpenDeferFrame stops (possibly without adding a new entry) if it encounters
-// an in-progress open defer entry. An in-progress open defer entry means there has
-// been a new panic because of a defer in the associated frame. addOneOpenDeferFrame
-// does not add an open defer entry past a started entry, because that started entry
-// still needs to finished, and addOneOpenDeferFrame will be called when that started
-// entry is completed. The defer removal loop in gopanic() similarly stops at an
-// in-progress defer entry. Together, addOneOpenDeferFrame and the defer removal loop
-// ensure the invariant that there is no open defer entry further up the stack than
-// an in-progress defer, and also that the defer removal loop is guaranteed to remove
-// all not-in-progress open defer entries from the defer chain.
-//
-// If sp is non-nil, addOneOpenDeferFrame starts the stack scan from the frame
-// specified by sp. If sp is nil, it uses the sp from the current defer record (which
-// has just been finished). Hence, it continues the stack scan from the frame of the
-// defer that just finished. It skips any frame that already has a (not-in-progress)
-// open-coded _defer record in the defer chain.
-//
-// Note: All entries of the defer chain (including this new open-coded entry) have
-// their pointers (including sp) adjusted properly if the stack moves while
-// running deferred functions. Also, it is safe to pass in the sp arg (which is
-// the direct result of calling getcallersp()), because all pointer variables
-// (including arguments) are adjusted as needed during stack copies.
-func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
-	var prevDefer *_defer
-	if sp == nil {
-		prevDefer = gp._defer
-		pc = prevDefer.framepc
-		sp = unsafe.Pointer(prevDefer.sp)
-	}
-	systemstack(func() {
-		var u unwinder
-	frames:
-		for u.initAt(pc, uintptr(sp), 0, gp, 0); u.valid(); u.next() {
-			frame := &u.frame
-			if prevDefer != nil && prevDefer.sp == frame.sp {
-				// Skip the frame for the previous defer that
-				// we just finished (and was used to set
-				// where we restarted the stack scan)
-				continue
-			}
-			f := frame.fn
-			fd := funcdata(f, abi.FUNCDATA_OpenCodedDeferInfo)
-			if fd == nil {
-				continue
-			}
-			// Insert the open defer record in the
-			// chain, in order sorted by sp.
-			d := gp._defer
-			var prev *_defer
-			for d != nil {
-				dsp := d.sp
-				if frame.sp < dsp {
-					break
-				}
-				if frame.sp == dsp {
-					if !d.openDefer {
-						throw("duplicated defer entry")
-					}
-					// Don't add any record past an
-					// in-progress defer entry. We don't
-					// need it, and more importantly, we
-					// want to keep the invariant that
-					// there is no open defer entry
-					// passed an in-progress entry (see
-					// header comment).
-					if d.started {
-						break frames
-					}
-					continue frames
-				}
-				prev = d
-				d = d.link
-			}
-			if frame.fn.deferreturn == 0 {
-				throw("missing deferreturn")
-			}
-
-			d1 := newdefer()
-			d1.openDefer = true
-			d1._panic = nil
-			// These are the pc/sp to set after we've
-			// run a defer in this frame that did a
-			// recover. We return to a special
-			// deferreturn that runs any remaining
-			// defers and then returns from the
-			// function.
-			d1.pc = frame.fn.entry() + uintptr(frame.fn.deferreturn)
-			d1.varp = frame.varp
-			d1.fd = fd
-			// Save the SP/PC associated with current frame,
-			// so we can continue stack trace later if needed.
-			d1.framepc = frame.pc
-			d1.sp = frame.sp
-			d1.link = d
-			if prev == nil {
-				gp._defer = d1
-			} else {
-				prev.link = d1
-			}
-			// Stop stack scanning after adding one open defer record
-			break
-		}
-	})
-}
-
 // readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
 // uint32 and a pointer to the byte following the varint.
 //
-// There is a similar function runtime.readvarint, which takes a slice of bytes,
-// rather than an unsafe pointer. These functions are duplicated, because one of
-// the two use cases for the functions would get slower if the functions were
-// combined.
+// The implementation is the same as runtime.readvarint, except that this function
+// uses unsafe.Pointer for speed.
 func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
 	var r uint32
 	var shift int
 	for {
-		b := *(*uint8)((unsafe.Pointer(fd)))
+		b := *(*uint8)(fd)
 		fd = add(fd, unsafe.Sizeof(b))
 		if b < 128 {
 			return r + uint32(b)<<shift, fd
 		}
-		r += ((uint32(b) &^ 128) << shift)
+		r += uint32(b&0x7F) << (shift & 31)
 		shift += 7
 		if shift > 28 {
 			panic("Bad varint")
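
For reference, the metadata decoded above is a plain unsigned base-128 varint: seven value bits per byte, high bit set on every continuation byte. A slice-based sketch of the same decoding (illustrative only; readvarintUnsafe is the real pointer-based version):

package main

import "fmt"

// readVarint decodes an unsigned base-128 varint from p and reports how many
// bytes it consumed, mirroring the loop in readvarintUnsafe.
func readVarint(p []byte) (uint32, int) {
	var v uint32
	var shift uint
	for i, b := range p {
		if b < 128 { // final byte: high bit clear
			return v + uint32(b)<<shift, i + 1
		}
		v += uint32(b&0x7F) << shift
		shift += 7
	}
	panic("truncated varint")
}

func main() {
	v, n := readVarint([]byte{0x96, 0x01}) // 0x96 0x01 encodes 150
	fmt.Println(v, n)                      // 150 2
}
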
@@ -742,66 +696,6 @@
 	}
 }
 
-// runOpenDeferFrame runs the active open-coded defers in the frame specified by
-// d. It normally processes all active defers in the frame, but stops immediately
-// if a defer does a successful recover. It returns true if there are no
-// remaining defers to run in the frame.
-func runOpenDeferFrame(d *_defer) bool {
-	done := true
-	fd := d.fd
-
-	deferBitsOffset, fd := readvarintUnsafe(fd)
-	nDefers, fd := readvarintUnsafe(fd)
-	deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))
-
-	for i := int(nDefers) - 1; i >= 0; i-- {
-		// read the funcdata info for this defer
-		var closureOffset uint32
-		closureOffset, fd = readvarintUnsafe(fd)
-		if deferBits&(1<<i) == 0 {
-			continue
-		}
-		closure := *(*func())(unsafe.Pointer(d.varp - uintptr(closureOffset)))
-		d.fn = closure
-		deferBits = deferBits &^ (1 << i)
-		*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
-		p := d._panic
-		// Call the defer. Note that this can change d.varp if
-		// the stack moves.
-		deferCallSave(p, d.fn)
-		if p != nil && p.aborted {
-			break
-		}
-		d.fn = nil
-		if d._panic != nil && d._panic.recovered {
-			done = deferBits == 0
-			break
-		}
-	}
-
-	return done
-}
-
-// deferCallSave calls fn() after saving the caller's pc and sp in the
-// panic record. This allows the runtime to return to the Goexit defer
-// processing loop, in the unusual case where the Goexit may be
-// bypassed by a successful recover.
-//
-// This is marked as a wrapper by the compiler so it doesn't appear in
-// tracebacks.
-func deferCallSave(p *_panic, fn func()) {
-	if p != nil {
-		p.argp = unsafe.Pointer(getargp())
-		p.pc = getcallerpc()
-		p.sp = unsafe.Pointer(getcallersp())
-	}
-	fn()
-	if p != nil {
-		p.pc = 0
-		p.sp = unsafe.Pointer(nil)
-	}
-}
-
 // A PanicNilError happens when code calls panic(nil).
 //
 // Before Go 1.21, programs that called panic(nil) observed recover returning nil.
@@ -864,167 +758,219 @@
 
 	var p _panic
 	p.arg = e
-	p.link = gp._panic
-	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
 
 	runningPanicDefers.Add(1)
 
-	// By calculating getcallerpc/getcallersp here, we avoid scanning the
-	// gopanic frame (stack scanning is slow...)
-	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
-
+	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
 	for {
-		d := gp._defer
-		if d == nil {
+		fn, ok := p.nextDefer()
+		if !ok {
 			break
 		}
-
-		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
-		// take defer off list. An earlier panic will not continue running, but we will make sure below that an
-		// earlier Goexit does continue running.
-		if d.started {
-			if d._panic != nil {
-				d._panic.aborted = true
-			}
-			d._panic = nil
-			if !d.openDefer {
-				// For open-coded defers, we need to process the
-				// defer again, in case there are any other defers
-				// to call in the frame (not including the defer
-				// call that caused the panic).
-				d.fn = nil
-				gp._defer = d.link
-				freedefer(d)
-				continue
-			}
-		}
-
-		// Mark defer as started, but keep on list, so that traceback
-		// can find and update the defer's argument frame if stack growth
-		// or a garbage collection happens before executing d.fn.
-		d.started = true
-
-		// Record the panic that is running the defer.
-		// If there is a new panic during the deferred call, that panic
-		// will find d in the list and will mark d._panic (this panic) aborted.
-		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
-
-		done := true
-		if d.openDefer {
-			done = runOpenDeferFrame(d)
-			if done && !d._panic.recovered {
-				addOneOpenDeferFrame(gp, 0, nil)
-			}
-		} else {
-			p.argp = unsafe.Pointer(getargp())
-			d.fn()
-		}
-		p.argp = nil
-
-		// Deferred function did not panic. Remove d.
-		if gp._defer != d {
-			throw("bad defer entry in panic")
-		}
-		d._panic = nil
-
-		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
-		//GC()
-
-		pc := d.pc
-		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
-		if done {
-			d.fn = nil
-			gp._defer = d.link
-			freedefer(d)
-		}
-		if p.recovered {
-			gp._panic = p.link
-			if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
-				// A normal recover would bypass/abort the Goexit.  Instead,
-				// we return to the processing loop of the Goexit.
-				gp.sigcode0 = uintptr(gp._panic.sp)
-				gp.sigcode1 = uintptr(gp._panic.pc)
-				mcall(recovery)
-				throw("bypassed recovery failed") // mcall should not return
-			}
-			runningPanicDefers.Add(-1)
-
-			// After a recover, remove any remaining non-started,
-			// open-coded defer entries, since the corresponding defers
-			// will be executed normally (inline). Any such entry will
-			// become stale once we run the corresponding defers inline
-			// and exit the associated stack frame. We only remove up to
-			// the first started (in-progress) open defer entry, not
-			// including the current frame, since any higher entries will
-			// be from a higher panic in progress, and will still be
-			// needed.
-			d := gp._defer
-			var prev *_defer
-			if !done {
-				// Skip our current frame, if not done. It is
-				// needed to complete any remaining defers in
-				// deferreturn()
-				prev = d
-				d = d.link
-			}
-			for d != nil {
-				if d.started {
-					// This defer is started but we
-					// are in the middle of a
-					// defer-panic-recover inside of
-					// it, so don't remove it or any
-					// further defer entries
-					break
-				}
-				if d.openDefer {
-					if prev == nil {
-						gp._defer = d.link
-					} else {
-						prev.link = d.link
-					}
-					newd := d.link
-					freedefer(d)
-					d = newd
-				} else {
-					prev = d
-					d = d.link
-				}
-			}
-
-			gp._panic = p.link
-			// Aborted panics are marked but remain on the g.panic list.
-			// Remove them from the list.
-			for gp._panic != nil && gp._panic.aborted {
-				gp._panic = gp._panic.link
-			}
-			if gp._panic == nil { // must be done with signal
-				gp.sig = 0
-			}
-			// Pass information about recovering frame to recovery.
-			gp.sigcode0 = uintptr(sp)
-			gp.sigcode1 = pc
-			mcall(recovery)
-			throw("recovery failed") // mcall should not return
-		}
+		fn()
 	}
 
 	// ran out of deferred calls - old-school panic now
 	// Because it is unsafe to call arbitrary user code after freezing
 	// the world, we call preprintpanics to invoke all necessary Error
 	// and String methods to prepare the panic strings before startpanic.
-	preprintpanics(gp._panic)
+	preprintpanics(&p)
 
-	fatalpanic(gp._panic) // should not return
-	*(*int)(nil) = 0      // not reached
+	fatalpanic(&p)   // should not return
+	*(*int)(nil) = 0 // not reached
 }
 
-// getargp returns the location where the caller
-// writes outgoing function call arguments.
+// start initializes a panic to start unwinding the stack.
 //
-//go:nosplit
-//go:noinline
-func getargp() uintptr {
-	return getcallersp() + sys.MinFrameSize
+// If p.goexit is true, then start may return multiple times.
+func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
+	gp := getg()
+
+	// Record the caller's PC and SP, so recovery can identify panics
+	// that have been recovered. Also, so that if p is from Goexit, we
+	// can restart its defer processing loop if a recovered panic tries
+	// to jump past it.
+	p.startPC = getcallerpc()
+	p.startSP = unsafe.Pointer(getcallersp())
+
+	if p.deferreturn {
+		p.sp = sp
+
+		if s := (*savedOpenDeferState)(gp.param); s != nil {
+			// recovery saved some state for us, so that we can resume
+			// calling open-coded defers without unwinding the stack.
+
+			gp.param = nil
+
+			p.retpc = s.retpc
+			p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
+			p.slotsPtr = add(sp, s.slotsOffset)
+		}
+
+		return
+	}
+
+	p.link = gp._panic
+	gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))
+
+	// Initialize state machine, and find the first frame with a defer.
+	//
+	// Note: We could use startPC and startSP here, but callers will
+	// never have defer statements themselves. By starting at their
+	// caller instead, we avoid needing to unwind through an extra
+	// frame. It also somewhat simplifies the terminating condition for
+	// deferreturn.
+	p.lr, p.fp = pc, sp
+	p.nextFrame()
+}
+
+// nextDefer returns the next deferred function to invoke, if any.
+//
+// Note: The "ok bool" result is necessary to correctly handle when
+// the deferred function itself was nil (e.g., "defer (func())(nil)").
+func (p *_panic) nextDefer() (func(), bool) {
+	gp := getg()
+
+	if !p.deferreturn {
+		if gp._panic != p {
+			throw("bad panic stack")
+		}
+
+		if p.recovered {
+			mcall(recovery) // does not return
+			throw("recovery failed")
+		}
+	}
+
+	// The assembler adjusts p.argp in wrapper functions that shouldn't
+	// be visible to recover(), so we need to restore it each iteration.
+	p.argp = add(p.startSP, sys.MinFrameSize)
+
+	for {
+		for p.deferBitsPtr != nil {
+			bits := *p.deferBitsPtr
+
+			// Check whether any open-coded defers are still pending.
+			//
+			// Note: We need to check this upfront (rather than after
+			// clearing the top bit) because it's possible that Goexit
+			// invokes a deferred call, and there were still more pending
+			// open-coded defers in the frame; but then the deferred call
+			// panicked and invoked the remaining defers in the frame, before
+			// recovering and restarting the Goexit loop.
+			if bits == 0 {
+				p.deferBitsPtr = nil
+				break
+			}
+
+			// Find index of top bit set.
+			i := 7 - uintptr(sys.LeadingZeros8(bits))
+
+			// Clear bit and store it back.
+			bits &^= 1 << i
+			*p.deferBitsPtr = bits
+
+			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
+		}
+
+	Recheck:
+		if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
+			if d.rangefunc {
+				gp._defer = deferconvert(d)
+				goto Recheck
+			}
+
+			fn := d.fn
+			d.fn = nil
+
+			// TODO(mdempsky): Instead of having each deferproc call have
+			// its own "deferreturn(); return" sequence, we should just make
+			// them reuse the one we emit for open-coded defers.
+			p.retpc = d.pc
+
+			// Unlink and free.
+			gp._defer = d.link
+			freedefer(d)
+
+			return fn, true
+		}
+
+		if !p.nextFrame() {
+			return nil, false
+		}
+	}
+}
+
+// nextFrame finds the next frame that contains deferred calls, if any.
+func (p *_panic) nextFrame() (ok bool) {
+	if p.lr == 0 {
+		return false
+	}
+
+	gp := getg()
+	systemstack(func() {
+		var limit uintptr
+		if d := gp._defer; d != nil {
+			limit = d.sp
+		}
+
+		var u unwinder
+		u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
+		for {
+			if !u.valid() {
+				p.lr = 0
+				return // ok == false
+			}
+
+			// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
+			// every frame containing a defer (not just open-coded defers),
+			// then we can simply loop until we find the next frame where
+			// it's non-zero.
+
+			if u.frame.sp == limit {
+				break // found a frame with linked defers
+			}
+
+			if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
+				break // found a frame with open-coded defers
+			}
+
+			u.next()
+		}
+
+		p.lr = u.frame.lr
+		p.sp = unsafe.Pointer(u.frame.sp)
+		p.fp = unsafe.Pointer(u.frame.fp)
+
+		ok = true
+	})
+
+	return
+}
+
+func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
+	fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
+	if fd == nil {
+		return false
+	}
+
+	if fn.deferreturn == 0 {
+		throw("missing deferreturn")
+	}
+
+	deferBitsOffset, fd := readvarintUnsafe(fd)
+	deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
+	if *deferBitsPtr == 0 {
+		return false // has open-coded defers, but none pending
+	}
+
+	slotsOffset, fd := readvarintUnsafe(fd)
+
+	p.retpc = fn.entry() + uintptr(fn.deferreturn)
+	p.deferBitsPtr = deferBitsPtr
+	p.slotsPtr = add(varp, -uintptr(slotsOffset))
+
+	return true
 }
 
 // The implementation of the predeclared function recover.
@@ -1110,12 +1056,93 @@
 // Unwind the stack after a deferred function calls recover
 // after a panic. Then arrange to continue running as though
 // the caller of the deferred function returned normally.
+//
+// However, if unwinding the stack would skip over a Goexit call, we
+// return into the Goexit loop instead, so it can continue processing
+// defers.
 func recovery(gp *g) {
-	// Info about defer passed in G struct.
-	sp := gp.sigcode0
-	pc := gp.sigcode1
+	p := gp._panic
+	pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
+	p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0
 
-	// d's arguments need to be in the stack.
+	// Unwind the panic stack.
+	for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
+		// Don't allow jumping past a pending Goexit.
+		// Instead, have its _panic.start() call return again.
+		//
+		// TODO(mdempsky): In this case, Goexit will resume walking the
+		// stack where it left off, which means it will need to rewalk
+		// frames that we've already processed.
+		//
+		// There's a similar issue with nested panics, when the inner
+		// panic supersedes the outer panic. Again, we end up needing to
+		// walk the same stack frames.
+		//
+		// These are probably pretty rare occurrences in practice, and
+		// they don't seem any worse than the existing logic. But if we
+		// move the unwinding state into _panic, we could detect when we
+		// run into where the last panic started, and then just pick up
+		// where it left off instead.
+		//
+		// With how subtle defer handling is, this might not actually be
+		// worthwhile though.
+		if p.goexit {
+			pc, sp = p.startPC, uintptr(p.startSP)
+			saveOpenDeferState = false // goexit is unwinding the stack anyway
+			break
+		}
+
+		runningPanicDefers.Add(-1)
+	}
+	gp._panic = p
+
+	if p == nil { // must be done with signal
+		gp.sig = 0
+	}
+
+	if gp.param != nil {
+		throw("unexpected gp.param")
+	}
+	if saveOpenDeferState {
+		// If we're returning to deferreturn and there are more open-coded
+		// defers for it to call, save enough state for it to be able to
+		// pick up where p0 left off.
+		gp.param = unsafe.Pointer(&savedOpenDeferState{
+			retpc: p0.retpc,
+
+			// We need to save deferBitsPtr and slotsPtr too, but those are
+			// stack pointers. To avoid issues around heap objects pointing
+			// to the stack, save them as offsets from SP.
+			deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
+			slotsOffset:     uintptr(p0.slotsPtr) - uintptr(p0.sp),
+		})
+	}
+
+	// TODO(mdempsky): Currently, we rely on frames containing "defer"
+	// to end with "CALL deferreturn; RET". This allows deferreturn to
+	// finish running any pending defers in the frame.
+	//
+	// But we should be able to tell whether there are still pending
+	// defers here. If there aren't, we can just jump directly to the
+	// "RET" instruction. And if there are, we don't need an actual
+	// "CALL deferreturn" instruction; we can simulate it with something
+	// like:
+	//
+	//	if usesLR {
+	//		lr = pc
+	//	} else {
+	//		sp -= sizeof(pc)
+	//		*(*uintptr)(sp) = pc
+	//	}
+	//	pc = funcPC(deferreturn)
+	//
+	// So that we effectively tail call into deferreturn, such that it
+	// then returns to the simple "RET" epilogue. That would save the
+	// overhead of the "deferreturn" call when there aren't actually any
+	// pending defers left, and shrink the TEXT size of compiled
+	// binaries. (Admittedly, both of these are modest savings.)
+
+	// Ensure we're recovering within the appropriate stack.
 	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
 		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
 		throw("bad recovery")
@@ -1132,12 +1159,14 @@
 	// support frame pointers, since nothing consumes them.
 	switch {
 	case goarch.IsAmd64 != 0:
-		// On x86, the architectural bp is stored 2 words below the
-		// stack pointer.
-		gp.sched.bp = *(*uintptr)(unsafe.Pointer(sp - 2*goarch.PtrSize))
+		// on x86, fp actually points one word higher than the top of
+		// the frame since the return address is saved on the stack by
+		// the caller
+		gp.sched.bp = fp - 2*goarch.PtrSize
 	case goarch.IsArm64 != 0:
 		// on arm64, the architectural bp points one word higher
-		// than the sp.
+		// than the sp. fp is totally useless to us here, because it
+		// only gets us to the caller's fp.
 		gp.sched.bp = sp - goarch.PtrSize
 	}
 	gp.sched.ret = 1
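
The Goexit handling in recovery above has a user-visible consequence worth a small illustration: a recover inside a deferred call cannot cancel a pending Goexit; the runtime returns to Goexit's defer-processing loop instead. A minimal sketch (not part of the patch):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		defer func() {
			// Recovers the panic raised below, but does not cancel the
			// pending Goexit: control returns to Goexit's defer loop.
			if r := recover(); r != nil {
				fmt.Println("recovered:", r)
			}
		}()
		defer panic("boom") // panics while Goexit is unwinding
		runtime.Goexit()
	}()
	<-done
	fmt.Println("goroutine exited via Goexit")
}
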
diff --git a/src/runtime/pinner.go b/src/runtime/pinner.go
index 8bb351e..1ede111 100644
--- a/src/runtime/pinner.go
+++ b/src/runtime/pinner.go
@@ -9,26 +9,23 @@
 	"unsafe"
 )
 
-// A Pinner is a set of pinned Go objects. An object can be pinned with
-// the Pin method and all pinned objects of a Pinner can be unpinned with the
-// Unpin method.
+// A Pinner is a set of Go objects each pinned to a fixed location in memory. The
+// [Pinner.Pin] method pins one object, while [Pinner.Unpin] unpins all pinned
+// objects. See their comments for more information.
 type Pinner struct {
 	*pinner
 }
 
 // Pin pins a Go object, preventing it from being moved or freed by the garbage
-// collector until the Unpin method has been called.
+// collector until the [Pinner.Unpin] method has been called.
 //
-// A pointer to a pinned
-// object can be directly stored in C memory or can be contained in Go memory
-// passed to C functions. If the pinned object itself contains pointers to Go
-// objects, these objects must be pinned separately if they are going to be
-// accessed from C code.
+// A pointer to a pinned object can be directly stored in C memory or can be
+// contained in Go memory passed to C functions. If the pinned object itself
+// contains pointers to Go objects, these objects must be pinned separately if they
+// are going to be accessed from C code.
 //
-// The argument must be a pointer of any type or an
-// unsafe.Pointer. It must be the result of calling new,
-// taking the address of a composite literal, or taking the address of a
-// local variable. If one of these conditions is not met, Pin will panic.
+// The argument must be a pointer of any type or an [unsafe.Pointer].
+// It's safe to call Pin on non-Go pointers, in which case Pin will do nothing.
 func (p *Pinner) Pin(pointer any) {
 	if p.pinner == nil {
 		// Check the pinner cache first.
@@ -59,11 +56,12 @@
 		}
 	}
 	ptr := pinnerGetPtr(&pointer)
-	setPinned(ptr, true)
-	p.refs = append(p.refs, ptr)
+	if setPinned(ptr, true) {
+		p.refs = append(p.refs, ptr)
+	}
 }
 
-// Unpin unpins all pinned objects of the Pinner.
+// Unpin unpins all pinned objects of the [Pinner].
 func (p *Pinner) Unpin() {
 	p.pinner.unpin()
 
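
A short usage sketch of the Pinner API documented above (the C call is only indicated by a comment; the buffer size and names are arbitrary):

package main

import (
	"runtime"
	"unsafe"
)

func main() {
	var pin runtime.Pinner
	buf := new([64]byte)
	pin.Pin(buf) // buf cannot be moved or freed until Unpin is called
	p := unsafe.Pointer(buf)
	_ = p // p may now be stored in C memory or passed to a C function
	pin.Unpin() // unpins every object pinned through this Pinner
}
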
@@ -143,15 +141,19 @@
 	return pinState.isPinned()
 }
 
-// setPinned marks or unmarks a Go pointer as pinned.
-func setPinned(ptr unsafe.Pointer, pin bool) {
+// setPinned marks or unmarks a Go pointer as pinned, when ptr is a Go pointer.
+// Trying to pin a non-Go pointer is ignored and setPinned reports false,
+// while trying to unpin a non-Go pointer panics; the latter should not
+// happen in normal usage.
+func setPinned(ptr unsafe.Pointer, pin bool) bool {
 	span := spanOfHeap(uintptr(ptr))
 	if span == nil {
-		if isGoPointerWithoutSpan(ptr) {
-			// this is a linker-allocated or zero size object, nothing to do.
-			return
+		if !pin {
+			panic(errorString("tried to unpin non-Go pointer"))
 		}
-		panic(errorString("runtime.Pinner.Pin: argument is not a Go pointer"))
+		// This is a linker-allocated object, a zero-size object, or some
+		// other non-heap pointer; nothing to do, silently ignore it.
+		return false
 	}
 
 	// ensure that the span is swept, b/c sweeping accesses the specials list
@@ -209,7 +211,7 @@
 	}
 	unlock(&span.speciallock)
 	releasem(mp)
-	return
+	return true
 }
 
 type pinState struct {
@@ -265,14 +267,14 @@
 }
 
 func (s *mspan) pinnerBitSize() uintptr {
-	return divRoundUp(s.nelems*2, 8)
+	return divRoundUp(uintptr(s.nelems)*2, 8)
 }
 
 // newPinnerBits returns a pointer to 8 byte aligned bytes to be used for this
 // span's pinner bits. newPinnerBits is used to mark objects that are pinned.
 // They are copied when the span is swept.
 func (s *mspan) newPinnerBits() *pinnerBits {
-	return (*pinnerBits)(newMarkBits(s.nelems * 2))
+	return (*pinnerBits)(newMarkBits(uintptr(s.nelems) * 2))
 }
 
 // nosplit, because it's called by isPinned, which is nosplit
diff --git a/src/runtime/pinner_test.go b/src/runtime/pinner_test.go
index 88ead7c..ef8500c 100644
--- a/src/runtime/pinner_test.go
+++ b/src/runtime/pinner_test.go
@@ -522,3 +522,19 @@
 		}
 	})
 }
+
+// const string data is not in span.
+func TestPinnerConstStringData(t *testing.T) {
+	var pinner runtime.Pinner
+	str := "test-const-string"
+	p := unsafe.StringData(str)
+	addr := unsafe.Pointer(p)
+	if !runtime.IsPinned(addr) {
+		t.Fatal("not marked as pinned")
+	}
+	pinner.Pin(p)
+	pinner.Unpin()
+	if !runtime.IsPinned(addr) {
+		t.Fatal("not marked as pinned")
+	}
+}
diff --git a/src/runtime/pprof/defs_darwin.go b/src/runtime/pprof/defs_darwin.go
new file mode 100644
index 0000000..2b2f681
--- /dev/null
+++ b/src/runtime/pprof/defs_darwin.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is used as input to cgo --godefs (GOOS=arm64 or amd64) to
+// generate the types used in viminfo_darwin_{arm64,amd64}.go which are
+// hand edited as appropriate, primarily to avoid exporting the types.
+
+//go:build ignore
+
+package pprof
+
+/*
+#include <sys/param.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_region.h>
+*/
+import "C"
+
+type machVMRegionBasicInfoData C.vm_region_basic_info_data_64_t
+
+const (
+	_VM_PROT_READ    = C.VM_PROT_READ
+	_VM_PROT_WRITE   = C.VM_PROT_WRITE
+	_VM_PROT_EXECUTE = C.VM_PROT_EXECUTE
+
+	_MACH_SEND_INVALID_DEST = C.MACH_SEND_INVALID_DEST
+
+	_MAXPATHLEN = C.MAXPATHLEN
+)
diff --git a/src/runtime/pprof/defs_darwin_amd64.go b/src/runtime/pprof/defs_darwin_amd64.go
new file mode 100644
index 0000000..fa428b9
--- /dev/null
+++ b/src/runtime/pprof/defs_darwin_amd64.go
@@ -0,0 +1,26 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs defs_darwin.go
+
+package pprof
+
+type machVMRegionBasicInfoData struct {
+	Protection       int32
+	Max_protection   int32
+	Inheritance      uint32
+	Shared           uint32
+	Reserved         uint32
+	Offset           [8]byte // This is hand-edited since godefs generates: Pad_cgo_0 [8]byte. Cannot use uint64 due to alignment.
+	Behavior         int32
+	User_wired_count uint16
+	Pad_cgo_1        [2]byte
+}
+
+const (
+	_VM_PROT_READ    = 0x1
+	_VM_PROT_WRITE   = 0x2
+	_VM_PROT_EXECUTE = 0x4
+
+	_MACH_SEND_INVALID_DEST = 0x10000003
+
+	_MAXPATHLEN = 0x400
+)
diff --git a/src/runtime/pprof/defs_darwin_arm64.go b/src/runtime/pprof/defs_darwin_arm64.go
new file mode 100644
index 0000000..16c68a2
--- /dev/null
+++ b/src/runtime/pprof/defs_darwin_arm64.go
@@ -0,0 +1,26 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs defs_darwin.go
+
+package pprof
+
+type machVMRegionBasicInfoData struct {
+	Protection       int32
+	Max_protection   int32
+	Inheritance      uint32
+	Shared           int32
+	Reserved         int32
+	Offset           [8]byte // This is hand-edited since godefs generates: Pad_cgo_0 [8]byte. Cannot use uint64 due to alignment.
+	Behavior         int32
+	User_wired_count uint16
+	Pad_cgo_1        [2]byte
+}
+
+const (
+	_VM_PROT_READ    = 0x1
+	_VM_PROT_WRITE   = 0x2
+	_VM_PROT_EXECUTE = 0x4
+
+	_MACH_SEND_INVALID_DEST = 0x10000003
+
+	_MAXPATHLEN = 0x400
+)
diff --git a/src/runtime/pprof/label.go b/src/runtime/pprof/label.go
index d39e0ad..3684ae3 100644
--- a/src/runtime/pprof/label.go
+++ b/src/runtime/pprof/label.go
@@ -54,7 +54,7 @@
 	return "{" + strings.Join(keyVals, ", ") + "}"
 }
 
-// WithLabels returns a new context.Context with the given labels added.
+// WithLabels returns a new [context.Context] with the given labels added.
 // A label overwrites a prior label with the same key.
 func WithLabels(ctx context.Context, labels LabelSet) context.Context {
 	parentLabels := labelValue(ctx)
@@ -72,7 +72,7 @@
 }
 
 // Labels takes an even number of strings representing key-value pairs
-// and makes a LabelSet containing them.
+// and makes a [LabelSet] containing them.
 // A label overwrites a prior label with the same key.
 // Currently only the CPU and goroutine profiles utilize any labels
 // information.
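
A usage sketch of the labels API documented in this file; pprof.Do is the usual helper combining WithLabels and SetGoroutineLabels, and the label keys and values below are made up:

package main

import (
	"context"
	"runtime/pprof"
)

func main() {
	// Samples taken while the function runs carry these labels in the CPU
	// and goroutine profiles; a label overwrites a prior label with the
	// same key.
	pprof.Do(context.Background(), pprof.Labels("handler", "upload", "shard", "7"),
		func(ctx context.Context) {
			// ... workload ...
		})
}
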
diff --git a/src/runtime/pprof/label_test.go b/src/runtime/pprof/label_test.go
index fcb00bd..cefd9a5 100644
--- a/src/runtime/pprof/label_test.go
+++ b/src/runtime/pprof/label_test.go
@@ -1,3 +1,7 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package pprof
 
 import (
diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go
index 17a490e..a4dcf33 100644
--- a/src/runtime/pprof/pprof.go
+++ b/src/runtime/pprof/pprof.go
@@ -69,7 +69,7 @@
 // all pprof commands.
 //
 // For more information about pprof, see
-// https://github.com/google/pprof/blob/master/doc/README.md.
+// https://github.com/google/pprof/blob/main/doc/README.md.
 package pprof
 
 import (
@@ -107,7 +107,13 @@
 //	mutex        - stack traces of holders of contended mutexes
 //
 // These predefined profiles maintain themselves and panic on an explicit
-// Add or Remove method call.
+// [Profile.Add] or [Profile.Remove] method call.
+//
+// The CPU profile is not available as a Profile. It has a special API,
+// the [StartCPUProfile] and [StopCPUProfile] functions, because it streams
+// output to a writer during profiling.
+//
+// # Heap profile
 //
 // The heap profile reports statistics as of the most recently completed
 // garbage collection; it elides more recent allocation to avoid skewing
@@ -122,13 +128,47 @@
 // flags select which to display, defaulting to -inuse_space (live objects,
 // scaled by size).
 //
+// # Allocs profile
+//
 // The allocs profile is the same as the heap profile but changes the default
 // pprof display to -alloc_space, the total number of bytes allocated since
 // the program began (including garbage-collected bytes).
 //
-// The CPU profile is not available as a Profile. It has a special API,
-// the StartCPUProfile and StopCPUProfile functions, because it streams
-// output to a writer during profiling.
+// # Block profile
+//
+// The block profile tracks time spent blocked on synchronization primitives,
+// such as [sync.Mutex], [sync.RWMutex], [sync.WaitGroup], [sync.Cond], and
+// channel send/receive/select.
+//
+// Stack traces correspond to the location that blocked (for example,
+// [sync.Mutex.Lock]).
+//
+// Sample values correspond to cumulative time spent blocked at that stack
+// trace, subject to time-based sampling specified by
+// [runtime.SetBlockProfileRate].
+//
+// # Mutex profile
+//
+// The mutex profile tracks contention on mutexes, such as [sync.Mutex],
+// [sync.RWMutex], and runtime-internal locks.
+//
+// Stack traces correspond to the end of the critical section causing
+// contention. For example, a lock held for a long time while other goroutines
+// are waiting to acquire the lock will report contention when the lock is
+// finally unlocked (that is, at [sync.Mutex.Unlock]).
+//
+// Sample values correspond to the approximate cumulative time other goroutines
+// spent blocked waiting for the lock, subject to event-based sampling
+// specified by [runtime.SetMutexProfileFraction]. For example, if a caller
+// holds a lock for 1s while 5 other goroutines are waiting for the entire
+// second to acquire the lock, its unlock call stack will report 5s of
+// contention.
+//
+// Runtime-internal locks are always reported at the location
+// "runtime._LostContendedRuntimeLock". More detailed stack traces for
+// runtime-internal locks can be obtained by setting
+// `GODEBUG=runtimecontentionstacks=1` (see package [runtime] docs for
+// caveats).
 type Profile struct {
 	name  string
 	mu    sync.Mutex
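
A sketch of collecting the mutex profile described above (the sampling rate and output file name are arbitrary choices for illustration):

package main

import (
	"log"
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	runtime.SetMutexProfileFraction(5) // sample roughly 1 in 5 contention events
	defer runtime.SetMutexProfileFraction(0)

	// ... run the contended workload here ...

	f, err := os.Create("mutex.pb.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := pprof.Lookup("mutex").WriteTo(f, 0); err != nil {
		log.Fatal(err)
	}
}
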
@@ -242,7 +282,7 @@
 	return all
 }
 
-// Name returns this profile's name, which can be passed to Lookup to reobtain the profile.
+// Name returns this profile's name, which can be passed to [Lookup] to reobtain the profile.
 func (p *Profile) Name() string {
 	return p.name
 }
@@ -260,9 +300,9 @@
 // Add adds the current execution stack to the profile, associated with value.
 // Add stores value in an internal map, so value must be suitable for use as
 // a map key and will not be garbage collected until the corresponding
-// call to Remove. Add panics if the profile already contains a stack for value.
+// call to [Profile.Remove]. Add panics if the profile already contains a stack for value.
 //
-// The skip parameter has the same meaning as runtime.Caller's skip
+// The skip parameter has the same meaning as [runtime.Caller]'s skip
 // and controls where the stack trace begins. Passing skip=0 begins the
 // trace in the function calling Add. For example, given this
 // execution stack:
@@ -515,7 +555,7 @@
 
 // Interface to system profiles.
 
-// WriteHeapProfile is shorthand for Lookup("heap").WriteTo(w, 0).
+// WriteHeapProfile is shorthand for [Lookup]("heap").WriteTo(w, 0).
 // It is preserved for backwards compatibility.
 func WriteHeapProfile(w io.Writer) error {
 	return writeHeap(w, 0)
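
A sketch of the WriteHeapProfile shorthand; the explicit GC forces a collection so the profile reflects a recently completed cycle, matching the heap-profile semantics described earlier in this file (the file name is arbitrary):

package main

import (
	"log"
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	runtime.GC() // get up-to-date statistics for the heap profile
	f, err := os.Create("heap.pb.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := pprof.WriteHeapProfile(f); err != nil {
		log.Fatal(err)
	}
}
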
@@ -765,8 +805,8 @@
 // Go code built with -buildmode=c-archive or -buildmode=c-shared.
 // StartCPUProfile relies on the SIGPROF signal, but that signal will
 // be delivered to the main program's SIGPROF signal handler (if any)
-// not to the one used by Go. To make it work, call os/signal.Notify
-// for syscall.SIGPROF, but note that doing so may break any profiling
+// not to the one used by Go. To make it work, call [os/signal.Notify]
+// for [syscall.SIGPROF], but note that doing so may break any profiling
 // being done by the main program.
 func StartCPUProfile(w io.Writer) error {
 	// The runtime routines allow a variable profiling rate,
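
A hedged sketch of the os/signal.Notify workaround mentioned in the StartCPUProfile comment above for -buildmode=c-archive or c-shared builds (Unix-oriented; the channel is otherwise unused):

package main

import (
	"log"
	"os"
	"os/signal"
	"runtime/pprof"
	"syscall"
)

func main() {
	// Ask the Go runtime to handle SIGPROF itself, as the comment above
	// suggests; this may interfere with profiling done by the host program.
	signal.Notify(make(chan os.Signal, 1), syscall.SIGPROF)

	f, err := os.Create("cpu.pb.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	// ... workload ...
}
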
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 56ba6d9..6b299e5 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -18,7 +18,6 @@
 	"math"
 	"math/big"
 	"os"
-	"os/exec"
 	"regexp"
 	"runtime"
 	"runtime/debug"
@@ -440,7 +439,7 @@
 func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile {
 	switch runtime.GOOS {
 	case "darwin":
-		out, err := exec.Command("uname", "-a").CombinedOutput()
+		out, err := testenv.Command(t, "uname", "-a").CombinedOutput()
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -653,6 +652,11 @@
 func TestCPUProfileWithFork(t *testing.T) {
 	testenv.MustHaveExec(t)
 
+	exe, err := os.Executable()
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	heap := 1 << 30
 	if runtime.GOOS == "android" {
 		// Use smaller size for Android to avoid crash.
@@ -684,7 +688,7 @@
 	defer StopCPUProfile()
 
 	for i := 0; i < 10; i++ {
-		exec.Command(os.Args[0], "-h").CombinedOutput()
+		testenv.Command(t, exe, "-h").CombinedOutput()
 	}
 }
 
@@ -1019,7 +1023,7 @@
 // awaitBlockedGoroutine spins on runtime.Gosched until a runtime stack dump
 // shows a goroutine in the given state with a stack frame in
 // runtime/pprof.<fName>.
-func awaitBlockedGoroutine(t *testing.T, state, fName string) {
+func awaitBlockedGoroutine(t *testing.T, state, fName string, count int) {
 	re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
 	r := regexp.MustCompile(re)
 
@@ -1043,7 +1047,7 @@
 			buf = make([]byte, 2*len(buf))
 			continue
 		}
-		if r.Match(buf[:n]) {
+		if len(r.FindAll(buf[:n], -1)) >= count {
 			return
 		}
 	}
@@ -1052,7 +1056,7 @@
 func blockChanRecv(t *testing.T) {
 	c := make(chan bool)
 	go func() {
-		awaitBlockedGoroutine(t, "chan receive", "blockChanRecv")
+		awaitBlockedGoroutine(t, "chan receive", "blockChanRecv", 1)
 		c <- true
 	}()
 	<-c
@@ -1061,7 +1065,7 @@
 func blockChanSend(t *testing.T) {
 	c := make(chan bool)
 	go func() {
-		awaitBlockedGoroutine(t, "chan send", "blockChanSend")
+		awaitBlockedGoroutine(t, "chan send", "blockChanSend", 1)
 		<-c
 	}()
 	c <- true
@@ -1070,7 +1074,7 @@
 func blockChanClose(t *testing.T) {
 	c := make(chan bool)
 	go func() {
-		awaitBlockedGoroutine(t, "chan receive", "blockChanClose")
+		awaitBlockedGoroutine(t, "chan receive", "blockChanClose", 1)
 		close(c)
 	}()
 	<-c
@@ -1082,7 +1086,7 @@
 	c2 := make(chan bool, 1)
 	go func() {
 		for i := 0; i < numTries; i++ {
-			awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync")
+			awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync", 1)
 			c <- true
 		}
 	}()
@@ -1098,7 +1102,7 @@
 	c := make(chan bool)
 	c2 := make(chan bool)
 	go func() {
-		awaitBlockedGoroutine(t, "select", "blockSelectSendSync")
+		awaitBlockedGoroutine(t, "select", "blockSelectSendSync", 1)
 		<-c
 	}()
 	select {
@@ -1111,7 +1115,7 @@
 	var mu sync.Mutex
 	mu.Lock()
 	go func() {
-		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex")
+		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", 1)
 		mu.Unlock()
 	}()
 	// Note: Unlock releases mu before recording the mutex event,
@@ -1121,12 +1125,36 @@
 	mu.Lock()
 }
 
+func blockMutexN(t *testing.T, n int, d time.Duration) {
+	var wg sync.WaitGroup
+	var mu sync.Mutex
+	mu.Lock()
+	go func() {
+		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", n)
+		time.Sleep(d)
+		mu.Unlock()
+	}()
+	// Note: Unlock releases mu before recording the mutex event,
+	// so it's theoretically possible for this to proceed and
+	// capture the profile before the event is recorded. As long
+	// as this is blocked before the unlock happens, it's okay.
+	for i := 0; i < n; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			mu.Lock()
+			mu.Unlock()
+		}()
+	}
+	wg.Wait()
+}
+
 func blockCond(t *testing.T) {
 	var mu sync.Mutex
 	c := sync.NewCond(&mu)
 	mu.Lock()
 	go func() {
-		awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond")
+		awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond", 1)
 		mu.Lock()
 		c.Signal()
 		mu.Unlock()
@@ -1213,7 +1241,13 @@
 		t.Fatalf("need MutexProfileRate 0, got %d", old)
 	}
 
-	blockMutex(t)
+	const (
+		N = 100
+		D = 100 * time.Millisecond
+	)
+	start := time.Now()
+	blockMutexN(t, N, D)
+	blockMutexNTime := time.Since(start)
 
 	t.Run("debug=1", func(t *testing.T) {
 		var w strings.Builder
@@ -1226,15 +1260,11 @@
 		}
 		prof = strings.Trim(prof, "\n")
 		lines := strings.Split(prof, "\n")
-		if len(lines) != 6 {
-			t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
-		}
 		if len(lines) < 6 {
-			return
+			t.Fatalf("expected >=6 lines, got %d %q\n%s", len(lines), prof, prof)
 		}
 		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
 		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
-		//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
 		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
 			t.Errorf("%q didn't match %q", lines[3], r2)
 		}
@@ -1259,12 +1289,43 @@
 
 		stks := stacks(p)
 		for _, want := range [][]string{
-			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
+			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"},
 		} {
 			if !containsStack(stks, want) {
 				t.Errorf("No matching stack entry for %+v", want)
 			}
 		}
+
+		i := 0
+		for ; i < len(p.SampleType); i++ {
+			if p.SampleType[i].Unit == "nanoseconds" {
+				break
+			}
+		}
+		if i >= len(p.SampleType) {
+			t.Fatalf("profile did not contain nanoseconds sample")
+		}
+		total := int64(0)
+		for _, s := range p.Sample {
+			total += s.Value[i]
+		}
+		// Want d to be at least N*D, but give some wiggle-room to avoid
+		// a test flaking. Set an upper-bound proportional to the total
+		// wall time spent in blockMutexN. Generally speaking, the total
+		// contention time could be arbitrarily high when considering
+		// OS scheduler delays, or any other delays from the environment:
+		// time keeps ticking during these delays. By making the upper
+		// bound proportional to the wall time in blockMutexN, in theory
+		// we're accounting for all these possible delays.
+		d := time.Duration(total)
+		lo := time.Duration(N * D * 9 / 10)
+		hi := time.Duration(N) * blockMutexNTime * 11 / 10
+		if d < lo || d > hi {
+			for _, s := range p.Sample {
+				t.Logf("sample: %s", time.Duration(s.Value[i]))
+			}
+			t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D)
+		}
 	})
 }
 
@@ -1358,6 +1419,23 @@
 		}
 	})
 
+	SetGoroutineLabels(WithLabels(context.Background(), Labels("self-label", "self-value")))
+	defer SetGoroutineLabels(context.Background())
+
+	garbage := new(*int)
+	fingReady := make(chan struct{})
+	runtime.SetFinalizer(garbage, func(v **int) {
+		Do(context.Background(), Labels("fing-label", "fing-value"), func(ctx context.Context) {
+			close(fingReady)
+			<-c
+		})
+	})
+	garbage = nil
+	for i := 0; i < 2; i++ {
+		runtime.GC()
+	}
+	<-fingReady
+
 	var w bytes.Buffer
 	goroutineProf := Lookup("goroutine")
 
@@ -1367,8 +1445,22 @@
 
 	labels := labelMap{"label": "value"}
 	labelStr := "\n# labels: " + labels.String()
-	if !containsInOrder(prof, "\n50 @ ", "\n44 @", labelStr,
-		"\n40 @", "\n36 @", labelStr, "\n10 @", "\n9 @", labelStr, "\n1 @") {
+	selfLabel := labelMap{"self-label": "self-value"}
+	selfLabelStr := "\n# labels: " + selfLabel.String()
+	fingLabel := labelMap{"fing-label": "fing-value"}
+	fingLabelStr := "\n# labels: " + fingLabel.String()
+	orderedPrefix := []string{
+		"\n50 @ ",
+		"\n44 @", labelStr,
+		"\n40 @",
+		"\n36 @", labelStr,
+		"\n10 @",
+		"\n9 @", labelStr,
+		"\n1 @"}
+	if !containsInOrder(prof, append(orderedPrefix, selfLabelStr)...) {
+		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
+	}
+	if !containsInOrder(prof, append(orderedPrefix, fingLabelStr)...) {
 		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
 	}
 
@@ -1389,7 +1481,7 @@
 		36: {"label": "value"},
 		10: {},
 		9:  {"label": "value"},
-		1:  {},
+		1:  {"self-label": "self-value", "fing-label": "fing-value"},
 	}
 	if !containsCountsLabels(p, expectedLabels) {
 		t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
diff --git a/src/runtime/pprof/proto.go b/src/runtime/pprof/proto.go
index cdc4bd7..5214374 100644
--- a/src/runtime/pprof/proto.go
+++ b/src/runtime/pprof/proto.go
@@ -561,7 +561,7 @@
 		if last.Entry != newFrame.Entry { // newFrame is for a different function.
 			return false
 		}
-		if last.Function == newFrame.Function { // maybe recursion.
+		if runtime_FrameSymbolName(&last) == runtime_FrameSymbolName(&newFrame) { // maybe recursion.
 			return false
 		}
 	}
@@ -611,13 +611,14 @@
 	b.pb.uint64Opt(tagLocation_Address, uint64(firstFrame.PC))
 	for _, frame := range b.deck.frames {
 		// Write out each line in frame expansion.
-		funcID := uint64(b.funcs[frame.Function])
+		funcName := runtime_FrameSymbolName(&frame)
+		funcID := uint64(b.funcs[funcName])
 		if funcID == 0 {
 			funcID = uint64(len(b.funcs)) + 1
-			b.funcs[frame.Function] = int(funcID)
+			b.funcs[funcName] = int(funcID)
 			newFuncs = append(newFuncs, newFunc{
 				id:        funcID,
-				name:      runtime_FrameSymbolName(&frame),
+				name:      funcName,
 				file:      frame.File,
 				startLine: int64(runtime_FrameStartLine(&frame)),
 			})
diff --git a/src/runtime/pprof/proto_darwin.go b/src/runtime/pprof/proto_darwin.go
new file mode 100644
index 0000000..8db9e1d
--- /dev/null
+++ b/src/runtime/pprof/proto_darwin.go
@@ -0,0 +1,36 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import (
+	"errors"
+)
+
+// readMapping adds mapping entries for the text region of the running
+// process, using the mach_vm_region system call. Note that currently no
+// attempt is made to obtain the buildID information.
+func (b *profileBuilder) readMapping() {
+	if !machVMInfo(b.addMapping) {
+		b.addMappingEntry(0, 0, 0, "", "", true)
+	}
+}
+
+func readMainModuleMapping() (start, end uint64, exe, buildID string, err error) {
+	first := true
+	ok := machVMInfo(func(lo, hi, off uint64, file, build string) {
+		if first {
+			start, end = lo, hi
+			exe, buildID = file, build
+		}
+		// May see multiple text segments if Rosetta is used for running
+		// the Go toolchain itself.
+		first = false
+	})
+	if !ok {
+		return 0, 0, "", "", errors.New("machVMInfo failed")
+	}
+	return start, end, exe, buildID, nil
+}
diff --git a/src/runtime/pprof/proto_other.go b/src/runtime/pprof/proto_other.go
index 4a7fe79..7322e84 100644
--- a/src/runtime/pprof/proto_other.go
+++ b/src/runtime/pprof/proto_other.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !windows
+//go:build !windows && !darwin
 
 package pprof
 
@@ -25,6 +25,6 @@
 	}
 }
 
-func readMainModuleMapping() (start, end uint64, err error) {
-	return 0, 0, errors.New("not implemented")
+func readMainModuleMapping() (start, end uint64, exe, buildID string, err error) {
+	return 0, 0, "", "", errors.New("not implemented")
 }
diff --git a/src/runtime/pprof/proto_test.go b/src/runtime/pprof/proto_test.go
index 8ec9c91..85cd066 100644
--- a/src/runtime/pprof/proto_test.go
+++ b/src/runtime/pprof/proto_test.go
@@ -45,7 +45,7 @@
 	return string(js)
 }
 
-func TestConvertCPUProfileEmpty(t *testing.T) {
+func TestConvertCPUProfileNoSamples(t *testing.T) {
 	// A test server with mock cpu profile data.
 	var buf bytes.Buffer
 
@@ -101,16 +101,11 @@
 		addr2 = mprof.Mapping[1].Start
 		map2 = mprof.Mapping[1]
 		map2.BuildID, _ = elfBuildID(map2.File)
-	case "windows":
+	case "windows", "darwin", "ios":
 		addr1 = uint64(abi.FuncPCABIInternal(f1))
 		addr2 = uint64(abi.FuncPCABIInternal(f2))
 
-		exe, err := os.Executable()
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		start, end, err := readMainModuleMapping()
+		start, end, exe, buildID, err := readMainModuleMapping()
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -120,7 +115,7 @@
 			Start:        start,
 			Limit:        end,
 			File:         exe,
-			BuildID:      peBuildID(exe),
+			BuildID:      buildID,
 			HasFunctions: true,
 		}
 		map2 = &profile.Mapping{
@@ -128,7 +123,7 @@
 			Start:        start,
 			Limit:        end,
 			File:         exe,
-			BuildID:      peBuildID(exe),
+			BuildID:      buildID,
 			HasFunctions: true,
 		}
 	case "js", "wasip1":
diff --git a/src/runtime/pprof/proto_windows.go b/src/runtime/pprof/proto_windows.go
index d5ae4a5..f4dc44b 100644
--- a/src/runtime/pprof/proto_windows.go
+++ b/src/runtime/pprof/proto_windows.go
@@ -7,6 +7,7 @@
 import (
 	"errors"
 	"internal/syscall/windows"
+	"os"
 	"syscall"
 )
 
@@ -42,10 +43,14 @@
 	}
 }
 
-func readMainModuleMapping() (start, end uint64, err error) {
+func readMainModuleMapping() (start, end uint64, exe, buildID string, err error) {
+	exe, err = os.Executable()
+	if err != nil {
+		return 0, 0, "", "", err
+	}
 	snap, err := createModuleSnapshot()
 	if err != nil {
-		return 0, 0, err
+		return 0, 0, "", "", err
 	}
 	defer func() { _ = syscall.CloseHandle(snap) }()
 
@@ -53,10 +58,10 @@
 	module.Size = uint32(windows.SizeofModuleEntry32)
 	err = windows.Module32First(snap, &module)
 	if err != nil {
-		return 0, 0, err
+		return 0, 0, "", "", err
 	}
 
-	return uint64(module.ModBaseAddr), uint64(module.ModBaseAddr) + uint64(module.ModBaseSize), nil
+	return uint64(module.ModBaseAddr), uint64(module.ModBaseAddr) + uint64(module.ModBaseSize), exe, peBuildID(exe), nil
 }
 
 func createModuleSnapshot() (syscall.Handle, error) {
diff --git a/src/runtime/pprof/protomem_test.go b/src/runtime/pprof/protomem_test.go
index 156f628..5fb67c5 100644
--- a/src/runtime/pprof/protomem_test.go
+++ b/src/runtime/pprof/protomem_test.go
@@ -6,8 +6,12 @@
 
 import (
 	"bytes"
+	"fmt"
 	"internal/profile"
+	"internal/testenv"
 	"runtime"
+	"slices"
+	"strings"
 	"testing"
 )
 
@@ -82,3 +86,138 @@
 		})
 	}
 }
+
+func genericAllocFunc[T interface{ uint32 | uint64 }](n int) []T {
+	return make([]T, n)
+}
+
+func profileToStrings(p *profile.Profile) []string {
+	var res []string
+	for _, s := range p.Sample {
+		res = append(res, sampleToString(s))
+	}
+	return res
+}
+
+func sampleToString(s *profile.Sample) string {
+	var funcs []string
+	for i := len(s.Location) - 1; i >= 0; i-- {
+		loc := s.Location[i]
+		funcs = locationToStrings(loc, funcs)
+	}
+	return fmt.Sprintf("%s %v", strings.Join(funcs, ";"), s.Value)
+}
+
+func locationToStrings(loc *profile.Location, funcs []string) []string {
+	for j := range loc.Line {
+		line := loc.Line[len(loc.Line)-1-j]
+		funcs = append(funcs, line.Function.Name)
+	}
+	return funcs
+}
+
+// This is a regression test for https://go.dev/issue/64528 .
+func TestGenericsHashKeyInPprofBuilder(t *testing.T) {
+	previousRate := runtime.MemProfileRate
+	runtime.MemProfileRate = 1
+	defer func() {
+		runtime.MemProfileRate = previousRate
+	}()
+	for _, sz := range []int{128, 256} {
+		genericAllocFunc[uint32](sz / 4)
+	}
+	for _, sz := range []int{32, 64} {
+		genericAllocFunc[uint64](sz / 8)
+	}
+
+	runtime.GC()
+	buf := bytes.NewBuffer(nil)
+	if err := WriteHeapProfile(buf); err != nil {
+		t.Fatalf("writing profile: %v", err)
+	}
+	p, err := profile.Parse(buf)
+	if err != nil {
+		t.Fatalf("profile.Parse: %v", err)
+	}
+
+	actual := profileToStrings(p)
+	expected := []string{
+		"testing.tRunner;runtime/pprof.TestGenericsHashKeyInPprofBuilder;runtime/pprof.genericAllocFunc[go.shape.uint32] [1 128 0 0]",
+		"testing.tRunner;runtime/pprof.TestGenericsHashKeyInPprofBuilder;runtime/pprof.genericAllocFunc[go.shape.uint32] [1 256 0 0]",
+		"testing.tRunner;runtime/pprof.TestGenericsHashKeyInPprofBuilder;runtime/pprof.genericAllocFunc[go.shape.uint64] [1 32 0 0]",
+		"testing.tRunner;runtime/pprof.TestGenericsHashKeyInPprofBuilder;runtime/pprof.genericAllocFunc[go.shape.uint64] [1 64 0 0]",
+	}
+
+	for _, l := range expected {
+		if !slices.Contains(actual, l) {
+			t.Errorf("profile = %v\nwant = %v", strings.Join(actual, "\n"), l)
+		}
+	}
+}
+
+type opAlloc struct {
+	buf [128]byte
+}
+
+type opCall struct {
+}
+
+var sink []byte
+
+func storeAlloc() {
+	sink = make([]byte, 16)
+}
+
+func nonRecursiveGenericAllocFunction[CurrentOp any, OtherOp any](alloc bool) {
+	if alloc {
+		storeAlloc()
+	} else {
+		nonRecursiveGenericAllocFunction[OtherOp, CurrentOp](true)
+	}
+}
+
+func TestGenericsInlineLocations(t *testing.T) {
+	if testenv.OptimizationOff() {
+		t.Skip("skipping test with optimizations disabled")
+	}
+
+	previousRate := runtime.MemProfileRate
+	runtime.MemProfileRate = 1
+	defer func() {
+		runtime.MemProfileRate = previousRate
+		sink = nil
+	}()
+
+	nonRecursiveGenericAllocFunction[opAlloc, opCall](true)
+	nonRecursiveGenericAllocFunction[opCall, opAlloc](false)
+
+	runtime.GC()
+
+	buf := bytes.NewBuffer(nil)
+	if err := WriteHeapProfile(buf); err != nil {
+		t.Fatalf("writing profile: %v", err)
+	}
+	p, err := profile.Parse(buf)
+	if err != nil {
+		t.Fatalf("profile.Parse: %v", err)
+	}
+
+	const expectedSample = "testing.tRunner;runtime/pprof.TestGenericsInlineLocations;runtime/pprof.nonRecursiveGenericAllocFunction[go.shape.struct {},go.shape.struct { runtime/pprof.buf [128]uint8 }];runtime/pprof.nonRecursiveGenericAllocFunction[go.shape.struct { runtime/pprof.buf [128]uint8 },go.shape.struct {}];runtime/pprof.storeAlloc [1 16 1 16]"
+	const expectedLocation = "runtime/pprof.nonRecursiveGenericAllocFunction[go.shape.struct {},go.shape.struct { runtime/pprof.buf [128]uint8 }];runtime/pprof.nonRecursiveGenericAllocFunction[go.shape.struct { runtime/pprof.buf [128]uint8 },go.shape.struct {}];runtime/pprof.storeAlloc"
+	const expectedLocationNewInliner = "runtime/pprof.TestGenericsInlineLocations;" + expectedLocation
+	var s *profile.Sample
+	for _, sample := range p.Sample {
+		if sampleToString(sample) == expectedSample {
+			s = sample
+			break
+		}
+	}
+	if s == nil {
+		t.Fatalf("expected \n%s\ngot\n%s", expectedSample, strings.Join(profileToStrings(p), "\n"))
+	}
+	loc := s.Location[0]
+	actual := strings.Join(locationToStrings(loc, nil), ";")
+	if expectedLocation != actual && expectedLocationNewInliner != actual {
+		t.Errorf("expected a location with at least 3 functions\n%s\ngot\n%s\n", expectedLocation, actual)
+	}
+}
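
The expected strings in these tests come from sampleToString, which prints the call frames outermost-first followed by the sample values. A minimal package-internal sketch (hand-built sample with hypothetical values, not part of this change) showing the format:

	// Hypothetical sketch: build a two-frame sample by hand and render it with
	// sampleToString. Location[0] is the leaf frame, so it prints last.
	s := &profile.Sample{
		Value: []int64{1, 128, 0, 0},
		Location: []*profile.Location{
			{Line: []profile.Line{{Function: &profile.Function{Name: "runtime/pprof.genericAllocFunc[go.shape.uint32]"}}}},
			{Line: []profile.Line{{Function: &profile.Function{Name: "testing.tRunner"}}}},
		},
	}
	fmt.Println(sampleToString(s))
	// prints: testing.tRunner;runtime/pprof.genericAllocFunc[go.shape.uint32] [1 128 0 0]
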
diff --git a/src/runtime/pprof/runtime.go b/src/runtime/pprof/runtime.go
index 71f89ca..8d37c7d 100644
--- a/src/runtime/pprof/runtime.go
+++ b/src/runtime/pprof/runtime.go
@@ -31,7 +31,7 @@
 
 // SetGoroutineLabels sets the current goroutine's labels to match ctx.
 // A new goroutine inherits the labels of the goroutine that created it.
-// This is a lower-level API than Do, which should be used instead when possible.
+// This is a lower-level API than [Do], which should be used instead when possible.
 func SetGoroutineLabels(ctx context.Context) {
 	ctxLabels, _ := ctx.Value(labelContextKey{}).(*labelMap)
 	runtime_setProfLabel(unsafe.Pointer(ctxLabels))
diff --git a/src/runtime/pprof/vminfo_darwin.go b/src/runtime/pprof/vminfo_darwin.go
new file mode 100644
index 0000000..35b9e6d
--- /dev/null
+++ b/src/runtime/pprof/vminfo_darwin.go
@@ -0,0 +1,76 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import (
+	"os"
+	"unsafe"
+)
+
+func isExecutable(protection int32) bool {
+	return (protection&_VM_PROT_EXECUTE) != 0 && (protection&_VM_PROT_READ) != 0
+}
+
+// machVMInfo uses the mach_vm_region system call to add mapping entries
+// for the text region of the running process.
+func machVMInfo(addMapping func(lo, hi, offset uint64, file, buildID string)) bool {
+	added := false
+	var addr uint64 = 0x1
+	for {
+		var memRegionSize uint64
+		var info machVMRegionBasicInfoData
+		// Get the start address and size of the next memory region.
+		kr := mach_vm_region(
+			&addr,
+			&memRegionSize,
+			unsafe.Pointer(&info))
+		if kr != 0 {
+			if kr == _MACH_SEND_INVALID_DEST {
+				// No more memory regions.
+				return true
+			}
+			return added // return true if at least one mapping was added
+		}
+		if isExecutable(info.Protection) {
+			// NOTE: the meaning/value of Offset is unclear. However,
+			// this likely doesn't matter as the text segment's file
+			// offset is usually 0.
+			addMapping(addr,
+				addr+memRegionSize,
+				read64(&info.Offset),
+				regionFilename(addr),
+				"")
+			added = true
+		}
+		addr += memRegionSize
+	}
+}
+
+func read64(p *[8]byte) uint64 {
+	// all supported darwin platforms are little endian
+	return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
+}
+
+func regionFilename(address uint64) string {
+	buf := make([]byte, _MAXPATHLEN)
+	r := proc_regionfilename(
+		os.Getpid(),
+		address,
+		unsafe.SliceData(buf),
+		int64(cap(buf)))
+	if r == 0 {
+		return ""
+	}
+	return string(buf[:r])
+}
+
+// mach_vm_region and proc_regionfilename are implemented by
+// the runtime package (runtime/sys_darwin.go).
+//
+//go:noescape
+func mach_vm_region(address, region_size *uint64, info unsafe.Pointer) int32
+
+//go:noescape
+func proc_regionfilename(pid int, address uint64, buf *byte, buflen int64) int32
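
machVMInfo drives everything through the addMapping callback; a minimal package-internal sketch (hypothetical helper, not part of this change) that collects every executable mapping it reports:

	// Hypothetical helper: gather the mappings machVMInfo reports into a slice.
	type vmMapping struct {
		lo, hi, off uint64
		file, build string
	}

	func collectMachMappings() ([]vmMapping, bool) {
		var out []vmMapping
		ok := machVMInfo(func(lo, hi, off uint64, file, buildID string) {
			out = append(out, vmMapping{lo, hi, off, file, buildID})
		})
		// ok is false only if mach_vm_region failed before any executable
		// region was added.
		return out, ok
	}
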
diff --git a/src/runtime/pprof/vminfo_darwin_test.go b/src/runtime/pprof/vminfo_darwin_test.go
new file mode 100644
index 0000000..8749a13
--- /dev/null
+++ b/src/runtime/pprof/vminfo_darwin_test.go
@@ -0,0 +1,163 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !ios
+
+package pprof
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"internal/abi"
+	"internal/testenv"
+	"os"
+	"os/exec"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+func TestVMInfo(t *testing.T) {
+	var begin, end, offset uint64
+	var filename string
+	first := true
+	machVMInfo(func(lo, hi, off uint64, file, buildID string) {
+		if first {
+			begin = lo
+			end = hi
+			offset = off
+			filename = file
+		}
+		// We may see multiple text segments if Rosetta is used to run
+		// the Go toolchain itself.
+		first = false
+	})
+	lo, hi, err := useVMMapWithRetry(t)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := begin, lo; got != want {
+		t.Errorf("got %x, want %x", got, want)
+	}
+	if got, want := end, hi; got != want {
+		t.Errorf("got %x, want %x", got, want)
+	}
+	if got, want := offset, uint64(0); got != want {
+		t.Errorf("got %x, want %x", got, want)
+	}
+	if !strings.HasSuffix(filename, "pprof.test") {
+		t.Errorf("got %s, want pprof.test", filename)
+	}
+	addr := uint64(abi.FuncPCABIInternal(TestVMInfo))
+	if addr < lo || addr > hi {
+		t.Errorf("%x..%x does not contain function %p (%x)", lo, hi, TestVMInfo, addr)
+	}
+}
+
+func useVMMapWithRetry(t *testing.T) (hi, lo uint64, err error) {
+	var retryable bool
+	for {
+		hi, lo, retryable, err = useVMMap(t)
+		if err == nil {
+			return hi, lo, nil
+		}
+		if !retryable {
+			return 0, 0, err
+		}
+		t.Logf("retrying vmmap after error: %v", err)
+	}
+}
+
+func useVMMap(t *testing.T) (hi, lo uint64, retryable bool, err error) {
+	pid := strconv.Itoa(os.Getpid())
+	testenv.MustHaveExecPath(t, "vmmap")
+	cmd := testenv.Command(t, "vmmap", pid)
+	out, cmdErr := cmd.Output()
+	if cmdErr != nil {
+		t.Logf("vmmap output: %s", out)
+		if ee, ok := cmdErr.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+			t.Logf("%v: %v\n%s", cmd, cmdErr, ee.Stderr)
+		}
+		retryable = bytes.Contains(out, []byte("resource shortage"))
+		t.Logf("%v: %v", cmd, cmdErr)
+		if retryable {
+			return 0, 0, true, cmdErr
+		}
+	}
+	// Always parse the output of vmmap since it may return an error
+	// code even if it successfully reports the text segment information
+	// required for this test.
+	hi, lo, err = parseVmmap(out)
+	if err != nil {
+		if cmdErr != nil {
+			return 0, 0, false, fmt.Errorf("failed to parse vmmap output, vmmap reported an error: %v", err)
+		}
+		t.Logf("vmmap output: %s", out)
+		return 0, 0, false, fmt.Errorf("failed to parse vmmap output, vmmap did not report an error: %v", err)
+	}
+	return hi, lo, false, nil
+}
+
+// parseVmmap parses the output of vmmap and returns the start and end addresses of the first r-x __TEXT segment in the output.
+func parseVmmap(data []byte) (hi, lo uint64, err error) {
+	// vmmap 53799
+	// Process:         gopls [53799]
+	// Path:            /Users/USER/*/gopls
+	// Load Address:    0x1029a0000
+	// Identifier:      gopls
+	// Version:         ???
+	// Code Type:       ARM64
+	// Platform:        macOS
+	// Parent Process:  Code Helper (Plugin) [53753]
+	//
+	// Date/Time:       2023-05-25 09:45:49.331 -0700
+	// Launch Time:     2023-05-23 09:35:37.514 -0700
+	// OS Version:      macOS 13.3.1 (22E261)
+	// Report Version:  7
+	// Analysis Tool:   /Applications/Xcode.app/Contents/Developer/usr/bin/vmmap
+	// Analysis Tool Version:  Xcode 14.3 (14E222b)
+	//
+	// Physical footprint:         1.2G
+	// Physical footprint (peak):  1.2G
+	// Idle exit:                  untracked
+	// ----
+	//
+	// Virtual Memory Map of process 53799 (gopls)
+	// Output report format:  2.4  -64-bit process
+	// VM page size:  16384 bytes
+	//
+	// ==== Non-writable regions for process 53799
+	// REGION TYPE                    START END         [ VSIZE  RSDNT  DIRTY   SWAP] PRT/MAX SHRMOD PURGE    REGION DETAIL
+	// __TEXT                      1029a0000-1033bc000    [ 10.1M  7360K     0K     0K] r-x/rwx SM=COW          /Users/USER/*/gopls
+	// __DATA_CONST                1033bc000-1035bc000    [ 2048K  2000K     0K     0K] r--/rwSM=COW          /Users/USER/*/gopls
+	// __DATA_CONST                1035bc000-103a48000    [ 4656K  3824K     0K     0K] r--/rwSM=COW          /Users/USER/*/gopls
+	// __LINKEDIT                  103b00000-103c98000    [ 1632K  1616K     0K     0K] r--/r-SM=COW          /Users/USER/*/gopls
+	// dyld private memory         103cd8000-103cdc000    [   16K     0K     0K     0K] ---/--SM=NUL
+	// shared memory               103ce4000-103ce8000    [   16K    16K    16K     0K] r--/r-SM=SHM
+	// MALLOC metadata             103ce8000-103cec000    [   16K    16K    16K     0K] r--/rwx SM=COW          DefaultMallocZone_0x103ce8000 zone structure
+	// MALLOC guard page           103cf0000-103cf4000    [   16K     0K     0K     0K] ---/rwx SM=COW
+	// MALLOC guard page           103cfc000-103d00000    [   16K     0K     0K     0K] ---/rwx SM=COW
+	// MALLOC guard page           103d00000-103d04000    [   16K     0K     0K     0K] ---/rwx SM=NUL
+
+	banner := "==== Non-writable regions for process"
+	grabbing := false
+	sc := bufio.NewScanner(bytes.NewReader(data))
+	for sc.Scan() {
+		l := sc.Text()
+		if grabbing {
+			p := strings.Fields(l)
+			if len(p) > 7 && p[0] == "__TEXT" && p[7] == "r-x/rwx" {
+				locs := strings.Split(p[1], "-")
+				start, _ := strconv.ParseUint(locs[0], 16, 64)
+				end, _ := strconv.ParseUint(locs[1], 16, 64)
+				return start, end, nil
+			}
+		}
+		if strings.HasPrefix(l, banner) {
+			grabbing = true
+		}
+	}
+	return 0, 0, fmt.Errorf("vmmap: no text segment found")
+}
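
A minimal package-internal sketch (hypothetical, reusing the trimmed vmmap excerpt from the comment above) of how parseVmmap recovers the __TEXT range:

	// Hypothetical sketch: parse a trimmed vmmap excerpt and print the __TEXT range.
	const vmmapExcerpt = `==== Non-writable regions for process 53799
	REGION TYPE                    START END         [ VSIZE  RSDNT  DIRTY   SWAP] PRT/MAX SHRMOD PURGE    REGION DETAIL
	__TEXT                      1029a0000-1033bc000    [ 10.1M  7360K     0K     0K] r-x/rwx SM=COW          /Users/USER/*/gopls
	`

	func demoParseVmmap() {
		start, end, err := parseVmmap([]byte(vmmapExcerpt))
		if err != nil {
			panic(err)
		}
		fmt.Printf("__TEXT %x-%x\n", start, end) // __TEXT 1029a0000-1033bc000
	}
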
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
index 76d8ba4..82d85cd 100644
--- a/src/runtime/preempt.go
+++ b/src/runtime/preempt.go
@@ -385,7 +385,7 @@
 		// Not Go code.
 		return false, 0
 	}
-	if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc, nil) == 0 {
+	if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc) == 0 {
 		// We probably stopped at a half-executed CALL instruction,
 		// where the LR is updated but the PC has not. If we preempt
 		// here we'll see a seemingly self-recursive call, which is in
@@ -414,7 +414,7 @@
 		return false, 0
 	}
 	// Check the inner-most name
-	u, uf := newInlineUnwinder(f, pc, nil)
+	u, uf := newInlineUnwinder(f, pc)
 	name := u.srcFunc(uf).name()
 	if hasPrefix(name, "runtime.") ||
 		hasPrefix(name, "runtime/internal/") ||
diff --git a/src/runtime/preempt_arm.s b/src/runtime/preempt_arm.s
index 8f243c0..b68df5d 100644
--- a/src/runtime/preempt_arm.s
+++ b/src/runtime/preempt_arm.s
@@ -19,9 +19,9 @@
 	MOVW R12, 48(R13)
 	MOVW CPSR, R0
 	MOVW R0, 52(R13)
-	MOVB ·goarm(SB), R0
-	CMP $6, R0
-	BLT nofp
+	MOVB ·goarmsoftfp(SB), R0
+	CMP $0, R0
+	BNE nofp
 	MOVW FPCR, R0
 	MOVW R0, 56(R13)
 	MOVD F0, 60(R13)
@@ -42,9 +42,9 @@
 	MOVD F15, 180(R13)
 nofp:
 	CALL ·asyncPreempt2(SB)
-	MOVB ·goarm(SB), R0
-	CMP $6, R0
-	BLT nofp2
+	MOVB ·goarmsoftfp(SB), R0
+	CMP $0, R0
+	BNE nofp2
 	MOVD 180(R13), F15
 	MOVD 172(R13), F14
 	MOVD 164(R13), F13
diff --git a/src/runtime/preempt_riscv64.s b/src/runtime/preempt_riscv64.s
index 56df6c3..bbb6447 100644
--- a/src/runtime/preempt_riscv64.s
+++ b/src/runtime/preempt_riscv64.s
@@ -5,7 +5,7 @@
 
 TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
 	MOV X1, -464(X2)
-	ADD $-464, X2
+	SUB $464, X2
 	MOV X5, 8(X2)
 	MOV X6, 16(X2)
 	MOV X7, 24(X2)
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index afb33c1..0616731 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -8,6 +8,8 @@
 	"internal/abi"
 	"internal/cpu"
 	"internal/goarch"
+	"internal/goexperiment"
+	"internal/goos"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -84,7 +86,7 @@
 // semi-persistent CPU underutilization.
 //
 // The general pattern for submission is:
-// 1. Submit work to the local run queue, timer heap, or GC state.
+// 1. Submit work to the local or global run queue, timer heap, or GC state.
 // 2. #StoreLoad-style memory barrier.
 // 3. Check sched.nmspinning.
 //
@@ -244,8 +246,10 @@
 	// list can arrive a few different ways, but it will always
 	// contain the init tasks computed by the linker for all the
 	// packages in the program (excluding those added at runtime
-	// by package plugin).
-	for _, m := range activeModules() {
+	// by package plugin). Run through the modules in dependency
+	// order (the order they are initialized by the dynamic
+	// loader, i.e. the order they are added to the moduledata linked list).
+	for m := &firstmoduledata; m != nil; m = m.next {
 		doInit(m.inittasks)
 	}
 
@@ -514,7 +518,20 @@
 //go:nosplit
 //go:nowritebarrierrec
 func badmorestackg0() {
-	writeErrStr("fatal: morestack on g0\n")
+	if !crashStackImplemented {
+		writeErrStr("fatal: morestack on g0\n")
+		return
+	}
+
+	g := getg()
+	switchToCrashStack(func() {
+		print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
+		g.m.traceback = 2 // include pc and sp in stack trace
+		traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
+		print("\n")
+
+		throw("morestack on g0")
+	})
 }
 
 //go:nosplit
@@ -528,6 +545,45 @@
 	throw("ctxt != 0")
 }
 
+// gcrash is a fake g that can be used when crashing due to bad
+// stack conditions.
+var gcrash g
+
+var crashingG atomic.Pointer[g]
+
+// Switch to crashstack and call fn, with special handling of
+// concurrent and recursive cases.
+//
+// Nosplit as it is called in a bad stack condition (we know
+// morestack would fail).
+//
+//go:nosplit
+//go:nowritebarrierrec
+func switchToCrashStack(fn func()) {
+	me := getg()
+	if crashingG.CompareAndSwapNoWB(nil, me) {
+		switchToCrashStack0(fn) // should never return
+		abort()
+	}
+	if crashingG.Load() == me {
+		// recursive crashing. too bad.
+		writeErrStr("fatal: recursive switchToCrashStack\n")
+		abort()
+	}
+	// Another g is crashing. Give it some time; hopefully it will finish its traceback.
+	usleep_no_g(100)
+	writeErrStr("fatal: concurrent switchToCrashStack\n")
+	abort()
+}
+
+// Disable crash stack on Windows for now. Apparently, throwing an exception
+// on a non-system-allocated crash stack causes EXCEPTION_STACK_OVERFLOW and
+// hangs the process (see issue 63938).
+const crashStackImplemented = (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "mips64" || GOARCH == "mips64le" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64" || GOARCH == "wasm") && GOOS != "windows"
+
+//go:noescape
+func switchToCrashStack0(fn func()) // in assembly
+
 func lockedOSThread() bool {
 	gp := getg()
 	return gp.lockedm != 0 && gp.m.lockedg != 0
@@ -703,6 +759,8 @@
 	lockInit(&reflectOffs.lock, lockRankReflectOffs)
 	lockInit(&finlock, lockRankFin)
 	lockInit(&cpuprof.lock, lockRankCpuprof)
+	allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
+	execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
 	traceLockInit()
 	// Enforce that this lock is always a leaf lock.
 	// All of this lock's critical sections should be
@@ -721,14 +779,15 @@
 	// The world starts stopped.
 	worldStopped()
 
+	ticks.init() // run as early as possible
 	moduledataverify()
 	stackinit()
 	mallocinit()
 	godebug := getGodebugEarly()
 	initPageTrace(godebug) // must run after mallocinit but before anything allocates
 	cpuinit(godebug)       // must run before alginit
-	alginit()              // maps, hash, fastrand must not be used before this call
-	fastrandinit()         // must run before mcommoninit
+	randinit()             // must run before alginit, mcommoninit
+	alginit()              // maps, hash, rand must not be used before this call
 	mcommoninit(gp.m, -1)
 	modulesinit()   // provides activeModules
 	typelinksinit() // uses maps, activeModules
@@ -741,9 +800,16 @@
 	goargs()
 	goenvs()
 	secure()
+	checkfds()
 	parsedebugvars()
 	gcinit()
 
+	// Allocate stack space that can be used when crashing due to bad stack
+	// conditions, e.g. morestack on g0.
+	gcrash.stack = stackalloc(16384)
+	gcrash.stackguard0 = gcrash.stack.lo + 1000
+	gcrash.stackguard1 = gcrash.stack.lo + 1000
+
 	// if disableMemoryProfiling is set, update MemProfileRate to 0 to turn off memprofile.
 	// Note: parsedebugvars may update MemProfileRate, but when disableMemoryProfiling is
 	// set to true by the linker, it means that nothing is consuming the profile, it is
@@ -836,18 +902,7 @@
 		mp.id = mReserveID()
 	}
 
-	lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
-	hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
-	if lo|hi == 0 {
-		hi = 1
-	}
-	// Same behavior as for 1.17.
-	// TODO: Simplify this.
-	if goarch.BigEndian {
-		mp.fastrand = uint64(lo)<<32 | uint64(hi)
-	} else {
-		mp.fastrand = uint64(hi)<<32 | uint64(lo)
-	}
+	mrandinit(mp)
 
 	mpreinit(mp)
 	if mp.gsignal != nil {
@@ -858,7 +913,7 @@
 	// when it is just in a register or thread-local storage.
 	mp.alllink = allm
 
-	// NumCgoCall() iterates over allm w/o schedlock,
+	// NumCgoCall() and others iterate over allm w/o schedlock,
 	// so we need to publish it safely.
 	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
 	unlock(&sched.lock)
@@ -879,19 +934,22 @@
 	return mp.ncgo > 0 || mp.isextra
 }
 
-var fastrandseed uintptr
+const (
+	// osHasLowResTimer indicates that the platform's internal timer system has a low resolution,
+	// typically on the order of 1 ms or more.
+	osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
 
-func fastrandinit() {
-	s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
-	getRandomData(s)
-}
+	// osHasLowResClockInt is osHasLowResClock but in integer form, so it can be used to create
+	// constants conditionally.
+	osHasLowResClockInt = goos.IsWindows
+
+	// osHasLowResClock indicates that timestamps produced by nanotime on the platform have a
+	// low resolution, typically on the order of 1 ms or more.
+	osHasLowResClock = osHasLowResClockInt > 0
+)
 
 // Mark gp ready to run.
 func ready(gp *g, traceskip int, next bool) {
-	if traceEnabled() {
-		traceGoUnpark(gp, traceskip)
-	}
-
 	status := readgstatus(gp)
 
 	// Mark runnable.
@@ -902,7 +960,12 @@
 	}
 
 	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
+	trace := traceAcquire()
 	casgstatus(gp, _Gwaiting, _Grunnable)
+	if trace.ok() {
+		trace.GoUnpark(gp, traceskip)
+		traceRelease(trace)
+	}
 	runqput(mp.p.ptr(), gp, next)
 	wakep()
 	releasem(mp)
@@ -1217,6 +1280,10 @@
 	return stwReasonStrings[r]
 }
 
+func (r stwReason) isGC() bool {
+	return r == stwGCMarkTerm || r == stwGCSweepTerm
+}
+
 // If you add to this list, also add it to src/internal/trace/parser.go.
 // If you change the values of any of the stw* constants, bump the trace
 // version number and make a copy of this.
@@ -1240,6 +1307,18 @@
 	stwForTestResetDebugLog:        "ResetDebugLog (test)",
 }
 
+// worldStop provides context from the stop-the-world required by the
+// start-the-world.
+type worldStop struct {
+	reason stwReason
+	start  int64
+}
+
+// Temporary variable for stopTheWorld, when it can't write to the stack.
+//
+// Protected by worldsema.
+var stopTheWorldContext worldStop
+
 // stopTheWorld stops all P's from executing goroutines, interrupting
 // all goroutines at GC safe points and records reason as the reason
 // for the stop. On return, only the current goroutine's P is running.
@@ -1254,7 +1333,10 @@
 // This is also used by routines that do stack dumps. If the system is
 // in panic or being exited, this may not reliably stop all
 // goroutines.
-func stopTheWorld(reason stwReason) {
+//
+// Returns the STW context. When starting the world, this context must be
+// passed to startTheWorld.
+func stopTheWorld(reason stwReason) worldStop {
 	semacquire(&worldsema)
 	gp := getg()
 	gp.m.preemptoff = reason.String()
@@ -1269,16 +1351,22 @@
 		// must have preempted all goroutines, including any attempting
 		// to scan our stack, in which case, any stack shrinking will
 		// have already completed by the time we exit.
-		// Don't provide a wait reason because we're still executing.
+		//
+		// N.B. The execution tracer is not aware of this status
+		// transition and handles it specially based on the
+		// wait reason.
 		casGToWaiting(gp, _Grunning, waitReasonStoppingTheWorld)
-		stopTheWorldWithSema(reason)
+		stopTheWorldContext = stopTheWorldWithSema(reason) // avoid write to stack
 		casgstatus(gp, _Gwaiting, _Grunning)
 	})
+	return stopTheWorldContext
 }
 
 // startTheWorld undoes the effects of stopTheWorld.
-func startTheWorld() {
-	systemstack(func() { startTheWorldWithSema() })
+//
+// w must be the worldStop returned by stopTheWorld.
+func startTheWorld(w worldStop) {
+	systemstack(func() { startTheWorldWithSema(0, w) })
 
 	// worldsema must be held over startTheWorldWithSema to ensure
 	// gomaxprocs cannot change while worldsema is held.
@@ -1304,14 +1392,16 @@
 // stopTheWorldGC has the same effect as stopTheWorld, but blocks
 // until the GC is not running. It also blocks a GC from starting
 // until startTheWorldGC is called.
-func stopTheWorldGC(reason stwReason) {
+func stopTheWorldGC(reason stwReason) worldStop {
 	semacquire(&gcsema)
-	stopTheWorld(reason)
+	return stopTheWorld(reason)
 }
 
 // startTheWorldGC undoes the effects of stopTheWorldGC.
-func startTheWorldGC() {
-	startTheWorld()
+//
+// w must be the worldStop returned by stopTheWorld.
+func startTheWorldGC(w worldStop) {
+	startTheWorld(w)
 	semrelease(&gcsema)
 }
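
With this change the stop-the-world APIs hand back a worldStop context that must be threaded through to the matching start call; a minimal hypothetical runtime-internal caller sketch (reason taken from the table above, not part of this change):

	// Hypothetical caller sketch.
	stw := stopTheWorldGC(stwForTestResetDebugLog) // capture the worldStop context
	// ... do work while the world is stopped ...
	startTheWorldGC(stw) // pass the same context back when restarting
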
 
@@ -1333,13 +1423,18 @@
 //
 //	semacquire(&worldsema, 0)
 //	m.preemptoff = "reason"
-//	systemstack(stopTheWorldWithSema)
+//	var stw worldStop
+//	systemstack(func() {
+//		stw = stopTheWorldWithSema(reason)
+//	})
 //
 // When finished, the caller must either call startTheWorld or undo
 // these three operations separately:
 //
 //	m.preemptoff = ""
-//	systemstack(startTheWorldWithSema)
+//	systemstack(func() {
+//		now = startTheWorldWithSema(stw)
+//	})
 //	semrelease(&worldsema)
 //
 // It is allowed to acquire worldsema once and then execute multiple
@@ -1348,9 +1443,14 @@
 // startTheWorldWithSema and stopTheWorldWithSema.
 // Holding worldsema causes any other goroutines invoking
 // stopTheWorld to block.
-func stopTheWorldWithSema(reason stwReason) {
-	if traceEnabled() {
-		traceSTWStart(reason)
+//
+// Returns the STW context. When starting the world, this context must be
+// passed to startTheWorldWithSema.
+func stopTheWorldWithSema(reason stwReason) worldStop {
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.STWStart(reason)
+		traceRelease(trace)
 	}
 	gp := getg()
 
@@ -1361,6 +1461,7 @@
 	}
 
 	lock(&sched.lock)
+	start := nanotime() // exclude time waiting for sched.lock from start and total time metrics.
 	sched.stopwait = gomaxprocs
 	sched.gcwaiting.Store(true)
 	preemptall()
@@ -1368,17 +1469,22 @@
 	gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
 	sched.stopwait--
 	// try to retake all P's in Psyscall status
+	trace = traceAcquire()
 	for _, pp := range allp {
 		s := pp.status
 		if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
-			if traceEnabled() {
-				traceGoSysBlock(pp)
-				traceProcStop(pp)
+			if trace.ok() {
+				trace.GoSysBlock(pp)
+				trace.ProcSteal(pp, false)
 			}
 			pp.syscalltick++
 			sched.stopwait--
 		}
 	}
+	if trace.ok() {
+		traceRelease(trace)
+	}
+
 	// stop idle P's
 	now := nanotime()
 	for {
@@ -1404,6 +1510,13 @@
 		}
 	}
 
+	startTime := nanotime() - start
+	if reason.isGC() {
+		sched.stwStoppingTimeGC.record(startTime)
+	} else {
+		sched.stwStoppingTimeOther.record(startTime)
+	}
+
 	// sanity checks
 	bad := ""
 	if sched.stopwait != 0 {
@@ -1428,15 +1541,24 @@
 	}
 
 	worldStopped()
+
+	return worldStop{reason: reason, start: start}
 }
 
-func startTheWorldWithSema() int64 {
+// reason is the same STW reason passed to stopTheWorld. start is the start
+// time returned by stopTheWorld.
+//
+// now is the current time; prefer to pass 0 to capture a fresh timestamp.
+//
+// startTheWorldWithSema returns now.
+func startTheWorldWithSema(now int64, w worldStop) int64 {
 	assertWorldStopped()
 
 	mp := acquirem() // disable preemption because it can be holding p in a local var
 	if netpollinited() {
-		list := netpoll(0) // non-blocking
+		list, delta := netpoll(0) // non-blocking
 		injectglist(&list)
+		netpollAdjustWaiters(delta)
 	}
 	lock(&sched.lock)
 
@@ -1473,9 +1595,19 @@
 	}
 
 	// Capture start-the-world time before doing clean-up tasks.
-	startTime := nanotime()
-	if traceEnabled() {
-		traceSTWDone()
+	if now == 0 {
+		now = nanotime()
+	}
+	totalTime := now - w.start
+	if w.reason.isGC() {
+		sched.stwTotalTimeGC.record(totalTime)
+	} else {
+		sched.stwTotalTimeOther.record(totalTime)
+	}
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.STWDone()
+		traceRelease(trace)
 	}
 
 	// Wakeup an additional proc in case we have excessive runnable goroutines
@@ -1485,7 +1617,7 @@
 
 	releasem(mp)
 
-	return startTime
+	return now
 }
 
 // usesLibcall indicates whether this runtime performs system calls
@@ -1495,7 +1627,7 @@
 	case "aix", "darwin", "illumos", "ios", "solaris", "windows":
 		return true
 	case "openbsd":
-		return GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64"
+		return GOARCH != "mips64"
 	}
 	return false
 }
@@ -1507,10 +1639,7 @@
 	case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
 		return true
 	case "openbsd":
-		switch GOARCH {
-		case "386", "amd64", "arm", "arm64":
-			return true
-		}
+		return GOARCH != "mips64"
 	}
 	return false
 }
@@ -1690,6 +1819,8 @@
 	}
 	throw("m not found in allm")
 found:
+	// Events must not be traced after this point.
+
 	// Delay reaping m until it's done with the stack.
 	//
 	// Put mp on the free list, though it will not be reaped while freeWait
@@ -1699,12 +1830,16 @@
 	//
 	// Note that the free list must not be linked through alllink because
 	// some functions walk allm without locking, so may be using alllink.
+	//
+	// N.B. It's important that the M appears on the free list simultaneously
+	// with it being removed so that the tracer can find it.
 	mp.freeWait.Store(freeMWait)
 	mp.freelink = sched.freem
 	sched.freem = mp
 	unlock(&sched.lock)
 
 	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
+	sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
 
 	// Release the P.
 	handoffp(releasep())
@@ -1754,10 +1889,35 @@
 // fn will run on every CPU executing Go code, but it acts as a global
 // memory barrier. GC uses this as a "ragged barrier."
 //
-// The caller must hold worldsema.
+// The caller must hold worldsema. fn must not refer to any
+// part of the current goroutine's stack, since the GC may move it.
+func forEachP(reason waitReason, fn func(*p)) {
+	systemstack(func() {
+		gp := getg().m.curg
+		// Mark the user stack as preemptible so that it may be scanned.
+		// Otherwise, our attempt to force all P's to a safepoint could
+		// result in a deadlock as we attempt to preempt a worker that's
+		// trying to preempt us (e.g. for a stack scan).
+		//
+		// N.B. The execution tracer is not aware of this status
+		// transition and handles it specially based on the
+		// wait reason.
+		casGToWaiting(gp, _Grunning, reason)
+		forEachPInternal(fn)
+		casgstatus(gp, _Gwaiting, _Grunning)
+	})
+}
+
+// forEachPInternal calls fn(p) for every P p when p reaches a GC safe point.
+// It is the internal implementation of forEachP.
+//
+// The caller must hold worldsema and either must ensure that a GC is not
+// running (otherwise this may deadlock with the GC trying to preempt this P)
+// or it must leave its goroutine in a preemptible state before it switches
+// to the systemstack. Due to these restrictions, prefer forEachP when possible.
 //
 //go:systemstack
-func forEachP(fn func(*p)) {
+func forEachPInternal(fn func(*p)) {
 	mp := acquirem()
 	pp := getg().m.p.ptr()
 
@@ -1799,13 +1959,21 @@
 	// off to induce safe point function execution.
 	for _, p2 := range allp {
 		s := p2.status
+
+		// We need to be fine-grained about tracing here, since handoffp
+		// might call into the tracer, and the tracer is non-reentrant.
+		trace := traceAcquire()
 		if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
-			if traceEnabled() {
-				traceGoSysBlock(p2)
-				traceProcStop(p2)
+			if trace.ok() {
+				// It's important that we traceRelease before we call handoffp, which may also traceAcquire.
+				trace.GoSysBlock(p2)
+				trace.ProcSteal(p2, false)
+				traceRelease(trace)
 			}
 			p2.syscalltick++
 			handoffp(p2)
+		} else if trace.ok() {
+			traceRelease(trace)
 		}
 	}
 
@@ -1905,6 +2073,7 @@
 		lock(&sched.lock)
 		var newList *m
 		for freem := sched.freem; freem != nil; {
+			// Wait for freeWait to indicate that freem's stack is unused.
 			wait := freem.freeWait.Load()
 			if wait == freeMWait {
 				next := freem.freelink
@@ -1913,6 +2082,12 @@
 				freem = next
 				continue
 			}
+			// Drop any remaining trace resources.
+			// Ms can continue to emit events all the way until wait != freeMWait,
+			// so it's only safe to call traceThreadDestroy at this point.
+			if traceEnabled() || traceShuttingDown() {
+				traceThreadDestroy(freem)
+			}
 			// Free the stack if needed. For freeMRef, there is
 			// nothing to do except drop freem from the sched.freem
 			// list.
@@ -2037,30 +2212,10 @@
 	osSetupTLS(mp)
 
 	// Install g (= m->g0) and set the stack bounds
-	// to match the current stack. If we don't actually know
-	// how big the stack is, like we don't know how big any
-	// scheduling stack is, but we assume there's at least 32 kB.
-	// If we can get a more accurate stack bound from pthread,
-	// use that.
+	// to match the current stack.
 	setg(mp.g0)
-	gp := getg()
-	gp.stack.hi = getcallersp() + 1024
-	gp.stack.lo = getcallersp() - 32*1024
-	if !signal && _cgo_getstackbound != nil {
-		// Don't adjust if called from the signal handler.
-		// We are on the signal stack, not the pthread stack.
-		// (We could get the stack bounds from sigaltstack, but
-		// we're getting out of the signal handler very soon
-		// anyway. Not worth it.)
-		var bounds [2]uintptr
-		asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
-		// getstackbound is an unsupported no-op on Windows.
-		if bounds[0] != 0 {
-			gp.stack.lo = bounds[0]
-			gp.stack.hi = bounds[1]
-		}
-	}
-	gp.stackguard0 = gp.stack.lo + stackGuard
+	sp := getcallersp()
+	callbackUpdateSystemStack(mp, sp, signal)
 
 	// Should mark we are already in Go now.
 	// Otherwise, we may call needm again when we get a signal, before cgocallbackg1,
@@ -2071,9 +2226,27 @@
 	asminit()
 	minit()
 
+	// Emit a trace event for this dead -> syscall transition,
+	// but only in the new tracer and only if we're not in a signal handler.
+	//
+	// N.B. the tracer can run on a bare M just fine, we just have
+	// to make sure to do this before setg(nil) and unminit.
+	var trace traceLocker
+	if goexperiment.ExecTracer2 && !signal {
+		trace = traceAcquire()
+	}
+
 	// mp.curg is now a real goroutine.
 	casgstatus(mp.curg, _Gdead, _Gsyscall)
 	sched.ngsys.Add(-1)
+
+	if goexperiment.ExecTracer2 && !signal {
+		if trace.ok() {
+			trace.GoCreateSyscall(mp.curg)
+			traceRelease(trace)
+		}
+	}
+	mp.isExtraInSig = signal
 }
 
 // Acquire an extra m and bind it to the C thread when a pthread key has been created.
@@ -2136,8 +2309,10 @@
 	if raceenabled {
 		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
 	}
-	if traceEnabled() {
-		traceOneNewExtraM(gp)
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.OneNewExtraM(gp)
+		traceRelease(trace)
 	}
 	// put on allg for garbage collector
 	allgadd(gp)
@@ -2177,20 +2352,83 @@
 // So that the destructor would invoke dropm while the non-Go thread is exiting.
 // This is much faster since it avoids expensive signal-related syscalls.
 //
-// NOTE: this always runs without a P, so, nowritebarrierrec required.
+// This always runs without a P, so //go:nowritebarrierrec is required.
+//
+// This may run with a different stack than was recorded in g0 (there is no
+// call to callbackUpdateSystemStack prior to dropm), so this must be
+// //go:nosplit to avoid the stack bounds check.
 //
 //go:nowritebarrierrec
+//go:nosplit
 func dropm() {
 	// Clear m and g, and return m to the extra list.
 	// After the call to setg we can only call nosplit functions
 	// with no pointer manipulation.
 	mp := getg().m
 
+	// Emit a trace event for this syscall -> dead transition,
+	// but only in the new tracer.
+	//
+	// N.B. the tracer can run on a bare M just fine, we just have
+	// to make sure to do this before setg(nil) and unminit.
+	var trace traceLocker
+	if goexperiment.ExecTracer2 && !mp.isExtraInSig {
+		trace = traceAcquire()
+	}
+
 	// Return mp.curg to dead state.
 	casgstatus(mp.curg, _Gsyscall, _Gdead)
 	mp.curg.preemptStop = false
 	sched.ngsys.Add(1)
 
+	if goexperiment.ExecTracer2 && !mp.isExtraInSig {
+		if trace.ok() {
+			trace.GoDestroySyscall()
+			traceRelease(trace)
+		}
+	}
+
+	if goexperiment.ExecTracer2 {
+		// Trash syscalltick so that it doesn't line up with mp.old.syscalltick anymore.
+		//
+		// In the new tracer, we model needm and dropm and a goroutine being created and
+		// destroyed respectively. The m then might get reused with a different procid but
+		// still with a reference to oldp, and still with the same syscalltick. The next
+		// time a G is "created" in needm, it'll return and quietly reacquire its P from a
+		// different m with a different procid, which will confuse the trace parser. By
+		// trashing syscalltick, we ensure that it'll appear as if we lost the P to the
+		// trace parser and that we just reacquired it.
+		//
+		// Trash the value by decrementing because that gets us as far away from the value
+		// the syscall exit code expects as possible. Setting to zero is risky because
+		// syscalltick could already be zero (and in fact, is initialized to zero).
+		mp.syscalltick--
+	}
+
+	// Reset trace state unconditionally. This goroutine is being 'destroyed'
+	// from the perspective of the tracer.
+	mp.curg.trace.reset()
+
+	// Flush all the M's buffers. This is necessary because the M might
+	// be used on a different thread with a different procid, so we have
+	// to make sure we don't write into the same buffer.
+	//
+	// N.B. traceThreadDestroy is a no-op in the old tracer, so avoid the
+	// unnecessary acquire/release of the lock.
+	if goexperiment.ExecTracer2 && (traceEnabled() || traceShuttingDown()) {
+		// Acquire sched.lock across thread destruction. One of the invariants of the tracer
+		// is that a thread cannot disappear from the tracer's view (allm or freem) without
+		// it noticing, so it requires that sched.lock be held over traceThreadDestroy.
+		//
+		// This isn't strictly necessary in this case, because this thread never leaves allm,
+		// but the critical section is short and dropm is rare on pthread platforms, so just
+		// take the lock and play it safe. traceThreadDestroy also asserts that the lock is held.
+		lock(&sched.lock)
+		traceThreadDestroy(mp)
+		unlock(&sched.lock)
+	}
+	mp.isExtraInSig = false
+
 	// Block signals before unminit.
 	// Unminit unregisters the signal handling stack (but needs g on some systems).
 	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
@@ -2201,6 +2439,14 @@
 
 	setg(nil)
 
+	// Clear g0 stack bounds to ensure that needm always refreshes the
+	// bounds when reusing this M.
+	g0 := mp.g0
+	g0.stack.hi = 0
+	g0.stack.lo = 0
+	g0.stackguard0 = 0
+	g0.stackguard1 = 0
+
 	putExtraM(mp)
 
 	msigrestore(sigmask)
@@ -2872,13 +3118,15 @@
 		setThreadCPUProfiler(hz)
 	}
 
-	if traceEnabled() {
+	trace := traceAcquire()
+	if trace.ok() {
 		// GoSysExit has to happen when we have a P, but before GoStart.
 		// So we emit it here.
-		if gp.syscallsp != 0 {
-			traceGoSysExit()
+		if !goexperiment.ExecTracer2 && gp.syscallsp != 0 {
+			trace.GoSysExit(true)
 		}
-		traceGoStart()
+		trace.GoStart()
+		traceRelease(trace)
 	}
 
 	gogo(&gp.sched)
@@ -2915,8 +3163,12 @@
 	if traceEnabled() || traceShuttingDown() {
 		gp := traceReader()
 		if gp != nil {
+			trace := traceAcquire()
 			casgstatus(gp, _Gwaiting, _Grunnable)
-			traceGoUnpark(gp, 0)
+			if trace.ok() {
+				trace.GoUnpark(gp, 0)
+				traceRelease(trace)
+			}
 			return gp, false, true
 		}
 	}
@@ -2974,13 +3226,16 @@
 	// blocked thread (e.g. it has already returned from netpoll, but does
 	// not set lastpoll yet), this thread will do blocking netpoll below
 	// anyway.
-	if netpollinited() && netpollWaiters.Load() > 0 && sched.lastpoll.Load() != 0 {
-		if list := netpoll(0); !list.empty() { // non-blocking
+	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
+		if list, delta := netpoll(0); !list.empty() { // non-blocking
 			gp := list.pop()
 			injectglist(&list)
+			netpollAdjustWaiters(delta)
+			trace := traceAcquire()
 			casgstatus(gp, _Gwaiting, _Grunnable)
-			if traceEnabled() {
-				traceGoUnpark(gp, 0)
+			if trace.ok() {
+				trace.GoUnpark(gp, 0)
+				traceRelease(trace)
 			}
 			return gp, false, false
 		}
@@ -3023,9 +3278,12 @@
 		if node != nil {
 			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
 			gp := node.gp.ptr()
+
+			trace := traceAcquire()
 			casgstatus(gp, _Gwaiting, _Grunnable)
-			if traceEnabled() {
-				traceGoUnpark(gp, 0)
+			if trace.ok() {
+				trace.GoUnpark(gp, 0)
+				traceRelease(trace)
 			}
 			return gp, false, false
 		}
@@ -3038,9 +3296,11 @@
 	// until a callback was triggered.
 	gp, otherReady := beforeIdle(now, pollUntil)
 	if gp != nil {
+		trace := traceAcquire()
 		casgstatus(gp, _Gwaiting, _Grunnable)
-		if traceEnabled() {
-			traceGoUnpark(gp, 0)
+		if trace.ok() {
+			trace.GoUnpark(gp, 0)
+			traceRelease(trace)
 		}
 		return gp, false, false
 	}
@@ -3091,7 +3351,7 @@
 	//
 	// This applies to the following sources of work:
 	//
-	// * Goroutines added to a per-P run queue.
+	// * Goroutines added to the global or a per-P run queue.
 	// * New/modified-earlier timers on a per-P timer heap.
 	// * Idle-priority GC work (barring golang.org/issue/19112).
 	//
@@ -3133,7 +3393,24 @@
 		//
 		// See https://go.dev/issue/43997.
 
-		// Check all runqueues once again.
+		// Check global and P runqueues again.
+
+		lock(&sched.lock)
+		if sched.runqsize != 0 {
+			pp, _ := pidlegetSpinning(0)
+			if pp != nil {
+				gp := globrunqget(pp, 0)
+				if gp == nil {
+					throw("global runq empty with non-zero runqsize")
+				}
+				unlock(&sched.lock)
+				acquirep(pp)
+				mp.becomeSpinning()
+				return gp, false, false
+			}
+		}
+		unlock(&sched.lock)
+
 		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
 		if pp != nil {
 			acquirep(pp)
@@ -3149,9 +3426,11 @@
 
 			// Run the idle worker.
 			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
+			trace := traceAcquire()
 			casgstatus(gp, _Gwaiting, _Grunnable)
-			if traceEnabled() {
-				traceGoUnpark(gp, 0)
+			if trace.ok() {
+				trace.GoUnpark(gp, 0)
+				traceRelease(trace)
 			}
 			return gp, false, false
 		}
@@ -3166,7 +3445,7 @@
 	}
 
 	// Poll network until next timer.
-	if netpollinited() && (netpollWaiters.Load() > 0 || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
+	if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
 		sched.pollUntil.Store(pollUntil)
 		if mp.p != 0 {
 			throw("findrunnable: netpoll with p")
@@ -3188,7 +3467,7 @@
 			// When using fake time, just poll.
 			delay = 0
 		}
-		list := netpoll(delay) // block until new work is available
+		list, delta := netpoll(delay) // block until new work is available
 		// Refresh now again, after potentially blocking.
 		now = nanotime()
 		sched.pollUntil.Store(0)
@@ -3204,14 +3483,18 @@
 		unlock(&sched.lock)
 		if pp == nil {
 			injectglist(&list)
+			netpollAdjustWaiters(delta)
 		} else {
 			acquirep(pp)
 			if !list.empty() {
 				gp := list.pop()
 				injectglist(&list)
+				netpollAdjustWaiters(delta)
+				trace := traceAcquire()
 				casgstatus(gp, _Gwaiting, _Grunnable)
-				if traceEnabled() {
-					traceGoUnpark(gp, 0)
+				if trace.ok() {
+					trace.GoUnpark(gp, 0)
+					traceRelease(trace)
 				}
 				return gp, false, false
 			}
@@ -3242,9 +3525,10 @@
 	if !runqempty(p) {
 		return true
 	}
-	if netpollinited() && netpollWaiters.Load() > 0 && sched.lastpoll.Load() != 0 {
-		if list := netpoll(0); !list.empty() {
+	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
+		if list, delta := netpoll(0); !list.empty() {
 			injectglist(&list)
+			netpollAdjustWaiters(delta)
 			return true
 		}
 	}
@@ -3266,7 +3550,7 @@
 	for i := 0; i < stealTries; i++ {
 		stealTimersOrRunNextG := i == stealTries-1
 
-		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
+		for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
 			if sched.gcwaiting.Load() {
 				// GC work may be available.
 				return nil, false, now, pollUntil, true
@@ -3478,10 +3762,12 @@
 	if glist.empty() {
 		return
 	}
-	if traceEnabled() {
+	trace := traceAcquire()
+	if trace.ok() {
 		for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
-			traceGoUnpark(gp, 0)
+			trace.GoUnpark(gp, 0)
 		}
+		traceRelease(trace)
 	}
 
 	// Mark all the goroutines as runnable before we put them
@@ -3721,13 +4007,16 @@
 func park_m(gp *g) {
 	mp := getg().m
 
-	if traceEnabled() {
-		traceGoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
-	}
+	trace := traceAcquire()
 
 	// N.B. Not using casGToWaiting here because the waitreason is
 	// set by park_m's caller.
 	casgstatus(gp, _Grunning, _Gwaiting)
+	if trace.ok() {
+		trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
+		traceRelease(trace)
+	}
+
 	dropg()
 
 	if fn := mp.waitunlockf; fn != nil {
@@ -3735,66 +4024,68 @@
 		mp.waitunlockf = nil
 		mp.waitlock = nil
 		if !ok {
-			if traceEnabled() {
-				traceGoUnpark(gp, 2)
-			}
+			trace := traceAcquire()
 			casgstatus(gp, _Gwaiting, _Grunnable)
+			if trace.ok() {
+				trace.GoUnpark(gp, 2)
+				traceRelease(trace)
+			}
 			execute(gp, true) // Schedule it back, never returns.
 		}
 	}
 	schedule()
 }
 
-func goschedImpl(gp *g) {
+func goschedImpl(gp *g, preempted bool) {
+	trace := traceAcquire()
 	status := readgstatus(gp)
 	if status&^_Gscan != _Grunning {
 		dumpgstatus(gp)
 		throw("bad g status")
 	}
 	casgstatus(gp, _Grunning, _Grunnable)
+	if trace.ok() {
+		if preempted {
+			trace.GoPreempt()
+		} else {
+			trace.GoSched()
+		}
+		traceRelease(trace)
+	}
+
 	dropg()
 	lock(&sched.lock)
 	globrunqput(gp)
 	unlock(&sched.lock)
 
+	if mainStarted {
+		wakep()
+	}
+
 	schedule()
 }
 
 // Gosched continuation on g0.
 func gosched_m(gp *g) {
-	if traceEnabled() {
-		traceGoSched()
-	}
-	goschedImpl(gp)
+	goschedImpl(gp, false)
 }
 
 // goschedguarded is a forbidden-states-avoided version of gosched_m.
 func goschedguarded_m(gp *g) {
-
 	if !canPreemptM(gp.m) {
 		gogo(&gp.sched) // never return
 	}
-
-	if traceEnabled() {
-		traceGoSched()
-	}
-	goschedImpl(gp)
+	goschedImpl(gp, false)
 }
 
 func gopreempt_m(gp *g) {
-	if traceEnabled() {
-		traceGoPreempt()
-	}
-	goschedImpl(gp)
+	goschedImpl(gp, true)
 }
 
 // preemptPark parks gp and puts it in _Gpreempted.
 //
 //go:systemstack
 func preemptPark(gp *g) {
-	if traceEnabled() {
-		traceGoPark(traceBlockPreempted, 0)
-	}
 	status := readgstatus(gp)
 	if status&^_Gscan != _Grunning {
 		dumpgstatus(gp)
@@ -3823,7 +4114,30 @@
 	// transitions until we can dropg.
 	casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
 	dropg()
+
+	// Be careful about how we trace this next event. The ordering
+	// is subtle.
+	//
+	// The moment we CAS into _Gpreempted, suspendG could CAS to
+	// _Gwaiting, do its work, and ready the goroutine. All of
+	// this could happen before we even get the chance to emit
+	// an event. The end result is that the events could appear
+	// out of order, and the tracer generally assumes the scheduler
+	// takes care of the ordering between GoPark and GoUnpark.
+	//
+	// The answer here is simple: emit the event while we still hold
+	// the _Gscan bit on the goroutine. We still need to traceAcquire
+	// and traceRelease across the CAS because the tracer could be
+	// what's calling suspendG in the first place, and we want the
+	// CAS and event emission to appear atomic to the tracer.
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GoPark(traceBlockPreempted, 0)
+	}
 	casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
+	if trace.ok() {
+		traceRelease(trace)
+	}
 	schedule()
 }
 
@@ -3836,11 +4150,13 @@
 }
 
 func goyield_m(gp *g) {
-	if traceEnabled() {
-		traceGoPreempt()
-	}
+	trace := traceAcquire()
 	pp := gp.m.p.ptr()
 	casgstatus(gp, _Grunning, _Grunnable)
+	if trace.ok() {
+		trace.GoPreempt()
+		traceRelease(trace)
+	}
 	dropg()
 	runqput(pp, gp, false)
 	schedule()
@@ -3851,14 +4167,21 @@
 	if raceenabled {
 		racegoend()
 	}
-	if traceEnabled() {
-		traceGoEnd()
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GoEnd()
+		traceRelease(trace)
 	}
 	mcall(goexit0)
 }
 
 // goexit continuation on g0.
 func goexit0(gp *g) {
+	gdestroy(gp)
+	schedule()
+}
+
+func gdestroy(gp *g) {
 	mp := getg().m
 	pp := mp.p.ptr()
 
@@ -3895,7 +4218,7 @@
 
 	if GOARCH == "wasm" { // no threads yet on wasm
 		gfput(pp, gp)
-		schedule() // never returns
+		return
 	}
 
 	if mp.lockedInt != 0 {
@@ -3918,7 +4241,6 @@
 			mp.lockedExt = 0
 		}
 	}
-	schedule()
 }
 
 // save updates getg().sched to refer to pc and sp so that a following
@@ -3976,7 +4298,7 @@
 // must always point to a valid stack frame. entersyscall below is the normal
 // entry point for syscalls, which obtains the SP and PC from the caller.
 //
-// Syscall tracing:
+// Syscall tracing (old tracer):
 // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
 // If the syscall does not block, that is it, we do not emit any other events.
 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
@@ -3991,6 +4313,7 @@
 //
 //go:nosplit
 func reentersyscall(pc, sp uintptr) {
+	trace := traceAcquire()
 	gp := getg()
 
 	// Disable preemption because during this function g is in Gsyscall status,
@@ -4021,8 +4344,11 @@
 		})
 	}
 
-	if traceEnabled() {
-		systemstack(traceGoSysCall)
+	if trace.ok() {
+		systemstack(func() {
+			trace.GoSysCall()
+			traceRelease(trace)
+		})
 		// systemstack itself clobbers g.sched.{pc,sp} and we might
 		// need them later when the G is genuinely blocked in a
 		// syscall
@@ -4078,15 +4404,32 @@
 	pp := gp.m.oldp.ptr()
 
 	lock(&sched.lock)
+	trace := traceAcquire()
 	if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
-		if traceEnabled() {
-			traceGoSysBlock(pp)
-			traceProcStop(pp)
+		if trace.ok() {
+			if goexperiment.ExecTracer2 {
+				// This is a steal in the new tracer. While it's very likely
+				// that we were the ones to put this P into _Psyscall, between
+				// then and now it's totally possible it had been stolen and
+				// then put back into _Psyscall for us to acquire here. In such
+				// a case, ProcStop would be incorrect.
+				//
+				// TODO(mknyszek): Consider emitting a ProcStop instead when
+				// gp.m.syscalltick == pp.syscalltick, since then we know we never
+				// lost the P.
+				trace.ProcSteal(pp, true)
+			} else {
+				trace.GoSysBlock(pp)
+				trace.ProcStop(pp)
+			}
+			traceRelease(trace)
 		}
 		pp.syscalltick++
 		if sched.stopwait--; sched.stopwait == 0 {
 			notewakeup(&sched.stopnote)
 		}
+	} else if trace.ok() {
+		traceRelease(trace)
 	}
 	unlock(&sched.lock)
 }
@@ -4135,9 +4478,11 @@
 }
 
 func entersyscallblock_handoff() {
-	if traceEnabled() {
-		traceGoSysCall()
-		traceGoSysBlock(getg().m.p.ptr())
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GoSysCall()
+		trace.GoSysBlock(getg().m.p.ptr())
+		traceRelease(trace)
 	}
 	handoffp(releasep())
 }
@@ -4176,15 +4521,33 @@
 				tryRecordGoroutineProfileWB(gp)
 			})
 		}
-		if traceEnabled() {
-			if oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick {
-				systemstack(traceGoStart)
-			}
+		trace := traceAcquire()
+		if trace.ok() {
+			lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
+			systemstack(func() {
+				if goexperiment.ExecTracer2 {
+					// Write out syscall exit eagerly in the experiment.
+					//
+					// It's important that we write this *after* we know whether we
+					// lost our P or not (determined by exitsyscallfast).
+					trace.GoSysExit(lostP)
+				}
+				if lostP {
+					// We lost the P at some point, even though we got it back here.
+					// Trace that we're starting again, because there was a traceGoSysBlock
+					// call somewhere in exitsyscallfast (indicating that this goroutine
+					// had blocked) and we're about to start running again.
+					trace.GoStart()
+				}
+			})
 		}
 		// There's a cpu for us, so we can run.
 		gp.m.p.ptr().syscalltick++
 		// We need to cas the status and scan before resuming...
 		casgstatus(gp, _Gsyscall, _Grunning)
+		if trace.ok() {
+			traceRelease(trace)
+		}
 
 		// Garbage collector isn't running (since we are),
 		// so okay to clear syscallsp.
@@ -4207,17 +4570,15 @@
 		return
 	}
 
-	if traceEnabled() {
-		// Wait till traceGoSysBlock event is emitted.
-		// This ensures consistency of the trace (the goroutine is started after it is blocked).
-		for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
-			osyield()
+	if !goexperiment.ExecTracer2 {
+		// In the old tracer, because we don't have a P we can't
+		// actually record the true time we exited the syscall.
+		// Record it.
+		trace := traceAcquire()
+		if trace.ok() {
+			trace.RecordSyscallExitedTime(gp, oldp)
+			traceRelease(trace)
 		}
-		// We can't trace syscall exit right now because we don't have a P.
-		// Tracing code can invoke write barriers that cannot run without a P.
-		// So instead we remember the syscall exit time and emit the event
-		// in execute when we have a P.
-		gp.trace.sysExitTime = traceClockNow()
 	}
 
 	gp.m.locks--
@@ -4246,27 +4607,40 @@
 	}
 
 	// Try to re-acquire the last P.
+	trace := traceAcquire()
 	if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
 		// There's a cpu for us, so we can run.
 		wirep(oldp)
-		exitsyscallfast_reacquired()
+		exitsyscallfast_reacquired(trace)
+		if trace.ok() {
+			traceRelease(trace)
+		}
 		return true
 	}
+	if trace.ok() {
+		traceRelease(trace)
+	}
 
 	// Try to get any other idle P.
 	if sched.pidle != 0 {
 		var ok bool
 		systemstack(func() {
 			ok = exitsyscallfast_pidle()
-			if ok && traceEnabled() {
-				if oldp != nil {
-					// Wait till traceGoSysBlock event is emitted.
-					// This ensures consistency of the trace (the goroutine is started after it is blocked).
-					for oldp.syscalltick == gp.m.syscalltick {
-						osyield()
+			if ok && !goexperiment.ExecTracer2 {
+				trace := traceAcquire()
+				if trace.ok() {
+					if oldp != nil {
+						// Wait till traceGoSysBlock event is emitted.
+						// This ensures consistency of the trace (the goroutine is started after it is blocked).
+						for oldp.syscalltick == gp.m.syscalltick {
+							osyield()
+						}
 					}
+					// In the experiment, we write this in exitsyscall.
+					// Don't write it here unless the experiment is off.
+					trace.GoSysExit(true)
+					traceRelease(trace)
 				}
-				traceGoSysExit()
 			}
 		})
 		if ok {
@@ -4281,18 +4655,25 @@
 // syscall.
 //
 //go:nosplit
-func exitsyscallfast_reacquired() {
+func exitsyscallfast_reacquired(trace traceLocker) {
 	gp := getg()
 	if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
-		if traceEnabled() {
+		if trace.ok() {
 			// The p was retaken and then enter into syscall again (since gp.m.syscalltick has changed).
 			// traceGoSysBlock for this syscall was already emitted,
 			// but here we effectively retake the p from the new syscall running on the same p.
 			systemstack(func() {
-				// Denote blocking of the new syscall.
-				traceGoSysBlock(gp.m.p.ptr())
-				// Denote completion of the current syscall.
-				traceGoSysExit()
+				if goexperiment.ExecTracer2 {
+					// In the experiment, we're stealing the P. It's treated
+					// as if it temporarily stopped running. Then, start running.
+					trace.ProcSteal(gp.m.p.ptr(), true)
+					trace.ProcStart()
+				} else {
+					// Denote blocking of the new syscall.
+					trace.GoSysBlock(gp.m.p.ptr())
+					// Denote completion of the current syscall.
+					trace.GoSysExit(true)
+				}
 			})
 		}
 		gp.m.p.ptr().syscalltick++
@@ -4321,7 +4702,23 @@
 //
 //go:nowritebarrierrec
 func exitsyscall0(gp *g) {
+	var trace traceLocker
+	if goexperiment.ExecTracer2 {
+		traceExitingSyscall()
+		trace = traceAcquire()
+	}
 	casgstatus(gp, _Gsyscall, _Grunnable)
+	if goexperiment.ExecTracer2 {
+		traceExitedSyscall()
+		if trace.ok() {
+			// Write out syscall exit eagerly in the experiment.
+			//
+			// It's important that we write this *after* we know whether we
+			// lost our P or not (determined by exitsyscallfast).
+			trace.GoSysExit(true)
+			traceRelease(trace)
+		}
+	}
 	dropg()
 	lock(&sched.lock)
 	var pp *p
@@ -4516,12 +4913,14 @@
 	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
 	totalSize = alignUp(totalSize, sys.StackAlign)
 	sp := newg.stack.hi - totalSize
-	spArg := sp
 	if usesLR {
 		// caller's LR
 		*(*uintptr)(unsafe.Pointer(sp)) = 0
 		prepGoExitFrame(sp)
-		spArg += sys.MinFrameSize
+	}
+	if GOARCH == "arm64" {
+		// caller's FP
+		*(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
 	}
 
 	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
@@ -4551,13 +4950,15 @@
 		}
 	}
 	// Track initial transition?
-	newg.trackingSeq = uint8(fastrand())
+	newg.trackingSeq = uint8(cheaprand())
 	if newg.trackingSeq%gTrackingPeriod == 0 {
 		newg.tracking = true
 	}
-	casgstatus(newg, _Gdead, _Grunnable)
 	gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
 
+	// Get a goid and switch to runnable. Make all this atomic to the tracer.
+	trace := traceAcquire()
+	casgstatus(newg, _Gdead, _Grunnable)
 	if pp.goidcache == pp.goidcacheend {
 		// Sched.goidgen is the last allocated id,
 		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
@@ -4568,6 +4969,13 @@
 	}
 	newg.goid = pp.goidcache
 	pp.goidcache++
+	newg.trace.reset()
+	if trace.ok() {
+		trace.GoCreate(newg, newg.startpc)
+		traceRelease(trace)
+	}
+
+	// Set up race context.
 	if raceenabled {
 		newg.racectx = racegostart(callerpc)
 		newg.raceignore = 0
@@ -4577,9 +4985,6 @@
 			racereleasemergeg(newg, unsafe.Pointer(&labelSync))
 		}
 	}
-	if traceEnabled() {
-		traceGoCreate(newg, newg.startpc)
-	}
 	releasem(mp)
 
 	return newg
@@ -4768,7 +5173,7 @@
 // The calling goroutine will always execute in that thread,
 // and no other goroutine will execute in it,
 // until the calling goroutine has made as many calls to
-// UnlockOSThread as to LockOSThread.
+// [UnlockOSThread] as to LockOSThread.
 // If the calling goroutine exits without unlocking the thread,
 // the thread will be terminated.
 //
@@ -4887,6 +5292,7 @@
 func _LostExternalCode()          { _LostExternalCode() }
 func _GC()                        { _GC() }
 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
+func _LostContendedRuntimeLock()  { _LostContendedRuntimeLock() }
 func _VDSO()                      { _VDSO() }
 
 // Called if we receive a SIGPROF signal.
@@ -4997,14 +5403,16 @@
 		cpuprof.add(tagPtr, stk[:n])
 
 		gprof := gp
+		var mp *m
 		var pp *p
 		if gp != nil && gp.m != nil {
 			if gp.m.curg != nil {
 				gprof = gp.m.curg
 			}
+			mp = gp.m
 			pp = gp.m.p.ptr()
 		}
-		traceCPUSample(gprof, pp, stk[:n])
+		traceCPUSample(gprof, mp, pp, stk[:n])
 	}
 	getg().m.mallocing--
 }
@@ -5188,8 +5596,10 @@
 	if old < 0 || nprocs <= 0 {
 		throw("procresize: invalid arg")
 	}
-	if traceEnabled() {
-		traceGomaxprocs(nprocs)
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.Gomaxprocs(nprocs)
+		traceRelease(trace)
 	}
 
 	// update statistics
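
The hunks above and below all apply the same conversion: the old traceEnabled() guards around calls such as traceGomaxprocs become an acquire/check/release sequence, so the emitted event and the scheduler state change it describes are kept atomic with respect to the tracer (see the "Make all this atomic to the tracer" comment in the newproc1 hunk further down). A minimal standalone sketch of that shape; the traceLocker here is a stand-in, not the runtime's real implementation:

	package main

	import "fmt"

	// traceLocker stands in for the runtime's trace locker: acquiring it pins
	// the current trace state, and ok reports whether tracing is active.
	type traceLocker struct{ enabled bool }

	func (t traceLocker) ok() bool { return t.enabled }

	var tracerOn = true // stand-in for "a trace is currently being collected"

	func traceAcquire() traceLocker  { return traceLocker{enabled: tracerOn} }
	func traceRelease(t traceLocker) {}

	func main() {
		// Same shape as the converted call sites: acquire, check, emit, release.
		trace := traceAcquire()
		if trace.ok() {
			fmt.Println("event emitted while the tracer is held")
			traceRelease(trace)
		}
	}

As at the call sites in this patch, traceRelease is only reached when trace.ok() reported true.
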
@@ -5254,12 +5664,14 @@
 		// because p.destroy itself has write barriers, so we
 		// need to do that from a valid P.
 		if gp.m.p != 0 {
-			if traceEnabled() {
+			trace := traceAcquire()
+			if trace.ok() {
 				// Pretend that we were descheduled
 				// and then scheduled again to keep
 				// the trace sane.
-				traceGoSched()
-				traceProcStop(gp.m.p.ptr())
+				trace.GoSched()
+				trace.ProcStop(gp.m.p.ptr())
+				traceRelease(trace)
 			}
 			gp.m.p.ptr().m = 0
 		}
@@ -5268,8 +5680,10 @@
 		pp.m = 0
 		pp.status = _Pidle
 		acquirep(pp)
-		if traceEnabled() {
-			traceGoStart()
+		trace := traceAcquire()
+		if trace.ok() {
+			trace.GoStart()
+			traceRelease(trace)
 		}
 	}
 
@@ -5333,8 +5747,10 @@
 	// from a potentially stale mcache.
 	pp.mcache.prepareForSweep()
 
-	if traceEnabled() {
-		traceProcStart()
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.ProcStart()
+		traceRelease(trace)
 	}
 }
 
@@ -5348,15 +5764,23 @@
 	gp := getg()
 
 	if gp.m.p != 0 {
-		throw("wirep: already in go")
+		// Call on the systemstack to avoid a nosplit overflow build failure
+		// on some platforms when built with -N -l. See #64113.
+		systemstack(func() {
+			throw("wirep: already in go")
+		})
 	}
 	if pp.m != 0 || pp.status != _Pidle {
-		id := int64(0)
-		if pp.m != 0 {
-			id = pp.m.ptr().id
-		}
-		print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
-		throw("wirep: invalid p state")
+		// Call on the systemstack to avoid a nosplit overflow build failure
+		// on some platforms when built with -N -l. See #64113.
+		systemstack(func() {
+			id := int64(0)
+			if pp.m != 0 {
+				id = pp.m.ptr().id
+			}
+			print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
+			throw("wirep: invalid p state")
+		})
 	}
 	gp.m.p.set(pp)
 	pp.m.set(gp.m)
@@ -5365,6 +5789,16 @@
 
 // Disassociate p and the current m.
 func releasep() *p {
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.ProcStop(getg().m.p.ptr())
+		traceRelease(trace)
+	}
+	return releasepNoTrace()
+}
+
+// Disassociate p and the current m without tracing an event.
+func releasepNoTrace() *p {
 	gp := getg()
 
 	if gp.m.p == 0 {
@@ -5375,9 +5809,6 @@
 		print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
 		throw("releasep: invalid p state")
 	}
-	if traceEnabled() {
-		traceProcStop(gp.m.p.ptr())
-	}
 	gp.m.p = 0
 	pp.m = 0
 	pp.status = _Pidle
@@ -5596,7 +6027,7 @@
 		lastpoll := sched.lastpoll.Load()
 		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
 			sched.lastpoll.CompareAndSwap(lastpoll, now)
-			list := netpoll(0) // non-blocking - returns list of goroutines
+			list, delta := netpoll(0) // non-blocking - returns list of goroutines
 			if !list.empty() {
 				// Need to decrement number of idle locked M's
 				// (pretending that one more is running) before injectglist.
@@ -5608,6 +6039,7 @@
 				incidlelocked(-1)
 				injectglist(&list)
 				incidlelocked(1)
+				netpollAdjustWaiters(delta)
 			}
 		}
 		if GOOS == "netbsd" && needSysmonWorkaround {
@@ -5721,14 +6153,18 @@
 			// Otherwise the M from which we retake can exit the syscall,
 			// increment nmidle and report deadlock.
 			incidlelocked(-1)
+			trace := traceAcquire()
 			if atomic.Cas(&pp.status, s, _Pidle) {
-				if traceEnabled() {
-					traceGoSysBlock(pp)
-					traceProcStop(pp)
+				if trace.ok() {
+					trace.GoSysBlock(pp)
+					trace.ProcSteal(pp, false)
+					traceRelease(trace)
 				}
 				n++
 				pp.syscalltick++
 				handoffp(pp)
+			} else if trace.ok() {
+				traceRelease(trace)
 			}
 			incidlelocked(1)
 			lock(&allpLock)
@@ -6197,7 +6633,7 @@
 // If the run queue is full, runnext puts g on the global queue.
 // Executed only by the owner P.
 func runqput(pp *p, gp *g, next bool) {
-	if randomizeScheduler && next && fastrandn(2) == 0 {
+	if randomizeScheduler && next && randn(2) == 0 {
 		next = false
 	}
 
@@ -6250,7 +6686,7 @@
 
 	if randomizeScheduler {
 		for i := uint32(1); i <= n; i++ {
-			j := fastrandn(i + 1)
+			j := cheaprandn(i + 1)
 			batch[i], batch[j] = batch[j], batch[i]
 		}
 	}
@@ -6291,7 +6727,7 @@
 			return (pp.runqtail + o) % uint32(len(pp.runq))
 		}
 		for i := uint32(1); i < n; i++ {
-			j := fastrandn(i + 1)
+			j := cheaprandn(i + 1)
 			pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
 		}
 	}
@@ -6395,7 +6831,7 @@
 						// between different Ps.
 						// A sync chan send/recv takes ~50ns as of time of
 						// writing, so 3us gives ~50x overshoot.
-						if GOOS != "windows" && GOOS != "openbsd" && GOOS != "netbsd" {
+						if !osHasLowResTimer {
 							usleep(3)
 						} else {
 							// On some platforms system timer granularity is
diff --git a/src/runtime/profbuf.go b/src/runtime/profbuf.go
index 083b55a..d3afbcd 100644
--- a/src/runtime/profbuf.go
+++ b/src/runtime/profbuf.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Go Authors.  All rights reserved.
+// Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -348,7 +348,7 @@
 	// so there is no need for a deletion barrier on b.tags[wt].
 	wt := int(bw.tagCount() % uint32(len(b.tags)))
 	if tagPtr != nil {
-		*(*uintptr)(unsafe.Pointer(&b.tags[wt])) = uintptr(unsafe.Pointer(*tagPtr))
+		*(*uintptr)(unsafe.Pointer(&b.tags[wt])) = uintptr(*tagPtr)
 	}
 
 	// Main record.
@@ -468,7 +468,7 @@
 			// Won the race, report overflow.
 			dst := b.overflowBuf
 			dst[0] = uint64(2 + b.hdrsize + 1)
-			dst[1] = uint64(time)
+			dst[1] = time
 			for i := uintptr(0); i < b.hdrsize; i++ {
 				dst[2+i] = 0
 			}
diff --git a/src/runtime/profbuf_test.go b/src/runtime/profbuf_test.go
index d9c5264..dac78ff 100644
--- a/src/runtime/profbuf_test.go
+++ b/src/runtime/profbuf_test.go
@@ -39,13 +39,7 @@
 			c <- 1
 		}()
 		time.Sleep(10 * time.Millisecond) // let goroutine run and block
-		return func() {
-			select {
-			case <-c:
-			case <-time.After(1 * time.Second):
-				t.Fatalf("timeout waiting for blocked read")
-			}
-		}
+		return func() { <-c }
 	}
 	readEOF := func(t *testing.T, b *ProfBuf) {
 		rdata, rtags, eof := b.Read(ProfBufBlocking)
diff --git a/src/runtime/race.go b/src/runtime/race.go
index e2767f0..ca4f051 100644
--- a/src/runtime/race.go
+++ b/src/runtime/race.go
@@ -172,14 +172,14 @@
 	pc := ctx.pc
 	fi := findfunc(pc)
 	if fi.valid() {
-		u, uf := newInlineUnwinder(fi, pc, nil)
+		u, uf := newInlineUnwinder(fi, pc)
 		for ; uf.valid(); uf = u.next(uf) {
 			sf := u.srcFunc(uf)
 			if sf.funcID == abi.FuncIDWrapper && u.isInlined(uf) {
 				// Ignore wrappers, unless we're at the outermost frame of u.
 				// A non-inlined wrapper frame always means we have a physical
 				// frame consisting entirely of wrappers, in which case we'll
-				// take a outermost wrapper over nothing.
+				// take an outermost wrapper over nothing.
 				continue
 			}
 
@@ -223,6 +223,7 @@
 
 func raceSymbolizeData(ctx *symbolizeDataContext) {
 	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
+		// TODO: Does this need to handle malloc headers?
 		ctx.heap = 1
 		ctx.start = base
 		ctx.size = span.elemsize
diff --git a/src/runtime/race/README b/src/runtime/race/README
index acd8b84..47c51ca 100644
--- a/src/runtime/race/README
+++ b/src/runtime/race/README
@@ -4,14 +4,14 @@
 
 To update the .syso files use golang.org/x/build/cmd/racebuild.
 
-race_darwin_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6.
-race_freebsd_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6.
-race_linux_ppc64le.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
-race_netbsd_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
-race_windows_amd64.syso built with LLVM b6374437af39af66896da74a1dc1b8a0ece26bee and Go 3e97294663d978bf8abb7acec7cc615ef2f1ea75.
-race_linux_arm64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
-race_darwin_arm64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
-race_openbsd_amd64.syso built with LLVM fcf6ae2f070eba73074b6ec8d8281e54d29dbeeb and Go 8f2db14cd35bbd674cb2988a508306de6655e425.
-race_linux_s390x.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
-internal/amd64v3/race_linux.syso built with LLVM 74c2d4f6024c8f160871a2baa928d0b42415f183 and Go c0f27eb3d580c8b9efd73802678eba4c6c9461be.
-internal/amd64v1/race_linux.syso built with LLVM 74c2d4f6024c8f160871a2baa928d0b42415f183 and Go c0f27eb3d580c8b9efd73802678eba4c6c9461be.
+internal/amd64v1/race_darwin.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+internal/amd64v1/race_freebsd.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+internal/amd64v1/race_linux.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+internal/amd64v1/race_netbsd.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+internal/amd64v1/race_openbsd.syso built with LLVM fcf6ae2f070eba73074b6ec8d8281e54d29dbeeb and Go 8f2db14cd35bbd674cb2988a508306de6655e425.
+internal/amd64v1/race_windows.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+internal/amd64v3/race_linux.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+race_darwin_arm64.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+race_linux_arm64.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+race_linux_ppc64le.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+race_linux_s390x.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
diff --git a/src/runtime/race/internal/amd64v1/race_darwin.syso b/src/runtime/race/internal/amd64v1/race_darwin.syso
index e5d848c..e92f4ce 100644
--- a/src/runtime/race/internal/amd64v1/race_darwin.syso
+++ b/src/runtime/race/internal/amd64v1/race_darwin.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v1/race_freebsd.syso b/src/runtime/race/internal/amd64v1/race_freebsd.syso
index b3a4383..10edcba 100644
--- a/src/runtime/race/internal/amd64v1/race_freebsd.syso
+++ b/src/runtime/race/internal/amd64v1/race_freebsd.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v1/race_linux.syso b/src/runtime/race/internal/amd64v1/race_linux.syso
index 68f1508..f867990 100644
--- a/src/runtime/race/internal/amd64v1/race_linux.syso
+++ b/src/runtime/race/internal/amd64v1/race_linux.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v1/race_netbsd.syso b/src/runtime/race/internal/amd64v1/race_netbsd.syso
index e6cc4bf..c920820 100644
--- a/src/runtime/race/internal/amd64v1/race_netbsd.syso
+++ b/src/runtime/race/internal/amd64v1/race_netbsd.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v1/race_windows.syso b/src/runtime/race/internal/amd64v1/race_windows.syso
index 777bd83..82ac90b 100644
--- a/src/runtime/race/internal/amd64v1/race_windows.syso
+++ b/src/runtime/race/internal/amd64v1/race_windows.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v3/race_linux.syso b/src/runtime/race/internal/amd64v3/race_linux.syso
index 33c3e76b..8ca0f4d 100644
--- a/src/runtime/race/internal/amd64v3/race_linux.syso
+++ b/src/runtime/race/internal/amd64v3/race_linux.syso
Binary files differ
diff --git a/src/runtime/race/output_test.go b/src/runtime/race/output_test.go
index 4c2c339..0ee0f41 100644
--- a/src/runtime/race/output_test.go
+++ b/src/runtime/race/output_test.go
@@ -208,8 +208,8 @@
 `, []string{`
 ==================
 --- FAIL: TestFail \([0-9.]+s\)
-.*main_test.go:14: true
 .*testing.go:.*: race detected during execution of test
+.*main_test.go:14: true
 FAIL`}},
 
 	{"slicebytetostring_pc", "run", "", "atexit_sleep_ms=0", `
@@ -439,7 +439,7 @@
   main\.main\(\)
       .*/main.go:[0-9]+ \+0x[0-9,a-f]+
 ==================`}},
-	// Test symbolizing wrappers. Both (*T).f and main.func1 are wrappers.
+	// Test symbolizing wrappers. Both (*T).f and main.gowrap1 are wrappers.
 	// go.dev/issue/60245
 	{"wrappersym", "run", "", "atexit_sleep_ms=0", `
 package main
@@ -465,7 +465,7 @@
       .*/main.go:15 \+0x[0-9,a-f]+
   main\.\(\*T\)\.f\(\)
       <autogenerated>:1 \+0x[0-9,a-f]+
-  main\.main\.func1\(\)
+  main\.main\.gowrap1\(\)
       .*/main.go:9 \+0x[0-9,a-f]+
 
 Previous write at 0x[0-9,a-f]+ by main goroutine:
diff --git a/src/runtime/race/race_darwin_amd64.go b/src/runtime/race/race_darwin_amd64.go
index fbb838a..02d73f8 100644
--- a/src/runtime/race/race_darwin_amd64.go
+++ b/src/runtime/race/race_darwin_amd64.go
@@ -28,6 +28,9 @@
 //go:cgo_import_dynamic _dyld_get_shared_cache_uuid _dyld_get_shared_cache_uuid ""
 //go:cgo_import_dynamic _dyld_image_count _dyld_image_count ""
 //go:cgo_import_dynamic _exit _exit ""
+//go:cgo_import_dynamic _sanitizer_internal_memcpy _sanitizer_internal_memcpy ""
+//go:cgo_import_dynamic _sanitizer_internal_memmove _sanitizer_internal_memmove ""
+//go:cgo_import_dynamic _sanitizer_internal_memset _sanitizer_internal_memset ""
 //go:cgo_import_dynamic abort abort ""
 //go:cgo_import_dynamic arc4random_buf arc4random_buf ""
 //go:cgo_import_dynamic close close ""
@@ -74,6 +77,7 @@
 //go:cgo_import_dynamic pthread_get_stackaddr_np pthread_get_stackaddr_np ""
 //go:cgo_import_dynamic pthread_get_stacksize_np pthread_get_stacksize_np ""
 //go:cgo_import_dynamic pthread_getspecific pthread_getspecific ""
+//go:cgo_import_dynamic pthread_introspection_hook_install pthread_introspection_hook_install ""
 //go:cgo_import_dynamic pthread_join pthread_join ""
 //go:cgo_import_dynamic pthread_self pthread_self ""
 //go:cgo_import_dynamic pthread_sigmask pthread_sigmask ""
@@ -99,3 +103,6 @@
 //go:cgo_import_dynamic vm_region_recurse_64 vm_region_recurse_64 ""
 //go:cgo_import_dynamic waitpid waitpid ""
 //go:cgo_import_dynamic write write ""
+//go:cgo_import_dynamic memcpy memcpy ""
+//go:cgo_import_dynamic memmove memmove ""
+//go:cgo_import_dynamic memset memset ""
diff --git a/src/runtime/race/race_darwin_arm64.go b/src/runtime/race/race_darwin_arm64.go
index fe8584c..cb703a6 100644
--- a/src/runtime/race/race_darwin_arm64.go
+++ b/src/runtime/race/race_darwin_arm64.go
@@ -8,6 +8,9 @@
 
 package race
 
+//go:cgo_import_dynamic _Block_object_assign _Block_object_assign ""
+//go:cgo_import_dynamic _Block_object_dispose _Block_object_dispose ""
+//go:cgo_import_dynamic _NSConcreteStackBlock _NSConcreteStackBlock ""
 //go:cgo_import_dynamic _NSGetArgv _NSGetArgv ""
 //go:cgo_import_dynamic _NSGetEnviron _NSGetEnviron ""
 //go:cgo_import_dynamic _NSGetExecutablePath _NSGetExecutablePath ""
@@ -20,8 +23,13 @@
 //go:cgo_import_dynamic _dyld_get_image_header _dyld_get_image_header ""
 //go:cgo_import_dynamic _dyld_get_image_name _dyld_get_image_name ""
 //go:cgo_import_dynamic _dyld_get_image_vmaddr_slide _dyld_get_image_vmaddr_slide ""
+//go:cgo_import_dynamic _dyld_get_shared_cache_range _dyld_get_shared_cache_range ""
+//go:cgo_import_dynamic _dyld_get_shared_cache_uuid _dyld_get_shared_cache_uuid ""
 //go:cgo_import_dynamic _dyld_image_count _dyld_image_count ""
 //go:cgo_import_dynamic _exit _exit ""
+//go:cgo_import_dynamic _sanitizer_internal_memcpy _sanitizer_internal_memcpy ""
+//go:cgo_import_dynamic _sanitizer_internal_memmove _sanitizer_internal_memmove ""
+//go:cgo_import_dynamic _sanitizer_internal_memset _sanitizer_internal_memset ""
 //go:cgo_import_dynamic abort abort ""
 //go:cgo_import_dynamic arc4random_buf arc4random_buf ""
 //go:cgo_import_dynamic bzero bzero ""
@@ -29,6 +37,7 @@
 //go:cgo_import_dynamic dlsym dlsym ""
 //go:cgo_import_dynamic dup dup ""
 //go:cgo_import_dynamic dup2 dup2 ""
+//go:cgo_import_dynamic dyld_shared_cache_iterate_text dyld_shared_cache_iterate_text ""
 //go:cgo_import_dynamic execve execve ""
 //go:cgo_import_dynamic exit exit ""
 //go:cgo_import_dynamic fstat fstat ""
@@ -68,6 +77,7 @@
 //go:cgo_import_dynamic pthread_get_stackaddr_np pthread_get_stackaddr_np ""
 //go:cgo_import_dynamic pthread_get_stacksize_np pthread_get_stacksize_np ""
 //go:cgo_import_dynamic pthread_getspecific pthread_getspecific ""
+//go:cgo_import_dynamic pthread_introspection_hook_install pthread_introspection_hook_install ""
 //go:cgo_import_dynamic pthread_join pthread_join ""
 //go:cgo_import_dynamic pthread_self pthread_self ""
 //go:cgo_import_dynamic pthread_sigmask pthread_sigmask ""
@@ -93,3 +103,6 @@
 //go:cgo_import_dynamic vm_region_recurse_64 vm_region_recurse_64 ""
 //go:cgo_import_dynamic waitpid waitpid ""
 //go:cgo_import_dynamic write write ""
+//go:cgo_import_dynamic memcpy memcpy ""
+//go:cgo_import_dynamic memmove memmove ""
+//go:cgo_import_dynamic memset memset ""
diff --git a/src/runtime/race/race_darwin_arm64.syso b/src/runtime/race/race_darwin_arm64.syso
index 4a23df2..8d8c120 100644
--- a/src/runtime/race/race_darwin_arm64.syso
+++ b/src/runtime/race/race_darwin_arm64.syso
Binary files differ
diff --git a/src/runtime/race/race_linux_arm64.syso b/src/runtime/race/race_linux_arm64.syso
index c8b3f48..7e9fc5a 100644
--- a/src/runtime/race/race_linux_arm64.syso
+++ b/src/runtime/race/race_linux_arm64.syso
Binary files differ
diff --git a/src/runtime/race/race_linux_ppc64le.syso b/src/runtime/race/race_linux_ppc64le.syso
index 1939f29..49824a9 100644
--- a/src/runtime/race/race_linux_ppc64le.syso
+++ b/src/runtime/race/race_linux_ppc64le.syso
Binary files differ
diff --git a/src/runtime/race/race_linux_s390x.syso b/src/runtime/race/race_linux_s390x.syso
index ed4a300..35a0f39 100644
--- a/src/runtime/race/race_linux_s390x.syso
+++ b/src/runtime/race/race_linux_s390x.syso
Binary files differ
diff --git a/src/runtime/race/testdata/chan_test.go b/src/runtime/race/testdata/chan_test.go
index e39ad4f..d592de9 100644
--- a/src/runtime/race/testdata/chan_test.go
+++ b/src/runtime/race/testdata/chan_test.go
@@ -766,7 +766,7 @@
 
 // Test that we call the proper race detector function when c.elemsize==0.
 // See https://github.com/golang/go/issues/42598
-func TestNoRaceElemetSize0(t *testing.T) {
+func TestNoRaceElemSize0(t *testing.T) {
 	var x, y int
 	var c = make(chan struct{}, 2)
 	c <- struct{}{}
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index 4fa130e..45c1255 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -24,7 +24,7 @@
 // Arguments are passed in CX, DX, R8, R9, the rest is on stack.
 // Callee-saved registers are: BX, BP, DI, SI, R12-R15.
 // SP must be 16-byte aligned. Windows also requires "stack-backing" for the 4 register arguments:
-// https://msdn.microsoft.com/en-us/library/ms235286.aspx
+// https://learn.microsoft.com/en-us/cpp/build/x64-calling-convention
 // We do not do this, because it seems to be intended for vararg/unprototyped functions.
 // Gcc-compiled race runtime does not try to use that space.
 
diff --git a/src/runtime/race_s390x.s b/src/runtime/race_s390x.s
index beb7f83..dadc12f 100644
--- a/src/runtime/race_s390x.s
+++ b/src/runtime/race_s390x.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build race
-// +build race
 
 #include "go_asm.h"
 #include "funcdata.h"
diff --git a/src/runtime/rand.go b/src/runtime/rand.go
new file mode 100644
index 0000000..10cd116
--- /dev/null
+++ b/src/runtime/rand.go
@@ -0,0 +1,247 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Random number generation
+
+package runtime
+
+import (
+	"internal/chacha8rand"
+	"internal/goarch"
+	"runtime/internal/math"
+	"unsafe"
+	_ "unsafe" // for go:linkname
+)
+
+// OS-specific startup can set startupRand if the OS passes
+// random data to the process at startup time.
+// For example Linux passes 16 bytes in the auxv vector.
+var startupRand []byte
+
+// globalRand holds the global random state.
+// It is only used at startup and for creating new m's.
+// Otherwise the per-m random state should be used
+// by calling rand.
+var globalRand struct {
+	lock  mutex
+	seed  [32]byte
+	state chacha8rand.State
+	init  bool
+}
+
+var readRandomFailed bool
+
+// randinit initializes the global random state.
+// It must be called before any use of grand.
+func randinit() {
+	lock(&globalRand.lock)
+	if globalRand.init {
+		fatal("randinit twice")
+	}
+
+	seed := &globalRand.seed
+	if startupRand != nil {
+		for i, c := range startupRand {
+			seed[i%len(seed)] ^= c
+		}
+		clear(startupRand)
+		startupRand = nil
+	} else {
+		if readRandom(seed[:]) != len(seed) {
+			// readRandom should never fail, but if it does we'd rather
+			// not make Go binaries completely unusable, so make up
+			// some random data based on the current time.
+			readRandomFailed = true
+			readTimeRandom(seed[:])
+		}
+	}
+	globalRand.state.Init(*seed)
+	clear(seed[:])
+	globalRand.init = true
+	unlock(&globalRand.lock)
+}
+
+// readTimeRandom stretches any entropy in the current time
+// into entropy the length of r and XORs it into r.
+// This is a fallback for when readRandom does not read
+// the full requested amount.
+// Whatever entropy r already contained is preserved.
+func readTimeRandom(r []byte) {
+	// Inspired by wyrand.
+	// An earlier version of this code used getg().m.procid as well,
+	// but note that this is called so early in startup that procid
+	// is not initialized yet.
+	v := uint64(nanotime())
+	for len(r) > 0 {
+		v ^= 0xa0761d6478bd642f
+		v *= 0xe7037ed1a0b428db
+		size := 8
+		if len(r) < 8 {
+			size = len(r)
+		}
+		for i := 0; i < size; i++ {
+			r[i] ^= byte(v >> (8 * i))
+		}
+		r = r[size:]
+		v = v>>32 | v<<32
+	}
+}
+
+// bootstrapRand returns a random uint64 from the global random generator.
+func bootstrapRand() uint64 {
+	lock(&globalRand.lock)
+	if !globalRand.init {
+		fatal("randinit missed")
+	}
+	for {
+		if x, ok := globalRand.state.Next(); ok {
+			unlock(&globalRand.lock)
+			return x
+		}
+		globalRand.state.Refill()
+	}
+}
+
+// bootstrapRandReseed reseeds the bootstrap random number generator,
+// clearing from memory any trace of previously returned random numbers.
+func bootstrapRandReseed() {
+	lock(&globalRand.lock)
+	if !globalRand.init {
+		fatal("randinit missed")
+	}
+	globalRand.state.Reseed()
+	unlock(&globalRand.lock)
+}
+
+// rand32 is uint32(rand()), called from compiler-generated code.
+//go:nosplit
+func rand32() uint32 {
+	return uint32(rand())
+}
+
+// rand returns a random uint64 from the per-m chacha8 state.
+// Do not change signature: used via linkname from other packages.
+//go:nosplit
+//go:linkname rand
+func rand() uint64 {
+	// Note: We avoid acquirem here so that in the fast path
+	// there is just a getg, an inlined c.Next, and a return.
+	// The performance difference on a 16-core AMD is
+	// 3.7ns/call this way versus 4.3ns/call with acquirem (+16%).
+	mp := getg().m
+	c := &mp.chacha8
+	for {
+		// Note: c.Next is marked nosplit,
+		// so we don't need to use mp.locks
+		// on the fast path, which is when the
+		// first attempt succeeds.
+		x, ok := c.Next()
+		if ok {
+			return x
+		}
+		mp.locks++ // hold m even though c.Refill may do stack split checks
+		c.Refill()
+		mp.locks--
+	}
+}
+
+// mrandinit initializes the random state of an m.
+func mrandinit(mp *m) {
+	var seed [4]uint64
+	for i := range seed {
+		seed[i] = bootstrapRand()
+	}
+	bootstrapRandReseed() // erase key we just extracted
+	mp.chacha8.Init64(seed)
+	mp.cheaprand = rand()
+}
+
+// randn is like rand() % n but faster.
+// Do not change signature: used via linkname from other packages.
+//go:nosplit
+//go:linkname randn
+func randn(n uint32) uint32 {
+	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+	return uint32((uint64(uint32(rand())) * uint64(n)) >> 32)
+}
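
randn above (and cheaprandn later in this file) replace the usual x % n with Lemire's multiply-shift reduction: one multiply and one shift map a uniform 32-bit value into [0, n). A small self-contained illustration of the same expression; the function name is illustrative:

	package main

	import "fmt"

	// lemireN mirrors the (uint64(uint32(rand())) * uint64(n)) >> 32 expression
	// used by randn: a uniform 32-bit x scaled down into the range [0, n).
	func lemireN(x, n uint32) uint32 {
		return uint32((uint64(x) * uint64(n)) >> 32)
	}

	func main() {
		const n = 10
		fmt.Println(lemireN(0, n))          // 0: bottom of the 32-bit range
		fmt.Println(lemireN(^uint32(0), n)) // 9: top of the 32-bit range
	}

The shift by 32 divides by 2^32, so the result is x*n/2^32, which is always strictly less than n.
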
+
+// cheaprand is a non-cryptographic-quality 32-bit random generator
+// suitable for calling at very high frequency (such as during scheduling decisions)
+// and at sensitive moments in the runtime (such as during stack unwinding).
+// It is "cheap" in the sense of both expense and quality.
+//
+// cheaprand must not be exported to other packages:
+// the rule is that other packages using runtime-provided
+// randomness must always use rand.
+//go:nosplit
+func cheaprand() uint32 {
+	mp := getg().m
+	// Implement wyrand: https://github.com/wangyi-fudan/wyhash
+	// Only platforms where math.Mul64 can be lowered
+	// by the compiler should be in this list.
+	if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
+		goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
+		goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 {
+		mp.cheaprand += 0xa0761d6478bd642f
+		hi, lo := math.Mul64(mp.cheaprand, mp.cheaprand^0xe7037ed1a0b428db)
+		return uint32(hi ^ lo)
+	}
+
+	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
+	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
+	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+	// This generator passes the SmallCrush suite, part of TestU01 framework:
+	// http://simul.iro.umontreal.ca/testu01/tu01.html
+	t := (*[2]uint32)(unsafe.Pointer(&mp.cheaprand))
+	s1, s0 := t[0], t[1]
+	s1 ^= s1 << 17
+	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
+	t[0], t[1] = s0, s1
+	return s0 + s1
+}
+
+// cheaprand64 is a non-cryptographic-quality 63-bit random generator
+// suitable for calling at very high frequency (such as during sampling decisions).
+// It is "cheap" in the sense of both expense and quality.
+//
+// cheaprand64 must not be exported to other packages:
+// the rule is that other packages using runtime-provided
+// randomness must always use rand.
+//go:nosplit
+func cheaprand64() int64 {
+	return int64(cheaprand())<<31 ^ int64(cheaprand())
+}
+
+// cheaprandn is like cheaprand() % n but faster.
+//
+// cheaprandn must not be exported to other packages:
+// the rule is that other packages using runtime-provided
+// randomness must always use randn.
+//go:nosplit
+func cheaprandn(n uint32) uint32 {
+	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+	return uint32((uint64(cheaprand()) * uint64(n)) >> 32)
+}
+
+// Too much legacy code has go:linkname references
+// to runtime.fastrand and friends, so keep these around for now.
+// Code should migrate to math/rand/v2.Uint64,
+// which is just as fast, but that's only available in Go 1.22+.
+// It would be reasonable to remove these in Go 1.24.
+// Do not call these from package runtime.
+
+//go:linkname legacy_fastrand runtime.fastrand
+func legacy_fastrand() uint32 {
+	return uint32(rand())
+}
+
+//go:linkname legacy_fastrandn runtime.fastrandn
+func legacy_fastrandn(n uint32) uint32 {
+	return randn(n)
+}
+
+//go:linkname legacy_fastrand64 runtime.fastrand64
+func legacy_fastrand64() uint64 {
+	return rand()
+}
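
The legacy_fastrand shims above exist only so that existing go:linkname users keep working; as their comment says, such code should move to math/rand/v2 on Go 1.22+. A hedged sketch of what that migration looks like for a package that previously linknamed runtime.fastrand and friends:

	package main

	import (
		"fmt"
		"math/rand/v2"
	)

	func main() {
		// Public replacements for the linknamed runtime helpers.
		fmt.Println(rand.Uint64())     // instead of fastrand64()
		fmt.Println(rand.Uint32())     // instead of fastrand()
		fmt.Println(rand.Uint32N(100)) // instead of fastrandn(100)
	}

The math/rand/v2 top-level functions draw on the per-m ChaCha8 state exposed here as runtime.rand, which is presumably why the shim comment can describe them as just as fast.
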
diff --git a/src/runtime/rand_test.go b/src/runtime/rand_test.go
index 92d07eb..baecb69 100644
--- a/src/runtime/rand_test.go
+++ b/src/runtime/rand_test.go
@@ -8,8 +8,20 @@
 	. "runtime"
 	"strconv"
 	"testing"
+	_ "unsafe" // for go:linkname
 )
 
+func TestReadRandom(t *testing.T) {
+	if *ReadRandomFailed {
+		switch GOOS {
+		default:
+			t.Fatalf("readRandom failed at startup")
+		case "plan9":
+			// ok
+		}
+	}
+}
+
 func BenchmarkFastrand(b *testing.B) {
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
@@ -51,3 +63,35 @@
 		})
 	}
 }
+
+//go:linkname fastrand runtime.fastrand
+func fastrand() uint32
+
+//go:linkname fastrandn runtime.fastrandn
+func fastrandn(uint32) uint32
+
+//go:linkname fastrand64 runtime.fastrand64
+func fastrand64() uint64
+
+func TestLegacyFastrand(t *testing.T) {
+	// Testing mainly that the calls work at all,
+	// but check that all three don't return the same number (1 in 2^64 chance)
+	{
+		x, y, z := fastrand(), fastrand(), fastrand()
+		if x == y && y == z {
+			t.Fatalf("fastrand three times = %#x, %#x, %#x, want different numbers", x, y, z)
+		}
+	}
+	{
+		x, y, z := fastrandn(1e9), fastrandn(1e9), fastrandn(1e9)
+		if x == y && y == z {
+			t.Fatalf("fastrandn three times = %#x, %#x, %#x, want different numbers", x, y, z)
+		}
+	}
+	{
+		x, y, z := fastrand64(), fastrand64(), fastrand64()
+		if x == y && y == z {
+			t.Fatalf("fastrand64 three times = %#x, %#x, %#x, want different numbers", x, y, z)
+		}
+	}
+}
diff --git a/src/runtime/rt0_openbsd_ppc64.s b/src/runtime/rt0_openbsd_ppc64.s
new file mode 100644
index 0000000..9fcad4f
--- /dev/null
+++ b/src/runtime/rt0_openbsd_ppc64.s
@@ -0,0 +1,16 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT _rt0_ppc64_openbsd(SB),NOSPLIT,$0
+	BR	main(SB)
+
+TEXT main(SB),NOSPLIT,$-8
+	// Make sure R0 is zero before _main
+	XOR	R0, R0
+
+	MOVD	$runtime·rt0_go(SB), R12
+	MOVD	R12, CTR
+	BR	(CTR)
diff --git a/src/runtime/rt0_openbsd_riscv64.s b/src/runtime/rt0_openbsd_riscv64.s
new file mode 100644
index 0000000..e57423e
--- /dev/null
+++ b/src/runtime/rt0_openbsd_riscv64.s
@@ -0,0 +1,14 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT _rt0_riscv64_openbsd(SB),NOSPLIT|NOFRAME,$0
+	MOV	0(X2), A0	// argc
+	ADD	$8, X2, A1	// argv
+	JMP	main(SB)
+
+TEXT main(SB),NOSPLIT|NOFRAME,$0
+	MOV	$runtime·rt0_go(SB), T0
+	JALR	ZERO, T0
diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go
index 8c759bf..1ae6ff0 100644
--- a/src/runtime/runtime-gdb_test.go
+++ b/src/runtime/runtime-gdb_test.go
@@ -85,8 +85,9 @@
 	if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" {
 		t.Skip("skipping gdb python tests on illumos and solaris; see golang.org/issue/20821")
 	}
-
-	cmd := exec.Command("gdb", "-nx", "-q", "--batch", "-iex", "python import sys; print('go gdb python support')")
+	args := []string{"-nx", "-q", "--batch", "-iex", "python import sys; print('go gdb python support')"}
+	gdbArgsFixup(args)
+	cmd := exec.Command("gdb", args...)
 	out, err := cmd.CombinedOutput()
 
 	if err != nil {
@@ -156,6 +157,25 @@
 	return 0
 }
 
+func gdbArgsFixup(args []string) {
+	if runtime.GOOS != "windows" {
+		return
+	}
+	// On Windows, some gdb flavors expect -ex and -iex arguments
+	// containing spaces to be double quoted.
+	var quote bool
+	for i, arg := range args {
+		if arg == "-iex" || arg == "-ex" {
+			quote = true
+		} else if quote {
+			if strings.ContainsRune(arg, ' ') {
+				args[i] = `"` + arg + `"`
+			}
+			quote = false
+		}
+	}
+}
+
 func TestGdbPython(t *testing.T) {
 	testGdbPython(t, false)
 }
@@ -269,12 +289,14 @@
 		"-ex", "echo END\n",
 		filepath.Join(dir, "a.exe"),
 	)
+	gdbArgsFixup(args)
 	got, err := exec.Command("gdb", args...).CombinedOutput()
 	t.Logf("gdb output:\n%s", got)
 	if err != nil {
 		t.Fatalf("gdb exited with error: %v", err)
 	}
 
+	got = bytes.ReplaceAll(got, []byte("\r\n"), []byte("\n")) // normalize line endings
 	firstLine, _, _ := bytes.Cut(got, []byte("\n"))
 	if string(firstLine) != "Loading Go Runtime support." {
 		// This can happen when using all.bash with
@@ -442,6 +464,7 @@
 		"-ex", "continue",
 		filepath.Join(dir, "a.exe"),
 	}
+	gdbArgsFixup(args)
 	cmd = testenv.Command(t, "gdb", args...)
 
 	// Work around the GDB hang reported in https://go.dev/issue/37405.
@@ -562,6 +585,7 @@
 		"-ex", "info types astruct",
 		filepath.Join(dir, "a.exe"),
 	}
+	gdbArgsFixup(args)
 	got, err := exec.Command("gdb", args...).CombinedOutput()
 	t.Logf("gdb output:\n%s", got)
 	if err != nil {
@@ -630,6 +654,7 @@
 		"-ex", "print 'runtime._PageSize'",
 		filepath.Join(dir, "a.exe"),
 	}
+	gdbArgsFixup(args)
 	got, err := exec.Command("gdb", args...).CombinedOutput()
 	t.Logf("gdb output:\n%s", got)
 	if err != nil {
@@ -692,6 +717,7 @@
 		"-ex", "backtrace",
 		filepath.Join(dir, "a.exe"),
 	}
+	gdbArgsFixup(args)
 	got, err := exec.Command("gdb", args...).CombinedOutput()
 	t.Logf("gdb output:\n%s", got)
 	if err != nil {
@@ -770,6 +796,7 @@
 		"-ex", "continue",
 		filepath.Join(dir, "a.exe"),
 	}
+	gdbArgsFixup(args)
 	got, err := exec.Command("gdb", args...).CombinedOutput()
 	t.Logf("gdb output:\n%s", got)
 	if err != nil {
diff --git a/src/runtime/runtime-gdb_unix_test.go b/src/runtime/runtime-gdb_unix_test.go
index f9cc648..8b602d1 100644
--- a/src/runtime/runtime-gdb_unix_test.go
+++ b/src/runtime/runtime-gdb_unix_test.go
@@ -8,6 +8,7 @@
 
 import (
 	"bytes"
+	"fmt"
 	"internal/testenv"
 	"io"
 	"os"
@@ -19,6 +20,43 @@
 	"testing"
 )
 
+func canGenerateCore(t *testing.T) bool {
+	// Ensure there is enough RLIMIT_CORE available to generate a full core.
+	var lim syscall.Rlimit
+	err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim)
+	if err != nil {
+		t.Fatalf("error getting rlimit: %v", err)
+	}
+	// Minimum RLIMIT_CORE max to allow. This is a conservative estimate.
+	// Most systems allow infinity.
+	const minRlimitCore = 100 << 20 // 100 MB
+	if lim.Max < minRlimitCore {
+		t.Skipf("RLIMIT_CORE max too low: %#+v", lim)
+	}
+
+	// Make sure core pattern will send core to the current directory.
+	b, err := os.ReadFile("/proc/sys/kernel/core_pattern")
+	if err != nil {
+		t.Fatalf("error reading core_pattern: %v", err)
+	}
+	if string(b) != "core\n" {
+		t.Skipf("Unexpected core pattern %q", string(b))
+	}
+
+	coreUsesPID := false
+	b, err = os.ReadFile("/proc/sys/kernel/core_uses_pid")
+	if err == nil {
+		switch string(bytes.TrimSpace(b)) {
+		case "0":
+		case "1":
+			coreUsesPID = true
+		default:
+			t.Skipf("unexpected core_uses_pid value %q", string(b))
+		}
+	}
+	return coreUsesPID
+}
+
 const coreSignalSource = `
 package main
 
@@ -80,33 +118,12 @@
 	t.Parallel()
 	checkGdbVersion(t)
 
-	// Ensure there is enough RLIMIT_CORE available to generate a full core.
-	var lim syscall.Rlimit
-	err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim)
-	if err != nil {
-		t.Fatalf("error getting rlimit: %v", err)
-	}
-	// Minimum RLIMIT_CORE max to allow. This is a conservative estimate.
-	// Most systems allow infinity.
-	const minRlimitCore = 100 << 20 // 100 MB
-	if lim.Max < minRlimitCore {
-		t.Skipf("RLIMIT_CORE max too low: %#+v", lim)
-	}
-
-	// Make sure core pattern will send core to the current directory.
-	b, err := os.ReadFile("/proc/sys/kernel/core_pattern")
-	if err != nil {
-		t.Fatalf("error reading core_pattern: %v", err)
-	}
-	if string(b) != "core\n" {
-		t.Skipf("Unexpected core pattern %q", string(b))
-	}
-
-	dir := t.TempDir()
+	coreUsesPID := canGenerateCore(t)
 
 	// Build the source code.
+	dir := t.TempDir()
 	src := filepath.Join(dir, "main.go")
-	err = os.WriteFile(src, []byte(coreSignalSource), 0644)
+	err := os.WriteFile(src, []byte(coreSignalSource), 0644)
 	if err != nil {
 		t.Fatalf("failed to create file: %v", err)
 	}
@@ -136,6 +153,8 @@
 	}
 	w.Close()
 
+	pid := cmd.Process.Pid
+
 	// Wait for child to be ready.
 	var buf [1]byte
 	if _, err := r.Read(buf[:]); err != io.EOF {
@@ -167,12 +186,17 @@
 		t.Fatalf("CoreDump got %v want true", ws.CoreDump())
 	}
 
+	coreFile := "core"
+	if coreUsesPID {
+		coreFile += fmt.Sprintf(".%d", pid)
+	}
+
 	// Execute gdb commands.
 	args := []string{"-nx", "-batch",
 		"-iex", "add-auto-load-safe-path " + filepath.Join(testenv.GOROOT(t), "src", "runtime"),
 		"-ex", "backtrace",
 		filepath.Join(dir, "a.exe"),
-		filepath.Join(dir, "core"),
+		filepath.Join(dir, coreFile),
 	}
 	cmd = testenv.Command(t, "gdb", args...)
 
@@ -210,3 +234,146 @@
 		t.Fatalf("could not find runtime symbol in backtrace after signal handler:\n%s", rest)
 	}
 }
+
+const coreCrashThreadSource = `
+package main
+
+/*
+#cgo CFLAGS: -g -O0
+#include <stdio.h>
+#include <stddef.h>
+void trigger_crash()
+{
+	int* ptr = NULL;
+	*ptr = 1024;
+}
+*/
+import "C"
+import (
+	"flag"
+	"fmt"
+	"os"
+	"runtime/debug"
+	"syscall"
+)
+
+func enableCore() {
+	debug.SetTraceback("crash")
+
+	var lim syscall.Rlimit
+	err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim)
+	if err != nil {
+		panic(fmt.Sprintf("error getting rlimit: %v", err))
+	}
+	lim.Cur = lim.Max
+	fmt.Fprintf(os.Stderr, "Setting RLIMIT_CORE = %+#v\n", lim)
+	err = syscall.Setrlimit(syscall.RLIMIT_CORE, &lim)
+	if err != nil {
+		panic(fmt.Sprintf("error setting rlimit: %v", err))
+	}
+}
+
+func main() {
+	flag.Parse()
+
+	enableCore()
+
+	C.trigger_crash()
+}
+`
+
+// TestGdbCoreCrashThreadBacktrace tests that the runtime lets the faulting thread crash the process
+// and that the faulting thread shows up as thread number one when gdb inspects the core file.
+func TestGdbCoreCrashThreadBacktrace(t *testing.T) {
+	if runtime.GOOS != "linux" {
+		// N.B. This test isn't fundamentally Linux-only, but it needs
+		// to know how to enable/find core files on each OS.
+		t.Skip("Test only supported on Linux")
+	}
+	if runtime.GOARCH != "386" && runtime.GOARCH != "amd64" {
+		// TODO(go.dev/issue/25218): Other architectures use sigreturn
+		// via VDSO, which we somehow don't handle correctly.
+		t.Skip("Backtrace through signal handler only works on 386 and amd64")
+	}
+
+	testenv.SkipFlaky(t, 65138)
+
+	testenv.MustHaveCGO(t)
+	checkGdbEnvironment(t)
+	t.Parallel()
+	checkGdbVersion(t)
+
+	coreUsesPID := canGenerateCore(t)
+
+	// Build the source code.
+	dir := t.TempDir()
+	src := filepath.Join(dir, "main.go")
+	err := os.WriteFile(src, []byte(coreCrashThreadSource), 0644)
+	if err != nil {
+		t.Fatalf("failed to create file: %v", err)
+	}
+	cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe", "main.go")
+	cmd.Dir = dir
+	out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
+	if err != nil {
+		t.Fatalf("building source %v\n%s", err, out)
+	}
+
+	// Start the test binary.
+	cmd = testenv.Command(t, "./a.exe")
+	cmd.Dir = dir
+	var output bytes.Buffer
+	cmd.Stdout = &output // for test logging
+	cmd.Stderr = &output
+
+	if err := cmd.Start(); err != nil {
+		t.Fatalf("error starting test binary: %v", err)
+	}
+
+	pid := cmd.Process.Pid
+
+	err = cmd.Wait()
+	t.Logf("child output:\n%s", output.String())
+	if err == nil {
+		t.Fatalf("Wait succeeded, want SIGABRT")
+	}
+	ee, ok := err.(*exec.ExitError)
+	if !ok {
+		t.Fatalf("Wait err got %T %v, want exec.ExitError", ee, ee)
+	}
+	ws, ok := ee.Sys().(syscall.WaitStatus)
+	if !ok {
+		t.Fatalf("Sys got %T %v, want syscall.WaitStatus", ee.Sys(), ee.Sys())
+	}
+	if ws.Signal() != syscall.SIGABRT {
+		t.Fatalf("Signal got %d want SIGABRT", ws.Signal())
+	}
+	if !ws.CoreDump() {
+		t.Fatalf("CoreDump got %v want true", ws.CoreDump())
+	}
+
+	coreFile := "core"
+	if coreUsesPID {
+		coreFile += fmt.Sprintf(".%d", pid)
+	}
+
+	// Execute gdb commands.
+	args := []string{"-nx", "-batch",
+		"-iex", "add-auto-load-safe-path " + filepath.Join(testenv.GOROOT(t), "src", "runtime"),
+		"-ex", "backtrace",
+		filepath.Join(dir, "a.exe"),
+		filepath.Join(dir, coreFile),
+	}
+	cmd = testenv.Command(t, "gdb", args...)
+
+	got, err := cmd.CombinedOutput()
+	t.Logf("gdb output:\n%s", got)
+	if err != nil {
+		t.Fatalf("gdb exited with error: %v", err)
+	}
+
+	re := regexp.MustCompile(`#.* trigger_crash`)
+	if found := re.Find(got) != nil; !found {
+		t.Fatalf("could not find trigger_crash in backtrace")
+	}
+}
diff --git a/src/runtime/runtime-seh_windows_test.go b/src/runtime/runtime-seh_windows_test.go
index 27e4f49..4250953 100644
--- a/src/runtime/runtime-seh_windows_test.go
+++ b/src/runtime/runtime-seh_windows_test.go
@@ -112,7 +112,7 @@
 		}
 		name := fn.Name()
 		switch name {
-		case "runtime.deferCallSave", "runtime.runOpenDeferFrame", "runtime.panicmem":
+		case "runtime.panicmem":
 			// These functions are skipped as they appear inconsistently depending
 			// whether inlining is on or off.
 			continue
diff --git a/src/runtime/runtime.go b/src/runtime/runtime.go
index 0822d0e..c70a76e 100644
--- a/src/runtime/runtime.go
+++ b/src/runtime/runtime.go
@@ -17,34 +17,100 @@
 var ticks ticksType
 
 type ticksType struct {
-	lock mutex
-	val  atomic.Int64
+	// lock protects access to start* and val.
+	lock       mutex
+	startTicks int64
+	startTime  int64
+	val        atomic.Int64
 }
 
-// Note: Called by runtime/pprof in addition to runtime code.
-func tickspersecond() int64 {
+// init initializes ticks to maximize the chance that we have a good ticksPerSecond reference.
+//
+// Must not run concurrently with ticksPerSecond.
+func (t *ticksType) init() {
+	lock(&ticks.lock)
+	t.startTime = nanotime()
+	t.startTicks = cputicks()
+	unlock(&ticks.lock)
+}
+
+// minTimeForTicksPerSecond is the minimum elapsed time we require to consider our ticksPerSecond
+// measurement to be of decent enough quality for profiling.
+//
+// There's a linear relationship here between minimum time and error from the true value.
+// The error from the true ticks-per-second in a linux/amd64 VM seems to be:
+// -   1 ms -> ~0.02% error
+// -   5 ms -> ~0.004% error
+// -  10 ms -> ~0.002% error
+// -  50 ms -> ~0.0003% error
+// - 100 ms -> ~0.0001% error
+//
+// We're willing to take 0.004% error here, because ticksPerSecond is intended to be used for
+// converting durations, not timestamps. Durations are usually going to be much larger, and so
+// the tiny error doesn't matter. The error is definitely going to be a problem when trying to
+// use this for timestamps, as it'll make those timestamps much less likely to line up.
+const minTimeForTicksPerSecond = 5_000_000*(1-osHasLowResClockInt) + 100_000_000*osHasLowResClockInt
+
+// ticksPerSecond returns a conversion rate between the cputicks clock and the nanotime clock.
+//
+// Note: Clocks are hard. Using this as an actual conversion rate for timestamps is ill-advised
+// and should be avoided when possible. Use only for durations, where a tiny error term isn't going
+// to make a meaningful difference in even a 1ms duration. If an accurate timestamp is needed,
+// use nanotime instead. (The entire Windows platform is a broad exception to this rule, where nanotime
+// produces timestamps on such a coarse granularity that the error from this conversion is actually
+// preferable.)
+//
+// The strategy for computing the conversion rate is to write down nanotime and cputicks as
+// early in process startup as possible. From then, we just need to wait until we get values
+// from nanotime that we can use (some platforms have a really coarse system time granularity).
+// We require some amount of time to pass to ensure that the conversion rate is fairly accurate
+// in aggregate. But because we compute this rate lazily, there's a pretty good chance a decent
+// amount of time has passed by the time we get here.
+//
+// Must be called from a normal goroutine context (running regular goroutine with a P).
+//
+// Called by runtime/pprof in addition to runtime code.
+//
+// TODO(mknyszek): This doesn't account for things like CPU frequency scaling. Consider
+// a more sophisticated and general approach in the future.
+func ticksPerSecond() int64 {
+	// Get the conversion rate if we've already computed it.
 	r := ticks.val.Load()
 	if r != 0 {
 		return r
 	}
-	lock(&ticks.lock)
-	r = ticks.val.Load()
-	if r == 0 {
-		t0 := nanotime()
-		c0 := cputicks()
-		usleep(100 * 1000)
-		t1 := nanotime()
-		c1 := cputicks()
-		if t1 == t0 {
-			t1++
+
+	// Compute the conversion rate.
+	for {
+		lock(&ticks.lock)
+		r = ticks.val.Load()
+		if r != 0 {
+			unlock(&ticks.lock)
+			return r
 		}
-		r = (c1 - c0) * 1000 * 1000 * 1000 / (t1 - t0)
-		if r == 0 {
-			r++
+
+		// Grab the current time in both clocks.
+		nowTime := nanotime()
+		nowTicks := cputicks()
+
+		// See if we can use these times.
+		if nowTicks > ticks.startTicks && nowTime-ticks.startTime > minTimeForTicksPerSecond {
+			// Perform the calculation with floats. We don't want to risk overflow.
+			r = int64(float64(nowTicks-ticks.startTicks) * 1e9 / float64(nowTime-ticks.startTime))
+			if r == 0 {
+				// Zero is both a sentinel value and it would be bad if callers used this as
+				// a divisor. We tried our best, so just make it 1.
+				r++
+			}
+			ticks.val.Store(r)
+			unlock(&ticks.lock)
+			break
 		}
-		ticks.val.Store(r)
+		unlock(&ticks.lock)
+
+		// Sleep in one millisecond increments until we have a reliable time.
+		timeSleep(1_000_000)
 	}
-	unlock(&ticks.lock)
 	return r
 }
 
@@ -101,12 +167,17 @@
 		if newInc == nil {
 			return
 		}
-		// If other goroutines are racing here, no big deal. One will win,
-		// and all the inc functions will be using the same underlying
-		// *godebug.Setting.
 		inc = new(func())
 		*inc = (*newInc)(g.name)
-		g.inc.Store(inc)
+		if raceenabled {
+			racereleasemerge(unsafe.Pointer(&g.inc))
+		}
+		if !g.inc.CompareAndSwap(nil, inc) {
+			inc = g.inc.Load()
+		}
+	}
+	if raceenabled {
+		raceacquire(unsafe.Pointer(&g.inc))
 	}
 	(*inc)()
 }
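
The hunk above replaces the old racy store of inc with a CompareAndSwap plus explicit race annotations (an assumption: the annotations are there because runtime-internal atomics are not visible to the race detector, so the release/acquire pairing has to be spelled out). Outside the runtime, the same publish-once shape can rely on sync/atomic alone; a minimal standalone sketch:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// publishOnce installs fn only if nothing has been installed yet and returns
	// whichever function ended up published, matching the CompareAndSwap shape
	// used for g.inc above.
	func publishOnce(slot *atomic.Pointer[func()], fn *func()) *func() {
		if !slot.CompareAndSwap(nil, fn) {
			return slot.Load() // lost the race; use the winner's function
		}
		return fn
	}

	func main() {
		var slot atomic.Pointer[func()]
		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			i := i
			wg.Add(1)
			go func() {
				defer wg.Done()
				f := func() { fmt.Println("published by goroutine", i) }
				publishOnce(&slot, &f)
			}()
		}
		wg.Wait()
		(*slot.Load())() // every caller observes the same published function
	}
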
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index 92a7e02..afe1bdd 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -307,25 +307,28 @@
 // existing int var for that value, which may
 // already have an initial value.
 var debug struct {
-	cgocheck           int32
-	clobberfree        int32
-	dontfreezetheworld int32
-	efence             int32
-	gccheckmark        int32
-	gcpacertrace       int32
-	gcshrinkstackoff   int32
-	gcstoptheworld     int32
-	gctrace            int32
-	invalidptr         int32
-	madvdontneed       int32 // for Linux; issue 28466
-	scavtrace          int32
-	scheddetail        int32
-	schedtrace         int32
-	tracebackancestors int32
-	asyncpreemptoff    int32
-	harddecommit       int32
-	adaptivestackstart int32
-	tracefpunwindoff   int32
+	cgocheck                int32
+	clobberfree             int32
+	disablethp              int32
+	dontfreezetheworld      int32
+	efence                  int32
+	gccheckmark             int32
+	gcpacertrace            int32
+	gcshrinkstackoff        int32
+	gcstoptheworld          int32
+	gctrace                 int32
+	invalidptr              int32
+	madvdontneed            int32 // for Linux; issue 28466
+	runtimeContentionStacks atomic.Int32
+	scavtrace               int32
+	scheddetail             int32
+	schedtrace              int32
+	tracebackancestors      int32
+	asyncpreemptoff         int32
+	harddecommit            int32
+	adaptivestackstart      int32
+	tracefpunwindoff        int32
+	traceadvanceperiod      int32
 
 	// debug.malloc is used as a combined debug check
 	// in the malloc function and should be set
@@ -342,6 +345,7 @@
 	{name: "allocfreetrace", value: &debug.allocfreetrace},
 	{name: "clobberfree", value: &debug.clobberfree},
 	{name: "cgocheck", value: &debug.cgocheck},
+	{name: "disablethp", value: &debug.disablethp},
 	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
 	{name: "efence", value: &debug.efence},
 	{name: "gccheckmark", value: &debug.gccheckmark},
@@ -351,6 +355,7 @@
 	{name: "gctrace", value: &debug.gctrace},
 	{name: "invalidptr", value: &debug.invalidptr},
 	{name: "madvdontneed", value: &debug.madvdontneed},
+	{name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks},
 	{name: "sbrk", value: &debug.sbrk},
 	{name: "scavtrace", value: &debug.scavtrace},
 	{name: "scheddetail", value: &debug.scheddetail},
@@ -362,6 +367,7 @@
 	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
 	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
 	{name: "panicnil", atomic: &debug.panicnil},
+	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
 }
 
 func parsedebugvars() {
@@ -380,6 +386,7 @@
 		// Hence, default to MADV_DONTNEED.
 		debug.madvdontneed = 1
 	}
+	debug.traceadvanceperiod = defaultTraceAdvancePeriod
 
 	godebug := gogetenv("GODEBUG")
 
@@ -616,7 +623,6 @@
 //go:linkname reflect_resolveTextOff reflect.resolveTextOff
 func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
 	return toRType((*_type)(rtype)).textOff(textOff(off))
-
 }
 
 // reflectlite_resolveNameOff resolves a name offset from a base pointer.
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index f4c76ab..63320d4 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -6,6 +6,7 @@
 
 import (
 	"internal/abi"
+	"internal/chacha8rand"
 	"internal/goarch"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
@@ -342,7 +343,7 @@
 	bp   uintptr // for framepointer-enabled architectures
 }
 
-// sudog represents a g in a wait list, such as for sending/receiving
+// sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving
 // on a channel.
 //
 // sudog is necessary because the g ↔ synchronization object relation
@@ -382,6 +383,13 @@
 	// because c was closed.
 	success bool
 
+	// waiters is a count of the semaRoot waiting list entries other than the head of the list,
+	// clamped to a uint16 to fit in unused space.
+	// Only meaningful at the head of the list.
+	// (If we wanted to be overly clever, we could store a high 16 bits
+	// in the second entry in the list.)
+	waiters uint16
+
 	parent   *sudog // semaRoot binary tree
 	waitlink *sudog // g.waiting list or semaRoot
 	waittail *sudog // semaRoot
@@ -416,7 +424,7 @@
 	// stack describes the actual stack memory: [stack.lo, stack.hi).
 	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
 	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
-	// stackguard1 is the stack pointer compared in the C stack growth prologue.
+	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
 	// It is stack.lo+StackGuard on g0 and gsignal stacks.
 	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
 	stack       stack   // offset known to runtime/cgo
@@ -433,7 +441,7 @@
 	// param is a generic pointer parameter field used to pass
 	// values in particular contexts where other storage for the
 	// parameter would be difficult to find. It is currently used
-	// in three ways:
+	// in four ways:
 	// 1. When a channel operation wakes up a blocked goroutine, it sets param to
 	//    point to the sudog of the completed blocking operation.
 	// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
@@ -441,6 +449,8 @@
 	//    stack may have moved in the meantime.
 	// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
 	//    closure in the runtime is forbidden.
+	// 4. When a panic is recovered and control returns to the respective frame,
+	//    param may point to a savedOpenDeferState.
 	param        unsafe.Pointer
 	atomicstatus atomic.Uint32
 	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
@@ -470,8 +480,13 @@
 	// park on a chansend or chanrecv. Used to signal an unsafe point
 	// for stack shrinking.
 	parkingOnChan atomic.Bool
+	// inMarkAssist indicates whether the goroutine is in mark assist.
+	// Used by the execution tracer.
+	inMarkAssist bool
+	coroexit     bool // argument to coroswitch_m
 
 	raceignore    int8  // ignore race detection events
+	nocgocallback bool  // whether callbacks from C are disabled
 	tracking      bool  // whether we're tracking this G for sched latency statistics
 	trackingSeq   uint8 // used to decide whether to track this G
 	trackingStamp int64 // timestamp of when the G last started being tracked
@@ -493,6 +508,8 @@
 	timer         *timer         // cached timer for time.Sleep
 	selectDone    atomic.Uint32  // are we participating in a select and did someone win the race?
 
+	coroarg *coro // argument during coroutine transfers
+
 	// goroutineProfiled indicates the status of this goroutine's stack for the
 	// current in-progress goroutine profile
 	goroutineProfiled goroutineProfileStateHolder
@@ -562,8 +579,8 @@
 	incgo         bool          // m is executing a cgo call
 	isextra       bool          // m is an extra m
 	isExtraInC    bool          // m is an extra m that is not executing Go code
+	isExtraInSig  bool          // m is an extra m in a signal handler
 	freeWait      atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
-	fastrand      uint64
 	needextram    bool
 	traceback     uint8
 	ncgocall      uint64        // number of cgo calls in total
@@ -574,11 +591,13 @@
 	alllink       *m // on allm
 	schedlink     muintptr
 	lockedg       guintptr
-	createstack   [32]uintptr // stack that created this thread.
+	createstack   [32]uintptr // stack that created this thread; it is used for StackRecord.Stack0, so it must align with it.
 	lockedExt     uint32      // tracking for external LockOSThread
 	lockedInt     uint32      // tracking for internal lockOSThread
 	nextwaitm     muintptr    // next m waiting for lock
 
+	mLockProfile mLockProfile // fields relating to runtime.lock contention
+
 	// wait* are used to carry arguments from gopark into park_m, because
 	// there's no stack to put them on. That is their sole purpose.
 	waitunlockf          func(*g, unsafe.Pointer) bool
@@ -609,10 +628,16 @@
 	// Whether this is a pending preemption signal on this M.
 	signalPending atomic.Uint32
 
+	// pcvalue lookup cache
+	pcvalueCache pcvalueCache
+
 	dlogPerM
 
 	mOS
 
+	chacha8   chacha8rand.State
+	cheaprand uint64
+
 	// Up to 10 locks held by this m, maintained by the lock ranking code.
 	locksHeldLen int
 	locksHeld    [10]heldLockInfo
@@ -838,7 +863,7 @@
 	sysmonwait atomic.Bool
 	sysmonnote note
 
-	// safepointFn should be called on each P at the next GC
+	// safePointFn should be called on each P at the next GC
 	// safepoint if p.runSafePointFn is set.
 	safePointFn   func(*p)
 	safePointWait int32
@@ -868,6 +893,27 @@
 	// totalMutexWaitTime is the sum of time goroutines have spent in _Gwaiting
 	// with a waitreason of the form waitReasonSync{RW,}Mutex{R,}Lock.
 	totalMutexWaitTime atomic.Int64
+
+	// stwStoppingTimeGC/Other are distributions of stop-the-world stopping
+	// latencies, defined as the time taken by stopTheWorldWithSema to get
+	// all Ps to stop. stwStoppingTimeGC covers all GC-related STWs,
+	// stwStoppingTimeOther covers the others.
+	stwStoppingTimeGC    timeHistogram
+	stwStoppingTimeOther timeHistogram
+
+	// stwTotalTimeGC/Other are distributions of stop-the-world total
+	// latencies, defined as the total time from stopTheWorldWithSema to
+	// startTheWorldWithSema. This is a superset of
+	// stwStoppingTimeGC/Other. stwTotalTimeGC covers all GC-related STWs,
+	// stwTotalTimeOther covers the others.
+	stwTotalTimeGC    timeHistogram
+	stwTotalTimeOther timeHistogram
+
+	// totalRuntimeLockWaitTime (plus the value of lockWaitTime on each M in
+	// allm) is the sum of time goroutines have spent in _Grunnable and with an
+	// M, but waiting for locks within the runtime. This field stores the value
+	// for Ms that have exited.
+	totalRuntimeLockWaitTime atomic.Int64
 }
 
 // Values for the flags field of a sigTabT.
@@ -947,7 +993,7 @@
 // layout of Itab known to compilers
 // allocated in non-garbage-collected memory
 // Needs to be in sync with
-// ../cmd/compile/internal/reflectdata/reflect.go:/^func.WriteTabs.
+// ../cmd/compile/internal/reflectdata/reflect.go:/^func.WritePluginTable.
 type itab struct {
 	inter *interfacetype
 	_type *_type
@@ -969,27 +1015,6 @@
 	idle atomic.Bool
 }
 
-// extendRandom extends the random numbers in r[:n] to the whole slice r.
-// Treats n<0 as n==0.
-func extendRandom(r []byte, n int) {
-	if n < 0 {
-		n = 0
-	}
-	for n < len(r) {
-		// Extend random bits using hash function & time seed
-		w := n
-		if w > 16 {
-			w = 16
-		}
-		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
-		for i := 0; i < goarch.PtrSize && n < len(r); i++ {
-			r[n] = byte(h)
-			n++
-			h >>= 8
-		}
-	}
-}
-
 // A _defer holds an entry on the list of deferred calls.
 // If you add a field here, add code to clear it in deferProcStack.
 // This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
@@ -999,29 +1024,16 @@
 // initialize them are not required. All defers must be manually scanned,
 // and for heap defers, marked.
 type _defer struct {
-	started bool
-	heap    bool
-	// openDefer indicates that this _defer is for a frame with open-coded
-	// defers. We have only one defer record for the entire frame (which may
-	// currently have 0, 1, or more defers active).
-	openDefer bool
+	heap      bool
+	rangefunc bool    // true for rangefunc list
 	sp        uintptr // sp at time of defer
 	pc        uintptr // pc at time of defer
 	fn        func()  // can be nil for open-coded defers
-	_panic    *_panic // panic that is running defer
 	link      *_defer // next defer on G; can point to either heap or stack!
 
-	// If openDefer is true, the fields below record values about the stack
-	// frame and associated function that has the open-coded defer(s). sp
-	// above will be the sp for the frame, and pc will be address of the
-	// deferreturn call in the function.
-	fd   unsafe.Pointer // funcdata for the function associated with the frame
-	varp uintptr        // value of varp for the stack frame
-	// framepc is the current pc associated with the stack frame. Together,
-	// with sp above (which is the sp associated with the stack frame),
-	// framepc/sp can be used as pc/sp pair to continue a stack trace via
-	// gentraceback().
-	framepc uintptr
+	// If rangefunc is true, *head is the head of the atomic linked list
+	// during a range-over-func execution.
+	head *atomic.Pointer[_defer]
 }
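The head field above keeps rangefunc defers on an atomically updated singly linked list. As an illustration only (the node type and push helper below are hypothetical, not the runtime's _defer handling), prepending to such a list with sync/atomic's generic Pointer looks like this:

package main

import (
	"fmt"
	"sync/atomic"
)

type node struct {
	val  int
	next *node
}

// push prepends n to the list whose head is held in head, retrying
// the compare-and-swap until no other goroutine raced with us.
func push(head *atomic.Pointer[node], n *node) {
	for {
		old := head.Load()
		n.next = old
		if head.CompareAndSwap(old, n) {
			return
		}
	}
}

func main() {
	var head atomic.Pointer[node]
	push(&head, &node{val: 1})
	push(&head, &node{val: 2})
	for n := head.Load(); n != nil; n = n.next {
		fmt.Println(n.val) // 2, then 1
	}
}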
 
 // A _panic holds information about an active panic.
@@ -1033,14 +1045,39 @@
 // _panic values only live on the stack, regular stack pointer
 // adjustment takes care of them.
 type _panic struct {
-	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
-	arg       any            // argument to panic
-	link      *_panic        // link to earlier panic
-	pc        uintptr        // where to return to in runtime if this panic is bypassed
-	sp        unsafe.Pointer // where to return to in runtime if this panic is bypassed
-	recovered bool           // whether this panic is over
-	aborted   bool           // the panic was aborted
-	goexit    bool
+	argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
+	arg  any            // argument to panic
+	link *_panic        // link to earlier panic
+
+	// startPC and startSP track where _panic.start was called.
+	startPC uintptr
+	startSP unsafe.Pointer
+
+	// The current stack frame that we're running deferred calls for.
+	sp unsafe.Pointer
+	lr uintptr
+	fp unsafe.Pointer
+
+	// retpc stores the PC where the panic should jump back to, if the
+	// function last returned by _panic.next() recovers the panic.
+	retpc uintptr
+
+	// Extra state for handling open-coded defers.
+	deferBitsPtr *uint8
+	slotsPtr     unsafe.Pointer
+
+	recovered   bool // whether this panic has been recovered
+	goexit      bool
+	deferreturn bool
+}
+
+// savedOpenDeferState tracks the extra state from _panic that's
+// necessary for deferreturn to pick up where gopanic left off,
+// without needing to unwind the stack.
+type savedOpenDeferState struct {
+	retpc           uintptr
+	deferBitsOffset uintptr
+	slotsOffset     uintptr
 }
 
 // ancestorInfo records details of where a goroutine was started.
@@ -1087,6 +1124,11 @@
 	waitReasonDebugCall                               // "debug call"
 	waitReasonGCMarkTermination                       // "GC mark termination"
 	waitReasonStoppingTheWorld                        // "stopping the world"
+	waitReasonFlushProcCaches                         // "flushing proc caches"
+	waitReasonTraceGoroutineStatus                    // "trace goroutine status"
+	waitReasonTraceProcStatus                         // "trace proc status"
+	waitReasonPageTraceFlush                          // "page trace flush"
+	waitReasonCoroutine                               // "coroutine"
 )
 
 var waitReasonStrings = [...]string{
@@ -1122,6 +1164,11 @@
 	waitReasonDebugCall:             "debug call",
 	waitReasonGCMarkTermination:     "GC mark termination",
 	waitReasonStoppingTheWorld:      "stopping the world",
+	waitReasonFlushProcCaches:       "flushing proc caches",
+	waitReasonTraceGoroutineStatus:  "trace goroutine status",
+	waitReasonTraceProcStatus:       "trace proc status",
+	waitReasonPageTraceFlush:        "page trace flush",
+	waitReasonCoroutine:             "coroutine",
 }
 
 func (w waitReason) String() string {
@@ -1180,7 +1227,9 @@
 	processorVersionInfo uint32
 	isIntel              bool
 
-	goarm uint8 // set by cmd/link on arm systems
+	// set by cmd/link on arm systems
+	goarm       uint8
+	goarmsoftfp uint8
 )
 
 // Set by the linker so the runtime can determine the buildmode.
diff --git a/src/runtime/runtime_linux_test.go b/src/runtime/runtime_linux_test.go
index 6af5561..ab2452c 100644
--- a/src/runtime/runtime_linux_test.go
+++ b/src/runtime/runtime_linux_test.go
@@ -45,7 +45,7 @@
 // Use a misaligned pointer to get -EINVAL.
 func TestMincoreErrorSign(t *testing.T) {
 	var dst byte
-	v := Mincore(Add(unsafe.Pointer(new(int32)), 1), 1, &dst)
+	v := Mincore(unsafe.Add(unsafe.Pointer(new(int32)), 1), 1, &dst)
 
 	const EINVAL = 0x16
 	if v != -EINVAL {
diff --git a/src/runtime/rwmutex.go b/src/runtime/rwmutex.go
index ede3d13..34d8f67 100644
--- a/src/runtime/rwmutex.go
+++ b/src/runtime/rwmutex.go
@@ -25,6 +25,43 @@
 
 	readerCount atomic.Int32 // number of pending readers
 	readerWait  atomic.Int32 // number of departing readers
+
+	readRank  lockRank // semantic lock rank for read locking
+}
+
+// Lock ranking an rwmutex has two aspects:
+//
+// Semantic ranking: this rwmutex represents some higher level lock that
+// protects some resource (e.g., allocmLock protects creation of new Ms). The
+// read and write locks of that resource need to be represented in the lock
+// rank.
+//
+// Internal ranking: as an implementation detail, rwmutex uses two mutexes:
+// rLock and wLock. These have lock order requirements: wLock must be locked
+// before rLock. This also needs to be represented in the lock rank.
+//
+// Semantic ranking is represented by acquiring readRank during read lock and
+// writeRank during write lock.
+//
+// wLock is held for the duration of a write lock, so it uses writeRank
+// directly, both for semantic and internal ranking. rLock is only held
+// temporarily inside the rlock/lock methods, so it uses readRankInternal to
+// represent internal ranking. Semantic ranking is represented by a separate
+// acquire of readRank for the duration of a read lock.
+//
+// The lock ranking must document this ordering:
+// - readRankInternal is a leaf lock.
+// - readRank is taken before readRankInternal.
+// - writeRank is taken before readRankInternal.
+// - readRank is placed in the lock order wherever a read lock of this rwmutex
+//   belongs.
+// - writeRank is placed in the lock order wherever a write lock of this
+//   rwmutex belongs.
+func (rw *rwmutex) init(readRank, readRankInternal, writeRank lockRank) {
+	rw.readRank = readRank
+
+	lockInit(&rw.rLock, readRankInternal)
+	lockInit(&rw.wLock, writeRank)
 }
 
 const rwmutexMaxReaders = 1 << 30
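To make the ordering rules in the comment above concrete, here is a toy rank checker outside the runtime; the rank names and the allowedBefore table are illustrative assumptions, not the real lockrank machinery:

package main

import "fmt"

type rank int

const (
	rankRead         rank = iota // semantic read rank
	rankWrite                    // semantic write rank (wLock)
	rankReadInternal             // internal rLock rank, a leaf
)

// allowedBefore lists which rank may be acquired while another is held.
var allowedBefore = map[rank][]rank{
	rankRead:  {rankReadInternal},
	rankWrite: {rankReadInternal},
}

// acquire panics if taking next while holding held would violate the order.
func acquire(held []rank, next rank) []rank {
	for _, h := range held {
		ok := false
		for _, a := range allowedBefore[h] {
			if a == next {
				ok = true
			}
		}
		if !ok {
			panic(fmt.Sprintf("rank %d acquired while holding rank %d", next, h))
		}
	}
	return append(held, next)
}

func main() {
	// rlock path: semantic read rank first, then the internal rLock.
	fmt.Println("rlock:", acquire(acquire(nil, rankRead), rankReadInternal))
	// lock path: wLock (write rank) first, then the internal rLock.
	fmt.Println("lock: ", acquire(acquire(nil, rankWrite), rankReadInternal))
}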
@@ -36,10 +73,14 @@
 	// deadlock (issue #20903). Alternatively, we could drop the P
 	// while sleeping.
 	acquirem()
+
+	acquireLockRank(rw.readRank)
+	lockWithRankMayAcquire(&rw.rLock, getLockRank(&rw.rLock))
+
 	if rw.readerCount.Add(1) < 0 {
 		// A writer is pending. Park on the reader queue.
 		systemstack(func() {
-			lockWithRank(&rw.rLock, lockRankRwmutexR)
+			lock(&rw.rLock)
 			if rw.readerPass > 0 {
 				// Writer finished.
 				rw.readerPass -= 1
@@ -67,7 +108,7 @@
 		// A writer is pending.
 		if rw.readerWait.Add(-1) == 0 {
 			// The last reader unblocks the writer.
-			lockWithRank(&rw.rLock, lockRankRwmutexR)
+			lock(&rw.rLock)
 			w := rw.writer.ptr()
 			if w != nil {
 				notewakeup(&w.park)
@@ -75,18 +116,19 @@
 			unlock(&rw.rLock)
 		}
 	}
+	releaseLockRank(rw.readRank)
 	releasem(getg().m)
 }
 
 // lock locks rw for writing.
 func (rw *rwmutex) lock() {
 	// Resolve competition with other writers and stick to our P.
-	lockWithRank(&rw.wLock, lockRankRwmutexW)
+	lock(&rw.wLock)
 	m := getg().m
 	// Announce that there is a pending writer.
 	r := rw.readerCount.Add(-rwmutexMaxReaders) + rwmutexMaxReaders
 	// Wait for any active readers to complete.
-	lockWithRank(&rw.rLock, lockRankRwmutexR)
+	lock(&rw.rLock)
 	if r != 0 && rw.readerWait.Add(r) != 0 {
 		// Wait for reader to wake us up.
 		systemstack(func() {
@@ -108,7 +150,7 @@
 		throw("unlock of unlocked rwmutex")
 	}
 	// Unblock blocked readers.
-	lockWithRank(&rw.rLock, lockRankRwmutexR)
+	lock(&rw.rLock)
 	for rw.readers.ptr() != nil {
 		reader := rw.readers.ptr()
 		rw.readers = reader.schedlink
diff --git a/src/runtime/rwmutex_test.go b/src/runtime/rwmutex_test.go
index ddb16ae..bdeb9c4 100644
--- a/src/runtime/rwmutex_test.go
+++ b/src/runtime/rwmutex_test.go
@@ -29,6 +29,7 @@
 func doTestParallelReaders(numReaders int) {
 	GOMAXPROCS(numReaders + 1)
 	var m RWMutex
+	m.Init()
 	clocked := make(chan bool, numReaders)
 	var cunlock atomic.Bool
 	cdone := make(chan bool)
@@ -100,6 +101,7 @@
 	// Number of active readers + 10000 * number of active writers.
 	var activity int32
 	var rwm RWMutex
+	rwm.Init()
 	cdone := make(chan bool)
 	go writer(&rwm, num_iterations, &activity, cdone)
 	var i int
@@ -141,6 +143,7 @@
 	}
 	b.RunParallel(func(pb *testing.PB) {
 		var rwm PaddedRWMutex
+		rwm.Init()
 		for pb.Next() {
 			rwm.RLock()
 			rwm.RLock()
@@ -154,6 +157,7 @@
 
 func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
 	var rwm RWMutex
+	rwm.Init()
 	b.RunParallel(func(pb *testing.PB) {
 		foo := 0
 		for pb.Next() {
diff --git a/src/runtime/security_unix.go b/src/runtime/security_unix.go
index 16fc87e..fa54090 100644
--- a/src/runtime/security_unix.go
+++ b/src/runtime/security_unix.go
@@ -13,19 +13,12 @@
 		return
 	}
 
-	// When secure mode is enabled, we do two things:
-	//   1. ensure the file descriptors 0, 1, and 2 are open, and if not open them,
-	//      pointing at /dev/null (or fail)
-	//   2. enforce specific environment variable values (currently we only force
-	//		GOTRACEBACK=none)
+	// When secure mode is enabled, we do one thing: enforce specific
+	// environment variable values (currently we only force GOTRACEBACK=none)
 	//
 	// Other packages may also disable specific functionality when secure mode
 	// is enabled (determined by using linkname to call isSecureMode).
-	//
-	// NOTE: we may eventually want to enforce (1) regardless of whether secure
-	// mode is enabled or not.
 
-	secureFDs()
 	secureEnv()
 }
 
@@ -41,32 +34,3 @@
 		envs = append(envs, "GOTRACEBACK=none")
 	}
 }
-
-func secureFDs() {
-	const (
-		// F_GETFD and EBADF are standard across all unixes, define
-		// them here rather than in each of the OS specific files
-		F_GETFD = 0x01
-		EBADF   = 0x09
-	)
-
-	devNull := []byte("/dev/null\x00")
-	for i := 0; i < 3; i++ {
-		ret, errno := fcntl(int32(i), F_GETFD, 0)
-		if ret >= 0 {
-			continue
-		}
-		if errno != EBADF {
-			print("runtime: unexpected error while checking standard file descriptor ", i, ", errno=", errno, "\n")
-			throw("cannot secure fds")
-		}
-
-		if ret := open(&devNull[0], 2 /* O_RDWR */, 0); ret < 0 {
-			print("runtime: standard file descriptor ", i, " closed, unable to open /dev/null, errno=", errno, "\n")
-			throw("cannot secure fds")
-		} else if ret != int32(i) {
-			print("runtime: opened unexpected file descriptor ", ret, " when attempting to open ", i, "\n")
-			throw("cannot secure fds")
-		}
-	}
-}
diff --git a/src/runtime/select.go b/src/runtime/select.go
index 34c0637..b3a3085 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -173,7 +173,7 @@
 			continue
 		}
 
-		j := fastrandn(uint32(norder + 1))
+		j := cheaprandn(uint32(norder + 1))
 		pollorder[norder] = pollorder[j]
 		pollorder[j] = uint16(i)
 		norder++
diff --git a/src/runtime/sema.go b/src/runtime/sema.go
index d0a8117..c87fc76 100644
--- a/src/runtime/sema.go
+++ b/src/runtime/sema.go
@@ -191,7 +191,7 @@
 		unlock(&root.lock)
 		return
 	}
-	s, t0 := root.dequeue(addr)
+	s, t0, tailtime := root.dequeue(addr)
 	if s != nil {
 		root.nwait.Add(-1)
 	}
@@ -199,7 +199,28 @@
 	if s != nil { // May be slow or even yield, so unlock first
 		acquiretime := s.acquiretime
 		if acquiretime != 0 {
-			mutexevent(t0-acquiretime, 3+skipframes)
+			// Charge contention that this (delayed) unlock caused.
+			// If there are N more goroutines waiting beyond the
+			// one that's waking up, charge their delay as well, so that
+			// contention holding up many goroutines shows up as
+			// more costly than contention holding up a single goroutine.
+			// It would take O(N) time to calculate how long each goroutine
+			// has been waiting, so instead we charge avg(head-wait, tail-wait)*N.
+			// head-wait is the longest wait and tail-wait is the shortest.
+			// (When we do a lifo insertion, we preserve this property by
+			// copying the old head's acquiretime into the inserted new head.
+			// In that case the overall average may be slightly high, but that's fine:
+			// the average of the ends is only an approximation to the actual
+			// average anyway.)
+			// The root.dequeue above changed the head and tail acquiretime
+			// to the current time, so the next unlock will not re-count this contention.
+			dt0 := t0 - acquiretime
+			dt := dt0
+			if s.waiters != 0 {
+				dtail := t0 - tailtime
+				dt += (dtail + dt0) / 2 * int64(s.waiters)
+			}
+			mutexevent(dt, 3+skipframes)
 		}
 		if s.ticket != 0 {
 			throw("corrupted semaphore ticket")
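The charging rule in the comment above works out to: the woken goroutine's full wait, plus the average of the head and tail waits for each goroutine still queued behind it. A standalone sketch of just that arithmetic (function and parameter names are mine):

package main

import "fmt"

// approxContention mirrors the formula above: dt0 is the head's (longest)
// wait, dtail the tail's (shortest); each remaining waiter is charged
// the average of the two.
func approxContention(now, headAcquire, tailAcquire int64, waiters uint16) int64 {
	dt0 := now - headAcquire
	dt := dt0
	if waiters != 0 {
		dtail := now - tailAcquire
		dt += (dtail + dt0) / 2 * int64(waiters)
	}
	return dt
}

func main() {
	// Head waited 100ns, tail 20ns, 3 more goroutines queued behind the
	// one being woken: 100 + (20+100)/2*3 = 280ns charged to this unlock.
	fmt.Println(approxContention(1000, 900, 980, 3)) // 280
}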
@@ -248,6 +269,7 @@
 	s.elem = unsafe.Pointer(addr)
 	s.next = nil
 	s.prev = nil
+	s.waiters = 0
 
 	var last *sudog
 	pt := &root.treap
@@ -258,7 +280,7 @@
 				// Substitute s in t's place in treap.
 				*pt = s
 				s.ticket = t.ticket
-				s.acquiretime = t.acquiretime
+				s.acquiretime = t.acquiretime // preserve head acquiretime as oldest time
 				s.parent = t.parent
 				s.prev = t.prev
 				s.next = t.next
@@ -274,6 +296,10 @@
 				if s.waittail == nil {
 					s.waittail = t
 				}
+				s.waiters = t.waiters
+				if s.waiters+1 != 0 {
+					s.waiters++
+				}
 				t.parent = nil
 				t.prev = nil
 				t.next = nil
@@ -287,6 +313,9 @@
 				}
 				t.waittail = s
 				s.waitlink = nil
+				if t.waiters+1 != 0 {
+					t.waiters++
+				}
 			}
 			return
 		}
@@ -309,7 +338,7 @@
 	//
 	// s.ticket is compared with zero in a couple of places, therefore set the lowest bit.
 	// It will not affect treap's quality noticeably.
-	s.ticket = fastrand() | 1
+	s.ticket = cheaprand() | 1
 	s.parent = last
 	*pt = s
 
@@ -330,7 +359,10 @@
 // in semaRoot blocked on addr.
 // If the sudog was being profiled, dequeue returns the time
 // at which it was woken up as now. Otherwise now is 0.
-func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now int64) {
+// If there are additional entries in the wait list, dequeue
+// returns tailtime set to the last entry's acquiretime.
+// Otherwise tailtime is found.acquiretime.
+func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now, tailtime int64) {
 	ps := &root.treap
 	s := *ps
 	for ; s != nil; s = *ps {
@@ -343,7 +375,7 @@
 			ps = &s.next
 		}
 	}
-	return nil, 0
+	return nil, 0, 0
 
 Found:
 	now = int64(0)
@@ -368,7 +400,16 @@
 		} else {
 			t.waittail = nil
 		}
+		t.waiters = s.waiters
+		if t.waiters > 1 {
+			t.waiters--
+		}
+		// Set head and tail acquire time to 'now',
+		// because the caller will take care of charging
+		// the delays before now for all entries in the list.
 		t.acquiretime = now
+		tailtime = s.waittail.acquiretime
+		s.waittail.acquiretime = now
 		s.waitlink = nil
 		s.waittail = nil
 	} else {
@@ -390,13 +431,14 @@
 		} else {
 			root.treap = nil
 		}
+		tailtime = s.acquiretime
 	}
 	s.parent = nil
 	s.elem = nil
 	s.next = nil
 	s.prev = nil
 	s.ticket = 0
-	return s, now
+	return s, now, tailtime
 }
 
 // rotateLeft rotates the tree rooted at node x.
diff --git a/src/runtime/signal_aix_ppc64.go b/src/runtime/signal_aix_ppc64.go
index c6cb91a..8ae0f74 100644
--- a/src/runtime/signal_aix_ppc64.go
+++ b/src/runtime/signal_aix_ppc64.go
@@ -1,4 +1,4 @@
-/// Copyright 2018 The Go Authors. All rights reserved.
+// Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/src/runtime/signal_freebsd_arm64.go b/src/runtime/signal_freebsd_arm64.go
index 159e965..2b87369 100644
--- a/src/runtime/signal_freebsd_arm64.go
+++ b/src/runtime/signal_freebsd_arm64.go
@@ -52,7 +52,7 @@
 //go:nowritebarrierrec
 func (c *sigctxt) pc() uint64 { return c.regs().mc_gpregs.gp_elr }
 
-func (c *sigctxt) fault() uint64 { return c.info.si_addr }
+func (c *sigctxt) fault() uintptr { return uintptr(c.info.si_addr) }
 
 func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
 func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
diff --git a/src/runtime/signal_linux_ppc64x.go b/src/runtime/signal_linux_ppc64x.go
index 3175428..95a0344 100644
--- a/src/runtime/signal_linux_ppc64x.go
+++ b/src/runtime/signal_linux_ppc64x.go
@@ -18,7 +18,8 @@
 
 //go:nosplit
 //go:nowritebarrierrec
-func (c *sigctxt) regs() *ptregs { return (*ucontext)(c.ctxt).uc_mcontext.regs }
+func (c *sigctxt) regs() *ptregs      { return (*ucontext)(c.ctxt).uc_mcontext.regs }
+func (c *sigctxt) cregs() *sigcontext { return &(*ucontext)(c.ctxt).uc_mcontext }
 
 func (c *sigctxt) r0() uint64  { return c.regs().gpr[0] }
 func (c *sigctxt) r1() uint64  { return c.regs().gpr[1] }
diff --git a/src/runtime/signal_openbsd_arm64.go b/src/runtime/signal_openbsd_arm64.go
index 3747b4f..bb14c8d 100644
--- a/src/runtime/signal_openbsd_arm64.go
+++ b/src/runtime/signal_openbsd_arm64.go
@@ -54,7 +54,7 @@
 //go:nowritebarrierrec
 func (c *sigctxt) rip() uint64 { return (uint64)(c.regs().sc_lr) } /* XXX */
 
-func (c *sigctxt) fault() uint64   { return c.sigaddr() }
+func (c *sigctxt) fault() uintptr  { return uintptr(c.sigaddr()) }
 func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
 func (c *sigctxt) sigaddr() uint64 {
 	return *(*uint64)(add(unsafe.Pointer(c.info), 16))
diff --git a/src/runtime/signal_openbsd_ppc64.go b/src/runtime/signal_openbsd_ppc64.go
new file mode 100644
index 0000000..be7217e
--- /dev/null
+++ b/src/runtime/signal_openbsd_ppc64.go
@@ -0,0 +1,83 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+	"internal/goarch"
+	"unsafe"
+)
+
+type sigctxt struct {
+	info *siginfo
+	ctxt unsafe.Pointer
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) regs() *sigcontext {
+	return (*sigcontext)(c.ctxt)
+}
+
+func (c *sigctxt) r0() uint64  { return c.regs().sc_reg[0] }
+func (c *sigctxt) r1() uint64  { return c.regs().sc_reg[1] }
+func (c *sigctxt) r2() uint64  { return c.regs().sc_reg[2] }
+func (c *sigctxt) r3() uint64  { return c.regs().sc_reg[3] }
+func (c *sigctxt) r4() uint64  { return c.regs().sc_reg[4] }
+func (c *sigctxt) r5() uint64  { return c.regs().sc_reg[5] }
+func (c *sigctxt) r6() uint64  { return c.regs().sc_reg[6] }
+func (c *sigctxt) r7() uint64  { return c.regs().sc_reg[7] }
+func (c *sigctxt) r8() uint64  { return c.regs().sc_reg[8] }
+func (c *sigctxt) r9() uint64  { return c.regs().sc_reg[9] }
+func (c *sigctxt) r10() uint64 { return c.regs().sc_reg[10] }
+func (c *sigctxt) r11() uint64 { return c.regs().sc_reg[11] }
+func (c *sigctxt) r12() uint64 { return c.regs().sc_reg[12] }
+func (c *sigctxt) r13() uint64 { return c.regs().sc_reg[13] }
+func (c *sigctxt) r14() uint64 { return c.regs().sc_reg[14] }
+func (c *sigctxt) r15() uint64 { return c.regs().sc_reg[15] }
+func (c *sigctxt) r16() uint64 { return c.regs().sc_reg[16] }
+func (c *sigctxt) r17() uint64 { return c.regs().sc_reg[17] }
+func (c *sigctxt) r18() uint64 { return c.regs().sc_reg[18] }
+func (c *sigctxt) r19() uint64 { return c.regs().sc_reg[19] }
+func (c *sigctxt) r20() uint64 { return c.regs().sc_reg[20] }
+func (c *sigctxt) r21() uint64 { return c.regs().sc_reg[21] }
+func (c *sigctxt) r22() uint64 { return c.regs().sc_reg[22] }
+func (c *sigctxt) r23() uint64 { return c.regs().sc_reg[23] }
+func (c *sigctxt) r24() uint64 { return c.regs().sc_reg[24] }
+func (c *sigctxt) r25() uint64 { return c.regs().sc_reg[25] }
+func (c *sigctxt) r26() uint64 { return c.regs().sc_reg[26] }
+func (c *sigctxt) r27() uint64 { return c.regs().sc_reg[27] }
+func (c *sigctxt) r28() uint64 { return c.regs().sc_reg[28] }
+func (c *sigctxt) r29() uint64 { return c.regs().sc_reg[29] }
+func (c *sigctxt) r30() uint64 { return c.regs().sc_reg[30] }
+func (c *sigctxt) r31() uint64 { return c.regs().sc_reg[31] }
+func (c *sigctxt) sp() uint64  { return c.regs().sc_reg[1] }
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) pc() uint64 { return c.regs().sc_pc }
+
+func (c *sigctxt) trap() uint64 { return 0 /* XXX - c.regs().trap */ }
+func (c *sigctxt) ctr() uint64  { return c.regs().sc_ctr }
+func (c *sigctxt) link() uint64 { return c.regs().sc_lr }
+func (c *sigctxt) xer() uint64  { return c.regs().sc_xer }
+func (c *sigctxt) ccr() uint64  { return c.regs().sc_cr }
+
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 {
+	return *(*uint64)(add(unsafe.Pointer(c.info), 16))
+}
+func (c *sigctxt) fault() uintptr { return uintptr(c.sigaddr()) }
+
+func (c *sigctxt) set_r0(x uint64)   { c.regs().sc_reg[0] = x }
+func (c *sigctxt) set_r12(x uint64)  { c.regs().sc_reg[12] = x }
+func (c *sigctxt) set_r30(x uint64)  { c.regs().sc_reg[30] = x }
+func (c *sigctxt) set_pc(x uint64)   { c.regs().sc_pc = x }
+func (c *sigctxt) set_sp(x uint64)   { c.regs().sc_reg[1] = x }
+func (c *sigctxt) set_link(x uint64) { c.regs().sc_lr = x }
+
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) {
+	*(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
+}
diff --git a/src/runtime/signal_openbsd_riscv64.go b/src/runtime/signal_openbsd_riscv64.go
new file mode 100644
index 0000000..25643a0
--- /dev/null
+++ b/src/runtime/signal_openbsd_riscv64.go
@@ -0,0 +1,72 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+	"internal/goarch"
+	"unsafe"
+)
+
+type sigctxt struct {
+	info *siginfo
+	ctxt unsafe.Pointer
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) regs() *sigcontext {
+	return (*sigcontext)(c.ctxt)
+}
+
+func (c *sigctxt) ra() uint64  { return uint64(c.regs().sc_ra) }
+func (c *sigctxt) sp() uint64  { return uint64(c.regs().sc_sp) }
+func (c *sigctxt) gp() uint64  { return uint64(c.regs().sc_gp) }
+func (c *sigctxt) tp() uint64  { return uint64(c.regs().sc_tp) }
+func (c *sigctxt) t0() uint64  { return uint64(c.regs().sc_t[0]) }
+func (c *sigctxt) t1() uint64  { return uint64(c.regs().sc_t[1]) }
+func (c *sigctxt) t2() uint64  { return uint64(c.regs().sc_t[2]) }
+func (c *sigctxt) s0() uint64  { return uint64(c.regs().sc_s[0]) }
+func (c *sigctxt) s1() uint64  { return uint64(c.regs().sc_s[1]) }
+func (c *sigctxt) a0() uint64  { return uint64(c.regs().sc_a[0]) }
+func (c *sigctxt) a1() uint64  { return uint64(c.regs().sc_a[1]) }
+func (c *sigctxt) a2() uint64  { return uint64(c.regs().sc_a[2]) }
+func (c *sigctxt) a3() uint64  { return uint64(c.regs().sc_a[3]) }
+func (c *sigctxt) a4() uint64  { return uint64(c.regs().sc_a[4]) }
+func (c *sigctxt) a5() uint64  { return uint64(c.regs().sc_a[5]) }
+func (c *sigctxt) a6() uint64  { return uint64(c.regs().sc_a[6]) }
+func (c *sigctxt) a7() uint64  { return uint64(c.regs().sc_a[7]) }
+func (c *sigctxt) s2() uint64  { return uint64(c.regs().sc_s[2]) }
+func (c *sigctxt) s3() uint64  { return uint64(c.regs().sc_s[3]) }
+func (c *sigctxt) s4() uint64  { return uint64(c.regs().sc_s[4]) }
+func (c *sigctxt) s5() uint64  { return uint64(c.regs().sc_s[5]) }
+func (c *sigctxt) s6() uint64  { return uint64(c.regs().sc_s[6]) }
+func (c *sigctxt) s7() uint64  { return uint64(c.regs().sc_s[7]) }
+func (c *sigctxt) s8() uint64  { return uint64(c.regs().sc_s[8]) }
+func (c *sigctxt) s9() uint64  { return uint64(c.regs().sc_s[9]) }
+func (c *sigctxt) s10() uint64 { return uint64(c.regs().sc_s[10]) }
+func (c *sigctxt) s11() uint64 { return uint64(c.regs().sc_s[11]) }
+func (c *sigctxt) t3() uint64  { return uint64(c.regs().sc_t[3]) }
+func (c *sigctxt) t4() uint64  { return uint64(c.regs().sc_t[4]) }
+func (c *sigctxt) t5() uint64  { return uint64(c.regs().sc_t[5]) }
+func (c *sigctxt) t6() uint64  { return uint64(c.regs().sc_t[6]) }
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) pc() uint64 { return uint64(c.regs().sc_sepc) }
+
+func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 {
+	return *(*uint64)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize))
+}
+
+func (c *sigctxt) set_pc(x uint64) { c.regs().sc_sepc = uintptr(x) }
+func (c *sigctxt) set_ra(x uint64) { c.regs().sc_ra = uintptr(x) }
+func (c *sigctxt) set_sp(x uint64) { c.regs().sc_sp = uintptr(x) }
+func (c *sigctxt) set_gp(x uint64) { c.regs().sc_gp = uintptr(x) }
+
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) {
+	*(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
+}
diff --git a/src/runtime/signal_ppc64x.go b/src/runtime/signal_ppc64x.go
index bdd3540..b5722f9 100644
--- a/src/runtime/signal_ppc64x.go
+++ b/src/runtime/signal_ppc64x.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (aix || linux) && (ppc64 || ppc64le)
+//go:build (aix || linux || openbsd) && (ppc64 || ppc64le)
 
 package runtime
 
@@ -55,7 +55,8 @@
 
 //go:nosplit
 //go:nowritebarrierrec
-func (c *sigctxt) sigpc() uintptr { return uintptr(c.pc()) }
+func (c *sigctxt) sigpc() uintptr    { return uintptr(c.pc()) }
+func (c *sigctxt) setsigpc(x uint64) { c.set_pc(x) }
 
 func (c *sigctxt) sigsp() uintptr { return uintptr(c.sp()) }
 func (c *sigctxt) siglr() uintptr { return uintptr(c.link()) }
diff --git a/src/runtime/signal_riscv64.go b/src/runtime/signal_riscv64.go
index b8d7b97..8acd34c 100644
--- a/src/runtime/signal_riscv64.go
+++ b/src/runtime/signal_riscv64.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (linux || freebsd) && riscv64
+//go:build (linux || freebsd || openbsd) && riscv64
 
 package runtime
 
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index ae842e9..84391d5 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -597,7 +597,7 @@
 
 // crashing is the number of m's we have waited for when implementing
 // GOTRACEBACK=crash when a signal is received.
-var crashing int32
+var crashing atomic.Int32
 
 // testSigtrap and testSigusr1 are used by the runtime tests. If
 // non-nil, it is called on SIGTRAP/SIGUSR1. If it returns true, the
@@ -698,7 +698,7 @@
 		// the unwinding code.
 		gp.sig = sig
 		gp.sigcode0 = uintptr(c.sigcode())
-		gp.sigcode1 = uintptr(c.fault())
+		gp.sigcode1 = c.fault()
 		gp.sigpc = c.sigpc()
 
 		c.preparePanic(sig, gp)
@@ -730,7 +730,7 @@
 	mp.throwing = throwTypeRuntime
 	mp.caughtsig.set(gp)
 
-	if crashing == 0 {
+	if crashing.Load() == 0 {
 		startpanic_m()
 	}
 
@@ -740,11 +740,11 @@
 	if level > 0 {
 		goroutineheader(gp)
 		tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp)
-		if crashing > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning {
+		if crashing.Load() > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning {
 			// tracebackothers on original m skipped this one; trace it now.
 			goroutineheader(mp.curg)
 			traceback(^uintptr(0), ^uintptr(0), 0, mp.curg)
-		} else if crashing == 0 {
+		} else if crashing.Load() == 0 {
 			tracebackothers(gp)
 			print("\n")
 		}
@@ -752,22 +752,38 @@
 	}
 
 	if docrash {
-		crashing++
-		if crashing < mcount()-int32(extraMLength.Load()) {
+		isCrashThread := false
+		if crashing.CompareAndSwap(0, 1) {
+			isCrashThread = true
+		} else {
+			crashing.Add(1)
+		}
+		if crashing.Load() < mcount()-int32(extraMLength.Load()) {
 			// There are other m's that need to dump their stacks.
 			// Relay SIGQUIT to the next m by sending it to the current process.
 			// All m's that have already received SIGQUIT have signal masks blocking
 			// receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
-			// When the last m receives the SIGQUIT, it will fall through to the call to
-			// crash below. Just in case the relaying gets botched, each m involved in
+			// The first m will wait until all ms have received the SIGQUIT, then crash/exit.
+			// Just in case the relaying gets botched, each m involved in
 			// the relay sleeps for 5 seconds and then does the crash/exit itself.
-			// In expected operation, the last m has received the SIGQUIT and run
-			// crash/exit and the process is gone, all long before any of the
-			// 5-second sleeps have finished.
+			// The faulting m is crashing first so it is the faulting thread in the core dump (see issue #63277):
+			// in expected operation, the first m will wait until the last m has received the SIGQUIT,
+			// and then run crash/exit and the process is gone.
+			// However, if it takes more than 5 seconds to send SIGQUIT to all ms,
+			// any of the ms may crash/exit the process after waiting for 5 seconds.
 			print("\n-----\n\n")
 			raiseproc(_SIGQUIT)
+		}
+		if isCrashThread {
+			i := 0
+			for (crashing.Load() < mcount()-int32(extraMLength.Load())) && i < 10 {
+				i++
+				usleep(500 * 1000)
+			}
+		} else {
 			usleep(5 * 1000 * 1000)
 		}
+		printDebugLog()
 		crash()
 	}
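The crashing counter above now doubles as a leader election: the M that wins the CompareAndSwap from 0 to 1 becomes the coordinator and waits (with a bound) for the others, while every other M just adds itself and sleeps. A minimal sketch of that pattern with ordinary goroutines (counts and bounds are illustrative):

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

func main() {
	var crashing atomic.Int32
	const workers = 4

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if crashing.CompareAndSwap(0, 1) {
				// Leader: wait, with a bound, for the others to check in
				// (the runtime bounds this with 10 sleeps of 500ms).
				for spins := 0; crashing.Load() < workers && spins < 1_000_000; spins++ {
					runtime.Gosched()
				}
				fmt.Printf("goroutine %d led; %d checked in\n", id, crashing.Load())
			} else {
				crashing.Add(1)
			}
		}(i)
	}
	wg.Wait()
}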
 
@@ -787,7 +803,11 @@
 		exit(2)
 	}
 
-	print("PC=", hex(c.sigpc()), " m=", mp.id, " sigcode=", c.sigcode(), "\n")
+	print("PC=", hex(c.sigpc()), " m=", mp.id, " sigcode=", c.sigcode())
+	if sig == _SIGSEGV || sig == _SIGBUS {
+		print(" addr=", hex(c.fault()))
+	}
+	print("\n")
 	if mp.incgo && gp == mp.g0 && mp.curg != nil {
 		print("signal arrived during cgo execution\n")
 		// Switch to curg so that we get a traceback of the Go code
@@ -1176,10 +1196,34 @@
 }
 
 // sigsetAllExiting is used by sigblock(true) when a thread is
-// exiting. sigset_all is defined in OS specific code, and per GOOS
-// behavior may override this default for sigsetAllExiting: see
-// osinit().
-var sigsetAllExiting = sigset_all
+// exiting.
+var sigsetAllExiting = func() sigset {
+	res := sigset_all
+
+	// Apply GOOS-specific overrides here, rather than in osinit,
+	// because osinit may be called before sigsetAllExiting is
+	// initialized (#51913).
+	if GOOS == "linux" && iscgo {
+		// #42494 glibc and musl reserve some signals for
+		// internal use and require they not be blocked by
+		// the rest of a normal C runtime. When the go runtime
+		// temporarily blocks and then unblocks signals, the blocked
+		// interval of time is generally very short. As such,
+		// these expectations of *libc code are mostly met by
+		// the combined go+cgo system of threads. However,
+		// when go causes a thread to exit, via a return from
+		// mstart(), the combined runtime can deadlock if
+		// these signals are blocked. Thus, don't block these
+		// signals when exiting threads.
+		// - glibc: SIGCANCEL (32), SIGSETXID (33)
+		// - musl: SIGTIMER (32), SIGCANCEL (33), SIGSYNCCALL (34)
+		sigdelset(&res, 32)
+		sigdelset(&res, 33)
+		sigdelset(&res, 34)
+	}
+
+	return res
+}()
 
 // sigblock blocks signals in the current thread's signal mask.
 // This is used to block signals while setting up and tearing down g
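The sigsetAllExiting change above swaps a plain variable for one computed by an immediately invoked function literal, so GOOS-specific adjustments happen at package init rather than in osinit. The same shape in ordinary code (the mask and bit numbers here are made up for illustration):

package main

import (
	"fmt"
	"runtime"
)

// exitMask is computed once at package init; platform tweaks are applied
// inline, mirroring how sigsetAllExiting clears the libc-reserved signals.
var exitMask = func() uint64 {
	mask := ^uint64(0) // start from "everything set"
	if runtime.GOOS == "linux" {
		for _, bit := range []uint{32, 33, 34} { // hypothetical reserved bits
			mask &^= 1 << (bit - 1)
		}
	}
	return mask
}()

func main() {
	fmt.Printf("exitMask = %#x\n", exitMask)
}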
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index 8e0e39c..4b7960c 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -44,13 +44,13 @@
 func exceptiontramp()
 func firstcontinuetramp()
 func lastcontinuetramp()
+func sehtramp()
 func sigresume()
 
 func initExceptionHandler() {
 	stdcall2(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp))
-	if _AddVectoredContinueHandler == nil || GOARCH == "386" {
-		// use SetUnhandledExceptionFilter for windows-386 or
-		// if VectoredContinueHandler is unavailable.
+	if GOARCH == "386" {
+		// use SetUnhandledExceptionFilter for windows-386.
 		// note: SetUnhandledExceptionFilter handler won't be called, if debugging.
 		stdcall1(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp))
 	} else {
@@ -262,6 +262,43 @@
 	return _EXCEPTION_CONTINUE_EXECUTION
 }
 
+// sehhandler is reached as part of the SEH chain.
+//
+// It is nosplit for the same reason as exceptionhandler.
+//
+//go:nosplit
+func sehhandler(_ *exceptionrecord, _ uint64, _ *context, dctxt *_DISPATCHER_CONTEXT) int32 {
+	g0 := getg()
+	if g0 == nil || g0.m.curg == nil {
+		// No g available, nothing to do here.
+		return _EXCEPTION_CONTINUE_SEARCH_SEH
+	}
+	// The Windows SEH machinery will unwind the stack until it finds
+	// a frame with a handler for the exception or until the frame is
+	// outside the stack boundaries, in which case it will call the
+	// UnhandledExceptionFilter. Unfortunately, it doesn't know about
+	// the goroutine stack, so it will stop unwinding when it reaches the
+	// first frame not running in g0. As a result, neither non-Go exceptions
+	// first frame not running in g0. As a result, neither non-Go exception
+	//
+	// To work around this, manually unwind the stack until the top of the goroutine
+	// stack is reached, and then pass the control back to Windows.
+	gp := g0.m.curg
+	ctxt := dctxt.ctx()
+	var base, sp uintptr
+	for {
+		entry := stdcall3(_RtlLookupFunctionEntry, ctxt.ip(), uintptr(unsafe.Pointer(&base)), 0)
+		if entry == 0 {
+			break
+		}
+		stdcall8(_RtlVirtualUnwind, 0, base, ctxt.ip(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0)
+		if sp < gp.stack.lo || gp.stack.hi <= sp {
+			break
+		}
+	}
+	return _EXCEPTION_CONTINUE_SEARCH_SEH
+}
+
 // It seems Windows searches ContinueHandler's list even
 // if ExceptionHandler returns EXCEPTION_CONTINUE_EXECUTION.
 // firstcontinuehandler will stop that search,
diff --git a/src/runtime/signal_windows_test.go b/src/runtime/signal_windows_test.go
index 431c372..9318ff9 100644
--- a/src/runtime/signal_windows_test.go
+++ b/src/runtime/signal_windows_test.go
@@ -114,7 +114,13 @@
 	if err != nil {
 		t.Fatalf("failure while running executable: %s\n%s", err, out)
 	}
-	expectedOutput := "exceptionCount: 1\ncontinueCount: 1\n"
+	var expectedOutput string
+	if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" {
+		// TODO: remove when windows/arm64 and windows/arm support SEH stack unwinding.
+		expectedOutput = "exceptionCount: 1\ncontinueCount: 1\nunhandledCount: 0\n"
+	} else {
+		expectedOutput = "exceptionCount: 1\ncontinueCount: 1\nunhandledCount: 1\n"
+	}
 	// cleaning output
 	cleanedOut := strings.ReplaceAll(string(out), "\r\n", "\n")
 	if cleanedOut != expectedOutput {
diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go
index fb91954..aa8caaa 100644
--- a/src/runtime/sizeof_test.go
+++ b/src/runtime/sizeof_test.go
@@ -5,6 +5,7 @@
 package runtime_test
 
 import (
+	"internal/goexperiment"
 	"reflect"
 	"runtime"
 	"testing"
@@ -16,13 +17,18 @@
 func TestSizeof(t *testing.T) {
 	const _64bit = unsafe.Sizeof(uintptr(0)) == 8
 
+	g32bit := uintptr(256)
+	if goexperiment.ExecTracer2 {
+		g32bit = uintptr(260)
+	}
+
 	var tests = []struct {
 		val    any     // type as a value
 		_32bit uintptr // size on 32bit platforms
 		_64bit uintptr // size on 64bit platforms
 	}{
-		{runtime.G{}, 252, 408},   // g, but exported for testing
-		{runtime.Sudog{}, 56, 88}, // sudog, but exported for testing
+		{runtime.G{}, g32bit, 424}, // g, but exported for testing
+		{runtime.Sudog{}, 56, 88},  // sudog, but exported for testing
 	}
 
 	for _, tt := range tests {
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 228697a..eb628bb 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -64,7 +64,11 @@
 		if copymem > 0 && writeBarrier.enabled {
 			// Only shade the pointers in old.array since we know the destination slice to
 			// only contain nil pointers because it has been cleared during alloc.
-			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
+			//
+			// It's safe to pass a type to this function as an optimization because
+			// from and to only ever refer to memory representing whole values of
+			// type et. See the comment on bulkBarrierPreWrite.
+			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem, et)
 		}
 	}
 
@@ -117,12 +121,6 @@
 	return makeslice(et, len, cap)
 }
 
-// This is a wrapper over runtime/internal/math.MulUintptr,
-// so the compiler can recognize and treat it as an intrinsic.
-func mulUintptr(a, b uintptr) (uintptr, bool) {
-	return math.MulUintptr(a, b)
-}
-
 // growslice allocates new backing store for a slice.
 //
 // arguments:
@@ -177,30 +175,7 @@
 		return slice{unsafe.Pointer(&zerobase), newLen, newLen}
 	}
 
-	newcap := oldCap
-	doublecap := newcap + newcap
-	if newLen > doublecap {
-		newcap = newLen
-	} else {
-		const threshold = 256
-		if oldCap < threshold {
-			newcap = doublecap
-		} else {
-			// Check 0 < newcap to detect overflow
-			// and prevent an infinite loop.
-			for 0 < newcap && newcap < newLen {
-				// Transition from growing 2x for small slices
-				// to growing 1.25x for large slices. This formula
-				// gives a smooth-ish transition between the two.
-				newcap += (newcap + 3*threshold) / 4
-			}
-			// Set newcap to the requested cap when
-			// the newcap calculation overflowed.
-			if newcap <= 0 {
-				newcap = newLen
-			}
-		}
-	}
+	newcap := nextslicecap(newLen, oldCap)
 
 	var overflow bool
 	var lenmem, newlenmem, capmem uintptr
@@ -208,17 +183,18 @@
 	// For 1 we don't need any division/multiplication.
 	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
 	// For powers of 2, use a variable shift.
+	noscan := et.PtrBytes == 0
 	switch {
 	case et.Size_ == 1:
 		lenmem = uintptr(oldLen)
 		newlenmem = uintptr(newLen)
-		capmem = roundupsize(uintptr(newcap))
+		capmem = roundupsize(uintptr(newcap), noscan)
 		overflow = uintptr(newcap) > maxAlloc
 		newcap = int(capmem)
 	case et.Size_ == goarch.PtrSize:
 		lenmem = uintptr(oldLen) * goarch.PtrSize
 		newlenmem = uintptr(newLen) * goarch.PtrSize
-		capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
+		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
 		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
 		newcap = int(capmem / goarch.PtrSize)
 	case isPowerOfTwo(et.Size_):
@@ -231,7 +207,7 @@
 		}
 		lenmem = uintptr(oldLen) << shift
 		newlenmem = uintptr(newLen) << shift
-		capmem = roundupsize(uintptr(newcap) << shift)
+		capmem = roundupsize(uintptr(newcap)<<shift, noscan)
 		overflow = uintptr(newcap) > (maxAlloc >> shift)
 		newcap = int(capmem >> shift)
 		capmem = uintptr(newcap) << shift
@@ -239,7 +215,7 @@
 		lenmem = uintptr(oldLen) * et.Size_
 		newlenmem = uintptr(newLen) * et.Size_
 		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
-		capmem = roundupsize(capmem)
+		capmem = roundupsize(capmem, noscan)
 		newcap = int(capmem / et.Size_)
 		capmem = uintptr(newcap) * et.Size_
 	}
@@ -275,7 +251,11 @@
 		if lenmem > 0 && writeBarrier.enabled {
 			// Only shade the pointers in oldPtr since we know the destination slice p
 			// only contains nil pointers because it has been cleared during alloc.
-			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes)
+			//
+			// It's safe to pass a type to this function as an optimization because
+			// from and to only ever refer to memory representing whole values of
+			// type et. See the comment on bulkBarrierPreWrite.
+			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et)
 		}
 	}
 	memmove(p, oldPtr, lenmem)
@@ -283,6 +263,41 @@
 	return slice{p, newLen, newcap}
 }
 
+// nextslicecap computes the next appropriate slice length.
+func nextslicecap(newLen, oldCap int) int {
+	newcap := oldCap
+	doublecap := newcap + newcap
+	if newLen > doublecap {
+		return newLen
+	}
+
+	const threshold = 256
+	if oldCap < threshold {
+		return doublecap
+	}
+	for {
+		// Transition from growing 2x for small slices
+		// to growing 1.25x for large slices. This formula
+		// gives a smooth-ish transition between the two.
+		newcap += (newcap + 3*threshold) >> 2
+
+		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
+		// newLen is guaranteed to be larger than zero, hence
+		// when newcap overflows, `uint(newcap) > uint(newLen)`.
+		// This allows checking for both with the same comparison.
+		if uint(newcap) >= uint(newLen) {
+			break
+		}
+	}
+
+	// Set newcap to the requested cap when
+	// the newcap calculation overflowed.
+	if newcap <= 0 {
+		return newLen
+	}
+	return newcap
+}
+
 //go:linkname reflect_growslice reflect.growslice
 func reflect_growslice(et *_type, old slice, num int) slice {
 	// Semantically equivalent to slices.Grow, except that the caller
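The nextslicecap policy above doubles small capacities and then grows by roughly 1.25x, with the (newcap + 3*threshold) >> 2 step smoothing the transition at 256 elements. A standalone copy of the formula, printing the progression (the real runtime additionally rounds each result up to a size class via roundupsize, so final append capacities differ):

package main

import "fmt"

// grow reproduces the growth step from nextslicecap: double below the
// 256-element threshold, then add (newcap + 3*256)/4 per iteration.
func grow(newLen, oldCap int) int {
	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		return newLen
	}
	const threshold = 256
	if oldCap < threshold {
		return doublecap
	}
	for {
		newcap += (newcap + 3*threshold) >> 2
		if uint(newcap) >= uint(newLen) { // also catches overflow
			break
		}
	}
	if newcap <= 0 { // overflowed: fall back to the requested length
		return newLen
	}
	return newcap
}

func main() {
	c := 1
	for i := 0; i < 12; i++ {
		fmt.Print(c, " ")
		c = grow(c+1, c) // simulate appending one element past capacity each time
	}
	fmt.Println()
	// Output: 1 2 4 8 16 32 64 128 256 512 832 1232
}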
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 45d66da..61cd0a0 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -555,7 +555,6 @@
 type adjustinfo struct {
 	old   stack
 	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
-	cache pcvalueCache
 
 	// sghi is the highest sudog.elem on the stack.
 	sghi uintptr
@@ -676,7 +675,7 @@
 		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
 	}
 
-	locals, args, objs := frame.getStackMap(&adjinfo.cache, true)
+	locals, args, objs := frame.getStackMap(true)
 
 	// Adjust local variables if stack frame has been allocated.
 	if locals.n > 0 {
@@ -763,10 +762,7 @@
 	for d := gp._defer; d != nil; d = d.link {
 		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
 		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
-		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
 		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
-		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
-		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
 	}
 }
 
diff --git a/src/runtime/stkframe.go b/src/runtime/stkframe.go
index 5caacba..becb729 100644
--- a/src/runtime/stkframe.go
+++ b/src/runtime/stkframe.go
@@ -143,7 +143,7 @@
 		if !retValid {
 			// argMap.n includes the results, but
 			// those aren't valid, so drop them.
-			n := int32((uintptr(mv.argLen) &^ (goarch.PtrSize - 1)) / goarch.PtrSize)
+			n := int32((mv.argLen &^ (goarch.PtrSize - 1)) / goarch.PtrSize)
 			if n < argMap.n {
 				argMap.n = n
 			}
@@ -154,7 +154,7 @@
 
 // getStackMap returns the locals and arguments live pointer maps, and
 // stack object list for frame.
-func (frame *stkframe) getStackMap(cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
+func (frame *stkframe) getStackMap(debug bool) (locals, args bitvector, objs []stackObjectRecord) {
 	targetpc := frame.continpc
 	if targetpc == 0 {
 		// Frame is dead. Return empty bitvectors.
@@ -169,7 +169,7 @@
 		// the first instruction of the function changes the
 		// stack map.
 		targetpc--
-		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, targetpc, cache)
+		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, targetpc)
 	}
 	if pcdata == -1 {
 		// We do not have a valid pcdata value but there might be a
@@ -234,7 +234,7 @@
 	}
 
 	// stack objects.
-	if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64") &&
+	if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "loong64" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64") &&
 		unsafe.Sizeof(abi.RegArgs{}) > 0 && isReflect {
 		// For reflect.makeFuncStub and reflect.methodValueCall,
 		// we need to fake the stack object record.
diff --git a/src/runtime/string.go b/src/runtime/string.go
index 7ac3e66..e01b7fc 100644
--- a/src/runtime/string.go
+++ b/src/runtime/string.go
@@ -270,7 +270,7 @@
 
 // rawbyteslice allocates a new byte slice. The byte slice is not zeroed.
 func rawbyteslice(size int) (b []byte) {
-	cap := roundupsize(uintptr(size))
+	cap := roundupsize(uintptr(size), true)
 	p := mallocgc(cap, nil, false)
 	if cap != uintptr(size) {
 		memclrNoHeapPointers(add(p, uintptr(size)), cap-uintptr(size))
@@ -285,7 +285,7 @@
 	if uintptr(size) > maxAlloc/4 {
 		throw("out of memory")
 	}
-	mem := roundupsize(uintptr(size) * 4)
+	mem := roundupsize(uintptr(size)*4, true)
 	p := mallocgc(mem, nil, false)
 	if mem != uintptr(size)*4 {
 		memclrNoHeapPointers(add(p, uintptr(size)*4), mem-uintptr(size)*4)
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 65b7299..34984d8 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -6,8 +6,6 @@
 
 import (
 	"internal/abi"
-	"internal/goarch"
-	"runtime/internal/math"
 	"unsafe"
 )
 
@@ -120,91 +118,6 @@
 // exported value for testing
 const hashLoad = float32(loadFactorNum) / float32(loadFactorDen)
 
-//go:nosplit
-func fastrand() uint32 {
-	mp := getg().m
-	// Implement wyrand: https://github.com/wangyi-fudan/wyhash
-	// Only the platform that math.Mul64 can be lowered
-	// by the compiler should be in this list.
-	if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
-		goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
-		goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 {
-		mp.fastrand += 0xa0761d6478bd642f
-		hi, lo := math.Mul64(mp.fastrand, mp.fastrand^0xe7037ed1a0b428db)
-		return uint32(hi ^ lo)
-	}
-
-	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
-	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
-	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
-	// This generator passes the SmallCrush suite, part of TestU01 framework:
-	// http://simul.iro.umontreal.ca/testu01/tu01.html
-	t := (*[2]uint32)(unsafe.Pointer(&mp.fastrand))
-	s1, s0 := t[0], t[1]
-	s1 ^= s1 << 17
-	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
-	t[0], t[1] = s0, s1
-	return s0 + s1
-}
-
-//go:nosplit
-func fastrandn(n uint32) uint32 {
-	// This is similar to fastrand() % n, but faster.
-	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
-	return uint32(uint64(fastrand()) * uint64(n) >> 32)
-}
-
-func fastrand64() uint64 {
-	mp := getg().m
-	// Implement wyrand: https://github.com/wangyi-fudan/wyhash
-	// Only the platform that math.Mul64 can be lowered
-	// by the compiler should be in this list.
-	if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
-		goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
-		goarch.IsS390x|goarch.IsRiscv64 == 1 {
-		mp.fastrand += 0xa0761d6478bd642f
-		hi, lo := math.Mul64(mp.fastrand, mp.fastrand^0xe7037ed1a0b428db)
-		return hi ^ lo
-	}
-
-	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
-	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
-	// This generator passes the SmallCrush suite, part of TestU01 framework:
-	// http://simul.iro.umontreal.ca/testu01/tu01.html
-	t := (*[2]uint32)(unsafe.Pointer(&mp.fastrand))
-	s1, s0 := t[0], t[1]
-	s1 ^= s1 << 17
-	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
-	r := uint64(s0 + s1)
-
-	s0, s1 = s1, s0
-	s1 ^= s1 << 17
-	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
-	r += uint64(s0+s1) << 32
-
-	t[0], t[1] = s0, s1
-	return r
-}
-
-func fastrandu() uint {
-	if goarch.PtrSize == 4 {
-		return uint(fastrand())
-	}
-	return uint(fastrand64())
-}
-
-//go:linkname rand_fastrand64 math/rand.fastrand64
-func rand_fastrand64() uint64 { return fastrand64() }
-
-//go:linkname sync_fastrandn sync.fastrandn
-func sync_fastrandn(n uint32) uint32 { return fastrandn(n) }
-
-//go:linkname net_fastrandu net.fastrandu
-func net_fastrandu() uint { return fastrandu() }
-
-//go:linkname os_fastrand os.fastrand
-func os_fastrand() uint32 { return fastrand() }
-
 // in internal/bytealg/equal_*.s
 //
 //go:noescape
@@ -371,6 +284,11 @@
 //
 // The compiler rewrites calls to this function into instructions that fetch the
 // pointer from a well-known register (DX on x86 architecture, etc.) directly.
+//
+// WARNING: PGO-based devirtualization cannot detect that callers of
+// getclosureptr require closure context, and thus must maintain a list of
+// these functions, which is in
+// cmd/compile/internal/devirtualize/pgo.maybeDevirtualizeFunctionCall.
 func getclosureptr() uintptr
 
 //go:noescape
@@ -421,11 +339,15 @@
 func systemstack_switch()
 
 // alignUp rounds n up to a multiple of a. a must be a power of 2.
+//
+//go:nosplit
 func alignUp(n, a uintptr) uintptr {
 	return (n + a - 1) &^ (a - 1)
 }
 
 // alignDown rounds n down to a multiple of a. a must be a power of 2.
+//
+//go:nosplit
 func alignDown(n, a uintptr) uintptr {
 	return n &^ (a - 1)
 }
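alignUp and alignDown above depend on a being a power of two, so that a-1 is a contiguous low-bit mask; a quick standalone check of both identities:

package main

import "fmt"

func alignUp(n, a uintptr) uintptr   { return (n + a - 1) &^ (a - 1) }
func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

func main() {
	fmt.Println(alignUp(10, 8), alignDown(10, 8))     // 16 8
	fmt.Println(alignUp(16, 8), alignDown(16, 8))     // 16 16 (already aligned)
	fmt.Println(alignUp(1, 4096), alignDown(1, 4096)) // 4096 0
}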
@@ -446,7 +368,7 @@
 func bool2int(x bool) int {
 	// Avoid branches. In the SSA compiler, this compiles to
 	// exactly what you would want it to.
-	return int(uint8(*(*uint8)(unsafe.Pointer(&x))))
+	return int(*(*uint8)(unsafe.Pointer(&x)))
 }
 
 // abort crashes the runtime in situations where even throw might not
diff --git a/src/runtime/stubs_amd64.go b/src/runtime/stubs_amd64.go
index a86a496..6d0b113 100644
--- a/src/runtime/stubs_amd64.go
+++ b/src/runtime/stubs_amd64.go
@@ -41,6 +41,9 @@
 //go:noescape
 func asmcgocall_no_g(fn, arg unsafe.Pointer)
 
+//go:systemstack
+func asmcgocall_landingpad()
+
 // Used by reflectcall and the reflect package.
 //
 // Spills/loads arguments in registers to/from an internal/abi.RegArgs
diff --git a/src/runtime/stubs_loong64.go b/src/runtime/stubs_loong64.go
index 556983c..4576089 100644
--- a/src/runtime/stubs_loong64.go
+++ b/src/runtime/stubs_loong64.go
@@ -10,6 +10,13 @@
 func load_g()
 func save_g()
 
+// Used by reflectcall and the reflect package.
+//
+// Spills/loads arguments in registers to/from an internal/abi.RegArgs
+// respectively. Does not follow the Go ABI.
+func spillArgs()
+func unspillArgs()
+
 // getfp returns the frame pointer register of its caller or 0 if not implemented.
 // TODO: Make this a compiler intrinsic
 func getfp() uintptr { return 0 }
diff --git a/src/runtime/stubs_ppc64x.go b/src/runtime/stubs_ppc64x.go
index 0b7771e..36b01a7 100644
--- a/src/runtime/stubs_ppc64x.go
+++ b/src/runtime/stubs_ppc64x.go
@@ -6,11 +6,16 @@
 
 package runtime
 
+import "unsafe"
+
 // Called from assembly only; declared for go vet.
 func load_g()
 func save_g()
 func reginit()
 
+//go:noescape
+func asmcgocall_no_g(fn, arg unsafe.Pointer)
+
 // Spills/loads arguments in registers to/from an internal/abi.RegArgs
 // respectively. Does not follow the Go ABI.
 func spillArgs()
diff --git a/src/runtime/stubs_riscv64.go b/src/runtime/stubs_riscv64.go
index b07d7f8..61a6e33 100644
--- a/src/runtime/stubs_riscv64.go
+++ b/src/runtime/stubs_riscv64.go
@@ -4,10 +4,15 @@
 
 package runtime
 
+import "unsafe"
+
 // Called from assembly only; declared for go vet.
 func load_g()
 func save_g()
 
+//go:noescape
+func asmcgocall_no_g(fn, arg unsafe.Pointer)
+
 // Used by reflectcall and the reflect package.
 //
 // Spills/loads arguments in registers to/from an internal/abi.RegArgs
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index b47f2d8..edf800f 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -13,7 +13,7 @@
 )
 
 // Frames may be used to get function/file/line information for a
-// slice of PC values returned by Callers.
+// slice of PC values returned by [Callers].
 type Frames struct {
 	// callers is a slice of PCs that have not yet been expanded to frames.
 	callers []uintptr
@@ -23,7 +23,7 @@
 	frameStore [2]Frame
 }
 
-// Frame is the information returned by Frames for each call frame.
+// Frame is the information returned by [Frames] for each call frame.
 type Frame struct {
 	// PC is the program counter for the location in this frame.
 	// For a frame that calls another frame, this will be the
@@ -70,24 +70,24 @@
 	funcInfo funcInfo
 }
 
-// CallersFrames takes a slice of PC values returned by Callers and
+// CallersFrames takes a slice of PC values returned by [Callers] and
 // prepares to return function/file/line information.
-// Do not change the slice until you are done with the Frames.
+// Do not change the slice until you are done with the [Frames].
 func CallersFrames(callers []uintptr) *Frames {
 	f := &Frames{callers: callers}
 	f.frames = f.frameStore[:0]
 	return f
 }
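The Callers/CallersFrames/Next linking added to the doc comments here describes the supported way to turn PCs into function/file/line information; the usual loop over runtime.Callers output looks like this (standard library API only):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Capture up to 10 caller PCs; skip=1 omits the runtime.Callers frame itself.
	pcs := make([]uintptr, 10)
	n := runtime.Callers(1, pcs)

	frames := runtime.CallersFrames(pcs[:n]) // don't modify pcs until done
	for {
		frame, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}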
 
-// Next returns a Frame representing the next call frame in the slice
+// Next returns a [Frame] representing the next call frame in the slice
 // of PC values. If it has already returned all call frames, Next
-// returns a zero Frame.
+// returns a zero [Frame].
 //
 // The more result indicates whether the next call to Next will return
-// a valid Frame. It does not necessarily indicate whether this call
+// a valid [Frame]. It does not necessarily indicate whether this call
 // returned one.
 //
-// See the Frames example for idiomatic usage.
+// See the [Frames] example for idiomatic usage.
 func (ci *Frames) Next() (frame Frame, more bool) {
 	for len(ci.frames) < 2 {
 		// Find the next frame.
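The doc comments rewritten above point at the idiomatic Callers/CallersFrames loop. A minimal, runnable sketch of that pattern (it follows the runtime package documentation and is not part of this change):

package main

import (
	"fmt"
	"runtime"
)

func printStack() {
	pc := make([]uintptr, 16)
	n := runtime.Callers(2, pc) // skip runtime.Callers and printStack itself
	frames := runtime.CallersFrames(pc[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}

func main() { printStack() }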
@@ -119,7 +119,7 @@
 		}
 		// It's important that interpret pc non-strictly as cgoTraceback may
 		// have added bogus PCs with a valid funcInfo but invalid PCDATA.
-		u, uf := newInlineUnwinder(funcInfo, pc, nil)
+		u, uf := newInlineUnwinder(funcInfo, pc)
 		sf := u.srcFunc(uf)
 		if u.isInlined(uf) {
 			// Note: entry is not modified. It always refers to a real frame, not an inlined one.
@@ -180,7 +180,7 @@
 	if !f.funcInfo.valid() {
 		return f.Function
 	}
-	u, uf := newInlineUnwinder(f.funcInfo, f.PC, nil)
+	u, uf := newInlineUnwinder(f.funcInfo, f.PC)
 	sf := u.srcFunc(uf)
 	return sf.name()
 }
@@ -204,8 +204,7 @@
 		return stk
 	}
 
-	var cache pcvalueCache
-	u, uf := newInlineUnwinder(f, tracepc, &cache)
+	u, uf := newInlineUnwinder(f, tracepc)
 	if !u.isInlined(uf) {
 		// Nothing inline at tracepc.
 		return stk
@@ -623,7 +622,7 @@
 			}
 			end := sect.baseaddr + (sect.end - sect.vaddr)
 			// For the last section, include the end address (etext), as it is included in the functab.
-			if i == len(md.textsectmap) {
+			if i == len(md.textsectmap)-1 {
 				end++
 			}
 			if pc < end {
@@ -643,7 +642,7 @@
 	return gostringnocopy(&md.funcnametab[nameOff])
 }
 
-// FuncForPC returns a *Func describing the function that contains the
+// FuncForPC returns a *[Func] describing the function that contains the
 // given program counter address, or else nil.
 //
 // If pc represents multiple functions because of inlining, it returns
@@ -658,7 +657,7 @@
 	// We just report the preceding function in that situation. See issue 29735.
 	// TODO: Perhaps we should report no function at all in that case.
 	// The runtime currently doesn't have function end info, alas.
-	u, uf := newInlineUnwinder(f, pc, nil)
+	u, uf := newInlineUnwinder(f, pc)
 	if !u.isInlined(uf) {
 		return f._Func()
 	}
@@ -823,14 +822,16 @@
 
 type pcvalueCache struct {
 	entries [2][8]pcvalueCacheEnt
+	inUse   int
 }
 
 type pcvalueCacheEnt struct {
 	// targetpc and off together are the key of this cache entry.
 	targetpc uintptr
 	off      uint32
-	// val is the value of this cached pcvalue entry.
-	val int32
+
+	val   int32   // The value of this entry.
+	valPC uintptr // The PC at which val starts
 }
 
 // pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc.
@@ -842,31 +843,56 @@
 }
 
 // Returns the PCData value, and the PC where this value starts.
-// TODO: the start PC is returned only when cache is nil.
-func pcvalue(f funcInfo, off uint32, targetpc uintptr, cache *pcvalueCache, strict bool) (int32, uintptr) {
+func pcvalue(f funcInfo, off uint32, targetpc uintptr, strict bool) (int32, uintptr) {
+	// If true, when we get a cache hit, still look up the data and make sure it
+	// matches the cached contents.
+	const debugCheckCache = false
+
 	if off == 0 {
 		return -1, 0
 	}
 
 	// Check the cache. This speeds up walks of deep stacks, which
-	// tend to have the same recursive functions over and over.
-	//
-	// This cache is small enough that full associativity is
-	// cheaper than doing the hashing for a less associative
-	// cache.
-	if cache != nil {
-		x := pcvalueCacheKey(targetpc)
-		for i := range cache.entries[x] {
-			// We check off first because we're more
-			// likely to have multiple entries with
-			// different offsets for the same targetpc
-			// than the other way around, so we'll usually
-			// fail in the first clause.
-			ent := &cache.entries[x][i]
-			if ent.off == off && ent.targetpc == targetpc {
-				return ent.val, 0
+	// tend to have the same recursive functions over and over,
+	// or repetitive stacks between goroutines.
+	var checkVal int32
+	var checkPC uintptr
+	ck := pcvalueCacheKey(targetpc)
+	{
+		mp := acquirem()
+		cache := &mp.pcvalueCache
+		// The cache can be used by the signal handler on this M. Avoid
+		// re-entrant use of the cache. The signal handler can also write inUse,
+		// but will always restore its value, so we can use a regular increment
+		// even if we get signaled in the middle of it.
+		cache.inUse++
+		if cache.inUse == 1 {
+			for i := range cache.entries[ck] {
+				// We check off first because we're more
+				// likely to have multiple entries with
+				// different offsets for the same targetpc
+				// than the other way around, so we'll usually
+				// fail in the first clause.
+				ent := &cache.entries[ck][i]
+				if ent.off == off && ent.targetpc == targetpc {
+					val, pc := ent.val, ent.valPC
+					if debugCheckCache {
+						checkVal, checkPC = ent.val, ent.valPC
+						break
+					} else {
+						cache.inUse--
+						releasem(mp)
+						return val, pc
+					}
+				}
 			}
+		} else if debugCheckCache && (cache.inUse < 1 || cache.inUse > 2) {
+			// Catch accounting errors or deeply reentrant use. In principle
+			// "inUse" should never exceed 2.
+			throw("cache.inUse out of range")
 		}
+		cache.inUse--
+		releasem(mp)
 	}
 
 	if !f.valid() {
@@ -894,16 +920,28 @@
 			// larger than the cache.
 			// Put the new element at the beginning,
 			// since it is the most likely to be newly used.
-			if cache != nil {
-				x := pcvalueCacheKey(targetpc)
-				e := &cache.entries[x]
-				ci := fastrandn(uint32(len(cache.entries[x])))
-				e[ci] = e[0]
-				e[0] = pcvalueCacheEnt{
-					targetpc: targetpc,
-					off:      off,
-					val:      val,
+			if debugCheckCache && checkPC != 0 {
+				if checkVal != val || checkPC != prevpc {
+					print("runtime: table value ", val, "@", prevpc, " != cache value ", checkVal, "@", checkPC, " at PC ", targetpc, " off ", off, "\n")
+					throw("bad pcvalue cache")
 				}
+			} else {
+				mp := acquirem()
+				cache := &mp.pcvalueCache
+				cache.inUse++
+				if cache.inUse == 1 {
+					e := &cache.entries[ck]
+					ci := cheaprandn(uint32(len(cache.entries[ck])))
+					e[ci] = e[0]
+					e[0] = pcvalueCacheEnt{
+						targetpc: targetpc,
+						off:      off,
+						val:      val,
+						valPC:    prevpc,
+					}
+				}
+				cache.inUse--
+				releasem(mp)
 			}
 
 			return val, prevpc
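The cache now lives on the M and is protected by an inUse counter rather than being threaded through as a *pcvalueCache parameter: as the comment above notes, a signal handler that interrupts a lookup on the same M simply skips the cache instead of corrupting it. A standalone sketch of that guard with stand-in types (the real code uses acquirem/releasem and the fixed-size per-M entries shown above):

package main

import "fmt"

// cache is a stand-in for the per-M pcvalueCache.
type cache struct {
	inUse   int
	entries map[uintptr]int32
}

// lookup consults the cache only when it is the sole user on this M; a
// re-entrant call (e.g. from a signal handler interrupting the first lookup)
// falls back to the slow path instead of touching entries.
func (c *cache) lookup(pc uintptr) (val int32, ok bool) {
	c.inUse++
	defer func() { c.inUse-- }()
	if c.inUse != 1 {
		return 0, false // nested use: skip the cache
	}
	val, ok = c.entries[pc]
	return val, ok
}

func main() {
	c := &cache{entries: map[uintptr]int32{0x1234: 8}}
	fmt.Println(c.lookup(0x1234)) // 8 true
}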
@@ -976,8 +1014,8 @@
 	if !f.valid() {
 		return "?", 0
 	}
-	fileno, _ := pcvalue(f, f.pcfile, targetpc, nil, strict)
-	line, _ = pcvalue(f, f.pcln, targetpc, nil, strict)
+	fileno, _ := pcvalue(f, f.pcfile, targetpc, strict)
+	line, _ = pcvalue(f, f.pcln, targetpc, strict)
 	if fileno == -1 || line == -1 || int(fileno) >= len(datap.filetab) {
 		// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
 		return "?", 0
@@ -990,8 +1028,8 @@
 	return funcline1(f, targetpc, true)
 }
 
-func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
-	x, _ := pcvalue(f, f.pcsp, targetpc, cache, true)
+func funcspdelta(f funcInfo, targetpc uintptr) int32 {
+	x, _ := pcvalue(f, f.pcsp, targetpc, true)
 	if debugPcln && x&(goarch.PtrSize-1) != 0 {
 		print("invalid spdelta ", funcname(f), " ", hex(f.entry()), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
 		throw("bad spdelta")
@@ -1005,16 +1043,14 @@
 	p := datap.pctab[f.pcsp:]
 	pc := f.entry()
 	val := int32(-1)
-	max := int32(0)
+	most := int32(0)
 	for {
 		var ok bool
 		p, ok = step(p, &pc, &val, pc == f.entry())
 		if !ok {
-			return max
+			return most
 		}
-		if val > max {
-			max = val
-		}
+		most = max(most, val)
 	}
 }
 
@@ -1022,29 +1058,28 @@
 	return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
 }
 
-func pcdatavalue(f funcInfo, table uint32, targetpc uintptr, cache *pcvalueCache) int32 {
+func pcdatavalue(f funcInfo, table uint32, targetpc uintptr) int32 {
 	if table >= f.npcdata {
 		return -1
 	}
-	r, _ := pcvalue(f, pcdatastart(f, table), targetpc, cache, true)
+	r, _ := pcvalue(f, pcdatastart(f, table), targetpc, true)
 	return r
 }
 
-func pcdatavalue1(f funcInfo, table uint32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
+func pcdatavalue1(f funcInfo, table uint32, targetpc uintptr, strict bool) int32 {
 	if table >= f.npcdata {
 		return -1
 	}
-	r, _ := pcvalue(f, pcdatastart(f, table), targetpc, cache, strict)
+	r, _ := pcvalue(f, pcdatastart(f, table), targetpc, strict)
 	return r
 }
 
 // Like pcdatavalue, but also return the start PC of this PCData value.
-// It doesn't take a cache.
 func pcdatavalue2(f funcInfo, table uint32, targetpc uintptr) (int32, uintptr) {
 	if table >= f.npcdata {
 		return -1, 0
 	}
-	return pcvalue(f, pcdatastart(f, table), targetpc, nil, true)
+	return pcvalue(f, pcdatastart(f, table), targetpc, true)
 }
 
 // funcdata returns a pointer to the ith funcdata for f.
diff --git a/src/runtime/symtabinl.go b/src/runtime/symtabinl.go
index 2bb1c4b..9273b49 100644
--- a/src/runtime/symtabinl.go
+++ b/src/runtime/symtabinl.go
@@ -30,7 +30,6 @@
 // code.
 type inlineUnwinder struct {
 	f       funcInfo
-	cache   *pcvalueCache
 	inlTree *[1 << 20]inlinedCall
 }
 
@@ -52,13 +51,13 @@
 // This unwinder uses non-strict handling of PC because it's assumed this is
 // only ever used for symbolic debugging. If things go really wrong, it'll just
 // fall back to the outermost frame.
-func newInlineUnwinder(f funcInfo, pc uintptr, cache *pcvalueCache) (inlineUnwinder, inlineFrame) {
+func newInlineUnwinder(f funcInfo, pc uintptr) (inlineUnwinder, inlineFrame) {
 	inldata := funcdata(f, abi.FUNCDATA_InlTree)
 	if inldata == nil {
 		return inlineUnwinder{f: f}, inlineFrame{pc: pc, index: -1}
 	}
 	inlTree := (*[1 << 20]inlinedCall)(inldata)
-	u := inlineUnwinder{f: f, cache: cache, inlTree: inlTree}
+	u := inlineUnwinder{f: f, inlTree: inlTree}
 	return u, u.resolveInternal(pc)
 }
 
@@ -67,7 +66,7 @@
 		pc: pc,
 		// Conveniently, this returns -1 if there's an error, which is the same
 		// value we use for the outermost frame.
-		index: pcdatavalue1(u.f, abi.PCDATA_InlTreeIndex, pc, u.cache, false),
+		index: pcdatavalue1(u.f, abi.PCDATA_InlTreeIndex, pc, false),
 	}
 }
 
diff --git a/src/runtime/symtabinl_test.go b/src/runtime/symtabinl_test.go
index 9e75f79..df524ae 100644
--- a/src/runtime/symtabinl_test.go
+++ b/src/runtime/symtabinl_test.go
@@ -34,10 +34,9 @@
 
 	// Iterate over the PCs in tiuTest and walk the inline stack for each.
 	prevStack := "x"
-	var cache pcvalueCache
 	for pc := pc1; pc < pc1+1024 && findfunc(pc) == f; pc += sys.PCQuantum {
 		stack := ""
-		u, uf := newInlineUnwinder(f, pc, &cache)
+		u, uf := newInlineUnwinder(f, pc)
 		if file, _ := u.fileLine(uf); file == "?" {
 			// We're probably in the trailing function padding, where findfunc
 			// still returns f but there's no symbolic information. Just keep
diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go
index fa9a2fb..45175d8 100644
--- a/src/runtime/sys_darwin.go
+++ b/src/runtime/sys_darwin.go
@@ -549,6 +549,58 @@
 }
 func issetugid_trampoline()
 
+// mach_vm_region is used to obtain virtual memory mappings for use by the
+// profiling system and is only exported to runtime/pprof. It is restricted
+// to obtaining mappings for the current process.
+//
+//go:linkname mach_vm_region runtime/pprof.mach_vm_region
+func mach_vm_region(address, region_size *uint64, info unsafe.Pointer) int32 {
+	// kern_return_t mach_vm_region(
+	// 	vm_map_read_t target_task,
+	// 	mach_vm_address_t *address,
+	// 	mach_vm_size_t *size,
+	// 	vm_region_flavor_t flavor,
+	// 	vm_region_info_t info,
+	// 	mach_msg_type_number_t *infoCnt,
+	// 	mach_port_t *object_name);
+	var count machMsgTypeNumber = _VM_REGION_BASIC_INFO_COUNT_64
+	var object_name machPort
+	args := struct {
+		address     *uint64
+		size        *uint64
+		flavor      machVMRegionFlavour
+		info        unsafe.Pointer
+		count       *machMsgTypeNumber
+		object_name *machPort
+	}{
+		address:     address,
+		size:        region_size,
+		flavor:      _VM_REGION_BASIC_INFO_64,
+		info:        info,
+		count:       &count,
+		object_name: &object_name,
+	}
+	return libcCall(unsafe.Pointer(abi.FuncPCABI0(mach_vm_region_trampoline)), unsafe.Pointer(&args))
+}
+func mach_vm_region_trampoline()
+
+//go:linkname proc_regionfilename runtime/pprof.proc_regionfilename
+func proc_regionfilename(pid int, address uint64, buf *byte, buflen int64) int32 {
+	args := struct {
+		pid     int
+		address uint64
+		buf     *byte
+		bufSize int64
+	}{
+		pid:     pid,
+		address: address,
+		buf:     buf,
+		bufSize: buflen,
+	}
+	return libcCall(unsafe.Pointer(abi.FuncPCABI0(proc_regionfilename_trampoline)), unsafe.Pointer(&args))
+}
+func proc_regionfilename_trampoline()
+
 // Tell the linker that the libc_* functions are to be found
 // in a system library, with the libc_ prefix missing.
 
@@ -574,6 +626,9 @@
 //go:cgo_import_dynamic libc_error __error "/usr/lib/libSystem.B.dylib"
 //go:cgo_import_dynamic libc_usleep usleep "/usr/lib/libSystem.B.dylib"
 
+//go:cgo_import_dynamic libc_proc_regionfilename proc_regionfilename "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_mach_task_self_ mach_task_self_ "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_mach_vm_region mach_vm_region "/usr/lib/libSystem.B.dylib"
 //go:cgo_import_dynamic libc_mach_timebase_info mach_timebase_info "/usr/lib/libSystem.B.dylib"
 //go:cgo_import_dynamic libc_mach_absolute_time mach_absolute_time "/usr/lib/libSystem.B.dylib"
 //go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib"
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s
index 8e8ad9c..01992d5 100644
--- a/src/runtime/sys_darwin_amd64.s
+++ b/src/runtime/sys_darwin_amd64.s
@@ -796,3 +796,25 @@
 TEXT runtime·issetugid_trampoline(SB),NOSPLIT,$0
 	CALL	libc_issetugid(SB)
 	RET
+
+// mach_vm_region_trampoline calls mach_vm_region from libc.
+TEXT runtime·mach_vm_region_trampoline(SB),NOSPLIT,$0
+	MOVQ	0(DI), SI // address
+	MOVQ	8(DI), DX // size
+	MOVL	16(DI), CX // flavor
+	MOVQ	24(DI), R8 // info
+	MOVQ	32(DI), R9 // count
+	MOVQ	40(DI), R10 // object_name
+	MOVQ	$libc_mach_task_self_(SB), DI
+	MOVL	0(DI), DI
+	CALL	libc_mach_vm_region(SB)
+	RET
+
+// proc_regionfilename_trampoline calls proc_regionfilename.
+TEXT runtime·proc_regionfilename_trampoline(SB),NOSPLIT,$0
+	MOVQ	8(DI), SI // address
+	MOVQ	16(DI), DX // buffer
+	MOVQ	24(DI), CX // buffer_size
+	MOVQ	0(DI), DI // pid
+	CALL	libc_proc_regionfilename(SB)
+	RET
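The trampoline above pulls each field of the argument block out of DI at a fixed byte offset (0, 8, 16, 24, 32, 40). A quick standalone check that a struct shaped like the Go wrapper's args block lays out at exactly those offsets on a 64-bit target; the field types mirror the wrapper and are assumptions about the underlying types of machVMRegionFlavour, machMsgTypeNumber and machPort:

package main

import (
	"fmt"
	"unsafe"
)

type machVMRegionArgs struct {
	address     *uint64
	size        *uint64
	flavor      int32 // machVMRegionFlavour (assumed int32; the asm uses MOVL)
	info        unsafe.Pointer
	count       *uint32 // machMsgTypeNumber (assumed uint32 pointee)
	object_name *uint32 // machPort (assumed uint32 pointee)
}

func main() {
	var a machVMRegionArgs
	fmt.Println(unsafe.Offsetof(a.address))     // 0
	fmt.Println(unsafe.Offsetof(a.size))        // 8
	fmt.Println(unsafe.Offsetof(a.flavor))      // 16
	fmt.Println(unsafe.Offsetof(a.info))        // 24
	fmt.Println(unsafe.Offsetof(a.count))       // 32
	fmt.Println(unsafe.Offsetof(a.object_name)) // 40
}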
diff --git a/src/runtime/sys_darwin_arm64.s b/src/runtime/sys_darwin_arm64.s
index dc6caf8..32d1f95 100644
--- a/src/runtime/sys_darwin_arm64.s
+++ b/src/runtime/sys_darwin_arm64.s
@@ -767,3 +767,26 @@
 TEXT runtime·issetugid_trampoline(SB),NOSPLIT,$0
 	BL	libc_issetugid(SB)
 	RET
+
+// mach_vm_region_trampoline calls mach_vm_region from libc.
+TEXT runtime·mach_vm_region_trampoline(SB),NOSPLIT,$0
+	MOVD	0(R0), R1	// address
+	MOVD	8(R0), R2	// size
+	MOVW	16(R0), R3	// flavor
+	MOVD	24(R0), R4	// info
+	MOVD	32(R0), R5	// count
+	MOVD	40(R0), R6  // object_name
+	MOVD	$libc_mach_task_self_(SB), R0
+	MOVW	0(R0), R0
+	BL	libc_mach_vm_region(SB)
+	RET
+
+// proc_regionfilename_trampoline calls proc_regionfilename for
+// the current process.
+TEXT runtime·proc_regionfilename_trampoline(SB),NOSPLIT,$0
+	MOVD	8(R0), R1	// address
+	MOVD	16(R0), R2	// buffer
+	MOVD	24(R0), R3	// buffer_size
+	MOVD	0(R0), R0 // pid
+	BL	libc_proc_regionfilename(SB)
+	RET
diff --git a/src/runtime/sys_linux_loong64.s b/src/runtime/sys_linux_loong64.s
index 12e5455..eba8e1f 100644
--- a/src/runtime/sys_linux_loong64.s
+++ b/src/runtime/sys_linux_loong64.s
@@ -461,8 +461,8 @@
 TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
 	JMP	runtime·sigtramp(SB)
 
-// func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)
-TEXT runtime·mmap(SB),NOSPLIT|NOFRAME,$0
+// func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)
+TEXT runtime·sysMmap(SB),NOSPLIT|NOFRAME,$0
 	MOVV	addr+0(FP), R4
 	MOVV	n+8(FP), R5
 	MOVW	prot+16(FP), R6
@@ -483,8 +483,25 @@
 	MOVV	$0, err+40(FP)
 	RET
 
-// func munmap(addr unsafe.Pointer, n uintptr)
-TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0
+// Call the function stored in _cgo_mmap using the GCC calling convention.
+// This must be called on the system stack.
+// func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uintptr
+TEXT runtime·callCgoMmap(SB),NOSPLIT,$0
+	MOVV	addr+0(FP), R4
+	MOVV	n+8(FP), R5
+	MOVW	prot+16(FP), R6
+	MOVW	flags+20(FP), R7
+	MOVW	fd+24(FP), R8
+	MOVW	off+28(FP), R9
+	MOVV	_cgo_mmap(SB), R13
+	SUBV	$16, R3		// reserve 16 bytes for sp-8 where fp may be saved.
+	JAL	(R13)
+	ADDV	$16, R3
+	MOVV	R4, ret+32(FP)
+	RET
+
+// func sysMunmap(addr unsafe.Pointer, n uintptr)
+TEXT runtime·sysMunmap(SB),NOSPLIT|NOFRAME,$0
 	MOVV	addr+0(FP), R4
 	MOVV	n+8(FP), R5
 	MOVV	$SYS_munmap, R11
@@ -494,6 +511,18 @@
 	MOVV	R0, 0xf3(R0)	// crash
 	RET
 
+// Call the function stored in _cgo_munmap using the GCC calling convention.
+// This must be called on the system stack.
+// func callCgoMunmap(addr unsafe.Pointer, n uintptr)
+TEXT runtime·callCgoMunmap(SB),NOSPLIT,$0
+	MOVV	addr+0(FP), R4
+	MOVV	n+8(FP), R5
+	MOVV	_cgo_munmap(SB), R13
+	SUBV	$16, R3		// reserve 16 bytes for sp-8 where fp may be saved.
+	JAL	(R13)
+	ADDV	$16, R3
+	RET
+
 // func madvise(addr unsafe.Pointer, n uintptr, flags int32)
 TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0
 	MOVV	addr+0(FP), R4
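The sysMmap/callCgoMmap split added above brings loong64 in line with the other cgo-capable ports: the Go-level mmap wrapper prefers the C hook when _cgo_mmap is present, so interposers such as sanitizers observe the runtime's mappings, and only otherwise issues the raw system call. A hedged, standalone sketch of that dispatch, roughly mirroring runtime/cgo_mmap.go, with the runtime pieces (systemstack, the linknamed _cgo_mmap pointer, and the two entry points above) stubbed out:

package main

import "unsafe"

var _cgo_mmap unsafe.Pointer // set by the cgo runtime when C code is linked in

func systemstack(f func()) { f() } // stand-in: the runtime switches to g0 here

func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
	return nil, 0 // stand-in for the raw SYS_mmap path
}

func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uintptr {
	return 0 // stand-in for the C _cgo_mmap trampoline
}

// mmap prefers the C allocator hook when cgo is in use and falls back to the
// direct system call otherwise.
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
	if _cgo_mmap != nil {
		var ret uintptr
		systemstack(func() { ret = callCgoMmap(addr, n, prot, flags, fd, off) })
		if ret < 4096 {
			return nil, int(ret) // small values are errno results
		}
		return unsafe.Pointer(ret), 0
	}
	return sysMmap(addr, n, prot, flags, fd, off)
}

func main() {}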
diff --git a/src/runtime/sys_linux_riscv64.s b/src/runtime/sys_linux_riscv64.s
index d1558fd..ffec2b5 100644
--- a/src/runtime/sys_linux_riscv64.s
+++ b/src/runtime/sys_linux_riscv64.s
@@ -256,7 +256,7 @@
 	MOV	(g_sched+gobuf_sp)(T1), X2
 
 noswitch:
-	ADDI	$-24, X2 // Space for result
+	SUB	$24, X2 // Space for result
 	ANDI	$~7, X2 // Align for C code
 	MOV	$8(X2), A1
 
@@ -328,7 +328,7 @@
 	MOV	(g_sched+gobuf_sp)(T1), X2
 
 noswitch:
-	ADDI	$-24, X2 // Space for result
+	SUB	$24, X2 // Space for result
 	ANDI	$~7, X2 // Align for C code
 	MOV	$8(X2), A1
 
diff --git a/src/runtime/sys_openbsd_ppc64.s b/src/runtime/sys_openbsd_ppc64.s
new file mode 100644
index 0000000..a1c67c8
--- /dev/null
+++ b/src/runtime/sys_openbsd_ppc64.s
@@ -0,0 +1,655 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// System calls and other sys.stuff for ppc64, OpenBSD
+// System calls are implemented in libc/libpthread, this file
+// contains trampolines that convert from Go to C calling convention.
+// Some direct system call implementations currently remain.
+//
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "textflag.h"
+
+#define CLOCK_REALTIME	$0
+#define	CLOCK_MONOTONIC	$3
+
+// mstart_stub is the first function executed on a new thread started by pthread_create.
+// It just does some low-level setup and then calls mstart.
+// Note: called with the C calling convention.
+TEXT runtime·mstart_stub(SB),NOSPLIT,$32
+	// R3 points to the m.
+	// We are already on m's g0 stack.
+
+	// Go relies on R0 being $0.
+	XOR	R0, R0
+
+	// TODO(jsing): Save callee-save registers (R14-R31, F14-F31, V20-V31).
+
+	MOVD    m_g0(R3), g
+	BL	runtime·save_g(SB)
+
+	BL	runtime·mstart(SB)
+
+	// TODO(jsing): Restore callee-save registers (R14-R31, F14-F31, V20-V31).
+
+	// Go is all done with this OS thread.
+	// Tell pthread everything is ok (we never join with this thread, so
+	// the value here doesn't really matter).
+	MOVD	$0, R3
+
+	RET
+
+TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
+	MOVW	sig+8(FP), R3
+	MOVD	info+16(FP), R4
+	MOVD	ctx+24(FP), R5
+	MOVD	fn+0(FP), R12
+	MOVD	R12, CTR
+	CALL	(CTR)			// Alignment for ELF ABI?
+	RET
+
+TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$16
+	// Go relies on R0 being $0 and we may have been executing non-Go code.
+	XOR	R0, R0
+
+	// TODO(jsing): Save callee-save registers (R2, R14-R31, F14-F31).
+	// in the case of signal forwarding.
+	// Please refer to https://golang.org/issue/31827 .
+
+	// If called from an external code context, g will not be set.
+	BL	runtime·load_g(SB)
+
+	BL	runtime·sigtrampgo<ABIInternal>(SB)
+
+	// TODO(jsing): Restore callee-save registers.
+
+	RET
+
+// These trampolines help convert from Go calling convention to C calling convention.
+// They should be called with asmcgocall.
+// A pointer to the arguments is passed in R3.
+// A single int32 result is returned in R3.
+// (For more results, make an args/results structure.)
+TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$32
+	MOVD	0(R3), R3		// arg 1 - attr
+	CALL	libc_pthread_attr_init(SB)
+	RET
+
+TEXT runtime·pthread_attr_destroy_trampoline(SB),NOSPLIT,$32
+	MOVD	0(R3), R3		// arg 1 - attr
+	CALL	libc_pthread_attr_destroy(SB)
+	RET
+
+TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - size
+	MOVD	0(R3), R3		// arg 1 - attr
+	CALL	libc_pthread_attr_getstacksize(SB)
+	RET
+
+TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - state
+	MOVD	0(R3), R3		// arg 1 - attr
+	CALL	libc_pthread_attr_setdetachstate(SB)
+	RET
+
+TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$32
+	MOVD	0(R3), R4		// arg 2 - attr
+	MOVD	8(R3), R5		// arg 3 - start
+	MOVD	16(R3), R6		// arg 4 - arg
+
+	MOVD	R1, R15
+	SUB	$64, R1
+	RLDCR	$0, R1, $~15, R1
+	MOVD	R1, R3			// arg 1 - &threadid (discard)
+	CALL	libc_pthread_create(SB)
+	MOVD	R15, R1
+
+	RET
+
+TEXT runtime·thrkill_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - signal (int64)
+	MOVD	$0, R5			// arg 3 - tcb
+	MOVW	0(R3), R3		// arg 1 - tid
+	CALL	libc_thrkill(SB)
+	RET
+
+TEXT runtime·thrsleep_trampoline(SB),NOSPLIT,$32
+	MOVW	8(R3), R4		// arg 2 - clock_id
+	MOVD	16(R3), R5		// arg 3 - abstime
+	MOVD	24(R3), R6		// arg 4 - lock
+	MOVD	32(R3), R7		// arg 5 - abort
+	MOVD	0(R3), R3		// arg 1 - id
+	CALL	libc_thrsleep(SB)
+	RET
+
+TEXT runtime·thrwakeup_trampoline(SB),NOSPLIT,$32
+	MOVW	8(R3), R4		// arg 2 - count
+	MOVD	0(R3), R3		// arg 1 - id
+	CALL	libc_thrwakeup(SB)
+	RET
+
+TEXT runtime·exit_trampoline(SB),NOSPLIT,$32
+	MOVW	0(R3), R3		// arg 1 - status
+	CALL	libc_exit(SB)
+	MOVD	$0, R3			// crash on failure
+	MOVD	R3, (R3)
+	RET
+
+TEXT runtime·getthrid_trampoline(SB),NOSPLIT,$32
+	MOVD	R3, R14			// pointer to args
+	CALL	libc_getthrid(SB)
+	MOVW	R3, 0(R14)		// return value
+	RET
+
+TEXT runtime·raiseproc_trampoline(SB),NOSPLIT,$32
+	MOVD	R3, R14			// pointer to args
+	CALL	libc_getpid(SB)		// arg 1 - pid
+	MOVW	0(R14), R4		// arg 2 - signal
+	CALL	libc_kill(SB)
+	RET
+
+TEXT runtime·sched_yield_trampoline(SB),NOSPLIT,$32
+	CALL	libc_sched_yield(SB)
+	RET
+
+TEXT runtime·mmap_trampoline(SB),NOSPLIT,$32
+	MOVD    R3, R14			// pointer to args
+	MOVD	0(R14), R3		// arg 1 - addr
+	MOVD	8(R14), R4		// arg 2 - len
+	MOVW	16(R14), R5		// arg 3 - prot
+	MOVW	20(R14), R6		// arg 4 - flags
+	MOVW	24(R14), R7		// arg 5 - fid
+	MOVW	28(R14), R8		// arg 6 - offset
+	CALL	libc_mmap(SB)
+	MOVD	$0, R4
+	CMP	R3, $-1
+	BNE	noerr
+	CALL	libc_errno(SB)
+	MOVW	(R3), R4		// errno
+	MOVD	$0, R3
+noerr:
+	MOVD	R3, 32(R14)
+	MOVD	R4, 40(R14)
+	RET
+
+TEXT runtime·munmap_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - len
+	MOVD	0(R3), R3		// arg 1 - addr
+	CALL	libc_munmap(SB)
+	CMP	R3, $-1
+	BNE	3(PC)
+	MOVD	$0, R3			// crash on failure
+	MOVD	R3, (R3)
+	RET
+
+TEXT runtime·madvise_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - len
+	MOVW	16(R3), R5		// arg 3 - advice
+	MOVD	0(R3), R3		// arg 1 - addr
+	CALL	libc_madvise(SB)
+	// ignore failure - maybe pages are locked
+	RET
+
+TEXT runtime·open_trampoline(SB),NOSPLIT,$32
+	MOVW	8(R3), R4		// arg 2 - flags
+	MOVW	12(R3), R5		// arg 3 - mode
+	MOVD	0(R3), R3		// arg 1 - path
+	MOVD	$0, R6			// varargs
+	CALL	libc_open(SB)
+	RET
+
+TEXT runtime·close_trampoline(SB),NOSPLIT,$32
+	MOVW	0(R3), R3		// arg 1 - fd
+	CALL	libc_close(SB)
+	RET
+
+TEXT runtime·read_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - buf
+	MOVW	16(R3), R5		// arg 3 - count
+	MOVW	0(R3), R3		// arg 1 - fd (int32)
+	CALL	libc_read(SB)
+	CMP	R3, $-1
+	BNE	noerr
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3		// errno
+	NEG	R3, R3			// caller expects negative errno value
+noerr:
+	RET
+
+TEXT runtime·write_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - buf
+	MOVW	16(R3), R5		// arg 3 - count
+	MOVD	0(R3), R3		// arg 1 - fd (uintptr)
+	CALL	libc_write(SB)
+	CMP	R3, $-1
+	BNE	noerr
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3		// errno
+	NEG	R3, R3			// caller expects negative errno value
+noerr:
+	RET
+
+TEXT runtime·pipe2_trampoline(SB),NOSPLIT,$32
+	MOVW	8(R3), R4		// arg 2 - flags
+	MOVD	0(R3), R3		// arg 1 - filedes
+	CALL	libc_pipe2(SB)
+	CMP	R3, $-1
+	BNE	noerr
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3		// errno
+	NEG	R3, R3			// caller expects negative errno value
+noerr:
+	RET
+
+TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - new
+	MOVD	16(R3), R5		// arg 3 - old
+	MOVW	0(R3), R3		// arg 1 - which
+	CALL	libc_setitimer(SB)
+	RET
+
+TEXT runtime·usleep_trampoline(SB),NOSPLIT,$32
+	MOVW	0(R3), R3		// arg 1 - usec
+	CALL	libc_usleep(SB)
+	RET
+
+TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$32
+	MOVW	8(R3), R4		// arg 2 - miblen
+	MOVD	16(R3), R5		// arg 3 - out
+	MOVD	24(R3), R6		// arg 4 - size
+	MOVD	32(R3), R7		// arg 5 - dst
+	MOVD	40(R3), R8		// arg 6 - ndst
+	MOVD	0(R3), R3		// arg 1 - mib
+	CALL	libc_sysctl(SB)
+	RET
+
+TEXT runtime·kqueue_trampoline(SB),NOSPLIT,$32
+	CALL	libc_kqueue(SB)
+	RET
+
+TEXT runtime·kevent_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - keventt
+	MOVW	16(R3), R5		// arg 3 - nch
+	MOVD	24(R3), R6		// arg 4 - ev
+	MOVW	32(R3), R7		// arg 5 - nev
+	MOVD	40(R3), R8		// arg 6 - ts
+	MOVW	0(R3), R3		// arg 1 - kq
+	CALL	libc_kevent(SB)
+	CMP	R3, $-1
+	BNE	noerr
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3		// errno
+	NEG	R3, R3			// caller expects negative errno value
+noerr:
+	RET
+
+TEXT runtime·clock_gettime_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - tp
+	MOVW	0(R3), R3		// arg 1 - clock_id
+	CALL	libc_clock_gettime(SB)
+	CMP	R3, $-1
+	BNE	noerr
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3		// errno
+	NEG	R3, R3			// caller expects negative errno value
+noerr:
+	RET
+
+TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$32
+	MOVD    R3, R14			// pointer to args
+	MOVW	0(R14), R3		// arg 1 - fd
+	MOVW	4(R14), R4		// arg 2 - cmd
+	MOVW	8(R14), R5		// arg 3 - arg
+	MOVD	$0, R6			// vararg
+	CALL	libc_fcntl(SB)
+	MOVD	$0, R4
+	CMP	R3, $-1
+	BNE	noerr
+	CALL	libc_errno(SB)
+	MOVW	(R3), R4		// errno
+	MOVW	$-1, R3
+noerr:
+	MOVW	R3, 12(R14)
+	MOVW	R4, 16(R14)
+	RET
+
+TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - new
+	MOVD	16(R3), R5		// arg 3 - old
+	MOVW	0(R3), R3		// arg 1 - sig
+	CALL	libc_sigaction(SB)
+	CMP	R3, $-1
+	BNE	3(PC)
+	MOVD	$0, R3			// crash on syscall failure
+	MOVD	R3, (R3)
+	RET
+
+TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - new
+	MOVD	16(R3), R5		// arg 3 - old
+	MOVW	0(R3), R3		// arg 1 - how
+	CALL	libc_pthread_sigmask(SB)
+	CMP	R3, $-1
+	BNE	3(PC)
+	MOVD	$0, R3			// crash on syscall failure
+	MOVD	R3, (R3)
+	RET
+
+TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$32
+	MOVD	8(R3), R4		// arg 2 - old
+	MOVD	0(R3), R3		// arg 1 - new
+	CALL	libc_sigaltstack(SB)
+	CMP	R3, $-1
+	BNE	3(PC)
+	MOVD	$0, R3			// crash on syscall failure
+	MOVD	R3, (R3)
+	RET
+
+TEXT runtime·issetugid_trampoline(SB),NOSPLIT,$32
+	MOVD	R3, R14			// pointer to args
+	CALL	libc_issetugid(SB)
+	MOVW	R3, 0(R14)		// return value
+	RET
+
+// syscall calls a function in libc on behalf of the syscall package.
+// syscall takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall expects a 32-bit result and tests for 32-bit -1
+// to decide there was an error.
+TEXT runtime·syscall(SB),NOSPLIT,$32
+	MOVD    R3, R14			// pointer to args
+
+	MOVD	(0*8)(R14), R12		// fn
+	MOVD	(1*8)(R14), R3		// a1
+	MOVD	(2*8)(R14), R4		// a2
+	MOVD	(3*8)(R14), R5		// a3
+	MOVD	$0, R6			// vararg
+
+	MOVD	R12, CTR
+	CALL	(CTR)
+
+	MOVD	R3, (4*8)(R14)		// r1
+	MOVD	R4, (5*8)(R14)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	CMPW	R3, $-1
+	BNE	ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3
+	MOVD	R3, (6*8)(R14)		// err
+
+ok:
+	RET
+
+// syscallX calls a function in libc on behalf of the syscall package.
+// syscallX takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscallX must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscallX is like syscall but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscallX(SB),NOSPLIT,$32
+	MOVD    R3, R14			// pointer to args
+
+	MOVD	(0*8)(R14), R12		// fn
+	MOVD	(1*8)(R14), R3		// a1
+	MOVD	(2*8)(R14), R4		// a2
+	MOVD	(3*8)(R14), R5		// a3
+	MOVD	$0, R6			// vararg
+
+	MOVD	R12, CTR
+	CALL	(CTR)
+
+	MOVD	R3, (4*8)(R14)		// r1
+	MOVD	R4, (5*8)(R14)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	CMP	R3, $-1
+	BNE	ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3
+	MOVD	R3, (6*8)(R14)		// err
+
+ok:
+	RET
+
+// syscall6 calls a function in libc on behalf of the syscall package.
+// syscall6 takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	a4    uintptr
+//	a5    uintptr
+//	a6    uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall6 must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall6 expects a 32-bit result and tests for 32-bit -1
+// to decide there was an error.
+TEXT runtime·syscall6(SB),NOSPLIT,$32
+	MOVD    R3, R14			// pointer to args
+
+	MOVD	(0*8)(R14), R12		// fn
+	MOVD	(1*8)(R14), R3		// a1
+	MOVD	(2*8)(R14), R4		// a2
+	MOVD	(3*8)(R14), R5		// a3
+	MOVD	(4*8)(R14), R6		// a4
+	MOVD	(5*8)(R14), R7		// a5
+	MOVD	(6*8)(R14), R8		// a6
+	MOVD	$0, R9			// vararg
+
+	MOVD	R12, CTR
+	CALL	(CTR)
+
+	MOVD	R3, (7*8)(R14)		// r1
+	MOVD	R4, (8*8)(R14)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	CMPW	R3, $-1
+	BNE	ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3
+	MOVD	R3, (9*8)(R14)		// err
+
+ok:
+	RET
+
+// syscall6X calls a function in libc on behalf of the syscall package.
+// syscall6X takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	a4    uintptr
+//	a5    uintptr
+//	a6    uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall6X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall6X is like syscall6 but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscall6X(SB),NOSPLIT,$32
+	MOVD    R3, R14			// pointer to args
+
+	MOVD	(0*8)(R14), R12		// fn
+	MOVD	(1*8)(R14), R3		// a1
+	MOVD	(2*8)(R14), R4		// a2
+	MOVD	(3*8)(R14), R5		// a3
+	MOVD	(4*8)(R14), R6		// a4
+	MOVD	(5*8)(R14), R7		// a5
+	MOVD	(6*8)(R14), R8		// a6
+	MOVD	$0, R9			// vararg
+
+	MOVD	R12, CTR
+	CALL	(CTR)
+
+	MOVD	R3, (7*8)(R14)		// r1
+	MOVD	R4, (8*8)(R14)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	CMP	R3, $-1
+	BNE	ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3
+	MOVD	R3, (9*8)(R14)		// err
+
+ok:
+	RET
+
+// syscall10 calls a function in libc on behalf of the syscall package.
+// syscall10 takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	a4    uintptr
+//	a5    uintptr
+//	a6    uintptr
+//	a7    uintptr
+//	a8    uintptr
+//	a9    uintptr
+//	a10   uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall10 must be called on the g0 stack with the
+// C calling convention (use libcCall). Note that this is
+// really syscall8 as a maximum of eight parameters can be
+// passed via registers (and current usage does not exceed
+// this).
+TEXT runtime·syscall10(SB),NOSPLIT,$32
+	MOVD    R3, R14			// pointer to args
+
+	MOVD	(0*8)(R14), R12		// fn
+	MOVD	(1*8)(R14), R3		// a1
+	MOVD	(2*8)(R14), R4		// a2
+	MOVD	(3*8)(R14), R5		// a3
+	MOVD	(4*8)(R14), R6		// a4
+	MOVD	(5*8)(R14), R7		// a5
+	MOVD	(6*8)(R14), R8		// a6
+	MOVD	(7*8)(R14), R9		// a7
+	MOVD	(8*8)(R14), R10		// a8
+
+	MOVD	R12, CTR
+	CALL	(CTR)
+
+	MOVD	R3, (11*8)(R14)		// r1
+	MOVD	R4, (12*8)(R14)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	CMPW	R3, $-1
+	BNE	ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3
+	MOVD	R3, (13*8)(R14)		// err
+
+ok:
+	RET
+
+// syscall10X calls a function in libc on behalf of the syscall package.
+// syscall10X takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	a4    uintptr
+//	a5    uintptr
+//	a6    uintptr
+//	a7    uintptr
+//	a8    uintptr
+//	a9    uintptr
+//	a10   uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall10X must be called on the g0 stack with the
+// C calling convention (use libcCall). Note that this is
+// really syscall8X as a maximum of eight parameters can be
+// passed via registers (and current usage does not exceed
+// this).
+//
+// syscall10X is like syscall10 but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscall10X(SB),NOSPLIT,$32
+	MOVD    R3, R14			// pointer to args
+
+	MOVD	(0*8)(R14), R12		// fn
+	MOVD	(1*8)(R14), R3		// a1
+	MOVD	(2*8)(R14), R4		// a2
+	MOVD	(3*8)(R14), R5		// a3
+	MOVD	(4*8)(R14), R6		// a4
+	MOVD	(5*8)(R14), R7		// a5
+	MOVD	(6*8)(R14), R8		// a6
+	MOVD	(7*8)(R14), R9		// a7
+	MOVD	(8*8)(R14), R10		// a8
+
+	MOVD	R12, CTR
+	CALL	(CTR)
+
+	MOVD	R3, (11*8)(R14)		// r1
+	MOVD	R4, (12*8)(R14)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	CMP	R3, $-1
+	BNE	ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(R3), R3
+	MOVD	R3, (13*8)(R14)		// err
+
+ok:
+	RET
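In the file above, syscall/syscall6/syscall10 compare only the low word of the result (CMPW R3, $-1) while the X variants compare the full register (CMP R3, $-1). The difference matters because a valid 64-bit result can carry 0xffffffff in its low word, and conversely a C int result is only meaningful in its low 32 bits. A tiny illustration:

package main

import "fmt"

func main() {
	raw := uint64(0x00000000ffffffff)     // hypothetical contents of the result register
	fmt.Println(int32(uint32(raw)) == -1) // true: viewed as a 32-bit result this is an error
	fmt.Println(int64(raw) == -1)         // false: viewed as a 64-bit result it is a valid value
}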
diff --git a/src/runtime/sys_openbsd_riscv64.s b/src/runtime/sys_openbsd_riscv64.s
new file mode 100644
index 0000000..3262b41
--- /dev/null
+++ b/src/runtime/sys_openbsd_riscv64.s
@@ -0,0 +1,742 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// System calls and other sys.stuff for riscv64, OpenBSD
+// System calls are implemented in libc/libpthread, this file
+// contains trampolines that convert from Go to C calling convention.
+//
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "textflag.h"
+
+#define CLOCK_REALTIME	$0
+#define	CLOCK_MONOTONIC	$3
+
+// mstart_stub is the first function executed on a new thread started by pthread_create.
+// It just does some low-level setup and then calls mstart.
+// Note: called with the C calling convention.
+TEXT runtime·mstart_stub(SB),NOSPLIT,$200
+	// X10 points to the m.
+	// We are already on m's g0 stack.
+
+	// Save callee-save registers (X8, X9, X18..X27, F8, F9, F18..F27)
+	MOV	X8, (1*8)(X2)
+	MOV	X9, (2*8)(X2)
+	MOV	X18, (3*8)(X2)
+	MOV	X19, (4*8)(X2)
+	MOV	X20, (5*8)(X2)
+	MOV	X21, (6*8)(X2)
+	MOV	X22, (7*8)(X2)
+	MOV	X23, (8*8)(X2)
+	MOV	X24, (9*8)(X2)
+	MOV	X25, (10*8)(X2)
+	MOV	X26, (11*8)(X2)
+	MOV	g, (12*8)(X2)
+	MOVF	F8, (13*8)(X2)
+	MOVF	F9, (14*8)(X2)
+	MOVF	F18, (15*8)(X2)
+	MOVF	F19, (16*8)(X2)
+	MOVF	F20, (17*8)(X2)
+	MOVF	F21, (18*8)(X2)
+	MOVF	F22, (19*8)(X2)
+	MOVF	F23, (20*8)(X2)
+	MOVF	F24, (21*8)(X2)
+	MOVF	F25, (22*8)(X2)
+	MOVF	F26, (23*8)(X2)
+	MOVF	F27, (24*8)(X2)
+
+	MOV	m_g0(X10), g
+	CALL	runtime·save_g(SB)
+
+	CALL	runtime·mstart(SB)
+
+	// Restore callee-save registers.
+	MOV	(1*8)(X2), X8
+	MOV	(2*8)(X2), X9
+	MOV	(3*8)(X2), X18
+	MOV	(4*8)(X2), X19
+	MOV	(5*8)(X2), X20
+	MOV	(6*8)(X2), X21
+	MOV	(7*8)(X2), X22
+	MOV	(8*8)(X2), X23
+	MOV	(9*8)(X2), X24
+	MOV	(10*8)(X2), X25
+	MOV	(11*8)(X2), X26
+	MOV	(12*8)(X2), g
+	MOVF	(13*8)(X2), F8
+	MOVF	(14*8)(X2), F9
+	MOVF	(15*8)(X2), F18
+	MOVF	(16*8)(X2), F19
+	MOVF	(17*8)(X2), F20
+	MOVF	(18*8)(X2), F21
+	MOVF	(19*8)(X2), F22
+	MOVF	(20*8)(X2), F23
+	MOVF	(21*8)(X2), F24
+	MOVF	(22*8)(X2), F25
+	MOVF	(23*8)(X2), F26
+	MOVF	(24*8)(X2), F27
+
+	// Go is all done with this OS thread.
+	// Tell pthread everything is ok (we never join with this thread, so
+	// the value here doesn't really matter).
+	MOV	$0, X10
+
+	RET
+
+TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
+	MOVW	sig+8(FP), X10
+	MOV	info+16(FP), X11
+	MOV	ctx+24(FP), X12
+	MOV	fn+0(FP), X5
+	JALR	X1, X5
+	RET
+
+TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$224
+	// Save callee-save registers (X8, X9, X18..X27, F8, F9, F18..F27)
+	MOV	X8, (4*8)(X2)
+	MOV	X9, (5*8)(X2)
+	MOV	X18, (6*8)(X2)
+	MOV	X19, (7*8)(X2)
+	MOV	X20, (8*8)(X2)
+	MOV	X21, (9*8)(X2)
+	MOV	X22, (10*8)(X2)
+	MOV	X23, (11*8)(X2)
+	MOV	X24, (12*8)(X2)
+	MOV	X25, (13*8)(X2)
+	MOV	X26, (14*8)(X2)
+	MOV	g, (15*8)(X2)
+	MOVF	F8, (16*8)(X2)
+	MOVF	F9, (17*8)(X2)
+	MOVF	F18, (18*8)(X2)
+	MOVF	F19, (19*8)(X2)
+	MOVF	F20, (20*8)(X2)
+	MOVF	F21, (21*8)(X2)
+	MOVF	F22, (22*8)(X2)
+	MOVF	F23, (23*8)(X2)
+	MOVF	F24, (24*8)(X2)
+	MOVF	F25, (25*8)(X2)
+	MOVF	F26, (26*8)(X2)
+	MOVF	F27, (27*8)(X2)
+
+	// this might be called in external code context,
+	// where g is not set.
+	CALL	runtime·load_g(SB)
+
+	MOVW	X10, 8(X2)
+	MOV	X11, 16(X2)
+	MOV	X12, 24(X2)
+	MOV	$runtime·sigtrampgo(SB), X5
+	JALR	X1, X5
+
+	// Restore callee-save registers.
+	MOV	(4*8)(X2), X8
+	MOV	(5*8)(X2), X9
+	MOV	(6*8)(X2), X18
+	MOV	(7*8)(X2), X19
+	MOV	(8*8)(X2), X20
+	MOV	(9*8)(X2), X21
+	MOV	(10*8)(X2), X22
+	MOV	(11*8)(X2), X23
+	MOV	(12*8)(X2), X24
+	MOV	(13*8)(X2), X25
+	MOV	(14*8)(X2), X26
+	MOV	(15*8)(X2), g
+	MOVF	(16*8)(X2), F8
+	MOVF	(17*8)(X2), F9
+	MOVF	(18*8)(X2), F18
+	MOVF	(19*8)(X2), F19
+	MOVF	(20*8)(X2), F20
+	MOVF	(21*8)(X2), F21
+	MOVF	(22*8)(X2), F22
+	MOVF	(23*8)(X2), F23
+	MOVF	(24*8)(X2), F24
+	MOVF	(25*8)(X2), F25
+	MOVF	(26*8)(X2), F26
+	MOVF	(27*8)(X2), F27
+
+	RET
+
+//
+// These trampolines help convert from Go calling convention to C calling convention.
+// They should be called with asmcgocall.
+// A pointer to the arguments is passed in X10.
+// A single int32 result is returned in X10.
+// (For more results, make an args/results structure.)
+TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$8
+	MOV	0(X10), X10		// arg 1 - attr
+	CALL	libc_pthread_attr_init(SB)
+	RET
+
+TEXT runtime·pthread_attr_destroy_trampoline(SB),NOSPLIT,$8
+	MOV	0(X10), X10		// arg 1 - attr
+	CALL	libc_pthread_attr_destroy(SB)
+	RET
+
+TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - size
+	MOV	0(X10), X10		// arg 1 - attr
+	CALL	libc_pthread_attr_getstacksize(SB)
+	RET
+
+TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - state
+	MOV	0(X10), X10		// arg 1 - attr
+	CALL	libc_pthread_attr_setdetachstate(SB)
+	RET
+
+TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$8
+	MOV	0(X10), X11		// arg 2 - attr
+	MOV	8(X10), X12		// arg 3 - start
+	MOV	16(X10), X13		// arg 4 - arg
+	ADD	$-16, X2
+	MOV	X2, X10			// arg 1 - &threadid (discard)
+	CALL	libc_pthread_create(SB)
+	ADD	$16, X2
+	RET
+
+TEXT runtime·thrkill_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - signal
+	MOV	$0, X12			// arg 3 - tcb
+	MOVW	0(X10), X10		// arg 1 - tid
+	CALL	libc_thrkill(SB)
+	RET
+
+TEXT runtime·thrsleep_trampoline(SB),NOSPLIT,$8
+	MOVW	8(X10), X11		// arg 2 - clock_id
+	MOV	16(X10), X12		// arg 3 - abstime
+	MOV	24(X10), X13		// arg 4 - lock
+	MOV	32(X10), X14		// arg 5 - abort
+	MOV	0(X10), X10		// arg 1 - id
+	CALL	libc_thrsleep(SB)
+	RET
+
+TEXT runtime·thrwakeup_trampoline(SB),NOSPLIT,$8
+	MOVW	8(X10), X11		// arg 2 - count
+	MOV	0(X10), X10		// arg 1 - id
+	CALL	libc_thrwakeup(SB)
+	RET
+
+TEXT runtime·exit_trampoline(SB),NOSPLIT,$8
+	MOVW	0(X10), X10		// arg 1 - status
+	CALL	libc_exit(SB)
+	MOV	$0, X5			// crash on failure
+	MOV	X5, (X5)
+	RET
+
+TEXT runtime·getthrid_trampoline(SB),NOSPLIT,$8
+	MOV	X10, X9			// pointer to args
+	CALL	libc_getthrid(SB)
+	MOVW	X10, 0(X9)		// return value
+	RET
+
+TEXT runtime·raiseproc_trampoline(SB),NOSPLIT,$8
+	MOV	X10, X9			// pointer to args
+	CALL	libc_getpid(SB)		// arg 1 - pid (result in X10)
+	MOVW	0(X9), X11		// arg 2 - signal
+	CALL	libc_kill(SB)
+	RET
+
+TEXT runtime·sched_yield_trampoline(SB),NOSPLIT,$8
+	CALL	libc_sched_yield(SB)
+	RET
+
+TEXT runtime·mmap_trampoline(SB),NOSPLIT,$8
+	MOV	X10, X9			// pointer to args
+	MOV	0(X9), X10		// arg 1 - addr
+	MOV	8(X9), X11		// arg 2 - len
+	MOVW	16(X9), X12		// arg 3 - prot
+	MOVW	20(X9), X13		// arg 4 - flags
+	MOVW	24(X9), X14		// arg 5 - fid
+	MOVW	28(X9), X15		// arg 6 - offset
+	CALL	libc_mmap(SB)
+	MOV	$0, X5
+	MOV	$-1, X6
+	BNE	X6, X10, noerr
+	CALL	libc_errno(SB)
+	MOVW	(X10), X5		// errno
+	MOV	$0, X10
+noerr:
+	MOV	X10, 32(X9)
+	MOV	X5, 40(X9)
+	RET
+
+TEXT runtime·munmap_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - len
+	MOV	0(X10), X10		// arg 1 - addr
+	CALL	libc_munmap(SB)
+	MOV	$-1, X5
+	BNE	X5, X10, 3(PC)
+	MOV	$0, X5			// crash on failure
+	MOV	X5, (X5)
+	RET
+
+TEXT runtime·madvise_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - len
+	MOVW	16(X10), X12		// arg 3 - advice
+	MOV	0(X10), X10		// arg 1 - addr
+	CALL	libc_madvise(SB)
+	// ignore failure - maybe pages are locked
+	RET
+
+TEXT runtime·open_trampoline(SB),NOSPLIT,$8
+	MOVW	8(X10), X11		// arg 2 - flags
+	MOVW	12(X10), X12		// arg 3 - mode
+	MOV	0(X10), X10		// arg 1 - path
+	MOV	$0, X13			// varargs
+	CALL	libc_open(SB)
+	RET
+
+TEXT runtime·close_trampoline(SB),NOSPLIT,$8
+	MOVW	0(X10), X10		// arg 1 - fd
+	CALL	libc_close(SB)
+	RET
+
+TEXT runtime·read_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - buf
+	MOVW	16(X10), X12		// arg 3 - count
+	MOVW	0(X10), X10		// arg 1 - fd (int32 from read)
+	CALL	libc_read(SB)
+	MOV	$-1, X5
+	BNE	X5, X10, noerr
+	CALL	libc_errno(SB)
+	MOVW	(X10), X10		// errno
+	NEG	X10			// caller expects negative errno
+noerr:
+	RET
+
+TEXT runtime·write_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - buf
+	MOVW	16(X10), X12		// arg 3 - count
+	MOV	0(X10), X10		// arg 1 - fd (uintptr from write1)
+	CALL	libc_write(SB)
+	MOV	$-1, X5
+	BNE	X5, X10, noerr
+	CALL	libc_errno(SB)
+	MOVW	(X10), X10		// errno
+	NEG	X10			// caller expects negative errno
+noerr:
+	RET
+
+TEXT runtime·pipe2_trampoline(SB),NOSPLIT,$8
+	MOVW	8(X10), X11		// arg 2 - flags
+	MOV	0(X10), X10		// arg 1 - filedes
+	CALL	libc_pipe2(SB)
+	MOV	$-1, X5
+	BNE	X5, X10, noerr
+	CALL	libc_errno(SB)
+	MOVW	(X10), X10		// errno
+	NEG	X10			// caller expects negative errno
+noerr:
+	RET
+
+TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - new
+	MOV	16(X10), X12		// arg 3 - old
+	MOVW	0(X10), X10		// arg 1 - which
+	CALL	libc_setitimer(SB)
+	RET
+
+TEXT runtime·usleep_trampoline(SB),NOSPLIT,$8
+	MOVW	0(X10), X10		// arg 1 - usec
+	CALL	libc_usleep(SB)
+	RET
+
+TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$8
+	MOVW	8(X10), X11		// arg 2 - miblen
+	MOV	16(X10), X12		// arg 3 - out
+	MOV	24(X10), X13		// arg 4 - size
+	MOV	32(X10), X14		// arg 5 - dst
+	MOV	40(X10), X15		// arg 6 - ndst
+	MOV	0(X10), X10		// arg 1 - mib
+	CALL	libc_sysctl(SB)
+	RET
+
+TEXT runtime·kqueue_trampoline(SB),NOSPLIT,$8
+	CALL	libc_kqueue(SB)
+	RET
+
+TEXT runtime·kevent_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - keventt
+	MOVW	16(X10), X12		// arg 3 - nch
+	MOV	24(X10), X13		// arg 4 - ev
+	MOVW	32(X10), X14		// arg 5 - nev
+	MOV	40(X10), X15		// arg 6 - ts
+	MOVW	0(X10), X10		// arg 1 - kq
+	CALL	libc_kevent(SB)
+	MOV	$-1, X5
+	BNE	X5, X10, noerr
+	CALL	libc_errno(SB)
+	MOVW	(X10), X10		// errno
+	NEG	X10			// caller expects negative errno
+noerr:
+	RET
+
+TEXT runtime·clock_gettime_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - tp
+	MOVW	0(X10), X10		// arg 1 - clock_id
+	CALL	libc_clock_gettime(SB)
+	MOV	$-1, X5
+	BNE	X5, X10, 3(PC)
+	MOV	$0, X5			// crash on failure
+	MOV	X5, (X5)
+	RET
+
+TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$8
+	MOV	X10, X9			// pointer to args
+	MOVW	0(X9), X10		// arg 1 - fd
+	MOVW	4(X9), X11		// arg 2 - cmd
+	MOVW	8(X9), X12		// arg 3 - arg
+	MOV	$0, X13			// vararg
+	CALL	libc_fcntl(SB)
+	MOV	$-1, X5
+	MOV	$0, X11
+	BNE	X5, X10, noerr
+	CALL	libc_errno(SB)
+	MOVW	(X10), X11		// errno
+	MOV	$-1, X10
+noerr:
+	MOVW	X10, 12(X9)
+	MOVW	X11, 16(X9)
+	RET
+
+TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - new
+	MOV	16(X10), X12		// arg 3 - old
+	MOVW	0(X10), X10		// arg 1 - sig
+	CALL	libc_sigaction(SB)
+	MOV	$-1, X5
+	BNE	X5, X10, 3(PC)
+	MOV	$0, X5			// crash on failure
+	MOV	X5, (X5)
+	RET
+
+TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - new
+	MOV	16(X10), X12		// arg 3 - old
+	MOVW	0(X10), X10		// arg 1 - how
+	CALL	libc_pthread_sigmask(SB)
+	MOV	$-1, X5
+	BNE	X5, X10, 3(PC)
+	MOV	$0, X5			// crash on failure
+	MOV	X5, (X5)
+	RET
+
+TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$8
+	MOV	8(X10), X11		// arg 2 - old
+	MOV	0(X10), X10		// arg 1 - new
+	CALL	libc_sigaltstack(SB)
+	MOV	$-1, X5
+	BNE	X5, X10, 3(PC)
+	MOV	$0, X5			// crash on failure
+	MOV	X5, (X5)
+	RET
+
+TEXT runtime·issetugid_trampoline(SB),NOSPLIT,$0
+	MOV	X10, X9			// pointer to args
+	CALL	libc_issetugid(SB)
+	MOVW	X10, 0(X9)		// return value
+	RET
+
+// syscall calls a function in libc on behalf of the syscall package.
+// syscall takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall expects a 32-bit result and tests for 32-bit -1
+// to decide there was an error.
+TEXT runtime·syscall(SB),NOSPLIT,$8
+	MOV	X10, X9			// pointer to args
+
+	MOV	(0*8)(X9), X5		// fn
+	MOV	(1*8)(X9), X10		// a1
+	MOV	(2*8)(X9), X11		// a2
+	MOV	(3*8)(X9), X12		// a3
+	MOV	$0, X13			// vararg
+
+	JALR	X1, X5
+
+	MOV	X10, (4*8)(X9)		// r1
+	MOV	X11, (5*8)(X9)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	MOV	$-1, X5
+	MOVW	X10, X11
+	BNE	X5, X11, ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(X10), X10
+	MOV	X10, (6*8)(X9)		// err
+
+ok:
+	RET
+
+// syscallX calls a function in libc on behalf of the syscall package.
+// syscallX takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscallX must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscallX is like syscall but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscallX(SB),NOSPLIT,$8
+	MOV	X10, X9			// pointer to args
+
+	MOV	(0*8)(X9), X5		// fn
+	MOV	(1*8)(X9), X10		// a1
+	MOV	(2*8)(X9), X11		// a2
+	MOV	(3*8)(X9), X12		// a3
+	MOV	$0, X13			// vararg
+
+	JALR	X1, X5
+
+	MOV	X10, (4*8)(X9)		// r1
+	MOV	X11, (5*8)(X9)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	MOV	$-1, X5
+	BNE	X5, X10, ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(X10), X10
+	MOV	X10, (6*8)(X9)		// err
+
+ok:
+	RET
+
+// syscall6 calls a function in libc on behalf of the syscall package.
+// syscall6 takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	a4    uintptr
+//	a5    uintptr
+//	a6    uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall6 must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall6 expects a 32-bit result and tests for 32-bit -1
+// to decide there was an error.
+TEXT runtime·syscall6(SB),NOSPLIT,$8
+	MOV	X10, X9			// pointer to args
+
+	MOV	(0*8)(X9), X5		// fn
+	MOV	(1*8)(X9), X10		// a1
+	MOV	(2*8)(X9), X11		// a2
+	MOV	(3*8)(X9), X12		// a3
+	MOV	(4*8)(X9), X13		// a4
+	MOV	(5*8)(X9), X14		// a5
+	MOV	(6*8)(X9), X15		// a6
+	MOV	$0, X16			// vararg
+
+	JALR	X1, X5
+
+	MOV	X10, (7*8)(X9)		// r1
+	MOV	X11, (8*8)(X9)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	MOV	$-1, X5
+	MOVW	X10, X11
+	BNE	X5, X11, ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(X10), X10
+	MOV	X10, (9*8)(X9)		// err
+
+ok:
+	RET
+
+// syscall6X calls a function in libc on behalf of the syscall package.
+// syscall6X takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	a4    uintptr
+//	a5    uintptr
+//	a6    uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall6X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall6X is like syscall6 but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscall6X(SB),NOSPLIT,$8
+	MOV	X10, X9			// pointer to args
+
+	MOV	(0*8)(X9), X5		// fn
+	MOV	(1*8)(X9), X10		// a1
+	MOV	(2*8)(X9), X11		// a2
+	MOV	(3*8)(X9), X12		// a3
+	MOV	(4*8)(X9), X13		// a4
+	MOV	(5*8)(X9), X14		// a5
+	MOV	(6*8)(X9), X15		// a6
+	MOV	$0, X16			// vararg
+
+	JALR	X1, X5
+
+	MOV	X10, (7*8)(X9)		// r1
+	MOV	X11, (8*8)(X9)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	MOV	$-1, X5
+	BNE	X5, X10, ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(X10), X10
+	MOV	X10, (9*8)(X9)		// err
+
+ok:
+	RET
+
+// syscall10 calls a function in libc on behalf of the syscall package.
+// syscall10 takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	a4    uintptr
+//	a5    uintptr
+//	a6    uintptr
+//	a7    uintptr
+//	a8    uintptr
+//	a9    uintptr
+//	a10   uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall10 must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// The openbsd/riscv64 kernel only accepts eight syscall arguments.
+TEXT runtime·syscall10(SB),NOSPLIT,$0
+	MOV	X10, X9			// pointer to args
+
+	ADD	$-16, X2
+
+	MOV	(0*8)(X9), X5		// fn
+	MOV	(1*8)(X9), X10		// a1
+	MOV	(2*8)(X9), X11		// a2
+	MOV	(3*8)(X9), X12		// a3
+	MOV	(4*8)(X9), X13		// a4
+	MOV	(5*8)(X9), X14		// a5
+	MOV	(6*8)(X9), X15		// a6
+	MOV	(7*8)(X9), X16		// a7
+	MOV	(8*8)(X9), X17		// a8
+
+	JALR	X1, X5
+
+	MOV	X10, (11*8)(X9)		// r1
+	MOV	X11, (12*8)(X9)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	MOV	$-1, X5
+	MOVW	X10, X11
+	BNE	X5, X11, ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(X10), X10
+	MOV	X10, (13*8)(X9)		// err
+
+ok:
+	ADD	$16, X2
+	RET
+
+// syscall10X calls a function in libc on behalf of the syscall package.
+// syscall10X takes a pointer to a struct like:
+// struct {
+//	fn    uintptr
+//	a1    uintptr
+//	a2    uintptr
+//	a3    uintptr
+//	a4    uintptr
+//	a5    uintptr
+//	a6    uintptr
+//	a7    uintptr
+//	a8    uintptr
+//	a9    uintptr
+//	a10   uintptr
+//	r1    uintptr
+//	r2    uintptr
+//	err   uintptr
+// }
+// syscall10X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall10X is like syscall10 but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+//
+// The openbsd/riscv64 kernel only accepts eight syscall arguments.
+TEXT runtime·syscall10X(SB),NOSPLIT,$0
+	MOV	X10, X9			// pointer to args
+
+	ADD	$-16, X2
+
+	MOV	(0*8)(X9), X5		// fn
+	MOV	(1*8)(X9), X10		// a1
+	MOV	(2*8)(X9), X11		// a2
+	MOV	(3*8)(X9), X12		// a3
+	MOV	(4*8)(X9), X13		// a4
+	MOV	(5*8)(X9), X14		// a5
+	MOV	(6*8)(X9), X15		// a6
+	MOV	(7*8)(X9), X16		// a7
+	MOV	(8*8)(X9), X17		// a8
+
+	JALR	X1, X5
+
+	MOV	X10, (11*8)(X9)		// r1
+	MOV	X11, (12*8)(X9)		// r2
+
+	// Standard libc functions return -1 on error
+	// and set errno.
+	MOV	$-1, X5
+	BNE	X5, X10, ok
+
+	// Get error code from libc.
+	CALL	libc_errno(SB)
+	MOVW	(X10), X10
+	MOV	X10, (13*8)(X9)		// err
+
+ok:
+	ADD	$16, X2
+	RET
diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s
index 41a6ee6..e71fda7 100644
--- a/src/runtime/sys_windows_386.s
+++ b/src/runtime/sys_windows_386.s
@@ -11,16 +11,24 @@
 #define TEB_TlsSlots 0xE10
 #define TEB_ArbitraryPtr 0x14
 
+TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0
+	JMP	runtime·asmstdcall(SB)
+
 // void runtime·asmstdcall(void *c);
 TEXT runtime·asmstdcall(SB),NOSPLIT,$0
 	MOVL	fn+0(FP), BX
+	MOVL	SP, BP	// save stack pointer
 
 	// SetLastError(0).
 	MOVL	$0, 0x34(FS)
 
+	MOVL	libcall_n(BX), CX
+
+	// Fast version, do not store args on the stack.
+	CMPL	CX, $0
+	JE	docall
+
 	// Copy args to the stack.
-	MOVL	SP, BP
-	MOVL	libcall_n(BX), CX	// words
 	MOVL	CX, AX
 	SALL	$2, AX
 	SUBL	AX, SP			// room for args
@@ -29,6 +37,7 @@
 	CLD
 	REP; MOVSL
 
+docall:
 	// Call stdcall or cdecl function.
 	// DI SI BP BX are preserved, SP is not
 	CALL	libcall_fn(BX)
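The new n == 0 fast path keys on the runtime's libcall block: when there are no argument words there is nothing to copy, so asmstdcall jumps straight to the CALL. A stand-in sketch of the block and of a zero-argument caller (illustrative; the real helpers are stdcall0 and friends in os_windows.go):

package main

// libcall mirrors the layout the runtime uses (see runtime2.go): the function
// pointer, the argument count, a pointer to the argument words, and the result
// slots filled in by asmstdcall.
type libcall struct {
	fn   uintptr
	n    uintptr // number of argument words
	args uintptr // pointer to the argument words
	r1   uintptr
	r2   uintptr
	err  uintptr
}

// stdcall0like packages a no-argument call; with n == 0 the assembly above
// skips the REP; MOVSL argument copy entirely.
func stdcall0like(fn uintptr, asmstdcall func(*libcall)) uintptr {
	c := libcall{fn: fn, n: 0}
	asmstdcall(&c)
	return c.r1
}

func main() {}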
@@ -225,34 +234,7 @@
 	MOVL	DX, 0(CX)(FS)
 	RET
 
-// Runs on OS stack.
-// duration (in -100ns units) is in dt+0(FP).
-// g may be nil.
-TEXT runtime·usleep2(SB),NOSPLIT,$20-4
-	MOVL	dt+0(FP), BX
-	MOVL	$-1, hi-4(SP)
-	MOVL	BX, lo-8(SP)
-	LEAL	lo-8(SP), BX
-	MOVL	BX, ptime-12(SP)
-	MOVL	$0, alertable-16(SP)
-	MOVL	$-1, handle-20(SP)
-	MOVL	SP, BP
-	MOVL	runtime·_NtWaitForSingleObject(SB), AX
-	CALL	AX
-	MOVL	BP, SP
-	RET
-
-// Runs on OS stack.
-TEXT runtime·switchtothread(SB),NOSPLIT,$0
-	MOVL	SP, BP
-	MOVL	runtime·_SwitchToThread(SB), AX
-	CALL	AX
-	MOVL	BP, SP
-	RET
-
 TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
-	CMPB	runtime·useQPCTime(SB), $0
-	JNE	useQPC
 loop:
 	MOVL	(_INTERRUPT_TIME+time_hi1), AX
 	MOVL	(_INTERRUPT_TIME+time_lo), CX
@@ -269,9 +251,6 @@
 	MOVL	AX, ret_lo+0(FP)
 	MOVL	DX, ret_hi+4(FP)
 	RET
-useQPC:
-	JMP	runtime·nanotimeQPC(SB)
-	RET
 
 // This is called from rt0_go, which runs on the system stack
 // using the initial stack allocated by the OS.
diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s
index e66f444..c1b78e3 100644
--- a/src/runtime/sys_windows_amd64.s
+++ b/src/runtime/sys_windows_amd64.s
@@ -12,6 +12,10 @@
 #define TEB_TlsSlots 0x1480
 #define TEB_ArbitraryPtr 0x28
 
+TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0
+	MOVQ	AX, CX
+	JMP	runtime·asmstdcall(SB)
+
 // void runtime·asmstdcall(void *c);
 TEXT runtime·asmstdcall(SB),NOSPLIT,$16
 	MOVQ	SP, AX
@@ -29,6 +33,11 @@
 
 	SUBQ	$(const_maxArgs*8), SP	// room for args
 
+	// Fast version, do not store args on the stack nor
+	// load them into registers.
+	CMPL	CX, $0
+	JE	docall
+
 	// Fast version, do not store args on the stack.
 	CMPL	CX, $4
 	JLE	loadregs
@@ -53,12 +62,13 @@
 	// Floating point arguments are passed in the XMM
 	// registers. Set them here in case any of the arguments
 	// are floating point values. For details see
-	//	https://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
+	//	https://learn.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-170
 	MOVQ	CX, X0
 	MOVQ	DX, X1
 	MOVQ	R8, X2
 	MOVQ	R9, X3
 
+docall:
 	// Call stdcall function.
 	CALL	AX
 
@@ -92,7 +102,7 @@
 // exception record and context pointers.
 // DX is the kind of sigtramp function.
 // Return value of sigtrampgo is stored in AX.
-TEXT sigtramp<>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT sigtramp<>(SB),NOSPLIT,$0-0
 	// Switch from the host ABI to the Go ABI.
 	PUSH_REGS_HOST_TO_ABI0()
 
@@ -145,6 +155,38 @@
 	MOVQ	$const_callbackLastVCH, DX
 	JMP	sigtramp<>(SB)
 
+TEXT runtime·sehtramp(SB),NOSPLIT,$40-0
+	// CX: PEXCEPTION_RECORD ExceptionRecord
+	// DX: ULONG64 EstablisherFrame
+	// R8: PCONTEXT ContextRecord
+	// R9: PDISPATCHER_CONTEXT DispatcherContext
+	// Switch from the host ABI to the Go ABI.
+	PUSH_REGS_HOST_TO_ABI0()
+
+	get_tls(AX)
+	CMPQ	AX, $0
+	JNE	2(PC)
+	// This shouldn't happen, sehtramp is only attached to functions
+	// called from Go, and exception handlers are only called from
+	// the thread that threw the exception.
+	INT	$3
+
+	// Exception from Go thread, set R14.
+	MOVQ	g(AX), R14
+
+	ADJSP	$40
+	MOVQ	CX, 0(SP)
+	MOVQ	DX, 8(SP)
+	MOVQ	R8, 16(SP)
+	MOVQ	R9, 24(SP)
+	CALL	runtime·sehhandler(SB)
+	MOVL	32(SP), AX
+
+	ADJSP	$-40
+
+	POP_REGS_HOST_TO_ABI0()
+	RET
+
 TEXT runtime·callbackasm1(SB),NOSPLIT|NOFRAME,$0
 	// Construct args vector for cgocallback().
 	// By windows/amd64 calling convention first 4 args are in CX, DX, R8, R9
@@ -233,48 +275,12 @@
 	MOVQ	DI, 0(CX)(GS)
 	RET
 
-// Runs on OS stack.
-// duration (in -100ns units) is in dt+0(FP).
-// g may be nil.
-// The function leaves room for 4 syscall parameters
-// (as per windows amd64 calling convention).
-TEXT runtime·usleep2(SB),NOSPLIT,$48-4
-	MOVLQSX	dt+0(FP), BX
-	MOVQ	SP, AX
-	ANDQ	$~15, SP	// alignment as per Windows requirement
-	MOVQ	AX, 40(SP)
-	LEAQ	32(SP), R8  // ptime
-	MOVQ	BX, (R8)
-	MOVQ	$-1, CX // handle
-	MOVQ	$0, DX // alertable
-	MOVQ	runtime·_NtWaitForSingleObject(SB), AX
-	CALL	AX
-	MOVQ	40(SP), SP
-	RET
-
-// Runs on OS stack.
-TEXT runtime·switchtothread(SB),NOSPLIT,$0
-	MOVQ	SP, AX
-	ANDQ	$~15, SP	// alignment as per Windows requirement
-	SUBQ	$(48), SP	// room for SP and 4 args as per Windows requirement
-				// plus one extra word to keep stack 16 bytes aligned
-	MOVQ	AX, 32(SP)
-	MOVQ	runtime·_SwitchToThread(SB), AX
-	CALL	AX
-	MOVQ	32(SP), SP
-	RET
-
 TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
-	CMPB	runtime·useQPCTime(SB), $0
-	JNE	useQPC
 	MOVQ	$_INTERRUPT_TIME, DI
 	MOVQ	time_lo(DI), AX
 	IMULQ	$100, AX
 	MOVQ	AX, ret+0(FP)
 	RET
-useQPC:
-	JMP	runtime·nanotimeQPC(SB)
-	RET
 
 // func osSetupTLS(mp *m)
 // Setup TLS. for use by needm on Windows.
diff --git a/src/runtime/sys_windows_arm.s b/src/runtime/sys_windows_arm.s
index 67009df..d194899 100644
--- a/src/runtime/sys_windows_arm.s
+++ b/src/runtime/sys_windows_arm.s
@@ -9,6 +9,9 @@
 
 // Note: For system ABI, R0-R3 are args, R4-R11 are callee-save.
 
+TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0
+	B	runtime·asmstdcall(SB)
+
 // void runtime·asmstdcall(void *c);
 TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
 	MOVM.DB.W [R4, R5, R14], (R13)	// push {r4, r5, lr}
@@ -192,35 +195,6 @@
 	MOVW	$0, R0
 	MOVM.IA.W (R13), [R4-R11, R15]		// pop {r4-r11, pc}
 
-// Runs on OS stack.
-// duration (in -100ns units) is in dt+0(FP).
-// g may be nil.
-TEXT runtime·usleep2(SB),NOSPLIT|NOFRAME,$0-4
-	MOVW	dt+0(FP), R3
-	MOVM.DB.W [R4, R14], (R13)	// push {r4, lr}
-	MOVW	R13, R4			// Save SP
-	SUB	$8, R13			// R13 = R13 - 8
-	BIC	$0x7, R13		// Align SP for ABI
-	MOVW	$0, R1			// R1 = FALSE (alertable)
-	MOVW	$-1, R0			// R0 = handle
-	MOVW	R13, R2			// R2 = pTime
-	MOVW	R3, 0(R2)		// time_lo
-	MOVW	R0, 4(R2)		// time_hi
-	MOVW	runtime·_NtWaitForSingleObject(SB), R3
-	BL	(R3)
-	MOVW	R4, R13			// Restore SP
-	MOVM.IA.W (R13), [R4, R15]	// pop {R4, pc}
-
-// Runs on OS stack.
-TEXT runtime·switchtothread(SB),NOSPLIT|NOFRAME,$0
-	MOVM.DB.W [R4, R14], (R13)  	// push {R4, lr}
-	MOVW    R13, R4
-	BIC	$0x7, R13		// alignment for ABI
-	MOVW	runtime·_SwitchToThread(SB), R0
-	BL	(R0)
-	MOVW 	R4, R13			// restore stack pointer
-	MOVM.IA.W (R13), [R4, R15]	// pop {R4, pc}
-
 TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
 	B	runtime·armPublicationBarrier(SB)
 
@@ -231,11 +205,6 @@
 	RET
 
 TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
-	MOVW	$0, R0
-	MOVB	runtime·useQPCTime(SB), R0
-	CMP	$0, R0
-	BNE	useQPC
-	MOVW	$_INTERRUPT_TIME, R3
 loop:
 	MOVW	time_hi1(R3), R1
 	DMB	MB_ISH
@@ -254,8 +223,6 @@
 	MOVW	R3, ret_lo+0(FP)
 	MOVW	R4, ret_hi+4(FP)
 	RET
-useQPC:
-	RET	runtime·nanotimeQPC(SB)		// tail call
 
 // save_g saves the g register (R10) into thread local memory
 // so that we can call externally compiled
diff --git a/src/runtime/sys_windows_arm64.s b/src/runtime/sys_windows_arm64.s
index 22bf1dd..1f6d411 100644
--- a/src/runtime/sys_windows_arm64.s
+++ b/src/runtime/sys_windows_arm64.s
@@ -19,6 +19,9 @@
 //
 // load_g and save_g (in tls_arm64.s) clobber R27 (REGTMP) and R0.
 
+TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0
+	B	runtime·asmstdcall(SB)
+
 // void runtime·asmstdcall(void *c);
 TEXT runtime·asmstdcall(SB),NOSPLIT,$16
 	STP	(R19, R20), 16(RSP) // save old R19, R20
@@ -225,41 +228,13 @@
 	MOVD	$0, R0
 	RET
 
-// Runs on OS stack.
-// duration (in -100ns units) is in dt+0(FP).
-// g may be nil.
-TEXT runtime·usleep2(SB),NOSPLIT,$32-4
-	MOVW	dt+0(FP), R0
-	MOVD	$16(RSP), R2		// R2 = pTime
-	MOVD	R0, 0(R2)		// *pTime = -dt
-	MOVD	$-1, R0			// R0 = handle
-	MOVD	$0, R1			// R1 = FALSE (alertable)
-	MOVD	runtime·_NtWaitForSingleObject(SB), R3
-	SUB	$16, RSP	// skip over saved frame pointer below RSP
-	BL	(R3)
-	ADD	$16, RSP
-	RET
-
-// Runs on OS stack.
-TEXT runtime·switchtothread(SB),NOSPLIT,$16-0
-	MOVD	runtime·_SwitchToThread(SB), R0
-	SUB	$16, RSP	// skip over saved frame pointer below RSP
-	BL	(R0)
-	ADD	$16, RSP
-	RET
-
 TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
-	MOVB	runtime·useQPCTime(SB), R0
-	CMP	$0, R0
-	BNE	useQPC
 	MOVD	$_INTERRUPT_TIME, R3
 	MOVD	time_lo(R3), R0
 	MOVD	$100, R1
 	MUL	R1, R0
 	MOVD	R0, ret+0(FP)
 	RET
-useQPC:
-	RET	runtime·nanotimeQPC(SB)		// tail call
 
 // This is called from rt0_go, which runs on the system stack
 // using the initial stack allocated by the OS.
diff --git a/src/runtime/syscall_aix.go b/src/runtime/syscall_aix.go
index e87d4d6..7738fca 100644
--- a/src/runtime/syscall_aix.go
+++ b/src/runtime/syscall_aix.go
@@ -164,7 +164,6 @@
 func syscall_fcntl1(fd, cmd, arg uintptr) (val, err uintptr) {
 	val, err = syscall3(&libc_fcntl, fd, cmd, arg)
 	return
-
 }
 
 //go:linkname syscall_forkx syscall.forkx
diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go
index 1770b83..6a056c8 100644
--- a/src/runtime/syscall_windows_test.go
+++ b/src/runtime/syscall_windows_test.go
@@ -265,11 +265,9 @@
 	h := syscall.Handle(r)
 	defer syscall.CloseHandle(h)
 
-	switch s, err := syscall.WaitForSingleObject(h, 100); s {
+	switch s, err := syscall.WaitForSingleObject(h, syscall.INFINITE); s {
 	case syscall.WAIT_OBJECT_0:
 		break
-	case syscall.WAIT_TIMEOUT:
-		t.Fatal("timeout waiting for thread to exit")
 	case syscall.WAIT_FAILED:
 		t.Fatalf("WaitForSingleObject failed: %v", err)
 	default:
diff --git a/src/runtime/test_amd64.s b/src/runtime/test_amd64.s
index 80fa8c9..cc4bc62 100644
--- a/src/runtime/test_amd64.s
+++ b/src/runtime/test_amd64.s
@@ -1,3 +1,7 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 // Create a large frame to force stack growth. See #62326.
 TEXT ·testSPWrite(SB),0,$16384-0
 	// Write to SP
diff --git a/src/runtime/testdata/testfds/main.go b/src/runtime/testdata/testfds/main.go
new file mode 100644
index 0000000..238ba46
--- /dev/null
+++ b/src/runtime/testdata/testfds/main.go
@@ -0,0 +1,29 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+)
+
+func main() {
+	f, err := os.OpenFile(os.Getenv("TEST_OUTPUT"), os.O_CREATE|os.O_RDWR, 0600)
+	if err != nil {
+		log.Fatalf("os.Open failed: %s", err)
+	}
+	defer f.Close()
+	b, err := io.ReadAll(os.Stdin)
+	if err != nil {
+		log.Fatalf("io.ReadAll(os.Stdin) failed: %s", err)
+	}
+	if len(b) != 0 {
+		log.Fatalf("io.ReadAll(os.Stdin) returned non-nil: %x", b)
+	}
+	fmt.Fprintf(os.Stdout, "stdout\n")
+	fmt.Fprintf(os.Stderr, "stderr\n")
+}
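
The new testdata/testfds program above expects TEST_OUTPUT to name a writable file and asserts that its stdin reads as empty before printing one line each to stdout and stderr. A minimal, hypothetical driver for it (the real runtime test wires this up differently; the binary path here is an assumption):

// Hypothetical driver for testdata/testfds; not the actual runtime test.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

func main() {
	out := filepath.Join(os.TempDir(), "testfds-out.txt") // assumed scratch path
	cmd := exec.Command("./testfds")                      // assumed prebuilt binary
	cmd.Env = append(os.Environ(), "TEST_OUTPUT="+out)
	cmd.Stdin = nil // the program verifies that reading stdin yields no bytes
	b, err := cmd.CombinedOutput()
	fmt.Printf("output=%q err=%v\n", b, err)
}
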
diff --git a/src/runtime/testdata/testprog/syscall_windows.go b/src/runtime/testdata/testprog/syscall_windows.go
index 71bf384..a9b8c09 100644
--- a/src/runtime/testdata/testprog/syscall_windows.go
+++ b/src/runtime/testdata/testprog/syscall_windows.go
@@ -67,7 +67,7 @@
 		panic(err)
 	}
 	// assumes that this process creates 1 thread for each
-	// thread locked goroutine plus extra 5 threads
+	// thread-locked goroutine plus 10 extra threads
 	// like sysmon and others
-	print((mem2 - mem1) / (threadCount + 5))
+	print((mem2 - mem1) / (threadCount + 10))
 }
diff --git a/src/runtime/testdata/testprog/syscalls_linux.go b/src/runtime/testdata/testprog/syscalls_linux.go
index 48f8014..3939b16 100644
--- a/src/runtime/testdata/testprog/syscalls_linux.go
+++ b/src/runtime/testdata/testprog/syscalls_linux.go
@@ -7,6 +7,7 @@
 import (
 	"bytes"
 	"fmt"
+	"internal/testenv"
 	"os"
 	"syscall"
 )
@@ -44,11 +45,8 @@
 
 func unshareFs() error {
 	err := syscall.Unshare(syscall.CLONE_FS)
-	if err != nil {
-		errno, ok := err.(syscall.Errno)
-		if ok && errno == syscall.EPERM {
-			return errNotPermitted
-		}
+	if testenv.SyscallIsNotSupported(err) {
+		return errNotPermitted
 	}
 	return err
 }
diff --git a/src/runtime/testdata/testprogcgo/cgonocallback.c b/src/runtime/testdata/testprogcgo/cgonocallback.c
new file mode 100644
index 0000000..465a484
--- /dev/null
+++ b/src/runtime/testdata/testprogcgo/cgonocallback.c
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "_cgo_export.h"
+
+void runCShouldNotCallback() {
+	CallbackToGo();
+}
diff --git a/src/runtime/testdata/testprogcgo/cgonocallback.go b/src/runtime/testdata/testprogcgo/cgonocallback.go
new file mode 100644
index 0000000..c13bf27
--- /dev/null
+++ b/src/runtime/testdata/testprogcgo/cgonocallback.go
@@ -0,0 +1,31 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// The #cgo nocallback annotation for a C function means it must not call back into Go.
+// This test deliberately calls back into Go anyway, so Go should crash.
+
+/*
+// TODO(#56378): #cgo nocallback runCShouldNotCallback
+extern void runCShouldNotCallback();
+*/
+import "C"
+
+import (
+	"fmt"
+)
+
+func init() {
+	register("CgoNoCallback", CgoNoCallback)
+}
+
+//export CallbackToGo
+func CallbackToGo() {
+}
+
+func CgoNoCallback() {
+	C.runCShouldNotCallback()
+	fmt.Println("OK")
+}
diff --git a/src/runtime/testdata/testprogcgo/cgonoescape.go b/src/runtime/testdata/testprogcgo/cgonoescape.go
new file mode 100644
index 0000000..f5eebac
--- /dev/null
+++ b/src/runtime/testdata/testprogcgo/cgonoescape.go
@@ -0,0 +1,84 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// The #cgo noescape annotation for a C function means its arguments won't escape to the heap.
+
+// We assume that the runtime itself (e.g. runtime.ReadMemStats or other
+// background work) won't allocate 100 new heap objects while the test runs.
+// So the tests are:
+// 1. at least 100 new heap objects are allocated after invoking withoutNoEscape 100 times.
+// 2. fewer than 100 new heap objects are allocated after invoking withNoEscape 100 times.
+
+/*
+// TODO(#56378): #cgo noescape runCWithNoEscape
+
+void runCWithNoEscape(void *p) {
+}
+void runCWithoutNoEscape(void *p) {
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"unsafe"
+)
+
+const num = 100
+
+func init() {
+	register("CgoNoEscape", CgoNoEscape)
+}
+
+//go:noinline
+func withNoEscape() {
+	var str string
+	C.runCWithNoEscape(unsafe.Pointer(&str))
+}
+
+//go:noinline
+func withoutNoEscape() {
+	var str string
+	C.runCWithoutNoEscape(unsafe.Pointer(&str))
+}
+
+func CgoNoEscape() {
+	// make GC stop to see the heap objects allocated
+	debug.SetGCPercent(-1)
+
+	var stats runtime.MemStats
+	runtime.ReadMemStats(&stats)
+	preHeapObjects := stats.HeapObjects
+
+	for i := 0; i < num; i++ {
+		withNoEscape()
+	}
+
+	runtime.ReadMemStats(&stats)
+	nowHeapObjects := stats.HeapObjects
+
+	if nowHeapObjects-preHeapObjects >= num {
+		fmt.Printf("too many heap objects allocated, pre: %v, now: %v\n", preHeapObjects, nowHeapObjects)
+	}
+
+	runtime.ReadMemStats(&stats)
+	preHeapObjects = stats.HeapObjects
+
+	for i := 0; i < num; i++ {
+		withoutNoEscape()
+	}
+
+	runtime.ReadMemStats(&stats)
+	nowHeapObjects = stats.HeapObjects
+
+	if nowHeapObjects-preHeapObjects < num {
+		fmt.Printf("too few heap objects allocated, pre: %v, now: %v\n", preHeapObjects, nowHeapObjects)
+	}
+
+	fmt.Println("OK")
+}
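
The measurement pattern the test above relies on is general: disable the collector, read MemStats.HeapObjects before and after the operation under test, and compare the delta against the expected allocation count. A standalone sketch of just that pattern (unrelated to cgo; names are illustrative):

// Minimal sketch of the heap-object-delta measurement used by CgoNoEscape.
package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

//go:noinline
func allocate() *[64]byte { return new([64]byte) }

func main() {
	debug.SetGCPercent(-1) // keep the GC from collecting between readings

	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	sink := make([]*[64]byte, 0, 100) // preallocate so the loop adds ~100 objects
	for i := 0; i < 100; i++ {
		sink = append(sink, allocate())
	}
	runtime.ReadMemStats(&after)
	fmt.Println("new heap objects:", after.HeapObjects-before.HeapObjects, "kept:", len(sink))
}
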
diff --git a/src/runtime/testdata/testprogcgo/numgoroutine.go b/src/runtime/testdata/testprogcgo/numgoroutine.go
index 1b9f202..9cbb4e4 100644
--- a/src/runtime/testdata/testprogcgo/numgoroutine.go
+++ b/src/runtime/testdata/testprogcgo/numgoroutine.go
@@ -70,7 +70,7 @@
 	sbuf = sbuf[:runtime.Stack(sbuf, true)]
 	n = strings.Count(string(sbuf), "goroutine ")
 	if n != want {
-		fmt.Printf("%s Stack: want %d; got %d:\n%s\n", label, want, n, string(sbuf))
+		fmt.Printf("%s Stack: want %d; got %d:\n%s\n", label, want, n, sbuf)
 		return "", false
 	}
 	return string(sbuf), true
diff --git a/src/runtime/testdata/testprogcgo/stack_windows.go b/src/runtime/testdata/testprogcgo/stack_windows.go
index 0be1126..d095093 100644
--- a/src/runtime/testdata/testprogcgo/stack_windows.go
+++ b/src/runtime/testdata/testprogcgo/stack_windows.go
@@ -51,7 +51,7 @@
 		panic(err)
 	}
 	// assumes that this process creates 1 thread for each
-	// thread locked goroutine plus extra 5 threads
+	// thread-locked goroutine plus 10 extra threads
 	// like sysmon and others
-	print((mem2 - mem1) / (threadCount + 5))
+	print((mem2 - mem1) / (threadCount + 10))
 }
diff --git a/src/runtime/testdata/testprogcgo/stackswitch.c b/src/runtime/testdata/testprogcgo/stackswitch.c
new file mode 100644
index 0000000..3473d5b
--- /dev/null
+++ b/src/runtime/testdata/testprogcgo/stackswitch.c
@@ -0,0 +1,147 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix && !android && !openbsd
+
+// Required for darwin ucontext.
+#define _XOPEN_SOURCE
+// Required for netbsd stack_t if _XOPEN_SOURCE is set.
+#define _XOPEN_SOURCE_EXTENDED	1
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
+#include <assert.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <ucontext.h>
+
+// musl libc does not provide getcontext, etc. Skip the test there.
+//
+// musl libc doesn't provide any direct detection mechanism. So assume any
+// non-glibc linux is using musl.
+//
+// Note that bionic does not provide getcontext either, but that is skipped via
+// the android build tag.
+#if defined(__linux__) && !defined(__GLIBC__)
+#define MUSL 1
+#endif
+#if defined(MUSL)
+void callStackSwitchCallbackFromThread(void) {
+	printf("SKIP\n");
+	exit(0);
+}
+#else
+
+// Use a stack size larger than the 32KiB estimate in
+// runtime.callbackUpdateSystemStack. This ensures that a second stack
+// allocation won't accidentally count as in bounds of the first stack.
+#define STACK_SIZE	(64ull << 10)
+
+static ucontext_t uctx_save, uctx_switch;
+
+extern void stackSwitchCallback(void);
+
+char *stack2;
+
+static void *stackSwitchThread(void *arg) {
+	// Simple test: callback works from the normal system stack.
+	stackSwitchCallback();
+
+	// Next, verify that switching stacks doesn't break callbacks.
+
+	char *stack1 = malloc(STACK_SIZE);
+	if (stack1 == NULL) {
+		perror("malloc");
+		exit(1);
+	}
+
+	// Allocate the second stack before freeing the first to ensure we don't get
+	// the same address from malloc.
+	//
+	// Will be freed in stackSwitchThread2.
+	stack2 = malloc(STACK_SIZE);
+	if (stack2 == NULL) {
+		perror("malloc");
+		exit(1);
+	}
+
+	if (getcontext(&uctx_switch) == -1) {
+		perror("getcontext");
+		exit(1);
+	}
+	uctx_switch.uc_stack.ss_sp = stack1;
+	uctx_switch.uc_stack.ss_size = STACK_SIZE;
+	uctx_switch.uc_link = &uctx_save;
+	makecontext(&uctx_switch, stackSwitchCallback, 0);
+
+	if (swapcontext(&uctx_save, &uctx_switch) == -1) {
+		perror("swapcontext");
+		exit(1);
+	}
+
+	if (getcontext(&uctx_switch) == -1) {
+		perror("getcontext");
+		exit(1);
+	}
+	uctx_switch.uc_stack.ss_sp = stack2;
+	uctx_switch.uc_stack.ss_size = STACK_SIZE;
+	uctx_switch.uc_link = &uctx_save;
+	makecontext(&uctx_switch, stackSwitchCallback, 0);
+
+	if (swapcontext(&uctx_save, &uctx_switch) == -1) {
+		perror("swapcontext");
+		exit(1);
+	}
+
+	free(stack1);
+
+	return NULL;
+}
+
+static void *stackSwitchThread2(void *arg) {
+	// New thread. Use stack bounds that partially overlap the previous
+	// bounds. needm should refresh the stack bounds anyway since this is a
+	// new thread.
+
+	// N.B. since we used a custom stack with makecontext,
+	// callbackUpdateSystemStack had to guess the bounds. Its guess assumes
+	// a 32KiB stack.
+	char *prev_stack_lo = stack2 + STACK_SIZE - (32*1024);
+
+	// New SP is just barely in bounds, but if we don't update the bounds
+	// we'll almost certainly overflow. The SP that
+	// callbackUpdateSystemStack sees already has some data pushed, so it
+	// will be a bit below what we set here. Thus we include some slack.
+	char *new_stack_hi = prev_stack_lo + 128;
+
+	if (getcontext(&uctx_switch) == -1) {
+		perror("getcontext");
+		exit(1);
+	}
+	uctx_switch.uc_stack.ss_sp = new_stack_hi - (STACK_SIZE / 2);
+	uctx_switch.uc_stack.ss_size = STACK_SIZE / 2;
+	uctx_switch.uc_link = &uctx_save;
+	makecontext(&uctx_switch, stackSwitchCallback, 0);
+
+	if (swapcontext(&uctx_save, &uctx_switch) == -1) {
+		perror("swapcontext");
+		exit(1);
+	}
+
+	free(stack2);
+
+	return NULL;
+}
+
+void callStackSwitchCallbackFromThread(void) {
+	pthread_t thread;
+	assert(pthread_create(&thread, NULL, stackSwitchThread, NULL) == 0);
+	assert(pthread_join(thread, NULL) == 0);
+
+	assert(pthread_create(&thread, NULL, stackSwitchThread2, NULL) == 0);
+	assert(pthread_join(thread, NULL) == 0);
+}
+
+#endif
diff --git a/src/runtime/testdata/testprogcgo/stackswitch.go b/src/runtime/testdata/testprogcgo/stackswitch.go
new file mode 100644
index 0000000..70e630e
--- /dev/null
+++ b/src/runtime/testdata/testprogcgo/stackswitch.go
@@ -0,0 +1,42 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix && !android && !openbsd
+
+package main
+
+/*
+void callStackSwitchCallbackFromThread(void);
+*/
+import "C"
+
+import (
+	"fmt"
+	"runtime/debug"
+)
+
+func init() {
+	register("StackSwitchCallback", StackSwitchCallback)
+}
+
+//export stackSwitchCallback
+func stackSwitchCallback() {
+	// We want to trigger a bounds check on the g0 stack. To do this, we
+	// need to call a splittable function through systemstack().
+	// SetGCPercent contains such a systemstack call.
+	gogc := debug.SetGCPercent(100)
+	debug.SetGCPercent(gogc)
+}
+
+// Regression test for https://go.dev/issue/62440. It should be possible for C
+// threads to call into Go from different stacks without crashing due to g0
+// stack bounds checks.
+//
+// N.B. This is only OK for threads created in C. Threads with Go frames up the
+// stack must not change the stack out from under us.
+func StackSwitchCallback() {
+	C.callStackSwitchCallbackFromThread()
+
+	fmt.Printf("OK\n")
+}
diff --git a/src/runtime/testdata/testprogcgo/threadprof.go b/src/runtime/testdata/testprogcgo/threadprof.go
index d62d4b4..00b511d 100644
--- a/src/runtime/testdata/testprogcgo/threadprof.go
+++ b/src/runtime/testdata/testprogcgo/threadprof.go
@@ -92,7 +92,9 @@
 		return
 	}
 
-	out, err := exec.Command(os.Args[0], "CgoExternalThreadSignal", "crash").CombinedOutput()
+	cmd := exec.Command(os.Args[0], "CgoExternalThreadSignal", "crash")
+	cmd.Dir = os.TempDir() // put any core file in tempdir
+	out, err := cmd.CombinedOutput()
 	if err == nil {
 		fmt.Println("C signal did not crash as expected")
 		fmt.Printf("\n%s\n", out)
diff --git a/src/runtime/testdata/testprognet/waiters.go b/src/runtime/testdata/testprognet/waiters.go
new file mode 100644
index 0000000..a65c40b
--- /dev/null
+++ b/src/runtime/testdata/testprognet/waiters.go
@@ -0,0 +1,69 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"runtime/internal/atomic"
+	"sync"
+	"time"
+	_ "unsafe" // for go:linkname
+)
+
+// The bug is that netpollWaiters increases monotonically.
+// This doesn't cause a problem until it overflows.
+// Use linkname to see the value.
+//
+//go:linkname netpollWaiters runtime.netpollWaiters
+var netpollWaiters atomic.Uint32
+
+func init() {
+	register("NetpollWaiters", NetpollWaiters)
+}
+
+func NetpollWaiters() {
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		conn, err := listener.Accept()
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer conn.Close()
+		if _, err := io.Copy(io.Discard, conn); err != nil {
+			log.Fatal(err)
+		}
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		conn, err := net.Dial("tcp", listener.Addr().String())
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer conn.Close()
+		for i := 0; i < 10; i++ {
+			fmt.Fprintf(conn, "%d\n", i)
+			time.Sleep(time.Millisecond)
+		}
+	}()
+
+	wg.Wait()
+	if v := netpollWaiters.Load(); v != 0 {
+		log.Fatalf("current waiters %v", v)
+	}
+
+	fmt.Println("OK")
+}
diff --git a/src/runtime/testdata/testwinlib/main.c b/src/runtime/testdata/testwinlib/main.c
index 55ee657..e9b5946 100644
--- a/src/runtime/testdata/testwinlib/main.c
+++ b/src/runtime/testdata/testwinlib/main.c
@@ -4,6 +4,8 @@
 
 int exceptionCount;
 int continueCount;
+int unhandledCount;
+
 LONG WINAPI customExceptionHandlder(struct _EXCEPTION_POINTERS *ExceptionInfo)
 {
     if (ExceptionInfo->ExceptionRecord->ExceptionCode == EXCEPTION_BREAKPOINT)
@@ -20,7 +22,10 @@
 #else
         c->Pc = c->Lr;
 #endif
+#ifdef _ARM64_
+        // TODO: remove when windows/arm64 supports SEH stack unwinding.
         return EXCEPTION_CONTINUE_EXECUTION;
+#endif
     }
     return EXCEPTION_CONTINUE_SEARCH;
 }
@@ -29,6 +34,14 @@
     if (ExceptionInfo->ExceptionRecord->ExceptionCode == EXCEPTION_BREAKPOINT)
     {
         continueCount++;
+    }
+    return EXCEPTION_CONTINUE_SEARCH;
+}
+
+LONG WINAPI unhandledExceptionHandler(struct _EXCEPTION_POINTERS *ExceptionInfo) {
+    if (ExceptionInfo->ExceptionRecord->ExceptionCode == EXCEPTION_BREAKPOINT)
+    {
+        unhandledCount++;
         return EXCEPTION_CONTINUE_EXECUTION;
     }
     return EXCEPTION_CONTINUE_SEARCH;
@@ -58,10 +71,15 @@
         fflush(stdout);
         return 2;
     }
+    void *prevUnhandledHandler = SetUnhandledExceptionFilter(unhandledExceptionHandler);
     CallMeBack(throwFromC);
     RemoveVectoredContinueHandler(continueHandlerHandle);
     RemoveVectoredExceptionHandler(exceptionHandlerHandle);
-    printf("exceptionCount: %d\ncontinueCount: %d\n", exceptionCount, continueCount);
+    if (prevUnhandledHandler != NULL)
+    {
+        SetUnhandledExceptionFilter(prevUnhandledHandler);
+    }
+    printf("exceptionCount: %d\ncontinueCount: %d\nunhandledCount: %d\n", exceptionCount, continueCount, unhandledCount);
     fflush(stdout);
     return 0;
 }
diff --git a/src/runtime/testdata/testwinlibthrow/main.go b/src/runtime/testdata/testwinlibthrow/main.go
index ce0c92f..493e1e1 100644
--- a/src/runtime/testdata/testwinlibthrow/main.go
+++ b/src/runtime/testdata/testwinlibthrow/main.go
@@ -1,19 +1,19 @@
-package main
-
-import (
-	"os"
-	"syscall"
-)
-
-func main() {
-	dll := syscall.MustLoadDLL("veh.dll")
-	RaiseNoExcept := dll.MustFindProc("RaiseNoExcept")
-	ThreadRaiseNoExcept := dll.MustFindProc("ThreadRaiseNoExcept")
-
-	thread := len(os.Args) > 1 && os.Args[1] == "thread"
-	if !thread {
-		RaiseNoExcept.Call()
-	} else {
-		ThreadRaiseNoExcept.Call()
-	}
-}
+package main
+
+import (
+	"os"
+	"syscall"
+)
+
+func main() {
+	dll := syscall.MustLoadDLL("veh.dll")
+	RaiseNoExcept := dll.MustFindProc("RaiseNoExcept")
+	ThreadRaiseNoExcept := dll.MustFindProc("ThreadRaiseNoExcept")
+
+	thread := len(os.Args) > 1 && os.Args[1] == "thread"
+	if !thread {
+		RaiseNoExcept.Call()
+	} else {
+		ThreadRaiseNoExcept.Call()
+	}
+}
diff --git a/src/runtime/time.go b/src/runtime/time.go
index c05351c..8ed1e45 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -1016,7 +1016,7 @@
 func updateTimerModifiedEarliest(pp *p, nextwhen int64) {
 	for {
 		old := pp.timerModifiedEarliest.Load()
-		if old != 0 && int64(old) < nextwhen {
+		if old != 0 && old < nextwhen {
 			return
 		}
 
diff --git a/src/runtime/time_windows_386.s b/src/runtime/time_windows_386.s
index b8b636e..77e5f76 100644
--- a/src/runtime/time_windows_386.s
+++ b/src/runtime/time_windows_386.s
@@ -9,8 +9,6 @@
 #include "time_windows.h"
 
 TEXT time·now(SB),NOSPLIT,$0-20
-	CMPB	runtime·useQPCTime(SB), $0
-	JNE	useQPC
 loop:
 	MOVL	(_INTERRUPT_TIME+time_hi1), AX
 	MOVL	(_INTERRUPT_TIME+time_lo), CX
@@ -79,6 +77,3 @@
 	MOVL	AX, sec+0(FP)
 	MOVL	DX, sec+4(FP)
 	RET
-useQPC:
-	JMP	runtime·nowQPC(SB)
-	RET
diff --git a/src/runtime/time_windows_amd64.s b/src/runtime/time_windows_amd64.s
index 226f2b5..d3fcf2e 100644
--- a/src/runtime/time_windows_amd64.s
+++ b/src/runtime/time_windows_amd64.s
@@ -9,9 +9,6 @@
 #include "time_windows.h"
 
 TEXT time·now(SB),NOSPLIT,$0-24
-	CMPB	runtime·useQPCTime(SB), $0
-	JNE	useQPC
-
 	MOVQ	$_INTERRUPT_TIME, DI
 	MOVQ	time_lo(DI), AX
 	IMULQ	$100, AX
@@ -37,6 +34,3 @@
 	SUBQ	DX, CX
 	MOVL	CX, nsec+8(FP)
 	RET
-useQPC:
-	JMP	runtime·nowQPC(SB)
-	RET
diff --git a/src/runtime/time_windows_arm.s b/src/runtime/time_windows_arm.s
index 8d4469f..ff5686d 100644
--- a/src/runtime/time_windows_arm.s
+++ b/src/runtime/time_windows_arm.s
@@ -9,10 +9,6 @@
 #include "time_windows.h"
 
 TEXT time·now(SB),NOSPLIT,$0-20
-	MOVW    $0, R0
-	MOVB    runtime·useQPCTime(SB), R0
-	CMP	$0, R0
-	BNE	useQPC
 	MOVW	$_INTERRUPT_TIME, R3
 loop:
 	MOVW	time_hi1(R3), R1
@@ -85,6 +81,4 @@
 	MOVW	R7,sec_hi+4(FP)
 	MOVW	R1,nsec+8(FP)
 	RET
-useQPC:
-	RET	runtime·nowQPC(SB)		// tail call
 
diff --git a/src/runtime/time_windows_arm64.s b/src/runtime/time_windows_arm64.s
index 7943d6b..47e7656 100644
--- a/src/runtime/time_windows_arm64.s
+++ b/src/runtime/time_windows_arm64.s
@@ -9,10 +9,6 @@
 #include "time_windows.h"
 
 TEXT time·now(SB),NOSPLIT,$0-24
-	MOVB    runtime·useQPCTime(SB), R0
-	CMP	$0, R0
-	BNE	useQPC
-
 	MOVD	$_INTERRUPT_TIME, R3
 	MOVD	time_lo(R3), R0
 	MOVD	$100, R1
@@ -42,6 +38,4 @@
 	MSUB	R1, R0, R2, R0
 	MOVW	R0, nsec+8(FP)
 	RET
-useQPC:
-	RET	runtime·nowQPC(SB)		// tail call
 
diff --git a/src/runtime/tls_ppc64x.s b/src/runtime/tls_ppc64x.s
index 17aec9f..137214b 100644
--- a/src/runtime/tls_ppc64x.s
+++ b/src/runtime/tls_ppc64x.s
@@ -24,10 +24,12 @@
 // NOTE: setg_gcc<> assume this clobbers only R31.
 TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
 #ifndef GOOS_aix
+#ifndef GOOS_openbsd
 	MOVBZ	runtime·iscgo(SB), R31
 	CMP	R31, $0
 	BEQ	nocgo
 #endif
+#endif
 	MOVD	runtime·tls_g(SB), R31
 	MOVD	g, 0(R31)
 
diff --git a/src/runtime/tls_riscv64.s b/src/runtime/tls_riscv64.s
index 397919a..2aeb89a 100644
--- a/src/runtime/tls_riscv64.s
+++ b/src/runtime/tls_riscv64.s
@@ -11,20 +11,16 @@
 //
 // NOTE: mcall() assumes this clobbers only X31 (REG_TMP).
 TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
+#ifndef GOOS_openbsd
 	MOVB	runtime·iscgo(SB), X31
-	BEQ	X0, X31, nocgo
-
-	MOV	runtime·tls_g(SB), X31
-	ADD	TP, X31		// add offset to thread pointer (X4)
-	MOV	g, (X31)
-
+	BEQZ	X31, nocgo
+#endif
+	MOV	g, runtime·tls_g(SB)
 nocgo:
 	RET
 
 TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0
-	MOV	runtime·tls_g(SB), X31
-	ADD	TP, X31		// add offset to thread pointer (X4)
-	MOV	(X31), g
+	MOV	runtime·tls_g(SB), g
 	RET
 
 GLOBL runtime·tls_g(SB), TLSBSS, $8
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 7d7987c..a9cfa22 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !goexperiment.exectracer2
+
 // Go execution tracer.
 // The tracer captures a wide range of execution events like goroutine
 // creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
@@ -198,6 +200,9 @@
 	lastP              puintptr  // last P emitted an event for this goroutine
 }
 
+// Unused; for compatibility with the new tracer.
+func (s *gTraceState) reset() {}
+
 // mTraceState is per-M state for the tracer.
 type mTraceState struct {
 	startingTrace  bool // this M is in TraceStart, potentially before traceEnabled is true
@@ -258,6 +263,8 @@
 
 // traceEnabled returns true if the trace is currently enabled.
 //
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
 //go:nosplit
 func traceEnabled() bool {
 	return trace.enabled
@@ -270,10 +277,56 @@
 	return trace.shutdown
 }
 
+// traceLocker represents an M writing trace events. While a traceLocker value
+// is valid, the tracer observes all operations on the G/M/P or trace events being
+// written as happening atomically.
+//
+// This doesn't do much for the current tracer, because the current tracer doesn't
+// need atomicity around non-trace runtime operations. All the state it needs it
+// collects carefully during a STW.
+type traceLocker struct {
+	enabled bool
+}
+
+// traceAcquire prepares this M for writing one or more trace events.
+//
+// This exists for compatibility with the upcoming new tracer; it doesn't do much
+// in the current tracer.
+//
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
+//go:nosplit
+func traceAcquire() traceLocker {
+	if !traceEnabled() {
+		return traceLocker{false}
+	}
+	return traceLocker{true}
+}
+
+// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
+//
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
+//go:nosplit
+func (tl traceLocker) ok() bool {
+	return tl.enabled
+}
+
+// traceRelease indicates that this M is done writing trace events.
+//
+// This exists for compatibility with the upcoming new tracer; it doesn't do anything
+// in the current tracer.
+//
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
+//go:nosplit
+func traceRelease(tl traceLocker) {
+}
+
 // StartTrace enables tracing for the current process.
-// While tracing, the data will be buffered and available via ReadTrace.
+// While tracing, the data will be buffered and available via [ReadTrace].
 // StartTrace returns an error if tracing is already enabled.
-// Most clients should use the runtime/trace package or the testing package's
+// Most clients should use the [runtime/trace] package or the [testing] package's
 // -test.trace flag instead of calling StartTrace directly.
 func StartTrace() error {
 	// Stop the world so that we can take a consistent snapshot
@@ -281,7 +334,7 @@
 	// Do not stop the world during GC so we ensure we always see
 	// a consistent view of GC-related events (e.g. a start is always
 	// paired with an end).
-	stopTheWorldGC(stwStartTrace)
+	stw := stopTheWorldGC(stwStartTrace)
 
 	// Prevent sysmon from running any code that could generate events.
 	lock(&sched.sysmonlock)
@@ -296,7 +349,7 @@
 	if trace.enabled || trace.shutdown {
 		unlock(&trace.bufLock)
 		unlock(&sched.sysmonlock)
-		startTheWorldGC()
+		startTheWorldGC(stw)
 		return errorString("tracing is already enabled")
 	}
 
@@ -367,8 +420,10 @@
 			gp.trace.tracedSyscallEnter = false
 		}
 	})
-	traceProcStart()
-	traceGoStart()
+	// Use a dummy traceLocker. The trace isn't enabled yet, but we can still write events.
+	tl := traceLocker{}
+	tl.ProcStart()
+	tl.GoStart()
 	// Note: startTicks needs to be set after we emit traceEvGoInSyscall events.
 	// If we do it the other way around, it is possible that exitsyscall will
 	// query sysExitTime after startTicks but before traceEvGoInSyscall timestamp.
@@ -401,9 +456,12 @@
 	unlock(&sched.sysmonlock)
 
 	// Record the current state of HeapGoal to avoid information loss in trace.
-	traceHeapGoal()
+	//
+	// Use the same dummy trace locker. The trace can't end until after we start
+	// the world, and we can safely trace from here.
+	tl.HeapGoal()
 
-	startTheWorldGC()
+	startTheWorldGC(stw)
 	return nil
 }
 
@@ -412,7 +470,7 @@
 func StopTrace() {
 	// Stop the world so that we can collect the trace buffers from all p's below,
 	// and also to avoid races with traceEvent.
-	stopTheWorldGC(stwStopTrace)
+	stw := stopTheWorldGC(stwStopTrace)
 
 	// See the comment in StartTrace.
 	lock(&sched.sysmonlock)
@@ -423,11 +481,14 @@
 	if !trace.enabled {
 		unlock(&trace.bufLock)
 		unlock(&sched.sysmonlock)
-		startTheWorldGC()
+		startTheWorldGC(stw)
 		return
 	}
 
-	traceGoSched()
+	// Trace GoSched for us, and use a dummy locker. The world is stopped
+	// and we control whether the trace is enabled, so this is safe.
+	tl := traceLocker{}
+	tl.GoSched()
 
 	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), nil)
 	trace.cpuLogRead.close()
@@ -479,7 +540,7 @@
 
 	unlock(&sched.sysmonlock)
 
-	startTheWorldGC()
+	startTheWorldGC(stw)
 
 	// The world is started but we've set trace.shutdown, so new tracing can't start.
 	// Wait for the trace reader to flush pending buffers and stop.
@@ -714,6 +775,13 @@
 	unlock(&trace.lock)
 }
 
+// traceThreadDestroy is a no-op. It exists as a stub to support the new tracer.
+//
+// This must run on the system stack, just to match the new tracer.
+func traceThreadDestroy(_ *m) {
+	// No-op in old tracer.
+}
+
 // traceFullQueue queues buf into queue of full buffers.
 func traceFullQueue(buf traceBufPtr) {
 	buf.ptr().link = 0
@@ -846,8 +914,8 @@
 // traceCPUSample writes a CPU profile sample stack to the execution tracer's
 // profiling buffer. It is called from a signal handler, so is limited in what
 // it can do.
-func traceCPUSample(gp *g, pp *p, stk []uintptr) {
-	if !trace.enabled {
+func traceCPUSample(gp *g, _ *m, pp *p, stk []uintptr) {
+	if !traceEnabled() {
 		// Tracing is usually turned off; don't spend time acquiring the signal
 		// lock unless it's active.
 		return
@@ -944,7 +1012,7 @@
 			}
 			stackID := trace.stackTab.put(buf.stk[:nstk])
 
-			traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, uint64(timestamp), ppid, goid)
+			traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, timestamp, ppid, goid)
 		}
 	}
 }
@@ -1349,7 +1417,6 @@
 	}
 
 	var (
-		cache      pcvalueCache
 		lastFuncID = abi.FuncIDNormal
 		newPCBuf   = make([]uintptr, 0, traceStackSize)
 		skip       = pcBuf[0]
@@ -1378,7 +1445,7 @@
 			continue
 		}
 
-		u, uf := newInlineUnwinder(fi, callPC, &cache)
+		u, uf := newInlineUnwinder(fi, callPC)
 		for ; uf.valid(); uf = u.next(uf) {
 			sf := u.srcFunc(uf)
 			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
@@ -1476,15 +1543,15 @@
 
 // The following functions write specific events to trace.
 
-func traceGomaxprocs(procs int32) {
+func (_ traceLocker) Gomaxprocs(procs int32) {
 	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
 }
 
-func traceProcStart() {
+func (_ traceLocker) ProcStart() {
 	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
 }
 
-func traceProcStop(pp *p) {
+func (_ traceLocker) ProcStop(pp *p) {
 	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
 	// to handle this we temporary employ the P.
 	mp := acquirem()
@@ -1495,16 +1562,16 @@
 	releasem(mp)
 }
 
-func traceGCStart() {
+func (_ traceLocker) GCStart() {
 	traceEvent(traceEvGCStart, 3, trace.seqGC)
 	trace.seqGC++
 }
 
-func traceGCDone() {
+func (_ traceLocker) GCDone() {
 	traceEvent(traceEvGCDone, -1)
 }
 
-func traceSTWStart(reason stwReason) {
+func (_ traceLocker) STWStart(reason stwReason) {
 	// Don't trace if this STW is for trace start/stop, since traceEnabled
 	// switches during a STW.
 	if reason == stwStartTrace || reason == stwStopTrace {
@@ -1514,7 +1581,7 @@
 	traceEvent(traceEvSTWStart, -1, uint64(reason))
 }
 
-func traceSTWDone() {
+func (_ traceLocker) STWDone() {
 	mp := getg().m
 	if !mp.trace.tracedSTWStart {
 		return
@@ -1528,7 +1595,7 @@
 //
 // traceGCSweepStart must be paired with traceGCSweepDone and there
 // must be no preemption points between these two calls.
-func traceGCSweepStart() {
+func (_ traceLocker) GCSweepStart() {
 	// Delay the actual GCSweepStart event until the first span
 	// sweep. If we don't sweep anything, don't emit any events.
 	pp := getg().m.p.ptr()
@@ -1542,7 +1609,7 @@
 //
 // This may be called outside a traceGCSweepStart/traceGCSweepDone
 // pair; however, it will not emit any trace events in this case.
-func traceGCSweepSpan(bytesSwept uintptr) {
+func (_ traceLocker) GCSweepSpan(bytesSwept uintptr) {
 	pp := getg().m.p.ptr()
 	if pp.trace.inSweep {
 		if pp.trace.swept == 0 {
@@ -1552,7 +1619,7 @@
 	}
 }
 
-func traceGCSweepDone() {
+func (_ traceLocker) GCSweepDone() {
 	pp := getg().m.p.ptr()
 	if !pp.trace.inSweep {
 		throw("missing traceGCSweepStart")
@@ -1563,15 +1630,15 @@
 	pp.trace.inSweep = false
 }
 
-func traceGCMarkAssistStart() {
+func (_ traceLocker) GCMarkAssistStart() {
 	traceEvent(traceEvGCMarkAssistStart, 1)
 }
 
-func traceGCMarkAssistDone() {
+func (_ traceLocker) GCMarkAssistDone() {
 	traceEvent(traceEvGCMarkAssistDone, -1)
 }
 
-func traceGoCreate(newg *g, pc uintptr) {
+func (_ traceLocker) GoCreate(newg *g, pc uintptr) {
 	newg.trace.seq = 0
 	newg.trace.lastP = getg().m.p
 	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
@@ -1579,7 +1646,7 @@
 	traceEvent(traceEvGoCreate, 2, newg.goid, uint64(id))
 }
 
-func traceGoStart() {
+func (_ traceLocker) GoStart() {
 	gp := getg().m.curg
 	pp := gp.m.p
 	gp.trace.seq++
@@ -1593,29 +1660,29 @@
 	}
 }
 
-func traceGoEnd() {
+func (_ traceLocker) GoEnd() {
 	traceEvent(traceEvGoEnd, -1)
 }
 
-func traceGoSched() {
+func (_ traceLocker) GoSched() {
 	gp := getg()
 	gp.trace.lastP = gp.m.p
 	traceEvent(traceEvGoSched, 1)
 }
 
-func traceGoPreempt() {
+func (_ traceLocker) GoPreempt() {
 	gp := getg()
 	gp.trace.lastP = gp.m.p
 	traceEvent(traceEvGoPreempt, 1)
 }
 
-func traceGoPark(reason traceBlockReason, skip int) {
+func (_ traceLocker) GoPark(reason traceBlockReason, skip int) {
 	// Convert the block reason directly to a trace event type.
 	// See traceBlockReason for more information.
 	traceEvent(byte(reason), skip)
 }
 
-func traceGoUnpark(gp *g, skip int) {
+func (_ traceLocker) GoUnpark(gp *g, skip int) {
 	pp := getg().m.p
 	gp.trace.seq++
 	if gp.trace.lastP == pp {
@@ -1626,7 +1693,7 @@
 	}
 }
 
-func traceGoSysCall() {
+func (_ traceLocker) GoSysCall() {
 	var skip int
 	switch {
 	case tracefpunwindoff():
@@ -1647,7 +1714,10 @@
 	traceEvent(traceEvGoSysCall, skip)
 }
 
-func traceGoSysExit() {
+func (_ traceLocker) GoSysExit(lostP bool) {
+	if !lostP {
+		throw("lostP must always be true in the old tracer for GoSysExit")
+	}
 	gp := getg().m.curg
 	if !gp.trace.tracedSyscallEnter {
 		// There was no syscall entry traced for us at all, so there's definitely
@@ -1674,7 +1744,23 @@
 	traceEvent(traceEvGoSysExit, -1, gp.goid, gp.trace.seq, uint64(ts))
 }
 
-func traceGoSysBlock(pp *p) {
+// nosplit because it's called from exitsyscall without a P.
+//
+//go:nosplit
+func (_ traceLocker) RecordSyscallExitedTime(gp *g, oldp *p) {
+	// Wait till traceGoSysBlock event is emitted.
+	// This ensures consistency of the trace (the goroutine is started after it is blocked).
+	for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
+		osyield()
+	}
+	// We can't trace syscall exit right now because we don't have a P.
+	// Tracing code can invoke write barriers that cannot run without a P.
+	// So instead we remember the syscall exit time and emit the event
+	// in execute when we have a P.
+	gp.trace.sysExitTime = traceClockNow()
+}
+
+func (_ traceLocker) GoSysBlock(pp *p) {
 	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
 	// to handle this we temporary employ the P.
 	mp := acquirem()
@@ -1685,11 +1771,15 @@
 	releasem(mp)
 }
 
-func traceHeapAlloc(live uint64) {
+func (t traceLocker) ProcSteal(pp *p, forMe bool) {
+	t.ProcStop(pp)
+}
+
+func (_ traceLocker) HeapAlloc(live uint64) {
 	traceEvent(traceEvHeapAlloc, -1, live)
 }
 
-func traceHeapGoal() {
+func (_ traceLocker) HeapGoal() {
 	heapGoal := gcController.heapGoal()
 	if heapGoal == ^uint64(0) {
 		// Heap-based triggering is disabled.
@@ -1790,19 +1880,27 @@
 	return f.datap.textAddr(*(*uint32)(w))
 }
 
-// traceOneNewExtraM registers the fact that a new extra M was created with
+// OneNewExtraM registers the fact that a new extra M was created with
 // the tracer. This matters if the M (which has an attached G) is used while
 // the trace is still active because if it is, we need the fact that it exists
 // to show up in the final trace.
-func traceOneNewExtraM(gp *g) {
+func (tl traceLocker) OneNewExtraM(gp *g) {
 	// Trigger two trace events for the locked g in the extra m,
 	// since the next event of the g will be traceEvGoSysExit in exitsyscall,
 	// while calling from C thread to Go.
-	traceGoCreate(gp, 0) // no start pc
+	tl.GoCreate(gp, 0) // no start pc
 	gp.trace.seq++
 	traceEvent(traceEvGoInSyscall, -1, gp.goid)
 }
 
+// Used only in the new tracer.
+func (tl traceLocker) GoCreateSyscall(gp *g) {
+}
+
+// Used only in the new tracer.
+func (tl traceLocker) GoDestroySyscall() {
+}
+
 // traceTime represents a timestamp for the trace.
 type traceTime uint64
 
@@ -1816,3 +1914,12 @@
 func traceClockNow() traceTime {
 	return traceTime(cputicks() / traceTimeDiv)
 }
+
+func traceExitingSyscall() {
+}
+
+func traceExitedSyscall() {
+}
+
+// Not used in the old tracer. Defined for compatibility.
+const defaultTraceAdvancePeriod = 0
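
As the updated StartTrace documentation above notes, most programs should reach this machinery through the runtime/trace package rather than calling runtime.StartTrace/StopTrace directly. A minimal example of that recommended path:

// Typical user-level entry point into the tracer; trace.Start and trace.Stop
// wrap runtime.StartTrace and runtime.StopTrace respectively.
package main

import (
	"log"
	"os"
	"runtime/trace"
)

func main() {
	f, err := os.Create("trace.out")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := trace.Start(f); err != nil {
		log.Fatal(err)
	}
	defer trace.Stop()

	// ... workload to be traced; inspect the result with `go tool trace trace.out`.
}
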
diff --git a/src/runtime/trace/annotation_test.go b/src/runtime/trace/annotation_test.go
index 69ea8f2..1bfe28e 100644
--- a/src/runtime/trace/annotation_test.go
+++ b/src/runtime/trace/annotation_test.go
@@ -8,6 +8,7 @@
 	"bytes"
 	"context"
 	"fmt"
+	"internal/goexperiment"
 	"internal/trace"
 	"reflect"
 	. "runtime/trace"
@@ -42,6 +43,10 @@
 }
 
 func TestUserTaskRegion(t *testing.T) {
+	if goexperiment.ExecTracer2 {
+		// An equivalent test exists in internal/trace/v2.
+		t.Skip("skipping because this test is incompatible with the new tracer")
+	}
 	if IsEnabled() {
 		t.Skip("skipping because -test.trace is set")
 	}
diff --git a/src/runtime/trace/trace_stack_test.go b/src/runtime/trace/trace_stack_test.go
index be3adc9..f427e57 100644
--- a/src/runtime/trace/trace_stack_test.go
+++ b/src/runtime/trace/trace_stack_test.go
@@ -7,6 +7,7 @@
 import (
 	"bytes"
 	"fmt"
+	"internal/goexperiment"
 	"internal/testenv"
 	"internal/trace"
 	"net"
@@ -152,11 +153,11 @@
 			{"runtime/trace_test.TestTraceSymbolize.func1", 0},
 		}},
 		{trace.EvGoSched, []frame{
-			{"runtime/trace_test.TestTraceSymbolize", 111},
+			{"runtime/trace_test.TestTraceSymbolize", 112},
 			{"testing.tRunner", 0},
 		}},
 		{trace.EvGoCreate, []frame{
-			{"runtime/trace_test.TestTraceSymbolize", 40},
+			{"runtime/trace_test.TestTraceSymbolize", 41},
 			{"testing.tRunner", 0},
 		}},
 		{trace.EvGoStop, []frame{
@@ -177,7 +178,7 @@
 		}},
 		{trace.EvGoUnblock, []frame{
 			{"runtime.chansend1", 0},
-			{"runtime/trace_test.TestTraceSymbolize", 113},
+			{"runtime/trace_test.TestTraceSymbolize", 114},
 			{"testing.tRunner", 0},
 		}},
 		{trace.EvGoBlockSend, []frame{
@@ -186,7 +187,7 @@
 		}},
 		{trace.EvGoUnblock, []frame{
 			{"runtime.chanrecv1", 0},
-			{"runtime/trace_test.TestTraceSymbolize", 114},
+			{"runtime/trace_test.TestTraceSymbolize", 115},
 			{"testing.tRunner", 0},
 		}},
 		{trace.EvGoBlockSelect, []frame{
@@ -195,7 +196,7 @@
 		}},
 		{trace.EvGoUnblock, []frame{
 			{"runtime.selectgo", 0},
-			{"runtime/trace_test.TestTraceSymbolize", 115},
+			{"runtime/trace_test.TestTraceSymbolize", 116},
 			{"testing.tRunner", 0},
 		}},
 		{trace.EvGoBlockSync, []frame{
@@ -214,7 +215,7 @@
 		{trace.EvGoUnblock, []frame{
 			{"sync.(*WaitGroup).Add", 0},
 			{"sync.(*WaitGroup).Done", 0},
-			{"runtime/trace_test.TestTraceSymbolize", 120},
+			{"runtime/trace_test.TestTraceSymbolize", 121},
 			{"testing.tRunner", 0},
 		}},
 		{trace.EvGoBlockCond, []frame{
@@ -289,6 +290,10 @@
 
 func skipTraceSymbolizeTestIfNecessary(t *testing.T) {
 	testenv.MustHaveGoBuild(t)
+	if goexperiment.ExecTracer2 {
+		// An equivalent test exists in internal/trace/v2.
+		t.Skip("skipping because this test is incompatible with the new tracer")
+	}
 	if IsEnabled() {
 		t.Skip("skipping because -test.trace is set")
 	}
diff --git a/src/runtime/trace/trace_test.go b/src/runtime/trace/trace_test.go
index 04a43a0..23a8d11 100644
--- a/src/runtime/trace/trace_test.go
+++ b/src/runtime/trace/trace_test.go
@@ -9,6 +9,7 @@
 	"context"
 	"flag"
 	"fmt"
+	"internal/goexperiment"
 	"internal/profile"
 	"internal/race"
 	"internal/trace"
@@ -41,6 +42,9 @@
 	if testing.Short() {
 		t.Skip("skipping in short mode")
 	}
+	if goexperiment.ExecTracer2 {
+		t.Skip("skipping because this test is incompatible with the new tracer")
+	}
 	// During Start, bunch of records are written to reflect the current
 	// snapshot of the program, including state of each goroutines.
 	// And some string constants are written to the trace to aid trace
@@ -127,6 +131,10 @@
 	if IsEnabled() {
 		t.Skip("skipping because -test.trace is set")
 	}
+	if goexperiment.ExecTracer2 {
+		// An equivalent test exists in internal/trace/v2.
+		t.Skip("skipping because this test is incompatible with the new tracer")
+	}
 	buf := new(bytes.Buffer)
 	if err := Start(buf); err != nil {
 		t.Fatalf("failed to start tracing: %v", err)
@@ -194,6 +202,10 @@
 	if testing.Short() {
 		t.Skip("skipping in -short mode")
 	}
+	if goexperiment.ExecTracer2 {
+		// An equivalent test exists in internal/trace/v2.
+		t.Skip("skipping because this test is incompatible with the new tracer")
+	}
 
 	var wg sync.WaitGroup
 	done := make(chan bool)
@@ -356,6 +368,10 @@
 	if IsEnabled() {
 		t.Skip("skipping because -test.trace is set")
 	}
+	if goexperiment.ExecTracer2 {
+		// An equivalent test exists in internal/trace/v2.
+		t.Skip("skipping because this test is incompatible with the new tracer")
+	}
 	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
 	outerDone := make(chan bool)
 
@@ -506,6 +522,9 @@
 	if IsEnabled() {
 		t.Skip("skipping because -test.trace is set")
 	}
+	if goexperiment.ExecTracer2 {
+		t.Skip("skipping because this test is incompatible with the new tracer")
+	}
 	buf := new(bytes.Buffer)
 	if err := Start(buf); err != nil {
 		t.Fatalf("failed to start tracing: %v", err)
@@ -592,6 +611,10 @@
 	if IsEnabled() {
 		t.Skip("skipping because -test.trace is set")
 	}
+	if goexperiment.ExecTracer2 {
+		// An equivalent test exists in internal/trace/v2.
+		t.Skip("skipping because this test is incompatible with the new tracer")
+	}
 
 	cpuBuf := new(bytes.Buffer)
 	if err := pprof.StartCPUProfile(cpuBuf); err != nil {
diff --git a/src/runtime/trace2.go b/src/runtime/trace2.go
new file mode 100644
index 0000000..673205d
--- /dev/null
+++ b/src/runtime/trace2.go
@@ -0,0 +1,1001 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// Go execution tracer.
+// The tracer captures a wide range of execution events like goroutine
+// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
+// changes of heap size, processor start/stop, etc and writes them to a buffer
+// in a compact form. A precise nanosecond-precision timestamp and a stack
+// trace is captured for most events.
+//
+// Tracer invariants (to keep the synchronization making sense):
+// - An m that has a trace buffer must be on either the allm or sched.freem lists.
+// - Any trace buffer mutation must either be happening in traceAdvance or between
+//   a traceAcquire and a subsequent traceRelease.
+// - traceAdvance cannot return until the previous generation's buffers are all flushed.
+//
+// See https://go.dev/issue/60773 for a link to the full design.
+
+package runtime
+
+import (
+	"runtime/internal/atomic"
+	"unsafe"
+)
+
+// Trace state.
+
+// trace is global tracing context.
+var trace struct {
+	// trace.lock must only be acquired on the system stack where
+	// stack splits cannot happen while it is held.
+	lock mutex
+
+	// Trace buffer management.
+	//
+	// First we check the empty list for any free buffers. If not, buffers
+	// are allocated directly from the OS. Once they're filled up and/or
+	// flushed, they end up on the full queue for trace.gen%2.
+	//
+	// The trace reader takes buffers off the full list one-by-one and
+	// places them into reading until they're finished being read from.
+	// Then they're placed onto the empty list.
+	//
+	// Protected by trace.lock.
+	reading       *traceBuf // buffer currently handed off to user
+	empty         *traceBuf // stack of empty buffers
+	full          [2]traceBufQueue
+	workAvailable atomic.Bool
+
+	// State for the trace reader goroutine.
+	//
+	// Protected by trace.lock.
+	readerGen     atomic.Uintptr // the generation the reader is currently reading for
+	flushedGen    atomic.Uintptr // the last completed generation
+	headerWritten bool           // whether ReadTrace has emitted trace header
+
+	// doneSema is used to synchronize the reader and traceAdvance. Specifically,
+	// it notifies traceAdvance that the reader is done with a generation.
+	// Both semaphores are 0 by default (so, acquires block). traceAdvance
+	// attempts to acquire for gen%2 after flushing the last buffers for gen.
+	// Meanwhile the reader releases the sema for gen%2 when it has finished
+	// processing gen.
+	doneSema [2]uint32
+
+	// Trace data tables for deduplicating data going into the trace.
+	// There are 2 of each: one for gen%2, one for 1-gen%2.
+	stackTab  [2]traceStackTable  // maps stack traces to unique ids
+	stringTab [2]traceStringTable // maps strings to unique ids
+
+	// cpuLogRead accepts CPU profile samples from the signal handler where
+	// they're generated. There are two profBufs here: one for gen%2, one for
+	// 1-gen%2. These profBufs use a three-word header to hold the IDs of the P, G,
+	// and M (respectively) that were active at the time of the sample. Because
+	// profBuf uses a record with all zeros in its header to indicate overflow,
+	// we make sure to make the P field always non-zero: The ID of a real P will
+	// start at bit 1, and bit 0 will be set. Samples that arrive while no P is
+	// running (such as near syscalls) will set the first header field to 0b10.
+	// This careful handling of the first header field allows us to store ID of
+	// the active G directly in the second field, even though that will be 0
+	// when sampling g0.
+	//
+	// Initialization and teardown of these fields is protected by traceAdvanceSema.
+	cpuLogRead  [2]*profBuf
+	signalLock  atomic.Uint32              // protects use of the following member, only usable in signal handlers
+	cpuLogWrite [2]atomic.Pointer[profBuf] // copy of cpuLogRead for use in signal handlers, set without signalLock
+	cpuSleep    *wakeableSleep
+	cpuLogDone  <-chan struct{}
+	cpuBuf      [2]*traceBuf
+
+	reader atomic.Pointer[g] // goroutine that called ReadTrace, or nil
+
+	// Fast mappings from enumerations to string IDs that are prepopulated
+	// in the trace.
+	markWorkerLabels [2][len(gcMarkWorkerModeStrings)]traceArg
+	goStopReasons    [2][len(traceGoStopReasonStrings)]traceArg
+	goBlockReasons   [2][len(traceBlockReasonStrings)]traceArg
+
+	// Trace generation counter.
+	gen            atomic.Uintptr
+	lastNonZeroGen uintptr // last non-zero value of gen
+
+	// shutdown is set when we are waiting for trace reader to finish after setting gen to 0
+	//
+	// Writes protected by trace.lock.
+	shutdown atomic.Bool
+
+	// Number of goroutines in syscall exiting slow path.
+	exitingSyscall atomic.Int32
+
+	// seqGC is the sequence counter for GC begin/end.
+	//
+	// Mutated only during stop-the-world.
+	seqGC uint64
+}
+
+// Trace public API.
+
+var (
+	traceAdvanceSema  uint32 = 1
+	traceShutdownSema uint32 = 1
+)
+
+// StartTrace enables tracing for the current process.
+// While tracing, the data will be buffered and available via [ReadTrace].
+// StartTrace returns an error if tracing is already enabled.
+// Most clients should use the [runtime/trace] package or the [testing] package's
+// -test.trace flag instead of calling StartTrace directly.
+func StartTrace() error {
+	if traceEnabled() || traceShuttingDown() {
+		return errorString("tracing is already enabled")
+	}
+	// Block until cleanup of the last trace is done.
+	semacquire(&traceShutdownSema)
+	semrelease(&traceShutdownSema)
+
+	// Hold traceAdvanceSema across trace start, since we'll want it on
+	// the other side of tracing being enabled globally.
+	semacquire(&traceAdvanceSema)
+
+	// Initialize CPU profile -> trace ingestion.
+	traceInitReadCPU()
+
+	// Compute the first generation for this StartTrace.
+	//
+	// Note: we start from the last non-zero generation rather than 1 so we
+	// can avoid resetting all the arrays indexed by gen%2 or gen%3. There's
+	// more than one of each per m, p, and goroutine.
+	firstGen := traceNextGen(trace.lastNonZeroGen)
+
+	// Reset GC sequencer.
+	trace.seqGC = 1
+
+	// Reset trace reader state.
+	trace.headerWritten = false
+	trace.readerGen.Store(firstGen)
+	trace.flushedGen.Store(0)
+
+	// Register some basic strings in the string tables.
+	traceRegisterLabelsAndReasons(firstGen)
+
+	// Stop the world.
+	//
+	// The purpose of stopping the world is to make sure that no goroutine is in a
+	// context where it could emit an event by bringing all goroutines to a safe point
+	// with no opportunity to transition.
+	//
+	// The exception to this rule are goroutines that are concurrently exiting a syscall.
+	// Those will all be forced into the syscalling slow path, and we'll just make sure
+	// that we don't observe any goroutines in that critical section before starting
+	// the world again.
+	//
+	// A good follow-up question to this is why stopping the world is necessary at all
+	// given that we have traceAcquire and traceRelease. Unfortunately, those only help
+	// us when tracing is already active (for performance, so when tracing is off the
+	// tracing seqlock is left untouched). The main issue here is subtle: we're going to
+	// want to obtain a correct starting status for each goroutine, but there are windows
+	// of time in which we could read and emit an incorrect status. Specifically:
+	//
+	//	trace := traceAcquire()
+	//  // <----> problem window
+	//	casgstatus(gp, _Gwaiting, _Grunnable)
+	//	if trace.ok() {
+	//		trace.GoUnpark(gp, 2)
+	//		traceRelease(trace)
+	//	}
+	//
+	// More precisely, if we readgstatus for a gp while another goroutine is in the problem
+	// window and that goroutine didn't observe that tracing had begun, then we might write
+	// a GoStatus(GoWaiting) event for that goroutine, but it won't trace an event marking
+	// the transition from GoWaiting to GoRunnable. The trace will then be broken, because
+	// future events will be emitted assuming the tracer sees GoRunnable.
+	//
+	// In short, what we really need here is to make sure that the next time *any goroutine*
+	// hits a traceAcquire, it sees that the trace is enabled.
+	//
+	// Note also that stopping the world is necessary to make sure sweep-related events are
+	// coherent. Since the world is stopped and sweeps are non-preemptible, we can never start
+	// the world and see an unpaired sweep 'end' event. Other parts of the tracer rely on this.
+	stw := stopTheWorld(stwStartTrace)
+
+	// Prevent sysmon from running any code that could generate events.
+	lock(&sched.sysmonlock)
+
+	// Reset mSyscallID on all Ps while we have them stationary and the trace is disabled.
+	for _, pp := range allp {
+		pp.trace.mSyscallID = -1
+	}
+
+	// Start tracing.
+	//
+	// After this executes, other Ms may start creating trace buffers and emitting
+	// data into them.
+	trace.gen.Store(firstGen)
+
+	// Wait for exitingSyscall to drain.
+	//
+	// It may not monotonically decrease to zero, but in the limit it will always become
+	// zero because the world is stopped and there are no available Ps for syscall-exited
+	// goroutines to run on.
+	//
+	// Because we set gen before checking this, and because exitingSyscall is always incremented
+	// *after* traceAcquire (which checks gen), we can be certain that when exitingSyscall is zero
+	// that any goroutine that goes to exit a syscall from then on *must* observe the new gen.
+	//
+	// The critical section on each goroutine here is going to be quite short, so the likelihood
+	// that we observe a zero value is high.
+	for trace.exitingSyscall.Load() != 0 {
+		osyield()
+	}
+
+	// Record some initial pieces of information.
+	//
+	// N.B. This will also emit a status event for this goroutine.
+	tl := traceAcquire()
+	tl.Gomaxprocs(gomaxprocs)  // Get this as early in the trace as possible. See comment in traceAdvance.
+	tl.STWStart(stwStartTrace) // We didn't trace this above, so trace it now.
+
+	// Record the fact that a GC is active, if applicable.
+	if gcphase == _GCmark || gcphase == _GCmarktermination {
+		tl.GCActive()
+	}
+
+	// Record the heap goal so we have it at the very beginning of the trace.
+	tl.HeapGoal()
+
+	// Make sure a ProcStatus is emitted for every P, while we're here.
+	for _, pp := range allp {
+		tl.writer().writeProcStatusForP(pp, pp == tl.mp.p.ptr()).end()
+	}
+	traceRelease(tl)
+
+	unlock(&sched.sysmonlock)
+	startTheWorld(stw)
+
+	traceStartReadCPU()
+	traceAdvancer.start()
+
+	semrelease(&traceAdvanceSema)
+	return nil
+}
+
+// StopTrace stops tracing, if it was previously enabled.
+// StopTrace only returns after all the reads for the trace have completed.
+func StopTrace() {
+	traceAdvance(true)
+}
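+
+// A typical consumer of this API (e.g. the runtime/trace package) drives
+// StartTrace, ReadTrace, and StopTrace roughly as sketched below. This is an
+// illustration only, not the actual runtime/trace implementation; w and its
+// error handling are assumed.
+//
+//	if err := StartTrace(); err != nil {
+//		return err
+//	}
+//	go func() {
+//		for {
+//			data := ReadTrace()
+//			if data == nil {
+//				break // tracing stopped and all buffered data has been returned
+//			}
+//			w.Write(data) // copy/consume before calling ReadTrace again
+//		}
+//	}()
+//	// ... later ...
+//	StopTrace() // returns only after all reads for the trace have completed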
+
+// traceAdvance moves tracing to the next generation, and cleans up the current generation,
+// ensuring that it's flushed out before returning. If stopTrace is true, it disables tracing
+// altogether instead of advancing to the next generation.
+//
+// traceAdvanceSema must not be held.
+func traceAdvance(stopTrace bool) {
+	semacquire(&traceAdvanceSema)
+
+	// Get the gen that we're advancing from. In this function we don't really care much
+	// about the generation we're advancing _into_ since we'll do all the cleanup in this
+	// generation for the next advancement.
+	gen := trace.gen.Load()
+	if gen == 0 {
+		// We may end up here if traceAdvance is called concurrently with StopTrace.
+		semrelease(&traceAdvanceSema)
+		return
+	}
+
+	// Write an EvFrequency event for this generation.
+	//
+	// N.B. This may block for quite a while to get a good frequency estimate, so make sure we do
+	// this here and not e.g. on the trace reader.
+	traceFrequency(gen)
+
+	// Collect all the untraced Gs.
+	type untracedG struct {
+		gp           *g
+		goid         uint64
+		mid          int64
+		status       uint32
+		waitreason   waitReason
+		inMarkAssist bool
+	}
+	var untracedGs []untracedG
+	forEachGRace(func(gp *g) {
+		// Make absolutely sure all Gs are ready for the next
+		// generation. We need to do this even for dead Gs because
+		// they may come alive with a new identity, and their status-traced
+		// bookkeeping might end up being stale.
+		// We may miss totally new goroutines, but they'll always
+		// have clean bookkeeping.
+		gp.trace.readyNextGen(gen)
+		// If the status was traced, nothing else to do.
+		if gp.trace.statusWasTraced(gen) {
+			return
+		}
+		// Scribble down information about this goroutine.
+		ug := untracedG{gp: gp, mid: -1}
+		systemstack(func() {
+			me := getg().m.curg
+			// We don't have to handle this G status transition because we
+			// already eliminated ourselves from consideration above.
+			casGToWaiting(me, _Grunning, waitReasonTraceGoroutineStatus)
+			// We need to suspend and take ownership of the G to safely read its
+			// goid. Note that we can't actually emit the event at this point
+			// because we might stop the G in a window where it's unsafe to write
+			// events based on the G's status. We need the global trace buffer flush
+			// coming up to make sure we're not racing with the G.
+			//
+			// It should be very unlikely that we try to preempt a running G here.
+			// The only situation that we might is that we're racing with a G
+			// that's running for the first time in this generation. Therefore,
+			// this should be relatively fast.
+			s := suspendG(gp)
+			if !s.dead {
+				ug.goid = s.g.goid
+				if s.g.m != nil {
+					ug.mid = int64(s.g.m.procid)
+				}
+				ug.status = readgstatus(s.g) &^ _Gscan
+				ug.waitreason = s.g.waitreason
+				ug.inMarkAssist = s.g.inMarkAssist
+			}
+			resumeG(s)
+			casgstatus(me, _Gwaiting, _Grunning)
+		})
+		if ug.goid != 0 {
+			untracedGs = append(untracedGs, ug)
+		}
+	})
+
+	if !stopTrace {
+		// Re-register runtime goroutine labels and stop/block reasons.
+		traceRegisterLabelsAndReasons(traceNextGen(gen))
+	}
+
+	// Now that we've done some of the heavy stuff, prevent the world from stopping.
+	// This is necessary to ensure the consistency of the STW events. If we're feeling
+	// adventurous we could lift this restriction and add a STWActive event, but the
+	// cost of maintaining this consistency is low. We're not going to hold this semaphore
+	// for very long and most STW periods are very short.
+	// Once we hold worldsema, prevent preemption as well so we're not interrupted partway
+	// through this. We want to get this done as soon as possible.
+	semacquire(&worldsema)
+	mp := acquirem()
+
+	// Advance the generation or stop the trace.
+	trace.lastNonZeroGen = gen
+	if stopTrace {
+		systemstack(func() {
+			// Ordering is important here. Set shutdown first, then disable tracing,
+			// so that conditions like (traceEnabled() || traceShuttingDown()) have
+			// no opportunity to be false. Hold the trace lock so this update appears
+			// atomic to the trace reader.
+			lock(&trace.lock)
+			trace.shutdown.Store(true)
+			trace.gen.Store(0)
+			unlock(&trace.lock)
+		})
+	} else {
+		trace.gen.Store(traceNextGen(gen))
+	}
+
+	// Emit a ProcsChange event so we have one on record for each generation.
+	// Let's emit it as soon as possible so that downstream tools can rely on the value
+	// being there fairly soon in a generation.
+	//
+	// It's important that we do this before allowing stop-the-worlds again,
+	// because the procs count could change.
+	if !stopTrace {
+		tl := traceAcquire()
+		tl.Gomaxprocs(gomaxprocs)
+		traceRelease(tl)
+	}
+
+	// Emit a GCActive event in the new generation if necessary.
+	//
+	// It's important that we do this before allowing stop-the-worlds again,
+	// because that could emit global GC-related events.
+	if !stopTrace && (gcphase == _GCmark || gcphase == _GCmarktermination) {
+		tl := traceAcquire()
+		tl.GCActive()
+		traceRelease(tl)
+	}
+
+	// Preemption is OK again after this. If the world stops again, that's fine.
+	// We're just cleaning up the last generation after this point.
+	//
+	// We also don't care if the GC starts again after this for the same reasons.
+	releasem(mp)
+	semrelease(&worldsema)
+
+	// Snapshot allm and freem.
+	//
+	// Snapshotting after the generation counter update is sufficient.
+	// Because an m must be on either allm or sched.freem if it has an active trace
+	// buffer, new threads added to allm after this point must necessarily observe
+	// the new generation number (sched.lock acts as a barrier).
+	//
+	// Threads that exit before this point and are on neither list explicitly
+	// flush their own buffers in traceThreadDestroy.
+	//
+	// Snapshotting freem is necessary because Ms can continue to emit events
+	// while they're still on that list. Removal from sched.freem is serialized with
+	// this snapshot, so either we'll capture an m on sched.freem and race with
+	// the removal to flush its buffers (resolved by traceThreadDestroy acquiring
+	// the thread's seqlock, which one of us must win, so at least its old gen buffer
+	// will be flushed in time for the new generation) or it will have flushed its
+	// buffers before we snapshotted it to begin with.
+	lock(&sched.lock)
+	mToFlush := allm
+	for mp := mToFlush; mp != nil; mp = mp.alllink {
+		mp.trace.link = mp.alllink
+	}
+	for mp := sched.freem; mp != nil; mp = mp.freelink {
+		mp.trace.link = mToFlush
+		mToFlush = mp
+	}
+	unlock(&sched.lock)
+
+	// Iterate over our snapshot, flushing every buffer until we're done.
+	//
+	// Because trace writers read the generation while the seqlock is
+	// held, we can be certain that when there are no writers there are
+	// also no stale generation values left. Therefore, it's safe to flush
+	// any buffers that remain in that generation's slot.
+	const debugDeadlock = false
+	systemstack(func() {
+		// Track iterations for some rudimentary deadlock detection.
+		i := 0
+		detectedDeadlock := false
+
+		for mToFlush != nil {
+			prev := &mToFlush
+			for mp := *prev; mp != nil; {
+				if mp.trace.seqlock.Load()%2 != 0 {
+					// The M is writing. Come back to it later.
+					prev = &mp.trace.link
+					mp = mp.trace.link
+					continue
+				}
+				// Flush the trace buffer.
+				//
+				// trace.lock needed for traceBufFlush, but also to synchronize
+				// with traceThreadDestroy, which flushes both buffers unconditionally.
+				lock(&trace.lock)
+				bufp := &mp.trace.buf[gen%2]
+				if *bufp != nil {
+					traceBufFlush(*bufp, gen)
+					*bufp = nil
+				}
+				unlock(&trace.lock)
+
+				// Remove the m from the flush list.
+				*prev = mp.trace.link
+				mp.trace.link = nil
+				mp = *prev
+			}
+			// Yield only if we're going to be going around the loop again.
+			if mToFlush != nil {
+				osyield()
+			}
+
+			if debugDeadlock {
+				// Try to detect a deadlock. We probably shouldn't loop here
+				// this many times.
+				if i > 100000 && !detectedDeadlock {
+					detectedDeadlock = true
+					println("runtime: failing to flush")
+					for mp := mToFlush; mp != nil; mp = mp.trace.link {
+						print("runtime: m=", mp.id, "\n")
+					}
+				}
+				i++
+			}
+		}
+	})
+
+	// At this point, the old generation is fully flushed minus stack and string
+	// tables, CPU samples, and goroutines that haven't run at all during the last
+	// generation.
+
+	// Check to see if any Gs still haven't had events written out for them.
+	statusWriter := unsafeTraceWriter(gen, nil)
+	for _, ug := range untracedGs {
+		if ug.gp.trace.statusWasTraced(gen) {
+			// It was traced, we don't need to do anything.
+			continue
+		}
+		// It still wasn't traced. Because we ensured all Ms stopped writing trace
+		// events to the last generation, that must mean the G never had its status
+		// traced in gen between when we recorded it and now. If that's true, the goid
+		// and status we recorded then are exactly what we want right now.
+		status := goStatusToTraceGoStatus(ug.status, ug.waitreason)
+		statusWriter = statusWriter.writeGoStatus(ug.goid, ug.mid, status, ug.inMarkAssist)
+	}
+	statusWriter.flush().end()
+
+	// Read everything out of the last gen's CPU profile buffer.
+	traceReadCPU(gen)
+
+	systemstack(func() {
+		// Flush CPU samples, stacks, and strings for the last generation. This is safe,
+		// because we're now certain no M is writing to the last generation.
+		//
+		// Ordering is important here. traceCPUFlush may generate new stacks and dumping
+		// stacks may generate new strings.
+		traceCPUFlush(gen)
+		trace.stackTab[gen%2].dump(gen)
+		trace.stringTab[gen%2].reset(gen)
+
+		// That's it. This generation is done producing buffers.
+		lock(&trace.lock)
+		trace.flushedGen.Store(gen)
+		unlock(&trace.lock)
+	})
+
+	if stopTrace {
+		semacquire(&traceShutdownSema)
+
+		// Finish off CPU profile reading.
+		traceStopReadCPU()
+	} else {
+		// Go over each P and emit a status event for it if necessary.
+		//
+		// We do this at the beginning of the new generation instead of the
+		// end like we do for goroutines because forEachP doesn't give us a
+		// hook to skip Ps that have already been traced. Since we have to
+		// preempt all Ps anyway, might as well stay consistent with StartTrace
+		// which does this during the STW.
+		semacquire(&worldsema)
+		forEachP(waitReasonTraceProcStatus, func(pp *p) {
+			tl := traceAcquire()
+			if !pp.trace.statusWasTraced(tl.gen) {
+				tl.writer().writeProcStatusForP(pp, false).end()
+			}
+			traceRelease(tl)
+		})
+		// Perform status reset on dead Ps because they just appear as idle.
+		//
+		// Holding worldsema prevents allp from changing.
+		//
+		// TODO(mknyszek): Consider explicitly emitting ProcCreate and ProcDestroy
+		// events to indicate whether a P exists, rather than just making its
+		// existence implicit.
+		for _, pp := range allp[len(allp):cap(allp)] {
+			pp.trace.readyNextGen(traceNextGen(gen))
+		}
+		semrelease(&worldsema)
+	}
+
+	// Block until the trace reader has finished processing the last generation.
+	semacquire(&trace.doneSema[gen%2])
+	if raceenabled {
+		raceacquire(unsafe.Pointer(&trace.doneSema[gen%2]))
+	}
+
+	// Double-check that things look as we expect after advancing and perform some
+	// final cleanup if the trace has fully stopped.
+	systemstack(func() {
+		lock(&trace.lock)
+		if !trace.full[gen%2].empty() {
+			throw("trace: non-empty full trace buffer for done generation")
+		}
+		if stopTrace {
+			if !trace.full[1-(gen%2)].empty() {
+				throw("trace: non-empty full trace buffer for next generation")
+			}
+			if trace.reading != nil || trace.reader.Load() != nil {
+				throw("trace: reading after shutdown")
+			}
+			// Free all the empty buffers.
+			for trace.empty != nil {
+				buf := trace.empty
+				trace.empty = buf.link
+				sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf), &memstats.other_sys)
+			}
+			// Clear trace.shutdown and other flags.
+			trace.headerWritten = false
+			trace.shutdown.Store(false)
+		}
+		unlock(&trace.lock)
+	})
+
+	if stopTrace {
+		// Clear the sweep state on every P for the next time tracing is enabled.
+		//
+		// It may be stale in the next trace because we may have ended tracing in
+		// the middle of a sweep on a P.
+		//
+		// It's fine not to call forEachP here because tracing is disabled and we
+		// know at this point that nothing is calling into the tracer, but we do
+		// need to look at dead Ps too just because GOMAXPROCS could have been called
+		// at any point since we stopped tracing, and we have to ensure there's no
+		// bad state on dead Ps too. Prevent a STW and a concurrent GOMAXPROCS that
+		// might mutate allp by making ourselves briefly non-preemptible.
+		mp := acquirem()
+		for _, pp := range allp[:cap(allp)] {
+			pp.trace.inSweep = false
+			pp.trace.maySweep = false
+			pp.trace.swept = 0
+			pp.trace.reclaimed = 0
+		}
+		releasem(mp)
+	}
+
+	// Release the advance semaphore. If stopTrace is true we're still holding onto
+	// traceShutdownSema.
+	//
+	// Do a direct handoff. Don't let one caller of traceAdvance starve
+	// other calls to traceAdvance.
+	semrelease1(&traceAdvanceSema, true, 0)
+
+	if stopTrace {
+		// Stop the traceAdvancer. We can't be holding traceAdvanceSema here because
+		// we'll deadlock (we're blocked on the advancer goroutine exiting, but it
+		// may be currently trying to acquire traceAdvanceSema).
+		traceAdvancer.stop()
+		semrelease(&traceShutdownSema)
+	}
+}
+
+func traceNextGen(gen uintptr) uintptr {
+	if gen == ^uintptr(0) {
+		// gen is used both %2 and %3 and we want both patterns to continue when we loop around.
+		// ^uint32(0) and ^uint64(0) are both odd and multiples of 3. Therefore the next generation
+		// we want is even and one more than a multiple of 3. The smallest such number is 4.
+		return 4
+	}
+	return gen + 1
+}
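+
+// For example (illustration only), with a 64-bit uintptr the wraparound case
+// above preserves both index patterns the tracer relies on:
+//
+//	^uint64(0)%2 == 1, ^uint64(0)%3 == 0
+//	traceNextGen(^uintptr(0)) == 4, and 4%2 == 0, 4%3 == 1
+//
+// which is exactly what incrementing by one would have produced had the
+// counter not overflowed.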
+
+// traceRegisterLabelsAndReasons re-registers mark worker labels and
+// goroutine stop/block reasons in the string table for the provided
+// generation. Note: the provided generation must not have started yet.
+func traceRegisterLabelsAndReasons(gen uintptr) {
+	for i, label := range gcMarkWorkerModeStrings[:] {
+		trace.markWorkerLabels[gen%2][i] = traceArg(trace.stringTab[gen%2].put(gen, label))
+	}
+	for i, str := range traceBlockReasonStrings[:] {
+		trace.goBlockReasons[gen%2][i] = traceArg(trace.stringTab[gen%2].put(gen, str))
+	}
+	for i, str := range traceGoStopReasonStrings[:] {
+		trace.goStopReasons[gen%2][i] = traceArg(trace.stringTab[gen%2].put(gen, str))
+	}
+}
+
+// ReadTrace returns the next chunk of binary tracing data, blocking until data
+// is available. If tracing is turned off and all the data accumulated while it
+// was on has been returned, ReadTrace returns nil. The caller must copy the
+// returned data before calling ReadTrace again.
+// ReadTrace must be called from one goroutine at a time.
+func ReadTrace() []byte {
+top:
+	var buf []byte
+	var park bool
+	systemstack(func() {
+		buf, park = readTrace0()
+	})
+	if park {
+		gopark(func(gp *g, _ unsafe.Pointer) bool {
+			if !trace.reader.CompareAndSwapNoWB(nil, gp) {
+				// We're racing with another reader.
+				// Wake up and handle this case.
+				return false
+			}
+
+			if g2 := traceReader(); gp == g2 {
+				// New data arrived between unlocking
+				// and the CAS and we won the wake-up
+				// race, so wake up directly.
+				return false
+			} else if g2 != nil {
+				printlock()
+				println("runtime: got trace reader", g2, g2.goid)
+				throw("unexpected trace reader")
+			}
+
+			return true
+		}, nil, waitReasonTraceReaderBlocked, traceBlockSystemGoroutine, 2)
+		goto top
+	}
+
+	return buf
+}
+
+// readTrace0 is ReadTrace's continuation on g0. This must run on the
+// system stack because it acquires trace.lock.
+//
+//go:systemstack
+func readTrace0() (buf []byte, park bool) {
+	if raceenabled {
+		// g0 doesn't have a race context. Borrow the user G's.
+		if getg().racectx != 0 {
+			throw("expected racectx == 0")
+		}
+		getg().racectx = getg().m.curg.racectx
+		// (This defer should get open-coded, which is safe on
+		// the system stack.)
+		defer func() { getg().racectx = 0 }()
+	}
+
+	// This function must not allocate while holding trace.lock:
+	// allocation can call heap allocate, which will try to emit a trace
+	// event while holding heap lock.
+	lock(&trace.lock)
+
+	if trace.reader.Load() != nil {
+		// More than one goroutine is reading the trace. This is bad,
+		// but we would rather not crash the program because of tracing,
+		// since tracing can be enabled at runtime on production servers.
+		unlock(&trace.lock)
+		println("runtime: ReadTrace called from multiple goroutines simultaneously")
+		return nil, false
+	}
+	// Recycle the old buffer.
+	if buf := trace.reading; buf != nil {
+		buf.link = trace.empty
+		trace.empty = buf
+		trace.reading = nil
+	}
+	// Write trace header.
+	if !trace.headerWritten {
+		trace.headerWritten = true
+		unlock(&trace.lock)
+		return []byte("go 1.22 trace\x00\x00\x00"), false
+	}
+
+	// Read the next buffer.
+
+	if trace.readerGen.Load() == 0 {
+		trace.readerGen.Store(1)
+	}
+	var gen uintptr
+	for {
+		assertLockHeld(&trace.lock)
+		gen = trace.readerGen.Load()
+
+		// Check to see if we need to block for more data in this generation
+		// or if we need to move our generation forward.
+		if !trace.full[gen%2].empty() {
+			break
+		}
+		// Most of the time readerGen is one generation ahead of flushedGen, as the
+		// current generation is being read from. Then, once the last buffer is flushed
+		// into readerGen, flushedGen will rise to meet it. At this point, the tracer
+		// is waiting on the reader to finish flushing the last generation so that it
+		// can continue to advance.
+		if trace.flushedGen.Load() == gen {
+			if trace.shutdown.Load() {
+				unlock(&trace.lock)
+
+				// Wake up anyone waiting for us to be done with this generation.
+				//
+				// Do this after reading trace.shutdown, because the thread we're
+				// waking up is going to clear trace.shutdown.
+				if raceenabled {
+					// Model synchronization on trace.doneSema, which the race
+					// detector does not see. This is required to avoid false
+					// race reports on the writer passed to trace.Start.
+					racerelease(unsafe.Pointer(&trace.doneSema[gen%2]))
+				}
+				semrelease(&trace.doneSema[gen%2])
+
+				// We're shutting down, and the last generation is fully
+				// read. We're done.
+				return nil, false
+			}
+			// The previous gen has had all of its buffers flushed, and
+			// there's nothing else for us to read. Advance the generation
+			// we're reading from and try again.
+			trace.readerGen.Store(trace.gen.Load())
+			unlock(&trace.lock)
+
+			// Wake up anyone waiting for us to be done with this generation.
+			//
+			// Do this after reading gen to make sure we can't have the trace
+			// advance until we've read it.
+			if raceenabled {
+				// See comment above in the shutdown case.
+				racerelease(unsafe.Pointer(&trace.doneSema[gen%2]))
+			}
+			semrelease(&trace.doneSema[gen%2])
+
+			// Reacquire the lock and go back to the top of the loop.
+			lock(&trace.lock)
+			continue
+		}
+		// Wait for new data.
+		//
+		// We don't simply use a note because the scheduler
+		// executes this goroutine directly when it wakes up
+		// (also a note would consume an M).
+		//
+		// Before we drop the lock, clear the workAvailable flag. Work can
+		// only be queued with trace.lock held, so this is at least true until
+		// we drop the lock.
+		trace.workAvailable.Store(false)
+		unlock(&trace.lock)
+		return nil, true
+	}
+	// Pull a buffer.
+	tbuf := trace.full[gen%2].pop()
+	trace.reading = tbuf
+	unlock(&trace.lock)
+	return tbuf.arr[:tbuf.pos], false
+}
+
+// traceReader returns the trace reader that should be woken up, if any.
+// Callers should first check (traceEnabled() || traceShuttingDown()).
+//
+// This must run on the system stack because it acquires trace.lock.
+//
+//go:systemstack
+func traceReader() *g {
+	gp := traceReaderAvailable()
+	if gp == nil || !trace.reader.CompareAndSwapNoWB(gp, nil) {
+		return nil
+	}
+	return gp
+}
+
+// traceReaderAvailable returns the trace reader if it is not currently
+// scheduled and should be. Callers should first check that
+// (traceEnabled() || traceShuttingDown()) is true.
+func traceReaderAvailable() *g {
+	// There are three conditions under which we definitely want to schedule
+	// the reader:
+	// - The reader is lagging behind in finishing off the last generation.
+	//   In this case, trace buffers could even be empty, but the trace
+	//   advancer will be waiting on the reader, so we have to make sure
+	//   to schedule the reader ASAP.
+	// - The reader has pending work to process for its reader generation
+	//   (assuming readerGen is not lagging behind). Note that we also want
+	//   to be careful *not* to schedule the reader if there's no work to do.
+	// - The trace is shutting down. The trace stopper blocks on the reader
+	//   to finish, much like trace advancement.
+	//
+	// We also want to be careful not to schedule the reader if there's no
+	// reason to.
+	if trace.flushedGen.Load() == trace.readerGen.Load() || trace.workAvailable.Load() || trace.shutdown.Load() {
+		return trace.reader.Load()
+	}
+	return nil
+}
+
+// Trace advancer goroutine.
+var traceAdvancer traceAdvancerState
+
+type traceAdvancerState struct {
+	timer *wakeableSleep
+	done  chan struct{}
+}
+
+// start starts a new traceAdvancer.
+func (s *traceAdvancerState) start() {
+	// Start a goroutine to periodically advance the trace generation.
+	s.done = make(chan struct{})
+	s.timer = newWakeableSleep()
+	go func() {
+		for traceEnabled() {
+			// Set a timer to wake us up
+			s.timer.sleep(int64(debug.traceadvanceperiod))
+
+			// Try to advance the trace.
+			traceAdvance(false)
+		}
+		s.done <- struct{}{}
+	}()
+}
+
+// stop stops a traceAdvancer and blocks until it exits.
+func (s *traceAdvancerState) stop() {
+	s.timer.wake()
+	<-s.done
+	close(s.done)
+	s.timer.close()
+}
+
+// traceAdvancePeriod is the approximate period between
+// new generations.
+const defaultTraceAdvancePeriod = 1e9 // 1 second.
+
+// wakeableSleep manages a wakeable goroutine sleep.
+//
+// Users of this type must call init before first use and
+// close to free up resources. Once close is called, init
+// must be called before another use.
+type wakeableSleep struct {
+	timer *timer
+
+	// lock protects access to wakeup, but not send/recv on it.
+	lock   mutex
+	wakeup chan struct{}
+}
+
+// newWakeableSleep initializes a new wakeableSleep and returns it.
+func newWakeableSleep() *wakeableSleep {
+	s := new(wakeableSleep)
+	lockInit(&s.lock, lockRankWakeableSleep)
+	s.wakeup = make(chan struct{}, 1)
+	s.timer = new(timer)
+	s.timer.arg = s
+	s.timer.f = func(s any, _ uintptr) {
+		s.(*wakeableSleep).wake()
+	}
+	return s
+}
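+
+// Illustrative usage only, mirroring how the trace advancer and the CPU log
+// reader use this type; keepRunning, doPeriodicWork, and done are assumed:
+//
+//	s := newWakeableSleep()
+//	go func() {
+//		for keepRunning() {
+//			s.sleep(1_000_000_000) // ~1s in nanoseconds, or until s.wake()
+//			doPeriodicWork()
+//		}
+//		done <- struct{}{}
+//	}()
+//	// To shut down: arrange for keepRunning() to return false, then:
+//	s.wake()  // cut any in-progress sleep short
+//	<-done    // wait until nothing will sleep on s or call wake again
+//	s.close() // release the channel and timer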
+
+// sleep sleeps for the provided duration in nanoseconds or until
+// another goroutine calls wake.
+//
+// Must not be called by more than one goroutine at a time and
+// must not be called concurrently with close.
+func (s *wakeableSleep) sleep(ns int64) {
+	resetTimer(s.timer, nanotime()+ns)
+	lock(&s.lock)
+	if raceenabled {
+		raceacquire(unsafe.Pointer(&s.lock))
+	}
+	wakeup := s.wakeup
+	if raceenabled {
+		racerelease(unsafe.Pointer(&s.lock))
+	}
+	unlock(&s.lock)
+	<-wakeup
+	stopTimer(s.timer)
+}
+
+// wake awakens any goroutine sleeping on the timer.
+//
+// Safe for concurrent use with all other methods.
+func (s *wakeableSleep) wake() {
+	// Grab the wakeup channel, which may be nil if we're
+	// racing with close.
+	lock(&s.lock)
+	if raceenabled {
+		raceacquire(unsafe.Pointer(&s.lock))
+	}
+	if s.wakeup != nil {
+		// Non-blocking send.
+		//
+		// Others may also write to this channel and we don't
+		// want to block on the receiver waking up. This also
+		// effectively batches together wakeup notifications.
+		select {
+		case s.wakeup <- struct{}{}:
+		default:
+		}
+	}
+	if raceenabled {
+		racerelease(unsafe.Pointer(&s.lock))
+	}
+	unlock(&s.lock)
+}
+
+// close wakes any goroutine sleeping on the timer and prevents
+// further sleeping on it.
+//
+// Once close is called, the wakeableSleep must no longer be used.
+//
+// It must only be called once no goroutine is sleeping on the
+// timer *and* nothing else will call wake concurrently.
+func (s *wakeableSleep) close() {
+	// Set wakeup to nil so that a late timer ends up being a no-op.
+	lock(&s.lock)
+	if raceenabled {
+		raceacquire(unsafe.Pointer(&s.lock))
+	}
+	wakeup := s.wakeup
+	s.wakeup = nil
+
+	// Close the channel.
+	close(wakeup)
+
+	if raceenabled {
+		racerelease(unsafe.Pointer(&s.lock))
+	}
+	unlock(&s.lock)
+	return
+}
diff --git a/src/runtime/trace2buf.go b/src/runtime/trace2buf.go
new file mode 100644
index 0000000..54de5e1
--- /dev/null
+++ b/src/runtime/trace2buf.go
@@ -0,0 +1,259 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// Trace buffer management.
+
+package runtime
+
+import (
+	"runtime/internal/sys"
+	"unsafe"
+)
+
+// Maximum number of bytes required to encode uint64 in base-128.
+const traceBytesPerNumber = 10
+
+// traceWriter is the interface for writing all trace data.
+//
+// This type is passed around as a value, and all of its methods return
+// a new traceWriter. This allows for chaining together calls in a fluent-style
+// API. This is partly stylistic, and very slightly for performance, since
+// the compiler can destructure this value and pass it between calls as
+// just regular arguments. However, this style is not load-bearing, and
+// we can change it if it's deemed too error-prone.
+type traceWriter struct {
+	traceLocker
+	*traceBuf
+}
+
+// writer returns a traceWriter that writes into the current M's stream.
+func (tl traceLocker) writer() traceWriter {
+	return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2]}
+}
+
+// unsafeTraceWriter produces a traceWriter that doesn't lock the trace.
+//
+// It should only be used in contexts where either:
+// - Another traceLocker is held.
+// - trace.gen is prevented from advancing.
+//
+// buf may be nil.
+func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter {
+	return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
+}
+
+// end writes the buffer back into the m.
+func (w traceWriter) end() {
+	if w.mp == nil {
+		// Tolerate a nil mp. It makes code that creates traceWriters directly
+		// less error-prone.
+		return
+	}
+	w.mp.trace.buf[w.gen%2] = w.traceBuf
+}
+
+// ensure makes sure that at least maxSize bytes are available to write.
+//
+// Returns whether the buffer was flushed.
+func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
+	refill := w.traceBuf == nil || !w.available(maxSize)
+	if refill {
+		w = w.refill()
+	}
+	return w, refill
+}
+
+// flush puts w.traceBuf on the queue of full buffers.
+func (w traceWriter) flush() traceWriter {
+	systemstack(func() {
+		lock(&trace.lock)
+		if w.traceBuf != nil {
+			traceBufFlush(w.traceBuf, w.gen)
+		}
+		unlock(&trace.lock)
+	})
+	w.traceBuf = nil
+	return w
+}
+
+// refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer.
+func (w traceWriter) refill() traceWriter {
+	systemstack(func() {
+		lock(&trace.lock)
+		if w.traceBuf != nil {
+			traceBufFlush(w.traceBuf, w.gen)
+		}
+		if trace.empty != nil {
+			w.traceBuf = trace.empty
+			trace.empty = w.traceBuf.link
+			unlock(&trace.lock)
+		} else {
+			unlock(&trace.lock)
+			w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
+			if w.traceBuf == nil {
+				throw("trace: out of memory")
+			}
+		}
+	})
+	// Initialize the buffer.
+	ts := traceClockNow()
+	if ts <= w.traceBuf.lastTime {
+		ts = w.traceBuf.lastTime + 1
+	}
+	w.traceBuf.lastTime = ts
+	w.traceBuf.link = nil
+	w.traceBuf.pos = 0
+
+	// Tolerate a nil mp.
+	mID := ^uint64(0)
+	if w.mp != nil {
+		mID = uint64(w.mp.procid)
+	}
+
+	// Write the buffer's header.
+	w.byte(byte(traceEvEventBatch))
+	w.varint(uint64(w.gen))
+	w.varint(uint64(mID))
+	w.varint(uint64(ts))
+	w.traceBuf.lenPos = w.varintReserve()
+	return w
+}
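+
+// As a sketch of the bytes written by refill above, each batch starts with:
+//
+//	byte    traceEvEventBatch
+//	varint  generation
+//	varint  M ID (^uint64(0) when there is no M)
+//	varint  batch base timestamp
+//	traceBytesPerNumber bytes reserved for the batch length,
+//	        filled in later by traceBufFlush via varintAt
+//
+// Events appended afterwards encode their timestamps as deltas from
+// traceBuf.lastTime rather than as absolute values.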
+
+// traceBufQueue is a FIFO of traceBufs.
+type traceBufQueue struct {
+	head, tail *traceBuf
+}
+
+// push queues buf into queue of buffers.
+func (q *traceBufQueue) push(buf *traceBuf) {
+	buf.link = nil
+	if q.head == nil {
+		q.head = buf
+	} else {
+		q.tail.link = buf
+	}
+	q.tail = buf
+}
+
+// pop dequeues from the queue of buffers.
+func (q *traceBufQueue) pop() *traceBuf {
+	buf := q.head
+	if buf == nil {
+		return nil
+	}
+	q.head = buf.link
+	if q.head == nil {
+		q.tail = nil
+	}
+	buf.link = nil
+	return buf
+}
+
+func (q *traceBufQueue) empty() bool {
+	return q.head == nil
+}
+
+// traceBufHeader is per-P tracing buffer.
+type traceBufHeader struct {
+	link     *traceBuf // in trace.empty/full
+	lastTime traceTime // when we wrote the last event
+	pos      int       // next write offset in arr
+	lenPos   int       // position of batch length value
+}
+
+// traceBuf is per-M tracing buffer.
+//
+// TODO(mknyszek): Rename traceBuf to traceBatch, since they map 1:1 with event batches.
+type traceBuf struct {
+	_ sys.NotInHeap
+	traceBufHeader
+	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for trace data, indexed by traceBufHeader.pos
+}
+
+// byte appends v to buf.
+func (buf *traceBuf) byte(v byte) {
+	buf.arr[buf.pos] = v
+	buf.pos++
+}
+
+// varint appends v to buf in little-endian-base-128 encoding.
+func (buf *traceBuf) varint(v uint64) {
+	pos := buf.pos
+	arr := buf.arr[pos : pos+traceBytesPerNumber]
+	for i := range arr {
+		if v < 0x80 {
+			pos += i + 1
+			arr[i] = byte(v)
+			break
+		}
+		arr[i] = 0x80 | byte(v)
+		v >>= 7
+	}
+	buf.pos = pos
+}
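+
+// For example, varint(300) appends the two bytes 0xAC 0x02: 300 is
+// 0b1_0010_1100, so the low 7 bits (0101100) are emitted first with the
+// continuation bit set (0xAC), and the remaining value 2 fits in the final
+// byte (0x02).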
+
+// varintReserve reserves enough space in buf to hold any varint.
+//
+// Space reserved this way can be filled in with the varintAt method.
+func (buf *traceBuf) varintReserve() int {
+	p := buf.pos
+	buf.pos += traceBytesPerNumber
+	return p
+}
+
+// stringData appends s's data directly to buf.
+func (buf *traceBuf) stringData(s string) {
+	buf.pos += copy(buf.arr[buf.pos:], s)
+}
+
+func (buf *traceBuf) available(size int) bool {
+	return len(buf.arr)-buf.pos >= size
+}
+
+// varintAt writes varint v at byte position pos in buf. This always
+// consumes traceBytesPerNumber bytes. This is intended for when the caller
+// needs to reserve space for a varint but can't populate it until later.
+// Use varintReserve to reserve this space.
+func (buf *traceBuf) varintAt(pos int, v uint64) {
+	for i := 0; i < traceBytesPerNumber; i++ {
+		if i < traceBytesPerNumber-1 {
+			buf.arr[pos] = 0x80 | byte(v)
+		} else {
+			buf.arr[pos] = byte(v)
+		}
+		v >>= 7
+		pos++
+	}
+	if v != 0 {
+		throw("v could not fit in traceBytesPerNumber")
+	}
+}
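+
+// For example, varintAt(pos, 1) writes 0x81, then eight 0x80 bytes, then a
+// final 0x00. A base-128 decoder still reads the value 1, but the encoding
+// always occupies exactly traceBytesPerNumber bytes, so it can fill space
+// reserved earlier by varintReserve without shifting any data.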
+
+// traceBufFlush flushes a trace buffer.
+//
+// Must run on the system stack because trace.lock must be held.
+//
+//go:systemstack
+func traceBufFlush(buf *traceBuf, gen uintptr) {
+	assertLockHeld(&trace.lock)
+
+	// Write out the non-header length of the batch in the header.
+	//
+	// Note: the length of the header is not included to make it easier
+	// to calculate this value when deserializing and reserializing the
+	// trace. Varints can have additional padding of zero bits that is
+	// quite difficult to preserve, and if we include the header we
+	// force serializers to do more work. Nothing else actually needs
+	// padding.
+	buf.varintAt(buf.lenPos, uint64(buf.pos-(buf.lenPos+traceBytesPerNumber)))
+	trace.full[gen%2].push(buf)
+
+	// Notify the scheduler that there's work available and that the trace
+	// reader should be scheduled.
+	if !trace.workAvailable.Load() {
+		trace.workAvailable.Store(true)
+	}
+}
diff --git a/src/runtime/trace2cpu.go b/src/runtime/trace2cpu.go
new file mode 100644
index 0000000..4635662
--- /dev/null
+++ b/src/runtime/trace2cpu.go
@@ -0,0 +1,287 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// CPU profile -> trace
+
+package runtime
+
+// traceInitReadCPU initializes CPU profile -> tracer state for tracing.
+//
+// Returns a profBuf for reading from.
+func traceInitReadCPU() {
+	if traceEnabled() {
+		throw("traceInitReadCPU called with trace enabled")
+	}
+	// Create new profBuf for CPU samples that will be emitted as events.
+	// Format: after the timestamp, header is [pp.id, gp.goid, mp.procid].
+	trace.cpuLogRead[0] = newProfBuf(3, profBufWordCount, profBufTagCount)
+	trace.cpuLogRead[1] = newProfBuf(3, profBufWordCount, profBufTagCount)
+	// We must not acquire trace.signalLock outside of a signal handler: a
+	// profiling signal may arrive at any time and try to acquire it, leading to
+	// deadlock. Because we can't use that lock to protect updates to
+	// trace.cpuLogWrite itself (only uses of the structure it references), reads and
+	// writes of the pointer must be atomic. (And although this field is never
+	// the sole pointer to the profBuf value, it's best to allow a write barrier
+	// here.)
+	trace.cpuLogWrite[0].Store(trace.cpuLogRead[0])
+	trace.cpuLogWrite[1].Store(trace.cpuLogRead[1])
+}
+
+// traceStartReadCPU creates a goroutine to start reading CPU profile
+// data into an active trace.
+//
+// traceAdvanceSema must be held.
+func traceStartReadCPU() {
+	if !traceEnabled() {
+		throw("traceStartReadCPU called with trace disabled")
+	}
+	// Spin up the logger goroutine.
+	trace.cpuSleep = newWakeableSleep()
+	done := make(chan struct{}, 1)
+	go func() {
+		for traceEnabled() {
+			// Sleep here because traceReadCPU is non-blocking. This mirrors
+			// how the runtime/pprof package obtains CPU profile data.
+			//
+			// We can't do a blocking read here because Darwin can't do a
+			// wakeup from a signal handler, so all CPU profiling is just
+			// non-blocking. See #61768 for more details.
+			//
+			// Like the runtime/pprof package, even if that bug didn't exist
+			// we would still want to do a goroutine-level sleep in between
+			// reads to avoid frequent wakeups.
+			trace.cpuSleep.sleep(100_000_000)
+
+			tl := traceAcquire()
+			if !tl.ok() {
+				// Tracing disabled.
+				break
+			}
+			keepGoing := traceReadCPU(tl.gen)
+			traceRelease(tl)
+			if !keepGoing {
+				break
+			}
+		}
+		done <- struct{}{}
+	}()
+	trace.cpuLogDone = done
+}
+
+// traceStopReadCPU blocks until the trace CPU reading goroutine exits.
+//
+// traceAdvanceSema must be held, and tracing must be disabled.
+func traceStopReadCPU() {
+	if traceEnabled() {
+		throw("traceStopReadCPU called with trace enabled")
+	}
+
+	// Once we close the profbuf, we'll be in one of two situations:
+	// - The logger goroutine has already exited because it observed
+	//   that the trace is disabled.
+	// - The logger goroutine is asleep.
+	//
+	// Wake the goroutine so it can observe that the buffer is
+	// closed and exit.
+	trace.cpuLogWrite[0].Store(nil)
+	trace.cpuLogWrite[1].Store(nil)
+	trace.cpuLogRead[0].close()
+	trace.cpuLogRead[1].close()
+	trace.cpuSleep.wake()
+
+	// Wait until the logger goroutine exits.
+	<-trace.cpuLogDone
+
+	// Clear state for the next trace.
+	trace.cpuLogDone = nil
+	trace.cpuLogRead[0] = nil
+	trace.cpuLogRead[1] = nil
+	trace.cpuSleep.close()
+}
+
+// traceReadCPU attempts to read from the provided profBuf[gen%2] and write
+// into the trace. Returns true if there might be more to read or false
+// if the profBuf is closed or the caller should otherwise stop reading.
+//
+// The caller is responsible for ensuring that gen does not change. Either
+// the caller must be in a traceAcquire/traceRelease block, or must be calling
+// with traceAdvanceSema held.
+//
+// No more than one goroutine may be in traceReadCPU for the same
+// profBuf at a time.
+//
+// Must not run on the system stack because profBuf.read performs race
+// operations.
+func traceReadCPU(gen uintptr) bool {
+	var pcBuf [traceStackSize]uintptr
+
+	data, tags, eof := trace.cpuLogRead[gen%2].read(profBufNonBlocking)
+	for len(data) > 0 {
+		if len(data) < 4 || data[0] > uint64(len(data)) {
+			break // truncated profile
+		}
+		if data[0] < 4 || tags != nil && len(tags) < 1 {
+			break // malformed profile
+		}
+		if len(tags) < 1 {
+			break // mismatched profile records and tags
+		}
+
+		// Deserialize the data in the profile buffer.
+		recordLen := data[0]
+		timestamp := data[1]
+		ppid := data[2] >> 1
+		if hasP := (data[2] & 0b1) != 0; !hasP {
+			ppid = ^uint64(0)
+		}
+		goid := data[3]
+		mpid := data[4]
+		stk := data[5:recordLen]
+
+		// Overflow records always have their headers contain
+		// all zeroes.
+		isOverflowRecord := len(stk) == 1 && data[2] == 0 && data[3] == 0 && data[4] == 0
+
+		// Move the data iterator forward.
+		data = data[recordLen:]
+		// No support here for reporting goroutine tags at the moment; if
+		// that information is to be part of the execution trace, we'd
+		// probably want to see when the tags are applied and when they
+		// change, instead of only seeing them when we get a CPU sample.
+		tags = tags[1:]
+
+		if isOverflowRecord {
+			// Looks like an overflow record from the profBuf. Not much to
+			// do here, we only want to report full records.
+			continue
+		}
+
+		// Construct the stack for insertion to the stack table.
+		nstk := 1
+		pcBuf[0] = logicalStackSentinel
+		for ; nstk < len(pcBuf) && nstk-1 < len(stk); nstk++ {
+			pcBuf[nstk] = uintptr(stk[nstk-1])
+		}
+
+		// Write out a trace event.
+		w := unsafeTraceWriter(gen, trace.cpuBuf[gen%2])
+
+		// Ensure we have a place to write to.
+		var flushed bool
+		w, flushed = w.ensure(2 + 5*traceBytesPerNumber /* traceEvCPUSamples + traceEvCPUSample + timestamp + g + m + p + stack ID */)
+		if flushed {
+			// Annotate the batch as containing strings.
+			w.byte(byte(traceEvCPUSamples))
+		}
+
+		// Add the stack to the table.
+		stackID := trace.stackTab[gen%2].put(pcBuf[:nstk])
+
+		// Write out the CPU sample.
+		w.byte(byte(traceEvCPUSample))
+		w.varint(timestamp)
+		w.varint(mpid)
+		w.varint(ppid)
+		w.varint(goid)
+		w.varint(stackID)
+
+		trace.cpuBuf[gen%2] = w.traceBuf
+	}
+	return !eof
+}
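+
+// For reference, this is an illustrative summary of the record layout parsed
+// above. Each profBuf record is a sequence of uint64 words:
+//
+//	data[0]          record length in words, including this header
+//	data[1]          timestamp
+//	data[2]          P ID << 1 | 0b1, or 0b10 if there was no P
+//	data[3]          goroutine ID (0 if unknown)
+//	data[4]          M ID
+//	data[5:data[0]]  stack PCs
+//
+// An overflow record is identified by a one-word stack with data[2],
+// data[3], and data[4] all zero.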
+
+// traceCPUFlush flushes trace.cpuBuf[gen%2]. The caller must be certain that gen
+// has completed and that there are no more writers to it.
+//
+// Must run on the system stack because it flushes buffers and acquires trace.lock
+// to do so.
+//
+//go:systemstack
+func traceCPUFlush(gen uintptr) {
+	// Flush any remaining trace buffers containing CPU samples.
+	if buf := trace.cpuBuf[gen%2]; buf != nil {
+		lock(&trace.lock)
+		traceBufFlush(buf, gen)
+		unlock(&trace.lock)
+		trace.cpuBuf[gen%2] = nil
+	}
+}
+
+// traceCPUSample writes a CPU profile sample stack to the execution tracer's
+// profiling buffer. It is called from a signal handler, so is limited in what
+// it can do. mp must be the thread that is currently stopped in a signal.
+func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
+	if !traceEnabled() {
+		// Tracing is usually turned off; don't spend time acquiring the signal
+		// lock unless it's active.
+		return
+	}
+	if mp == nil {
+		// Drop samples that don't have an identifiable thread. We can't render
+		// this in any useful way anyway.
+		return
+	}
+
+	// We're going to conditionally write to one of two buffers based on the
+	// generation. To make sure we write to the correct one, we need to make
+	// sure this thread's trace seqlock is held. If it already is, then we're
+	// in the tracer and we can just take advantage of that. If it isn't, then
+	// we need to acquire it and read the generation.
+	locked := false
+	if mp.trace.seqlock.Load()%2 == 0 {
+		mp.trace.seqlock.Add(1)
+		locked = true
+	}
+	gen := trace.gen.Load()
+	if gen == 0 {
+		// Tracing is disabled, as it turns out. Release the seqlock if necessary
+		// and exit.
+		if locked {
+			mp.trace.seqlock.Add(1)
+		}
+		return
+	}
+
+	now := traceClockNow()
+	// The "header" here is the ID of the M that was running the profiled code,
+	// followed by the IDs of the P and goroutine. (For normal CPU profiling, it's
+	// usually the number of samples with the given stack.) Near syscalls, pp
+	// may be nil. Reporting goid of 0 is fine for either g0 or a nil gp.
+	var hdr [3]uint64
+	if pp != nil {
+		// Overflow records in profBuf have all header values set to zero. Make
+		// sure that real headers have at least one bit set.
+		hdr[0] = uint64(pp.id)<<1 | 0b1
+	} else {
+		hdr[0] = 0b10
+	}
+	if gp != nil {
+		hdr[1] = gp.goid
+	}
+	if mp != nil {
+		hdr[2] = uint64(mp.procid)
+	}
+
+	// Allow only one writer at a time
+	for !trace.signalLock.CompareAndSwap(0, 1) {
+		// TODO: Is it safe to osyield here? https://go.dev/issue/52672
+		osyield()
+	}
+
+	if log := trace.cpuLogWrite[gen%2].Load(); log != nil {
+		// Note: we don't pass a tag pointer here (how should profiling tags
+		// interact with the execution tracer?), but if we did we'd need to be
+		// careful about write barriers. See the long comment in profBuf.write.
+		log.write(nil, int64(now), hdr[:], stk)
+	}
+
+	trace.signalLock.Store(0)
+
+	// Release the seqlock if we acquired it earlier.
+	if locked {
+		mp.trace.seqlock.Add(1)
+	}
+}
diff --git a/src/runtime/trace2event.go b/src/runtime/trace2event.go
new file mode 100644
index 0000000..1f2a9f7
--- /dev/null
+++ b/src/runtime/trace2event.go
@@ -0,0 +1,194 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// Trace event writing API for trace2runtime.go.
+
+package runtime
+
+import (
+	"runtime/internal/sys"
+)
+
+// Event types in the trace, args are given in square brackets.
+//
+// Naming scheme:
+//   - Time range event pairs have suffixes "Begin" and "End".
+//   - "Start", "Stop", "Create", "Destroy", "Block", "Unblock"
+//     are suffixes reserved for scheduling resources.
+//
+// NOTE: If you add an event type, make sure you also update all
+// tables in this file!
+type traceEv uint8
+
+const (
+	traceEvNone traceEv = iota // unused
+
+	// Structural events.
+	traceEvEventBatch // start of per-M batch of events [generation, M ID, timestamp, batch length]
+	traceEvStacks     // start of a section of the stack table [...traceEvStack]
+	traceEvStack      // stack table entry [ID, ...{PC, func string ID, file string ID, line #}]
+	traceEvStrings    // start of a section of the string dictionary [...traceEvString]
+	traceEvString     // string dictionary entry [ID, length, string]
+	traceEvCPUSamples // start of a section of CPU samples [...traceEvCPUSample]
+	traceEvCPUSample  // CPU profiling sample [timestamp, M ID, P ID, goroutine ID, stack ID]
+	traceEvFrequency  // timestamp units per sec [freq]
+
+	// Procs.
+	traceEvProcsChange // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack ID]
+	traceEvProcStart   // start of P [timestamp, P ID, P seq]
+	traceEvProcStop    // stop of P [timestamp]
+	traceEvProcSteal   // P was stolen [timestamp, P ID, P seq, M ID]
+	traceEvProcStatus  // P status at the start of a generation [timestamp, P ID, status]
+
+	// Goroutines.
+	traceEvGoCreate            // goroutine creation [timestamp, new goroutine ID, new stack ID, stack ID]
+	traceEvGoCreateSyscall     // goroutine appears in syscall (cgo callback) [timestamp, new goroutine ID]
+	traceEvGoStart             // goroutine starts running [timestamp, goroutine ID, goroutine seq]
+	traceEvGoDestroy           // goroutine ends [timestamp]
+	traceEvGoDestroySyscall    // goroutine ends in syscall (cgo callback) [timestamp]
+	traceEvGoStop              // goroutine yields its time, but is runnable [timestamp, reason, stack ID]
+	traceEvGoBlock             // goroutine blocks [timestamp, reason, stack ID]
+	traceEvGoUnblock           // goroutine is unblocked [timestamp, goroutine ID, goroutine seq, stack ID]
+	traceEvGoSyscallBegin      // syscall enter [timestamp, P seq, stack ID]
+	traceEvGoSyscallEnd        // syscall exit [timestamp]
+	traceEvGoSyscallEndBlocked // syscall exit and it blocked at some point [timestamp]
+	traceEvGoStatus            // goroutine status at the start of a generation [timestamp, goroutine ID, M ID, status]
+
+	// STW.
+	traceEvSTWBegin // STW start [timestamp, kind]
+	traceEvSTWEnd   // STW done [timestamp]
+
+	// GC events.
+	traceEvGCActive           // GC active [timestamp, seq]
+	traceEvGCBegin            // GC start [timestamp, seq, stack ID]
+	traceEvGCEnd              // GC done [timestamp, seq]
+	traceEvGCSweepActive      // GC sweep active [timestamp, P ID]
+	traceEvGCSweepBegin       // GC sweep start [timestamp, stack ID]
+	traceEvGCSweepEnd         // GC sweep done [timestamp, swept bytes, reclaimed bytes]
+	traceEvGCMarkAssistActive // GC mark assist active [timestamp, goroutine ID]
+	traceEvGCMarkAssistBegin  // GC mark assist start [timestamp, stack ID]
+	traceEvGCMarkAssistEnd    // GC mark assist done [timestamp]
+	traceEvHeapAlloc          // gcController.heapLive change [timestamp, heap alloc in bytes]
+	traceEvHeapGoal           // gcController.heapGoal() change [timestamp, heap goal in bytes]
+
+	// Annotations.
+	traceEvGoLabel         // apply string label to current running goroutine [timestamp, label string ID]
+	traceEvUserTaskBegin   // trace.NewTask [timestamp, internal task ID, internal parent task ID, name string ID, stack ID]
+	traceEvUserTaskEnd     // end of a task [timestamp, internal task ID, stack ID]
+	traceEvUserRegionBegin // trace.{Start,With}Region [timestamp, internal task ID, name string ID, stack ID]
+	traceEvUserRegionEnd   // trace.{End,With}Region [timestamp, internal task ID, name string ID, stack ID]
+	traceEvUserLog         // trace.Log [timestamp, internal task ID, key string ID, stack, value string ID]
+)
+
+// traceArg is a simple wrapper type to help ensure that arguments passed
+// to traces are well-formed.
+type traceArg uint64
+
+// traceEventWriter is the high-level API for writing trace events.
+//
+// See the comment on traceWriter about style for more details as to why
+// this type and its methods are structured the way they are.
+type traceEventWriter struct {
+	w traceWriter
+}
+
+// eventWriter creates a new traceEventWriter. It is the main entrypoint for writing trace events.
+//
+// Before creating the event writer, this method will emit a status for the current goroutine
+// or proc if it exists, and if it hasn't had its status emitted yet. goStatus and procStatus indicate
+// what the status of goroutine or P should be immediately *before* the events that are about to
+// be written using the eventWriter (if they exist). No status will be written if there's no active
+// goroutine or P.
+//
+// Callers can elect to pass a constant value here if the status is clear (e.g. a goroutine must have
+// been Runnable before a GoStart). Otherwise, callers can query the status of either the goroutine
+// or P and pass the appropriate status.
+//
+// In this case, the default status should be traceGoBad or traceProcBad to help identify bugs sooner.
+func (tl traceLocker) eventWriter(goStatus traceGoStatus, procStatus traceProcStatus) traceEventWriter {
+	w := tl.writer()
+	if pp := tl.mp.p.ptr(); pp != nil && !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
+		w = w.writeProcStatus(uint64(pp.id), procStatus, pp.trace.inSweep)
+	}
+	if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
+		w = w.writeGoStatus(uint64(gp.goid), int64(tl.mp.procid), goStatus, gp.inMarkAssist)
+	}
+	return traceEventWriter{w}
+}
+
+// commit writes out a trace event and calls end. It's a helper to make the
+// common case of writing out a single event less error-prone.
+func (e traceEventWriter) commit(ev traceEv, args ...traceArg) {
+	e = e.write(ev, args...)
+	e.end()
+}
+
+// write writes an event into the trace.
+func (e traceEventWriter) write(ev traceEv, args ...traceArg) traceEventWriter {
+	e.w = e.w.event(ev, args...)
+	return e
+}
+
+// end finishes writing to the trace. The traceEventWriter must not be used after this call.
+func (e traceEventWriter) end() {
+	e.w.end()
+}
+
+// event writes ev with args into w's buffer, encoding the timestamp as a
+// delta from the previous event in the buffer.
+func (w traceWriter) event(ev traceEv, args ...traceArg) traceWriter {
+	// Make sure we have room.
+	w, _ = w.ensure(1 + (len(args)+1)*traceBytesPerNumber)
+
+	// Compute the timestamp diff that we'll put in the trace.
+	ts := traceClockNow()
+	if ts <= w.traceBuf.lastTime {
+		ts = w.traceBuf.lastTime + 1
+	}
+	tsDiff := uint64(ts - w.traceBuf.lastTime)
+	w.traceBuf.lastTime = ts
+
+	// Write out event.
+	w.byte(byte(ev))
+	w.varint(tsDiff)
+	for _, arg := range args {
+		w.varint(uint64(arg))
+	}
+	return w
+}
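+
+// As a concrete illustration of the encoding above, a call such as
+//
+//	w.event(traceEvGoBlock, reason, stackID)
+//
+// appends one byte for traceEvGoBlock followed by three varints: the
+// timestamp delta from the buffer's previous event, then the reason and
+// stackID arguments. The names reason and stackID are placeholders for
+// traceArg values supplied by the caller.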
+
+// stack takes a stack trace skipping the provided number of frames.
+// It then returns a traceArg representing that stack which may be
+// passed to write.
+func (tl traceLocker) stack(skip int) traceArg {
+	return traceArg(traceStack(skip, tl.mp, tl.gen))
+}
+
+// startPC takes a start PC for a goroutine and produces a unique
+// stack ID for it.
+//
+// It then returns a traceArg representing that stack which may be
+// passed to write.
+func (tl traceLocker) startPC(pc uintptr) traceArg {
+	// +PCQuantum because makeTraceFrame expects return PCs and subtracts PCQuantum.
+	return traceArg(trace.stackTab[tl.gen%2].put([]uintptr{
+		logicalStackSentinel,
+		startPCForTrace(pc) + sys.PCQuantum,
+	}))
+}
+
+// string returns a traceArg representing s which may be passed to write.
+// The string is assumed to be relatively short and popular, so it may be
+// stored for a while in the string dictionary.
+func (tl traceLocker) string(s string) traceArg {
+	return traceArg(trace.stringTab[tl.gen%2].put(tl.gen, s))
+}
+
+// uniqueString returns a traceArg representing s which may be passed to write.
+// The string is assumed to be unique or long, so it will be written out to
+// the trace eagerly.
+func (tl traceLocker) uniqueString(s string) traceArg {
+	return traceArg(trace.stringTab[tl.gen%2].emit(tl.gen, s))
+}
diff --git a/src/runtime/trace2map.go b/src/runtime/trace2map.go
new file mode 100644
index 0000000..195ec0b
--- /dev/null
+++ b/src/runtime/trace2map.go
@@ -0,0 +1,151 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// Simple hash table for tracing. Provides a mapping
+// between variable-length data and a unique ID. Subsequent
+// puts of the same data will return the same ID.
+//
+// Uses a region-based allocation scheme and assumes that the
+// table doesn't ever grow very big.
+//
+// This is definitely not a general-purpose hash table! It avoids
+// doing any high-level Go operations so it's safe to use even in
+// sensitive contexts.
+
+package runtime
+
+import (
+	"runtime/internal/atomic"
+	"runtime/internal/sys"
+	"unsafe"
+)
+
+type traceMap struct {
+	lock mutex // Must be acquired on the system stack
+	seq  atomic.Uint64
+	mem  traceRegionAlloc
+	tab  [1 << 13]atomic.UnsafePointer // *traceMapNode (can't use generics because it's notinheap)
+}
+
+type traceMapNode struct {
+	_    sys.NotInHeap
+	link atomic.UnsafePointer // *traceMapNode (can't use generics because it's notinheap)
+	hash uintptr
+	id   uint64
+	data []byte
+}
+
+// next is a type-safe wrapper around link.
+func (n *traceMapNode) next() *traceMapNode {
+	return (*traceMapNode)(n.link.Load())
+}
+
+// stealID steals an ID from the table, ensuring that it will not
+// appear in the table anymore.
+func (tab *traceMap) stealID() uint64 {
+	return tab.seq.Add(1)
+}
+
+// put inserts the data into the table.
+//
+// It's always safe to noescape data because its bytes are always copied.
+//
+// Returns a unique ID for the data and whether this is the first time
+// the data has been added to the map.
+func (tab *traceMap) put(data unsafe.Pointer, size uintptr) (uint64, bool) {
+	if size == 0 {
+		return 0, false
+	}
+	hash := memhash(data, 0, size)
+	// First, search the hashtable w/o the mutex.
+	if id := tab.find(data, size, hash); id != 0 {
+		return id, false
+	}
+	// Now, double check under the mutex.
+	// Switch to the system stack so we can acquire tab.lock
+	var id uint64
+	var added bool
+	systemstack(func() {
+		lock(&tab.lock)
+		if id = tab.find(data, size, hash); id != 0 {
+			unlock(&tab.lock)
+			return
+		}
+		// Create new record.
+		id = tab.seq.Add(1)
+		vd := tab.newTraceMapNode(data, size, hash, id)
+
+		// Insert it into the table.
+		//
+		// Update the link first, since the node isn't published yet.
+		// Then, store the node in the table as the new first node
+		// for the bucket.
+		part := int(hash % uintptr(len(tab.tab)))
+		vd.link.StoreNoWB(tab.tab[part].Load())
+		tab.tab[part].StoreNoWB(unsafe.Pointer(vd))
+		unlock(&tab.lock)
+
+		added = true
+	})
+	return id, added
+}
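+
+// Illustrative use only (buf is an assumed byte slice; the real stack and
+// string tables layer their own bookkeeping on top of traceMap):
+//
+//	var tab traceMap
+//	id1, added := tab.put(noescape(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
+//	id2, _ := tab.put(noescape(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
+//	// id1 == id2 != 0, and added is true only for the first call.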
+
+// find looks up data in the table, assuming hash is a hash of data.
+//
+// Returns 0 if the data is not found, and the unique ID for it if it is.
+func (tab *traceMap) find(data unsafe.Pointer, size, hash uintptr) uint64 {
+	part := int(hash % uintptr(len(tab.tab)))
+	for vd := tab.bucket(part); vd != nil; vd = vd.next() {
+		// Synchronization not necessary. Once published to the table, these
+		// values are immutable.
+		if vd.hash == hash && uintptr(len(vd.data)) == size {
+			if memequal(unsafe.Pointer(&vd.data[0]), data, size) {
+				return vd.id
+			}
+		}
+	}
+	return 0
+}
+
+// bucket is a type-safe wrapper for looking up a value in tab.tab.
+func (tab *traceMap) bucket(part int) *traceMapNode {
+	return (*traceMapNode)(tab.tab[part].Load())
+}
+
+func (tab *traceMap) newTraceMapNode(data unsafe.Pointer, size, hash uintptr, id uint64) *traceMapNode {
+	// Create data array.
+	sl := notInHeapSlice{
+		array: tab.mem.alloc(size),
+		len:   int(size),
+		cap:   int(size),
+	}
+	memmove(unsafe.Pointer(sl.array), data, size)
+
+	// Create metadata structure.
+	meta := (*traceMapNode)(unsafe.Pointer(tab.mem.alloc(unsafe.Sizeof(traceMapNode{}))))
+	*(*notInHeapSlice)(unsafe.Pointer(&meta.data)) = sl
+	meta.id = id
+	meta.hash = hash
+	return meta
+}
+
+// reset drops all allocated memory from the table and resets it.
+//
+// tab.lock must be held. Must run on the system stack because of this.
+//
+//go:systemstack
+func (tab *traceMap) reset() {
+	assertLockHeld(&tab.lock)
+	tab.mem.drop()
+	tab.seq.Store(0)
+	// Clear table without write barriers. The table consists entirely
+	// of notinheap pointers, so this is fine.
+	//
+	// Write barriers may theoretically call into the tracer and acquire
+	// the lock again, and this lock ordering is expressed in the static
+	// lock ranking checker.
+	memclrNoHeapPointers(unsafe.Pointer(&tab.tab), unsafe.Sizeof(tab.tab))
+}
diff --git a/src/runtime/trace2region.go b/src/runtime/trace2region.go
new file mode 100644
index 0000000..b514d12
--- /dev/null
+++ b/src/runtime/trace2region.go
@@ -0,0 +1,62 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// Simple not-in-heap bump-pointer traceRegion allocator.
+
+package runtime
+
+import (
+	"internal/goarch"
+	"runtime/internal/sys"
+	"unsafe"
+)
+
+// traceRegionAlloc is a non-thread-safe region allocator.
+// It holds a linked list of traceRegionAllocBlock.
+type traceRegionAlloc struct {
+	head *traceRegionAllocBlock
+	off  uintptr
+}
+
+// traceRegionAllocBlock is a block in traceRegionAlloc.
+//
+// traceRegionAllocBlock is allocated from non-GC'd memory, so it must not
+// contain heap pointers. Writes to pointers to traceRegionAllocBlocks do
+// not need write barriers.
+type traceRegionAllocBlock struct {
+	_    sys.NotInHeap
+	next *traceRegionAllocBlock
+	data [64<<10 - goarch.PtrSize]byte
+}
+
+// alloc allocates an n-byte block.
+func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
+	n = alignUp(n, goarch.PtrSize)
+	if a.head == nil || a.off+n > uintptr(len(a.head.data)) {
+		if n > uintptr(len(a.head.data)) {
+			throw("traceRegion: alloc too large")
+		}
+		block := (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys))
+		if block == nil {
+			throw("traceRegion: out of memory")
+		}
+		block.next = a.head
+		a.head = block
+		a.off = 0
+	}
+	p := &a.head.data[a.off]
+	a.off += n
+	return (*notInHeap)(unsafe.Pointer(p))
+}
+
+// drop frees all previously allocated memory and resets the allocator.
+func (a *traceRegionAlloc) drop() {
+	for a.head != nil {
+		block := a.head
+		a.head = block.next
+		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
+	}
+}
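
traceRegionAlloc is a plain bump-pointer arena: carve out fixed-size blocks, satisfy each request by rounding it up to pointer alignment and advancing an offset, and release everything at once in drop. A rough sketch of the same idea in ordinary Go, with byte slices standing in for the sysAlloc'd not-in-heap blocks (only the 64 KiB block size and the alignment rule are taken from the code above; the rest is illustrative):

	package main

	import "fmt"

	const (
		blockSize = 64 << 10 // roughly the block size above (which loses one header word to next)
		ptrSize   = 8
	)

	type region struct {
		blocks [][]byte // head is blocks[len(blocks)-1]
		off    int
	}

	// alloc returns an n-byte slice from the current block, starting a new
	// block when the current one can't fit the request.
	func (r *region) alloc(n int) []byte {
		n = (n + ptrSize - 1) &^ (ptrSize - 1) // align up to a pointer-sized boundary
		if n > blockSize {
			panic("region: alloc too large")
		}
		if len(r.blocks) == 0 || r.off+n > blockSize {
			r.blocks = append(r.blocks, make([]byte, blockSize))
			r.off = 0
		}
		head := r.blocks[len(r.blocks)-1]
		p := head[r.off : r.off+n]
		r.off += n
		return p
	}

	// drop releases every block at once; individual allocations are never freed.
	func (r *region) drop() {
		r.blocks, r.off = nil, 0
	}

	func main() {
		var r region
		a := r.alloc(13) // rounded up to 16 bytes
		b := r.alloc(3)  // rounded up to 8 bytes
		fmt.Println(len(a), len(b), r.off) // 16 8 24
		r.drop()
	}
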
diff --git a/src/runtime/trace2runtime.go b/src/runtime/trace2runtime.go
new file mode 100644
index 0000000..512e539
--- /dev/null
+++ b/src/runtime/trace2runtime.go
@@ -0,0 +1,704 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// Runtime -> tracer API.
+
+package runtime
+
+import (
+	"runtime/internal/atomic"
+	_ "unsafe" // for go:linkname
+)
+
+// gTraceState is per-G state for the tracer.
+type gTraceState struct {
+	traceSchedResourceState
+}
+
+// reset resets the gTraceState for a new goroutine.
+func (s *gTraceState) reset() {
+	s.seq = [2]uint64{}
+	// N.B. s.statusTraced is managed and cleared separately.
+}
+
+// mTraceState is per-M state for the tracer.
+type mTraceState struct {
+	seqlock atomic.Uintptr // seqlock indicating that this M is writing to a trace buffer.
+	buf     [2]*traceBuf   // Per-M traceBuf for writing. Indexed by trace.gen%2.
+	link    *m             // Snapshot of alllink or freelink.
+}
+
+// pTraceState is per-P state for the tracer.
+type pTraceState struct {
+	traceSchedResourceState
+
+	// mSyscallID is the ID of the M this was bound to before entering a syscall.
+	mSyscallID int64
+
+	// maySweep indicates that sweep events should be traced.
+	// This is used to defer the sweep start event until a span
+	// has actually been swept.
+	maySweep bool
+
+	// inSweep indicates that at least one sweep event has been traced.
+	inSweep bool
+
+	// swept and reclaimed track the number of bytes swept and reclaimed
+	// by sweeping in the current sweep loop (while maySweep was true).
+	swept, reclaimed uintptr
+}
+
+// traceLockInit initializes global trace locks.
+func traceLockInit() {
+	// Sharing a lock rank here is fine because they should never be accessed
+	// together. If they are, we want to find out immediately.
+	lockInit(&trace.stringTab[0].lock, lockRankTraceStrings)
+	lockInit(&trace.stringTab[0].tab.lock, lockRankTraceStrings)
+	lockInit(&trace.stringTab[1].lock, lockRankTraceStrings)
+	lockInit(&trace.stringTab[1].tab.lock, lockRankTraceStrings)
+	lockInit(&trace.stackTab[0].tab.lock, lockRankTraceStackTab)
+	lockInit(&trace.stackTab[1].tab.lock, lockRankTraceStackTab)
+	lockInit(&trace.lock, lockRankTrace)
+}
+
+// lockRankMayTraceFlush records the lock ranking effects of a
+// potential call to traceFlush.
+//
+// nosplit because traceAcquire is nosplit.
+//
+//go:nosplit
+func lockRankMayTraceFlush() {
+	lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
+}
+
+// traceBlockReason is an enumeration of reasons a goroutine might block.
+// This is the interface the rest of the runtime uses to tell the
+// tracer why a goroutine blocked. The tracer then propagates this information
+// into the trace however it sees fit.
+//
+// Note that traceBlockReasons should not be compared, since reasons that are
+// distinct by name may *not* be distinct by value.
+type traceBlockReason uint8
+
+const (
+	traceBlockGeneric traceBlockReason = iota
+	traceBlockForever
+	traceBlockNet
+	traceBlockSelect
+	traceBlockCondWait
+	traceBlockSync
+	traceBlockChanSend
+	traceBlockChanRecv
+	traceBlockGCMarkAssist
+	traceBlockGCSweep
+	traceBlockSystemGoroutine
+	traceBlockPreempted
+	traceBlockDebugCall
+	traceBlockUntilGCEnds
+	traceBlockSleep
+)
+
+var traceBlockReasonStrings = [...]string{
+	traceBlockGeneric:         "unspecified",
+	traceBlockForever:         "forever",
+	traceBlockNet:             "network",
+	traceBlockSelect:          "select",
+	traceBlockCondWait:        "sync.(*Cond).Wait",
+	traceBlockSync:            "sync",
+	traceBlockChanSend:        "chan send",
+	traceBlockChanRecv:        "chan receive",
+	traceBlockGCMarkAssist:    "GC mark assist wait for work",
+	traceBlockGCSweep:         "GC background sweeper wait",
+	traceBlockSystemGoroutine: "system goroutine wait",
+	traceBlockPreempted:       "preempted",
+	traceBlockDebugCall:       "wait for debug call",
+	traceBlockUntilGCEnds:     "wait until GC ends",
+	traceBlockSleep:           "sleep",
+}
+
+// traceGoStopReason is an enumeration of reasons a goroutine might yield.
+//
+// Note that traceGoStopReasons should not be compared, since reasons that are
+// distinct by name may *not* be distinct by value.
+type traceGoStopReason uint8
+
+const (
+	traceGoStopGeneric traceGoStopReason = iota
+	traceGoStopGoSched
+	traceGoStopPreempted
+)
+
+var traceGoStopReasonStrings = [...]string{
+	traceGoStopGeneric:   "unspecified",
+	traceGoStopGoSched:   "runtime.Gosched",
+	traceGoStopPreempted: "preempted",
+}
+
+// traceEnabled returns true if the trace is currently enabled.
+//
+//go:nosplit
+func traceEnabled() bool {
+	return trace.gen.Load() != 0
+}
+
+// traceShuttingDown returns true if the trace is currently shutting down.
+func traceShuttingDown() bool {
+	return trace.shutdown.Load()
+}
+
+// traceLocker represents an M writing trace events. While a traceLocker value
+// is valid, the tracer observes all operations on the G/M/P or trace events being
+// written as happening atomically.
+type traceLocker struct {
+	mp  *m
+	gen uintptr
+}
+
+// debugTraceReentrancy checks if the trace is reentrant.
+//
+// This is optional because throwing in a function makes it instantly
+// not inlineable, and we want traceAcquire to be inlineable for
+// low overhead when the trace is disabled.
+const debugTraceReentrancy = false
+
+// traceAcquire prepares this M for writing one or more trace events.
+//
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
+//go:nosplit
+func traceAcquire() traceLocker {
+	if !traceEnabled() {
+		return traceLocker{}
+	}
+	return traceAcquireEnabled()
+}
+
+// traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
+// broken out to make traceAcquire inlineable to keep the overhead of the tracer
+// when it's disabled low.
+//
+// nosplit because it's called by traceAcquire, which is nosplit.
+//
+//go:nosplit
+func traceAcquireEnabled() traceLocker {
+	// Any time we acquire a traceLocker, we may flush a trace buffer. But
+	// buffer flushes are rare. Record the lock edge even if it doesn't happen
+	// this time.
+	lockRankMayTraceFlush()
+
+	// Prevent preemption.
+	mp := acquirem()
+
+	// Acquire the trace seqlock. This prevents traceAdvance from moving forward
+	// until all Ms are observed to be outside of their seqlock critical section.
+	//
+	// Note: The seqlock is mutated here and also in traceCPUSample. If you update
+	// usage of the seqlock here, make sure to also look at what traceCPUSample is
+	// doing.
+	seq := mp.trace.seqlock.Add(1)
+	if debugTraceReentrancy && seq%2 != 1 {
+		throw("bad use of trace.seqlock or tracer is reentrant")
+	}
+
+	// N.B. This load of gen appears redundant with the one in traceEnabled.
+	// However, it's very important that the gen we use for writing to the trace
+	// is acquired under a traceLocker so traceAdvance can make sure no stale
+	// gen values are being used.
+	//
+	// Because we're doing this load again, it also means that the trace
+	// might end up being disabled when we load it. In that case we need to undo
+	// what we did and bail.
+	gen := trace.gen.Load()
+	if gen == 0 {
+		mp.trace.seqlock.Add(1)
+		releasem(mp)
+		return traceLocker{}
+	}
+	return traceLocker{mp, gen}
+}
+
+// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
+//
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
+//go:nosplit
+func (tl traceLocker) ok() bool {
+	return tl.gen != 0
+}
+
+// traceRelease indicates that this M is done writing trace events.
+//
+// nosplit because it's called on the syscall path when stack movement is forbidden.
+//
+//go:nosplit
+func traceRelease(tl traceLocker) {
+	seq := tl.mp.trace.seqlock.Add(1)
+	if debugTraceReentrancy && seq%2 != 0 {
+		print("runtime: seq=", seq, "\n")
+		throw("bad use of trace.seqlock")
+	}
+	releasem(tl.mp)
+}
+
+// traceExitingSyscall marks a goroutine as exiting the syscall slow path.
+//
+// Must be paired with a traceExitedSyscall call.
+func traceExitingSyscall() {
+	trace.exitingSyscall.Add(1)
+}
+
+// traceExitedSyscall marks a goroutine as having exited the syscall slow path.
+func traceExitedSyscall() {
+	trace.exitingSyscall.Add(-1)
+}
+
+// Gomaxprocs emits a ProcsChange event.
+func (tl traceLocker) Gomaxprocs(procs int32) {
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvProcsChange, traceArg(procs), tl.stack(1))
+}
+
+// ProcStart traces a ProcStart event.
+//
+// Must be called with a valid P.
+func (tl traceLocker) ProcStart() {
+	pp := tl.mp.p.ptr()
+	// Procs are typically started within the scheduler when there is no user goroutine. If there is a user goroutine,
+	// it must be in _Gsyscall because the only time a goroutine is allowed to have its Proc moved around from under it
+	// is during a syscall.
+	tl.eventWriter(traceGoSyscall, traceProcIdle).commit(traceEvProcStart, traceArg(pp.id), pp.trace.nextSeq(tl.gen))
+}
+
+// ProcStop traces a ProcStop event.
+func (tl traceLocker) ProcStop(pp *p) {
+	// The only time a goroutine is allowed to have its Proc moved around
+	// from under it is during a syscall.
+	tl.eventWriter(traceGoSyscall, traceProcRunning).commit(traceEvProcStop)
+}
+
+// GCActive traces a GCActive event.
+//
+// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
+// easily and only depends on where it's currently called.
+func (tl traceLocker) GCActive() {
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCActive, traceArg(trace.seqGC))
+	// N.B. Only one GC can be running at a time, so this is naturally
+	// serialized by the caller.
+	trace.seqGC++
+}
+
+// GCStart traces a GCBegin event.
+//
+// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
+// easily and only depends on where it's currently called.
+func (tl traceLocker) GCStart() {
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCBegin, traceArg(trace.seqGC), tl.stack(3))
+	// N.B. Only one GC can be running at a time, so this is naturally
+	// serialized by the caller.
+	trace.seqGC++
+}
+
+// GCDone traces a GCEnd event.
+//
+// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
+// easily and only depends on where it's currently called.
+func (tl traceLocker) GCDone() {
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCEnd, traceArg(trace.seqGC))
+	// N.B. Only one GC can be running at a time, so this is naturally
+	// serialized by the caller.
+	trace.seqGC++
+}
+
+// STWStart traces a STWBegin event.
+func (tl traceLocker) STWStart(reason stwReason) {
+	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
+	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWBegin, tl.string(reason.String()), tl.stack(2))
+}
+
+// STWDone traces a STWEnd event.
+func (tl traceLocker) STWDone() {
+	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
+	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWEnd)
+}
+
+// GCSweepStart prepares to trace a sweep loop. This does not
+// emit any events until traceGCSweepSpan is called.
+//
+// GCSweepStart must be paired with traceGCSweepDone and there
+// must be no preemption points between these two calls.
+//
+// Must be called with a valid P.
+func (tl traceLocker) GCSweepStart() {
+	// Delay the actual GCSweepBegin event until the first span
+	// sweep. If we don't sweep anything, don't emit any events.
+	pp := tl.mp.p.ptr()
+	if pp.trace.maySweep {
+		throw("double traceGCSweepStart")
+	}
+	pp.trace.maySweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
+}
+
+// GCSweepSpan traces the sweep of a single span. If this is
+// the first span swept since traceGCSweepStart was called, this
+// will emit a GCSweepBegin event.
+//
+// This may be called outside a traceGCSweepStart/traceGCSweepDone
+// pair; however, it will not emit any trace events in this case.
+//
+// Must be called with a valid P.
+func (tl traceLocker) GCSweepSpan(bytesSwept uintptr) {
+	pp := tl.mp.p.ptr()
+	if pp.trace.maySweep {
+		if pp.trace.swept == 0 {
+			tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepBegin, tl.stack(1))
+			pp.trace.inSweep = true
+		}
+		pp.trace.swept += bytesSwept
+	}
+}
+
+// GCSweepDone finishes tracing a sweep loop. If any memory was
+// swept (i.e. traceGCSweepSpan emitted an event) then this will emit
+// a GCSweepEnd event.
+//
+// Must be called with a valid P.
+func (tl traceLocker) GCSweepDone() {
+	pp := tl.mp.p.ptr()
+	if !pp.trace.maySweep {
+		throw("missing traceGCSweepStart")
+	}
+	if pp.trace.inSweep {
+		tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepEnd, traceArg(pp.trace.swept), traceArg(pp.trace.reclaimed))
+		pp.trace.inSweep = false
+	}
+	pp.trace.maySweep = false
+}
+
+// GCMarkAssistStart emits a MarkAssistBegin event.
+func (tl traceLocker) GCMarkAssistStart() {
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistBegin, tl.stack(1))
+}
+
+// GCMarkAssistDone emits a MarkAssistEnd event.
+func (tl traceLocker) GCMarkAssistDone() {
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistEnd)
+}
+
+// GoCreate emits a GoCreate event.
+func (tl traceLocker) GoCreate(newg *g, pc uintptr) {
+	newg.trace.setStatusTraced(tl.gen)
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoCreate, traceArg(newg.goid), tl.startPC(pc), tl.stack(2))
+}
+
+// GoStart emits a GoStart event.
+//
+// Must be called with a valid P.
+func (tl traceLocker) GoStart() {
+	gp := getg().m.curg
+	pp := gp.m.p
+	w := tl.eventWriter(traceGoRunnable, traceProcRunning)
+	w = w.write(traceEvGoStart, traceArg(gp.goid), gp.trace.nextSeq(tl.gen))
+	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
+		w = w.write(traceEvGoLabel, trace.markWorkerLabels[tl.gen%2][pp.ptr().gcMarkWorkerMode])
+	}
+	w.end()
+}
+
+// GoEnd emits a GoDestroy event.
+//
+// TODO(mknyszek): Rename this to GoDestroy.
+func (tl traceLocker) GoEnd() {
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoDestroy)
+}
+
+// GoSched emits a GoStop event with a GoSched reason.
+func (tl traceLocker) GoSched() {
+	tl.GoStop(traceGoStopGoSched)
+}
+
+// GoPreempt emits a GoStop event with a GoPreempted reason.
+func (tl traceLocker) GoPreempt() {
+	tl.GoStop(traceGoStopPreempted)
+}
+
+// GoStop emits a GoStop event with the provided reason.
+func (tl traceLocker) GoStop(reason traceGoStopReason) {
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(1))
+}
+
+// GoPark emits a GoBlock event with the provided reason.
+//
+// TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
+// that we have both, and waitReason is way more descriptive.
+func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
+}
+
+// GoUnpark emits a GoUnblock event.
+func (tl traceLocker) GoUnpark(gp *g, skip int) {
+	// Emit a GoWaiting status if necessary for the unblocked goroutine.
+	w := tl.eventWriter(traceGoRunning, traceProcRunning)
+	if !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
+		// Careful: don't use the event writer. We never want status or in-progress events
+		// to trigger more in-progress events.
+		w.w = w.w.writeGoStatus(gp.goid, -1, traceGoWaiting, gp.inMarkAssist)
+	}
+	w.commit(traceEvGoUnblock, traceArg(gp.goid), gp.trace.nextSeq(tl.gen), tl.stack(skip))
+}
+
+// GoSysCall emits a GoSyscallBegin event.
+//
+// Must be called with a valid P.
+func (tl traceLocker) GoSysCall() {
+	var skip int
+	switch {
+	case tracefpunwindoff():
+		// Unwind by skipping 1 frame relative to gp.syscallsp, which is captured 3 frames
+		// above this one. The frame pointer cases below produce the same result by hard
+		// coding the number of frames in between our caller and the actual syscall.
+		// TODO(felixge): Implement gp.syscallbp to avoid this workaround?
+		skip = 1
+	case GOOS == "solaris" || GOOS == "illumos":
+		// These platforms don't use a libc_read_trampoline.
+		skip = 3
+	default:
+		// Skip the extra trampoline frame used on most systems.
+		skip = 4
+	}
+	// Scribble down the M that the P is currently attached to.
+	pp := tl.mp.p.ptr()
+	pp.trace.mSyscallID = int64(tl.mp.procid)
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(skip))
+}
+
+// GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
+// if lostP is true.
+//
+// lostP must be true in all cases that a goroutine loses its P during a syscall.
+// This means it's not sufficient to check if it has no P. In particular, it needs to be
+// true in the following cases:
+// - The goroutine lost its P, it ran some other code, and then got it back. It's now running with that P.
+// - The goroutine lost its P and was unable to reacquire it, and is now running without a P.
+// - The goroutine lost its P and acquired a different one, and is now running with that P.
+func (tl traceLocker) GoSysExit(lostP bool) {
+	ev := traceEvGoSyscallEnd
+	procStatus := traceProcSyscall // Procs implicitly enter traceProcSyscall on GoSyscallBegin.
+	if lostP {
+		ev = traceEvGoSyscallEndBlocked
+		procStatus = traceProcRunning // If a G has a P when emitting this event, it reacquired a P and is indeed running.
+	} else {
+		tl.mp.p.ptr().trace.mSyscallID = -1
+	}
+	tl.eventWriter(traceGoSyscall, procStatus).commit(ev)
+}
+
+// ProcSteal indicates that our current M stole a P from another M.
+//
+// inSyscall indicates that we're stealing the P from a syscall context.
+//
+// The caller must have ownership of pp.
+func (tl traceLocker) ProcSteal(pp *p, inSyscall bool) {
+	// Grab the M ID we stole from.
+	mStolenFrom := pp.trace.mSyscallID
+	pp.trace.mSyscallID = -1
+
+	// The status of the proc and goroutine, if we need to emit one here, is not evident from the
+	// context of just emitting this event alone. There are two cases. Either we're trying to steal
+	// the P just to get its attention (e.g. STW or sysmon retake) or we're trying to steal a P for
+	// ourselves specifically to keep running. The two contexts look different, but can be summarized
+	// fairly succinctly. In the former, we're a regular running goroutine and proc, if we have either.
+	// In the latter, we're a goroutine in a syscall.
+	goStatus := traceGoRunning
+	procStatus := traceProcRunning
+	if inSyscall {
+		goStatus = traceGoSyscall
+		procStatus = traceProcSyscallAbandoned
+	}
+	w := tl.eventWriter(goStatus, procStatus)
+
+	// Emit the status of the P we're stealing. We may have *just* done this when creating the event
+	// writer but it's not guaranteed, even if inSyscall is true. Although it might seem like from a
+	// syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so
+	// it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves
+	// at all (e.g. entersyscall_gcwait).
+	if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
+		// Careful: don't use the event writer. We never want status or in-progress events
+		// to trigger more in-progress events.
+		w.w = w.w.writeProcStatus(uint64(pp.id), traceProcSyscallAbandoned, pp.trace.inSweep)
+	}
+	w.commit(traceEvProcSteal, traceArg(pp.id), pp.trace.nextSeq(tl.gen), traceArg(mStolenFrom))
+}
+
+// GoSysBlock is a no-op in the new tracer.
+func (tl traceLocker) GoSysBlock(pp *p) {
+}
+
+// HeapAlloc emits a HeapAlloc event.
+func (tl traceLocker) HeapAlloc(live uint64) {
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapAlloc, traceArg(live))
+}
+
+// HeapGoal reads the current heap goal and emits a HeapGoal event.
+func (tl traceLocker) HeapGoal() {
+	heapGoal := gcController.heapGoal()
+	if heapGoal == ^uint64(0) {
+		// Heap-based triggering is disabled.
+		heapGoal = 0
+	}
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapGoal, traceArg(heapGoal))
+}
+
+// OneNewExtraM is a no-op in the new tracer. This is worth keeping around though because
+// it's a good place to insert a thread-level event about the new extra M.
+func (tl traceLocker) OneNewExtraM(_ *g) {
+}
+
+// GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
+//
+// Unlike GoCreate, the caller must be running on gp.
+//
+// This occurs when C code calls into Go. On pthread platforms it occurs only when
+// a C thread calls into Go code for the first time.
+func (tl traceLocker) GoCreateSyscall(gp *g) {
+	// N.B. We should never trace a status for this goroutine (which we're currently running on),
+	// since we want this to appear like goroutine creation.
+	gp.trace.setStatusTraced(tl.gen)
+	tl.eventWriter(traceGoBad, traceProcBad).commit(traceEvGoCreateSyscall, traceArg(gp.goid))
+}
+
+// GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead.
+//
+// Must not have a P.
+//
+// This occurs when Go code returns back to C. On pthread platforms it occurs only when
+// the C thread is destroyed.
+func (tl traceLocker) GoDestroySyscall() {
+	// N.B. If we trace a status here, we must never have a P, and we must be on a goroutine
+	// that is in the syscall state.
+	tl.eventWriter(traceGoSyscall, traceProcBad).commit(traceEvGoDestroySyscall)
+}
+
+// To access runtime functions from runtime/trace.
+// See runtime/trace/annotation.go
+
+// trace_userTaskCreate emits a UserTaskCreate event.
+//
+//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
+func trace_userTaskCreate(id, parentID uint64, taskType string) {
+	tl := traceAcquire()
+	if !tl.ok() {
+		// Need to do this check because the caller won't have it.
+		return
+	}
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskBegin, traceArg(id), traceArg(parentID), tl.string(taskType), tl.stack(3))
+	traceRelease(tl)
+}
+
+// trace_userTaskEnd emits a UserTaskEnd event.
+//
+//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
+func trace_userTaskEnd(id uint64) {
+	tl := traceAcquire()
+	if !tl.ok() {
+		// Need to do this check because the caller won't have it.
+		return
+	}
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskEnd, traceArg(id), tl.stack(2))
+	traceRelease(tl)
+}
+
+// trace_userRegion emits a UserRegionBegin or UserRegionEnd event,
+// depending on mode (0 == Begin, 1 == End).
+//
+// TODO(mknyszek): Just make this two functions.
+//
+//go:linkname trace_userRegion runtime/trace.userRegion
+func trace_userRegion(id, mode uint64, name string) {
+	tl := traceAcquire()
+	if !tl.ok() {
+		// Need to do this check because the caller won't have it.
+		return
+	}
+	var ev traceEv
+	switch mode {
+	case 0:
+		ev = traceEvUserRegionBegin
+	case 1:
+		ev = traceEvUserRegionEnd
+	default:
+		return
+	}
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(id), tl.string(name), tl.stack(3))
+	traceRelease(tl)
+}
+
+// trace_userLog emits a UserLog event.
+//
+//go:linkname trace_userLog runtime/trace.userLog
+func trace_userLog(id uint64, category, message string) {
+	tl := traceAcquire()
+	if !tl.ok() {
+		// Need to do this check because the caller won't have it.
+		return
+	}
+	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserLog, traceArg(id), tl.string(category), tl.uniqueString(message), tl.stack(3))
+	traceRelease(tl)
+}
+
+// traceProcFree is called when a P is destroyed.
+//
+// This must run on the system stack to match the old tracer.
+//
+//go:systemstack
+func traceProcFree(_ *p) {
+}
+
+// traceThreadDestroy is called when a thread is removed from
+// sched.freem.
+//
+// mp must not be able to emit trace events anymore.
+//
+// sched.lock must be held to synchronize with traceAdvance.
+func traceThreadDestroy(mp *m) {
+	assertLockHeld(&sched.lock)
+
+	// Flush all outstanding buffers to maintain the invariant
+	// that an M only has active buffers while on sched.freem
+	// or allm.
+	//
+	// Perform a traceAcquire/traceRelease on behalf of mp to
+	// synchronize with the tracer trying to flush our buffer
+	// as well.
+	seq := mp.trace.seqlock.Add(1)
+	if debugTraceReentrancy && seq%2 != 1 {
+		throw("bad use of trace.seqlock or tracer is reentrant")
+	}
+	systemstack(func() {
+		lock(&trace.lock)
+		for i := range mp.trace.buf {
+			if mp.trace.buf[i] != nil {
+				// N.B. traceBufFlush accepts a generation, but it
+				// really just cares about gen%2.
+				traceBufFlush(mp.trace.buf[i], uintptr(i))
+				mp.trace.buf[i] = nil
+			}
+		}
+		unlock(&trace.lock)
+	})
+	seq1 := mp.trace.seqlock.Add(1)
+	if seq1 != seq+1 {
+		print("runtime: seq1=", seq1, "\n")
+		throw("bad use of trace.seqlock")
+	}
+}
+
+// Not used in the new tracer; solely for compatibility with the old tracer.
+// nosplit because it's called from exitsyscall without a P.
+//
+//go:nosplit
+func (_ traceLocker) RecordSyscallExitedTime(_ *g, _ *p) {
+}
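
traceAcquire and traceRelease above bracket every batch of event writes with a per-M counter that is odd while a write is in progress and even otherwise, which is what lets traceAdvance wait until no M is mid-write. A small stand-alone sketch of that odd/even discipline (the names are illustrative; the real code also disables preemption and re-checks trace.gen, which is omitted here):

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// writerSeqlock mirrors the odd/even discipline of mp.trace.seqlock:
	// odd means "inside a write", even means "outside".
	type writerSeqlock struct{ seq atomic.Uint64 }

	func (l *writerSeqlock) enter() {
		if l.seq.Add(1)%2 != 1 {
			panic("seqlock: nested or unbalanced enter")
		}
	}

	func (l *writerSeqlock) exit() {
		if l.seq.Add(1)%2 != 0 {
			panic("seqlock: unbalanced exit")
		}
	}

	// waitIdle spins until the writer is observed outside a critical section,
	// the way traceAdvance waits for every M to leave its seqlock region.
	func (l *writerSeqlock) waitIdle() {
		for l.seq.Load()%2 != 0 {
			// The runtime would yield or park here; busy-waiting is fine for a sketch.
		}
	}

	func main() {
		var l writerSeqlock
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				l.enter()
				// ... write trace events here ...
				l.exit()
			}
		}()
		wg.Wait()
		l.waitIdle()
		fmt.Println("writer idle, seq =", l.seq.Load()) // 2000
	}
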
diff --git a/src/runtime/trace2stack.go b/src/runtime/trace2stack.go
new file mode 100644
index 0000000..af6638f
--- /dev/null
+++ b/src/runtime/trace2stack.go
@@ -0,0 +1,294 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// Trace stack table and acquisition.
+
+package runtime
+
+import (
+	"internal/abi"
+	"internal/goarch"
+	"unsafe"
+)
+
+const (
+	// Maximum number of PCs in a single stack trace.
+	// Since events contain only a stack ID rather than the whole stack trace,
+	// we can allow quite large values here.
+	traceStackSize = 128
+
+	// logicalStackSentinel is a sentinel value at pcBuf[0] signifying that
+	// pcBuf[1:] holds a logical stack requiring no further processing. Any other
+	// value at pcBuf[0] represents a skip value to apply to the physical stack in
+	// pcBuf[1:] after inline expansion.
+	logicalStackSentinel = ^uintptr(0)
+)
+
+// traceStack captures a stack trace and registers it in the trace stack table.
+// It then returns its unique ID.
+//
+// skip controls the number of leaf frames to omit in order to hide tracer internals
+// from stack traces, see CL 5523.
+//
+// Avoid calling this function directly. gen needs to be the current generation
+// that this stack trace is being written out for, which needs to be synchronized with
+// generations moving forward. Prefer traceEventWriter.stack.
+func traceStack(skip int, mp *m, gen uintptr) uint64 {
+	var pcBuf [traceStackSize]uintptr
+
+	gp := getg()
+	curgp := gp.m.curg
+	nstk := 1
+	if tracefpunwindoff() || mp.hasCgoOnStack() {
+		// Slow path: Unwind using default unwinder. Used when frame pointer
+		// unwinding is unavailable or disabled (tracefpunwindoff), or might
+		// produce incomplete results or crashes (hasCgoOnStack). Note that no
+		// cgo callback related crashes have been observed yet. The main
+		// motivation is to take advantage of a potentially registered cgo
+		// symbolizer.
+		pcBuf[0] = logicalStackSentinel
+		if curgp == gp {
+			nstk += callers(skip+1, pcBuf[1:])
+		} else if curgp != nil {
+			nstk += gcallers(curgp, skip, pcBuf[1:])
+		}
+	} else {
+		// Fast path: Unwind using frame pointers.
+		pcBuf[0] = uintptr(skip)
+		if curgp == gp {
+			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:])
+		} else if curgp != nil {
+			// We're called on the g0 stack through mcall(fn) or systemstack(fn). To
+			// behave like gcallers above, we start unwinding from sched.bp, which
+			// points to the caller frame of the leaf frame on g's stack. The return
+			// address of the leaf frame is stored in sched.pc, which we manually
+			// capture here.
+			pcBuf[1] = curgp.sched.pc
+			nstk += 1 + fpTracebackPCs(unsafe.Pointer(curgp.sched.bp), pcBuf[2:])
+		}
+	}
+	if nstk > 0 {
+		nstk-- // skip runtime.goexit
+	}
+	if nstk > 0 && curgp.goid == 1 {
+		nstk-- // skip runtime.main
+	}
+	id := trace.stackTab[gen%2].put(pcBuf[:nstk])
+	return id
+}
+
+// traceStackTable maps stack traces (arrays of PCs) to unique ids.
+// It is lock-free for reading.
+type traceStackTable struct {
+	tab traceMap
+}
+
+// put returns a unique id for the stack trace pcs and caches it in the table,
+// if it sees the trace for the first time.
+func (t *traceStackTable) put(pcs []uintptr) uint64 {
+	if len(pcs) == 0 {
+		return 0
+	}
+	id, _ := t.tab.put(noescape(unsafe.Pointer(&pcs[0])), uintptr(len(pcs))*unsafe.Sizeof(uintptr(0)))
+	return id
+}
+
+// dump writes all previously cached stacks to trace buffers,
+// releases all memory and resets state. It must only be called once the caller
+// can guarantee that there are no more writers to the table.
+//
+// This must run on the system stack because it flushes buffers and thus
+// may acquire trace.lock.
+//
+//go:systemstack
+func (t *traceStackTable) dump(gen uintptr) {
+	w := unsafeTraceWriter(gen, nil)
+
+	// Iterate over the table.
+	//
+	// Do not acquire t.tab.lock. There's a conceptual lock cycle between acquiring this lock
+	// here and allocation-related locks. Specifically, this lock may be acquired when an event
+	// is emitted in allocation paths. Simultaneously, we might allocate here with the lock held,
+	// creating a cycle. In practice, this cycle is never exercised. Because the table is only
+	// dumped once there are no more writers, it's not possible for the cycle to occur. However
+	// the lockrank mode is not sophisticated enough to identify this, and if it's not possible
+	// for that cycle to happen, then it's also not possible for this to race with writers to
+	// the table.
+	for i := range t.tab.tab {
+		stk := t.tab.bucket(i)
+		for ; stk != nil; stk = stk.next() {
+			stack := unsafe.Slice((*uintptr)(unsafe.Pointer(&stk.data[0])), uintptr(len(stk.data))/unsafe.Sizeof(uintptr(0)))
+
+			// N.B. This might allocate, but that's OK because we're not writing to the M's buffer,
+			// but one we're about to create (with ensure).
+			frames := makeTraceFrames(gen, fpunwindExpand(stack))
+
+			// maxBytes is the maximum number of bytes required to hold the encoded stack,
+			// given that it contains len(frames) frames.
+			maxBytes := 1 + (2+4*len(frames))*traceBytesPerNumber
+
+			// Estimate the size of this record. This
+			// bound is pretty loose, but avoids counting
+			// lots of varint sizes.
+			//
+			// Add 1 because we might also write traceEvStacks.
+			var flushed bool
+			w, flushed = w.ensure(1 + maxBytes)
+			if flushed {
+				w.byte(byte(traceEvStacks))
+			}
+
+			// Emit stack event.
+			w.byte(byte(traceEvStack))
+			w.varint(uint64(stk.id))
+			w.varint(uint64(len(frames)))
+			for _, frame := range frames {
+				w.varint(uint64(frame.PC))
+				w.varint(frame.funcID)
+				w.varint(frame.fileID)
+				w.varint(frame.line)
+			}
+		}
+	}
+	// Still, hold the lock over reset. The callee expects it, even though it's
+	// not strictly necessary.
+	lock(&t.tab.lock)
+	t.tab.reset()
+	unlock(&t.tab.lock)
+
+	w.flush().end()
+}
+
+// makeTraceFrames returns the frames corresponding to pcs. It may
+// allocate and may emit trace events.
+func makeTraceFrames(gen uintptr, pcs []uintptr) []traceFrame {
+	frames := make([]traceFrame, 0, len(pcs))
+	ci := CallersFrames(pcs)
+	for {
+		f, more := ci.Next()
+		frames = append(frames, makeTraceFrame(gen, f))
+		if !more {
+			return frames
+		}
+	}
+}
+
+type traceFrame struct {
+	PC     uintptr
+	funcID uint64
+	fileID uint64
+	line   uint64
+}
+
+// makeTraceFrame sets up a traceFrame for a frame.
+func makeTraceFrame(gen uintptr, f Frame) traceFrame {
+	var frame traceFrame
+	frame.PC = f.PC
+
+	fn := f.Function
+	const maxLen = 1 << 10
+	if len(fn) > maxLen {
+		fn = fn[len(fn)-maxLen:]
+	}
+	frame.funcID = trace.stringTab[gen%2].put(gen, fn)
+	frame.line = uint64(f.Line)
+	file := f.File
+	if len(file) > maxLen {
+		file = file[len(file)-maxLen:]
+	}
+	frame.fileID = trace.stringTab[gen%2].put(gen, file)
+	return frame
+}
+
+// tracefpunwindoff returns true if frame pointer unwinding for the tracer is
+// disabled via GODEBUG or not supported by the architecture.
+func tracefpunwindoff() bool {
+	return debug.tracefpunwindoff != 0 || (goarch.ArchFamily != goarch.AMD64 && goarch.ArchFamily != goarch.ARM64)
+}
+
+// fpTracebackPCs populates pcBuf with the return addresses for each frame and
+// returns the number of PCs written to pcBuf. The returned PCs correspond to
+// "physical frames" rather than "logical frames"; that is, if A is inlined into
+// B, this will return a PC for only B.
+func fpTracebackPCs(fp unsafe.Pointer, pcBuf []uintptr) (i int) {
+	for i = 0; i < len(pcBuf) && fp != nil; i++ {
+		// return addr sits one word above the frame pointer
+		pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
+		// follow the frame pointer to the next one
+		fp = unsafe.Pointer(*(*uintptr)(fp))
+	}
+	return i
+}
+
+// fpunwindExpand checks if pcBuf contains logical frames (which include inlined
+// frames) or physical frames (produced by frame pointer unwinding) using a
+// sentinel value in pcBuf[0]. Logical frames are simply returned without the
+// sentinel. Physical frames are turned into logical frames via inline unwinding
+// and by applying the skip value that's stored in pcBuf[0].
+func fpunwindExpand(pcBuf []uintptr) []uintptr {
+	if len(pcBuf) > 0 && pcBuf[0] == logicalStackSentinel {
+		// pcBuf contains logical rather than inlined frames, skip has already been
+		// applied, just return it without the sentinel value in pcBuf[0].
+		return pcBuf[1:]
+	}
+
+	var (
+		lastFuncID = abi.FuncIDNormal
+		newPCBuf   = make([]uintptr, 0, traceStackSize)
+		skip       = pcBuf[0]
+		// skipOrAdd skips or appends retPC to newPCBuf and returns true if more
+		// pcs can be added.
+		skipOrAdd = func(retPC uintptr) bool {
+			if skip > 0 {
+				skip--
+			} else {
+				newPCBuf = append(newPCBuf, retPC)
+			}
+			return len(newPCBuf) < cap(newPCBuf)
+		}
+	)
+
+outer:
+	for _, retPC := range pcBuf[1:] {
+		callPC := retPC - 1
+		fi := findfunc(callPC)
+		if !fi.valid() {
+			// There is no funcInfo if callPC belongs to a C function. In this case
+			// we still keep the pc, but don't attempt to expand inlined frames.
+			if more := skipOrAdd(retPC); !more {
+				break outer
+			}
+			continue
+		}
+
+		u, uf := newInlineUnwinder(fi, callPC)
+		for ; uf.valid(); uf = u.next(uf) {
+			sf := u.srcFunc(uf)
+			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
+				// ignore wrappers
+			} else if more := skipOrAdd(uf.pc + 1); !more {
+				break outer
+			}
+			lastFuncID = sf.funcID
+		}
+	}
+	return newPCBuf
+}
+
+// startPCForTrace returns the start PC of a goroutine for tracing purposes.
+// If pc is a wrapper, it returns the PC of the wrapped function. Otherwise it
+// returns pc.
+func startPCForTrace(pc uintptr) uintptr {
+	f := findfunc(pc)
+	if !f.valid() {
+		return pc // may happen for locked g in extra M since its pc is 0.
+	}
+	w := funcdata(f, abi.FUNCDATA_WrapInfo)
+	if w == nil {
+		return pc // not a wrapper
+	}
+	return f.datap.textAddr(*(*uint32)(w))
+}
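
traceStack above stores stacks in one of two shapes: pcBuf[0] is either the sentinel ^uintptr(0), meaning the rest is already a logical stack, or a skip count to apply to the physical, frame-pointer-derived PCs that follow; fpunwindExpand later normalizes both. A tiny decoder mirroring just that convention, leaving out inline expansion (the sentinel value matches the code above; everything else is illustrative):

	package main

	import "fmt"

	const logicalStackSentinel = ^uintptr(0)

	// expand mimics the pcBuf convention: pcBuf[0] is either the sentinel
	// (the rest is already logical) or a skip count to apply to the
	// physical frames that follow.
	func expand(pcBuf []uintptr) []uintptr {
		if len(pcBuf) == 0 {
			return nil
		}
		if pcBuf[0] == logicalStackSentinel {
			return pcBuf[1:]
		}
		skip, frames := int(pcBuf[0]), pcBuf[1:]
		if skip > len(frames) {
			return nil
		}
		return frames[skip:] // inline expansion omitted in this sketch
	}

	func main() {
		logical := []uintptr{logicalStackSentinel, 0x40a000, 0x40b000}
		physical := []uintptr{2 /* skip */, 0xdead, 0xbeef, 0x40a000, 0x40b000}
		fmt.Printf("%#x\n", expand(logical))  // [0x40a000 0x40b000]
		fmt.Printf("%#x\n", expand(physical)) // [0x40a000 0x40b000]
	}
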
diff --git a/src/runtime/trace2status.go b/src/runtime/trace2status.go
new file mode 100644
index 0000000..5016e08
--- /dev/null
+++ b/src/runtime/trace2status.go
@@ -0,0 +1,214 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// Trace goroutine and P status management.
+
+package runtime
+
+import "runtime/internal/atomic"
+
+// traceGoStatus is the status of a goroutine.
+//
+// They correspond directly to the various goroutine
+// statuses.
+type traceGoStatus uint8
+
+const (
+	traceGoBad traceGoStatus = iota
+	traceGoRunnable
+	traceGoRunning
+	traceGoSyscall
+	traceGoWaiting
+)
+
+// traceProcStatus is the status of a P.
+//
+// They mostly correspond to the various P statuses.
+type traceProcStatus uint8
+
+const (
+	traceProcBad traceProcStatus = iota
+	traceProcRunning
+	traceProcIdle
+	traceProcSyscall
+
+	// traceProcSyscallAbandoned is a special case of
+	// traceProcSyscall. It's used in the very specific case
+	// where the first mention of a P in a generation is
+	// part of a ProcSteal event. If that's the first time
+	// it's mentioned, then there's no GoSyscallBegin to
+	// connect the P stealing back to at that point. This
+	// special state indicates this to the parser, so it
+	// doesn't try to find a GoSyscallEndBlocked that
+	// corresponds with the ProcSteal.
+	traceProcSyscallAbandoned
+)
+
+// writeGoStatus emits a GoStatus event as well as any active ranges on the goroutine.
+func (w traceWriter) writeGoStatus(goid uint64, mid int64, status traceGoStatus, markAssist bool) traceWriter {
+	// The status should never be bad. Some invariant must have been violated.
+	if status == traceGoBad {
+		print("runtime: goid=", goid, "\n")
+		throw("attempted to trace a bad status for a goroutine")
+	}
+
+	// Trace the status.
+	w = w.event(traceEvGoStatus, traceArg(goid), traceArg(uint64(mid)), traceArg(status))
+
+	// Trace any special ranges that are in-progress.
+	if markAssist {
+		w = w.event(traceEvGCMarkAssistActive, traceArg(goid))
+	}
+	return w
+}
+
+// writeProcStatusForP emits a ProcStatus event for the provided p based on its status.
+//
+// The caller must fully own pp and it must be prevented from transitioning (e.g. this can be
+// called by a forEachP callback or from a STW).
+func (w traceWriter) writeProcStatusForP(pp *p, inSTW bool) traceWriter {
+	if !pp.trace.acquireStatus(w.gen) {
+		return w
+	}
+	var status traceProcStatus
+	switch pp.status {
+	case _Pidle, _Pgcstop:
+		status = traceProcIdle
+		if pp.status == _Pgcstop && inSTW {
+			// N.B. a P that is running and currently has the world stopped will be
+			// in _Pgcstop, but we model it as running in the tracer.
+			status = traceProcRunning
+		}
+	case _Prunning:
+		status = traceProcRunning
+		// There's a short window wherein the goroutine may have entered _Gsyscall
+		// but it still owns the P (it's not in _Psyscall yet). The goroutine entering
+		// _Gsyscall is the tracer's signal that the P it's bound to is also in a syscall,
+		// so we need to emit a status that matches. See #64318.
+		if w.mp.p.ptr() == pp && w.mp.curg != nil && readgstatus(w.mp.curg)&^_Gscan == _Gsyscall {
+			status = traceProcSyscall
+		}
+	case _Psyscall:
+		status = traceProcSyscall
+	default:
+		throw("attempt to trace invalid or unsupported P status")
+	}
+	w = w.writeProcStatus(uint64(pp.id), status, pp.trace.inSweep)
+	return w
+}
+
+// writeProcStatus emits a ProcStatus event with all the provided information.
+//
+// The caller must have taken ownership of a P's status writing, and the P must be
+// prevented from transitioning.
+func (w traceWriter) writeProcStatus(pid uint64, status traceProcStatus, inSweep bool) traceWriter {
+	// The status should never be bad. Some invariant must have been violated.
+	if status == traceProcBad {
+		print("runtime: pid=", pid, "\n")
+		throw("attempted to trace a bad status for a proc")
+	}
+
+	// Trace the status.
+	w = w.event(traceEvProcStatus, traceArg(pid), traceArg(status))
+
+	// Trace any special ranges that are in-progress.
+	if inSweep {
+		w = w.event(traceEvGCSweepActive, traceArg(pid))
+	}
+	return w
+}
+
+// goStatusToTraceGoStatus translates the internal status to traceGoStatus.
+//
+// status must not be _Gdead or any status whose name has the suffix "_unused."
+func goStatusToTraceGoStatus(status uint32, wr waitReason) traceGoStatus {
+	// N.B. Ignore the _Gscan bit. We don't model it in the tracer.
+	var tgs traceGoStatus
+	switch status &^ _Gscan {
+	case _Grunnable:
+		tgs = traceGoRunnable
+	case _Grunning, _Gcopystack:
+		tgs = traceGoRunning
+	case _Gsyscall:
+		tgs = traceGoSyscall
+	case _Gwaiting, _Gpreempted:
+		// There are a number of cases where a G might end up in
+		// _Gwaiting even though it's actually running in a non-preemptible
+		// state, but needs to present itself as preempted to the
+		// garbage collector. In these cases, we're not going to
+		// emit an event, and we want these goroutines to appear in
+		// the final trace as if they're running, not blocked.
+		tgs = traceGoWaiting
+		if status == _Gwaiting &&
+			wr == waitReasonStoppingTheWorld ||
+			wr == waitReasonGCMarkTermination ||
+			wr == waitReasonGarbageCollection ||
+			wr == waitReasonTraceProcStatus ||
+			wr == waitReasonPageTraceFlush ||
+			wr == waitReasonGCWorkerActive {
+			tgs = traceGoRunning
+		}
+	case _Gdead:
+		throw("tried to trace dead goroutine")
+	default:
+		throw("tried to trace goroutine with invalid or unsupported status")
+	}
+	return tgs
+}
+
+// traceSchedResourceState is shared state for scheduling resources (i.e. fields common to
+// both Gs and Ps).
+type traceSchedResourceState struct {
+	// statusTraced indicates whether a status event was traced for this resource
+	// in a particular generation.
+	//
+	// There are 3 of these because when transitioning across generations, traceAdvance
+	// needs to be able to reliably observe whether a status was traced for the previous
+	// generation, while we need to clear the value for the next generation.
+	statusTraced [3]atomic.Uint32
+
+	// seq is the sequence counter for this scheduling resource's events.
+	// The purpose of the sequence counter is to establish a partial order between
+	// events that don't obviously happen serially (same M) in the stream of events.
+	//
+	// There are two of these so that we can reset the counter on each generation.
+	// This saves space in the resulting trace by keeping the counter small and allows
+	// GoStatus and GoCreate events to omit a sequence number (implicitly 0).
+	seq [2]uint64
+}
+
+// acquireStatus acquires the right to emit a Status event for the scheduling resource.
+func (r *traceSchedResourceState) acquireStatus(gen uintptr) bool {
+	if !r.statusTraced[gen%3].CompareAndSwap(0, 1) {
+		return false
+	}
+	r.readyNextGen(gen)
+	return true
+}
+
+// readyNextGen readies r for the generation following gen.
+func (r *traceSchedResourceState) readyNextGen(gen uintptr) {
+	nextGen := traceNextGen(gen)
+	r.seq[nextGen%2] = 0
+	r.statusTraced[nextGen%3].Store(0)
+}
+
+// statusWasTraced returns true if the sched resource's status was already acquired for tracing.
+func (r *traceSchedResourceState) statusWasTraced(gen uintptr) bool {
+	return r.statusTraced[gen%3].Load() != 0
+}
+
+// setStatusTraced indicates that the resource's status was already traced, for example
+// when a goroutine is created.
+func (r *traceSchedResourceState) setStatusTraced(gen uintptr) {
+	r.statusTraced[gen%3].Store(1)
+}
+
+// nextSeq returns the next sequence number for the resource.
+func (r *traceSchedResourceState) nextSeq(gen uintptr) traceArg {
+	r.seq[gen%2]++
+	return traceArg(r.seq[gen%2])
+}
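
The gen%3 and gen%2 indexing above exists so that the previous generation's statusTraced flag stays observable to traceAdvance while the slot for the next generation is pre-cleared, and so per-generation sequence counters restart at zero. A compact sketch of that indexing with plain atomics (it folds readyNextGen into acquireStatus and ignores the generation-wraparound handling of traceNextGen):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// resourceState mirrors the gen%3 / gen%2 indexing above: the slot for the
	// previous generation must stay readable while the next one is pre-cleared.
	type resourceState struct {
		statusTraced [3]atomic.Uint32
		seq          [2]uint64
	}

	func (r *resourceState) acquireStatus(gen uint64) bool {
		if !r.statusTraced[gen%3].CompareAndSwap(0, 1) {
			return false
		}
		// Ready the slots for gen+1 while gen-1's flag remains untouched.
		next := gen + 1
		r.seq[next%2] = 0
		r.statusTraced[next%3].Store(0)
		return true
	}

	func (r *resourceState) nextSeq(gen uint64) uint64 {
		r.seq[gen%2]++
		return r.seq[gen%2]
	}

	func main() {
		var r resourceState
		fmt.Println(r.acquireStatus(1))         // true: first status event in gen 1
		fmt.Println(r.acquireStatus(1))         // false: already traced this generation
		fmt.Println(r.nextSeq(1), r.nextSeq(1)) // 1 2
		fmt.Println(r.acquireStatus(2))         // true: slot was pre-cleared for gen 2
		fmt.Println(r.nextSeq(2))               // 1: per-generation counter was reset
	}
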
diff --git a/src/runtime/trace2string.go b/src/runtime/trace2string.go
new file mode 100644
index 0000000..cbb0ecf
--- /dev/null
+++ b/src/runtime/trace2string.go
@@ -0,0 +1,104 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// Trace string management.
+
+package runtime
+
+// Trace strings.
+
+const maxTraceStringLen = 1024
+
+// traceStringTable is a map of string -> unique ID that also manages
+// writing strings out into the trace.
+type traceStringTable struct {
+	// lock protects buf.
+	lock mutex
+	buf  *traceBuf // string batches to write out to the trace.
+
+	// tab is a mapping of string -> unique ID.
+	tab traceMap
+}
+
+// put adds a string to the table, emits it, and returns a unique ID for it.
+func (t *traceStringTable) put(gen uintptr, s string) uint64 {
+	// Put the string in the table.
+	ss := stringStructOf(&s)
+	id, added := t.tab.put(ss.str, uintptr(ss.len))
+	if added {
+		// Write the string to the buffer.
+		systemstack(func() {
+			t.writeString(gen, id, s)
+		})
+	}
+	return id
+}
+
+// emit emits a string and creates an ID for it, but doesn't add it to the table. Returns the ID.
+func (t *traceStringTable) emit(gen uintptr, s string) uint64 {
+	// Grab an ID and write the string to the buffer.
+	id := t.tab.stealID()
+	systemstack(func() {
+		t.writeString(gen, id, s)
+	})
+	return id
+}
+
+// writeString writes the string to t.buf.
+//
+// Must run on the systemstack because it may flush buffers and thus could acquire trace.lock.
+//
+//go:systemstack
+func (t *traceStringTable) writeString(gen uintptr, id uint64, s string) {
+	// Truncate the string if necessary.
+	if len(s) > maxTraceStringLen {
+		s = s[:maxTraceStringLen]
+	}
+
+	lock(&t.lock)
+	w := unsafeTraceWriter(gen, t.buf)
+
+	// Ensure we have a place to write to.
+	var flushed bool
+	w, flushed = w.ensure(2 + 2*traceBytesPerNumber + len(s) /* traceEvStrings + traceEvString + ID + len + string data */)
+	if flushed {
+		// Annotate the batch as containing strings.
+		w.byte(byte(traceEvStrings))
+	}
+
+	// Write out the string.
+	w.byte(byte(traceEvString))
+	w.varint(id)
+	w.varint(uint64(len(s)))
+	w.stringData(s)
+
+	// Store back buf if it was updated during ensure.
+	t.buf = w.traceBuf
+	unlock(&t.lock)
+}
+
+// reset clears the string table and flushes any buffers it has.
+//
+// Must be called only once the caller is certain nothing else will be
+// added to this table.
+//
+// Because it flushes buffers, this may acquire trace.lock and thus
+// must run on the systemstack.
+//
+//go:systemstack
+func (t *traceStringTable) reset(gen uintptr) {
+	if t.buf != nil {
+		lock(&trace.lock)
+		traceBufFlush(t.buf, gen)
+		unlock(&trace.lock)
+		t.buf = nil
+	}
+
+	// Reset the table.
+	lock(&t.tab.lock)
+	t.tab.reset()
+	unlock(&t.tab.lock)
+}
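
writeString above reserves 2 + 2*traceBytesPerNumber + len(s) bytes: one byte each for the batch annotation and the event, up to traceBytesPerNumber bytes for each of the two varints (ID and length; ten bytes is the worst case for a 64-bit varint), then the raw string bytes. A sketch of that record layout using encoding/binary varints; the event byte values below are made up, only the shape follows the code above:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// Illustrative event bytes; the real values live in the trace event tables.
	const (
		evStrings = 0x30
		evString  = 0x31
	)

	// appendStringRecord lays out a record the same way writeString does:
	// [evString] [varint id] [varint len] [len bytes of data].
	func appendStringRecord(buf []byte, id uint64, s string) []byte {
		buf = append(buf, evString)
		buf = binary.AppendUvarint(buf, id)
		buf = binary.AppendUvarint(buf, uint64(len(s)))
		return append(buf, s...)
	}

	func main() {
		// A fresh batch is annotated once as containing strings, then records follow.
		batch := []byte{evStrings}
		batch = appendStringRecord(batch, 1, "main.main")
		batch = appendStringRecord(batch, 2, "runtime.gopark")

		// Worst-case reservation per record, mirroring the w.ensure call above
		// (a 64-bit varint needs at most 10 bytes per number).
		const bytesPerNumber = 10
		bound := 2 + 2*bytesPerNumber + len("main.main")
		fmt.Println(len(batch), "bytes used; per-record bound:", bound)
	}
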
diff --git a/src/runtime/trace2time.go b/src/runtime/trace2time.go
new file mode 100644
index 0000000..8a4499e
--- /dev/null
+++ b/src/runtime/trace2time.go
@@ -0,0 +1,90 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.exectracer2
+
+// Trace time and clock.
+
+package runtime
+
+import "internal/goarch"
+
+// Timestamps in trace are produced through either nanotime or cputicks
+// and divided by traceTimeDiv. nanotime is used everywhere except on
+// platforms where osHasLowResClock is true, because the system clock
+// isn't granular enough to get useful information out of a trace in
+// many cases.
+//
+// This makes absolute values of timestamp diffs smaller, and so they are
+// encoded in fewer bytes.
+//
+// The target resolution in all cases is 64 nanoseconds.
+// This is based on the fact that fundamentally the execution tracer won't emit
+// events more frequently than roughly every 200 ns or so, because that's roughly
+// how long it takes to call through the scheduler.
+// We could be more aggressive and bump this up to 128 ns while still getting
+// useful data, but the extra bit doesn't save us that much and the headroom is
+// nice to have.
+//
+// Hitting this target resolution is easy in the nanotime case: just pick a
+// division of 64. In the cputicks case it's a bit more complex.
+//
+// For x86, on a 3 GHz machine, we'd want to divide by 3*64 to hit our target.
+// To keep the division operation efficient, we round that up to 4*64, or 256.
+// Given what cputicks represents, we use this on all other platforms except
+// for PowerPC.
+// The suggested increment frequency for PowerPC's time base register is
+// 512 MHz according to Power ISA v2.07 section 6.2, so we use 32 on ppc64
+// and ppc64le.
+const traceTimeDiv = (1-osHasLowResClockInt)*64 + osHasLowResClockInt*(256-224*(goarch.IsPpc64|goarch.IsPpc64le))
+
+// traceTime represents a timestamp for the trace.
+type traceTime uint64
+
+// traceClockNow returns a monotonic timestamp. The clock this function gets
+// the timestamp from is specific to tracing, and shouldn't be mixed with other
+// clock sources.
+//
+// nosplit because it's called from exitsyscall, which is nosplit.
+//
+//go:nosplit
+func traceClockNow() traceTime {
+	if osHasLowResClock {
+		return traceTime(cputicks() / traceTimeDiv)
+	}
+	return traceTime(nanotime() / traceTimeDiv)
+}
+
+// traceClockUnitsPerSecond estimates the number of trace clock units per
+// second that elapse.
+func traceClockUnitsPerSecond() uint64 {
+	if osHasLowResClock {
+		// We're using cputicks as our clock, so we need a real estimate.
+		return uint64(ticksPerSecond())
+	}
+	// Our clock is nanotime, so it's just the constant time division.
+	// (trace clock units / nanoseconds) * (1e9 nanoseconds / 1 second)
+	return uint64(1.0 / float64(traceTimeDiv) * 1e9)
+}
+
+// traceFrequency writes a batch with a single EvFrequency event.
+//
+// The frequency written is the number of trace clock units per second.
+func traceFrequency(gen uintptr) {
+	w := unsafeTraceWriter(gen, nil)
+
+	// Ensure we have a place to write to.
+	w, _ = w.ensure(1 + traceBytesPerNumber /* traceEvFrequency + frequency */)
+
+	// Write out the frequency event.
+	w.byte(byte(traceEvFrequency))
+	w.varint(traceClockUnitsPerSecond())
+
+	// Immediately flush the buffer.
+	systemstack(func() {
+		lock(&trace.lock)
+		traceBufFlush(w.traceBuf, gen)
+		unlock(&trace.lock)
+	})
+}
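
Evaluating the traceTimeDiv formula by hand gives the three divisors the comment describes, and for the nanotime case a resolution of 1e9/64 = 15,625,000 trace clock units per second. A quick check (the helper below just re-types the formula; it is not runtime code):

	package main

	import "fmt"

	func main() {
		// Re-evaluate the traceTimeDiv formula for its three cases.
		div := func(lowResClock, ppc64 int) int {
			return (1-lowResClock)*64 + lowResClock*(256-224*ppc64)
		}
		fmt.Println(div(0, 0)) // nanotime-based platforms: 64
		fmt.Println(div(1, 0)) // cputicks, e.g. x86 at ~3-4 GHz: 256
		fmt.Println(div(1, 1)) // cputicks on ppc64/ppc64le:      32

		// The nanotime-case rate reported by traceClockUnitsPerSecond.
		fmt.Println(uint64(1.0 / 64.0 * 1e9)) // 15625000 units per second
	}
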
diff --git a/src/runtime/trace_cgo_test.go b/src/runtime/trace_cgo_test.go
index 3f207aa..d6357b1 100644
--- a/src/runtime/trace_cgo_test.go
+++ b/src/runtime/trace_cgo_test.go
@@ -9,8 +9,10 @@
 import (
 	"bytes"
 	"fmt"
+	"internal/goexperiment"
 	"internal/testenv"
 	"internal/trace"
+	tracev2 "internal/trace/v2"
 	"io"
 	"os"
 	"runtime"
@@ -33,9 +35,17 @@
 		t.Fatal(err)
 	}
 
-	logs := map[string]*trace.Event{
-		"goCalledFromC":       nil,
-		"goCalledFromCThread": nil,
+	wantLogs := []string{
+		"goCalledFromC",
+		"goCalledFromCThread",
+	}
+	logs := make(map[string]*trace.Event)
+	for _, category := range wantLogs {
+		logs[category] = nil
+	}
+	logsV2 := make(map[string]*tracev2.Event)
+	for _, category := range wantLogs {
+		logsV2[category] = nil
 	}
 	for _, tracefpunwindoff := range []int{1, 0} {
 		env := fmt.Sprintf("GODEBUG=tracefpunwindoff=%d", tracefpunwindoff)
@@ -50,14 +60,25 @@
 		if err != nil {
 			t.Fatalf("failed to read trace: %s", err)
 		}
-		events := parseTrace(t, bytes.NewReader(traceData))
+		if goexperiment.ExecTracer2 {
+			for category := range logs {
+				event := mustFindLogV2(t, bytes.NewReader(traceData), category)
+				if wantEvent := logsV2[category]; wantEvent == nil {
+					logsV2[category] = &event
+				} else if got, want := dumpStackV2(&event), dumpStackV2(wantEvent); got != want {
+					t.Errorf("%q: got stack:\n%s\nwant stack:\n%s\n", category, got, want)
+				}
+			}
+		} else {
+			events := parseTrace(t, bytes.NewReader(traceData))
 
-		for category := range logs {
-			event := mustFindLog(t, events, category)
-			if wantEvent := logs[category]; wantEvent == nil {
-				logs[category] = event
-			} else if got, want := dumpStack(event), dumpStack(wantEvent); got != want {
-				t.Errorf("%q: got stack:\n%s\nwant stack:\n%s\n", category, got, want)
+			for category := range logs {
+				event := mustFindLog(t, events, category)
+				if wantEvent := logs[category]; wantEvent == nil {
+					logs[category] = event
+				} else if got, want := dumpStack(event), dumpStack(wantEvent); got != want {
+					t.Errorf("%q: got stack:\n%s\nwant stack:\n%s\n", category, got, want)
+				}
 			}
 		}
 	}
@@ -103,3 +124,40 @@
 	}
 	return res.Events
 }
+
+func mustFindLogV2(t *testing.T, trace io.Reader, category string) tracev2.Event {
+	r, err := tracev2.NewReader(trace)
+	if err != nil {
+		t.Fatalf("bad trace: %v", err)
+	}
+	var candidates []tracev2.Event
+	for {
+		ev, err := r.ReadEvent()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatalf("failed to parse trace: %v", err)
+		}
+		if ev.Kind() == tracev2.EventLog && ev.Log().Category == category {
+			candidates = append(candidates, ev)
+		}
+	}
+	if len(candidates) == 0 {
+		t.Fatalf("could not find log with category: %q", category)
+	} else if len(candidates) > 1 {
+		t.Fatalf("found more than one log with category: %q", category)
+	}
+	return candidates[0]
+}
+
+// dumpStackV2 returns e.Stack() as a string.
+func dumpStackV2(e *tracev2.Event) string {
+	var buf bytes.Buffer
+	e.Stack().Frames(func(f tracev2.StackFrame) bool {
+		file := strings.TrimPrefix(f.File, runtime.GOROOT())
+		fmt.Fprintf(&buf, "%s\n\t%s:%d\n", f.Func, file, f.Line)
+		return true
+	})
+	return buf.String()
+}
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 72200d4..1e5afc6 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -112,9 +112,6 @@
 	// flags are the flags to this unwind. Some of these are updated as we
 	// unwind (see the flags documentation).
 	flags unwindFlags
-
-	// cache is used to cache pcvalue lookups.
-	cache pcvalueCache
 }
 
 // init initializes u to start unwinding gp's stack and positions the
@@ -180,7 +177,7 @@
 			frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
 			frame.lr = 0
 		} else {
-			frame.pc = uintptr(*(*uintptr)(unsafe.Pointer(frame.sp)))
+			frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
 			frame.sp += goarch.PtrSize
 		}
 	}
@@ -202,7 +199,7 @@
 	f := findfunc(frame.pc)
 	if !f.valid() {
 		if flags&unwindSilentErrors == 0 {
-			print("runtime: g ", gp.goid, ": unknown pc ", hex(frame.pc), "\n")
+			print("runtime: g ", gp.goid, " gp=", gp, ": unknown pc ", hex(frame.pc), "\n")
 			tracebackHexdump(gp.stack, &frame, 0)
 		}
 		if flags&(unwindPrintErrors|unwindSilentErrors) == 0 {
@@ -307,7 +304,7 @@
 			case abi.FuncID_systemstack:
 				// systemstack returns normally, so just follow the
 				// stack transition.
-				if usesLR && funcspdelta(f, frame.pc, &u.cache) == 0 {
+				if usesLR && funcspdelta(f, frame.pc) == 0 {
 					// We're at the function prologue and the stack
 					// switch hasn't happened, or epilogue where we're
 					// about to return. Just unwind normally.
@@ -325,7 +322,7 @@
 				flag &^= abi.FuncFlagSPWrite
 			}
 		}
-		frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &u.cache))
+		frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc))
 		if !usesLR {
 			// On x86, call instruction pushes return PC before entering new function.
 			frame.fp += goarch.PtrSize
@@ -359,15 +356,12 @@
 		//
 		// uSE uPE inn | action
 		//  T   _   _  | frame.lr = 0
-		//  F   T   F  | frame.lr = 0; print
-		//  F   T   T  | frame.lr = 0
+		//  F   T   _  | frame.lr = 0
 		//  F   F   F  | print; panic
 		//  F   F   T  | ignore SPWrite
-		if u.flags&unwindSilentErrors == 0 && !innermost {
+		if u.flags&(unwindPrintErrors|unwindSilentErrors) == 0 && !innermost {
 			println("traceback: unexpected SPWRITE function", funcname(f))
-			if u.flags&unwindPrintErrors == 0 {
-				throw("traceback")
-			}
+			throw("traceback")
 		}
 		frame.lr = 0
 	} else {
@@ -510,7 +504,7 @@
 		frame.fn = f
 		if !f.valid() {
 			frame.pc = x
-		} else if funcspdelta(f, frame.pc, &u.cache) == 0 {
+		} else if funcspdelta(f, frame.pc) == 0 {
 			frame.lr = x
 		}
 	}
@@ -630,7 +624,7 @@
 		cgoN := u.cgoCallers(cgoBuf[:])
 
 		// TODO: Why does &u.cache cause u to escape? (Same in traceback2)
-		for iu, uf := newInlineUnwinder(f, u.symPC(), noEscapePtr(&u.cache)); n < len(pcBuf) && uf.valid(); uf = iu.next(uf) {
+		for iu, uf := newInlineUnwinder(f, u.symPC()); n < len(pcBuf) && uf.valid(); uf = iu.next(uf) {
 			sf := iu.srcFunc(uf)
 			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(u.calleeFuncID) {
 				// ignore wrappers
@@ -680,7 +674,7 @@
 	}
 
 	liveInfo := funcdata(f, abi.FUNCDATA_ArgLiveInfo)
-	liveIdx := pcdatavalue(f, abi.PCDATA_ArgLiveIndex, pc, nil)
+	liveIdx := pcdatavalue(f, abi.PCDATA_ArgLiveIndex, pc)
 	startOffset := uint8(0xff) // smallest offset that needs liveness info (slots with a lower offset are always live)
 	if liveInfo != nil {
 		startOffset = *(*uint8)(liveInfo)
@@ -987,7 +981,7 @@
 	for ; u.valid(); u.next() {
 		lastN = 0
 		f := u.frame.fn
-		for iu, uf := newInlineUnwinder(f, u.symPC(), noEscapePtr(&u.cache)); uf.valid(); uf = iu.next(uf) {
+		for iu, uf := newInlineUnwinder(f, u.symPC()); uf.valid(); uf = iu.next(uf) {
 			sf := iu.srcFunc(uf)
 			callee := u.calleeFuncID
 			u.calleeFuncID = sf.funcID
@@ -1088,7 +1082,7 @@
 // due to only having access to the pcs at the time of the caller
 // goroutine being created.
 func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) {
-	u, uf := newInlineUnwinder(f, pc, nil)
+	u, uf := newInlineUnwinder(f, pc)
 	file, line := u.fileLine(uf)
 	printFuncName(u.srcFunc(uf).name())
 	print("(...)\n")
@@ -1183,6 +1177,8 @@
 }
 
 func goroutineheader(gp *g) {
+	level, _, _ := gotraceback()
+
 	gpstatus := readgstatus(gp)
 
 	isScan := gpstatus&_Gscan != 0
@@ -1206,7 +1202,16 @@
 	if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
 		waitfor = (nanotime() - gp.waitsince) / 60e9
 	}
-	print("goroutine ", gp.goid, " [", status)
+	print("goroutine ", gp.goid)
+	if gp.m != nil && gp.m.throwing >= throwTypeRuntime && gp == gp.m.curg || level >= 2 {
+		print(" gp=", gp)
+		if gp.m != nil {
+			print(" m=", gp.m.id, " mp=", gp.m)
+		} else {
+			print(" m=nil")
+		}
+	}
+	print(" [", status)
 	if isScan {
 		print(" (scan)")
 	}
@@ -1317,7 +1322,7 @@
 	if !f.valid() {
 		return false
 	}
-	if f.funcID == abi.FuncID_runtime_main || f.funcID == abi.FuncID_handleAsyncEvent {
+	if f.funcID == abi.FuncID_runtime_main || f.funcID == abi.FuncID_corostart || f.funcID == abi.FuncID_handleAsyncEvent {
 		return false
 	}
 	if f.funcID == abi.FuncID_runfinq {
diff --git a/src/runtime/traceback_test.go b/src/runtime/traceback_test.go
index 1617612..8cbccac 100644
--- a/src/runtime/traceback_test.go
+++ b/src/runtime/traceback_test.go
@@ -108,7 +108,13 @@
 		recover()
 	}()
 	ttiSigpanic2()
-	panic("did not panic")
+	// Without the condition below, the inliner might decide to de-prioritize
+	// the callsite above (since it would be on an "always leads to panic"
+	// path).
+	if alwaysTrue {
+		panic("did not panic")
+	}
+	return nil
 }
 func ttiSigpanic2() {
 	ttiSigpanic3()
@@ -118,6 +124,8 @@
 	*p = 3
 }
 
+var alwaysTrue = true
+
 //go:noinline
 func ttiWrapper1() *ttiResult {
 	var w ttiWrapper
@@ -411,6 +419,17 @@
 				"testTracebackArgs11b(0xffffffff?, 0xffffffff?, 0x3?, 0x4)",
 				"testTracebackArgs11b(0x1, 0x2, 0x3, 0x4)"),
 		},
+		// Make sure spilled slice data pointers are spilled to the right location
+		// to ensure we see it listed without a ?.
+		// See issue 64414.
+		{
+			func() int {
+				poisonStack()
+				return testTracebackArgsSlice(testTracebackArgsSliceBackingStore[:])
+			},
+			// Note: capacity of the slice might be junk, as it is not used.
+			fmt.Sprintf("testTracebackArgsSlice({%p, 0x2, ", &testTracebackArgsSliceBackingStore[0]),
+		},
 	}
 	for _, test := range tests {
 		n := test.fn()
@@ -442,7 +461,6 @@
 		return b.a + b.b + b.c + b.x[0] + b.x[1] + int(d[0]) + int(d[1]) + int(d[2])
 	}
 	return n
-
 }
 
 //go:noinline
@@ -659,6 +677,19 @@
 	return runtime.Stack(testTracebackArgsBuf[:], false)
 }
 
+// norace to avoid race instrumentation changing spill locations.
+// nosplit to avoid preemption or morestack spilling registers.
+//
+//go:norace
+//go:nosplit
+//go:noinline
+func testTracebackArgsSlice(a []int) int {
+	n := runtime.Stack(testTracebackArgsBuf[:], false)
+	return a[1] + n
+}
+
+var testTracebackArgsSliceBackingStore [2]int
+
 // Poison the arg area with deterministic values.
 //
 //go:noinline
diff --git a/src/runtime/unsafepoint_test.go b/src/runtime/unsafepoint_test.go
new file mode 100644
index 0000000..2c97ade
--- /dev/null
+++ b/src/runtime/unsafepoint_test.go
@@ -0,0 +1,122 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+	"internal/testenv"
+	"os"
+	"os/exec"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+// This is the function we'll be testing.
+// It has a simple write barrier in it.
+func setGlobalPointer() {
+	globalPointer = nil
+}
+
+var globalPointer *int
+
+func TestUnsafePoint(t *testing.T) {
+	testenv.MustHaveExec(t)
+	switch runtime.GOARCH {
+	case "amd64", "arm64":
+	default:
+		t.Skipf("test not enabled for %s", runtime.GOARCH)
+	}
+
+	// Get a reference we can use to ask the runtime about
+	// which of its instructions are unsafe preemption points.
+	f := runtime.FuncForPC(reflect.ValueOf(setGlobalPointer).Pointer())
+
+	// Disassemble the test function.
+	// Note that normally "go test runtime" would strip symbols
+	// and prevent this step from working. So there's a hack in
+	// cmd/go/internal/test that exempts runtime tests from
+	// symbol stripping.
+	cmd := exec.Command(testenv.GoToolPath(t), "tool", "objdump", "-s", "setGlobalPointer", os.Args[0])
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("can't objdump %v", err)
+	}
+	lines := strings.Split(string(out), "\n")[1:]
+
+	// Walk through assembly instructions, checking preemptible flags.
+	var entry uint64
+	var startedWB bool
+	var doneWB bool
+	instructionCount := 0
+	unsafeCount := 0
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		t.Logf("%s", line)
+		parts := strings.Fields(line)
+		if len(parts) < 4 {
+			continue
+		}
+		if !strings.HasPrefix(parts[0], "unsafepoint_test.go:") {
+			continue
+		}
+		pc, err := strconv.ParseUint(parts[1][2:], 16, 64)
+		if err != nil {
+			t.Fatalf("can't parse pc %s: %v", parts[1], err)
+		}
+		if entry == 0 {
+			entry = pc
+		}
+		// Note that some platforms do ASLR, so the PCs in the disassembly
+		// don't match PCs in the address space. Only offsets from function
+		// entry make sense.
+		unsafe := runtime.UnsafePoint(f.Entry() + uintptr(pc-entry))
+		t.Logf("unsafe: %v\n", unsafe)
+		instructionCount++
+		if unsafe {
+			unsafeCount++
+		}
+
+		// All the instructions inside the write barrier must be unpreemptible.
+		if startedWB && !doneWB && !unsafe {
+			t.Errorf("instruction %s must be marked unsafe, but isn't", parts[1])
+		}
+
+		// Detect whether we're in the write barrier.
+		switch runtime.GOARCH {
+		case "arm64":
+			if parts[3] == "MOVWU" {
+				// The unpreemptible region starts after the
+				// load of runtime.writeBarrier.
+				startedWB = true
+			}
+			if parts[3] == "MOVD" && parts[4] == "ZR," {
+				// The unpreemptible region ends after the
+				// write of nil.
+				doneWB = true
+			}
+		case "amd64":
+			if parts[3] == "CMPL" {
+				startedWB = true
+			}
+			if parts[3] == "MOVQ" && parts[4] == "$0x0," {
+				doneWB = true
+			}
+		}
+	}
+
+	if instructionCount == 0 {
+		t.Errorf("no instructions")
+	}
+	if unsafeCount == instructionCount {
+		t.Errorf("no interruptible instructions")
+	}
+	// Note that there are other instructions marked unpreemptible besides
+	// just the ones required by the write barrier. Those include possibly
+	// the preamble and postamble, as well as bleeding out from the
+	// write barrier proper into adjacent instructions (in both directions).
+	// Hopefully we can clean up the latter at some point.
+}
diff --git a/src/runtime/vdso_linux_arm64.go b/src/runtime/vdso_linux_arm64.go
index 2f003cd..f595952 100644
--- a/src/runtime/vdso_linux_arm64.go
+++ b/src/runtime/vdso_linux_arm64.go
@@ -14,7 +14,7 @@
 var vdsoLinuxVersion = vdsoVersionKey{"LINUX_2.6.39", 0x75fcb89}
 
 var vdsoSymbolKeys = []vdsoSymbolKey{
-	{"__kernel_clock_gettime", 0xd35ec75, 0x6e43a318, &vdsoClockgettimeSym},
+	{"__kernel_clock_gettime", 0xb0cd725, 0xdfa941fd, &vdsoClockgettimeSym},
 }
 
 // initialize to fall back to syscall
diff --git a/src/runtime/vdso_linux_s390x.go b/src/runtime/vdso_linux_s390x.go
index c1c0b1b..970ecd3 100644
--- a/src/runtime/vdso_linux_s390x.go
+++ b/src/runtime/vdso_linux_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && s390x
-// +build linux,s390x
 
 package runtime
 
diff --git a/src/runtime/vdso_test.go b/src/runtime/vdso_test.go
new file mode 100644
index 0000000..d025ba5
--- /dev/null
+++ b/src/runtime/vdso_test.go
@@ -0,0 +1,143 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (freebsd && (386 || amd64 || arm || arm64 || riscv64)) || (linux && (386 || amd64 || arm || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x))
+
+package runtime_test
+
+import (
+	"bytes"
+	"internal/testenv"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"syscall"
+	"testing"
+	"time"
+)
+
+// TestUsingVDSO tests that we are actually using the VDSO to fetch
+// the time.
+func TestUsingVDSO(t *testing.T) {
+	const calls = 100
+
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		// Fetch the time a lot.
+		var total int64
+		for i := 0; i < calls; i++ {
+			total += time.Now().UnixNano()
+		}
+		os.Exit(0)
+	}
+
+	t.Parallel()
+
+	// Look for strace in /bin or /usr/bin. Don't assume that some
+	// strace on PATH is the one that we want.
+	strace := "/bin/strace"
+	if _, err := os.Stat(strace); err != nil {
+		strace = "/usr/bin/strace"
+		if _, err := os.Stat(strace); err != nil {
+			t.Skipf("skipping test because strace not found: %v", err)
+		}
+	}
+
+	exe, err := os.Executable()
+	if err != nil {
+		t.Skipf("skipping because Executable failed: %v", err)
+	}
+
+	t.Logf("GO_WANT_HELPER_PROCESS=1 %s -f -e clock_gettime %s -test.run=^TestUsingVDSO$", strace, exe)
+	cmd := testenv.Command(t, strace, "-f", "-e", "clock_gettime", exe, "-test.run=^TestUsingVDSO$")
+	cmd = testenv.CleanCmdEnv(cmd)
+	cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
+	out, err := cmd.CombinedOutput()
+	if len(out) > 0 {
+		t.Logf("%s", out)
+	}
+	if err != nil {
+		if err := err.(*exec.ExitError); err != nil && err.Sys().(syscall.WaitStatus).Signaled() {
+			if !bytes.Contains(out, []byte("+++ killed by")) {
+				// strace itself occasionally crashes.
+				// Here, it exited with a signal, but
+				// the strace log didn't report any
+				// signal from the child process.
+				t.Log(err)
+				testenv.SkipFlaky(t, 63734)
+			}
+		}
+		t.Fatal(err)
+	}
+
+	if got := bytes.Count(out, []byte("gettime")); got >= calls {
+		t.Logf("found %d gettime calls, want < %d", got, calls)
+
+		// Try to double-check that a C program uses the VDSO.
+		tempdir := t.TempDir()
+		cfn := filepath.Join(tempdir, "time.c")
+		cexe := filepath.Join(tempdir, "time")
+		if err := os.WriteFile(cfn, []byte(vdsoCProgram), 0o644); err != nil {
+			t.Fatal(err)
+		}
+		cc := os.Getenv("CC")
+		if cc == "" {
+			cc, err = exec.LookPath("gcc")
+			if err != nil {
+				cc, err = exec.LookPath("clang")
+				if err != nil {
+					t.Skip("can't verify VDSO status, no C compiler")
+				}
+			}
+		}
+
+		t.Logf("%s -o %s %s", cc, cexe, cfn)
+		cmd = testenv.Command(t, cc, "-o", cexe, cfn)
+		cmd = testenv.CleanCmdEnv(cmd)
+		out, err = cmd.CombinedOutput()
+		if len(out) > 0 {
+			t.Logf("%s", out)
+		}
+		if err != nil {
+			t.Skipf("can't verify VDSO status, C compiled failed: %v", err)
+		}
+
+		t.Logf("%s -f -e clock_gettime %s", strace, cexe)
+		cmd = testenv.Command(t, strace, "-f", "-e", "clock_gettime", cexe)
+		cmd = testenv.CleanCmdEnv(cmd)
+		out, err = cmd.CombinedOutput()
+		if len(out) > 0 {
+			t.Logf("%s", out)
+		}
+		if err != nil {
+			t.Skipf("can't verify VDSO status, C program failed: %v", err)
+		}
+
+		if cgot := bytes.Count(out, []byte("gettime")); cgot >= 100 {
+			t.Logf("found %d gettime calls, want < %d", cgot, 100)
+			t.Log("C program does not use VDSO either")
+			return
+		}
+
+		// The Go program used the system call but the C
+		// program did not. This is a VDSO failure for Go.
+		t.Errorf("did not use VDSO system call")
+	}
+}
+
+const vdsoCProgram = `
+#include <stdio.h>
+#include <time.h>
+
+int main() {
+	int i;
+	time_t tot = 0;
+	for (i = 0; i < 100; i++) {
+		struct timespec ts;
+		clock_gettime(CLOCK_MONOTONIC, &ts);
+		tot += ts.tv_nsec;
+	}
+	printf("%d\n", (int)(tot));
+	return 0;
+}
+`
diff --git a/src/slices/example_test.go b/src/slices/example_test.go
index 3e76907..41d8890 100644
--- a/src/slices/example_test.go
+++ b/src/slices/example_test.go
@@ -51,9 +51,7 @@
 
 func ExampleCompactFunc() {
 	names := []string{"bob", "Bob", "alice", "Vera", "VERA"}
-	names = slices.CompactFunc(names, func(a, b string) bool {
-		return strings.ToLower(a) == strings.ToLower(b)
-	})
+	names = slices.CompactFunc(names, strings.EqualFold)
 	fmt.Println(names)
 	// Output:
 	// [bob alice Vera]
diff --git a/src/slices/slices.go b/src/slices/slices.go
index afeed0a..b0f048a 100644
--- a/src/slices/slices.go
+++ b/src/slices/slices.go
@@ -130,6 +130,8 @@
 // Insert panics if i is out of range.
 // This function is O(len(s) + len(v)).
 func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+	_ = s[i:] // bounds check
+
 	m := len(v)
 	if m == 0 {
 		return s
@@ -209,46 +211,48 @@
 }
 
 // Delete removes the elements s[i:j] from s, returning the modified slice.
-// Delete panics if s[i:j] is not a valid slice of s.
-// Delete is O(len(s)-j), so if many items must be deleted, it is better to
+// Delete panics if j > len(s) or s[i:j] is not a valid slice of s.
+// Delete is O(len(s)-i), so if many items must be deleted, it is better to
 // make a single call deleting them all together than to delete one at a time.
-// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
-// elements contain pointers you might consider zeroing those elements so that
-// objects they reference can be garbage collected.
+// Delete zeroes the elements s[len(s)-(j-i):len(s)].
 func Delete[S ~[]E, E any](s S, i, j int) S {
-	_ = s[i:j] // bounds check
+	_ = s[i:j:len(s)] // bounds check
 
-	return append(s[:i], s[j:]...)
+	if i == j {
+		return s
+	}
+
+	oldlen := len(s)
+	s = append(s[:i], s[j:]...)
+	clear(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
+	return s
 }
 
 // DeleteFunc removes any elements from s for which del returns true,
 // returning the modified slice.
-// When DeleteFunc removes m elements, it might not modify the elements
-// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
-// zeroing those elements so that objects they reference can be garbage
-// collected.
+// DeleteFunc zeroes the elements between the new length and the original length.
 func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
+	i := IndexFunc(s, del)
+	if i == -1 {
+		return s
+	}
 	// Don't start copying elements until we find one to delete.
-	for i, v := range s {
-		if del(v) {
-			j := i
-			for i++; i < len(s); i++ {
-				v = s[i]
-				if !del(v) {
-					s[j] = v
-					j++
-				}
-			}
-			return s[:j]
+	for j := i + 1; j < len(s); j++ {
+		if v := s[j]; !del(v) {
+			s[i] = v
+			i++
 		}
 	}
-	return s
+	clear(s[i:]) // zero/nil out the obsolete elements, for GC
+	return s[:i]
 }
 
 // Replace replaces the elements s[i:j] by the given v, and returns the
-// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+// modified slice.
+// Replace panics if j > len(s) or s[i:j] is not a valid slice of s.
+// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
 func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
-	_ = s[i:j] // verify that i:j is a valid subslice
+	_ = s[i:j] // bounds check
 
 	if i == j {
 		return Insert(s, i, v...)
@@ -271,9 +275,8 @@
 	if i+len(v) <= j {
 		// Easy, as v fits in the deleted portion.
 		copy(r[i:], v)
-		if i+len(v) != j {
-			copy(r[i+len(v):], s[j:])
-		}
+		copy(r[i+len(v):], s[j:])
+		clear(s[tot:]) // zero/nil out the obsolete elements, for GC
 		return r
 	}
 
@@ -336,20 +339,15 @@
 // Clone returns a copy of the slice.
 // The elements are copied using assignment, so this is a shallow clone.
 func Clone[S ~[]E, E any](s S) S {
-	// Preserve nil in case it matters.
-	if s == nil {
-		return nil
-	}
-	return append(S([]E{}), s...)
+	// The s[:0:0] preserves nil in case it matters.
+	return append(s[:0:0], s...)
 }
 
 // Compact replaces consecutive runs of equal elements with a single copy.
 // This is like the uniq command found on Unix.
 // Compact modifies the contents of the slice s and returns the modified slice,
 // which may have a smaller length.
-// When Compact discards m elements in total, it might not modify the elements
-// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
-// zeroing those elements so that objects they reference can be garbage collected.
+// Compact zeroes the elements between the new length and the original length.
 func Compact[S ~[]E, E comparable](s S) S {
 	if len(s) < 2 {
 		return s
@@ -363,11 +361,13 @@
 			i++
 		}
 	}
+	clear(s[i:]) // zero/nil out the obsolete elements, for GC
 	return s[:i]
 }
 
 // CompactFunc is like [Compact] but uses an equality function to compare elements.
 // For runs of elements that compare equal, CompactFunc keeps the first one.
+// CompactFunc zeroes the elements between the new length and the original length.
 func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
 	if len(s) < 2 {
 		return s
@@ -381,6 +381,7 @@
 			i++
 		}
 	}
+	clear(s[i:]) // zero/nil out the obsolete elements, for GC
 	return s[:i]
 }
 
@@ -496,3 +497,19 @@
 		s[i], s[j] = s[j], s[i]
 	}
 }
+
+// Concat returns a new slice concatenating the passed in slices.
+func Concat[S ~[]E, E any](slices ...S) S {
+	size := 0
+	for _, s := range slices {
+		size += len(s)
+		if size < 0 {
+			panic("len out of range")
+		}
+	}
+	newslice := Grow[S](nil, size)
+	for _, s := range slices {
+		newslice = append(newslice, s...)
+	}
+	return newslice
+}
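
Illustrative sketch (not part of the patch, added for review context): how the new Concat helper and the tail-zeroing behavior of Delete look from a caller's point of view, assuming a toolchain that ships this version of the slices package.

package main

import (
	"fmt"
	"slices"
)

func main() {
	// Concat grows a fresh slice once and appends each input in order.
	joined := slices.Concat([]int{1, 2}, nil, []int{3})
	fmt.Println(joined) // [1 2 3]

	// Delete now clears the vacated tail of the backing array, so the
	// dropped pointer no longer keeps its referent alive.
	mem := []*int{new(int), new(int), new(int)}
	s := slices.Delete(mem, 1, 2)
	fmt.Println(len(s), mem[2] == nil) // 2 true
}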
diff --git a/src/slices/slices_test.go b/src/slices/slices_test.go
index e6da3b0..31d59ab 100644
--- a/src/slices/slices_test.go
+++ b/src/slices/slices_test.go
@@ -2,13 +2,14 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package slices
+package slices_test
 
 import (
 	"cmp"
 	"internal/race"
 	"internal/testenv"
 	"math"
+	. "slices"
 	"strings"
 	"testing"
 )
@@ -535,6 +536,33 @@
 	}
 }
 
+func TestInsertPanics(t *testing.T) {
+	a := [3]int{}
+	b := [1]int{}
+	for _, test := range []struct {
+		name string
+		s    []int
+		i    int
+		v    []int
+	}{
+		// There are no values.
+		{"with negative index", a[:1:1], -1, nil},
+		{"with out-of-bounds index and > cap", a[:1:1], 2, nil},
+		{"with out-of-bounds index and = cap", a[:1:2], 2, nil},
+		{"with out-of-bounds index and < cap", a[:1:3], 2, nil},
+
+		// There are values.
+		{"with negative index", a[:1:1], -1, b[:]},
+		{"with out-of-bounds index and > cap", a[:1:1], 2, b[:]},
+		{"with out-of-bounds index and = cap", a[:1:2], 2, b[:]},
+		{"with out-of-bounds index and < cap", a[:1:3], 2, b[:]},
+	} {
+		if !panics(func() { _ = Insert(test.s, test.i, test.v...) }) {
+			t.Errorf("Insert %s: got no panic, want panic", test.name)
+		}
+	}
+}
+
 var deleteTests = []struct {
 	s    []int
 	i, j int
@@ -638,6 +666,10 @@
 }
 
 func TestDeletePanics(t *testing.T) {
+	s := []int{0, 1, 2, 3, 4}
+	s = s[0:2]
+	_ = s[0:4] // this is a valid slice of s
+
 	for _, test := range []struct {
 		name string
 		s    []int
@@ -647,14 +679,50 @@
 		{"with negative second index", []int{42}, 1, -1},
 		{"with out-of-bounds first index", []int{42}, 2, 3},
 		{"with out-of-bounds second index", []int{42}, 0, 2},
+		{"with out-of-bounds both indexes", []int{42}, 2, 2},
 		{"with invalid i>j", []int{42}, 1, 0},
+		{"s[i:j] is valid and j > len(s)", s, 0, 4},
+		{"s[i:j] is valid and i == j > len(s)", s, 3, 3},
 	} {
-		if !panics(func() { Delete(test.s, test.i, test.j) }) {
+		if !panics(func() { _ = Delete(test.s, test.i, test.j) }) {
 			t.Errorf("Delete %s: got no panic, want panic", test.name)
 		}
 	}
 }
 
+func TestDeleteClearTail(t *testing.T) {
+	mem := []*int{new(int), new(int), new(int), new(int), new(int), new(int)}
+	s := mem[0:5] // there is 1 element beyond len(s), within cap(s)
+
+	s = Delete(s, 2, 4)
+
+	if mem[3] != nil || mem[4] != nil {
+		// Check that potential memory leak is avoided
+		t.Errorf("Delete: want nil discarded elements, got %v, %v", mem[3], mem[4])
+	}
+	if mem[5] == nil {
+		t.Errorf("Delete: want unchanged elements beyond original len, got nil")
+	}
+}
+
+func TestDeleteFuncClearTail(t *testing.T) {
+	mem := []*int{new(int), new(int), new(int), new(int), new(int), new(int)}
+	*mem[2], *mem[3] = 42, 42
+	s := mem[0:5] // there is 1 element beyond len(s), within cap(s)
+
+	s = DeleteFunc(s, func(i *int) bool {
+		return i != nil && *i == 42
+	})
+
+	if mem[3] != nil || mem[4] != nil {
+		// Check that potential memory leak is avoided
+		t.Errorf("DeleteFunc: want nil discarded elements, got %v, %v", mem[3], mem[4])
+	}
+	if mem[5] == nil {
+		t.Errorf("DeleteFunc: want unchanged elements beyond original len, got nil")
+	}
+}
+
 func TestClone(t *testing.T) {
 	s1 := []int{1, 2, 3}
 	s2 := Clone(s1)
@@ -758,6 +826,53 @@
 	}
 }
 
+func TestCompactClearTail(t *testing.T) {
+	one, two, three, four := 1, 2, 3, 4
+	mem := []*int{&one, &one, &two, &two, &three, &four}
+	s := mem[0:5] // there is 1 element beyond len(s), within cap(s)
+	copy := Clone(s)
+
+	s = Compact(s)
+
+	if want := []*int{&one, &two, &three}; !Equal(s, want) {
+		t.Errorf("Compact(%v) = %v, want %v", copy, s, want)
+	}
+
+	if mem[3] != nil || mem[4] != nil {
+		// Check that potential memory leak is avoided
+		t.Errorf("Compact: want nil discarded elements, got %v, %v", mem[3], mem[4])
+	}
+	if mem[5] != &four {
+		t.Errorf("Compact: want unchanged element beyond original len, got %v", mem[5])
+	}
+}
+
+func TestCompactFuncClearTail(t *testing.T) {
+	a, b, c, d, e, f := 1, 1, 2, 2, 3, 4
+	mem := []*int{&a, &b, &c, &d, &e, &f}
+	s := mem[0:5] // there is 1 element beyond len(s), within cap(s)
+	copy := Clone(s)
+
+	s = CompactFunc(s, func(x, y *int) bool {
+		if x == nil || y == nil {
+			return x == y
+		}
+		return *x == *y
+	})
+
+	if want := []*int{&a, &c, &e}; !Equal(s, want) {
+		t.Errorf("CompactFunc(%v) = %v, want %v", copy, s, want)
+	}
+
+	if mem[3] != nil || mem[4] != nil {
+		// Check that potential memory leak is avoided
+		t.Errorf("CompactFunc: want nil discarded elements, got %v, %v", mem[3], mem[4])
+	}
+	if mem[5] != &f {
+		t.Errorf("CompactFunc: want unchanged elements beyond original len, got %v", mem[5])
+	}
+}
+
 func BenchmarkCompactFunc_Large(b *testing.B) {
 	type Large [4 * 1024]byte
 
@@ -791,10 +906,10 @@
 	}
 
 	// Test number of allocations.
-	if n := testing.AllocsPerRun(100, func() { Grow(s2, cap(s2)-len(s2)) }); n != 0 {
+	if n := testing.AllocsPerRun(100, func() { _ = Grow(s2, cap(s2)-len(s2)) }); n != 0 {
 		t.Errorf("Grow should not allocate when given sufficient capacity; allocated %v times", n)
 	}
-	if n := testing.AllocsPerRun(100, func() { Grow(s2, cap(s2)-len(s2)+1) }); n != 1 {
+	if n := testing.AllocsPerRun(100, func() { _ = Grow(s2, cap(s2)-len(s2)+1) }); n != 1 {
 		errorf := t.Errorf
 		if race.Enabled || testenv.OptimizationOff() {
 			errorf = t.Logf // this allocates multiple times in race detector mode
@@ -806,7 +921,7 @@
 	var gotPanic bool
 	func() {
 		defer func() { gotPanic = recover() != nil }()
-		Grow(s1, -1)
+		_ = Grow(s1, -1)
 	}()
 	if !gotPanic {
 		t.Errorf("Grow(-1) did not panic; expected a panic")
@@ -907,6 +1022,10 @@
 }
 
 func TestReplacePanics(t *testing.T) {
+	s := []int{0, 1, 2, 3, 4}
+	s = s[0:2]
+	_ = s[0:4] // this is a valid slice of s
+
 	for _, test := range []struct {
 		name string
 		s, v []int
@@ -915,14 +1034,65 @@
 		{"indexes out of order", []int{1, 2}, []int{3}, 2, 1},
 		{"large index", []int{1, 2}, []int{3}, 1, 10},
 		{"negative index", []int{1, 2}, []int{3}, -1, 2},
+		{"s[i:j] is valid and j > len(s)", s, nil, 0, 4},
 	} {
 		ss, vv := Clone(test.s), Clone(test.v)
-		if !panics(func() { Replace(ss, test.i, test.j, vv...) }) {
+		if !panics(func() { _ = Replace(ss, test.i, test.j, vv...) }) {
 			t.Errorf("Replace %s: should have panicked", test.name)
 		}
 	}
 }
 
+func TestReplaceGrow(t *testing.T) {
+	// When Replace needs to allocate a new slice, we want the original slice
+	// to not be changed.
+	a, b, c, d, e, f := 1, 2, 3, 4, 5, 6
+	mem := []*int{&a, &b, &c, &d, &e, &f}
+	memcopy := Clone(mem)
+	s := mem[0:5] // there is 1 element beyond len(s), within cap(s)
+	copy := Clone(s)
+	original := s
+
+	// The new elements don't fit within cap(s), so Replace will allocate.
+	z := 99
+	s = Replace(s, 1, 3, &z, &z, &z, &z)
+
+	if want := []*int{&a, &z, &z, &z, &z, &d, &e}; !Equal(s, want) {
+		t.Errorf("Replace(%v, 1, 3, %v, %v, %v, %v) = %v, want %v", copy, &z, &z, &z, &z, s, want)
+	}
+
+	if !Equal(original, copy) {
+		t.Errorf("original slice has changed, got %v, want %v", original, copy)
+	}
+
+	if !Equal(mem, memcopy) {
+		// Changing the original tail s[len(s):cap(s)] is unwanted
+		t.Errorf("original backing memory has changed, got %v, want %v", mem, memcopy)
+	}
+}
+
+func TestReplaceClearTail(t *testing.T) {
+	a, b, c, d, e, f := 1, 2, 3, 4, 5, 6
+	mem := []*int{&a, &b, &c, &d, &e, &f}
+	s := mem[0:5] // there is 1 element beyond len(s), within cap(s)
+	copy := Clone(s)
+
+	y, z := 8, 9
+	s = Replace(s, 1, 4, &y, &z)
+
+	if want := []*int{&a, &y, &z, &e}; !Equal(s, want) {
+		t.Errorf("Replace(%v) = %v, want %v", copy, s, want)
+	}
+
+	if mem[4] != nil {
+		// Check that potential memory leak is avoided
+		t.Errorf("Replace: want nil discarded element, got %v", mem[4])
+	}
+	if mem[5] != &f {
+		t.Errorf("Replace: want unchanged elements beyond original len, got %v", mem[5])
+	}
+}
+
 func TestReplaceOverlap(t *testing.T) {
 	const N = 10
 	a := make([]int, N)
@@ -999,25 +1169,6 @@
 
 }
 
-func TestRotate(t *testing.T) {
-	const N = 10
-	s := make([]int, 0, N)
-	for n := 0; n < N; n++ {
-		for r := 0; r < n; r++ {
-			s = s[:0]
-			for i := 0; i < n; i++ {
-				s = append(s, i)
-			}
-			rotateLeft(s, r)
-			for i := 0; i < n; i++ {
-				if s[i] != (i+r)%n {
-					t.Errorf("expected n=%d r=%d i:%d want:%d got:%d", n, r, i, (i+r)%n, s[i])
-				}
-			}
-		}
-	}
-}
-
 func TestInsertGrowthRate(t *testing.T) {
 	b := make([]byte, 1)
 	maxCap := cap(b)
@@ -1073,3 +1224,101 @@
 		t.Errorf("Reverse(%v) = %v, want %v", S{4, 5, 6}, s2, want)
 	}
 }
+
+func TestConcat(t *testing.T) {
+	cases := []struct {
+		s    [][]int
+		want []int
+	}{
+		{
+			s:    [][]int{nil},
+			want: nil,
+		},
+		{
+			s:    [][]int{{1}},
+			want: []int{1},
+		},
+		{
+			s:    [][]int{{1}, {2}},
+			want: []int{1, 2},
+		},
+		{
+			s:    [][]int{{1}, nil, {2}},
+			want: []int{1, 2},
+		},
+	}
+	for _, tc := range cases {
+		got := Concat(tc.s...)
+		if !Equal(tc.want, got) {
+			t.Errorf("Concat(%v) = %v, want %v", tc.s, got, tc.want)
+		}
+		var sink []int
+		allocs := testing.AllocsPerRun(5, func() {
+			sink = Concat(tc.s...)
+		})
+		_ = sink
+		if allocs > 1 {
+			errorf := t.Errorf
+			if testenv.OptimizationOff() || race.Enabled {
+				errorf = t.Logf
+			}
+			errorf("Concat(%v) allocated %v times; want 1", tc.s, allocs)
+		}
+	}
+}
+
+func TestConcat_too_large(t *testing.T) {
+	// Use zero length element to minimize memory in testing
+	type void struct{}
+	cases := []struct {
+		lengths     []int
+		shouldPanic bool
+	}{
+		{
+			lengths:     []int{0, 0},
+			shouldPanic: false,
+		},
+		{
+			lengths:     []int{math.MaxInt, 0},
+			shouldPanic: false,
+		},
+		{
+			lengths:     []int{0, math.MaxInt},
+			shouldPanic: false,
+		},
+		{
+			lengths:     []int{math.MaxInt - 1, 1},
+			shouldPanic: false,
+		},
+		{
+			lengths:     []int{math.MaxInt - 1, 1, 1},
+			shouldPanic: true,
+		},
+		{
+			lengths:     []int{math.MaxInt, 1},
+			shouldPanic: true,
+		},
+		{
+			lengths:     []int{math.MaxInt, math.MaxInt},
+			shouldPanic: true,
+		},
+	}
+	for _, tc := range cases {
+		var r any
+		ss := make([][]void, 0, len(tc.lengths))
+		for _, l := range tc.lengths {
+			s := make([]void, l)
+			ss = append(ss, s)
+		}
+		func() {
+			defer func() {
+				r = recover()
+			}()
+			_ = Concat(ss...)
+		}()
+		if didPanic := r != nil; didPanic != tc.shouldPanic {
+			t.Errorf("slices.Concat(lens(%v)) got panic == %v",
+				tc.lengths, didPanic)
+		}
+	}
+}
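
Note for reviewers: the TestInsertPanics, TestDeletePanics and TestReplacePanics cases above rely on a panics(...) helper defined elsewhere in this test package and not shown in the diff. A minimal sketch of what such a helper presumably looks like (the exact in-tree implementation may differ):

package slices_test

// panics reports whether calling f results in a panic.
func panics(f func()) (didPanic bool) {
	defer func() { didPanic = recover() != nil }()
	f()
	return
}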
diff --git a/src/slices/sort.go b/src/slices/sort.go
index 822f2fc..d5e998c 100644
--- a/src/slices/sort.go
+++ b/src/slices/sort.go
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -generic
+
 package slices
 
 import (
diff --git a/src/slices/sort_benchmark_test.go b/src/slices/sort_benchmark_test.go
index 0f08842..d73a318 100644
--- a/src/slices/sort_benchmark_test.go
+++ b/src/slices/sort_benchmark_test.go
@@ -2,252 +2,14 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package slices
+package slices_test
 
 import (
 	"fmt"
-	"math/rand"
-	"sort"
-	"strconv"
-	"strings"
+	"slices"
 	"testing"
 )
 
-// These benchmarks compare sorting a large slice of int with sort.Ints vs.
-// slices.Sort
-func makeRandomInts(n int) []int {
-	rand.Seed(42)
-	ints := make([]int, n)
-	for i := 0; i < n; i++ {
-		ints[i] = rand.Intn(n)
-	}
-	return ints
-}
-
-func makeSortedInts(n int) []int {
-	ints := make([]int, n)
-	for i := 0; i < n; i++ {
-		ints[i] = i
-	}
-	return ints
-}
-
-func makeReversedInts(n int) []int {
-	ints := make([]int, n)
-	for i := 0; i < n; i++ {
-		ints[i] = n - i
-	}
-	return ints
-}
-
-const N = 100_000
-
-func BenchmarkSortInts(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		ints := makeRandomInts(N)
-		b.StartTimer()
-		sort.Ints(ints)
-	}
-}
-
-func makeSortedStrings(n int) []string {
-	x := make([]string, n)
-	for i := 0; i < n; i++ {
-		x[i] = strconv.Itoa(i)
-	}
-	Sort(x)
-	return x
-}
-
-func BenchmarkSlicesSortInts(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		ints := makeRandomInts(N)
-		b.StartTimer()
-		Sort(ints)
-	}
-}
-
-func BenchmarkSlicesSortInts_Sorted(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		ints := makeSortedInts(N)
-		b.StartTimer()
-		Sort(ints)
-	}
-}
-
-func BenchmarkSlicesSortInts_Reversed(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		ints := makeReversedInts(N)
-		b.StartTimer()
-		Sort(ints)
-	}
-}
-
-func BenchmarkIntsAreSorted(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		ints := makeSortedInts(N)
-		b.StartTimer()
-		sort.IntsAreSorted(ints)
-	}
-}
-
-func BenchmarkIsSorted(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		ints := makeSortedInts(N)
-		b.StartTimer()
-		IsSorted(ints)
-	}
-}
-
-// Since we're benchmarking these sorts against each other, make sure that they
-// generate similar results.
-func TestIntSorts(t *testing.T) {
-	ints := makeRandomInts(200)
-	ints2 := Clone(ints)
-
-	sort.Ints(ints)
-	Sort(ints2)
-
-	for i := range ints {
-		if ints[i] != ints2[i] {
-			t.Fatalf("ints2 mismatch at %d; %d != %d", i, ints[i], ints2[i])
-		}
-	}
-}
-
-// The following is a benchmark for sorting strings.
-
-// makeRandomStrings generates n random strings with alphabetic runes of
-// varying lengths.
-func makeRandomStrings(n int) []string {
-	rand.Seed(42)
-	var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
-	ss := make([]string, n)
-	for i := 0; i < n; i++ {
-		var sb strings.Builder
-		slen := 2 + rand.Intn(50)
-		for j := 0; j < slen; j++ {
-			sb.WriteRune(letters[rand.Intn(len(letters))])
-		}
-		ss[i] = sb.String()
-	}
-	return ss
-}
-
-func TestStringSorts(t *testing.T) {
-	ss := makeRandomStrings(200)
-	ss2 := Clone(ss)
-
-	sort.Strings(ss)
-	Sort(ss2)
-
-	for i := range ss {
-		if ss[i] != ss2[i] {
-			t.Fatalf("ss2 mismatch at %d; %s != %s", i, ss[i], ss2[i])
-		}
-	}
-}
-
-func BenchmarkSortStrings(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		ss := makeRandomStrings(N)
-		b.StartTimer()
-		sort.Strings(ss)
-	}
-}
-
-func BenchmarkSortStrings_Sorted(b *testing.B) {
-	ss := makeSortedStrings(N)
-	b.ResetTimer()
-
-	for i := 0; i < b.N; i++ {
-		sort.Strings(ss)
-	}
-}
-
-func BenchmarkSlicesSortStrings(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		ss := makeRandomStrings(N)
-		b.StartTimer()
-		Sort(ss)
-	}
-}
-
-func BenchmarkSlicesSortStrings_Sorted(b *testing.B) {
-	ss := makeSortedStrings(N)
-	b.ResetTimer()
-
-	for i := 0; i < b.N; i++ {
-		Sort(ss)
-	}
-}
-
-// These benchmarks compare sorting a slice of structs with sort.Sort vs.
-// slices.SortFunc.
-type myStruct struct {
-	a, b, c, d string
-	n          int
-}
-
-type myStructs []*myStruct
-
-func (s myStructs) Len() int           { return len(s) }
-func (s myStructs) Less(i, j int) bool { return s[i].n < s[j].n }
-func (s myStructs) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
-func makeRandomStructs(n int) myStructs {
-	rand.Seed(42)
-	structs := make([]*myStruct, n)
-	for i := 0; i < n; i++ {
-		structs[i] = &myStruct{n: rand.Intn(n)}
-	}
-	return structs
-}
-
-func TestStructSorts(t *testing.T) {
-	ss := makeRandomStructs(200)
-	ss2 := make([]*myStruct, len(ss))
-	for i := range ss {
-		ss2[i] = &myStruct{n: ss[i].n}
-	}
-
-	sort.Sort(ss)
-	SortFunc(ss2, func(a, b *myStruct) int { return a.n - b.n })
-
-	for i := range ss {
-		if *ss[i] != *ss2[i] {
-			t.Fatalf("ints2 mismatch at %d; %v != %v", i, *ss[i], *ss2[i])
-		}
-	}
-}
-
-func BenchmarkSortStructs(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		ss := makeRandomStructs(N)
-		b.StartTimer()
-		sort.Sort(ss)
-	}
-}
-
-func BenchmarkSortFuncStructs(b *testing.B) {
-	cmpFunc := func(a, b *myStruct) int { return a.n - b.n }
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		ss := makeRandomStructs(N)
-		b.StartTimer()
-		SortFunc(ss, cmpFunc)
-	}
-}
-
 func BenchmarkBinarySearchFloats(b *testing.B) {
 	for _, size := range []int{16, 32, 64, 128, 512, 1024} {
 		b.Run(fmt.Sprintf("Size%d", size), func(b *testing.B) {
@@ -259,12 +21,17 @@
 			needle := (floats[midpoint] + floats[midpoint+1]) / 2
 			b.ResetTimer()
 			for i := 0; i < b.N; i++ {
-				BinarySearch(floats, needle)
+				slices.BinarySearch(floats, needle)
 			}
 		})
 	}
 }
 
+type myStruct struct {
+	a, b, c, d string
+	n          int
+}
+
 func BenchmarkBinarySearchFuncStruct(b *testing.B) {
 	for _, size := range []int{16, 32, 64, 128, 512, 1024} {
 		b.Run(fmt.Sprintf("Size%d", size), func(b *testing.B) {
@@ -277,7 +44,7 @@
 			lessFunc := func(a, b *myStruct) int { return a.n - b.n }
 			b.ResetTimer()
 			for i := 0; i < b.N; i++ {
-				BinarySearchFunc(structs, needle, lessFunc)
+				slices.BinarySearchFunc(structs, needle, lessFunc)
 			}
 		})
 	}
diff --git a/src/slices/sort_test.go b/src/slices/sort_test.go
index af05859..7aaf954 100644
--- a/src/slices/sort_test.go
+++ b/src/slices/sort_test.go
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package slices
+package slices_test
 
 import (
 	"cmp"
 	"fmt"
 	"math"
 	"math/rand"
-	"sort"
+	. "slices"
 	"strconv"
 	"strings"
 	"testing"
@@ -17,7 +17,6 @@
 
 var ints = [...]int{74, 59, 238, -784, 9845, 959, 905, 0, 0, 42, 7586, -5467984, 7586}
 var float64s = [...]float64{74.3, 59.0, math.Inf(1), 238.2, -784.0, 2.3, math.Inf(-1), 9845.768, -959.7485, 905, 7.8, 7.8, 74.3, 59.0, math.Inf(1), 238.2, -784.0, 2.3}
-var float64sWithNaNs = [...]float64{74.3, 59.0, math.Inf(1), 238.2, -784.0, 2.3, math.NaN(), math.NaN(), math.Inf(-1), 9845.768, -959.7485, 905, 7.8, 7.8}
 var strs = [...]string{"", "Hello", "foo", "bar", "foo", "f00", "%*&^*&^&", "***"}
 
 func TestSortIntSlice(t *testing.T) {
@@ -47,23 +46,6 @@
 	}
 }
 
-func TestSortFloat64SliceWithNaNs(t *testing.T) {
-	data := float64sWithNaNs[:]
-	data2 := Clone(data)
-
-	Sort(data)
-	sort.Float64s(data2)
-
-	if !IsSorted(data) {
-		t.Error("IsSorted indicates data isn't sorted")
-	}
-
-	// Compare for equality using cmp.Compare, which considers NaNs equal.
-	if !EqualFunc(data, data2, func(a, b float64) bool { return cmp.Compare(a, b) == 0 }) {
-		t.Errorf("mismatch between Sort and sort.Float64: got %v, want %v", data, data2)
-	}
-}
-
 func TestSortStringSlice(t *testing.T) {
 	data := Clone(strs[:])
 	Sort(data)
diff --git a/src/sort/gen_sort_variants.go b/src/sort/gen_sort_variants.go
index 2c12b98..95fca70 100644
--- a/src/sort/gen_sort_variants.go
+++ b/src/sort/gen_sort_variants.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ignore
-// +build ignore
 
 // This program is run via "go generate" (via a directive in sort.go)
 // to generate implementation variants of the underlying sorting algorithm.
@@ -68,51 +67,9 @@
 	Funcs template.FuncMap
 }
 
-func main() {
-	genGeneric := flag.Bool("generic", false, "generate generic versions")
-	flag.Parse()
-
-	if *genGeneric {
-		generate(&Variant{
-			Name:       "generic_ordered",
-			Path:       "zsortordered.go",
-			Package:    "slices",
-			Imports:    "import \"cmp\"\n",
-			FuncSuffix: "Ordered",
-			TypeParam:  "[E cmp.Ordered]",
-			ExtraParam: "",
-			ExtraArg:   "",
-			DataType:   "[]E",
-			Funcs: template.FuncMap{
-				"Less": func(name, i, j string) string {
-					return fmt.Sprintf("cmp.Less(%s[%s], %s[%s])", name, i, name, j)
-				},
-				"Swap": func(name, i, j string) string {
-					return fmt.Sprintf("%s[%s], %s[%s] = %s[%s], %s[%s]", name, i, name, j, name, j, name, i)
-				},
-			},
-		})
-
-		generate(&Variant{
-			Name:       "generic_func",
-			Path:       "zsortanyfunc.go",
-			Package:    "slices",
-			FuncSuffix: "CmpFunc",
-			TypeParam:  "[E any]",
-			ExtraParam: ", cmp func(a, b E) int",
-			ExtraArg:   ", cmp",
-			DataType:   "[]E",
-			Funcs: template.FuncMap{
-				"Less": func(name, i, j string) string {
-					return fmt.Sprintf("(cmp(%s[%s], %s[%s]) < 0)", name, i, name, j)
-				},
-				"Swap": func(name, i, j string) string {
-					return fmt.Sprintf("%s[%s], %s[%s] = %s[%s], %s[%s]", name, i, name, j, name, j, name, i)
-				},
-			},
-		})
-	} else {
-		generate(&Variant{
+var (
+	traditionalVariants = []Variant{
+		Variant{
 			Name:       "interface",
 			Path:       "zsortinterface.go",
 			Package:    "sort",
@@ -130,9 +87,8 @@
 					return fmt.Sprintf("%s.Swap(%s, %s)", name, i, j)
 				},
 			},
-		})
-
-		generate(&Variant{
+		},
+		Variant{
 			Name:       "func",
 			Path:       "zsortfunc.go",
 			Package:    "sort",
@@ -150,7 +106,105 @@
 					return fmt.Sprintf("%s.Swap(%s, %s)", name, i, j)
 				},
 			},
-		})
+		},
+	}
+
+	genericVariants = []Variant{
+		Variant{
+			Name:       "generic_ordered",
+			Path:       "zsortordered.go",
+			Package:    "slices",
+			Imports:    "import \"cmp\"\n",
+			FuncSuffix: "Ordered",
+			TypeParam:  "[E cmp.Ordered]",
+			ExtraParam: "",
+			ExtraArg:   "",
+			DataType:   "[]E",
+			Funcs: template.FuncMap{
+				"Less": func(name, i, j string) string {
+					return fmt.Sprintf("cmp.Less(%s[%s], %s[%s])", name, i, name, j)
+				},
+				"Swap": func(name, i, j string) string {
+					return fmt.Sprintf("%s[%s], %s[%s] = %s[%s], %s[%s]", name, i, name, j, name, j, name, i)
+				},
+			},
+		},
+		Variant{
+			Name:       "generic_func",
+			Path:       "zsortanyfunc.go",
+			Package:    "slices",
+			FuncSuffix: "CmpFunc",
+			TypeParam:  "[E any]",
+			ExtraParam: ", cmp func(a, b E) int",
+			ExtraArg:   ", cmp",
+			DataType:   "[]E",
+			Funcs: template.FuncMap{
+				"Less": func(name, i, j string) string {
+					return fmt.Sprintf("(cmp(%s[%s], %s[%s]) < 0)", name, i, name, j)
+				},
+				"Swap": func(name, i, j string) string {
+					return fmt.Sprintf("%s[%s], %s[%s] = %s[%s], %s[%s]", name, i, name, j, name, j, name, i)
+				},
+			},
+		},
+	}
+
+	expVariants = []Variant{
+		Variant{
+			Name:       "exp_ordered",
+			Path:       "zsortordered.go",
+			Package:    "slices",
+			Imports:    "import \"golang.org/x/exp/constraints\"\n",
+			FuncSuffix: "Ordered",
+			TypeParam:  "[E constraints.Ordered]",
+			ExtraParam: "",
+			ExtraArg:   "",
+			DataType:   "[]E",
+			Funcs: template.FuncMap{
+				"Less": func(name, i, j string) string {
+					return fmt.Sprintf("cmpLess(%s[%s], %s[%s])", name, i, name, j)
+				},
+				"Swap": func(name, i, j string) string {
+					return fmt.Sprintf("%s[%s], %s[%s] = %s[%s], %s[%s]", name, i, name, j, name, j, name, i)
+				},
+			},
+		},
+		Variant{
+			Name:       "exp_func",
+			Path:       "zsortanyfunc.go",
+			Package:    "slices",
+			FuncSuffix: "CmpFunc",
+			TypeParam:  "[E any]",
+			ExtraParam: ", cmp func(a, b E) int",
+			ExtraArg:   ", cmp",
+			DataType:   "[]E",
+			Funcs: template.FuncMap{
+				"Less": func(name, i, j string) string {
+					return fmt.Sprintf("(cmp(%s[%s], %s[%s]) < 0)", name, i, name, j)
+				},
+				"Swap": func(name, i, j string) string {
+					return fmt.Sprintf("%s[%s], %s[%s] = %s[%s], %s[%s]", name, i, name, j, name, j, name, i)
+				},
+			},
+		},
+	}
+)
+
+func main() {
+	genGeneric := flag.Bool("generic", false, "generate generic versions")
+	genExp := flag.Bool("exp", false, "generate x/exp/slices versions")
+	flag.Parse()
+
+	var variants []Variant
+	if *genExp {
+		variants = expVariants
+	} else if *genGeneric {
+		variants = genericVariants
+	} else {
+		variants = traditionalVariants
+	}
+	for i := range variants {
+		generate(&variants[i])
 	}
 }
 
diff --git a/src/sort/search.go b/src/sort/search.go
index 874e408..ccf76db 100644
--- a/src/sort/search.go
+++ b/src/sort/search.go
@@ -117,7 +117,7 @@
 // Convenience wrappers for common cases.
 
 // SearchInts searches for x in a sorted slice of ints and returns the index
-// as specified by Search. The return value is the index to insert x if x is
+// as specified by [Search]. The return value is the index to insert x if x is
 // not present (it could be len(a)).
 // The slice must be sorted in ascending order.
 func SearchInts(a []int, x int) int {
@@ -125,7 +125,7 @@
 }
 
 // SearchFloat64s searches for x in a sorted slice of float64s and returns the index
-// as specified by Search. The return value is the index to insert x if x is not
+// as specified by [Search]. The return value is the index to insert x if x is not
 // present (it could be len(a)).
 // The slice must be sorted in ascending order.
 func SearchFloat64s(a []float64, x float64) int {
@@ -140,11 +140,11 @@
 	return Search(len(a), func(i int) bool { return a[i] >= x })
 }
 
-// Search returns the result of applying SearchInts to the receiver and x.
+// Search returns the result of applying [SearchInts] to the receiver and x.
 func (p IntSlice) Search(x int) int { return SearchInts(p, x) }
 
-// Search returns the result of applying SearchFloat64s to the receiver and x.
+// Search returns the result of applying [SearchFloat64s] to the receiver and x.
 func (p Float64Slice) Search(x float64) int { return SearchFloat64s(p, x) }
 
-// Search returns the result of applying SearchStrings to the receiver and x.
+// Search returns the result of applying [SearchStrings] to the receiver and x.
 func (p StringSlice) Search(x string) int { return SearchStrings(p, x) }
diff --git a/src/sort/slice.go b/src/sort/slice.go
index d0b2102..bc9dd84 100644
--- a/src/sort/slice.go
+++ b/src/sort/slice.go
@@ -14,10 +14,13 @@
 //
 // The sort is not guaranteed to be stable: equal elements
 // may be reversed from their original order.
-// For a stable sort, use SliceStable.
+// For a stable sort, use [SliceStable].
 //
 // The less function must satisfy the same requirements as
 // the Interface type's Less method.
+//
+// Note: in many situations, the newer [slices.SortFunc] function is more
+// ergonomic and runs faster.
 func Slice(x any, less func(i, j int) bool) {
 	rv := reflectlite.ValueOf(x)
 	swap := reflectlite.Swapper(x)
@@ -32,6 +35,9 @@
 //
 // The less function must satisfy the same requirements as
 // the Interface type's Less method.
+//
+// Note: in many situations, the newer [slices.SortStableFunc] function is more
+// ergonomic and runs faster.
 func SliceStable(x any, less func(i, j int) bool) {
 	rv := reflectlite.ValueOf(x)
 	swap := reflectlite.Swapper(x)
@@ -40,6 +46,9 @@
 
 // SliceIsSorted reports whether the slice x is sorted according to the provided less function.
 // It panics if x is not a slice.
+//
+// Note: in many situations, the newer [slices.IsSortedFunc] function is more
+// ergonomic and runs faster.
 func SliceIsSorted(x any, less func(i, j int) bool) bool {
 	rv := reflectlite.ValueOf(x)
 	n := rv.Len()
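
Illustrative sketch (not part of the patch) of the migration these new doc notes point at: a reflection-based sort.Slice call next to its slices.SortFunc equivalent.

package main

import (
	"cmp"
	"fmt"
	"slices"
	"sort"
)

type user struct {
	name string
	age  int
}

func main() {
	a := []user{{"bob", 40}, {"alice", 30}}
	b := slices.Clone(a)

	sort.Slice(a, func(i, j int) bool { return a[i].age < a[j].age })
	slices.SortFunc(b, func(x, y user) int { return cmp.Compare(x.age, y.age) })

	fmt.Println(a) // [{alice 30} {bob 40}]
	fmt.Println(b) // same order, without reflection
}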
diff --git a/src/sort/sort.go b/src/sort/sort.go
index 1760e12..6db161f 100644
--- a/src/sort/sort.go
+++ b/src/sort/sort.go
@@ -40,7 +40,7 @@
 // It makes one call to data.Len to determine n and O(n*log(n)) calls to
 // data.Less and data.Swap. The sort is not guaranteed to be stable.
 //
-// Note: in many situations, the newer slices.SortFunc function is more
+// Note: in many situations, the newer [slices.SortFunc] function is more
 // ergonomic and runs faster.
 func Sort(data Interface) {
 	n := data.Len()
@@ -100,7 +100,7 @@
 
 // IsSorted reports whether data is sorted.
 //
-// Note: in many situations, the newer slices.IsSortedFunc function is more
+// Note: in many situations, the newer [slices.IsSortedFunc] function is more
 // ergonomic and runs faster.
 func IsSorted(data Interface) bool {
 	n := data.Len()
@@ -161,35 +161,35 @@
 
 // Ints sorts a slice of ints in increasing order.
 //
-// Note: consider using the newer slices.Sort function, which runs faster.
-func Ints(x []int) { Sort(IntSlice(x)) }
+// Note: as of Go 1.22, this function simply calls [slices.Sort].
+func Ints(x []int) { intsImpl(x) }
 
 // Float64s sorts a slice of float64s in increasing order.
 // Not-a-number (NaN) values are ordered before other values.
 //
-// Note: consider using the newer slices.Sort function, which runs faster.
-func Float64s(x []float64) { Sort(Float64Slice(x)) }
+// Note: as of Go 1.22, this function simply calls [slices.Sort].
+func Float64s(x []float64) { float64sImpl(x) }
 
 // Strings sorts a slice of strings in increasing order.
 //
-// Note: consider using the newer slices.Sort function, which runs faster.
-func Strings(x []string) { Sort(StringSlice(x)) }
+// Note: as of Go 1.22, this function simply calls [slices.Sort].
+func Strings(x []string) { stringsImpl(x) }
 
 // IntsAreSorted reports whether the slice x is sorted in increasing order.
 //
-// Note: consider using the newer slices.IsSorted function, which runs faster.
-func IntsAreSorted(x []int) bool { return IsSorted(IntSlice(x)) }
+// Note: as of Go 1.22, this function simply calls [slices.IsSorted].
+func IntsAreSorted(x []int) bool { return intsAreSortedImpl(x) }
 
 // Float64sAreSorted reports whether the slice x is sorted in increasing order,
 // with not-a-number (NaN) values before any other values.
 //
-// Note: consider using the newer slices.IsSorted function, which runs faster.
-func Float64sAreSorted(x []float64) bool { return IsSorted(Float64Slice(x)) }
+// Note: as of Go 1.22, this function simply calls [slices.IsSorted].
+func Float64sAreSorted(x []float64) bool { return float64sAreSortedImpl(x) }
 
 // StringsAreSorted reports whether the slice x is sorted in increasing order.
 //
-// Note: consider using the newer slices.IsSorted function, which runs faster.
-func StringsAreSorted(x []string) bool { return IsSorted(StringSlice(x)) }
+// Note: as of Go 1.22, this function simply calls [slices.IsSorted].
+func StringsAreSorted(x []string) bool { return stringsAreSortedImpl(x) }
 
 // Notes on stable sorting:
 // The used algorithms are simple and provable correct on all input and use
diff --git a/src/sort/sort_impl_120.go b/src/sort/sort_impl_120.go
new file mode 100644
index 0000000..5980da6
--- /dev/null
+++ b/src/sort/sort_impl_120.go
@@ -0,0 +1,15 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package sort
+
+func intsImpl(x []int)         { Sort(IntSlice(x)) }
+func float64sImpl(x []float64) { Sort(Float64Slice(x)) }
+func stringsImpl(x []string)   { Sort(StringSlice(x)) }
+
+func intsAreSortedImpl(x []int) bool         { return IsSorted(IntSlice(x)) }
+func float64sAreSortedImpl(x []float64) bool { return IsSorted(Float64Slice(x)) }
+func stringsAreSortedImpl(x []string) bool   { return IsSorted(StringSlice(x)) }
diff --git a/src/sort/sort_impl_go121.go b/src/sort/sort_impl_go121.go
new file mode 100644
index 0000000..0a6a6a6
--- /dev/null
+++ b/src/sort/sort_impl_go121.go
@@ -0,0 +1,22 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+// Starting with Go 1.21, we can leverage the new generic functions from the
+// slices package to implement some `sort` functions faster. However, until
+// the bootstrap compiler uses Go 1.21 or later, we keep a fallback version
+// in sort_impl_120.go that retains the old implementation.
+
+package sort
+
+import "slices"
+
+func intsImpl(x []int)         { slices.Sort(x) }
+func float64sImpl(x []float64) { slices.Sort(x) }
+func stringsImpl(x []string)   { slices.Sort(x) }
+
+func intsAreSortedImpl(x []int) bool         { return slices.IsSorted(x) }
+func float64sAreSortedImpl(x []float64) bool { return slices.IsSorted(x) }
+func stringsAreSortedImpl(x []string) bool   { return slices.IsSorted(x) }
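
Illustrative sketch (not part of the patch): under the go1.21 build tag above, sort.Ints and friends become thin wrappers over the slices package, so the two calls below take the same code path and must agree.

package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	a := []int{3, 1, 2}
	b := slices.Clone(a)

	sort.Ints(a)   // forwards to slices.Sort via intsImpl
	slices.Sort(b) // direct call

	fmt.Println(slices.Equal(a, b)) // true
}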
diff --git a/src/sort/sort_slices_benchmark_test.go b/src/sort/sort_slices_benchmark_test.go
new file mode 100644
index 0000000..37f3b1b
--- /dev/null
+++ b/src/sort/sort_slices_benchmark_test.go
@@ -0,0 +1,201 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sort_test
+
+import (
+	"math/rand"
+	"slices"
+	. "sort"
+	"strconv"
+	stringspkg "strings"
+	"testing"
+)
+
+// Benchmarks comparing sorting from the slices package with functions from
+// the sort package (avoiding functions that are just forwarding to the slices
+// package).
+
+func makeRandomInts(n int) []int {
+	rand.Seed(42)
+	ints := make([]int, n)
+	for i := 0; i < n; i++ {
+		ints[i] = rand.Intn(n)
+	}
+	return ints
+}
+
+func makeSortedInts(n int) []int {
+	ints := make([]int, n)
+	for i := 0; i < n; i++ {
+		ints[i] = i
+	}
+	return ints
+}
+
+func makeReversedInts(n int) []int {
+	ints := make([]int, n)
+	for i := 0; i < n; i++ {
+		ints[i] = n - i
+	}
+	return ints
+}
+
+func makeSortedStrings(n int) []string {
+	x := make([]string, n)
+	for i := 0; i < n; i++ {
+		x[i] = strconv.Itoa(i)
+	}
+	Strings(x)
+	return x
+}
+
+const N = 100_000
+
+func BenchmarkSortInts(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		ints := makeRandomInts(N)
+		b.StartTimer()
+		Sort(IntSlice(ints))
+	}
+}
+
+func BenchmarkSlicesSortInts(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		ints := makeRandomInts(N)
+		b.StartTimer()
+		slices.Sort(ints)
+	}
+}
+
+func BenchmarkSortIsSorted(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		ints := makeSortedInts(N)
+		b.StartTimer()
+		IsSorted(IntSlice(ints))
+	}
+}
+
+func BenchmarkSlicesIsSorted(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		ints := makeSortedInts(N)
+		b.StartTimer()
+		slices.IsSorted(ints)
+	}
+}
+
+// makeRandomStrings generates n random strings with alphabetic runes of
+// varying lengths.
+func makeRandomStrings(n int) []string {
+	rand.Seed(42)
+	var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	ss := make([]string, n)
+	for i := 0; i < n; i++ {
+		var sb stringspkg.Builder
+		slen := 2 + rand.Intn(50)
+		for j := 0; j < slen; j++ {
+			sb.WriteRune(letters[rand.Intn(len(letters))])
+		}
+		ss[i] = sb.String()
+	}
+	return ss
+}
+
+func BenchmarkSortStrings(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		ss := makeRandomStrings(N)
+		b.StartTimer()
+		Sort(StringSlice(ss))
+	}
+}
+
+func BenchmarkSlicesSortStrings(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		ss := makeRandomStrings(N)
+		b.StartTimer()
+		slices.Sort(ss)
+	}
+}
+
+func BenchmarkSortStrings_Sorted(b *testing.B) {
+	ss := makeSortedStrings(N)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		Sort(StringSlice(ss))
+	}
+}
+
+func BenchmarkSlicesSortStrings_Sorted(b *testing.B) {
+	ss := makeSortedStrings(N)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		slices.Sort(ss)
+	}
+}
+
+// These benchmarks compare sorting a slice of structs with sort.Sort vs.
+// slices.SortFunc.
+type myStruct struct {
+	a, b, c, d string
+	n          int
+}
+
+type myStructs []*myStruct
+
+func (s myStructs) Len() int           { return len(s) }
+func (s myStructs) Less(i, j int) bool { return s[i].n < s[j].n }
+func (s myStructs) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+func makeRandomStructs(n int) myStructs {
+	rand.Seed(42)
+	structs := make([]*myStruct, n)
+	for i := 0; i < n; i++ {
+		structs[i] = &myStruct{n: rand.Intn(n)}
+	}
+	return structs
+}
+
+func TestStructSorts(t *testing.T) {
+	ss := makeRandomStructs(200)
+	ss2 := make([]*myStruct, len(ss))
+	for i := range ss {
+		ss2[i] = &myStruct{n: ss[i].n}
+	}
+
+	Sort(ss)
+	slices.SortFunc(ss2, func(a, b *myStruct) int { return a.n - b.n })
+
+	for i := range ss {
+		if *ss[i] != *ss2[i] {
+			t.Fatalf("ints2 mismatch at %d; %v != %v", i, *ss[i], *ss2[i])
+		}
+	}
+}
+
+func BenchmarkSortStructs(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		ss := makeRandomStructs(N)
+		b.StartTimer()
+		Sort(ss)
+	}
+}
+
+func BenchmarkSortFuncStructs(b *testing.B) {
+	cmpFunc := func(a, b *myStruct) int { return a.n - b.n }
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		ss := makeRandomStructs(N)
+		b.StartTimer()
+		slices.SortFunc(ss, cmpFunc)
+	}
+}
diff --git a/src/sort/sort_test.go b/src/sort/sort_test.go
index 862bba2..ccb8987 100644
--- a/src/sort/sort_test.go
+++ b/src/sort/sort_test.go
@@ -5,10 +5,12 @@
 package sort_test
 
 import (
+	"cmp"
 	"fmt"
 	"internal/testenv"
 	"math"
 	"math/rand"
+	"slices"
 	. "sort"
 	"strconv"
 	stringspkg "strings"
@@ -39,6 +41,20 @@
 	}
 }
 
+// Compare Sort with slices.Sort sorting a float64 slice containing NaNs.
+func TestSortFloat64sCompareSlicesSort(t *testing.T) {
+	slice1 := slices.Clone(float64s[:])
+	slice2 := slices.Clone(float64s[:])
+
+	Sort(Float64Slice(slice1))
+	slices.Sort(slice2)
+
+	// Compare for equality using cmp.Compare, which considers NaNs equal.
+	if !slices.EqualFunc(slice1, slice2, func(a, b float64) bool { return cmp.Compare(a, b) == 0 }) {
+		t.Errorf("mismatch between Sort and slices.Sort: got %v, want %v", slice1, slice2)
+	}
+}
+
 func TestSortStringSlice(t *testing.T) {
 	data := strings
 	a := StringSlice(data[0:])
@@ -399,13 +415,6 @@
 	d.data[i], d.data[j] = d.data[j], d.data[i]
 }
 
-func min(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
-
 func lg(n int) int {
 	i := 0
 	for 1<<uint(i) < n {
diff --git a/src/strconv/atob_test.go b/src/strconv/atob_test.go
index 28f469f..40d43a9 100644
--- a/src/strconv/atob_test.go
+++ b/src/strconv/atob_test.go
@@ -39,19 +39,19 @@
 		if test.err != nil {
 			// expect an error
 			if e == nil {
-				t.Errorf("%s: expected %s but got nil", test.in, test.err)
+				t.Errorf("ParseBool(%s) = nil; want %s", test.in, test.err)
 			} else {
 				// NumError assertion must succeed; it's the only thing we return.
-				if test.err != e.(*NumError).Err {
-					t.Errorf("%s: expected %s but got %s", test.in, test.err, e)
+				if e.(*NumError).Err != test.err {
+					t.Errorf("ParseBool(%s) = %s; want %s", test.in, e, test.err)
 				}
 			}
 		} else {
 			if e != nil {
-				t.Errorf("%s: expected no error but got %s", test.in, e)
+				t.Errorf("ParseBool(%s) = %s; want nil", test.in, e)
 			}
 			if b != test.out {
-				t.Errorf("%s: expected %t but got %t", test.in, test.out, b)
+				t.Errorf("ParseBool(%s) = %t; want %t", test.in, b, test.out)
 			}
 		}
 	}
@@ -65,7 +65,7 @@
 func TestFormatBool(t *testing.T) {
 	for b, s := range boolString {
 		if f := FormatBool(b); f != s {
-			t.Errorf(`FormatBool(%v): expected %q but got %q`, b, s, f)
+			t.Errorf("FormatBool(%v) = %q; want %q", b, f, s)
 		}
 	}
 }
@@ -85,7 +85,7 @@
 	for _, test := range appendBoolTests {
 		b := AppendBool(test.in, test.b)
 		if !bytes.Equal(b, test.out) {
-			t.Errorf("AppendBool(%q, %v): expected %q but got %q", test.in, test.b, test.out, b)
+			t.Errorf("AppendBool(%q, %v) = %q; want %q", test.in, test.b, b, test.out)
 		}
 	}
 }
diff --git a/src/strconv/bytealg.go b/src/strconv/bytealg.go
index a2bb12c..6fe1624 100644
--- a/src/strconv/bytealg.go
+++ b/src/strconv/bytealg.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !compiler_bootstrap
-// +build !compiler_bootstrap
 
 package strconv
 
diff --git a/src/strconv/bytealg_bootstrap.go b/src/strconv/bytealg_bootstrap.go
index 0ed79f4..12d96e5 100644
--- a/src/strconv/bytealg_bootstrap.go
+++ b/src/strconv/bytealg_bootstrap.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build compiler_bootstrap
-// +build compiler_bootstrap
 
 package strconv
 
diff --git a/src/strconv/example_test.go b/src/strconv/example_test.go
index b02392d..428fde4 100644
--- a/src/strconv/example_test.go
+++ b/src/strconv/example_test.go
@@ -369,6 +369,23 @@
 	// "\" This is a ☺ \\n \""
 }
 
+func ExampleQuotedPrefix() {
+	s, err := strconv.QuotedPrefix("not a quoted string")
+	fmt.Printf("%q, %v\n", s, err)
+	s, err = strconv.QuotedPrefix("\"double-quoted string\" with trailing text")
+	fmt.Printf("%q, %v\n", s, err)
+	s, err = strconv.QuotedPrefix("`or backquoted` with more trailing text")
+	fmt.Printf("%q, %v\n", s, err)
+	s, err = strconv.QuotedPrefix("'\u263a' is also okay")
+	fmt.Printf("%q, %v\n", s, err)
+
+	// Output:
+	// "", invalid syntax
+	// "\"double-quoted string\"", <nil>
+	// "`or backquoted`", <nil>
+	// "'☺'", <nil>
+}
+
 func ExampleUnquote() {
 	s, err := strconv.Unquote("You can't unquote a string without quotes")
 	fmt.Printf("%q, %v\n", s, err)
diff --git a/src/strconv/ftoa.go b/src/strconv/ftoa.go
index fcbf4df..c514e66 100644
--- a/src/strconv/ftoa.go
+++ b/src/strconv/ftoa.go
@@ -568,17 +568,3 @@
 
 	return dst
 }
-
-func min(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func max(a, b int) int {
-	if a > b {
-		return a
-	}
-	return b
-}
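
The local min and max helpers deleted here (and in sort_test.go above and strings/search.go below) are redundant because min and max are predeclared builtins since Go 1.21. A quick standalone illustration:

	package main

	import "fmt"

	func main() {
		// min and max are predeclared builtins since Go 1.21, which is why
		// the per-file helper functions are being removed.
		fmt.Println(min(3, 5), max(3, 5)) // 3 5
		fmt.Println(min(2.5, 1.5))        // 1.5
		fmt.Println(max("a", "b"))        // b
	}
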
diff --git a/src/strconv/makeisprint.go b/src/strconv/makeisprint.go
index 909f9e4..ff361e7 100644
--- a/src/strconv/makeisprint.go
+++ b/src/strconv/makeisprint.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ignore
-// +build ignore
 
 //
 // usage:
diff --git a/src/strconv/quote.go b/src/strconv/quote.go
index 1b5bddf..7c38433 100644
--- a/src/strconv/quote.go
+++ b/src/strconv/quote.go
@@ -66,7 +66,6 @@
 }
 
 func appendEscapedRune(buf []byte, r rune, quote byte, ASCIIonly, graphicOnly bool) []byte {
-	var runeTmp [utf8.UTFMax]byte
 	if r == rune(quote) || r == '\\' { // always backslashed
 		buf = append(buf, '\\')
 		buf = append(buf, byte(r))
@@ -78,9 +77,7 @@
 			return buf
 		}
 	} else if IsPrint(r) || graphicOnly && isInGraphicList(r) {
-		n := utf8.EncodeRune(runeTmp[:], r)
-		buf = append(buf, runeTmp[:n]...)
-		return buf
+		return utf8.AppendRune(buf, r)
 	}
 	switch r {
 	case '\a':
@@ -471,9 +468,7 @@
 				if r < utf8.RuneSelf || !multibyte {
 					buf = append(buf, byte(r))
 				} else {
-					var arr [utf8.UTFMax]byte
-					n := utf8.EncodeRune(arr[:], r)
-					buf = append(buf, arr[:n]...)
+					buf = utf8.AppendRune(buf, r)
 				}
 			}
 
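
The quote.go change above replaces the encode-into-a-temporary-array pattern with utf8.AppendRune, which appends the UTF-8 encoding of a rune directly onto a byte slice. A minimal standalone comparison of the two patterns:

	package main

	import (
		"fmt"
		"unicode/utf8"
	)

	func main() {
		// Old pattern: encode into a fixed-size temporary, then append.
		var tmp [utf8.UTFMax]byte
		n := utf8.EncodeRune(tmp[:], '☺')
		oldBuf := append([]byte("smile: "), tmp[:n]...)

		// New pattern: append the encoding directly.
		newBuf := utf8.AppendRune([]byte("smile: "), '☺')

		fmt.Println(string(oldBuf) == string(newBuf)) // true
	}
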
diff --git a/src/strings/builder.go b/src/strings/builder.go
index 299ad51..189dadb 100644
--- a/src/strings/builder.go
+++ b/src/strings/builder.go
@@ -10,7 +10,7 @@
 	"unsafe"
 )
 
-// A Builder is used to efficiently build a string using Write methods.
+// A Builder is used to efficiently build a string using [Builder.Write] methods.
 // It minimizes memory copying. The zero value is ready to use.
 // Do not copy a non-zero Builder.
 type Builder struct {
@@ -57,7 +57,7 @@
 // already written.
 func (b *Builder) Cap() int { return cap(b.buf) }
 
-// Reset resets the Builder to be empty.
+// Reset resets the [Builder] to be empty.
 func (b *Builder) Reset() {
 	b.addr = nil
 	b.buf = nil
diff --git a/src/strings/builder_test.go b/src/strings/builder_test.go
index dbc2c19..c3c627e 100644
--- a/src/strings/builder_test.go
+++ b/src/strings/builder_test.go
@@ -355,6 +355,22 @@
 	})
 }
 
+func BenchmarkBuildString_WriteString(b *testing.B) {
+	someString := string(someBytes)
+	benchmarkBuilder(b, func(b *testing.B, numWrite int, grow bool) {
+		for i := 0; i < b.N; i++ {
+			var buf Builder
+			if grow {
+				buf.Grow(len(someString) * numWrite)
+			}
+			for i := 0; i < numWrite; i++ {
+				buf.WriteString(someString)
+			}
+			sinkS = buf.String()
+		}
+	})
+}
+
 func BenchmarkBuildString_ByteBuffer(b *testing.B) {
 	benchmarkBuilder(b, func(b *testing.B, numWrite int, grow bool) {
 		for i := 0; i < b.N; i++ {
diff --git a/src/strings/example_test.go b/src/strings/example_test.go
index ab83e10..bdab7ae 100644
--- a/src/strings/example_test.go
+++ b/src/strings/example_test.go
@@ -80,6 +80,17 @@
 	// false
 }
 
+func ExampleContainsFunc() {
+	f := func(r rune) bool {
+		return r == 'a' || r == 'e' || r == 'i' || r == 'o' || r == 'u'
+	}
+	fmt.Println(strings.ContainsFunc("hello", f))
+	fmt.Println(strings.ContainsFunc("rhythms", f))
+	// Output:
+	// true
+	// false
+}
+
 func ExampleCount() {
 	fmt.Println(strings.Count("cheese", "e"))
 	fmt.Println(strings.Count("five", "")) // before & after each rune
diff --git a/src/strings/reader.go b/src/strings/reader.go
index 04f31a1..497ffb7 100644
--- a/src/strings/reader.go
+++ b/src/strings/reader.go
@@ -10,8 +10,8 @@
 	"unicode/utf8"
 )
 
-// A Reader implements the io.Reader, io.ReaderAt, io.ByteReader, io.ByteScanner,
-// io.RuneReader, io.RuneScanner, io.Seeker, and io.WriterTo interfaces by reading
+// A Reader implements the [io.Reader], [io.ReaderAt], [io.ByteReader], [io.ByteScanner],
+// [io.RuneReader], [io.RuneScanner], [io.Seeker], and [io.WriterTo] interfaces by reading
 // from a string.
 // The zero value for Reader operates like a Reader of an empty string.
 type Reader struct {
@@ -30,12 +30,12 @@
 }
 
 // Size returns the original length of the underlying string.
-// Size is the number of bytes available for reading via ReadAt.
+// Size is the number of bytes available for reading via [Reader.ReadAt].
 // The returned value is always the same and is not affected by calls
 // to any other method.
 func (r *Reader) Size() int64 { return int64(len(r.s)) }
 
-// Read implements the io.Reader interface.
+// Read implements the [io.Reader] interface.
 func (r *Reader) Read(b []byte) (n int, err error) {
 	if r.i >= int64(len(r.s)) {
 		return 0, io.EOF
@@ -46,7 +46,7 @@
 	return
 }
 
-// ReadAt implements the io.ReaderAt interface.
+// ReadAt implements the [io.ReaderAt] interface.
 func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
 	// cannot modify state - see io.ReaderAt
 	if off < 0 {
@@ -62,7 +62,7 @@
 	return
 }
 
-// ReadByte implements the io.ByteReader interface.
+// ReadByte implements the [io.ByteReader] interface.
 func (r *Reader) ReadByte() (byte, error) {
 	r.prevRune = -1
 	if r.i >= int64(len(r.s)) {
@@ -73,7 +73,7 @@
 	return b, nil
 }
 
-// UnreadByte implements the io.ByteScanner interface.
+// UnreadByte implements the [io.ByteScanner] interface.
 func (r *Reader) UnreadByte() error {
 	if r.i <= 0 {
 		return errors.New("strings.Reader.UnreadByte: at beginning of string")
@@ -83,7 +83,7 @@
 	return nil
 }
 
-// ReadRune implements the io.RuneReader interface.
+// ReadRune implements the [io.RuneReader] interface.
 func (r *Reader) ReadRune() (ch rune, size int, err error) {
 	if r.i >= int64(len(r.s)) {
 		r.prevRune = -1
@@ -99,7 +99,7 @@
 	return
 }
 
-// UnreadRune implements the io.RuneScanner interface.
+// UnreadRune implements the [io.RuneScanner] interface.
 func (r *Reader) UnreadRune() error {
 	if r.i <= 0 {
 		return errors.New("strings.Reader.UnreadRune: at beginning of string")
@@ -112,7 +112,7 @@
 	return nil
 }
 
-// Seek implements the io.Seeker interface.
+// Seek implements the [io.Seeker] interface.
 func (r *Reader) Seek(offset int64, whence int) (int64, error) {
 	r.prevRune = -1
 	var abs int64
@@ -133,7 +133,7 @@
 	return abs, nil
 }
 
-// WriteTo implements the io.WriterTo interface.
+// WriteTo implements the [io.WriterTo] interface.
 func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
 	r.prevRune = -1
 	if r.i >= int64(len(r.s)) {
@@ -152,9 +152,9 @@
 	return
 }
 
-// Reset resets the Reader to be reading from s.
+// Reset resets the [Reader] to be reading from s.
 func (r *Reader) Reset(s string) { *r = Reader{s, 0, -1} }
 
-// NewReader returns a new Reader reading from s.
-// It is similar to bytes.NewBufferString but more efficient and non-writable.
+// NewReader returns a new [Reader] reading from s.
+// It is similar to [bytes.NewBufferString] but more efficient and non-writable.
 func NewReader(s string) *Reader { return &Reader{s, 0, -1} }
diff --git a/src/strings/replace.go b/src/strings/replace.go
index f504fb4..3b17a55 100644
--- a/src/strings/replace.go
+++ b/src/strings/replace.go
@@ -23,7 +23,7 @@
 	WriteString(w io.Writer, s string) (n int, err error)
 }
 
-// NewReplacer returns a new Replacer from a list of old, new string
+// NewReplacer returns a new [Replacer] from a list of old, new string
 // pairs. Replacements are performed in the order they appear in the
 // target string, without overlapping matches. The old string
 // comparisons are done in argument order.
diff --git a/src/strings/search.go b/src/strings/search.go
index e5bffbb..e1ace3e 100644
--- a/src/strings/search.go
+++ b/src/strings/search.go
@@ -115,10 +115,3 @@
 	}
 	return -1
 }
-
-func max(a, b int) int {
-	if a > b {
-		return a
-	}
-	return b
-}
diff --git a/src/strings/strings.go b/src/strings/strings.go
index 2dd4321..f3f0723 100644
--- a/src/strings/strings.go
+++ b/src/strings/strings.go
@@ -83,7 +83,7 @@
 	case n == 0:
 		return len(s)
 	case n == 1:
-		return LastIndexByte(s, substr[0])
+		return bytealg.LastIndexByteString(s, substr[0])
 	case n == len(s):
 		if substr == s {
 			return 0
@@ -227,12 +227,7 @@
 
 // LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
 func LastIndexByte(s string, c byte) int {
-	for i := len(s) - 1; i >= 0; i-- {
-		if s[i] == c {
-			return i
-		}
-	}
-	return -1
+	return bytealg.LastIndexByteString(s, c)
 }
 
 // Generic split: splits after each instance of sep,
@@ -277,7 +272,7 @@
 //	n < 0: all substrings
 //
 // Edge cases for s and sep (for example, empty strings) are handled
-// as described in the documentation for Split.
+// as described in the documentation for [Split].
 //
 // To split around the first instance of a separator, see Cut.
 func SplitN(s, sep string, n int) []string { return genSplit(s, sep, 0, n) }
@@ -306,7 +301,7 @@
 // If sep is empty, Split splits after each UTF-8 sequence. If both s
 // and sep are empty, Split returns an empty slice.
 //
-// It is equivalent to SplitN with a count of -1.
+// It is equivalent to [SplitN] with a count of -1.
 //
 // To split around the first instance of a separator, see Cut.
 func Split(s, sep string) []string { return genSplit(s, sep, 0, -1) }
@@ -320,7 +315,7 @@
 // If sep is empty, SplitAfter splits after each UTF-8 sequence. If
 // both s and sep are empty, SplitAfter returns an empty slice.
 //
-// It is equivalent to SplitAfterN with a count of -1.
+// It is equivalent to [SplitAfterN] with a count of -1.
 func SplitAfter(s, sep string) []string {
 	return genSplit(s, sep, len(sep), -1)
 }
@@ -463,12 +458,12 @@
 	return b.String()
 }
 
-// HasPrefix tests whether the string s begins with prefix.
+// HasPrefix reports whether the string s begins with prefix.
 func HasPrefix(s, prefix string) bool {
 	return len(s) >= len(prefix) && s[0:len(prefix)] == prefix
 }
 
-// HasSuffix tests whether the string s ends with suffix.
+// HasSuffix reports whether the string s ends with suffix.
 func HasSuffix(s, suffix string) bool {
 	return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
 }
@@ -526,7 +521,7 @@
 			if r < utf8.RuneSelf {
 				b.WriteByte(byte(r))
 			} else {
-				// r is not a ASCII rune.
+				// r is not an ASCII rune.
 				b.WriteRune(r)
 			}
 		}
@@ -909,7 +904,7 @@
 // TrimLeft returns a slice of the string s with all leading
 // Unicode code points contained in cutset removed.
 //
-// To remove a prefix, use TrimPrefix instead.
+// To remove a prefix, use [TrimPrefix] instead.
 func TrimLeft(s, cutset string) string {
 	if s == "" || cutset == "" {
 		return s
@@ -957,7 +952,7 @@
 // TrimRight returns a slice of the string s, with all trailing
 // Unicode code points contained in cutset removed.
 //
-// To remove a suffix, use TrimSuffix instead.
+// To remove a suffix, use [TrimSuffix] instead.
 func TrimRight(s, cutset string) string {
 	if s == "" || cutset == "" {
 		return s
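
The doc links added above point TrimLeft at TrimPrefix (and TrimRight at TrimSuffix) because the two are often confused: TrimLeft strips any leading code points found in the cutset, repeatedly, while TrimPrefix removes at most one exact prefix. A short illustration:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// TrimLeft treats "go" as a set of code points to strip...
		fmt.Println(strings.TrimLeft("gogogopher", "go"))   // "pher"
		// ...while TrimPrefix removes the single exact prefix "go".
		fmt.Println(strings.TrimPrefix("gogogopher", "go")) // "gogopher"
	}
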
diff --git a/src/sync/cond.go b/src/sync/cond.go
index cc927ad..7ef3188 100644
--- a/src/sync/cond.go
+++ b/src/sync/cond.go
@@ -96,6 +96,10 @@
 type copyChecker uintptr
 
 func (c *copyChecker) check() {
+	// Check if c has been copied in three steps:
+	// 1. The first comparison is the fast-path. If c has been initialized and not copied, this will return immediately. Otherwise, c is either not initialized, or has been copied.
+	// 2. Ensure c is initialized. If the CAS succeeds, we're done. If it fails, c was either initialized concurrently and we simply lost the race, or c has been copied.
+	// 3. Do step 1 again. Now that c is definitely initialized, if this fails, c was copied.
 	if uintptr(*c) != uintptr(unsafe.Pointer(c)) &&
 		!atomic.CompareAndSwapUintptr((*uintptr)(c), 0, uintptr(unsafe.Pointer(c))) &&
 		uintptr(*c) != uintptr(unsafe.Pointer(c)) {
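
The new comment in cond.go describes the self-pointer trick: a copyChecker records its own address on first use, so a later check from a different address proves the containing value was copied. A small standalone sketch of the same idea (the guarded type is illustrative only):

	package main

	import (
		"fmt"
		"sync/atomic"
		"unsafe"
	)

	type copyChecker uintptr

	func (c *copyChecker) check() {
		if uintptr(*c) != uintptr(unsafe.Pointer(c)) &&
			!atomic.CompareAndSwapUintptr((*uintptr)(c), 0, uintptr(unsafe.Pointer(c))) &&
			uintptr(*c) != uintptr(unsafe.Pointer(c)) {
			panic("object was copied")
		}
	}

	type guarded struct {
		checker copyChecker
		value   int
	}

	func main() {
		var g guarded
		g.checker.check() // first use: records the checker's own address
		copyOfG := g      // copying moves the checker to a new address
		defer func() { fmt.Println("recovered:", recover()) }()
		copyOfG.checker.check() // the stored address no longer matches: panic
	}
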
diff --git a/src/sync/export_test.go b/src/sync/export_test.go
index c020ef7..b55cecd 100644
--- a/src/sync/export_test.go
+++ b/src/sync/export_test.go
@@ -23,7 +23,7 @@
 	}
 	// For testing purposes, set the head and tail indexes close
 	// to wrapping around.
-	d.headTail = d.pack(1<<dequeueBits-500, 1<<dequeueBits-500)
+	d.headTail.Store(d.pack(1<<dequeueBits-500, 1<<dequeueBits-500))
 	return d
 }
 
diff --git a/src/sync/map.go b/src/sync/map.go
index e8ccf58..7a9eebd 100644
--- a/src/sync/map.go
+++ b/src/sync/map.go
@@ -8,7 +8,7 @@
 	"sync/atomic"
 )
 
-// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
+// Map is like a Go map[any]any but is safe for concurrent use
 // by multiple goroutines without additional locking or coordination.
 // Loads, stores, and deletes run in amortized constant time.
 //
@@ -461,7 +461,8 @@
 		read = m.loadReadOnly()
 		if read.amended {
 			read = readOnly{m: m.dirty}
-			m.read.Store(&read)
+			copyRead := read
+			m.read.Store(&copyRead)
 			m.dirty = nil
 			m.misses = 0
 		}
diff --git a/src/sync/map_test.go b/src/sync/map_test.go
index 1eb3fc6..316f87b 100644
--- a/src/sync/map_test.go
+++ b/src/sync/map_test.go
@@ -5,6 +5,7 @@
 package sync_test
 
 import (
+	"internal/testenv"
 	"math/rand"
 	"reflect"
 	"runtime"
@@ -277,6 +278,19 @@
 	m := &sync.Map{}
 	if m.CompareAndSwap(m, nil, 42) {
 		// See https://go.dev/issue/51972#issuecomment-1126408637.
-		t.Fatalf("CompareAndSwap on an non-existing key succeeded")
+		t.Fatalf("CompareAndSwap on a non-existing key succeeded")
+	}
+}
+
+func TestMapRangeNoAllocations(t *testing.T) { // Issue 62404
+	testenv.SkipIfOptimizationOff(t)
+	var m sync.Map
+	allocs := testing.AllocsPerRun(10, func() {
+		m.Range(func(key, value any) bool {
+			return true
+		})
+	})
+	if allocs > 0 {
+		t.Errorf("AllocsPerRun of m.Range = %v; want 0", allocs)
 	}
 }
diff --git a/src/sync/once.go b/src/sync/once.go
index b6399cf..3f58707 100644
--- a/src/sync/once.go
+++ b/src/sync/once.go
@@ -21,7 +21,7 @@
 	// The hot path is inlined at every call site.
 	// Placing done first allows more compact instructions on some architectures (amd64/386),
 	// and fewer instructions (to calculate offset) on other architectures.
-	done uint32
+	done atomic.Uint32
 	m    Mutex
 }
 
@@ -48,7 +48,7 @@
 func (o *Once) Do(f func()) {
 	// Note: Here is an incorrect implementation of Do:
 	//
-	//	if atomic.CompareAndSwapUint32(&o.done, 0, 1) {
+	//	if o.done.CompareAndSwap(0, 1) {
 	//		f()
 	//	}
 	//
@@ -58,9 +58,9 @@
 	// call f, and the second would return immediately, without
 	// waiting for the first's call to f to complete.
 	// This is why the slow path falls back to a mutex, and why
-	// the atomic.StoreUint32 must be delayed until after f returns.
+	// the o.done.Store must be delayed until after f returns.
 
-	if atomic.LoadUint32(&o.done) == 0 {
+	if o.done.Load() == 0 {
 		// Outlined slow-path to allow inlining of the fast-path.
 		o.doSlow(f)
 	}
@@ -69,8 +69,8 @@
 func (o *Once) doSlow(f func()) {
 	o.m.Lock()
 	defer o.m.Unlock()
-	if o.done == 0 {
-		defer atomic.StoreUint32(&o.done, 1)
+	if o.done.Load() == 0 {
+		defer o.done.Store(1)
 		f()
 	}
 }
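
The once.go change keeps the same fast-path/slow-path structure and only moves the flag to an atomic.Uint32. The guarantee the comments describe, that every caller of Do returns only after the single call to f has completed, can be seen in a small usage sketch:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		var once sync.Once
		var wg sync.WaitGroup
		for i := 0; i < 5; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// Every goroutine that calls Do returns only after the
				// single call to the function has finished.
				once.Do(func() { fmt.Println("initialized once") })
			}()
		}
		wg.Wait()
	}
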
diff --git a/src/sync/oncefunc.go b/src/sync/oncefunc.go
index 9ef8344..db28628 100644
--- a/src/sync/oncefunc.go
+++ b/src/sync/oncefunc.go
@@ -25,7 +25,8 @@
 			}
 		}()
 		f()
-		valid = true // Set only if f does not panic
+		f = nil      // Do not keep f alive after invoking it.
+		valid = true // Set only if f does not panic.
 	}
 	return func() {
 		once.Do(g)
@@ -54,6 +55,7 @@
 			}
 		}()
 		result = f()
+		f = nil
 		valid = true
 	}
 	return func() T {
@@ -85,6 +87,7 @@
 			}
 		}()
 		r1, r2 = f()
+		f = nil
 		valid = true
 	}
 	return func() (T1, T2) {
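
Setting f = nil after the first call lets the garbage collector reclaim the wrapped function and anything it captures, while the cached result remains available. A short usage sketch with sync.OnceValue:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		// The wrapped function runs at most once; later calls return the
		// cached result, and (per the change above) the original function
		// is no longer kept alive after that first call.
		readConfig := sync.OnceValue(func() string {
			fmt.Println("loading config...")
			return "config-data"
		})
		fmt.Println(readConfig())
		fmt.Println(readConfig())
	}
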
diff --git a/src/sync/oncefunc_test.go b/src/sync/oncefunc_test.go
index 3c523a5..5f0d564 100644
--- a/src/sync/oncefunc_test.go
+++ b/src/sync/oncefunc_test.go
@@ -6,10 +6,13 @@
 
 import (
 	"bytes"
+	"math"
 	"runtime"
 	"runtime/debug"
 	"sync"
+	"sync/atomic"
 	"testing"
+	_ "unsafe"
 )
 
 // We assume that the Once.Do tests have already covered parallelism.
@@ -182,6 +185,53 @@
 	panic("x")
 }
 
+func TestOnceXGC(t *testing.T) {
+	fns := map[string]func([]byte) func(){
+		"OnceFunc": func(buf []byte) func() {
+			return sync.OnceFunc(func() { buf[0] = 1 })
+		},
+		"OnceValue": func(buf []byte) func() {
+			f := sync.OnceValue(func() any { buf[0] = 1; return nil })
+			return func() { f() }
+		},
+		"OnceValues": func(buf []byte) func() {
+			f := sync.OnceValues(func() (any, any) { buf[0] = 1; return nil, nil })
+			return func() { f() }
+		},
+	}
+	for n, fn := range fns {
+		t.Run(n, func(t *testing.T) {
+			buf := make([]byte, 1024)
+			var gc atomic.Bool
+			runtime.SetFinalizer(&buf[0], func(_ *byte) {
+				gc.Store(true)
+			})
+			f := fn(buf)
+			gcwaitfin()
+			if gc.Load() != false {
+				t.Fatal("wrapped function garbage collected too early")
+			}
+			f()
+			gcwaitfin()
+			if gc.Load() != true {
+				// Even if f is still alive, the function passed to Once(Func|Value|Values)
+				// is not kept alive after the first call to f.
+				t.Fatal("wrapped function should be garbage collected, but still live")
+			}
+			f()
+		})
+	}
+}
+
+// gcwaitfin performs garbage collection and waits for all finalizers to run.
+func gcwaitfin() {
+	runtime.GC()
+	runtime_blockUntilEmptyFinalizerQueue(math.MaxInt64)
+}
+
+//go:linkname runtime_blockUntilEmptyFinalizerQueue runtime.blockUntilEmptyFinalizerQueue
+func runtime_blockUntilEmptyFinalizerQueue(int64) bool
+
 var (
 	onceFunc = sync.OnceFunc(func() {})
 
diff --git a/src/sync/pool.go b/src/sync/pool.go
index cf01e2e..3359aba 100644
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -76,7 +76,8 @@
 }
 
 // from runtime
-func fastrandn(n uint32) uint32
+//go:linkname runtime_randn runtime.randn
+func runtime_randn(n uint32) uint32
 
 var poolRaceHash [128]uint64
 
@@ -97,7 +98,7 @@
 		return
 	}
 	if race.Enabled {
-		if fastrandn(4) == 0 {
+		if runtime_randn(4) == 0 {
 			// Randomly drop x on floor.
 			return
 		}
@@ -196,6 +197,13 @@
 // returns poolLocal pool for the P and the P's id.
 // Caller must call runtime_procUnpin() when done with the pool.
 func (p *Pool) pin() (*poolLocal, int) {
+	// Check whether p is nil to get a panic.
+	// Otherwise the nil dereference happens while the m is pinned,
+	// causing a fatal error rather than a panic.
+	if p == nil {
+		panic("nil Pool")
+	}
+
 	pid := runtime_procPin()
 	// In pinSlow we store to local and then to localSize, here we load in opposite order.
 	// Since we've disabled preemption, GC cannot happen in between.
diff --git a/src/sync/pool_test.go b/src/sync/pool_test.go
index 5e38597..1b6746d 100644
--- a/src/sync/pool_test.go
+++ b/src/sync/pool_test.go
@@ -247,6 +247,28 @@
 	}
 }
 
+func TestNilPool(t *testing.T) {
+	catch := func() {
+		if recover() == nil {
+			t.Error("expected panic")
+		}
+	}
+
+	var p *Pool
+	t.Run("Get", func(t *testing.T) {
+		defer catch()
+		if p.Get() != nil {
+			t.Error("expected empty")
+		}
+		t.Error("should have panicked already")
+	})
+	t.Run("Put", func(t *testing.T) {
+		defer catch()
+		p.Put("a")
+		t.Error("should have panicked already")
+	})
+}
+
 func BenchmarkPool(b *testing.B) {
 	var p Pool
 	b.RunParallel(func(pb *testing.PB) {
diff --git a/src/sync/poolqueue.go b/src/sync/poolqueue.go
index 631f2c1..5c640f9 100644
--- a/src/sync/poolqueue.go
+++ b/src/sync/poolqueue.go
@@ -31,7 +31,7 @@
 	// The head index is stored in the most-significant bits so
 	// that we can atomically add to it and the overflow is
 	// harmless.
-	headTail uint64
+	headTail atomic.Uint64
 
 	// vals is a ring buffer of interface{} values stored in this
 	// dequeue. The size of this must be a power of 2.
@@ -78,7 +78,7 @@
 // pushHead adds val at the head of the queue. It returns false if the
 // queue is full. It must only be called by a single producer.
 func (d *poolDequeue) pushHead(val any) bool {
-	ptrs := atomic.LoadUint64(&d.headTail)
+	ptrs := d.headTail.Load()
 	head, tail := d.unpack(ptrs)
 	if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
 		// Queue is full.
@@ -102,7 +102,7 @@
 
 	// Increment head. This passes ownership of slot to popTail
 	// and acts as a store barrier for writing the slot.
-	atomic.AddUint64(&d.headTail, 1<<dequeueBits)
+	d.headTail.Add(1 << dequeueBits)
 	return true
 }
 
@@ -112,7 +112,7 @@
 func (d *poolDequeue) popHead() (any, bool) {
 	var slot *eface
 	for {
-		ptrs := atomic.LoadUint64(&d.headTail)
+		ptrs := d.headTail.Load()
 		head, tail := d.unpack(ptrs)
 		if tail == head {
 			// Queue is empty.
@@ -124,7 +124,7 @@
 		// slot.
 		head--
 		ptrs2 := d.pack(head, tail)
-		if atomic.CompareAndSwapUint64(&d.headTail, ptrs, ptrs2) {
+		if d.headTail.CompareAndSwap(ptrs, ptrs2) {
 			// We successfully took back slot.
 			slot = &d.vals[head&uint32(len(d.vals)-1)]
 			break
@@ -147,7 +147,7 @@
 func (d *poolDequeue) popTail() (any, bool) {
 	var slot *eface
 	for {
-		ptrs := atomic.LoadUint64(&d.headTail)
+		ptrs := d.headTail.Load()
 		head, tail := d.unpack(ptrs)
 		if tail == head {
 			// Queue is empty.
@@ -158,7 +158,7 @@
 		// above) and increment tail. If this succeeds, then
 		// we own the slot at tail.
 		ptrs2 := d.pack(head, tail+1)
-		if atomic.CompareAndSwapUint64(&d.headTail, ptrs, ptrs2) {
+		if d.headTail.CompareAndSwap(ptrs, ptrs2) {
 			// Success.
 			slot = &d.vals[tail&uint32(len(d.vals)-1)]
 			break
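
The poolDequeue change switches headTail to atomic.Uint64 but keeps the same encoding: the 32-bit head index lives in the high half and the tail in the low half, so a single atomic Add of 1<<dequeueBits bumps only the head. A minimal sketch of that packing (pack and unpack mirror the unexported methods in poolqueue.go):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	const dequeueBits = 32

	func pack(head, tail uint32) uint64 {
		const mask = 1<<dequeueBits - 1
		return (uint64(head) << dequeueBits) | uint64(tail&mask)
	}

	func unpack(ptrs uint64) (head, tail uint32) {
		const mask = 1<<dequeueBits - 1
		head = uint32((ptrs >> dequeueBits) & mask)
		tail = uint32(ptrs & mask)
		return
	}

	func main() {
		var headTail atomic.Uint64
		headTail.Store(pack(7, 3))

		// Adding 1<<dequeueBits atomically increments only the head half,
		// which is why overflow into the high bits is harmless.
		headTail.Add(1 << dequeueBits)

		head, tail := unpack(headTail.Load())
		fmt.Println(head, tail) // 8 3
	}
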
diff --git a/src/sync/rwmutex.go b/src/sync/rwmutex.go
index 1317624..f445b66 100644
--- a/src/sync/rwmutex.go
+++ b/src/sync/rwmutex.go
@@ -19,12 +19,11 @@
 //
 // A RWMutex must not be copied after first use.
 //
-// If a goroutine holds a RWMutex for reading and another goroutine might
-// call Lock, no goroutine should expect to be able to acquire a read lock
-// until the initial read lock is released. In particular, this prohibits
-// recursive read locking. This is to ensure that the lock eventually becomes
-// available; a blocked Lock call excludes new readers from acquiring the
-// lock.
+// If any goroutine calls Lock while the lock is already held by
+// one or more readers, concurrent calls to RLock will block until
+// the writer has acquired (and released) the lock, to ensure that
+// the lock eventually becomes available to the writer.
+// Note that this prohibits recursive read-locking.
 //
 // In the terminology of the Go memory model,
 // the n'th call to Unlock “synchronizes before” the m'th call to Lock
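
The rewritten RWMutex comment describes writer preference: once a writer is waiting, new readers block until the writer has had its turn, which is why recursive read-locking can deadlock. A small runnable sketch of the ordering (the sleeps are illustrative only):

	package main

	import (
		"fmt"
		"sync"
		"time"
	)

	func main() {
		var mu sync.RWMutex

		mu.RLock() // first reader holds the lock
		go func() {
			mu.Lock() // writer arrives and blocks behind the reader
			fmt.Println("writer acquired")
			mu.Unlock()
		}()
		time.Sleep(100 * time.Millisecond)

		done := make(chan struct{})
		go func() {
			// A reader arriving while a writer is waiting blocks until the
			// writer has acquired and released the lock; if this RLock were
			// a recursive read-lock from the first reader, it would deadlock.
			mu.RLock()
			fmt.Println("second reader acquired")
			mu.RUnlock()
			close(done)
		}()
		time.Sleep(100 * time.Millisecond)

		mu.RUnlock() // first reader releases; writer runs, then the new reader
		<-done
	}
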
diff --git a/src/syscall/asm_linux_386.s b/src/syscall/asm_linux_386.s
index a8e63f7..d14df50 100644
--- a/src/syscall/asm_linux_386.s
+++ b/src/syscall/asm_linux_386.s
@@ -13,24 +13,24 @@
 // instead of the glibc-specific "CALL 0x10(GS)".
 #define INVOKE_SYSCALL	INT	$0x80
 
-// func rawVforkSyscall(trap, a1, a2 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-20
+// func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-24
 	MOVL	trap+0(FP), AX	// syscall entry
 	MOVL	a1+4(FP), BX
 	MOVL	a2+8(FP), CX
-	MOVL	$0, DX
+	MOVL	a3+12(FP), DX
 	POPL	SI // preserve return address
 	INVOKE_SYSCALL
 	PUSHL	SI
 	CMPL	AX, $0xfffff001
 	JLS	ok
-	MOVL	$-1, r1+12(FP)
+	MOVL	$-1, r1+16(FP)
 	NEGL	AX
-	MOVL	AX, err+16(FP)
+	MOVL	AX, err+20(FP)
 	RET
 ok:
-	MOVL	AX, r1+12(FP)
-	MOVL	$0, err+16(FP)
+	MOVL	AX, r1+16(FP)
+	MOVL	$0, err+20(FP)
 	RET
 
 // func rawSyscallNoError(trap uintptr, a1, a2, a3 uintptr) (r1, r2 uintptr);
diff --git a/src/syscall/asm_linux_amd64.s b/src/syscall/asm_linux_amd64.s
index 00d6fed..da170c5 100644
--- a/src/syscall/asm_linux_amd64.s
+++ b/src/syscall/asm_linux_amd64.s
@@ -11,11 +11,11 @@
 
 #define SYS_gettimeofday 96
 
-// func rawVforkSyscall(trap, a1, a2 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-40
+// func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-48
 	MOVQ	a1+8(FP), DI
 	MOVQ	a2+16(FP), SI
-	MOVQ	$0, DX
+	MOVQ	a3+24(FP), DX
 	MOVQ	$0, R10
 	MOVQ	$0, R8
 	MOVQ	$0, R9
@@ -25,13 +25,13 @@
 	PUSHQ	R12
 	CMPQ	AX, $0xfffffffffffff001
 	JLS	ok2
-	MOVQ	$-1, r1+24(FP)
+	MOVQ	$-1, r1+32(FP)
 	NEGQ	AX
-	MOVQ	AX, err+32(FP)
+	MOVQ	AX, err+40(FP)
 	RET
 ok2:
-	MOVQ	AX, r1+24(FP)
-	MOVQ	$0, err+32(FP)
+	MOVQ	AX, r1+32(FP)
+	MOVQ	$0, err+40(FP)
 	RET
 
 // func rawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
diff --git a/src/syscall/asm_linux_arm.s b/src/syscall/asm_linux_arm.s
index d399541..06a35b5 100644
--- a/src/syscall/asm_linux_arm.s
+++ b/src/syscall/asm_linux_arm.s
@@ -41,25 +41,25 @@
 	BL	runtime·exitsyscall(SB)
 	RET
 
-// func rawVforkSyscall(trap, a1, a2 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-20
+// func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-24
 	MOVW	trap+0(FP), R7	// syscall entry
 	MOVW	a1+4(FP), R0
 	MOVW	a2+8(FP), R1
-	MOVW	$0, R2
+	MOVW	a3+12(FP), R2
 	SWI	$0
 	MOVW	$0xfffff001, R1
 	CMP	R1, R0
 	BLS	ok
 	MOVW	$-1, R1
-	MOVW	R1, r1+12(FP)
+	MOVW	R1, r1+16(FP)
 	RSB	$0, R0, R0
-	MOVW	R0, err+16(FP)
+	MOVW	R0, err+20(FP)
 	RET
 ok:
-	MOVW	R0, r1+12(FP)
+	MOVW	R0, r1+16(FP)
 	MOVW	$0, R0
-	MOVW	R0, err+16(FP)
+	MOVW	R0, err+20(FP)
 	RET
 
 // func rawSyscallNoError(trap uintptr, a1, a2, a3 uintptr) (r1, r2 uintptr);
diff --git a/src/syscall/asm_linux_arm64.s b/src/syscall/asm_linux_arm64.s
index 7fa789a..883645f 100644
--- a/src/syscall/asm_linux_arm64.s
+++ b/src/syscall/asm_linux_arm64.s
@@ -4,11 +4,11 @@
 
 #include "textflag.h"
 
-// func rawVforkSyscall(trap, a1, a2 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT,$0-40
+// func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT,$0-48
 	MOVD	a1+8(FP), R0
 	MOVD	a2+16(FP), R1
-	MOVD	$0, R2
+	MOVD	a3+24(FP), R2
 	MOVD	$0, R3
 	MOVD	$0, R4
 	MOVD	$0, R5
@@ -17,13 +17,13 @@
 	CMN	$4095, R0
 	BCC	ok
 	MOVD	$-1, R4
-	MOVD	R4, r1+24(FP)	// r1
+	MOVD	R4, r1+32(FP)	// r1
 	NEG	R0, R0
-	MOVD	R0, err+32(FP)	// errno
+	MOVD	R0, err+40(FP)	// errno
 	RET
 ok:
-	MOVD	R0, r1+24(FP)	// r1
-	MOVD	ZR, err+32(FP)	// errno
+	MOVD	R0, r1+32(FP)	// r1
+	MOVD	ZR, err+40(FP)	// errno
 	RET
 
 // func rawSyscallNoError(trap uintptr, a1, a2, a3 uintptr) (r1, r2 uintptr);
diff --git a/src/syscall/asm_linux_loong64.s b/src/syscall/asm_linux_loong64.s
index 1a7457c..2bbf0f1 100644
--- a/src/syscall/asm_linux_loong64.s
+++ b/src/syscall/asm_linux_loong64.s
@@ -8,11 +8,11 @@
 // System calls for loong64, Linux
 //
 
-// func rawVforkSyscall(trap, a1, a2 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT,$0-40
+// func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT,$0-48
 	MOVV	a1+8(FP), R4
 	MOVV	a2+16(FP), R5
-	MOVV	$0, R6
+	MOVV	a3+24(FP), R6
 	MOVV	$0, R7
 	MOVV	$0, R8
 	MOVV	$0, R9
@@ -21,13 +21,13 @@
 	MOVW	$-4096, R12
 	BGEU	R12, R4, ok
 	MOVV	$-1, R12
-	MOVV	R12, r1+24(FP)		// r1
+	MOVV	R12, r1+32(FP)	// r1
 	SUBVU	R4, R0, R4
-	MOVV	R4, err+32(FP)		// errno
+	MOVV	R4, err+40(FP)	// errno
 	RET
 ok:
-	MOVV	R4, r1+24(FP)	// r1
-	MOVV	R0, err+32(FP)	// errno
+	MOVV	R4, r1+32(FP)	// r1
+	MOVV	R0, err+40(FP)	// errno
 	RET
 
 TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
diff --git a/src/syscall/asm_linux_mips64x.s b/src/syscall/asm_linux_mips64x.s
index 6c7a6bc..1784e3a 100644
--- a/src/syscall/asm_linux_mips64x.s
+++ b/src/syscall/asm_linux_mips64x.s
@@ -10,11 +10,11 @@
 // System calls for mips64, Linux
 //
 
-// func rawVforkSyscall(trap, a1, a2 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-40
+// func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-48
 	MOVV	a1+8(FP), R4
 	MOVV	a2+16(FP), R5
-	MOVV	R0, R6
+	MOVV	a3+24(FP), R6
 	MOVV	R0, R7
 	MOVV	R0, R8
 	MOVV	R0, R9
@@ -22,12 +22,12 @@
 	SYSCALL
 	BEQ	R7, ok
 	MOVV	$-1, R1
-	MOVV	R1, r1+24(FP)	// r1
-	MOVV	R2, err+32(FP)	// errno
+	MOVV	R1, r1+32(FP)	// r1
+	MOVV	R2, err+40(FP)	// errno
 	RET
 ok:
-	MOVV	R2, r1+24(FP)	// r1
-	MOVV	R0, err+32(FP)	// errno
+	MOVV	R2, r1+32(FP)	// r1
+	MOVV	R0, err+40(FP)	// errno
 	RET
 
 TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
diff --git a/src/syscall/asm_linux_mipsx.s b/src/syscall/asm_linux_mipsx.s
index 99f0154..7544abb 100644
--- a/src/syscall/asm_linux_mipsx.s
+++ b/src/syscall/asm_linux_mipsx.s
@@ -45,21 +45,21 @@
 	JAL	runtime·exitsyscall(SB)
 	RET
 
-// func rawVforkSyscall(trap, a1, a2 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-20
+// func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-24
 	MOVW	a1+4(FP), R4
 	MOVW	a2+8(FP), R5
-	MOVW	R0, R6
+	MOVW	a3+12(FP), R6
 	MOVW	trap+0(FP), R2	// syscall entry
 	SYSCALL
 	BEQ	R7, ok
 	MOVW	$-1, R1
-	MOVW	R1, r1+12(FP)	// r1
-	MOVW	R2, err+16(FP)	// errno
+	MOVW	R1, r1+16(FP)	// r1
+	MOVW	R2, err+20(FP)	// errno
 	RET
 ok:
-	MOVW	R2, r1+12(FP)	// r1
-	MOVW	R0, err+16(FP)	// errno
+	MOVW	R2, r1+16(FP)	// r1
+	MOVW	R0, err+20(FP)	// errno
 	RET
 
 TEXT ·rawSyscallNoError(SB),NOSPLIT,$20-24
diff --git a/src/syscall/asm_linux_ppc64x.s b/src/syscall/asm_linux_ppc64x.s
index b9412fe..6c29868 100644
--- a/src/syscall/asm_linux_ppc64x.s
+++ b/src/syscall/asm_linux_ppc64x.s
@@ -10,11 +10,11 @@
 // System calls for ppc64, Linux
 //
 
-// func rawVforkSyscall(trap, a1, a2 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-40
+// func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-48
 	MOVD	a1+8(FP), R3
 	MOVD	a2+16(FP), R4
-	MOVD	R0, R5
+	MOVD	a3+24(FP), R5
 	MOVD	R0, R6
 	MOVD	R0, R7
 	MOVD	R0, R8
@@ -22,12 +22,12 @@
 	SYSCALL R9
 	BVC	ok
 	MOVD	$-1, R4
-	MOVD	R4, r1+24(FP)	// r1
-	MOVD	R3, err+32(FP)	// errno
+	MOVD	R4, r1+32(FP)	// r1
+	MOVD	R3, err+40(FP)	// errno
 	RET
 ok:
-	MOVD	R3, r1+24(FP)	// r1
-	MOVD	R0, err+32(FP)	// errno
+	MOVD	R3, r1+32(FP)	// r1
+	MOVD	R0, err+40(FP)	// errno
 	RET
 
 TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
diff --git a/src/syscall/asm_linux_riscv64.s b/src/syscall/asm_linux_riscv64.s
index 6fd09ec..0386b36 100644
--- a/src/syscall/asm_linux_riscv64.s
+++ b/src/syscall/asm_linux_riscv64.s
@@ -8,11 +8,11 @@
 // System calls for riscv64, Linux
 //
 
-// func rawVforkSyscall(trap, a1, a2 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-40
+// func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-48
 	MOV	a1+8(FP), A0
 	MOV	a2+16(FP), A1
-	MOV	ZERO, A2
+	MOV	a3+24(FP), A2
 	MOV	ZERO, A3
 	MOV	ZERO, A4
 	MOV	ZERO, A5
@@ -20,14 +20,14 @@
 	ECALL
 	MOV	$-4096, T0
 	BLTU	T0, A0, err
-	MOV	A0, r1+24(FP)	// r1
-	MOV	ZERO, err+32(FP)	// errno
+	MOV	A0, r1+32(FP)	// r1
+	MOV	ZERO, err+40(FP)	// errno
 	RET
 err:
 	MOV	$-1, T0
-	MOV	T0, r1+24(FP)	// r1
+	MOV	T0, r1+32(FP)	// r1
 	SUB	A0, ZERO, A0
-	MOV	A0, err+32(FP)	// errno
+	MOV	A0, err+40(FP)	// errno
 	RET
 
 TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
diff --git a/src/syscall/asm_linux_s390x.s b/src/syscall/asm_linux_s390x.s
index 41c34b1..e76b1e3 100644
--- a/src/syscall/asm_linux_s390x.s
+++ b/src/syscall/asm_linux_s390x.s
@@ -8,11 +8,11 @@
 // System calls for s390x, Linux
 //
 
-// func rawVforkSyscall(trap, a1, a2 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-40
+// func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-48
 	MOVD	a1+8(FP), R2
 	MOVD	a2+16(FP), R3
-	MOVD	$0, R4
+	MOVD	a3+24(FP), R4
 	MOVD	$0, R5
 	MOVD	$0, R6
 	MOVD	$0, R7
@@ -20,13 +20,13 @@
 	SYSCALL
 	MOVD	$0xfffffffffffff001, R8
 	CMPUBLT	R2, R8, ok2
-	MOVD	$-1, r1+24(FP)
+	MOVD	$-1, r1+32(FP)
 	NEG	R2, R2
-	MOVD	R2, err+32(FP)	// errno
+	MOVD	R2, err+40(FP)	// errno
 	RET
 ok2:
-	MOVD	R2, r1+24(FP)
-	MOVD	$0, err+32(FP)	// errno
+	MOVD	R2, r1+32(FP)
+	MOVD	$0, err+40(FP)	// errno
 	RET
 
 // func rawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
diff --git a/src/syscall/asm_openbsd_ppc64.s b/src/syscall/asm_openbsd_ppc64.s
new file mode 100644
index 0000000..58216f8
--- /dev/null
+++ b/src/syscall/asm_openbsd_ppc64.s
@@ -0,0 +1,32 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+//
+// System call support for PPC64, OpenBSD
+//
+
+// Provide these function names via assembly so they are provided as ABI0,
+// rather than ABIInternal.
+
+// func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
+TEXT	·Syscall(SB),NOSPLIT,$0-56
+	JMP	·syscallInternal(SB)
+
+// func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+TEXT	·Syscall6(SB),NOSPLIT,$0-80
+	JMP	·syscall6Internal(SB)
+
+// func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
+TEXT	·RawSyscall(SB),NOSPLIT,$0-56
+	JMP	·rawSyscallInternal(SB)
+
+// func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+TEXT	·RawSyscall6(SB),NOSPLIT,$0-80
+	JMP	·rawSyscall6Internal(SB)
+
+// func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno)
+TEXT	·Syscall9(SB),NOSPLIT,$0-104
+	JMP	·syscall9Internal(SB)
diff --git a/src/syscall/asm_openbsd_riscv64.s b/src/syscall/asm_openbsd_riscv64.s
new file mode 100644
index 0000000..fbb473c
--- /dev/null
+++ b/src/syscall/asm_openbsd_riscv64.s
@@ -0,0 +1,32 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+//
+// System call support for RISCV64, OpenBSD
+//
+
+// Provide these function names via assembly so they are provided as ABI0,
+// rather than ABIInternal.
+
+// func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
+TEXT	·Syscall(SB),NOSPLIT,$0-56
+	JMP	·syscallInternal(SB)
+
+// func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+TEXT	·Syscall6(SB),NOSPLIT,$0-80
+	JMP	·syscall6Internal(SB)
+
+// func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
+TEXT	·RawSyscall(SB),NOSPLIT,$0-56
+	JMP	·rawSyscallInternal(SB)
+
+// func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+TEXT	·RawSyscall6(SB),NOSPLIT,$0-80
+	JMP	·rawSyscall6Internal(SB)
+
+// func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno)
+TEXT	·Syscall9(SB),NOSPLIT,$0-104
+	JMP	·syscall9Internal(SB)
diff --git a/src/syscall/bpf_bsd.go b/src/syscall/bpf_bsd.go
index 735c078..27d0ca3 100644
--- a/src/syscall/bpf_bsd.go
+++ b/src/syscall/bpf_bsd.go
@@ -1,8 +1,8 @@
-// Copyright 2011 The Go Authors. All rights reserved.
+// Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build dragonfly || freebsd || netbsd || openbsd
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd
 
 // Berkeley packet filter for BSD variants
 
@@ -25,18 +25,18 @@
 // Deprecated: Use golang.org/x/net/bpf instead.
 func BpfBuflen(fd int) (int, error) {
 	var l int
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCGBLEN, uintptr(unsafe.Pointer(&l)))
-	if err != 0 {
-		return 0, Errno(err)
+	err := ioctlPtr(fd, BIOCGBLEN, unsafe.Pointer(&l))
+	if err != nil {
+		return 0, err
 	}
 	return l, nil
 }
 
 // Deprecated: Use golang.org/x/net/bpf instead.
 func SetBpfBuflen(fd, l int) (int, error) {
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCSBLEN, uintptr(unsafe.Pointer(&l)))
-	if err != 0 {
-		return 0, Errno(err)
+	err := ioctlPtr(fd, BIOCSBLEN, unsafe.Pointer(&l))
+	if err != nil {
+		return 0, err
 	}
 	return l, nil
 }
@@ -44,36 +44,36 @@
 // Deprecated: Use golang.org/x/net/bpf instead.
 func BpfDatalink(fd int) (int, error) {
 	var t int
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCGDLT, uintptr(unsafe.Pointer(&t)))
-	if err != 0 {
-		return 0, Errno(err)
+	err := ioctlPtr(fd, BIOCGDLT, unsafe.Pointer(&t))
+	if err != nil {
+		return 0, err
 	}
 	return t, nil
 }
 
 // Deprecated: Use golang.org/x/net/bpf instead.
 func SetBpfDatalink(fd, t int) (int, error) {
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCSDLT, uintptr(unsafe.Pointer(&t)))
-	if err != 0 {
-		return 0, Errno(err)
+	err := ioctlPtr(fd, BIOCSDLT, unsafe.Pointer(&t))
+	if err != nil {
+		return 0, err
 	}
 	return t, nil
 }
 
 // Deprecated: Use golang.org/x/net/bpf instead.
 func SetBpfPromisc(fd, m int) error {
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCPROMISC, uintptr(unsafe.Pointer(&m)))
-	if err != 0 {
-		return Errno(err)
+	err := ioctlPtr(fd, BIOCPROMISC, unsafe.Pointer(&m))
+	if err != nil {
+		return err
 	}
 	return nil
 }
 
 // Deprecated: Use golang.org/x/net/bpf instead.
 func FlushBpf(fd int) error {
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCFLUSH, 0)
-	if err != 0 {
-		return Errno(err)
+	err := ioctlPtr(fd, BIOCFLUSH, nil)
+	if err != nil {
+		return err
 	}
 	return nil
 }
@@ -86,9 +86,9 @@
 // Deprecated: Use golang.org/x/net/bpf instead.
 func BpfInterface(fd int, name string) (string, error) {
 	var iv ivalue
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCGETIF, uintptr(unsafe.Pointer(&iv)))
-	if err != 0 {
-		return "", Errno(err)
+	err := ioctlPtr(fd, BIOCGETIF, unsafe.Pointer(&iv))
+	if err != nil {
+		return "", err
 	}
 	return name, nil
 }
@@ -97,9 +97,9 @@
 func SetBpfInterface(fd int, name string) error {
 	var iv ivalue
 	copy(iv.name[:], []byte(name))
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCSETIF, uintptr(unsafe.Pointer(&iv)))
-	if err != 0 {
-		return Errno(err)
+	err := ioctlPtr(fd, BIOCSETIF, unsafe.Pointer(&iv))
+	if err != nil {
+		return err
 	}
 	return nil
 }
@@ -107,18 +107,18 @@
 // Deprecated: Use golang.org/x/net/bpf instead.
 func BpfTimeout(fd int) (*Timeval, error) {
 	var tv Timeval
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCGRTIMEOUT, uintptr(unsafe.Pointer(&tv)))
-	if err != 0 {
-		return nil, Errno(err)
+	err := ioctlPtr(fd, BIOCGRTIMEOUT, unsafe.Pointer(&tv))
+	if err != nil {
+		return nil, err
 	}
 	return &tv, nil
 }
 
 // Deprecated: Use golang.org/x/net/bpf instead.
 func SetBpfTimeout(fd int, tv *Timeval) error {
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCSRTIMEOUT, uintptr(unsafe.Pointer(tv)))
-	if err != 0 {
-		return Errno(err)
+	err := ioctlPtr(fd, BIOCSRTIMEOUT, unsafe.Pointer(tv))
+	if err != nil {
+		return err
 	}
 	return nil
 }
@@ -126,18 +126,18 @@
 // Deprecated: Use golang.org/x/net/bpf instead.
 func BpfStats(fd int) (*BpfStat, error) {
 	var s BpfStat
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCGSTATS, uintptr(unsafe.Pointer(&s)))
-	if err != 0 {
-		return nil, Errno(err)
+	err := ioctlPtr(fd, BIOCGSTATS, unsafe.Pointer(&s))
+	if err != nil {
+		return nil, err
 	}
 	return &s, nil
 }
 
 // Deprecated: Use golang.org/x/net/bpf instead.
 func SetBpfImmediate(fd, m int) error {
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCIMMEDIATE, uintptr(unsafe.Pointer(&m)))
-	if err != 0 {
-		return Errno(err)
+	err := ioctlPtr(fd, BIOCIMMEDIATE, unsafe.Pointer(&m))
+	if err != nil {
+		return err
 	}
 	return nil
 }
@@ -147,9 +147,9 @@
 	var p BpfProgram
 	p.Len = uint32(len(i))
 	p.Insns = (*BpfInsn)(unsafe.Pointer(&i[0]))
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCSETF, uintptr(unsafe.Pointer(&p)))
-	if err != 0 {
-		return Errno(err)
+	err := ioctlPtr(fd, BIOCSETF, unsafe.Pointer(&p))
+	if err != nil {
+		return err
 	}
 	return nil
 }
@@ -157,9 +157,9 @@
 // Deprecated: Use golang.org/x/net/bpf instead.
 func CheckBpfVersion(fd int) error {
 	var v BpfVersion
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCVERSION, uintptr(unsafe.Pointer(&v)))
-	if err != 0 {
-		return Errno(err)
+	err := ioctlPtr(fd, BIOCVERSION, unsafe.Pointer(&v))
+	if err != nil {
+		return err
 	}
 	if v.Major != BPF_MAJOR_VERSION || v.Minor != BPF_MINOR_VERSION {
 		return EINVAL
@@ -170,18 +170,18 @@
 // Deprecated: Use golang.org/x/net/bpf instead.
 func BpfHeadercmpl(fd int) (int, error) {
 	var f int
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCGHDRCMPLT, uintptr(unsafe.Pointer(&f)))
-	if err != 0 {
-		return 0, Errno(err)
+	err := ioctlPtr(fd, BIOCGHDRCMPLT, unsafe.Pointer(&f))
+	if err != nil {
+		return 0, err
 	}
 	return f, nil
 }
 
 // Deprecated: Use golang.org/x/net/bpf instead.
 func SetBpfHeadercmpl(fd, f int) error {
-	_, _, err := Syscall(SYS_IOCTL, uintptr(fd), BIOCSHDRCMPLT, uintptr(unsafe.Pointer(&f)))
-	if err != 0 {
-		return Errno(err)
+	err := ioctlPtr(fd, BIOCSHDRCMPLT, unsafe.Pointer(&f))
+	if err != nil {
+		return err
 	}
 	return nil
 }
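
The bpf changes above replace raw Syscall(SYS_IOCTL, ...) calls with the package-internal ioctlPtr helper, so errors come back as error values instead of raw Errno results. A hedged sketch of what such a helper looks like (ioctlHelper and the winsize struct are illustrative only; the real helper is unexported inside the syscall package):

	package main

	import (
		"fmt"
		"syscall"
		"unsafe"
	)

	// ioctlHelper wraps the raw ioctl syscall and converts a nonzero errno
	// into an error value, the same shape as the helper used above.
	func ioctlHelper(fd int, req uint, arg unsafe.Pointer) error {
		_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
		if errno != 0 {
			return errno
		}
		return nil
	}

	func main() {
		// Example: query the terminal window size on stdin (TIOCGWINSZ).
		type winsize struct{ rows, cols, xpix, ypix uint16 }
		var ws winsize
		if err := ioctlHelper(0, syscall.TIOCGWINSZ, unsafe.Pointer(&ws)); err != nil {
			fmt.Println("ioctl:", err)
			return
		}
		fmt.Println("terminal:", ws.rows, "rows x", ws.cols, "cols")
	}
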
diff --git a/src/syscall/bpf_darwin.go b/src/syscall/bpf_darwin.go
deleted file mode 100644
index fb86049..0000000
--- a/src/syscall/bpf_darwin.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Berkeley packet filter for Darwin
-
-package syscall
-
-import (
-	"unsafe"
-)
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func BpfStmt(code, k int) *BpfInsn {
-	return &BpfInsn{Code: uint16(code), K: uint32(k)}
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func BpfJump(code, k, jt, jf int) *BpfInsn {
-	return &BpfInsn{Code: uint16(code), Jt: uint8(jt), Jf: uint8(jf), K: uint32(k)}
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func BpfBuflen(fd int) (int, error) {
-	var l int
-	err := ioctlPtr(fd, BIOCGBLEN, unsafe.Pointer(&l))
-	if err != nil {
-		return 0, err
-	}
-	return l, nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func SetBpfBuflen(fd, l int) (int, error) {
-	err := ioctlPtr(fd, BIOCSBLEN, unsafe.Pointer(&l))
-	if err != nil {
-		return 0, err
-	}
-	return l, nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func BpfDatalink(fd int) (int, error) {
-	var t int
-	err := ioctlPtr(fd, BIOCGDLT, unsafe.Pointer(&t))
-	if err != nil {
-		return 0, err
-	}
-	return t, nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func SetBpfDatalink(fd, t int) (int, error) {
-	err := ioctlPtr(fd, BIOCSDLT, unsafe.Pointer(&t))
-	if err != nil {
-		return 0, err
-	}
-	return t, nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func SetBpfPromisc(fd, m int) error {
-	err := ioctlPtr(fd, BIOCPROMISC, unsafe.Pointer(&m))
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func FlushBpf(fd int) error {
-	err := ioctlPtr(fd, BIOCFLUSH, nil)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-type ivalue struct {
-	name  [IFNAMSIZ]byte
-	value int16
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func BpfInterface(fd int, name string) (string, error) {
-	var iv ivalue
-	err := ioctlPtr(fd, BIOCGETIF, unsafe.Pointer(&iv))
-	if err != nil {
-		return "", err
-	}
-	return name, nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func SetBpfInterface(fd int, name string) error {
-	var iv ivalue
-	copy(iv.name[:], []byte(name))
-	err := ioctlPtr(fd, BIOCSETIF, unsafe.Pointer(&iv))
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func BpfTimeout(fd int) (*Timeval, error) {
-	var tv Timeval
-	err := ioctlPtr(fd, BIOCGRTIMEOUT, unsafe.Pointer(&tv))
-	if err != nil {
-		return nil, err
-	}
-	return &tv, nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func SetBpfTimeout(fd int, tv *Timeval) error {
-	err := ioctlPtr(fd, BIOCSRTIMEOUT, unsafe.Pointer(tv))
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func BpfStats(fd int) (*BpfStat, error) {
-	var s BpfStat
-	err := ioctlPtr(fd, BIOCGSTATS, unsafe.Pointer(&s))
-	if err != nil {
-		return nil, err
-	}
-	return &s, nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func SetBpfImmediate(fd, m int) error {
-	err := ioctlPtr(fd, BIOCIMMEDIATE, unsafe.Pointer(&m))
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func SetBpf(fd int, i []BpfInsn) error {
-	var p BpfProgram
-	p.Len = uint32(len(i))
-	p.Insns = (*BpfInsn)(unsafe.Pointer(&i[0]))
-	err := ioctlPtr(fd, BIOCSETF, unsafe.Pointer(&p))
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func CheckBpfVersion(fd int) error {
-	var v BpfVersion
-	err := ioctlPtr(fd, BIOCVERSION, unsafe.Pointer(&v))
-	if err != nil {
-		return err
-	}
-	if v.Major != BPF_MAJOR_VERSION || v.Minor != BPF_MINOR_VERSION {
-		return EINVAL
-	}
-	return nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func BpfHeadercmpl(fd int) (int, error) {
-	var f int
-	err := ioctlPtr(fd, BIOCGHDRCMPLT, unsafe.Pointer(&f))
-	if err != nil {
-		return 0, err
-	}
-	return f, nil
-}
-
-// Deprecated: Use golang.org/x/net/bpf instead.
-func SetBpfHeadercmpl(fd, f int) error {
-	err := ioctlPtr(fd, BIOCSHDRCMPLT, unsafe.Pointer(&f))
-	if err != nil {
-		return err
-	}
-	return nil
-}
diff --git a/src/syscall/dirent_test.go b/src/syscall/dirent_test.go
index 68e766e..befe78f 100644
--- a/src/syscall/dirent_test.go
+++ b/src/syscall/dirent_test.go
@@ -82,7 +82,7 @@
 		if err != nil {
 			t.Fatalf("names[%d] is non-integer %q: %v", i, names[i], err)
 		}
-		if expected := string(strings.Repeat(name[:1], filenameMinSize+ord)); name != expected {
+		if expected := strings.Repeat(name[:1], filenameMinSize+ord); name != expected {
 			t.Errorf("names[%d] is %q (len %d); expected %q (len %d)", i, name, len(name), expected, len(expected))
 		}
 	}
diff --git a/src/syscall/exec_bsd.go b/src/syscall/exec_bsd.go
index 0b0cd24..149cc2f 100644
--- a/src/syscall/exec_bsd.go
+++ b/src/syscall/exec_bsd.go
@@ -64,7 +64,7 @@
 		ngroups, groups uintptr
 	)
 
-	rlim, rlimOK := origRlimitNofile.Load().(Rlimit)
+	rlim := origRlimitNofile.Load()
 
 	// guard against side effects of shuffling fds below.
 	// Make sure that nextfd is beyond any currently open files so
@@ -276,8 +276,8 @@
 	}
 
 	// Restore original rlimit.
-	if rlimOK && rlim.Cur != 0 {
-		RawSyscall(SYS_SETRLIMIT, uintptr(RLIMIT_NOFILE), uintptr(unsafe.Pointer(&rlim)), 0)
+	if rlim != nil {
+		RawSyscall(SYS_SETRLIMIT, uintptr(RLIMIT_NOFILE), uintptr(unsafe.Pointer(rlim)), 0)
 	}
 
 	// Time to exec.
diff --git a/src/syscall/exec_freebsd.go b/src/syscall/exec_freebsd.go
index bd198d0..3226cb8 100644
--- a/src/syscall/exec_freebsd.go
+++ b/src/syscall/exec_freebsd.go
@@ -71,7 +71,7 @@
 		upid            uintptr
 	)
 
-	rlim, rlimOK := origRlimitNofile.Load().(Rlimit)
+	rlim := origRlimitNofile.Load()
 
 	// Record parent PID so child can test if it has died.
 	ppid, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
@@ -300,8 +300,8 @@
 	}
 
 	// Restore original rlimit.
-	if rlimOK && rlim.Cur != 0 {
-		RawSyscall(SYS_SETRLIMIT, uintptr(RLIMIT_NOFILE), uintptr(unsafe.Pointer(&rlim)), 0)
+	if rlim != nil {
+		RawSyscall(SYS_SETRLIMIT, uintptr(RLIMIT_NOFILE), uintptr(unsafe.Pointer(rlim)), 0)
 	}
 
 	// Time to exec.
diff --git a/src/syscall/exec_libc.go b/src/syscall/exec_libc.go
index 4455786..768e8c1 100644
--- a/src/syscall/exec_libc.go
+++ b/src/syscall/exec_libc.go
@@ -91,7 +91,7 @@
 		ngroups, groups uintptr
 	)
 
-	rlim, rlimOK := origRlimitNofile.Load().(Rlimit)
+	rlim := origRlimitNofile.Load()
 
 	// guard against side effects of shuffling fds below.
 	// Make sure that nextfd is beyond any currently open files so
@@ -296,8 +296,8 @@
 	}
 
 	// Restore original rlimit.
-	if rlimOK && rlim.Cur != 0 {
-		setrlimit1(RLIMIT_NOFILE, unsafe.Pointer(&rlim))
+	if rlim != nil {
+		setrlimit1(RLIMIT_NOFILE, unsafe.Pointer(rlim))
 	}
 
 	// Time to exec.
diff --git a/src/syscall/exec_libc2.go b/src/syscall/exec_libc2.go
index 4fca701..7a67500 100644
--- a/src/syscall/exec_libc2.go
+++ b/src/syscall/exec_libc2.go
@@ -65,7 +65,7 @@
 		ngroups, groups uintptr
 	)
 
-	rlim, rlimOK := origRlimitNofile.Load().(Rlimit)
+	rlim := origRlimitNofile.Load()
 
 	// guard against side effects of shuffling fds below.
 	// Make sure that nextfd is beyond any currently open files so
@@ -272,8 +272,8 @@
 	}
 
 	// Restore original rlimit.
-	if rlimOK && rlim.Cur != 0 {
-		rawSyscall(abi.FuncPCABI0(libc_setrlimit_trampoline), uintptr(RLIMIT_NOFILE), uintptr(unsafe.Pointer(&rlim)), 0)
+	if rlim != nil {
+		rawSyscall(abi.FuncPCABI0(libc_setrlimit_trampoline), uintptr(RLIMIT_NOFILE), uintptr(unsafe.Pointer(rlim)), 0)
 	}
 
 	// Time to exec.
diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go
index dfbb38a..e6d6343 100644
--- a/src/syscall/exec_linux.go
+++ b/src/syscall/exec_linux.go
@@ -75,8 +75,8 @@
 	// in the child process: an index into ProcAttr.Files.
 	// This is only meaningful if Setsid is true.
 	Setctty bool
-	Noctty  bool // Detach fd 0 from controlling terminal
-	Ctty    int  // Controlling TTY fd
+	Noctty  bool // Detach fd 0 from controlling terminal.
+	Ctty    int  // Controlling TTY fd.
 	// Foreground places the child process group in the foreground.
 	// This implies Setpgid. The Ctty field must be set to
 	// the descriptor of the controlling TTY.
@@ -89,8 +89,8 @@
 	// is sent on thread termination, which may happen before process termination.
 	// There are more details at https://go.dev/issue/27505.
 	Pdeathsig    Signal
-	Cloneflags   uintptr        // Flags for clone calls (Linux only)
-	Unshareflags uintptr        // Flags for unshare calls (Linux only)
+	Cloneflags   uintptr        // Flags for clone calls.
+	Unshareflags uintptr        // Flags for unshare calls.
 	UidMappings  []SysProcIDMap // User ID mappings for user namespaces.
 	GidMappings  []SysProcIDMap // Group ID mappings for user namespaces.
 	// GidMappingsEnableSetgroups enabling setgroups syscall.
@@ -98,14 +98,20 @@
 	// This parameter is no-op if GidMappings == nil. Otherwise for unprivileged
 	// users this should be set to false for mappings work.
 	GidMappingsEnableSetgroups bool
-	AmbientCaps                []uintptr // Ambient capabilities (Linux only)
+	AmbientCaps                []uintptr // Ambient capabilities.
 	UseCgroupFD                bool      // Whether to make use of the CgroupFD field.
 	CgroupFD                   int       // File descriptor of a cgroup to put the new process into.
+	// PidFD, if not nil, is used to store the pidfd of a child, if the
+	// functionality is supported by the kernel, or -1. Note *PidFD is
+	// changed only if the process starts successfully.
+	PidFD *int
 }
 
 var (
 	none  = [...]byte{'n', 'o', 'n', 'e', 0}
 	slash = [...]byte{'/', 0}
+
+	forceClone3 = false // Used by unit tests only.
 )
 
 // Implemented in runtime package.
@@ -127,7 +133,7 @@
 func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid int, err Errno) {
 	// Set up and fork. This returns immediately in the parent or
 	// if there's an error.
-	upid, err, mapPipe, locked := forkAndExecInChild1(argv0, argv, envv, chroot, dir, attr, sys, pipe)
+	upid, pidfd, err, mapPipe, locked := forkAndExecInChild1(argv0, argv, envv, chroot, dir, attr, sys, pipe)
 	if locked {
 		runtime_AfterFork()
 	}
@@ -137,6 +143,9 @@
 
 	// parent; return PID
 	pid = int(upid)
+	if sys.PidFD != nil {
+		*sys.PidFD = int(pidfd)
+	}
 
 	if sys.UidMappings != nil || sys.GidMappings != nil {
 		Close(mapPipe[0])
@@ -204,7 +213,7 @@
 //go:noinline
 //go:norace
 //go:nocheckptr
-func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid uintptr, err1 Errno, mapPipe [2]int, locked bool) {
+func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid uintptr, pidfd int32, err1 Errno, mapPipe [2]int, locked bool) {
 	// Defined in linux/prctl.h starting with Linux 4.3.
 	const (
 		PR_CAP_AMBIENT       = 0x2f
@@ -240,8 +249,9 @@
 		ngroups, groups           uintptr
 		c                         uintptr
 	)
+	pidfd = -1
 
-	rlim, rlimOK := origRlimitNofile.Load().(Rlimit)
+	rlim := origRlimitNofile.Load()
 
 	if sys.UidMappings != nil {
 		puid = []byte("/proc/self/uid_map\000")
@@ -289,18 +299,22 @@
 	if sys.Cloneflags&CLONE_NEWUSER == 0 && sys.Unshareflags&CLONE_NEWUSER == 0 {
 		flags |= CLONE_VFORK | CLONE_VM
 	}
+	if sys.PidFD != nil {
+		flags |= CLONE_PIDFD
+	}
 	// Whether to use clone3.
-	if sys.UseCgroupFD {
-		clone3 = &cloneArgs{
-			flags:      uint64(flags) | CLONE_INTO_CGROUP,
-			exitSignal: uint64(SIGCHLD),
-			cgroup:     uint64(sys.CgroupFD),
-		}
-	} else if flags&CLONE_NEWTIME != 0 {
+	if sys.UseCgroupFD || flags&CLONE_NEWTIME != 0 || forceClone3 {
 		clone3 = &cloneArgs{
 			flags:      uint64(flags),
 			exitSignal: uint64(SIGCHLD),
 		}
+		if sys.UseCgroupFD {
+			clone3.flags |= CLONE_INTO_CGROUP
+			clone3.cgroup = uint64(sys.CgroupFD)
+		}
+		if sys.PidFD != nil {
+			clone3.pidFD = uint64(uintptr(unsafe.Pointer(&pidfd)))
+		}
 	}
 
 	// About to call fork.
@@ -308,14 +322,14 @@
 	runtime_BeforeFork()
 	locked = true
 	if clone3 != nil {
-		pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3))
+		pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3), 0)
 	} else {
 		flags |= uintptr(SIGCHLD)
 		if runtime.GOARCH == "s390x" {
 			// On Linux/s390, the first two arguments of clone(2) are swapped.
-			pid, err1 = rawVforkSyscall(SYS_CLONE, 0, flags)
+			pid, err1 = rawVforkSyscall(SYS_CLONE, 0, flags, uintptr(unsafe.Pointer(&pidfd)))
 		} else {
-			pid, err1 = rawVforkSyscall(SYS_CLONE, flags, 0)
+			pid, err1 = rawVforkSyscall(SYS_CLONE, flags, 0, uintptr(unsafe.Pointer(&pidfd)))
 		}
 	}
 	if err1 != 0 || pid != 0 {
@@ -405,22 +419,22 @@
 			if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&psetgroups[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
 				goto childerror
 			}
-			pid, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&setgroups[0])), uintptr(len(setgroups)))
+			pid, _, err1 = RawSyscall(SYS_WRITE, fd1, uintptr(unsafe.Pointer(&setgroups[0])), uintptr(len(setgroups)))
 			if err1 != 0 {
 				goto childerror
 			}
-			if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
+			if _, _, err1 = RawSyscall(SYS_CLOSE, fd1, 0, 0); err1 != 0 {
 				goto childerror
 			}
 
 			if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&pgid[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
 				goto childerror
 			}
-			pid, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&gidmap[0])), uintptr(len(gidmap)))
+			pid, _, err1 = RawSyscall(SYS_WRITE, fd1, uintptr(unsafe.Pointer(&gidmap[0])), uintptr(len(gidmap)))
 			if err1 != 0 {
 				goto childerror
 			}
-			if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
+			if _, _, err1 = RawSyscall(SYS_CLOSE, fd1, 0, 0); err1 != 0 {
 				goto childerror
 			}
 		}
@@ -430,11 +444,11 @@
 			if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&puid[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
 				goto childerror
 			}
-			pid, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&uidmap[0])), uintptr(len(uidmap)))
+			pid, _, err1 = RawSyscall(SYS_WRITE, fd1, uintptr(unsafe.Pointer(&uidmap[0])), uintptr(len(uidmap)))
 			if err1 != 0 {
 				goto childerror
 			}
-			if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
+			if _, _, err1 = RawSyscall(SYS_CLOSE, fd1, 0, 0); err1 != 0 {
 				goto childerror
 			}
 		}
@@ -613,8 +627,8 @@
 	}
 
 	// Restore original rlimit.
-	if rlimOK && rlim.Cur != 0 {
-		rawSetrlimit(RLIMIT_NOFILE, &rlim)
+	if rlim != nil {
+		rawSetrlimit(RLIMIT_NOFILE, rlim)
 	}
 
 	// Enable tracing if requested.
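A minimal usage sketch of the new SysProcAttr.PidFD field via os/exec (Linux-only; the "sleep 60" child and the immediate Kill are arbitrary choices for illustration):

```go
package main

import (
	"os/exec"
	"syscall"
)

func main() {
	pidfd := -1
	cmd := exec.Command("sleep", "60") // arbitrary long-running child
	cmd.SysProcAttr = &syscall.SysProcAttr{PidFD: &pidfd}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// pidfd stays -1 if the kernel does not support CLONE_PIDFD.
	if pidfd != -1 {
		// The descriptor refers to this child even if its PID is later
		// reused; close it when it is no longer needed.
		defer syscall.Close(pidfd)
	}
	cmd.Process.Kill()
	cmd.Wait()
}
```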
diff --git a/src/syscall/exec_linux_test.go b/src/syscall/exec_linux_test.go
index f4ff7bf..68ec6fe 100644
--- a/src/syscall/exec_linux_test.go
+++ b/src/syscall/exec_linux_test.go
@@ -8,8 +8,11 @@
 
 import (
 	"bytes"
+	"errors"
 	"flag"
 	"fmt"
+	"internal/platform"
+	"internal/syscall/unix"
 	"internal/testenv"
 	"io"
 	"os"
@@ -22,6 +25,7 @@
 	"strings"
 	"syscall"
 	"testing"
+	"time"
 	"unsafe"
 )
 
@@ -109,11 +113,17 @@
 		t.Fatal(err)
 	}
 
-	orig, err := os.ReadFile(path)
+	b, err := os.ReadFile(path)
 	if err != nil {
 		t.Fatal(err)
 	}
-	origLines := strings.Split(strings.TrimSpace(string(orig)), "\n")
+	orig := strings.TrimSpace(string(b))
+	if strings.Contains(orig, "lo:") && strings.Count(orig, ":") == 1 {
+		// This test expects there to be at least 1 more network interface
+		// in addition to the local network interface, so that it can tell
+		// that unshare worked.
+		t.Skip("not enough network interfaces to test unshare with")
+	}
 
 	cmd := testenv.Command(t, "cat", path)
 	cmd.SysProcAttr = &syscall.SysProcAttr{
@@ -128,15 +138,18 @@
 		t.Fatalf("Cmd failed with err %v, output: %s", err, out)
 	}
 
-	// Check there is only the local network interface
+	// Check there is only the local network interface.
 	sout := strings.TrimSpace(string(out))
 	if !strings.Contains(sout, "lo:") {
 		t.Fatalf("Expected lo network interface to exist, got %s", sout)
 	}
 
+	origLines := strings.Split(orig, "\n")
 	lines := strings.Split(sout, "\n")
 	if len(lines) >= len(origLines) {
-		t.Fatalf("Got %d lines of output, want <%d", len(lines), len(origLines))
+		t.Logf("%s before unshare:\n%s", path, orig)
+		t.Logf("%s after unshare:\n%s", path, sout)
+		t.Fatalf("Got %d lines of output, want < %d", len(lines), len(origLines))
 	}
 }
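For context, a hedged sketch of the pattern the test above exercises — running a command with an unshared network namespace (requires CAP_SYS_ADMIN or a user namespace on most systems):

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
	"syscall"
)

func main() {
	// Run "cat /proc/net/dev" in a freshly unshared network namespace;
	// inside it only the loopback interface should be listed.
	cmd := exec.Command("cat", "/proc/net/dev")
	cmd.SysProcAttr = &syscall.SysProcAttr{Unshareflags: syscall.CLONE_NEWNET}
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Fatalf("%v\n%s", err, out)
	}
	fmt.Printf("%s", out)
}
```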
 
@@ -206,18 +219,20 @@
 // Test for https://go.dev/issue/19661: unshare fails because systemd
 // has forced / to be shared
 func TestUnshareMountNameSpace(t *testing.T) {
-	testenv.MustHaveExec(t)
-
+	const mountNotSupported = "mount is not supported: " // Output prefix indicating a test skip.
 	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
 		dir := flag.Args()[0]
 		err := syscall.Mount("none", dir, "proc", 0, "")
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "unshare: mount %v failed: %#v", dir, err)
+		if testenv.SyscallIsNotSupported(err) {
+			fmt.Print(mountNotSupported, err)
+		} else if err != nil {
+			fmt.Fprintf(os.Stderr, "unshare: mount %s: %v\n", dir, err)
 			os.Exit(2)
 		}
 		os.Exit(0)
 	}
 
+	testenv.MustHaveExec(t)
 	exe, err := os.Executable()
 	if err != nil {
 		t.Fatal(err)
@@ -231,16 +246,21 @@
 			syscall.Unmount(d, syscall.MNT_FORCE)
 		}
 	})
-	cmd := testenv.Command(t, exe, "-test.run=TestUnshareMountNameSpace", d)
+	cmd := testenv.Command(t, exe, "-test.run=^TestUnshareMountNameSpace$", d)
 	cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
 	cmd.SysProcAttr = &syscall.SysProcAttr{Unshareflags: syscall.CLONE_NEWNS}
 
-	o, err := cmd.CombinedOutput()
+	out, err := cmd.CombinedOutput()
 	if err != nil {
 		if testenv.SyscallIsNotSupported(err) {
 			t.Skipf("skipping: could not start process with CLONE_NEWNS: %v", err)
 		}
-		t.Fatalf("unshare failed: %v\n%s", err, o)
+		t.Fatalf("unshare failed: %v\n%s", err, out)
+	} else if len(out) != 0 {
+		if bytes.HasPrefix(out, []byte(mountNotSupported)) {
+			t.Skipf("skipping: helper process reported %s", out)
+		}
+		t.Fatalf("unexpected output from helper process: %s", out)
 	}
 
 	// How do we tell if the namespace was really unshared? It turns out
@@ -253,11 +273,14 @@
 
 // Test for Issue 20103: unshare fails when chroot is used
 func TestUnshareMountNameSpaceChroot(t *testing.T) {
+	const mountNotSupported = "mount is not supported: " // Output prefix indicating a test skip.
 	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
 		dir := flag.Args()[0]
 		err := syscall.Mount("none", dir, "proc", 0, "")
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "unshare: mount %v failed: %#v", dir, err)
+		if testenv.SyscallIsNotSupported(err) {
+			fmt.Print(mountNotSupported, err)
+		} else if err != nil {
+			fmt.Fprintf(os.Stderr, "unshare: mount %s: %v\n", dir, err)
 			os.Exit(2)
 		}
 		os.Exit(0)
@@ -268,6 +291,9 @@
 	// Since we are doing a chroot, we need the binary there,
 	// and it must be statically linked.
 	testenv.MustHaveGoBuild(t)
+	if platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, false) {
+		t.Skipf("skipping: can't build static binary because %s/%s requires external linking", runtime.GOOS, runtime.GOARCH)
+	}
 	x := filepath.Join(d, "syscall.test")
 	t.Cleanup(func() {
 		// If the subprocess fails to unshare the parent directory, force-unmount it
@@ -280,19 +306,24 @@
 	cmd := testenv.Command(t, testenv.GoToolPath(t), "test", "-c", "-o", x, "syscall")
 	cmd.Env = append(cmd.Environ(), "CGO_ENABLED=0")
 	if o, err := cmd.CombinedOutput(); err != nil {
-		t.Fatalf("Build of syscall in chroot failed, output %v, err %v", o, err)
+		t.Fatalf("%v: %v\n%s", cmd, err, o)
 	}
 
-	cmd = testenv.Command(t, "/syscall.test", "-test.run=TestUnshareMountNameSpaceChroot", "/")
+	cmd = testenv.Command(t, "/syscall.test", "-test.run=^TestUnshareMountNameSpaceChroot$", "/")
 	cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
 	cmd.SysProcAttr = &syscall.SysProcAttr{Chroot: d, Unshareflags: syscall.CLONE_NEWNS}
 
-	o, err := cmd.CombinedOutput()
+	out, err := cmd.CombinedOutput()
 	if err != nil {
 		if testenv.SyscallIsNotSupported(err) {
 			t.Skipf("skipping: could not start process with CLONE_NEWNS and Chroot %q: %v", d, err)
 		}
-		t.Fatalf("unshare failed: %v\n%s", err, o)
+		t.Fatalf("unshare failed: %v\n%s", err, out)
+	} else if len(out) != 0 {
+		if bytes.HasPrefix(out, []byte(mountNotSupported)) {
+			t.Skipf("skipping: helper process reported %s", out)
+		}
+		t.Fatalf("unexpected output from helper process: %s", out)
 	}
 
 	// How do we tell if the namespace was really unshared? It turns out
@@ -326,7 +357,7 @@
 		t.Fatal(err)
 	}
 
-	cmd := testenv.Command(t, exe, "-test.run=TestUnshareUidGidMapping")
+	cmd := testenv.Command(t, exe, "-test.run=^TestUnshareUidGidMapping$")
 	cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
 	cmd.SysProcAttr = &syscall.SysProcAttr{
 		Unshareflags:               syscall.CLONE_NEWNS | syscall.CLONE_NEWUSER,
@@ -381,17 +412,6 @@
 		t.Skipf("cgroup v2 not available (/proc/self/cgroup contents: %q)", selfCg)
 	}
 
-	// Need clone3 with CLONE_INTO_CGROUP support.
-	_, err = syscall.ForkExec("non-existent binary", nil, &syscall.ProcAttr{
-		Sys: &syscall.SysProcAttr{
-			UseCgroupFD: true,
-			CgroupFD:    -1,
-		},
-	})
-	if testenv.SyscallIsNotSupported(err) {
-		t.Skipf("clone3 with CLONE_INTO_CGROUP not available: %v", err)
-	}
-
 	// Need an ability to create a sub-cgroup.
 	subCgroup, err := os.MkdirTemp(prefix+string(bytes.TrimSpace(cg)), "subcg-")
 	if err != nil {
@@ -415,6 +435,18 @@
 
 func TestUseCgroupFD(t *testing.T) {
 	testenv.MustHaveExec(t)
+
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		// Read and print own cgroup path.
+		selfCg, err := os.ReadFile("/proc/self/cgroup")
+		if err != nil {
+			fmt.Fprintln(os.Stderr, err)
+			os.Exit(2)
+		}
+		fmt.Print(string(selfCg))
+		os.Exit(0)
+	}
+
 	exe, err := os.Executable()
 	if err != nil {
 		t.Fatal(err)
@@ -422,7 +454,7 @@
 
 	fd, suffix := prepareCgroupFD(t)
 
-	cmd := testenv.Command(t, exe, "-test.run=TestUseCgroupFDHelper")
+	cmd := testenv.Command(t, exe, "-test.run=^TestUseCgroupFD$")
 	cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
 	cmd.SysProcAttr = &syscall.SysProcAttr{
 		UseCgroupFD: true,
@@ -430,6 +462,13 @@
 	}
 	out, err := cmd.CombinedOutput()
 	if err != nil {
+		if testenv.SyscallIsNotSupported(err) && !errors.Is(err, syscall.EINVAL) {
+			// Can be one of:
+			// - clone3 not supported (old kernel);
+			// - clone3 not allowed (by e.g. seccomp);
+			// - lack of CAP_SYS_ADMIN.
+			t.Skipf("clone3 with CLONE_INTO_CGROUP not available: %v", err)
+		}
 		t.Fatalf("Cmd failed with err %v, output: %s", err, out)
 	}
 	// NB: this wouldn't work with cgroupns.
@@ -438,20 +477,6 @@
 	}
 }
 
-func TestUseCgroupFDHelper(*testing.T) {
-	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
-		return
-	}
-	defer os.Exit(0)
-	// Read and print own cgroup path.
-	selfCg, err := os.ReadFile("/proc/self/cgroup")
-	if err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		os.Exit(2)
-	}
-	fmt.Print(string(selfCg))
-}
-
 func TestCloneTimeNamespace(t *testing.T) {
 	testenv.MustHaveExec(t)
 
@@ -470,7 +495,7 @@
 		t.Fatal(err)
 	}
 
-	cmd := testenv.Command(t, exe, "-test.run=TestCloneTimeNamespace")
+	cmd := testenv.Command(t, exe, "-test.run=^TestCloneTimeNamespace$")
 	cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
 	cmd.SysProcAttr = &syscall.SysProcAttr{
 		Cloneflags: syscall.CLONE_NEWTIME,
@@ -491,13 +516,92 @@
 		t.Fatal(err)
 	}
 
-	parentTimeNS := string(timens)
+	parentTimeNS := timens
 	childTimeNS := string(out)
 	if childTimeNS == parentTimeNS {
 		t.Fatalf("expected child time namespace to be different from parent time namespace: %s", parentTimeNS)
 	}
 }
 
+func testPidFD(t *testing.T, userns bool) error {
+	testenv.MustHaveExec(t)
+
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		// Child: wait for a signal.
+		time.Sleep(time.Hour)
+	}
+
+	exe, err := os.Executable()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var pidfd int
+	cmd := testenv.Command(t, exe, "-test.run=^TestPidFD$")
+	cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
+	cmd.SysProcAttr = &syscall.SysProcAttr{
+		PidFD: &pidfd,
+	}
+	if userns {
+		cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWUSER
+	}
+	if err := cmd.Start(); err != nil {
+		return err
+	}
+	defer func() {
+		cmd.Process.Kill()
+		cmd.Wait()
+	}()
+	t.Log("got pidfd:", pidfd)
+	// If pidfd is not supported by the kernel, -1 is returned.
+	if pidfd == -1 {
+		t.Skip("pidfd not supported")
+	}
+	defer syscall.Close(pidfd)
+
+	// Use pidfd to send a signal to the child.
+	sig := syscall.SIGINT
+	if err := unix.PidFDSendSignal(uintptr(pidfd), sig); err != nil {
+		if err != syscall.EINVAL && testenv.SyscallIsNotSupported(err) {
+			t.Skip("pidfd_send_signal syscall not supported:", err)
+		}
+		t.Fatal("pidfd_send_signal syscall failed:", err)
+	}
+	// Check if the child received our signal.
+	err = cmd.Wait()
+	if cmd.ProcessState == nil || cmd.ProcessState.Sys().(syscall.WaitStatus).Signal() != sig {
+		t.Fatal("unexpected child error:", err)
+	}
+	return nil
+}
+
+func TestPidFD(t *testing.T) {
+	if err := testPidFD(t, false); err != nil {
+		t.Fatal("can't start a process:", err)
+	}
+}
+
+func TestPidFDWithUserNS(t *testing.T) {
+	if err := testPidFD(t, true); err != nil {
+		if testenv.SyscallIsNotSupported(err) {
+			t.Skip("userns not supported:", err)
+		}
+		t.Fatal("can't start a process:", err)
+	}
+}
+
+func TestPidFDClone3(t *testing.T) {
+	*syscall.ForceClone3 = true
+	defer func() { *syscall.ForceClone3 = false }()
+
+	if err := testPidFD(t, false); err != nil {
+		if testenv.SyscallIsNotSupported(err) {
+			t.Skip("clone3 not supported:", err)
+		}
+		t.Fatal("can't start a process:", err)
+	}
+}
+
 type capHeader struct {
 	version uint32
 	pid     int32
@@ -608,7 +712,7 @@
 		t.Fatal(err)
 	}
 
-	cmd := testenv.Command(t, f.Name(), "-test.run="+t.Name())
+	cmd := testenv.Command(t, f.Name(), "-test.run=^"+t.Name()+"$")
 	cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
@@ -626,12 +730,12 @@
 		gid := os.Getgid()
 		cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{{
 			ContainerID: int(nobody),
-			HostID:      int(uid),
+			HostID:      uid,
 			Size:        int(1),
 		}}
 		cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{{
 			ContainerID: int(nobody),
-			HostID:      int(gid),
+			HostID:      gid,
 			Size:        int(1),
 		}}
 
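A sketch of the UseCgroupFD/CgroupFD pairing that TestUseCgroupFD exercises; the cgroup path below is hypothetical and must be an existing cgroup v2 directory the caller can use:

```go
package main

import (
	"log"
	"os/exec"
	"syscall"
)

func main() {
	// Hypothetical cgroup v2 directory; it must already exist.
	fd, err := syscall.Open("/sys/fs/cgroup/mygroup", syscall.O_RDONLY|syscall.O_CLOEXEC, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.Close(fd)

	cmd := exec.Command("sleep", "60") // arbitrary child
	cmd.SysProcAttr = &syscall.SysProcAttr{UseCgroupFD: true, CgroupFD: fd}
	if err := cmd.Start(); err != nil {
		// On kernels without clone3+CLONE_INTO_CGROUP, or without enough
		// privileges, this fails much like the skip cases in the test above.
		log.Fatal(err)
	}
	cmd.Process.Kill()
	cmd.Wait()
}
```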
diff --git a/src/syscall/exec_pdeathsig_test.go b/src/syscall/exec_pdeathsig_test.go
index 96ae27b..a907afd 100644
--- a/src/syscall/exec_pdeathsig_test.go
+++ b/src/syscall/exec_pdeathsig_test.go
@@ -9,23 +9,31 @@
 import (
 	"bufio"
 	"fmt"
+	"internal/testenv"
 	"io"
 	"os"
 	"os/exec"
 	"os/signal"
+	"os/user"
 	"path/filepath"
+	"strconv"
+	"strings"
 	"syscall"
 	"testing"
-	"time"
 )
 
-func TestDeathSignal(t *testing.T) {
-	if os.Getuid() != 0 {
-		t.Skip("skipping root only test")
+// TestDeathSignalSetuid verifies that a command run with a different UID still
+// receives PDeathsig; it is a regression test for https://go.dev/issue/9686.
+func TestDeathSignalSetuid(t *testing.T) {
+	if testing.Short() {
+		t.Skipf("skipping test that copies its binary into temp dir")
 	}
 
-	// Copy the test binary to a location that a non-root user can read/execute
-	// after we drop privileges
+	// Copy the test binary to a location that another user can read/execute
+	// after we drop privileges.
+	//
+	// TODO(bcmills): Why do we believe that another user will be able to
+	// execute a binary in this directory? (It could be mounted noexec.)
 	tempDir, err := os.MkdirTemp("", "TestDeathSignal")
 	if err != nil {
 		t.Fatalf("cannot create temporary directory: %v", err)
@@ -53,8 +61,8 @@
 		t.Fatalf("failed to close test binary %q, %v", tmpBinary, err)
 	}
 
-	cmd := exec.Command(tmpBinary)
-	cmd.Env = append(os.Environ(), "GO_DEATHSIG_PARENT=1")
+	cmd := testenv.Command(t, tmpBinary)
+	cmd.Env = append(cmd.Environ(), "GO_DEATHSIG_PARENT=1")
 	chldStdin, err := cmd.StdinPipe()
 	if err != nil {
 		t.Fatalf("failed to create new stdin pipe: %v", err)
@@ -63,10 +71,17 @@
 	if err != nil {
 		t.Fatalf("failed to create new stdout pipe: %v", err)
 	}
-	cmd.Stderr = os.Stderr
+	stderr := new(strings.Builder)
+	cmd.Stderr = stderr
 
 	err = cmd.Start()
-	defer cmd.Wait()
+	defer func() {
+		chldStdin.Close()
+		cmd.Wait()
+		if stderr.Len() > 0 {
+			t.Logf("stderr:\n%s", stderr)
+		}
+	}()
 	if err != nil {
 		t.Fatalf("failed to start first child process: %v", err)
 	}
@@ -76,21 +91,57 @@
 	if got, err := chldPipe.ReadString('\n'); got == "start\n" {
 		syscall.Kill(cmd.Process.Pid, syscall.SIGTERM)
 
-		go func() {
-			time.Sleep(5 * time.Second)
-			chldStdin.Close()
-		}()
-
 		want := "ok\n"
 		if got, err = chldPipe.ReadString('\n'); got != want {
 			t.Fatalf("expected %q, received %q, %v", want, got, err)
 		}
+	} else if got == "skip\n" {
+		t.Skipf("skipping: parent could not run child program as selected user")
 	} else {
 		t.Fatalf("did not receive start from child, received %q, %v", got, err)
 	}
 }
 
 func deathSignalParent() {
+	var (
+		u   *user.User
+		err error
+	)
+	if os.Getuid() == 0 {
+		tryUsers := []string{"nobody"}
+		if testenv.Builder() != "" {
+			tryUsers = append(tryUsers, "gopher")
+		}
+		for _, name := range tryUsers {
+			u, err = user.Lookup(name)
+			if err == nil {
+				break
+			}
+			fmt.Fprintf(os.Stderr, "Lookup(%q): %v\n", name, err)
+		}
+	}
+	if u == nil {
+		// If we couldn't find an unprivileged user to run as, try running as
+		// the current user. (Empirically this still causes the call to Start to
+		// fail with a permission error if running as a non-root user on Linux.)
+		u, err = user.Current()
+		if err != nil {
+			fmt.Fprintln(os.Stderr, err)
+			os.Exit(1)
+		}
+	}
+
+	uid, err := strconv.ParseUint(u.Uid, 10, 32)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "invalid UID: %v\n", err)
+		os.Exit(1)
+	}
+	gid, err := strconv.ParseUint(u.Gid, 10, 32)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "invalid GID: %v\n", err)
+		os.Exit(1)
+	}
+
 	cmd := exec.Command(os.Args[0])
 	cmd.Env = append(os.Environ(),
 		"GO_DEATHSIG_PARENT=",
@@ -99,16 +150,18 @@
 	cmd.Stdin = os.Stdin
 	cmd.Stdout = os.Stdout
 	attrs := syscall.SysProcAttr{
-		Pdeathsig: syscall.SIGUSR1,
-		// UID/GID 99 is the user/group "nobody" on RHEL/Fedora and is
-		// unused on Ubuntu
-		Credential: &syscall.Credential{Uid: 99, Gid: 99},
+		Pdeathsig:  syscall.SIGUSR1,
+		Credential: &syscall.Credential{Uid: uint32(uid), Gid: uint32(gid)},
 	}
 	cmd.SysProcAttr = &attrs
 
-	err := cmd.Start()
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "death signal parent error: %v\n", err)
+	fmt.Fprintf(os.Stderr, "starting process as user %q\n", u.Username)
+	if err := cmd.Start(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		if testenv.SyscallIsNotSupported(err) {
+			fmt.Println("skip")
+			os.Exit(0)
+		}
 		os.Exit(1)
 	}
 	cmd.Wait()
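The combination the helper above sets up (Pdeathsig plus a Credential for a different user) looks like this from a caller's perspective; the UID/GID 65534 is an assumption ("nobody" on many distributions), not a portable constant:

```go
package main

import (
	"log"
	"os/exec"
	"syscall"
)

func main() {
	cmd := exec.Command("sleep", "60") // arbitrary child
	cmd.SysProcAttr = &syscall.SysProcAttr{
		// Deliver SIGTERM to the child if the parent thread dies.
		Pdeathsig: syscall.SIGTERM,
		// Drop to an unprivileged UID/GID (assumed to be "nobody").
		Credential: &syscall.Credential{Uid: 65534, Gid: 65534},
	}
	if err := cmd.Start(); err != nil {
		// Starting as another user typically requires root; compare the
		// "skip" path printed by deathSignalParent above.
		log.Fatal(err)
	}
	cmd.Wait()
}
```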
diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go
index 9a5f2d3..469b660 100644
--- a/src/syscall/exec_unix.go
+++ b/src/syscall/exec_unix.go
@@ -107,6 +107,9 @@
 	if err != nil {
 		return err
 	}
+	if (flag&O_NONBLOCK != 0) == nonblocking {
+		return nil
+	}
 	if nonblocking {
 		flag |= O_NONBLOCK
 	} else {
@@ -278,9 +281,9 @@
 	}
 	runtime_BeforeExec()
 
-	rlim, rlimOK := origRlimitNofile.Load().(Rlimit)
-	if rlimOK && rlim.Cur != 0 {
-		Setrlimit(RLIMIT_NOFILE, &rlim)
+	rlim := origRlimitNofile.Load()
+	if rlim != nil {
+		Setrlimit(RLIMIT_NOFILE, rlim)
 	}
 
 	var err1 error
@@ -293,7 +296,7 @@
 	} else if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
 		// Similarly on Darwin.
 		err1 = execveDarwin(argv0p, &argvp[0], &envvp[0])
-	} else if runtime.GOOS == "openbsd" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
+	} else if runtime.GOOS == "openbsd" && runtime.GOARCH != "mips64" {
 		// Similarly on OpenBSD.
 		err1 = execveOpenBSD(argv0p, &argvp[0], &envvp[0])
 	} else {
diff --git a/src/syscall/exec_unix_test.go b/src/syscall/exec_unix_test.go
index 9627317..5584f7d 100644
--- a/src/syscall/exec_unix_test.go
+++ b/src/syscall/exec_unix_test.go
@@ -310,7 +310,7 @@
 // TestExec is for issue #41702.
 func TestExec(t *testing.T) {
 	testenv.MustHaveExec(t)
-	cmd := exec.Command(os.Args[0], "-test.run=TestExecHelper")
+	cmd := exec.Command(os.Args[0], "-test.run=^TestExecHelper$")
 	cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=2")
 	o, err := cmd.CombinedOutput()
 	if err != nil {
@@ -343,7 +343,7 @@
 
 	time.Sleep(10 * time.Millisecond)
 
-	argv := []string{os.Args[0], "-test.run=TestExecHelper"}
+	argv := []string{os.Args[0], "-test.run=^TestExecHelper$"}
 	syscall.Exec(os.Args[0], argv, os.Environ())
 
 	t.Error("syscall.Exec returned")
@@ -357,7 +357,7 @@
 	}
 
 	orig := syscall.OrigRlimitNofile()
-	if orig.Cur == 0 {
+	if orig == nil {
 		t.Skip("skipping test because rlimit not adjusted at startup")
 	}
 
@@ -366,7 +366,7 @@
 		executable = os.Args[0]
 	}
 
-	cmd := testenv.Command(t, executable, "-test.run=TestRlimitRestored")
+	cmd := testenv.Command(t, executable, "-test.run=^TestRlimitRestored$")
 	cmd = testenv.CleanCmdEnv(cmd)
 	cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
 
diff --git a/src/syscall/exec_windows.go b/src/syscall/exec_windows.go
index 0a93bc0..1220de4 100644
--- a/src/syscall/exec_windows.go
+++ b/src/syscall/exec_windows.go
@@ -14,6 +14,7 @@
 	"unsafe"
 )
 
+// ForkLock is not used on Windows.
 var ForkLock sync.RWMutex
 
 // EscapeArg rewrites command line argument s as prescribed
@@ -117,11 +118,11 @@
 // terminated strings followed by a nil.
 // Last bytes are two UCS-2 NULs, or four NUL bytes.
 // If any string contains a NUL, it returns (nil, EINVAL).
-func createEnvBlock(envv []string) (*uint16, error) {
+func createEnvBlock(envv []string) ([]uint16, error) {
 	if len(envv) == 0 {
-		return &utf16.Encode([]rune("\x00\x00"))[0], nil
+		return utf16.Encode([]rune("\x00\x00")), nil
 	}
-	length := 0
+	var length int
 	for _, s := range envv {
 		if bytealg.IndexByteString(s, 0) != -1 {
 			return nil, EINVAL
@@ -130,17 +131,15 @@
 	}
 	length += 1
 
-	b := make([]byte, length)
-	i := 0
+	b := make([]uint16, 0, length)
 	for _, s := range envv {
-		l := len(s)
-		copy(b[i:i+l], []byte(s))
-		copy(b[i+l:i+l+1], []byte{0})
-		i = i + l + 1
+		for _, c := range s {
+			b = utf16.AppendRune(b, c)
+		}
+		b = utf16.AppendRune(b, 0)
 	}
-	copy(b[i:i+1], []byte{0})
-
-	return &utf16.Encode([]rune(string(b)))[0], nil
+	b = utf16.AppendRune(b, 0)
+	return b, nil
 }
 
 func CloseOnExec(fd Handle) {
@@ -318,17 +317,6 @@
 		}
 	}
 
-	var maj, min, build uint32
-	rtlGetNtVersionNumbers(&maj, &min, &build)
-	isWin7 := maj < 6 || (maj == 6 && min <= 1)
-	// NT kernel handles are divisible by 4, with the bottom 3 bits left as
-	// a tag. The fully set tag correlates with the types of handles we're
-	// concerned about here.  Except, the kernel will interpret some
-	// special handle values, like -1, -2, and so forth, so kernelbase.dll
-	// checks to see that those bottom three bits are checked, but that top
-	// bit is not checked.
-	isLegacyWin7ConsoleHandle := func(handle Handle) bool { return isWin7 && handle&0x10000003 == 3 }
-
 	p, _ := GetCurrentProcess()
 	parentProcess := p
 	if sys.ParentProcess != 0 {
@@ -337,15 +325,7 @@
 	fd := make([]Handle, len(attr.Files))
 	for i := range attr.Files {
 		if attr.Files[i] > 0 {
-			destinationProcessHandle := parentProcess
-
-			// On Windows 7, console handles aren't real handles, and can only be duplicated
-			// into the current process, not a parent one, which amounts to the same thing.
-			if parentProcess != p && isLegacyWin7ConsoleHandle(Handle(attr.Files[i])) {
-				destinationProcessHandle = p
-			}
-
-			err := DuplicateHandle(p, Handle(attr.Files[i]), destinationProcessHandle, &fd[i], 0, true, DUPLICATE_SAME_ACCESS)
+			err := DuplicateHandle(p, Handle(attr.Files[i]), parentProcess, &fd[i], 0, true, DUPLICATE_SAME_ACCESS)
 			if err != nil {
 				return 0, 0, err
 			}
@@ -376,14 +356,6 @@
 
 	fd = append(fd, sys.AdditionalInheritedHandles...)
 
-	// On Windows 7, console handles aren't real handles, so don't pass them
-	// through to PROC_THREAD_ATTRIBUTE_HANDLE_LIST.
-	for i := range fd {
-		if isLegacyWin7ConsoleHandle(fd[i]) {
-			fd[i] = 0
-		}
-	}
-
 	// The presence of a NULL handle in the list is enough to cause PROC_THREAD_ATTRIBUTE_HANDLE_LIST
 	// to treat the entire list as empty, so remove NULL handles.
 	j := 0
@@ -413,9 +385,9 @@
 	pi := new(ProcessInformation)
 	flags := sys.CreationFlags | CREATE_UNICODE_ENVIRONMENT | _EXTENDED_STARTUPINFO_PRESENT
 	if sys.Token != 0 {
-		err = CreateProcessAsUser(sys.Token, argv0p, argvp, sys.ProcessAttributes, sys.ThreadAttributes, willInheritHandles, flags, envBlock, dirp, &si.StartupInfo, pi)
+		err = CreateProcessAsUser(sys.Token, argv0p, argvp, sys.ProcessAttributes, sys.ThreadAttributes, willInheritHandles, flags, &envBlock[0], dirp, &si.StartupInfo, pi)
 	} else {
-		err = CreateProcess(argv0p, argvp, sys.ProcessAttributes, sys.ThreadAttributes, willInheritHandles, flags, envBlock, dirp, &si.StartupInfo, pi)
+		err = CreateProcess(argv0p, argvp, sys.ProcessAttributes, sys.ThreadAttributes, willInheritHandles, flags, &envBlock[0], dirp, &si.StartupInfo, pi)
 	}
 	if err != nil {
 		return 0, 0, err
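The rewritten createEnvBlock builds the environment block directly as UTF-16 rather than round-tripping through a byte slice. A standalone, illustrative re-implementation of the same encoding approach (the function name here is made up; it is not the unexported helper above, and the NUL-in-string check is omitted):

```go
package main

import (
	"fmt"
	"unicode/utf16"
)

// envBlockUTF16 encodes each string as UTF-16, NUL-terminates it, and ends
// the whole block with an extra NUL, mirroring the Windows environment
// block layout described above.
func envBlockUTF16(envv []string) []uint16 {
	if len(envv) == 0 {
		return utf16.Encode([]rune("\x00\x00"))
	}
	var b []uint16
	for _, s := range envv {
		for _, c := range s {
			b = utf16.AppendRune(b, c)
		}
		b = utf16.AppendRune(b, 0)
	}
	return utf16.AppendRune(b, 0)
}

func main() {
	block := envBlockUTF16([]string{"FOO=bar", "PATH=C:\\Windows"})
	fmt.Println(len(block)) // number of UTF-16 code units, including NULs
}
```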
diff --git a/src/syscall/exec_windows_test.go b/src/syscall/exec_windows_test.go
index 8b8f330..5cacf42 100644
--- a/src/syscall/exec_windows_test.go
+++ b/src/syscall/exec_windows_test.go
@@ -73,7 +73,7 @@
 
 	// run parent process
 
-	parent := exec.Command(os.Args[0], "-test.run=TestChangingProcessParent")
+	parent := exec.Command(os.Args[0], "-test.run=^TestChangingProcessParent$")
 	parent.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=parent")
 	err := parent.Start()
 	if err != nil {
@@ -96,7 +96,7 @@
 	}
 	defer syscall.CloseHandle(ph)
 
-	child := exec.Command(os.Args[0], "-test.run=TestChangingProcessParent")
+	child := exec.Command(os.Args[0], "-test.run=^TestChangingProcessParent$")
 	child.Env = append(os.Environ(),
 		"GO_WANT_HELPER_PROCESS=child",
 		"GO_WANT_HELPER_PROCESS_FILE="+childDumpPath)
diff --git a/src/syscall/export_linux_test.go b/src/syscall/export_linux_test.go
index 274849e..3aa877c 100644
--- a/src/syscall/export_linux_test.go
+++ b/src/syscall/export_linux_test.go
@@ -4,6 +4,11 @@
 
 package syscall
 
-var RawSyscallNoError = rawSyscallNoError
+var (
+	RawSyscallNoError = rawSyscallNoError
+	ForceClone3       = &forceClone3
+)
 
-const Sys_GETEUID = sys_GETEUID
+const (
+	Sys_GETEUID = sys_GETEUID
+)
diff --git a/src/syscall/export_rlimit_test.go b/src/syscall/export_rlimit_test.go
index 320e331..8b1545c 100644
--- a/src/syscall/export_rlimit_test.go
+++ b/src/syscall/export_rlimit_test.go
@@ -6,9 +6,6 @@
 
 package syscall
 
-func OrigRlimitNofile() Rlimit {
-	if rlim, ok := origRlimitNofile.Load().(Rlimit); ok {
-		return rlim
-	}
-	return Rlimit{0, 0}
+func OrigRlimitNofile() *Rlimit {
+	return origRlimitNofile.Load()
 }
diff --git a/src/syscall/flock.go b/src/syscall/flock.go
deleted file mode 100644
index 820f526..0000000
--- a/src/syscall/flock.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux || freebsd || openbsd || netbsd || dragonfly
-
-package syscall
-
-import "unsafe"
-
-// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
-// systems by flock_linux_32bit.go to be SYS_FCNTL64.
-var fcntl64Syscall uintptr = SYS_FCNTL
-
-// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
-func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
-	_, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
-	if errno == 0 {
-		return nil
-	}
-	return errno
-}
diff --git a/src/syscall/flock_bsd.go b/src/syscall/flock_bsd.go
new file mode 100644
index 0000000..68d3470
--- /dev/null
+++ b/src/syscall/flock_bsd.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd
+
+package syscall
+
+import "unsafe"
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+	_, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk))
+	return err
+}
diff --git a/src/syscall/flock_darwin.go b/src/syscall/flock_darwin.go
deleted file mode 100644
index d2bd841..0000000
--- a/src/syscall/flock_darwin.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syscall
-
-import "unsafe"
-
-// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
-func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
-	_, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk))
-	return err
-}
diff --git a/src/syscall/flock_linux.go b/src/syscall/flock_linux.go
new file mode 100644
index 0000000..7d1169b
--- /dev/null
+++ b/src/syscall/flock_linux.go
@@ -0,0 +1,20 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+
+import "unsafe"
+
+// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
+// systems by flock_linux_32bit.go to be SYS_FCNTL64.
+var fcntl64Syscall uintptr = SYS_FCNTL
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+	_, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
+	if errno == 0 {
+		return nil
+	}
+	return errno
+}
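A usage sketch for FcntlFlock on Linux (the lock-file path is arbitrary): take an exclusive write lock on a whole file with F_SETLKW, then release it.

```go
package main

import (
	"log"
	"os"
	"syscall"
)

func main() {
	f, err := os.OpenFile("/tmp/example.lock", os.O_CREATE|os.O_RDWR, 0o644) // arbitrary path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Take (and wait for) an exclusive write lock on the whole file.
	lk := syscall.Flock_t{
		Type:   syscall.F_WRLCK,
		Whence: 0, // SEEK_SET: Start is relative to the beginning of the file
		Start:  0,
		Len:    0, // 0 means "to the end of the file"
	}
	if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lk); err != nil {
		log.Fatal(err)
	}

	// ... do work while holding the lock ...

	lk.Type = syscall.F_UNLCK
	if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lk); err != nil {
		log.Fatal(err)
	}
}
```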
diff --git a/src/syscall/js/js.go b/src/syscall/js/js.go
index 2515b4f..f7e32eb 100644
--- a/src/syscall/js/js.go
+++ b/src/syscall/js/js.go
@@ -576,7 +576,7 @@
 	n, ok := copyBytesToGo(dst, src.ref)
 	runtime.KeepAlive(src)
 	if !ok {
-		panic("syscall/js: CopyBytesToGo: expected src to be an Uint8Array or Uint8ClampedArray")
+		panic("syscall/js: CopyBytesToGo: expected src to be a Uint8Array or Uint8ClampedArray")
 	}
 	return n
 }
@@ -591,7 +591,7 @@
 	n, ok := copyBytesToJS(dst.ref, src)
 	runtime.KeepAlive(dst)
 	if !ok {
-		panic("syscall/js: CopyBytesToJS: expected dst to be an Uint8Array or Uint8ClampedArray")
+		panic("syscall/js: CopyBytesToJS: expected dst to be a Uint8Array or Uint8ClampedArray")
 	}
 	return n
 }
diff --git a/src/syscall/mkall.sh b/src/syscall/mkall.sh
index 6b27253..a3bc767 100755
--- a/src/syscall/mkall.sh
+++ b/src/syscall/mkall.sh
@@ -190,6 +190,7 @@
 	mktypes="GOARCH=$GOARCH go tool cgo -godefs"
 	;;
 linux_arm)
+	GOOSARCH_in="syscall_linux_arm.go syscall_linux_accept.go"
 	mkerrors="$mkerrors"
 	mksyscall="./mksyscall.pl -l32 -arm"
 	mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl -"
@@ -355,6 +356,30 @@
 	# API consistent between platforms.
 	mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
 	;;
+openbsd_ppc64)
+	GOOSARCH_in="syscall_openbsd_libc.go syscall_openbsd_$GOARCH.go"
+	mkerrors="$mkerrors -m64"
+	mksyscall="./mksyscall.pl -openbsd -libc"
+	mksysctl="./mksysctl_openbsd.pl"
+	zsysctl="zsysctl_openbsd.go"
+	mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+	# Let the type of C char be signed to make the bare syscall
+	# API consistent between platforms.
+	mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+	mkasm="go run mkasm.go"
+	;;
+openbsd_riscv64)
+	GOOSARCH_in="syscall_openbsd_libc.go syscall_openbsd_$GOARCH.go"
+	mkerrors="$mkerrors -m64"
+	mksyscall="./mksyscall.pl -openbsd -libc"
+	mksysctl="./mksysctl_openbsd.pl"
+	zsysctl="zsysctl_openbsd.go"
+	mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+	# Let the type of C char be signed to make the bare syscall
+	# API consistent between platforms.
+	mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+	mkasm="go run mkasm.go"
+	;;
 plan9_386)
 	mkerrors=
 	mksyscall="./mksyscall.pl -l32 -plan9"
diff --git a/src/syscall/mkasm.go b/src/syscall/mkasm.go
index dce61f3..c9503e9 100644
--- a/src/syscall/mkasm.go
+++ b/src/syscall/mkasm.go
@@ -54,7 +54,12 @@
 		if !trampolines[fn] {
 			trampolines[fn] = true
 			fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
-			fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
+			if goos == "openbsd" && arch == "ppc64" {
+				fmt.Fprintf(&out, "\tCALL\t%s(SB)\n", fn)
+				fmt.Fprintf(&out, "\tRET\n")
+			} else {
+				fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
+			}
 		}
 	}
 	err = os.WriteFile(fmt.Sprintf("zsyscall_%s_%s.s", goos, arch), out.Bytes(), 0644)
diff --git a/src/syscall/mksyscall.pl b/src/syscall/mksyscall.pl
index 075eb1f..47efbff 100755
--- a/src/syscall/mksyscall.pl
+++ b/src/syscall/mksyscall.pl
@@ -85,6 +85,9 @@
 if($libc) {
 	$extraimports = 'import "internal/abi"';
 }
+if($darwin) {
+	$extraimports .= "\nimport \"runtime\"";
+}
 
 sub parseparamlist($) {
 	my ($list) = @_;
@@ -137,7 +140,7 @@
 	# without reading the header.
 	$text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
 
-	if (($darwin && $func =~ /^ptrace1(Ptr)?$/) || (($openbsd && $libc) && $func =~ /^ptrace(Ptr)?$/)) {
+	if ((($darwin || ($openbsd && $libc)) && $func =~ /^ptrace(Ptr)?$/)) {
 		# The ptrace function is called from forkAndExecInChild where stack
 		# growth is forbidden.
 		$text .= "//go:nosplit\n"
@@ -147,6 +150,13 @@
 	my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : "";
 	$text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl;
 
+	# Disable ptrace on iOS.
+	if ($darwin && $func =~ /^ptrace(Ptr)?$/) {
+		$text .= "\tif runtime.GOOS == \"ios\" {\n";
+		$text .= "\t\tpanic(\"unimplemented\")\n";
+		$text .= "\t}\n";
+	}
+
 	# Check if err return available
 	my $errvar = "";
 	foreach my $p (@out) {
@@ -284,6 +294,7 @@
 	if($libc) {
 		if($funcname eq "") {
 			$sysname = substr $sysname, 4;
+			$sysname =~ y/A-Z/a-z/;
 			$funcname = "libc_$sysname";
 		}
 		$sysname = "abi.FuncPCABI0(${funcname}_trampoline)";
diff --git a/src/syscall/net_fake.go b/src/syscall/net_fake.go
index 689f6f8..549f2be 100644
--- a/src/syscall/net_fake.go
+++ b/src/syscall/net_fake.go
@@ -3,9 +3,8 @@
 // license that can be found in the LICENSE file.
 
 // Fake networking for js/wasm and wasip1/wasm.
-// This file only exists to make the compiler happy.
 
-//go:build (js && wasm) || wasip1
+//go:build js || wasip1
 
 package syscall
 
@@ -32,9 +31,12 @@
 )
 
 const (
+	SOMAXCONN = 0x80
+)
+
+const (
 	_ = iota
 	IPV6_V6ONLY
-	SOMAXCONN
 	SO_ERROR
 )
 
diff --git a/src/syscall/netlink_linux.go b/src/syscall/netlink_linux.go
index a503a07..99b5b59 100644
--- a/src/syscall/netlink_linux.go
+++ b/src/syscall/netlink_linux.go
@@ -36,7 +36,7 @@
 	*(*uint16)(unsafe.Pointer(&b[6:8][0])) = rr.Header.Flags
 	*(*uint32)(unsafe.Pointer(&b[8:12][0])) = rr.Header.Seq
 	*(*uint32)(unsafe.Pointer(&b[12:16][0])) = rr.Header.Pid
-	b[16] = byte(rr.Data.Family)
+	b[16] = rr.Data.Family
 	return b
 }
 
diff --git a/src/syscall/ptrace_darwin.go b/src/syscall/ptrace_darwin.go
deleted file mode 100644
index 466f813..0000000
--- a/src/syscall/ptrace_darwin.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !ios
-
-package syscall
-
-import "unsafe"
-
-// Nosplit because it is called from forkAndExecInChild.
-//
-//go:nosplit
-func ptrace(request int, pid int, addr uintptr, data uintptr) error {
-	return ptrace1(request, pid, addr, data)
-}
-
-//go:nosplit
-func ptracePtr(request int, pid int, addr unsafe.Pointer, data uintptr) error {
-	return ptrace1Ptr(request, pid, addr, data)
-}
diff --git a/src/syscall/ptrace_ios.go b/src/syscall/ptrace_ios.go
deleted file mode 100644
index a9b2918..0000000
--- a/src/syscall/ptrace_ios.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build ios
-
-package syscall
-
-import "unsafe"
-
-// Nosplit because it is called from forkAndExecInChild.
-//
-//go:nosplit
-func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
-	panic("unimplemented")
-}
-
-//go:nosplit
-func ptracePtr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) {
-	panic("unimplemented")
-}
diff --git a/src/syscall/rlimit.go b/src/syscall/rlimit.go
index cc7935d..d77341b 100644
--- a/src/syscall/rlimit.go
+++ b/src/syscall/rlimit.go
@@ -10,10 +10,8 @@
 	"sync/atomic"
 )
 
-// origRlimitNofile, if not {0, 0}, is the original soft RLIMIT_NOFILE.
-// When we can assume that we are bootstrapping with Go 1.19,
-// this can be atomic.Pointer[Rlimit].
-var origRlimitNofile atomic.Value // of Rlimit
+// origRlimitNofile, if non-nil, is the original soft RLIMIT_NOFILE.
+var origRlimitNofile atomic.Pointer[Rlimit]
 
 // Some systems set an artificially low soft limit on open file count, for compatibility
 // with code that uses select and its hard-coded maximum file descriptor
@@ -32,19 +30,20 @@
 func init() {
 	var lim Rlimit
 	if err := Getrlimit(RLIMIT_NOFILE, &lim); err == nil && lim.Cur != lim.Max {
-		origRlimitNofile.Store(lim)
-		lim.Cur = lim.Max
-		adjustFileLimit(&lim)
-		setrlimit(RLIMIT_NOFILE, &lim)
+		origRlimitNofile.Store(&lim)
+		nlim := lim
+		nlim.Cur = nlim.Max
+		adjustFileLimit(&nlim)
+		setrlimit(RLIMIT_NOFILE, &nlim)
 	}
 }
 
 func Setrlimit(resource int, rlim *Rlimit) error {
 	err := setrlimit(resource, rlim)
 	if err == nil && resource == RLIMIT_NOFILE {
-		// Store zeroes in origRlimitNofile to tell StartProcess
+		// Store nil in origRlimitNofile to tell StartProcess
 		// to not adjust the rlimit in the child process.
-		origRlimitNofile.Store(Rlimit{0, 0})
+		origRlimitNofile.Store(nil)
 	}
 	return err
 }
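origRlimitNofile is now an atomic.Pointer[Rlimit], with nil meaning "do not restore in the child". The observable behavior is unchanged: at startup the soft RLIMIT_NOFILE is raised to the hard limit unless the program later calls Setrlimit itself. A small sketch for inspecting it:

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	var lim syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim); err != nil {
		panic(err)
	}
	// With the init above, Cur has typically been raised to Max already;
	// calling Setrlimit clears the saved original so exec'd children keep
	// whatever value the program chose.
	fmt.Printf("soft=%d hard=%d\n", lim.Cur, lim.Max)
}
```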
diff --git a/src/syscall/rlimit_stub.go b/src/syscall/rlimit_stub.go
index e8f839d..7daa935 100644
--- a/src/syscall/rlimit_stub.go
+++ b/src/syscall/rlimit_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+//go:build unix && !darwin
 
 package syscall
 
diff --git a/src/syscall/rlimit_test.go b/src/syscall/rlimit_test.go
index e48f45e..764694f 100644
--- a/src/syscall/rlimit_test.go
+++ b/src/syscall/rlimit_test.go
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build unix
+
 package syscall_test
 
 import (
diff --git a/src/syscall/security_windows.go b/src/syscall/security_windows.go
index 00dc920..4e988c4 100644
--- a/src/syscall/security_windows.go
+++ b/src/syscall/security_windows.go
@@ -30,7 +30,7 @@
 )
 
 // This function returns 1 byte BOOLEAN rather than the 4 byte BOOL.
-// https://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx
+// https://learn.microsoft.com/en-gb/archive/blogs/drnick/windows-and-upn-format-credentials
 //sys	TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW
 //sys	GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW
 
diff --git a/src/syscall/syscall.go b/src/syscall/syscall.go
index 446a299..f75ba31 100644
--- a/src/syscall/syscall.go
+++ b/src/syscall/syscall.go
@@ -18,11 +18,11 @@
 // err is an operating system error describing the failure.
 // On most systems, that error has type syscall.Errno.
 //
-// Deprecated: this package is locked down. Callers should use the
-// corresponding package in the golang.org/x/sys repository instead.
-// That is also where updates required by new systems or versions
-// should be applied. See https://golang.org/s/go1.4-syscall for more
-// information.
+// NOTE: Most of the functions, types, and constants defined in
+// this package are also available in the [golang.org/x/sys] package.
+// That package has more system call support than this one,
+// and most new code should prefer that package where possible.
+// See https://golang.org/s/go1.4-syscall for more information.
 package syscall
 
 import "internal/bytealg"
diff --git a/src/syscall/syscall_bsd.go b/src/syscall/syscall_bsd.go
index 0bb3cdf..233c6b2 100644
--- a/src/syscall/syscall_bsd.go
+++ b/src/syscall/syscall_bsd.go
@@ -515,6 +515,9 @@
 }
 
 //sys	fcntl(fd int, cmd int, arg int) (val int, err error)
+//sys	fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) = SYS_FCNTL
+//sysnb ioctl(fd int, req int, arg int) (err error)
+//sysnb ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
 
 var mapper = &mmapper{
 	active: make(map[*byte][]byte),
diff --git a/src/syscall/syscall_darwin.go b/src/syscall/syscall_darwin.go
index a9639e3..2e13b57c 100644
--- a/src/syscall/syscall_darwin.go
+++ b/src/syscall/syscall_darwin.go
@@ -211,12 +211,9 @@
 //sys   mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys   munmap(addr uintptr, length uintptr) (err error)
 //sysnb fork() (pid int, err error)
-//sysnb ioctl(fd int, req int, arg int) (err error)
-//sysnb ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_ioctl
 //sysnb execve(path *byte, argv **byte, envp **byte) (err error)
 //sysnb exit(res int) (err error)
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error)
-//sys	fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) = SYS_fcntl
 //sys   unlinkat(fd int, path string, flags int) (err error)
 //sys   openat(fd int, path string, flags int, perm uint32) (fdret int, err error)
 //sys	getcwd(buf []byte) (n int, err error)
@@ -247,15 +244,6 @@
 	return
 }
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
 func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
 	// Simulate Getdirentries using fdopendir/readdir_r/closedir.
 	// We store the number of entries to skip in the seek
diff --git a/src/syscall/syscall_darwin_amd64.go b/src/syscall/syscall_darwin_amd64.go
index 52ca3c8..64e54ad 100644
--- a/src/syscall/syscall_darwin_amd64.go
+++ b/src/syscall/syscall_darwin_amd64.go
@@ -24,8 +24,7 @@
 //sys	Stat(path string, stat *Stat_t) (err error) = SYS_stat64
 //sys	Statfs(path string, stat *Statfs_t) (err error) = SYS_statfs64
 //sys   fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_fstatat64
-//sys   ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
-//sys   ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
+//sys   ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
 	k.Ident = uint64(fd)
diff --git a/src/syscall/syscall_darwin_arm64.go b/src/syscall/syscall_darwin_arm64.go
index d5da988..913c748 100644
--- a/src/syscall/syscall_darwin_arm64.go
+++ b/src/syscall/syscall_darwin_arm64.go
@@ -24,8 +24,7 @@
 //sys	Stat(path string, stat *Stat_t) (err error)
 //sys	Statfs(path string, stat *Statfs_t) (err error)
 //sys	fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
-//sys	ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
-//sys	ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
+//sys	ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
 	k.Ident = uint64(fd)
diff --git a/src/syscall/syscall_dragonfly.go b/src/syscall/syscall_dragonfly.go
index 6cb506b..30936c7 100644
--- a/src/syscall/syscall_dragonfly.go
+++ b/src/syscall/syscall_dragonfly.go
@@ -257,7 +257,6 @@
 //sys   mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys   munmap(addr uintptr, length uintptr) (err error)
 //sys	readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
 //sys	accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error)
 //sys	utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)
 //sys	getcwd(buf []byte) (n int, err error) = SYS___GETCWD
diff --git a/src/syscall/syscall_freebsd.go b/src/syscall/syscall_freebsd.go
index a0faa81..584522d 100644
--- a/src/syscall/syscall_freebsd.go
+++ b/src/syscall/syscall_freebsd.go
@@ -250,7 +250,6 @@
 //sys   mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys   munmap(addr uintptr, length uintptr) (err error)
 //sys	readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
 //sys	accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error)
 //sys	utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)
 //sys	getcwd(buf []byte) (n int, err error) = SYS___GETCWD
diff --git a/src/syscall/syscall_linux.go b/src/syscall/syscall_linux.go
index 8b0a57b..b6e8420 100644
--- a/src/syscall/syscall_linux.go
+++ b/src/syscall/syscall_linux.go
@@ -95,7 +95,7 @@
 }
 
 func rawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
-func rawVforkSyscall(trap, a1, a2 uintptr) (r1 uintptr, err Errno)
+func rawVforkSyscall(trap, a1, a2, a3 uintptr) (r1 uintptr, err Errno)
 
 /*
  * Wrapped
@@ -241,15 +241,23 @@
 }
 
 //sys	fchmodat(dirfd int, path string, mode uint32) (err error)
+//sys	fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) = _SYS_fchmodat2
 
-func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
-	// Linux fchmodat doesn't support the flags parameter. Mimic glibc's behavior
-	// and check the flags. Otherwise the mode would be applied to the symlink
-	// destination which is not what the user expects.
-	if flags&^_AT_SYMLINK_NOFOLLOW != 0 {
-		return EINVAL
-	} else if flags&_AT_SYMLINK_NOFOLLOW != 0 {
-		return EOPNOTSUPP
+func Fchmodat(dirfd int, path string, mode uint32, flags int) error {
+	// Linux fchmodat doesn't support the flags parameter, but fchmodat2 does.
+	// Try fchmodat2 if flags are specified.
+	if flags != 0 {
+		err := fchmodat2(dirfd, path, mode, flags)
+		if err == ENOSYS {
+			// fchmodat2 isn't available. If the flags are known to be valid,
+			// return EOPNOTSUPP to indicate that fchmodat doesn't support them.
+			if flags&^(_AT_SYMLINK_NOFOLLOW|_AT_EMPTY_PATH) != 0 {
+				return EINVAL
+			} else if flags&(_AT_SYMLINK_NOFOLLOW|_AT_EMPTY_PATH) != 0 {
+				return EOPNOTSUPP
+			}
+		}
+		return err
 	}
 	return fchmodat(dirfd, path, mode)
 }
@@ -554,7 +562,8 @@
 	if n > 0 {
 		sl += _Socklen(n) + 1
 	}
-	if sa.raw.Path[0] == '@' {
+	if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) {
+		// Check sl > 3 so we don't change unnamed socket behavior.
 		sa.raw.Path[0] = 0
 		// Don't count trailing NUL for abstract address.
 		sl--
@@ -1246,7 +1255,6 @@
 //sys	write(fd int, p []byte) (n int, err error)
 //sys	exitThread(code int) (err error) = SYS_EXIT
 //sys	readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE
 
 // mmap varies by architecture; see syscall_linux_*.go.
 //sys	munmap(addr uintptr, length uintptr) (err error)
@@ -1278,7 +1286,7 @@
 func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
 	err = prlimit1(pid, resource, newlimit, old)
 	if err == nil && newlimit != nil && resource == RLIMIT_NOFILE {
-		origRlimitNofile.Store(Rlimit{0, 0})
+		origRlimitNofile.Store(nil)
 	}
 	return err
 }
diff --git a/src/syscall/syscall_linux_386.go b/src/syscall/syscall_linux_386.go
index 9cbd9ac..a559f7e 100644
--- a/src/syscall/syscall_linux_386.go
+++ b/src/syscall/syscall_linux_386.go
@@ -10,6 +10,7 @@
 	_SYS_setgroups  = SYS_SETGROUPS32
 	_SYS_clone3     = 435
 	_SYS_faccessat2 = 439
+	_SYS_fchmodat2  = 452
 )
 
 func setTimespec(sec, nsec int64) Timespec {
diff --git a/src/syscall/syscall_linux_amd64.go b/src/syscall/syscall_linux_amd64.go
index aa85a52..ec52f8a 100644
--- a/src/syscall/syscall_linux_amd64.go
+++ b/src/syscall/syscall_linux_amd64.go
@@ -12,6 +12,7 @@
 	_SYS_setgroups  = SYS_SETGROUPS
 	_SYS_clone3     = 435
 	_SYS_faccessat2 = 439
+	_SYS_fchmodat2  = 452
 )
 
 //sys	Dup2(oldfd int, newfd int) (err error)
diff --git a/src/syscall/syscall_linux_arm.go b/src/syscall/syscall_linux_arm.go
index 600ec35..a6d92ce 100644
--- a/src/syscall/syscall_linux_arm.go
+++ b/src/syscall/syscall_linux_arm.go
@@ -10,6 +10,7 @@
 	_SYS_setgroups  = SYS_SETGROUPS32
 	_SYS_clone3     = 435
 	_SYS_faccessat2 = 439
+	_SYS_fchmodat2  = 452
 )
 
 func setTimespec(sec, nsec int64) Timespec {
diff --git a/src/syscall/syscall_linux_arm64.go b/src/syscall/syscall_linux_arm64.go
index 42984ba..b87b51c 100644
--- a/src/syscall/syscall_linux_arm64.go
+++ b/src/syscall/syscall_linux_arm64.go
@@ -10,6 +10,7 @@
 	_SYS_setgroups  = SYS_SETGROUPS
 	_SYS_clone3     = 435
 	_SYS_faccessat2 = 439
+	_SYS_fchmodat2  = 452
 )
 
 //sys	EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT
@@ -36,7 +37,7 @@
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
 //sys	Setfsgid(gid int) (err error)
 //sys	Setfsuid(uid int) (err error)
-//sysnb	setrlimit1(resource int, rlim *Rlimit) (err error)
+//sysnb	setrlimit1(resource int, rlim *Rlimit) (err error) = SYS_SETRLIMIT
 //sys	Shutdown(fd int, how int) (err error)
 //sys	Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
 
diff --git a/src/syscall/syscall_linux_loong64.go b/src/syscall/syscall_linux_loong64.go
index f8f01c2..634cf30 100644
--- a/src/syscall/syscall_linux_loong64.go
+++ b/src/syscall/syscall_linux_loong64.go
@@ -10,6 +10,7 @@
 	_SYS_setgroups  = SYS_SETGROUPS
 	_SYS_clone3     = 435
 	_SYS_faccessat2 = 439
+	_SYS_fchmodat2  = 452
 )
 
 //sys	EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT
diff --git a/src/syscall/syscall_linux_mips64x.go b/src/syscall/syscall_linux_mips64x.go
index 47410d4..41106ed 100644
--- a/src/syscall/syscall_linux_mips64x.go
+++ b/src/syscall/syscall_linux_mips64x.go
@@ -14,6 +14,7 @@
 	_SYS_setgroups  = SYS_SETGROUPS
 	_SYS_clone3     = 5435
 	_SYS_faccessat2 = 5439
+	_SYS_fchmodat2  = 5452
 )
 
 //sys	Dup2(oldfd int, newfd int) (err error)
diff --git a/src/syscall/syscall_linux_mipsx.go b/src/syscall/syscall_linux_mipsx.go
index d8d5044..7d4f8f2 100644
--- a/src/syscall/syscall_linux_mipsx.go
+++ b/src/syscall/syscall_linux_mipsx.go
@@ -12,6 +12,7 @@
 	_SYS_setgroups  = SYS_SETGROUPS
 	_SYS_clone3     = 4435
 	_SYS_faccessat2 = 4439
+	_SYS_fchmodat2  = 4452
 )
 
 func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno)
diff --git a/src/syscall/syscall_linux_ppc64x.go b/src/syscall/syscall_linux_ppc64x.go
index 36f7711..13c184c 100644
--- a/src/syscall/syscall_linux_ppc64x.go
+++ b/src/syscall/syscall_linux_ppc64x.go
@@ -14,6 +14,7 @@
 	_SYS_setgroups  = SYS_SETGROUPS
 	_SYS_clone3     = 435
 	_SYS_faccessat2 = 439
+	_SYS_fchmodat2  = 452
 )
 
 //sys	Dup2(oldfd int, newfd int) (err error)
diff --git a/src/syscall/syscall_linux_riscv64.go b/src/syscall/syscall_linux_riscv64.go
index 44ff1d7..00872a7 100644
--- a/src/syscall/syscall_linux_riscv64.go
+++ b/src/syscall/syscall_linux_riscv64.go
@@ -10,6 +10,7 @@
 	_SYS_setgroups  = SYS_SETGROUPS
 	_SYS_clone3     = 435
 	_SYS_faccessat2 = 439
+	_SYS_fchmodat2  = 452
 )
 
 //sys	EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT
diff --git a/src/syscall/syscall_linux_s390x.go b/src/syscall/syscall_linux_s390x.go
index 44990f2..ea667ec 100644
--- a/src/syscall/syscall_linux_s390x.go
+++ b/src/syscall/syscall_linux_s390x.go
@@ -10,6 +10,7 @@
 	_SYS_setgroups  = SYS_SETGROUPS
 	_SYS_clone3     = 435
 	_SYS_faccessat2 = 439
+	_SYS_fchmodat2  = 452
 )
 
 //sys	Dup2(oldfd int, newfd int) (err error)
diff --git a/src/syscall/syscall_linux_test.go b/src/syscall/syscall_linux_test.go
index ff128b1..1300fc0 100644
--- a/src/syscall/syscall_linux_test.go
+++ b/src/syscall/syscall_linux_test.go
@@ -6,6 +6,7 @@
 
 import (
 	"fmt"
+	"internal/testenv"
 	"io"
 	"io/fs"
 	"os"
@@ -198,6 +199,13 @@
 	if os.Getuid() != 0 {
 		t.Skip("skipping root only test")
 	}
+	if testing.Short() && testenv.Builder() != "" && os.Getenv("USER") == "swarming" {
+		// The Go build system's swarming user is known not to be root.
+		// Unfortunately, it sometimes appears as root due to the current
+		// implementation of a no-network check using 'unshare -n -r'.
+		// Since this test does need root to work, we need to skip it.
+		t.Skip("skipping root only test on a non-root builder")
+	}
 
 	if runtime.GOOS == "android" {
 		t.Skip("skipping on rooted android, see issue 27364")
@@ -516,6 +524,13 @@
 	if syscall.Getuid() != 0 {
 		t.Skip("skipping root only test")
 	}
+	if testing.Short() && testenv.Builder() != "" && os.Getenv("USER") == "swarming" {
+		// The Go build system's swarming user is known not to be root.
+		// Unfortunately, it sometimes appears as root due to the current
+		// implementation of a no-network check using 'unshare -n -r'.
+		// Since this test does need root to work, we need to skip it.
+		t.Skip("skipping root only test on a non-root builder")
+	}
 	if _, err := os.Stat("/etc/alpine-release"); err == nil {
 		t.Skip("skipping glibc test on alpine - go.dev/issue/19938")
 	}
diff --git a/src/syscall/syscall_netbsd.go b/src/syscall/syscall_netbsd.go
index 333dd3a..5a239f8 100644
--- a/src/syscall/syscall_netbsd.go
+++ b/src/syscall/syscall_netbsd.go
@@ -239,7 +239,6 @@
 //sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys	munmap(addr uintptr, length uintptr) (err error)
 //sys	readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
 //sys	utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)
 //sys	getcwd(buf []byte) (n int, err error) = SYS___GETCWD
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
diff --git a/src/syscall/syscall_openbsd.go b/src/syscall/syscall_openbsd.go
index 5784d5c..80a3854 100644
--- a/src/syscall/syscall_openbsd.go
+++ b/src/syscall/syscall_openbsd.go
@@ -37,7 +37,7 @@
 	left := 0
 	right := len(sysctlMib) - 1
 	for {
-		idx := left + (right-left)/2
+		idx := int(uint(left+right) >> 1)
 		switch {
 		case name == sysctlMib[idx].ctlname:
 			return sysctlMib[idx].ctloid, nil
@@ -116,18 +116,13 @@
 }
 
 func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
-	var _p0 unsafe.Pointer
+	var bufptr *Statfs_t
 	var bufsize uintptr
 	if len(buf) > 0 {
-		_p0 = unsafe.Pointer(&buf[0])
+		bufptr = &buf[0]
 		bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
 	}
-	r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
-	n = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
+	return getfsstat(bufptr, bufsize, flags)
 }
 
 /*
@@ -212,4 +207,5 @@
 //sys	writev(fd int, iovecs []Iovec) (n uintptr, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys	munmap(addr uintptr, length uintptr) (err error)
+//sys	getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error)
 //sys	utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)
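Getfsstat now goes through a generated //sys stub instead of a hand-rolled Syscall; its exported behavior is unchanged. A usage sketch (OpenBSD-only; flags value 0 is used for simplicity, see statfs(2) for MNT_WAIT/MNT_NOWAIT):

```go
//go:build openbsd

package main

import (
	"fmt"
	"log"
	"syscall"
)

func main() {
	// Passing a nil slice returns the number of mounted filesystems.
	n, err := syscall.Getfsstat(nil, 0)
	if err != nil {
		log.Fatal(err)
	}
	buf := make([]syscall.Statfs_t, n)
	n, err = syscall.Getfsstat(buf, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mounted filesystems:", n)
}
```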
diff --git a/src/syscall/syscall_openbsd1.go b/src/syscall/syscall_openbsd1.go
index bddeda6..6c24243 100644
--- a/src/syscall/syscall_openbsd1.go
+++ b/src/syscall/syscall_openbsd1.go
@@ -7,7 +7,6 @@
 package syscall
 
 //sys	readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
-//sys	writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
 //sys	Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
 //sys	getcwd(buf []byte) (n int, err error) = SYS___GETCWD
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
diff --git a/src/syscall/syscall_openbsd_libc.go b/src/syscall/syscall_openbsd_libc.go
index de503cc..ddf62f4 100644
--- a/src/syscall/syscall_openbsd_libc.go
+++ b/src/syscall/syscall_openbsd_libc.go
@@ -58,18 +58,14 @@
 }
 
 //sys	readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_read
-//sys	writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_write
 //sys	Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_lseek
 //sys	getcwd(buf []byte) (n int, err error)
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error)
 //sysnb fork() (pid int, err error)
-//sysnb ioctl(fd int, req int, arg int) (err error)
 //sysnb execve(path *byte, argv **byte, envp **byte) (err error)
 //sysnb exit(res int) (err error)
 //sys   ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
-//sys   ptracePtr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
 //sysnb getentropy(p []byte) (err error)
 //sys   fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
-//sys	fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) = SYS_fcntl
 //sys   unlinkat(fd int, path string, flags int) (err error)
 //sys   openat(fd int, path string, flags int, perm uint32) (fdret int, err error)
diff --git a/src/syscall/syscall_openbsd_ppc64.go b/src/syscall/syscall_openbsd_ppc64.go
new file mode 100644
index 0000000..ae4d825
--- /dev/null
+++ b/src/syscall/syscall_openbsd_ppc64.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
+}
+
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+	k.Ident = uint64(fd)
+	k.Filter = int16(mode)
+	k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+	iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+	msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+	cmsg.Len = uint32(length)
+}
+
+// RTM_LOCK only exists in OpenBSD 6.3 and earlier.
+const RTM_LOCK = 0x8
+
+// SYS___SYSCTL only exists in OpenBSD 5.8 and earlier, when it
+// was renamed to SYS_SYSCTL.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/src/syscall/syscall_openbsd_riscv64.go b/src/syscall/syscall_openbsd_riscv64.go
new file mode 100644
index 0000000..ae4d825
--- /dev/null
+++ b/src/syscall/syscall_openbsd_riscv64.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
+}
+
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+	k.Ident = uint64(fd)
+	k.Filter = int16(mode)
+	k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+	iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+	msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+	cmsg.Len = uint32(length)
+}
+
+// RTM_LOCK only exists in OpenBSD 6.3 and earlier.
+const RTM_LOCK = 0x8
+
+// SYS___SYSCTL only exists in OpenBSD 5.8 and earlier, when it
+// was renamed to SYS_SYSCTL.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/src/syscall/syscall_solaris.go b/src/syscall/syscall_solaris.go
index 523a2a8..28d3727 100644
--- a/src/syscall/syscall_solaris.go
+++ b/src/syscall/syscall_solaris.go
@@ -131,7 +131,8 @@
 	if n > 0 {
 		sl += _Socklen(n) + 1
 	}
-	if sa.raw.Path[0] == '@' {
+	if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) {
+		// Check sl > 3 so we don't change unnamed socket behavior.
 		sa.raw.Path[0] = 0
 		// Don't count trailing NUL for abstract address.
 		sl--
@@ -527,15 +528,6 @@
 	return
 }
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_write)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 var mapper = &mmapper{
 	active: make(map[*byte][]byte),
 	mmap:   mmap,
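For context on the sockaddr change above: a SockaddrUnix name beginning with '@' marks a Linux-style abstract address, which is encoded with a NUL first byte and without a trailing NUL; the new `Path[0] == 0 && sl > 3` case also accepts names that already carry the raw leading NUL, while the `sl > 3` guard leaves unnamed sockets untouched. A trivial illustrative sketch of the '@' convention (not a complete client):

	package main

	import (
		"fmt"
		"syscall"
	)

	func main() {
		// The leading '@' is the portable spelling of an abstract address;
		// sockaddr() translates it to a leading NUL byte on the wire.
		sa := &syscall.SockaddrUnix{Name: "@example-abstract"}
		fmt.Println(sa.Name) // pass sa to Bind/Connect on platforms that support it
	}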
diff --git a/src/syscall/syscall_windows.go b/src/syscall/syscall_windows.go
index e348905..d13acc5 100644
--- a/src/syscall/syscall_windows.go
+++ b/src/syscall/syscall_windows.go
@@ -264,7 +264,7 @@
 //sys	OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error)
 //sys	TerminateProcess(handle Handle, exitcode uint32) (err error)
 //sys	GetExitCodeProcess(handle Handle, exitcode *uint32) (err error)
-//sys	GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW
+//sys	getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW
 //sys	GetCurrentProcess() (pseudoHandle Handle, err error)
 //sys	GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error)
 //sys	DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error)
@@ -409,6 +409,10 @@
 		// Necessary for opening directory handles.
 		attrs |= FILE_FLAG_BACKUP_SEMANTICS
 	}
+	if mode&O_SYNC != 0 {
+		const _FILE_FLAG_WRITE_THROUGH = 0x80000000
+		attrs |= _FILE_FLAG_WRITE_THROUGH
+	}
 	return CreateFile(pathp, access, sharemode, sa, createmode, attrs, 0)
 }
 
@@ -475,7 +479,7 @@
 const ptrSize = unsafe.Sizeof(uintptr(0))
 
 // setFilePointerEx calls SetFilePointerEx.
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365542(v=vs.85).aspx
+// See https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-setfilepointerex
 func setFilePointerEx(handle Handle, distToMove int64, newFilePointer *int64, whence uint32) error {
 	var e1 Errno
 	if unsafe.Sizeof(uintptr(0)) == 8 {
@@ -862,7 +866,8 @@
 	if n > 0 {
 		sl += int32(n) + 1
 	}
-	if sa.raw.Path[0] == '@' {
+	if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) {
+		// Check sl > 3 so we don't change unnamed socket behavior.
 		sa.raw.Path[0] = 0
 		// Don't count trailing NUL for abstract address.
 		sl--
@@ -1437,3 +1442,8 @@
 func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) {
 	return regEnumKeyEx(key, index, name, nameLen, reserved, class, classLen, lastWriteTime)
 }
+
+func GetStartupInfo(startupInfo *StartupInfo) error {
+	getStartupInfo(startupInfo)
+	return nil
+}
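A minimal Windows-only usage sketch of the two user-visible changes above: O_SYNC now adds FILE_FLAG_WRITE_THROUGH when the file is opened, so writes are pushed through the OS cache to disk, and GetStartupInfo is now a thin wrapper over GetStartupInfoW that always returns nil because the underlying call cannot fail (go.dev/issue/31316). The path below is an arbitrary example.

	package main

	import (
		"log"
		"syscall"
	)

	func main() {
		// O_SYNC maps to FILE_FLAG_WRITE_THROUGH in CreateFile's attributes.
		fd, err := syscall.Open(`C:\temp\example.log`,
			syscall.O_WRONLY|syscall.O_CREAT|syscall.O_SYNC, 0)
		if err != nil {
			log.Fatal(err)
		}
		defer syscall.Close(fd)

		// GetStartupInfo now always succeeds.
		var si syscall.StartupInfo
		if err := syscall.GetStartupInfo(&si); err != nil {
			log.Fatal(err)
		}
	}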
diff --git a/src/syscall/syscall_windows_test.go b/src/syscall/syscall_windows_test.go
index 81285e9..f67e899 100644
--- a/src/syscall/syscall_windows_test.go
+++ b/src/syscall/syscall_windows_test.go
@@ -204,6 +204,15 @@
 	syscall.Getwd()
 }
 
+func TestGetStartupInfo(t *testing.T) {
+	var si syscall.StartupInfo
+	err := syscall.GetStartupInfo(&si)
+	if err != nil {
+		// see https://go.dev/issue/31316
+		t.Fatalf("GetStartupInfo: got error %v, want nil", err)
+	}
+}
+
 func FuzzUTF16FromString(f *testing.F) {
 	f.Add("hi")           // ASCII
 	f.Add("â")            // latin1
diff --git a/src/syscall/types_windows.go b/src/syscall/types_windows.go
index 384b5b4..b338ec4 100644
--- a/src/syscall/types_windows.go
+++ b/src/syscall/types_windows.go
@@ -586,7 +586,7 @@
 	SIO_KEEPALIVE_VALS                 = IOC_IN | IOC_VENDOR | 4
 	SIO_UDP_CONNRESET                  = IOC_IN | IOC_VENDOR | 12
 
-	// cf. https://support.microsoft.com/default.aspx?scid=kb;en-us;257460
+	// cf. https://learn.microsoft.com/en-US/troubleshoot/windows/win32/header-library-requirement-socket-ipproto-ip
 
 	IP_TOS             = 0x3
 	IP_TTL             = 0x4
diff --git a/src/syscall/zerrors_openbsd_ppc64.go b/src/syscall/zerrors_openbsd_ppc64.go
new file mode 100644
index 0000000..aaca580
--- /dev/null
+++ b/src/syscall/zerrors_openbsd_ppc64.go
@@ -0,0 +1,1694 @@
+// mkerrors.sh -m64
+// Code generated by the command above; DO NOT EDIT.
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs -- -m64 _const.go
+
+package syscall
+
+const (
+	AF_APPLETALK                      = 0x10
+	AF_BLUETOOTH                      = 0x20
+	AF_CCITT                          = 0xa
+	AF_CHAOS                          = 0x5
+	AF_CNT                            = 0x15
+	AF_COIP                           = 0x14
+	AF_DATAKIT                        = 0x9
+	AF_DECnet                         = 0xc
+	AF_DLI                            = 0xd
+	AF_E164                           = 0x1a
+	AF_ECMA                           = 0x8
+	AF_ENCAP                          = 0x1c
+	AF_HYLINK                         = 0xf
+	AF_IMPLINK                        = 0x3
+	AF_INET                           = 0x2
+	AF_INET6                          = 0x18
+	AF_IPX                            = 0x17
+	AF_ISDN                           = 0x1a
+	AF_ISO                            = 0x7
+	AF_KEY                            = 0x1e
+	AF_LAT                            = 0xe
+	AF_LINK                           = 0x12
+	AF_LOCAL                          = 0x1
+	AF_MAX                            = 0x24
+	AF_MPLS                           = 0x21
+	AF_NATM                           = 0x1b
+	AF_NS                             = 0x6
+	AF_OSI                            = 0x7
+	AF_PUP                            = 0x4
+	AF_ROUTE                          = 0x11
+	AF_SIP                            = 0x1d
+	AF_SNA                            = 0xb
+	AF_UNIX                           = 0x1
+	AF_UNSPEC                         = 0x0
+	ARPHRD_ETHER                      = 0x1
+	ARPHRD_FRELAY                     = 0xf
+	ARPHRD_IEEE1394                   = 0x18
+	ARPHRD_IEEE802                    = 0x6
+	B0                                = 0x0
+	B110                              = 0x6e
+	B115200                           = 0x1c200
+	B1200                             = 0x4b0
+	B134                              = 0x86
+	B14400                            = 0x3840
+	B150                              = 0x96
+	B1800                             = 0x708
+	B19200                            = 0x4b00
+	B200                              = 0xc8
+	B230400                           = 0x38400
+	B2400                             = 0x960
+	B28800                            = 0x7080
+	B300                              = 0x12c
+	B38400                            = 0x9600
+	B4800                             = 0x12c0
+	B50                               = 0x32
+	B57600                            = 0xe100
+	B600                              = 0x258
+	B7200                             = 0x1c20
+	B75                               = 0x4b
+	B76800                            = 0x12c00
+	B9600                             = 0x2580
+	BIOCFLUSH                         = 0x20004268
+	BIOCGBLEN                         = 0x40044266
+	BIOCGDIRFILT                      = 0x4004427c
+	BIOCGDLT                          = 0x4004426a
+	BIOCGDLTLIST                      = 0xc010427b
+	BIOCGETIF                         = 0x4020426b
+	BIOCGFILDROP                      = 0x40044278
+	BIOCGHDRCMPLT                     = 0x40044274
+	BIOCGRSIG                         = 0x40044273
+	BIOCGRTIMEOUT                     = 0x4010426e
+	BIOCGSTATS                        = 0x4008426f
+	BIOCIMMEDIATE                     = 0x80044270
+	BIOCLOCK                          = 0x20004276
+	BIOCPROMISC                       = 0x20004269
+	BIOCSBLEN                         = 0xc0044266
+	BIOCSDIRFILT                      = 0x8004427d
+	BIOCSDLT                          = 0x8004427a
+	BIOCSETF                          = 0x80104267
+	BIOCSETIF                         = 0x8020426c
+	BIOCSETWF                         = 0x80104277
+	BIOCSFILDROP                      = 0x80044279
+	BIOCSHDRCMPLT                     = 0x80044275
+	BIOCSRSIG                         = 0x80044272
+	BIOCSRTIMEOUT                     = 0x8010426d
+	BIOCVERSION                       = 0x40044271
+	BPF_A                             = 0x10
+	BPF_ABS                           = 0x20
+	BPF_ADD                           = 0x0
+	BPF_ALIGNMENT                     = 0x4
+	BPF_ALU                           = 0x4
+	BPF_AND                           = 0x50
+	BPF_B                             = 0x10
+	BPF_DIRECTION_IN                  = 0x1
+	BPF_DIRECTION_OUT                 = 0x2
+	BPF_DIV                           = 0x30
+	BPF_FILDROP_CAPTURE               = 0x1
+	BPF_FILDROP_DROP                  = 0x2
+	BPF_FILDROP_PASS                  = 0x0
+	BPF_F_DIR_IN                      = 0x10
+	BPF_F_DIR_MASK                    = 0x30
+	BPF_F_DIR_OUT                     = 0x20
+	BPF_F_DIR_SHIFT                   = 0x4
+	BPF_F_FLOWID                      = 0x8
+	BPF_F_PRI_MASK                    = 0x7
+	BPF_H                             = 0x8
+	BPF_IMM                           = 0x0
+	BPF_IND                           = 0x40
+	BPF_JA                            = 0x0
+	BPF_JEQ                           = 0x10
+	BPF_JGE                           = 0x30
+	BPF_JGT                           = 0x20
+	BPF_JMP                           = 0x5
+	BPF_JSET                          = 0x40
+	BPF_K                             = 0x0
+	BPF_LD                            = 0x0
+	BPF_LDX                           = 0x1
+	BPF_LEN                           = 0x80
+	BPF_LSH                           = 0x60
+	BPF_MAJOR_VERSION                 = 0x1
+	BPF_MAXBUFSIZE                    = 0x200000
+	BPF_MAXINSNS                      = 0x200
+	BPF_MEM                           = 0x60
+	BPF_MEMWORDS                      = 0x10
+	BPF_MINBUFSIZE                    = 0x20
+	BPF_MINOR_VERSION                 = 0x1
+	BPF_MISC                          = 0x7
+	BPF_MSH                           = 0xa0
+	BPF_MUL                           = 0x20
+	BPF_NEG                           = 0x80
+	BPF_OR                            = 0x40
+	BPF_RELEASE                       = 0x30bb6
+	BPF_RET                           = 0x6
+	BPF_RND                           = 0xc0
+	BPF_RSH                           = 0x70
+	BPF_ST                            = 0x2
+	BPF_STX                           = 0x3
+	BPF_SUB                           = 0x10
+	BPF_TAX                           = 0x0
+	BPF_TXA                           = 0x80
+	BPF_W                             = 0x0
+	BPF_X                             = 0x8
+	BRKINT                            = 0x2
+	CFLUSH                            = 0xf
+	CLOCAL                            = 0x8000
+	CREAD                             = 0x800
+	CS5                               = 0x0
+	CS6                               = 0x100
+	CS7                               = 0x200
+	CS8                               = 0x300
+	CSIZE                             = 0x300
+	CSTART                            = 0x11
+	CSTATUS                           = 0xff
+	CSTOP                             = 0x13
+	CSTOPB                            = 0x400
+	CSUSP                             = 0x1a
+	CTL_MAXNAME                       = 0xc
+	CTL_NET                           = 0x4
+	DIOCOSFPFLUSH                     = 0x2000444e
+	DLT_ARCNET                        = 0x7
+	DLT_ATM_RFC1483                   = 0xb
+	DLT_AX25                          = 0x3
+	DLT_CHAOS                         = 0x5
+	DLT_C_HDLC                        = 0x68
+	DLT_EN10MB                        = 0x1
+	DLT_EN3MB                         = 0x2
+	DLT_ENC                           = 0xd
+	DLT_FDDI                          = 0xa
+	DLT_IEEE802                       = 0x6
+	DLT_IEEE802_11                    = 0x69
+	DLT_IEEE802_11_RADIO              = 0x7f
+	DLT_LOOP                          = 0xc
+	DLT_MPLS                          = 0xdb
+	DLT_NULL                          = 0x0
+	DLT_OPENFLOW                      = 0x10b
+	DLT_PFLOG                         = 0x75
+	DLT_PFSYNC                        = 0x12
+	DLT_PPP                           = 0x9
+	DLT_PPP_BSDOS                     = 0x10
+	DLT_PPP_ETHER                     = 0x33
+	DLT_PPP_SERIAL                    = 0x32
+	DLT_PRONET                        = 0x4
+	DLT_RAW                           = 0xe
+	DLT_SLIP                          = 0x8
+	DLT_SLIP_BSDOS                    = 0xf
+	DLT_USBPCAP                       = 0xf9
+	DLT_USER0                         = 0x93
+	DLT_USER1                         = 0x94
+	DLT_USER10                        = 0x9d
+	DLT_USER11                        = 0x9e
+	DLT_USER12                        = 0x9f
+	DLT_USER13                        = 0xa0
+	DLT_USER14                        = 0xa1
+	DLT_USER15                        = 0xa2
+	DLT_USER2                         = 0x95
+	DLT_USER3                         = 0x96
+	DLT_USER4                         = 0x97
+	DLT_USER5                         = 0x98
+	DLT_USER6                         = 0x99
+	DLT_USER7                         = 0x9a
+	DLT_USER8                         = 0x9b
+	DLT_USER9                         = 0x9c
+	DT_BLK                            = 0x6
+	DT_CHR                            = 0x2
+	DT_DIR                            = 0x4
+	DT_FIFO                           = 0x1
+	DT_LNK                            = 0xa
+	DT_REG                            = 0x8
+	DT_SOCK                           = 0xc
+	DT_UNKNOWN                        = 0x0
+	ECHO                              = 0x8
+	ECHOCTL                           = 0x40
+	ECHOE                             = 0x2
+	ECHOK                             = 0x4
+	ECHOKE                            = 0x1
+	ECHONL                            = 0x10
+	ECHOPRT                           = 0x20
+	EMT_TAGOVF                        = 0x1
+	EMUL_ENABLED                      = 0x1
+	EMUL_NATIVE                       = 0x2
+	ENDRUNDISC                        = 0x9
+	ETH64_8021_RSVD_MASK              = 0xfffffffffff0
+	ETH64_8021_RSVD_PREFIX            = 0x180c2000000
+	ETHERMIN                          = 0x2e
+	ETHERMTU                          = 0x5dc
+	ETHERTYPE_8023                    = 0x4
+	ETHERTYPE_AARP                    = 0x80f3
+	ETHERTYPE_ACCTON                  = 0x8390
+	ETHERTYPE_AEONIC                  = 0x8036
+	ETHERTYPE_ALPHA                   = 0x814a
+	ETHERTYPE_AMBER                   = 0x6008
+	ETHERTYPE_AMOEBA                  = 0x8145
+	ETHERTYPE_AOE                     = 0x88a2
+	ETHERTYPE_APOLLO                  = 0x80f7
+	ETHERTYPE_APOLLODOMAIN            = 0x8019
+	ETHERTYPE_APPLETALK               = 0x809b
+	ETHERTYPE_APPLITEK                = 0x80c7
+	ETHERTYPE_ARGONAUT                = 0x803a
+	ETHERTYPE_ARP                     = 0x806
+	ETHERTYPE_AT                      = 0x809b
+	ETHERTYPE_ATALK                   = 0x809b
+	ETHERTYPE_ATOMIC                  = 0x86df
+	ETHERTYPE_ATT                     = 0x8069
+	ETHERTYPE_ATTSTANFORD             = 0x8008
+	ETHERTYPE_AUTOPHON                = 0x806a
+	ETHERTYPE_AXIS                    = 0x8856
+	ETHERTYPE_BCLOOP                  = 0x9003
+	ETHERTYPE_BOFL                    = 0x8102
+	ETHERTYPE_CABLETRON               = 0x7034
+	ETHERTYPE_CHAOS                   = 0x804
+	ETHERTYPE_COMDESIGN               = 0x806c
+	ETHERTYPE_COMPUGRAPHIC            = 0x806d
+	ETHERTYPE_COUNTERPOINT            = 0x8062
+	ETHERTYPE_CRONUS                  = 0x8004
+	ETHERTYPE_CRONUSVLN               = 0x8003
+	ETHERTYPE_DCA                     = 0x1234
+	ETHERTYPE_DDE                     = 0x807b
+	ETHERTYPE_DEBNI                   = 0xaaaa
+	ETHERTYPE_DECAM                   = 0x8048
+	ETHERTYPE_DECCUST                 = 0x6006
+	ETHERTYPE_DECDIAG                 = 0x6005
+	ETHERTYPE_DECDNS                  = 0x803c
+	ETHERTYPE_DECDTS                  = 0x803e
+	ETHERTYPE_DECEXPER                = 0x6000
+	ETHERTYPE_DECLAST                 = 0x8041
+	ETHERTYPE_DECLTM                  = 0x803f
+	ETHERTYPE_DECMUMPS                = 0x6009
+	ETHERTYPE_DECNETBIOS              = 0x8040
+	ETHERTYPE_DELTACON                = 0x86de
+	ETHERTYPE_DIDDLE                  = 0x4321
+	ETHERTYPE_DLOG1                   = 0x660
+	ETHERTYPE_DLOG2                   = 0x661
+	ETHERTYPE_DN                      = 0x6003
+	ETHERTYPE_DOGFIGHT                = 0x1989
+	ETHERTYPE_DSMD                    = 0x8039
+	ETHERTYPE_EAPOL                   = 0x888e
+	ETHERTYPE_ECMA                    = 0x803
+	ETHERTYPE_ENCRYPT                 = 0x803d
+	ETHERTYPE_ES                      = 0x805d
+	ETHERTYPE_EXCELAN                 = 0x8010
+	ETHERTYPE_EXPERDATA               = 0x8049
+	ETHERTYPE_FLIP                    = 0x8146
+	ETHERTYPE_FLOWCONTROL             = 0x8808
+	ETHERTYPE_FRARP                   = 0x808
+	ETHERTYPE_GENDYN                  = 0x8068
+	ETHERTYPE_HAYES                   = 0x8130
+	ETHERTYPE_HIPPI_FP                = 0x8180
+	ETHERTYPE_HITACHI                 = 0x8820
+	ETHERTYPE_HP                      = 0x8005
+	ETHERTYPE_IEEEPUP                 = 0xa00
+	ETHERTYPE_IEEEPUPAT               = 0xa01
+	ETHERTYPE_IMLBL                   = 0x4c42
+	ETHERTYPE_IMLBLDIAG               = 0x424c
+	ETHERTYPE_IP                      = 0x800
+	ETHERTYPE_IPAS                    = 0x876c
+	ETHERTYPE_IPV6                    = 0x86dd
+	ETHERTYPE_IPX                     = 0x8137
+	ETHERTYPE_IPXNEW                  = 0x8037
+	ETHERTYPE_KALPANA                 = 0x8582
+	ETHERTYPE_LANBRIDGE               = 0x8038
+	ETHERTYPE_LANPROBE                = 0x8888
+	ETHERTYPE_LAT                     = 0x6004
+	ETHERTYPE_LBACK                   = 0x9000
+	ETHERTYPE_LITTLE                  = 0x8060
+	ETHERTYPE_LLDP                    = 0x88cc
+	ETHERTYPE_LOGICRAFT               = 0x8148
+	ETHERTYPE_LOOPBACK                = 0x9000
+	ETHERTYPE_MACSEC                  = 0x88e5
+	ETHERTYPE_MATRA                   = 0x807a
+	ETHERTYPE_MAX                     = 0xffff
+	ETHERTYPE_MERIT                   = 0x807c
+	ETHERTYPE_MICP                    = 0x873a
+	ETHERTYPE_MOPDL                   = 0x6001
+	ETHERTYPE_MOPRC                   = 0x6002
+	ETHERTYPE_MOTOROLA                = 0x818d
+	ETHERTYPE_MPLS                    = 0x8847
+	ETHERTYPE_MPLS_MCAST              = 0x8848
+	ETHERTYPE_MUMPS                   = 0x813f
+	ETHERTYPE_NBPCC                   = 0x3c04
+	ETHERTYPE_NBPCLAIM                = 0x3c09
+	ETHERTYPE_NBPCLREQ                = 0x3c05
+	ETHERTYPE_NBPCLRSP                = 0x3c06
+	ETHERTYPE_NBPCREQ                 = 0x3c02
+	ETHERTYPE_NBPCRSP                 = 0x3c03
+	ETHERTYPE_NBPDG                   = 0x3c07
+	ETHERTYPE_NBPDGB                  = 0x3c08
+	ETHERTYPE_NBPDLTE                 = 0x3c0a
+	ETHERTYPE_NBPRAR                  = 0x3c0c
+	ETHERTYPE_NBPRAS                  = 0x3c0b
+	ETHERTYPE_NBPRST                  = 0x3c0d
+	ETHERTYPE_NBPSCD                  = 0x3c01
+	ETHERTYPE_NBPVCD                  = 0x3c00
+	ETHERTYPE_NBS                     = 0x802
+	ETHERTYPE_NCD                     = 0x8149
+	ETHERTYPE_NESTAR                  = 0x8006
+	ETHERTYPE_NETBEUI                 = 0x8191
+	ETHERTYPE_NHRP                    = 0x2001
+	ETHERTYPE_NOVELL                  = 0x8138
+	ETHERTYPE_NS                      = 0x600
+	ETHERTYPE_NSAT                    = 0x601
+	ETHERTYPE_NSCOMPAT                = 0x807
+	ETHERTYPE_NSH                     = 0x984f
+	ETHERTYPE_NTRAILER                = 0x10
+	ETHERTYPE_OS9                     = 0x7007
+	ETHERTYPE_OS9NET                  = 0x7009
+	ETHERTYPE_PACER                   = 0x80c6
+	ETHERTYPE_PBB                     = 0x88e7
+	ETHERTYPE_PCS                     = 0x4242
+	ETHERTYPE_PLANNING                = 0x8044
+	ETHERTYPE_PPP                     = 0x880b
+	ETHERTYPE_PPPOE                   = 0x8864
+	ETHERTYPE_PPPOEDISC               = 0x8863
+	ETHERTYPE_PRIMENTS                = 0x7031
+	ETHERTYPE_PUP                     = 0x200
+	ETHERTYPE_PUPAT                   = 0x200
+	ETHERTYPE_QINQ                    = 0x88a8
+	ETHERTYPE_RACAL                   = 0x7030
+	ETHERTYPE_RATIONAL                = 0x8150
+	ETHERTYPE_RAWFR                   = 0x6559
+	ETHERTYPE_RCL                     = 0x1995
+	ETHERTYPE_RDP                     = 0x8739
+	ETHERTYPE_RETIX                   = 0x80f2
+	ETHERTYPE_REVARP                  = 0x8035
+	ETHERTYPE_SCA                     = 0x6007
+	ETHERTYPE_SECTRA                  = 0x86db
+	ETHERTYPE_SECUREDATA              = 0x876d
+	ETHERTYPE_SGITW                   = 0x817e
+	ETHERTYPE_SG_BOUNCE               = 0x8016
+	ETHERTYPE_SG_DIAG                 = 0x8013
+	ETHERTYPE_SG_NETGAMES             = 0x8014
+	ETHERTYPE_SG_RESV                 = 0x8015
+	ETHERTYPE_SIMNET                  = 0x5208
+	ETHERTYPE_SLOW                    = 0x8809
+	ETHERTYPE_SNA                     = 0x80d5
+	ETHERTYPE_SNMP                    = 0x814c
+	ETHERTYPE_SONIX                   = 0xfaf5
+	ETHERTYPE_SPIDER                  = 0x809f
+	ETHERTYPE_SPRITE                  = 0x500
+	ETHERTYPE_STP                     = 0x8181
+	ETHERTYPE_TALARIS                 = 0x812b
+	ETHERTYPE_TALARISMC               = 0x852b
+	ETHERTYPE_TCPCOMP                 = 0x876b
+	ETHERTYPE_TCPSM                   = 0x9002
+	ETHERTYPE_TEC                     = 0x814f
+	ETHERTYPE_TIGAN                   = 0x802f
+	ETHERTYPE_TRAIL                   = 0x1000
+	ETHERTYPE_TRANSETHER              = 0x6558
+	ETHERTYPE_TYMSHARE                = 0x802e
+	ETHERTYPE_UBBST                   = 0x7005
+	ETHERTYPE_UBDEBUG                 = 0x900
+	ETHERTYPE_UBDIAGLOOP              = 0x7002
+	ETHERTYPE_UBDL                    = 0x7000
+	ETHERTYPE_UBNIU                   = 0x7001
+	ETHERTYPE_UBNMC                   = 0x7003
+	ETHERTYPE_VALID                   = 0x1600
+	ETHERTYPE_VARIAN                  = 0x80dd
+	ETHERTYPE_VAXELN                  = 0x803b
+	ETHERTYPE_VEECO                   = 0x8067
+	ETHERTYPE_VEXP                    = 0x805b
+	ETHERTYPE_VGLAB                   = 0x8131
+	ETHERTYPE_VINES                   = 0xbad
+	ETHERTYPE_VINESECHO               = 0xbaf
+	ETHERTYPE_VINESLOOP               = 0xbae
+	ETHERTYPE_VITAL                   = 0xff00
+	ETHERTYPE_VLAN                    = 0x8100
+	ETHERTYPE_VLTLMAN                 = 0x8080
+	ETHERTYPE_VPROD                   = 0x805c
+	ETHERTYPE_VURESERVED              = 0x8147
+	ETHERTYPE_WATERLOO                = 0x8130
+	ETHERTYPE_WELLFLEET               = 0x8103
+	ETHERTYPE_X25                     = 0x805
+	ETHERTYPE_X75                     = 0x801
+	ETHERTYPE_XNSSM                   = 0x9001
+	ETHERTYPE_XTP                     = 0x817d
+	ETHER_ADDR_LEN                    = 0x6
+	ETHER_ALIGN                       = 0x2
+	ETHER_CRC_LEN                     = 0x4
+	ETHER_CRC_POLY_BE                 = 0x4c11db6
+	ETHER_CRC_POLY_LE                 = 0xedb88320
+	ETHER_HDR_LEN                     = 0xe
+	ETHER_MAX_DIX_LEN                 = 0x600
+	ETHER_MAX_HARDMTU_LEN             = 0xff9b
+	ETHER_MAX_LEN                     = 0x5ee
+	ETHER_MIN_LEN                     = 0x40
+	ETHER_TYPE_LEN                    = 0x2
+	ETHER_VLAN_ENCAP_LEN              = 0x4
+	EVFILT_AIO                        = -0x3
+	EVFILT_DEVICE                     = -0x8
+	EVFILT_EXCEPT                     = -0x9
+	EVFILT_PROC                       = -0x5
+	EVFILT_READ                       = -0x1
+	EVFILT_SIGNAL                     = -0x6
+	EVFILT_SYSCOUNT                   = 0x9
+	EVFILT_TIMER                      = -0x7
+	EVFILT_VNODE                      = -0x4
+	EVFILT_WRITE                      = -0x2
+	EVL_ENCAPLEN                      = 0x4
+	EVL_PRIO_BITS                     = 0xd
+	EVL_PRIO_MAX                      = 0x7
+	EVL_VLID_MASK                     = 0xfff
+	EVL_VLID_MAX                      = 0xffe
+	EVL_VLID_MIN                      = 0x1
+	EVL_VLID_NULL                     = 0x0
+	EV_ADD                            = 0x1
+	EV_CLEAR                          = 0x20
+	EV_DELETE                         = 0x2
+	EV_DISABLE                        = 0x8
+	EV_DISPATCH                       = 0x80
+	EV_ENABLE                         = 0x4
+	EV_EOF                            = 0x8000
+	EV_ERROR                          = 0x4000
+	EV_FLAG1                          = 0x2000
+	EV_ONESHOT                        = 0x10
+	EV_RECEIPT                        = 0x40
+	EV_SYSFLAGS                       = 0xf800
+	EXTA                              = 0x4b00
+	EXTB                              = 0x9600
+	EXTPROC                           = 0x800
+	FD_CLOEXEC                        = 0x1
+	FD_SETSIZE                        = 0x400
+	FLUSHO                            = 0x800000
+	F_DUPFD                           = 0x0
+	F_DUPFD_CLOEXEC                   = 0xa
+	F_GETFD                           = 0x1
+	F_GETFL                           = 0x3
+	F_GETLK                           = 0x7
+	F_GETOWN                          = 0x5
+	F_ISATTY                          = 0xb
+	F_RDLCK                           = 0x1
+	F_SETFD                           = 0x2
+	F_SETFL                           = 0x4
+	F_SETLK                           = 0x8
+	F_SETLKW                          = 0x9
+	F_SETOWN                          = 0x6
+	F_UNLCK                           = 0x2
+	F_WRLCK                           = 0x3
+	HUPCL                             = 0x4000
+	ICANON                            = 0x100
+	ICMP6_FILTER                      = 0x12
+	ICRNL                             = 0x100
+	IEXTEN                            = 0x400
+	IFAN_ARRIVAL                      = 0x0
+	IFAN_DEPARTURE                    = 0x1
+	IFF_ALLMULTI                      = 0x200
+	IFF_BROADCAST                     = 0x2
+	IFF_CANTCHANGE                    = 0x8e52
+	IFF_DEBUG                         = 0x4
+	IFF_LINK0                         = 0x1000
+	IFF_LINK1                         = 0x2000
+	IFF_LINK2                         = 0x4000
+	IFF_LOOPBACK                      = 0x8
+	IFF_MULTICAST                     = 0x8000
+	IFF_NOARP                         = 0x80
+	IFF_OACTIVE                       = 0x400
+	IFF_POINTOPOINT                   = 0x10
+	IFF_PROMISC                       = 0x100
+	IFF_RUNNING                       = 0x40
+	IFF_SIMPLEX                       = 0x800
+	IFF_STATICARP                     = 0x20
+	IFF_UP                            = 0x1
+	IFNAMSIZ                          = 0x10
+	IFT_1822                          = 0x2
+	IFT_A12MPPSWITCH                  = 0x82
+	IFT_AAL2                          = 0xbb
+	IFT_AAL5                          = 0x31
+	IFT_ADSL                          = 0x5e
+	IFT_AFLANE8023                    = 0x3b
+	IFT_AFLANE8025                    = 0x3c
+	IFT_ARAP                          = 0x58
+	IFT_ARCNET                        = 0x23
+	IFT_ARCNETPLUS                    = 0x24
+	IFT_ASYNC                         = 0x54
+	IFT_ATM                           = 0x25
+	IFT_ATMDXI                        = 0x69
+	IFT_ATMFUNI                       = 0x6a
+	IFT_ATMIMA                        = 0x6b
+	IFT_ATMLOGICAL                    = 0x50
+	IFT_ATMRADIO                      = 0xbd
+	IFT_ATMSUBINTERFACE               = 0x86
+	IFT_ATMVCIENDPT                   = 0xc2
+	IFT_ATMVIRTUAL                    = 0x95
+	IFT_BGPPOLICYACCOUNTING           = 0xa2
+	IFT_BLUETOOTH                     = 0xf8
+	IFT_BRIDGE                        = 0xd1
+	IFT_BSC                           = 0x53
+	IFT_CARP                          = 0xf7
+	IFT_CCTEMUL                       = 0x3d
+	IFT_CEPT                          = 0x13
+	IFT_CES                           = 0x85
+	IFT_CHANNEL                       = 0x46
+	IFT_CNR                           = 0x55
+	IFT_COFFEE                        = 0x84
+	IFT_COMPOSITELINK                 = 0x9b
+	IFT_DCN                           = 0x8d
+	IFT_DIGITALPOWERLINE              = 0x8a
+	IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+	IFT_DLSW                          = 0x4a
+	IFT_DOCSCABLEDOWNSTREAM           = 0x80
+	IFT_DOCSCABLEMACLAYER             = 0x7f
+	IFT_DOCSCABLEUPSTREAM             = 0x81
+	IFT_DOCSCABLEUPSTREAMCHANNEL      = 0xcd
+	IFT_DS0                           = 0x51
+	IFT_DS0BUNDLE                     = 0x52
+	IFT_DS1FDL                        = 0xaa
+	IFT_DS3                           = 0x1e
+	IFT_DTM                           = 0x8c
+	IFT_DUMMY                         = 0xf1
+	IFT_DVBASILN                      = 0xac
+	IFT_DVBASIOUT                     = 0xad
+	IFT_DVBRCCDOWNSTREAM              = 0x93
+	IFT_DVBRCCMACLAYER                = 0x92
+	IFT_DVBRCCUPSTREAM                = 0x94
+	IFT_ECONET                        = 0xce
+	IFT_ENC                           = 0xf4
+	IFT_EON                           = 0x19
+	IFT_EPLRS                         = 0x57
+	IFT_ESCON                         = 0x49
+	IFT_ETHER                         = 0x6
+	IFT_FAITH                         = 0xf3
+	IFT_FAST                          = 0x7d
+	IFT_FASTETHER                     = 0x3e
+	IFT_FASTETHERFX                   = 0x45
+	IFT_FDDI                          = 0xf
+	IFT_FIBRECHANNEL                  = 0x38
+	IFT_FRAMERELAYINTERCONNECT        = 0x3a
+	IFT_FRAMERELAYMPI                 = 0x5c
+	IFT_FRDLCIENDPT                   = 0xc1
+	IFT_FRELAY                        = 0x20
+	IFT_FRELAYDCE                     = 0x2c
+	IFT_FRF16MFRBUNDLE                = 0xa3
+	IFT_FRFORWARD                     = 0x9e
+	IFT_G703AT2MB                     = 0x43
+	IFT_G703AT64K                     = 0x42
+	IFT_GIF                           = 0xf0
+	IFT_GIGABITETHERNET               = 0x75
+	IFT_GR303IDT                      = 0xb2
+	IFT_GR303RDT                      = 0xb1
+	IFT_H323GATEKEEPER                = 0xa4
+	IFT_H323PROXY                     = 0xa5
+	IFT_HDH1822                       = 0x3
+	IFT_HDLC                          = 0x76
+	IFT_HDSL2                         = 0xa8
+	IFT_HIPERLAN2                     = 0xb7
+	IFT_HIPPI                         = 0x2f
+	IFT_HIPPIINTERFACE                = 0x39
+	IFT_HOSTPAD                       = 0x5a
+	IFT_HSSI                          = 0x2e
+	IFT_HY                            = 0xe
+	IFT_IBM370PARCHAN                 = 0x48
+	IFT_IDSL                          = 0x9a
+	IFT_IEEE1394                      = 0x90
+	IFT_IEEE80211                     = 0x47
+	IFT_IEEE80212                     = 0x37
+	IFT_IEEE8023ADLAG                 = 0xa1
+	IFT_IFGSN                         = 0x91
+	IFT_IMT                           = 0xbe
+	IFT_INFINIBAND                    = 0xc7
+	IFT_INTERLEAVE                    = 0x7c
+	IFT_IP                            = 0x7e
+	IFT_IPFORWARD                     = 0x8e
+	IFT_IPOVERATM                     = 0x72
+	IFT_IPOVERCDLC                    = 0x6d
+	IFT_IPOVERCLAW                    = 0x6e
+	IFT_IPSWITCH                      = 0x4e
+	IFT_ISDN                          = 0x3f
+	IFT_ISDNBASIC                     = 0x14
+	IFT_ISDNPRIMARY                   = 0x15
+	IFT_ISDNS                         = 0x4b
+	IFT_ISDNU                         = 0x4c
+	IFT_ISO88022LLC                   = 0x29
+	IFT_ISO88023                      = 0x7
+	IFT_ISO88024                      = 0x8
+	IFT_ISO88025                      = 0x9
+	IFT_ISO88025CRFPINT               = 0x62
+	IFT_ISO88025DTR                   = 0x56
+	IFT_ISO88025FIBER                 = 0x73
+	IFT_ISO88026                      = 0xa
+	IFT_ISUP                          = 0xb3
+	IFT_L2VLAN                        = 0x87
+	IFT_L3IPVLAN                      = 0x88
+	IFT_L3IPXVLAN                     = 0x89
+	IFT_LAPB                          = 0x10
+	IFT_LAPD                          = 0x4d
+	IFT_LAPF                          = 0x77
+	IFT_LINEGROUP                     = 0xd2
+	IFT_LOCALTALK                     = 0x2a
+	IFT_LOOP                          = 0x18
+	IFT_MBIM                          = 0xfa
+	IFT_MEDIAMAILOVERIP               = 0x8b
+	IFT_MFSIGLINK                     = 0xa7
+	IFT_MIOX25                        = 0x26
+	IFT_MODEM                         = 0x30
+	IFT_MPC                           = 0x71
+	IFT_MPLS                          = 0xa6
+	IFT_MPLSTUNNEL                    = 0x96
+	IFT_MSDSL                         = 0x8f
+	IFT_MVL                           = 0xbf
+	IFT_MYRINET                       = 0x63
+	IFT_NFAS                          = 0xaf
+	IFT_NSIP                          = 0x1b
+	IFT_OPTICALCHANNEL                = 0xc3
+	IFT_OPTICALTRANSPORT              = 0xc4
+	IFT_OTHER                         = 0x1
+	IFT_P10                           = 0xc
+	IFT_P80                           = 0xd
+	IFT_PARA                          = 0x22
+	IFT_PFLOG                         = 0xf5
+	IFT_PFLOW                         = 0xf9
+	IFT_PFSYNC                        = 0xf6
+	IFT_PLC                           = 0xae
+	IFT_PON155                        = 0xcf
+	IFT_PON622                        = 0xd0
+	IFT_POS                           = 0xab
+	IFT_PPP                           = 0x17
+	IFT_PPPMULTILINKBUNDLE            = 0x6c
+	IFT_PROPATM                       = 0xc5
+	IFT_PROPBWAP2MP                   = 0xb8
+	IFT_PROPCNLS                      = 0x59
+	IFT_PROPDOCSWIRELESSDOWNSTREAM    = 0xb5
+	IFT_PROPDOCSWIRELESSMACLAYER      = 0xb4
+	IFT_PROPDOCSWIRELESSUPSTREAM      = 0xb6
+	IFT_PROPMUX                       = 0x36
+	IFT_PROPVIRTUAL                   = 0x35
+	IFT_PROPWIRELESSP2P               = 0x9d
+	IFT_PTPSERIAL                     = 0x16
+	IFT_PVC                           = 0xf2
+	IFT_Q2931                         = 0xc9
+	IFT_QLLC                          = 0x44
+	IFT_RADIOMAC                      = 0xbc
+	IFT_RADSL                         = 0x5f
+	IFT_REACHDSL                      = 0xc0
+	IFT_RFC1483                       = 0x9f
+	IFT_RS232                         = 0x21
+	IFT_RSRB                          = 0x4f
+	IFT_SDLC                          = 0x11
+	IFT_SDSL                          = 0x60
+	IFT_SHDSL                         = 0xa9
+	IFT_SIP                           = 0x1f
+	IFT_SIPSIG                        = 0xcc
+	IFT_SIPTG                         = 0xcb
+	IFT_SLIP                          = 0x1c
+	IFT_SMDSDXI                       = 0x2b
+	IFT_SMDSICIP                      = 0x34
+	IFT_SONET                         = 0x27
+	IFT_SONETOVERHEADCHANNEL          = 0xb9
+	IFT_SONETPATH                     = 0x32
+	IFT_SONETVT                       = 0x33
+	IFT_SRP                           = 0x97
+	IFT_SS7SIGLINK                    = 0x9c
+	IFT_STACKTOSTACK                  = 0x6f
+	IFT_STARLAN                       = 0xb
+	IFT_T1                            = 0x12
+	IFT_TDLC                          = 0x74
+	IFT_TELINK                        = 0xc8
+	IFT_TERMPAD                       = 0x5b
+	IFT_TR008                         = 0xb0
+	IFT_TRANSPHDLC                    = 0x7b
+	IFT_TUNNEL                        = 0x83
+	IFT_ULTRA                         = 0x1d
+	IFT_USB                           = 0xa0
+	IFT_V11                           = 0x40
+	IFT_V35                           = 0x2d
+	IFT_V36                           = 0x41
+	IFT_V37                           = 0x78
+	IFT_VDSL                          = 0x61
+	IFT_VIRTUALIPADDRESS              = 0x70
+	IFT_VIRTUALTG                     = 0xca
+	IFT_VOICEDID                      = 0xd5
+	IFT_VOICEEM                       = 0x64
+	IFT_VOICEEMFGD                    = 0xd3
+	IFT_VOICEENCAP                    = 0x67
+	IFT_VOICEFGDEANA                  = 0xd4
+	IFT_VOICEFXO                      = 0x65
+	IFT_VOICEFXS                      = 0x66
+	IFT_VOICEOVERATM                  = 0x98
+	IFT_VOICEOVERCABLE                = 0xc6
+	IFT_VOICEOVERFRAMERELAY           = 0x99
+	IFT_VOICEOVERIP                   = 0x68
+	IFT_WIREGUARD                     = 0xfb
+	IFT_X213                          = 0x5d
+	IFT_X25                           = 0x5
+	IFT_X25DDN                        = 0x4
+	IFT_X25HUNTGROUP                  = 0x7a
+	IFT_X25MLP                        = 0x79
+	IFT_X25PLE                        = 0x28
+	IFT_XETHER                        = 0x1a
+	IGNBRK                            = 0x1
+	IGNCR                             = 0x80
+	IGNPAR                            = 0x4
+	IMAXBEL                           = 0x2000
+	INLCR                             = 0x40
+	INPCK                             = 0x10
+	IN_CLASSA_HOST                    = 0xffffff
+	IN_CLASSA_MAX                     = 0x80
+	IN_CLASSA_NET                     = 0xff000000
+	IN_CLASSA_NSHIFT                  = 0x18
+	IN_CLASSB_HOST                    = 0xffff
+	IN_CLASSB_MAX                     = 0x10000
+	IN_CLASSB_NET                     = 0xffff0000
+	IN_CLASSB_NSHIFT                  = 0x10
+	IN_CLASSC_HOST                    = 0xff
+	IN_CLASSC_NET                     = 0xffffff00
+	IN_CLASSC_NSHIFT                  = 0x8
+	IN_CLASSD_HOST                    = 0xfffffff
+	IN_CLASSD_NET                     = 0xf0000000
+	IN_CLASSD_NSHIFT                  = 0x1c
+	IN_LOOPBACKNET                    = 0x7f
+	IN_RFC3021_HOST                   = 0x1
+	IN_RFC3021_NET                    = 0xfffffffe
+	IN_RFC3021_NSHIFT                 = 0x1f
+	IPPROTO_AH                        = 0x33
+	IPPROTO_CARP                      = 0x70
+	IPPROTO_DIVERT                    = 0x102
+	IPPROTO_DONE                      = 0x101
+	IPPROTO_DSTOPTS                   = 0x3c
+	IPPROTO_EGP                       = 0x8
+	IPPROTO_ENCAP                     = 0x62
+	IPPROTO_EON                       = 0x50
+	IPPROTO_ESP                       = 0x32
+	IPPROTO_ETHERIP                   = 0x61
+	IPPROTO_FRAGMENT                  = 0x2c
+	IPPROTO_GGP                       = 0x3
+	IPPROTO_GRE                       = 0x2f
+	IPPROTO_HOPOPTS                   = 0x0
+	IPPROTO_ICMP                      = 0x1
+	IPPROTO_ICMPV6                    = 0x3a
+	IPPROTO_IDP                       = 0x16
+	IPPROTO_IGMP                      = 0x2
+	IPPROTO_IP                        = 0x0
+	IPPROTO_IPCOMP                    = 0x6c
+	IPPROTO_IPIP                      = 0x4
+	IPPROTO_IPV4                      = 0x4
+	IPPROTO_IPV6                      = 0x29
+	IPPROTO_MAX                       = 0x100
+	IPPROTO_MAXID                     = 0x103
+	IPPROTO_MOBILE                    = 0x37
+	IPPROTO_MPLS                      = 0x89
+	IPPROTO_NONE                      = 0x3b
+	IPPROTO_PFSYNC                    = 0xf0
+	IPPROTO_PIM                       = 0x67
+	IPPROTO_PUP                       = 0xc
+	IPPROTO_RAW                       = 0xff
+	IPPROTO_ROUTING                   = 0x2b
+	IPPROTO_RSVP                      = 0x2e
+	IPPROTO_SCTP                      = 0x84
+	IPPROTO_TCP                       = 0x6
+	IPPROTO_TP                        = 0x1d
+	IPPROTO_UDP                       = 0x11
+	IPPROTO_UDPLITE                   = 0x88
+	IPV6_AUTH_LEVEL                   = 0x35
+	IPV6_AUTOFLOWLABEL                = 0x3b
+	IPV6_CHECKSUM                     = 0x1a
+	IPV6_DEFAULT_MULTICAST_HOPS       = 0x1
+	IPV6_DEFAULT_MULTICAST_LOOP       = 0x1
+	IPV6_DEFHLIM                      = 0x40
+	IPV6_DONTFRAG                     = 0x3e
+	IPV6_DSTOPTS                      = 0x32
+	IPV6_ESP_NETWORK_LEVEL            = 0x37
+	IPV6_ESP_TRANS_LEVEL              = 0x36
+	IPV6_FAITH                        = 0x1d
+	IPV6_FLOWINFO_MASK                = 0xfffffff
+	IPV6_FLOWLABEL_MASK               = 0xfffff
+	IPV6_FRAGTTL                      = 0x78
+	IPV6_HLIMDEC                      = 0x1
+	IPV6_HOPLIMIT                     = 0x2f
+	IPV6_HOPOPTS                      = 0x31
+	IPV6_IPCOMP_LEVEL                 = 0x3c
+	IPV6_JOIN_GROUP                   = 0xc
+	IPV6_LEAVE_GROUP                  = 0xd
+	IPV6_MAXHLIM                      = 0xff
+	IPV6_MAXPACKET                    = 0xffff
+	IPV6_MINHOPCOUNT                  = 0x41
+	IPV6_MMTU                         = 0x500
+	IPV6_MULTICAST_HOPS               = 0xa
+	IPV6_MULTICAST_IF                 = 0x9
+	IPV6_MULTICAST_LOOP               = 0xb
+	IPV6_NEXTHOP                      = 0x30
+	IPV6_OPTIONS                      = 0x1
+	IPV6_PATHMTU                      = 0x2c
+	IPV6_PIPEX                        = 0x3f
+	IPV6_PKTINFO                      = 0x2e
+	IPV6_PORTRANGE                    = 0xe
+	IPV6_PORTRANGE_DEFAULT            = 0x0
+	IPV6_PORTRANGE_HIGH               = 0x1
+	IPV6_PORTRANGE_LOW                = 0x2
+	IPV6_RECVDSTOPTS                  = 0x28
+	IPV6_RECVDSTPORT                  = 0x40
+	IPV6_RECVHOPLIMIT                 = 0x25
+	IPV6_RECVHOPOPTS                  = 0x27
+	IPV6_RECVPATHMTU                  = 0x2b
+	IPV6_RECVPKTINFO                  = 0x24
+	IPV6_RECVRTHDR                    = 0x26
+	IPV6_RECVTCLASS                   = 0x39
+	IPV6_RTABLE                       = 0x1021
+	IPV6_RTHDR                        = 0x33
+	IPV6_RTHDRDSTOPTS                 = 0x23
+	IPV6_RTHDR_LOOSE                  = 0x0
+	IPV6_RTHDR_STRICT                 = 0x1
+	IPV6_RTHDR_TYPE_0                 = 0x0
+	IPV6_SOCKOPT_RESERVED1            = 0x3
+	IPV6_TCLASS                       = 0x3d
+	IPV6_UNICAST_HOPS                 = 0x4
+	IPV6_USE_MIN_MTU                  = 0x2a
+	IPV6_V6ONLY                       = 0x1b
+	IPV6_VERSION                      = 0x60
+	IPV6_VERSION_MASK                 = 0xf0
+	IP_ADD_MEMBERSHIP                 = 0xc
+	IP_AUTH_LEVEL                     = 0x14
+	IP_DEFAULT_MULTICAST_LOOP         = 0x1
+	IP_DEFAULT_MULTICAST_TTL          = 0x1
+	IP_DF                             = 0x4000
+	IP_DROP_MEMBERSHIP                = 0xd
+	IP_ESP_NETWORK_LEVEL              = 0x16
+	IP_ESP_TRANS_LEVEL                = 0x15
+	IP_HDRINCL                        = 0x2
+	IP_IPCOMP_LEVEL                   = 0x1d
+	IP_IPDEFTTL                       = 0x25
+	IP_IPSECFLOWINFO                  = 0x24
+	IP_IPSEC_LOCAL_AUTH               = 0x1b
+	IP_IPSEC_LOCAL_CRED               = 0x19
+	IP_IPSEC_LOCAL_ID                 = 0x17
+	IP_IPSEC_REMOTE_AUTH              = 0x1c
+	IP_IPSEC_REMOTE_CRED              = 0x1a
+	IP_IPSEC_REMOTE_ID                = 0x18
+	IP_MAXPACKET                      = 0xffff
+	IP_MAX_MEMBERSHIPS                = 0xfff
+	IP_MF                             = 0x2000
+	IP_MINTTL                         = 0x20
+	IP_MIN_MEMBERSHIPS                = 0xf
+	IP_MSS                            = 0x240
+	IP_MULTICAST_IF                   = 0x9
+	IP_MULTICAST_LOOP                 = 0xb
+	IP_MULTICAST_TTL                  = 0xa
+	IP_OFFMASK                        = 0x1fff
+	IP_OPTIONS                        = 0x1
+	IP_PIPEX                          = 0x22
+	IP_PORTRANGE                      = 0x13
+	IP_PORTRANGE_DEFAULT              = 0x0
+	IP_PORTRANGE_HIGH                 = 0x1
+	IP_PORTRANGE_LOW                  = 0x2
+	IP_RECVDSTADDR                    = 0x7
+	IP_RECVDSTPORT                    = 0x21
+	IP_RECVIF                         = 0x1e
+	IP_RECVOPTS                       = 0x5
+	IP_RECVRETOPTS                    = 0x6
+	IP_RECVRTABLE                     = 0x23
+	IP_RECVTTL                        = 0x1f
+	IP_RETOPTS                        = 0x8
+	IP_RF                             = 0x8000
+	IP_RTABLE                         = 0x1021
+	IP_SENDSRCADDR                    = 0x7
+	IP_TOS                            = 0x3
+	IP_TTL                            = 0x4
+	ISIG                              = 0x80
+	ISTRIP                            = 0x20
+	IXANY                             = 0x800
+	IXOFF                             = 0x400
+	IXON                              = 0x200
+	LCNT_OVERLOAD_FLUSH               = 0x6
+	LOCK_EX                           = 0x2
+	LOCK_NB                           = 0x4
+	LOCK_SH                           = 0x1
+	LOCK_UN                           = 0x8
+	MADV_DONTNEED                     = 0x4
+	MADV_FREE                         = 0x6
+	MADV_NORMAL                       = 0x0
+	MADV_RANDOM                       = 0x1
+	MADV_SEQUENTIAL                   = 0x2
+	MADV_SPACEAVAIL                   = 0x5
+	MADV_WILLNEED                     = 0x3
+	MAP_ANON                          = 0x1000
+	MAP_ANONYMOUS                     = 0x1000
+	MAP_CONCEAL                       = 0x8000
+	MAP_COPY                          = 0x2
+	MAP_FILE                          = 0x0
+	MAP_FIXED                         = 0x10
+	MAP_FLAGMASK                      = 0xfff7
+	MAP_HASSEMAPHORE                  = 0x0
+	MAP_INHERIT                       = 0x0
+	MAP_INHERIT_COPY                  = 0x1
+	MAP_INHERIT_NONE                  = 0x2
+	MAP_INHERIT_SHARE                 = 0x0
+	MAP_INHERIT_ZERO                  = 0x3
+	MAP_NOEXTEND                      = 0x0
+	MAP_NORESERVE                     = 0x0
+	MAP_PRIVATE                       = 0x2
+	MAP_RENAME                        = 0x0
+	MAP_SHARED                        = 0x1
+	MAP_STACK                         = 0x4000
+	MAP_TRYFIXED                      = 0x0
+	MCL_CURRENT                       = 0x1
+	MCL_FUTURE                        = 0x2
+	MSG_BCAST                         = 0x100
+	MSG_CMSG_CLOEXEC                  = 0x800
+	MSG_CTRUNC                        = 0x20
+	MSG_DONTROUTE                     = 0x4
+	MSG_DONTWAIT                      = 0x80
+	MSG_EOR                           = 0x8
+	MSG_MCAST                         = 0x200
+	MSG_NOSIGNAL                      = 0x400
+	MSG_OOB                           = 0x1
+	MSG_PEEK                          = 0x2
+	MSG_TRUNC                         = 0x10
+	MSG_WAITALL                       = 0x40
+	MSG_WAITFORONE                    = 0x1000
+	MS_ASYNC                          = 0x1
+	MS_INVALIDATE                     = 0x4
+	MS_SYNC                           = 0x2
+	NAME_MAX                          = 0xff
+	NET_RT_DUMP                       = 0x1
+	NET_RT_FLAGS                      = 0x2
+	NET_RT_IFLIST                     = 0x3
+	NET_RT_IFNAMES                    = 0x6
+	NET_RT_MAXID                      = 0x8
+	NET_RT_SOURCE                     = 0x7
+	NET_RT_STATS                      = 0x4
+	NET_RT_TABLE                      = 0x5
+	NOFLSH                            = 0x80000000
+	NOTE_ATTRIB                       = 0x8
+	NOTE_CHANGE                       = 0x1
+	NOTE_CHILD                        = 0x4
+	NOTE_DELETE                       = 0x1
+	NOTE_EOF                          = 0x2
+	NOTE_EXEC                         = 0x20000000
+	NOTE_EXIT                         = 0x80000000
+	NOTE_EXTEND                       = 0x4
+	NOTE_FORK                         = 0x40000000
+	NOTE_LINK                         = 0x10
+	NOTE_LOWAT                        = 0x1
+	NOTE_OOB                          = 0x4
+	NOTE_PCTRLMASK                    = 0xf0000000
+	NOTE_PDATAMASK                    = 0xfffff
+	NOTE_RENAME                       = 0x20
+	NOTE_REVOKE                       = 0x40
+	NOTE_TRACK                        = 0x1
+	NOTE_TRACKERR                     = 0x2
+	NOTE_TRUNCATE                     = 0x80
+	NOTE_WRITE                        = 0x2
+	OCRNL                             = 0x10
+	ONLCR                             = 0x2
+	ONLRET                            = 0x80
+	ONOCR                             = 0x40
+	ONOEOT                            = 0x8
+	OPOST                             = 0x1
+	O_ACCMODE                         = 0x3
+	O_APPEND                          = 0x8
+	O_ASYNC                           = 0x40
+	O_CLOEXEC                         = 0x10000
+	O_CREAT                           = 0x200
+	O_DIRECTORY                       = 0x20000
+	O_DSYNC                           = 0x80
+	O_EXCL                            = 0x800
+	O_EXLOCK                          = 0x20
+	O_FSYNC                           = 0x80
+	O_NDELAY                          = 0x4
+	O_NOCTTY                          = 0x8000
+	O_NOFOLLOW                        = 0x100
+	O_NONBLOCK                        = 0x4
+	O_RDONLY                          = 0x0
+	O_RDWR                            = 0x2
+	O_RSYNC                           = 0x80
+	O_SHLOCK                          = 0x10
+	O_SYNC                            = 0x80
+	O_TRUNC                           = 0x400
+	O_WRONLY                          = 0x1
+	PARENB                            = 0x1000
+	PARMRK                            = 0x8
+	PARODD                            = 0x2000
+	PENDIN                            = 0x20000000
+	PF_FLUSH                          = 0x1
+	PRIO_PGRP                         = 0x1
+	PRIO_PROCESS                      = 0x0
+	PRIO_USER                         = 0x2
+	PROT_EXEC                         = 0x4
+	PROT_NONE                         = 0x0
+	PROT_READ                         = 0x1
+	PROT_WRITE                        = 0x2
+	RLIMIT_CORE                       = 0x4
+	RLIMIT_CPU                        = 0x0
+	RLIMIT_DATA                       = 0x2
+	RLIMIT_FSIZE                      = 0x1
+	RLIMIT_NOFILE                     = 0x8
+	RLIMIT_STACK                      = 0x3
+	RLIM_INFINITY                     = 0x7fffffffffffffff
+	RTAX_AUTHOR                       = 0x6
+	RTAX_BFD                          = 0xb
+	RTAX_BRD                          = 0x7
+	RTAX_DNS                          = 0xc
+	RTAX_DST                          = 0x0
+	RTAX_GATEWAY                      = 0x1
+	RTAX_GENMASK                      = 0x3
+	RTAX_IFA                          = 0x5
+	RTAX_IFP                          = 0x4
+	RTAX_LABEL                        = 0xa
+	RTAX_MAX                          = 0xf
+	RTAX_NETMASK                      = 0x2
+	RTAX_SEARCH                       = 0xe
+	RTAX_SRC                          = 0x8
+	RTAX_SRCMASK                      = 0x9
+	RTAX_STATIC                       = 0xd
+	RTA_AUTHOR                        = 0x40
+	RTA_BFD                           = 0x800
+	RTA_BRD                           = 0x80
+	RTA_DNS                           = 0x1000
+	RTA_DST                           = 0x1
+	RTA_GATEWAY                       = 0x2
+	RTA_GENMASK                       = 0x8
+	RTA_IFA                           = 0x20
+	RTA_IFP                           = 0x10
+	RTA_LABEL                         = 0x400
+	RTA_NETMASK                       = 0x4
+	RTA_SEARCH                        = 0x4000
+	RTA_SRC                           = 0x100
+	RTA_SRCMASK                       = 0x200
+	RTA_STATIC                        = 0x2000
+	RTF_ANNOUNCE                      = 0x4000
+	RTF_BFD                           = 0x1000000
+	RTF_BLACKHOLE                     = 0x1000
+	RTF_BROADCAST                     = 0x400000
+	RTF_CACHED                        = 0x20000
+	RTF_CLONED                        = 0x10000
+	RTF_CLONING                       = 0x100
+	RTF_CONNECTED                     = 0x800000
+	RTF_DONE                          = 0x40
+	RTF_DYNAMIC                       = 0x10
+	RTF_FMASK                         = 0x110fc08
+	RTF_GATEWAY                       = 0x2
+	RTF_HOST                          = 0x4
+	RTF_LLINFO                        = 0x400
+	RTF_LOCAL                         = 0x200000
+	RTF_MODIFIED                      = 0x20
+	RTF_MPATH                         = 0x40000
+	RTF_MPLS                          = 0x100000
+	RTF_MULTICAST                     = 0x200
+	RTF_PERMANENT_ARP                 = 0x2000
+	RTF_PROTO1                        = 0x8000
+	RTF_PROTO2                        = 0x4000
+	RTF_PROTO3                        = 0x2000
+	RTF_REJECT                        = 0x8
+	RTF_STATIC                        = 0x800
+	RTF_UP                            = 0x1
+	RTF_USETRAILERS                   = 0x8000
+	RTM_80211INFO                     = 0x15
+	RTM_ADD                           = 0x1
+	RTM_BFD                           = 0x12
+	RTM_CHANGE                        = 0x3
+	RTM_CHGADDRATTR                   = 0x14
+	RTM_DELADDR                       = 0xd
+	RTM_DELETE                        = 0x2
+	RTM_DESYNC                        = 0x10
+	RTM_GET                           = 0x4
+	RTM_IFANNOUNCE                    = 0xf
+	RTM_IFINFO                        = 0xe
+	RTM_INVALIDATE                    = 0x11
+	RTM_LOSING                        = 0x5
+	RTM_MAXSIZE                       = 0x800
+	RTM_MISS                          = 0x7
+	RTM_NEWADDR                       = 0xc
+	RTM_PROPOSAL                      = 0x13
+	RTM_REDIRECT                      = 0x6
+	RTM_RESOLVE                       = 0xb
+	RTM_SOURCE                        = 0x16
+	RTM_VERSION                       = 0x5
+	RTV_EXPIRE                        = 0x4
+	RTV_HOPCOUNT                      = 0x2
+	RTV_MTU                           = 0x1
+	RTV_RPIPE                         = 0x8
+	RTV_RTT                           = 0x40
+	RTV_RTTVAR                        = 0x80
+	RTV_SPIPE                         = 0x10
+	RTV_SSTHRESH                      = 0x20
+	RT_TABLEID_BITS                   = 0x8
+	RT_TABLEID_MASK                   = 0xff
+	RT_TABLEID_MAX                    = 0xff
+	RUSAGE_CHILDREN                   = -0x1
+	RUSAGE_SELF                       = 0x0
+	RUSAGE_THREAD                     = 0x1
+	SCM_RIGHTS                        = 0x1
+	SCM_TIMESTAMP                     = 0x4
+	SHUT_RD                           = 0x0
+	SHUT_RDWR                         = 0x2
+	SHUT_WR                           = 0x1
+	SIOCADDMULTI                      = 0x80206931
+	SIOCAIFADDR                       = 0x8040691a
+	SIOCAIFGROUP                      = 0x80286987
+	SIOCATMARK                        = 0x40047307
+	SIOCBRDGADD                       = 0x8060693c
+	SIOCBRDGADDL                      = 0x80606949
+	SIOCBRDGADDS                      = 0x80606941
+	SIOCBRDGARL                       = 0x808c694d
+	SIOCBRDGDADDR                     = 0x81286947
+	SIOCBRDGDEL                       = 0x8060693d
+	SIOCBRDGDELS                      = 0x80606942
+	SIOCBRDGFLUSH                     = 0x80606948
+	SIOCBRDGFRL                       = 0x808c694e
+	SIOCBRDGGCACHE                    = 0xc0146941
+	SIOCBRDGGFD                       = 0xc0146952
+	SIOCBRDGGHT                       = 0xc0146951
+	SIOCBRDGGIFFLGS                   = 0xc060693e
+	SIOCBRDGGMA                       = 0xc0146953
+	SIOCBRDGGPARAM                    = 0xc0406958
+	SIOCBRDGGPRI                      = 0xc0146950
+	SIOCBRDGGRL                       = 0xc030694f
+	SIOCBRDGGTO                       = 0xc0146946
+	SIOCBRDGIFS                       = 0xc0606942
+	SIOCBRDGRTS                       = 0xc0206943
+	SIOCBRDGSADDR                     = 0xc1286944
+	SIOCBRDGSCACHE                    = 0x80146940
+	SIOCBRDGSFD                       = 0x80146952
+	SIOCBRDGSHT                       = 0x80146951
+	SIOCBRDGSIFCOST                   = 0x80606955
+	SIOCBRDGSIFFLGS                   = 0x8060693f
+	SIOCBRDGSIFPRIO                   = 0x80606954
+	SIOCBRDGSIFPROT                   = 0x8060694a
+	SIOCBRDGSMA                       = 0x80146953
+	SIOCBRDGSPRI                      = 0x80146950
+	SIOCBRDGSPROTO                    = 0x8014695a
+	SIOCBRDGSTO                       = 0x80146945
+	SIOCBRDGSTXHC                     = 0x80146959
+	SIOCDELLABEL                      = 0x80206997
+	SIOCDELMULTI                      = 0x80206932
+	SIOCDIFADDR                       = 0x80206919
+	SIOCDIFGROUP                      = 0x80286989
+	SIOCDIFPARENT                     = 0x802069b4
+	SIOCDIFPHYADDR                    = 0x80206949
+	SIOCDPWE3NEIGHBOR                 = 0x802069de
+	SIOCDVNETID                       = 0x802069af
+	SIOCGETKALIVE                     = 0xc01869a4
+	SIOCGETLABEL                      = 0x8020699a
+	SIOCGETMPWCFG                     = 0xc02069ae
+	SIOCGETPFLOW                      = 0xc02069fe
+	SIOCGETPFSYNC                     = 0xc02069f8
+	SIOCGETSGCNT                      = 0xc0207534
+	SIOCGETVIFCNT                     = 0xc0287533
+	SIOCGETVLAN                       = 0xc0206990
+	SIOCGIFADDR                       = 0xc0206921
+	SIOCGIFBRDADDR                    = 0xc0206923
+	SIOCGIFCONF                       = 0xc0106924
+	SIOCGIFDATA                       = 0xc020691b
+	SIOCGIFDESCR                      = 0xc0206981
+	SIOCGIFDSTADDR                    = 0xc0206922
+	SIOCGIFFLAGS                      = 0xc0206911
+	SIOCGIFGATTR                      = 0xc028698b
+	SIOCGIFGENERIC                    = 0xc020693a
+	SIOCGIFGLIST                      = 0xc028698d
+	SIOCGIFGMEMB                      = 0xc028698a
+	SIOCGIFGROUP                      = 0xc0286988
+	SIOCGIFHARDMTU                    = 0xc02069a5
+	SIOCGIFLLPRIO                     = 0xc02069b6
+	SIOCGIFMEDIA                      = 0xc0406938
+	SIOCGIFMETRIC                     = 0xc0206917
+	SIOCGIFMTU                        = 0xc020697e
+	SIOCGIFNETMASK                    = 0xc0206925
+	SIOCGIFPAIR                       = 0xc02069b1
+	SIOCGIFPARENT                     = 0xc02069b3
+	SIOCGIFPRIORITY                   = 0xc020699c
+	SIOCGIFRDOMAIN                    = 0xc02069a0
+	SIOCGIFRTLABEL                    = 0xc0206983
+	SIOCGIFRXR                        = 0x802069aa
+	SIOCGIFSFFPAGE                    = 0xc1126939
+	SIOCGIFXFLAGS                     = 0xc020699e
+	SIOCGLIFPHYADDR                   = 0xc218694b
+	SIOCGLIFPHYDF                     = 0xc02069c2
+	SIOCGLIFPHYECN                    = 0xc02069c8
+	SIOCGLIFPHYRTABLE                 = 0xc02069a2
+	SIOCGLIFPHYTTL                    = 0xc02069a9
+	SIOCGPGRP                         = 0x40047309
+	SIOCGPWE3                         = 0xc0206998
+	SIOCGPWE3CTRLWORD                 = 0xc02069dc
+	SIOCGPWE3FAT                      = 0xc02069dd
+	SIOCGPWE3NEIGHBOR                 = 0xc21869de
+	SIOCGRXHPRIO                      = 0xc02069db
+	SIOCGSPPPPARAMS                   = 0xc0206994
+	SIOCGTXHPRIO                      = 0xc02069c6
+	SIOCGUMBINFO                      = 0xc02069be
+	SIOCGUMBPARAM                     = 0xc02069c0
+	SIOCGVH                           = 0xc02069f6
+	SIOCGVNETFLOWID                   = 0xc02069c4
+	SIOCGVNETID                       = 0xc02069a7
+	SIOCIFAFATTACH                    = 0x801169ab
+	SIOCIFAFDETACH                    = 0x801169ac
+	SIOCIFCREATE                      = 0x8020697a
+	SIOCIFDESTROY                     = 0x80206979
+	SIOCIFGCLONERS                    = 0xc0106978
+	SIOCSETKALIVE                     = 0x801869a3
+	SIOCSETLABEL                      = 0x80206999
+	SIOCSETMPWCFG                     = 0x802069ad
+	SIOCSETPFLOW                      = 0x802069fd
+	SIOCSETPFSYNC                     = 0x802069f7
+	SIOCSETVLAN                       = 0x8020698f
+	SIOCSIFADDR                       = 0x8020690c
+	SIOCSIFBRDADDR                    = 0x80206913
+	SIOCSIFDESCR                      = 0x80206980
+	SIOCSIFDSTADDR                    = 0x8020690e
+	SIOCSIFFLAGS                      = 0x80206910
+	SIOCSIFGATTR                      = 0x8028698c
+	SIOCSIFGENERIC                    = 0x80206939
+	SIOCSIFLLADDR                     = 0x8020691f
+	SIOCSIFLLPRIO                     = 0x802069b5
+	SIOCSIFMEDIA                      = 0xc0206937
+	SIOCSIFMETRIC                     = 0x80206918
+	SIOCSIFMTU                        = 0x8020697f
+	SIOCSIFNETMASK                    = 0x80206916
+	SIOCSIFPAIR                       = 0x802069b0
+	SIOCSIFPARENT                     = 0x802069b2
+	SIOCSIFPRIORITY                   = 0x8020699b
+	SIOCSIFRDOMAIN                    = 0x8020699f
+	SIOCSIFRTLABEL                    = 0x80206982
+	SIOCSIFXFLAGS                     = 0x8020699d
+	SIOCSLIFPHYADDR                   = 0x8218694a
+	SIOCSLIFPHYDF                     = 0x802069c1
+	SIOCSLIFPHYECN                    = 0x802069c7
+	SIOCSLIFPHYRTABLE                 = 0x802069a1
+	SIOCSLIFPHYTTL                    = 0x802069a8
+	SIOCSPGRP                         = 0x80047308
+	SIOCSPWE3CTRLWORD                 = 0x802069dc
+	SIOCSPWE3FAT                      = 0x802069dd
+	SIOCSPWE3NEIGHBOR                 = 0x821869de
+	SIOCSRXHPRIO                      = 0x802069db
+	SIOCSSPPPPARAMS                   = 0x80206993
+	SIOCSTXHPRIO                      = 0x802069c5
+	SIOCSUMBPARAM                     = 0x802069bf
+	SIOCSVH                           = 0xc02069f5
+	SIOCSVNETFLOWID                   = 0x802069c3
+	SIOCSVNETID                       = 0x802069a6
+	SOCK_CLOEXEC                      = 0x8000
+	SOCK_DGRAM                        = 0x2
+	SOCK_DNS                          = 0x1000
+	SOCK_NONBLOCK                     = 0x4000
+	SOCK_RAW                          = 0x3
+	SOCK_RDM                          = 0x4
+	SOCK_SEQPACKET                    = 0x5
+	SOCK_STREAM                       = 0x1
+	SOL_SOCKET                        = 0xffff
+	SOMAXCONN                         = 0x80
+	SO_ACCEPTCONN                     = 0x2
+	SO_BINDANY                        = 0x1000
+	SO_BROADCAST                      = 0x20
+	SO_DEBUG                          = 0x1
+	SO_DOMAIN                         = 0x1024
+	SO_DONTROUTE                      = 0x10
+	SO_ERROR                          = 0x1007
+	SO_KEEPALIVE                      = 0x8
+	SO_LINGER                         = 0x80
+	SO_NETPROC                        = 0x1020
+	SO_OOBINLINE                      = 0x100
+	SO_PEERCRED                       = 0x1022
+	SO_PROTOCOL                       = 0x1025
+	SO_RCVBUF                         = 0x1002
+	SO_RCVLOWAT                       = 0x1004
+	SO_RCVTIMEO                       = 0x1006
+	SO_REUSEADDR                      = 0x4
+	SO_REUSEPORT                      = 0x200
+	SO_RTABLE                         = 0x1021
+	SO_SNDBUF                         = 0x1001
+	SO_SNDLOWAT                       = 0x1003
+	SO_SNDTIMEO                       = 0x1005
+	SO_SPLICE                         = 0x1023
+	SO_TIMESTAMP                      = 0x800
+	SO_TYPE                           = 0x1008
+	SO_USELOOPBACK                    = 0x40
+	SO_ZEROIZE                        = 0x2000
+	TCIFLUSH                          = 0x1
+	TCIOFLUSH                         = 0x3
+	TCOFLUSH                          = 0x2
+	TCP_INFO                          = 0x9
+	TCP_MAXSEG                        = 0x2
+	TCP_MAXWIN                        = 0xffff
+	TCP_MAX_SACK                      = 0x3
+	TCP_MAX_WINSHIFT                  = 0xe
+	TCP_MD5SIG                        = 0x4
+	TCP_MSS                           = 0x200
+	TCP_NODELAY                       = 0x1
+	TCP_NOPUSH                        = 0x10
+	TCP_SACKHOLE_LIMIT                = 0x80
+	TCP_SACK_ENABLE                   = 0x8
+	TCSAFLUSH                         = 0x2
+	TIOCCBRK                          = 0x2000747a
+	TIOCCDTR                          = 0x20007478
+	TIOCCHKVERAUTH                    = 0x2000741e
+	TIOCCLRVERAUTH                    = 0x2000741d
+	TIOCCONS                          = 0x80047462
+	TIOCDRAIN                         = 0x2000745e
+	TIOCEXCL                          = 0x2000740d
+	TIOCEXT                           = 0x80047460
+	TIOCFLAG_CLOCAL                   = 0x2
+	TIOCFLAG_CRTSCTS                  = 0x4
+	TIOCFLAG_MDMBUF                   = 0x8
+	TIOCFLAG_PPS                      = 0x10
+	TIOCFLAG_SOFTCAR                  = 0x1
+	TIOCFLUSH                         = 0x80047410
+	TIOCGETA                          = 0x402c7413
+	TIOCGETD                          = 0x4004741a
+	TIOCGFLAGS                        = 0x4004745d
+	TIOCGPGRP                         = 0x40047477
+	TIOCGSID                          = 0x40047463
+	TIOCGTSTAMP                       = 0x4010745b
+	TIOCGWINSZ                        = 0x40087468
+	TIOCMBIC                          = 0x8004746b
+	TIOCMBIS                          = 0x8004746c
+	TIOCMGET                          = 0x4004746a
+	TIOCMODG                          = 0x4004746a
+	TIOCMODS                          = 0x8004746d
+	TIOCMSET                          = 0x8004746d
+	TIOCM_CAR                         = 0x40
+	TIOCM_CD                          = 0x40
+	TIOCM_CTS                         = 0x20
+	TIOCM_DSR                         = 0x100
+	TIOCM_DTR                         = 0x2
+	TIOCM_LE                          = 0x1
+	TIOCM_RI                          = 0x80
+	TIOCM_RNG                         = 0x80
+	TIOCM_RTS                         = 0x4
+	TIOCM_SR                          = 0x10
+	TIOCM_ST                          = 0x8
+	TIOCNOTTY                         = 0x20007471
+	TIOCNXCL                          = 0x2000740e
+	TIOCOUTQ                          = 0x40047473
+	TIOCPKT                           = 0x80047470
+	TIOCPKT_DATA                      = 0x0
+	TIOCPKT_DOSTOP                    = 0x20
+	TIOCPKT_FLUSHREAD                 = 0x1
+	TIOCPKT_FLUSHWRITE                = 0x2
+	TIOCPKT_IOCTL                     = 0x40
+	TIOCPKT_NOSTOP                    = 0x10
+	TIOCPKT_START                     = 0x8
+	TIOCPKT_STOP                      = 0x4
+	TIOCREMOTE                        = 0x80047469
+	TIOCSBRK                          = 0x2000747b
+	TIOCSCTTY                         = 0x20007461
+	TIOCSDTR                          = 0x20007479
+	TIOCSETA                          = 0x802c7414
+	TIOCSETAF                         = 0x802c7416
+	TIOCSETAW                         = 0x802c7415
+	TIOCSETD                          = 0x8004741b
+	TIOCSETVERAUTH                    = 0x8004741c
+	TIOCSFLAGS                        = 0x8004745c
+	TIOCSIG                           = 0x8004745f
+	TIOCSPGRP                         = 0x80047476
+	TIOCSTART                         = 0x2000746e
+	TIOCSTAT                          = 0x20007465
+	TIOCSTOP                          = 0x2000746f
+	TIOCSTSTAMP                       = 0x8008745a
+	TIOCSWINSZ                        = 0x80087467
+	TIOCUCNTL                         = 0x80047466
+	TIOCUCNTL_CBRK                    = 0x7a
+	TIOCUCNTL_SBRK                    = 0x7b
+	TOSTOP                            = 0x400000
+	VDISCARD                          = 0xf
+	VDSUSP                            = 0xb
+	VEOF                              = 0x0
+	VEOL                              = 0x1
+	VEOL2                             = 0x2
+	VERASE                            = 0x3
+	VINTR                             = 0x8
+	VKILL                             = 0x5
+	VLNEXT                            = 0xe
+	VMIN                              = 0x10
+	VQUIT                             = 0x9
+	VREPRINT                          = 0x6
+	VSTART                            = 0xc
+	VSTATUS                           = 0x12
+	VSTOP                             = 0xd
+	VSUSP                             = 0xa
+	VTIME                             = 0x11
+	VWERASE                           = 0x4
+	WALTSIG                           = 0x4
+	WCONTINUED                        = 0x8
+	WCOREFLAG                         = 0x80
+	WNOHANG                           = 0x1
+	WUNTRACED                         = 0x2
+)
+
+// Errors
+const (
+	E2BIG           = Errno(0x7)
+	EACCES          = Errno(0xd)
+	EADDRINUSE      = Errno(0x30)
+	EADDRNOTAVAIL   = Errno(0x31)
+	EAFNOSUPPORT    = Errno(0x2f)
+	EAGAIN          = Errno(0x23)
+	EALREADY        = Errno(0x25)
+	EAUTH           = Errno(0x50)
+	EBADF           = Errno(0x9)
+	EBADMSG         = Errno(0x5c)
+	EBADRPC         = Errno(0x48)
+	EBUSY           = Errno(0x10)
+	ECANCELED       = Errno(0x58)
+	ECHILD          = Errno(0xa)
+	ECONNABORTED    = Errno(0x35)
+	ECONNREFUSED    = Errno(0x3d)
+	ECONNRESET      = Errno(0x36)
+	EDEADLK         = Errno(0xb)
+	EDESTADDRREQ    = Errno(0x27)
+	EDOM            = Errno(0x21)
+	EDQUOT          = Errno(0x45)
+	EEXIST          = Errno(0x11)
+	EFAULT          = Errno(0xe)
+	EFBIG           = Errno(0x1b)
+	EFTYPE          = Errno(0x4f)
+	EHOSTDOWN       = Errno(0x40)
+	EHOSTUNREACH    = Errno(0x41)
+	EIDRM           = Errno(0x59)
+	EILSEQ          = Errno(0x54)
+	EINPROGRESS     = Errno(0x24)
+	EINTR           = Errno(0x4)
+	EINVAL          = Errno(0x16)
+	EIO             = Errno(0x5)
+	EIPSEC          = Errno(0x52)
+	EISCONN         = Errno(0x38)
+	EISDIR          = Errno(0x15)
+	ELAST           = Errno(0x5f)
+	ELOOP           = Errno(0x3e)
+	EMEDIUMTYPE     = Errno(0x56)
+	EMFILE          = Errno(0x18)
+	EMLINK          = Errno(0x1f)
+	EMSGSIZE        = Errno(0x28)
+	ENAMETOOLONG    = Errno(0x3f)
+	ENEEDAUTH       = Errno(0x51)
+	ENETDOWN        = Errno(0x32)
+	ENETRESET       = Errno(0x34)
+	ENETUNREACH     = Errno(0x33)
+	ENFILE          = Errno(0x17)
+	ENOATTR         = Errno(0x53)
+	ENOBUFS         = Errno(0x37)
+	ENODEV          = Errno(0x13)
+	ENOENT          = Errno(0x2)
+	ENOEXEC         = Errno(0x8)
+	ENOLCK          = Errno(0x4d)
+	ENOMEDIUM       = Errno(0x55)
+	ENOMEM          = Errno(0xc)
+	ENOMSG          = Errno(0x5a)
+	ENOPROTOOPT     = Errno(0x2a)
+	ENOSPC          = Errno(0x1c)
+	ENOSYS          = Errno(0x4e)
+	ENOTBLK         = Errno(0xf)
+	ENOTCONN        = Errno(0x39)
+	ENOTDIR         = Errno(0x14)
+	ENOTEMPTY       = Errno(0x42)
+	ENOTRECOVERABLE = Errno(0x5d)
+	ENOTSOCK        = Errno(0x26)
+	ENOTSUP         = Errno(0x5b)
+	ENOTTY          = Errno(0x19)
+	ENXIO           = Errno(0x6)
+	EOPNOTSUPP      = Errno(0x2d)
+	EOVERFLOW       = Errno(0x57)
+	EOWNERDEAD      = Errno(0x5e)
+	EPERM           = Errno(0x1)
+	EPFNOSUPPORT    = Errno(0x2e)
+	EPIPE           = Errno(0x20)
+	EPROCLIM        = Errno(0x43)
+	EPROCUNAVAIL    = Errno(0x4c)
+	EPROGMISMATCH   = Errno(0x4b)
+	EPROGUNAVAIL    = Errno(0x4a)
+	EPROTO          = Errno(0x5f)
+	EPROTONOSUPPORT = Errno(0x2b)
+	EPROTOTYPE      = Errno(0x29)
+	ERANGE          = Errno(0x22)
+	EREMOTE         = Errno(0x47)
+	EROFS           = Errno(0x1e)
+	ERPCMISMATCH    = Errno(0x49)
+	ESHUTDOWN       = Errno(0x3a)
+	ESOCKTNOSUPPORT = Errno(0x2c)
+	ESPIPE          = Errno(0x1d)
+	ESRCH           = Errno(0x3)
+	ESTALE          = Errno(0x46)
+	ETIMEDOUT       = Errno(0x3c)
+	ETOOMANYREFS    = Errno(0x3b)
+	ETXTBSY         = Errno(0x1a)
+	EUSERS          = Errno(0x44)
+	EWOULDBLOCK     = Errno(0x23)
+	EXDEV           = Errno(0x12)
+)
+
+// Signals
+const (
+	SIGABRT   = Signal(0x6)
+	SIGALRM   = Signal(0xe)
+	SIGBUS    = Signal(0xa)
+	SIGCHLD   = Signal(0x14)
+	SIGCONT   = Signal(0x13)
+	SIGEMT    = Signal(0x7)
+	SIGFPE    = Signal(0x8)
+	SIGHUP    = Signal(0x1)
+	SIGILL    = Signal(0x4)
+	SIGINFO   = Signal(0x1d)
+	SIGINT    = Signal(0x2)
+	SIGIO     = Signal(0x17)
+	SIGIOT    = Signal(0x6)
+	SIGKILL   = Signal(0x9)
+	SIGPIPE   = Signal(0xd)
+	SIGPROF   = Signal(0x1b)
+	SIGQUIT   = Signal(0x3)
+	SIGSEGV   = Signal(0xb)
+	SIGSTOP   = Signal(0x11)
+	SIGSYS    = Signal(0xc)
+	SIGTERM   = Signal(0xf)
+	SIGTHR    = Signal(0x20)
+	SIGTRAP   = Signal(0x5)
+	SIGTSTP   = Signal(0x12)
+	SIGTTIN   = Signal(0x15)
+	SIGTTOU   = Signal(0x16)
+	SIGURG    = Signal(0x10)
+	SIGUSR1   = Signal(0x1e)
+	SIGUSR2   = Signal(0x1f)
+	SIGVTALRM = Signal(0x1a)
+	SIGWINCH  = Signal(0x1c)
+	SIGXCPU   = Signal(0x18)
+	SIGXFSZ   = Signal(0x19)
+)
+
+// Error table
+var errors = [...]string{
+	1:  "operation not permitted",
+	2:  "no such file or directory",
+	3:  "no such process",
+	4:  "interrupted system call",
+	5:  "input/output error",
+	6:  "device not configured",
+	7:  "argument list too long",
+	8:  "exec format error",
+	9:  "bad file descriptor",
+	10: "no child processes",
+	11: "resource deadlock avoided",
+	12: "cannot allocate memory",
+	13: "permission denied",
+	14: "bad address",
+	15: "block device required",
+	16: "device busy",
+	17: "file exists",
+	18: "cross-device link",
+	19: "operation not supported by device",
+	20: "not a directory",
+	21: "is a directory",
+	22: "invalid argument",
+	23: "too many open files in system",
+	24: "too many open files",
+	25: "inappropriate ioctl for device",
+	26: "text file busy",
+	27: "file too large",
+	28: "no space left on device",
+	29: "illegal seek",
+	30: "read-only file system",
+	31: "too many links",
+	32: "broken pipe",
+	33: "numerical argument out of domain",
+	34: "result too large",
+	35: "resource temporarily unavailable",
+	36: "operation now in progress",
+	37: "operation already in progress",
+	38: "socket operation on non-socket",
+	39: "destination address required",
+	40: "message too long",
+	41: "protocol wrong type for socket",
+	42: "protocol not available",
+	43: "protocol not supported",
+	44: "socket type not supported",
+	45: "operation not supported",
+	46: "protocol family not supported",
+	47: "address family not supported by protocol family",
+	48: "address already in use",
+	49: "can't assign requested address",
+	50: "network is down",
+	51: "network is unreachable",
+	52: "network dropped connection on reset",
+	53: "software caused connection abort",
+	54: "connection reset by peer",
+	55: "no buffer space available",
+	56: "socket is already connected",
+	57: "socket is not connected",
+	58: "can't send after socket shutdown",
+	59: "too many references: can't splice",
+	60: "operation timed out",
+	61: "connection refused",
+	62: "too many levels of symbolic links",
+	63: "file name too long",
+	64: "host is down",
+	65: "no route to host",
+	66: "directory not empty",
+	67: "too many processes",
+	68: "too many users",
+	69: "disk quota exceeded",
+	70: "stale NFS file handle",
+	71: "too many levels of remote in path",
+	72: "RPC struct is bad",
+	73: "RPC version wrong",
+	74: "RPC program not available",
+	75: "program version wrong",
+	76: "bad procedure for program",
+	77: "no locks available",
+	78: "function not implemented",
+	79: "inappropriate file type or format",
+	80: "authentication error",
+	81: "need authenticator",
+	82: "IPsec processing failure",
+	83: "attribute not found",
+	84: "illegal byte sequence",
+	85: "no medium found",
+	86: "wrong medium type",
+	87: "value too large to be stored in data type",
+	88: "operation canceled",
+	89: "identifier removed",
+	90: "no message of desired type",
+	91: "not supported",
+	92: "bad message",
+	93: "state not recoverable",
+	94: "previous owner died",
+	95: "protocol error",
+}
+
+// Signal table
+var signals = [...]string{
+	1:  "hangup",
+	2:  "interrupt",
+	3:  "quit",
+	4:  "illegal instruction",
+	5:  "trace/BPT trap",
+	6:  "abort trap",
+	7:  "EMT trap",
+	8:  "floating point exception",
+	9:  "killed",
+	10: "bus error",
+	11: "segmentation fault",
+	12: "bad system call",
+	13: "broken pipe",
+	14: "alarm clock",
+	15: "terminated",
+	16: "urgent I/O condition",
+	17: "suspended (signal)",
+	18: "suspended",
+	19: "continued",
+	20: "child exited",
+	21: "stopped (tty input)",
+	22: "stopped (tty output)",
+	23: "I/O possible",
+	24: "cputime limit exceeded",
+	25: "filesize limit exceeded",
+	26: "virtual timer expired",
+	27: "profiling timer expired",
+	28: "window size changes",
+	29: "information request",
+	30: "user defined signal 1",
+	31: "user defined signal 2",
+	32: "thread AST",
+}
diff --git a/src/syscall/zerrors_openbsd_riscv64.go b/src/syscall/zerrors_openbsd_riscv64.go
new file mode 100644
index 0000000..3740f22
--- /dev/null
+++ b/src/syscall/zerrors_openbsd_riscv64.go
@@ -0,0 +1,1693 @@
+// mkerrors.sh -m64
+// Code generated by the command above; DO NOT EDIT.
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs -- -m64 _const.go
+
+package syscall
+
+const (
+	AF_APPLETALK                      = 0x10
+	AF_BLUETOOTH                      = 0x20
+	AF_CCITT                          = 0xa
+	AF_CHAOS                          = 0x5
+	AF_CNT                            = 0x15
+	AF_COIP                           = 0x14
+	AF_DATAKIT                        = 0x9
+	AF_DECnet                         = 0xc
+	AF_DLI                            = 0xd
+	AF_E164                           = 0x1a
+	AF_ECMA                           = 0x8
+	AF_ENCAP                          = 0x1c
+	AF_HYLINK                         = 0xf
+	AF_IMPLINK                        = 0x3
+	AF_INET                           = 0x2
+	AF_INET6                          = 0x18
+	AF_IPX                            = 0x17
+	AF_ISDN                           = 0x1a
+	AF_ISO                            = 0x7
+	AF_KEY                            = 0x1e
+	AF_LAT                            = 0xe
+	AF_LINK                           = 0x12
+	AF_LOCAL                          = 0x1
+	AF_MAX                            = 0x24
+	AF_MPLS                           = 0x21
+	AF_NATM                           = 0x1b
+	AF_NS                             = 0x6
+	AF_OSI                            = 0x7
+	AF_PUP                            = 0x4
+	AF_ROUTE                          = 0x11
+	AF_SIP                            = 0x1d
+	AF_SNA                            = 0xb
+	AF_UNIX                           = 0x1
+	AF_UNSPEC                         = 0x0
+	ARPHRD_ETHER                      = 0x1
+	ARPHRD_FRELAY                     = 0xf
+	ARPHRD_IEEE1394                   = 0x18
+	ARPHRD_IEEE802                    = 0x6
+	B0                                = 0x0
+	B110                              = 0x6e
+	B115200                           = 0x1c200
+	B1200                             = 0x4b0
+	B134                              = 0x86
+	B14400                            = 0x3840
+	B150                              = 0x96
+	B1800                             = 0x708
+	B19200                            = 0x4b00
+	B200                              = 0xc8
+	B230400                           = 0x38400
+	B2400                             = 0x960
+	B28800                            = 0x7080
+	B300                              = 0x12c
+	B38400                            = 0x9600
+	B4800                             = 0x12c0
+	B50                               = 0x32
+	B57600                            = 0xe100
+	B600                              = 0x258
+	B7200                             = 0x1c20
+	B75                               = 0x4b
+	B76800                            = 0x12c00
+	B9600                             = 0x2580
+	BIOCFLUSH                         = 0x20004268
+	BIOCGBLEN                         = 0x40044266
+	BIOCGDIRFILT                      = 0x4004427c
+	BIOCGDLT                          = 0x4004426a
+	BIOCGDLTLIST                      = 0xc010427b
+	BIOCGETIF                         = 0x4020426b
+	BIOCGFILDROP                      = 0x40044278
+	BIOCGHDRCMPLT                     = 0x40044274
+	BIOCGRSIG                         = 0x40044273
+	BIOCGRTIMEOUT                     = 0x4010426e
+	BIOCGSTATS                        = 0x4008426f
+	BIOCIMMEDIATE                     = 0x80044270
+	BIOCLOCK                          = 0x20004276
+	BIOCPROMISC                       = 0x20004269
+	BIOCSBLEN                         = 0xc0044266
+	BIOCSDIRFILT                      = 0x8004427d
+	BIOCSDLT                          = 0x8004427a
+	BIOCSETF                          = 0x80104267
+	BIOCSETIF                         = 0x8020426c
+	BIOCSETWF                         = 0x80104277
+	BIOCSFILDROP                      = 0x80044279
+	BIOCSHDRCMPLT                     = 0x80044275
+	BIOCSRSIG                         = 0x80044272
+	BIOCSRTIMEOUT                     = 0x8010426d
+	BIOCVERSION                       = 0x40044271
+	BPF_A                             = 0x10
+	BPF_ABS                           = 0x20
+	BPF_ADD                           = 0x0
+	BPF_ALIGNMENT                     = 0x4
+	BPF_ALU                           = 0x4
+	BPF_AND                           = 0x50
+	BPF_B                             = 0x10
+	BPF_DIRECTION_IN                  = 0x1
+	BPF_DIRECTION_OUT                 = 0x2
+	BPF_DIV                           = 0x30
+	BPF_FILDROP_CAPTURE               = 0x1
+	BPF_FILDROP_DROP                  = 0x2
+	BPF_FILDROP_PASS                  = 0x0
+	BPF_F_DIR_IN                      = 0x10
+	BPF_F_DIR_MASK                    = 0x30
+	BPF_F_DIR_OUT                     = 0x20
+	BPF_F_DIR_SHIFT                   = 0x4
+	BPF_F_FLOWID                      = 0x8
+	BPF_F_PRI_MASK                    = 0x7
+	BPF_H                             = 0x8
+	BPF_IMM                           = 0x0
+	BPF_IND                           = 0x40
+	BPF_JA                            = 0x0
+	BPF_JEQ                           = 0x10
+	BPF_JGE                           = 0x30
+	BPF_JGT                           = 0x20
+	BPF_JMP                           = 0x5
+	BPF_JSET                          = 0x40
+	BPF_K                             = 0x0
+	BPF_LD                            = 0x0
+	BPF_LDX                           = 0x1
+	BPF_LEN                           = 0x80
+	BPF_LSH                           = 0x60
+	BPF_MAJOR_VERSION                 = 0x1
+	BPF_MAXBUFSIZE                    = 0x200000
+	BPF_MAXINSNS                      = 0x200
+	BPF_MEM                           = 0x60
+	BPF_MEMWORDS                      = 0x10
+	BPF_MINBUFSIZE                    = 0x20
+	BPF_MINOR_VERSION                 = 0x1
+	BPF_MISC                          = 0x7
+	BPF_MSH                           = 0xa0
+	BPF_MUL                           = 0x20
+	BPF_NEG                           = 0x80
+	BPF_OR                            = 0x40
+	BPF_RELEASE                       = 0x30bb6
+	BPF_RET                           = 0x6
+	BPF_RND                           = 0xc0
+	BPF_RSH                           = 0x70
+	BPF_ST                            = 0x2
+	BPF_STX                           = 0x3
+	BPF_SUB                           = 0x10
+	BPF_TAX                           = 0x0
+	BPF_TXA                           = 0x80
+	BPF_W                             = 0x0
+	BPF_X                             = 0x8
+	BRKINT                            = 0x2
+	CFLUSH                            = 0xf
+	CLOCAL                            = 0x8000
+	CREAD                             = 0x800
+	CS5                               = 0x0
+	CS6                               = 0x100
+	CS7                               = 0x200
+	CS8                               = 0x300
+	CSIZE                             = 0x300
+	CSTART                            = 0x11
+	CSTATUS                           = 0xff
+	CSTOP                             = 0x13
+	CSTOPB                            = 0x400
+	CSUSP                             = 0x1a
+	CTL_MAXNAME                       = 0xc
+	CTL_NET                           = 0x4
+	DIOCOSFPFLUSH                     = 0x2000444e
+	DLT_ARCNET                        = 0x7
+	DLT_ATM_RFC1483                   = 0xb
+	DLT_AX25                          = 0x3
+	DLT_CHAOS                         = 0x5
+	DLT_C_HDLC                        = 0x68
+	DLT_EN10MB                        = 0x1
+	DLT_EN3MB                         = 0x2
+	DLT_ENC                           = 0xd
+	DLT_FDDI                          = 0xa
+	DLT_IEEE802                       = 0x6
+	DLT_IEEE802_11                    = 0x69
+	DLT_IEEE802_11_RADIO              = 0x7f
+	DLT_LOOP                          = 0xc
+	DLT_MPLS                          = 0xdb
+	DLT_NULL                          = 0x0
+	DLT_OPENFLOW                      = 0x10b
+	DLT_PFLOG                         = 0x75
+	DLT_PFSYNC                        = 0x12
+	DLT_PPP                           = 0x9
+	DLT_PPP_BSDOS                     = 0x10
+	DLT_PPP_ETHER                     = 0x33
+	DLT_PPP_SERIAL                    = 0x32
+	DLT_PRONET                        = 0x4
+	DLT_RAW                           = 0xe
+	DLT_SLIP                          = 0x8
+	DLT_SLIP_BSDOS                    = 0xf
+	DLT_USBPCAP                       = 0xf9
+	DLT_USER0                         = 0x93
+	DLT_USER1                         = 0x94
+	DLT_USER10                        = 0x9d
+	DLT_USER11                        = 0x9e
+	DLT_USER12                        = 0x9f
+	DLT_USER13                        = 0xa0
+	DLT_USER14                        = 0xa1
+	DLT_USER15                        = 0xa2
+	DLT_USER2                         = 0x95
+	DLT_USER3                         = 0x96
+	DLT_USER4                         = 0x97
+	DLT_USER5                         = 0x98
+	DLT_USER6                         = 0x99
+	DLT_USER7                         = 0x9a
+	DLT_USER8                         = 0x9b
+	DLT_USER9                         = 0x9c
+	DT_BLK                            = 0x6
+	DT_CHR                            = 0x2
+	DT_DIR                            = 0x4
+	DT_FIFO                           = 0x1
+	DT_LNK                            = 0xa
+	DT_REG                            = 0x8
+	DT_SOCK                           = 0xc
+	DT_UNKNOWN                        = 0x0
+	ECHO                              = 0x8
+	ECHOCTL                           = 0x40
+	ECHOE                             = 0x2
+	ECHOK                             = 0x4
+	ECHOKE                            = 0x1
+	ECHONL                            = 0x10
+	ECHOPRT                           = 0x20
+	EMT_TAGOVF                        = 0x1
+	EMUL_ENABLED                      = 0x1
+	EMUL_NATIVE                       = 0x2
+	ENDRUNDISC                        = 0x9
+	ETH64_8021_RSVD_MASK              = 0xfffffffffff0
+	ETH64_8021_RSVD_PREFIX            = 0x180c2000000
+	ETHERMIN                          = 0x2e
+	ETHERMTU                          = 0x5dc
+	ETHERTYPE_8023                    = 0x4
+	ETHERTYPE_AARP                    = 0x80f3
+	ETHERTYPE_ACCTON                  = 0x8390
+	ETHERTYPE_AEONIC                  = 0x8036
+	ETHERTYPE_ALPHA                   = 0x814a
+	ETHERTYPE_AMBER                   = 0x6008
+	ETHERTYPE_AMOEBA                  = 0x8145
+	ETHERTYPE_AOE                     = 0x88a2
+	ETHERTYPE_APOLLO                  = 0x80f7
+	ETHERTYPE_APOLLODOMAIN            = 0x8019
+	ETHERTYPE_APPLETALK               = 0x809b
+	ETHERTYPE_APPLITEK                = 0x80c7
+	ETHERTYPE_ARGONAUT                = 0x803a
+	ETHERTYPE_ARP                     = 0x806
+	ETHERTYPE_AT                      = 0x809b
+	ETHERTYPE_ATALK                   = 0x809b
+	ETHERTYPE_ATOMIC                  = 0x86df
+	ETHERTYPE_ATT                     = 0x8069
+	ETHERTYPE_ATTSTANFORD             = 0x8008
+	ETHERTYPE_AUTOPHON                = 0x806a
+	ETHERTYPE_AXIS                    = 0x8856
+	ETHERTYPE_BCLOOP                  = 0x9003
+	ETHERTYPE_BOFL                    = 0x8102
+	ETHERTYPE_CABLETRON               = 0x7034
+	ETHERTYPE_CHAOS                   = 0x804
+	ETHERTYPE_COMDESIGN               = 0x806c
+	ETHERTYPE_COMPUGRAPHIC            = 0x806d
+	ETHERTYPE_COUNTERPOINT            = 0x8062
+	ETHERTYPE_CRONUS                  = 0x8004
+	ETHERTYPE_CRONUSVLN               = 0x8003
+	ETHERTYPE_DCA                     = 0x1234
+	ETHERTYPE_DDE                     = 0x807b
+	ETHERTYPE_DEBNI                   = 0xaaaa
+	ETHERTYPE_DECAM                   = 0x8048
+	ETHERTYPE_DECCUST                 = 0x6006
+	ETHERTYPE_DECDIAG                 = 0x6005
+	ETHERTYPE_DECDNS                  = 0x803c
+	ETHERTYPE_DECDTS                  = 0x803e
+	ETHERTYPE_DECEXPER                = 0x6000
+	ETHERTYPE_DECLAST                 = 0x8041
+	ETHERTYPE_DECLTM                  = 0x803f
+	ETHERTYPE_DECMUMPS                = 0x6009
+	ETHERTYPE_DECNETBIOS              = 0x8040
+	ETHERTYPE_DELTACON                = 0x86de
+	ETHERTYPE_DIDDLE                  = 0x4321
+	ETHERTYPE_DLOG1                   = 0x660
+	ETHERTYPE_DLOG2                   = 0x661
+	ETHERTYPE_DN                      = 0x6003
+	ETHERTYPE_DOGFIGHT                = 0x1989
+	ETHERTYPE_DSMD                    = 0x8039
+	ETHERTYPE_EAPOL                   = 0x888e
+	ETHERTYPE_ECMA                    = 0x803
+	ETHERTYPE_ENCRYPT                 = 0x803d
+	ETHERTYPE_ES                      = 0x805d
+	ETHERTYPE_EXCELAN                 = 0x8010
+	ETHERTYPE_EXPERDATA               = 0x8049
+	ETHERTYPE_FLIP                    = 0x8146
+	ETHERTYPE_FLOWCONTROL             = 0x8808
+	ETHERTYPE_FRARP                   = 0x808
+	ETHERTYPE_GENDYN                  = 0x8068
+	ETHERTYPE_HAYES                   = 0x8130
+	ETHERTYPE_HIPPI_FP                = 0x8180
+	ETHERTYPE_HITACHI                 = 0x8820
+	ETHERTYPE_HP                      = 0x8005
+	ETHERTYPE_IEEEPUP                 = 0xa00
+	ETHERTYPE_IEEEPUPAT               = 0xa01
+	ETHERTYPE_IMLBL                   = 0x4c42
+	ETHERTYPE_IMLBLDIAG               = 0x424c
+	ETHERTYPE_IP                      = 0x800
+	ETHERTYPE_IPAS                    = 0x876c
+	ETHERTYPE_IPV6                    = 0x86dd
+	ETHERTYPE_IPX                     = 0x8137
+	ETHERTYPE_IPXNEW                  = 0x8037
+	ETHERTYPE_KALPANA                 = 0x8582
+	ETHERTYPE_LANBRIDGE               = 0x8038
+	ETHERTYPE_LANPROBE                = 0x8888
+	ETHERTYPE_LAT                     = 0x6004
+	ETHERTYPE_LBACK                   = 0x9000
+	ETHERTYPE_LITTLE                  = 0x8060
+	ETHERTYPE_LLDP                    = 0x88cc
+	ETHERTYPE_LOGICRAFT               = 0x8148
+	ETHERTYPE_LOOPBACK                = 0x9000
+	ETHERTYPE_MACSEC                  = 0x88e5
+	ETHERTYPE_MATRA                   = 0x807a
+	ETHERTYPE_MAX                     = 0xffff
+	ETHERTYPE_MERIT                   = 0x807c
+	ETHERTYPE_MICP                    = 0x873a
+	ETHERTYPE_MOPDL                   = 0x6001
+	ETHERTYPE_MOPRC                   = 0x6002
+	ETHERTYPE_MOTOROLA                = 0x818d
+	ETHERTYPE_MPLS                    = 0x8847
+	ETHERTYPE_MPLS_MCAST              = 0x8848
+	ETHERTYPE_MUMPS                   = 0x813f
+	ETHERTYPE_NBPCC                   = 0x3c04
+	ETHERTYPE_NBPCLAIM                = 0x3c09
+	ETHERTYPE_NBPCLREQ                = 0x3c05
+	ETHERTYPE_NBPCLRSP                = 0x3c06
+	ETHERTYPE_NBPCREQ                 = 0x3c02
+	ETHERTYPE_NBPCRSP                 = 0x3c03
+	ETHERTYPE_NBPDG                   = 0x3c07
+	ETHERTYPE_NBPDGB                  = 0x3c08
+	ETHERTYPE_NBPDLTE                 = 0x3c0a
+	ETHERTYPE_NBPRAR                  = 0x3c0c
+	ETHERTYPE_NBPRAS                  = 0x3c0b
+	ETHERTYPE_NBPRST                  = 0x3c0d
+	ETHERTYPE_NBPSCD                  = 0x3c01
+	ETHERTYPE_NBPVCD                  = 0x3c00
+	ETHERTYPE_NBS                     = 0x802
+	ETHERTYPE_NCD                     = 0x8149
+	ETHERTYPE_NESTAR                  = 0x8006
+	ETHERTYPE_NETBEUI                 = 0x8191
+	ETHERTYPE_NHRP                    = 0x2001
+	ETHERTYPE_NOVELL                  = 0x8138
+	ETHERTYPE_NS                      = 0x600
+	ETHERTYPE_NSAT                    = 0x601
+	ETHERTYPE_NSCOMPAT                = 0x807
+	ETHERTYPE_NSH                     = 0x984f
+	ETHERTYPE_NTRAILER                = 0x10
+	ETHERTYPE_OS9                     = 0x7007
+	ETHERTYPE_OS9NET                  = 0x7009
+	ETHERTYPE_PACER                   = 0x80c6
+	ETHERTYPE_PBB                     = 0x88e7
+	ETHERTYPE_PCS                     = 0x4242
+	ETHERTYPE_PLANNING                = 0x8044
+	ETHERTYPE_PPP                     = 0x880b
+	ETHERTYPE_PPPOE                   = 0x8864
+	ETHERTYPE_PPPOEDISC               = 0x8863
+	ETHERTYPE_PRIMENTS                = 0x7031
+	ETHERTYPE_PUP                     = 0x200
+	ETHERTYPE_PUPAT                   = 0x200
+	ETHERTYPE_QINQ                    = 0x88a8
+	ETHERTYPE_RACAL                   = 0x7030
+	ETHERTYPE_RATIONAL                = 0x8150
+	ETHERTYPE_RAWFR                   = 0x6559
+	ETHERTYPE_RCL                     = 0x1995
+	ETHERTYPE_RDP                     = 0x8739
+	ETHERTYPE_RETIX                   = 0x80f2
+	ETHERTYPE_REVARP                  = 0x8035
+	ETHERTYPE_SCA                     = 0x6007
+	ETHERTYPE_SECTRA                  = 0x86db
+	ETHERTYPE_SECUREDATA              = 0x876d
+	ETHERTYPE_SGITW                   = 0x817e
+	ETHERTYPE_SG_BOUNCE               = 0x8016
+	ETHERTYPE_SG_DIAG                 = 0x8013
+	ETHERTYPE_SG_NETGAMES             = 0x8014
+	ETHERTYPE_SG_RESV                 = 0x8015
+	ETHERTYPE_SIMNET                  = 0x5208
+	ETHERTYPE_SLOW                    = 0x8809
+	ETHERTYPE_SNA                     = 0x80d5
+	ETHERTYPE_SNMP                    = 0x814c
+	ETHERTYPE_SONIX                   = 0xfaf5
+	ETHERTYPE_SPIDER                  = 0x809f
+	ETHERTYPE_SPRITE                  = 0x500
+	ETHERTYPE_STP                     = 0x8181
+	ETHERTYPE_TALARIS                 = 0x812b
+	ETHERTYPE_TALARISMC               = 0x852b
+	ETHERTYPE_TCPCOMP                 = 0x876b
+	ETHERTYPE_TCPSM                   = 0x9002
+	ETHERTYPE_TEC                     = 0x814f
+	ETHERTYPE_TIGAN                   = 0x802f
+	ETHERTYPE_TRAIL                   = 0x1000
+	ETHERTYPE_TRANSETHER              = 0x6558
+	ETHERTYPE_TYMSHARE                = 0x802e
+	ETHERTYPE_UBBST                   = 0x7005
+	ETHERTYPE_UBDEBUG                 = 0x900
+	ETHERTYPE_UBDIAGLOOP              = 0x7002
+	ETHERTYPE_UBDL                    = 0x7000
+	ETHERTYPE_UBNIU                   = 0x7001
+	ETHERTYPE_UBNMC                   = 0x7003
+	ETHERTYPE_VALID                   = 0x1600
+	ETHERTYPE_VARIAN                  = 0x80dd
+	ETHERTYPE_VAXELN                  = 0x803b
+	ETHERTYPE_VEECO                   = 0x8067
+	ETHERTYPE_VEXP                    = 0x805b
+	ETHERTYPE_VGLAB                   = 0x8131
+	ETHERTYPE_VINES                   = 0xbad
+	ETHERTYPE_VINESECHO               = 0xbaf
+	ETHERTYPE_VINESLOOP               = 0xbae
+	ETHERTYPE_VITAL                   = 0xff00
+	ETHERTYPE_VLAN                    = 0x8100
+	ETHERTYPE_VLTLMAN                 = 0x8080
+	ETHERTYPE_VPROD                   = 0x805c
+	ETHERTYPE_VURESERVED              = 0x8147
+	ETHERTYPE_WATERLOO                = 0x8130
+	ETHERTYPE_WELLFLEET               = 0x8103
+	ETHERTYPE_X25                     = 0x805
+	ETHERTYPE_X75                     = 0x801
+	ETHERTYPE_XNSSM                   = 0x9001
+	ETHERTYPE_XTP                     = 0x817d
+	ETHER_ADDR_LEN                    = 0x6
+	ETHER_ALIGN                       = 0x2
+	ETHER_CRC_LEN                     = 0x4
+	ETHER_CRC_POLY_BE                 = 0x4c11db6
+	ETHER_CRC_POLY_LE                 = 0xedb88320
+	ETHER_HDR_LEN                     = 0xe
+	ETHER_MAX_DIX_LEN                 = 0x600
+	ETHER_MAX_HARDMTU_LEN             = 0xff9b
+	ETHER_MAX_LEN                     = 0x5ee
+	ETHER_MIN_LEN                     = 0x40
+	ETHER_TYPE_LEN                    = 0x2
+	ETHER_VLAN_ENCAP_LEN              = 0x4
+	EVFILT_AIO                        = -0x3
+	EVFILT_DEVICE                     = -0x8
+	EVFILT_EXCEPT                     = -0x9
+	EVFILT_PROC                       = -0x5
+	EVFILT_READ                       = -0x1
+	EVFILT_SIGNAL                     = -0x6
+	EVFILT_SYSCOUNT                   = 0x9
+	EVFILT_TIMER                      = -0x7
+	EVFILT_VNODE                      = -0x4
+	EVFILT_WRITE                      = -0x2
+	EVL_ENCAPLEN                      = 0x4
+	EVL_PRIO_BITS                     = 0xd
+	EVL_PRIO_MAX                      = 0x7
+	EVL_VLID_MASK                     = 0xfff
+	EVL_VLID_MAX                      = 0xffe
+	EVL_VLID_MIN                      = 0x1
+	EVL_VLID_NULL                     = 0x0
+	EV_ADD                            = 0x1
+	EV_CLEAR                          = 0x20
+	EV_DELETE                         = 0x2
+	EV_DISABLE                        = 0x8
+	EV_DISPATCH                       = 0x80
+	EV_ENABLE                         = 0x4
+	EV_EOF                            = 0x8000
+	EV_ERROR                          = 0x4000
+	EV_FLAG1                          = 0x2000
+	EV_ONESHOT                        = 0x10
+	EV_RECEIPT                        = 0x40
+	EV_SYSFLAGS                       = 0xf800
+	EXTA                              = 0x4b00
+	EXTB                              = 0x9600
+	EXTPROC                           = 0x800
+	FD_CLOEXEC                        = 0x1
+	FD_SETSIZE                        = 0x400
+	FLUSHO                            = 0x800000
+	F_DUPFD                           = 0x0
+	F_DUPFD_CLOEXEC                   = 0xa
+	F_GETFD                           = 0x1
+	F_GETFL                           = 0x3
+	F_GETLK                           = 0x7
+	F_GETOWN                          = 0x5
+	F_ISATTY                          = 0xb
+	F_RDLCK                           = 0x1
+	F_SETFD                           = 0x2
+	F_SETFL                           = 0x4
+	F_SETLK                           = 0x8
+	F_SETLKW                          = 0x9
+	F_SETOWN                          = 0x6
+	F_UNLCK                           = 0x2
+	F_WRLCK                           = 0x3
+	HUPCL                             = 0x4000
+	ICANON                            = 0x100
+	ICMP6_FILTER                      = 0x12
+	ICRNL                             = 0x100
+	IEXTEN                            = 0x400
+	IFAN_ARRIVAL                      = 0x0
+	IFAN_DEPARTURE                    = 0x1
+	IFF_ALLMULTI                      = 0x200
+	IFF_BROADCAST                     = 0x2
+	IFF_CANTCHANGE                    = 0x8e52
+	IFF_DEBUG                         = 0x4
+	IFF_LINK0                         = 0x1000
+	IFF_LINK1                         = 0x2000
+	IFF_LINK2                         = 0x4000
+	IFF_LOOPBACK                      = 0x8
+	IFF_MULTICAST                     = 0x8000
+	IFF_NOARP                         = 0x80
+	IFF_OACTIVE                       = 0x400
+	IFF_POINTOPOINT                   = 0x10
+	IFF_PROMISC                       = 0x100
+	IFF_RUNNING                       = 0x40
+	IFF_SIMPLEX                       = 0x800
+	IFF_STATICARP                     = 0x20
+	IFF_UP                            = 0x1
+	IFNAMSIZ                          = 0x10
+	IFT_1822                          = 0x2
+	IFT_A12MPPSWITCH                  = 0x82
+	IFT_AAL2                          = 0xbb
+	IFT_AAL5                          = 0x31
+	IFT_ADSL                          = 0x5e
+	IFT_AFLANE8023                    = 0x3b
+	IFT_AFLANE8025                    = 0x3c
+	IFT_ARAP                          = 0x58
+	IFT_ARCNET                        = 0x23
+	IFT_ARCNETPLUS                    = 0x24
+	IFT_ASYNC                         = 0x54
+	IFT_ATM                           = 0x25
+	IFT_ATMDXI                        = 0x69
+	IFT_ATMFUNI                       = 0x6a
+	IFT_ATMIMA                        = 0x6b
+	IFT_ATMLOGICAL                    = 0x50
+	IFT_ATMRADIO                      = 0xbd
+	IFT_ATMSUBINTERFACE               = 0x86
+	IFT_ATMVCIENDPT                   = 0xc2
+	IFT_ATMVIRTUAL                    = 0x95
+	IFT_BGPPOLICYACCOUNTING           = 0xa2
+	IFT_BLUETOOTH                     = 0xf8
+	IFT_BRIDGE                        = 0xd1
+	IFT_BSC                           = 0x53
+	IFT_CARP                          = 0xf7
+	IFT_CCTEMUL                       = 0x3d
+	IFT_CEPT                          = 0x13
+	IFT_CES                           = 0x85
+	IFT_CHANNEL                       = 0x46
+	IFT_CNR                           = 0x55
+	IFT_COFFEE                        = 0x84
+	IFT_COMPOSITELINK                 = 0x9b
+	IFT_DCN                           = 0x8d
+	IFT_DIGITALPOWERLINE              = 0x8a
+	IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+	IFT_DLSW                          = 0x4a
+	IFT_DOCSCABLEDOWNSTREAM           = 0x80
+	IFT_DOCSCABLEMACLAYER             = 0x7f
+	IFT_DOCSCABLEUPSTREAM             = 0x81
+	IFT_DOCSCABLEUPSTREAMCHANNEL      = 0xcd
+	IFT_DS0                           = 0x51
+	IFT_DS0BUNDLE                     = 0x52
+	IFT_DS1FDL                        = 0xaa
+	IFT_DS3                           = 0x1e
+	IFT_DTM                           = 0x8c
+	IFT_DUMMY                         = 0xf1
+	IFT_DVBASILN                      = 0xac
+	IFT_DVBASIOUT                     = 0xad
+	IFT_DVBRCCDOWNSTREAM              = 0x93
+	IFT_DVBRCCMACLAYER                = 0x92
+	IFT_DVBRCCUPSTREAM                = 0x94
+	IFT_ECONET                        = 0xce
+	IFT_ENC                           = 0xf4
+	IFT_EON                           = 0x19
+	IFT_EPLRS                         = 0x57
+	IFT_ESCON                         = 0x49
+	IFT_ETHER                         = 0x6
+	IFT_FAITH                         = 0xf3
+	IFT_FAST                          = 0x7d
+	IFT_FASTETHER                     = 0x3e
+	IFT_FASTETHERFX                   = 0x45
+	IFT_FDDI                          = 0xf
+	IFT_FIBRECHANNEL                  = 0x38
+	IFT_FRAMERELAYINTERCONNECT        = 0x3a
+	IFT_FRAMERELAYMPI                 = 0x5c
+	IFT_FRDLCIENDPT                   = 0xc1
+	IFT_FRELAY                        = 0x20
+	IFT_FRELAYDCE                     = 0x2c
+	IFT_FRF16MFRBUNDLE                = 0xa3
+	IFT_FRFORWARD                     = 0x9e
+	IFT_G703AT2MB                     = 0x43
+	IFT_G703AT64K                     = 0x42
+	IFT_GIF                           = 0xf0
+	IFT_GIGABITETHERNET               = 0x75
+	IFT_GR303IDT                      = 0xb2
+	IFT_GR303RDT                      = 0xb1
+	IFT_H323GATEKEEPER                = 0xa4
+	IFT_H323PROXY                     = 0xa5
+	IFT_HDH1822                       = 0x3
+	IFT_HDLC                          = 0x76
+	IFT_HDSL2                         = 0xa8
+	IFT_HIPERLAN2                     = 0xb7
+	IFT_HIPPI                         = 0x2f
+	IFT_HIPPIINTERFACE                = 0x39
+	IFT_HOSTPAD                       = 0x5a
+	IFT_HSSI                          = 0x2e
+	IFT_HY                            = 0xe
+	IFT_IBM370PARCHAN                 = 0x48
+	IFT_IDSL                          = 0x9a
+	IFT_IEEE1394                      = 0x90
+	IFT_IEEE80211                     = 0x47
+	IFT_IEEE80212                     = 0x37
+	IFT_IEEE8023ADLAG                 = 0xa1
+	IFT_IFGSN                         = 0x91
+	IFT_IMT                           = 0xbe
+	IFT_INFINIBAND                    = 0xc7
+	IFT_INTERLEAVE                    = 0x7c
+	IFT_IP                            = 0x7e
+	IFT_IPFORWARD                     = 0x8e
+	IFT_IPOVERATM                     = 0x72
+	IFT_IPOVERCDLC                    = 0x6d
+	IFT_IPOVERCLAW                    = 0x6e
+	IFT_IPSWITCH                      = 0x4e
+	IFT_ISDN                          = 0x3f
+	IFT_ISDNBASIC                     = 0x14
+	IFT_ISDNPRIMARY                   = 0x15
+	IFT_ISDNS                         = 0x4b
+	IFT_ISDNU                         = 0x4c
+	IFT_ISO88022LLC                   = 0x29
+	IFT_ISO88023                      = 0x7
+	IFT_ISO88024                      = 0x8
+	IFT_ISO88025                      = 0x9
+	IFT_ISO88025CRFPINT               = 0x62
+	IFT_ISO88025DTR                   = 0x56
+	IFT_ISO88025FIBER                 = 0x73
+	IFT_ISO88026                      = 0xa
+	IFT_ISUP                          = 0xb3
+	IFT_L2VLAN                        = 0x87
+	IFT_L3IPVLAN                      = 0x88
+	IFT_L3IPXVLAN                     = 0x89
+	IFT_LAPB                          = 0x10
+	IFT_LAPD                          = 0x4d
+	IFT_LAPF                          = 0x77
+	IFT_LINEGROUP                     = 0xd2
+	IFT_LOCALTALK                     = 0x2a
+	IFT_LOOP                          = 0x18
+	IFT_MBIM                          = 0xfa
+	IFT_MEDIAMAILOVERIP               = 0x8b
+	IFT_MFSIGLINK                     = 0xa7
+	IFT_MIOX25                        = 0x26
+	IFT_MODEM                         = 0x30
+	IFT_MPC                           = 0x71
+	IFT_MPLS                          = 0xa6
+	IFT_MPLSTUNNEL                    = 0x96
+	IFT_MSDSL                         = 0x8f
+	IFT_MVL                           = 0xbf
+	IFT_MYRINET                       = 0x63
+	IFT_NFAS                          = 0xaf
+	IFT_NSIP                          = 0x1b
+	IFT_OPTICALCHANNEL                = 0xc3
+	IFT_OPTICALTRANSPORT              = 0xc4
+	IFT_OTHER                         = 0x1
+	IFT_P10                           = 0xc
+	IFT_P80                           = 0xd
+	IFT_PARA                          = 0x22
+	IFT_PFLOG                         = 0xf5
+	IFT_PFLOW                         = 0xf9
+	IFT_PFSYNC                        = 0xf6
+	IFT_PLC                           = 0xae
+	IFT_PON155                        = 0xcf
+	IFT_PON622                        = 0xd0
+	IFT_POS                           = 0xab
+	IFT_PPP                           = 0x17
+	IFT_PPPMULTILINKBUNDLE            = 0x6c
+	IFT_PROPATM                       = 0xc5
+	IFT_PROPBWAP2MP                   = 0xb8
+	IFT_PROPCNLS                      = 0x59
+	IFT_PROPDOCSWIRELESSDOWNSTREAM    = 0xb5
+	IFT_PROPDOCSWIRELESSMACLAYER      = 0xb4
+	IFT_PROPDOCSWIRELESSUPSTREAM      = 0xb6
+	IFT_PROPMUX                       = 0x36
+	IFT_PROPVIRTUAL                   = 0x35
+	IFT_PROPWIRELESSP2P               = 0x9d
+	IFT_PTPSERIAL                     = 0x16
+	IFT_PVC                           = 0xf2
+	IFT_Q2931                         = 0xc9
+	IFT_QLLC                          = 0x44
+	IFT_RADIOMAC                      = 0xbc
+	IFT_RADSL                         = 0x5f
+	IFT_REACHDSL                      = 0xc0
+	IFT_RFC1483                       = 0x9f
+	IFT_RS232                         = 0x21
+	IFT_RSRB                          = 0x4f
+	IFT_SDLC                          = 0x11
+	IFT_SDSL                          = 0x60
+	IFT_SHDSL                         = 0xa9
+	IFT_SIP                           = 0x1f
+	IFT_SIPSIG                        = 0xcc
+	IFT_SIPTG                         = 0xcb
+	IFT_SLIP                          = 0x1c
+	IFT_SMDSDXI                       = 0x2b
+	IFT_SMDSICIP                      = 0x34
+	IFT_SONET                         = 0x27
+	IFT_SONETOVERHEADCHANNEL          = 0xb9
+	IFT_SONETPATH                     = 0x32
+	IFT_SONETVT                       = 0x33
+	IFT_SRP                           = 0x97
+	IFT_SS7SIGLINK                    = 0x9c
+	IFT_STACKTOSTACK                  = 0x6f
+	IFT_STARLAN                       = 0xb
+	IFT_T1                            = 0x12
+	IFT_TDLC                          = 0x74
+	IFT_TELINK                        = 0xc8
+	IFT_TERMPAD                       = 0x5b
+	IFT_TR008                         = 0xb0
+	IFT_TRANSPHDLC                    = 0x7b
+	IFT_TUNNEL                        = 0x83
+	IFT_ULTRA                         = 0x1d
+	IFT_USB                           = 0xa0
+	IFT_V11                           = 0x40
+	IFT_V35                           = 0x2d
+	IFT_V36                           = 0x41
+	IFT_V37                           = 0x78
+	IFT_VDSL                          = 0x61
+	IFT_VIRTUALIPADDRESS              = 0x70
+	IFT_VIRTUALTG                     = 0xca
+	IFT_VOICEDID                      = 0xd5
+	IFT_VOICEEM                       = 0x64
+	IFT_VOICEEMFGD                    = 0xd3
+	IFT_VOICEENCAP                    = 0x67
+	IFT_VOICEFGDEANA                  = 0xd4
+	IFT_VOICEFXO                      = 0x65
+	IFT_VOICEFXS                      = 0x66
+	IFT_VOICEOVERATM                  = 0x98
+	IFT_VOICEOVERCABLE                = 0xc6
+	IFT_VOICEOVERFRAMERELAY           = 0x99
+	IFT_VOICEOVERIP                   = 0x68
+	IFT_WIREGUARD                     = 0xfb
+	IFT_X213                          = 0x5d
+	IFT_X25                           = 0x5
+	IFT_X25DDN                        = 0x4
+	IFT_X25HUNTGROUP                  = 0x7a
+	IFT_X25MLP                        = 0x79
+	IFT_X25PLE                        = 0x28
+	IFT_XETHER                        = 0x1a
+	IGNBRK                            = 0x1
+	IGNCR                             = 0x80
+	IGNPAR                            = 0x4
+	IMAXBEL                           = 0x2000
+	INLCR                             = 0x40
+	INPCK                             = 0x10
+	IN_CLASSA_HOST                    = 0xffffff
+	IN_CLASSA_MAX                     = 0x80
+	IN_CLASSA_NET                     = 0xff000000
+	IN_CLASSA_NSHIFT                  = 0x18
+	IN_CLASSB_HOST                    = 0xffff
+	IN_CLASSB_MAX                     = 0x10000
+	IN_CLASSB_NET                     = 0xffff0000
+	IN_CLASSB_NSHIFT                  = 0x10
+	IN_CLASSC_HOST                    = 0xff
+	IN_CLASSC_NET                     = 0xffffff00
+	IN_CLASSC_NSHIFT                  = 0x8
+	IN_CLASSD_HOST                    = 0xfffffff
+	IN_CLASSD_NET                     = 0xf0000000
+	IN_CLASSD_NSHIFT                  = 0x1c
+	IN_LOOPBACKNET                    = 0x7f
+	IN_RFC3021_HOST                   = 0x1
+	IN_RFC3021_NET                    = 0xfffffffe
+	IN_RFC3021_NSHIFT                 = 0x1f
+	IPPROTO_AH                        = 0x33
+	IPPROTO_CARP                      = 0x70
+	IPPROTO_DIVERT                    = 0x102
+	IPPROTO_DONE                      = 0x101
+	IPPROTO_DSTOPTS                   = 0x3c
+	IPPROTO_EGP                       = 0x8
+	IPPROTO_ENCAP                     = 0x62
+	IPPROTO_EON                       = 0x50
+	IPPROTO_ESP                       = 0x32
+	IPPROTO_ETHERIP                   = 0x61
+	IPPROTO_FRAGMENT                  = 0x2c
+	IPPROTO_GGP                       = 0x3
+	IPPROTO_GRE                       = 0x2f
+	IPPROTO_HOPOPTS                   = 0x0
+	IPPROTO_ICMP                      = 0x1
+	IPPROTO_ICMPV6                    = 0x3a
+	IPPROTO_IDP                       = 0x16
+	IPPROTO_IGMP                      = 0x2
+	IPPROTO_IP                        = 0x0
+	IPPROTO_IPCOMP                    = 0x6c
+	IPPROTO_IPIP                      = 0x4
+	IPPROTO_IPV4                      = 0x4
+	IPPROTO_IPV6                      = 0x29
+	IPPROTO_MAX                       = 0x100
+	IPPROTO_MAXID                     = 0x103
+	IPPROTO_MOBILE                    = 0x37
+	IPPROTO_MPLS                      = 0x89
+	IPPROTO_NONE                      = 0x3b
+	IPPROTO_PFSYNC                    = 0xf0
+	IPPROTO_PIM                       = 0x67
+	IPPROTO_PUP                       = 0xc
+	IPPROTO_RAW                       = 0xff
+	IPPROTO_ROUTING                   = 0x2b
+	IPPROTO_RSVP                      = 0x2e
+	IPPROTO_SCTP                      = 0x84
+	IPPROTO_TCP                       = 0x6
+	IPPROTO_TP                        = 0x1d
+	IPPROTO_UDP                       = 0x11
+	IPPROTO_UDPLITE                   = 0x88
+	IPV6_AUTH_LEVEL                   = 0x35
+	IPV6_AUTOFLOWLABEL                = 0x3b
+	IPV6_CHECKSUM                     = 0x1a
+	IPV6_DEFAULT_MULTICAST_HOPS       = 0x1
+	IPV6_DEFAULT_MULTICAST_LOOP       = 0x1
+	IPV6_DEFHLIM                      = 0x40
+	IPV6_DONTFRAG                     = 0x3e
+	IPV6_DSTOPTS                      = 0x32
+	IPV6_ESP_NETWORK_LEVEL            = 0x37
+	IPV6_ESP_TRANS_LEVEL              = 0x36
+	IPV6_FAITH                        = 0x1d
+	IPV6_FLOWINFO_MASK                = 0xffffff0f
+	IPV6_FLOWLABEL_MASK               = 0xffff0f00
+	IPV6_FRAGTTL                      = 0x78
+	IPV6_HLIMDEC                      = 0x1
+	IPV6_HOPLIMIT                     = 0x2f
+	IPV6_HOPOPTS                      = 0x31
+	IPV6_IPCOMP_LEVEL                 = 0x3c
+	IPV6_JOIN_GROUP                   = 0xc
+	IPV6_LEAVE_GROUP                  = 0xd
+	IPV6_MAXHLIM                      = 0xff
+	IPV6_MAXPACKET                    = 0xffff
+	IPV6_MINHOPCOUNT                  = 0x41
+	IPV6_MMTU                         = 0x500
+	IPV6_MULTICAST_HOPS               = 0xa
+	IPV6_MULTICAST_IF                 = 0x9
+	IPV6_MULTICAST_LOOP               = 0xb
+	IPV6_NEXTHOP                      = 0x30
+	IPV6_OPTIONS                      = 0x1
+	IPV6_PATHMTU                      = 0x2c
+	IPV6_PIPEX                        = 0x3f
+	IPV6_PKTINFO                      = 0x2e
+	IPV6_PORTRANGE                    = 0xe
+	IPV6_PORTRANGE_DEFAULT            = 0x0
+	IPV6_PORTRANGE_HIGH               = 0x1
+	IPV6_PORTRANGE_LOW                = 0x2
+	IPV6_RECVDSTOPTS                  = 0x28
+	IPV6_RECVDSTPORT                  = 0x40
+	IPV6_RECVHOPLIMIT                 = 0x25
+	IPV6_RECVHOPOPTS                  = 0x27
+	IPV6_RECVPATHMTU                  = 0x2b
+	IPV6_RECVPKTINFO                  = 0x24
+	IPV6_RECVRTHDR                    = 0x26
+	IPV6_RECVTCLASS                   = 0x39
+	IPV6_RTABLE                       = 0x1021
+	IPV6_RTHDR                        = 0x33
+	IPV6_RTHDRDSTOPTS                 = 0x23
+	IPV6_RTHDR_LOOSE                  = 0x0
+	IPV6_RTHDR_STRICT                 = 0x1
+	IPV6_RTHDR_TYPE_0                 = 0x0
+	IPV6_SOCKOPT_RESERVED1            = 0x3
+	IPV6_TCLASS                       = 0x3d
+	IPV6_UNICAST_HOPS                 = 0x4
+	IPV6_USE_MIN_MTU                  = 0x2a
+	IPV6_V6ONLY                       = 0x1b
+	IPV6_VERSION                      = 0x60
+	IPV6_VERSION_MASK                 = 0xf0
+	IP_ADD_MEMBERSHIP                 = 0xc
+	IP_AUTH_LEVEL                     = 0x14
+	IP_DEFAULT_MULTICAST_LOOP         = 0x1
+	IP_DEFAULT_MULTICAST_TTL          = 0x1
+	IP_DF                             = 0x4000
+	IP_DROP_MEMBERSHIP                = 0xd
+	IP_ESP_NETWORK_LEVEL              = 0x16
+	IP_ESP_TRANS_LEVEL                = 0x15
+	IP_HDRINCL                        = 0x2
+	IP_IPCOMP_LEVEL                   = 0x1d
+	IP_IPDEFTTL                       = 0x25
+	IP_IPSECFLOWINFO                  = 0x24
+	IP_IPSEC_LOCAL_AUTH               = 0x1b
+	IP_IPSEC_LOCAL_CRED               = 0x19
+	IP_IPSEC_LOCAL_ID                 = 0x17
+	IP_IPSEC_REMOTE_AUTH              = 0x1c
+	IP_IPSEC_REMOTE_CRED              = 0x1a
+	IP_IPSEC_REMOTE_ID                = 0x18
+	IP_MAXPACKET                      = 0xffff
+	IP_MAX_MEMBERSHIPS                = 0xfff
+	IP_MF                             = 0x2000
+	IP_MINTTL                         = 0x20
+	IP_MIN_MEMBERSHIPS                = 0xf
+	IP_MSS                            = 0x240
+	IP_MULTICAST_IF                   = 0x9
+	IP_MULTICAST_LOOP                 = 0xb
+	IP_MULTICAST_TTL                  = 0xa
+	IP_OFFMASK                        = 0x1fff
+	IP_OPTIONS                        = 0x1
+	IP_PIPEX                          = 0x22
+	IP_PORTRANGE                      = 0x13
+	IP_PORTRANGE_DEFAULT              = 0x0
+	IP_PORTRANGE_HIGH                 = 0x1
+	IP_PORTRANGE_LOW                  = 0x2
+	IP_RECVDSTADDR                    = 0x7
+	IP_RECVDSTPORT                    = 0x21
+	IP_RECVIF                         = 0x1e
+	IP_RECVOPTS                       = 0x5
+	IP_RECVRETOPTS                    = 0x6
+	IP_RECVRTABLE                     = 0x23
+	IP_RECVTTL                        = 0x1f
+	IP_RETOPTS                        = 0x8
+	IP_RF                             = 0x8000
+	IP_RTABLE                         = 0x1021
+	IP_SENDSRCADDR                    = 0x7
+	IP_TOS                            = 0x3
+	IP_TTL                            = 0x4
+	ISIG                              = 0x80
+	ISTRIP                            = 0x20
+	IXANY                             = 0x800
+	IXOFF                             = 0x400
+	IXON                              = 0x200
+	LCNT_OVERLOAD_FLUSH               = 0x6
+	LOCK_EX                           = 0x2
+	LOCK_NB                           = 0x4
+	LOCK_SH                           = 0x1
+	LOCK_UN                           = 0x8
+	MADV_DONTNEED                     = 0x4
+	MADV_FREE                         = 0x6
+	MADV_NORMAL                       = 0x0
+	MADV_RANDOM                       = 0x1
+	MADV_SEQUENTIAL                   = 0x2
+	MADV_SPACEAVAIL                   = 0x5
+	MADV_WILLNEED                     = 0x3
+	MAP_ANON                          = 0x1000
+	MAP_ANONYMOUS                     = 0x1000
+	MAP_CONCEAL                       = 0x8000
+	MAP_COPY                          = 0x2
+	MAP_FILE                          = 0x0
+	MAP_FIXED                         = 0x10
+	MAP_FLAGMASK                      = 0xfff7
+	MAP_HASSEMAPHORE                  = 0x0
+	MAP_INHERIT                       = 0x0
+	MAP_INHERIT_COPY                  = 0x1
+	MAP_INHERIT_NONE                  = 0x2
+	MAP_INHERIT_SHARE                 = 0x0
+	MAP_INHERIT_ZERO                  = 0x3
+	MAP_NOEXTEND                      = 0x0
+	MAP_NORESERVE                     = 0x0
+	MAP_PRIVATE                       = 0x2
+	MAP_RENAME                        = 0x0
+	MAP_SHARED                        = 0x1
+	MAP_STACK                         = 0x4000
+	MAP_TRYFIXED                      = 0x0
+	MCL_CURRENT                       = 0x1
+	MCL_FUTURE                        = 0x2
+	MSG_BCAST                         = 0x100
+	MSG_CMSG_CLOEXEC                  = 0x800
+	MSG_CTRUNC                        = 0x20
+	MSG_DONTROUTE                     = 0x4
+	MSG_DONTWAIT                      = 0x80
+	MSG_EOR                           = 0x8
+	MSG_MCAST                         = 0x200
+	MSG_NOSIGNAL                      = 0x400
+	MSG_OOB                           = 0x1
+	MSG_PEEK                          = 0x2
+	MSG_TRUNC                         = 0x10
+	MSG_WAITALL                       = 0x40
+	MS_ASYNC                          = 0x1
+	MS_INVALIDATE                     = 0x4
+	MS_SYNC                           = 0x2
+	NAME_MAX                          = 0xff
+	NET_RT_DUMP                       = 0x1
+	NET_RT_FLAGS                      = 0x2
+	NET_RT_IFLIST                     = 0x3
+	NET_RT_IFNAMES                    = 0x6
+	NET_RT_MAXID                      = 0x8
+	NET_RT_SOURCE                     = 0x7
+	NET_RT_STATS                      = 0x4
+	NET_RT_TABLE                      = 0x5
+	NOFLSH                            = 0x80000000
+	NOTE_ATTRIB                       = 0x8
+	NOTE_CHANGE                       = 0x1
+	NOTE_CHILD                        = 0x4
+	NOTE_DELETE                       = 0x1
+	NOTE_EOF                          = 0x2
+	NOTE_EXEC                         = 0x20000000
+	NOTE_EXIT                         = 0x80000000
+	NOTE_EXTEND                       = 0x4
+	NOTE_FORK                         = 0x40000000
+	NOTE_LINK                         = 0x10
+	NOTE_LOWAT                        = 0x1
+	NOTE_OOB                          = 0x4
+	NOTE_PCTRLMASK                    = 0xf0000000
+	NOTE_PDATAMASK                    = 0xfffff
+	NOTE_RENAME                       = 0x20
+	NOTE_REVOKE                       = 0x40
+	NOTE_TRACK                        = 0x1
+	NOTE_TRACKERR                     = 0x2
+	NOTE_TRUNCATE                     = 0x80
+	NOTE_WRITE                        = 0x2
+	OCRNL                             = 0x10
+	ONLCR                             = 0x2
+	ONLRET                            = 0x80
+	ONOCR                             = 0x40
+	ONOEOT                            = 0x8
+	OPOST                             = 0x1
+	O_ACCMODE                         = 0x3
+	O_APPEND                          = 0x8
+	O_ASYNC                           = 0x40
+	O_CLOEXEC                         = 0x10000
+	O_CREAT                           = 0x200
+	O_DIRECTORY                       = 0x20000
+	O_DSYNC                           = 0x80
+	O_EXCL                            = 0x800
+	O_EXLOCK                          = 0x20
+	O_FSYNC                           = 0x80
+	O_NDELAY                          = 0x4
+	O_NOCTTY                          = 0x8000
+	O_NOFOLLOW                        = 0x100
+	O_NONBLOCK                        = 0x4
+	O_RDONLY                          = 0x0
+	O_RDWR                            = 0x2
+	O_RSYNC                           = 0x80
+	O_SHLOCK                          = 0x10
+	O_SYNC                            = 0x80
+	O_TRUNC                           = 0x400
+	O_WRONLY                          = 0x1
+	PARENB                            = 0x1000
+	PARMRK                            = 0x8
+	PARODD                            = 0x2000
+	PENDIN                            = 0x20000000
+	PF_FLUSH                          = 0x1
+	PRIO_PGRP                         = 0x1
+	PRIO_PROCESS                      = 0x0
+	PRIO_USER                         = 0x2
+	PROT_EXEC                         = 0x4
+	PROT_NONE                         = 0x0
+	PROT_READ                         = 0x1
+	PROT_WRITE                        = 0x2
+	RLIMIT_CORE                       = 0x4
+	RLIMIT_CPU                        = 0x0
+	RLIMIT_DATA                       = 0x2
+	RLIMIT_FSIZE                      = 0x1
+	RLIMIT_NOFILE                     = 0x8
+	RLIMIT_STACK                      = 0x3
+	RLIM_INFINITY                     = 0x7fffffffffffffff
+	RTAX_AUTHOR                       = 0x6
+	RTAX_BFD                          = 0xb
+	RTAX_BRD                          = 0x7
+	RTAX_DNS                          = 0xc
+	RTAX_DST                          = 0x0
+	RTAX_GATEWAY                      = 0x1
+	RTAX_GENMASK                      = 0x3
+	RTAX_IFA                          = 0x5
+	RTAX_IFP                          = 0x4
+	RTAX_LABEL                        = 0xa
+	RTAX_MAX                          = 0xf
+	RTAX_NETMASK                      = 0x2
+	RTAX_SEARCH                       = 0xe
+	RTAX_SRC                          = 0x8
+	RTAX_SRCMASK                      = 0x9
+	RTAX_STATIC                       = 0xd
+	RTA_AUTHOR                        = 0x40
+	RTA_BFD                           = 0x800
+	RTA_BRD                           = 0x80
+	RTA_DNS                           = 0x1000
+	RTA_DST                           = 0x1
+	RTA_GATEWAY                       = 0x2
+	RTA_GENMASK                       = 0x8
+	RTA_IFA                           = 0x20
+	RTA_IFP                           = 0x10
+	RTA_LABEL                         = 0x400
+	RTA_NETMASK                       = 0x4
+	RTA_SEARCH                        = 0x4000
+	RTA_SRC                           = 0x100
+	RTA_SRCMASK                       = 0x200
+	RTA_STATIC                        = 0x2000
+	RTF_ANNOUNCE                      = 0x4000
+	RTF_BFD                           = 0x1000000
+	RTF_BLACKHOLE                     = 0x1000
+	RTF_BROADCAST                     = 0x400000
+	RTF_CACHED                        = 0x20000
+	RTF_CLONED                        = 0x10000
+	RTF_CLONING                       = 0x100
+	RTF_CONNECTED                     = 0x800000
+	RTF_DONE                          = 0x40
+	RTF_DYNAMIC                       = 0x10
+	RTF_FMASK                         = 0x110fc08
+	RTF_GATEWAY                       = 0x2
+	RTF_HOST                          = 0x4
+	RTF_LLINFO                        = 0x400
+	RTF_LOCAL                         = 0x200000
+	RTF_MODIFIED                      = 0x20
+	RTF_MPATH                         = 0x40000
+	RTF_MPLS                          = 0x100000
+	RTF_MULTICAST                     = 0x200
+	RTF_PERMANENT_ARP                 = 0x2000
+	RTF_PROTO1                        = 0x8000
+	RTF_PROTO2                        = 0x4000
+	RTF_PROTO3                        = 0x2000
+	RTF_REJECT                        = 0x8
+	RTF_STATIC                        = 0x800
+	RTF_UP                            = 0x1
+	RTF_USETRAILERS                   = 0x8000
+	RTM_80211INFO                     = 0x15
+	RTM_ADD                           = 0x1
+	RTM_BFD                           = 0x12
+	RTM_CHANGE                        = 0x3
+	RTM_CHGADDRATTR                   = 0x14
+	RTM_DELADDR                       = 0xd
+	RTM_DELETE                        = 0x2
+	RTM_DESYNC                        = 0x10
+	RTM_GET                           = 0x4
+	RTM_IFANNOUNCE                    = 0xf
+	RTM_IFINFO                        = 0xe
+	RTM_INVALIDATE                    = 0x11
+	RTM_LOSING                        = 0x5
+	RTM_MAXSIZE                       = 0x800
+	RTM_MISS                          = 0x7
+	RTM_NEWADDR                       = 0xc
+	RTM_PROPOSAL                      = 0x13
+	RTM_REDIRECT                      = 0x6
+	RTM_RESOLVE                       = 0xb
+	RTM_SOURCE                        = 0x16
+	RTM_VERSION                       = 0x5
+	RTV_EXPIRE                        = 0x4
+	RTV_HOPCOUNT                      = 0x2
+	RTV_MTU                           = 0x1
+	RTV_RPIPE                         = 0x8
+	RTV_RTT                           = 0x40
+	RTV_RTTVAR                        = 0x80
+	RTV_SPIPE                         = 0x10
+	RTV_SSTHRESH                      = 0x20
+	RT_TABLEID_BITS                   = 0x8
+	RT_TABLEID_MASK                   = 0xff
+	RT_TABLEID_MAX                    = 0xff
+	RUSAGE_CHILDREN                   = -0x1
+	RUSAGE_SELF                       = 0x0
+	RUSAGE_THREAD                     = 0x1
+	SCM_RIGHTS                        = 0x1
+	SCM_TIMESTAMP                     = 0x4
+	SHUT_RD                           = 0x0
+	SHUT_RDWR                         = 0x2
+	SHUT_WR                           = 0x1
+	SIOCADDMULTI                      = 0x80206931
+	SIOCAIFADDR                       = 0x8040691a
+	SIOCAIFGROUP                      = 0x80286987
+	SIOCATMARK                        = 0x40047307
+	SIOCBRDGADD                       = 0x8060693c
+	SIOCBRDGADDL                      = 0x80606949
+	SIOCBRDGADDS                      = 0x80606941
+	SIOCBRDGARL                       = 0x808c694d
+	SIOCBRDGDADDR                     = 0x81286947
+	SIOCBRDGDEL                       = 0x8060693d
+	SIOCBRDGDELS                      = 0x80606942
+	SIOCBRDGFLUSH                     = 0x80606948
+	SIOCBRDGFRL                       = 0x808c694e
+	SIOCBRDGGCACHE                    = 0xc0146941
+	SIOCBRDGGFD                       = 0xc0146952
+	SIOCBRDGGHT                       = 0xc0146951
+	SIOCBRDGGIFFLGS                   = 0xc060693e
+	SIOCBRDGGMA                       = 0xc0146953
+	SIOCBRDGGPARAM                    = 0xc0406958
+	SIOCBRDGGPRI                      = 0xc0146950
+	SIOCBRDGGRL                       = 0xc030694f
+	SIOCBRDGGTO                       = 0xc0146946
+	SIOCBRDGIFS                       = 0xc0606942
+	SIOCBRDGRTS                       = 0xc0206943
+	SIOCBRDGSADDR                     = 0xc1286944
+	SIOCBRDGSCACHE                    = 0x80146940
+	SIOCBRDGSFD                       = 0x80146952
+	SIOCBRDGSHT                       = 0x80146951
+	SIOCBRDGSIFCOST                   = 0x80606955
+	SIOCBRDGSIFFLGS                   = 0x8060693f
+	SIOCBRDGSIFPRIO                   = 0x80606954
+	SIOCBRDGSIFPROT                   = 0x8060694a
+	SIOCBRDGSMA                       = 0x80146953
+	SIOCBRDGSPRI                      = 0x80146950
+	SIOCBRDGSPROTO                    = 0x8014695a
+	SIOCBRDGSTO                       = 0x80146945
+	SIOCBRDGSTXHC                     = 0x80146959
+	SIOCDELLABEL                      = 0x80206997
+	SIOCDELMULTI                      = 0x80206932
+	SIOCDIFADDR                       = 0x80206919
+	SIOCDIFGROUP                      = 0x80286989
+	SIOCDIFPARENT                     = 0x802069b4
+	SIOCDIFPHYADDR                    = 0x80206949
+	SIOCDPWE3NEIGHBOR                 = 0x802069de
+	SIOCDVNETID                       = 0x802069af
+	SIOCGETKALIVE                     = 0xc01869a4
+	SIOCGETLABEL                      = 0x8020699a
+	SIOCGETMPWCFG                     = 0xc02069ae
+	SIOCGETPFLOW                      = 0xc02069fe
+	SIOCGETPFSYNC                     = 0xc02069f8
+	SIOCGETSGCNT                      = 0xc0207534
+	SIOCGETVIFCNT                     = 0xc0287533
+	SIOCGETVLAN                       = 0xc0206990
+	SIOCGIFADDR                       = 0xc0206921
+	SIOCGIFBRDADDR                    = 0xc0206923
+	SIOCGIFCONF                       = 0xc0106924
+	SIOCGIFDATA                       = 0xc020691b
+	SIOCGIFDESCR                      = 0xc0206981
+	SIOCGIFDSTADDR                    = 0xc0206922
+	SIOCGIFFLAGS                      = 0xc0206911
+	SIOCGIFGATTR                      = 0xc028698b
+	SIOCGIFGENERIC                    = 0xc020693a
+	SIOCGIFGLIST                      = 0xc028698d
+	SIOCGIFGMEMB                      = 0xc028698a
+	SIOCGIFGROUP                      = 0xc0286988
+	SIOCGIFHARDMTU                    = 0xc02069a5
+	SIOCGIFLLPRIO                     = 0xc02069b6
+	SIOCGIFMEDIA                      = 0xc0406938
+	SIOCGIFMETRIC                     = 0xc0206917
+	SIOCGIFMTU                        = 0xc020697e
+	SIOCGIFNETMASK                    = 0xc0206925
+	SIOCGIFPAIR                       = 0xc02069b1
+	SIOCGIFPARENT                     = 0xc02069b3
+	SIOCGIFPRIORITY                   = 0xc020699c
+	SIOCGIFRDOMAIN                    = 0xc02069a0
+	SIOCGIFRTLABEL                    = 0xc0206983
+	SIOCGIFRXR                        = 0x802069aa
+	SIOCGIFSFFPAGE                    = 0xc1126939
+	SIOCGIFXFLAGS                     = 0xc020699e
+	SIOCGLIFPHYADDR                   = 0xc218694b
+	SIOCGLIFPHYDF                     = 0xc02069c2
+	SIOCGLIFPHYECN                    = 0xc02069c8
+	SIOCGLIFPHYRTABLE                 = 0xc02069a2
+	SIOCGLIFPHYTTL                    = 0xc02069a9
+	SIOCGPGRP                         = 0x40047309
+	SIOCGPWE3                         = 0xc0206998
+	SIOCGPWE3CTRLWORD                 = 0xc02069dc
+	SIOCGPWE3FAT                      = 0xc02069dd
+	SIOCGPWE3NEIGHBOR                 = 0xc21869de
+	SIOCGRXHPRIO                      = 0xc02069db
+	SIOCGSPPPPARAMS                   = 0xc0206994
+	SIOCGTXHPRIO                      = 0xc02069c6
+	SIOCGUMBINFO                      = 0xc02069be
+	SIOCGUMBPARAM                     = 0xc02069c0
+	SIOCGVH                           = 0xc02069f6
+	SIOCGVNETFLOWID                   = 0xc02069c4
+	SIOCGVNETID                       = 0xc02069a7
+	SIOCIFAFATTACH                    = 0x801169ab
+	SIOCIFAFDETACH                    = 0x801169ac
+	SIOCIFCREATE                      = 0x8020697a
+	SIOCIFDESTROY                     = 0x80206979
+	SIOCIFGCLONERS                    = 0xc0106978
+	SIOCSETKALIVE                     = 0x801869a3
+	SIOCSETLABEL                      = 0x80206999
+	SIOCSETMPWCFG                     = 0x802069ad
+	SIOCSETPFLOW                      = 0x802069fd
+	SIOCSETPFSYNC                     = 0x802069f7
+	SIOCSETVLAN                       = 0x8020698f
+	SIOCSIFADDR                       = 0x8020690c
+	SIOCSIFBRDADDR                    = 0x80206913
+	SIOCSIFDESCR                      = 0x80206980
+	SIOCSIFDSTADDR                    = 0x8020690e
+	SIOCSIFFLAGS                      = 0x80206910
+	SIOCSIFGATTR                      = 0x8028698c
+	SIOCSIFGENERIC                    = 0x80206939
+	SIOCSIFLLADDR                     = 0x8020691f
+	SIOCSIFLLPRIO                     = 0x802069b5
+	SIOCSIFMEDIA                      = 0xc0206937
+	SIOCSIFMETRIC                     = 0x80206918
+	SIOCSIFMTU                        = 0x8020697f
+	SIOCSIFNETMASK                    = 0x80206916
+	SIOCSIFPAIR                       = 0x802069b0
+	SIOCSIFPARENT                     = 0x802069b2
+	SIOCSIFPRIORITY                   = 0x8020699b
+	SIOCSIFRDOMAIN                    = 0x8020699f
+	SIOCSIFRTLABEL                    = 0x80206982
+	SIOCSIFXFLAGS                     = 0x8020699d
+	SIOCSLIFPHYADDR                   = 0x8218694a
+	SIOCSLIFPHYDF                     = 0x802069c1
+	SIOCSLIFPHYECN                    = 0x802069c7
+	SIOCSLIFPHYRTABLE                 = 0x802069a1
+	SIOCSLIFPHYTTL                    = 0x802069a8
+	SIOCSPGRP                         = 0x80047308
+	SIOCSPWE3CTRLWORD                 = 0x802069dc
+	SIOCSPWE3FAT                      = 0x802069dd
+	SIOCSPWE3NEIGHBOR                 = 0x821869de
+	SIOCSRXHPRIO                      = 0x802069db
+	SIOCSSPPPPARAMS                   = 0x80206993
+	SIOCSTXHPRIO                      = 0x802069c5
+	SIOCSUMBPARAM                     = 0x802069bf
+	SIOCSVH                           = 0xc02069f5
+	SIOCSVNETFLOWID                   = 0x802069c3
+	SIOCSVNETID                       = 0x802069a6
+	SOCK_CLOEXEC                      = 0x8000
+	SOCK_DGRAM                        = 0x2
+	SOCK_DNS                          = 0x1000
+	SOCK_NONBLOCK                     = 0x4000
+	SOCK_RAW                          = 0x3
+	SOCK_RDM                          = 0x4
+	SOCK_SEQPACKET                    = 0x5
+	SOCK_STREAM                       = 0x1
+	SOL_SOCKET                        = 0xffff
+	SOMAXCONN                         = 0x80
+	SO_ACCEPTCONN                     = 0x2
+	SO_BINDANY                        = 0x1000
+	SO_BROADCAST                      = 0x20
+	SO_DEBUG                          = 0x1
+	SO_DOMAIN                         = 0x1024
+	SO_DONTROUTE                      = 0x10
+	SO_ERROR                          = 0x1007
+	SO_KEEPALIVE                      = 0x8
+	SO_LINGER                         = 0x80
+	SO_NETPROC                        = 0x1020
+	SO_OOBINLINE                      = 0x100
+	SO_PEERCRED                       = 0x1022
+	SO_PROTOCOL                       = 0x1025
+	SO_RCVBUF                         = 0x1002
+	SO_RCVLOWAT                       = 0x1004
+	SO_RCVTIMEO                       = 0x1006
+	SO_REUSEADDR                      = 0x4
+	SO_REUSEPORT                      = 0x200
+	SO_RTABLE                         = 0x1021
+	SO_SNDBUF                         = 0x1001
+	SO_SNDLOWAT                       = 0x1003
+	SO_SNDTIMEO                       = 0x1005
+	SO_SPLICE                         = 0x1023
+	SO_TIMESTAMP                      = 0x800
+	SO_TYPE                           = 0x1008
+	SO_USELOOPBACK                    = 0x40
+	SO_ZEROIZE                        = 0x2000
+	TCIFLUSH                          = 0x1
+	TCIOFLUSH                         = 0x3
+	TCOFLUSH                          = 0x2
+	TCP_INFO                          = 0x9
+	TCP_MAXSEG                        = 0x2
+	TCP_MAXWIN                        = 0xffff
+	TCP_MAX_SACK                      = 0x3
+	TCP_MAX_WINSHIFT                  = 0xe
+	TCP_MD5SIG                        = 0x4
+	TCP_MSS                           = 0x200
+	TCP_NODELAY                       = 0x1
+	TCP_NOPUSH                        = 0x10
+	TCP_SACKHOLE_LIMIT                = 0x80
+	TCP_SACK_ENABLE                   = 0x8
+	TCSAFLUSH                         = 0x2
+	TIOCCBRK                          = 0x2000747a
+	TIOCCDTR                          = 0x20007478
+	TIOCCHKVERAUTH                    = 0x2000741e
+	TIOCCLRVERAUTH                    = 0x2000741d
+	TIOCCONS                          = 0x80047462
+	TIOCDRAIN                         = 0x2000745e
+	TIOCEXCL                          = 0x2000740d
+	TIOCEXT                           = 0x80047460
+	TIOCFLAG_CLOCAL                   = 0x2
+	TIOCFLAG_CRTSCTS                  = 0x4
+	TIOCFLAG_MDMBUF                   = 0x8
+	TIOCFLAG_PPS                      = 0x10
+	TIOCFLAG_SOFTCAR                  = 0x1
+	TIOCFLUSH                         = 0x80047410
+	TIOCGETA                          = 0x402c7413
+	TIOCGETD                          = 0x4004741a
+	TIOCGFLAGS                        = 0x4004745d
+	TIOCGPGRP                         = 0x40047477
+	TIOCGSID                          = 0x40047463
+	TIOCGTSTAMP                       = 0x4010745b
+	TIOCGWINSZ                        = 0x40087468
+	TIOCMBIC                          = 0x8004746b
+	TIOCMBIS                          = 0x8004746c
+	TIOCMGET                          = 0x4004746a
+	TIOCMODG                          = 0x4004746a
+	TIOCMODS                          = 0x8004746d
+	TIOCMSET                          = 0x8004746d
+	TIOCM_CAR                         = 0x40
+	TIOCM_CD                          = 0x40
+	TIOCM_CTS                         = 0x20
+	TIOCM_DSR                         = 0x100
+	TIOCM_DTR                         = 0x2
+	TIOCM_LE                          = 0x1
+	TIOCM_RI                          = 0x80
+	TIOCM_RNG                         = 0x80
+	TIOCM_RTS                         = 0x4
+	TIOCM_SR                          = 0x10
+	TIOCM_ST                          = 0x8
+	TIOCNOTTY                         = 0x20007471
+	TIOCNXCL                          = 0x2000740e
+	TIOCOUTQ                          = 0x40047473
+	TIOCPKT                           = 0x80047470
+	TIOCPKT_DATA                      = 0x0
+	TIOCPKT_DOSTOP                    = 0x20
+	TIOCPKT_FLUSHREAD                 = 0x1
+	TIOCPKT_FLUSHWRITE                = 0x2
+	TIOCPKT_IOCTL                     = 0x40
+	TIOCPKT_NOSTOP                    = 0x10
+	TIOCPKT_START                     = 0x8
+	TIOCPKT_STOP                      = 0x4
+	TIOCREMOTE                        = 0x80047469
+	TIOCSBRK                          = 0x2000747b
+	TIOCSCTTY                         = 0x20007461
+	TIOCSDTR                          = 0x20007479
+	TIOCSETA                          = 0x802c7414
+	TIOCSETAF                         = 0x802c7416
+	TIOCSETAW                         = 0x802c7415
+	TIOCSETD                          = 0x8004741b
+	TIOCSETVERAUTH                    = 0x8004741c
+	TIOCSFLAGS                        = 0x8004745c
+	TIOCSIG                           = 0x8004745f
+	TIOCSPGRP                         = 0x80047476
+	TIOCSTART                         = 0x2000746e
+	TIOCSTAT                          = 0x20007465
+	TIOCSTOP                          = 0x2000746f
+	TIOCSTSTAMP                       = 0x8008745a
+	TIOCSWINSZ                        = 0x80087467
+	TIOCUCNTL                         = 0x80047466
+	TIOCUCNTL_CBRK                    = 0x7a
+	TIOCUCNTL_SBRK                    = 0x7b
+	TOSTOP                            = 0x400000
+	VDISCARD                          = 0xf
+	VDSUSP                            = 0xb
+	VEOF                              = 0x0
+	VEOL                              = 0x1
+	VEOL2                             = 0x2
+	VERASE                            = 0x3
+	VINTR                             = 0x8
+	VKILL                             = 0x5
+	VLNEXT                            = 0xe
+	VMIN                              = 0x10
+	VQUIT                             = 0x9
+	VREPRINT                          = 0x6
+	VSTART                            = 0xc
+	VSTATUS                           = 0x12
+	VSTOP                             = 0xd
+	VSUSP                             = 0xa
+	VTIME                             = 0x11
+	VWERASE                           = 0x4
+	WALTSIG                           = 0x4
+	WCONTINUED                        = 0x8
+	WCOREFLAG                         = 0x80
+	WNOHANG                           = 0x1
+	WUNTRACED                         = 0x2
+)
+
+// Errors
+const (
+	E2BIG           = Errno(0x7)
+	EACCES          = Errno(0xd)
+	EADDRINUSE      = Errno(0x30)
+	EADDRNOTAVAIL   = Errno(0x31)
+	EAFNOSUPPORT    = Errno(0x2f)
+	EAGAIN          = Errno(0x23)
+	EALREADY        = Errno(0x25)
+	EAUTH           = Errno(0x50)
+	EBADF           = Errno(0x9)
+	EBADMSG         = Errno(0x5c)
+	EBADRPC         = Errno(0x48)
+	EBUSY           = Errno(0x10)
+	ECANCELED       = Errno(0x58)
+	ECHILD          = Errno(0xa)
+	ECONNABORTED    = Errno(0x35)
+	ECONNREFUSED    = Errno(0x3d)
+	ECONNRESET      = Errno(0x36)
+	EDEADLK         = Errno(0xb)
+	EDESTADDRREQ    = Errno(0x27)
+	EDOM            = Errno(0x21)
+	EDQUOT          = Errno(0x45)
+	EEXIST          = Errno(0x11)
+	EFAULT          = Errno(0xe)
+	EFBIG           = Errno(0x1b)
+	EFTYPE          = Errno(0x4f)
+	EHOSTDOWN       = Errno(0x40)
+	EHOSTUNREACH    = Errno(0x41)
+	EIDRM           = Errno(0x59)
+	EILSEQ          = Errno(0x54)
+	EINPROGRESS     = Errno(0x24)
+	EINTR           = Errno(0x4)
+	EINVAL          = Errno(0x16)
+	EIO             = Errno(0x5)
+	EIPSEC          = Errno(0x52)
+	EISCONN         = Errno(0x38)
+	EISDIR          = Errno(0x15)
+	ELAST           = Errno(0x5f)
+	ELOOP           = Errno(0x3e)
+	EMEDIUMTYPE     = Errno(0x56)
+	EMFILE          = Errno(0x18)
+	EMLINK          = Errno(0x1f)
+	EMSGSIZE        = Errno(0x28)
+	ENAMETOOLONG    = Errno(0x3f)
+	ENEEDAUTH       = Errno(0x51)
+	ENETDOWN        = Errno(0x32)
+	ENETRESET       = Errno(0x34)
+	ENETUNREACH     = Errno(0x33)
+	ENFILE          = Errno(0x17)
+	ENOATTR         = Errno(0x53)
+	ENOBUFS         = Errno(0x37)
+	ENODEV          = Errno(0x13)
+	ENOENT          = Errno(0x2)
+	ENOEXEC         = Errno(0x8)
+	ENOLCK          = Errno(0x4d)
+	ENOMEDIUM       = Errno(0x55)
+	ENOMEM          = Errno(0xc)
+	ENOMSG          = Errno(0x5a)
+	ENOPROTOOPT     = Errno(0x2a)
+	ENOSPC          = Errno(0x1c)
+	ENOSYS          = Errno(0x4e)
+	ENOTBLK         = Errno(0xf)
+	ENOTCONN        = Errno(0x39)
+	ENOTDIR         = Errno(0x14)
+	ENOTEMPTY       = Errno(0x42)
+	ENOTRECOVERABLE = Errno(0x5d)
+	ENOTSOCK        = Errno(0x26)
+	ENOTSUP         = Errno(0x5b)
+	ENOTTY          = Errno(0x19)
+	ENXIO           = Errno(0x6)
+	EOPNOTSUPP      = Errno(0x2d)
+	EOVERFLOW       = Errno(0x57)
+	EOWNERDEAD      = Errno(0x5e)
+	EPERM           = Errno(0x1)
+	EPFNOSUPPORT    = Errno(0x2e)
+	EPIPE           = Errno(0x20)
+	EPROCLIM        = Errno(0x43)
+	EPROCUNAVAIL    = Errno(0x4c)
+	EPROGMISMATCH   = Errno(0x4b)
+	EPROGUNAVAIL    = Errno(0x4a)
+	EPROTO          = Errno(0x5f)
+	EPROTONOSUPPORT = Errno(0x2b)
+	EPROTOTYPE      = Errno(0x29)
+	ERANGE          = Errno(0x22)
+	EREMOTE         = Errno(0x47)
+	EROFS           = Errno(0x1e)
+	ERPCMISMATCH    = Errno(0x49)
+	ESHUTDOWN       = Errno(0x3a)
+	ESOCKTNOSUPPORT = Errno(0x2c)
+	ESPIPE          = Errno(0x1d)
+	ESRCH           = Errno(0x3)
+	ESTALE          = Errno(0x46)
+	ETIMEDOUT       = Errno(0x3c)
+	ETOOMANYREFS    = Errno(0x3b)
+	ETXTBSY         = Errno(0x1a)
+	EUSERS          = Errno(0x44)
+	EWOULDBLOCK     = Errno(0x23)
+	EXDEV           = Errno(0x12)
+)
+
+// Signals
+const (
+	SIGABRT   = Signal(0x6)
+	SIGALRM   = Signal(0xe)
+	SIGBUS    = Signal(0xa)
+	SIGCHLD   = Signal(0x14)
+	SIGCONT   = Signal(0x13)
+	SIGEMT    = Signal(0x7)
+	SIGFPE    = Signal(0x8)
+	SIGHUP    = Signal(0x1)
+	SIGILL    = Signal(0x4)
+	SIGINFO   = Signal(0x1d)
+	SIGINT    = Signal(0x2)
+	SIGIO     = Signal(0x17)
+	SIGIOT    = Signal(0x6)
+	SIGKILL   = Signal(0x9)
+	SIGPIPE   = Signal(0xd)
+	SIGPROF   = Signal(0x1b)
+	SIGQUIT   = Signal(0x3)
+	SIGSEGV   = Signal(0xb)
+	SIGSTOP   = Signal(0x11)
+	SIGSYS    = Signal(0xc)
+	SIGTERM   = Signal(0xf)
+	SIGTHR    = Signal(0x20)
+	SIGTRAP   = Signal(0x5)
+	SIGTSTP   = Signal(0x12)
+	SIGTTIN   = Signal(0x15)
+	SIGTTOU   = Signal(0x16)
+	SIGURG    = Signal(0x10)
+	SIGUSR1   = Signal(0x1e)
+	SIGUSR2   = Signal(0x1f)
+	SIGVTALRM = Signal(0x1a)
+	SIGWINCH  = Signal(0x1c)
+	SIGXCPU   = Signal(0x18)
+	SIGXFSZ   = Signal(0x19)
+)
+
+// Error table
+var errors = [...]string{
+	1:  "operation not permitted",
+	2:  "no such file or directory",
+	3:  "no such process",
+	4:  "interrupted system call",
+	5:  "input/output error",
+	6:  "device not configured",
+	7:  "argument list too long",
+	8:  "exec format error",
+	9:  "bad file descriptor",
+	10: "no child processes",
+	11: "resource deadlock avoided",
+	12: "cannot allocate memory",
+	13: "permission denied",
+	14: "bad address",
+	15: "block device required",
+	16: "device busy",
+	17: "file exists",
+	18: "cross-device link",
+	19: "operation not supported by device",
+	20: "not a directory",
+	21: "is a directory",
+	22: "invalid argument",
+	23: "too many open files in system",
+	24: "too many open files",
+	25: "inappropriate ioctl for device",
+	26: "text file busy",
+	27: "file too large",
+	28: "no space left on device",
+	29: "illegal seek",
+	30: "read-only file system",
+	31: "too many links",
+	32: "broken pipe",
+	33: "numerical argument out of domain",
+	34: "result too large",
+	35: "resource temporarily unavailable",
+	36: "operation now in progress",
+	37: "operation already in progress",
+	38: "socket operation on non-socket",
+	39: "destination address required",
+	40: "message too long",
+	41: "protocol wrong type for socket",
+	42: "protocol not available",
+	43: "protocol not supported",
+	44: "socket type not supported",
+	45: "operation not supported",
+	46: "protocol family not supported",
+	47: "address family not supported by protocol family",
+	48: "address already in use",
+	49: "can't assign requested address",
+	50: "network is down",
+	51: "network is unreachable",
+	52: "network dropped connection on reset",
+	53: "software caused connection abort",
+	54: "connection reset by peer",
+	55: "no buffer space available",
+	56: "socket is already connected",
+	57: "socket is not connected",
+	58: "can't send after socket shutdown",
+	59: "too many references: can't splice",
+	60: "operation timed out",
+	61: "connection refused",
+	62: "too many levels of symbolic links",
+	63: "file name too long",
+	64: "host is down",
+	65: "no route to host",
+	66: "directory not empty",
+	67: "too many processes",
+	68: "too many users",
+	69: "disk quota exceeded",
+	70: "stale NFS file handle",
+	71: "too many levels of remote in path",
+	72: "RPC struct is bad",
+	73: "RPC version wrong",
+	74: "RPC program not available",
+	75: "program version wrong",
+	76: "bad procedure for program",
+	77: "no locks available",
+	78: "function not implemented",
+	79: "inappropriate file type or format",
+	80: "authentication error",
+	81: "need authenticator",
+	82: "IPsec processing failure",
+	83: "attribute not found",
+	84: "illegal byte sequence",
+	85: "no medium found",
+	86: "wrong medium type",
+	87: "value too large to be stored in data type",
+	88: "operation canceled",
+	89: "identifier removed",
+	90: "no message of desired type",
+	91: "not supported",
+	92: "bad message",
+	93: "state not recoverable",
+	94: "previous owner died",
+	95: "protocol error",
+}
+
+// Signal table
+var signals = [...]string{
+	1:  "hangup",
+	2:  "interrupt",
+	3:  "quit",
+	4:  "illegal instruction",
+	5:  "trace/BPT trap",
+	6:  "abort trap",
+	7:  "EMT trap",
+	8:  "floating point exception",
+	9:  "killed",
+	10: "bus error",
+	11: "segmentation fault",
+	12: "bad system call",
+	13: "broken pipe",
+	14: "alarm clock",
+	15: "terminated",
+	16: "urgent I/O condition",
+	17: "suspended (signal)",
+	18: "suspended",
+	19: "continued",
+	20: "child exited",
+	21: "stopped (tty input)",
+	22: "stopped (tty output)",
+	23: "I/O possible",
+	24: "cputime limit exceeded",
+	25: "filesize limit exceeded",
+	26: "virtual timer expired",
+	27: "profiling timer expired",
+	28: "window size changes",
+	29: "information request",
+	30: "user defined signal 1",
+	31: "user defined signal 2",
+	32: "thread AST",
+}
diff --git a/src/syscall/zsyscall_aix_ppc64.go b/src/syscall/zsyscall_aix_ppc64.go
index fb36fd3..111e671 100644
--- a/src/syscall/zsyscall_aix_ppc64.go
+++ b/src/syscall/zsyscall_aix_ppc64.go
@@ -87,7 +87,7 @@
 //go:cgo_import_dynamic libc_Setpriority setpriority "libc.a/shr_64.o"
 //go:cgo_import_dynamic libc_Setregid setregid "libc.a/shr_64.o"
 //go:cgo_import_dynamic libc_Setreuid setreuid "libc.a/shr_64.o"
-//go:cgo_import_dynamic libc_Setrlimit setrlimit "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o"
 //go:cgo_import_dynamic libc_Stat stat "libc.a/shr_64.o"
 //go:cgo_import_dynamic libc_Statfs statfs "libc.a/shr_64.o"
 //go:cgo_import_dynamic libc_Symlink symlink "libc.a/shr_64.o"
@@ -181,7 +181,7 @@
 //go:linkname libc_Setpriority libc_Setpriority
 //go:linkname libc_Setregid libc_Setregid
 //go:linkname libc_Setreuid libc_Setreuid
-//go:linkname libc_Setrlimit libc_Setrlimit
+//go:linkname libc_setrlimit libc_setrlimit
 //go:linkname libc_Stat libc_Stat
 //go:linkname libc_Statfs libc_Statfs
 //go:linkname libc_Symlink libc_Symlink
@@ -278,7 +278,7 @@
 	libc_Setpriority,
 	libc_Setregid,
 	libc_Setreuid,
-	libc_Setrlimit,
+	libc_setrlimit,
 	libc_Stat,
 	libc_Statfs,
 	libc_Symlink,
@@ -1272,7 +1272,7 @@
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func setrlimit(which int, lim *Rlimit) (err error) {
-	_, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0)
+	_, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_setrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/src/syscall/zsyscall_darwin_amd64.go b/src/syscall/zsyscall_darwin_amd64.go
index 83680b3..8812fb1 100644
--- a/src/syscall/zsyscall_darwin_amd64.go
+++ b/src/syscall/zsyscall_darwin_amd64.go
@@ -7,6 +7,7 @@
 
 import "unsafe"
 import "internal/abi"
+import "runtime"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -330,6 +331,41 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ioctl_trampoline()
+
+//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe(p *[2]int32) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0)
 	if e1 != 0 {
@@ -1749,30 +1785,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func ioctl(fd int, req int, arg int) (err error) {
-	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-func libc_ioctl_trampoline()
-
-//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
-	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func execve(path *byte, argv **byte, envp **byte) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp)))
 	if e1 != 0 {
@@ -1821,17 +1833,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
-	val = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func unlinkat(fd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -2011,7 +2012,10 @@
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 //go:nosplit
-func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) {
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+	if runtime.GOOS == "ios" {
+		panic("unimplemented")
+	}
 	_, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2022,14 +2026,3 @@
 func libc_ptrace_trampoline()
 
 //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-//go:nosplit
-func ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) {
-	_, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
diff --git a/src/syscall/zsyscall_darwin_amd64.s b/src/syscall/zsyscall_darwin_amd64.s
index 90e51fb..3dc0e4e 100644
--- a/src/syscall/zsyscall_darwin_amd64.s
+++ b/src/syscall/zsyscall_darwin_amd64.s
@@ -49,6 +49,8 @@
 	JMP	libc_futimes(SB)
 TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
+TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_ioctl(SB)
 TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_pipe(SB)
 TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0
@@ -223,8 +225,6 @@
 	JMP	libc_munmap(SB)
 TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fork(SB)
-TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
-	JMP	libc_ioctl(SB)
 TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_execve(SB)
 TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
diff --git a/src/syscall/zsyscall_darwin_arm64.go b/src/syscall/zsyscall_darwin_arm64.go
index 08da237..22b0963 100644
--- a/src/syscall/zsyscall_darwin_arm64.go
+++ b/src/syscall/zsyscall_darwin_arm64.go
@@ -7,6 +7,7 @@
 
 import "unsafe"
 import "internal/abi"
+import "runtime"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -330,6 +331,41 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ioctl_trampoline()
+
+//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe(p *[2]int32) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0)
 	if e1 != 0 {
@@ -1749,30 +1785,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func ioctl(fd int, req int, arg int) (err error) {
-	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-func libc_ioctl_trampoline()
-
-//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
-	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func execve(path *byte, argv **byte, envp **byte) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp)))
 	if e1 != 0 {
@@ -1821,17 +1833,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
-	val = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func unlinkat(fd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -2011,7 +2012,10 @@
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 //go:nosplit
-func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) {
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+	if runtime.GOOS == "ios" {
+		panic("unimplemented")
+	}
 	_, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2022,14 +2026,3 @@
 func libc_ptrace_trampoline()
 
 //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-//go:nosplit
-func ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) {
-	_, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
diff --git a/src/syscall/zsyscall_darwin_arm64.s b/src/syscall/zsyscall_darwin_arm64.s
index f007479..5ac7440 100644
--- a/src/syscall/zsyscall_darwin_arm64.s
+++ b/src/syscall/zsyscall_darwin_arm64.s
@@ -49,6 +49,8 @@
 	JMP	libc_futimes(SB)
 TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
+TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_ioctl(SB)
 TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_pipe(SB)
 TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0
@@ -223,8 +225,6 @@
 	JMP	libc_munmap(SB)
 TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fork(SB)
-TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
-	JMP	libc_ioctl(SB)
 TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_execve(SB)
 TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
diff --git a/src/syscall/zsyscall_dragonfly_amd64.go b/src/syscall/zsyscall_dragonfly_amd64.go
index a591682..1bbc9d6 100644
--- a/src/syscall/zsyscall_dragonfly_amd64.go
+++ b/src/syscall/zsyscall_dragonfly_amd64.go
@@ -209,22 +209,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimes(path string, timeval *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -261,6 +245,37 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe() (r int, w int, err error) {
 	r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
 	r = int(r0)
@@ -1284,17 +1299,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
@@ -1335,3 +1339,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/syscall/zsyscall_freebsd_386.go b/src/syscall/zsyscall_freebsd_386.go
index e729adc..b948c95 100644
--- a/src/syscall/zsyscall_freebsd_386.go
+++ b/src/syscall/zsyscall_freebsd_386.go
@@ -209,22 +209,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimes(path string, timeval *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -261,6 +245,37 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1255,17 +1270,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
@@ -1306,3 +1310,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/syscall/zsyscall_freebsd_amd64.go b/src/syscall/zsyscall_freebsd_amd64.go
index df0b817..f5b7371 100644
--- a/src/syscall/zsyscall_freebsd_amd64.go
+++ b/src/syscall/zsyscall_freebsd_amd64.go
@@ -209,22 +209,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimes(path string, timeval *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -261,6 +245,37 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1255,17 +1270,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
@@ -1306,3 +1310,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/syscall/zsyscall_freebsd_arm.go b/src/syscall/zsyscall_freebsd_arm.go
index abd4bf1..330f709 100644
--- a/src/syscall/zsyscall_freebsd_arm.go
+++ b/src/syscall/zsyscall_freebsd_arm.go
@@ -209,22 +209,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimes(path string, timeval *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -261,6 +245,37 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1255,17 +1270,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
@@ -1306,3 +1310,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/syscall/zsyscall_freebsd_arm64.go b/src/syscall/zsyscall_freebsd_arm64.go
index b86cfb5..46a0fd3 100644
--- a/src/syscall/zsyscall_freebsd_arm64.go
+++ b/src/syscall/zsyscall_freebsd_arm64.go
@@ -209,22 +209,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimes(path string, timeval *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -261,6 +245,37 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1255,17 +1270,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
@@ -1306,3 +1310,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/syscall/zsyscall_freebsd_riscv64.go b/src/syscall/zsyscall_freebsd_riscv64.go
index ad195e8..f0eceb7e 100644
--- a/src/syscall/zsyscall_freebsd_riscv64.go
+++ b/src/syscall/zsyscall_freebsd_riscv64.go
@@ -209,22 +209,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimes(path string, timeval *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -261,6 +245,37 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1255,17 +1270,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
 	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
 	nfd = int(r0)
@@ -1306,3 +1310,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/syscall/zsyscall_linux_386.go b/src/syscall/zsyscall_linux_386.go
index 8e0de44..661cfe7 100644
--- a/src/syscall/zsyscall_linux_386.go
+++ b/src/syscall/zsyscall_linux_386.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_amd64.go b/src/syscall/zsyscall_linux_amd64.go
index c3ee372..9d05781 100644
--- a/src/syscall/zsyscall_linux_amd64.go
+++ b/src/syscall/zsyscall_linux_amd64.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_arm.go b/src/syscall/zsyscall_linux_arm.go
index a72355f..a386120 100644
--- a/src/syscall/zsyscall_linux_arm.go
+++ b/src/syscall/zsyscall_linux_arm.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_arm64.go b/src/syscall/zsyscall_linux_arm64.go
index b7c7a38..a1c9107 100644
--- a/src/syscall/zsyscall_linux_arm64.go
+++ b/src/syscall/zsyscall_linux_arm64.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_loong64.go b/src/syscall/zsyscall_linux_loong64.go
index 51ebfc0..e32c87d 100644
--- a/src/syscall/zsyscall_linux_loong64.go
+++ b/src/syscall/zsyscall_linux_loong64.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_mips.go b/src/syscall/zsyscall_linux_mips.go
index af75dc7..7e216b0 100644
--- a/src/syscall/zsyscall_linux_mips.go
+++ b/src/syscall/zsyscall_linux_mips.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_mips64.go b/src/syscall/zsyscall_linux_mips64.go
index c667300..8c894b1 100644
--- a/src/syscall/zsyscall_linux_mips64.go
+++ b/src/syscall/zsyscall_linux_mips64.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_mips64le.go b/src/syscall/zsyscall_linux_mips64le.go
index 407bea7..812a6ba 100644
--- a/src/syscall/zsyscall_linux_mips64le.go
+++ b/src/syscall/zsyscall_linux_mips64le.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_mipsle.go b/src/syscall/zsyscall_linux_mipsle.go
index 7c5ea99..d32a8af 100644
--- a/src/syscall/zsyscall_linux_mipsle.go
+++ b/src/syscall/zsyscall_linux_mipsle.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_ppc64.go b/src/syscall/zsyscall_linux_ppc64.go
index 1e82f4b..c321267 100644
--- a/src/syscall/zsyscall_linux_ppc64.go
+++ b/src/syscall/zsyscall_linux_ppc64.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_ppc64le.go b/src/syscall/zsyscall_linux_ppc64le.go
index ddc1c81..40475d7 100644
--- a/src/syscall/zsyscall_linux_ppc64le.go
+++ b/src/syscall/zsyscall_linux_ppc64le.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_riscv64.go b/src/syscall/zsyscall_linux_riscv64.go
index c59cad0..dc74acf 100644
--- a/src/syscall/zsyscall_linux_riscv64.go
+++ b/src/syscall/zsyscall_linux_riscv64.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_linux_s390x.go b/src/syscall/zsyscall_linux_s390x.go
index 29be487..cc189d9 100644
--- a/src/syscall/zsyscall_linux_s390x.go
+++ b/src/syscall/zsyscall_linux_s390x.go
@@ -54,6 +54,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(_SYS_fchmodat2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
@@ -977,17 +992,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, p *byte, np int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func munmap(addr uintptr, length uintptr) (err error) {
 	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
 	if e1 != 0 {
diff --git a/src/syscall/zsyscall_netbsd_386.go b/src/syscall/zsyscall_netbsd_386.go
index e401d02..6ca20b4 100644
--- a/src/syscall/zsyscall_netbsd_386.go
+++ b/src/syscall/zsyscall_netbsd_386.go
@@ -209,22 +209,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimes(path string, timeval *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -261,6 +245,37 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1218,17 +1233,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1258,3 +1262,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/syscall/zsyscall_netbsd_amd64.go b/src/syscall/zsyscall_netbsd_amd64.go
index a4f217b..1fe69bc 100644
--- a/src/syscall/zsyscall_netbsd_amd64.go
+++ b/src/syscall/zsyscall_netbsd_amd64.go
@@ -209,22 +209,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimes(path string, timeval *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -261,6 +245,37 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1218,17 +1233,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1258,3 +1262,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/syscall/zsyscall_netbsd_arm.go b/src/syscall/zsyscall_netbsd_arm.go
index 1f55614..fd082bf 100644
--- a/src/syscall/zsyscall_netbsd_arm.go
+++ b/src/syscall/zsyscall_netbsd_arm.go
@@ -209,22 +209,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimes(path string, timeval *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -261,6 +245,37 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1218,17 +1233,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1258,3 +1262,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/syscall/zsyscall_netbsd_arm64.go b/src/syscall/zsyscall_netbsd_arm64.go
index be33fcd..8065c8c 100644
--- a/src/syscall/zsyscall_netbsd_arm64.go
+++ b/src/syscall/zsyscall_netbsd_arm64.go
@@ -209,22 +209,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimes(path string, timeval *[2]Timeval) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -261,6 +245,37 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := RawSyscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1218,17 +1233,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1258,3 +1262,19 @@
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/src/syscall/zsyscall_openbsd_386.go b/src/syscall/zsyscall_openbsd_386.go
index 714fa7e..084b4b7 100644
--- a/src/syscall/zsyscall_openbsd_386.go
+++ b/src/syscall/zsyscall_openbsd_386.go
@@ -330,6 +330,41 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ioctl_trampoline()
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_pipe2_trampoline), uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1660,6 +1695,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getfsstat_trampoline), uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getfsstat_trampoline()
+
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1705,17 +1755,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
 	r0, r1, e1 := syscall6X(abi.FuncPCABI0(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0)
 	newoffset = int64(int64(r1)<<32 | int64(r0))
@@ -1787,20 +1826,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func ioctl(fd int, req int, arg int) (err error) {
-	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-func libc_ioctl_trampoline()
-
-//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func execve(path *byte, argv **byte, envp **byte) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp)))
 	if e1 != 0 {
@@ -1844,17 +1869,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-//go:nosplit
-func ptracePtr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) {
-	_, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func getentropy(p []byte) (err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1894,17 +1908,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
-	val = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func unlinkat(fd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/src/syscall/zsyscall_openbsd_386.s b/src/syscall/zsyscall_openbsd_386.s
index a77d931..319ad20 100644
--- a/src/syscall/zsyscall_openbsd_386.s
+++ b/src/syscall/zsyscall_openbsd_386.s
@@ -43,6 +43,8 @@
 	JMP	libc_futimes(SB)
 TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
+TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_ioctl(SB)
 TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_pipe2(SB)
 TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0
@@ -207,6 +209,8 @@
 	JMP	libc_mmap(SB)
 TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
+TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
 TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
 TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0
@@ -219,8 +223,6 @@
 	JMP	libc_sysctl(SB)
 TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fork(SB)
-TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
-	JMP	libc_ioctl(SB)
 TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_execve(SB)
 TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
diff --git a/src/syscall/zsyscall_openbsd_amd64.go b/src/syscall/zsyscall_openbsd_amd64.go
index f948d64..5a7b4c1 100644
--- a/src/syscall/zsyscall_openbsd_amd64.go
+++ b/src/syscall/zsyscall_openbsd_amd64.go
@@ -330,6 +330,41 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ioctl_trampoline()
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_pipe2_trampoline), uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1660,6 +1695,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getfsstat_trampoline), uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getfsstat_trampoline()
+
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1705,17 +1755,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
 	r0, _, e1 := syscallX(abi.FuncPCABI0(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence))
 	newoffset = int64(r0)
@@ -1787,20 +1826,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func ioctl(fd int, req int, arg int) (err error) {
-	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-func libc_ioctl_trampoline()
-
-//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func execve(path *byte, argv **byte, envp **byte) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp)))
 	if e1 != 0 {
@@ -1844,17 +1869,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-//go:nosplit
-func ptracePtr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) {
-	_, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func getentropy(p []byte) (err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1894,17 +1908,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
-	val = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func unlinkat(fd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/src/syscall/zsyscall_openbsd_amd64.s b/src/syscall/zsyscall_openbsd_amd64.s
index d13e9c0..c0e3977 100644
--- a/src/syscall/zsyscall_openbsd_amd64.s
+++ b/src/syscall/zsyscall_openbsd_amd64.s
@@ -43,6 +43,8 @@
 	JMP	libc_futimes(SB)
 TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
+TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_ioctl(SB)
 TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_pipe2(SB)
 TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0
@@ -207,6 +209,8 @@
 	JMP	libc_mmap(SB)
 TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
+TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
 TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
 TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0
@@ -219,8 +223,6 @@
 	JMP	libc_sysctl(SB)
 TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fork(SB)
-TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
-	JMP	libc_ioctl(SB)
 TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_execve(SB)
 TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
diff --git a/src/syscall/zsyscall_openbsd_arm.go b/src/syscall/zsyscall_openbsd_arm.go
index f56fcc4..66a3227 100644
--- a/src/syscall/zsyscall_openbsd_arm.go
+++ b/src/syscall/zsyscall_openbsd_arm.go
@@ -330,6 +330,41 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ioctl_trampoline()
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_pipe2_trampoline), uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1660,6 +1695,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getfsstat_trampoline), uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getfsstat_trampoline()
+
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1705,17 +1755,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
 	r0, r1, e1 := syscall6X(abi.FuncPCABI0(libc_lseek_trampoline), uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0)
 	newoffset = int64(int64(r1)<<32 | int64(r0))
@@ -1787,20 +1826,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func ioctl(fd int, req int, arg int) (err error) {
-	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-func libc_ioctl_trampoline()
-
-//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func execve(path *byte, argv **byte, envp **byte) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp)))
 	if e1 != 0 {
@@ -1844,17 +1869,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-//go:nosplit
-func ptracePtr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) {
-	_, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func getentropy(p []byte) (err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1894,17 +1908,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
-	val = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func unlinkat(fd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/src/syscall/zsyscall_openbsd_arm.s b/src/syscall/zsyscall_openbsd_arm.s
index 34ae794..73b6a09 100644
--- a/src/syscall/zsyscall_openbsd_arm.s
+++ b/src/syscall/zsyscall_openbsd_arm.s
@@ -43,6 +43,8 @@
 	JMP	libc_futimes(SB)
 TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
+TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_ioctl(SB)
 TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_pipe2(SB)
 TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0
@@ -207,6 +209,8 @@
 	JMP	libc_mmap(SB)
 TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
+TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
 TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
 TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0
@@ -219,8 +223,6 @@
 	JMP	libc_sysctl(SB)
 TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fork(SB)
-TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
-	JMP	libc_ioctl(SB)
 TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_execve(SB)
 TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
diff --git a/src/syscall/zsyscall_openbsd_arm64.go b/src/syscall/zsyscall_openbsd_arm64.go
index c0d8f5c..a90f144 100644
--- a/src/syscall/zsyscall_openbsd_arm64.go
+++ b/src/syscall/zsyscall_openbsd_arm64.go
@@ -330,6 +330,41 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ioctl_trampoline()
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_pipe2_trampoline), uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -1660,6 +1695,21 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getfsstat_trampoline), uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getfsstat_trampoline()
+
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1705,17 +1755,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
 	r0, _, e1 := syscallX(abi.FuncPCABI0(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence))
 	newoffset = int64(r0)
@@ -1787,20 +1826,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func ioctl(fd int, req int, arg int) (err error) {
-	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-func libc_ioctl_trampoline()
-
-//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func execve(path *byte, argv **byte, envp **byte) (err error) {
 	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp)))
 	if e1 != 0 {
@@ -1844,17 +1869,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-//go:nosplit
-func ptracePtr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) {
-	_, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func getentropy(p []byte) (err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1894,17 +1908,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
-	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
-	val = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func unlinkat(fd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/src/syscall/zsyscall_openbsd_arm64.s b/src/syscall/zsyscall_openbsd_arm64.s
index 20a79e4..6665669 100644
--- a/src/syscall/zsyscall_openbsd_arm64.s
+++ b/src/syscall/zsyscall_openbsd_arm64.s
@@ -43,6 +43,8 @@
 	JMP	libc_futimes(SB)
 TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
+TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_ioctl(SB)
 TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_pipe2(SB)
 TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0
@@ -207,6 +209,8 @@
 	JMP	libc_mmap(SB)
 TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
+TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
 TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
 TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0
@@ -219,8 +223,6 @@
 	JMP	libc_sysctl(SB)
 TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_fork(SB)
-TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
-	JMP	libc_ioctl(SB)
 TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
 	JMP	libc_execve(SB)
 TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
diff --git a/src/syscall/zsyscall_openbsd_mips64.go b/src/syscall/zsyscall_openbsd_mips64.go
index 2a91c36..1fbb2d1 100644
--- a/src/syscall/zsyscall_openbsd_mips64.go
+++ b/src/syscall/zsyscall_openbsd_mips64.go
@@ -1242,17 +1242,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/src/syscall/zsyscall_openbsd_ppc64.go b/src/syscall/zsyscall_openbsd_ppc64.go
new file mode 100644
index 0000000..661c895
--- /dev/null
+++ b/src/syscall/zsyscall_openbsd_ppc64.go
@@ -0,0 +1,1946 @@
+// mksyscall.pl -openbsd -libc -tags openbsd,ppc64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_libc.go syscall_openbsd_ppc64.go
+// Code generated by the command above; DO NOT EDIT.
+
+//go:build openbsd && ppc64
+
+package syscall
+
+import "unsafe"
+import "internal/abi"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getgroups_trampoline()
+
+//go:cgo_import_dynamic libc_getgroups getgroups "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(ngid int, gid *_Gid_t) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setgroups_trampoline()
+
+//go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+	wpid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_wait4_trampoline()
+
+//go:cgo_import_dynamic libc_wait4 wait4 "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_accept_trampoline()
+
+//go:cgo_import_dynamic libc_accept accept "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_bind_trampoline()
+
+//go:cgo_import_dynamic libc_bind bind "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_connect_trampoline()
+
+//go:cgo_import_dynamic libc_connect connect "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_socket_trampoline()
+
+//go:cgo_import_dynamic libc_socket socket "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getsockopt_trampoline()
+
+//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setsockopt_trampoline()
+
+//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getpeername_trampoline()
+
+//go:cgo_import_dynamic libc_getpeername getpeername "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getsockname_trampoline()
+
+//go:cgo_import_dynamic libc_getsockname getsockname "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(s int, how int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_shutdown_trampoline()
+
+//go:cgo_import_dynamic libc_shutdown shutdown "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+	_, _, e1 := rawSyscall6(abi.FuncPCABI0(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_socketpair_trampoline()
+
+//go:cgo_import_dynamic libc_socketpair socketpair "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_recvfrom_trampoline()
+
+//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_sendto_trampoline()
+
+//go:cgo_import_dynamic libc_sendto sendto "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_recvmsg_trampoline()
+
+//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_sendmsg_trampoline()
+
+//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_kevent_trampoline()
+
+//go:cgo_import_dynamic libc_kevent kevent "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, timeval *[2]Timeval) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_utimes_trampoline()
+
+//go:cgo_import_dynamic libc_utimes utimes "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimes(fd int, timeval *[2]Timeval) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_futimes_trampoline()
+
+//go:cgo_import_dynamic libc_futimes futimes "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fcntl_trampoline()
+
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ioctl_trampoline()
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe2(p *[2]_C_int, flags int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_pipe2_trampoline), uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_pipe2_trampoline()
+
+//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_accept4_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+	nfd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_accept4_trampoline()
+
+//go:cgo_import_dynamic libc_accept4 accept4 "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getdents(fd int, buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getdents_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getdents_trampoline()
+
+//go:cgo_import_dynamic libc_getdents getdents "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Access(path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_access_trampoline()
+
+//go:cgo_import_dynamic libc_access access "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_adjtime_trampoline()
+
+//go:cgo_import_dynamic libc_adjtime adjtime "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_chdir_trampoline()
+
+//go:cgo_import_dynamic libc_chdir chdir "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chflags(path string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_chflags_trampoline()
+
+//go:cgo_import_dynamic libc_chflags chflags "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chmod(path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_chmod_trampoline()
+
+//go:cgo_import_dynamic libc_chmod chmod "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chown(path string, uid int, gid int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_chown_trampoline()
+
+//go:cgo_import_dynamic libc_chown chown "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_chroot_trampoline()
+
+//go:cgo_import_dynamic libc_chroot chroot "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_close_trampoline), uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_close_trampoline()
+
+//go:cgo_import_dynamic libc_close close "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(fd int) (nfd int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_dup_trampoline), uintptr(fd), 0, 0)
+	nfd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_dup_trampoline()
+
+//go:cgo_import_dynamic libc_dup dup "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup2(from int, to int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_dup2_trampoline), uintptr(from), uintptr(to), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_dup2_trampoline()
+
+//go:cgo_import_dynamic libc_dup2 dup2 "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func dup3(from int, to int, flags int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_dup3_trampoline), uintptr(from), uintptr(to), uintptr(flags))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_dup3_trampoline()
+
+//go:cgo_import_dynamic libc_dup3 dup3 "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fchdir_trampoline), uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fchdir_trampoline()
+
+//go:cgo_import_dynamic libc_fchdir fchdir "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchflags(fd int, flags int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fchflags_trampoline()
+
+//go:cgo_import_dynamic libc_fchflags fchflags "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fchmod_trampoline()
+
+//go:cgo_import_dynamic libc_fchmod fchmod "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fchown_trampoline()
+
+//go:cgo_import_dynamic libc_fchown fchown "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_flock_trampoline), uintptr(fd), uintptr(how), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_flock_trampoline()
+
+//go:cgo_import_dynamic libc_flock flock "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fpathconf(fd int, name int) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0)
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fpathconf_trampoline()
+
+//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fstat_trampoline()
+
+//go:cgo_import_dynamic libc_fstat fstat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fstatfs_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fstatfs_trampoline()
+
+//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fsync_trampoline), uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fsync_trampoline()
+
+//go:cgo_import_dynamic libc_fsync fsync "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ftruncate_trampoline()
+
+//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getegid_trampoline), 0, 0, 0)
+	egid = int(r0)
+	return
+}
+
+func libc_getegid_trampoline()
+
+//go:cgo_import_dynamic libc_getegid getegid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (uid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_geteuid_trampoline), 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+func libc_geteuid_trampoline()
+
+//go:cgo_import_dynamic libc_geteuid geteuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getgid_trampoline), 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+func libc_getgid_trampoline()
+
+//go:cgo_import_dynamic libc_getgid getgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getpgid_trampoline), uintptr(pid), 0, 0)
+	pgid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getpgid_trampoline()
+
+//go:cgo_import_dynamic libc_getpgid getpgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgrp() (pgrp int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getpgrp_trampoline), 0, 0, 0)
+	pgrp = int(r0)
+	return
+}
+
+func libc_getpgrp_trampoline()
+
+//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getpid_trampoline), 0, 0, 0)
+	pid = int(r0)
+	return
+}
+
+func libc_getpid_trampoline()
+
+//go:cgo_import_dynamic libc_getpid getpid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getppid_trampoline), 0, 0, 0)
+	ppid = int(r0)
+	return
+}
+
+func libc_getppid_trampoline()
+
+//go:cgo_import_dynamic libc_getppid getppid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0)
+	prio = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getpriority_trampoline()
+
+//go:cgo_import_dynamic libc_getpriority getpriority "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(which int, lim *Rlimit) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getrlimit_trampoline()
+
+//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getrusage_trampoline()
+
+//go:cgo_import_dynamic libc_getrusage getrusage "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getsid(pid int) (sid int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getsid_trampoline), uintptr(pid), 0, 0)
+	sid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getsid_trampoline()
+
+//go:cgo_import_dynamic libc_getsid getsid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tv)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_gettimeofday_trampoline()
+
+//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getuid_trampoline), 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+func libc_getuid_trampoline()
+
+//go:cgo_import_dynamic libc_getuid getuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Issetugid() (tainted bool) {
+	r0, _, _ := syscall(abi.FuncPCABI0(libc_issetugid_trampoline), 0, 0, 0)
+	tainted = bool(r0 != 0)
+	return
+}
+
+func libc_issetugid_trampoline()
+
+//go:cgo_import_dynamic libc_issetugid issetugid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, signum Signal) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_kill_trampoline), uintptr(pid), uintptr(signum), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_kill_trampoline()
+
+//go:cgo_import_dynamic libc_kill kill "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kqueue() (fd int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_kqueue_trampoline), 0, 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_kqueue_trampoline()
+
+//go:cgo_import_dynamic libc_kqueue kqueue "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_lchown_trampoline()
+
+//go:cgo_import_dynamic libc_lchown lchown "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Link(path string, link string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(link)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_link_trampoline()
+
+//go:cgo_import_dynamic libc_link link "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, backlog int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_listen_trampoline()
+
+//go:cgo_import_dynamic libc_listen listen "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lstat(path string, stat *Stat_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_lstat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_lstat_trampoline()
+
+//go:cgo_import_dynamic libc_lstat lstat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdir(path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_mkdir_trampoline()
+
+//go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkfifo(path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_mkfifo_trampoline()
+
+//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknod(path string, mode uint32, dev int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_mknod_trampoline()
+
+//go:cgo_import_dynamic libc_mknod mknod "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_nanosleep_trampoline), uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_nanosleep_trampoline()
+
+//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Open(path string, mode int, perm uint32) (fd int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_open_trampoline()
+
+//go:cgo_import_dynamic libc_open open "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pathconf(path string, name int) (val int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_pathconf_trampoline()
+
+//go:cgo_import_dynamic libc_pathconf pathconf "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pread(fd int, p []byte, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_pread_trampoline()
+
+//go:cgo_import_dynamic libc_pread pread "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwrite(fd int, p []byte, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_pwrite_trampoline()
+
+//go:cgo_import_dynamic libc_pwrite pwrite "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_read_trampoline()
+
+//go:cgo_import_dynamic libc_read read "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Readlink(path string, buf []byte) (n int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 unsafe.Pointer
+	if len(buf) > 0 {
+		_p1 = unsafe.Pointer(&buf[0])
+	} else {
+		_p1 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_readlink_trampoline()
+
+//go:cgo_import_dynamic libc_readlink readlink "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rename(from string, to string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(from)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(to)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_rename_trampoline()
+
+//go:cgo_import_dynamic libc_rename rename "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Revoke(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_revoke_trampoline()
+
+//go:cgo_import_dynamic libc_revoke revoke "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rmdir(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_rmdir_trampoline()
+
+//go:cgo_import_dynamic libc_rmdir rmdir "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) {
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_select_trampoline()
+
+//go:cgo_import_dynamic libc_select select "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setegid(egid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setegid_trampoline), uintptr(egid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setegid_trampoline()
+
+//go:cgo_import_dynamic libc_setegid setegid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seteuid(euid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_seteuid_trampoline), uintptr(euid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_seteuid_trampoline()
+
+//go:cgo_import_dynamic libc_seteuid seteuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setgid(gid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setgid_trampoline), uintptr(gid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setgid_trampoline()
+
+//go:cgo_import_dynamic libc_setgid setgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setlogin(name string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(name)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setlogin_trampoline()
+
+//go:cgo_import_dynamic libc_setlogin setlogin "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setpgid_trampoline()
+
+//go:cgo_import_dynamic libc_setpgid setpgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setpriority_trampoline()
+
+//go:cgo_import_dynamic libc_setpriority setpriority "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setregid_trampoline()
+
+//go:cgo_import_dynamic libc_setregid setregid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setreuid_trampoline()
+
+//go:cgo_import_dynamic libc_setreuid setreuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setrlimit(which int, lim *Rlimit) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setrlimit_trampoline()
+
+//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setsid_trampoline), 0, 0, 0)
+	pid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setsid_trampoline()
+
+//go:cgo_import_dynamic libc_setsid setsid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tp *Timeval) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_settimeofday_trampoline()
+
+//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setuid(uid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setuid_trampoline), uintptr(uid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setuid_trampoline()
+
+//go:cgo_import_dynamic libc_setuid setuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Stat(path string, stat *Stat_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_stat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_stat_trampoline()
+
+//go:cgo_import_dynamic libc_stat stat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, stat *Statfs_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_statfs_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_statfs_trampoline()
+
+//go:cgo_import_dynamic libc_statfs statfs "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlink(path string, link string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(link)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_symlink_trampoline()
+
+//go:cgo_import_dynamic libc_symlink symlink "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_sync_trampoline), 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_sync_trampoline()
+
+//go:cgo_import_dynamic libc_sync sync "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_truncate_trampoline()
+
+//go:cgo_import_dynamic libc_truncate truncate "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(newmask int) (oldmask int) {
+	r0, _, _ := syscall(abi.FuncPCABI0(libc_umask_trampoline), uintptr(newmask), 0, 0)
+	oldmask = int(r0)
+	return
+}
+
+func libc_umask_trampoline()
+
+//go:cgo_import_dynamic libc_umask umask "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlink(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_unlink_trampoline()
+
+//go:cgo_import_dynamic libc_unlink unlink "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(path string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_unmount_trampoline()
+
+//go:cgo_import_dynamic libc_unmount unmount "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_write_trampoline()
+
+//go:cgo_import_dynamic libc_write write "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n uintptr, err error) {
+	var _p0 unsafe.Pointer
+	if len(iovecs) > 0 {
+		_p0 = unsafe.Pointer(&iovecs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscallX(abi.FuncPCABI0(libc_writev_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+	n = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_writev_trampoline()
+
+//go:cgo_import_dynamic libc_writev writev "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
+	r0, _, e1 := syscall6X(abi.FuncPCABI0(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos))
+	ret = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_mmap_trampoline()
+
+//go:cgo_import_dynamic libc_mmap mmap "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_munmap_trampoline()
+
+//go:cgo_import_dynamic libc_munmap munmap "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getfsstat_trampoline), uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getfsstat_trampoline()
+
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_utimensat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_utimensat_trampoline()
+
+//go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func directSyscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr) (ret uintptr, err error) {
+	r0, _, e1 := syscall6X(abi.FuncPCABI0(libc_syscall_trampoline), uintptr(trap), uintptr(a1), uintptr(a2), uintptr(a3), uintptr(a4), uintptr(a5))
+	ret = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_syscall_trampoline()
+
+//go:cgo_import_dynamic libc_syscall syscall "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+	r0, _, e1 := syscallX(abi.FuncPCABI0(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence))
+	newoffset = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_lseek_trampoline()
+
+//go:cgo_import_dynamic libc_lseek lseek "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getcwd(buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getcwd_trampoline()
+
+//go:cgo_import_dynamic libc_getcwd getcwd "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_sysctl_trampoline()
+
+//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fork() (pid int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_fork_trampoline), 0, 0, 0)
+	pid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fork_trampoline()
+
+//go:cgo_import_dynamic libc_fork fork "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func execve(path *byte, argv **byte, envp **byte) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_execve_trampoline()
+
+//go:cgo_import_dynamic libc_execve execve "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func exit(res int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), uintptr(res), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_exit_trampoline()
+
+//go:cgo_import_dynamic libc_exit exit "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+//go:nosplit
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ptrace_trampoline()
+
+//go:cgo_import_dynamic libc_ptrace ptrace "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getentropy(p []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getentropy_trampoline), uintptr(_p0), uintptr(len(p)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getentropy_trampoline()
+
+//go:cgo_import_dynamic libc_getentropy getentropy "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_fstatat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fstatat_trampoline()
+
+//go:cgo_import_dynamic libc_fstatat fstatat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unlinkat(fd int, path string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_unlinkat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_unlinkat_trampoline()
+
+//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(fd int, path string, flags int, perm uint32) (fdret int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_openat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(perm), 0, 0)
+	fdret = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_openat_trampoline()
+
+//go:cgo_import_dynamic libc_openat openat "libc.so"
diff --git a/src/syscall/zsyscall_openbsd_ppc64.s b/src/syscall/zsyscall_openbsd_ppc64.s
new file mode 100644
index 0000000..8f3ff9a
--- /dev/null
+++ b/src/syscall/zsyscall_openbsd_ppc64.s
@@ -0,0 +1,357 @@
+// go run mkasm.go openbsd ppc64
+// Code generated by the command above; DO NOT EDIT.
+#include "textflag.h"
+TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getgroups(SB)
+	RET
+TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setgroups(SB)
+	RET
+TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_wait4(SB)
+	RET
+TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_accept(SB)
+	RET
+TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_bind(SB)
+	RET
+TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_connect(SB)
+	RET
+TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_socket(SB)
+	RET
+TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getsockopt(SB)
+	RET
+TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setsockopt(SB)
+	RET
+TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getpeername(SB)
+	RET
+TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getsockname(SB)
+	RET
+TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_shutdown(SB)
+	RET
+TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_socketpair(SB)
+	RET
+TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_recvfrom(SB)
+	RET
+TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_sendto(SB)
+	RET
+TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_recvmsg(SB)
+	RET
+TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_sendmsg(SB)
+	RET
+TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_kevent(SB)
+	RET
+TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_utimes(SB)
+	RET
+TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_futimes(SB)
+	RET
+TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fcntl(SB)
+	RET
+TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_ioctl(SB)
+	RET
+TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_pipe2(SB)
+	RET
+TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_accept4(SB)
+	RET
+TEXT ·libc_getdents_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getdents(SB)
+	RET
+TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_access(SB)
+	RET
+TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_adjtime(SB)
+	RET
+TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_chdir(SB)
+	RET
+TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_chflags(SB)
+	RET
+TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_chmod(SB)
+	RET
+TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_chown(SB)
+	RET
+TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_chroot(SB)
+	RET
+TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_close(SB)
+	RET
+TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_dup(SB)
+	RET
+TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_dup2(SB)
+	RET
+TEXT ·libc_dup3_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_dup3(SB)
+	RET
+TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fchdir(SB)
+	RET
+TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fchflags(SB)
+	RET
+TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fchmod(SB)
+	RET
+TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fchown(SB)
+	RET
+TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_flock(SB)
+	RET
+TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fpathconf(SB)
+	RET
+TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fstat(SB)
+	RET
+TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fstatfs(SB)
+	RET
+TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fsync(SB)
+	RET
+TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_ftruncate(SB)
+	RET
+TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getegid(SB)
+	RET
+TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_geteuid(SB)
+	RET
+TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getgid(SB)
+	RET
+TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getpgid(SB)
+	RET
+TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getpgrp(SB)
+	RET
+TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getpid(SB)
+	RET
+TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getppid(SB)
+	RET
+TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getpriority(SB)
+	RET
+TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getrlimit(SB)
+	RET
+TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getrusage(SB)
+	RET
+TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getsid(SB)
+	RET
+TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_gettimeofday(SB)
+	RET
+TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getuid(SB)
+	RET
+TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_issetugid(SB)
+	RET
+TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_kill(SB)
+	RET
+TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_kqueue(SB)
+	RET
+TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_lchown(SB)
+	RET
+TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_link(SB)
+	RET
+TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_listen(SB)
+	RET
+TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_lstat(SB)
+	RET
+TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_mkdir(SB)
+	RET
+TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_mkfifo(SB)
+	RET
+TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_mknod(SB)
+	RET
+TEXT ·libc_nanosleep_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_nanosleep(SB)
+	RET
+TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_open(SB)
+	RET
+TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_pathconf(SB)
+	RET
+TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_pread(SB)
+	RET
+TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_pwrite(SB)
+	RET
+TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_read(SB)
+	RET
+TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_readlink(SB)
+	RET
+TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_rename(SB)
+	RET
+TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_revoke(SB)
+	RET
+TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_rmdir(SB)
+	RET
+TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_select(SB)
+	RET
+TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setegid(SB)
+	RET
+TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_seteuid(SB)
+	RET
+TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setgid(SB)
+	RET
+TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setlogin(SB)
+	RET
+TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setpgid(SB)
+	RET
+TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setpriority(SB)
+	RET
+TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setregid(SB)
+	RET
+TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setreuid(SB)
+	RET
+TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setrlimit(SB)
+	RET
+TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setsid(SB)
+	RET
+TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_settimeofday(SB)
+	RET
+TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_setuid(SB)
+	RET
+TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_stat(SB)
+	RET
+TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_statfs(SB)
+	RET
+TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_symlink(SB)
+	RET
+TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_sync(SB)
+	RET
+TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_truncate(SB)
+	RET
+TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_umask(SB)
+	RET
+TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_unlink(SB)
+	RET
+TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_unmount(SB)
+	RET
+TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_write(SB)
+	RET
+TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_writev(SB)
+	RET
+TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_mmap(SB)
+	RET
+TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_munmap(SB)
+	RET
+TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getfsstat(SB)
+	RET
+TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_utimensat(SB)
+	RET
+TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_syscall(SB)
+	RET
+TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_lseek(SB)
+	RET
+TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getcwd(SB)
+	RET
+TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_sysctl(SB)
+	RET
+TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fork(SB)
+	RET
+TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_execve(SB)
+	RET
+TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_exit(SB)
+	RET
+TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_ptrace(SB)
+	RET
+TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_getentropy(SB)
+	RET
+TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_fstatat(SB)
+	RET
+TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_unlinkat(SB)
+	RET
+TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
+	CALL	libc_openat(SB)
+	RET
diff --git a/src/syscall/zsyscall_openbsd_riscv64.go b/src/syscall/zsyscall_openbsd_riscv64.go
new file mode 100644
index 0000000..a24fcba
--- /dev/null
+++ b/src/syscall/zsyscall_openbsd_riscv64.go
@@ -0,0 +1,1946 @@
+// mksyscall.pl -openbsd -libc -tags openbsd,riscv64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_libc.go syscall_openbsd_riscv64.go
+// Code generated by the command above; DO NOT EDIT.
+
+//go:build openbsd && riscv64
+
+package syscall
+
+import "unsafe"
+import "internal/abi"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getgroups_trampoline()
+
+//go:cgo_import_dynamic libc_getgroups getgroups "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(ngid int, gid *_Gid_t) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setgroups_trampoline()
+
+//go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+	wpid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_wait4_trampoline()
+
+//go:cgo_import_dynamic libc_wait4 wait4 "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_accept_trampoline()
+
+//go:cgo_import_dynamic libc_accept accept "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_bind_trampoline()
+
+//go:cgo_import_dynamic libc_bind bind "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_connect_trampoline()
+
+//go:cgo_import_dynamic libc_connect connect "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_socket_trampoline()
+
+//go:cgo_import_dynamic libc_socket socket "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getsockopt_trampoline()
+
+//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setsockopt_trampoline()
+
+//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getpeername_trampoline()
+
+//go:cgo_import_dynamic libc_getpeername getpeername "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getsockname_trampoline()
+
+//go:cgo_import_dynamic libc_getsockname getsockname "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(s int, how int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_shutdown_trampoline()
+
+//go:cgo_import_dynamic libc_shutdown shutdown "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+	_, _, e1 := rawSyscall6(abi.FuncPCABI0(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_socketpair_trampoline()
+
+//go:cgo_import_dynamic libc_socketpair socketpair "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_recvfrom_trampoline()
+
+//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_sendto_trampoline()
+
+//go:cgo_import_dynamic libc_sendto sendto "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_recvmsg_trampoline()
+
+//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_sendmsg_trampoline()
+
+//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_kevent_trampoline()
+
+//go:cgo_import_dynamic libc_kevent kevent "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, timeval *[2]Timeval) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_utimes_trampoline()
+
+//go:cgo_import_dynamic libc_utimes utimes "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimes(fd int, timeval *[2]Timeval) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_futimes_trampoline()
+
+//go:cgo_import_dynamic libc_futimes futimes "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fcntl_trampoline()
+
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ioctl_trampoline()
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe2(p *[2]_C_int, flags int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_pipe2_trampoline), uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_pipe2_trampoline()
+
+//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_accept4_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+	nfd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_accept4_trampoline()
+
+//go:cgo_import_dynamic libc_accept4 accept4 "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getdents(fd int, buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getdents_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getdents_trampoline()
+
+//go:cgo_import_dynamic libc_getdents getdents "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Access(path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_access_trampoline()
+
+//go:cgo_import_dynamic libc_access access "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_adjtime_trampoline()
+
+//go:cgo_import_dynamic libc_adjtime adjtime "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_chdir_trampoline()
+
+//go:cgo_import_dynamic libc_chdir chdir "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chflags(path string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_chflags_trampoline()
+
+//go:cgo_import_dynamic libc_chflags chflags "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chmod(path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_chmod_trampoline()
+
+//go:cgo_import_dynamic libc_chmod chmod "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chown(path string, uid int, gid int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_chown_trampoline()
+
+//go:cgo_import_dynamic libc_chown chown "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_chroot_trampoline()
+
+//go:cgo_import_dynamic libc_chroot chroot "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_close_trampoline), uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_close_trampoline()
+
+//go:cgo_import_dynamic libc_close close "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(fd int) (nfd int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_dup_trampoline), uintptr(fd), 0, 0)
+	nfd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_dup_trampoline()
+
+//go:cgo_import_dynamic libc_dup dup "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup2(from int, to int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_dup2_trampoline), uintptr(from), uintptr(to), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_dup2_trampoline()
+
+//go:cgo_import_dynamic libc_dup2 dup2 "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func dup3(from int, to int, flags int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_dup3_trampoline), uintptr(from), uintptr(to), uintptr(flags))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_dup3_trampoline()
+
+//go:cgo_import_dynamic libc_dup3 dup3 "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fchdir_trampoline), uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fchdir_trampoline()
+
+//go:cgo_import_dynamic libc_fchdir fchdir "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchflags(fd int, flags int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fchflags_trampoline()
+
+//go:cgo_import_dynamic libc_fchflags fchflags "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fchmod_trampoline()
+
+//go:cgo_import_dynamic libc_fchmod fchmod "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fchown_trampoline()
+
+//go:cgo_import_dynamic libc_fchown fchown "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_flock_trampoline), uintptr(fd), uintptr(how), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_flock_trampoline()
+
+//go:cgo_import_dynamic libc_flock flock "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fpathconf(fd int, name int) (val int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0)
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fpathconf_trampoline()
+
+//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fstat_trampoline()
+
+//go:cgo_import_dynamic libc_fstat fstat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fstatfs_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fstatfs_trampoline()
+
+//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_fsync_trampoline), uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fsync_trampoline()
+
+//go:cgo_import_dynamic libc_fsync fsync "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ftruncate_trampoline()
+
+//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getegid_trampoline), 0, 0, 0)
+	egid = int(r0)
+	return
+}
+
+func libc_getegid_trampoline()
+
+//go:cgo_import_dynamic libc_getegid getegid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (uid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_geteuid_trampoline), 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+func libc_geteuid_trampoline()
+
+//go:cgo_import_dynamic libc_geteuid geteuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getgid_trampoline), 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+func libc_getgid_trampoline()
+
+//go:cgo_import_dynamic libc_getgid getgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getpgid_trampoline), uintptr(pid), 0, 0)
+	pgid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getpgid_trampoline()
+
+//go:cgo_import_dynamic libc_getpgid getpgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgrp() (pgrp int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getpgrp_trampoline), 0, 0, 0)
+	pgrp = int(r0)
+	return
+}
+
+func libc_getpgrp_trampoline()
+
+//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getpid_trampoline), 0, 0, 0)
+	pid = int(r0)
+	return
+}
+
+func libc_getpid_trampoline()
+
+//go:cgo_import_dynamic libc_getpid getpid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getppid_trampoline), 0, 0, 0)
+	ppid = int(r0)
+	return
+}
+
+func libc_getppid_trampoline()
+
+//go:cgo_import_dynamic libc_getppid getppid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0)
+	prio = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getpriority_trampoline()
+
+//go:cgo_import_dynamic libc_getpriority getpriority "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(which int, lim *Rlimit) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getrlimit_trampoline()
+
+//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getrusage_trampoline()
+
+//go:cgo_import_dynamic libc_getrusage getrusage "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getsid(pid int) (sid int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getsid_trampoline), uintptr(pid), 0, 0)
+	sid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getsid_trampoline()
+
+//go:cgo_import_dynamic libc_getsid getsid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tv)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_gettimeofday_trampoline()
+
+//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+	r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getuid_trampoline), 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+func libc_getuid_trampoline()
+
+//go:cgo_import_dynamic libc_getuid getuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Issetugid() (tainted bool) {
+	r0, _, _ := syscall(abi.FuncPCABI0(libc_issetugid_trampoline), 0, 0, 0)
+	tainted = bool(r0 != 0)
+	return
+}
+
+func libc_issetugid_trampoline()
+
+//go:cgo_import_dynamic libc_issetugid issetugid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, signum Signal) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_kill_trampoline), uintptr(pid), uintptr(signum), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_kill_trampoline()
+
+//go:cgo_import_dynamic libc_kill kill "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kqueue() (fd int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_kqueue_trampoline), 0, 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_kqueue_trampoline()
+
+//go:cgo_import_dynamic libc_kqueue kqueue "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_lchown_trampoline()
+
+//go:cgo_import_dynamic libc_lchown lchown "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Link(path string, link string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(link)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_link_trampoline()
+
+//go:cgo_import_dynamic libc_link link "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, backlog int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_listen_trampoline()
+
+//go:cgo_import_dynamic libc_listen listen "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lstat(path string, stat *Stat_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_lstat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_lstat_trampoline()
+
+//go:cgo_import_dynamic libc_lstat lstat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdir(path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_mkdir_trampoline()
+
+//go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkfifo(path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_mkfifo_trampoline()
+
+//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknod(path string, mode uint32, dev int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_mknod_trampoline()
+
+//go:cgo_import_dynamic libc_mknod mknod "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_nanosleep_trampoline), uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_nanosleep_trampoline()
+
+//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Open(path string, mode int, perm uint32) (fd int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_open_trampoline()
+
+//go:cgo_import_dynamic libc_open open "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pathconf(path string, name int) (val int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_pathconf_trampoline()
+
+//go:cgo_import_dynamic libc_pathconf pathconf "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pread(fd int, p []byte, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_pread_trampoline()
+
+//go:cgo_import_dynamic libc_pread pread "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwrite(fd int, p []byte, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_pwrite_trampoline()
+
+//go:cgo_import_dynamic libc_pwrite pwrite "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_read_trampoline()
+
+//go:cgo_import_dynamic libc_read read "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Readlink(path string, buf []byte) (n int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 unsafe.Pointer
+	if len(buf) > 0 {
+		_p1 = unsafe.Pointer(&buf[0])
+	} else {
+		_p1 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_readlink_trampoline()
+
+//go:cgo_import_dynamic libc_readlink readlink "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rename(from string, to string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(from)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(to)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_rename_trampoline()
+
+//go:cgo_import_dynamic libc_rename rename "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Revoke(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_revoke_trampoline()
+
+//go:cgo_import_dynamic libc_revoke revoke "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rmdir(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_rmdir_trampoline()
+
+//go:cgo_import_dynamic libc_rmdir rmdir "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) {
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_select_trampoline()
+
+//go:cgo_import_dynamic libc_select select "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setegid(egid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setegid_trampoline), uintptr(egid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setegid_trampoline()
+
+//go:cgo_import_dynamic libc_setegid setegid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seteuid(euid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_seteuid_trampoline), uintptr(euid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_seteuid_trampoline()
+
+//go:cgo_import_dynamic libc_seteuid seteuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setgid(gid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setgid_trampoline), uintptr(gid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setgid_trampoline()
+
+//go:cgo_import_dynamic libc_setgid setgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setlogin(name string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(name)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setlogin_trampoline()
+
+//go:cgo_import_dynamic libc_setlogin setlogin "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setpgid_trampoline()
+
+//go:cgo_import_dynamic libc_setpgid setpgid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setpriority_trampoline()
+
+//go:cgo_import_dynamic libc_setpriority setpriority "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setregid_trampoline()
+
+//go:cgo_import_dynamic libc_setregid setregid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setreuid_trampoline()
+
+//go:cgo_import_dynamic libc_setreuid setreuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setrlimit(which int, lim *Rlimit) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setrlimit_trampoline()
+
+//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setsid_trampoline), 0, 0, 0)
+	pid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setsid_trampoline()
+
+//go:cgo_import_dynamic libc_setsid setsid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tp *Timeval) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_settimeofday_trampoline()
+
+//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setuid(uid int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setuid_trampoline), uintptr(uid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_setuid_trampoline()
+
+//go:cgo_import_dynamic libc_setuid setuid "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Stat(path string, stat *Stat_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_stat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_stat_trampoline()
+
+//go:cgo_import_dynamic libc_stat stat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, stat *Statfs_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_statfs_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_statfs_trampoline()
+
+//go:cgo_import_dynamic libc_statfs statfs "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlink(path string, link string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(link)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_symlink_trampoline()
+
+//go:cgo_import_dynamic libc_symlink symlink "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_sync_trampoline), 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_sync_trampoline()
+
+//go:cgo_import_dynamic libc_sync sync "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_truncate_trampoline()
+
+//go:cgo_import_dynamic libc_truncate truncate "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(newmask int) (oldmask int) {
+	r0, _, _ := syscall(abi.FuncPCABI0(libc_umask_trampoline), uintptr(newmask), 0, 0)
+	oldmask = int(r0)
+	return
+}
+
+func libc_umask_trampoline()
+
+//go:cgo_import_dynamic libc_umask umask "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlink(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_unlink_trampoline()
+
+//go:cgo_import_dynamic libc_unlink unlink "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(path string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_unmount_trampoline()
+
+//go:cgo_import_dynamic libc_unmount unmount "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_write_trampoline()
+
+//go:cgo_import_dynamic libc_write write "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n uintptr, err error) {
+	var _p0 unsafe.Pointer
+	if len(iovecs) > 0 {
+		_p0 = unsafe.Pointer(&iovecs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscallX(abi.FuncPCABI0(libc_writev_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+	n = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_writev_trampoline()
+
+//go:cgo_import_dynamic libc_writev writev "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
+	r0, _, e1 := syscall6X(abi.FuncPCABI0(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos))
+	ret = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_mmap_trampoline()
+
+//go:cgo_import_dynamic libc_mmap mmap "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_munmap_trampoline()
+
+//go:cgo_import_dynamic libc_munmap munmap "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getfsstat_trampoline), uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getfsstat_trampoline()
+
+//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_utimensat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_utimensat_trampoline()
+
+//go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func directSyscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr) (ret uintptr, err error) {
+	r0, _, e1 := syscall6X(abi.FuncPCABI0(libc_syscall_trampoline), uintptr(trap), uintptr(a1), uintptr(a2), uintptr(a3), uintptr(a4), uintptr(a5))
+	ret = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_syscall_trampoline()
+
+//go:cgo_import_dynamic libc_syscall syscall "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+	r0, _, e1 := syscallX(abi.FuncPCABI0(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence))
+	newoffset = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_lseek_trampoline()
+
+//go:cgo_import_dynamic libc_lseek lseek "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getcwd(buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall(abi.FuncPCABI0(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getcwd_trampoline()
+
+//go:cgo_import_dynamic libc_getcwd getcwd "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_sysctl_trampoline()
+
+//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fork() (pid int, err error) {
+	r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_fork_trampoline), 0, 0, 0)
+	pid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fork_trampoline()
+
+//go:cgo_import_dynamic libc_fork fork "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func execve(path *byte, argv **byte, envp **byte) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_execve_trampoline()
+
+//go:cgo_import_dynamic libc_execve execve "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func exit(res int) (err error) {
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), uintptr(res), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_exit_trampoline()
+
+//go:cgo_import_dynamic libc_exit exit "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+//go:nosplit
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_ptrace_trampoline()
+
+//go:cgo_import_dynamic libc_ptrace ptrace "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getentropy(p []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getentropy_trampoline), uintptr(_p0), uintptr(len(p)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_getentropy_trampoline()
+
+//go:cgo_import_dynamic libc_getentropy getentropy "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall6(abi.FuncPCABI0(libc_fstatat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_fstatat_trampoline()
+
+//go:cgo_import_dynamic libc_fstatat fstatat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unlinkat(fd int, path string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall(abi.FuncPCABI0(libc_unlinkat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_unlinkat_trampoline()
+
+//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(fd int, path string, flags int, perm uint32) (fdret int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := syscall6(abi.FuncPCABI0(libc_openat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(perm), 0, 0)
+	fdret = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func libc_openat_trampoline()
+
+//go:cgo_import_dynamic libc_openat openat "libc.so"
diff --git a/src/syscall/zsyscall_openbsd_riscv64.s b/src/syscall/zsyscall_openbsd_riscv64.s
new file mode 100644
index 0000000..4f787ee
--- /dev/null
+++ b/src/syscall/zsyscall_openbsd_riscv64.s
@@ -0,0 +1,239 @@
+// go run mkasm.go openbsd riscv64
+// Code generated by the command above; DO NOT EDIT.
+#include "textflag.h"
+TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getgroups(SB)
+TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setgroups(SB)
+TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_wait4(SB)
+TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_accept(SB)
+TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_bind(SB)
+TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_connect(SB)
+TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_socket(SB)
+TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getsockopt(SB)
+TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setsockopt(SB)
+TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getpeername(SB)
+TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getsockname(SB)
+TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_shutdown(SB)
+TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_socketpair(SB)
+TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_recvfrom(SB)
+TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_sendto(SB)
+TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_recvmsg(SB)
+TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_sendmsg(SB)
+TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_kevent(SB)
+TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_utimes(SB)
+TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_futimes(SB)
+TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fcntl(SB)
+TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_ioctl(SB)
+TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_pipe2(SB)
+TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_accept4(SB)
+TEXT ·libc_getdents_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getdents(SB)
+TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_access(SB)
+TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_adjtime(SB)
+TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_chdir(SB)
+TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_chflags(SB)
+TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_chmod(SB)
+TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_chown(SB)
+TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_chroot(SB)
+TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_close(SB)
+TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_dup(SB)
+TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_dup2(SB)
+TEXT ·libc_dup3_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_dup3(SB)
+TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fchdir(SB)
+TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fchflags(SB)
+TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fchmod(SB)
+TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fchown(SB)
+TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_flock(SB)
+TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fpathconf(SB)
+TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fstat(SB)
+TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fstatfs(SB)
+TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fsync(SB)
+TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_ftruncate(SB)
+TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getegid(SB)
+TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_geteuid(SB)
+TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getgid(SB)
+TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getpgid(SB)
+TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getpgrp(SB)
+TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getpid(SB)
+TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getppid(SB)
+TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getpriority(SB)
+TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getrlimit(SB)
+TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getrusage(SB)
+TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getsid(SB)
+TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_gettimeofday(SB)
+TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getuid(SB)
+TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_issetugid(SB)
+TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_kill(SB)
+TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_kqueue(SB)
+TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_lchown(SB)
+TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_link(SB)
+TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_listen(SB)
+TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_lstat(SB)
+TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_mkdir(SB)
+TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_mkfifo(SB)
+TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_mknod(SB)
+TEXT ·libc_nanosleep_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_nanosleep(SB)
+TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_open(SB)
+TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_pathconf(SB)
+TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_pread(SB)
+TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_pwrite(SB)
+TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_read(SB)
+TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_readlink(SB)
+TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_rename(SB)
+TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_revoke(SB)
+TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_rmdir(SB)
+TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_select(SB)
+TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setegid(SB)
+TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_seteuid(SB)
+TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setgid(SB)
+TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setlogin(SB)
+TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setpgid(SB)
+TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setpriority(SB)
+TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setregid(SB)
+TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setreuid(SB)
+TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setrlimit(SB)
+TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setsid(SB)
+TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_settimeofday(SB)
+TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_setuid(SB)
+TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_stat(SB)
+TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_statfs(SB)
+TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_symlink(SB)
+TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_sync(SB)
+TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_truncate(SB)
+TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_umask(SB)
+TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_unlink(SB)
+TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_unmount(SB)
+TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_write(SB)
+TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_writev(SB)
+TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_mmap(SB)
+TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_munmap(SB)
+TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getfsstat(SB)
+TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_utimensat(SB)
+TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_syscall(SB)
+TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_lseek(SB)
+TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getcwd(SB)
+TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_sysctl(SB)
+TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fork(SB)
+TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_execve(SB)
+TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_exit(SB)
+TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_ptrace(SB)
+TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_getentropy(SB)
+TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_fstatat(SB)
+TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_unlinkat(SB)
+TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
+	JMP	libc_openat(SB)
diff --git a/src/syscall/zsyscall_solaris_amd64.go b/src/syscall/zsyscall_solaris_amd64.go
index e25db63..5f05db9 100644
--- a/src/syscall/zsyscall_solaris_amd64.go
+++ b/src/syscall/zsyscall_solaris_amd64.go
@@ -64,7 +64,7 @@
 //go:cgo_import_dynamic libc_Setpriority setpriority "libc.so"
 //go:cgo_import_dynamic libc_Setregid setregid "libc.so"
 //go:cgo_import_dynamic libc_Setreuid setreuid "libc.so"
-//go:cgo_import_dynamic libc_Setrlimit setrlimit "libc.so"
+//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so"
 //go:cgo_import_dynamic libc_Setsid setsid "libc.so"
 //go:cgo_import_dynamic libc_Setuid setuid "libc.so"
 //go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so"
@@ -152,7 +152,7 @@
 //go:linkname libc_Setpriority libc_Setpriority
 //go:linkname libc_Setregid libc_Setregid
 //go:linkname libc_Setreuid libc_Setreuid
-//go:linkname libc_Setrlimit libc_Setrlimit
+//go:linkname libc_setrlimit libc_setrlimit
 //go:linkname libc_Setsid libc_Setsid
 //go:linkname libc_Setuid libc_Setuid
 //go:linkname libc_shutdown libc_shutdown
@@ -243,7 +243,7 @@
 	libc_Setpriority,
 	libc_Setregid,
 	libc_Setreuid,
-	libc_Setrlimit,
+	libc_setrlimit,
 	libc_Setsid,
 	libc_Setuid,
 	libc_shutdown,
@@ -963,7 +963,7 @@
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func setrlimit(which int, lim *Rlimit) (err error) {
-	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0)
+	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_setrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/src/syscall/zsyscall_windows.go b/src/syscall/zsyscall_windows.go
index 68c29d8..6302708 100644
--- a/src/syscall/zsyscall_windows.go
+++ b/src/syscall/zsyscall_windows.go
@@ -858,11 +858,8 @@
 	return
 }
 
-func GetStartupInfo(startupInfo *StartupInfo) (err error) {
-	r1, _, e1 := Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
-	if r1 == 0 {
-		err = errnoErr(e1)
-	}
+func getStartupInfo(startupInfo *StartupInfo) {
+	Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
 	return
 }
 
diff --git a/src/syscall/zsysctl_openbsd.go b/src/syscall/zsysctl_openbsd.go
index 045930d..4ef15a3 100644
--- a/src/syscall/zsysctl_openbsd.go
+++ b/src/syscall/zsysctl_openbsd.go
@@ -14,6 +14,7 @@
 	{"ddb.max_line", []_C_int{9, 3}},
 	{"ddb.max_width", []_C_int{9, 2}},
 	{"ddb.panic", []_C_int{9, 5}},
+	{"ddb.profile", []_C_int{9, 9}},
 	{"ddb.radix", []_C_int{9, 1}},
 	{"ddb.tab_stop_width", []_C_int{9, 4}},
 	{"ddb.trigger", []_C_int{9, 8}},
@@ -28,30 +29,39 @@
 	{"hw.model", []_C_int{6, 2}},
 	{"hw.ncpu", []_C_int{6, 3}},
 	{"hw.ncpufound", []_C_int{6, 21}},
+	{"hw.ncpuonline", []_C_int{6, 25}},
 	{"hw.pagesize", []_C_int{6, 7}},
+	{"hw.perfpolicy", []_C_int{6, 23}},
 	{"hw.physmem", []_C_int{6, 19}},
+	{"hw.power", []_C_int{6, 26}},
 	{"hw.product", []_C_int{6, 15}},
 	{"hw.serialno", []_C_int{6, 17}},
 	{"hw.setperf", []_C_int{6, 13}},
+	{"hw.smt", []_C_int{6, 24}},
 	{"hw.usermem", []_C_int{6, 20}},
 	{"hw.uuid", []_C_int{6, 18}},
 	{"hw.vendor", []_C_int{6, 14}},
 	{"hw.version", []_C_int{6, 16}},
-	{"kern.arandom", []_C_int{1, 37}},
+	{"kern.allowdt", []_C_int{1, 65}},
+	{"kern.allowkmem", []_C_int{1, 52}},
 	{"kern.argmax", []_C_int{1, 8}},
+	{"kern.audio", []_C_int{1, 84}},
 	{"kern.boottime", []_C_int{1, 21}},
 	{"kern.bufcachepercent", []_C_int{1, 72}},
 	{"kern.ccpu", []_C_int{1, 45}},
 	{"kern.clockrate", []_C_int{1, 12}},
+	{"kern.consbuf", []_C_int{1, 83}},
+	{"kern.consbufsize", []_C_int{1, 82}},
 	{"kern.consdev", []_C_int{1, 75}},
 	{"kern.cp_time", []_C_int{1, 40}},
 	{"kern.cp_time2", []_C_int{1, 71}},
-	{"kern.cryptodevallowsoft", []_C_int{1, 53}},
+	{"kern.cpustats", []_C_int{1, 85}},
 	{"kern.domainname", []_C_int{1, 22}},
 	{"kern.file", []_C_int{1, 73}},
 	{"kern.forkstat", []_C_int{1, 42}},
 	{"kern.fscale", []_C_int{1, 46}},
 	{"kern.fsync", []_C_int{1, 33}},
+	{"kern.global_ptrace", []_C_int{1, 81}},
 	{"kern.hostid", []_C_int{1, 11}},
 	{"kern.hostname", []_C_int{1, 10}},
 	{"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}},
@@ -74,17 +84,16 @@
 	{"kern.ngroups", []_C_int{1, 18}},
 	{"kern.nosuidcoredump", []_C_int{1, 32}},
 	{"kern.nprocs", []_C_int{1, 47}},
-	{"kern.nselcoll", []_C_int{1, 43}},
 	{"kern.nthreads", []_C_int{1, 26}},
 	{"kern.numvnodes", []_C_int{1, 58}},
 	{"kern.osrelease", []_C_int{1, 2}},
 	{"kern.osrevision", []_C_int{1, 3}},
 	{"kern.ostype", []_C_int{1, 1}},
 	{"kern.osversion", []_C_int{1, 27}},
+	{"kern.pfstatus", []_C_int{1, 86}},
 	{"kern.pool_debug", []_C_int{1, 77}},
 	{"kern.posix1version", []_C_int{1, 17}},
 	{"kern.proc", []_C_int{1, 66}},
-	{"kern.random", []_C_int{1, 31}},
 	{"kern.rawpartition", []_C_int{1, 24}},
 	{"kern.saved_ids", []_C_int{1, 20}},
 	{"kern.securelevel", []_C_int{1, 9}},
@@ -102,21 +111,20 @@
 	{"kern.timecounter.hardware", []_C_int{1, 69, 3}},
 	{"kern.timecounter.tick", []_C_int{1, 69, 1}},
 	{"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}},
-	{"kern.tty.maxptys", []_C_int{1, 44, 6}},
-	{"kern.tty.nptys", []_C_int{1, 44, 7}},
+	{"kern.timeout_stats", []_C_int{1, 87}},
 	{"kern.tty.tk_cancc", []_C_int{1, 44, 4}},
 	{"kern.tty.tk_nin", []_C_int{1, 44, 1}},
 	{"kern.tty.tk_nout", []_C_int{1, 44, 2}},
 	{"kern.tty.tk_rawcc", []_C_int{1, 44, 3}},
 	{"kern.tty.ttyinfo", []_C_int{1, 44, 5}},
 	{"kern.ttycount", []_C_int{1, 57}},
-	{"kern.userasymcrypto", []_C_int{1, 60}},
-	{"kern.usercrypto", []_C_int{1, 52}},
-	{"kern.usermount", []_C_int{1, 30}},
+	{"kern.utc_offset", []_C_int{1, 88}},
 	{"kern.version", []_C_int{1, 4}},
-	{"kern.vnode", []_C_int{1, 13}},
+	{"kern.video", []_C_int{1, 89}},
 	{"kern.watchdog.auto", []_C_int{1, 64, 2}},
 	{"kern.watchdog.period", []_C_int{1, 64, 1}},
+	{"kern.witnesswatch", []_C_int{1, 53}},
+	{"kern.wxabort", []_C_int{1, 74}},
 	{"net.bpf.bufsize", []_C_int{4, 31, 1}},
 	{"net.bpf.maxbufsize", []_C_int{4, 31, 2}},
 	{"net.inet.ah.enable", []_C_int{4, 2, 51, 1}},
@@ -144,7 +152,9 @@
 	{"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}},
 	{"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}},
 	{"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}},
+	{"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}},
 	{"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}},
+	{"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}},
 	{"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}},
 	{"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}},
 	{"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}},
@@ -153,8 +163,10 @@
 	{"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}},
 	{"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}},
 	{"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}},
+	{"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}},
 	{"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}},
 	{"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}},
+	{"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}},
 	{"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}},
 	{"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}},
 	{"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}},
@@ -171,9 +183,7 @@
 	{"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}},
 	{"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}},
 	{"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}},
-	{"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}},
 	{"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}},
-	{"net.inet.pim.stats", []_C_int{4, 2, 103, 1}},
 	{"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}},
 	{"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}},
 	{"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}},
@@ -187,6 +197,7 @@
 	{"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}},
 	{"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}},
 	{"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}},
+	{"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}},
 	{"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}},
 	{"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}},
 	{"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}},
@@ -194,9 +205,12 @@
 	{"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}},
 	{"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}},
 	{"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}},
+	{"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}},
+	{"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}},
 	{"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}},
 	{"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}},
 	{"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}},
+	{"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}},
 	{"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}},
 	{"net.inet.udp.stats", []_C_int{4, 2, 17, 5}},
 	{"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}},
@@ -209,13 +223,8 @@
 	{"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}},
 	{"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}},
 	{"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}},
-	{"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}},
 	{"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}},
-	{"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}},
-	{"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}},
-	{"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}},
 	{"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}},
-	{"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}},
 	{"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}},
 	{"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}},
 	{"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}},
@@ -228,20 +237,19 @@
 	{"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}},
 	{"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}},
 	{"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}},
-	{"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}},
-	{"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}},
 	{"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}},
+	{"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}},
+	{"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}},
 	{"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}},
 	{"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}},
 	{"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}},
 	{"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}},
 	{"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}},
 	{"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}},
-	{"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}},
+	{"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}},
 	{"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}},
 	{"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}},
 	{"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}},
-	{"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}},
 	{"net.key.sadb_dump", []_C_int{4, 30, 1}},
 	{"net.key.spd_dump", []_C_int{4, 30, 2}},
 	{"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}},
@@ -250,21 +258,7 @@
 	{"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}},
 	{"net.mpls.mapttl_ip", []_C_int{4, 33, 5}},
 	{"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}},
-	{"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}},
 	{"net.mpls.ttl", []_C_int{4, 33, 2}},
 	{"net.pflow.stats", []_C_int{4, 34, 1}},
 	{"net.pipex.enable", []_C_int{4, 35, 1}},
-	{"vm.anonmin", []_C_int{2, 7}},
-	{"vm.loadavg", []_C_int{2, 2}},
-	{"vm.maxslp", []_C_int{2, 10}},
-	{"vm.nkmempages", []_C_int{2, 6}},
-	{"vm.psstrings", []_C_int{2, 3}},
-	{"vm.swapencrypt.enable", []_C_int{2, 5, 0}},
-	{"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}},
-	{"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}},
-	{"vm.uspace", []_C_int{2, 11}},
-	{"vm.uvmexp", []_C_int{2, 4}},
-	{"vm.vmmeter", []_C_int{2, 1}},
-	{"vm.vnodemin", []_C_int{2, 9}},
-	{"vm.vtextmin", []_C_int{2, 8}},
 }
diff --git a/src/syscall/zsysnum_openbsd_ppc64.go b/src/syscall/zsysnum_openbsd_ppc64.go
new file mode 100644
index 0000000..d92f1a1
--- /dev/null
+++ b/src/syscall/zsysnum_openbsd_ppc64.go
@@ -0,0 +1,230 @@
+// mksysnum_openbsd.pl
+// Code generated by the command above; DO NOT EDIT.
+
+package syscall
+
+const (
+	SYS_EXIT           = 1   // { void sys_exit(int rval); }
+	SYS_FORK           = 2   // { int sys_fork(void); }
+	SYS_READ           = 3   // { ssize_t sys_read(int fd, void *buf, size_t nbyte); }
+	SYS_WRITE          = 4   // { ssize_t sys_write(int fd, const void *buf, \
+	SYS_OPEN           = 5   // { int sys_open(const char *path, \
+	SYS_CLOSE          = 6   // { int sys_close(int fd); }
+	SYS_GETENTROPY     = 7   // { int sys_getentropy(void *buf, size_t nbyte); }
+	SYS___TFORK        = 8   // { int sys___tfork(const struct __tfork *param, \
+	SYS_LINK           = 9   // { int sys_link(const char *path, const char *link); }
+	SYS_UNLINK         = 10  // { int sys_unlink(const char *path); }
+	SYS_WAIT4          = 11  // { pid_t sys_wait4(pid_t pid, int *status, \
+	SYS_CHDIR          = 12  // { int sys_chdir(const char *path); }
+	SYS_FCHDIR         = 13  // { int sys_fchdir(int fd); }
+	SYS_MKNOD          = 14  // { int sys_mknod(const char *path, mode_t mode, \
+	SYS_CHMOD          = 15  // { int sys_chmod(const char *path, mode_t mode); }
+	SYS_CHOWN          = 16  // { int sys_chown(const char *path, uid_t uid, \
+	SYS_OBREAK         = 17  // { int sys_obreak(char *nsize); } break
+	SYS_GETDTABLECOUNT = 18  // { int sys_getdtablecount(void); }
+	SYS_GETRUSAGE      = 19  // { int sys_getrusage(int who, \
+	SYS_GETPID         = 20  // { pid_t sys_getpid(void); }
+	SYS_MOUNT          = 21  // { int sys_mount(const char *type, const char *path, \
+	SYS_UNMOUNT        = 22  // { int sys_unmount(const char *path, int flags); }
+	SYS_SETUID         = 23  // { int sys_setuid(uid_t uid); }
+	SYS_GETUID         = 24  // { uid_t sys_getuid(void); }
+	SYS_GETEUID        = 25  // { uid_t sys_geteuid(void); }
+	SYS_PTRACE         = 26  // { int sys_ptrace(int req, pid_t pid, caddr_t addr, \
+	SYS_RECVMSG        = 27  // { ssize_t sys_recvmsg(int s, struct msghdr *msg, \
+	SYS_SENDMSG        = 28  // { ssize_t sys_sendmsg(int s, \
+	SYS_RECVFROM       = 29  // { ssize_t sys_recvfrom(int s, void *buf, size_t len, \
+	SYS_ACCEPT         = 30  // { int sys_accept(int s, struct sockaddr *name, \
+	SYS_GETPEERNAME    = 31  // { int sys_getpeername(int fdes, struct sockaddr *asa, \
+	SYS_GETSOCKNAME    = 32  // { int sys_getsockname(int fdes, struct sockaddr *asa, \
+	SYS_ACCESS         = 33  // { int sys_access(const char *path, int amode); }
+	SYS_CHFLAGS        = 34  // { int sys_chflags(const char *path, u_int flags); }
+	SYS_FCHFLAGS       = 35  // { int sys_fchflags(int fd, u_int flags); }
+	SYS_SYNC           = 36  // { void sys_sync(void); }
+	SYS_MSYSCALL       = 37  // { int sys_msyscall(void *addr, size_t len); }
+	SYS_STAT           = 38  // { int sys_stat(const char *path, struct stat *ub); }
+	SYS_GETPPID        = 39  // { pid_t sys_getppid(void); }
+	SYS_LSTAT          = 40  // { int sys_lstat(const char *path, struct stat *ub); }
+	SYS_DUP            = 41  // { int sys_dup(int fd); }
+	SYS_FSTATAT        = 42  // { int sys_fstatat(int fd, const char *path, \
+	SYS_GETEGID        = 43  // { gid_t sys_getegid(void); }
+	SYS_PROFIL         = 44  // { int sys_profil(caddr_t samples, size_t size, \
+	SYS_KTRACE         = 45  // { int sys_ktrace(const char *fname, int ops, \
+	SYS_SIGACTION      = 46  // { int sys_sigaction(int signum, \
+	SYS_GETGID         = 47  // { gid_t sys_getgid(void); }
+	SYS_SIGPROCMASK    = 48  // { int sys_sigprocmask(int how, sigset_t mask); }
+	SYS_MMAP           = 49  // { void *sys_mmap(void *addr, size_t len, int prot, \
+	SYS_SETLOGIN       = 50  // { int sys_setlogin(const char *namebuf); }
+	SYS_ACCT           = 51  // { int sys_acct(const char *path); }
+	SYS_SIGPENDING     = 52  // { int sys_sigpending(void); }
+	SYS_FSTAT          = 53  // { int sys_fstat(int fd, struct stat *sb); }
+	SYS_IOCTL          = 54  // { int sys_ioctl(int fd, \
+	SYS_REBOOT         = 55  // { int sys_reboot(int opt); }
+	SYS_REVOKE         = 56  // { int sys_revoke(const char *path); }
+	SYS_SYMLINK        = 57  // { int sys_symlink(const char *path, \
+	SYS_READLINK       = 58  // { ssize_t sys_readlink(const char *path, \
+	SYS_EXECVE         = 59  // { int sys_execve(const char *path, \
+	SYS_UMASK          = 60  // { mode_t sys_umask(mode_t newmask); }
+	SYS_CHROOT         = 61  // { int sys_chroot(const char *path); }
+	SYS_GETFSSTAT      = 62  // { int sys_getfsstat(struct statfs *buf, size_t bufsize, \
+	SYS_STATFS         = 63  // { int sys_statfs(const char *path, \
+	SYS_FSTATFS        = 64  // { int sys_fstatfs(int fd, struct statfs *buf); }
+	SYS_FHSTATFS       = 65  // { int sys_fhstatfs(const fhandle_t *fhp, \
+	SYS_VFORK          = 66  // { int sys_vfork(void); }
+	SYS_GETTIMEOFDAY   = 67  // { int sys_gettimeofday(struct timeval *tp, \
+	SYS_SETTIMEOFDAY   = 68  // { int sys_settimeofday(const struct timeval *tv, \
+	SYS_SETITIMER      = 69  // { int sys_setitimer(int which, \
+	SYS_GETITIMER      = 70  // { int sys_getitimer(int which, \
+	SYS_SELECT         = 71  // { int sys_select(int nd, fd_set *in, fd_set *ou, \
+	SYS_KEVENT         = 72  // { int sys_kevent(int fd, \
+	SYS_MUNMAP         = 73  // { int sys_munmap(void *addr, size_t len); }
+	SYS_MPROTECT       = 74  // { int sys_mprotect(void *addr, size_t len, \
+	SYS_MADVISE        = 75  // { int sys_madvise(void *addr, size_t len, \
+	SYS_UTIMES         = 76  // { int sys_utimes(const char *path, \
+	SYS_FUTIMES        = 77  // { int sys_futimes(int fd, \
+	SYS_MQUERY         = 78  // { void *sys_mquery(void *addr, size_t len, int prot, \
+	SYS_GETGROUPS      = 79  // { int sys_getgroups(int gidsetsize, \
+	SYS_SETGROUPS      = 80  // { int sys_setgroups(int gidsetsize, \
+	SYS_GETPGRP        = 81  // { int sys_getpgrp(void); }
+	SYS_SETPGID        = 82  // { int sys_setpgid(pid_t pid, pid_t pgid); }
+	SYS_FUTEX          = 83  // { int sys_futex(uint32_t *f, int op, int val, \
+	SYS_UTIMENSAT      = 84  // { int sys_utimensat(int fd, const char *path, \
+	SYS_FUTIMENS       = 85  // { int sys_futimens(int fd, \
+	SYS_KBIND          = 86  // { int sys_kbind(const struct __kbind *param, \
+	SYS_CLOCK_GETTIME  = 87  // { int sys_clock_gettime(clockid_t clock_id, \
+	SYS_CLOCK_SETTIME  = 88  // { int sys_clock_settime(clockid_t clock_id, \
+	SYS_CLOCK_GETRES   = 89  // { int sys_clock_getres(clockid_t clock_id, \
+	SYS_DUP2           = 90  // { int sys_dup2(int from, int to); }
+	SYS_NANOSLEEP      = 91  // { int sys_nanosleep(const struct timespec *rqtp, \
+	SYS_FCNTL          = 92  // { int sys_fcntl(int fd, int cmd, ... void *arg); }
+	SYS_ACCEPT4        = 93  // { int sys_accept4(int s, struct sockaddr *name, \
+	SYS___THRSLEEP     = 94  // { int sys___thrsleep(const volatile void *ident, \
+	SYS_FSYNC          = 95  // { int sys_fsync(int fd); }
+	SYS_SETPRIORITY    = 96  // { int sys_setpriority(int which, id_t who, int prio); }
+	SYS_SOCKET         = 97  // { int sys_socket(int domain, int type, int protocol); }
+	SYS_CONNECT        = 98  // { int sys_connect(int s, const struct sockaddr *name, \
+	SYS_GETDENTS       = 99  // { int sys_getdents(int fd, void *buf, size_t buflen); }
+	SYS_GETPRIORITY    = 100 // { int sys_getpriority(int which, id_t who); }
+	SYS_PIPE2          = 101 // { int sys_pipe2(int *fdp, int flags); }
+	SYS_DUP3           = 102 // { int sys_dup3(int from, int to, int flags); }
+	SYS_SIGRETURN      = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); }
+	SYS_BIND           = 104 // { int sys_bind(int s, const struct sockaddr *name, \
+	SYS_SETSOCKOPT     = 105 // { int sys_setsockopt(int s, int level, int name, \
+	SYS_LISTEN         = 106 // { int sys_listen(int s, int backlog); }
+	SYS_CHFLAGSAT      = 107 // { int sys_chflagsat(int fd, const char *path, \
+	SYS_PLEDGE         = 108 // { int sys_pledge(const char *promises, \
+	SYS_PPOLL          = 109 // { int sys_ppoll(struct pollfd *fds, \
+	SYS_PSELECT        = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \
+	SYS_SIGSUSPEND     = 111 // { int sys_sigsuspend(int mask); }
+	SYS_SENDSYSLOG     = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, \
+	SYS_UNVEIL         = 114 // { int sys_unveil(const char *path, \
+	SYS___REALPATH     = 115 // { int sys___realpath(const char *pathname, \
+	SYS_RECVMMSG       = 116 // { int sys_recvmmsg(int s, struct mmsghdr *mmsg, \
+	SYS_SENDMMSG       = 117 // { int sys_sendmmsg(int s,  struct mmsghdr *mmsg,\
+	SYS_GETSOCKOPT     = 118 // { int sys_getsockopt(int s, int level, int name, \
+	SYS_THRKILL        = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); }
+	SYS_READV          = 120 // { ssize_t sys_readv(int fd, \
+	SYS_WRITEV         = 121 // { ssize_t sys_writev(int fd, \
+	SYS_KILL           = 122 // { int sys_kill(int pid, int signum); }
+	SYS_FCHOWN         = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); }
+	SYS_FCHMOD         = 124 // { int sys_fchmod(int fd, mode_t mode); }
+	SYS_SETREUID       = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); }
+	SYS_SETREGID       = 127 // { int sys_setregid(gid_t rgid, gid_t egid); }
+	SYS_RENAME         = 128 // { int sys_rename(const char *from, const char *to); }
+	SYS_FLOCK          = 131 // { int sys_flock(int fd, int how); }
+	SYS_MKFIFO         = 132 // { int sys_mkfifo(const char *path, mode_t mode); }
+	SYS_SENDTO         = 133 // { ssize_t sys_sendto(int s, const void *buf, \
+	SYS_SHUTDOWN       = 134 // { int sys_shutdown(int s, int how); }
+	SYS_SOCKETPAIR     = 135 // { int sys_socketpair(int domain, int type, \
+	SYS_MKDIR          = 136 // { int sys_mkdir(const char *path, mode_t mode); }
+	SYS_RMDIR          = 137 // { int sys_rmdir(const char *path); }
+	SYS_ADJTIME        = 140 // { int sys_adjtime(const struct timeval *delta, \
+	SYS_GETLOGIN_R     = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); }
+	SYS_SETSID         = 147 // { int sys_setsid(void); }
+	SYS_QUOTACTL       = 148 // { int sys_quotactl(const char *path, int cmd, \
+	SYS_YPCONNECT      = 150 // { int sys_ypconnect(int type); }
+	SYS_NFSSVC         = 155 // { int sys_nfssvc(int flag, void *argp); }
+	SYS_GETFH          = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); }
+	SYS___TMPFD        = 164 // { int sys___tmpfd(int flags); }
+	SYS_SYSARCH        = 165 // { int sys_sysarch(int op, void *parms); }
+	SYS_LSEEK          = 166 // { off_t sys_lseek(int fd, off_t offset, int whence); }
+	SYS_TRUNCATE       = 167 // { int sys_truncate(const char *path, off_t length); }
+	SYS_FTRUNCATE      = 168 // { int sys_ftruncate(int fd, off_t length); }
+	SYS_PREAD          = 169 // { ssize_t sys_pread(int fd, void *buf, \
+	SYS_PWRITE         = 170 // { ssize_t sys_pwrite(int fd, const void *buf, \
+	SYS_PREADV         = 171 // { ssize_t sys_preadv(int fd, \
+	SYS_PWRITEV        = 172 // { ssize_t sys_pwritev(int fd, \
+	SYS_PAD_PREAD      = 173 // { ssize_t sys_pad_pread(int fd, void *buf, \
+	SYS_PAD_PWRITE     = 174 // { ssize_t sys_pad_pwrite(int fd, const void *buf, \
+	SYS_SETGID         = 181 // { int sys_setgid(gid_t gid); }
+	SYS_SETEGID        = 182 // { int sys_setegid(gid_t egid); }
+	SYS_SETEUID        = 183 // { int sys_seteuid(uid_t euid); }
+	SYS_PATHCONF       = 191 // { long sys_pathconf(const char *path, int name); }
+	SYS_FPATHCONF      = 192 // { long sys_fpathconf(int fd, int name); }
+	SYS_SWAPCTL        = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); }
+	SYS_GETRLIMIT      = 194 // { int sys_getrlimit(int which, \
+	SYS_SETRLIMIT      = 195 // { int sys_setrlimit(int which, \
+	SYS_PAD_MMAP       = 197 // { void *sys_pad_mmap(void *addr, size_t len, int prot, \
+	SYS_PAD_LSEEK      = 199 // { off_t sys_pad_lseek(int fd, int pad, off_t offset, \
+	SYS_PAD_TRUNCATE   = 200 // { int sys_pad_truncate(const char *path, int pad, \
+	SYS_PAD_FTRUNCATE  = 201 // { int sys_pad_ftruncate(int fd, int pad, off_t length); }
+	SYS_SYSCTL         = 202 // { int sys_sysctl(const int *name, u_int namelen, \
+	SYS_MLOCK          = 203 // { int sys_mlock(const void *addr, size_t len); }
+	SYS_MUNLOCK        = 204 // { int sys_munlock(const void *addr, size_t len); }
+	SYS_GETPGID        = 207 // { pid_t sys_getpgid(pid_t pid); }
+	SYS_UTRACE         = 209 // { int sys_utrace(const char *label, const void *addr, \
+	SYS_SEMGET         = 221 // { int sys_semget(key_t key, int nsems, int semflg); }
+	SYS_MSGGET         = 225 // { int sys_msgget(key_t key, int msgflg); }
+	SYS_MSGSND         = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, \
+	SYS_MSGRCV         = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, \
+	SYS_SHMAT          = 228 // { void *sys_shmat(int shmid, const void *shmaddr, \
+	SYS_SHMDT          = 230 // { int sys_shmdt(const void *shmaddr); }
+	SYS_MINHERIT       = 250 // { int sys_minherit(void *addr, size_t len, \
+	SYS_POLL           = 252 // { int sys_poll(struct pollfd *fds, \
+	SYS_ISSETUGID      = 253 // { int sys_issetugid(void); }
+	SYS_LCHOWN         = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); }
+	SYS_GETSID         = 255 // { pid_t sys_getsid(pid_t pid); }
+	SYS_MSYNC          = 256 // { int sys_msync(void *addr, size_t len, int flags); }
+	SYS_PIPE           = 263 // { int sys_pipe(int *fdp); }
+	SYS_FHOPEN         = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); }
+	SYS_PAD_PREADV     = 267 // { ssize_t sys_pad_preadv(int fd, \
+	SYS_PAD_PWRITEV    = 268 // { ssize_t sys_pad_pwritev(int fd, \
+	SYS_KQUEUE         = 269 // { int sys_kqueue(void); }
+	SYS_MLOCKALL       = 271 // { int sys_mlockall(int flags); }
+	SYS_MUNLOCKALL     = 272 // { int sys_munlockall(void); }
+	SYS_GETRESUID      = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, \
+	SYS_SETRESUID      = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, \
+	SYS_GETRESGID      = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, \
+	SYS_SETRESGID      = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, \
+	SYS_PAD_MQUERY     = 286 // { void *sys_pad_mquery(void *addr, size_t len, \
+	SYS_CLOSEFROM      = 287 // { int sys_closefrom(int fd); }
+	SYS_SIGALTSTACK    = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, \
+	SYS_SHMGET         = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); }
+	SYS_SEMOP          = 290 // { int sys_semop(int semid, struct sembuf *sops, \
+	SYS_FHSTAT         = 294 // { int sys_fhstat(const fhandle_t *fhp, \
+	SYS___SEMCTL       = 295 // { int sys___semctl(int semid, int semnum, int cmd, \
+	SYS_SHMCTL         = 296 // { int sys_shmctl(int shmid, int cmd, \
+	SYS_MSGCTL         = 297 // { int sys_msgctl(int msqid, int cmd, \
+	SYS_SCHED_YIELD    = 298 // { int sys_sched_yield(void); }
+	SYS_GETTHRID       = 299 // { pid_t sys_getthrid(void); }
+	SYS___THRWAKEUP    = 301 // { int sys___thrwakeup(const volatile void *ident, \
+	SYS___THREXIT      = 302 // { void sys___threxit(pid_t *notdead); }
+	SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, \
+	SYS___GETCWD       = 304 // { int sys___getcwd(char *buf, size_t len); }
+	SYS_ADJFREQ        = 305 // { int sys_adjfreq(const int64_t *freq, \
+	SYS_SETRTABLE      = 310 // { int sys_setrtable(int rtableid); }
+	SYS_GETRTABLE      = 311 // { int sys_getrtable(void); }
+	SYS_FACCESSAT      = 313 // { int sys_faccessat(int fd, const char *path, \
+	SYS_FCHMODAT       = 314 // { int sys_fchmodat(int fd, const char *path, \
+	SYS_FCHOWNAT       = 315 // { int sys_fchownat(int fd, const char *path, \
+	SYS_LINKAT         = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, \
+	SYS_MKDIRAT        = 318 // { int sys_mkdirat(int fd, const char *path, \
+	SYS_MKFIFOAT       = 319 // { int sys_mkfifoat(int fd, const char *path, \
+	SYS_MKNODAT        = 320 // { int sys_mknodat(int fd, const char *path, \
+	SYS_OPENAT         = 321 // { int sys_openat(int fd, const char *path, int flags, \
+	SYS_READLINKAT     = 322 // { ssize_t sys_readlinkat(int fd, const char *path, \
+	SYS_RENAMEAT       = 323 // { int sys_renameat(int fromfd, const char *from, \
+	SYS_SYMLINKAT      = 324 // { int sys_symlinkat(const char *path, int fd, \
+	SYS_UNLINKAT       = 325 // { int sys_unlinkat(int fd, const char *path, \
+	SYS___SET_TCB      = 329 // { void sys___set_tcb(void *tcb); }
+	SYS___GET_TCB      = 330 // { void *sys___get_tcb(void); }
+)
diff --git a/src/syscall/zsysnum_openbsd_riscv64.go b/src/syscall/zsysnum_openbsd_riscv64.go
new file mode 100644
index 0000000..b09b2bd
--- /dev/null
+++ b/src/syscall/zsysnum_openbsd_riscv64.go
@@ -0,0 +1,228 @@
+// mksysnum_openbsd.pl
+// Code generated by the command above; DO NOT EDIT.
+
+package syscall
+
+const (
+	SYS_EXIT           = 1   // { void sys_exit(int rval); }
+	SYS_FORK           = 2   // { int sys_fork(void); }
+	SYS_READ           = 3   // { ssize_t sys_read(int fd, void *buf, size_t nbyte); }
+	SYS_WRITE          = 4   // { ssize_t sys_write(int fd, const void *buf, \
+	SYS_OPEN           = 5   // { int sys_open(const char *path, \
+	SYS_CLOSE          = 6   // { int sys_close(int fd); }
+	SYS_GETENTROPY     = 7   // { int sys_getentropy(void *buf, size_t nbyte); }
+	SYS___TFORK        = 8   // { int sys___tfork(const struct __tfork *param, \
+	SYS_LINK           = 9   // { int sys_link(const char *path, const char *link); }
+	SYS_UNLINK         = 10  // { int sys_unlink(const char *path); }
+	SYS_WAIT4          = 11  // { pid_t sys_wait4(pid_t pid, int *status, \
+	SYS_CHDIR          = 12  // { int sys_chdir(const char *path); }
+	SYS_FCHDIR         = 13  // { int sys_fchdir(int fd); }
+	SYS_MKNOD          = 14  // { int sys_mknod(const char *path, mode_t mode, \
+	SYS_CHMOD          = 15  // { int sys_chmod(const char *path, mode_t mode); }
+	SYS_CHOWN          = 16  // { int sys_chown(const char *path, uid_t uid, \
+	SYS_OBREAK         = 17  // { int sys_obreak(char *nsize); } break
+	SYS_GETDTABLECOUNT = 18  // { int sys_getdtablecount(void); }
+	SYS_GETRUSAGE      = 19  // { int sys_getrusage(int who, \
+	SYS_GETPID         = 20  // { pid_t sys_getpid(void); }
+	SYS_MOUNT          = 21  // { int sys_mount(const char *type, const char *path, \
+	SYS_UNMOUNT        = 22  // { int sys_unmount(const char *path, int flags); }
+	SYS_SETUID         = 23  // { int sys_setuid(uid_t uid); }
+	SYS_GETUID         = 24  // { uid_t sys_getuid(void); }
+	SYS_GETEUID        = 25  // { uid_t sys_geteuid(void); }
+	SYS_PTRACE         = 26  // { int sys_ptrace(int req, pid_t pid, caddr_t addr, \
+	SYS_RECVMSG        = 27  // { ssize_t sys_recvmsg(int s, struct msghdr *msg, \
+	SYS_SENDMSG        = 28  // { ssize_t sys_sendmsg(int s, \
+	SYS_RECVFROM       = 29  // { ssize_t sys_recvfrom(int s, void *buf, size_t len, \
+	SYS_ACCEPT         = 30  // { int sys_accept(int s, struct sockaddr *name, \
+	SYS_GETPEERNAME    = 31  // { int sys_getpeername(int fdes, struct sockaddr *asa, \
+	SYS_GETSOCKNAME    = 32  // { int sys_getsockname(int fdes, struct sockaddr *asa, \
+	SYS_ACCESS         = 33  // { int sys_access(const char *path, int amode); }
+	SYS_CHFLAGS        = 34  // { int sys_chflags(const char *path, u_int flags); }
+	SYS_FCHFLAGS       = 35  // { int sys_fchflags(int fd, u_int flags); }
+	SYS_SYNC           = 36  // { void sys_sync(void); }
+	SYS_MSYSCALL       = 37  // { int sys_msyscall(void *addr, size_t len); }
+	SYS_STAT           = 38  // { int sys_stat(const char *path, struct stat *ub); }
+	SYS_GETPPID        = 39  // { pid_t sys_getppid(void); }
+	SYS_LSTAT          = 40  // { int sys_lstat(const char *path, struct stat *ub); }
+	SYS_DUP            = 41  // { int sys_dup(int fd); }
+	SYS_FSTATAT        = 42  // { int sys_fstatat(int fd, const char *path, \
+	SYS_GETEGID        = 43  // { gid_t sys_getegid(void); }
+	SYS_PROFIL         = 44  // { int sys_profil(caddr_t samples, size_t size, \
+	SYS_KTRACE         = 45  // { int sys_ktrace(const char *fname, int ops, \
+	SYS_SIGACTION      = 46  // { int sys_sigaction(int signum, \
+	SYS_GETGID         = 47  // { gid_t sys_getgid(void); }
+	SYS_SIGPROCMASK    = 48  // { int sys_sigprocmask(int how, sigset_t mask); }
+	SYS_MMAP           = 49  // { void *sys_mmap(void *addr, size_t len, int prot, \
+	SYS_SETLOGIN       = 50  // { int sys_setlogin(const char *namebuf); }
+	SYS_ACCT           = 51  // { int sys_acct(const char *path); }
+	SYS_SIGPENDING     = 52  // { int sys_sigpending(void); }
+	SYS_FSTAT          = 53  // { int sys_fstat(int fd, struct stat *sb); }
+	SYS_IOCTL          = 54  // { int sys_ioctl(int fd, \
+	SYS_REBOOT         = 55  // { int sys_reboot(int opt); }
+	SYS_REVOKE         = 56  // { int sys_revoke(const char *path); }
+	SYS_SYMLINK        = 57  // { int sys_symlink(const char *path, \
+	SYS_READLINK       = 58  // { ssize_t sys_readlink(const char *path, \
+	SYS_EXECVE         = 59  // { int sys_execve(const char *path, \
+	SYS_UMASK          = 60  // { mode_t sys_umask(mode_t newmask); }
+	SYS_CHROOT         = 61  // { int sys_chroot(const char *path); }
+	SYS_GETFSSTAT      = 62  // { int sys_getfsstat(struct statfs *buf, size_t bufsize, \
+	SYS_STATFS         = 63  // { int sys_statfs(const char *path, \
+	SYS_FSTATFS        = 64  // { int sys_fstatfs(int fd, struct statfs *buf); }
+	SYS_FHSTATFS       = 65  // { int sys_fhstatfs(const fhandle_t *fhp, \
+	SYS_VFORK          = 66  // { int sys_vfork(void); }
+	SYS_GETTIMEOFDAY   = 67  // { int sys_gettimeofday(struct timeval *tp, \
+	SYS_SETTIMEOFDAY   = 68  // { int sys_settimeofday(const struct timeval *tv, \
+	SYS_SETITIMER      = 69  // { int sys_setitimer(int which, \
+	SYS_GETITIMER      = 70  // { int sys_getitimer(int which, \
+	SYS_SELECT         = 71  // { int sys_select(int nd, fd_set *in, fd_set *ou, \
+	SYS_KEVENT         = 72  // { int sys_kevent(int fd, \
+	SYS_MUNMAP         = 73  // { int sys_munmap(void *addr, size_t len); }
+	SYS_MPROTECT       = 74  // { int sys_mprotect(void *addr, size_t len, \
+	SYS_MADVISE        = 75  // { int sys_madvise(void *addr, size_t len, \
+	SYS_UTIMES         = 76  // { int sys_utimes(const char *path, \
+	SYS_FUTIMES        = 77  // { int sys_futimes(int fd, \
+	SYS_MQUERY         = 78  // { void *sys_mquery(void *addr, size_t len, int prot, \
+	SYS_GETGROUPS      = 79  // { int sys_getgroups(int gidsetsize, \
+	SYS_SETGROUPS      = 80  // { int sys_setgroups(int gidsetsize, \
+	SYS_GETPGRP        = 81  // { int sys_getpgrp(void); }
+	SYS_SETPGID        = 82  // { int sys_setpgid(pid_t pid, pid_t pgid); }
+	SYS_FUTEX          = 83  // { int sys_futex(uint32_t *f, int op, int val, \
+	SYS_UTIMENSAT      = 84  // { int sys_utimensat(int fd, const char *path, \
+	SYS_FUTIMENS       = 85  // { int sys_futimens(int fd, \
+	SYS_KBIND          = 86  // { int sys_kbind(const struct __kbind *param, \
+	SYS_CLOCK_GETTIME  = 87  // { int sys_clock_gettime(clockid_t clock_id, \
+	SYS_CLOCK_SETTIME  = 88  // { int sys_clock_settime(clockid_t clock_id, \
+	SYS_CLOCK_GETRES   = 89  // { int sys_clock_getres(clockid_t clock_id, \
+	SYS_DUP2           = 90  // { int sys_dup2(int from, int to); }
+	SYS_NANOSLEEP      = 91  // { int sys_nanosleep(const struct timespec *rqtp, \
+	SYS_FCNTL          = 92  // { int sys_fcntl(int fd, int cmd, ... void *arg); }
+	SYS_ACCEPT4        = 93  // { int sys_accept4(int s, struct sockaddr *name, \
+	SYS___THRSLEEP     = 94  // { int sys___thrsleep(const volatile void *ident, \
+	SYS_FSYNC          = 95  // { int sys_fsync(int fd); }
+	SYS_SETPRIORITY    = 96  // { int sys_setpriority(int which, id_t who, int prio); }
+	SYS_SOCKET         = 97  // { int sys_socket(int domain, int type, int protocol); }
+	SYS_CONNECT        = 98  // { int sys_connect(int s, const struct sockaddr *name, \
+	SYS_GETDENTS       = 99  // { int sys_getdents(int fd, void *buf, size_t buflen); }
+	SYS_GETPRIORITY    = 100 // { int sys_getpriority(int which, id_t who); }
+	SYS_PIPE2          = 101 // { int sys_pipe2(int *fdp, int flags); }
+	SYS_DUP3           = 102 // { int sys_dup3(int from, int to, int flags); }
+	SYS_SIGRETURN      = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); }
+	SYS_BIND           = 104 // { int sys_bind(int s, const struct sockaddr *name, \
+	SYS_SETSOCKOPT     = 105 // { int sys_setsockopt(int s, int level, int name, \
+	SYS_LISTEN         = 106 // { int sys_listen(int s, int backlog); }
+	SYS_CHFLAGSAT      = 107 // { int sys_chflagsat(int fd, const char *path, \
+	SYS_PLEDGE         = 108 // { int sys_pledge(const char *promises, \
+	SYS_PPOLL          = 109 // { int sys_ppoll(struct pollfd *fds, \
+	SYS_PSELECT        = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \
+	SYS_SIGSUSPEND     = 111 // { int sys_sigsuspend(int mask); }
+	SYS_SENDSYSLOG     = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, \
+	SYS_UNVEIL         = 114 // { int sys_unveil(const char *path, \
+	SYS___REALPATH     = 115 // { int sys___realpath(const char *pathname, \
+	SYS_GETSOCKOPT     = 118 // { int sys_getsockopt(int s, int level, int name, \
+	SYS_THRKILL        = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); }
+	SYS_READV          = 120 // { ssize_t sys_readv(int fd, \
+	SYS_WRITEV         = 121 // { ssize_t sys_writev(int fd, \
+	SYS_KILL           = 122 // { int sys_kill(int pid, int signum); }
+	SYS_FCHOWN         = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); }
+	SYS_FCHMOD         = 124 // { int sys_fchmod(int fd, mode_t mode); }
+	SYS_SETREUID       = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); }
+	SYS_SETREGID       = 127 // { int sys_setregid(gid_t rgid, gid_t egid); }
+	SYS_RENAME         = 128 // { int sys_rename(const char *from, const char *to); }
+	SYS_FLOCK          = 131 // { int sys_flock(int fd, int how); }
+	SYS_MKFIFO         = 132 // { int sys_mkfifo(const char *path, mode_t mode); }
+	SYS_SENDTO         = 133 // { ssize_t sys_sendto(int s, const void *buf, \
+	SYS_SHUTDOWN       = 134 // { int sys_shutdown(int s, int how); }
+	SYS_SOCKETPAIR     = 135 // { int sys_socketpair(int domain, int type, \
+	SYS_MKDIR          = 136 // { int sys_mkdir(const char *path, mode_t mode); }
+	SYS_RMDIR          = 137 // { int sys_rmdir(const char *path); }
+	SYS_ADJTIME        = 140 // { int sys_adjtime(const struct timeval *delta, \
+	SYS_GETLOGIN_R     = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); }
+	SYS_SETSID         = 147 // { int sys_setsid(void); }
+	SYS_QUOTACTL       = 148 // { int sys_quotactl(const char *path, int cmd, \
+	SYS_YPCONNECT      = 150 // { int sys_ypconnect(int type); }
+	SYS_NFSSVC         = 155 // { int sys_nfssvc(int flag, void *argp); }
+	SYS_GETFH          = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); }
+	SYS___TMPFD        = 164 // { int sys___tmpfd(int flags); }
+	SYS_SYSARCH        = 165 // { int sys_sysarch(int op, void *parms); }
+	SYS_LSEEK          = 166 // { off_t sys_lseek(int fd, off_t offset, int whence); }
+	SYS_TRUNCATE       = 167 // { int sys_truncate(const char *path, off_t length); }
+	SYS_FTRUNCATE      = 168 // { int sys_ftruncate(int fd, off_t length); }
+	SYS_PREAD          = 169 // { ssize_t sys_pread(int fd, void *buf, \
+	SYS_PWRITE         = 170 // { ssize_t sys_pwrite(int fd, const void *buf, \
+	SYS_PREADV         = 171 // { ssize_t sys_preadv(int fd, \
+	SYS_PWRITEV        = 172 // { ssize_t sys_pwritev(int fd, \
+	SYS_PAD_PREAD      = 173 // { ssize_t sys_pad_pread(int fd, void *buf, \
+	SYS_PAD_PWRITE     = 174 // { ssize_t sys_pad_pwrite(int fd, const void *buf, \
+	SYS_SETGID         = 181 // { int sys_setgid(gid_t gid); }
+	SYS_SETEGID        = 182 // { int sys_setegid(gid_t egid); }
+	SYS_SETEUID        = 183 // { int sys_seteuid(uid_t euid); }
+	SYS_PATHCONF       = 191 // { long sys_pathconf(const char *path, int name); }
+	SYS_FPATHCONF      = 192 // { long sys_fpathconf(int fd, int name); }
+	SYS_SWAPCTL        = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); }
+	SYS_GETRLIMIT      = 194 // { int sys_getrlimit(int which, \
+	SYS_SETRLIMIT      = 195 // { int sys_setrlimit(int which, \
+	SYS_PAD_MMAP       = 197 // { void *sys_pad_mmap(void *addr, size_t len, int prot, \
+	SYS_PAD_LSEEK      = 199 // { off_t sys_pad_lseek(int fd, int pad, off_t offset, \
+	SYS_PAD_TRUNCATE   = 200 // { int sys_pad_truncate(const char *path, int pad, \
+	SYS_PAD_FTRUNCATE  = 201 // { int sys_pad_ftruncate(int fd, int pad, off_t length); }
+	SYS_SYSCTL         = 202 // { int sys_sysctl(const int *name, u_int namelen, \
+	SYS_MLOCK          = 203 // { int sys_mlock(const void *addr, size_t len); }
+	SYS_MUNLOCK        = 204 // { int sys_munlock(const void *addr, size_t len); }
+	SYS_GETPGID        = 207 // { pid_t sys_getpgid(pid_t pid); }
+	SYS_UTRACE         = 209 // { int sys_utrace(const char *label, const void *addr, \
+	SYS_SEMGET         = 221 // { int sys_semget(key_t key, int nsems, int semflg); }
+	SYS_MSGGET         = 225 // { int sys_msgget(key_t key, int msgflg); }
+	SYS_MSGSND         = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, \
+	SYS_MSGRCV         = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, \
+	SYS_SHMAT          = 228 // { void *sys_shmat(int shmid, const void *shmaddr, \
+	SYS_SHMDT          = 230 // { int sys_shmdt(const void *shmaddr); }
+	SYS_MINHERIT       = 250 // { int sys_minherit(void *addr, size_t len, \
+	SYS_POLL           = 252 // { int sys_poll(struct pollfd *fds, \
+	SYS_ISSETUGID      = 253 // { int sys_issetugid(void); }
+	SYS_LCHOWN         = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); }
+	SYS_GETSID         = 255 // { pid_t sys_getsid(pid_t pid); }
+	SYS_MSYNC          = 256 // { int sys_msync(void *addr, size_t len, int flags); }
+	SYS_PIPE           = 263 // { int sys_pipe(int *fdp); }
+	SYS_FHOPEN         = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); }
+	SYS_PAD_PREADV     = 267 // { ssize_t sys_pad_preadv(int fd, \
+	SYS_PAD_PWRITEV    = 268 // { ssize_t sys_pad_pwritev(int fd, \
+	SYS_KQUEUE         = 269 // { int sys_kqueue(void); }
+	SYS_MLOCKALL       = 271 // { int sys_mlockall(int flags); }
+	SYS_MUNLOCKALL     = 272 // { int sys_munlockall(void); }
+	SYS_GETRESUID      = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, \
+	SYS_SETRESUID      = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, \
+	SYS_GETRESGID      = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, \
+	SYS_SETRESGID      = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, \
+	SYS_PAD_MQUERY     = 286 // { void *sys_pad_mquery(void *addr, size_t len, \
+	SYS_CLOSEFROM      = 287 // { int sys_closefrom(int fd); }
+	SYS_SIGALTSTACK    = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, \
+	SYS_SHMGET         = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); }
+	SYS_SEMOP          = 290 // { int sys_semop(int semid, struct sembuf *sops, \
+	SYS_FHSTAT         = 294 // { int sys_fhstat(const fhandle_t *fhp, \
+	SYS___SEMCTL       = 295 // { int sys___semctl(int semid, int semnum, int cmd, \
+	SYS_SHMCTL         = 296 // { int sys_shmctl(int shmid, int cmd, \
+	SYS_MSGCTL         = 297 // { int sys_msgctl(int msqid, int cmd, \
+	SYS_SCHED_YIELD    = 298 // { int sys_sched_yield(void); }
+	SYS_GETTHRID       = 299 // { pid_t sys_getthrid(void); }
+	SYS___THRWAKEUP    = 301 // { int sys___thrwakeup(const volatile void *ident, \
+	SYS___THREXIT      = 302 // { void sys___threxit(pid_t *notdead); }
+	SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, \
+	SYS___GETCWD       = 304 // { int sys___getcwd(char *buf, size_t len); }
+	SYS_ADJFREQ        = 305 // { int sys_adjfreq(const int64_t *freq, \
+	SYS_SETRTABLE      = 310 // { int sys_setrtable(int rtableid); }
+	SYS_GETRTABLE      = 311 // { int sys_getrtable(void); }
+	SYS_FACCESSAT      = 313 // { int sys_faccessat(int fd, const char *path, \
+	SYS_FCHMODAT       = 314 // { int sys_fchmodat(int fd, const char *path, \
+	SYS_FCHOWNAT       = 315 // { int sys_fchownat(int fd, const char *path, \
+	SYS_LINKAT         = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, \
+	SYS_MKDIRAT        = 318 // { int sys_mkdirat(int fd, const char *path, \
+	SYS_MKFIFOAT       = 319 // { int sys_mkfifoat(int fd, const char *path, \
+	SYS_MKNODAT        = 320 // { int sys_mknodat(int fd, const char *path, \
+	SYS_OPENAT         = 321 // { int sys_openat(int fd, const char *path, int flags, \
+	SYS_READLINKAT     = 322 // { ssize_t sys_readlinkat(int fd, const char *path, \
+	SYS_RENAMEAT       = 323 // { int sys_renameat(int fromfd, const char *from, \
+	SYS_SYMLINKAT      = 324 // { int sys_symlinkat(const char *path, int fd, \
+	SYS_UNLINKAT       = 325 // { int sys_unlinkat(int fd, const char *path, \
+	SYS___SET_TCB      = 329 // { void sys___set_tcb(void *tcb); }
+	SYS___GET_TCB      = 330 // { void *sys___get_tcb(void); }
+)
diff --git a/src/syscall/ztypes_linux_386.go b/src/syscall/ztypes_linux_386.go
index a45511e..79a46a4 100644
--- a/src/syscall/ztypes_linux_386.go
+++ b/src/syscall/ztypes_linux_386.go
@@ -577,6 +577,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_amd64.go b/src/syscall/ztypes_linux_amd64.go
index 1bab13b..3d223fe 100644
--- a/src/syscall/ztypes_linux_amd64.go
+++ b/src/syscall/ztypes_linux_amd64.go
@@ -595,6 +595,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_arm.go b/src/syscall/ztypes_linux_arm.go
index a4d61bd..9db1142 100644
--- a/src/syscall/ztypes_linux_arm.go
+++ b/src/syscall/ztypes_linux_arm.go
@@ -566,6 +566,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_arm64.go b/src/syscall/ztypes_linux_arm64.go
index 1e469c3..996950f 100644
--- a/src/syscall/ztypes_linux_arm64.go
+++ b/src/syscall/ztypes_linux_arm64.go
@@ -574,6 +574,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_mips.go b/src/syscall/ztypes_linux_mips.go
index 621ef2d..4ce8497 100644
--- a/src/syscall/ztypes_linux_mips.go
+++ b/src/syscall/ztypes_linux_mips.go
@@ -572,6 +572,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_mips64.go b/src/syscall/ztypes_linux_mips64.go
index 75a5bc4..de39e73 100644
--- a/src/syscall/ztypes_linux_mips64.go
+++ b/src/syscall/ztypes_linux_mips64.go
@@ -579,6 +579,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_mips64le.go b/src/syscall/ztypes_linux_mips64le.go
index 75a5bc4..de39e73 100644
--- a/src/syscall/ztypes_linux_mips64le.go
+++ b/src/syscall/ztypes_linux_mips64le.go
@@ -579,6 +579,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_mipsle.go b/src/syscall/ztypes_linux_mipsle.go
index 621ef2d..4ce8497 100644
--- a/src/syscall/ztypes_linux_mipsle.go
+++ b/src/syscall/ztypes_linux_mipsle.go
@@ -572,6 +572,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_ppc64.go b/src/syscall/ztypes_linux_ppc64.go
index c830cee..717e273 100644
--- a/src/syscall/ztypes_linux_ppc64.go
+++ b/src/syscall/ztypes_linux_ppc64.go
@@ -584,6 +584,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_ppc64le.go b/src/syscall/ztypes_linux_ppc64le.go
index 770ddc9..177c1f1 100644
--- a/src/syscall/ztypes_linux_ppc64le.go
+++ b/src/syscall/ztypes_linux_ppc64le.go
@@ -584,6 +584,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_riscv64.go b/src/syscall/ztypes_linux_riscv64.go
index f6b9ced..a6c4d78 100644
--- a/src/syscall/ztypes_linux_riscv64.go
+++ b/src/syscall/ztypes_linux_riscv64.go
@@ -599,6 +599,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
diff --git a/src/syscall/ztypes_linux_s390x.go b/src/syscall/ztypes_linux_s390x.go
index b67877f..5c5a714 100644
--- a/src/syscall/ztypes_linux_s390x.go
+++ b/src/syscall/ztypes_linux_s390x.go
@@ -598,6 +598,7 @@
 	_AT_REMOVEDIR        = 0x200
 	_AT_SYMLINK_NOFOLLOW = 0x100
 	_AT_EACCESS          = 0x200
+	_AT_EMPTY_PATH       = 0x1000
 )
 
 type pollFd struct {
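Editorial aside, not part of the patch: the `_AT_EMPTY_PATH = 0x1000` constant added to each of the linux ztypes files above corresponds to the kernel's AT_EMPTY_PATH flag, which lets the *at family of syscalls operate on the open descriptor itself when the path argument is empty. A minimal sketch of that semantics using the separately maintained golang.org/x/sys/unix package (an assumption here, not something this patch touches) on a Linux target:

// Illustrative sketch only; assumes golang.org/x/sys/unix and GOOS=linux.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	dir, err := os.Open(".")
	if err != nil {
		panic(err)
	}
	defer dir.Close()

	var st unix.Stat_t
	// With AT_EMPTY_PATH (0x1000) and an empty path string, fstatat(2)
	// stats the open descriptor itself instead of resolving a path
	// relative to it.
	if err := unix.Fstatat(int(dir.Fd()), "", &st, unix.AT_EMPTY_PATH); err != nil {
		panic(err)
	}
	fmt.Printf("mode %o, size %d\n", st.Mode, st.Size)
}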
diff --git a/src/syscall/ztypes_openbsd_ppc64.go b/src/syscall/ztypes_openbsd_ppc64.go
new file mode 100644
index 0000000..3bc5a84
--- /dev/null
+++ b/src/syscall/ztypes_openbsd_ppc64.go
@@ -0,0 +1,446 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs -- -fsigned-char types_openbsd.go
+
+package syscall
+
+const (
+	sizeofPtr      = 0x8
+	sizeofShort    = 0x2
+	sizeofInt      = 0x4
+	sizeofLong     = 0x8
+	sizeofLongLong = 0x8
+)
+
+type (
+	_C_short     int16
+	_C_int       int32
+	_C_long      int64
+	_C_long_long int64
+)
+
+type Timespec struct {
+	Sec  int64
+	Nsec int64
+}
+
+type Timeval struct {
+	Sec  int64
+	Usec int64
+}
+
+type Rusage struct {
+	Utime    Timeval
+	Stime    Timeval
+	Maxrss   int64
+	Ixrss    int64
+	Idrss    int64
+	Isrss    int64
+	Minflt   int64
+	Majflt   int64
+	Nswap    int64
+	Inblock  int64
+	Oublock  int64
+	Msgsnd   int64
+	Msgrcv   int64
+	Nsignals int64
+	Nvcsw    int64
+	Nivcsw   int64
+}
+
+type Rlimit struct {
+	Cur uint64
+	Max uint64
+}
+
+type _Gid_t uint32
+
+const (
+	S_IFMT   = 0xf000
+	S_IFIFO  = 0x1000
+	S_IFCHR  = 0x2000
+	S_IFDIR  = 0x4000
+	S_IFBLK  = 0x6000
+	S_IFREG  = 0x8000
+	S_IFLNK  = 0xa000
+	S_IFSOCK = 0xc000
+	S_ISUID  = 0x800
+	S_ISGID  = 0x400
+	S_ISVTX  = 0x200
+	S_IRUSR  = 0x100
+	S_IWUSR  = 0x80
+	S_IXUSR  = 0x40
+	S_IRWXG  = 0x38
+	S_IRWXO  = 0x7
+)
+
+type Stat_t struct {
+	Mode           uint32
+	Dev            int32
+	Ino            uint64
+	Nlink          uint32
+	Uid            uint32
+	Gid            uint32
+	Rdev           int32
+	Atim           Timespec
+	Mtim           Timespec
+	Ctim           Timespec
+	Size           int64
+	Blocks         int64
+	Blksize        int32
+	Flags          uint32
+	Gen            uint32
+	X__st_birthtim Timespec
+}
+
+type Statfs_t struct {
+	F_flags       uint32
+	F_bsize       uint32
+	F_iosize      uint32
+	F_blocks      uint64
+	F_bfree       uint64
+	F_bavail      int64
+	F_files       uint64
+	F_ffree       uint64
+	F_favail      int64
+	F_syncwrites  uint64
+	F_syncreads   uint64
+	F_asyncwrites uint64
+	F_asyncreads  uint64
+	F_fsid        Fsid
+	F_namemax     uint32
+	F_owner       uint32
+	F_ctime       uint64
+	F_fstypename  [16]int8
+	F_mntonname   [90]int8
+	F_mntfromname [90]int8
+	F_mntfromspec [90]int8
+	Pad_cgo_0     [2]byte
+	Mount_info    [160]byte
+}
+
+type Flock_t struct {
+	Start  int64
+	Len    int64
+	Pid    int32
+	Type   int16
+	Whence int16
+}
+
+type Dirent struct {
+	Fileno       uint64
+	Off          int64
+	Reclen       uint16
+	Type         uint8
+	Namlen       uint8
+	X__d_padding [4]uint8
+	Name         [256]int8
+}
+
+type Fsid struct {
+	Val [2]int32
+}
+
+const (
+	pathMax = 0x400
+)
+
+type RawSockaddrInet4 struct {
+	Len    uint8
+	Family uint8
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]int8
+}
+
+type RawSockaddrInet6 struct {
+	Len      uint8
+	Family   uint8
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+	Len    uint8
+	Family uint8
+	Path   [104]int8
+}
+
+type RawSockaddrDatalink struct {
+	Len    uint8
+	Family uint8
+	Index  uint16
+	Type   uint8
+	Nlen   uint8
+	Alen   uint8
+	Slen   uint8
+	Data   [24]int8
+}
+
+type RawSockaddr struct {
+	Len    uint8
+	Family uint8
+	Data   [14]int8
+}
+
+type RawSockaddrAny struct {
+	Addr RawSockaddr
+	Pad  [92]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+	Onoff  int32
+	Linger int32
+}
+
+type Iovec struct {
+	Base *byte
+	Len  uint64
+}
+
+type IPMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type IPv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type Msghdr struct {
+	Name       *byte
+	Namelen    uint32
+	Iov        *Iovec
+	Iovlen     uint32
+	Control    *byte
+	Controllen uint32
+	Flags      int32
+}
+
+type Cmsghdr struct {
+	Len   uint32
+	Level int32
+	Type  int32
+}
+
+type Inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+	Addr RawSockaddrInet6
+	Mtu  uint32
+}
+
+type ICMPv6Filter struct {
+	Filt [8]uint32
+}
+
+const (
+	SizeofSockaddrInet4    = 0x10
+	SizeofSockaddrInet6    = 0x1c
+	SizeofSockaddrAny      = 0x6c
+	SizeofSockaddrUnix     = 0x6a
+	SizeofSockaddrDatalink = 0x20
+	SizeofLinger           = 0x8
+	SizeofIPMreq           = 0x8
+	SizeofIPv6Mreq         = 0x14
+	SizeofMsghdr           = 0x30
+	SizeofCmsghdr          = 0xc
+	SizeofInet6Pktinfo     = 0x14
+	SizeofIPv6MTUInfo      = 0x20
+	SizeofICMPv6Filter     = 0x20
+)
+
+const (
+	PTRACE_TRACEME = 0x0
+	PTRACE_CONT    = 0x7
+	PTRACE_KILL    = 0x8
+)
+
+type Kevent_t struct {
+	Ident  uint64
+	Filter int16
+	Flags  uint16
+	Fflags uint32
+	Data   int64
+	Udata  *byte
+}
+
+type FdSet struct {
+	Bits [32]uint32
+}
+
+const (
+	SizeofIfMsghdr         = 0xa8
+	SizeofIfData           = 0x90
+	SizeofIfaMsghdr        = 0x18
+	SizeofIfAnnounceMsghdr = 0x1a
+	SizeofRtMsghdr         = 0x60
+	SizeofRtMetrics        = 0x38
+)
+
+type IfMsghdr struct {
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Hdrlen  uint16
+	Index   uint16
+	Tableid uint16
+	Pad1    uint8
+	Pad2    uint8
+	Addrs   int32
+	Flags   int32
+	Xflags  int32
+	Data    IfData
+}
+
+type IfData struct {
+	Type         uint8
+	Addrlen      uint8
+	Hdrlen       uint8
+	Link_state   uint8
+	Mtu          uint32
+	Metric       uint32
+	Rdomain      uint32
+	Baudrate     uint64
+	Ipackets     uint64
+	Ierrors      uint64
+	Opackets     uint64
+	Oerrors      uint64
+	Collisions   uint64
+	Ibytes       uint64
+	Obytes       uint64
+	Imcasts      uint64
+	Omcasts      uint64
+	Iqdrops      uint64
+	Oqdrops      uint64
+	Noproto      uint64
+	Capabilities uint32
+	Lastchange   Timeval
+}
+
+type IfaMsghdr struct {
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Hdrlen  uint16
+	Index   uint16
+	Tableid uint16
+	Pad1    uint8
+	Pad2    uint8
+	Addrs   int32
+	Flags   int32
+	Metric  int32
+}
+
+type IfAnnounceMsghdr struct {
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Hdrlen  uint16
+	Index   uint16
+	What    uint16
+	Name    [16]int8
+}
+
+type RtMsghdr struct {
+	Msglen   uint16
+	Version  uint8
+	Type     uint8
+	Hdrlen   uint16
+	Index    uint16
+	Tableid  uint16
+	Priority uint8
+	Mpls     uint8
+	Addrs    int32
+	Flags    int32
+	Fmask    int32
+	Pid      int32
+	Seq      int32
+	Errno    int32
+	Inits    uint32
+	Rmx      RtMetrics
+}
+
+type RtMetrics struct {
+	Pksent   uint64
+	Expire   int64
+	Locks    uint32
+	Mtu      uint32
+	Refcnt   uint32
+	Hopcount uint32
+	Recvpipe uint32
+	Sendpipe uint32
+	Ssthresh uint32
+	Rtt      uint32
+	Rttvar   uint32
+	Pad      uint32
+}
+
+type Mclpool struct{}
+
+const (
+	SizeofBpfVersion = 0x4
+	SizeofBpfStat    = 0x8
+	SizeofBpfProgram = 0x10
+	SizeofBpfInsn    = 0x8
+	SizeofBpfHdr     = 0x18
+)
+
+type BpfVersion struct {
+	Major uint16
+	Minor uint16
+}
+
+type BpfStat struct {
+	Recv uint32
+	Drop uint32
+}
+
+type BpfProgram struct {
+	Len   uint32
+	Insns *BpfInsn
+}
+
+type BpfInsn struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
+
+type BpfHdr struct {
+	Tstamp  BpfTimeval
+	Caplen  uint32
+	Datalen uint32
+	Hdrlen  uint16
+	Ifidx   uint16
+	Flowid  uint16
+	Flags   uint8
+	Drops   uint8
+}
+
+type BpfTimeval struct {
+	Sec  uint32
+	Usec uint32
+}
+
+const (
+	_AT_FDCWD = -0x64
+)
+
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]uint8
+	Ispeed int32
+	Ospeed int32
+}
diff --git a/src/syscall/ztypes_openbsd_riscv64.go b/src/syscall/ztypes_openbsd_riscv64.go
new file mode 100644
index 0000000..3bc5a84
--- /dev/null
+++ b/src/syscall/ztypes_openbsd_riscv64.go
@@ -0,0 +1,446 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs -- -fsigned-char types_openbsd.go
+
+package syscall
+
+const (
+	sizeofPtr      = 0x8
+	sizeofShort    = 0x2
+	sizeofInt      = 0x4
+	sizeofLong     = 0x8
+	sizeofLongLong = 0x8
+)
+
+type (
+	_C_short     int16
+	_C_int       int32
+	_C_long      int64
+	_C_long_long int64
+)
+
+type Timespec struct {
+	Sec  int64
+	Nsec int64
+}
+
+type Timeval struct {
+	Sec  int64
+	Usec int64
+}
+
+type Rusage struct {
+	Utime    Timeval
+	Stime    Timeval
+	Maxrss   int64
+	Ixrss    int64
+	Idrss    int64
+	Isrss    int64
+	Minflt   int64
+	Majflt   int64
+	Nswap    int64
+	Inblock  int64
+	Oublock  int64
+	Msgsnd   int64
+	Msgrcv   int64
+	Nsignals int64
+	Nvcsw    int64
+	Nivcsw   int64
+}
+
+type Rlimit struct {
+	Cur uint64
+	Max uint64
+}
+
+type _Gid_t uint32
+
+const (
+	S_IFMT   = 0xf000
+	S_IFIFO  = 0x1000
+	S_IFCHR  = 0x2000
+	S_IFDIR  = 0x4000
+	S_IFBLK  = 0x6000
+	S_IFREG  = 0x8000
+	S_IFLNK  = 0xa000
+	S_IFSOCK = 0xc000
+	S_ISUID  = 0x800
+	S_ISGID  = 0x400
+	S_ISVTX  = 0x200
+	S_IRUSR  = 0x100
+	S_IWUSR  = 0x80
+	S_IXUSR  = 0x40
+	S_IRWXG  = 0x38
+	S_IRWXO  = 0x7
+)
+
+type Stat_t struct {
+	Mode           uint32
+	Dev            int32
+	Ino            uint64
+	Nlink          uint32
+	Uid            uint32
+	Gid            uint32
+	Rdev           int32
+	Atim           Timespec
+	Mtim           Timespec
+	Ctim           Timespec
+	Size           int64
+	Blocks         int64
+	Blksize        int32
+	Flags          uint32
+	Gen            uint32
+	X__st_birthtim Timespec
+}
+
+type Statfs_t struct {
+	F_flags       uint32
+	F_bsize       uint32
+	F_iosize      uint32
+	F_blocks      uint64
+	F_bfree       uint64
+	F_bavail      int64
+	F_files       uint64
+	F_ffree       uint64
+	F_favail      int64
+	F_syncwrites  uint64
+	F_syncreads   uint64
+	F_asyncwrites uint64
+	F_asyncreads  uint64
+	F_fsid        Fsid
+	F_namemax     uint32
+	F_owner       uint32
+	F_ctime       uint64
+	F_fstypename  [16]int8
+	F_mntonname   [90]int8
+	F_mntfromname [90]int8
+	F_mntfromspec [90]int8
+	Pad_cgo_0     [2]byte
+	Mount_info    [160]byte
+}
+
+type Flock_t struct {
+	Start  int64
+	Len    int64
+	Pid    int32
+	Type   int16
+	Whence int16
+}
+
+type Dirent struct {
+	Fileno       uint64
+	Off          int64
+	Reclen       uint16
+	Type         uint8
+	Namlen       uint8
+	X__d_padding [4]uint8
+	Name         [256]int8
+}
+
+type Fsid struct {
+	Val [2]int32
+}
+
+const (
+	pathMax = 0x400
+)
+
+type RawSockaddrInet4 struct {
+	Len    uint8
+	Family uint8
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]int8
+}
+
+type RawSockaddrInet6 struct {
+	Len      uint8
+	Family   uint8
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+	Len    uint8
+	Family uint8
+	Path   [104]int8
+}
+
+type RawSockaddrDatalink struct {
+	Len    uint8
+	Family uint8
+	Index  uint16
+	Type   uint8
+	Nlen   uint8
+	Alen   uint8
+	Slen   uint8
+	Data   [24]int8
+}
+
+type RawSockaddr struct {
+	Len    uint8
+	Family uint8
+	Data   [14]int8
+}
+
+type RawSockaddrAny struct {
+	Addr RawSockaddr
+	Pad  [92]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+	Onoff  int32
+	Linger int32
+}
+
+type Iovec struct {
+	Base *byte
+	Len  uint64
+}
+
+type IPMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type IPv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type Msghdr struct {
+	Name       *byte
+	Namelen    uint32
+	Iov        *Iovec
+	Iovlen     uint32
+	Control    *byte
+	Controllen uint32
+	Flags      int32
+}
+
+type Cmsghdr struct {
+	Len   uint32
+	Level int32
+	Type  int32
+}
+
+type Inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+	Addr RawSockaddrInet6
+	Mtu  uint32
+}
+
+type ICMPv6Filter struct {
+	Filt [8]uint32
+}
+
+const (
+	SizeofSockaddrInet4    = 0x10
+	SizeofSockaddrInet6    = 0x1c
+	SizeofSockaddrAny      = 0x6c
+	SizeofSockaddrUnix     = 0x6a
+	SizeofSockaddrDatalink = 0x20
+	SizeofLinger           = 0x8
+	SizeofIPMreq           = 0x8
+	SizeofIPv6Mreq         = 0x14
+	SizeofMsghdr           = 0x30
+	SizeofCmsghdr          = 0xc
+	SizeofInet6Pktinfo     = 0x14
+	SizeofIPv6MTUInfo      = 0x20
+	SizeofICMPv6Filter     = 0x20
+)
+
+const (
+	PTRACE_TRACEME = 0x0
+	PTRACE_CONT    = 0x7
+	PTRACE_KILL    = 0x8
+)
+
+type Kevent_t struct {
+	Ident  uint64
+	Filter int16
+	Flags  uint16
+	Fflags uint32
+	Data   int64
+	Udata  *byte
+}
+
+type FdSet struct {
+	Bits [32]uint32
+}
+
+const (
+	SizeofIfMsghdr         = 0xa8
+	SizeofIfData           = 0x90
+	SizeofIfaMsghdr        = 0x18
+	SizeofIfAnnounceMsghdr = 0x1a
+	SizeofRtMsghdr         = 0x60
+	SizeofRtMetrics        = 0x38
+)
+
+type IfMsghdr struct {
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Hdrlen  uint16
+	Index   uint16
+	Tableid uint16
+	Pad1    uint8
+	Pad2    uint8
+	Addrs   int32
+	Flags   int32
+	Xflags  int32
+	Data    IfData
+}
+
+type IfData struct {
+	Type         uint8
+	Addrlen      uint8
+	Hdrlen       uint8
+	Link_state   uint8
+	Mtu          uint32
+	Metric       uint32
+	Rdomain      uint32
+	Baudrate     uint64
+	Ipackets     uint64
+	Ierrors      uint64
+	Opackets     uint64
+	Oerrors      uint64
+	Collisions   uint64
+	Ibytes       uint64
+	Obytes       uint64
+	Imcasts      uint64
+	Omcasts      uint64
+	Iqdrops      uint64
+	Oqdrops      uint64
+	Noproto      uint64
+	Capabilities uint32
+	Lastchange   Timeval
+}
+
+type IfaMsghdr struct {
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Hdrlen  uint16
+	Index   uint16
+	Tableid uint16
+	Pad1    uint8
+	Pad2    uint8
+	Addrs   int32
+	Flags   int32
+	Metric  int32
+}
+
+type IfAnnounceMsghdr struct {
+	Msglen  uint16
+	Version uint8
+	Type    uint8
+	Hdrlen  uint16
+	Index   uint16
+	What    uint16
+	Name    [16]int8
+}
+
+type RtMsghdr struct {
+	Msglen   uint16
+	Version  uint8
+	Type     uint8
+	Hdrlen   uint16
+	Index    uint16
+	Tableid  uint16
+	Priority uint8
+	Mpls     uint8
+	Addrs    int32
+	Flags    int32
+	Fmask    int32
+	Pid      int32
+	Seq      int32
+	Errno    int32
+	Inits    uint32
+	Rmx      RtMetrics
+}
+
+type RtMetrics struct {
+	Pksent   uint64
+	Expire   int64
+	Locks    uint32
+	Mtu      uint32
+	Refcnt   uint32
+	Hopcount uint32
+	Recvpipe uint32
+	Sendpipe uint32
+	Ssthresh uint32
+	Rtt      uint32
+	Rttvar   uint32
+	Pad      uint32
+}
+
+type Mclpool struct{}
+
+const (
+	SizeofBpfVersion = 0x4
+	SizeofBpfStat    = 0x8
+	SizeofBpfProgram = 0x10
+	SizeofBpfInsn    = 0x8
+	SizeofBpfHdr     = 0x18
+)
+
+type BpfVersion struct {
+	Major uint16
+	Minor uint16
+}
+
+type BpfStat struct {
+	Recv uint32
+	Drop uint32
+}
+
+type BpfProgram struct {
+	Len   uint32
+	Insns *BpfInsn
+}
+
+type BpfInsn struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
+
+type BpfHdr struct {
+	Tstamp  BpfTimeval
+	Caplen  uint32
+	Datalen uint32
+	Hdrlen  uint16
+	Ifidx   uint16
+	Flowid  uint16
+	Flags   uint8
+	Drops   uint8
+}
+
+type BpfTimeval struct {
+	Sec  uint32
+	Usec uint32
+}
+
+const (
+	_AT_FDCWD = -0x64
+)
+
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]uint8
+	Ispeed int32
+	Ospeed int32
+}
diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go
index be9b87f..9491213 100644
--- a/src/testing/benchmark.go
+++ b/src/testing/benchmark.go
@@ -7,7 +7,6 @@
 import (
 	"flag"
 	"fmt"
-	"internal/race"
 	"internal/sysinfo"
 	"io"
 	"math"
@@ -25,7 +24,7 @@
 func initBenchmarkFlags() {
 	matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
 	benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
-	flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d`")
+	flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d` or N times if `d` is of the form Nx")
 }
 
 var (
@@ -78,7 +77,7 @@
 	F    func(b *B)
 }
 
-// B is a type passed to Benchmark functions to manage benchmark
+// B is a type passed to [Benchmark] functions to manage benchmark
 // timing and to specify the number of iterations to run.
 //
 // A benchmark ends when its Benchmark function returns or calls any of the methods
@@ -118,7 +117,7 @@
 
 // StartTimer starts timing a test. This function is called automatically
 // before a benchmark starts, but it can also be used to resume timing after
-// a call to StopTimer.
+// a call to [B.StopTimer].
 func (b *B) StartTimer() {
 	if !b.timerOn {
 		runtime.ReadMemStats(&memStats)
@@ -151,9 +150,7 @@
 		// Pre-size it to make more allocation unlikely.
 		b.extra = make(map[string]float64, 16)
 	} else {
-		for k := range b.extra {
-			delete(b.extra, k)
-		}
+		clear(b.extra)
 	}
 	if b.timerOn {
 		runtime.ReadMemStats(&memStats)
@@ -181,11 +178,14 @@
 func (b *B) runN(n int) {
 	benchmarkLock.Lock()
 	defer benchmarkLock.Unlock()
-	defer b.runCleanup(normalPanic)
+	defer func() {
+		b.runCleanup(normalPanic)
+		b.checkRaces()
+	}()
 	// Try to get a comparable environment for each run
 	// by clearing garbage from previous runs.
 	runtime.GC()
-	b.raceErrors = -race.Errors()
+	b.resetRaces()
 	b.N = n
 	b.parallelism = 1
 	b.ResetTimer()
@@ -194,24 +194,6 @@
 	b.StopTimer()
 	b.previousN = n
 	b.previousDuration = b.duration
-	b.raceErrors += race.Errors()
-	if b.raceErrors > 0 {
-		b.Errorf("race detected during execution of benchmark")
-	}
-}
-
-func min(x, y int64) int64 {
-	if x > y {
-		return y
-	}
-	return x
-}
-
-func max(x, y int64) int64 {
-	if x < y {
-		return y
-	}
-	return x
 }
 
 // run1 runs the first iteration of benchFunc. It reports whether more
@@ -267,7 +249,7 @@
 		if b.importPath != "" {
 			fmt.Fprintf(b.w, "pkg: %s\n", b.importPath)
 		}
-		if cpu := sysinfo.CPU.Name(); cpu != "" {
+		if cpu := sysinfo.CPUName(); cpu != "" {
 			fmt.Fprintf(b.w, "cpu: %s\n", cpu)
 		}
 	})
@@ -339,7 +321,7 @@
 
 // Elapsed returns the measured elapsed time of the benchmark.
 // The duration reported by Elapsed matches the one measured by
-// StartTimer, StopTimer, and ResetTimer.
+// [B.StartTimer], [B.StopTimer], and [B.ResetTimer].
 func (b *B) Elapsed() time.Duration {
 	d := b.duration
 	if b.timerOn {
@@ -431,7 +413,7 @@
 // benchmark name.
 // Extra metrics override built-in metrics of the same name.
 // String does not include allocs/op or B/op, since those are reported
-// by MemString.
+// by [BenchmarkResult.MemString].
 func (r BenchmarkResult) String() string {
 	buf := new(strings.Builder)
 	fmt.Fprintf(buf, "%8d", r.N)
@@ -684,7 +666,7 @@
 			if b.importPath != "" {
 				fmt.Printf("pkg: %s\n", b.importPath)
 			}
-			if cpu := sysinfo.CPU.Name(); cpu != "" {
+			if cpu := sysinfo.CPUName(); cpu != "" {
 				fmt.Printf("cpu: %s\n", cpu)
 			}
 		})
@@ -745,16 +727,16 @@
 
 // A PB is used by RunParallel for running parallel benchmarks.
 type PB struct {
-	globalN *uint64 // shared between all worker goroutines iteration counter
-	grain   uint64  // acquire that many iterations from globalN at once
-	cache   uint64  // local cache of acquired iterations
-	bN      uint64  // total number of iterations to execute (b.N)
+	globalN *atomic.Uint64 // shared between all worker goroutines iteration counter
+	grain   uint64         // acquire that many iterations from globalN at once
+	cache   uint64         // local cache of acquired iterations
+	bN      uint64         // total number of iterations to execute (b.N)
 }
 
 // Next reports whether there are more iterations to execute.
 func (pb *PB) Next() bool {
 	if pb.cache == 0 {
-		n := atomic.AddUint64(pb.globalN, pb.grain)
+		n := pb.globalN.Add(pb.grain)
 		if n <= pb.bN {
 			pb.cache = pb.grain
 		} else if n < pb.bN+pb.grain {
@@ -770,13 +752,13 @@
 // RunParallel runs a benchmark in parallel.
 // It creates multiple goroutines and distributes b.N iterations among them.
 // The number of goroutines defaults to GOMAXPROCS. To increase parallelism for
-// non-CPU-bound benchmarks, call SetParallelism before RunParallel.
+// non-CPU-bound benchmarks, call [B.SetParallelism] before RunParallel.
 // RunParallel is usually used with the go test -cpu flag.
 //
 // The body function will be run in each goroutine. It should set up any
 // goroutine-local state and then iterate until pb.Next returns false.
-// It should not use the StartTimer, StopTimer, or ResetTimer functions,
-// because they have global effect. It should also not call Run.
+// It should not use the [B.StartTimer], [B.StopTimer], or [B.ResetTimer] functions,
+// because they have global effect. It should also not call [B.Run].
 //
 // RunParallel reports ns/op values as wall time for the benchmark as a whole,
 // not the sum of wall time or CPU time over each parallel goroutine.
@@ -800,7 +782,7 @@
 		grain = 1e4
 	}
 
-	n := uint64(0)
+	var n atomic.Uint64
 	numProcs := b.parallelism * runtime.GOMAXPROCS(0)
 	var wg sync.WaitGroup
 	wg.Add(numProcs)
@@ -816,12 +798,12 @@
 		}()
 	}
 	wg.Wait()
-	if n <= uint64(b.N) && !b.Failed() {
+	if n.Load() <= uint64(b.N) && !b.Failed() {
 		b.Fatal("RunParallel: body exited without pb.Next() == false")
 	}
 }
 
-// SetParallelism sets the number of goroutines used by RunParallel to p*GOMAXPROCS.
+// SetParallelism sets the number of goroutines used by [B.RunParallel] to p*GOMAXPROCS.
 // There is usually no need to call SetParallelism for CPU-bound benchmarks.
 // If p is less than 1, this call will have no effect.
 func (b *B) SetParallelism(p int) {
@@ -833,8 +815,8 @@
 // Benchmark benchmarks a single function. It is useful for creating
 // custom benchmarks that do not use the "go test" command.
 //
-// If f depends on testing flags, then Init must be used to register
-// those flags before calling Benchmark and before calling flag.Parse.
+// If f depends on testing flags, then [Init] must be used to register
+// those flags before calling Benchmark and before calling [flag.Parse].
 //
 // If f calls Run, the result will be an estimate of running all its
 // subbenchmarks that don't call Run in sequence in a single benchmark.
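Illustrative aside, not part of the patch: the RunParallel contract referenced by the doc-comment updates above (set up goroutine-local state, then loop until pb.Next returns false) looks like this in a user benchmark; the package and benchmark names are hypothetical.

package example_test // hypothetical package, for illustration only

import (
	"strconv"
	"testing"
)

// BenchmarkItoaParallel is a minimal sketch of the RunParallel pattern:
// the body runs once per worker goroutine and iterates until pb.Next
// reports that the b.N iterations have all been handed out.
func BenchmarkItoaParallel(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		i := 0 // goroutine-local state
		for pb.Next() {
			_ = strconv.Itoa(i)
			i++
		}
	})
}

Such a benchmark can also be run for a fixed iteration count with the Nx form now mentioned in the -test.benchtime usage string, for example: go test -bench=ItoaParallel -benchtime=100x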
diff --git a/src/testing/example.go b/src/testing/example.go
index 42ee555..07aa5cb 100644
--- a/src/testing/example.go
+++ b/src/testing/example.go
@@ -6,7 +6,6 @@
 
 import (
 	"fmt"
-	"os"
 	"sort"
 	"strings"
 	"time"
@@ -29,14 +28,11 @@
 func runExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ran, ok bool) {
 	ok = true
 
-	var eg InternalExample
+	m := newMatcher(matchString, *match, "-test.run", *skip)
 
+	var eg InternalExample
 	for _, eg = range examples {
-		matched, err := matchString(*match, eg.Name)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.run: %s\n", err)
-			os.Exit(1)
-		}
+		_, matched, _ := m.fullName(nil, eg.Name)
 		if !matched {
 			continue
 		}
diff --git a/src/testing/flag_test.go b/src/testing/flag_test.go
index 416d8c9..6f76c23 100644
--- a/src/testing/flag_test.go
+++ b/src/testing/flag_test.go
@@ -32,7 +32,7 @@
 			if err != nil {
 				exe = os.Args[0]
 			}
-			cmd := exec.Command(exe, "-test.run=TestFlag", "-test_flag_arg="+flag)
+			cmd := exec.Command(exe, "-test.run=^TestFlag$", "-test_flag_arg="+flag)
 			if flag != "" {
 				cmd.Args = append(cmd.Args, flag)
 			}
diff --git a/src/testing/fstest/mapfs.go b/src/testing/fstest/mapfs.go
index a0b1f65..1409d62 100644
--- a/src/testing/fstest/mapfs.go
+++ b/src/testing/fstest/mapfs.go
@@ -19,8 +19,8 @@
 //
 // The map need not include parent directories for files contained
 // in the map; those will be synthesized if needed.
-// But a directory can still be included by setting the MapFile.Mode's ModeDir bit;
-// this may be necessary for detailed control over the directory's FileInfo
+// But a directory can still be included by setting the [MapFile.Mode]'s [fs.ModeDir] bit;
+// this may be necessary for detailed control over the directory's [fs.FileInfo]
 // or to create an empty directory.
 //
 // File system operations read directly from the map,
@@ -32,12 +32,12 @@
 // than a few hundred entries or directory reads.
 type MapFS map[string]*MapFile
 
-// A MapFile describes a single file in a MapFS.
+// A MapFile describes a single file in a [MapFS].
 type MapFile struct {
 	Data    []byte      // file content
-	Mode    fs.FileMode // FileInfo.Mode
-	ModTime time.Time   // FileInfo.ModTime
-	Sys     any         // FileInfo.Sys
+	Mode    fs.FileMode // fs.FileInfo.Mode
+	ModTime time.Time   // fs.FileInfo.ModTime
+	Sys     any         // fs.FileInfo.Sys
 }
 
 var _ fs.FS = MapFS(nil)
@@ -98,14 +98,14 @@
 		delete(need, fi.name)
 	}
 	for name := range need {
-		list = append(list, mapFileInfo{name, &MapFile{Mode: fs.ModeDir}})
+		list = append(list, mapFileInfo{name, &MapFile{Mode: fs.ModeDir | 0555}})
 	}
 	sort.Slice(list, func(i, j int) bool {
 		return list[i].name < list[j].name
 	})
 
 	if file == nil {
-		file = &MapFile{Mode: fs.ModeDir}
+		file = &MapFile{Mode: fs.ModeDir | 0555}
 	}
 	return &mapDir{name, mapFileInfo{elem, file}, list, 0}, nil
 }
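Illustrative aside, not part of the patch: the mapfs.go doc comments above describe synthesizing parent directories (which, with this change, carry mode ModeDir|0555) and including an explicit directory by setting the fs.ModeDir bit. A minimal sketch, with hypothetical test and entry names:

package example_test // hypothetical package, for illustration only

import (
	"io/fs"
	"testing"
	"testing/fstest"
)

// TestMapFSDirs sketches the documented behavior: "dir" is synthesized as
// the parent of "dir/file.txt", while "empty" is an explicit, empty
// directory created by setting the fs.ModeDir bit on its MapFile.
func TestMapFSDirs(t *testing.T) {
	m := fstest.MapFS{
		"dir/file.txt": {Data: []byte("hello")},
		"empty":        {Mode: fs.ModeDir | 0555},
	}
	if err := fstest.TestFS(m, "dir", "dir/file.txt", "empty"); err != nil {
		t.Fatal(err)
	}
}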
diff --git a/src/testing/fstest/mapfs_test.go b/src/testing/fstest/mapfs_test.go
index c8d2928..c64dc8d 100644
--- a/src/testing/fstest/mapfs_test.go
+++ b/src/testing/fstest/mapfs_test.go
@@ -16,7 +16,7 @@
 		"hello":             {Data: []byte("hello, world\n")},
 		"fortune/k/ken.txt": {Data: []byte("If a program is too slow, it must have a loop.\n")},
 	}
-	if err := TestFS(m, "hello", "fortune/k/ken.txt"); err != nil {
+	if err := TestFS(m, "hello", "fortune", "fortune/k", "fortune/k/ken.txt"); err != nil {
 		t.Fatal(err)
 	}
 }
@@ -37,7 +37,7 @@
 	})
 	want := `
 .: drwxrwxrwx
-a: d---------
+a: dr-xr-xr-x
 a/b.txt: -rw-rw-rw-
 `[1:]
 	got := buf.String()
diff --git a/src/testing/fuzz.go b/src/testing/fuzz.go
index d31a3f8..d50ea79 100644
--- a/src/testing/fuzz.go
+++ b/src/testing/fuzz.go
@@ -59,7 +59,7 @@
 // by (*F).Add and entries in the testdata/fuzz/<FuzzTestName> directory. After
 // any necessary setup and calls to (*F).Add, the fuzz test must then call
 // (*F).Fuzz to provide the fuzz target. See the testing package documentation
-// for an example, and see the F.Fuzz and F.Add method documentation for
+// for an example, and see the [F.Fuzz] and [F.Add] method documentation for
 // details.
 //
 // *F methods can only be called before (*F).Fuzz. Once the test is
@@ -206,7 +206,7 @@
 //
 // When fuzzing, F.Fuzz does not return until a problem is found, time runs out
 // (set with -fuzztime), or the test process is interrupted by a signal. F.Fuzz
-// should be called exactly once, unless F.Skip or F.Fail is called beforehand.
+// should be called exactly once, unless F.Skip or [F.Fail] is called beforehand.
 func (f *F) Fuzz(ff any) {
 	if f.fuzzCalled {
 		panic("testing: F.Fuzz called more than once")
@@ -636,6 +636,7 @@
 		// Unfortunately, recovering here adds stack frames, but the location of
 		// the original panic should still be
 		// clear.
+		f.checkRaces()
 		if f.Failed() {
 			numFailed.Add(1)
 		}
@@ -719,6 +720,7 @@
 	}()
 
 	f.start = time.Now()
+	f.resetRaces()
 	fn(f)
 
 	// Code beyond this point will not be executed when FailNow or SkipNow
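Illustrative aside, not part of the patch: the F.Add and F.Fuzz doc comments referenced above describe registering seed corpus entries and then calling F.Fuzz exactly once with the fuzz target. A minimal sketch under hypothetical names:

package example_test // hypothetical package, for illustration only

import (
	"testing"
	"unicode/utf8"
)

// FuzzReverse registers seed inputs with (*F).Add and then supplies the
// fuzz target to (*F).Fuzz, checking that reversing a string twice
// round-trips for valid UTF-8 inputs.
func FuzzReverse(f *testing.F) {
	f.Add("hello")
	f.Add("")
	f.Fuzz(func(t *testing.T, s string) {
		if !utf8.ValidString(s) {
			t.Skip() // rune-based reversal is only well defined for valid UTF-8
		}
		rev := func(s string) string {
			r := []rune(s)
			for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
				r[i], r[j] = r[j], r[i]
			}
			return string(r)
		}
		if got := rev(rev(s)); got != s {
			t.Errorf("reverse(reverse(%q)) = %q", s, got)
		}
	})
}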
diff --git a/src/testing/helper_test.go b/src/testing/helper_test.go
index 6e8986a..da5622f 100644
--- a/src/testing/helper_test.go
+++ b/src/testing/helper_test.go
@@ -2,98 +2,107 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package testing
+package testing_test
 
 import (
+	"internal/testenv"
+	"os"
 	"regexp"
 	"strings"
+	"testing"
 )
 
-func TestTBHelper(t *T) {
-	var buf strings.Builder
-	ctx := newTestContext(1, allMatcher())
-	t1 := &T{
-		common: common{
-			signal: make(chan bool),
-			w:      &buf,
-		},
-		context: ctx,
-	}
-	t1.Run("Test", testHelper)
+func TestTBHelper(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		testTestHelper(t)
 
-	want := `--- FAIL: Test (?s)
-helperfuncs_test.go:12: 0
-helperfuncs_test.go:40: 1
-helperfuncs_test.go:21: 2
-helperfuncs_test.go:42: 3
-helperfuncs_test.go:49: 4
---- FAIL: Test/sub (?s)
-helperfuncs_test.go:52: 5
-helperfuncs_test.go:21: 6
-helperfuncs_test.go:51: 7
-helperfuncs_test.go:63: 8
---- FAIL: Test/sub2 (?s)
-helperfuncs_test.go:78: 11
-helperfuncs_test.go:82: recover 12
-helperfuncs_test.go:84: GenericFloat64
-helperfuncs_test.go:85: GenericInt
-helperfuncs_test.go:71: 9
-helperfuncs_test.go:67: 10
+		// Check that calling Helper from inside a top-level test function
+		// has no effect.
+		t.Helper()
+		t.Error("8")
+		return
+	}
+
+	testenv.MustHaveExec(t)
+	t.Parallel()
+
+	exe, err := os.Executable()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cmd := testenv.Command(t, exe, "-test.run=^TestTBHelper$")
+	cmd = testenv.CleanCmdEnv(cmd)
+	cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
+	out, _ := cmd.CombinedOutput()
+
+	want := `--- FAIL: TestTBHelper \([^)]+\)
+    helperfuncs_test.go:15: 0
+    helperfuncs_test.go:47: 1
+    helperfuncs_test.go:24: 2
+    helperfuncs_test.go:49: 3
+    helperfuncs_test.go:56: 4
+    --- FAIL: TestTBHelper/sub \([^)]+\)
+        helperfuncs_test.go:59: 5
+        helperfuncs_test.go:24: 6
+        helperfuncs_test.go:58: 7
+    --- FAIL: TestTBHelper/sub2 \([^)]+\)
+        helperfuncs_test.go:80: 11
+    helperfuncs_test.go:84: recover 12
+    helperfuncs_test.go:86: GenericFloat64
+    helperfuncs_test.go:87: GenericInt
+    helper_test.go:22: 8
+    helperfuncs_test.go:73: 9
+    helperfuncs_test.go:69: 10
 `
-	lines := strings.Split(buf.String(), "\n")
-	durationRE := regexp.MustCompile(`\(.*\)$`)
-	for i, line := range lines {
-		line = strings.TrimSpace(line)
-		line = durationRE.ReplaceAllString(line, "(?s)")
-		lines[i] = line
-	}
-	got := strings.Join(lines, "\n")
-	if got != want {
-		t.Errorf("got output:\n\n%s\nwant:\n\n%s", got, want)
+	if !regexp.MustCompile(want).Match(out) {
+		t.Errorf("got output:\n\n%s\nwant matching:\n\n%s", out, want)
 	}
 }
 
-func TestTBHelperParallel(t *T) {
-	var buf strings.Builder
-	ctx := newTestContext(1, newMatcher(regexp.MatchString, "", "", ""))
-	t1 := &T{
-		common: common{
-			signal: make(chan bool),
-			w:      &buf,
-		},
-		context: ctx,
+func TestTBHelperParallel(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		parallelTestHelper(t)
+		return
 	}
-	t1.Run("Test", parallelTestHelper)
 
-	lines := strings.Split(strings.TrimSpace(buf.String()), "\n")
-	if len(lines) != 6 {
-		t.Fatalf("parallelTestHelper gave %d lines of output; want 6", len(lines))
+	testenv.MustHaveExec(t)
+	t.Parallel()
+
+	exe, err := os.Executable()
+	if err != nil {
+		t.Fatal(err)
 	}
-	want := "helperfuncs_test.go:21: parallel"
+
+	cmd := testenv.Command(t, exe, "-test.run=^TestTBHelperParallel$")
+	cmd = testenv.CleanCmdEnv(cmd)
+	cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
+	out, _ := cmd.CombinedOutput()
+
+	t.Logf("output:\n%s", out)
+
+	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+
+	// We expect to see one "--- FAIL" line at the start
+	// of the log, five lines of "parallel" logging,
+	// and a final "FAIL" line at the end of the test.
+	const wantLines = 7
+
+	if len(lines) != wantLines {
+		t.Fatalf("parallelTestHelper gave %d lines of output; want %d", len(lines), wantLines)
+	}
+	want := "helperfuncs_test.go:24: parallel"
 	if got := strings.TrimSpace(lines[1]); got != want {
-		t.Errorf("got output line %q; want %q", got, want)
+		t.Errorf("got second output line %q; want %q", got, want)
 	}
 }
 
-type noopWriter int
-
-func (nw *noopWriter) Write(b []byte) (int, error) { return len(b), nil }
-
-func BenchmarkTBHelper(b *B) {
-	w := noopWriter(0)
-	ctx := newTestContext(1, allMatcher())
-	t1 := &T{
-		common: common{
-			signal: make(chan bool),
-			w:      &w,
-		},
-		context: ctx,
-	}
+func BenchmarkTBHelper(b *testing.B) {
 	f1 := func() {
-		t1.Helper()
+		b.Helper()
 	}
 	f2 := func() {
-		t1.Helper()
+		b.Helper()
 	}
 	b.ResetTimer()
 	b.ReportAllocs()
diff --git a/src/testing/helperfuncs_test.go b/src/testing/helperfuncs_test.go
index b63bc91..f0295f3 100644
--- a/src/testing/helperfuncs_test.go
+++ b/src/testing/helperfuncs_test.go
@@ -2,38 +2,45 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package testing
+package testing_test
 
-import "sync"
+import (
+	"sync"
+	"testing"
+)
 
 // The line numbering of this file is important for TestTBHelper.
 
-func notHelper(t *T, msg string) {
+func notHelper(t *testing.T, msg string) {
 	t.Error(msg)
 }
 
-func helper(t *T, msg string) {
+func helper(t *testing.T, msg string) {
 	t.Helper()
 	t.Error(msg)
 }
 
-func notHelperCallingHelper(t *T, msg string) {
+func notHelperCallingHelper(t *testing.T, msg string) {
 	helper(t, msg)
 }
 
-func helperCallingHelper(t *T, msg string) {
+func helperCallingHelper(t *testing.T, msg string) {
 	t.Helper()
 	helper(t, msg)
 }
 
-func genericHelper[G any](t *T, msg string) {
+func genericHelper[G any](t *testing.T, msg string) {
 	t.Helper()
 	t.Error(msg)
 }
 
 var genericIntHelper = genericHelper[int]
 
-func testHelper(t *T) {
+func testTestHelper(t *testing.T) {
+	testHelper(t)
+}
+
+func testHelper(t *testing.T) {
 	// Check combinations of directly and indirectly
 	// calling helper functions.
 	notHelper(t, "0")
@@ -48,7 +55,7 @@
 	}
 	fn("4")
 
-	t.Run("sub", func(t *T) {
+	t.Run("sub", func(t *testing.T) {
 		helper(t, "5")
 		notHelperCallingHelper(t, "6")
 		// Check that calling Helper from inside a subtest entry function
@@ -57,11 +64,6 @@
 		t.Error("7")
 	})
 
-	// Check that calling Helper from inside a top-level test function
-	// has no effect.
-	t.Helper()
-	t.Error("8")
-
 	// Check that right caller is reported for func passed to Cleanup when
 	// multiple cleanup functions have been registered.
 	t.Cleanup(func() {
@@ -85,7 +87,7 @@
 	genericIntHelper(t, "GenericInt")
 }
 
-func parallelTestHelper(t *T) {
+func parallelTestHelper(t *testing.T) {
 	var wg sync.WaitGroup
 	for i := 0; i < 5; i++ {
 		wg.Add(1)
@@ -97,15 +99,15 @@
 	wg.Wait()
 }
 
-func helperSubCallingHelper(t *T, msg string) {
+func helperSubCallingHelper(t *testing.T, msg string) {
 	t.Helper()
-	t.Run("sub2", func(t *T) {
+	t.Run("sub2", func(t *testing.T) {
 		t.Helper()
 		t.Fatal(msg)
 	})
 }
 
-func recoverHelper(t *T, msg string) {
+func recoverHelper(t *testing.T, msg string) {
 	t.Helper()
 	defer func() {
 		t.Helper()
@@ -116,7 +118,7 @@
 	doPanic(t, msg)
 }
 
-func doPanic(t *T, msg string) {
+func doPanic(t *testing.T, msg string) {
 	t.Helper()
 	panic(msg)
 }
diff --git a/src/testing/internal/testdeps/deps.go b/src/testing/internal/testdeps/deps.go
index 2e85a41..8683075 100644
--- a/src/testing/internal/testdeps/deps.go
+++ b/src/testing/internal/testdeps/deps.go
@@ -27,7 +27,7 @@
 )
 
 // TestDeps is an implementation of the testing.testDeps interface,
-// suitable for passing to testing.MainStart.
+// suitable for passing to [testing.MainStart].
 type TestDeps struct{}
 
 var matchPat string
diff --git a/src/testing/iotest/logger.go b/src/testing/iotest/logger.go
index 99548dc..10d0cb5 100644
--- a/src/testing/iotest/logger.go
+++ b/src/testing/iotest/logger.go
@@ -25,7 +25,7 @@
 }
 
 // NewWriteLogger returns a writer that behaves like w except
-// that it logs (using log.Printf) each write to standard error,
+// that it logs (using [log.Printf]) each write to standard error,
 // printing the prefix and the hexadecimal data written.
 func NewWriteLogger(prefix string, w io.Writer) io.Writer {
 	return &writeLogger{prefix, w}
@@ -47,7 +47,7 @@
 }
 
 // NewReadLogger returns a reader that behaves like r except
-// that it logs (using log.Printf) each read to standard error,
+// that it logs (using [log.Printf]) each read to standard error,
 // printing the prefix and the hexadecimal data read.
 func NewReadLogger(prefix string, r io.Reader) io.Reader {
 	return &readLogger{prefix, r}
diff --git a/src/testing/iotest/reader.go b/src/testing/iotest/reader.go
index 770d87f..8529e1c 100644
--- a/src/testing/iotest/reader.go
+++ b/src/testing/iotest/reader.go
@@ -73,7 +73,7 @@
 // ErrTimeout is a fake timeout error.
 var ErrTimeout = errors.New("timeout")
 
-// TimeoutReader returns ErrTimeout on the second read
+// TimeoutReader returns [ErrTimeout] on the second read
 // with no data. Subsequent calls to read succeed.
 func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} }
 
@@ -90,7 +90,7 @@
 	return r.r.Read(p)
 }
 
-// ErrReader returns an io.Reader that returns 0, err from all Read calls.
+// ErrReader returns an [io.Reader] that returns 0, err from all Read calls.
 func ErrReader(err error) io.Reader {
 	return &errReader{err: err}
 }
@@ -128,7 +128,7 @@
 
 // TestReader tests that reading from r returns the expected file content.
 // It does reads of different sizes, until EOF.
-// If r implements io.ReaderAt or io.Seeker, TestReader also checks
+// If r implements [io.ReaderAt] or [io.Seeker], TestReader also checks
 // that those operations behave as they should.
 //
 // If TestReader finds any misbehaviors, it returns an error reporting them.
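Illustrative aside, not part of the patch: the ErrReader doc comment touched above promises a reader whose every Read returns 0 and the supplied error. A minimal sketch, with a hypothetical test name:

package example_test // hypothetical package, for illustration only

import (
	"errors"
	"io"
	"testing"
	"testing/iotest"
)

// TestErrReader checks that reading from iotest.ErrReader surfaces the
// supplied error immediately, as the doc comment describes.
func TestErrReader(t *testing.T) {
	errBoom := errors.New("boom")
	if _, err := io.ReadAll(iotest.ErrReader(errBoom)); !errors.Is(err, errBoom) {
		t.Fatalf("ReadAll error = %v, want %v", err, errBoom)
	}
}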
diff --git a/src/testing/match.go b/src/testing/match.go
index 92b7dc6..84804dc 100644
--- a/src/testing/match.go
+++ b/src/testing/match.go
@@ -119,9 +119,7 @@
 func (m *matcher) clearSubNames() {
 	m.mu.Lock()
 	defer m.mu.Unlock()
-	for key := range m.subNames {
-		delete(m.subNames, key)
-	}
+	clear(m.subNames)
 }
 
 func (m simpleMatch) matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) {
diff --git a/src/testing/panic_test.go b/src/testing/panic_test.go
index 8733bc3..6307b84 100644
--- a/src/testing/panic_test.go
+++ b/src/testing/panic_test.go
@@ -139,7 +139,7 @@
 	}}
 	for _, tc := range testCases {
 		t.Run(tc.desc, func(t *testing.T) {
-			cmd := exec.Command(os.Args[0], "-test.run=TestPanicHelper")
+			cmd := exec.Command(os.Args[0], "-test.run=^TestPanicHelper$")
 			cmd.Args = append(cmd.Args, tc.flags...)
 			cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
 			b, _ := cmd.CombinedOutput()
@@ -220,13 +220,13 @@
 	}{
 		{
 			desc:  "Issue 48502: call runtime.Goexit in t.Cleanup after panic",
-			flags: []string{"-test.run=TestGoexitInCleanupAfterPanicHelper"},
+			flags: []string{"-test.run=^TestGoexitInCleanupAfterPanicHelper$"},
 			want: `panic: die
 	panic: test executed panic(nil) or runtime.Goexit`,
 		},
 		{
 			desc:  "Issue 48515: call t.Run in t.Cleanup should trigger panic",
-			flags: []string{"-test.run=TestCallRunInCleanupHelper"},
+			flags: []string{"-test.run=^TestCallRunInCleanupHelper$"},
 			want:  `panic: testing: t.Run called during t.Cleanup`,
 		},
 	}
diff --git a/src/testing/quick/quick.go b/src/testing/quick/quick.go
index d711742..8ef9cf7 100644
--- a/src/testing/quick/quick.go
+++ b/src/testing/quick/quick.go
@@ -54,7 +54,7 @@
 const complexSize = 50
 
 // Value returns an arbitrary value of the given type.
-// If the type implements the Generator interface, that will be used.
+// If the type implements the [Generator] interface, that will be used.
 // Note: To create arbitrary values for structs, all the fields must be exported.
 func Value(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool) {
 	return sizedValue(t, rand, complexSize)
@@ -234,7 +234,7 @@
 	return fmt.Sprintf("#%d: failed on input %s", s.Count, toString(s.In))
 }
 
-// A CheckEqualError is the result CheckEqual finding an error.
+// A CheckEqualError is the result [CheckEqual] finding an error.
 type CheckEqualError struct {
 	CheckError
 	Out1 []any
@@ -248,7 +248,7 @@
 // Check looks for an input to f, any function that returns bool,
 // such that f returns false. It calls f repeatedly, with arbitrary
 // values for each argument. If f returns false on a given input,
-// Check returns that input as a *CheckError.
+// Check returns that input as a *[CheckError].
 // For example:
 //
 //	func TestOddMultipleOfThree(t *testing.T) {
@@ -297,7 +297,7 @@
 
 // CheckEqual looks for an input on which f and g return different results.
 // It calls f and g repeatedly with arbitrary values for each argument.
-// If f and g return different answers, CheckEqual returns a *CheckEqualError
+// If f and g return different answers, CheckEqual returns a *[CheckEqualError]
 // describing the input and the outputs.
 func CheckEqual(f, g any, config *Config) error {
 	if config == nil {
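Illustrative aside, not part of the patch: the Check doc comment above describes calling a bool-returning property function with arbitrary inputs and reporting a counterexample as a *CheckError. A shorter sketch than the documented TestOddMultipleOfThree example, under a hypothetical name:

package example_test // hypothetical package, for illustration only

import (
	"testing"
	"testing/quick"
)

// TestAddCommutative asks quick.Check to search for int32 inputs that
// violate commutativity of addition; a nil Config uses the defaults.
func TestAddCommutative(t *testing.T) {
	add := func(a, b int32) bool { return a+b == b+a }
	if err := quick.Check(add, nil); err != nil {
		t.Error(err)
	}
}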
diff --git a/src/testing/slogtest/run_test.go b/src/testing/slogtest/run_test.go
new file mode 100644
index 0000000..c82da10
--- /dev/null
+++ b/src/testing/slogtest/run_test.go
@@ -0,0 +1,31 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slogtest_test
+
+import (
+	"bytes"
+	"encoding/json"
+	"log/slog"
+	"testing"
+	"testing/slogtest"
+)
+
+func TestRun(t *testing.T) {
+	var buf bytes.Buffer
+
+	newHandler := func(*testing.T) slog.Handler {
+		buf.Reset()
+		return slog.NewJSONHandler(&buf, nil)
+	}
+	result := func(t *testing.T) map[string]any {
+		m := map[string]any{}
+		if err := json.Unmarshal(buf.Bytes(), &m); err != nil {
+			t.Fatal(err)
+		}
+		return m
+	}
+
+	slogtest.Run(t, newHandler, result)
+}
diff --git a/src/testing/slogtest/slogtest.go b/src/testing/slogtest/slogtest.go
index b16d122..5c3aced 100644
--- a/src/testing/slogtest/slogtest.go
+++ b/src/testing/slogtest/slogtest.go
@@ -12,10 +12,13 @@
 	"log/slog"
 	"reflect"
 	"runtime"
+	"testing"
 	"time"
 )
 
 type testCase struct {
+	// Subtest name.
+	name string
 	// If non-empty, explanation explains the violated constraint.
 	explanation string
 	// f executes a single log event using its argument logger.
@@ -30,18 +33,215 @@
 	checks []check
 }
 
+var cases = []testCase{
+	{
+		name:        "built-ins",
+		explanation: withSource("this test expects slog.TimeKey, slog.LevelKey and slog.MessageKey"),
+		f: func(l *slog.Logger) {
+			l.Info("message")
+		},
+		checks: []check{
+			hasKey(slog.TimeKey),
+			hasKey(slog.LevelKey),
+			hasAttr(slog.MessageKey, "message"),
+		},
+	},
+	{
+		name:        "attrs",
+		explanation: withSource("a Handler should output attributes passed to the logging function"),
+		f: func(l *slog.Logger) {
+			l.Info("message", "k", "v")
+		},
+		checks: []check{
+			hasAttr("k", "v"),
+		},
+	},
+	{
+		name:        "empty-attr",
+		explanation: withSource("a Handler should ignore an empty Attr"),
+		f: func(l *slog.Logger) {
+			l.Info("msg", "a", "b", "", nil, "c", "d")
+		},
+		checks: []check{
+			hasAttr("a", "b"),
+			missingKey(""),
+			hasAttr("c", "d"),
+		},
+	},
+	{
+		name:        "zero-time",
+		explanation: withSource("a Handler should ignore a zero Record.Time"),
+		f: func(l *slog.Logger) {
+			l.Info("msg", "k", "v")
+		},
+		mod: func(r *slog.Record) { r.Time = time.Time{} },
+		checks: []check{
+			missingKey(slog.TimeKey),
+		},
+	},
+	{
+		name:        "WithAttrs",
+		explanation: withSource("a Handler should include the attributes from the WithAttrs method"),
+		f: func(l *slog.Logger) {
+			l.With("a", "b").Info("msg", "k", "v")
+		},
+		checks: []check{
+			hasAttr("a", "b"),
+			hasAttr("k", "v"),
+		},
+	},
+	{
+		name:        "groups",
+		explanation: withSource("a Handler should handle Group attributes"),
+		f: func(l *slog.Logger) {
+			l.Info("msg", "a", "b", slog.Group("G", slog.String("c", "d")), "e", "f")
+		},
+		checks: []check{
+			hasAttr("a", "b"),
+			inGroup("G", hasAttr("c", "d")),
+			hasAttr("e", "f"),
+		},
+	},
+	{
+		name:        "empty-group",
+		explanation: withSource("a Handler should ignore an empty group"),
+		f: func(l *slog.Logger) {
+			l.Info("msg", "a", "b", slog.Group("G"), "e", "f")
+		},
+		checks: []check{
+			hasAttr("a", "b"),
+			missingKey("G"),
+			hasAttr("e", "f"),
+		},
+	},
+	{
+		name:        "inline-group",
+		explanation: withSource("a Handler should inline the Attrs of a group with an empty key"),
+		f: func(l *slog.Logger) {
+			l.Info("msg", "a", "b", slog.Group("", slog.String("c", "d")), "e", "f")
+
+		},
+		checks: []check{
+			hasAttr("a", "b"),
+			hasAttr("c", "d"),
+			hasAttr("e", "f"),
+		},
+	},
+	{
+		name:        "WithGroup",
+		explanation: withSource("a Handler should handle the WithGroup method"),
+		f: func(l *slog.Logger) {
+			l.WithGroup("G").Info("msg", "a", "b")
+		},
+		checks: []check{
+			hasKey(slog.TimeKey),
+			hasKey(slog.LevelKey),
+			hasAttr(slog.MessageKey, "msg"),
+			missingKey("a"),
+			inGroup("G", hasAttr("a", "b")),
+		},
+	},
+	{
+		name:        "multi-With",
+		explanation: withSource("a Handler should handle multiple WithGroup and WithAttr calls"),
+		f: func(l *slog.Logger) {
+			l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg", "e", "f")
+		},
+		checks: []check{
+			hasKey(slog.TimeKey),
+			hasKey(slog.LevelKey),
+			hasAttr(slog.MessageKey, "msg"),
+			hasAttr("a", "b"),
+			inGroup("G", hasAttr("c", "d")),
+			inGroup("G", inGroup("H", hasAttr("e", "f"))),
+		},
+	},
+	{
+		name:        "empty-group-record",
+		explanation: withSource("a Handler should not output groups if there are no attributes"),
+		f: func(l *slog.Logger) {
+			l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg")
+		},
+		checks: []check{
+			hasKey(slog.TimeKey),
+			hasKey(slog.LevelKey),
+			hasAttr(slog.MessageKey, "msg"),
+			hasAttr("a", "b"),
+			inGroup("G", hasAttr("c", "d")),
+			inGroup("G", missingKey("H")),
+		},
+	},
+	{
+		name:        "resolve",
+		explanation: withSource("a Handler should call Resolve on attribute values"),
+		f: func(l *slog.Logger) {
+			l.Info("msg", "k", &replace{"replaced"})
+		},
+		checks: []check{hasAttr("k", "replaced")},
+	},
+	{
+		name:        "resolve-groups",
+		explanation: withSource("a Handler should call Resolve on attribute values in groups"),
+		f: func(l *slog.Logger) {
+			l.Info("msg",
+				slog.Group("G",
+					slog.String("a", "v1"),
+					slog.Any("b", &replace{"v2"})))
+		},
+		checks: []check{
+			inGroup("G", hasAttr("a", "v1")),
+			inGroup("G", hasAttr("b", "v2")),
+		},
+	},
+	{
+		name:        "resolve-WithAttrs",
+		explanation: withSource("a Handler should call Resolve on attribute values from WithAttrs"),
+		f: func(l *slog.Logger) {
+			l = l.With("k", &replace{"replaced"})
+			l.Info("msg")
+		},
+		checks: []check{hasAttr("k", "replaced")},
+	},
+	{
+		name:        "resolve-WithAttrs-groups",
+		explanation: withSource("a Handler should call Resolve on attribute values in groups from WithAttrs"),
+		f: func(l *slog.Logger) {
+			l = l.With(slog.Group("G",
+				slog.String("a", "v1"),
+				slog.Any("b", &replace{"v2"})))
+			l.Info("msg")
+		},
+		checks: []check{
+			inGroup("G", hasAttr("a", "v1")),
+			inGroup("G", hasAttr("b", "v2")),
+		},
+	},
+	{
+		name:        "empty-PC",
+		explanation: withSource("a Handler should not output SourceKey if the PC is zero"),
+		f: func(l *slog.Logger) {
+			l.Info("message")
+		},
+		mod: func(r *slog.Record) { r.PC = 0 },
+		checks: []check{
+			missingKey(slog.SourceKey),
+		},
+	},
+}
+
 // TestHandler tests a [slog.Handler].
 // If TestHandler finds any misbehaviors, it returns an error for each,
-// combined into a single error with errors.Join.
+// combined into a single error with [errors.Join].
 //
 // TestHandler installs the given Handler in a [slog.Logger] and
 // makes several calls to the Logger's output methods.
+// The Handler should be enabled for levels Info and above.
 //
 // The results function is invoked after all such calls.
 // It should return a slice of map[string]any, one for each call to a Logger output method.
 // The keys and values of the map should correspond to the keys and values of the Handler's
 // output. Each group in the output should be represented as its own nested map[string]any.
-// The standard keys slog.TimeKey, slog.LevelKey and slog.MessageKey should be used.
+// The standard keys [slog.TimeKey], [slog.LevelKey] and [slog.MessageKey] should be used.
 //
 // If the Handler outputs JSON, then calling [encoding/json.Unmarshal] with a `map[string]any`
 // will create the right data structure.
@@ -49,176 +249,6 @@
 // If a Handler intentionally drops an attribute that is checked by a test,
 // then the results function should check for its absence and add it to the map it returns.
 func TestHandler(h slog.Handler, results func() []map[string]any) error {
-	cases := []testCase{
-		{
-			explanation: withSource("this test expects slog.TimeKey, slog.LevelKey and slog.MessageKey"),
-			f: func(l *slog.Logger) {
-				l.Info("message")
-			},
-			checks: []check{
-				hasKey(slog.TimeKey),
-				hasKey(slog.LevelKey),
-				hasAttr(slog.MessageKey, "message"),
-			},
-		},
-		{
-			explanation: withSource("a Handler should output attributes passed to the logging function"),
-			f: func(l *slog.Logger) {
-				l.Info("message", "k", "v")
-			},
-			checks: []check{
-				hasAttr("k", "v"),
-			},
-		},
-		{
-			explanation: withSource("a Handler should ignore an empty Attr"),
-			f: func(l *slog.Logger) {
-				l.Info("msg", "a", "b", "", nil, "c", "d")
-			},
-			checks: []check{
-				hasAttr("a", "b"),
-				missingKey(""),
-				hasAttr("c", "d"),
-			},
-		},
-		{
-			explanation: withSource("a Handler should ignore a zero Record.Time"),
-			f: func(l *slog.Logger) {
-				l.Info("msg", "k", "v")
-			},
-			mod: func(r *slog.Record) { r.Time = time.Time{} },
-			checks: []check{
-				missingKey(slog.TimeKey),
-			},
-		},
-		{
-			explanation: withSource("a Handler should include the attributes from the WithAttrs method"),
-			f: func(l *slog.Logger) {
-				l.With("a", "b").Info("msg", "k", "v")
-			},
-			checks: []check{
-				hasAttr("a", "b"),
-				hasAttr("k", "v"),
-			},
-		},
-		{
-			explanation: withSource("a Handler should handle Group attributes"),
-			f: func(l *slog.Logger) {
-				l.Info("msg", "a", "b", slog.Group("G", slog.String("c", "d")), "e", "f")
-			},
-			checks: []check{
-				hasAttr("a", "b"),
-				inGroup("G", hasAttr("c", "d")),
-				hasAttr("e", "f"),
-			},
-		},
-		{
-			explanation: withSource("a Handler should ignore an empty group"),
-			f: func(l *slog.Logger) {
-				l.Info("msg", "a", "b", slog.Group("G"), "e", "f")
-			},
-			checks: []check{
-				hasAttr("a", "b"),
-				missingKey("G"),
-				hasAttr("e", "f"),
-			},
-		},
-		{
-			explanation: withSource("a Handler should inline the Attrs of a group with an empty key"),
-			f: func(l *slog.Logger) {
-				l.Info("msg", "a", "b", slog.Group("", slog.String("c", "d")), "e", "f")
-
-			},
-			checks: []check{
-				hasAttr("a", "b"),
-				hasAttr("c", "d"),
-				hasAttr("e", "f"),
-			},
-		},
-		{
-			explanation: withSource("a Handler should handle the WithGroup method"),
-			f: func(l *slog.Logger) {
-				l.WithGroup("G").Info("msg", "a", "b")
-			},
-			checks: []check{
-				hasKey(slog.TimeKey),
-				hasKey(slog.LevelKey),
-				hasAttr(slog.MessageKey, "msg"),
-				missingKey("a"),
-				inGroup("G", hasAttr("a", "b")),
-			},
-		},
-		{
-			explanation: withSource("a Handler should handle multiple WithGroup and WithAttr calls"),
-			f: func(l *slog.Logger) {
-				l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg", "e", "f")
-			},
-			checks: []check{
-				hasKey(slog.TimeKey),
-				hasKey(slog.LevelKey),
-				hasAttr(slog.MessageKey, "msg"),
-				hasAttr("a", "b"),
-				inGroup("G", hasAttr("c", "d")),
-				inGroup("G", inGroup("H", hasAttr("e", "f"))),
-			},
-		},
-		{
-			explanation: withSource("a Handler should not output groups for an empty Record"),
-			f: func(l *slog.Logger) {
-				l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg")
-			},
-			checks: []check{
-				hasKey(slog.TimeKey),
-				hasKey(slog.LevelKey),
-				hasAttr(slog.MessageKey, "msg"),
-				hasAttr("a", "b"),
-				inGroup("G", hasAttr("c", "d")),
-				inGroup("G", missingKey("H")),
-			},
-		},
-		{
-			explanation: withSource("a Handler should call Resolve on attribute values"),
-			f: func(l *slog.Logger) {
-				l.Info("msg", "k", &replace{"replaced"})
-			},
-			checks: []check{hasAttr("k", "replaced")},
-		},
-		{
-			explanation: withSource("a Handler should call Resolve on attribute values in groups"),
-			f: func(l *slog.Logger) {
-				l.Info("msg",
-					slog.Group("G",
-						slog.String("a", "v1"),
-						slog.Any("b", &replace{"v2"})))
-			},
-			checks: []check{
-				inGroup("G", hasAttr("a", "v1")),
-				inGroup("G", hasAttr("b", "v2")),
-			},
-		},
-		{
-			explanation: withSource("a Handler should call Resolve on attribute values from WithAttrs"),
-			f: func(l *slog.Logger) {
-				l = l.With("k", &replace{"replaced"})
-				l.Info("msg")
-			},
-			checks: []check{hasAttr("k", "replaced")},
-		},
-		{
-			explanation: withSource("a Handler should call Resolve on attribute values in groups from WithAttrs"),
-			f: func(l *slog.Logger) {
-				l = l.With(slog.Group("G",
-					slog.String("a", "v1"),
-					slog.Any("b", &replace{"v2"})))
-				l.Info("msg")
-			},
-			checks: []check{
-				inGroup("G", hasAttr("a", "v1")),
-				inGroup("G", hasAttr("b", "v2")),
-			},
-		},
-	}
-
 	// Run the handler on the test cases.
 	for _, c := range cases {
 		ht := h
@@ -238,14 +268,37 @@
 	for i, got := range results() {
 		c := cases[i]
 		for _, check := range c.checks {
-			if p := check(got); p != "" {
-				errs = append(errs, fmt.Errorf("%s: %s", p, c.explanation))
+			if problem := check(got); problem != "" {
+				errs = append(errs, fmt.Errorf("%s: %s", problem, c.explanation))
 			}
 		}
 	}
 	return errors.Join(errs...)
 }
 
+// Run exercises a [slog.Handler] on the same test cases as [TestHandler], but
+// runs each case in a subtest. For each test case, it first calls newHandler to
+// get an instance of the handler under test, then runs the test case, then
+// calls result to get the result. If the test case fails, it calls t.Error.
+func Run(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any) {
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			h := newHandler(t)
+			if c.mod != nil {
+				h = &wrapper{h, c.mod}
+			}
+			l := slog.New(h)
+			c.f(l)
+			got := result(t)
+			for _, check := range c.checks {
+				if p := check(got); p != "" {
+					t.Errorf("%s: %s", p, c.explanation)
+				}
+			}
+		})
+	}
+}
+
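For handler authors, a minimal sketch of driving Run follows; the buffer-backed JSONHandler setup and the TestMyHandler name are illustrative only, not part of this change. The result callback parses the single JSON line the handler wrote for the current case.

	package myhandler_test // hypothetical package

	import (
		"bytes"
		"encoding/json"
		"log/slog"
		"testing"
		"testing/slogtest"
	)

	func TestMyHandler(t *testing.T) {
		var buf bytes.Buffer
		newHandler := func(*testing.T) slog.Handler {
			buf.Reset()
			return slog.NewJSONHandler(&buf, nil)
		}
		result := func(t *testing.T) map[string]any {
			m := map[string]any{}
			// Each case emits one JSON object; groups come back as nested maps.
			if err := json.Unmarshal(buf.Bytes(), &m); err != nil {
				t.Fatal(err)
			}
			return m
		}
		slogtest.Run(t, newHandler, result)
	}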
 type check func(map[string]any) string
 
 func hasKey(key string) check {
diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go
index 55b14c3..1c23d05 100644
--- a/src/testing/sub_test.go
+++ b/src/testing/sub_test.go
@@ -767,22 +767,6 @@
 	})
 }
 
-func TestParallelSub(t *T) {
-	c := make(chan int)
-	block := make(chan int)
-	for i := 0; i < 10; i++ {
-		go func(i int) {
-			<-block
-			t.Run(fmt.Sprint(i), func(t *T) {})
-			c <- 1
-		}(i)
-	}
-	close(block)
-	for i := 0; i < 10; i++ {
-		<-c
-	}
-}
-
 type funcWriter struct {
 	write func([]byte) (int, error)
 }
@@ -910,18 +894,22 @@
 func TestConcurrentCleanup(t *T) {
 	cleanups := 0
 	t.Run("test", func(t *T) {
-		done := make(chan struct{})
+		var wg sync.WaitGroup
+		wg.Add(2)
 		for i := 0; i < 2; i++ {
 			i := i
 			go func() {
 				t.Cleanup(func() {
+					// Although the calls to Cleanup are concurrent, the functions passed
+					// to Cleanup should be called sequentially, in some nondeterministic
+					// order based on when the Cleanup calls happened to be scheduled.
+					// So these assignments to the cleanups variable should not race.
 					cleanups |= 1 << i
 				})
-				done <- struct{}{}
+				wg.Done()
 			}()
 		}
-		<-done
-		<-done
+		wg.Wait()
 	})
 	if cleanups != 1|2 {
 		t.Errorf("unexpected cleanup; got %d want 3", cleanups)
diff --git a/src/testing/testing.go b/src/testing/testing.go
index fcf7048..5c06aea 100644
--- a/src/testing/testing.go
+++ b/src/testing/testing.go
@@ -398,7 +398,7 @@
 // the "go test" command before running test functions, so Init is only needed
 // when calling functions such as Benchmark without using "go test".
 //
-// Init has no effect if it was already called.
+// Init is not safe to call concurrently. It has no effect if it was already called.
 func Init() {
 	if initRan {
 		return
@@ -611,7 +611,6 @@
 	bench          bool           // Whether the current test is a benchmark.
 	hasSub         atomic.Bool    // whether there are sub-benchmarks.
 	cleanupStarted atomic.Bool    // Registered cleanup callbacks have started to execute
-	raceErrors     int            // Number of races detected during test.
 	runner         string         // Function name of tRunner running the test.
 	isParallel     bool           // Whether the test is parallel.
 
@@ -625,6 +624,9 @@
 	signal   chan bool // To signal a test is done.
 	sub      []*T      // Queue of subtests to be run in parallel.
 
+	lastRaceErrors  atomic.Int64 // Max value of race.Errors seen during the test or its subtests.
+	raceErrorLogged atomic.Bool
+
 	tempDirMu  sync.Mutex
 	tempDir    string
 	tempDirErr error
@@ -771,9 +773,7 @@
 	if file != "" {
 		if *fullPath {
 			// If relative path, truncate file name at last file name separator.
-		} else if index := strings.LastIndex(file, "/"); index >= 0 {
-			file = file[index+1:]
-		} else if index = strings.LastIndex(file, "\\"); index >= 0 {
+		} else if index := strings.LastIndexAny(file, `/\`); index >= 0 {
 			file = file[index+1:]
 		}
 	} else {
@@ -957,9 +957,15 @@
 // Failed reports whether the function has failed.
 func (c *common) Failed() bool {
 	c.mu.RLock()
-	failed := c.failed
-	c.mu.RUnlock()
-	return failed || c.raceErrors+race.Errors() > 0
+	defer c.mu.RUnlock()
+
+	if !c.done && int64(race.Errors()) > c.lastRaceErrors.Load() {
+		c.mu.RUnlock()
+		c.checkRaces()
+		c.mu.RLock()
+	}
+
+	return c.failed
 }
 
 // FailNow marks the function as having failed and stops its execution
@@ -1098,7 +1104,7 @@
 }
 
 // SkipNow marks the test as having been skipped and stops its execution
-// by calling runtime.Goexit.
+// by calling [runtime.Goexit].
 // If a test fails (see Error, Errorf, Fail) and is then skipped,
 // it is still considered to have failed.
 // Execution will continue at the next test or benchmark. See also FailNow.
@@ -1175,7 +1181,7 @@
 }
 
 // TempDir returns a temporary directory for the test to use.
-// The directory is automatically removed by Cleanup when the test and
+// The directory is automatically removed when the test and
 // all its subtests complete.
 // Each subsequent call to t.TempDir returns a unique directory;
 // if the directory creation fails, TempDir terminates the test by calling Fatal.
@@ -1300,7 +1306,7 @@
 	}
 }
 
-// panicHanding is an argument to runCleanup.
+// panicHandling controls the panic handling used by runCleanup.
 type panicHandling int
 
 const (
@@ -1309,8 +1315,8 @@
 )
 
 // runCleanup is called at the end of the test.
-// If catchPanic is true, this will catch panics, and return the recovered
-// value if any.
+// If ph is recoverAndReturnPanic, it will catch panics, and return the
+// recovered value if any.
 func (c *common) runCleanup(ph panicHandling) (panicVal any) {
 	c.cleanupStarted.Store(true)
 	defer c.cleanupStarted.Store(false)
@@ -1348,6 +1354,69 @@
 	}
 }
 
+// resetRaces updates c.parent's count of data race errors (or the global count,
+// if c has no parent), and updates c.lastRaceErrors to match.
+//
+// Any races that occurred prior to this call to resetRaces will
+// not be attributed to c.
+func (c *common) resetRaces() {
+	if c.parent == nil {
+		c.lastRaceErrors.Store(int64(race.Errors()))
+	} else {
+		c.lastRaceErrors.Store(c.parent.checkRaces())
+	}
+}
+
+// checkRaces checks whether the global count of data race errors has increased
+// since c's count was last reset.
+//
+// If so, it marks c as having failed due to those races (logging an error for
+// the first such race), and updates the race counts for the parents of c so
+// that if they are currently suspended (such as in a call to T.Run) they will
+// not log separate errors for the race(s).
+//
+// Note that multiple tests may be marked as failed due to the same race if they
+// are executing in parallel.
+func (c *common) checkRaces() (raceErrors int64) {
+	raceErrors = int64(race.Errors())
+	for {
+		last := c.lastRaceErrors.Load()
+		if raceErrors <= last {
+			// All races have already been reported.
+			return raceErrors
+		}
+		if c.lastRaceErrors.CompareAndSwap(last, raceErrors) {
+			break
+		}
+	}
+
+	if c.raceErrorLogged.CompareAndSwap(false, true) {
+		// This is the first race we've encountered for this test.
+		// Mark the test as failed, and log the reason why only once.
+		// (Note that the race detector itself will still write a goroutine
+		// dump for any further races it detects.)
+		c.Errorf("race detected during execution of test")
+	}
+
+	// Update the parent(s) of this test so that they don't re-report the race.
+	parent := c.parent
+	for parent != nil {
+		for {
+			last := parent.lastRaceErrors.Load()
+			if raceErrors <= last {
+				// This race was already reported by another (likely parallel) subtest.
+				return raceErrors
+			}
+			if parent.lastRaceErrors.CompareAndSwap(last, raceErrors) {
+				break
+			}
+		}
+		parent = parent.parent
+	}
+
+	return raceErrors
+}
+
 // callerName gives the function name (qualified with a package path)
 // for the caller after skip frames (where 0 means the current function).
 func callerName(skip int) string {
@@ -1392,7 +1461,18 @@
 
 	// Add to the list of tests to be released by the parent.
 	t.parent.sub = append(t.parent.sub, t)
-	t.raceErrors += race.Errors()
+
+	// Report any races during execution of this test up to this point.
+	//
+	// We will assume that any races that occur between here and the point where
+	// we unblock are not caused by this subtest. That assumption usually holds,
+	// although it can be wrong if the test spawns a goroutine that races in the
+	// background while the rest of the test is blocked on the call to Parallel.
+	// If that happens, we will misattribute the background race to some other
+	// test, or to no test at all — but that false-negative is so unlikely that it
+	// is not worth adding race-report noise for the common case where the test is
+	// completely suspended during the call to Parallel.
+	t.checkRaces()
 
 	if t.chatty != nil {
 		t.chatty.Updatef(t.name, "=== PAUSE %s\n", t.name)
@@ -1407,9 +1487,16 @@
 		t.chatty.Updatef(t.name, "=== CONT  %s\n", t.name)
 	}
 	running.Store(t.name, time.Now())
-
 	t.start = time.Now()
-	t.raceErrors += -race.Errors()
+
+	// Reset the local race counter to ignore any races that happened while this
+	// goroutine was blocked, such as in the parent test or in other parallel
+	// subtests.
+	//
+	// (Note that we don't call parent.checkRaces here:
+	// if other parallel subtests have already introduced races, we want to
+	// let them report those races instead of attributing them to the parent.)
+	t.lastRaceErrors.Store(int64(race.Errors()))
 }
 
 // Setenv calls os.Setenv(key, value) and uses Cleanup to
@@ -1457,14 +1544,13 @@
 	// a call to runtime.Goexit, record the duration and send
 	// a signal saying that the test is done.
 	defer func() {
+		t.checkRaces()
+
+		// TODO(#61034): This is the wrong place for this check.
 		if t.Failed() {
 			numFailed.Add(1)
 		}
 
-		if t.raceErrors+race.Errors() > 0 {
-			t.Errorf("race detected during execution of test")
-		}
-
 		// Check if the test panicked or Goexited inappropriately.
 		//
 		// If this happens in a normal test, print output but continue panicking.
@@ -1552,20 +1638,28 @@
 
 		if len(t.sub) > 0 {
 			// Run parallel subtests.
-			// Decrease the running count for this test.
+
+			// Decrease the running count for this test and mark it as no longer running.
 			t.context.release()
+			running.Delete(t.name)
+
 			// Release the parallel subtests.
 			close(t.barrier)
 			// Wait for subtests to complete.
 			for _, sub := range t.sub {
 				<-sub.signal
 			}
+
+			// Run any cleanup callbacks, marking the test as running
+			// in case the cleanup hangs.
 			cleanupStart := time.Now()
+			running.Store(t.name, cleanupStart)
 			err := t.runCleanup(recoverAndReturnPanic)
 			t.duration += time.Since(cleanupStart)
 			if err != nil {
 				doPanic(err)
 			}
+			t.checkRaces()
 			if !t.isParallel {
 				// Reacquire the count for sequential tests. See comment in Run.
 				t.context.waitParallel()
@@ -1591,7 +1685,7 @@
 	}()
 
 	t.start = time.Now()
-	t.raceErrors = -race.Errors()
+	t.resetRaces()
 	fn(t)
 
 	// code beyond here will not be executed when FailNow is invoked
@@ -1646,11 +1740,19 @@
 	// without being preempted, even when their parent is a parallel test. This
 	// may especially reduce surprises if *parallel == 1.
 	go tRunner(t, f)
+
+	// The parent goroutine will block until the subtest either finishes or calls
+	// Parallel, but in general we don't know whether the parent goroutine is the
+	// top-level test function or some other goroutine it has spawned.
+	// To avoid confusing false-negatives, we leave the parent in the running map
+	// even though in the typical case it is blocked.
+
 	if !<-t.signal {
 		// At this point, it is likely that FailNow was called on one of the
 		// parent tests by one of the subtests. Continue aborting up the chain.
 		runtime.Goexit()
 	}
+
 	if t.chatty != nil && t.chatty.json {
 		t.chatty.Updatef(t.parent.name, "=== NAME  %s\n", t.parent.name)
 	}
@@ -1938,7 +2040,12 @@
 				testOk = false
 			}
 		}
-		if !testOk || !exampleOk || !fuzzTargetsOk || !runBenchmarks(m.deps.ImportPath(), m.deps.MatchString, m.benchmarks) || race.Errors() > 0 {
+		anyFailed := !testOk || !exampleOk || !fuzzTargetsOk || !runBenchmarks(m.deps.ImportPath(), m.deps.MatchString, m.benchmarks)
+		if !anyFailed && race.Errors() > 0 {
+			fmt.Print(chatty.prefix(), "testing: race detected outside of test execution\n")
+			anyFailed = true
+		}
+		if anyFailed {
 			fmt.Print(chatty.prefix(), "FAIL\n")
 			m.exitCode = 1
 			return
diff --git a/src/testing/testing_test.go b/src/testing/testing_test.go
index 5e92687..d3822df 100644
--- a/src/testing/testing_test.go
+++ b/src/testing/testing_test.go
@@ -6,10 +6,18 @@
 
 import (
 	"bytes"
+	"fmt"
+	"internal/race"
 	"internal/testenv"
 	"os"
+	"os/exec"
 	"path/filepath"
+	"regexp"
+	"slices"
+	"strings"
+	"sync"
 	"testing"
+	"time"
 )
 
 // This is exactly what a test would do without a TestMain.
@@ -17,7 +25,22 @@
 // standard library with a TestMain, so that code is executed.
 
 func TestMain(m *testing.M) {
-	os.Exit(m.Run())
+	if os.Getenv("GO_WANT_RACE_BEFORE_TESTS") == "1" {
+		doRace()
+	}
+
+	m.Run()
+
+	// Note: m.Run currently prints the final "PASS" line, so if any race is
+	// reported here (after m.Run but before the process exits), it will print
+	// "PASS", then print the stack traces for the race, then exit with nonzero
+	// status.
+	//
+	// This is a somewhat fundamental race: because the race detector hooks into
+	// the runtime at a very low level, no matter where we put the printing it
+	// would be possible to report a race that occurs afterward. However, we could
+	// theoretically move the printing after TestMain, which would at least do a
+	// better job of diagnosing races in cleanup functions within TestMain itself.
 }
 
 func TestTempDirInCleanup(t *testing.T) {
@@ -293,3 +316,499 @@
 		t.Errorf("in non-test testing.Test() returned %q, want %q", s, "false")
 	}
 }
+
+// runTest runs a helper test with -test.v, ignoring its exit status.
+// runTest both logs and returns the test output.
+func runTest(t *testing.T, test string) []byte {
+	t.Helper()
+
+	testenv.MustHaveExec(t)
+
+	exe, err := os.Executable()
+	if err != nil {
+		t.Skipf("can't find test executable: %v", err)
+	}
+
+	cmd := testenv.Command(t, exe, "-test.run=^"+test+"$", "-test.bench="+test, "-test.v", "-test.parallel=2", "-test.benchtime=2x")
+	cmd = testenv.CleanCmdEnv(cmd)
+	cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
+	out, err := cmd.CombinedOutput()
+	t.Logf("%v: %v\n%s", cmd, err, out)
+
+	return out
+}
+
+// doRace provokes a data race that generates a race detector report if run
+// under the race detector and is otherwise benign.
+func doRace() {
+	var x int
+	c1 := make(chan bool)
+	go func() {
+		x = 1 // racy write
+		c1 <- true
+	}()
+	_ = x // racy read
+	<-c1
+}
+
+func TestRaceReports(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		// Generate a race detector report in a sub test.
+		t.Run("Sub", func(t *testing.T) {
+			doRace()
+		})
+		return
+	}
+
+	out := runTest(t, "TestRaceReports")
+
+	// We should see at most one race detector report.
+	c := bytes.Count(out, []byte("race detected"))
+	want := 0
+	if race.Enabled {
+		want = 1
+	}
+	if c != want {
+		t.Errorf("got %d race reports, want %d", c, want)
+	}
+}
+
+// Issue #60083. This used to fail on the race builder.
+func TestRaceName(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		doRace()
+		return
+	}
+
+	out := runTest(t, "TestRaceName")
+
+	if regexp.MustCompile(`=== NAME\s*$`).Match(out) {
+		t.Errorf("incorrectly reported test with no name")
+	}
+}
+
+func TestRaceSubReports(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		t.Parallel()
+		c1 := make(chan bool, 1)
+		t.Run("sub", func(t *testing.T) {
+			t.Run("subsub1", func(t *testing.T) {
+				t.Parallel()
+				doRace()
+				c1 <- true
+			})
+			t.Run("subsub2", func(t *testing.T) {
+				t.Parallel()
+				doRace()
+				<-c1
+			})
+		})
+		doRace()
+		return
+	}
+
+	out := runTest(t, "TestRaceSubReports")
+
+	// There should be three race reports: one for each subtest, and one for the
+	// race after the subtests complete. Note that because the subtests run in
+	// parallel, the race stacks may both be printed alongside one or the other
+	// test's logs.
+	cReport := bytes.Count(out, []byte("race detected during execution of test"))
+	wantReport := 0
+	if race.Enabled {
+		wantReport = 3
+	}
+	if cReport != wantReport {
+		t.Errorf("got %d race reports, want %d", cReport, wantReport)
+	}
+
+	// Regardless of when the stacks are printed, we expect each subtest to be
+	// marked as failed, and that failure should propagate up to the parents.
+	cFail := bytes.Count(out, []byte("--- FAIL:"))
+	wantFail := 0
+	if race.Enabled {
+		wantFail = 4
+	}
+	if cFail != wantFail {
+		t.Errorf(`got %d "--- FAIL:" lines, want %d`, cFail, wantFail)
+	}
+}
+
+func TestRaceInCleanup(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		t.Cleanup(doRace)
+		t.Parallel()
+		t.Run("sub", func(t *testing.T) {
+			t.Parallel()
+			// No race should be reported for sub.
+		})
+		return
+	}
+
+	out := runTest(t, "TestRaceInCleanup")
+
+	// There should be one race report, for the parent test only.
+	cReport := bytes.Count(out, []byte("race detected during execution of test"))
+	wantReport := 0
+	if race.Enabled {
+		wantReport = 1
+	}
+	if cReport != wantReport {
+		t.Errorf("got %d race reports, want %d", cReport, wantReport)
+	}
+
+	// Only the parent test should be marked as failed.
+	// (The subtest does not race, and should pass.)
+	cFail := bytes.Count(out, []byte("--- FAIL:"))
+	wantFail := 0
+	if race.Enabled {
+		wantFail = 1
+	}
+	if cFail != wantFail {
+		t.Errorf(`got %d "--- FAIL:" lines, want %d`, cFail, wantFail)
+	}
+}
+
+func TestDeepSubtestRace(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		t.Run("sub", func(t *testing.T) {
+			t.Run("subsub", func(t *testing.T) {
+				t.Run("subsubsub", func(t *testing.T) {
+					doRace()
+				})
+			})
+			doRace()
+		})
+		return
+	}
+
+	out := runTest(t, "TestDeepSubtestRace")
+
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+	want := 0
+	// There should be two race reports.
+	if race.Enabled {
+		want = 2
+	}
+	if c != want {
+		t.Errorf("got %d race reports, want %d", c, want)
+	}
+}
+
+func TestRaceDuringParallelFailsAllSubtests(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		var ready sync.WaitGroup
+		ready.Add(2)
+		done := make(chan struct{})
+		go func() {
+			ready.Wait()
+			doRace() // This race happens while both subtests are running.
+			close(done)
+		}()
+
+		t.Run("sub", func(t *testing.T) {
+			t.Run("subsub1", func(t *testing.T) {
+				t.Parallel()
+				ready.Done()
+				<-done
+			})
+			t.Run("subsub2", func(t *testing.T) {
+				t.Parallel()
+				ready.Done()
+				<-done
+			})
+		})
+
+		return
+	}
+
+	out := runTest(t, "TestRaceDuringParallelFailsAllSubtests")
+
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+	want := 0
+	// Each subtest should report the race independently.
+	if race.Enabled {
+		want = 2
+	}
+	if c != want {
+		t.Errorf("got %d race reports, want %d", c, want)
+	}
+}
+
+func TestRaceBeforeParallel(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		t.Run("sub", func(t *testing.T) {
+			doRace()
+			t.Parallel()
+		})
+		return
+	}
+
+	out := runTest(t, "TestRaceBeforeParallel")
+
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+	want := 0
+	// We should see one race detector report.
+	if race.Enabled {
+		want = 1
+	}
+	if c != want {
+		t.Errorf("got %d race reports, want %d", c, want)
+	}
+}
+
+func TestRaceBeforeTests(t *testing.T) {
+	testenv.MustHaveExec(t)
+
+	exe, err := os.Executable()
+	if err != nil {
+		t.Skipf("can't find test executable: %v", err)
+	}
+
+	cmd := testenv.Command(t, exe, "-test.run=^$")
+	cmd = testenv.CleanCmdEnv(cmd)
+	cmd.Env = append(cmd.Env, "GO_WANT_RACE_BEFORE_TESTS=1")
+	out, _ := cmd.CombinedOutput()
+	t.Logf("%s", out)
+
+	c := bytes.Count(out, []byte("race detected outside of test execution"))
+
+	want := 0
+	if race.Enabled {
+		want = 1
+	}
+	if c != want {
+		t.Errorf("got %d race reports; want %d", c, want)
+	}
+}
+
+func TestBenchmarkRace(t *testing.T) {
+	out := runTest(t, "BenchmarkRacy")
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+
+	want := 0
+	// We should see one race detector report.
+	if race.Enabled {
+		want = 1
+	}
+	if c != want {
+		t.Errorf("got %d race reports; want %d", c, want)
+	}
+}
+
+func BenchmarkRacy(b *testing.B) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+		b.Skipf("skipping intentionally-racy benchmark")
+	}
+	for i := 0; i < b.N; i++ {
+		doRace()
+	}
+}
+
+func TestBenchmarkSubRace(t *testing.T) {
+	out := runTest(t, "BenchmarkSubRacy")
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+
+	want := 0
+	// We should see two race detector reports:
+	// one in the sub-benchmark, and one in the parent afterward.
+	if race.Enabled {
+		want = 2
+	}
+	if c != want {
+		t.Errorf("got %d race reports; want %d", c, want)
+	}
+}
+
+func BenchmarkSubRacy(b *testing.B) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+		b.Skipf("skipping intentionally-racy benchmark")
+	}
+
+	b.Run("non-racy", func(b *testing.B) {
+		tot := 0
+		for i := 0; i < b.N; i++ {
+			tot++
+		}
+		_ = tot
+	})
+
+	b.Run("racy", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			doRace()
+		}
+	})
+
+	doRace() // should be reported separately
+}
+
+func TestRunningTests(t *testing.T) {
+	t.Parallel()
+
+	// Regression test for https://go.dev/issue/64404:
+	// on timeout, the "running tests" message should not include
+	// tests that are waiting on parked subtests.
+
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		for i := 0; i < 2; i++ {
+			t.Run(fmt.Sprintf("outer%d", i), func(t *testing.T) {
+				t.Parallel()
+				for j := 0; j < 2; j++ {
+					t.Run(fmt.Sprintf("inner%d", j), func(t *testing.T) {
+						t.Parallel()
+						for {
+							time.Sleep(1 * time.Millisecond)
+						}
+					})
+				}
+			})
+		}
+	}
+
+	timeout := 10 * time.Millisecond
+	for {
+		cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$", "-test.timeout="+timeout.String(), "-test.parallel=4")
+		cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
+		out, err := cmd.CombinedOutput()
+		t.Logf("%v:\n%s", cmd, out)
+		if _, ok := err.(*exec.ExitError); !ok {
+			t.Fatal(err)
+		}
+
+		// Because the outer subtests (and TestRunningTests itself) are marked as
+		// parallel, their test functions return (and are no longer “running”)
+		// before the inner subtests are released to run and hang.
+		// Only those inner subtests should be reported as running.
+		want := []string{
+			"TestRunningTests/outer0/inner0",
+			"TestRunningTests/outer0/inner1",
+			"TestRunningTests/outer1/inner0",
+			"TestRunningTests/outer1/inner1",
+		}
+
+		got, ok := parseRunningTests(out)
+		if slices.Equal(got, want) {
+			break
+		}
+		if ok {
+			t.Logf("found running tests:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n"))
+		} else {
+			t.Logf("no running tests found")
+		}
+		t.Logf("retrying with longer timeout")
+		timeout *= 2
+	}
+}
+
+func TestRunningTestsInCleanup(t *testing.T) {
+	t.Parallel()
+
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		for i := 0; i < 2; i++ {
+			t.Run(fmt.Sprintf("outer%d", i), func(t *testing.T) {
+				// Not parallel: we expect to see only one outer test,
+				// stuck in cleanup after its subtest finishes.
+
+				t.Cleanup(func() {
+					for {
+						time.Sleep(1 * time.Millisecond)
+					}
+				})
+
+				for j := 0; j < 2; j++ {
+					t.Run(fmt.Sprintf("inner%d", j), func(t *testing.T) {
+						t.Parallel()
+					})
+				}
+			})
+		}
+	}
+
+	timeout := 10 * time.Millisecond
+	for {
+		cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$", "-test.timeout="+timeout.String())
+		cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
+		out, err := cmd.CombinedOutput()
+		t.Logf("%v:\n%s", cmd, out)
+		if _, ok := err.(*exec.ExitError); !ok {
+			t.Fatal(err)
+		}
+
+		// TestRunningTestsInCleanup is blocked in the call to t.Run,
+		// but its test function has not yet returned so it should still
+		// be considered to be running.
+		// outer1 hasn't even started yet, so only outer0 and the top-level
+		// test function should be reported as running.
+		want := []string{
+			"TestRunningTestsInCleanup",
+			"TestRunningTestsInCleanup/outer0",
+		}
+
+		got, ok := parseRunningTests(out)
+		if slices.Equal(got, want) {
+			break
+		}
+		if ok {
+			t.Logf("found running tests:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n"))
+		} else {
+			t.Logf("no running tests found")
+		}
+		t.Logf("retrying with longer timeout")
+		timeout *= 2
+	}
+}
+
+func parseRunningTests(out []byte) (runningTests []string, ok bool) {
+	inRunningTests := false
+	for _, line := range strings.Split(string(out), "\n") {
+		if inRunningTests {
+			if trimmed, ok := strings.CutPrefix(line, "\t"); ok {
+				if name, _, ok := strings.Cut(trimmed, " "); ok {
+					runningTests = append(runningTests, name)
+					continue
+				}
+			}
+
+			// This line is not the name of a running test.
+			return runningTests, true
+		}
+
+		if strings.TrimSpace(line) == "running tests:" {
+			inRunningTests = true
+		}
+	}
+
+	return nil, false
+}
+
+func TestConcurrentRun(t *testing.T) {
+	// Regression test for https://go.dev/issue/64402:
+	// this deadlocked after https://go.dev/cl/506755.
+
+	block := make(chan struct{})
+	var ready, done sync.WaitGroup
+	for i := 0; i < 2; i++ {
+		ready.Add(1)
+		done.Add(1)
+		go t.Run("", func(*testing.T) {
+			ready.Done()
+			<-block
+			done.Done()
+		})
+	}
+	ready.Wait()
+	close(block)
+	done.Wait()
+}
+
+func TestParentRun(t1 *testing.T) {
+	// Regression test for https://go.dev/issue/64402:
+	// this deadlocked after https://go.dev/cl/506755.
+
+	t1.Run("outer", func(t2 *testing.T) {
+		t2.Log("Hello outer!")
+		t1.Run("not_inner", func(t3 *testing.T) { // Note: this is t1.Run, not t2.Run.
+			t3.Log("Hello inner!")
+		})
+	})
+}
diff --git a/src/text/template/doc.go b/src/text/template/doc.go
index 4c01b05..032784b 100644
--- a/src/text/template/doc.go
+++ b/src/text/template/doc.go
@@ -438,13 +438,13 @@
 By construction, a template may reside in only one association. If it's
 necessary to have a template addressable from multiple associations, the
 template definition must be parsed multiple times to create distinct *Template
-values, or must be copied with the Clone or AddParseTree method.
+values, or must be copied with [Template.Clone] or [Template.AddParseTree].
 
 Parse may be called multiple times to assemble the various associated templates;
-see the ParseFiles and ParseGlob functions and methods for simple ways to parse
-related templates stored in files.
+see [ParseFiles], [ParseGlob], [Template.ParseFiles] and [Template.ParseGlob]
+for simple ways to parse related templates stored in files.
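A self-contained sketch of this multi-template workflow, parsing two associated templates in one call and then executing one of them by name; the template names and text are illustrative only.

	package main

	import (
		"os"
		"text/template"
	)

	const src = `
	{{define "greeting"}}Hello, {{.}}!{{end}}
	{{define "farewell"}}Goodbye, {{.}}.{{end}}
	`

	func main() {
		t := template.Must(template.New("root").Parse(src))
		// Execute one of the associated templates by name.
		if err := t.ExecuteTemplate(os.Stdout, "greeting", "gopher"); err != nil {
			panic(err)
		}
	}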
 
-A template may be executed directly or through ExecuteTemplate, which executes
+A template may be executed directly or through [Template.ExecuteTemplate], which executes
 an associated template identified by name. To invoke our example above, we
 might write,
 
diff --git a/src/text/template/exec.go b/src/text/template/exec.go
index fd7db65..2b778ff 100644
--- a/src/text/template/exec.go
+++ b/src/text/template/exec.go
@@ -94,7 +94,7 @@
 
 var missingVal = reflect.ValueOf(missingValType{})
 
-var missingValReflectType = reflect.TypeOf(missingValType{})
+var missingValReflectType = reflect.TypeFor[missingValType]()
 
 func isMissing(v reflect.Value) bool {
 	return v.IsValid() && v.Type() == missingValReflectType
@@ -708,9 +708,9 @@
 }
 
 var (
-	errorType        = reflect.TypeOf((*error)(nil)).Elem()
-	fmtStringerType  = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-	reflectValueType = reflect.TypeOf((*reflect.Value)(nil)).Elem()
+	errorType        = reflect.TypeFor[error]()
+	fmtStringerType  = reflect.TypeFor[fmt.Stringer]()
+	reflectValueType = reflect.TypeFor[reflect.Value]()
 )
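These conversions rely on reflect.TypeFor, the generic helper added in Go 1.22; a quick sketch of the equivalence:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		viaTypeOf := reflect.TypeOf((*error)(nil)).Elem() // pre-generics idiom
		viaTypeFor := reflect.TypeFor[error]()            // equivalent since Go 1.22
		fmt.Println(viaTypeOf == viaTypeFor)              // true
	}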
 
 // evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
index 6eb0d41..e607fd3 100644
--- a/src/text/template/exec_test.go
+++ b/src/text/template/exec_test.go
@@ -265,8 +265,8 @@
 // of the max int boundary.
 // We do it this way so the test doesn't depend on ints being 32 bits.
 var (
-	bigInt  = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
-	bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
+	bigInt  = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeFor[int]().Bits()-1)-1))
+	bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeFor[int]().Bits()-1)))
 )
 
 var execTests = []execTest{
diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go
index b5a8c9e..a949f89 100644
--- a/src/text/template/funcs.go
+++ b/src/text/template/funcs.go
@@ -478,7 +478,7 @@
 			case k1 == uintKind && k2 == intKind:
 				truth = arg.Int() >= 0 && arg1.Uint() == uint64(arg.Int())
 			default:
-				if arg1 != zero && arg != zero {
+				if arg1.IsValid() && arg.IsValid() {
 					return false, errBadComparison
 				}
 			}
diff --git a/src/text/template/parse/node.go b/src/text/template/parse/node.go
index 4726822..c366888 100644
--- a/src/text/template/parse/node.go
+++ b/src/text/template/parse/node.go
@@ -284,7 +284,6 @@
 
 func (a *ActionNode) Copy() Node {
 	return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
-
 }
 
 // CommandNode holds a command (a pipeline inside an evaluating action).
diff --git a/src/time/example_test.go b/src/time/example_test.go
index 059c631..cfdee8f 100644
--- a/src/time/example_test.go
+++ b/src/time/example_test.go
@@ -591,19 +591,33 @@
 }
 
 func ExampleTime_AddDate() {
-	start := time.Date(2009, 1, 1, 0, 0, 0, 0, time.UTC)
+	start := time.Date(2023, 03, 25, 12, 0, 0, 0, time.UTC)
 	oneDayLater := start.AddDate(0, 0, 1)
+	dayDuration := oneDayLater.Sub(start)
 	oneMonthLater := start.AddDate(0, 1, 0)
 	oneYearLater := start.AddDate(1, 0, 0)
 
+	zurich, err := time.LoadLocation("Europe/Zurich")
+	if err != nil {
+		panic(err)
+	}
+	// This was the day before a daylight saving time transition in Zürich.
+	startZurich := time.Date(2023, 03, 25, 12, 0, 0, 0, zurich)
+	oneDayLaterZurich := startZurich.AddDate(0, 0, 1)
+	dayDurationZurich := oneDayLaterZurich.Sub(startZurich)
+
 	fmt.Printf("oneDayLater: start.AddDate(0, 0, 1) = %v\n", oneDayLater)
 	fmt.Printf("oneMonthLater: start.AddDate(0, 1, 0) = %v\n", oneMonthLater)
 	fmt.Printf("oneYearLater: start.AddDate(1, 0, 0) = %v\n", oneYearLater)
+	fmt.Printf("oneDayLaterZurich: startZurich.AddDate(0, 0, 1) = %v\n", oneDayLaterZurich)
+	fmt.Printf("Day duration in UTC: %v | Day duration in Zürich: %v\n", dayDuration, dayDurationZurich)
 
 	// Output:
-	// oneDayLater: start.AddDate(0, 0, 1) = 2009-01-02 00:00:00 +0000 UTC
-	// oneMonthLater: start.AddDate(0, 1, 0) = 2009-02-01 00:00:00 +0000 UTC
-	// oneYearLater: start.AddDate(1, 0, 0) = 2010-01-01 00:00:00 +0000 UTC
+	// oneDayLater: start.AddDate(0, 0, 1) = 2023-03-26 12:00:00 +0000 UTC
+	// oneMonthLater: start.AddDate(0, 1, 0) = 2023-04-25 12:00:00 +0000 UTC
+	// oneYearLater: start.AddDate(1, 0, 0) = 2024-03-25 12:00:00 +0000 UTC
+	// oneDayLaterZurich: startZurich.AddDate(0, 0, 1) = 2023-03-26 12:00:00 +0200 CEST
+	// Day duration in UTC: 24h0m0s | Day duration in Zürich: 23h0m0s
 }
 
 func ExampleTime_After() {
diff --git a/src/time/sleep.go b/src/time/sleep.go
index cdab478..0aec4ca 100644
--- a/src/time/sleep.go
+++ b/src/time/sleep.go
@@ -160,6 +160,7 @@
 // AfterFunc waits for the duration to elapse and then calls f
 // in its own goroutine. It returns a Timer that can
 // be used to cancel the call using its Stop method.
+// The returned Timer's C field is not used and will be nil.
 func AfterFunc(d Duration, f func()) *Timer {
 	t := &Timer{
 		r: runtimeTimer{
diff --git a/src/time/time.go b/src/time/time.go
index e8aac59..9d4c6e9 100644
--- a/src/time/time.go
+++ b/src/time/time.go
@@ -76,6 +76,14 @@
 // For debugging, the result of t.String does include the monotonic
 // clock reading if present. If t != u because of different monotonic clock readings,
 // that difference will be visible when printing t.String() and u.String().
+//
+// # Timer Resolution
+//
+// Timer resolution varies depending on the Go runtime, the operating system
+// and the underlying hardware.
+// On Unix, the resolution is approximately 1ms.
+// On Windows, the default resolution is approximately 16ms, but
+// a higher resolution may be requested using [golang.org/x/sys/windows.TimeBeginPeriod].
 package time
 
 import (
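As a rough, platform-dependent illustration of the resolution note above; the 1ms request below is only an example, and the observed granularity depends on the OS and hardware.

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		const samples = 20
		var total time.Duration
		for i := 0; i < samples; i++ {
			start := time.Now()
			time.Sleep(time.Millisecond) // ask for a very short sleep
			total += time.Since(start)
		}
		// On Unix this tends to average close to the requested 1ms; on Windows
		// it is typically coarser unless a higher resolution was requested.
		fmt.Println("average observed sleep:", total/samples)
	}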
@@ -101,12 +109,10 @@
 // As this time is unlikely to come up in practice, the IsZero method gives
 // a simple way of detecting a time that has not been initialized explicitly.
 //
-// Each Time has associated with it a Location, consulted when computing the
-// presentation form of the time, such as in the Format, Hour, and Year methods.
-// The methods Local, UTC, and In return a Time with a specific location.
-// Changing the location in this way changes only the presentation; it does not
-// change the instant in time being denoted and therefore does not affect the
-// computations described in earlier paragraphs.
+// Each time has an associated Location. The methods Local, UTC, and In return a
+// Time with a specific Location. Changing the Location of a Time value with
+// these methods does not change the actual instant it represents, only the time
+// zone in which to interpret it.
 //
 // Representations of a Time value saved by the GobEncode, MarshalBinary,
 // MarshalJSON, and MarshalText methods store the Time.Location's offset, but not
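A small sketch of the reworded guarantee: In changes only the zone used for presentation, not the instant. Europe/Zurich is just an example location.

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		loc, err := time.LoadLocation("Europe/Zurich")
		if err != nil {
			panic(err)
		}
		t := time.Date(2023, time.March, 25, 12, 0, 0, 0, time.UTC)
		u := t.In(loc)
		fmt.Println(t.Equal(u))           // true: same instant
		fmt.Println(t.Unix() == u.Unix()) // true: same absolute time
		fmt.Println(t.Hour(), u.Hour())   // 12 13: only the presentation differs
	}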
@@ -642,8 +648,17 @@
 // second format use a smaller unit (milli-, micro-, or nanoseconds) to ensure
 // that the leading digit is non-zero. The zero duration formats as 0s.
 func (d Duration) String() string {
+	// This is inlinable to take advantage of "function outlining".
+	// Thus, the caller can decide whether a string must be heap allocated.
+	var arr [32]byte
+	n := d.format(&arr)
+	return string(arr[n:])
+}
+
+// format formats the representation of d into the end of buf and
+// returns the offset of the first character.
+func (d Duration) format(buf *[32]byte) int {
 	// Largest time is 2540400h10m10.000000000s
-	var buf [32]byte
 	w := len(buf)
 
 	u := uint64(d)
@@ -661,7 +676,8 @@
 		w--
 		switch {
 		case u == 0:
-			return "0s"
+			buf[w] = '0'
+			return w
 		case u < uint64(Microsecond):
 			// print nanoseconds
 			prec = 0
@@ -711,7 +727,7 @@
 		buf[w] = '-'
 	}
 
-	return string(buf[w:])
+	return w
 }
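The outlining comment is about allocation: with String reduced to an inlinable wrapper, a caller that never lets the string escape can keep the 32-byte buffer on its own stack. A hedged way to observe this, where the exact count depends on the inlining decisions of the compiler in use:

	package main

	import (
		"fmt"
		"testing"
		"time"
	)

	func main() {
		d := 2*time.Hour + 3*time.Minute
		allocs := testing.AllocsPerRun(1000, func() {
			// The result is only measured, never retained, so it need not escape.
			_ = len(d.String())
		})
		fmt.Println("allocs per call:", allocs) // expected to be 0 when String inlines
	}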
 
 // fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the
@@ -883,16 +899,7 @@
 // To compute t-d for a duration d, use t.Add(-d).
 func (t Time) Sub(u Time) Duration {
 	if t.wall&u.wall&hasMonotonic != 0 {
-		te := t.ext
-		ue := u.ext
-		d := Duration(te - ue)
-		if d < 0 && te > ue {
-			return maxDuration // t - u is positive out of range
-		}
-		if d > 0 && te < ue {
-			return minDuration // t - u is negative out of range
-		}
-		return d
+		return subMono(t.ext, u.ext)
 	}
 	d := Duration(t.sec()-u.sec())*Second + Duration(t.nsec()-u.nsec())
 	// Check for overflow or underflow.
@@ -906,30 +913,35 @@
 	}
 }
 
+func subMono(t, u int64) Duration {
+	d := Duration(t - u)
+	if d < 0 && t > u {
+		return maxDuration // t - u is positive out of range
+	}
+	if d > 0 && t < u {
+		return minDuration // t - u is negative out of range
+	}
+	return d
+}
+
 // Since returns the time elapsed since t.
 // It is shorthand for time.Now().Sub(t).
 func Since(t Time) Duration {
-	var now Time
 	if t.wall&hasMonotonic != 0 {
 		// Common case optimization: if t has monotonic time, then Sub will use only it.
-		now = Time{hasMonotonic, runtimeNano() - startNano, nil}
-	} else {
-		now = Now()
+		return subMono(runtimeNano()-startNano, t.ext)
 	}
-	return now.Sub(t)
+	return Now().Sub(t)
 }
 
 // Until returns the duration until t.
 // It is shorthand for t.Sub(time.Now()).
 func Until(t Time) Duration {
-	var now Time
 	if t.wall&hasMonotonic != 0 {
 		// Common case optimization: if t has monotonic time, then Sub will use only it.
-		now = Time{hasMonotonic, runtimeNano() - startNano, nil}
-	} else {
-		now = Now()
+		return subMono(t.ext, runtimeNano()-startNano)
 	}
-	return t.Sub(now)
+	return t.Sub(Now())
 }
 
 // AddDate returns the time corresponding to adding the
@@ -937,6 +949,15 @@
 // For example, AddDate(-1, 2, 3) applied to January 1, 2011
 // returns March 4, 2010.
 //
+// Note that dates are fundamentally coupled to timezones, and calendrical
+// periods like days don't have fixed durations. AddDate uses the Location of
+// the Time value to determine these durations. That means that the same
+// AddDate arguments can produce a different shift in absolute time depending on
+// the base Time value and its Location. For example, AddDate(0, 0, 1) applied
+// to 12:00 on March 27 always returns 12:00 on March 28. At some locations and
+// in some years this is a 24 hour shift. In others it's a 23 hour shift due to
+// daylight savings time transitions.
+//
 // AddDate normalizes its result in the same way that Date does,
 // so, for example, adding one month to October 31 yields
 // December 1, the normalized form for November 31.
diff --git a/src/time/time_test.go b/src/time/time_test.go
index 3b30f80..86335e3 100644
--- a/src/time/time_test.go
+++ b/src/time/time_test.go
@@ -283,7 +283,7 @@
 	testOne := func(ti, tns, di int64) bool {
 		t.Helper()
 
-		t0 := Unix(ti, int64(tns)).UTC()
+		t0 := Unix(ti, tns).UTC()
 		d := Duration(di)
 		if d < 0 {
 			d = -d
@@ -321,7 +321,7 @@
 		// The commented out code would round half to even instead of up,
 		// but that makes it time-zone dependent, which is a bit strange.
 		if r > int64(d)/2 || r+r == int64(d) /*&& bq.Bit(0) == 1*/ {
-			t1 = t1.Add(Duration(d))
+			t1 = t1.Add(d)
 		}
 
 		// Check that time.Round works.
@@ -1106,14 +1106,14 @@
 	{Date(2009, 11, 23, 0, 0, 0, 0, UTC), Date(2009, 11, 24, 0, 0, 0, 0, UTC), -24 * Hour},
 	{Date(2009, 11, 24, 0, 0, 0, 0, UTC), Date(2009, 11, 23, 0, 0, 0, 0, UTC), 24 * Hour},
 	{Date(-2009, 11, 24, 0, 0, 0, 0, UTC), Date(-2009, 11, 23, 0, 0, 0, 0, UTC), 24 * Hour},
-	{Time{}, Date(2109, 11, 23, 0, 0, 0, 0, UTC), Duration(minDuration)},
-	{Date(2109, 11, 23, 0, 0, 0, 0, UTC), Time{}, Duration(maxDuration)},
-	{Time{}, Date(-2109, 11, 23, 0, 0, 0, 0, UTC), Duration(maxDuration)},
-	{Date(-2109, 11, 23, 0, 0, 0, 0, UTC), Time{}, Duration(minDuration)},
+	{Time{}, Date(2109, 11, 23, 0, 0, 0, 0, UTC), minDuration},
+	{Date(2109, 11, 23, 0, 0, 0, 0, UTC), Time{}, maxDuration},
+	{Time{}, Date(-2109, 11, 23, 0, 0, 0, 0, UTC), maxDuration},
+	{Date(-2109, 11, 23, 0, 0, 0, 0, UTC), Time{}, minDuration},
 	{Date(2290, 1, 1, 0, 0, 0, 0, UTC), Date(2000, 1, 1, 0, 0, 0, 0, UTC), 290*365*24*Hour + 71*24*Hour},
-	{Date(2300, 1, 1, 0, 0, 0, 0, UTC), Date(2000, 1, 1, 0, 0, 0, 0, UTC), Duration(maxDuration)},
+	{Date(2300, 1, 1, 0, 0, 0, 0, UTC), Date(2000, 1, 1, 0, 0, 0, 0, UTC), maxDuration},
 	{Date(2000, 1, 1, 0, 0, 0, 0, UTC), Date(2290, 1, 1, 0, 0, 0, 0, UTC), -290*365*24*Hour - 71*24*Hour},
-	{Date(2000, 1, 1, 0, 0, 0, 0, UTC), Date(2300, 1, 1, 0, 0, 0, 0, UTC), Duration(minDuration)},
+	{Date(2000, 1, 1, 0, 0, 0, 0, UTC), Date(2300, 1, 1, 0, 0, 0, 0, UTC), minDuration},
 	{Date(2311, 11, 26, 02, 16, 47, 63535996, UTC), Date(2019, 8, 16, 2, 29, 30, 268436582, UTC), 9223372036795099414},
 	{MinMonoTime, MaxMonoTime, minDuration},
 	{MaxMonoTime, MinMonoTime, maxDuration},
@@ -1640,7 +1640,7 @@
 
 // Issue 24692: Out of range weekday panics
 func TestWeekdayString(t *testing.T) {
-	if got, want := Weekday(Tuesday).String(), "Tuesday"; got != want {
+	if got, want := Tuesday.String(), "Tuesday"; got != want {
 		t.Errorf("Tuesday weekday = %q; want %q", got, want)
 	}
 	if got, want := Weekday(14).String(), "%!Weekday(14)"; got != want {
diff --git a/src/time/zoneinfo.go b/src/time/zoneinfo.go
index 4edcf3d..c8d1762 100644
--- a/src/time/zoneinfo.go
+++ b/src/time/zoneinfo.go
@@ -16,6 +16,10 @@
 // Typically, the Location represents the collection of time offsets
 // in use in a geographical area. For many Locations the time offset varies
 // depending on whether daylight savings time is in use at the time instant.
+//
+// Location is used to provide a time zone in a printed Time value and for
+// calculations involving intervals that may cross daylight savings time
+// boundaries.
 type Location struct {
 	name string
 	zone []zone
@@ -184,7 +188,7 @@
 	lo := 0
 	hi := len(tx)
 	for hi-lo > 1 {
-		m := lo + (hi-lo)/2
+		m := int(uint(lo+hi) >> 1)
 		lim := tx[m].when
 		if sec < lim {
 			end = lim
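The midpoint rewrite here (and in the unicode searches below) keeps the same result for non-negative bounds while saving a subtraction: the sum is taken in uint, where the carry cannot be lost. A minimal illustration with hypothetical bounds:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		lo, hi := math.MaxInt-4, math.MaxInt-1

		naive := (lo + hi) / 2             // signed sum wraps: wrong answer
		safeSub := lo + (hi-lo)/2          // the previous idiom
		safeShift := int(uint(lo+hi) >> 1) // the new idiom: carry survives in uint

		fmt.Println(naive, safeSub, safeShift) // naive differs; the other two agree
	}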
diff --git a/src/time/zoneinfo_read.go b/src/time/zoneinfo_read.go
index 4d0e47d..707dd11 100644
--- a/src/time/zoneinfo_read.go
+++ b/src/time/zoneinfo_read.go
@@ -90,7 +90,7 @@
 	return p[0], true
 }
 
-// read returns the read of the data in the buffer.
+// rest returns the rest of the data in the buffer.
 func (d *dataIO) rest() []byte {
 	r := d.p
 	d.p = nil
diff --git a/src/time/zoneinfo_windows.go b/src/time/zoneinfo_windows.go
index 76d7975..c9f38ea 100644
--- a/src/time/zoneinfo_windows.go
+++ b/src/time/zoneinfo_windows.go
@@ -20,8 +20,8 @@
 // time apply to all previous and future years as well.
 
 // matchZoneKey checks if stdname and dstname match the corresponding key
-// values "MUI_Std" and MUI_Dlt" or "Std" and "Dlt" (the latter down-level
-// from Vista) in the kname key stored under the open registry key zones.
+// values "MUI_Std" and "MUI_Dlt" or "Std" and "Dlt" in the kname key stored
+// under the open registry key zones.
 func matchZoneKey(zones registry.Key, kname string, stdname, dstname string) (matched bool, err2 error) {
 	k, err := registry.OpenKey(zones, kname, registry.READ)
 	if err != nil {
@@ -30,12 +30,10 @@
 	defer k.Close()
 
 	var std, dlt string
-	if err = registry.LoadRegLoadMUIString(); err == nil {
-		// Try MUI_Std and MUI_Dlt first, fallback to Std and Dlt if *any* error occurs
-		std, err = k.GetMUIStringValue("MUI_Std")
-		if err == nil {
-			dlt, err = k.GetMUIStringValue("MUI_Dlt")
-		}
+	// Try MUI_Std and MUI_Dlt first, fallback to Std and Dlt if *any* error occurs
+	std, err = k.GetMUIStringValue("MUI_Std")
+	if err == nil {
+		dlt, err = k.GetMUIStringValue("MUI_Dlt")
 	}
 	if err != nil { // Fallback to Std and Dlt
 		if std, _, err = k.GetStringValue("Std"); err != nil {
diff --git a/src/time/zoneinfo_windows_test.go b/src/time/zoneinfo_windows_test.go
index f23d9dc..5196b8e 100644
--- a/src/time/zoneinfo_windows_test.go
+++ b/src/time/zoneinfo_windows_test.go
@@ -45,12 +45,10 @@
 	defer k.Close()
 
 	var std, dlt string
-	if err = registry.LoadRegLoadMUIString(); err == nil {
-		// Try MUI_Std and MUI_Dlt first, fallback to Std and Dlt if *any* error occurs
-		std, err = k.GetMUIStringValue("MUI_Std")
-		if err == nil {
-			dlt, err = k.GetMUIStringValue("MUI_Dlt")
-		}
+	// Try MUI_Std and MUI_Dlt first, fallback to Std and Dlt if *any* error occurs
+	std, err = k.GetMUIStringValue("MUI_Std")
+	if err == nil {
+		dlt, err = k.GetMUIStringValue("MUI_Dlt")
 	}
 	if err != nil { // Fallback to Std and Dlt
 		if std, _, err = k.GetStringValue("Std"); err != nil {
diff --git a/src/unicode/graphic.go b/src/unicode/graphic.go
index 2af2977..aa62f2a 100644
--- a/src/unicode/graphic.go
+++ b/src/unicode/graphic.go
@@ -32,7 +32,7 @@
 
 // IsGraphic reports whether the rune is defined as a Graphic by Unicode.
 // Such characters include letters, marks, numbers, punctuation, symbols, and
-// spaces, from categories L, M, N, P, S, Zs.
+// spaces, from categories [L], [M], [N], [P], [S], [Zs].
 func IsGraphic(r rune) bool {
 	// We convert to uint32 to avoid the extra test for negative,
 	// and in the index we convert to uint8 to avoid the range check.
@@ -44,8 +44,8 @@
 
 // IsPrint reports whether the rune is defined as printable by Go. Such
 // characters include letters, marks, numbers, punctuation, symbols, and the
-// ASCII space character, from categories L, M, N, P, S and the ASCII space
-// character. This categorization is the same as IsGraphic except that the
+// ASCII space character, from categories [L], [M], [N], [P], [S] and the ASCII space
+// character. This categorization is the same as [IsGraphic] except that the
 // only spacing character is ASCII space, U+0020.
 func IsPrint(r rune) bool {
 	if uint32(r) <= MaxLatin1 {
@@ -76,8 +76,8 @@
 }
 
 // IsControl reports whether the rune is a control character.
-// The C (Other) Unicode category includes more code points
-// such as surrogates; use Is(C, r) to test for them.
+// The [C] ([Other]) Unicode category includes more code points
+// such as surrogates; use [Is](C, r) to test for them.
 func IsControl(r rune) bool {
 	if uint32(r) <= MaxLatin1 {
 		return properties[uint8(r)]&pC != 0
@@ -86,7 +86,7 @@
 	return false
 }
 
-// IsLetter reports whether the rune is a letter (category L).
+// IsLetter reports whether the rune is a letter (category [L]).
 func IsLetter(r rune) bool {
 	if uint32(r) <= MaxLatin1 {
 		return properties[uint8(r)]&(pLmask) != 0
@@ -94,13 +94,13 @@
 	return isExcludingLatin(Letter, r)
 }
 
-// IsMark reports whether the rune is a mark character (category M).
+// IsMark reports whether the rune is a mark character (category [M]).
 func IsMark(r rune) bool {
 	// There are no mark characters in Latin-1.
 	return isExcludingLatin(Mark, r)
 }
 
-// IsNumber reports whether the rune is a number (category N).
+// IsNumber reports whether the rune is a number (category [N]).
 func IsNumber(r rune) bool {
 	if uint32(r) <= MaxLatin1 {
 		return properties[uint8(r)]&pN != 0
@@ -109,7 +109,7 @@
 }
 
 // IsPunct reports whether the rune is a Unicode punctuation character
-// (category P).
+// (category [P]).
 func IsPunct(r rune) bool {
 	if uint32(r) <= MaxLatin1 {
 		return properties[uint8(r)]&pP != 0
@@ -124,7 +124,7 @@
 //	'\t', '\n', '\v', '\f', '\r', ' ', U+0085 (NEL), U+00A0 (NBSP).
 //
 // Other definitions of spacing characters are set by category
-// Z and property Pattern_White_Space.
+// Z and property [Pattern_White_Space].
 func IsSpace(r rune) bool {
 	// This property isn't the same as Z; special-case it.
 	if uint32(r) <= MaxLatin1 {
diff --git a/src/unicode/letter.go b/src/unicode/letter.go
index f3f8e52..9e2cead 100644
--- a/src/unicode/letter.go
+++ b/src/unicode/letter.go
@@ -76,9 +76,9 @@
 
 type d [MaxCase]rune // to make the CaseRanges text shorter
 
-// If the Delta field of a CaseRange is UpperLower, it means
+// If the Delta field of a [CaseRange] is UpperLower, it means
 // this CaseRange represents a sequence of the form (say)
-// Upper Lower Upper Lower.
+// [Upper] [Lower] [Upper] [Lower].
 const (
 	UpperLower = MaxRune + 1 // (Cannot be a valid delta.)
 )
@@ -106,7 +106,7 @@
 	lo := 0
 	hi := len(ranges)
 	for lo < hi {
-		m := lo + (hi-lo)/2
+		m := int(uint(lo+hi) >> 1)
 		range_ := &ranges[m]
 		if range_.Lo <= r && r <= range_.Hi {
 			return range_.Stride == 1 || (r-range_.Lo)%range_.Stride == 0
@@ -139,7 +139,7 @@
 	lo := 0
 	hi := len(ranges)
 	for lo < hi {
-		m := lo + (hi-lo)/2
+		m := int(uint(lo+hi) >> 1)
 		range_ := ranges[m]
 		if range_.Lo <= r && r <= range_.Hi {
 			return range_.Stride == 1 || (r-range_.Lo)%range_.Stride == 0
@@ -216,7 +216,7 @@
 	lo := 0
 	hi := len(caseRange)
 	for lo < hi {
-		m := lo + (hi-lo)/2
+		m := int(uint(lo+hi) >> 1)
 		cr := caseRange[m]
 		if rune(cr.Lo) <= r && r <= rune(cr.Hi) {
 			delta := cr.Delta[_case]
@@ -244,7 +244,7 @@
 	return r, false
 }
 
-// To maps the rune to the specified case: UpperCase, LowerCase, or TitleCase.
+// To maps the rune to the specified case: [UpperCase], [LowerCase], or [TitleCase].
 func To(_case int, r rune) rune {
 	r, _ = to(_case, r, CaseRanges)
 	return r
@@ -350,7 +350,7 @@
 	lo := 0
 	hi := len(caseOrbit)
 	for lo < hi {
-		m := lo + (hi-lo)/2
+		m := int(uint(lo+hi) >> 1)
 		if rune(caseOrbit[m].From) < r {
 			lo = m + 1
 		} else {
diff --git a/src/unicode/letter_test.go b/src/unicode/letter_test.go
index a91e3a3..123f9a6 100644
--- a/src/unicode/letter_test.go
+++ b/src/unicode/letter_test.go
@@ -518,7 +518,7 @@
 	lo := 0
 	hi := len(ranges)
 	for lo < hi {
-		m := lo + (hi-lo)/2
+		m := int(uint(lo+hi) >> 1)
 		range_ := &ranges[m]
 		if range_.Lo <= r && r <= range_.Hi {
 			return (r-range_.Lo)%range_.Stride == 0
diff --git a/src/unicode/utf8/utf8.go b/src/unicode/utf8/utf8.go
index 1e9f666..71d6bf1 100644
--- a/src/unicode/utf8/utf8.go
+++ b/src/unicode/utf8/utf8.go
@@ -141,7 +141,7 @@
 }
 
 // DecodeRune unpacks the first UTF-8 encoding in p and returns the rune and
-// its width in bytes. If p is empty it returns (RuneError, 0). Otherwise, if
+// its width in bytes. If p is empty it returns ([RuneError], 0). Otherwise, if
 // the encoding is invalid, it returns (RuneError, 1). Both are impossible
 // results for correct, non-empty UTF-8.
 //
@@ -188,8 +188,8 @@
 	return rune(p0&mask4)<<18 | rune(b1&maskx)<<12 | rune(b2&maskx)<<6 | rune(b3&maskx), 4
 }
 
-// DecodeRuneInString is like DecodeRune but its input is a string. If s is
-// empty it returns (RuneError, 0). Otherwise, if the encoding is invalid, it
+// DecodeRuneInString is like [DecodeRune] but its input is a string. If s is
+// empty it returns ([RuneError], 0). Otherwise, if the encoding is invalid, it
 // returns (RuneError, 1). Both are impossible results for correct, non-empty
 // UTF-8.
 //
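A short sketch of the documented return values, including the empty and invalid inputs:

	package main

	import (
		"fmt"
		"unicode/utf8"
	)

	func main() {
		r, size := utf8.DecodeRuneInString("héllo")
		fmt.Println(r, size) // 104 1 ('h' is a single byte)

		r, size = utf8.DecodeRuneInString("")
		fmt.Println(r == utf8.RuneError, size) // true 0

		r, size = utf8.DecodeRuneInString("\xff")
		fmt.Println(r == utf8.RuneError, size) // true 1
	}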
@@ -237,7 +237,7 @@
 }
 
 // DecodeLastRune unpacks the last UTF-8 encoding in p and returns the rune and
-// its width in bytes. If p is empty it returns (RuneError, 0). Otherwise, if
+// its width in bytes. If p is empty it returns ([RuneError], 0). Otherwise, if
 // the encoding is invalid, it returns (RuneError, 1). Both are impossible
 // results for correct, non-empty UTF-8.
 //
@@ -276,8 +276,8 @@
 	return r, size
 }
 
-// DecodeLastRuneInString is like DecodeLastRune but its input is a string. If
-// s is empty it returns (RuneError, 0). Otherwise, if the encoding is invalid,
+// DecodeLastRuneInString is like [DecodeLastRune] but its input is a string. If
+// s is empty it returns ([RuneError], 0). Otherwise, if the encoding is invalid,
 // it returns (RuneError, 1). Both are impossible results for correct,
 // non-empty UTF-8.
 //
@@ -337,7 +337,7 @@
 }
 
 // EncodeRune writes into p (which must be large enough) the UTF-8 encoding of the rune.
-// If the rune is out of range, it writes the encoding of RuneError.
+// If the rune is out of range, it writes the encoding of [RuneError].
 // It returns the number of bytes written.
 func EncodeRune(p []byte, r rune) int {
 	// Negative values are erroneous. Making it unsigned addresses the problem.
@@ -371,7 +371,7 @@
 
 // AppendRune appends the UTF-8 encoding of r to the end of p and
 // returns the extended buffer. If the rune is out of range,
-// it appends the encoding of RuneError.
+// it appends the encoding of [RuneError].
 func AppendRune(p []byte, r rune) []byte {
 	// This function is inlineable for fast handling of ASCII.
 	if uint32(r) <= rune1Max {
@@ -433,7 +433,7 @@
 	return n
 }
 
-// RuneCountInString is like RuneCount but its input is a string.
+// RuneCountInString is like [RuneCount] but its input is a string.
 func RuneCountInString(s string) (n int) {
 	ns := len(s)
 	for i := 0; i < ns; n++ {
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
index 94c71ac..661ea13 100644
--- a/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.11 && gc && !purego
-// +build go1.11,gc,!purego
+//go:build gc && !purego
 
 package chacha20
 
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
index 63cae9e..7dd2638 100644
--- a/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.11 && gc && !purego
-// +build go1.11,gc,!purego
+//go:build gc && !purego
 
 #include "textflag.h"
 
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
index 025b498..db42e66 100644
--- a/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego
-// +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego
+//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego
 
 package chacha20
 
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
index da420b2..3a4287f 100644
--- a/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 package chacha20
 
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
index 5c0fed2..66aebae 100644
--- a/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
@@ -20,7 +20,6 @@
 // due to the calling conventions and initialization of constants.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 #include "textflag.h"
 
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
index 4652247..683ccfd 100644
--- a/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 package chacha20
 
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
index f3ef5a0..1eda91a 100644
--- a/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 #include "go_asm.h"
 #include "textflag.h"
diff --git a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
index 0c408c5..50695a1 100644
--- a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
+++ b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 package chacha20poly1305
 
diff --git a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
index 867c181..731d2ac 100644
--- a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
+++ b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
@@ -5,7 +5,6 @@
 // This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 #include "textflag.h"
 // General register allocation
@@ -184,11 +183,31 @@
 #define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10
 #define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11
 #define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15
+
 // Some macros
+
+// ROL rotates the uint32s in register R left by N bits, using temporary T.
+#define ROL(N, R, T) \
+	MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R
+
+// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed.
+#ifdef GOAMD64_v2
+#define ROL16(R, T) PSHUFB ·rol16<>(SB), R
+#else
+#define ROL16(R, T) ROL(16, R, T)
+#endif
+
+// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed.
+#ifdef GOAMD64_v2
+#define ROL8(R, T) PSHUFB ·rol8<>(SB), R
+#else
+#define ROL8(R, T) ROL(8, R, T)
+#endif
+
 #define chachaQR(A, B, C, D, T) \
-	PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D                            \
+	PADDD B, A; PXOR A, D; ROL16(D, T) \
 	PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \
-	PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D                             \
+	PADDD B, A; PXOR A, D; ROL8(D, T) \
 	PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B
 
 #define chachaQR_AVX2(A, B, C, D, T) \
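
Reviewer note: the new ROL/ROL16/ROL8 macros above select a PSHUFB byte shuffle when GOAMD64_v2 is set and otherwise fall back to the shift/shift/xor sequence; both paths compute a 32-bit rotate-left of every lane. A minimal pure-Go sketch of that fallback identity (names and values here are illustrative, not part of the assembly):

package main

import (
	"fmt"
	"math/bits"
)

// rolShiftXor mirrors the ROL macro's fallback: shift left by n, shift right by
// 32-n, and combine. The two shifted halves never overlap, so OR (or the PXOR
// the macro uses) yields a plain 32-bit rotate left.
func rolShiftXor(x uint32, n uint) uint32 {
	return (x << n) | (x >> (32 - n))
}

func main() {
	x := uint32(0x01020304)
	fmt.Printf("%08x %08x\n", rolShiftXor(x, 16), bits.RotateLeft32(x, 16)) // identical results
}
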
diff --git a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go
index f832b33..34e6ab1 100644
--- a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go
+++ b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !amd64 || !gc || purego
-// +build !amd64 !gc purego
 
 package chacha20poly1305
 
diff --git a/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
index 6fc2838..2492f79 100644
--- a/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
+++ b/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
@@ -733,13 +733,14 @@
 	return true
 }
 
-// ReadOptionalASN1Boolean sets *out to the value of the next ASN.1 BOOLEAN or,
-// if the next bytes are not an ASN.1 BOOLEAN, to the value of defaultValue.
-// It reports whether the operation was successful.
-func (s *String) ReadOptionalASN1Boolean(out *bool, defaultValue bool) bool {
+// ReadOptionalASN1Boolean attempts to read an optional ASN.1 BOOLEAN
+// explicitly tagged with tag into out and advances. If no element with a
+// matching tag is present, it sets "out" to defaultValue instead. It reports
+// whether the read was successful.
+func (s *String) ReadOptionalASN1Boolean(out *bool, tag asn1.Tag, defaultValue bool) bool {
 	var present bool
 	var child String
-	if !s.ReadOptionalASN1(&child, &present, asn1.BOOLEAN) {
+	if !s.ReadOptionalASN1(&child, &present, tag) {
 		return false
 	}
 
@@ -748,7 +749,7 @@
 		return true
 	}
 
-	return s.ReadASN1Boolean(out)
+	return child.ReadASN1Boolean(out)
 }
 
 func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
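
Reviewer note: ReadOptionalASN1Boolean now takes the tag of the optional element explicitly and reads the BOOLEAN from the child element rather than the outer string. A hedged usage sketch, assuming the public golang.org/x/crypto/cryptobyte module at a version that has this three-argument signature; the sample bytes and variable names are illustrative:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	casn1 "golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	// SEQUENCE { [0] { BOOLEAN TRUE } }
	input := cryptobyte.String([]byte{0x30, 0x05, 0xa0, 0x03, 0x01, 0x01, 0xff})

	var seq cryptobyte.String
	if !input.ReadASN1(&seq, casn1.SEQUENCE) {
		panic("bad SEQUENCE")
	}

	flag := false
	tag := casn1.Tag(0).ContextSpecific().Constructed() // explicit [0] tag
	if !seq.ReadOptionalASN1Boolean(&flag, tag, false) { // false if the element is absent
		panic("bad optional BOOLEAN")
	}
	fmt.Println(flag) // true
}
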
diff --git a/src/vendor/golang.org/x/crypto/cryptobyte/builder.go b/src/vendor/golang.org/x/crypto/cryptobyte/builder.go
index c05ac7d..cf254f5 100644
--- a/src/vendor/golang.org/x/crypto/cryptobyte/builder.go
+++ b/src/vendor/golang.org/x/crypto/cryptobyte/builder.go
@@ -95,6 +95,11 @@
 	b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
 }
 
+// AddUint48 appends a big-endian, 48-bit value to the byte string.
+func (b *Builder) AddUint48(v uint64) {
+	b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
 // AddUint64 appends a big-endian, 64-bit value to the byte string.
 func (b *Builder) AddUint64(v uint64) {
 	b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
diff --git a/src/vendor/golang.org/x/crypto/cryptobyte/string.go b/src/vendor/golang.org/x/crypto/cryptobyte/string.go
index 0531a3d..10692a8 100644
--- a/src/vendor/golang.org/x/crypto/cryptobyte/string.go
+++ b/src/vendor/golang.org/x/crypto/cryptobyte/string.go
@@ -81,6 +81,17 @@
 	return true
 }
 
+// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint48(out *uint64) bool {
+	v := s.read(6)
+	if v == nil {
+		return false
+	}
+	*out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5])
+	return true
+}
+
 // ReadUint64 decodes a big-endian, 64-bit value into out and advances over it.
 // It reports whether the read was successful.
 func (s *String) ReadUint64(out *uint64) bool {
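
Reviewer note: AddUint48 and ReadUint48 mirror the existing 16/24/32/64-bit helpers, encoding the low 48 bits big-endian. A small round-trip sketch, assuming the public golang.org/x/crypto/cryptobyte module at a version that includes both helpers; the value is illustrative:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder
	b.AddUint48(0xcafef00dbeef) // only the low 48 bits of the uint64 are encoded
	wire, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", wire) // ca fe f0 0d be ef

	s := cryptobyte.String(wire)
	var v uint64
	if !s.ReadUint48(&v) {
		panic("short read")
	}
	fmt.Printf("%#x\n", v) // 0xcafef00dbeef
}
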
diff --git a/src/vendor/golang.org/x/crypto/hkdf/hkdf.go b/src/vendor/golang.org/x/crypto/hkdf/hkdf.go
index dda3f14..f4ded5f 100644
--- a/src/vendor/golang.org/x/crypto/hkdf/hkdf.go
+++ b/src/vendor/golang.org/x/crypto/hkdf/hkdf.go
@@ -56,7 +56,9 @@
 
 	// Fill the rest of the buffer
 	for len(p) > 0 {
-		f.expander.Reset()
+		if f.counter > 1 {
+			f.expander.Reset()
+		}
 		f.expander.Write(f.prev)
 		f.expander.Write(f.info)
 		f.expander.Write([]byte{f.counter})
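
Reviewer note: the change only skips resetting the expander HMAC on the first block, where it is still freshly keyed, so caller-visible output is unchanged. For context, typical use of the package via its usual entry point (hash choice, secret, salt, and info here are illustrative):

package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := []byte("input keying material")
	salt := []byte("optional salt")
	info := []byte("context info")

	// Derive 32 bytes of key material from the HKDF output stream.
	r := hkdf.New(sha256.New, secret, salt, info)
	key := make([]byte, 32)
	if _, err := io.ReadFull(r, key); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", key)
}
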
diff --git a/src/vendor/golang.org/x/crypto/internal/alias/alias.go b/src/vendor/golang.org/x/crypto/internal/alias/alias.go
index 69c17f8..551ff0c 100644
--- a/src/vendor/golang.org/x/crypto/internal/alias/alias.go
+++ b/src/vendor/golang.org/x/crypto/internal/alias/alias.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !purego
-// +build !purego
 
 // Package alias implements memory aliasing tests.
 package alias
diff --git a/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
index 4775b0a..6fe61b5 100644
--- a/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
+++ b/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build purego
-// +build purego
 
 // Package alias implements memory aliasing tests.
 package alias
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
index 45b5c96..d33c889 100644
--- a/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !go1.13
-// +build !go1.13
 
 package poly1305
 
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
index ed52b34..495c1fa 100644
--- a/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build go1.13
-// +build go1.13
 
 package poly1305
 
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
index f184b67..333da28 100644
--- a/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
-// +build !amd64,!ppc64le,!s390x !gc purego
 
 package poly1305
 
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
index 6d52233..164cd47 100644
--- a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 package poly1305
 
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
index 1d74f0f..e0d3c64 100644
--- a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 #include "textflag.h"
 
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
index 4a06994..4aec487 100644
--- a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 package poly1305
 
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
index 58422aa..d2ca5de 100644
--- a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 #include "textflag.h"
 
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
index ec95966..e1d033a 100644
--- a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 package poly1305
 
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
index aa9e049..0fe3a7c 100644
--- a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc && !purego
-// +build gc,!purego
 
 #include "textflag.h"
 
diff --git a/src/vendor/golang.org/x/net/dns/dnsmessage/message.go b/src/vendor/golang.org/x/net/dns/dnsmessage/message.go
index 37da3de..42987ab 100644
--- a/src/vendor/golang.org/x/net/dns/dnsmessage/message.go
+++ b/src/vendor/golang.org/x/net/dns/dnsmessage/message.go
@@ -361,6 +361,8 @@
 		"Truncated: " + printBool(m.Truncated) + ", " +
 		"RecursionDesired: " + printBool(m.RecursionDesired) + ", " +
 		"RecursionAvailable: " + printBool(m.RecursionAvailable) + ", " +
+		"AuthenticData: " + printBool(m.AuthenticData) + ", " +
+		"CheckingDisabled: " + printBool(m.CheckingDisabled) + ", " +
 		"RCode: " + m.RCode.GoString() + "}"
 }
 
@@ -490,7 +492,7 @@
 // A ResourceBody is a DNS resource record minus the header.
 type ResourceBody interface {
 	// pack packs a Resource except for its header.
-	pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error)
+	pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error)
 
 	// realType returns the actual type of the Resource. This is used to
 	// fill in the header Type field.
@@ -501,7 +503,7 @@
 }
 
 // pack appends the wire format of the Resource to msg.
-func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *Resource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	if r.Body == nil {
 		return msg, errNilResouceBody
 	}
@@ -540,11 +542,13 @@
 	msg    []byte
 	header header
 
-	section        section
-	off            int
-	index          int
-	resHeaderValid bool
-	resHeader      ResourceHeader
+	section         section
+	off             int
+	index           int
+	resHeaderValid  bool
+	resHeaderOffset int
+	resHeaderType   Type
+	resHeaderLength uint16
 }
 
 // Start parses the header and enables the parsing of Questions.
@@ -595,8 +599,9 @@
 
 func (p *Parser) resourceHeader(sec section) (ResourceHeader, error) {
 	if p.resHeaderValid {
-		return p.resHeader, nil
+		p.off = p.resHeaderOffset
 	}
+
 	if err := p.checkAdvance(sec); err != nil {
 		return ResourceHeader{}, err
 	}
@@ -606,14 +611,16 @@
 		return ResourceHeader{}, err
 	}
 	p.resHeaderValid = true
-	p.resHeader = hdr
+	p.resHeaderOffset = p.off
+	p.resHeaderType = hdr.Type
+	p.resHeaderLength = hdr.Length
 	p.off = off
 	return hdr, nil
 }
 
 func (p *Parser) skipResource(sec section) error {
-	if p.resHeaderValid {
-		newOff := p.off + int(p.resHeader.Length)
+	if p.resHeaderValid && p.section == sec {
+		newOff := p.off + int(p.resHeaderLength)
 		if newOff > len(p.msg) {
 			return errResourceLen
 		}
@@ -744,6 +751,9 @@
 }
 
 // SkipAnswer skips a single Answer Resource.
+//
+// It does not perform a complete validation of the resource header, which means
+// it may return a nil error when the [AnswerHeader] would actually return an error.
 func (p *Parser) SkipAnswer() error {
 	return p.skipResource(sectionAnswers)
 }
@@ -794,6 +804,9 @@
 }
 
 // SkipAuthority skips a single Authority Resource.
+//
+// It does not perform a complete validation of the resource header, which means
+// it may return a nil error when the [AuthorityHeader] would actually return an error.
 func (p *Parser) SkipAuthority() error {
 	return p.skipResource(sectionAuthorities)
 }
@@ -844,6 +857,9 @@
 }
 
 // SkipAdditional skips a single Additional Resource.
+//
+// It does not perform a complete validation of the resource header, which means
+// it may return a nil error when the [AdditionalHeader] would actually return an error.
 func (p *Parser) SkipAdditional() error {
 	return p.skipResource(sectionAdditionals)
 }
@@ -864,14 +880,14 @@
 // One of the XXXHeader methods must have been called before calling this
 // method.
 func (p *Parser) CNAMEResource() (CNAMEResource, error) {
-	if !p.resHeaderValid || p.resHeader.Type != TypeCNAME {
+	if !p.resHeaderValid || p.resHeaderType != TypeCNAME {
 		return CNAMEResource{}, ErrNotStarted
 	}
 	r, err := unpackCNAMEResource(p.msg, p.off)
 	if err != nil {
 		return CNAMEResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -882,14 +898,14 @@
 // One of the XXXHeader methods must have been called before calling this
 // method.
 func (p *Parser) MXResource() (MXResource, error) {
-	if !p.resHeaderValid || p.resHeader.Type != TypeMX {
+	if !p.resHeaderValid || p.resHeaderType != TypeMX {
 		return MXResource{}, ErrNotStarted
 	}
 	r, err := unpackMXResource(p.msg, p.off)
 	if err != nil {
 		return MXResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -900,14 +916,14 @@
 // One of the XXXHeader methods must have been called before calling this
 // method.
 func (p *Parser) NSResource() (NSResource, error) {
-	if !p.resHeaderValid || p.resHeader.Type != TypeNS {
+	if !p.resHeaderValid || p.resHeaderType != TypeNS {
 		return NSResource{}, ErrNotStarted
 	}
 	r, err := unpackNSResource(p.msg, p.off)
 	if err != nil {
 		return NSResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -918,14 +934,14 @@
 // One of the XXXHeader methods must have been called before calling this
 // method.
 func (p *Parser) PTRResource() (PTRResource, error) {
-	if !p.resHeaderValid || p.resHeader.Type != TypePTR {
+	if !p.resHeaderValid || p.resHeaderType != TypePTR {
 		return PTRResource{}, ErrNotStarted
 	}
 	r, err := unpackPTRResource(p.msg, p.off)
 	if err != nil {
 		return PTRResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -936,14 +952,14 @@
 // One of the XXXHeader methods must have been called before calling this
 // method.
 func (p *Parser) SOAResource() (SOAResource, error) {
-	if !p.resHeaderValid || p.resHeader.Type != TypeSOA {
+	if !p.resHeaderValid || p.resHeaderType != TypeSOA {
 		return SOAResource{}, ErrNotStarted
 	}
 	r, err := unpackSOAResource(p.msg, p.off)
 	if err != nil {
 		return SOAResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -954,14 +970,14 @@
 // One of the XXXHeader methods must have been called before calling this
 // method.
 func (p *Parser) TXTResource() (TXTResource, error) {
-	if !p.resHeaderValid || p.resHeader.Type != TypeTXT {
+	if !p.resHeaderValid || p.resHeaderType != TypeTXT {
 		return TXTResource{}, ErrNotStarted
 	}
-	r, err := unpackTXTResource(p.msg, p.off, p.resHeader.Length)
+	r, err := unpackTXTResource(p.msg, p.off, p.resHeaderLength)
 	if err != nil {
 		return TXTResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -972,14 +988,14 @@
 // One of the XXXHeader methods must have been called before calling this
 // method.
 func (p *Parser) SRVResource() (SRVResource, error) {
-	if !p.resHeaderValid || p.resHeader.Type != TypeSRV {
+	if !p.resHeaderValid || p.resHeaderType != TypeSRV {
 		return SRVResource{}, ErrNotStarted
 	}
 	r, err := unpackSRVResource(p.msg, p.off)
 	if err != nil {
 		return SRVResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -990,14 +1006,14 @@
 // One of the XXXHeader methods must have been called before calling this
 // method.
 func (p *Parser) AResource() (AResource, error) {
-	if !p.resHeaderValid || p.resHeader.Type != TypeA {
+	if !p.resHeaderValid || p.resHeaderType != TypeA {
 		return AResource{}, ErrNotStarted
 	}
 	r, err := unpackAResource(p.msg, p.off)
 	if err != nil {
 		return AResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -1008,14 +1024,14 @@
 // One of the XXXHeader methods must have been called before calling this
 // method.
 func (p *Parser) AAAAResource() (AAAAResource, error) {
-	if !p.resHeaderValid || p.resHeader.Type != TypeAAAA {
+	if !p.resHeaderValid || p.resHeaderType != TypeAAAA {
 		return AAAAResource{}, ErrNotStarted
 	}
 	r, err := unpackAAAAResource(p.msg, p.off)
 	if err != nil {
 		return AAAAResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -1026,14 +1042,14 @@
 // One of the XXXHeader methods must have been called before calling this
 // method.
 func (p *Parser) OPTResource() (OPTResource, error) {
-	if !p.resHeaderValid || p.resHeader.Type != TypeOPT {
+	if !p.resHeaderValid || p.resHeaderType != TypeOPT {
 		return OPTResource{}, ErrNotStarted
 	}
-	r, err := unpackOPTResource(p.msg, p.off, p.resHeader.Length)
+	r, err := unpackOPTResource(p.msg, p.off, p.resHeaderLength)
 	if err != nil {
 		return OPTResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -1047,11 +1063,11 @@
 	if !p.resHeaderValid {
 		return UnknownResource{}, ErrNotStarted
 	}
-	r, err := unpackUnknownResource(p.resHeader.Type, p.msg, p.off, p.resHeader.Length)
+	r, err := unpackUnknownResource(p.resHeaderType, p.msg, p.off, p.resHeaderLength)
 	if err != nil {
 		return UnknownResource{}, err
 	}
-	p.off += int(p.resHeader.Length)
+	p.off += int(p.resHeaderLength)
 	p.resHeaderValid = false
 	p.index++
 	return r, nil
@@ -1122,7 +1138,7 @@
 	// DNS messages can be a maximum of 512 bytes long. Without compression,
 	// many DNS response messages are over this limit, so enabling
 	// compression will help ensure compliance.
-	compression := map[string]int{}
+	compression := map[string]uint16{}
 
 	for i := range m.Questions {
 		var err error
@@ -1213,7 +1229,7 @@
 
 	// compression is a mapping from name suffixes to their starting index
 	// in msg.
-	compression map[string]int
+	compression map[string]uint16
 }
 
 // NewBuilder creates a new builder with compression disabled.
@@ -1250,7 +1266,7 @@
 //
 // Compression should be enabled before any sections are added for best results.
 func (b *Builder) EnableCompression() {
-	b.compression = map[string]int{}
+	b.compression = map[string]uint16{}
 }
 
 func (b *Builder) startCheck(s section) error {
@@ -1666,7 +1682,7 @@
 // pack appends the wire format of the ResourceHeader to oldMsg.
 //
 // lenOff is the offset in msg where the Length field was packed.
-func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int, compressionOff int) (msg []byte, lenOff int, err error) {
+func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]uint16, compressionOff int) (msg []byte, lenOff int, err error) {
 	msg = oldMsg
 	if msg, err = h.Name.pack(msg, compression, compressionOff); err != nil {
 		return oldMsg, 0, &nestedError{"Name", err}
@@ -1894,7 +1910,7 @@
 
 const nonEncodedNameMax = 254
 
-// A Name is a non-encoded domain name. It is used instead of strings to avoid
+// A Name is a non-encoded and non-escaped domain name. It is used instead of strings to avoid
 // allocations.
 type Name struct {
 	Data   [255]byte
@@ -1921,6 +1937,8 @@
 }
 
 // String implements fmt.Stringer.String.
+//
+// Note: characters inside the labels are not escaped in any way.
 func (n Name) String() string {
 	return string(n.Data[:n.Length])
 }
@@ -1937,7 +1955,7 @@
 //
 // The compression map will be updated with new domain suffixes. If compression
 // is nil, compression will not be used.
-func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (n *Name) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	oldMsg := msg
 
 	if n.Length > nonEncodedNameMax {
@@ -1954,6 +1972,8 @@
 		return append(msg, 0), nil
 	}
 
+	var nameAsStr string
+
 	// Emit sequence of counted strings, chopping at dots.
 	for i, begin := 0, 0; i < int(n.Length); i++ {
 		// Check for the end of the segment.
@@ -1984,16 +2004,22 @@
 		// segment. A pointer is two bytes with the two most significant
 		// bits set to 1 to indicate that it is a pointer.
 		if (i == 0 || n.Data[i-1] == '.') && compression != nil {
-			if ptr, ok := compression[string(n.Data[i:])]; ok {
+			if ptr, ok := compression[string(n.Data[i:n.Length])]; ok {
 				// Hit. Emit a pointer instead of the rest of
 				// the domain.
 				return append(msg, byte(ptr>>8|0xC0), byte(ptr)), nil
 			}
 
 			// Miss. Add the suffix to the compression table if the
-			// offset can be stored in the available 14 bytes.
-			if len(msg) <= int(^uint16(0)>>2) {
-				compression[string(n.Data[i:])] = len(msg) - compressionOff
+			// offset can be stored in the available 14 bits.
+			newPtr := len(msg) - compressionOff
+			if newPtr <= int(^uint16(0)>>2) {
+				if nameAsStr == "" {
+					// allocate n.Data on the heap once, to avoid allocating it
+					// multiple times (for next labels).
+					nameAsStr = string(n.Data[:n.Length])
+				}
+				compression[nameAsStr[i:]] = uint16(newPtr)
 			}
 		}
 	}
@@ -2133,7 +2159,7 @@
 }
 
 // pack appends the wire format of the Question to msg.
-func (q *Question) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (q *Question) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	msg, err := q.Name.pack(msg, compression, compressionOff)
 	if err != nil {
 		return msg, &nestedError{"Name", err}
@@ -2229,7 +2255,7 @@
 }
 
 // pack appends the wire format of the CNAMEResource to msg.
-func (r *CNAMEResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *CNAMEResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	return r.CNAME.pack(msg, compression, compressionOff)
 }
 
@@ -2257,7 +2283,7 @@
 }
 
 // pack appends the wire format of the MXResource to msg.
-func (r *MXResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *MXResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	oldMsg := msg
 	msg = packUint16(msg, r.Pref)
 	msg, err := r.MX.pack(msg, compression, compressionOff)
@@ -2296,7 +2322,7 @@
 }
 
 // pack appends the wire format of the NSResource to msg.
-func (r *NSResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *NSResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	return r.NS.pack(msg, compression, compressionOff)
 }
 
@@ -2323,7 +2349,7 @@
 }
 
 // pack appends the wire format of the PTRResource to msg.
-func (r *PTRResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *PTRResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	return r.PTR.pack(msg, compression, compressionOff)
 }
 
@@ -2360,7 +2386,7 @@
 }
 
 // pack appends the wire format of the SOAResource to msg.
-func (r *SOAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *SOAResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	oldMsg := msg
 	msg, err := r.NS.pack(msg, compression, compressionOff)
 	if err != nil {
@@ -2432,7 +2458,7 @@
 }
 
 // pack appends the wire format of the TXTResource to msg.
-func (r *TXTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *TXTResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	oldMsg := msg
 	for _, s := range r.TXT {
 		var err error
@@ -2488,7 +2514,7 @@
 }
 
 // pack appends the wire format of the SRVResource to msg.
-func (r *SRVResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *SRVResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	oldMsg := msg
 	msg = packUint16(msg, r.Priority)
 	msg = packUint16(msg, r.Weight)
@@ -2539,7 +2565,7 @@
 }
 
 // pack appends the wire format of the AResource to msg.
-func (r *AResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *AResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	return packBytes(msg, r.A[:]), nil
 }
 
@@ -2573,7 +2599,7 @@
 }
 
 // pack appends the wire format of the AAAAResource to msg.
-func (r *AAAAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *AAAAResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	return packBytes(msg, r.AAAA[:]), nil
 }
 
@@ -2613,7 +2639,7 @@
 	return TypeOPT
 }
 
-func (r *OPTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *OPTResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	for _, opt := range r.Options {
 		msg = packUint16(msg, opt.Code)
 		l := uint16(len(opt.Data))
@@ -2671,7 +2697,7 @@
 }
 
 // pack appends the wire format of the UnknownResource to msg.
-func (r *UnknownResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+func (r *UnknownResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) {
 	return packBytes(msg, r.Data[:]), nil
 }
 
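Reviewer note: the compression map now stores uint16 offsets (pointers are at most 14 bits), the parser keeps only the header fields it needs, and Name.pack allocates the name string once; none of this changes the public API. A hedged end-to-end sketch of the usual build/parse flow with compression enabled, using the public golang.org/x/net/dns/dnsmessage path rather than the vendored copy:

package main

import (
	"fmt"

	"golang.org/x/net/dns/dnsmessage"
)

func main() {
	name := dnsmessage.MustNewName("example.com.")

	// Build a query; with compression enabled, repeated name suffixes would be
	// emitted as 14-bit pointers into the message.
	b := dnsmessage.NewBuilder(nil, dnsmessage.Header{RecursionDesired: true})
	b.EnableCompression()
	if err := b.StartQuestions(); err != nil {
		panic(err)
	}
	if err := b.Question(dnsmessage.Question{
		Name:  name,
		Type:  dnsmessage.TypeA,
		Class: dnsmessage.ClassINET,
	}); err != nil {
		panic(err)
	}
	msg, err := b.Finish()
	if err != nil {
		panic(err)
	}

	// Parse it back, skipping the question section without unpacking it fully.
	var p dnsmessage.Parser
	if _, err := p.Start(msg); err != nil {
		panic(err)
	}
	if err := p.SkipAllQuestions(); err != nil {
		panic(err)
	}
	fmt.Printf("packed %d bytes\n", len(msg))
}
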
diff --git a/src/vendor/golang.org/x/net/idna/go118.go b/src/vendor/golang.org/x/net/idna/go118.go
index c5c4338..712f1ad 100644
--- a/src/vendor/golang.org/x/net/idna/go118.go
+++ b/src/vendor/golang.org/x/net/idna/go118.go
@@ -5,7 +5,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build go1.18
-// +build go1.18
 
 package idna
 
diff --git a/src/vendor/golang.org/x/net/idna/idna10.0.0.go b/src/vendor/golang.org/x/net/idna/idna10.0.0.go
index 64ccf85..7b37178 100644
--- a/src/vendor/golang.org/x/net/idna/idna10.0.0.go
+++ b/src/vendor/golang.org/x/net/idna/idna10.0.0.go
@@ -5,7 +5,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build go1.10
-// +build go1.10
 
 // Package idna implements IDNA2008 using the compatibility processing
 // defined by UTS (Unicode Technical Standard) #46, which defines a standard to
diff --git a/src/vendor/golang.org/x/net/idna/idna9.0.0.go b/src/vendor/golang.org/x/net/idna/idna9.0.0.go
index ee1698c..cc6a892 100644
--- a/src/vendor/golang.org/x/net/idna/idna9.0.0.go
+++ b/src/vendor/golang.org/x/net/idna/idna9.0.0.go
@@ -5,7 +5,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !go1.10
-// +build !go1.10
 
 // Package idna implements IDNA2008 using the compatibility processing
 // defined by UTS (Unicode Technical Standard) #46, which defines a standard to
diff --git a/src/vendor/golang.org/x/net/idna/pre_go118.go b/src/vendor/golang.org/x/net/idna/pre_go118.go
index 3aaccab..40e74bb 100644
--- a/src/vendor/golang.org/x/net/idna/pre_go118.go
+++ b/src/vendor/golang.org/x/net/idna/pre_go118.go
@@ -5,7 +5,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !go1.18
-// +build !go1.18
 
 package idna
 
diff --git a/src/vendor/golang.org/x/net/idna/tables10.0.0.go b/src/vendor/golang.org/x/net/idna/tables10.0.0.go
index d1d62ef..c6c2bf1 100644
--- a/src/vendor/golang.org/x/net/idna/tables10.0.0.go
+++ b/src/vendor/golang.org/x/net/idna/tables10.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.10 && !go1.13
-// +build go1.10,!go1.13
 
 package idna
 
diff --git a/src/vendor/golang.org/x/net/idna/tables11.0.0.go b/src/vendor/golang.org/x/net/idna/tables11.0.0.go
index 167efba..7678939 100644
--- a/src/vendor/golang.org/x/net/idna/tables11.0.0.go
+++ b/src/vendor/golang.org/x/net/idna/tables11.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.13 && !go1.14
-// +build go1.13,!go1.14
 
 package idna
 
diff --git a/src/vendor/golang.org/x/net/idna/tables12.0.0.go b/src/vendor/golang.org/x/net/idna/tables12.0.0.go
index ab40f7b..0600cd2 100644
--- a/src/vendor/golang.org/x/net/idna/tables12.0.0.go
+++ b/src/vendor/golang.org/x/net/idna/tables12.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.14 && !go1.16
-// +build go1.14,!go1.16
 
 package idna
 
diff --git a/src/vendor/golang.org/x/net/idna/tables13.0.0.go b/src/vendor/golang.org/x/net/idna/tables13.0.0.go
index 66701ea..2fb768e 100644
--- a/src/vendor/golang.org/x/net/idna/tables13.0.0.go
+++ b/src/vendor/golang.org/x/net/idna/tables13.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.16 && !go1.21
-// +build go1.16,!go1.21
 
 package idna
 
diff --git a/src/vendor/golang.org/x/net/idna/tables15.0.0.go b/src/vendor/golang.org/x/net/idna/tables15.0.0.go
index 4003377..5ff05fe 100644
--- a/src/vendor/golang.org/x/net/idna/tables15.0.0.go
+++ b/src/vendor/golang.org/x/net/idna/tables15.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.21
-// +build go1.21
 
 package idna
 
diff --git a/src/vendor/golang.org/x/net/idna/tables9.0.0.go b/src/vendor/golang.org/x/net/idna/tables9.0.0.go
index 4074b53..0f25e84 100644
--- a/src/vendor/golang.org/x/net/idna/tables9.0.0.go
+++ b/src/vendor/golang.org/x/net/idna/tables9.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build !go1.10
-// +build !go1.10
 
 package idna
 
diff --git a/src/vendor/golang.org/x/net/idna/trie12.0.0.go b/src/vendor/golang.org/x/net/idna/trie12.0.0.go
index bb63f90..8a75b96 100644
--- a/src/vendor/golang.org/x/net/idna/trie12.0.0.go
+++ b/src/vendor/golang.org/x/net/idna/trie12.0.0.go
@@ -5,7 +5,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !go1.16
-// +build !go1.16
 
 package idna
 
diff --git a/src/vendor/golang.org/x/net/idna/trie13.0.0.go b/src/vendor/golang.org/x/net/idna/trie13.0.0.go
index 7d68a8d..fa45bb9 100644
--- a/src/vendor/golang.org/x/net/idna/trie13.0.0.go
+++ b/src/vendor/golang.org/x/net/idna/trie13.0.0.go
@@ -5,7 +5,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build go1.16
-// +build go1.16
 
 package idna
 
diff --git a/src/vendor/golang.org/x/net/lif/address.go b/src/vendor/golang.org/x/net/lif/address.go
index 8eaddb5..0ed62a2 100644
--- a/src/vendor/golang.org/x/net/lif/address.go
+++ b/src/vendor/golang.org/x/net/lif/address.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build solaris
-// +build solaris
 
 package lif
 
diff --git a/src/vendor/golang.org/x/net/lif/binary.go b/src/vendor/golang.org/x/net/lif/binary.go
index f31ca3ad..8a6c456 100644
--- a/src/vendor/golang.org/x/net/lif/binary.go
+++ b/src/vendor/golang.org/x/net/lif/binary.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build solaris
-// +build solaris
 
 package lif
 
diff --git a/src/vendor/golang.org/x/net/lif/lif.go b/src/vendor/golang.org/x/net/lif/lif.go
index f1fce48..e9f2a9e 100644
--- a/src/vendor/golang.org/x/net/lif/lif.go
+++ b/src/vendor/golang.org/x/net/lif/lif.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build solaris
-// +build solaris
 
 // Package lif provides basic functions for the manipulation of
 // logical network interfaces and interface addresses on Solaris.
diff --git a/src/vendor/golang.org/x/net/lif/link.go b/src/vendor/golang.org/x/net/lif/link.go
index 00b7854..d0c615a 100644
--- a/src/vendor/golang.org/x/net/lif/link.go
+++ b/src/vendor/golang.org/x/net/lif/link.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build solaris
-// +build solaris
 
 package lif
 
diff --git a/src/vendor/golang.org/x/net/lif/sys.go b/src/vendor/golang.org/x/net/lif/sys.go
index d0b532d..caba2fe 100644
--- a/src/vendor/golang.org/x/net/lif/sys.go
+++ b/src/vendor/golang.org/x/net/lif/sys.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build solaris
-// +build solaris
 
 package lif
 
diff --git a/src/vendor/golang.org/x/net/lif/syscall.go b/src/vendor/golang.org/x/net/lif/syscall.go
index 8d03b4a..329a65f 100644
--- a/src/vendor/golang.org/x/net/lif/syscall.go
+++ b/src/vendor/golang.org/x/net/lif/syscall.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build solaris
-// +build solaris
 
 package lif
 
diff --git a/src/vendor/golang.org/x/net/nettest/nettest_stub.go b/src/vendor/golang.org/x/net/nettest/nettest_stub.go
index 6e3a931..1725b6a 100644
--- a/src/vendor/golang.org/x/net/nettest/nettest_stub.go
+++ b/src/vendor/golang.org/x/net/nettest/nettest_stub.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
 
 package nettest
 
diff --git a/src/vendor/golang.org/x/net/nettest/nettest_unix.go b/src/vendor/golang.org/x/net/nettest/nettest_unix.go
index b1cb8b2..9ba269d 100644
--- a/src/vendor/golang.org/x/net/nettest/nettest_unix.go
+++ b/src/vendor/golang.org/x/net/nettest/nettest_unix.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 package nettest
 
diff --git a/src/vendor/golang.org/x/net/route/address.go b/src/vendor/golang.org/x/net/route/address.go
index 5a3cc06..5443d67 100644
--- a/src/vendor/golang.org/x/net/route/address.go
+++ b/src/vendor/golang.org/x/net/route/address.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || netbsd || openbsd
-// +build darwin dragonfly freebsd netbsd openbsd
 
 package route
 
diff --git a/src/vendor/golang.org/x/net/route/binary.go b/src/vendor/golang.org/x/net/route/binary.go
index a5e28f1..db3f7e0 100644
--- a/src/vendor/golang.org/x/net/route/binary.go
+++ b/src/vendor/golang.org/x/net/route/binary.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || netbsd || openbsd
-// +build darwin dragonfly freebsd netbsd openbsd
 
 package route
 
diff --git a/src/vendor/golang.org/x/net/route/empty.s b/src/vendor/golang.org/x/net/route/empty.s
index 90ab4ca..49d7979 100644
--- a/src/vendor/golang.org/x/net/route/empty.s
+++ b/src/vendor/golang.org/x/net/route/empty.s
@@ -3,6 +3,5 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin && go1.12
-// +build darwin,go1.12
 
 // This exists solely so we can linkname in symbols from syscall.
diff --git a/src/vendor/golang.org/x/net/route/interface.go b/src/vendor/golang.org/x/net/route/interface.go
index 9e94078..0aa7055 100644
--- a/src/vendor/golang.org/x/net/route/interface.go
+++ b/src/vendor/golang.org/x/net/route/interface.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || netbsd || openbsd
-// +build darwin dragonfly freebsd netbsd openbsd
 
 package route
 
diff --git a/src/vendor/golang.org/x/net/route/interface_announce.go b/src/vendor/golang.org/x/net/route/interface_announce.go
index 8282bfe..70614c1 100644
--- a/src/vendor/golang.org/x/net/route/interface_announce.go
+++ b/src/vendor/golang.org/x/net/route/interface_announce.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build dragonfly || freebsd || netbsd
-// +build dragonfly freebsd netbsd
 
 package route
 
diff --git a/src/vendor/golang.org/x/net/route/interface_classic.go b/src/vendor/golang.org/x/net/route/interface_classic.go
index 903a196..be1bf26 100644
--- a/src/vendor/golang.org/x/net/route/interface_classic.go
+++ b/src/vendor/golang.org/x/net/route/interface_classic.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || netbsd
-// +build darwin dragonfly netbsd
 
 package route
 
diff --git a/src/vendor/golang.org/x/net/route/interface_multicast.go b/src/vendor/golang.org/x/net/route/interface_multicast.go
index dd0b214..2ee37b9 100644
--- a/src/vendor/golang.org/x/net/route/interface_multicast.go
+++ b/src/vendor/golang.org/x/net/route/interface_multicast.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd
-// +build darwin dragonfly freebsd
 
 package route
 
diff --git a/src/vendor/golang.org/x/net/route/message.go b/src/vendor/golang.org/x/net/route/message.go
index 456a836..dc8bfc5 100644
--- a/src/vendor/golang.org/x/net/route/message.go
+++ b/src/vendor/golang.org/x/net/route/message.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || netbsd || openbsd
-// +build darwin dragonfly freebsd netbsd openbsd
 
 package route
 
diff --git a/src/vendor/golang.org/x/net/route/route.go b/src/vendor/golang.org/x/net/route/route.go
index 3ab5bcd..ca2ce2b 100644
--- a/src/vendor/golang.org/x/net/route/route.go
+++ b/src/vendor/golang.org/x/net/route/route.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || netbsd || openbsd
-// +build darwin dragonfly freebsd netbsd openbsd
 
 // Package route provides basic functions for the manipulation of
 // packet routing facilities on BSD variants.
diff --git a/src/vendor/golang.org/x/net/route/route_classic.go b/src/vendor/golang.org/x/net/route/route_classic.go
index d6ee42f..e273fe3 100644
--- a/src/vendor/golang.org/x/net/route/route_classic.go
+++ b/src/vendor/golang.org/x/net/route/route_classic.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || netbsd
-// +build darwin dragonfly freebsd netbsd
 
 package route
 
diff --git a/src/vendor/golang.org/x/net/route/sys.go b/src/vendor/golang.org/x/net/route/sys.go
index 7c75574..fcebee5 100644
--- a/src/vendor/golang.org/x/net/route/sys.go
+++ b/src/vendor/golang.org/x/net/route/sys.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || netbsd || openbsd
-// +build darwin dragonfly freebsd netbsd openbsd
 
 package route
 
diff --git a/src/vendor/golang.org/x/net/route/syscall.go b/src/vendor/golang.org/x/net/route/syscall.go
index 68d37c9..0ed5375 100644
--- a/src/vendor/golang.org/x/net/route/syscall.go
+++ b/src/vendor/golang.org/x/net/route/syscall.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || dragonfly || freebsd || netbsd || openbsd
-// +build darwin dragonfly freebsd netbsd openbsd
 
 package route
 
diff --git a/src/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/src/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
index db9171c..269e173 100644
--- a/src/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
+++ b/src/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu.go b/src/vendor/golang.org/x/sys/cpu/cpu.go
index 83f112c..4756ad5 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu.go
@@ -38,7 +38,7 @@
 	HasAVX512F          bool // Advanced vector extension 512 Foundation Instructions
 	HasAVX512CD         bool // Advanced vector extension 512 Conflict Detection Instructions
 	HasAVX512ER         bool // Advanced vector extension 512 Exponential and Reciprocal Instructions
-	HasAVX512PF         bool // Advanced vector extension 512 Prefetch Instructions Instructions
+	HasAVX512PF         bool // Advanced vector extension 512 Prefetch Instructions
 	HasAVX512VL         bool // Advanced vector extension 512 Vector Length Extensions
 	HasAVX512BW         bool // Advanced vector extension 512 Byte and Word Instructions
 	HasAVX512DQ         bool // Advanced vector extension 512 Doubleword and Quadword Instructions
@@ -54,6 +54,9 @@
 	HasAVX512VBMI2      bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2
 	HasAVX512BITALG     bool // Advanced vector extension 512 Bit Algorithms
 	HasAVX512BF16       bool // Advanced vector extension 512 BFloat16 Instructions
+	HasAMXTile          bool // Advanced Matrix Extension Tile instructions
+	HasAMXInt8          bool // Advanced Matrix Extension Int8 instructions
+	HasAMXBF16          bool // Advanced Matrix Extension BFloat16 instructions
 	HasBMI1             bool // Bit manipulation instruction set 1
 	HasBMI2             bool // Bit manipulation instruction set 2
 	HasCX16             bool // Compare and exchange 16 Bytes
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_aix.go b/src/vendor/golang.org/x/sys/cpu/cpu_aix.go
index 8aaeef5..9bf0c32 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_aix.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_aix.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build aix
-// +build aix
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s
index c61f95a..fcb9a38 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
index ccf542a..a8acd3e 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/src/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
index 0af2f24..c8ae6dd 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
index fa7cdb9..910728f 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (386 || amd64 || amd64p32) && gc
-// +build 386 amd64 amd64p32
-// +build gc
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
index 2aff318..7f19467 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gccgo
-// +build gccgo
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
index 4bfbda6..9526d2c 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gccgo
-// +build gccgo
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
index 6cc7310..3f73a05 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (386 || amd64 || amd64p32) && gccgo
-// +build 386 amd64 amd64p32
-// +build gccgo
 
 #include <cpuid.h>
 #include <stdint.h>
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
index 863d415..99c60fe 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (386 || amd64 || amd64p32) && gccgo
-// +build 386 amd64 amd64p32
-// +build gccgo
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_linux.go b/src/vendor/golang.org/x/sys/cpu/cpu_linux.go
index 159a686..743eb54 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_linux.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_linux.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !386 && !amd64 && !amd64p32 && !arm64
-// +build !386,!amd64,!amd64p32,!arm64
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/src/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
index 6000db4..4686c1d 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && (mips64 || mips64le)
-// +build linux
-// +build mips64 mips64le
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
index f4992b1..cd63e73 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x
-// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/src/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
index 021356d..197188e 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && (ppc64 || ppc64le)
-// +build linux
-// +build ppc64 ppc64le
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go
index 0f57b05..5586358 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build loong64
-// +build loong64
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/src/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
index f4063c6..fedb00c 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build mips64 || mips64le
-// +build mips64 mips64le
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/src/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
index 07c4e36..ffb4ec7 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build mips || mipsle
-// +build mips mipsle
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/src/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
index d7b4fb4..e9ecf2a 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !linux && arm
-// +build !linux,arm
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/src/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
index f3cde12..5341e7f 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !linux && !netbsd && !openbsd && arm64
-// +build !linux,!netbsd,!openbsd,arm64
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/src/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
index 0dafe96..5f8f241 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !linux && (mips64 || mips64le)
-// +build !linux
-// +build mips64 mips64le
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/src/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
index 060d46b..89608fb 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
@@ -3,9 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !aix && !linux && (ppc64 || ppc64le)
-// +build !aix
-// +build !linux
-// +build ppc64 ppc64le
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/src/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
index dd10eb7..5ab8780 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !linux && riscv64
-// +build !linux,riscv64
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/src/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
index 4e8acd1..c14f12b 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ppc64 || ppc64le
-// +build ppc64 ppc64le
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
index bd6c128..7f0c79c 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
@@ -3,10 +3,9 @@
 // license that can be found in the LICENSE file.
 
 //go:build riscv64
-// +build riscv64
 
 package cpu
 
-const cacheLineSize = 32
+const cacheLineSize = 64
 
 func initOptions() {}
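
Reviewer note: cacheLineSize feeds the package's CacheLinePad type, whose only job is to pad structs so hot fields land on separate cache lines; riscv64 now assumes 64-byte lines like the other 64-bit ports. A hedged sketch of the usual padding pattern (the struct and field names are illustrative):

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/cpu"
)

// counters keeps its two hot fields on different cache lines to avoid false
// sharing when separate goroutines update a and b.
type counters struct {
	a uint64
	_ cpu.CacheLinePad
	b uint64
}

func main() {
	var c counters
	fmt.Println(unsafe.Sizeof(c)) // 16 bytes of counters plus one cache line of padding
}
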
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/src/vendor/golang.org/x/sys/cpu/cpu_s390x.s
index 96f81e2..1fb4b70 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_s390x.s
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_s390x.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build gc
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/src/vendor/golang.org/x/sys/cpu/cpu_wasm.go
index 7747d88..384787e 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_wasm.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_wasm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build wasm
-// +build wasm
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_x86.go b/src/vendor/golang.org/x/sys/cpu/cpu_x86.go
index f5aacfc..c29f5e4 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build 386 || amd64 || amd64p32
-// +build 386 amd64 amd64p32
 
 package cpu
 
@@ -37,6 +36,9 @@
 		{Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2},
 		{Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG},
 		{Name: "avx512bf16", Feature: &X86.HasAVX512BF16},
+		{Name: "amxtile", Feature: &X86.HasAMXTile},
+		{Name: "amxint8", Feature: &X86.HasAMXInt8},
+		{Name: "amxbf16", Feature: &X86.HasAMXBF16},
 		{Name: "bmi1", Feature: &X86.HasBMI1},
 		{Name: "bmi2", Feature: &X86.HasBMI2},
 		{Name: "cx16", Feature: &X86.HasCX16},
@@ -138,6 +140,10 @@
 		eax71, _, _, _ := cpuid(7, 1)
 		X86.HasAVX512BF16 = isSet(5, eax71)
 	}
+
+	X86.HasAMXTile = isSet(24, edx7)
+	X86.HasAMXInt8 = isSet(25, edx7)
+	X86.HasAMXBF16 = isSet(22, edx7)
 }
 
 func isSet(bitpos uint, value uint32) bool {
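
Reviewer note: the three AMX capability bits come from CPUID leaf 7 (EDX bits 24, 25, and 22) and surface as cpu.X86.HasAMXTile, HasAMXInt8, and HasAMXBF16; isSet is a plain bit test. A short sketch of both, with an illustrative runtime check (the local isSet mirrors the package helper and is not exported):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// isSet reports whether bit bitpos of value is 1, matching the package helper.
func isSet(bitpos uint, value uint32) bool {
	return value&(1<<bitpos) != 0
}

func main() {
	fmt.Println(isSet(24, 1<<24)) // true

	if cpu.X86.HasAMXTile && cpu.X86.HasAMXInt8 {
		fmt.Println("AMX tile + int8 available")
	} else {
		fmt.Println("AMX not available; take a fallback path")
	}
}
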
diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_x86.s b/src/vendor/golang.org/x/sys/cpu/cpu_x86.s
index 39acab2..7d7ba33 100644
--- a/src/vendor/golang.org/x/sys/cpu/cpu_x86.s
+++ b/src/vendor/golang.org/x/sys/cpu/cpu_x86.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build (386 || amd64 || amd64p32) && gc
-// +build 386 amd64 amd64p32
-// +build gc
 
 #include "textflag.h"
 
diff --git a/src/vendor/golang.org/x/sys/cpu/endian_big.go b/src/vendor/golang.org/x/sys/cpu/endian_big.go
index 93ce03a..7fe04b0 100644
--- a/src/vendor/golang.org/x/sys/cpu/endian_big.go
+++ b/src/vendor/golang.org/x/sys/cpu/endian_big.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
-// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/endian_little.go b/src/vendor/golang.org/x/sys/cpu/endian_little.go
index 55db853..48eccc4 100644
--- a/src/vendor/golang.org/x/sys/cpu/endian_little.go
+++ b/src/vendor/golang.org/x/sys/cpu/endian_little.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm
-// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh wasm
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/src/vendor/golang.org/x/sys/cpu/hwcap_linux.go
index 1d9d91f..34e49f9 100644
--- a/src/vendor/golang.org/x/sys/cpu/hwcap_linux.go
+++ b/src/vendor/golang.org/x/sys/cpu/hwcap_linux.go
@@ -5,7 +5,7 @@
 package cpu
 
 import (
-	"io/ioutil"
+	"os"
 )
 
 const (
@@ -39,7 +39,7 @@
 		return nil
 	}
 
-	buf, err := ioutil.ReadFile(procAuxv)
+	buf, err := os.ReadFile(procAuxv)
 	if err != nil {
 		// e.g. on android /proc/self/auxv is not accessible, so silently
 		// ignore the error and leave Initialized = false. On some
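
Aside, not part of the merge: os.ReadFile has been the drop-in replacement for the deprecated ioutil.ReadFile since Go 1.16, which is all this hunk changes. A minimal sketch of the same call outside the vendored package; the error handling mirrors the comment above about /proc/self/auxv being unreadable on some systems.

package main

import (
	"fmt"
	"os"
)

func main() {
	// Identical semantics to the old ioutil.ReadFile: read the whole file.
	buf, err := os.ReadFile("/proc/self/auxv")
	if err != nil {
		// E.g. on Android the file is not accessible; treat it as
		// "feature detection unavailable" rather than a fatal error.
		fmt.Println("auxv not readable:", err)
		return
	}
	fmt.Println("read", len(buf), "auxv bytes")
}
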
diff --git a/src/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/src/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
index d87bd6b..4cd64c7 100644
--- a/src/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
+++ b/src/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build linux && arm64
-// +build linux,arm64
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/src/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
index b975ea2..4c9788e 100644
--- a/src/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
+++ b/src/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build go1.21
-// +build go1.21
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/src/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
index 9613415..1b9ccb0 100644
--- a/src/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
+++ b/src/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
@@ -9,7 +9,6 @@
 // gccgo's libgo and thus must not used a CGo method.
 
 //go:build aix && gccgo
-// +build aix,gccgo
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/src/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
index 904be42..e8b6cdb 100644
--- a/src/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
+++ b/src/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
@@ -7,7 +7,6 @@
 // (See golang.org/issue/32102)
 
 //go:build aix && ppc64 && gc
-// +build aix,ppc64,gc
 
 package cpu
 
diff --git a/src/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/src/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go
index 8a7392c..784bb88 100644
--- a/src/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go
+++ b/src/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build go1.10
-// +build go1.10
 
 package bidirule
 
diff --git a/src/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/src/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go
index bb0a920..8e1e943 100644
--- a/src/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go
+++ b/src/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !go1.10
-// +build !go1.10
 
 package bidirule
 
diff --git a/src/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/src/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go
index 42fa8d7..d2bd711 100644
--- a/src/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.10 && !go1.13
-// +build go1.10,!go1.13
 
 package bidi
 
diff --git a/src/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/src/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go
index 56a0e1e..f76bdca 100644
--- a/src/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.13 && !go1.14
-// +build go1.13,!go1.14
 
 package bidi
 
diff --git a/src/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/src/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go
index baacf32..3aa2c3b 100644
--- a/src/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.14 && !go1.16
-// +build go1.14,!go1.16
 
 package bidi
 
diff --git a/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go
index ffadb7b..a713757 100644
--- a/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.16 && !go1.21
-// +build go1.16,!go1.21
 
 package bidi
 
diff --git a/src/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go b/src/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go
index 92cce58..f15746f 100644
--- a/src/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.21
-// +build go1.21
 
 package bidi
 
diff --git a/src/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/src/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go
index f517fdb..c164d37 100644
--- a/src/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build !go1.10
-// +build !go1.10
 
 package bidi
 
diff --git a/src/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/src/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go
index f5a0788..1af161c 100644
--- a/src/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.10 && !go1.13
-// +build go1.10,!go1.13
 
 package norm
 
diff --git a/src/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/src/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
index cb7239c..eb73ecc 100644
--- a/src/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.13 && !go1.14
-// +build go1.13,!go1.14
 
 package norm
 
diff --git a/src/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/src/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go
index 11b2733..276cb8d 100644
--- a/src/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.14 && !go1.16
-// +build go1.14,!go1.16
 
 package norm
 
diff --git a/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
index f65785e..0cceffd 100644
--- a/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.16 && !go1.21
-// +build go1.16,!go1.21
 
 package norm
 
diff --git a/src/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/src/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go
index e1858b8..b0819e4 100644
--- a/src/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build go1.21
-// +build go1.21
 
 package norm
 
diff --git a/src/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/src/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go
index 0175eae..bf65457 100644
--- a/src/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go
+++ b/src/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go
@@ -1,7 +1,6 @@
 // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 
 //go:build !go1.10
-// +build !go1.10
 
 package norm
 
diff --git a/src/vendor/golang.org/x/text/unicode/norm/trie.go b/src/vendor/golang.org/x/text/unicode/norm/trie.go
index 423386b..e4250ae 100644
--- a/src/vendor/golang.org/x/text/unicode/norm/trie.go
+++ b/src/vendor/golang.org/x/text/unicode/norm/trie.go
@@ -29,7 +29,7 @@
 	nfkcData = newNfkcTrie(0)
 )
 
-// lookupValue determines the type of block n and looks up the value for b.
+// lookup determines the type of block n and looks up the value for b.
 // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
 // is a list of ranges with an accompanying value. Given a matching range r,
 // the value for b is by r.value + (b - r.lo) * stride.
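
Aside, not part of the merge: a minimal sketch of the range form the renamed comment describes, using hypothetical names (valueRange, lookupRange) that only illustrate the formula r.value + (b-r.lo)*stride, not the real trie types in this package.

// Package sketch is illustrative only; it is not part of x/text.
package sketch

// valueRange is a hypothetical stand-in for one range entry of a block.
type valueRange struct {
	lo, hi byte
	value  uint16
}

// lookupRange returns the value for b as described in the comment above:
// for the matching range r, the value is r.value + (b-r.lo)*stride.
func lookupRange(ranges []valueRange, stride uint16, b byte) uint16 {
	for _, r := range ranges {
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*stride
		}
	}
	return 0 // no range matched
}
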
diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt
index 4de656b0..338c496 100644
--- a/src/vendor/modules.txt
+++ b/src/vendor/modules.txt
@@ -1,5 +1,5 @@
-# golang.org/x/crypto v0.11.1-0.20230711161743-2e82bdd1719d
-## explicit; go 1.17
+# golang.org/x/crypto v0.16.1-0.20231129163542-152cdb1503eb
+## explicit; go 1.18
 golang.org/x/crypto/chacha20
 golang.org/x/crypto/chacha20poly1305
 golang.org/x/crypto/cryptobyte
@@ -7,8 +7,8 @@
 golang.org/x/crypto/hkdf
 golang.org/x/crypto/internal/alias
 golang.org/x/crypto/internal/poly1305
-# golang.org/x/net v0.12.1-0.20231027154334-5ca955b1789c
-## explicit; go 1.17
+# golang.org/x/net v0.19.0
+## explicit; go 1.18
 golang.org/x/net/dns/dnsmessage
 golang.org/x/net/http/httpguts
 golang.org/x/net/http/httpproxy
@@ -17,11 +17,11 @@
 golang.org/x/net/lif
 golang.org/x/net/nettest
 golang.org/x/net/route
-# golang.org/x/sys v0.10.0
-## explicit; go 1.17
+# golang.org/x/sys v0.15.0
+## explicit; go 1.18
 golang.org/x/sys/cpu
-# golang.org/x/text v0.11.0
-## explicit; go 1.17
+# golang.org/x/text v0.14.0
+## explicit; go 1.18
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
diff --git a/test/abi/bad_internal_offsets.go b/test/abi/bad_internal_offsets.go
index d396c1a..bad4ebd 100644
--- a/test/abi/bad_internal_offsets.go
+++ b/test/abi/bad_internal_offsets.go
@@ -1,7 +1,6 @@
 // compile
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/double_nested_addressed_struct.go b/test/abi/double_nested_addressed_struct.go
index be7c88a..c3e68a1 100644
--- a/test/abi/double_nested_addressed_struct.go
+++ b/test/abi/double_nested_addressed_struct.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/double_nested_struct.go b/test/abi/double_nested_struct.go
index 814341e..0dce143 100644
--- a/test/abi/double_nested_struct.go
+++ b/test/abi/double_nested_struct.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/f_ret_z_not.go b/test/abi/f_ret_z_not.go
index 63d6c79..0fec654 100644
--- a/test/abi/f_ret_z_not.go
+++ b/test/abi/f_ret_z_not.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/fibish.go b/test/abi/fibish.go
index b72f132..20ed924 100644
--- a/test/abi/fibish.go
+++ b/test/abi/fibish.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/fibish_closure.go b/test/abi/fibish_closure.go
index 988001e..682886c 100644
--- a/test/abi/fibish_closure.go
+++ b/test/abi/fibish_closure.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/leaf.go b/test/abi/leaf.go
index f893f5d..fb1be5c 100644
--- a/test/abi/leaf.go
+++ b/test/abi/leaf.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/leaf2.go b/test/abi/leaf2.go
index d2018d5..fdad59c 100644
--- a/test/abi/leaf2.go
+++ b/test/abi/leaf2.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/many_int_input.go b/test/abi/many_int_input.go
index 8fda937..046e31c 100644
--- a/test/abi/many_int_input.go
+++ b/test/abi/many_int_input.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/many_intstar_input.go b/test/abi/many_intstar_input.go
index b209c80..dc9e43b 100644
--- a/test/abi/many_intstar_input.go
+++ b/test/abi/many_intstar_input.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/more_intstar_input.go b/test/abi/more_intstar_input.go
index f0a48fb..256081d 100644
--- a/test/abi/more_intstar_input.go
+++ b/test/abi/more_intstar_input.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -12,10 +11,6 @@
 
 package main
 
-import (
-	"fmt"
-)
-
 var sink int
 
 //go:registerparams
@@ -33,12 +28,12 @@
 	var scratch [1000 * 100]int
 	I := *c - *e - *l // zero.
 	scratch[I] = *d
-	fmt.Println("Got this far!")
+	println("Got this far!")
 	sink += scratch[0]
 }
 
 func main() {
 	a, b, c, d, e, f, g, h, i, j, k, l, m := 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13
 	F(&a, &b, &c, &d, &e, &f, &g, &h, &i, &j, &k, &l, &m)
-	fmt.Printf("Sink = %d\n", sink-7)
+	println("Sink =", sink-7)
 }
diff --git a/test/abi/named_results.go b/test/abi/named_results.go
index eaaadb1..07ea461 100644
--- a/test/abi/named_results.go
+++ b/test/abi/named_results.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/named_return_stuff.go b/test/abi/named_return_stuff.go
index faa0221..ac779f0 100644
--- a/test/abi/named_return_stuff.go
+++ b/test/abi/named_return_stuff.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/reg_not_ssa.go b/test/abi/reg_not_ssa.go
new file mode 100644
index 0000000..da0f405
--- /dev/null
+++ b/test/abi/reg_not_ssa.go
@@ -0,0 +1,39 @@
+// run
+
+//go:build !wasm
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// small enough for registers, too large for SSA
+type T struct {
+	a, b, c, d, e int
+}
+
+//go:noinline
+func F() {
+	a, b := g(), g()
+	h(b, b)
+	h(a, g())
+	if a.a == 1 {
+		a = g()
+	}
+	h(a, a)
+}
+
+//go:noinline
+func g() T {
+	return T{1, 2, 3, 4, 5}
+}
+
+//go:noinline
+func h(s, t T) {
+	if s != t {
+		println("NEQ")
+	}
+}
+
+func main() { F() }
diff --git a/test/abi/return_stuff.go b/test/abi/return_stuff.go
index 130d8be..222a9ee 100644
--- a/test/abi/return_stuff.go
+++ b/test/abi/return_stuff.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/s_sif_sif.go b/test/abi/s_sif_sif.go
index f05f26f..f57faac 100644
--- a/test/abi/s_sif_sif.go
+++ b/test/abi/s_sif_sif.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/spills3.go b/test/abi/spills3.go
index 2478284..4dee3eb 100644
--- a/test/abi/spills3.go
+++ b/test/abi/spills3.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/spills4.go b/test/abi/spills4.go
index 205f5a6..646d696 100644
--- a/test/abi/spills4.go
+++ b/test/abi/spills4.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/struct_3_string_input.go b/test/abi/struct_3_string_input.go
index 54a8b38..2291bbb 100644
--- a/test/abi/struct_3_string_input.go
+++ b/test/abi/struct_3_string_input.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/struct_lower_1.go b/test/abi/struct_lower_1.go
index b20de9b..363b85c 100644
--- a/test/abi/struct_lower_1.go
+++ b/test/abi/struct_lower_1.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/too_big_to_ssa.go b/test/abi/too_big_to_ssa.go
index 6c55d31..e10acd1 100644
--- a/test/abi/too_big_to_ssa.go
+++ b/test/abi/too_big_to_ssa.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/uglyfib.go b/test/abi/uglyfib.go
index b8e8739..9c9d710 100644
--- a/test/abi/uglyfib.go
+++ b/test/abi/uglyfib.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/abi/wrapdefer_largetmp.go b/test/abi/wrapdefer_largetmp.go
index fb6eeba..1aaaade 100644
--- a/test/abi/wrapdefer_largetmp.go
+++ b/test/abi/wrapdefer_largetmp.go
@@ -1,7 +1,6 @@
 // run
 
 //go:build !wasm
-// +build !wasm
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/chanlinear.go b/test/chanlinear.go
index 4d55586..87faa84 100644
--- a/test/chanlinear.go
+++ b/test/chanlinear.go
@@ -1,6 +1,7 @@
-// +build darwin linux
 // run
 
+//go:build darwin || linux
+
 // Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/checkbce.go b/test/checkbce.go
index 6a12609..71acfb7 100644
--- a/test/checkbce.go
+++ b/test/checkbce.go
@@ -1,6 +1,7 @@
-// +build amd64,!gcflags_noopt
 // errorcheck -0 -d=ssa/check_bce/debug=3
 
+//go:build amd64 && !gcflags_noopt
+
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
@@ -137,6 +138,10 @@
 		useInt(a[i+50])
 
 		// The following are out of bounds.
+		if a[0] == 0xdeadbeef {
+			// This is a trick to prohibit sccp to optimize out the following out of bound check
+			continue
+		}
 		useInt(a[i-11]) // ERROR "Found IsInBounds$"
 		useInt(a[i+51]) // ERROR "Found IsInBounds$"
 	}
diff --git a/test/closure3.dir/main.go b/test/closure3.dir/main.go
index 07629bf..441da70 100644
--- a/test/closure3.dir/main.go
+++ b/test/closure3.dir/main.go
@@ -256,10 +256,10 @@
 			b := 3
 			return func(y int) int { // ERROR "can inline main.func27.1" "can inline main.main.func27.func34"
 				c := 5
-				return func(z int) int { // ERROR "can inline main.func27.1.1" "can inline main.main.func27.func34.1" "can inline main.func27.main.func27.1.func2" "can inline main.main.func27.main.main.func27.func34.func36"
+				return func(z int) int { // ERROR "can inline main.func27.1.1" "can inline main.main.func27.func34.1" "can inline main.func27.main.func27.1.2" "can inline main.main.func27.main.main.func27.func34.func36"
 					return a*x + b*y + c*z
 				}(10) // ERROR "inlining call to main.func27.1.1"
-			}(100) // ERROR "inlining call to main.func27.1" "inlining call to main.func27.main.func27.1.func2"
+			}(100) // ERROR "inlining call to main.func27.1" "inlining call to main.func27.main.func27.1.2"
 		}(1000); r != 2350 { // ERROR "inlining call to main.func27" "inlining call to main.main.func27.func34" "inlining call to main.main.func27.main.main.func27.func34.func36"
 			ppanic("r != 2350")
 		}
@@ -271,13 +271,13 @@
 			b := 3
 			return func(y int) int { // ERROR "can inline main.func28.1" "can inline main.main.func28.func35"
 				c := 5
-				func(z int) { // ERROR "can inline main.func28.1.1" "can inline main.func28.main.func28.1.func2" "can inline main.main.func28.func35.1" "can inline main.main.func28.main.main.func28.func35.func37"
+				func(z int) { // ERROR "can inline main.func28.1.1" "can inline main.func28.main.func28.1.2" "can inline main.main.func28.func35.1" "can inline main.main.func28.main.main.func28.func35.func37"
 					a = a * x
 					b = b * y
 					c = c * z
 				}(10) // ERROR "inlining call to main.func28.1.1"
 				return a + c
-			}(100) + b // ERROR "inlining call to main.func28.1" "inlining call to main.func28.main.func28.1.func2"
+			}(100) + b // ERROR "inlining call to main.func28.1" "inlining call to main.func28.main.func28.1.2"
 		}(1000); r != 2350 { // ERROR "inlining call to main.func28" "inlining call to main.main.func28.func35" "inlining call to main.main.func28.main.main.func28.func35.func37"
 			ppanic("r != 2350")
 		}
diff --git a/test/closure3.go b/test/closure3.go
index 452a527..24f8cef 100644
--- a/test/closure3.go
+++ b/test/closure3.go
@@ -1,5 +1,7 @@
 // errorcheckandrundir -0 -m -d=inlfuncswithclosures=1
 
+//go:build !goexperiment.newinliner
+
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/codegen/README b/test/codegen/README
index 5a46842..19a73d0 100644
--- a/test/codegen/README
+++ b/test/codegen/README
@@ -29,7 +29,7 @@
 used to run the test. After writing tests for a newly added codegen
 transformation, it can be useful to first run the test harness with a
 toolchain from a released Go version (and verify that the new tests
-fail), and then re-runnig the tests using the devel toolchain.
+fail), and then re-running the tests using the devel toolchain.
 
 
 - Regexps comments syntax
diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go
index f381b34..174c2db 100644
--- a/test/codegen/arithmetic.go
+++ b/test/codegen/arithmetic.go
@@ -11,6 +11,21 @@
 // For codegen tests on float types, see floats.go.
 
 // ----------------- //
+//    Addition       //
+// ----------------- //
+
+func AddLargeConst(a uint64, out []uint64) {
+	// ppc64x/power10:"ADD\t[$]4294967296,"
+	// ppc64x/power9:"MOVD\t[$]1", "SLD\t[$]32" "ADD\tR[0-9]*"
+	// ppc64x/power8:"MOVD\t[$]1", "SLD\t[$]32" "ADD\tR[0-9]*"
+	out[0] = a + 0x100000000
+	// ppc64x/power10:"ADD\t[$]-8589934592,"
+	// ppc64x/power9:"MOVD\t[$]-1", "SLD\t[$]33" "ADD\tR[0-9]*"
+	// ppc64x/power8:"MOVD\t[$]-1", "SLD\t[$]33" "ADD\tR[0-9]*"
+	out[1] = a + 0xFFFFFFFE00000000
+}
+
+// ----------------- //
 //    Subtraction    //
 // ----------------- //
 
@@ -260,7 +275,7 @@
 	// amd64:"ANDL\t[$]31",-"DIVQ"
 	// arm:"AND\t[$]31",-".*udiv"
 	// arm64:"AND\t[$]31",-"UDIV"
-	// ppc64x:"ANDCC\t[$]31"
+	// ppc64x:"RLDICL"
 	a := n1 % 32 // unsigned
 
 	// 386:"SHRL",-"IDIVL"
@@ -279,14 +294,14 @@
 	// amd64:"TESTQ\t[$]63",-"DIVQ",-"SHRQ"
 	// arm:"AND\t[$]63",-".*udiv",-"SRA"
 	// arm64:"TST\t[$]63",-"UDIV",-"ASR",-"AND"
-	// ppc64x:"ANDCC\t[$]63",-"SRAD"
+	// ppc64x:"RLDICL",-"SRAD"
 	a := n1%64 == 0 // signed divisible
 
 	// 386:"TESTL\t[$]63",-"DIVL",-"SHRL"
 	// amd64:"TESTQ\t[$]63",-"DIVQ",-"SHRQ"
 	// arm:"AND\t[$]63",-".*udiv",-"SRA"
 	// arm64:"TST\t[$]63",-"UDIV",-"ASR",-"AND"
-	// ppc64x:"ANDCC\t[$]63",-"SRAD"
+	// ppc64x:"RLDICL",-"SRAD"
 	b := n2%64 != 0 // signed indivisible
 
 	return a, b
@@ -464,7 +479,7 @@
 	// arm64:"AND\t[$]1023",-"SDIV"
 	// arm/6:"AND",-".*udiv"
 	// arm/7:"BFC",-".*udiv",-"AND"
-	// ppc64x:"ANDCC\t[$]1023"
+	// ppc64x:"RLDICL"
 	return len(a) % 1024
 }
 
@@ -474,7 +489,7 @@
 	// arm64:"AND\t[$]2047",-"SDIV"
 	// arm/6:"AND",-".*udiv"
 	// arm/7:"BFC",-".*udiv",-"AND"
-	// ppc64x:"ANDCC\t[$]2047"
+	// ppc64x:"RLDICL"
 	return len(s) % (4097 >> 1)
 }
 
@@ -493,7 +508,7 @@
 	// arm64:"AND\t[$]4095",-"SDIV"
 	// arm/6:"AND",-".*udiv"
 	// arm/7:"BFC",-".*udiv",-"AND"
-	// ppc64x:"ANDCC\t[$]4095"
+	// ppc64x:"RLDICL"
 	return cap(a) % ((1 << 11) + 2048)
 }
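
Aside, not part of the merge: the ppc64x expectation changes in this file (ANDCC -> RLDICL) track remainder-by-power-of-two operations that the compiler lowers to a bit mask rather than a divide. A minimal sketch of the unsigned identity involved; the numbers are arbitrary.

package main

import "fmt"

func main() {
	n := uint64(1234567)
	// For unsigned n and a power-of-two modulus m, n % m equals n & (m-1),
	// so the compiler can emit a single mask (RLDICL on ppc64x, AND elsewhere).
	fmt.Println(n%32 == n&31)     // true
	fmt.Println(n%4096 == n&4095) // true
}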
 
diff --git a/test/codegen/bits.go b/test/codegen/bits.go
index 018f5b9..4b6c8b9 100644
--- a/test/codegen/bits.go
+++ b/test/codegen/bits.go
@@ -220,10 +220,10 @@
 	// amd64:"BTSL"
 	n += b | (1 << (a & 31))
 
-	// amd64:"BTSL\t[$]31"
+	// amd64:"ORL\t[$]-2147483648"
 	n += a | (1 << 31)
 
-	// amd64:"BTSL\t[$]28"
+	// amd64:"ORL\t[$]268435456"
 	n += a | (1 << 28)
 
 	// amd64:"ORL\t[$]1"
@@ -236,10 +236,10 @@
 	// amd64:"BTRL"
 	n += b &^ (1 << (a & 31))
 
-	// amd64:"BTRL\t[$]31"
+	// amd64:"ANDL\t[$]2147483647"
 	n += a &^ (1 << 31)
 
-	// amd64:"BTRL\t[$]28"
+	// amd64:"ANDL\t[$]-268435457"
 	n += a &^ (1 << 28)
 
 	// amd64:"ANDL\t[$]-2"
@@ -252,10 +252,10 @@
 	// amd64:"BTCL"
 	n += b ^ (1 << (a & 31))
 
-	// amd64:"BTCL\t[$]31"
+	// amd64:"XORL\t[$]-2147483648"
 	n += a ^ (1 << 31)
 
-	// amd64:"BTCL\t[$]28"
+	// amd64:"XORL\t[$]268435456"
 	n += a ^ (1 << 28)
 
 	// amd64:"XORL\t[$]1"
@@ -382,7 +382,6 @@
 	// ppc64x: -"MOVB", "ANDCC\t[$]247,"
 	z = uint64(uint8(a)) & 0x3F7
 	return
-
 }
 
 // Verify zero-extended values are not sign-extended under a bit mask (#61297)
@@ -392,5 +391,30 @@
 	// ppc64x: -"MOVH\t", -"ANDCC", "MOVHZ"
 	y = uint64(b) & 0xFFFF
 	return
+}
 
+// Verify rotate and mask instructions, and further simplified instructions for small types
+func bitRotateAndMask(io64 [4]uint64, io32 [4]uint32, io16 [4]uint16, io8 [4]uint8) {
+	// ppc64x: "RLDICR\t[$]0, R[0-9]*, [$]47, R"
+	io64[0] = io64[0] & 0xFFFFFFFFFFFF0000
+	// ppc64x: "RLDICL\t[$]0, R[0-9]*, [$]16, R"
+	io64[1] = io64[1] & 0x0000FFFFFFFFFFFF
+	// ppc64x: -"SRD", -"AND", "RLDICL\t[$]60, R[0-9]*, [$]16, R"
+	io64[2] = (io64[2] >> 4) & 0x0000FFFFFFFFFFFF
+	// ppc64x: -"SRD", -"AND", "RLDICL\t[$]36, R[0-9]*, [$]28, R"
+	io64[3] = (io64[3] >> 28) & 0x0000FFFFFFFFFFFF
+
+	// ppc64x: "RLWNM\t[$]0, R[0-9]*, [$]4, [$]19, R"
+	io32[0] = io32[0] & 0x0FFFF000
+	// ppc64x: "RLWNM\t[$]0, R[0-9]*, [$]20, [$]3, R"
+	io32[1] = io32[1] & 0xF0000FFF
+	// ppc64x: -"RLWNM", MOVD, AND
+	io32[2] = io32[2] & 0xFFFF0002
+
+	var bigc uint32 = 0x12345678
+	// ppc64x: "ANDCC\t[$]22136"
+	io16[0] = io16[0] & uint16(bigc)
+
+	// ppc64x: "ANDCC\t[$]120"
+	io8[0] = io8[0] & uint8(bigc)
 }
diff --git a/test/codegen/bool.go b/test/codegen/bool.go
index faf7033..990a9ed 100644
--- a/test/codegen/bool.go
+++ b/test/codegen/bool.go
@@ -6,53 +6,57 @@
 
 package codegen
 
+import (
+	"math/bits"
+)
+
 // This file contains codegen tests related to boolean simplifications/optimizations.
 
 func convertNeq0B(x uint8, c bool) bool {
 	// amd64:"ANDL\t[$]1",-"SETNE"
-	// ppc64x:"ANDCC",-"CMPW",-"ISEL"
+	// ppc64x:"RLDICL",-"CMPW",-"ISEL"
 	b := x&1 != 0
 	return c && b
 }
 
 func convertNeq0W(x uint16, c bool) bool {
 	// amd64:"ANDL\t[$]1",-"SETNE"
-	// ppc64x:"ANDCC",-"CMPW",-"ISEL"
+	// ppc64x:"RLDICL",-"CMPW",-"ISEL"
 	b := x&1 != 0
 	return c && b
 }
 
 func convertNeq0L(x uint32, c bool) bool {
 	// amd64:"ANDL\t[$]1",-"SETB"
-	// ppc64x:"ANDCC",-"CMPW",-"ISEL"
+	// ppc64x:"RLDICL",-"CMPW",-"ISEL"
 	b := x&1 != 0
 	return c && b
 }
 
 func convertNeq0Q(x uint64, c bool) bool {
 	// amd64:"ANDL\t[$]1",-"SETB"
-	// ppc64x:"ANDCC",-"CMP",-"ISEL"
+	// ppc64x:"RLDICL",-"CMP",-"ISEL"
 	b := x&1 != 0
 	return c && b
 }
 
 func convertNeqBool32(x uint32) bool {
-	// ppc64x:"ANDCC",-"CMPW",-"ISEL"
+	// ppc64x:"RLDICL",-"CMPW",-"ISEL"
 	return x&1 != 0
 }
 
 func convertEqBool32(x uint32) bool {
-	// ppc64x:"ANDCC",-"CMPW","XOR",-"ISEL"
+	// ppc64x:"RLDICL",-"CMPW","XOR",-"ISEL"
 	return x&1 == 0
 }
 
 func convertNeqBool64(x uint64) bool {
-	// ppc64x:"ANDCC",-"CMP",-"ISEL"
+	// ppc64x:"RLDICL",-"CMP",-"ISEL"
 	return x&1 != 0
 }
 
 func convertEqBool64(x uint64) bool {
-	// ppc64x:"ANDCC","XOR",-"CMP",-"ISEL"
+	// ppc64x:"RLDICL","XOR",-"CMP",-"ISEL"
 	return x&1 == 0
 }
 
@@ -211,3 +215,62 @@
 	b := !(x >= y)
 	return b
 }
+func TestLogicalCompareZero(x *[64]uint64) {
+	// ppc64x:"ANDCC",^"AND"
+	b := x[0]&3
+	if b!=0 {
+		x[0] = b
+	}
+	// ppc64x:"ANDCC",^"AND"
+	b = x[1]&x[2]
+	if b!=0 {
+		x[1] = b
+	}
+	// ppc64x:"ANDNCC",^"ANDN"
+	b = x[1]&^x[2]
+	if b!=0 {
+		x[1] = b
+	}
+	// ppc64x:"ORCC",^"OR"
+	b = x[3]|x[4]
+	if b!=0 {
+		x[3] = b
+	}
+	// ppc64x:"SUBCC",^"SUB"
+	b = x[5]-x[6]
+	if b!=0 {
+		x[5] = b
+	}
+	// ppc64x:"NORCC",^"NOR"
+	b = ^(x[5]|x[6])
+	if b!=0 {
+		x[5] = b
+	}
+	// ppc64x:"XORCC",^"XOR"
+	b = x[7]^x[8]
+	if b!=0 {
+		x[7] = b
+	}
+	// ppc64x:"ADDCC",^"ADD"
+	b = x[9]+x[10]
+	if b!=0 {
+		x[9] = b
+	}
+	// ppc64x:"NEGCC",^"NEG"
+	b = -x[11]
+	if b!=0 {
+		x[11] = b
+	}
+	// ppc64x:"CNTLZDCC",^"CNTLZD"
+	b = uint64(bits.LeadingZeros64(x[12]))
+	if b!=0 {
+		x[12] = b
+	}
+
+	// ppc64x:"ADDCCC\t[$]4,"
+	c := int64(x[12]) + 4
+	if c <= 0 {
+		x[12] = uint64(c)
+	}
+
+}
diff --git a/test/codegen/clobberdead.go b/test/codegen/clobberdead.go
index 732be5f..13d2efb 100644
--- a/test/codegen/clobberdead.go
+++ b/test/codegen/clobberdead.go
@@ -1,6 +1,6 @@
 // asmcheck -gcflags=-clobberdead
 
-// +build amd64 arm64
+//go:build amd64 || arm64
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/codegen/clobberdeadreg.go b/test/codegen/clobberdeadreg.go
index 2a93c41..39c4a74 100644
--- a/test/codegen/clobberdeadreg.go
+++ b/test/codegen/clobberdeadreg.go
@@ -1,6 +1,6 @@
 // asmcheck -gcflags=-clobberdeadreg
 
-// +build amd64
+//go:build amd64
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/codegen/compare_and_branch.go b/test/codegen/compare_and_branch.go
index b3feef0..c121f1d 100644
--- a/test/codegen/compare_and_branch.go
+++ b/test/codegen/compare_and_branch.go
@@ -23,24 +23,25 @@
 }
 
 // Signed 64-bit compare-and-branch with 8-bit immediate.
-func si64x8() {
+func si64x8(doNotOptimize int64) {
+	// take in doNotOptimize as an argument to avoid the loops being rewritten to count down
 	// s390x:"CGIJ\t[$]12, R[0-9]+, [$]127, "
-	for i := int64(0); i < 128; i++ {
+	for i := doNotOptimize; i < 128; i++ {
 		dummy()
 	}
 
 	// s390x:"CGIJ\t[$]10, R[0-9]+, [$]-128, "
-	for i := int64(0); i > -129; i-- {
+	for i := doNotOptimize; i > -129; i-- {
 		dummy()
 	}
 
 	// s390x:"CGIJ\t[$]2, R[0-9]+, [$]127, "
-	for i := int64(0); i >= 128; i++ {
+	for i := doNotOptimize; i >= 128; i++ {
 		dummy()
 	}
 
 	// s390x:"CGIJ\t[$]4, R[0-9]+, [$]-128, "
-	for i := int64(0); i <= -129; i-- {
+	for i := doNotOptimize; i <= -129; i-- {
 		dummy()
 	}
 }
@@ -71,7 +72,7 @@
 	}
 
 	// s390x:"CLGIJ\t[$]2, R[0-9]+, [$]255, "
-	for i := uint64(0); i >= 256; i-- {
+	for i := uint64(257); i >= 256; i-- {
 		dummy()
 	}
 
@@ -95,24 +96,25 @@
 }
 
 // Signed 32-bit compare-and-branch with 8-bit immediate.
-func si32x8() {
+func si32x8(doNotOptimize int32) {
+	// take in doNotOptimize as an argument to avoid the loops being rewritten to count down
 	// s390x:"CIJ\t[$]12, R[0-9]+, [$]127, "
-	for i := int32(0); i < 128; i++ {
+	for i := doNotOptimize; i < 128; i++ {
 		dummy()
 	}
 
 	// s390x:"CIJ\t[$]10, R[0-9]+, [$]-128, "
-	for i := int32(0); i > -129; i-- {
+	for i := doNotOptimize; i > -129; i-- {
 		dummy()
 	}
 
 	// s390x:"CIJ\t[$]2, R[0-9]+, [$]127, "
-	for i := int32(0); i >= 128; i++ {
+	for i := doNotOptimize; i >= 128; i++ {
 		dummy()
 	}
 
 	// s390x:"CIJ\t[$]4, R[0-9]+, [$]-128, "
-	for i := int32(0); i <= -129; i-- {
+	for i := doNotOptimize; i <= -129; i-- {
 		dummy()
 	}
 }
@@ -143,7 +145,7 @@
 	}
 
 	// s390x:"CLIJ\t[$]2, R[0-9]+, [$]255, "
-	for i := uint32(0); i >= 256; i-- {
+	for i := uint32(257); i >= 256; i-- {
 		dummy()
 	}
 
diff --git a/test/codegen/comparisons.go b/test/codegen/comparisons.go
index 071b68f..4edf930 100644
--- a/test/codegen/comparisons.go
+++ b/test/codegen/comparisons.go
@@ -788,3 +788,16 @@
 	cmp5[string]("") // force instantiation
 	cmp6[string]("") // force instantiation
 }
+
+type Point struct {
+	X, Y int
+}
+
+// invertLessThanNoov checks (LessThanNoov (InvertFlags x)) is lowered as
+// CMP, CSET, CSEL instruction sequence. InvertFlags are only generated under
+// certain conditions, see canonLessThan, so if the code below does not
+// generate an InvertFlags OP, this check may fail.
+func invertLessThanNoov(p1, p2, p3 Point) bool {
+	// arm64:`CMP`,`CSET`,`CSEL`
+	return (p1.X-p3.X)*(p2.Y-p3.Y)-(p2.X-p3.X)*(p1.Y-p3.Y) < 0
+}
diff --git a/test/codegen/condmove.go b/test/codegen/condmove.go
index 6c08116..1058910 100644
--- a/test/codegen/condmove.go
+++ b/test/codegen/condmove.go
@@ -57,7 +57,7 @@
 	}
 	// amd64:"CMOVW(HI|CS)"
 	// arm64:"CSNEG\t(LS|HS)"
-	// ppc64x:"ISEL\t[$]0"
+	// ppc64x:"ISEL\t[$][01]"
 	// wasm:"Select"
 	return x
 }
diff --git a/test/codegen/constants.go b/test/codegen/constants.go
new file mode 100644
index 0000000..3ce17d0
--- /dev/null
+++ b/test/codegen/constants.go
@@ -0,0 +1,33 @@
+// asmcheck
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codegen
+
+// A uint16 or sint16 constant shifted left.
+func shifted16BitConstants(out [64]uint64) {
+	// ppc64x: "MOVD\t[$]8193,", "SLD\t[$]27,"
+	out[0] = 0x0000010008000000
+	// ppc64x: "MOVD\t[$]-32767", "SLD\t[$]26,"
+	out[1] = 0xFFFFFE0004000000
+	// ppc64x: "MOVD\t[$]-1", "SLD\t[$]48,"
+	out[2] = 0xFFFF000000000000
+	// ppc64x: "MOVD\t[$]65535", "SLD\t[$]44,"
+	out[3] = 0x0FFFF00000000000
+}
+
+// A contiguous set of 1 bits, potentially wrapping.
+func contiguousMaskConstants(out [64]uint64) {
+	// ppc64x: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]44, [$]63,"
+	out[0] = 0xFFFFF00000000001
+	// ppc64x: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]43, [$]63,"
+	out[1] = 0xFFFFF80000000001
+	// ppc64x: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]43, [$]4,"
+	out[2] = 0x0FFFF80000000000
+	// ppc64x/power8: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]33, [$]63,"
+	// ppc64x/power9: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]33, [$]63,"
+	// ppc64x/power10: "MOVD\t[$]-8589934591,"
+	out[3] = 0xFFFFFFFE00000001
+}
diff --git a/test/codegen/copy.go b/test/codegen/copy.go
index 4c4c857..17ee8bc 100644
--- a/test/codegen/copy.go
+++ b/test/codegen/copy.go
@@ -151,3 +151,9 @@
 	// s390x:-"BEQ",-"BNE"
 	copy(x[1:], x[2:])
 }
+
+// Verify #62698 on PPC64.
+func noMaskOnCopy(a []int, s string, x int) int {
+	// ppc64x:-"MOVD\t$-1", -"AND"
+	return a[x&^copy([]byte{}, s)]
+}
diff --git a/test/codegen/floats.go b/test/codegen/floats.go
index 9cb62e0..7991174 100644
--- a/test/codegen/floats.go
+++ b/test/codegen/floats.go
@@ -70,17 +70,20 @@
 	// s390x:"FMADDS\t"
 	// ppc64x:"FMADDS\t"
 	// arm64:"FMADDS"
+	// riscv64:"FMADDS\t"
 	return x*y + z
 }
 
 func FusedSub32_a(x, y, z float32) float32 {
 	// s390x:"FMSUBS\t"
 	// ppc64x:"FMSUBS\t"
+	// riscv64:"FMSUBS\t"
 	return x*y - z
 }
 
 func FusedSub32_b(x, y, z float32) float32 {
 	// arm64:"FMSUBS"
+	// riscv64:"FNMSUBS\t"
 	return z - x*y
 }
 
@@ -88,17 +91,20 @@
 	// s390x:"FMADD\t"
 	// ppc64x:"FMADD\t"
 	// arm64:"FMADDD"
+	// riscv64:"FMADDD\t"
 	return x*y + z
 }
 
 func FusedSub64_a(x, y, z float64) float64 {
 	// s390x:"FMSUB\t"
 	// ppc64x:"FMSUB\t"
+	// riscv64:"FMSUBD\t"
 	return x*y - z
 }
 
 func FusedSub64_b(x, y, z float64) float64 {
 	// arm64:"FMSUBD"
+	// riscv64:"FNMSUBD\t"
 	return z - x*y
 }
 
diff --git a/test/codegen/ifaces.go b/test/codegen/ifaces.go
index d773845..2be3fa5 100644
--- a/test/codegen/ifaces.go
+++ b/test/codegen/ifaces.go
@@ -6,16 +6,22 @@
 
 package codegen
 
-type I interface { M() }
+type I interface{ M() }
 
 func NopConvertIface(x I) I {
-        // amd64:-`.*runtime.convI2I`
+	// amd64:-`.*runtime.convI2I`
 	return I(x)
 }
 
 func NopConvertGeneric[T any](x T) T {
-        // amd64:-`.*runtime.convI2I`
-        return T(x)
+	// amd64:-`.*runtime.convI2I`
+	return T(x)
 }
 
 var NopConvertGenericIface = NopConvertGeneric[I]
+
+func ConvToM(x any) I {
+	// amd64:`CALL\truntime.typeAssert`,`MOVL\t16\(.*\)`,`MOVQ\t8\(.*\)(.*\*1)`
+	// arm64:`CALL\truntime.typeAssert`,`LDAR`,`MOVWU`,`MOVD\t\(R.*\)\(R.*\)`
+	return x.(I)
+}
diff --git a/test/codegen/issue61356.go b/test/codegen/issue61356.go
new file mode 100644
index 0000000..65753d5
--- /dev/null
+++ b/test/codegen/issue61356.go
@@ -0,0 +1,55 @@
+// asmcheck
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure this code doesn't generate spill/restore.
+
+package codegen
+
+func pack20(in *[20]uint64) uint64 {
+	var out uint64
+	out |= 4
+	// amd64:-`.*SP.*`
+	out |= in[0] << 4
+	// amd64:-`.*SP.*`
+	out |= in[1] << 7
+	// amd64:-`.*SP.*`
+	out |= in[2] << 10
+	// amd64:-`.*SP.*`
+	out |= in[3] << 13
+	// amd64:-`.*SP.*`
+	out |= in[4] << 16
+	// amd64:-`.*SP.*`
+	out |= in[5] << 19
+	// amd64:-`.*SP.*`
+	out |= in[6] << 22
+	// amd64:-`.*SP.*`
+	out |= in[7] << 25
+	// amd64:-`.*SP.*`
+	out |= in[8] << 28
+	// amd64:-`.*SP.*`
+	out |= in[9] << 31
+	// amd64:-`.*SP.*`
+	out |= in[10] << 34
+	// amd64:-`.*SP.*`
+	out |= in[11] << 37
+	// amd64:-`.*SP.*`
+	out |= in[12] << 40
+	// amd64:-`.*SP.*`
+	out |= in[13] << 43
+	// amd64:-`.*SP.*`
+	out |= in[14] << 46
+	// amd64:-`.*SP.*`
+	out |= in[15] << 49
+	// amd64:-`.*SP.*`
+	out |= in[16] << 52
+	// amd64:-`.*SP.*`
+	out |= in[17] << 55
+	// amd64:-`.*SP.*`
+	out |= in[18] << 58
+	// amd64:-`.*SP.*`
+	out |= in[19] << 61
+	return out
+}
diff --git a/test/codegen/issue63332.go b/test/codegen/issue63332.go
new file mode 100644
index 0000000..dbe671d
--- /dev/null
+++ b/test/codegen/issue63332.go
@@ -0,0 +1,14 @@
+// asmcheck
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codegen
+
+func issue63332(c chan int) {
+	x := 0
+	// amd64:-`MOVQ`
+	x += 2
+	c <- x
+}
diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go
index 8c971cf..184d608 100644
--- a/test/codegen/mathbits.go
+++ b/test/codegen/mathbits.go
@@ -335,19 +335,19 @@
 }
 
 func TrailingZeros16(n uint16) int {
-	// amd64:"BSFL","BTSL\\t\\$16"
+	// amd64:"BSFL","ORL\\t\\$65536"
 	// 386:"BSFL\t"
 	// arm:"ORR\t\\$65536","CLZ",-"MOVHU\tR"
 	// arm64:"ORR\t\\$65536","RBITW","CLZW",-"MOVHU\tR",-"RBIT\t",-"CLZ\t"
 	// s390x:"FLOGR","OR\t\\$65536"
-	// ppc64x/power8:"POPCNTD","OR\\t\\$65536"
-	// ppc64x/power9:"CNTTZD","OR\\t\\$65536"
+	// ppc64x/power8:"POPCNTD","ORIS\\t\\$1"
+	// ppc64x/power9:"CNTTZD","ORIS\\t\\$1"
 	// wasm:"I64Ctz"
 	return bits.TrailingZeros16(n)
 }
 
 func TrailingZeros8(n uint8) int {
-	// amd64:"BSFL","BTSL\\t\\$8"
+	// amd64:"BSFL","ORL\\t\\$256"
 	// 386:"BSFL"
 	// arm:"ORR\t\\$256","CLZ",-"MOVBU\tR"
 	// arm64:"ORR\t\\$256","RBITW","CLZW",-"MOVBU\tR",-"RBIT\t",-"CLZ\t"
@@ -434,6 +434,7 @@
 	// loong64: "ADDV", "SGTU"
 	// ppc64x: "ADDC", "ADDE", "ADDZE"
 	// s390x:"ADDE","ADDC\t[$]-1,"
+	// mips64:"ADDV","SGTU"
 	// riscv64: "ADD","SLTU"
 	return bits.Add(x, 7, ci)
 }
@@ -444,6 +445,7 @@
 	// loong64: "ADDV", "SGTU"
 	// ppc64x: "ADDC", -"ADDE", "ADDZE"
 	// s390x:"ADDC",-"ADDC\t[$]-1,"
+	// mips64:"ADDV","SGTU"
 	// riscv64: "ADD","SLTU"
 	return bits.Add(x, y, 0)
 }
@@ -454,6 +456,7 @@
 	// loong64: "ADDV", -"SGTU"
 	// ppc64x: "ADDC", "ADDE", -"ADDZE"
 	// s390x:"ADDE","ADDC\t[$]-1,"
+	// mips64:"ADDV",-"SGTU"
 	// riscv64: "ADD",-"SLTU"
 	r, _ := bits.Add(x, y, ci)
 	return r
@@ -475,6 +478,7 @@
 	// loong64: "ADDV", "SGTU"
 	// ppc64x: "ADDC", "ADDE", "ADDZE"
 	// s390x:"ADDE","ADDC\t[$]-1,"
+	// mips64:"ADDV","SGTU"
 	// riscv64: "ADD","SLTU"
 	return bits.Add64(x, y, ci)
 }
@@ -485,6 +489,7 @@
 	// loong64: "ADDV", "SGTU"
 	// ppc64x: "ADDC", "ADDE", "ADDZE"
 	// s390x:"ADDE","ADDC\t[$]-1,"
+	// mips64:"ADDV","SGTU"
 	// riscv64: "ADD","SLTU"
 	return bits.Add64(x, 7, ci)
 }
@@ -495,6 +500,7 @@
 	// loong64: "ADDV", "SGTU"
 	// ppc64x: "ADDC", -"ADDE", "ADDZE"
 	// s390x:"ADDC",-"ADDC\t[$]-1,"
+	// mips64:"ADDV","SGTU"
 	// riscv64: "ADD","SLTU"
 	return bits.Add64(x, y, 0)
 }
@@ -505,6 +511,7 @@
 	// loong64: "ADDV", -"SGTU"
 	// ppc64x: "ADDC", "ADDE", -"ADDZE"
 	// s390x:"ADDE","ADDC\t[$]-1,"
+	// mips64:"ADDV",-"SGTU"
 	// riscv64: "ADD",-"SLTU"
 	r, _ := bits.Add64(x, y, ci)
 	return r
@@ -629,6 +636,7 @@
 	// loong64:"SUBV","SGTU"
 	// ppc64x:"SUBC", "SUBE", "SUBZE", "NEG"
 	// s390x:"SUBE"
+	// mips64:"SUBV","SGTU"
 	// riscv64: "SUB","SLTU"
 	return bits.Sub(x, y, ci)
 }
@@ -639,6 +647,7 @@
 	// loong64:"SUBV","SGTU"
 	// ppc64x:"SUBC", "SUBE", "SUBZE", "NEG"
 	// s390x:"SUBE"
+	// mips64:"SUBV","SGTU"
 	// riscv64: "SUB","SLTU"
 	return bits.Sub(x, 7, ci)
 }
@@ -649,6 +658,7 @@
 	// loong64:"SUBV","SGTU"
 	// ppc64x:"SUBC", -"SUBE", "SUBZE", "NEG"
 	// s390x:"SUBC"
+	// mips64:"SUBV","SGTU"
 	// riscv64: "SUB","SLTU"
 	return bits.Sub(x, y, 0)
 }
@@ -680,6 +690,7 @@
 	// loong64:"SUBV","SGTU"
 	// ppc64x:"SUBC", "SUBE", "SUBZE", "NEG"
 	// s390x:"SUBE"
+	// mips64:"SUBV","SGTU"
 	// riscv64: "SUB","SLTU"
 	return bits.Sub64(x, y, ci)
 }
@@ -690,6 +701,7 @@
 	// loong64:"SUBV","SGTU"
 	// ppc64x:"SUBC", "SUBE", "SUBZE", "NEG"
 	// s390x:"SUBE"
+	// mips64:"SUBV","SGTU"
 	// riscv64: "SUB","SLTU"
 	return bits.Sub64(x, 7, ci)
 }
@@ -700,6 +712,7 @@
 	// loong64:"SUBV","SGTU"
 	// ppc64x:"SUBC", -"SUBE", "SUBZE", "NEG"
 	// s390x:"SUBC"
+	// mips64:"SUBV","SGTU"
 	// riscv64: "SUB","SLTU"
 	return bits.Sub64(x, y, 0)
 }
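
Aside, not part of the merge: the ADDC/ADDE and SUBC/SUBE style checks being extended to mips64 above exercise math/bits carry propagation. A minimal sketch of the user-level pattern, a 128-bit addition built from two 64-bit halves; add128 is an illustrative helper, not an API from the file above.

package main

import (
	"fmt"
	"math/bits"
)

// add128 adds two 128-bit values given as (hi, lo) pairs of uint64.
func add128(xhi, xlo, yhi, ylo uint64) (hi, lo uint64) {
	lo, carry := bits.Add64(xlo, ylo, 0) // low halves, produce carry
	hi, _ = bits.Add64(xhi, yhi, carry)  // high halves, consume carry
	return hi, lo
}

func main() {
	hi, lo := add128(0, ^uint64(0), 0, 1) // (2^64 - 1) + 1
	fmt.Println(hi, lo)                   // 1 0
}
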
diff --git a/test/codegen/memcombine.go b/test/codegen/memcombine.go
index 0d1c390..6d6c33d 100644
--- a/test/codegen/memcombine.go
+++ b/test/codegen/memcombine.go
@@ -348,7 +348,6 @@
 func extrashift_load_uint32(b []byte) uint32 {
 	// amd64:`MOVL\s\([A-Z]+\)`,`SHLL\s[$]2`,-`MOV[BW]`,-`OR`
 	return uint32(b[0])<<2 | uint32(b[1])<<10 | uint32(b[2])<<18 | uint32(b[3])<<26
-
 }
 
 func outoforder_load_uint32(b []byte) uint32 {
@@ -748,16 +747,16 @@
 
 func zero_byte_8(b []byte) {
 	_ = b[7]
-	b[0], b[1], b[2], b[3] = 0, 0, 0, 0
-	b[4], b[5], b[6], b[7] = 0, 0, 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
+	b[0], b[1], b[2], b[3] = 0, 0, 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
+	b[4], b[5], b[6], b[7] = 0, 0, 0, 0
 }
 
 func zero_byte_16(b []byte) {
 	_ = b[15]
-	b[0], b[1], b[2], b[3] = 0, 0, 0, 0
+	b[0], b[1], b[2], b[3] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH",-"MOVW"
 	b[4], b[5], b[6], b[7] = 0, 0, 0, 0
 	b[8], b[9], b[10], b[11] = 0, 0, 0, 0
-	b[12], b[13], b[14], b[15] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH",-"MOVW"
+	b[12], b[13], b[14], b[15] = 0, 0, 0, 0
 }
 
 func zero_byte_30(a *[30]byte) {
@@ -809,8 +808,8 @@
 
 func zero_uint16_8(h []uint16) {
 	_ = h[7]
-	h[0], h[1], h[2], h[3] = 0, 0, 0, 0
-	h[4], h[5], h[6], h[7] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH"
+	h[0], h[1], h[2], h[3] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH"
+	h[4], h[5], h[6], h[7] = 0, 0, 0, 0
 }
 
 func zero_uint32_2(w1, w2 []uint32) {
@@ -836,3 +835,86 @@
 	d1[0], d1[1] = 0, 0 // arm64:"STP",-"MOVB",-"MOVH"
 	d2[1], d2[0] = 0, 0 // arm64:"STP",-"MOVB",-"MOVH"
 }
+
+func loadstore(p, q *[4]uint8) {
+	// amd64:"MOVL",-"MOVB"
+	// arm64:"MOVWU",-"MOVBU"
+	x0, x1, x2, x3 := q[0], q[1], q[2], q[3]
+	// amd64:"MOVL",-"MOVB"
+	// arm64:"MOVW",-"MOVB"
+	p[0], p[1], p[2], p[3] = x0, x1, x2, x3
+}
+
+type S1 struct {
+	a, b int16
+}
+
+func loadstore2(p, q *S1) {
+	// amd64:"MOVL",-"MOVWLZX"
+	// arm64:"MOVWU",-"MOVH"
+	a, b := p.a, p.b
+	// amd64:"MOVL",-"MOVW"
+	// arm64:"MOVW",-"MOVH"
+	q.a, q.b = a, b
+}
+
+func wideStore(p *[8]uint64) {
+	if p == nil {
+		return
+	}
+
+	// amd64:"MOVUPS",-"MOVQ"
+	// arm64:"STP",-"MOVD"
+	p[0] = 0
+	// amd64:-"MOVUPS",-"MOVQ"
+	// arm64:-"STP",-"MOVD"
+	p[1] = 0
+}
+
+func wideStore2(p *[8]uint64, x, y uint64) {
+	if p == nil {
+		return
+	}
+
+	// s390x:"STMG"
+	p[0] = x
+	// s390x:-"STMG",-"MOVD"
+	p[1] = y
+}
+
+func store32le(p *struct{ a, b uint32 }, x uint64) {
+	// amd64:"MOVQ",-"MOVL",-"SHRQ"
+	// arm64:"MOVD",-"MOVW",-"LSR"
+	// ppc64le:"MOVD",-"MOVW",-"SRD"
+	p.a = uint32(x)
+	// amd64:-"MOVL",-"SHRQ"
+	// arm64:-"MOVW",-"LSR"
+	// ppc64le:-"MOVW",-"SRD"
+	p.b = uint32(x >> 32)
+}
+func store32be(p *struct{ a, b uint32 }, x uint64) {
+	// ppc64:"MOVD",-"MOVW",-"SRD"
+	// s390x:"MOVD",-"MOVW",-"SRD"
+	p.a = uint32(x >> 32)
+	// ppc64:-"MOVW",-"SRD"
+	// s390x:-"MOVW",-"SRD"
+	p.b = uint32(x)
+}
+func store16le(p *struct{ a, b uint16 }, x uint32) {
+	// amd64:"MOVL",-"MOVW",-"SHRL"
+	// arm64:"MOVW",-"MOVH",-"UBFX"
+	// ppc64le:"MOVW",-"MOVH",-"SRW"
+	p.a = uint16(x)
+	// amd64:-"MOVW",-"SHRL"
+	// arm64:-"MOVH",-"UBFX"
+	// ppc64le:-"MOVH",-"SRW"
+	p.b = uint16(x >> 16)
+}
+func store16be(p *struct{ a, b uint16 }, x uint32) {
+	// ppc64:"MOVW",-"MOVH",-"SRW"
+	// s390x:"MOVW",-"MOVH",-"SRW"
+	p.a = uint16(x >> 16)
+	// ppc64:-"MOVH",-"SRW"
+	// s390x:-"MOVH",-"SRW"
+	p.b = uint16(x)
+}
diff --git a/test/codegen/memops.go b/test/codegen/memops.go
index 7e59d88..e5e89c2 100644
--- a/test/codegen/memops.go
+++ b/test/codegen/memops.go
@@ -365,3 +365,39 @@
 	d /= b[i+4]
 	return c, d
 }
+
+func storeTest(a []bool, v int, i int) {
+	// amd64: `BTL\t\$0,`,`SETCS\t4\([A-Z]+[0-9]*\)`
+	a[4] = v&1 != 0
+	// amd64: `BTL\t\$1,`,`SETCS\t3\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)`
+	a[3+i] = v&2 != 0
+}
+
+func bitOps(p *[12]uint64) {
+	// amd64: `ORQ\t\$8, \(AX\)`
+	p[0] |= 8
+	// amd64: `ORQ\t\$1073741824, 8\(AX\)`
+	p[1] |= 1 << 30
+	// amd64: `BTSQ\t\$31, 16\(AX\)`
+	p[2] |= 1 << 31
+	// amd64: `BTSQ\t\$63, 24\(AX\)`
+	p[3] |= 1 << 63
+
+	// amd64: `ANDQ\t\$-9, 32\(AX\)`
+	p[4] &^= 8
+	// amd64: `ANDQ\t\$-1073741825, 40\(AX\)`
+	p[5] &^= 1 << 30
+	// amd64: `BTRQ\t\$31, 48\(AX\)`
+	p[6] &^= 1 << 31
+	// amd64: `BTRQ\t\$63, 56\(AX\)`
+	p[7] &^= 1 << 63
+
+	// amd64: `XORQ\t\$8, 64\(AX\)`
+	p[8] ^= 8
+	// amd64: `XORQ\t\$1073741824, 72\(AX\)`
+	p[9] ^= 1 << 30
+	// amd64: `BTCQ\t\$31, 80\(AX\)`
+	p[10] ^= 1 << 31
+	// amd64: `BTCQ\t\$63, 88\(AX\)`
+	p[11] ^= 1 << 63
+}
diff --git a/test/codegen/noextend.go b/test/codegen/noextend.go
index e4081e3..193f75b 100644
--- a/test/codegen/noextend.go
+++ b/test/codegen/noextend.go
@@ -6,6 +6,8 @@
 
 package codegen
 
+import "math/bits"
+
 var sval64 [8]int64
 var sval32 [8]int32
 var sval16 [8]int16
@@ -185,3 +187,99 @@
 	}
 	return false
 }
+
+// no unsign extension following 32 bits ops
+
+func noUnsignEXT(t1, t2, t3, t4 uint32, k int64) uint64 {
+	var ret uint64
+
+	// arm64:"RORW",-"MOVWU"
+	ret += uint64(bits.RotateLeft32(t1, 7))
+
+	// arm64:"MULW",-"MOVWU"
+	ret *= uint64(t1 * t2)
+
+	// arm64:"MNEGW",-"MOVWU"
+	ret += uint64(-t1 * t3)
+
+	// arm64:"UDIVW",-"MOVWU"
+	ret += uint64(t1 / t4)
+
+	// arm64:-"MOVWU"
+	ret += uint64(t2 % t3)
+
+	// arm64:"MSUBW",-"MOVWU"
+	ret += uint64(t1 - t2*t3)
+
+	// arm64:"MADDW",-"MOVWU"
+	ret += uint64(t3*t4 + t2)
+
+	// arm64:"REVW",-"MOVWU"
+	ret += uint64(bits.ReverseBytes32(t1))
+
+	// arm64:"RBITW",-"MOVWU"
+	ret += uint64(bits.Reverse32(t1))
+
+	// arm64:"CLZW",-"MOVWU"
+	ret += uint64(bits.LeadingZeros32(t1))
+
+	// arm64:"REV16W",-"MOVWU"
+	ret += uint64(((t1 & 0xff00ff00) >> 8) | ((t1 & 0x00ff00ff) << 8))
+
+	// arm64:"EXTRW",-"MOVWU"
+	ret += uint64((t1 << 25) | (t2 >> 7))
+
+	return ret
+}
+
+// no sign extension when the upper bits of the result are zero
+
+func noSignEXT(x int) int64 {
+	t1 := int32(x)
+
+	var ret int64
+
+	// arm64:-"MOVW"
+	ret += int64(t1 & 1)
+
+	// arm64:-"MOVW"
+	ret += int64(int32(x & 0x7fffffff))
+
+	// arm64:-"MOVH"
+	ret += int64(int16(x & 0x7fff))
+
+	// arm64:-"MOVB"
+	ret += int64(int8(x & 0x7f))
+
+	return ret
+}
+
+// corner cases that sign extension must not be omitted
+
+func shouldSignEXT(x int) int64 {
+	t1 := int32(x)
+
+	var ret int64
+
+	// arm64:"MOVW"
+	ret += int64(t1 & (-1))
+
+	// arm64:"MOVW"
+	ret += int64(int32(x & 0x80000000))
+
+	// arm64:"MOVW"
+	ret += int64(int32(x & 0x1100000011111111))
+
+	// arm64:"MOVH"
+	ret += int64(int16(x & 0x1100000000001111))
+
+	// arm64:"MOVB"
+	ret += int64(int8(x & 0x1100000000000011))
+
+	return ret
+}
+
+func noIntermediateExtension(a, b, c uint32) uint32 {
+	// arm64:-"MOVWU"
+	return a*b*9 + c
+}
diff --git a/test/codegen/retpoline.go b/test/codegen/retpoline.go
index a04a005..0e8f661 100644
--- a/test/codegen/retpoline.go
+++ b/test/codegen/retpoline.go
@@ -1,6 +1,7 @@
-// +build amd64
 // asmcheck -gcflags=-spectre=ret
 
+//go:build amd64
+
 package codegen
 
 func CallFunc(f func()) {
diff --git a/test/codegen/shift.go b/test/codegen/shift.go
index d34ff9b..50d6042 100644
--- a/test/codegen/shift.go
+++ b/test/codegen/shift.go
@@ -18,16 +18,46 @@
 
 func rshConst64Ux64(v uint64) uint64 {
 	// ppc64x:"SRD"
-	// riscv64:"SRLI",-"AND",-"SLTIU"
+	// riscv64:"SRLI\t",-"AND",-"SLTIU"
 	return v >> uint64(33)
 }
 
+func rshConst64Ux64Overflow32(v uint32) uint64 {
+	// riscv64:"MOV\t\\$0,",-"SRL"
+	return uint64(v) >> 32
+}
+
+func rshConst64Ux64Overflow16(v uint16) uint64 {
+	// riscv64:"MOV\t\\$0,",-"SRL"
+	return uint64(v) >> 16
+}
+
+func rshConst64Ux64Overflow8(v uint8) uint64 {
+	// riscv64:"MOV\t\\$0,",-"SRL"
+	return uint64(v) >> 8
+}
+
 func rshConst64x64(v int64) int64 {
 	// ppc64x:"SRAD"
-	// riscv64:"SRAI",-"OR",-"SLTIU"
+	// riscv64:"SRAI\t",-"OR",-"SLTIU"
 	return v >> uint64(33)
 }
 
+func rshConst64x64Overflow32(v int32) int64 {
+	// riscv64:"SRAIW",-"SLLI",-"SRAI\t"
+	return int64(v) >> 32
+}
+
+func rshConst64x64Overflow16(v int16) int64 {
+	// riscv64:"SLLI","SRAI",-"SRAIW"
+	return int64(v) >> 16
+}
+
+func rshConst64x64Overflow8(v int8) int64 {
+	// riscv64:"SLLI","SRAI",-"SRAIW"
+	return int64(v) >> 8
+}
+
 func lshConst32x64(v int32) int32 {
 	// ppc64x:"SLW"
 	// riscv64:"SLLI",-"AND",-"SLTIU", -"MOVW"
@@ -36,13 +66,13 @@
 
 func rshConst32Ux64(v uint32) uint32 {
 	// ppc64x:"SRW"
-	// riscv64:"SRLI",-"AND",-"SLTIU", -"MOVW"
+	// riscv64:"SRLIW",-"AND",-"SLTIU", -"MOVW"
 	return v >> uint64(29)
 }
 
 func rshConst32x64(v int32) int32 {
 	// ppc64x:"SRAW"
-	// riscv64:"SRAI",-"OR",-"SLTIU", -"MOVW"
+	// riscv64:"SRAIW",-"OR",-"SLTIU", -"MOVW"
 	return v >> uint64(29)
 }
 
@@ -54,13 +84,13 @@
 
 func rshConst64Ux32(v uint64) uint64 {
 	// ppc64x:"SRD"
-	// riscv64:"SRLI",-"AND",-"SLTIU"
+	// riscv64:"SRLI\t",-"AND",-"SLTIU"
 	return v >> uint32(33)
 }
 
 func rshConst64x32(v int64) int64 {
 	// ppc64x:"SRAD"
-	// riscv64:"SRAI",-"OR",-"SLTIU"
+	// riscv64:"SRAI\t",-"OR",-"SLTIU"
 	return v >> uint32(33)
 }
 
@@ -70,7 +100,7 @@
 
 func lshMask64x64(v int64, s uint64) int64 {
 	// arm64:"LSL",-"AND"
-	// ppc64x:"ANDCC",-"ORN",-"ISEL"
+	// ppc64x:"RLDICL",-"ORN",-"ISEL"
 	// riscv64:"SLL",-"AND\t",-"SLTIU"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v << (s & 63)
@@ -78,16 +108,16 @@
 
 func rshMask64Ux64(v uint64, s uint64) uint64 {
 	// arm64:"LSR",-"AND",-"CSEL"
-	// ppc64x:"ANDCC",-"ORN",-"ISEL"
-	// riscv64:"SRL",-"AND\t",-"SLTIU"
+	// ppc64x:"RLDICL",-"ORN",-"ISEL"
+	// riscv64:"SRL\t",-"AND\t",-"SLTIU"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v >> (s & 63)
 }
 
 func rshMask64x64(v int64, s uint64) int64 {
 	// arm64:"ASR",-"AND",-"CSEL"
-	// ppc64x:"ANDCC",-"ORN",-"ISEL"
-	// riscv64:"SRA",-"OR",-"SLTIU"
+	// ppc64x:"RLDICL",-"ORN",-"ISEL"
+	// riscv64:"SRA\t",-"OR",-"SLTIU"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v >> (s & 63)
 }
@@ -103,22 +133,32 @@
 func rshMask32Ux64(v uint32, s uint64) uint32 {
 	// arm64:"LSR",-"AND"
 	// ppc64x:"ISEL",-"ORN"
-	// riscv64:"SRL",-"AND\t",-"SLTIU"
+	// riscv64:"SRLW","SLTIU","NEG","AND\t",-"SRL\t"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v >> (s & 63)
 }
 
+func rsh5Mask32Ux64(v uint32, s uint64) uint32 {
+	// riscv64:"SRLW",-"AND\t",-"SLTIU",-"SRL\t"
+	return v >> (s & 31)
+}
+
 func rshMask32x64(v int32, s uint64) int32 {
 	// arm64:"ASR",-"AND"
 	// ppc64x:"ISEL",-"ORN"
-	// riscv64:"SRA",-"OR",-"SLTIU"
+	// riscv64:"SRAW","OR","SLTIU"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v >> (s & 63)
 }
 
+func rsh5Mask32x64(v int32, s uint64) int32 {
+	// riscv64:"SRAW",-"OR",-"SLTIU"
+	return v >> (s & 31)
+}
+
 func lshMask64x32(v int64, s uint32) int64 {
 	// arm64:"LSL",-"AND"
-	// ppc64x:"ANDCC",-"ORN"
+	// ppc64x:"RLDICL",-"ORN"
 	// riscv64:"SLL",-"AND\t",-"SLTIU"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v << (s & 63)
@@ -126,37 +166,37 @@
 
 func rshMask64Ux32(v uint64, s uint32) uint64 {
 	// arm64:"LSR",-"AND",-"CSEL"
-	// ppc64x:"ANDCC",-"ORN"
-	// riscv64:"SRL",-"AND\t",-"SLTIU"
+	// ppc64x:"RLDICL",-"ORN"
+	// riscv64:"SRL\t",-"AND\t",-"SLTIU"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v >> (s & 63)
 }
 
 func rshMask64x32(v int64, s uint32) int64 {
 	// arm64:"ASR",-"AND",-"CSEL"
-	// ppc64x:"ANDCC",-"ORN",-"ISEL"
-	// riscv64:"SRA",-"OR",-"SLTIU"
+	// ppc64x:"RLDICL",-"ORN",-"ISEL"
+	// riscv64:"SRA\t",-"OR",-"SLTIU"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v >> (s & 63)
 }
 
 func lshMask64x32Ext(v int64, s int32) int64 {
-	// ppc64x:"ANDCC",-"ORN",-"ISEL"
+	// ppc64x:"RLDICL",-"ORN",-"ISEL"
 	// riscv64:"SLL",-"AND\t",-"SLTIU"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v << uint(s&63)
 }
 
 func rshMask64Ux32Ext(v uint64, s int32) uint64 {
-	// ppc64x:"ANDCC",-"ORN",-"ISEL"
-	// riscv64:"SRL",-"AND\t",-"SLTIU"
+	// ppc64x:"RLDICL",-"ORN",-"ISEL"
+	// riscv64:"SRL\t",-"AND\t",-"SLTIU"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v >> uint(s&63)
 }
 
 func rshMask64x32Ext(v int64, s int32) int64 {
-	// ppc64x:"ANDCC",-"ORN",-"ISEL"
-	// riscv64:"SRA",-"OR",-"SLTIU"
+	// ppc64x:"RLDICL",-"ORN",-"ISEL"
+	// riscv64:"SRA\t",-"OR",-"SLTIU"
 	// s390x:-"RISBGZ",-"AND",-"LOCGR"
 	return v >> uint(s&63)
 }
@@ -206,7 +246,7 @@
 
 func rshGuarded64U(v uint64, s uint) uint64 {
 	if s < 64 {
-		// riscv64:"SRL",-"AND",-"SLTIU"
+		// riscv64:"SRL\t",-"AND",-"SLTIU"
 		// s390x:-"RISBGZ",-"AND",-"LOCGR"
 		// wasm:-"Select",-".*LtU"
 		// arm64:"LSR",-"CSEL"
@@ -217,7 +257,7 @@
 
 func rshGuarded64(v int64, s uint) int64 {
 	if s < 64 {
-		// riscv64:"SRA",-"OR",-"SLTIU"
+		// riscv64:"SRA\t",-"OR",-"SLTIU"
 		// s390x:-"RISBGZ",-"AND",-"LOCGR"
 		// wasm:-"Select",-".*LtU"
 		// arm64:"ASR",-"CSEL"
diff --git a/test/codegen/spectre.go b/test/codegen/spectre.go
index d845da3..1b22b77 100644
--- a/test/codegen/spectre.go
+++ b/test/codegen/spectre.go
@@ -1,6 +1,7 @@
-// +build amd64
 // asmcheck -gcflags=-spectre=index
 
+//go:build amd64
+
 // Copyright 2020 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
@@ -13,12 +14,12 @@
 }
 
 func IndexString(x string, i int) byte {
-	// amd64:`CMOVQLS`
+	// amd64:`CMOVQ(LS|CC)`
 	return x[i]
 }
 
 func IndexSlice(x []float64, i int) float64 {
-	// amd64:`CMOVQLS`
+	// amd64:`CMOVQ(LS|CC)`
 	return x[i]
 }
 
diff --git a/test/codegen/strings.go b/test/codegen/strings.go
index 94512f5..f98c062 100644
--- a/test/codegen/strings.go
+++ b/test/codegen/strings.go
@@ -67,4 +67,14 @@
 	bsink = []byte("0123456789ab")
 }
 
+// self-equality is always true. See issue 60777.
+func EqualSelf(s string) bool {
+	// amd64:`MOVL\t\$1, AX`,-`.*memequal.*`
+	return s == s
+}
+func NotEqualSelf(s string) bool {
+	// amd64:`XORL\tAX, AX`,-`.*memequal.*`
+	return s != s
+}
+
 var bsink []byte
diff --git a/test/codegen/structs.go b/test/codegen/structs.go
index fc49a69..49a201f 100644
--- a/test/codegen/structs.go
+++ b/test/codegen/structs.go
@@ -1,7 +1,6 @@
 // asmcheck
 
 //go:build !goexperiment.cgocheck2
-// +build !goexperiment.cgocheck2
 
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/codegen/switch.go b/test/codegen/switch.go
index 603e0be..980ea70 100644
--- a/test/codegen/switch.go
+++ b/test/codegen/switch.go
@@ -99,3 +99,87 @@
 		return ""
 	}
 }
+
+// use jump tables for type switches to concrete types.
+func typeSwitch(x any) int {
+	// amd64:`JMP\s\(.*\)\(.*\)$`
+	// arm64:`MOVD\s\(R.*\)\(R.*<<3\)`,`JMP\s\(R.*\)$`
+	switch x.(type) {
+	case int:
+		return 0
+	case int8:
+		return 1
+	case int16:
+		return 2
+	case int32:
+		return 3
+	case int64:
+		return 4
+	}
+	return 7
+}
+
+type I interface {
+	foo()
+}
+type J interface {
+	bar()
+}
+type IJ interface {
+	I
+	J
+}
+type K interface {
+	baz()
+}
+
+// use a runtime call for type switches to interface types.
+func interfaceSwitch(x any) int {
+	// amd64:`CALL\truntime.interfaceSwitch`,`MOVL\t16\(AX\)`,`MOVQ\t8\(.*\)(.*\*8)`
+	// arm64:`CALL\truntime.interfaceSwitch`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)`
+	switch x.(type) {
+	case I:
+		return 1
+	case J:
+		return 2
+	default:
+		return 3
+	}
+}
+
+func interfaceSwitch2(x K) int {
+	// amd64:`CALL\truntime.interfaceSwitch`,`MOVL\t16\(AX\)`,`MOVQ\t8\(.*\)(.*\*8)`
+	// arm64:`CALL\truntime.interfaceSwitch`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)`
+	switch x.(type) {
+	case I:
+		return 1
+	case J:
+		return 2
+	default:
+		return 3
+	}
+}
+
+func interfaceCast(x any) int {
+	// amd64:`CALL\truntime.typeAssert`,`MOVL\t16\(AX\)`,`MOVQ\t8\(.*\)(.*\*1)`
+	// arm64:`CALL\truntime.typeAssert`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)`
+	if _, ok := x.(I); ok {
+		return 3
+	}
+	return 5
+}
+
+func interfaceCast2(x K) int {
+	// amd64:`CALL\truntime.typeAssert`,`MOVL\t16\(AX\)`,`MOVQ\t8\(.*\)(.*\*1)`
+	// arm64:`CALL\truntime.typeAssert`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)`
+	if _, ok := x.(I); ok {
+		return 3
+	}
+	return 5
+}
+
+func interfaceConv(x IJ) I {
+	// amd64:`CALL\truntime.typeAssert`,`MOVL\t16\(AX\)`,`MOVQ\t8\(.*\)(.*\*1)`
+	// arm64:`CALL\truntime.typeAssert`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)`
+	return x
+}
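
The new switch.go cases above pin down two lowering strategies: a switch over concrete types is dispatched through a jump table on amd64/arm64, while a switch whose cases are themselves interface types goes through a runtime.interfaceSwitch call (and interface-to-interface assertions through runtime.typeAssert). A standalone sketch of the two shapes, with made-up names, is below; building it with `go build -gcflags=-S` would be expected to show the indirect JMP for the first function and the runtime call for the second, matching the asmcheck patterns.

package main

import "fmt"

type stringer interface{ String() string }
type named interface{ Name() string }

// Concrete-type cases: with several of them, the compiler can dispatch
// through a jump table instead of testing each case in turn.
func kind(x any) string {
	switch x.(type) {
	case int:
		return "int"
	case int8:
		return "int8"
	case int16:
		return "int16"
	case int32:
		return "int32"
	case int64:
		return "int64"
	}
	return "other"
}

// Interface-type cases: these cannot be resolved by a table lookup, so the
// compiler asks the runtime which case matches (runtime.interfaceSwitch).
func describe(x any) string {
	switch v := x.(type) {
	case stringer:
		return v.String()
	case named:
		return v.Name()
	default:
		return fmt.Sprint(x)
	}
}

func main() {
	fmt.Println(kind(int32(7)), describe(3.5))
}
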
diff --git a/test/codegen/writebarrier.go b/test/codegen/writebarrier.go
new file mode 100644
index 0000000..cfcfe15
--- /dev/null
+++ b/test/codegen/writebarrier.go
@@ -0,0 +1,55 @@
+// asmcheck
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codegen
+
+func combine2string(p *[2]string, a, b string) {
+	// amd64:`.*runtime[.]gcWriteBarrier4\(SB\)`
+	// arm64:`.*runtime[.]gcWriteBarrier4\(SB\)`
+	p[0] = a
+	// amd64:-`.*runtime[.]gcWriteBarrier`
+	// arm64:-`.*runtime[.]gcWriteBarrier`
+	p[1] = b
+}
+
+func combine4string(p *[4]string, a, b, c, d string) {
+	// amd64:`.*runtime[.]gcWriteBarrier8\(SB\)`
+	// arm64:`.*runtime[.]gcWriteBarrier8\(SB\)`
+	p[0] = a
+	// amd64:-`.*runtime[.]gcWriteBarrier`
+	// arm64:-`.*runtime[.]gcWriteBarrier`
+	p[1] = b
+	// amd64:-`.*runtime[.]gcWriteBarrier`
+	// arm64:-`.*runtime[.]gcWriteBarrier`
+	p[2] = c
+	// amd64:-`.*runtime[.]gcWriteBarrier`
+	// arm64:-`.*runtime[.]gcWriteBarrier`
+	p[3] = d
+}
+
+func combine2slice(p *[2][]byte, a, b []byte) {
+	// amd64:`.*runtime[.]gcWriteBarrier4\(SB\)`
+	// arm64:`.*runtime[.]gcWriteBarrier4\(SB\)`
+	p[0] = a
+	// amd64:-`.*runtime[.]gcWriteBarrier`
+	// arm64:-`.*runtime[.]gcWriteBarrier`
+	p[1] = b
+}
+
+func combine4slice(p *[4][]byte, a, b, c, d []byte) {
+	// amd64:`.*runtime[.]gcWriteBarrier8\(SB\)`
+	// arm64:`.*runtime[.]gcWriteBarrier8\(SB\)`
+	p[0] = a
+	// amd64:-`.*runtime[.]gcWriteBarrier`
+	// arm64:-`.*runtime[.]gcWriteBarrier`
+	p[1] = b
+	// amd64:-`.*runtime[.]gcWriteBarrier`
+	// arm64:-`.*runtime[.]gcWriteBarrier`
+	p[2] = c
+	// amd64:-`.*runtime[.]gcWriteBarrier`
+	// arm64:-`.*runtime[.]gcWriteBarrier`
+	p[3] = d
+}
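
The asmcheck patterns in the new writebarrier.go expect only the first store in each function to reference a write barrier: adjacent pointer stores into the same object are meant to share a single combined runtime.gcWriteBarrier4 or gcWriteBarrier8 buffer reservation rather than paying one barrier per store. A minimal sketch of the same pattern, with illustrative names (whether the batching applies still depends on the usual write-barrier analysis):

package main

type node struct {
	a, b, c, d *int
}

//go:noinline
func fill(n *node, a, b, c, d *int) {
	// Four adjacent pointer stores into *n: with barrier batching these are
	// expected to be covered by one combined gcWriteBarrier call.
	n.a = a
	n.b = b
	n.c = c
	n.d = d
}

func main() {
	w, x, y, z := 1, 2, 3, 4
	n := new(node)
	fill(n, &w, &x, &y, &z)
	println(*n.a + *n.b + *n.c + *n.d)
}
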
diff --git a/test/escape2.go b/test/escape2.go
index e3e5904..3e5d11f 100644
--- a/test/escape2.go
+++ b/test/escape2.go
@@ -397,7 +397,6 @@
 		return nil
 	}
 	return nil
-
 }
 
 // assigning to an array element is like assigning to the array
@@ -1729,7 +1728,7 @@
 
 func stringtoslicebyte0() {
 	s := "foo"
-	x := []byte(s) // ERROR "\(\[\]byte\)\(s\) does not escape$"
+	x := []byte(s) // ERROR "\(\[\]byte\)\(s\) does not escape$" "zero-copy string->\[\]byte conversion"
 	_ = x
 }
 
diff --git a/test/escape2n.go b/test/escape2n.go
index 57cc1a0..2613152 100644
--- a/test/escape2n.go
+++ b/test/escape2n.go
@@ -397,7 +397,6 @@
 		return nil
 	}
 	return nil
-
 }
 
 // assigning to an array element is like assigning to the array
@@ -1729,7 +1728,7 @@
 
 func stringtoslicebyte0() {
 	s := "foo"
-	x := []byte(s) // ERROR "\(\[\]byte\)\(s\) does not escape$"
+	x := []byte(s) // ERROR "\(\[\]byte\)\(s\) does not escape$" "zero-copy string->\[\]byte conversion"
 	_ = x
 }
 
diff --git a/test/escape4.go b/test/escape4.go
index 710eb75..c4a2fc1 100644
--- a/test/escape4.go
+++ b/test/escape4.go
@@ -1,5 +1,7 @@
 // errorcheck -0 -m
 
+//go:build !goexperiment.newinliner
+
 // Copyright 2010 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/escape5.go b/test/escape5.go
index 089130d..133d973 100644
--- a/test/escape5.go
+++ b/test/escape5.go
@@ -151,7 +151,7 @@
 func f10() {
 	// These don't escape but are too big for the stack
 	var x [1 << 30]byte         // ERROR "moved to heap: x"
-	var y = make([]byte, 1<<30) // ERROR "make\(\[\]byte, 1 << 30\) escapes to heap"
+	var y = make([]byte, 1<<30) // ERROR "make\(\[\]byte, 1073741824\) escapes to heap"
 	_ = x[0] + y[0]
 }
 
diff --git a/test/escape_calls.go b/test/escape_calls.go
index aa7c7f5..5424c00 100644
--- a/test/escape_calls.go
+++ b/test/escape_calls.go
@@ -52,3 +52,10 @@
 	s := "string"
 	f([]string{s}) // ERROR "\[\]string{...} escapes to heap"
 }
+
+func strmin(a, b, c string) string { // ERROR "leaking param: a to result ~r0 level=0" "leaking param: b to result ~r0 level=0" "leaking param: c to result ~r0 level=0"
+	return min(a, b, c)
+}
+func strmax(a, b, c string) string { // ERROR "leaking param: a to result ~r0 level=0" "leaking param: b to result ~r0 level=0" "leaking param: c to result ~r0 level=0"
+	return max(a, b, c)
+}
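
The strmin/strmax cases above record how escape analysis models the built-in min and max (Go 1.21 and later) over strings: the result is one of the operands, returned without copying its bytes, so every argument is treated as leaking to the result. A small illustrative use, with made-up names:

package main

import "fmt"

var sink string

func smallest(a, b, c string) string {
	// Whichever argument compares lowest is returned as-is, so keeping the
	// result alive keeps that argument's backing bytes alive too.
	return min(a, b, c)
}

func main() {
	sink = smallest("pear", "apple", "plum")
	fmt.Println(sink) // apple
}
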
diff --git a/test/escape_closure.go b/test/escape_closure.go
index bd6c025..0b19d6f 100644
--- a/test/escape_closure.go
+++ b/test/escape_closure.go
@@ -177,3 +177,17 @@
 }
 
 func nopFunc(p *int) {} // ERROR "p does not escape"
+
+func ClosureIndirect2() {
+	f := func(p *int) *int { return p } // ERROR "leaking param: p to result ~r0 level=0" "func literal does not escape"
+
+	f(new(int)) // ERROR "new\(int\) does not escape"
+
+	g := f
+	g(new(int)) // ERROR "new\(int\) does not escape"
+
+	h := nopFunc2
+	h(new(int)) // ERROR "new\(int\) does not escape"
+}
+
+func nopFunc2(p *int) *int { return p } // ERROR "leaking param: p to result ~r0 level=0"
diff --git a/test/escape_mutations.go b/test/escape_mutations.go
new file mode 100644
index 0000000..4365fc1
--- /dev/null
+++ b/test/escape_mutations.go
@@ -0,0 +1,77 @@
+// errorcheck -0 -m -d=escapemutationscalls,zerocopy -l
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "fmt"
+
+type B struct {
+	x  int
+	px *int
+	pb *B
+}
+
+func F1(b *B) { // ERROR "mutates param: b derefs=0"
+	b.x = 1
+}
+
+func F2(b *B) { // ERROR "mutates param: b derefs=1"
+	*b.px = 1
+}
+
+func F2a(b *B) { // ERROR "mutates param: b derefs=0"
+	b.px = nil
+}
+
+func F3(b *B) { // ERROR "leaking param: b"
+	fmt.Println(b) // ERROR "\.\.\. argument does not escape"
+}
+
+func F4(b *B) { // ERROR "leaking param content: b"
+	fmt.Println(*b) // ERROR "\.\.\. argument does not escape" "\*b escapes to heap"
+}
+
+func F4a(b *B) { // ERROR "leaking param content: b" "mutates param: b derefs=0"
+	b.x = 2
+	fmt.Println(*b) // ERROR "\.\.\. argument does not escape" "\*b escapes to heap"
+}
+
+func F5(b *B) { // ERROR "leaking param: b"
+	sink = b
+}
+
+func F6(b *B) int { // ERROR "b does not escape, mutate, or call"
+	return b.x
+}
+
+var sink any
+
+func M() {
+	var b B // ERROR "moved to heap: b"
+	F1(&b)
+	F2(&b)
+	F2a(&b)
+	F3(&b)
+	F4(&b)
+}
+
+func g(s string) { // ERROR "s does not escape, mutate, or call"
+	sink = &([]byte(s))[10] // ERROR "\(\[\]byte\)\(s\) escapes to heap"
+}
+
+func h(out []byte, s string) { // ERROR "mutates param: out derefs=0" "s does not escape, mutate, or call"
+	copy(out, []byte(s)) // ERROR "zero-copy string->\[\]byte conversion" "\(\[\]byte\)\(s\) does not escape"
+}
+
+func i(s string) byte { // ERROR "s does not escape, mutate, or call"
+	p := []byte(s) // ERROR "zero-copy string->\[\]byte conversion" "\(\[\]byte\)\(s\) does not escape"
+	return p[20]
+}
+
+func j(s string, x byte) { // ERROR "s does not escape, mutate, or call"
+	p := []byte(s) // ERROR "\(\[\]byte\)\(s\) does not escape"
+	p[20] = x
+}
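
Several of the diagnostics above come from the zerocopy debug flag: a []byte(s) conversion normally copies, because strings are immutable, but when the compiler can prove the resulting slice is never written and does not outlive s it may alias the string's storage instead. A runnable sketch of the two situations, with illustrative names:

package main

import "fmt"

// Read-only use of the conversion: a candidate for the zero-copy form.
func firstByte(s string) byte {
	b := []byte(s) // may reuse s's bytes; b is only read
	return b[0]
}

// Mutating use: the conversion must produce a fresh copy.
func upperFirst(s string) string {
	b := []byte(s) // must be copied; b[0] is written below
	if len(b) > 0 && b[0] >= 'a' && b[0] <= 'z' {
		b[0] -= 'a' - 'A'
	}
	return string(b)
}

func main() {
	fmt.Println(firstByte("go"), upperFirst("go")) // 103 Go
}
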
diff --git a/test/escape_reflect.go b/test/escape_reflect.go
index b2d674a..99fbada 100644
--- a/test/escape_reflect.go
+++ b/test/escape_reflect.go
@@ -115,7 +115,7 @@
 	return v.IsValid() || v.IsNil() || v.IsZero()
 }
 
-func is3(x struct { a, b int }) bool {
+func is3(x struct{ a, b int }) bool {
 	v := reflect.ValueOf(x) // ERROR "x does not escape"
 	return v.IsValid() || v.IsNil() || v.IsZero()
 }
@@ -352,9 +352,9 @@
 }
 
 var (
-	intTyp    = reflect.TypeOf(int(0))     // ERROR "int\(0\) does not escape"
+	intTyp    = reflect.TypeOf(int(0))     // ERROR "0 does not escape"
 	uintTyp   = reflect.TypeOf(uint(0))    // ERROR "uint\(0\) does not escape"
-	stringTyp = reflect.TypeOf(string("")) // ERROR "string\(.*\) does not escape"
+	stringTyp = reflect.TypeOf(string("")) // ERROR ".. does not escape"
 	bytesTyp  = reflect.TypeOf([]byte{})   // ERROR "\[\]byte{} does not escape"
 )
 
diff --git a/test/escape_slice.go b/test/escape_slice.go
index 7f94a75..65181e5 100644
--- a/test/escape_slice.go
+++ b/test/escape_slice.go
@@ -137,7 +137,7 @@
 var v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}
 
 func IPv4(a, b, c, d byte) IP {
-	p := make(IP, IPv6len) // ERROR "make\(IP, IPv6len\) escapes to heap"
+	p := make(IP, IPv6len) // ERROR "make\(IP, 16\) escapes to heap"
 	copy(p, v4InV6Prefix)
 	p[12] = a
 	p[13] = b
diff --git a/test/finprofiled.go b/test/finprofiled.go
index ca7e3c8..ef8f61c 100644
--- a/test/finprofiled.go
+++ b/test/finprofiled.go
@@ -57,6 +57,11 @@
 	for _, p := range prof {
 		bytes := p.AllocBytes - p.FreeBytes
 		nobj := p.AllocObjects - p.FreeObjects
+		if nobj == 0 {
+			// There may be a record that has had all of its objects
+			// freed. That's fine. Avoid a divide-by-zero and skip.
+			continue
+		}
 		size := bytes / nobj
 		if size == tinyBlockSize {
 			totalBytes += bytes
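
The skip added above matters for any code that derives an average object size from heap profile records: when records with zero in-use memory are included, a bucket may have had every one of its objects freed, which would make the divisor zero. A standalone sketch of the same guard over runtime.MemProfile:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Standard grow-and-retry loop for runtime.MemProfile.
	var rec []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for !ok {
		rec = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(rec, true)
	}
	rec = rec[:n]

	for _, p := range rec {
		bytes := p.AllocBytes - p.FreeBytes    // same as p.InUseBytes()
		nobj := p.AllocObjects - p.FreeObjects // same as p.InUseObjects()
		if nobj == 0 {
			continue // fully freed bucket: nothing live, avoid bytes/0
		}
		fmt.Println("average object size:", bytes/nobj)
	}
}
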
diff --git a/test/fixedbugs/bug248.go b/test/fixedbugs/bug248.go
index 93d2fdb..bd305a8 100644
--- a/test/fixedbugs/bug248.go
+++ b/test/fixedbugs/bug248.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!plan9
 // errorcheckandrundir -1
 
+//go:build !nacl && !js && !plan9
+
 // Copyright 2009 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/bug369.go b/test/fixedbugs/bug369.go
index 47258fe..f5dad37 100644
--- a/test/fixedbugs/bug369.go
+++ b/test/fixedbugs/bug369.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!wasip1,gc
 // run
 
+//go:build !nacl && !js && !wasip1 && gc
+
 // Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/bug385_32.go b/test/fixedbugs/bug385_32.go
index 73a1311..d9314c7 100644
--- a/test/fixedbugs/bug385_32.go
+++ b/test/fixedbugs/bug385_32.go
@@ -1,6 +1,7 @@
-// +build 386 amd64p32 arm
 // errorcheck
 
+//go:build 386 || amd64p32 || arm
+
 // Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/bug385_64.go b/test/fixedbugs/bug385_64.go
index 0f941ca..3240960 100644
--- a/test/fixedbugs/bug385_64.go
+++ b/test/fixedbugs/bug385_64.go
@@ -1,6 +1,7 @@
-// +build amd64
 // errorcheck
 
+//go:build amd64
+
 // Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/bug398.go b/test/fixedbugs/bug398.go
index db3e43c..2b00f60 100644
--- a/test/fixedbugs/bug398.go
+++ b/test/fixedbugs/bug398.go
@@ -1,4 +1,4 @@
-// compile -d=interfacecycles
+// errorcheck
 
 // Copyright 2012 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -11,11 +11,11 @@
 
 // exported interfaces
 
-type I1 interface {
+type I1 interface { // ERROR "invalid recursive type: anonymous interface refers to itself"
       F() interface{I1}
 }
 
-type I2 interface {
+type I2 interface { // ERROR "invalid recursive type: anonymous interface refers to itself"
       F() interface{I2}
 }
 
@@ -28,11 +28,11 @@
 
 // non-exported interfaces
 
-type i1 interface {
+type i1 interface { // ERROR "invalid recursive type: anonymous interface refers to itself"
       F() interface{i1}
 }
 
-type i2 interface {
+type i2 interface { // ERROR "invalid recursive type: anonymous interface refers to itself"
       F() interface{i2}
 }
 
diff --git a/test/fixedbugs/bug513.go b/test/fixedbugs/bug513.go
index 459a9c3..ce7ebf9 100644
--- a/test/fixedbugs/bug513.go
+++ b/test/fixedbugs/bug513.go
@@ -1,6 +1,6 @@
 // run -race -gcflags=all=-d=checkptr=0
-// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64
-// +build cgo
+
+//go:build ((linux && amd64) || (linux && ppc64le) || (darwin && amd64) || (freebsd && amd64) || (netbsd && amd64) || (windows && amd64)) && cgo
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/bug517.go b/test/fixedbugs/bug517.go
new file mode 100644
index 0000000..00860dc
--- /dev/null
+++ b/test/fixedbugs/bug517.go
@@ -0,0 +1,37 @@
+// run
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The gofrontend used to mishandle this code due to a pass ordering issue.
+// It was inconsistent as to whether unsafe.Sizeof(byte(0)) was a constant,
+// and therefore as to whether it was a direct-iface type.
+
+package main
+
+import "unsafe"
+
+type A [unsafe.Sizeof(byte(0))]*byte
+
+func (r A) V() byte {
+	return *r[0]
+}
+
+func F() byte {
+	panic("F") // should never be called
+}
+
+type B [unsafe.Sizeof(F())]*byte
+
+func (r B) V() byte {
+	return *r[0]
+}
+
+func main() {
+	b := byte(1)
+	v := A{&b}.V() + B{&b}.V()
+	if v != 2 {
+		panic(v)
+	}
+}
diff --git a/test/fixedbugs/bug518.go b/test/fixedbugs/bug518.go
new file mode 100644
index 0000000..c3ec1f4
--- /dev/null
+++ b/test/fixedbugs/bug518.go
@@ -0,0 +1,15 @@
+// errorcheck
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The gofrontend used to accept this.
+
+package p
+
+func F2(a int32) bool {
+	return a == C	// ERROR "invalid|incompatible"
+}
+
+const C = uint32(34)
diff --git a/test/fixedbugs/issue10607.go b/test/fixedbugs/issue10607.go
index 8a04bc9..d7be618 100644
--- a/test/fixedbugs/issue10607.go
+++ b/test/fixedbugs/issue10607.go
@@ -1,6 +1,7 @@
-// +build linux,!ppc64,gc
 // run
 
+//go:build linux && !ppc64 && gc && cgo
+
 // Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
@@ -19,7 +20,7 @@
 
 func main() {
 	test("internal")
-	test("external")
+	test("external") // The 'cgo' build constraint should imply that a linker is available.
 }
 
 func test(linkmode string) {
diff --git a/test/fixedbugs/issue10958.go b/test/fixedbugs/issue10958.go
index 52487fb..d0b3958 100644
--- a/test/fixedbugs/issue10958.go
+++ b/test/fixedbugs/issue10958.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,disabled_see_issue_18589
 // buildrun -t 10  -gcflags=-d=ssa/insert_resched_checks/on,ssa/check/on
 
+//go:build !nacl && !js && disabled_see_issue_18589
+
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue11326b.go b/test/fixedbugs/issue11326b.go
index b5f933b..5db083a 100644
--- a/test/fixedbugs/issue11326b.go
+++ b/test/fixedbugs/issue11326b.go
@@ -2,7 +2,7 @@
 
 // Does not work with gccgo, which uses a smaller (but still permitted)
 // exponent size.
-// +build !gccgo
+//go:build !gccgo
 
 // Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue11656.dir/asm.go b/test/fixedbugs/issue11656.dir/asm.go
index cdcb064..c64302b 100644
--- a/test/fixedbugs/issue11656.dir/asm.go
+++ b/test/fixedbugs/issue11656.dir/asm.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build ppc64 || ppc64le
-// +build ppc64 ppc64le
 
 package main
 
diff --git a/test/fixedbugs/issue11656.dir/asm_generic.go b/test/fixedbugs/issue11656.dir/asm_generic.go
index 104d44d..a8b8f6a 100644
--- a/test/fixedbugs/issue11656.dir/asm_generic.go
+++ b/test/fixedbugs/issue11656.dir/asm_generic.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !ppc64 && !ppc64le
-// +build !ppc64,!ppc64le
 
 package main
 
diff --git a/test/fixedbugs/issue11656.go b/test/fixedbugs/issue11656.go
index dba8e35..2d74689 100644
--- a/test/fixedbugs/issue11656.go
+++ b/test/fixedbugs/issue11656.go
@@ -17,6 +17,5 @@
 // able to unwind past that point.
 
 //go:build !windows && !wasm && !gccgo
-// +build !windows,!wasm,!gccgo
 
 package ignored
diff --git a/test/fixedbugs/issue11771.go b/test/fixedbugs/issue11771.go
index 657e1b0..0c6b769 100644
--- a/test/fixedbugs/issue11771.go
+++ b/test/fixedbugs/issue11771.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!wasip1,gc
 // run
 
+//go:build !nacl && !js && !wasip1 && gc
+
 // Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue12411.go b/test/fixedbugs/issue12411.go
index ff49314..19275d9 100644
--- a/test/fixedbugs/issue12411.go
+++ b/test/fixedbugs/issue12411.go
@@ -1,6 +1,7 @@
-// +build !386
 // run
 
+//go:build !386
+
 // Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue13265.go b/test/fixedbugs/issue13265.go
index 3e16cee..0a393a3 100644
--- a/test/fixedbugs/issue13265.go
+++ b/test/fixedbugs/issue13265.go
@@ -1,5 +1,6 @@
 // errorcheck -0 -race
-// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64
+
+//go:build (linux && amd64) || (linux && ppc64le) || (darwin && amd64) || (freebsd && amd64) || (netbsd && amd64) || (windows && amd64)
 
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue13268.go b/test/fixedbugs/issue13268.go
index 53a82d5..da54b70 100644
--- a/test/fixedbugs/issue13268.go
+++ b/test/fixedbugs/issue13268.go
@@ -1,6 +1,7 @@
-// +build gc
 // run
 
+//go:build gc
+
 // Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue13799.go b/test/fixedbugs/issue13799.go
index 7ab4040..f06f198 100644
--- a/test/fixedbugs/issue13799.go
+++ b/test/fixedbugs/issue13799.go
@@ -61,7 +61,7 @@
 	}
 
 	if len(m) != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
+		panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "500 escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
 
@@ -85,7 +85,7 @@
 	}
 
 	if len(m) != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
+		panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "500 escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
 
@@ -111,7 +111,7 @@
 	}
 
 	if *m != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
+		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "500 escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
 
@@ -137,7 +137,7 @@
 	}
 
 	if *m != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
+		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "500 escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
 
@@ -168,7 +168,7 @@
 	}
 
 	if *m != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
+		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "500 escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
 
@@ -186,6 +186,6 @@
 	}
 
 	if *m != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
+		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "500 escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
diff --git a/test/fixedbugs/issue14636.go b/test/fixedbugs/issue14636.go
index 770fb4a..c8e751f 100644
--- a/test/fixedbugs/issue14636.go
+++ b/test/fixedbugs/issue14636.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!wasip1,!android,gc
 // run
 
+//go:build !nacl && !js && !wasip1 && !android && gc
+
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue15002.go b/test/fixedbugs/issue15002.go
index 936105e..bb0fe42 100644
--- a/test/fixedbugs/issue15002.go
+++ b/test/fixedbugs/issue15002.go
@@ -1,6 +1,6 @@
 // run
-// +build amd64
-// +build linux darwin
+
+//go:build amd64 && (linux || darwin)
 
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue15091.go b/test/fixedbugs/issue15091.go
index 678e791..115ad68 100644
--- a/test/fixedbugs/issue15091.go
+++ b/test/fixedbugs/issue15091.go
@@ -1,5 +1,6 @@
 // errorcheck -0 -race
-// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64
+
+//go:build (linux && amd64) || (linux && ppc64le) || (darwin && amd64) || (freebsd && amd64) || (netbsd && amd64) || (windows && amd64)
 
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue15277.go b/test/fixedbugs/issue15277.go
index af165f7..d22a305 100644
--- a/test/fixedbugs/issue15277.go
+++ b/test/fixedbugs/issue15277.go
@@ -1,9 +1,10 @@
 // run
 
+//go:build amd64
+
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
-// +build amd64
 
 package main
 
diff --git a/test/fixedbugs/issue15609.dir/call.go b/test/fixedbugs/issue15609.dir/call.go
index 41a489c..48f90fd 100644
--- a/test/fixedbugs/issue15609.dir/call.go
+++ b/test/fixedbugs/issue15609.dir/call.go
@@ -1,4 +1,4 @@
-// +build !amd64,!386
+//go:build !amd64 && !386
 
 package main
 
diff --git a/test/fixedbugs/issue15609.dir/call_decl.go b/test/fixedbugs/issue15609.dir/call_decl.go
index d9c5a4e..cdca44a 100644
--- a/test/fixedbugs/issue15609.dir/call_decl.go
+++ b/test/fixedbugs/issue15609.dir/call_decl.go
@@ -1,4 +1,4 @@
-// +build amd64 386
+//go:build amd64 || 386
 
 package main
 
diff --git a/test/fixedbugs/issue15609.go b/test/fixedbugs/issue15609.go
index e0bf8a4..fa3b755 100644
--- a/test/fixedbugs/issue15609.go
+++ b/test/fixedbugs/issue15609.go
@@ -1,6 +1,6 @@
 // runindir
 
-// +build !nacl
+//go:build !nacl
 
 // Copyright 2019 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue15747.go b/test/fixedbugs/issue15747.go
index 7825958..92e762c 100644
--- a/test/fixedbugs/issue15747.go
+++ b/test/fixedbugs/issue15747.go
@@ -1,7 +1,6 @@
 // errorcheck -0 -live
 
 //go:build !goexperiment.cgocheck2
-// +build !goexperiment.cgocheck2
 
 // Copyright 2016 The Go Authors.  All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue16008.go b/test/fixedbugs/issue16008.go
index b88e235..9631379 100644
--- a/test/fixedbugs/issue16008.go
+++ b/test/fixedbugs/issue16008.go
@@ -1,5 +1,6 @@
 // errorcheck -0 -race
-// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64
+
+//go:build (linux && amd64) || (linux && ppc64le) || (darwin && amd64) || (freebsd && amd64) || (netbsd && amd64) || (windows && amd64)
 
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue16037_run.go b/test/fixedbugs/issue16037_run.go
index 610fd2d..d5ad477 100644
--- a/test/fixedbugs/issue16037_run.go
+++ b/test/fixedbugs/issue16037_run.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!wasip1,!android,!gccgo
 // run
 
+//go:build !nacl && !js && !wasip1 && !android && !gccgo
+
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue16369.go b/test/fixedbugs/issue16369.go
index 3a7bb7e..86d0ce6 100644
--- a/test/fixedbugs/issue16369.go
+++ b/test/fixedbugs/issue16369.go
@@ -1,4 +1,4 @@
-// compile -d=interfacecycles
+// errorcheck
 
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -6,7 +6,7 @@
 
 package p
 
-type T interface {
+type T interface { // ERROR "invalid recursive type: anonymous interface refers to itself"
 	M(interface {
 		T
 	})
diff --git a/test/fixedbugs/issue17449.go b/test/fixedbugs/issue17449.go
index 51cc8ea..4e092fa 100644
--- a/test/fixedbugs/issue17449.go
+++ b/test/fixedbugs/issue17449.go
@@ -1,5 +1,6 @@
 // errorcheck -0 -race
-// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64
+
+//go:build (linux && amd64) || (linux && ppc64le) || (darwin && amd64) || (freebsd && amd64) || (netbsd && amd64) || (windows && amd64)
 
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue19182.go b/test/fixedbugs/issue19182.go
index f5e44e9..544c4b0 100644
--- a/test/fixedbugs/issue19182.go
+++ b/test/fixedbugs/issue19182.go
@@ -1,5 +1,6 @@
 // run
-// +build !js,!wasip1
+
+//go:build !js && !wasip1
 
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue19261.go b/test/fixedbugs/issue19261.go
index 61cff6e..1da90e9 100644
--- a/test/fixedbugs/issue19261.go
+++ b/test/fixedbugs/issue19261.go
@@ -1,5 +1,7 @@
 // errorcheckdir -0 -m
 
+//go:build !goexperiment.newinliner
+
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue19507.dir/main.go b/test/fixedbugs/issue19507.dir/main.go
index c115556..e4e72be 100644
--- a/test/fixedbugs/issue19507.dir/main.go
+++ b/test/fixedbugs/issue19507.dir/main.go
@@ -1,4 +1,4 @@
-// +build arm
+//go:build arm
 
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue19507.go b/test/fixedbugs/issue19507.go
index 543e17e..ef0a9a1 100644
--- a/test/fixedbugs/issue19507.go
+++ b/test/fixedbugs/issue19507.go
@@ -1,6 +1,7 @@
-// +build arm
 // builddir
 
+//go:build arm
+
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue20250.go b/test/fixedbugs/issue20250.go
index c739b6f..34184c4 100644
--- a/test/fixedbugs/issue20250.go
+++ b/test/fixedbugs/issue20250.go
@@ -1,7 +1,6 @@
 // errorcheck -0 -live -l
 
 //go:build !goexperiment.cgocheck2
-// +build !goexperiment.cgocheck2
 
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue20529.go b/test/fixedbugs/issue20529.go
index eeaaf37..118365d 100644
--- a/test/fixedbugs/issue20529.go
+++ b/test/fixedbugs/issue20529.go
@@ -1,6 +1,6 @@
 // errorcheck
 
-// +build amd64
+//go:build amd64
 
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue20780b.go b/test/fixedbugs/issue20780b.go
index c8bf1f8..8aba854 100644
--- a/test/fixedbugs/issue20780b.go
+++ b/test/fixedbugs/issue20780b.go
@@ -1,6 +1,7 @@
-// +build cgo,linux,amd64
 // run -race
 
+//go:build cgo && linux && amd64
+
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue21317.go b/test/fixedbugs/issue21317.go
index 75d871c..cebfc0f 100644
--- a/test/fixedbugs/issue21317.go
+++ b/test/fixedbugs/issue21317.go
@@ -1,6 +1,7 @@
-// +build !js,!wasip1,gc
 // run
 
+//go:build !js && !wasip1 && gc
+
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue21576.go b/test/fixedbugs/issue21576.go
index 8e1e3ad..d5bfc28 100644
--- a/test/fixedbugs/issue21576.go
+++ b/test/fixedbugs/issue21576.go
@@ -1,6 +1,6 @@
 // run
 
-// +build !nacl,!js,!wasip1,!gccgo
+//go:build !nacl && !js && !wasip1 && !gccgo
 
 // Copyright 2019 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue22200b.go b/test/fixedbugs/issue22200b.go
index ce20923..7babeec 100644
--- a/test/fixedbugs/issue22200b.go
+++ b/test/fixedbugs/issue22200b.go
@@ -1,11 +1,11 @@
 // errorcheck
 
+//go:build !386 && !amd64p32 && !arm && !mips && !mipsle
+
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !386,!amd64p32,!arm,!mips,!mipsle
-
 package p
 
 func f3(x *[1 << 31]byte) byte { // GC_ERROR "stack frame too large"
diff --git a/test/fixedbugs/issue22660.go b/test/fixedbugs/issue22660.go
index 150de17..1062203 100644
--- a/test/fixedbugs/issue22660.go
+++ b/test/fixedbugs/issue22660.go
@@ -1,6 +1,7 @@
-// +build !js,!wasip1,gc
 // run
 
+//go:build !js && !wasip1 && gc
+
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue22662b.go b/test/fixedbugs/issue22662b.go
index c7a1e05..a58a2e0 100644
--- a/test/fixedbugs/issue22662b.go
+++ b/test/fixedbugs/issue22662b.go
@@ -1,6 +1,7 @@
-// +build !js,!wasip1,gc
 // run
 
+//go:build !js && !wasip1 && gc
+
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue23781.go b/test/fixedbugs/issue23781.go
index 5c03cf7..1947951 100644
--- a/test/fixedbugs/issue23781.go
+++ b/test/fixedbugs/issue23781.go
@@ -1,6 +1,7 @@
-// +build amd64
 // compile
 
+//go:build amd64
+
 // Copyright 2009 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue24187.go b/test/fixedbugs/issue24187.go
index 45fc929..f697968 100644
--- a/test/fixedbugs/issue24187.go
+++ b/test/fixedbugs/issue24187.go
@@ -1,6 +1,7 @@
-// +build amd64p32
 // run
 
+//go:build amd64p32
+
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue24491a.go b/test/fixedbugs/issue24491a.go
index d30b65b..1f74818 100644
--- a/test/fixedbugs/issue24491a.go
+++ b/test/fixedbugs/issue24491a.go
@@ -74,8 +74,8 @@
 			break
 		}
 	}()
-
 	<-done
+
 	func() {
 		s := &S{}
 		defer s.test("method call", uintptr(setup()), uintptr(setup()), uintptr(setup()), uintptr(setup()))
diff --git a/test/fixedbugs/issue24651a.go b/test/fixedbugs/issue24651a.go
index 1bfe8ac..74bcbb1 100644
--- a/test/fixedbugs/issue24651a.go
+++ b/test/fixedbugs/issue24651a.go
@@ -1,5 +1,6 @@
 //errorcheck -0 -race -m -m
-// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64
+
+//go:build (linux && amd64) || (linux && ppc64le) || (darwin && amd64) || (freebsd && amd64) || (netbsd && amd64) || (windows && amd64)
 
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue26411.go b/test/fixedbugs/issue26411.go
index 3ae9e03..d7b2a26 100644
--- a/test/fixedbugs/issue26411.go
+++ b/test/fixedbugs/issue26411.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!wasip1
 // run
 
+//go:build !nacl && !js && !wasip1
+
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue29329.go b/test/fixedbugs/issue29329.go
index 7818bca..07f96f4 100644
--- a/test/fixedbugs/issue29329.go
+++ b/test/fixedbugs/issue29329.go
@@ -1,12 +1,11 @@
-// +build cgo
 // run -race
 
+//go:build cgo && linux && amd64
+
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build linux,amd64
-
 package main
 
 import (
diff --git a/test/fixedbugs/issue30908.go b/test/fixedbugs/issue30908.go
index 27f070e..98dd641 100644
--- a/test/fixedbugs/issue30908.go
+++ b/test/fixedbugs/issue30908.go
@@ -5,6 +5,5 @@
 // license that can be found in the LICENSE file.
 
 //go:build !nacl && !js
-// +build !nacl,!js
 
 package ignored
diff --git a/test/fixedbugs/issue31573.go b/test/fixedbugs/issue31573.go
index eaab563..5197163 100644
--- a/test/fixedbugs/issue31573.go
+++ b/test/fixedbugs/issue31573.go
@@ -1,4 +1,4 @@
-// errorcheck -0 -m
+// errorcheck -0 -m -l
 
 // Copyright 2019 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -6,7 +6,7 @@
 
 package p
 
-func f(...*int) {} // ERROR "can inline f$"
+func f(...*int) {}
 
 func g() {
 	defer f()
@@ -19,31 +19,31 @@
 	defer f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) does not escape$"
 
 	go f()
-	go f(new(int))           // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
-	go f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
+	go f(new(int))           // ERROR "... argument does not escape$" "new\(int\) does not escape$"
+	go f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) does not escape$"
 
 	go f(nil...)
 	go f([]*int{}...)                   // ERROR "\[\]\*int{} does not escape$"
-	go f([]*int{new(int)}...)           // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
-	go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
+	go f([]*int{new(int)}...)           // ERROR "\[\]\*int{...} does not escape$" "new\(int\) does not escape$"
+	go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) does not escape$"
 
 	for {
 		defer f()
-		defer f(new(int))           // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
-		defer f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
+		defer f(new(int))           // ERROR "... argument does not escape$" "new\(int\) does not escape$"
+		defer f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) does not escape$"
 
 		defer f(nil...)
 		defer f([]*int{}...)                   // ERROR "\[\]\*int{} does not escape$"
-		defer f([]*int{new(int)}...)           // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
-		defer f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
+		defer f([]*int{new(int)}...)           // ERROR "\[\]\*int{...} does not escape$" "new\(int\) does not escape$"
+		defer f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) does not escape$"
 
 		go f()
-		go f(new(int))           // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
-		go f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
+		go f(new(int))           // ERROR "... argument does not escape$" "new\(int\) does not escape$"
+		go f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) does not escape$"
 
 		go f(nil...)
 		go f([]*int{}...)                   // ERROR "\[\]\*int{} does not escape$"
-		go f([]*int{new(int)}...)           // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
-		go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
+		go f([]*int{new(int)}...)           // ERROR "\[\]\*int{...} does not escape$" "new\(int\) does not escape$"
+		go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) does not escape$"
 	}
 }
diff --git a/test/fixedbugs/issue32680b.go b/test/fixedbugs/issue32680b.go
index 61e5317..09bc3a9 100644
--- a/test/fixedbugs/issue32680b.go
+++ b/test/fixedbugs/issue32680b.go
@@ -12,5 +12,4 @@
 
 func doStuff(data []byte) uint64 {
 	return hashBytesRaw(data[0], data[1], data[2], data[3], data[7])
-
 }
diff --git a/test/fixedbugs/issue33275_run.go b/test/fixedbugs/issue33275_run.go
index f2818b0..542cc03 100644
--- a/test/fixedbugs/issue33275_run.go
+++ b/test/fixedbugs/issue33275_run.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!wasip1,!gccgo
 // run
 
+//go:build !nacl && !js && !wasip1 && !gccgo
+
 // Copyright 2019 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue33555.go b/test/fixedbugs/issue33555.go
index e7fe41e..90d0244 100644
--- a/test/fixedbugs/issue33555.go
+++ b/test/fixedbugs/issue33555.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!wasip1,!gccgo
 // run
 
+//go:build !nacl && !js && !wasip1 && !gccgo
+
 // Copyright 2019 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue34968.go b/test/fixedbugs/issue34968.go
index 6b1dbd1..f328f04 100644
--- a/test/fixedbugs/issue34968.go
+++ b/test/fixedbugs/issue34968.go
@@ -1,6 +1,7 @@
-// +build cgo
 // run -gcflags=all=-d=checkptr
 
+//go:build cgo
+
 // Copyright 2019 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue35073.go b/test/fixedbugs/issue35073a.go
similarity index 100%
rename from test/fixedbugs/issue35073.go
rename to test/fixedbugs/issue35073a.go
diff --git a/test/fixedbugs/issue35073b.go b/test/fixedbugs/issue35073b.go
new file mode 100644
index 0000000..8cdc6c1
--- /dev/null
+++ b/test/fixedbugs/issue35073b.go
@@ -0,0 +1,23 @@
+// errorcheck -0 -d=checkptr -m
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that we can inline the receiver arguments for
+// reflect.Value.UnsafeAddr/Pointer, even in checkptr mode.
+
+package main
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+func main() {
+	n := 10                      // ERROR "moved to heap: n"
+	m := make(map[string]string) // ERROR "moved to heap: m" "make\(map\[string\]string\) escapes to heap"
+
+	_ = unsafe.Pointer(reflect.ValueOf(&n).Elem().UnsafeAddr()) // ERROR "inlining call"
+	_ = unsafe.Pointer(reflect.ValueOf(&m).Elem().Pointer())    // ERROR "inlining call"
+}
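
For context on the inlining expectation above: reflect.Value.UnsafeAddr and reflect.Value.Pointer are small accessors, and keeping them inlinable even with -d=checkptr instrumentation keeps the common "address through reflect" pattern cheap. The sketch below only illustrates that pattern; it uses the single-expression uintptr-to-Pointer conversion that the unsafe rules require.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	x := 42
	// Recover x's address through reflect; the Elem().UnsafeAddr() chain is
	// what the test above expects to be inlined.
	p := unsafe.Pointer(reflect.ValueOf(&x).Elem().UnsafeAddr())
	*(*int)(p) = 43
	fmt.Println(x) // 43
}
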
diff --git a/test/fixedbugs/issue36437.go b/test/fixedbugs/issue36437.go
index c16e7ef..72ea50b 100644
--- a/test/fixedbugs/issue36437.go
+++ b/test/fixedbugs/issue36437.go
@@ -1,6 +1,6 @@
 // run
 
-// +build !nacl,!js,!wasip1,gc
+//go:build !nacl && !js && !wasip1 && gc
 
 // Copyright 2020 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue36516.go b/test/fixedbugs/issue36516.go
index d4e28b6..68f655a 100644
--- a/test/fixedbugs/issue36516.go
+++ b/test/fixedbugs/issue36516.go
@@ -1,6 +1,7 @@
-// +build cgo,linux,amd64
 // run -race
 
+//go:build cgo && linux && amd64
+
 // Copyright 2020 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue36705.go b/test/fixedbugs/issue36705.go
index b0485c6..9aad8a7 100644
--- a/test/fixedbugs/issue36705.go
+++ b/test/fixedbugs/issue36705.go
@@ -1,6 +1,7 @@
-// +build cgo,!windows
 // run fake-arg-to-force-use-of-go-run
 
+//go:build cgo && !windows
+
 // Copyright 2020 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue37513.go b/test/fixedbugs/issue37513.go
index e05b2d8..9570325 100644
--- a/test/fixedbugs/issue37513.go
+++ b/test/fixedbugs/issue37513.go
@@ -4,6 +4,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build linux,amd64 darwin,amd64 linux,386
+//go:build (linux && amd64) || (darwin && amd64) || (linux && 386)
 
 package ignored
diff --git a/test/fixedbugs/issue38093.go b/test/fixedbugs/issue38093.go
index db92664..af476ab 100644
--- a/test/fixedbugs/issue38093.go
+++ b/test/fixedbugs/issue38093.go
@@ -1,6 +1,7 @@
-// +build js
 // run
 
+//go:build js
+
 // Copyright 2020 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue4099.go b/test/fixedbugs/issue4099.go
index 5a4ea7c..edbbfeb 100644
--- a/test/fixedbugs/issue4099.go
+++ b/test/fixedbugs/issue4099.go
@@ -1,5 +1,7 @@
 // errorcheck -0 -m
 
+//go:build !goexperiment.newinliner
+
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
@@ -20,7 +22,7 @@
 func G() {
 	var buf1 [10]byte
 	F1(buf1[:])
-	
+
 	var buf2 [10]byte // ERROR "moved to heap: buf2"
 	F2(buf2[:])
 }
diff --git a/test/fixedbugs/issue42284.dir/a.go b/test/fixedbugs/issue42284.dir/a.go
index f7fd80b..ccf54fa 100644
--- a/test/fixedbugs/issue42284.dir/a.go
+++ b/test/fixedbugs/issue42284.dir/a.go
@@ -20,7 +20,7 @@
 
 func g() {
 	h := E() // ERROR "inlining call to E" "T\(0\) does not escape"
-	h.M()    // ERROR "devirtualizing h.M to T"
+	h.M()    // ERROR "devirtualizing h.M to T" "inlining call to T.M"
 
 	// BAD: T(0) could be stack allocated.
 	i := F(T(0)) // ERROR "inlining call to F" "T\(0\) escapes to heap"
diff --git a/test/fixedbugs/issue42284.dir/b.go b/test/fixedbugs/issue42284.dir/b.go
index 8cd93b8..559de59 100644
--- a/test/fixedbugs/issue42284.dir/b.go
+++ b/test/fixedbugs/issue42284.dir/b.go
@@ -8,7 +8,7 @@
 
 func g() {
 	h := a.E() // ERROR "inlining call to a.E" "T\(0\) does not escape"
-	h.M()      // ERROR "devirtualizing h.M to a.T"
+	h.M()      // ERROR "devirtualizing h.M to a.T" "inlining call to a.T.M"
 
 	// BAD: T(0) could be stack allocated.
 	i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) escapes to heap"
diff --git a/test/fixedbugs/issue42284.go b/test/fixedbugs/issue42284.go
index e5d6173..a1a525b 100644
--- a/test/fixedbugs/issue42284.go
+++ b/test/fixedbugs/issue42284.go
@@ -1,5 +1,7 @@
 // errorcheckdir -0 -m
 
+//go:build !goexperiment.newinliner
+
 // Copyright 2020 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue46234.go b/test/fixedbugs/issue46234.go
index ae28019..9c346dd 100644
--- a/test/fixedbugs/issue46234.go
+++ b/test/fixedbugs/issue46234.go
@@ -1,7 +1,6 @@
 // buildrun -t 45
 
 //go:build !js && !wasip1
-// +build !js,!wasip1
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue47185.go b/test/fixedbugs/issue47185.go
index 9c921b8..91c7e0a 100644
--- a/test/fixedbugs/issue47185.go
+++ b/test/fixedbugs/issue47185.go
@@ -1,6 +1,7 @@
-// +build cgo
 // runindir
 
+//go:build cgo
+
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue47227.go b/test/fixedbugs/issue47227.go
index a14efc9..14c3b15 100644
--- a/test/fixedbugs/issue47227.go
+++ b/test/fixedbugs/issue47227.go
@@ -5,7 +5,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build cgo
-// +build cgo
 
 package main
 
diff --git a/test/fixedbugs/issue51913.go b/test/fixedbugs/issue51913.go
new file mode 100644
index 0000000..50b670c
--- /dev/null
+++ b/test/fixedbugs/issue51913.go
@@ -0,0 +1,21 @@
+// run
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+var _ = func() int {
+	a = false
+	return 0
+}()
+
+var a = true
+var b = a
+
+func main() {
+	if b {
+		panic("FAIL")
+	}
+}
diff --git a/test/fixedbugs/issue52127.go b/test/fixedbugs/issue52127.go
index 68abe3f..b40c194 100644
--- a/test/fixedbugs/issue52127.go
+++ b/test/fixedbugs/issue52127.go
@@ -1,6 +1,6 @@
 // run
+
 //go:build !js && !wasip1
-// +build !js,!wasip1
 
 // Copyright 2022 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue52193.go b/test/fixedbugs/issue52193.go
new file mode 100644
index 0000000..40e6dcb
--- /dev/null
+++ b/test/fixedbugs/issue52193.go
@@ -0,0 +1,46 @@
+// errorcheck -0 -m
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import (
+	"crypto/ecdh"
+	"crypto/rand"
+)
+
+func F(peerShare []byte) ([]byte, error) { // ERROR "leaking param: peerShare"
+	p256 := ecdh.P256() // ERROR "inlining call to ecdh.P256"
+
+	ourKey, err := p256.GenerateKey(rand.Reader) // ERROR "devirtualizing p256.GenerateKey" "inlining call to ecdh.*GenerateKey"
+	if err != nil {
+		return nil, err
+	}
+
+	peerPublic, err := p256.NewPublicKey(peerShare) // ERROR "devirtualizing p256.NewPublicKey" "inlining call to ecdh.*NewPublicKey"
+	if err != nil {
+		return nil, err
+	}
+
+	return ourKey.ECDH(peerPublic)
+}
+
+// Test that inlining doesn't break if devirtualization exposes a new
+// inlinable callee.
+
+func f() { // ERROR "can inline f"
+	var i interface{ m() } = T(0) // ERROR "T\(0\) does not escape"
+	i.m()                         // ERROR "devirtualizing i.m"
+}
+
+type T int
+
+func (T) m() { // ERROR "can inline T.m"
+	if never {
+		f() // ERROR "inlining call to f" "devirtualizing i.m" "T\(0\) does not escape"
+	}
+}
+
+var never bool
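
The second half of the new test is about ordering inside the compiler: devirtualizing a call can expose a new inlinable callee, and the test checks that inlining copes with callees discovered that way. A standalone sketch of the pattern, with made-up names, that `go build -gcflags=-m` should report as both "devirtualizing" and "inlining call":

package main

type counter interface{ bump() int }

type fast int

func (f fast) bump() int { return int(f) + 1 }

func run() int {
	var c counter = fast(41) // concrete type is statically known here
	return c.bump()          // devirtualized to fast.bump, then inlined
}

func main() {
	println(run()) // 42
}
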
diff --git a/test/fixedbugs/issue52697.go b/test/fixedbugs/issue52697.go
new file mode 100644
index 0000000..da08a4f
--- /dev/null
+++ b/test/fixedbugs/issue52697.go
@@ -0,0 +1,25 @@
+// errorcheck
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !386 && !amd64p32 && !arm && !mips && !mipsle
+
+package main
+
+func g() { // GC_ERROR "stack frame too large"
+	xs := [3000 * 2000][33]int{}
+	for _, x := range xs {
+		if len(x) > 50 {
+
+		}
+	}
+}
+
+func main() { // GC_ERROR "stack frame too large"
+	defer f()
+	g()
+}
+
+func f() {}
diff --git a/test/fixedbugs/issue56923.go b/test/fixedbugs/issue56923.go
index 700a104..7cc9494 100644
--- a/test/fixedbugs/issue56923.go
+++ b/test/fixedbugs/issue56923.go
@@ -18,7 +18,6 @@
 
 func New[T any](f func(a, b T) bool) Eq[T] {
 	return EqFunc[T](f)
-
 }
 
 func Equal(a, b []byte) bool {
diff --git a/test/fixedbugs/issue6036.go b/test/fixedbugs/issue6036.go
index 8ebef5a..e7c2557 100644
--- a/test/fixedbugs/issue6036.go
+++ b/test/fixedbugs/issue6036.go
@@ -1,6 +1,7 @@
-// +build !386,!arm,!mips,!mipsle,!amd64p32
 // compile
 
+//go:build !386 && !arm && !mips && !mipsle && !amd64p32
+
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fixedbugs/issue61895.go b/test/fixedbugs/issue61895.go
new file mode 100644
index 0000000..cda6494
--- /dev/null
+++ b/test/fixedbugs/issue61895.go
@@ -0,0 +1,15 @@
+// compile
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+	for {
+	}
+
+	defer func() {}()
+	defer func() {}()
+}
diff --git a/test/fixedbugs/issue62313.go b/test/fixedbugs/issue62313.go
new file mode 100644
index 0000000..139f1eb
--- /dev/null
+++ b/test/fixedbugs/issue62313.go
@@ -0,0 +1,13 @@
+// compile
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f() {
+	var err error = nil
+	defer func() { _ = &err }()
+	err.Error()
+}
diff --git a/test/fixedbugs/issue62360.go b/test/fixedbugs/issue62360.go
new file mode 100644
index 0000000..e81c60f
--- /dev/null
+++ b/test/fixedbugs/issue62360.go
@@ -0,0 +1,24 @@
+// run
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"math/big"
+)
+
+//go:noinline
+func f(x uint32) *big.Int {
+	return big.NewInt(int64(x))
+}
+func main() {
+	b := f(0xffffffff)
+	c := big.NewInt(0xffffffff)
+	if b.Cmp(c) != 0 {
+		panic(fmt.Sprintf("b:%x c:%x", b, c))
+	}
+}
diff --git a/test/fixedbugs/issue62515.go b/test/fixedbugs/issue62515.go
new file mode 100644
index 0000000..8d9a580
--- /dev/null
+++ b/test/fixedbugs/issue62515.go
@@ -0,0 +1,27 @@
+// compile
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Unified frontend generated unnecessary temporaries for expressions
+// within unsafe.Sizeof, etc functions.
+
+package main
+
+import "unsafe"
+
+func F[G int](g G) (uintptr, uintptr, uintptr) {
+	var c chan func() int
+	type s struct {
+		g G
+		x []int
+	}
+	return unsafe.Sizeof(s{g, make([]int, (<-c)())}),
+		unsafe.Alignof(s{g, make([]int, (<-c)())}),
+		unsafe.Offsetof(s{g, make([]int, (<-c)())}.x)
+}
+
+func main() {
+	F(0)
+}
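
The test just above leans on a property of unsafe.Sizeof, unsafe.Alignof and unsafe.Offsetof that is easy to forget: their operands are only type-checked, never evaluated, and (absent variable-size type parameters) the results are compile-time constants, so no temporaries and no side effects should be produced for them. A runnable sketch of that property:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var c chan int // nil channel: actually receiving from it would block forever
	type s struct {
		n int
		x []int
	}
	// The composite literal, including both channel receives, is never executed.
	fmt.Println(unsafe.Sizeof(s{n: <-c, x: make([]int, <-c)}))
	fmt.Println(unsafe.Offsetof(s{}.x))
}
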
diff --git a/test/fixedbugs/issue63333.go b/test/fixedbugs/issue63333.go
new file mode 100644
index 0000000..e14b367
--- /dev/null
+++ b/test/fixedbugs/issue63333.go
@@ -0,0 +1,15 @@
+// errorcheck -goexperiment fieldtrack
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f(interface{ m() }) {}
+func g()                 { f(new(T)) } // ERROR "m method is marked 'nointerface'"
+
+type T struct{}
+
+//go:nointerface
+func (*T) m() {}
diff --git a/test/fixedbugs/issue63436.go b/test/fixedbugs/issue63436.go
new file mode 100644
index 0000000..bbd34a5
--- /dev/null
+++ b/test/fixedbugs/issue63436.go
@@ -0,0 +1,9 @@
+// compile
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = (new)(int)
diff --git a/test/fixedbugs/issue63462.go b/test/fixedbugs/issue63462.go
new file mode 100644
index 0000000..09b7e25
--- /dev/null
+++ b/test/fixedbugs/issue63462.go
@@ -0,0 +1,18 @@
+// compile
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f() {
+	for b := "" < join([]string{}, "") && true; ; {
+		_ = b
+	}
+}
+
+//go:noinline
+func join(elems []string, sep string) string {
+	return ""
+}
diff --git a/test/fixedbugs/issue63489a.go b/test/fixedbugs/issue63489a.go
new file mode 100644
index 0000000..b88120f
--- /dev/null
+++ b/test/fixedbugs/issue63489a.go
@@ -0,0 +1,16 @@
+// errorcheck -lang=go1.21
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.4
+
+package p
+
+const c = 0o123 // ERROR "file declares //go:build go1.4"
+
+// ERROR "file declares //go:build go1.4"
+
+//line issue63489a.go:13:1
+const d = 0o124
diff --git a/test/fixedbugs/issue63489b.go b/test/fixedbugs/issue63489b.go
new file mode 100644
index 0000000..2ad590d
--- /dev/null
+++ b/test/fixedbugs/issue63489b.go
@@ -0,0 +1,11 @@
+// errorcheck -lang=go1.4
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.4
+
+package p
+
+const c = 0o123 // ERROR "file declares //go:build go1.4"
diff --git a/test/fixedbugs/issue63490.go b/test/fixedbugs/issue63490.go
new file mode 100644
index 0000000..740ce9b
--- /dev/null
+++ b/test/fixedbugs/issue63490.go
@@ -0,0 +1,36 @@
+// compile
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type ResourceFunc struct {
+	junk [8]int
+	base assignmentBaseResource
+}
+
+type SubscriptionAssignmentResource struct {
+	base assignmentBaseResource
+}
+
+type assignmentBaseResource struct{}
+
+//go:noinline
+func (a assignmentBaseResource) f(s string) ResourceFunc {
+	println(s)
+	return ResourceFunc{}
+}
+
+//go:noinline
+func (r SubscriptionAssignmentResource) Hi() ResourceFunc {
+	rf := r.base.f("Hello world")
+	rf.base = r.base
+	return rf
+}
+
+func main() {
+	var r SubscriptionAssignmentResource
+	r.Hi()
+}
diff --git a/test/fixedbugs/issue63505.go b/test/fixedbugs/issue63505.go
new file mode 100644
index 0000000..2bec17d
--- /dev/null
+++ b/test/fixedbugs/issue63505.go
@@ -0,0 +1,45 @@
+// compile
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type explainer struct {
+	m map[string]string
+}
+
+func init() {
+	RegisterExplainer(newExplainer())
+}
+
+type Explainer interface {
+	Name() string
+	Map() map[string]string
+}
+
+func (e explainer) Name() string {
+	return "HelloWorldExplainer"
+}
+
+func (e explainer) Map() map[string]string {
+	return e.m
+}
+
+//go:noinline
+func newExplainer() explainer {
+	m := make(map[string]string)
+	m["Hello"] = "World!"
+	return explainer{m}
+}
+
+var explainers = make(map[string]Explainer)
+
+func RegisterExplainer(e Explainer) {
+	explainers[e.Name()] = e
+}
+
+func main() {
+
+}
diff --git a/test/fixedbugs/issue63657.go b/test/fixedbugs/issue63657.go
new file mode 100644
index 0000000..e32a4a3
--- /dev/null
+++ b/test/fixedbugs/issue63657.go
@@ -0,0 +1,48 @@
+// run
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure address calculations don't float up before
+// the corresponding nil check.
+
+package main
+
+type T struct {
+	a, b int
+}
+
+//go:noinline
+func f(x *T, p *bool, n int) {
+	*p = n != 0
+	useStack(1000)
+	g(&x.b)
+}
+
+//go:noinline
+func g(p *int) {
+}
+
+func useStack(n int) {
+	if n == 0 {
+		return
+	}
+	useStack(n - 1)
+}
+
+func main() {
+	mustPanic(func() {
+		var b bool
+		f(nil, &b, 3)
+	})
+}
+
+func mustPanic(f func()) {
+	defer func() {
+		if recover() == nil {
+			panic("expected panic, got nil")
+		}
+	}()
+	f()
+}
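The property issue63657.go pins down, restated: evaluating &x.b dereferences x, so with a nil x the program must panic there, and the compiler may not float that address calculation above the implicit nil check even when unrelated work (the useStack call) sits between them. A minimal standalone sketch of the same contract, using a hypothetical sink function only to keep the address computation from being discarded:

package main

type T struct{ a, b int }

//go:noinline
func sink(p *int) {}

func main() {
	defer func() {
		if recover() == nil {
			panic("expected nil-pointer panic")
		}
	}()
	var x *T
	sink(&x.b) // computing &x.b dereferences x, so this must panic
}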
diff --git a/test/fixedbugs/issue63955.go b/test/fixedbugs/issue63955.go
new file mode 100644
index 0000000..258e874
--- /dev/null
+++ b/test/fixedbugs/issue63955.go
@@ -0,0 +1,22 @@
+// compile
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package j
+
+func f(try func() int, shouldInc func() bool, N func(int) int) {
+	var n int
+loop: // we want to have 3 preds here, the function entry and both gotos
+	if v := try(); v == 42 || v == 1337 { // the two || are to trick findIndVar
+		if n < 30 { // this aims to be the matched block
+			if shouldInc() {
+				n++
+				goto loop
+			}
+			n = N(n) // try to prevent some block joining
+			goto loop
+		}
+	}
+}
diff --git a/test/fixedbugs/issue64565.go b/test/fixedbugs/issue64565.go
new file mode 100644
index 0000000..634025c
--- /dev/null
+++ b/test/fixedbugs/issue64565.go
@@ -0,0 +1,15 @@
+// run
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+	m := "0"
+	for _, c := range "321" {
+		m = max(string(c), m)
+		println(m)
+	}
+}
diff --git a/test/fixedbugs/issue64565.out b/test/fixedbugs/issue64565.out
new file mode 100644
index 0000000..1f242fa
--- /dev/null
+++ b/test/fixedbugs/issue64565.out
@@ -0,0 +1,3 @@
+3
+3
+3
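issue64565.go leans on the min/max builtins added in Go 1.21, which accept any ordered type; for strings the comparison is lexical, so once m has picked up "3" every later iteration keeps it, matching the three lines of the .out file. A small self-contained example of the same builtins:

package main

import "fmt"

func main() {
	// max and min are predeclared since Go 1.21 and work on any ordered type.
	fmt.Println(max("3", "2", "1")) // "3": strings compare lexically
	fmt.Println(min(4, 2, 7))       // 2
}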
diff --git a/test/fixedbugs/issue64606.go b/test/fixedbugs/issue64606.go
new file mode 100644
index 0000000..9b53c10
--- /dev/null
+++ b/test/fixedbugs/issue64606.go
@@ -0,0 +1,32 @@
+// build -race
+
+//go:build race
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+	var o any = uint64(5)
+	switch o.(type) {
+	case int:
+		goto ret
+	case int8:
+		goto ret
+	case int16:
+		goto ret
+	case int32:
+		goto ret
+	case int64:
+		goto ret
+	case float32:
+		goto ret
+	case float64:
+		goto ret
+	default:
+		goto ret
+	}
+ret:
+}
diff --git a/test/fixedbugs/issue64715.go b/test/fixedbugs/issue64715.go
new file mode 100644
index 0000000..bf11716
--- /dev/null
+++ b/test/fixedbugs/issue64715.go
@@ -0,0 +1,25 @@
+// run
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func boolInt32(b bool) int32 {
+	if b {
+		return 1
+	}
+
+	return 0
+}
+
+func f(left uint16, right int32) (r uint16) {
+	return left >> right
+}
+
+var n = uint16(65535)
+
+func main() {
+	println(f(n, boolInt32(int64(n^n) > 1)))
+}
diff --git a/test/fixedbugs/issue64715.out b/test/fixedbugs/issue64715.out
new file mode 100644
index 0000000..7a53b35
--- /dev/null
+++ b/test/fixedbugs/issue64715.out
@@ -0,0 +1 @@
+65535
diff --git a/test/fixedbugs/issue64826.go b/test/fixedbugs/issue64826.go
new file mode 100644
index 0000000..864c474
--- /dev/null
+++ b/test/fixedbugs/issue64826.go
@@ -0,0 +1,38 @@
+// build
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+	f(g(false))
+}
+func g(b bool) string {
+	if b {
+		return "z"
+	}
+	return "q"
+}
+func f(x string) int {
+	switch len(x) {
+	case 4:
+		return 4
+	case 5:
+		return 5
+	case 6:
+		return 6
+	case 7:
+		return 7
+	case 8:
+		return 8
+	case 9:
+		return 9
+	case 10:
+		return 10
+	case 11:
+		return 11
+	}
+	return 0
+}
diff --git a/test/fixedbugs/issue7921.go b/test/fixedbugs/issue7921.go
index f9efb7f..0f09951 100644
--- a/test/fixedbugs/issue7921.go
+++ b/test/fixedbugs/issue7921.go
@@ -1,6 +1,7 @@
-// +build !gcflags_noopt
 // errorcheck -0 -m
 
+//go:build !gcflags_noopt && !goexperiment.newinliner
+
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
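The change above, repeated across most of the remaining files in this diff, retires the legacy "// +build" comments in favor of the "//go:build" expressions available since Go 1.17. In the old form, spaces within a line meant OR, commas meant AND, and multiple "+build" lines were ANDed together; the new form spells the same condition with ordinary boolean operators and must still be separated from the package clause by a blank line. A hypothetical file header showing an equivalent pair:

//go:build (linux || darwin) && !race

// The constraint above replaces the pre-Go 1.17 spelling, where the same
// condition would have been written as two ANDed lines:
//
//	// +build linux darwin
//	// +build !race

package p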
diff --git a/test/fixedbugs/issue8606b.go b/test/fixedbugs/issue8606b.go
index 41b9a3d..6a56c0d 100644
--- a/test/fixedbugs/issue8606b.go
+++ b/test/fixedbugs/issue8606b.go
@@ -1,5 +1,6 @@
 // run
-// +build linux darwin
+
+//go:build linux || darwin
 
 // Copyright 2020 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue9355.go b/test/fixedbugs/issue9355.go
index 2670f15..4e6cbf0 100644
--- a/test/fixedbugs/issue9355.go
+++ b/test/fixedbugs/issue9355.go
@@ -1,6 +1,7 @@
-// +build !js,!wasip1,gc
 // run
 
+//go:build !js && !wasip1 && gc
+
 // Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
@@ -32,10 +33,10 @@
 
 	// 6g/8g print the offset as dec, but 5g/9g print the offset as hex.
 	patterns := []string{
-		`rel 0\+\d t=1 p\.x\+8\r?\n`,       // y = &x.b
-		`rel 0\+\d t=1 p\.x\+(28|1c)\r?\n`, // z = &x.d.q
-		`rel 0\+\d t=1 p\.b\+5\r?\n`,       // c = &b[5]
-		`rel 0\+\d t=1 p\.x\+(88|58)\r?\n`, // w = &x.f[3].r
+		`rel 0\+\d t=R_ADDR p\.x\+8\r?\n`,       // y = &x.b
+		`rel 0\+\d t=R_ADDR p\.x\+(28|1c)\r?\n`, // z = &x.d.q
+		`rel 0\+\d t=R_ADDR p\.b\+5\r?\n`,       // c = &b[5]
+		`rel 0\+\d t=R_ADDR p\.x\+(88|58)\r?\n`, // w = &x.f[3].r
 	}
 	for _, p := range patterns {
 		if ok, err := regexp.Match(p, out); !ok || err != nil {
diff --git a/test/fixedbugs/issue9604b.go b/test/fixedbugs/issue9604b.go
index d32116b..0033a6f 100644
--- a/test/fixedbugs/issue9604b.go
+++ b/test/fixedbugs/issue9604b.go
@@ -5,7 +5,7 @@
 // license that can be found in the LICENSE file.
 
 // terribly slow on wasm
-// +build !wasm
+//go:build !wasm
 
 package main
 
diff --git a/test/fixedbugs/issue9862_run.go b/test/fixedbugs/issue9862_run.go
index edf8dfa..8c5caa3 100644
--- a/test/fixedbugs/issue9862_run.go
+++ b/test/fixedbugs/issue9862_run.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!wasip1,gc
 // run
 
+//go:build !nacl && !js && !wasip1 && gc
+
 // Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/fuse.go b/test/fuse.go
index ea908bf..f64a087 100644
--- a/test/fuse.go
+++ b/test/fuse.go
@@ -1,6 +1,7 @@
-// +build amd64,!gcflags_noopt arm64,!gcflags_noopt
 // errorcheck -0 -d=ssa/late_fuse/debug=1
 
+//go:build (amd64 && !gcflags_noopt) || (arm64 && !gcflags_noopt)
+
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/gc2.go b/test/gc2.go
index 2f8eb9b..954a021 100644
--- a/test/gc2.go
+++ b/test/gc2.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js
 // run
 
+//go:build !nacl && !js
+
 // Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/inline.go b/test/inline.go
index 3a9cd5c..fd14f25 100644
--- a/test/inline.go
+++ b/test/inline.go
@@ -1,5 +1,7 @@
 // errorcheckwithauto -0 -m -d=inlfuncswithclosures=1
 
+//go:build !goexperiment.newinliner
+
 // Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
@@ -393,3 +395,33 @@
 		select2(x, y) // ERROR "inlining call to select2"
 	}
 }
+
+// Issue #62211: inlining a function with unreachable "return"
+// statements could trip up phi insertion.
+func issue62211(x bool) { // ERROR "can inline issue62211"
+	if issue62211F(x) { // ERROR "inlining call to issue62211F"
+	}
+	if issue62211G(x) { // ERROR "inlining call to issue62211G"
+	}
+
+	// Initial fix CL caused a "non-monotonic scope positions" failure
+	// on code like this.
+	if z := 0; false {
+		panic(z)
+	}
+}
+
+func issue62211F(x bool) bool { // ERROR "can inline issue62211F"
+	if x || true {
+		return true
+	}
+	return true
+}
+
+func issue62211G(x bool) bool { // ERROR "can inline issue62211G"
+	if x || true {
+		return true
+	} else {
+		return true
+	}
+}
diff --git a/test/inline_math_bits_rotate.go b/test/inline_math_bits_rotate.go
index a0341ea..ad15d60 100644
--- a/test/inline_math_bits_rotate.go
+++ b/test/inline_math_bits_rotate.go
@@ -1,6 +1,7 @@
-// +build amd64
 // errorcheck -0 -m
 
+//go:build amd64
+
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/inline_sync.go b/test/inline_sync.go
index 69e2a0e..eaa2176 100644
--- a/test/inline_sync.go
+++ b/test/inline_sync.go
@@ -37,7 +37,7 @@
 
 func small7() { // ERROR "can inline small7"
 	// the Do fast path should be inlined
-	once.Do(small5) // ERROR "inlining call to sync\.\(\*Once\)\.Do"
+	once.Do(small5) // ERROR "inlining call to sync\.\(\*Once\)\.Do" "inlining call to atomic\.\(\*Uint32\)\.Load"
 }
 
 var rwmutex *sync.RWMutex
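The extra pattern in the once.Do expectation above reflects that sync.Once now keeps its completion flag in an atomic.Uint32, so inlining the Do fast path also inlines atomic.(*Uint32).Load. A sketch of ordinary call-site usage, with hypothetical names, that benefits from that fast path:

package main

import (
	"fmt"
	"sync"
)

var once sync.Once

func initialize() { fmt.Println("ran exactly once") }

// The hot path of once.Do is a single atomic load of the done flag,
// which the compiler can now inline at this call site.
func get() { once.Do(initialize) }

func main() {
	get()
	get()
}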
diff --git a/test/intrinsic.go b/test/intrinsic.go
index 0b783d1..702b6ac 100644
--- a/test/intrinsic.go
+++ b/test/intrinsic.go
@@ -1,5 +1,6 @@
 // errorcheckandrundir -0 -d=ssa/intrinsics/debug
-// +build amd64 arm64 arm s390x
+
+//go:build amd64 || arm64 || arm || s390x
 
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/intrinsic_atomic.go b/test/intrinsic_atomic.go
index a1004c8..72038e6 100644
--- a/test/intrinsic_atomic.go
+++ b/test/intrinsic_atomic.go
@@ -1,5 +1,6 @@
 // errorcheck -0 -d=ssa/intrinsics/debug
-// +build amd64 arm64 loong64 mips mipsle mips64 mips64le ppc64 ppc64le riscv64 s390x
+
+//go:build amd64 || arm64 || loong64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x
 
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/linkmain.go b/test/linkmain.go
index af20ca5..18962a9 100644
--- a/test/linkmain.go
+++ b/test/linkmain.go
@@ -1,4 +1,4 @@
-// +build ignore
+//go:build ignore
 
 // Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/linkmain_run.go b/test/linkmain_run.go
index f2ab681..55aa652 100644
--- a/test/linkmain_run.go
+++ b/test/linkmain_run.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!wasip1
 // run
 
+//go:build !nacl && !js && !wasip1
+
 // Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/linkobj.go b/test/linkobj.go
index fcf3f88..8557026 100644
--- a/test/linkobj.go
+++ b/test/linkobj.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,gc,!wasip1
 // run
 
+//go:build !nacl && !js && gc && !wasip1
+
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/linkx_run.go b/test/linkx_run.go
index b7b1ea8..15f7c9a 100644
--- a/test/linkx_run.go
+++ b/test/linkx_run.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!wasip1,gc
 // run
 
+//go:build !nacl && !js && !wasip1 && gc
+
 // Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/live.go b/test/live.go
index 0e015db..5658c8b 100644
--- a/test/live.go
+++ b/test/live.go
@@ -1,6 +1,6 @@
 // errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off
+
 //go:build !ppc64 && !ppc64le && !goexperiment.regabiargs
-// +build !ppc64,!ppc64le,!goexperiment.regabiargs
 
 // ppc64 needs a better tighten pass to make f18 pass
 // rescheduling checks need to be turned off because there are some live variables across the inserted check call
@@ -167,7 +167,7 @@
 
 // this used to have a spurious "live at entry to f11a: ~r0"
 func f11a() *int {
-	select { // ERROR "stack object .autotmp_[0-9]+ \[2\]struct"
+	select { // ERROR "stack object .autotmp_[0-9]+ \[2\]runtime.scase$"
 	case <-c:
 		return nil
 	case <-c:
@@ -182,7 +182,7 @@
 		// get to the bottom of the function.
 		// This used to have a spurious "live at call to printint: p".
 		printint(1) // nothing live here!
-		select {    // ERROR "stack object .autotmp_[0-9]+ \[2\]struct"
+		select {    // ERROR "stack object .autotmp_[0-9]+ \[2\]runtime.scase$"
 		case <-c:
 			return nil
 		case <-c:
@@ -202,7 +202,7 @@
 		// Unlike previous, the cases in this select fall through,
 		// so we can get to the println, so p is not dead.
 		printint(1) // ERROR "live at call to printint: p$"
-		select {    // ERROR "live at call to selectgo: p$" "stack object .autotmp_[0-9]+ \[2\]struct"
+		select {    // ERROR "live at call to selectgo: p$" "stack object .autotmp_[0-9]+ \[2\]runtime.scase$"
 		case <-c:
 		case <-c:
 		}
@@ -458,7 +458,7 @@
 
 func f29(b bool) {
 	if b {
-		for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ map.iter\[string\]int$"
+		for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hiter$"
 			printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
 		}
 	}
@@ -600,7 +600,7 @@
 	// we care that the println lines have no live variables
 	// and therefore no output.
 	if b {
-		select { // ERROR "live at call to selectgo:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ \[4\]struct \{"
+		select { // ERROR "live at call to selectgo:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ \[4\]runtime.scase$"
 		case <-fc38():
 			printnl()
 		case fc38() <- *fi38(1): // ERROR "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ string$"
@@ -667,7 +667,7 @@
 
 func good40() {
 	ret := T40{}              // ERROR "stack object ret T40$"
-	ret.m = make(map[int]int) // ERROR "live at call to fastrand: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ map.hdr\[int\]int$"
+	ret.m = make(map[int]int) // ERROR "live at call to rand32: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hmap$"
 	t := &ret
 	printnl() // ERROR "live at call to printnl: ret$"
 	// Note: ret is live at the printnl because the compiler moves &ret
diff --git a/test/live2.go b/test/live2.go
index 83a6cb7..2beac4f 100644
--- a/test/live2.go
+++ b/test/live2.go
@@ -27,14 +27,14 @@
 }
 
 func bad40() {
-	t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ map.hdr\[int\]int$"
+	t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ runtime.hmap$"
 	printnl()     // ERROR "live at call to printnl: ret$"
 	useT40(t)
 }
 
 func good40() {
 	ret := T40{}                  // ERROR "stack object ret T40$"
-	ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ map.hdr\[int\]int$"
+	ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ runtime.hmap$"
 	t := &ret
 	printnl() // ERROR "live at call to printnl: ret$"
 	useT40(t)
diff --git a/test/live_regabi.go b/test/live_regabi.go
index bae319d..a335126 100644
--- a/test/live_regabi.go
+++ b/test/live_regabi.go
@@ -1,6 +1,6 @@
 // errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off
+
 //go:build (amd64 && goexperiment.regabiargs) || (arm64 && goexperiment.regabiargs)
-// +build amd64,goexperiment.regabiargs arm64,goexperiment.regabiargs
 
 // Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -164,7 +164,7 @@
 
 // this used to have a spurious "live at entry to f11a: ~r0"
 func f11a() *int {
-	select { // ERROR "stack object .autotmp_[0-9]+ \[2\]struct"
+	select { // ERROR "stack object .autotmp_[0-9]+ \[2\]runtime.scase$"
 	case <-c:
 		return nil
 	case <-c:
@@ -179,7 +179,7 @@
 		// get to the bottom of the function.
 		// This used to have a spurious "live at call to printint: p".
 		printint(1) // nothing live here!
-		select {    // ERROR "stack object .autotmp_[0-9]+ \[2\]struct"
+		select {    // ERROR "stack object .autotmp_[0-9]+ \[2\]runtime.scase$"
 		case <-c:
 			return nil
 		case <-c:
@@ -199,7 +199,7 @@
 		// Unlike previous, the cases in this select fall through,
 		// so we can get to the println, so p is not dead.
 		printint(1) // ERROR "live at call to printint: p$"
-		select {    // ERROR "live at call to selectgo: p$" "stack object .autotmp_[0-9]+ \[2\]struct"
+		select {    // ERROR "live at call to selectgo: p$" "stack object .autotmp_[0-9]+ \[2\]runtime.scase$"
 		case <-c:
 		case <-c:
 		}
@@ -455,7 +455,7 @@
 
 func f29(b bool) {
 	if b {
-		for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ map.iter\[string\]int$"
+		for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hiter$"
 			printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
 		}
 	}
@@ -597,7 +597,7 @@
 	// we care that the println lines have no live variables
 	// and therefore no output.
 	if b {
-		select { // ERROR "live at call to selectgo:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ \[4\]struct \{"
+		select { // ERROR "live at call to selectgo:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ \[4\]runtime.scase$"
 		case <-fc38():
 			printnl()
 		case fc38() <- *fi38(1): // ERROR "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ string$"
@@ -664,7 +664,7 @@
 
 func good40() {
 	ret := T40{}              // ERROR "stack object ret T40$"
-	ret.m = make(map[int]int) // ERROR "live at call to fastrand: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ map.hdr\[int\]int$"
+	ret.m = make(map[int]int) // ERROR "live at call to rand32: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hmap$"
 	t := &ret
 	printnl() // ERROR "live at call to printnl: ret$"
 	// Note: ret is live at the printnl because the compiler moves &ret
diff --git a/test/live_uintptrkeepalive.go b/test/live_uintptrkeepalive.go
index 566734e..ae41101 100644
--- a/test/live_uintptrkeepalive.go
+++ b/test/live_uintptrkeepalive.go
@@ -1,6 +1,6 @@
 // errorcheck -0 -m -live -std
 
-// +build !windows,!js,!wasip1
+//go:build !windows && !js && !wasip1
 
 // Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -22,7 +22,7 @@
 	"unsafe"
 )
 
-func implicit(uintptr) // ERROR "assuming arg#1 is unsafe uintptr"
+func implicit(uintptr) // ERROR "assuming ~p0 is unsafe uintptr"
 
 //go:uintptrkeepalive
 //go:nosplit
@@ -47,13 +47,13 @@
 func localImplicit() { // ERROR "can inline localImplicit"
 	var t int
 	p := unsafe.Pointer(&t)
-	implicit(uintptr(p))           // ERROR "live at call to implicit: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+	implicit(uintptr(p)) // ERROR "live at call to implicit: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
 }
 
 func localExplicit() { // ERROR "can inline localExplicit"
 	var t int
 	p := unsafe.Pointer(&t)
-	explicit(uintptr(p))           // ERROR "live at call to explicit: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+	explicit(uintptr(p)) // ERROR "live at call to explicit: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
 }
 
 func localSyscall() { // ERROR "can inline localSyscall"
diff --git a/test/loopbce.go b/test/loopbce.go
index fcf0d8d..04c186b 100644
--- a/test/loopbce.go
+++ b/test/loopbce.go
@@ -1,6 +1,7 @@
-// +build amd64
 // errorcheck -0 -d=ssa/prove/debug=1
 
+//go:build amd64
+
 package main
 
 import "math"
@@ -58,7 +59,7 @@
 func f5(a [10]int) int {
 	x := 0
 	for i := -10; i < len(a); i += 2 { // ERROR "Induction variable: limits \[-10,8\], increment 2$"
-		x += a[i]
+		x += a[i+10]
 	}
 	return x
 }
@@ -66,7 +67,7 @@
 func f5_int32(a [10]int) int {
 	x := 0
 	for i := int32(-10); i < int32(len(a)); i += 2 { // ERROR "Induction variable: limits \[-10,8\], increment 2$"
-		x += a[i]
+		x += a[i+10]
 	}
 	return x
 }
@@ -74,7 +75,7 @@
 func f5_int16(a [10]int) int {
 	x := 0
 	for i := int16(-10); i < int16(len(a)); i += 2 { // ERROR "Induction variable: limits \[-10,8\], increment 2$"
-		x += a[i]
+		x += a[i+10]
 	}
 	return x
 }
@@ -82,7 +83,7 @@
 func f5_int8(a [10]int) int {
 	x := 0
 	for i := int8(-10); i < int8(len(a)); i += 2 { // ERROR "Induction variable: limits \[-10,8\], increment 2$"
-		x += a[i]
+		x += a[i+10]
 	}
 	return x
 }
@@ -201,6 +202,10 @@
 
 func k0(a [100]int) [100]int {
 	for i := 10; i < 90; i++ { // ERROR "Induction variable: limits \[10,90\), increment 1$"
+		if a[0] == 0xdeadbeef {
+			// This is a trick to prevent sccp from optimizing away the following out-of-bounds check
+			continue
+		}
 		a[i-11] = i
 		a[i-10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
 		a[i-5] = i  // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
@@ -214,6 +219,10 @@
 
 func k1(a [100]int) [100]int {
 	for i := 10; i < 90; i++ { // ERROR "Induction variable: limits \[10,90\), increment 1$"
+		if a[0] == 0xdeadbeef {
+			// This is a trick to prevent sccp from optimizing away the following out-of-bounds check
+			continue
+		}
 		useSlice(a[:i-11])
 		useSlice(a[:i-10]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
 		useSlice(a[:i-5])  // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
@@ -229,6 +238,10 @@
 
 func k2(a [100]int) [100]int {
 	for i := 10; i < 90; i++ { // ERROR "Induction variable: limits \[10,90\), increment 1$"
+		if a[0] == 0xdeadbeef {
+			// This is a trick to prevent sccp from optimizing away the following out-of-bounds check
+			continue
+		}
 		useSlice(a[i-11:])
 		useSlice(a[i-10:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
 		useSlice(a[i-5:])  // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
@@ -243,6 +256,10 @@
 
 func k3(a [100]int) [100]int {
 	for i := -10; i < 90; i++ { // ERROR "Induction variable: limits \[-10,90\), increment 1$"
+		if a[0] == 0xdeadbeef {
+			// This is a trick to prevent sccp from optimizing away the following out-of-bounds check
+			continue
+		}
 		a[i+9] = i
 		a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
 		a[i+11] = i
@@ -252,6 +269,10 @@
 
 func k3neg(a [100]int) [100]int {
 	for i := 89; i > -11; i-- { // ERROR "Induction variable: limits \(-11,89\], increment 1$"
+		if a[0] == 0xdeadbeef {
+			// This is a trick to prevent sccp from optimizing away the following out-of-bounds check
+			continue
+		}
 		a[i+9] = i
 		a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
 		a[i+11] = i
@@ -261,6 +282,10 @@
 
 func k3neg2(a [100]int) [100]int {
 	for i := 89; i >= -10; i-- { // ERROR "Induction variable: limits \[-10,89\], increment 1$"
+		if a[0] == 0xdeadbeef {
+			// This is a trick to prevent sccp from optimizing away the following out-of-bounds check
+			continue
+		}
 		a[i+9] = i
 		a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
 		a[i+11] = i
@@ -411,7 +436,6 @@
 	min := int64((-1) << 63)
 	max := int64((1 << 63) - 1)
 	for i := min; i < max; i++ { // ERROR "Induction variable: limits \[-9223372036854775808,9223372036854775807\), increment 1$"
-		a[i] = i
 	}
 	return a
 }
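The a[0] == 0xdeadbeef guards added to k0 through k3neg2 introduce a branch whose outcome sccp cannot fold, keeping the later accesses and their bounds checks in the function so the "Proved IsInBounds" and "Proved IsSliceInBounds" diagnostics the test asserts still appear. For contrast, a hypothetical loop where prove does discharge the check outright; building it with -gcflags='-d=ssa/prove/debug=1' should report the proof:

package p

func sum(a [100]int) int {
	x := 0
	for i := 10; i < 90; i++ {
		// i is provably within [10,90), so the bounds check on a[i]
		// is redundant and prove reports it as proved.
		x += a[i]
	}
	return x
}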
diff --git a/test/maplinear.go b/test/maplinear.go
index 34d0914..dbc6827 100644
--- a/test/maplinear.go
+++ b/test/maplinear.go
@@ -1,6 +1,7 @@
-// +build darwin linux
 // run
 
+//go:build darwin || linux
+
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/newinline.go b/test/newinline.go
new file mode 100644
index 0000000..69f1310
--- /dev/null
+++ b/test/newinline.go
@@ -0,0 +1,397 @@
+// errorcheckwithauto -0 -m -d=inlfuncswithclosures=1
+
+//go:build goexperiment.newinliner
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test, using compiler diagnostic flags, that inlining is working.
+// Compiles but does not run.
+
+package foo
+
+import (
+	"errors"
+	"runtime"
+	"unsafe"
+)
+
+func add2(p *byte, n uintptr) *byte { // ERROR "can inline add2" "leaking param: p to result"
+	return (*byte)(add1(unsafe.Pointer(p), n)) // ERROR "inlining call to add1"
+}
+
+func add1(p unsafe.Pointer, x uintptr) unsafe.Pointer { // ERROR "can inline add1" "leaking param: p to result"
+	return unsafe.Pointer(uintptr(p) + x)
+}
+
+func f(x *byte) *byte { // ERROR "can inline f" "leaking param: x to result"
+	return add2(x, 1) // ERROR "inlining call to add2" "inlining call to add1"
+}
+
+//go:noinline
+func g(x int) int {
+	return x + 1
+}
+
+func h(x int) int { // ERROR "can inline h"
+	return x + 2
+}
+
+func i(x int) int { // ERROR "can inline i"
+	const y = 2
+	return x + y
+}
+
+func j(x int) int { // ERROR "can inline j"
+	switch {
+	case x > 0:
+		return x + 2
+	default:
+		return x + 1
+	}
+}
+
+func f2() int { // ERROR "can inline f2"
+	tmp1 := h
+	tmp2 := tmp1
+	return tmp2(0) // ERROR "inlining call to h"
+}
+
+var abc = errors.New("abc") // ERROR "inlining call to errors.New"
+
+var somethingWrong error
+
+// local closures can be inlined
+func l(x, y int) (int, int, error) { // ERROR "can inline l"
+	e := func(err error) (int, int, error) { // ERROR "can inline l.func1" "func literal does not escape" "leaking param: err to result"
+		return 0, 0, err
+	}
+	if x == y {
+		e(somethingWrong) // ERROR "inlining call to l.func1"
+	} else {
+		f := e
+		f(nil) // ERROR "inlining call to l.func1"
+	}
+	return y, x, nil
+}
+
+// any re-assignment prevents closure inlining
+func m() int {
+	foo := func() int { return 1 } // ERROR "can inline m.func1" "func literal does not escape"
+	x := foo()
+	foo = func() int { return 2 } // ERROR "can inline m.func2" "func literal does not escape"
+	return x + foo()
+}
+
+// address taking prevents closure inlining
+func n() int { // ERROR "can inline n"
+	foo := func() int { return 1 } // ERROR "can inline n.func1" "func literal does not escape"
+	bar := &foo
+	x := (*bar)() + foo()
+	return x
+}
+
+// make sure assignment inside closure is detected
+func o() int { // ERROR "can inline o"
+	foo := func() int { return 1 } // ERROR "can inline o.func1" "func literal does not escape"
+	func(x int) {                  // ERROR "can inline o.func2"
+		if x > 10 {
+			foo = func() int { return 2 } // ERROR "can inline o.func2"
+		}
+	}(11) // ERROR "func literal does not escape" "inlining call to o.func2"
+	return foo()
+}
+
+func p() int { // ERROR "can inline p"
+	return func() int { return 42 }() // ERROR "can inline p.func1" "inlining call to p.func1"
+}
+
+func q(x int) int { // ERROR "can inline q"
+	foo := func() int { return x * 2 } // ERROR "can inline q.func1" "func literal does not escape"
+	return foo()                       // ERROR "inlining call to q.func1"
+}
+
+func r(z int) int { // ERROR "can inline r"
+	foo := func(x int) int { // ERROR "can inline r.func1" "func literal does not escape"
+		return x + z
+	}
+	bar := func(x int) int { // ERROR "func literal does not escape" "can inline r.func2"
+		return x + func(y int) int { // ERROR "can inline r.func2.1" "can inline r.r.func2.func3"
+			return 2*y + x*z
+		}(x) // ERROR "inlining call to r.func2.1"
+	}
+	return foo(42) + bar(42) // ERROR "inlining call to r.func1" "inlining call to r.func2" "inlining call to r.r.func2.func3"
+}
+
+func s0(x int) int { // ERROR "can inline s0"
+	foo := func() { // ERROR "can inline s0.func1" "func literal does not escape"
+		x = x + 1
+	}
+	foo() // ERROR "inlining call to s0.func1"
+	return x
+}
+
+func s1(x int) int { // ERROR "can inline s1"
+	foo := func() int { // ERROR "can inline s1.func1" "func literal does not escape"
+		return x
+	}
+	x = x + 1
+	return foo() // ERROR "inlining call to s1.func1"
+}
+
+func switchBreak(x, y int) int { // ERROR "can inline switchBreak"
+	var n int
+	switch x {
+	case 0:
+		n = 1
+	Done:
+		switch y {
+		case 0:
+			n += 10
+			break Done
+		}
+		n = 2
+	}
+	return n
+}
+
+func switchType(x interface{}) int { // ERROR "can inline switchType" "x does not escape"
+	switch x.(type) {
+	case int:
+		return x.(int)
+	default:
+		return 0
+	}
+}
+
+// Test that switches on constant things, with constant cases, only cost anything for
+// the case that matches. See issue 50253.
+func switchConst1(p func(string)) { // ERROR "can inline switchConst" "p does not escape"
+	const c = 1
+	switch c {
+	case 0:
+		p("zero")
+	case 1:
+		p("one")
+	case 2:
+		p("two")
+	default:
+		p("other")
+	}
+}
+
+func switchConst2() string { // ERROR "can inline switchConst2"
+	switch runtime.GOOS {
+	case "linux":
+		return "Leenooks"
+	case "windows":
+		return "Windoze"
+	case "darwin":
+		return "MackBone"
+	case "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "100":
+		return "Numbers"
+	default:
+		return "oh nose!"
+	}
+}
+func switchConst3() string { // ERROR "can inline switchConst3"
+	switch runtime.GOOS {
+	case "Linux":
+		panic("Linux")
+	case "Windows":
+		panic("Windows")
+	case "Darwin":
+		panic("Darwin")
+	case "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "100":
+		panic("Numbers")
+	default:
+		return "oh nose!"
+	}
+}
+func switchConst4() { // ERROR "can inline switchConst4"
+	const intSize = 32 << (^uint(0) >> 63)
+	want := func() string { // ERROR "can inline switchConst4.func1"
+		switch intSize {
+		case 32:
+			return "32"
+		case 64:
+			return "64"
+		default:
+			panic("unreachable")
+		}
+	}() // ERROR "inlining call to switchConst4.func1"
+	_ = want
+}
+
+func inlineRangeIntoMe(data []int) { // ERROR "can inline inlineRangeIntoMe" "data does not escape"
+	rangeFunc(data, 12) // ERROR "inlining call to rangeFunc"
+}
+
+func rangeFunc(xs []int, b int) int { // ERROR "can inline rangeFunc" "xs does not escape"
+	for i, x := range xs {
+		if x == b {
+			return i
+		}
+	}
+	return -1
+}
+
+type T struct{}
+
+func (T) meth(int, int) {} // ERROR "can inline T.meth"
+
+func k() (T, int, int) { return T{}, 0, 0 } // ERROR "can inline k"
+
+func f3() { // ERROR "can inline f3"
+	T.meth(k()) // ERROR "inlining call to k" "inlining call to T.meth"
+	// ERRORAUTO "inlining call to T.meth"
+}
+
+func small1() { // ERROR "can inline small1"
+	runtime.GC()
+}
+func small2() int { // ERROR "can inline small2"
+	return runtime.GOMAXPROCS(0)
+}
+func small3(t T) { // ERROR "can inline small3"
+	t.meth2(3, 5)
+}
+func small4(t T) { // ERROR "can inline small4"
+	t.meth2(runtime.GOMAXPROCS(0), 5)
+}
+func (T) meth2(int, int) { // ERROR "can inline T.meth2"
+	runtime.GC()
+	runtime.GC()
+}
+
+// Issue #29737 - make sure we can do inlining for a chain of recursive functions
+func ee() { // ERROR "can inline ee"
+	ff(100) // ERROR "inlining call to ff" "inlining call to gg" "inlining call to hh"
+}
+
+func ff(x int) { // ERROR "can inline ff"
+	if x < 0 {
+		return
+	}
+	gg(x - 1) // ERROR "inlining call to gg" "inlining call to hh"
+}
+func gg(x int) { // ERROR "can inline gg"
+	hh(x - 1) // ERROR "inlining call to hh" "inlining call to ff"
+}
+func hh(x int) { // ERROR "can inline hh"
+	ff(x - 1) // ERROR "inlining call to ff" "inlining call to gg"
+}
+
+// Issue #14768 - make sure we can inline for loops.
+func for1(fn func() bool) { // ERROR "can inline for1" "fn does not escape"
+	for {
+		if fn() {
+			break
+		} else {
+			continue
+		}
+	}
+}
+
+func for2(fn func() bool) { // ERROR "can inline for2" "fn does not escape"
+Loop:
+	for {
+		if fn() {
+			break Loop
+		} else {
+			continue Loop
+		}
+	}
+}
+
+// Issue #18493 - make sure we can do inlining of functions with a method value
+type T1 struct{}
+
+func (a T1) meth(val int) int { // ERROR "can inline T1.meth"
+	return val + 5
+}
+
+func getMeth(t1 T1) func(int) int { // ERROR "can inline getMeth"
+	return t1.meth // ERROR "t1.meth escapes to heap"
+	// ERRORAUTO "inlining call to T1.meth"
+}
+
+func ii() { // ERROR "can inline ii"
+	var t1 T1
+	f := getMeth(t1) // ERROR "inlining call to getMeth" "t1.meth does not escape"
+	_ = f(3)
+}
+
+// Issue #42194 - make sure that functions evaluated in
+// go and defer statements can be inlined.
+func gd1(int) {
+	defer gd1(gd2()) // ERROR "inlining call to gd2" "can inline gd1.deferwrap1"
+	defer gd3()()    // ERROR "inlining call to gd3"
+	go gd1(gd2())    // ERROR "inlining call to gd2" "can inline gd1.gowrap2"
+	go gd3()()       // ERROR "inlining call to gd3"
+}
+
+func gd2() int { // ERROR "can inline gd2"
+	return 1
+}
+
+func gd3() func() { // ERROR "can inline gd3"
+	return ii
+}
+
+// Issue #42788 - ensure ODEREF OCONVNOP* OADDR is low cost.
+func EncodeQuad(d []uint32, x [6]float32) { // ERROR "can inline EncodeQuad" "d does not escape"
+	_ = d[:6]
+	d[0] = float32bits(x[0]) // ERROR "inlining call to float32bits"
+	d[1] = float32bits(x[1]) // ERROR "inlining call to float32bits"
+	d[2] = float32bits(x[2]) // ERROR "inlining call to float32bits"
+	d[3] = float32bits(x[3]) // ERROR "inlining call to float32bits"
+	d[4] = float32bits(x[4]) // ERROR "inlining call to float32bits"
+	d[5] = float32bits(x[5]) // ERROR "inlining call to float32bits"
+}
+
+// float32bits is a copy of math.Float32bits to ensure that
+// these tests pass with `-gcflags=-l`.
+func float32bits(f float32) uint32 { // ERROR "can inline float32bits"
+	return *(*uint32)(unsafe.Pointer(&f))
+}
+
+// Ensure OCONVNOP is zero cost.
+func Conv(v uint64) uint64 { // ERROR "can inline Conv"
+	return conv2(conv2(conv2(v))) // ERROR "inlining call to (conv1|conv2)"
+}
+func conv2(v uint64) uint64 { // ERROR "can inline conv2"
+	return conv1(conv1(conv1(conv1(v)))) // ERROR "inlining call to conv1"
+}
+func conv1(v uint64) uint64 { // ERROR "can inline conv1"
+	return uint64(uint64(uint64(uint64(uint64(uint64(uint64(uint64(uint64(uint64(uint64(v)))))))))))
+}
+
+func select1(x, y chan bool) int { // ERROR "can inline select1" "x does not escape" "y does not escape"
+	select {
+	case <-x:
+		return 1
+	case <-y:
+		return 2
+	}
+}
+
+func select2(x, y chan bool) { // ERROR "can inline select2" "x does not escape" "y does not escape"
+loop: // test that labeled select can be inlined.
+	select {
+	case <-x:
+		break loop
+	case <-y:
+	}
+}
+
+func inlineSelect2(x, y chan bool) { // ERROR "can inline inlineSelect2" ERROR "x does not escape" "y does not escape"
+loop:
+	for i := 0; i < 5; i++ {
+		if i == 3 {
+			break loop
+		}
+		select2(x, y) // ERROR "inlining call to select2"
+	}
+}
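newinline.go mirrors the expectations of test/inline.go for builds with GOEXPERIMENT=newinliner enabled; the original file is now excluded from those builds by its new //go:build !goexperiment.newinliner line. Both depend on the errorcheck convention that an // ERROR "regexp" comment must match a compiler diagnostic emitted for that line. A minimal sketch of that convention with hypothetical functions, using only the -m inlining diagnostics:

// errorcheck -0 -m

package p

func addOne(x int) int { // ERROR "can inline addOne"
	return x + 1
}

func twice(x int) int { // ERROR "can inline twice"
	return addOne(addOne(x)) // ERROR "inlining call to addOne"
}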
diff --git a/test/nilptr.go b/test/nilptr.go
index b296c88..7f42e93 100644
--- a/test/nilptr.go
+++ b/test/nilptr.go
@@ -7,10 +7,8 @@
 // Test that the implementation catches nil ptr indirection
 // in a large address space.
 
-// +build !aix
-// +build !darwin !arm64
-// +build !windows !arm64
 // Address space starts at 1<<32 on AIX and on darwin/arm64 and on windows/arm64, so dummy is too far.
+//go:build !aix && (!darwin || !arm64) && (!windows || !arm64)
 
 package main
 
diff --git a/test/nilptr3.go b/test/nilptr3.go
index 5f08a5b..2cc510b 100644
--- a/test/nilptr3.go
+++ b/test/nilptr3.go
@@ -1,7 +1,6 @@
 // errorcheck -0 -d=nil
 
-// +build !wasm
-// +build !aix
+//go:build !wasm && !aix
 
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/nilptr5.go b/test/nilptr5.go
index 118746e..51a302f 100644
--- a/test/nilptr5.go
+++ b/test/nilptr5.go
@@ -1,7 +1,6 @@
 // errorcheck -0 -d=nil
 
 //go:build !wasm && !aix
-// +build !wasm,!aix
 
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/nilptr5_aix.go b/test/nilptr5_aix.go
index 1427807..3b116ca 100644
--- a/test/nilptr5_aix.go
+++ b/test/nilptr5_aix.go
@@ -1,6 +1,6 @@
 // errorcheck -0 -d=nil
 
-// +build aix
+//go:build aix
 
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/nilptr5_wasm.go b/test/nilptr5_wasm.go
index 6ef8a02..ea380d1 100644
--- a/test/nilptr5_wasm.go
+++ b/test/nilptr5_wasm.go
@@ -1,6 +1,6 @@
 // errorcheck -0 -d=nil
 
-// +build wasm
+//go:build wasm
 
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/nilptr_aix.go b/test/nilptr_aix.go
index ea5fcc3..f7a7e61 100644
--- a/test/nilptr_aix.go
+++ b/test/nilptr_aix.go
@@ -7,7 +7,7 @@
 // Test that the implementation catches nil ptr indirection
 // in a large address space.
 
-// +build aix
+//go:build aix
 
 package main
 
diff --git a/test/nosplit.go b/test/nosplit.go
index 2b1bb54..e171d1d 100644
--- a/test/nosplit.go
+++ b/test/nosplit.go
@@ -1,6 +1,7 @@
-// +build !nacl,!js,!aix,!wasip1,!gcflags_noopt,gc
 // run
 
+//go:build !nacl && !js && !aix && !wasip1 && !gcflags_noopt && gc
+
 // Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/nowritebarrier.go b/test/nowritebarrier.go
index 654f16d..d176e28 100644
--- a/test/nowritebarrier.go
+++ b/test/nowritebarrier.go
@@ -1,12 +1,14 @@
-// errorcheck -+
+// errorcheck -+ -p=runtime
 
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
 // Test go:nowritebarrier and related directives.
+// This must appear to be in package runtime so the compiler
+// recognizes "systemstack".
 
-package p
+package runtime
 
 type t struct {
 	f *t
diff --git a/test/opt_branchlikely.go b/test/opt_branchlikely.go
index 884c349..0aee33f 100644
--- a/test/opt_branchlikely.go
+++ b/test/opt_branchlikely.go
@@ -1,7 +1,8 @@
-// +build amd64
 // errorcheck -0 -d=ssa/likelyadjust/debug=1,ssa/insert_resched_checks/off
 // rescheduling check insertion is turned off because the inserted conditional branches perturb the errorcheck
 
+//go:build amd64
+
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/phiopt.go b/test/phiopt.go
index e04373e..9e21bfd 100644
--- a/test/phiopt.go
+++ b/test/phiopt.go
@@ -1,6 +1,7 @@
-// +build amd64 s390x arm64
 // errorcheck -0 -d=ssa/phiopt/debug=3
 
+//go:build amd64 || s390x || arm64
+
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/prove.go b/test/prove.go
index 91d1f55..1aea282 100644
--- a/test/prove.go
+++ b/test/prove.go
@@ -1,7 +1,6 @@
 // errorcheck -0 -d=ssa/prove/debug=1
 
 //go:build amd64
-// +build amd64
 
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/prove_constant_folding.go b/test/prove_constant_folding.go
index d4bdb20..ed63e68 100644
--- a/test/prove_constant_folding.go
+++ b/test/prove_constant_folding.go
@@ -1,6 +1,7 @@
-// +build amd64
 // errorcheck -0 -d=ssa/prove/debug=2
 
+//go:build amd64
+
 // Copyright 2022 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/prove_invert_loop_with_unused_iterators.go b/test/prove_invert_loop_with_unused_iterators.go
new file mode 100644
index 0000000..c66f20b
--- /dev/null
+++ b/test/prove_invert_loop_with_unused_iterators.go
@@ -0,0 +1,11 @@
+// errorcheck -0 -d=ssa/prove/debug=1
+
+//go:build amd64
+
+package main
+
+func invert(b func(), n int) {
+	for i := 0; i < n; i++ { // ERROR "(Inverted loop iteration|Induction variable: limits \[0,\?\), increment 1)"
+		b()
+	}
+}
diff --git a/test/range2.go b/test/range2.go
new file mode 100644
index 0000000..6ccf1e5
--- /dev/null
+++ b/test/range2.go
@@ -0,0 +1,24 @@
+// errorcheck -goexperiment rangefunc
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// See ../internal/types/testdata/spec/range.go for most tests.
+// The ones in this file cannot be expressed in that framework
+// due to conflicts between that framework's error location pickiness
+// and gofmt's comment location pickiness.
+
+package p
+
+type T struct{}
+
+func (*T) PM() {}
+func (T) M()   {}
+
+func test() {
+	for range T.M { // ERROR "cannot range over T.M \(value of type func\(T\)\): func must be func\(yield func\(...\) bool\): argument is not func"
+	}
+	for range (*T).PM { // ERROR "cannot range over \(\*T\).PM \(value of type func\(\*T\)\): func must be func\(yield func\(...\) bool\): argument is not func"
+	}
+}
diff --git a/test/range3.go b/test/range3.go
new file mode 100644
index 0000000..f58a398
--- /dev/null
+++ b/test/range3.go
@@ -0,0 +1,90 @@
+// run
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test the 'for range' construct ranging over integers.
+
+package main
+
+func testint1() {
+	bad := false
+	j := 0
+	for i := range int(4) {
+		if i != j {
+			println("range var", i, "want", j)
+			bad = true
+		}
+		j++
+	}
+	if j != 4 {
+		println("wrong count ranging over 4:", j)
+		bad = true
+	}
+	if bad {
+		panic("testint1")
+	}
+}
+
+func testint2() {
+	bad := false
+	j := 0
+	for i := range 4 {
+		if i != j {
+			println("range var", i, "want", j)
+			bad = true
+		}
+		j++
+	}
+	if j != 4 {
+		println("wrong count ranging over 4:", j)
+		bad = true
+	}
+	if bad {
+		panic("testint2")
+	}
+}
+
+func testint3() {
+	bad := false
+	type MyInt int
+	j := MyInt(0)
+	for i := range MyInt(4) {
+		if i != j {
+			println("range var", i, "want", j)
+			bad = true
+		}
+		j++
+	}
+	if j != 4 {
+		println("wrong count ranging over 4:", j)
+		bad = true
+	}
+	if bad {
+		panic("testint3")
+	}
+}
+
+// Issue #63378.
+func testint4() {
+	for i := range -1 {
+		_ = i
+		panic("must not be executed")
+	}
+}
+
+// Issue #64471.
+func testint5() {
+	for i := range 'a' {
+		var _ *rune = &i // ensure i has type rune
+	}
+}
+
+func main() {
+	testint1()
+	testint2()
+	testint3()
+	testint4()
+	testint5()
+}
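range3.go covers the Go 1.22 form of for-range over an integer: for i := range n yields i = 0 through n-1, a non-positive n runs the body zero times (testint4), and the loop variable takes the type of the range expression (the rune check in testint5). An illustrative standalone example:

package main

import "fmt"

func main() {
	sum := 0
	for i := range 5 { // i takes the values 0, 1, 2, 3, 4
		sum += i
	}
	fmt.Println(sum) // 10

	for range -1 { // a non-positive count runs the body zero times
		panic("unreachable")
	}
}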
diff --git a/test/range4.go b/test/range4.go
new file mode 100644
index 0000000..0b051f6
--- /dev/null
+++ b/test/range4.go
@@ -0,0 +1,351 @@
+// run -goexperiment rangefunc
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test the 'for range' construct ranging over functions.
+
+package main
+
+var gj int
+
+func yield4x(yield func() bool) {
+	_ = yield() && yield() && yield() && yield()
+}
+
+func yield4(yield func(int) bool) {
+	_ = yield(1) && yield(2) && yield(3) && yield(4)
+}
+
+func yield3(yield func(int) bool) {
+	_ = yield(1) && yield(2) && yield(3)
+}
+
+func yield2(yield func(int) bool) {
+	_ = yield(1) && yield(2)
+}
+
+func testfunc0() {
+	j := 0
+	for range yield4x {
+		j++
+	}
+	if j != 4 {
+		println("wrong count ranging over yield4x:", j)
+		panic("testfunc0")
+	}
+
+	j = 0
+	for _ = range yield4 {
+		j++
+	}
+	if j != 4 {
+		println("wrong count ranging over yield4:", j)
+		panic("testfunc0")
+	}
+}
+
+func testfunc1() {
+	bad := false
+	j := 1
+	for i := range yield4 {
+		if i != j {
+			println("range var", i, "want", j)
+			bad = true
+		}
+		j++
+	}
+	if j != 5 {
+		println("wrong count ranging over f:", j)
+		bad = true
+	}
+	if bad {
+		panic("testfunc1")
+	}
+}
+
+func testfunc2() {
+	bad := false
+	j := 1
+	var i int
+	for i = range yield4 {
+		if i != j {
+			println("range var", i, "want", j)
+			bad = true
+		}
+		j++
+	}
+	if j != 5 {
+		println("wrong count ranging over f:", j)
+		bad = true
+	}
+	if i != 4 {
+		println("wrong final i ranging over f:", i)
+		bad = true
+	}
+	if bad {
+		panic("testfunc2")
+	}
+}
+
+func testfunc3() {
+	bad := false
+	j := 1
+	var i int
+	for i = range yield4 {
+		if i != j {
+			println("range var", i, "want", j)
+			bad = true
+		}
+		j++
+		if i == 2 {
+			break
+		}
+		continue
+	}
+	if j != 3 {
+		println("wrong count ranging over f:", j)
+		bad = true
+	}
+	if i != 2 {
+		println("wrong final i ranging over f:", i)
+		bad = true
+	}
+	if bad {
+		panic("testfunc3")
+	}
+}
+
+func testfunc4() {
+	bad := false
+	j := 1
+	var i int
+	func() {
+		for i = range yield4 {
+			if i != j {
+				println("range var", i, "want", j)
+				bad = true
+			}
+			j++
+			if i == 2 {
+				return
+			}
+		}
+	}()
+	if j != 3 {
+		println("wrong count ranging over f:", j)
+		bad = true
+	}
+	if i != 2 {
+		println("wrong final i ranging over f:", i)
+		bad = true
+	}
+	if bad {
+		panic("testfunc4")
+	}
+}
+
+func func5() (int, int) {
+	for i := range yield4 {
+		return 10, i
+	}
+	panic("still here")
+}
+
+func testfunc5() {
+	x, y := func5()
+	if x != 10 || y != 1 {
+		println("wrong results", x, y, "want", 10, 1)
+		panic("testfunc5")
+	}
+}
+
+func func6() (z, w int) {
+	for i := range yield4 {
+		z = 10
+		w = i
+		return
+	}
+	panic("still here")
+}
+
+func testfunc6() {
+	x, y := func6()
+	if x != 10 || y != 1 {
+		println("wrong results", x, y, "want", 10, 1)
+		panic("testfunc6")
+	}
+}
+
+var saved []int
+
+func save(x int) {
+	saved = append(saved, x)
+}
+
+func printslice(s []int) {
+	print("[")
+	for i, x := range s {
+		if i > 0 {
+			print(", ")
+		}
+		print(x)
+	}
+	print("]")
+}
+
+func eqslice(s, t []int) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, x := range s {
+		if x != t[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func func7() {
+	defer save(-1)
+	for i := range yield4 {
+		defer save(i)
+	}
+	defer save(5)
+}
+
+func checkslice(name string, saved, want []int) {
+	if !eqslice(saved, want) {
+		print("wrong results ")
+		printslice(saved)
+		print(" want ")
+		printslice(want)
+		print("\n")
+		panic(name)
+	}
+}
+
+func testfunc7() {
+	saved = nil
+	func7()
+	want := []int{5, 4, 3, 2, 1, -1}
+	checkslice("testfunc7", saved, want)
+}
+
+func func8() {
+	defer save(-1)
+	for i := range yield2 {
+		for j := range yield3 {
+			defer save(i*10 + j)
+		}
+		defer save(i)
+	}
+	defer save(-2)
+	for i := range yield4 {
+		defer save(i)
+	}
+	defer save(-3)
+}
+
+func testfunc8() {
+	saved = nil
+	func8()
+	want := []int{-3, 4, 3, 2, 1, -2, 2, 23, 22, 21, 1, 13, 12, 11, -1}
+	checkslice("testfunc8", saved, want)
+}
+
+func func9() {
+	n := 0
+	for _ = range yield2 {
+		for _ = range yield3 {
+			n++
+			defer save(n)
+		}
+	}
+}
+
+func testfunc9() {
+	saved = nil
+	func9()
+	want := []int{6, 5, 4, 3, 2, 1}
+	checkslice("testfunc9", saved, want)
+}
+
+// test that range evaluates the index and value expressions
+// exactly once per iteration.
+
+var ncalls = 0
+
+func getvar(p *int) *int {
+	ncalls++
+	return p
+}
+
+func iter2(list ...int) func(func(int, int) bool) {
+	return func(yield func(int, int) bool) {
+		for i, x := range list {
+			if !yield(i, x) {
+				return
+			}
+		}
+	}
+}
+
+func testcalls() {
+	var i, v int
+	ncalls = 0
+	si := 0
+	sv := 0
+	for *getvar(&i), *getvar(&v) = range iter2(1, 2) {
+		si += i
+		sv += v
+	}
+	if ncalls != 4 {
+		println("wrong number of calls:", ncalls, "!= 4")
+		panic("fail")
+	}
+	if si != 1 || sv != 3 {
+		println("wrong sum in testcalls", si, sv)
+		panic("fail")
+	}
+}
+
+type iter3YieldFunc func(int, int) bool
+
+func iter3(list ...int) func(iter3YieldFunc) {
+	return func(yield iter3YieldFunc) {
+		for k, v := range list {
+			if !yield(k, v) {
+				return
+			}
+		}
+	}
+}
+
+func testcalls1() {
+	ncalls := 0
+	for k, v := range iter3(1, 2, 3) {
+		_, _ = k, v
+		ncalls++
+	}
+	if ncalls != 3 {
+		println("wrong number of calls:", ncalls, "!= 3")
+		panic("fail")
+	}
+}
+
+func main() {
+	testfunc0()
+	testfunc1()
+	testfunc2()
+	testfunc3()
+	testfunc4()
+	testfunc5()
+	testfunc6()
+	testfunc7()
+	testfunc8()
+	testfunc9()
+	testcalls()
+	testcalls1()
+}
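range4.go exercises range over functions, which at this version still sits behind GOEXPERIMENT=rangefunc (hence the run -goexperiment rangefunc header). The ranged-over value must look like func(yield func(...) bool); break, continue and return in the loop body are realized through yield's boolean result and the surrounding control flow, and the defer tests above pin down the ordering that results. A small sketch of the basic shape, with a hypothetical count iterator:

package main

import "fmt"

// count calls yield for 0..n-1 and stops early when yield returns false.
func count(n int) func(yield func(int) bool) {
	return func(yield func(int) bool) {
		for i := 0; i < n; i++ {
			if !yield(i) {
				return
			}
		}
	}
}

func main() {
	for i := range count(5) {
		fmt.Println(i)
		if i == 2 {
			break // surfaces to the iterator as yield returning false
		}
	}
}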
diff --git a/test/rangegen.go b/test/rangegen.go
new file mode 100644
index 0000000..8231c64
--- /dev/null
+++ b/test/rangegen.go
@@ -0,0 +1,350 @@
+// runoutput -goexperiment rangefunc
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Torture test for range-over-func.
+//
+// cmd/internal/testdir runs this like
+//
+//	go run rangegen.go >x.go
+//	go run x.go
+//
+// but a longer version can be run using
+//
+//	go run rangegen.go long
+//
+// In that second form, rangegen takes care of compiling
+// and running the code it generates, in batches.
+// That form takes 10-20 minutes to run.
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"math/bits"
+	"os"
+	"os/exec"
+	"strings"
+)
+
+const verbose = false
+
+func main() {
+	long := len(os.Args) > 1 && os.Args[1] == "long"
+	log.SetFlags(0)
+	log.SetPrefix("rangegen: ")
+
+	if !long && bits.UintSize == 32 {
+		// Skip this test on 32-bit platforms, where it seems to
+		// cause timeouts and build problems.
+		skip()
+		return
+	}
+
+	b := new(bytes.Buffer)
+	tests := ""
+	flush := func(force bool) {
+		if !long || (strings.Count(tests, "\n") < 1000 && !force) {
+			return
+		}
+		p(b, mainCode, tests)
+		err := os.WriteFile("tmp.go", b.Bytes(), 0666)
+		if err != nil {
+			log.Fatal(err)
+		}
+		out, err := exec.Command("go", "run", "tmp.go").CombinedOutput()
+		if err != nil {
+			log.Fatalf("go run tmp.go: %v\n%s", err, out)
+		}
+		print(".")
+		if force {
+			print("\nPASS\n")
+		}
+		b.Reset()
+		tests = ""
+		p(b, "package main\n\n")
+		p(b, "const verbose = %v\n\n", verbose)
+	}
+
+	p(b, "package main\n\n")
+	p(b, "const verbose = %v\n\n", verbose)
+	max := 2
+	if !long {
+		max = 5
+	}
+	for i := 1; i <= max; i++ {
+		maxDouble := -1
+		if long {
+			maxDouble = i
+		}
+		for double := -1; double <= maxDouble; double++ {
+			code := gen(new(bytes.Buffer), "", "", "", i, double, func(c int) bool { return true })
+			for j := 0; j < code; j++ {
+				hi := j + 1
+				if long {
+					hi = code
+				}
+				for k := j; k < hi && k < code; k++ {
+					s := fmt.Sprintf("%d_%d_%d_%d", i, double+1, j, k)
+					code0 := gen(b, "testFunc"+s, "", "yield2", i, double, func(c int) bool { return c == j || c == k })
+					code1 := gen(b, "testSlice"+s, "_, ", "slice2", i, double, func(c int) bool { return c == j || c == k })
+					if code0 != code1 {
+						panic("bad generator")
+					}
+					tests += "test" + s + "()\n"
+					p(b, testCode, "test"+s, []int{j, k}, "testFunc"+s, "testSlice"+s)
+					flush(false)
+				}
+			}
+		}
+	}
+	for i := 1; i <= max; i++ {
+		maxDouble := -1
+		if long {
+			maxDouble = i
+		}
+		for double := -1; double <= maxDouble; double++ {
+			s := fmt.Sprintf("%d_%d", i, double+1)
+			code := gen(b, "testFunc"+s, "", "yield2", i, double, func(c int) bool { return true })
+			code1 := gen(b, "testSlice"+s, "_, ", "slice2", i, double, func(c int) bool { return true })
+			if code != code1 {
+				panic("bad generator")
+			}
+			tests += "test" + s + "()\n"
+			var all []int
+			for j := 0; j < code; j++ {
+				all = append(all, j)
+			}
+			p(b, testCode, "test"+s, all, "testFunc"+s, "testSlice"+s)
+			flush(false)
+		}
+	}
+	if long {
+		flush(true)
+		os.Remove("tmp.go")
+		return
+	}
+
+	p(b, mainCode, tests)
+
+	os.Stdout.Write(b.Bytes())
+}
+
+func p(b *bytes.Buffer, format string, args ...any) {
+	fmt.Fprintf(b, format, args...)
+}
+
+func gen(b *bytes.Buffer, name, prefix, rangeExpr string, depth, double int, allowed func(int) bool) int {
+	p(b, "func %s(o *output, code int) int {\n", name)
+	p(b, "	dfr := 0; _ = dfr\n")
+	code := genLoop(b, 0, prefix, rangeExpr, depth, double, 0, "", allowed)
+	p(b, "	return 0\n")
+	p(b, "}\n\n")
+	return code
+}
+
+func genLoop(b *bytes.Buffer, d int, prefix, rangeExpr string, depth, double, code int, labelSuffix string, allowed func(int) bool) int {
+	limit := 1
+	if d == double {
+		limit = 2
+	}
+	for rep := 0; rep < limit; rep++ {
+		if rep == 1 {
+			labelSuffix = "R"
+		}
+		s := fmt.Sprintf("%d%s", d, labelSuffix)
+		p(b, "	o.log(`top%s`)\n", s)
+		p(b, "	l%sa := 0\n", s)
+		p(b, "goto L%sa; L%sa:	o.log(`L%sa`)\n", s, s, s)
+		p(b, "	if l%sa++; l%sa >= 2 { o.log(`loop L%sa`); return -1 }\n", s, s, s)
+		p(b, "	l%sfor := 0\n", s)
+		p(b, "goto L%sfor; L%sfor: for f := 0; f < 1; f++ { o.log(`L%sfor`)\n", s, s, s)
+		p(b, "	if l%sfor++; l%sfor >= 2 { o.log(`loop L%sfor`); return -1 }\n", s, s, s)
+		p(b, "	l%ssw := 0\n", s)
+		p(b, "goto L%ssw; L%ssw: switch { default: o.log(`L%ssw`)\n", s, s, s)
+		p(b, "	if l%ssw++; l%ssw >= 2 { o.log(`loop L%ssw`); return -1 }\n", s, s, s)
+		p(b, "	l%ssel := 0\n", s)
+		p(b, "goto L%ssel; L%ssel: select { default: o.log(`L%ssel`)\n", s, s, s)
+		p(b, "	if l%ssel++; l%ssel >= 2 { o.log(`loop L%ssel`); return -1 }\n", s, s, s)
+		p(b, "	l%s := 0\n", s)
+		p(b, "goto L%s; L%s:	for %s i%s := range %s {\n", s, s, prefix, s, rangeExpr)
+		p(b, "	o.log1(`L%s top`, i%s)\n", s, s)
+		p(b, "	if l%s++; l%s >= 4 { o.log(`loop L%s`); return -1 }\n", s, s, s)
+		printTests := func() {
+			if code++; allowed(code) {
+				p(b, "	if code == %v { break }\n", code)
+			}
+			if code++; allowed(code) {
+				p(b, "	if code == %v { continue }\n", code)
+			}
+			if code++; allowed(code) {
+				p(b, "	switch { case code == %v: continue }\n", code)
+			}
+			if code++; allowed(code) {
+				p(b, "	if code == %v { return %[1]v }\n", code)
+			}
+			if code++; allowed(code) {
+				p(b, "	if code == %v { select { default: break } }\n", code)
+			}
+			if code++; allowed(code) {
+				p(b, "	if code == %v { switch { default: break } }\n", code)
+			}
+			if code++; allowed(code) {
+				p(b, "	if code == %v { dfr++; defer o.log1(`defer %d`, dfr) }\n", code, code)
+			}
+			for i := d; i > 0; i-- {
+				suffix := labelSuffix
+				if i < double {
+					suffix = ""
+				}
+				if code++; allowed(code) {
+					p(b, "	if code == %v { break L%d%s }\n", code, i, suffix)
+				}
+				if code++; allowed(code) {
+					p(b, "	if code == %v { select { default: break L%d%s } }\n", code, i, suffix)
+				}
+				if code++; allowed(code) {
+					p(b, "	if code == %v { break L%d%s }\n", code, i, suffix)
+				}
+				if code++; allowed(code) {
+					p(b, "	if code == %v { break L%d%ssw }\n", code, i, suffix)
+				}
+				if code++; allowed(code) {
+					p(b, "	if code == %v { break L%d%ssel }\n", code, i, suffix)
+				}
+				if code++; allowed(code) {
+					p(b, "	if code == %v { break L%d%sfor }\n", code, i, suffix)
+				}
+				if code++; allowed(code) {
+					p(b, "	if code == %v { continue L%d%sfor }\n", code, i, suffix)
+				}
+				if code++; allowed(code) {
+					p(b, "	if code == %v { goto L%d%sa }\n", code, i, suffix)
+				}
+				if code++; allowed(code) {
+					p(b, "	if code == %v { goto L%d%s }\n", code, i, suffix)
+				}
+				if code++; allowed(code) {
+					p(b, "	if code == %v { goto L%d%sb }\n", code, i, suffix)
+				}
+			}
+		}
+		printTests()
+		if d < depth {
+			if rep == 1 {
+				double = d // signal to children to use the rep=1 labels
+			}
+			code = genLoop(b, d+1, prefix, rangeExpr, depth, double, code, labelSuffix, allowed)
+			printTests()
+		}
+		p(b, "	o.log(`L%s bot`)\n", s)
+		p(b, "	}\n")
+		p(b, "	o.log(`L%ssel bot`)\n", s)
+		p(b, "	}\n")
+		p(b, "	o.log(`L%ssw bot`)\n", s)
+		p(b, "	}\n")
+		p(b, "	o.log(`L%sfor bot`)\n", s)
+		p(b, "	}\n")
+		p(b, "	o.log(`done%s`)\n", s)
+		p(b, "goto L%sb; L%sb: o.log(`L%sb`)\n", s, s, s)
+	}
+	return code
+}
+
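+// testCode is the per-test template: it runs the generated func-range and
+// slice-range variants for every code in all and panics if their return
+// values or traces ever differ.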
+var testCode = `
+func %s() {
+	all := %#v
+	for i := 0; i < len(all); i++ {
+		c := all[i]
+		outFunc := run(%s, c)
+		outSlice := run(%s, c)
+		if !outFunc.eq(outSlice) {
+			println("mismatch", "%[3]s", "%[4]s", c)
+			println()
+			println("func:")
+			outFunc.print()
+			println()
+			println("slice:")
+			outSlice.print()
+			panic("mismatch")
+		}
+	}
+	if verbose {
+		println("did", "%[3]s", "%[4]s", len(all))
+	}
+}
+`
+
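+// mainCode supplies main plus the shared helpers used by every generated
+// test: yield2 and slice2 as the two range operands, the output trace type,
+// and run.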
+var mainCode = `
+
+func main() {
+	if verbose {
+		println("main")
+	}
+	%s
+}
+
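+// yield2 is the func-range counterpart of slice2: it yields 1, then 2.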
+func yield2(yield func(int) bool) { _ = yield(1) && yield(2) }
+var slice2 = []int{1, 2}
+
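+// output records the return value and execution trace of one run.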
+type output struct {
+	ret int
+	trace []any
+}
+
+func (o *output) log(x any) {
+	o.trace = append(o.trace, x)
+}
+
+func (o *output) log1(x, y any) {
+	o.trace = append(o.trace, x, y)
+}
+
+func (o *output) eq(p *output) bool {
+	if o.ret != p.ret || len(o.trace) != len(p.trace) {
+		return false
+	}
+	for i, x := range o.trace {
+		if x != p.trace[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func (o *output) print() {
+	println("ret", o.ret, "trace-len", len(o.trace))
+	for i := 0; i < len(o.trace); i++ {
+		print("#", i, " ")
+		switch x := o.trace[i].(type) {
+		case int:
+			print(x)
+		case string:
+			print(x)
+		default:
+			print(x)
+		}
+		print("\n")
+	}
+}
+
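+// run invokes f with a fresh output and the test code i, recording its
+// return value alongside the trace f builds up.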
+func run(f func(*output, int) int, i int) *output {
+	o := &output{}
+	o.ret = f(o, i)
+	return o
+}
+
+`
+
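+// skip prints a minimal, compilable package main program.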
+func skip() {
+	const code = `
+package main
+func main() {
+}
+`
+	fmt.Printf("%s\n", code)
+}
diff --git a/test/recover4.go b/test/recover4.go
index 7cab15a..19b9494 100644
--- a/test/recover4.go
+++ b/test/recover4.go
@@ -1,6 +1,7 @@
-// +build linux darwin
 // run
 
+//go:build linux || darwin
+
 // Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/sigchld.go b/test/sigchld.go
index fc6cf91..ad5b18e 100644
--- a/test/sigchld.go
+++ b/test/sigchld.go
@@ -1,6 +1,7 @@
-// +build !plan9,!windows,!wasip1
 // run
 
+//go:build !plan9 && !windows && !wasip1
+
 // Copyright 2009 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
diff --git a/test/typeparam/boundmethod.go b/test/typeparam/boundmethod.go
index 510519a..b78318c 100644
--- a/test/typeparam/boundmethod.go
+++ b/test/typeparam/boundmethod.go
@@ -5,7 +5,7 @@
 // license that can be found in the LICENSE file.
 
 // This test illustrates how a type bound method (String below) can be implemented
-// either by a concrete type (myint below) or a instantiated generic type
+// either by a concrete type (myint below) or an instantiated generic type
 // (StringInt[myint] below).
 
 package main
diff --git a/test/typeparam/issue50485.dir/a.go b/test/typeparam/issue50485.dir/a.go
index 3a7c71a..97cf4d2 100644
--- a/test/typeparam/issue50485.dir/a.go
+++ b/test/typeparam/issue50485.dir/a.go
@@ -219,7 +219,6 @@
 func (r ApplicativeFunctor2[H, HT, A1, A2, R]) Ap(a A1) ApplicativeFunctor1[Cons[A1, H], A1, A2, R] {
 
 	return r.ApOption(Some(a))
-
 }
 
 func Applicative2[A1, A2, R any](fn Func2[A1, A2, R]) ApplicativeFunctor2[Nil, Nil, A1, A2, R] {
diff --git a/test/typeparam/issue51232.go b/test/typeparam/issue51232.go
index 0d25e18..f4728f6 100644
--- a/test/typeparam/issue51232.go
+++ b/test/typeparam/issue51232.go
@@ -13,19 +13,19 @@
 type Fn[RCT RC[RG], RG any] func(RCT)
 
 type F[RCT RC[RG], RG any] interface {
-	Fn() Fn[RCT] // ERROR "got 1 arguments"
+	Fn() Fn[RCT] // ERROR "not enough type arguments for type Fn: have 1, want 2"
 }
 
 type concreteF[RCT RC[RG], RG any] struct {
-	makeFn func() Fn[RCT] // ERROR "got 1 arguments"
+	makeFn func() Fn[RCT] // ERROR "not enough type arguments for type Fn: have 1, want 2"
 }
 
-func (c *concreteF[RCT, RG]) Fn() Fn[RCT] { // ERROR "got 1 arguments"
+func (c *concreteF[RCT, RG]) Fn() Fn[RCT] { // ERROR "not enough type arguments for type Fn: have 1, want 2"
 	return c.makeFn()
 }
 
-func NewConcrete[RCT RC[RG], RG any](Rc RCT) F[RCT] { // ERROR "got 1 arguments"
-	return &concreteF[RCT]{ // ERROR "cannot use" "got 1 arguments"
+func NewConcrete[RCT RC[RG], RG any](Rc RCT) F[RCT] { // ERROR "not enough type arguments for type F: have 1, want 2"
+	return &concreteF[RCT]{ // ERROR "cannot use" "not enough type arguments for type concreteF: have 1, want 2"
 		makeFn: nil,
 	}
 }
diff --git a/test/typeparam/issue51233.go b/test/typeparam/issue51233.go
index 96a25dd..5f2a045 100644
--- a/test/typeparam/issue51233.go
+++ b/test/typeparam/issue51233.go
@@ -13,16 +13,16 @@
 
 type Fn[RCT RC[RG], RG any] func(RCT)
 
-type FFn[RCT RC[RG], RG any] func() Fn[RCT] // ERROR "got 1 arguments"
+type FFn[RCT RC[RG], RG any] func() Fn[RCT] // ERROR "not enough type arguments for type Fn: have 1, want 2"
 
 type F[RCT RC[RG], RG any] interface {
-	Fn() Fn[RCT] // ERROR "got 1 arguments"
+	Fn() Fn[RCT] // ERROR "not enough type arguments for type Fn: have 1, want 2"
 }
 
 type concreteF[RCT RC[RG], RG any] struct {
-	makeFn FFn[RCT] // ERROR "got 1 arguments"
+	makeFn FFn[RCT] // ERROR "not enough type arguments for type FFn: have 1, want 2"
 }
 
-func (c *concreteF[RCT, RG]) Fn() Fn[RCT] { // ERROR "got 1 arguments"
+func (c *concreteF[RCT, RG]) Fn() Fn[RCT] { // ERROR "not enough type arguments for type Fn: have 1, want 2"
 	return c.makeFn()
 }
diff --git a/test/typeparam/issue51925.go b/test/typeparam/issue51925.go
index 0a385ac..abebf67 100644
--- a/test/typeparam/issue51925.go
+++ b/test/typeparam/issue51925.go
@@ -25,7 +25,6 @@
 		return x
 	}
 	return y
-
 }
 
 // Min returns the minimum element of `nums`.
diff --git a/test/typeparam/issue54765.go b/test/typeparam/issue54765.go
index 364567d..69a0d37 100644
--- a/test/typeparam/issue54765.go
+++ b/test/typeparam/issue54765.go
@@ -8,7 +8,6 @@
 // arguments. (pointer-to-nih types are okay though.)
 
 //go:build cgo
-// +build cgo
 
 package p